]> git.saurik.com Git - apple/xnu.git/blame_incremental - osfmk/kern/clock.c
xnu-3789.1.32.tar.gz
[apple/xnu.git] / osfmk / kern / clock.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 */
33
34#include <mach/mach_types.h>
35
36#include <kern/spl.h>
37#include <kern/sched_prim.h>
38#include <kern/thread.h>
39#include <kern/clock.h>
40#include <kern/host_notify.h>
41#include <kern/thread_call.h>
42#include <libkern/OSAtomic.h>
43
44#include <IOKit/IOPlatformExpert.h>
45
46#include <machine/commpage.h>
47
48#include <mach/mach_traps.h>
49#include <mach/mach_time.h>
50
51#include <sys/kdebug.h>
52
/* One scheduler tick (10 ms) in absolute-time units; set in clock_timebase_init(). */
uint32_t	hz_tick_interval = 1;


/* Spin lock guarding clock_calend and the calendar adjustment state. */
decl_simple_lock_data(,clock_lock)

/* Acquire the calendar lock (callers first raise spl via splclock()). */
#define clock_lock()	\
	simple_lock(&clock_lock)

#define clock_unlock()	\
	simple_unlock(&clock_lock)

#define clock_lock_init()	\
	simple_lock_init(&clock_lock, 0)
66
#ifdef kdp_simple_lock_is_acquired
/*
 * kdp_clock_is_locked:
 *
 * Debugger (KDP) helper: report whether the calendar lock is currently
 * held, so the debugger can avoid touching calendar state mid-update.
 */
boolean_t
kdp_clock_is_locked(void)	/* explicit (void): an empty list declares no prototype in C */
{
	return kdp_simple_lock_is_acquired(&clock_lock);
}
#endif
73
/*
 * Time of day (calendar) variables.
 *
 * Algorithm:
 *
 * TOD <- (seconds + epoch, fraction) <- CONV(current absolute time + offset)
 *
 * where CONV converts absolute time units into seconds and a fraction.
 */
static struct clock_calend {
	uint64_t	epoch;		/* UTC seconds corresponding to absolute time 0 */
	uint64_t	offset;		/* sub-second part of the epoch, in absolute-time units */
	uint64_t	epoch_absolute;	/* epoch converted to absolute-time units, plus offset */

	int32_t		adjdelta;	/* Nanosecond time delta for this adjustment period */
	uint64_t	adjstart;	/* Absolute time value for start of this adjustment period */
	uint32_t	adjoffset;	/* Absolute time offset for this adjustment period as absolute value */
} clock_calend;
92
#if CONFIG_DTRACE

/*
 * Unlocked calendar flipflop; this is used to track a clock_calend such
 * that we can safely access a snapshot of a valid clock_calend structure
 * without needing to take any locks to do it.
 *
 * The trick is to use a generation count and set the low bit when it is
 * being updated/read; by doing this, we guarantee, through use of the
 * hw_atomic functions, that the generation is incremented when the bit
 * is cleared atomically (by using a 1 bit add).
 */
static struct unlocked_clock_calend {
	struct clock_calend	calend;		/* copy of calendar */
	uint32_t		gen;		/* generation count; low bit set while an update is in flight */
} flipflop[2];

/* Publish the current clock_calend into the flipflop (writer side). */
static void clock_track_calend_nowait(void);

#endif
113
/*
 * Calendar adjustment variables and values.
 */
#define calend_adjperiod	(NSEC_PER_SEC / 100)	/* adjustment period, ns */
#define calend_adjskew		(40 * NSEC_PER_USEC)	/* "standard" skew, ns / period */
#define calend_adjbig		(NSEC_PER_SEC)		/* use 10x skew above adjbig ns */

static int64_t		calend_adjtotal;	/* Nanosecond remaining total adjustment */
static uint64_t		calend_adjdeadline;	/* Absolute time value for next adjustment period */
static uint32_t		calend_adjinterval;	/* Absolute time interval of adjustment period */

/* Timer that drives successive adjustment periods; adjactive counts pending firings. */
static timer_call_data_t	calend_adjcall;
static uint32_t			calend_adjactive;

static uint32_t calend_set_adjustment(
			long		*secs,
			int		*microsecs);

static void calend_adjust_call(void);
static uint32_t calend_adjust(void);

void _clock_delay_until_deadline(uint64_t	interval,
				 uint64_t	deadline);
void _clock_delay_until_deadline_with_leeway(uint64_t	interval,
					     uint64_t	deadline,
					     uint64_t	leeway);

/* Seconds boottime epoch (set at first calendar init, slewed by settimeofday) */
static uint64_t clock_boottime;
static uint32_t clock_boottime_usec;	/* microsecond fraction of clock_boottime */
144
/*
 * TIME_ADD / TIME_SUB:
 *
 * Add (or subtract) the (secs, frac) pair into the running (rsecs, rfrac)
 * pair, normalizing the fraction into [0, unit).  A single carry/borrow
 * suffices because frac is expected to be less than unit.
 * NOTE(review): TIME_SUB detects the borrow with an (int) cast, so the
 * fraction must fit in 31 bits — true for usec/nsec units used here.
 */
#define TIME_ADD(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if (((rfrac) += (frac)) >= (unit)) {		\
		(rfrac) -= (unit);			\
		(rsecs) += 1;				\
	}						\
	(rsecs) += (secs);				\
MACRO_END

#define TIME_SUB(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if ((int)((rfrac) -= (frac)) < 0) {		\
		(rfrac) += (unit);			\
		(rsecs) -= 1;				\
	}						\
	(rsecs) -= (secs);				\
MACRO_END
162
/*
 * clock_config:
 *
 * Called once at boot to configure the clock subsystem.
 */
void
clock_config(void)
{
	/* Initialize the lock guarding calendar state. */
	clock_lock_init();

	/* Timer used to apply periodic adjtime() slew periods. */
	timer_call_setup(&calend_adjcall, (timer_call_func_t)calend_adjust_call, NULL);

	/* Machine-dependent / legacy clock configuration. */
	clock_oldconfig();
}
177
/*
 * clock_init:
 *
 * Called on a processor each time started.
 */
void
clock_init(void)
{
	/* Per-processor start-up work is handled by the legacy layer. */
	clock_oldinit();
}
188
189/*
190 * clock_timebase_init:
191 *
192 * Called by machine dependent code
193 * to initialize areas dependent on the
194 * timebase value. May be called multiple
195 * times during start up.
196 */
197void
198clock_timebase_init(void)
199{
200 uint64_t abstime;
201
202 nanoseconds_to_absolutetime(calend_adjperiod, &abstime);
203 calend_adjinterval = (uint32_t)abstime;
204
205 nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
206 hz_tick_interval = (uint32_t)abstime;
207
208 sched_timebase_init();
209}
210
211/*
212 * mach_timebase_info_trap:
213 *
214 * User trap returns timebase constant.
215 */
216kern_return_t
217mach_timebase_info_trap(
218 struct mach_timebase_info_trap_args *args)
219{
220 mach_vm_address_t out_info_addr = args->info;
221 mach_timebase_info_data_t info;
222
223 clock_timebase_info(&info);
224
225 copyout((void *)&info, out_info_addr, sizeof (info));
226
227 return (KERN_SUCCESS);
228}
229
230/*
231 * Calendar routines.
232 */
233
/*
 * clock_get_calendar_microtime:
 *
 * Returns the current calendar value,
 * microseconds as the fraction.
 */
void
clock_get_calendar_microtime(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs)
{
	/* Convenience wrapper: caller does not want the absolute-time reading. */
	clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL);
}
247
/*
 * clock_get_calendar_absolute_and_microtime_locked:
 *
 * Core calendar read.  Caller must hold clock_lock with interrupts
 * raised (splclock).  Converts the current absolute time plus the
 * calendar offset into seconds/microseconds, clamping 'now' during a
 * negative adjustment so reported time never moves backwards.
 * Optionally returns the raw absolute-time reading through 'abstime'.
 */
static void
clock_get_calendar_absolute_and_microtime_locked(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs,
	uint64_t		*abstime)
{
	uint64_t now  = mach_absolute_time();
	if (abstime)
		*abstime = now;

	if (clock_calend.adjdelta < 0) {
		uint32_t	t32;

		/*
		 * Since offset is decremented during a negative adjustment,
		 * ensure that time increases monotonically without going
		 * temporarily backwards.
		 * If the delta has not yet passed, now is set to the start
		 * of the current adjustment period; otherwise, we're between
		 * the expiry of the delta and the next call to calend_adjust(),
		 * and we offset accordingly.
		 */
		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, microsecs);

	*secs += (clock_sec_t)clock_calend.epoch;
}
286
287/*
288 * clock_get_calendar_absolute_and_microtime:
289 *
290 * Returns the current calendar value,
291 * microseconds as the fraction. Also
292 * returns mach_absolute_time if abstime
293 * is not NULL.
294 */
295void
296clock_get_calendar_absolute_and_microtime(
297 clock_sec_t *secs,
298 clock_usec_t *microsecs,
299 uint64_t *abstime)
300{
301 spl_t s;
302
303 s = splclock();
304 clock_lock();
305
306 clock_get_calendar_absolute_and_microtime_locked(secs, microsecs, abstime);
307
308 clock_unlock();
309 splx(s);
310}
311
312/*
313 * clock_get_calendar_nanotime:
314 *
315 * Returns the current calendar value,
316 * nanoseconds as the fraction.
317 *
318 * Since we do not have an interface to
319 * set the calendar with resolution greater
320 * than a microsecond, we honor that here.
321 */
322void
323clock_get_calendar_nanotime(
324 clock_sec_t *secs,
325 clock_nsec_t *nanosecs)
326{
327 spl_t s;
328
329 s = splclock();
330 clock_lock();
331
332 clock_get_calendar_absolute_and_microtime_locked(secs, nanosecs, NULL);
333
334 *nanosecs *= NSEC_PER_USEC;
335
336 clock_unlock();
337 splx(s);
338}
339
/*
 * clock_gettimeofday:
 *
 * Kernel interface for commpage implementation of
 * gettimeofday() syscall.
 *
 * Returns the current calendar value, and updates the
 * commpage info as appropriate.  Because most calls to
 * gettimeofday() are handled in user mode by the commpage,
 * this routine should be used infrequently.
 */
void
clock_gettimeofday(
	clock_sec_t	*secs,
	clock_usec_t	*microsecs)
{
	/* Convenience wrapper: caller does not want the absolute-time reading. */
	clock_gettimeofday_and_absolute_time(secs, microsecs, NULL);
}
358
/*
 * clock_gettimeofday_and_absolute_time:
 *
 * Returns the current calendar value, refreshing the commpage timestamp
 * when no negative adjustment is in progress.  Optionally returns an
 * absolute-time reading through 'mach_time'.
 */
void
clock_gettimeofday_and_absolute_time(
	clock_sec_t	*secs,
	clock_usec_t	*microsecs,
	uint64_t	*mach_time)
{
	uint64_t		now;
	spl_t			s;

	s = splclock();
	clock_lock();

	now = mach_absolute_time();

	if (clock_calend.adjdelta >= 0) {
		/* Common case: no negative slew; the commpage helper computes
		 * the result and republishes the user-visible timestamp. */
		clock_gettimeofday_set_commpage(now, clock_calend.epoch, clock_calend.offset, secs, microsecs);
	}
	else {
		uint32_t	t32;

		/*
		 * Negative adjustment in progress: clamp 'now' exactly as
		 * clock_get_calendar_absolute_and_microtime_locked() does,
		 * so time never appears to move backwards.
		 */
		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}

		now += clock_calend.offset;

		absolutetime_to_microtime(now, secs, microsecs);

		*secs += (clock_sec_t)clock_calend.epoch;
	}

	clock_unlock();
	splx(s);

	/* NOTE(review): in the negative-adjustment path 'now' was modified
	 * above, so this is not the raw mach_absolute_time() reading. */
	if (mach_time) {
		*mach_time = now;
	}
}
402
/*
 * clock_set_calendar_microtime:
 *
 * Sets the current calendar value by
 * recalculating the epoch and offset
 * from the system clock.
 *
 * Also adjusts the boottime to keep the
 * value consistent, writes the new
 * calendar value to the platform clock,
 * and sends calendar change notifications.
 */
void
clock_set_calendar_microtime(
	clock_sec_t		secs,
	clock_usec_t		microsecs)
{
	clock_sec_t		sys;
	clock_usec_t		microsys;
	uint64_t		absolutesys;
	clock_sec_t		newsecs;	/* saved for the platform clock write below */
	clock_sec_t		oldsecs;
	clock_usec_t		newmicrosecs;
	clock_usec_t		oldmicrosecs;
	uint64_t		commpage_value;
	spl_t			s;

	newsecs = secs;
	newmicrosecs = microsecs;

	s = splclock();
	clock_lock();

	/* Invalidate the commpage timestamp while calendar state changes. */
	commpage_disable_timestamp();

	/*
	 * Adjust the boottime based on the delta.
	 */
	clock_get_calendar_absolute_and_microtime_locked(&oldsecs, &oldmicrosecs, &absolutesys);
	if (oldsecs < secs || (oldsecs == secs && oldmicrosecs < microsecs)){
		// moving forwards
		long deltasecs = secs, deltamicrosecs = microsecs;
		TIME_SUB(deltasecs, oldsecs, deltamicrosecs, oldmicrosecs, USEC_PER_SEC);
		TIME_ADD(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
	} else {
		// moving backwards
		long deltasecs = oldsecs, deltamicrosecs = oldmicrosecs;
		TIME_SUB(deltasecs, secs, deltamicrosecs, microsecs, USEC_PER_SEC);
		TIME_SUB(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
	}
	/* Boottime as a single microsecond count, for the commpage. */
	commpage_value = clock_boottime * USEC_PER_SEC + clock_boottime_usec;

	/*
	 * Calculate the new calendar epoch based on
	 * the new value and the system clock.
	 */
	absolutetime_to_microtime(absolutesys, &sys, &microsys);
	TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

	/*
	 * Set the new calendar epoch.
	 */
	clock_calend.epoch = secs;

	nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

	clock_interval_to_absolutetime_interval((uint32_t) secs, NSEC_PER_SEC, &clock_calend.epoch_absolute);
	clock_calend.epoch_absolute += clock_calend.offset;

	/*
	 * Cancel any adjustment in progress.
	 */
	calend_adjtotal = clock_calend.adjdelta = 0;

	clock_unlock();

	/*
	 * Set the new value for the platform clock.
	 */
	PESetUTCTimeOfDay(newsecs, newmicrosecs);

	splx(s);

	commpage_update_boottime(commpage_value);

	/*
	 * Send host notifications.
	 */
	host_notify_calendar_change();
	host_notify_calendar_set();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}
498
/*
 * clock_initialize_calendar:
 *
 * Set the calendar and related clocks
 * from the platform clock at boot or
 * wake event.
 *
 * Also sends host notifications.
 */

/* Cumulative absolute time spent asleep, and length of the most recent sleep. */
uint64_t mach_absolutetime_asleep;
uint64_t mach_absolutetime_last_sleep;
511
void
clock_initialize_calendar(void)
{
	clock_sec_t		sys;			// sleepless time since boot in seconds
	clock_sec_t		secs;			// Current UTC time
	clock_sec_t		utc_offset_secs;	// Difference in current UTC time and sleepless time since boot
	clock_usec_t		microsys;
	clock_usec_t		microsecs;
	clock_usec_t		utc_offset_microsecs;
	uint64_t		new_epoch;		// utc_offset_secs in mach absolute time units
	spl_t			s;

	PEGetUTCTimeOfDay(&secs, &microsecs);

	s = splclock();
	clock_lock();

	/* Invalidate the commpage timestamp while calendar state changes. */
	commpage_disable_timestamp();

	/* Only accept a platform clock that has not fallen behind boot time. */
	if ((long)secs >= (long)clock_boottime) {
		/*
		 * Initialize the boot time based on the platform clock.
		 */
		if (clock_boottime == 0){
			clock_boottime = secs;
			clock_boottime_usec = microsecs;
			commpage_update_boottime(clock_boottime * USEC_PER_SEC + clock_boottime_usec);
		}

		/*
		 * Calculate the new calendar epoch based on
		 * the platform clock and the system clock.
		 */
		clock_get_system_microtime(&sys, &microsys);
		utc_offset_secs = secs;
		utc_offset_microsecs = microsecs;

		// This macro mutates utc_offset_secs and utc_offset_microsecs
		TIME_SUB(utc_offset_secs, sys, utc_offset_microsecs, microsys, USEC_PER_SEC);

		/*
		 * Set the new calendar epoch.
		 */

		clock_calend.epoch = utc_offset_secs;

		nanoseconds_to_absolutetime((uint64_t)utc_offset_microsecs * NSEC_PER_USEC, &clock_calend.offset);

		clock_interval_to_absolutetime_interval((uint32_t) utc_offset_secs, NSEC_PER_SEC, &new_epoch);
		new_epoch += clock_calend.offset;

		if (clock_calend.epoch_absolute)
		{
			/* new_epoch is the difference between absolute_time and utc_time
			 * this value will remain constant until the system sleeps.
			 * Then, difference between values would go up by the time the system sleeps.
			 * epoch_absolute is the last difference between the two values
			 * so the difference in the differences would be the time of the last sleep
			 */

			if(new_epoch > clock_calend.epoch_absolute) {
				mach_absolutetime_last_sleep = new_epoch - clock_calend.epoch_absolute;
			}
			else {
				mach_absolutetime_last_sleep = 0;
			}
			mach_absolutetime_asleep += mach_absolutetime_last_sleep;
			KERNEL_DEBUG_CONSTANT(
				  MACHDBG_CODE(DBG_MACH_CLOCK,MACH_EPOCH_CHANGE) | DBG_FUNC_NONE,
				  (uintptr_t) mach_absolutetime_last_sleep,
				  (uintptr_t) mach_absolutetime_asleep,
				  (uintptr_t) (mach_absolutetime_last_sleep >> 32),
				  (uintptr_t) (mach_absolutetime_asleep >> 32),
				  0);
		}
		clock_calend.epoch_absolute = new_epoch;

		/*
		 * Cancel any adjustment in progress.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	/* Publish the new sleep total and requeue continuous-time thread calls. */
	commpage_update_mach_continuous_time(mach_absolutetime_asleep);
	adjust_cont_time_thread_calls();

	clock_unlock();
	splx(s);

	/*
	 * Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}
610
611/*
612 * clock_get_boottime_nanotime:
613 *
614 * Return the boottime, used by sysctl.
615 */
616void
617clock_get_boottime_nanotime(
618 clock_sec_t *secs,
619 clock_nsec_t *nanosecs)
620{
621 spl_t s;
622
623 s = splclock();
624 clock_lock();
625
626 *secs = (clock_sec_t)clock_boottime;
627 *nanosecs = (clock_nsec_t)clock_boottime_usec * NSEC_PER_USEC;
628
629 clock_unlock();
630 splx(s);
631}
632
633/*
634 * clock_get_boottime_nanotime:
635 *
636 * Return the boottime, used by sysctl.
637 */
638void
639clock_get_boottime_microtime(
640 clock_sec_t *secs,
641 clock_usec_t *microsecs)
642{
643 spl_t s;
644
645 s = splclock();
646 clock_lock();
647
648 *secs = (clock_sec_t)clock_boottime;
649 *microsecs = (clock_nsec_t)clock_boottime_usec;
650
651 clock_unlock();
652 splx(s);
653}
654
/*
 * clock_adjtime:
 *
 * Interface to adjtime() syscall.
 *
 * Calculates adjustment variables and
 * initiates adjustment.
 */
void
clock_adjtime(
	long		*secs,
	int		*microsecs)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	clock_lock();

	/* Compute slew parameters; returns 0 if no periodic slew is needed. */
	interval = calend_set_adjustment(secs, microsecs);
	if (interval != 0) {
		calend_adjdeadline = mach_absolute_time() + interval;
		/* timer_call_enter returns TRUE if the call was already queued. */
		if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_SYS_CRITICAL))
			calend_adjactive++;
	}
	else
	if (timer_call_cancel(&calend_adjcall))
		calend_adjactive--;

	clock_unlock();
	splx(s);
}
687
/*
 * calend_set_adjustment:
 *
 * Compute slew parameters for an adjtime() request.  On entry,
 * *secs/*microsecs hold the requested total adjustment; on return they
 * hold the remaining uncorrected amount of any prior adjustment.
 * Returns the absolute-time interval until the next adjustment period,
 * or 0 when no periodic slew is required.
 * Caller must hold clock_lock with interrupts raised.
 */
static uint32_t
calend_set_adjustment(
	long			*secs,
	int			*microsecs)
{
	uint64_t		now, t64;
	int64_t			total, ototal;
	uint32_t		interval = 0;

	/*
	 * Compute the total adjustment time in nanoseconds.
	 */
	total = ((int64_t)*secs * (int64_t)NSEC_PER_SEC) + (*microsecs * (int64_t)NSEC_PER_USEC);

	/*
	 * Disable commpage gettimeofday().
	 */
	commpage_disable_timestamp();

	/*
	 * Get current absolute time.
	 */
	now = mach_absolute_time();

	/*
	 * Save the old adjustment total for later return.
	 */
	ototal = calend_adjtotal;

	/*
	 * Is a new correction specified?
	 */
	if (total != 0) {
		/*
		 * Set delta to the standard, small, adjustment skew.
		 */
		int32_t		delta = calend_adjskew;

		if (total > 0) {
			/*
			 * Positive adjustment. If greater than the preset 'big'
			 * threshold, slew at a faster rate, capping if necessary.
			 */
			if (total > (int64_t) calend_adjbig)
				delta *= 10;
			if (delta > total)
				delta = (int32_t)total;

			/*
			 * Convert the delta back from ns to absolute time and store in adjoffset.
			 */
			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}
		else {
			/*
			 * Negative adjustment; therefore, negate the delta. If
			 * greater than the preset 'big' threshold, slew at a faster
			 * rate, capping if necessary.
			 */
			if (total < (int64_t) -calend_adjbig)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = (int32_t)total;

			/*
			 * Save the current absolute time. Subsequent time operations occuring
			 * during this negative correction can make use of this value to ensure
			 * that time increases monotonically.
			 */
			clock_calend.adjstart = now;

			/*
			 * Convert the delta back from ns to absolute time and store in adjoffset.
			 */
			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}

		/*
		 * Store the total adjustment time in ns.
		 */
		calend_adjtotal = total;

		/*
		 * Store the delta for this adjustment period in ns.
		 */
		clock_calend.adjdelta = delta;

		/*
		 * Set the interval in absolute time for later return.
		 */
		interval = calend_adjinterval;
	}
	else {
		/*
		 * No change; clear any prior adjustment.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	/*
	 * If an prior correction was in progress, return the
	 * remaining uncorrected time from it.
	 */
	if (ototal != 0) {
		*secs = (long)(ototal / (long)NSEC_PER_SEC);
		*microsecs = (int)((ototal % (int)NSEC_PER_SEC) / (int)NSEC_PER_USEC);
	}
	else
		*secs = *microsecs = 0;

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	return (interval);
}
807
/*
 * calend_adjust_call:
 *
 * Timer callback for the calendar adjustment period.  Applies one slew
 * step via calend_adjust() and re-arms the timer while adjustment
 * remains.  calend_adjactive gates against calls superseded by a newer
 * clock_adjtime() arming.
 */
static void
calend_adjust_call(void)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	clock_lock();

	if (--calend_adjactive == 0) {
		interval = calend_adjust();
		if (interval != 0) {
			clock_deadline_for_periodic_event(interval, mach_absolute_time(), &calend_adjdeadline);

			if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_SYS_CRITICAL))
				calend_adjactive++;
		}
	}

	clock_unlock();
	splx(s);
}
830
/*
 * calend_adjust:
 *
 * Apply one adjustment period's delta to the calendar offset and shrink
 * the remaining total.  On the final (partial) period the delta and
 * adjoffset are recomputed from what remains.  Returns the interval for
 * the next period, or 0 when the adjustment is complete.
 * Called with clock_lock held and interrupts raised.
 */
static uint32_t
calend_adjust(void)
{
	uint64_t		now, t64;
	int32_t			delta;
	uint32_t		interval = 0;

	commpage_disable_timestamp();

	now = mach_absolute_time();

	delta = clock_calend.adjdelta;

	if (delta > 0) {
		clock_calend.offset += clock_calend.adjoffset;

		calend_adjtotal -= delta;
		if (delta > calend_adjtotal) {
			/* Last step: clamp to the remaining total. */
			clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}
	}
	else
	if (delta < 0) {
		clock_calend.offset -= clock_calend.adjoffset;

		calend_adjtotal -= delta;
		if (delta < calend_adjtotal) {
			/* Last step: clamp to the remaining total. */
			clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}

		/* Restart the monotonicity clamp window for readers. */
		if (clock_calend.adjdelta != 0)
			clock_calend.adjstart = now;
	}

	if (clock_calend.adjdelta != 0)
		interval = calend_adjinterval;

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif

	return (interval);
}
880
881/*
882 * Wait / delay routines.
883 */
884static void
885mach_wait_until_continue(
886 __unused void *parameter,
887 wait_result_t wresult)
888{
889 thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
890 /*NOTREACHED*/
891}
892
893/*
894 * mach_wait_until_trap: Suspend execution of calling thread until the specified time has passed
895 *
896 * Parameters: args->deadline Amount of time to wait
897 *
898 * Returns: 0 Success
899 * !0 Not success
900 *
901 */
902kern_return_t
903mach_wait_until_trap(
904 struct mach_wait_until_trap_args *args)
905{
906 uint64_t deadline = args->deadline;
907 wait_result_t wresult;
908
909 wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE,
910 TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
911 if (wresult == THREAD_WAITING)
912 wresult = thread_block(mach_wait_until_continue);
913
914 return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
915}
916
/* Block or spin until the given absolute-time deadline has passed. */
void
clock_delay_until(
	uint64_t		deadline)
{
	uint64_t	now = mach_absolute_time();

	/* Nothing to do if the deadline is already behind us. */
	if (deadline > now)
		_clock_delay_until_deadline(deadline - now, deadline);
}
928
/*
 * Preserve the original precise interval that the client
 * requested for comparison to the spin threshold.
 */
void
_clock_delay_until_deadline(
	uint64_t		interval,
	uint64_t		deadline)
{
	/* Delegate with no leeway. */
	_clock_delay_until_deadline_with_leeway(interval, deadline, 0);
}
940
941/*
942 * Like _clock_delay_until_deadline, but it accepts a
943 * leeway value.
944 */
945void
946_clock_delay_until_deadline_with_leeway(
947 uint64_t interval,
948 uint64_t deadline,
949 uint64_t leeway)
950{
951
952 if (interval == 0)
953 return;
954
955 if ( ml_delay_should_spin(interval) ||
956 get_preemption_level() != 0 ||
957 ml_get_interrupts_enabled() == FALSE ) {
958 machine_delay_until(interval, deadline);
959 } else {
960 /*
961 * For now, assume a leeway request of 0 means the client does not want a leeway
962 * value. We may want to change this interpretation in the future.
963 */
964
965 if (leeway) {
966 assert_wait_deadline_with_leeway((event_t)clock_delay_until, THREAD_UNINT, TIMEOUT_URGENCY_LEEWAY, deadline, leeway);
967 } else {
968 assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);
969 }
970
971 thread_block(THREAD_CONTINUE_NULL);
972 }
973}
974
/* Delay for 'interval' units of 'scale_factor' nanoseconds each. */
void
delay_for_interval(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	uint64_t	abs_interval;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abs_interval);

	_clock_delay_until_deadline(abs_interval, mach_absolute_time() + abs_interval);
}
986
/* Like delay_for_interval(), with a timer-coalescing leeway in the same units. */
void
delay_for_interval_with_leeway(
	uint32_t		interval,
	uint32_t		leeway,
	uint32_t		scale_factor)
{
	uint64_t	abs_interval;
	uint64_t	abs_leeway;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abs_interval);
	clock_interval_to_absolutetime_interval(leeway, scale_factor, &abs_leeway);

	_clock_delay_until_deadline_with_leeway(abs_interval, mach_absolute_time() + abs_interval, abs_leeway);
}
1001
1002void
1003delay(
1004 int usec)
1005{
1006 delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
1007}
1008
/*
 * Miscellaneous routines.
 */

/* Compute a deadline 'interval' x 'scale_factor' ns from the current time. */
void
clock_interval_to_deadline(
	uint32_t		interval,
	uint32_t		scale_factor,
	uint64_t		*result)
{
	uint64_t	abs_interval;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abs_interval);

	*result = mach_absolute_time() + abs_interval;
}
1024
/* Deadline is 'abstime' absolute-time units from now. */
void
clock_absolutetime_interval_to_deadline(
	uint64_t		abstime,
	uint64_t		*result)
{
	*result = mach_absolute_time() + abstime;
}
1032
/* Deadline is 'conttime' units from now on the continuous (sleep-inclusive) clock. */
void
clock_continuoustime_interval_to_deadline(
	uint64_t		conttime,
	uint64_t		*result)
{
	*result = mach_continuous_time() + conttime;
}
1040
/* Return the raw absolute-time reading (excludes time spent asleep). */
void
clock_get_uptime(
	uint64_t	*result)
{
	*result = mach_absolute_time();
}
1047
/*
 * clock_deadline_for_periodic_event:
 *
 * Advance *deadline by one interval; if it has fallen behind 'abstime'
 * (periods were missed), realign it to one interval past the present.
 */
void
clock_deadline_for_periodic_event(
	uint64_t		interval,
	uint64_t		abstime,
	uint64_t		*deadline)
{
	assert(interval != 0);

	*deadline += interval;

	if (*deadline <= abstime) {
		/* Missed one or more periods: realign from the caller's time... */
		*deadline = abstime + interval;
		abstime = mach_absolute_time();

		/* ...and re-check against a fresh reading in case time advanced. */
		if (*deadline <= abstime)
			*deadline = abstime + interval;
	}
}
1066
1067uint64_t
1068mach_continuous_time(void)
1069{
1070 while(1) {
1071 uint64_t read1 = mach_absolutetime_asleep;
1072 uint64_t absolute = mach_absolute_time();
1073 OSMemoryBarrier();
1074 uint64_t read2 = mach_absolutetime_asleep;
1075
1076 if(__builtin_expect(read1 == read2, 1)) {
1077 return absolute + read1;
1078 }
1079 }
1080}
1081
1082uint64_t
1083mach_continuous_approximate_time(void)
1084{
1085 while(1) {
1086 uint64_t read1 = mach_absolutetime_asleep;
1087 uint64_t absolute = mach_approximate_time();
1088 OSMemoryBarrier();
1089 uint64_t read2 = mach_absolutetime_asleep;
1090
1091 if(__builtin_expect(read1 == read2, 1)) {
1092 return absolute + read1;
1093 }
1094 }
1095}
1096
1097/*
1098 * continuoustime_to_absolutetime
1099 * Must be called with interrupts disabled
1100 * Returned value is only valid until the next update to
1101 * mach_continuous_time
1102 */
1103uint64_t
1104continuoustime_to_absolutetime(uint64_t conttime) {
1105 if (conttime <= mach_absolutetime_asleep)
1106 return 0;
1107 else
1108 return conttime - mach_absolutetime_asleep;
1109}
1110
/*
 * absolutetime_to_continuoustime
 * Must be called with interrupts disabled
 * Returned value is only valid until the next update to
 * mach_continuous_time
 */
uint64_t
absolutetime_to_continuoustime(uint64_t abstime) {
	/* Continuous time includes the cumulative time spent asleep. */
	return abstime + mach_absolutetime_asleep;
}
1121
#if CONFIG_DTRACE

/*
 * clock_get_calendar_nanotime_nowait
 *
 * Description:	Non-blocking version of clock_get_calendar_nanotime()
 *
 * Notes:	This function operates by separately tracking calendar time
 *		updates using a two element structure to copy the calendar
 *		state, which may be asynchronously modified.  It utilizes
 *		barrier instructions in the tracking process and in the local
 *		stable snapshot process in order to ensure that a consistent
 *		snapshot is used to perform the calculation.
 */
void
clock_get_calendar_nanotime_nowait(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	int i = 0;
	uint64_t		now;
	struct unlocked_clock_calend stable;

	for (;;) {
		stable = flipflop[i];		/* take snapshot */

		/*
		 * Use barrier instructions to ensure atomicity.  We AND
		 * off the "in progress" bit to get the current generation
		 * count.
		 */
		(void)hw_atomic_and(&stable.gen, ~(uint32_t)1);

		/*
		 * If an update _is_ in progress, the generation count will be
		 * off by one, if it _was_ in progress, it will be off by two,
		 * and if we caught it at a good time, it will be equal (and
		 * our snapshot is therefore stable).
		 */
		if (flipflop[i].gen == stable.gen)
			break;

		/* Switch to the other element of the flipflop, and try again. */
		i ^= 1;
	}

	now = mach_absolute_time();

	/* Same monotonicity clamp as the locked reader: during a negative
	 * slew, do not let 'now' run past the pending adjustment. */
	if (stable.calend.adjdelta < 0) {
		uint32_t	t32;

		if (now > stable.calend.adjstart) {
			t32 = (uint32_t)(now - stable.calend.adjstart);

			if (t32 > stable.calend.adjoffset)
				now -= stable.calend.adjoffset;
			else
				now = stable.calend.adjstart;
		}
	}

	now += stable.calend.offset;

	absolutetime_to_microtime(now, secs, nanosecs);
	*nanosecs *= NSEC_PER_USEC;

	*secs += (clock_sec_t)stable.calend.epoch;
}
1190
/*
 * clock_track_calend_nowait:
 *
 * Writer side of the lock-free flipflop: publish the current (locked)
 * clock_calend into both elements, flagging each update with the low
 * generation bit so concurrent readers can detect a torn copy.
 */
static void
clock_track_calend_nowait(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		struct clock_calend tmp = clock_calend;

		/*
		 * Set the low bit of the generation count; since we use a
		 * barrier instruction to do this, we are guaranteed that this
		 * will flag an update in progress to an async caller trying
		 * to examine the contents.
		 */
		(void)hw_atomic_or(&flipflop[i].gen, 1);

		flipflop[i].calend = tmp;

		/*
		 * Increment the generation count to clear the low bit to
		 * signal completion.  If a caller compares the generation
		 * count after taking a copy while in progress, the count
		 * will be off by two.
		 */
		(void)hw_atomic_add(&flipflop[i].gen, 1);
	}
}
1218
1219#endif /* CONFIG_DTRACE */
1220