/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>

#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <kern/thread_call.h>
#include <libkern/OSAtomic.h>

#include <IOKit/IOPlatformExpert.h>

#include <machine/commpage.h>

#include <mach/mach_traps.h>
#include <mach/mach_time.h>

#include <sys/kdebug.h>
uint32_t	hz_tick_interval = 1;
decl_simple_lock_data(,clock_lock)

#define clock_lock()	\
	simple_lock(&clock_lock)

#define clock_unlock()	\
	simple_unlock(&clock_lock)

#define clock_lock_init()	\
	simple_lock_init(&clock_lock, 0)
#ifdef kdp_simple_lock_is_acquired
boolean_t
kdp_clock_is_locked()
{
	return kdp_simple_lock_is_acquired(&clock_lock);
}
#endif
/*
 *	Time of day (calendar) variables.
 *
 *	TOD <- (seconds + epoch, fraction) <- CONV(current absolute time + offset)
 *
 *	where CONV converts absolute time units into seconds and a fraction.
 */
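/*
 * A small worked example of the conversion above (illustrative values only,
 * not taken from this file):
 *
 *	If epoch = 1,500,000,000 s and offset corresponds to 2.25 s of
 *	absolute time, then for an absolute-time reading equivalent to
 *	100.50 s since boot:
 *
 *	    CONV(abstime + offset) = 102 s + 0.75 s fraction
 *	    TOD                    = (1,500,000,102 s, 750,000 us)
 */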
static struct clock_calend {
	uint64_t	epoch;
	uint64_t	offset;
	uint64_t	epoch_absolute;

	int32_t		adjdelta;	/* Nanosecond time delta for this adjustment period */
	uint64_t	adjstart;	/* Absolute time value for start of this adjustment period */
	uint32_t	adjoffset;	/* Absolute time offset for this adjustment period as absolute value */
} clock_calend;
/*
 *	Unlocked calendar flipflop; this is used to track a clock_calend such
 *	that we can safely access a snapshot of a valid clock_calend structure
 *	without needing to take any locks to do it.
 *
 *	The trick is to use a generation count and set the low bit when it is
 *	being updated/read; by doing this, we guarantee, through use of the
 *	hw_atomic functions, that the generation is incremented when the bit
 *	is cleared atomically (by using a 1 bit add).
 */
static struct unlocked_clock_calend {
	struct clock_calend	calend;		/* copy of calendar */
	uint32_t		gen;		/* generation count */
} flipflop[2];
static void clock_track_calend_nowait(void);
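/*
 * A minimal sketch of the reader side of this scheme (illustrative only,
 * assuming the flipflop/gen layout above; the actual reader is
 * clock_get_calendar_nanotime_nowait() below):
 *
 *	struct unlocked_clock_calend snap;
 *	int i = 0;
 *
 *	for (;;) {
 *		snap = flipflop[i];				// copy one element
 *		(void)hw_atomic_and(&snap.gen, ~(uint32_t)1);	// drop "in progress" bit
 *		if (flipflop[i].gen == snap.gen)		// unchanged => snapshot is stable
 *			break;
 *		i ^= 1;						// writer was active; try the other element
 *	}
 */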
/*
 *	Calendar adjustment variables and values.
 */
#define calend_adjperiod	(NSEC_PER_SEC / 100)	/* adjustment period, ns */
#define calend_adjskew		(40 * NSEC_PER_USEC)	/* "standard" skew, ns / period */
#define	calend_adjbig		(NSEC_PER_SEC)		/* use 10x skew above adjbig ns */
static int64_t			calend_adjtotal;	/* Nanosecond remaining total adjustment */
static uint64_t			calend_adjdeadline;	/* Absolute time value for next adjustment period */
static uint32_t			calend_adjinterval;	/* Absolute time interval of adjustment period */

static timer_call_data_t	calend_adjcall;
static uint32_t			calend_adjactive;
static uint32_t		calend_set_adjustment(
				long		*secs,
				int		*microsecs);

static void		calend_adjust_call(void);
static uint32_t		calend_adjust(void);
void	_clock_delay_until_deadline(uint64_t		interval,
				    uint64_t		deadline);

void	_clock_delay_until_deadline_with_leeway(uint64_t	interval,
						 uint64_t	deadline,
						 uint64_t	leeway);
/* Seconds boottime epoch */
static uint64_t		clock_boottime;
static uint32_t		clock_boottime_usec;
#define TIME_ADD(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if (((rfrac) += (frac)) >= (unit)) {		\
		(rfrac) -= (unit);			\
		(rsecs) += 1;				\
	}						\
	(rsecs) += (secs);				\
MACRO_END

#define TIME_SUB(rsecs, secs, rfrac, frac, unit)	\
MACRO_BEGIN						\
	if ((int)((rfrac) -= (frac)) < 0) {		\
		(rfrac) += (unit);			\
		(rsecs) -= 1;				\
	}						\
	(rsecs) -= (secs);				\
MACRO_END
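/*
 * Example of the carry/borrow behavior of these macros (values are
 * illustrative only):
 *
 *	clock_sec_t  rsecs = 10;  clock_usec_t rfrac = 900000;
 *
 *	TIME_ADD(rsecs, 2, rfrac, 200000, USEC_PER_SEC);
 *		// 900000 + 200000 >= 1000000, so the fraction wraps to
 *		// 100000 and the carry plus the 2 seconds gives rsecs == 13.
 *
 *	TIME_SUB(rsecs, 2, rfrac, 200000, USEC_PER_SEC);
 *		// 100000 - 200000 underflows, so the fraction borrows back
 *		// to 900000 and rsecs drops by 2 + 1 == 3, giving rsecs == 10.
 */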
/*
 *	clock_config:
 *
 *	Called once at boot to configure the clock subsystem.
 */
void
clock_config(void)
{
	clock_lock_init();

	timer_call_setup(&calend_adjcall, (timer_call_func_t)calend_adjust_call, NULL);
}
/*
 *	Called on a processor each time it is started.
 */
/*
 *	clock_timebase_init:
 *
 *	Called by machine dependent code
 *	to initialize areas dependent on the
 *	timebase value.  May be called multiple
 *	times during start up.
 */
void
clock_timebase_init(void)
{
	uint64_t	abstime;

	nanoseconds_to_absolutetime(calend_adjperiod, &abstime);
	calend_adjinterval = (uint32_t)abstime;

	nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
	hz_tick_interval = (uint32_t)abstime;

	sched_timebase_init();
}
/*
 *	mach_timebase_info_trap:
 *
 *	User trap returns timebase constant.
 */
kern_return_t
mach_timebase_info_trap(
	struct mach_timebase_info_trap_args	*args)
{
	mach_vm_address_t		out_info_addr = args->info;
	mach_timebase_info_data_t	info;

	clock_timebase_info(&info);

	copyout((void *)&info, out_info_addr, sizeof (info));

	return (KERN_SUCCESS);
}
/*
 *	clock_get_calendar_microtime:
 *
 *	Returns the current calendar value,
 *	microseconds as the fraction.
 */
void
clock_get_calendar_microtime(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs)
{
	clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL);
}
static void
clock_get_calendar_absolute_and_microtime_locked(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs,
	uint64_t		*abstime)
{
	uint64_t now = mach_absolute_time();
	if (abstime)
		*abstime = now;

	if (clock_calend.adjdelta < 0) {
		uint32_t	t32;

		/*
		 * Since offset is decremented during a negative adjustment,
		 * ensure that time increases monotonically without going
		 * temporarily backwards.
		 * If the delta has not yet passed, now is set to the start
		 * of the current adjustment period; otherwise, we're between
		 * the expiry of the delta and the next call to calend_adjust(),
		 * and we offset accordingly.
		 */
		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}
	}

	now += clock_calend.offset;

	absolutetime_to_microtime(now, secs, microsecs);

	*secs += (clock_sec_t)clock_calend.epoch;
}
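/*
 * Worked example of the clamp above (hypothetical numbers, in absolute-time
 * units): with adjstart = 5000 and adjoffset = 100,
 *
 *	now = 5040  ->  t32 = 40  <= 100, so now is held at adjstart (5000);
 *	now = 5250  ->  t32 = 250 >  100, so now becomes 5250 - 100 = 5150.
 *
 * Either way the value reported never decreases while the negative
 * adjustment is being applied.
 */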
/*
 *	clock_get_calendar_absolute_and_microtime:
 *
 *	Returns the current calendar value,
 *	microseconds as the fraction. Also
 *	returns mach_absolute_time if abstime
 *	is not NULL.
 */
void
clock_get_calendar_absolute_and_microtime(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs,
	uint64_t		*abstime)
{
	clock_get_calendar_absolute_and_microtime_locked(secs, microsecs, abstime);
}
/*
 *	clock_get_calendar_nanotime:
 *
 *	Returns the current calendar value,
 *	nanoseconds as the fraction.
 *
 *	Since we do not have an interface to
 *	set the calendar with resolution greater
 *	than a microsecond, we honor that here.
 */
void
clock_get_calendar_nanotime(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	clock_get_calendar_absolute_and_microtime_locked(secs, nanosecs, NULL);

	*nanosecs *= NSEC_PER_USEC;
}
/*
 *	clock_gettimeofday:
 *
 *	Kernel interface for commpage implementation of
 *	gettimeofday() syscall.
 *
 *	Returns the current calendar value, and updates the
 *	commpage info as appropriate.  Because most calls to
 *	gettimeofday() are handled in user mode by the commpage,
 *	this routine should be used infrequently.
 */
void
clock_gettimeofday(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs)
{
	clock_gettimeofday_and_absolute_time(secs, microsecs, NULL);
}
void
clock_gettimeofday_and_absolute_time(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs,
	uint64_t		*mach_time)
{
	uint64_t		now;
	uint32_t		t32;

	now = mach_absolute_time();
	if (mach_time)
		*mach_time = now;

	if (clock_calend.adjdelta >= 0) {
		clock_gettimeofday_set_commpage(now, clock_calend.epoch, clock_calend.offset, secs, microsecs);
	}
	else {
		/*
		 * A negative adjustment is in progress; clamp as in the
		 * locked read path so time does not go backwards.
		 */
		if (now > clock_calend.adjstart) {
			t32 = (uint32_t)(now - clock_calend.adjstart);

			if (t32 > clock_calend.adjoffset)
				now -= clock_calend.adjoffset;
			else
				now = clock_calend.adjstart;
		}

		now += clock_calend.offset;

		absolutetime_to_microtime(now, secs, microsecs);

		*secs += (clock_sec_t)clock_calend.epoch;
	}
}
/*
 *	clock_set_calendar_microtime:
 *
 *	Sets the current calendar value by
 *	recalculating the epoch and offset
 *	from the system clock.
 *
 *	Also adjusts the boottime to keep the
 *	value consistent, writes the new
 *	calendar value to the platform clock,
 *	and sends calendar change notifications.
 */
void
clock_set_calendar_microtime(
	clock_sec_t		secs,
	clock_usec_t		microsecs)
{
	clock_sec_t		sys;
	clock_usec_t		microsys;
	clock_sec_t		oldsecs;
	clock_usec_t		oldmicrosecs;
	uint64_t		absolutesys;
	clock_sec_t		newsecs;
	clock_usec_t		newmicrosecs;
	uint64_t		commpage_value;

	newsecs = secs;
	newmicrosecs = microsecs;

	commpage_disable_timestamp();

	/*
	 *	Adjust the boottime based on the delta.
	 */
	clock_get_calendar_absolute_and_microtime_locked(&oldsecs, &oldmicrosecs, &absolutesys);
	if (oldsecs < secs || (oldsecs == secs && oldmicrosecs < microsecs)) {
		/* Calendar moved forward: grow the boottime by the delta. */
		long deltasecs = secs, deltamicrosecs = microsecs;
		TIME_SUB(deltasecs, oldsecs, deltamicrosecs, oldmicrosecs, USEC_PER_SEC);
		TIME_ADD(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
	} else {
		/* Calendar moved backward: shrink the boottime by the delta. */
		long deltasecs = oldsecs, deltamicrosecs = oldmicrosecs;
		TIME_SUB(deltasecs, secs, deltamicrosecs, microsecs, USEC_PER_SEC);
		TIME_SUB(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
	}
	commpage_value = clock_boottime * USEC_PER_SEC + clock_boottime_usec;

	/*
	 *	Calculate the new calendar epoch based on
	 *	the new value and the system clock.
	 */
	absolutetime_to_microtime(absolutesys, &sys, &microsys);
	TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

	/*
	 *	Set the new calendar epoch.
	 */
	clock_calend.epoch = secs;

	nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

	clock_interval_to_absolutetime_interval((uint32_t) secs, NSEC_PER_SEC, &clock_calend.epoch_absolute);
	clock_calend.epoch_absolute += clock_calend.offset;

	/*
	 *	Cancel any adjustment in progress.
	 */
	calend_adjtotal = clock_calend.adjdelta = 0;

	/*
	 *	Set the new value for the platform clock.
	 */
	PESetUTCTimeOfDay(newsecs, newmicrosecs);

	commpage_update_boottime(commpage_value);

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();
	host_notify_calendar_set();

	clock_track_calend_nowait();
}
/*
 *	clock_initialize_calendar:
 *
 *	Set the calendar and related clocks
 *	from the platform clock at boot or
 *	wake event.
 *
 *	Also sends host notifications.
 */

uint64_t mach_absolutetime_asleep;
uint64_t mach_absolutetime_last_sleep;
void
clock_initialize_calendar(void)
{
	clock_sec_t		sys;			// sleepless time since boot in seconds
	clock_sec_t		secs;			// current UTC time
	clock_sec_t		utc_offset_secs;	// difference between current UTC time and sleepless time since boot
	clock_usec_t		microsys;
	clock_usec_t		microsecs;
	clock_usec_t		utc_offset_microsecs;
	uint64_t		new_epoch;		// utc_offset_secs in mach absolute time units

	PEGetUTCTimeOfDay(&secs, &microsecs);

	commpage_disable_timestamp();

	if ((long)secs >= (long)clock_boottime) {
		/*
		 *	Initialize the boot time based on the platform clock.
		 */
		if (clock_boottime == 0){
			clock_boottime = secs;
			clock_boottime_usec = microsecs;
			commpage_update_boottime(clock_boottime * USEC_PER_SEC + clock_boottime_usec);
		}

		/*
		 *	Calculate the new calendar epoch based on
		 *	the platform clock and the system clock.
		 */
		clock_get_system_microtime(&sys, &microsys);
		utc_offset_secs = secs;
		utc_offset_microsecs = microsecs;

		// This macro mutates utc_offset_secs and utc_offset_microsecs
		TIME_SUB(utc_offset_secs, sys, utc_offset_microsecs, microsys, USEC_PER_SEC);

		/*
		 *	Set the new calendar epoch.
		 */
		clock_calend.epoch = utc_offset_secs;

		nanoseconds_to_absolutetime((uint64_t)utc_offset_microsecs * NSEC_PER_USEC, &clock_calend.offset);

		clock_interval_to_absolutetime_interval((uint32_t) utc_offset_secs, NSEC_PER_SEC, &new_epoch);
		new_epoch += clock_calend.offset;

		if (clock_calend.epoch_absolute)
		{
			/* new_epoch is the difference between absolute_time and utc_time;
			 * this value will remain constant until the system sleeps.
			 * After a sleep, the difference grows by however long the system
			 * was asleep.  epoch_absolute holds the previous difference, so
			 * the change in the difference is the duration of the last sleep.
			 */
			if (new_epoch > clock_calend.epoch_absolute) {
				mach_absolutetime_last_sleep = new_epoch - clock_calend.epoch_absolute;
			}
			else {
				mach_absolutetime_last_sleep = 0;
			}
			mach_absolutetime_asleep += mach_absolutetime_last_sleep;
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_CLOCK,MACH_EPOCH_CHANGE) | DBG_FUNC_NONE,
				(uintptr_t) mach_absolutetime_last_sleep,
				(uintptr_t) mach_absolutetime_asleep,
				(uintptr_t) (mach_absolutetime_last_sleep >> 32),
				(uintptr_t) (mach_absolutetime_asleep >> 32),
				0);
		}
		clock_calend.epoch_absolute = new_epoch;

		/*
		 *	Cancel any adjustment in progress.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	commpage_update_mach_continuous_time(mach_absolutetime_asleep);
	adjust_cont_time_thread_calls();

	/*
	 *	Send host notifications.
	 */
	host_notify_calendar_change();

	clock_track_calend_nowait();
}
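/*
 * A small worked example of the sleep accounting above (hypothetical values,
 * expressed in seconds for readability):
 *
 *	Before sleep:  clock_calend.epoch_absolute == 1000
 *	               (UTC leads the sleepless clock by 1000 s)
 *	After wake:    new_epoch == 1030
 *	               (PEGetUTCTimeOfDay advanced; mach_absolute_time did not)
 *
 *	mach_absolutetime_last_sleep = 1030 - 1000 = 30 s of sleep, and
 *	mach_absolutetime_asleep accumulates that 30 s, which is what
 *	mach_continuous_time() later adds back onto mach_absolute_time().
 */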
/*
 *	clock_get_boottime_nanotime:
 *
 *	Return the boottime, used by sysctl.
 */
void
clock_get_boottime_nanotime(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	*secs = (clock_sec_t)clock_boottime;
	*nanosecs = (clock_nsec_t)clock_boottime_usec * NSEC_PER_USEC;
}
/*
 *	clock_get_boottime_microtime:
 *
 *	Return the boottime, used by sysctl.
 */
void
clock_get_boottime_microtime(
	clock_sec_t		*secs,
	clock_usec_t		*microsecs)
{
	*secs = (clock_sec_t)clock_boottime;
	*microsecs = (clock_usec_t)clock_boottime_usec;
}
/*
 *	clock_adjtime:
 *
 *	Interface to adjtime() syscall.
 *
 *	Calculates adjustment variables and
 *	initiates adjustment.
 */
void
clock_adjtime(
	long		*secs,
	int		*microsecs)
{
	uint32_t	interval;

	interval = calend_set_adjustment(secs, microsecs);
	if (interval != 0) {
		calend_adjdeadline = mach_absolute_time() + interval;
		if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_SYS_CRITICAL))
			calend_adjactive++;
	}
	else
	if (timer_call_cancel(&calend_adjcall))
		calend_adjactive--;
}
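/*
 * Example of how an adjtime() request is slewed (illustrative values):
 * a request to add 1 ms is applied at calend_adjskew (40 us) per
 * calend_adjperiod (10 ms), so it takes 25 periods, roughly 250 ms of
 * wall time, to be consumed:
 *
 *	long	secs = 0;
 *	int	microsecs = 1000;	// +1 ms requested
 *
 *	clock_adjtime(&secs, &microsecs);
 *	// On return, secs/microsecs hold the remainder of any adjustment
 *	// that was still pending from an earlier call.
 */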
static uint32_t
calend_set_adjustment(
	long			*secs,
	int			*microsecs)
{
	uint64_t		now, t64;
	int64_t			total, ototal;
	uint32_t		interval = 0;

	/*
	 * Compute the total adjustment time in nanoseconds.
	 */
	total = ((int64_t)*secs * (int64_t)NSEC_PER_SEC) + (*microsecs * (int64_t)NSEC_PER_USEC);

	/*
	 * Disable commpage gettimeofday().
	 */
	commpage_disable_timestamp();

	/*
	 * Get current absolute time.
	 */
	now = mach_absolute_time();

	/*
	 * Save the old adjustment total for later return.
	 */
	ototal = calend_adjtotal;

	/*
	 * Is a new correction specified?
	 */
	if (total != 0) {
		/*
		 * Set delta to the standard, small, adjustment skew.
		 */
		int32_t		delta = calend_adjskew;

		if (total > 0) {
			/*
			 * Positive adjustment. If greater than the preset 'big'
			 * threshold, slew at a faster rate, capping if necessary.
			 */
			if (total > (int64_t) calend_adjbig)
				delta *= 10;
			if (delta > total)
				delta = (int32_t)total;

			/*
			 * Convert the delta back from ns to absolute time and store in adjoffset.
			 */
			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}
		else {
			/*
			 * Negative adjustment; therefore, negate the delta. If
			 * greater than the preset 'big' threshold, slew at a faster
			 * rate, capping if necessary.
			 */
			if (total < (int64_t) -calend_adjbig)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = (int32_t)total;

			/*
			 * Save the current absolute time. Subsequent time operations occurring
			 * during this negative correction can make use of this value to ensure
			 * that time increases monotonically.
			 */
			clock_calend.adjstart = now;

			/*
			 * Convert the delta back from ns to absolute time and store in adjoffset.
			 */
			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}

		/*
		 * Store the total adjustment time in ns.
		 */
		calend_adjtotal = total;

		/*
		 * Store the delta for this adjustment period in ns.
		 */
		clock_calend.adjdelta = delta;

		/*
		 * Set the interval in absolute time for later return.
		 */
		interval = calend_adjinterval;
	}
	else {
		/*
		 * No change; clear any prior adjustment.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	/*
	 * If a prior correction was in progress, return the
	 * remaining uncorrected time from it.
	 */
	if (ototal != 0) {
		*secs = (long)(ototal / (long)NSEC_PER_SEC);
		*microsecs = (int)((ototal % (int)NSEC_PER_SEC) / (int)NSEC_PER_USEC);
	}
	else
		*secs = *microsecs = 0;

	clock_track_calend_nowait();

	return (interval);
}
static void
calend_adjust_call(void)
{
	uint32_t	interval;

	if (--calend_adjactive == 0) {
		interval = calend_adjust();
		if (interval != 0) {
			clock_deadline_for_periodic_event(interval, mach_absolute_time(), &calend_adjdeadline);

			if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_SYS_CRITICAL))
				calend_adjactive++;
		}
	}
}
static uint32_t
calend_adjust(void)
{
	uint64_t		now, t64;
	int32_t			delta;
	uint32_t		interval = 0;

	commpage_disable_timestamp();

	now = mach_absolute_time();

	delta = clock_calend.adjdelta;

	if (delta > 0) {
		clock_calend.offset += clock_calend.adjoffset;

		calend_adjtotal -= delta;
		if (delta > calend_adjtotal) {
			clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}
	}
	else if (delta < 0) {
		clock_calend.offset -= clock_calend.adjoffset;

		calend_adjtotal -= delta;
		if (delta < calend_adjtotal) {
			clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;

			nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
			clock_calend.adjoffset = (uint32_t)t64;
		}

		if (clock_calend.adjdelta != 0)
			clock_calend.adjstart = now;
	}

	if (clock_calend.adjdelta != 0)
		interval = calend_adjinterval;

	clock_track_calend_nowait();

	return (interval);
}
/*
 *	Wait / delay routines.
 */
static void
mach_wait_until_continue(
	__unused void	*parameter,
	wait_result_t	wresult)
{
	thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
	/*NOTREACHED*/
}
/*
 * mach_wait_until_trap: Suspend execution of the calling thread until the specified time has passed
 *
 * Parameters:    args->deadline          Absolute time (mach_absolute_time units) to wait until
 */
kern_return_t
mach_wait_until_trap(
	struct mach_wait_until_trap_args	*args)
{
	uint64_t		deadline = args->deadline;
	wait_result_t		wresult;

	wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE,
						   TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
	if (wresult == THREAD_WAITING)
		wresult = thread_block(mach_wait_until_continue);

	return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}
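/*
 * User-space callers typically reach this trap through the mach_wait_until()
 * stub; a minimal user-level sketch (not part of this file):
 *
 *	#include <mach/mach_time.h>
 *
 *	mach_timebase_info_data_t tb;
 *	mach_timebase_info(&tb);
 *
 *	// Sleep for ~10 ms: convert ns to absolute units and pass a deadline.
 *	uint64_t delta = 10000000ULL * tb.denom / tb.numer;
 *	mach_wait_until(mach_absolute_time() + delta);
 */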
void
clock_delay_until(
	uint64_t		deadline)
{
	uint64_t		now = mach_absolute_time();

	if (now >= deadline)
		return;

	_clock_delay_until_deadline(deadline - now, deadline);
}
/*
 * Preserve the original precise interval that the client
 * requested for comparison to the spin threshold.
 */
void
_clock_delay_until_deadline(
	uint64_t		interval,
	uint64_t		deadline)
{
	_clock_delay_until_deadline_with_leeway(interval, deadline, 0);
}
/*
 * Like _clock_delay_until_deadline, but it accepts a
 * leeway value.
 */
void
_clock_delay_until_deadline_with_leeway(
	uint64_t		interval,
	uint64_t		deadline,
	uint64_t		leeway)
{
	if (	ml_delay_should_spin(interval)	||
		get_preemption_level() != 0	||
		ml_get_interrupts_enabled() == FALSE	) {
		machine_delay_until(interval, deadline);
	} else {
		/*
		 * For now, assume a leeway request of 0 means the client does not want a leeway
		 * value. We may want to change this interpretation in the future.
		 */
		if (leeway) {
			assert_wait_deadline_with_leeway((event_t)clock_delay_until, THREAD_UNINT, TIMEOUT_URGENCY_LEEWAY, deadline, leeway);
		} else {
			assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);
		}

		thread_block(THREAD_CONTINUE_NULL);
	}
}
void
delay_for_interval(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	uint64_t		abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	_clock_delay_until_deadline(abstime, mach_absolute_time() + abstime);
}
void
delay_for_interval_with_leeway(
	uint32_t		interval,
	uint32_t		leeway,
	uint32_t		scale_factor)
{
	uint64_t		abstime_interval;
	uint64_t		abstime_leeway;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime_interval);
	clock_interval_to_absolutetime_interval(leeway, scale_factor, &abstime_leeway);

	_clock_delay_until_deadline_with_leeway(abstime_interval, mach_absolute_time() + abstime_interval, abstime_leeway);
}
void
delay(
	int		usec)
{
	delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}
/*
 *	Miscellaneous routines.
 */
void
clock_interval_to_deadline(
	uint32_t		interval,
	uint32_t		scale_factor,
	uint64_t		*result)
{
	uint64_t		abstime;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);

	*result = mach_absolute_time() + abstime;
}
void
clock_absolutetime_interval_to_deadline(
	uint64_t		abstime,
	uint64_t		*result)
{
	*result = mach_absolute_time() + abstime;
}
void
clock_continuoustime_interval_to_deadline(
	uint64_t		conttime,
	uint64_t		*result)
{
	*result = mach_continuous_time() + conttime;
}
void
clock_get_uptime(
	uint64_t		*result)
{
	*result = mach_absolute_time();
}
void
clock_deadline_for_periodic_event(
	uint64_t		interval,
	uint64_t		abstime,
	uint64_t		*deadline)
{
	assert(interval != 0);

	*deadline += interval;

	if (*deadline <= abstime) {
		*deadline = abstime + interval;
		abstime = mach_absolute_time();

		if (*deadline <= abstime)
			*deadline = abstime + interval;
	}
}
uint64_t
mach_continuous_time(void)
{
	while (1) {
		uint64_t read1 = mach_absolutetime_asleep;
		uint64_t absolute = mach_absolute_time();
		OSMemoryBarrier();
		uint64_t read2 = mach_absolutetime_asleep;

		if (__builtin_expect(read1 == read2, 1)) {
			return absolute + read1;
		}
		/* mach_absolutetime_asleep changed underneath us; retry. */
	}
}
uint64_t
mach_continuous_approximate_time(void)
{
	while (1) {
		uint64_t read1 = mach_absolutetime_asleep;
		uint64_t absolute = mach_approximate_time();
		OSMemoryBarrier();
		uint64_t read2 = mach_absolutetime_asleep;

		if (__builtin_expect(read1 == read2, 1)) {
			return absolute + read1;
		}
		/* mach_absolutetime_asleep changed underneath us; retry. */
	}
}
/*
 * continuoustime_to_absolutetime
 * Must be called with interrupts disabled
 * Returned value is only valid until the next update to
 * mach_continuous_time
 */
uint64_t
continuoustime_to_absolutetime(uint64_t conttime) {
	if (conttime <= mach_absolutetime_asleep)
		return 0;
	else
		return conttime - mach_absolutetime_asleep;
}
/*
 * absolutetime_to_continuoustime
 * Must be called with interrupts disabled
 * Returned value is only valid until the next update to
 * mach_continuous_time
 */
uint64_t
absolutetime_to_continuoustime(uint64_t abstime) {
	return abstime + mach_absolutetime_asleep;
}
#if	CONFIG_DTRACE

/*
 *	clock_get_calendar_nanotime_nowait
 *
 *	Description:	Non-blocking version of clock_get_calendar_nanotime()
 *
 *	Notes:		This function operates by separately tracking calendar time
 *			updates using a two element structure to copy the calendar
 *			state, which may be asynchronously modified.  It utilizes
 *			barrier instructions in the tracking process and in the local
 *			stable snapshot process in order to ensure that a consistent
 *			snapshot is used to perform the calculation.
 */
void
clock_get_calendar_nanotime_nowait(
	clock_sec_t		*secs,
	clock_nsec_t		*nanosecs)
{
	int			i = 0;
	uint64_t		now;
	struct unlocked_clock_calend stable;

	for (;;) {
		stable = flipflop[i];		/* take snapshot */

		/*
		 * Use a barrier instruction to ensure atomicity.  We AND
		 * off the "in progress" bit to get the current generation
		 * count.
		 */
		(void)hw_atomic_and(&stable.gen, ~(uint32_t)1);

		/*
		 * If an update _is_ in progress, the generation count will be
		 * off by one, if it _was_ in progress, it will be off by two,
		 * and if we caught it at a good time, it will be equal (and
		 * our snapshot is therefore stable).
		 */
		if (flipflop[i].gen == stable.gen)
			break;

		/* Switch to the other element of the flipflop, and try again. */
		i ^= 1;
	}

	now = mach_absolute_time();

	if (stable.calend.adjdelta < 0) {
		uint32_t	t32;

		if (now > stable.calend.adjstart) {
			t32 = (uint32_t)(now - stable.calend.adjstart);

			if (t32 > stable.calend.adjoffset)
				now -= stable.calend.adjoffset;
			else
				now = stable.calend.adjstart;
		}
	}

	now += stable.calend.offset;

	absolutetime_to_microtime(now, secs, nanosecs);
	*nanosecs *= NSEC_PER_USEC;

	*secs += (clock_sec_t)stable.calend.epoch;
}
static void
clock_track_calend_nowait(void)
{
	int		i;

	for (i = 0; i < 2; i++) {
		struct clock_calend tmp = clock_calend;

		/*
		 * Set the low bit of the generation count; since we use a
		 * barrier instruction to do this, we are guaranteed that this
		 * will flag an update in progress to an async caller trying
		 * to examine the contents.
		 */
		(void)hw_atomic_or(&flipflop[i].gen, 1);

		flipflop[i].calend = tmp;

		/*
		 * Increment the generation count to clear the low bit to
		 * signal completion.  If a caller compares the generation
		 * count after taking a copy while in progress, the count
		 * will be off by two.
		 */
		(void)hw_atomic_add(&flipflop[i].gen, 1);
	}
}

#endif	/* CONFIG_DTRACE */