+
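+/*
+ * mach_continuous_time
+ *
+ * Returns mach_absolute_time() plus the accumulated time spent asleep
+ * (mach_absolutetime_asleep), so the value keeps advancing across sleep.
+ * The sleep offset is read before and after the timestamp; if the two
+ * reads differ, an update raced with us and the read is retried.
+ */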
+uint64_t
+mach_continuous_time(void)
+{
+ while(1) {
+ uint64_t read1 = mach_absolutetime_asleep;
+ uint64_t absolute = mach_absolute_time();
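+		/* Ensure the re-read of the sleep offset below is not reordered before the timestamp read. */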
+ OSMemoryBarrier();
+ uint64_t read2 = mach_absolutetime_asleep;
+
+ if(__builtin_expect(read1 == read2, 1)) {
+ return absolute + read1;
+ }
+ }
+}
+
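+/*
+ * mach_continuous_approximate_time
+ *
+ * Same as mach_continuous_time(), but uses the cheaper, less precise
+ * mach_approximate_time() timestamp, paired with the sleep offset via
+ * the same retry loop.
+ */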
+uint64_t
+mach_continuous_approximate_time(void)
+{
+ while(1) {
+ uint64_t read1 = mach_absolutetime_asleep;
+ uint64_t absolute = mach_approximate_time();
+ OSMemoryBarrier();
+ uint64_t read2 = mach_absolutetime_asleep;
+
+ if(__builtin_expect(read1 == read2, 1)) {
+ return absolute + read1;
+ }
+ }
+}
+
+/*
+ * continuoustime_to_absolutetime
+ * Must be called with interrupts disabled
+ * Returned value is only valid until the next update to
+ * mach_continuous_time
+ */
+uint64_t
+continuoustime_to_absolutetime(uint64_t conttime)
+{
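+	/*
+	 * A continuous time at or before the accumulated sleep offset has
+	 * no corresponding absolute time, so clamp to 0 rather than
+	 * underflowing.
+	 */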
+ if (conttime <= mach_absolutetime_asleep)
+ return 0;
+ else
+ return conttime - mach_absolutetime_asleep;
+}
+
+/*
+ * absolutetime_to_continuoustime
+ * Must be called with interrupts disabled
+ * Returned value is only valid until the next update to
+ * mach_continuous_time
+ */
+uint64_t
+absolutetime_to_continuoustime(uint64_t abstime)
+{
+ return abstime + mach_absolutetime_asleep;
+}
+
+#if CONFIG_DTRACE
+
+/*
+ * clock_get_calendar_nanotime_nowait
+ *
+ * Description: Non-blocking version of clock_get_calendar_nanotime()
+ *
+ * Notes: This function operates by separately tracking calendar time
+ * updates using a two element structure to copy the calendar
+ * state, which may be asynchronously modified. It utilizes
+ * barrier instructions in the tracking process and in the local
+ * stable snapshot process in order to ensure that a consistent
+ * snapshot is used to perform the calculation.
+ */
+void
+clock_get_calendar_nanotime_nowait(
+ clock_sec_t *secs,
+ clock_nsec_t *nanosecs)
+{
+ int i = 0;
+ uint64_t now;
+ struct unlocked_clock_calend stable;
+ struct bintime bt;
+
+ for (;;) {
+ stable = flipflop[i]; /* take snapshot */
+
+		/*
+		 * Use a barrier instruction to ensure atomicity. We AND
+		 * off the "in progress" bit to get the current generation
+		 * count.
+		 */
+ (void)hw_atomic_and(&stable.gen, ~(uint32_t)1);
+
+		/*
+		 * If an update _is_ in progress, the generation count will be
+		 * off by one; if it _was_ in progress, it will be off by two;
+		 * and if we caught it at a good time, it will be equal (and
+		 * our snapshot is therefore stable).
+		 */
+ if (flipflop[i].gen == stable.gen)
+ break;
+
+ /* Switch to the other element of the flipflop, and try again. */
+ i ^= 1;
+ }
+
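+	/*
+	 * Convert to calendar time: scale the elapsed time since the last
+	 * calendar update and add it to the calendar value recorded at
+	 * that update.
+	 */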
+ now = mach_absolute_time();
+
+ bt = get_scaled_time(now);
+
+ bintime_add(&bt, &clock_calend.bintime);
+
+ bintime2nsclock(&bt, secs, nanosecs);
+}
+
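+/*
+ * clock_track_calend_nowait
+ *
+ * Description:	Writer side of the flipflop used by
+ *		clock_get_calendar_nanotime_nowait(). Copies the current
+ *		clock_calend state into both elements, bracketing each copy
+ *		with generation count updates so that a concurrent reader
+ *		can detect an update in progress.
+ */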
+static void
+clock_track_calend_nowait(void)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ struct clock_calend tmp = clock_calend;
+
+		/*
+		 * Set the low bit of the generation count; since we use a
+		 * barrier instruction to do this, we are guaranteed that this
+		 * will flag an update in progress to an async caller trying
+		 * to examine the contents.
+		 */
+ (void)hw_atomic_or(&flipflop[i].gen, 1);
+
+ flipflop[i].calend = tmp;
+
+ /*
+ * Increment the generation count to clear the low bit to
+ * signal completion. If a caller compares the generation
+ * count after taking a copy while in progress, the count
+ * will be off by two.
+ */
+ (void)hw_atomic_add(&flipflop[i].gen, 1);
+ }
+}
+
+#endif /* CONFIG_DTRACE */
+