+
+#if CONFIG_DTRACE
+
+/*
+ * clock_get_calendar_nanotime_nowait
+ *
+ * Description: Non-blocking version of clock_get_calendar_nanotime()
+ *
+ * Notes: This function operates by separately tracking calendar time
+ * updates using a two-element structure to copy the calendar
+ * state, which may be asynchronously modified. It uses barrier
+ * instructions in both the tracking process and the local stable
+ * snapshot process to ensure that a consistent snapshot is used
+ * to perform the calculation.
+ */
+void
+clock_get_calendar_nanotime_nowait(
+	clock_sec_t		*secs,
+	clock_nsec_t		*nanosecs)
+{
+	int			i = 0;
+	uint64_t		now;
+	struct unlocked_clock_calend stable;
+
+	for (;;) {
+		stable = flipflop[i];		/* take snapshot */
+
+		/*
+		 * Use a barrier instruction to ensure atomicity.  We AND
+		 * off the "in progress" bit to get the current generation
+		 * count.
+		 */
+		(void)hw_atomic_and(&stable.gen, ~(uint32_t)1);
+
+		/*
+		 * If an update _is_ in progress, the generation count will be
+		 * off by one; if it _was_ in progress, it will be off by two;
+		 * and if we caught it at a good time, it will be equal (and
+		 * our snapshot is therefore stable).
+		 */
+		if (flipflop[i].gen == stable.gen)
+			break;
+
+		/* Switch to the other element of the flipflop, and try again. */
+		i ^= 1;
+	}
+
+	now = mach_absolute_time();
+
+	/*
+	 * While a negative calendar adjustment (a backwards slew) is in
+	 * progress, hold the apparent time at adjstart until adjoffset worth
+	 * of absolute time has elapsed, then apply the adjustment by
+	 * subtracting adjoffset in full.
+	 */
+	if (stable.calend.adjdelta < 0) {
+		uint32_t	t32;
+
+		if (now > stable.calend.adjstart) {
+			t32 = (uint32_t)(now - stable.calend.adjstart);
+
+			if (t32 > stable.calend.adjoffset)
+				now -= stable.calend.adjoffset;
+			else
+				now = stable.calend.adjstart;
+		}
+	}
+
+	now += stable.calend.offset;
+
+	/* Convert to microseconds, then scale up; the result has microsecond granularity. */
+	absolutetime_to_microtime(now, secs, nanosecs);
+	*nanosecs *= NSEC_PER_USEC;
+
+	*secs += (clock_sec_t)stable.calend.epoch;
+}
+
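+/*
+ * Illustrative sketch of the flipflop[] state assumed above (the actual
+ * definition is presumed to live elsewhere in this file): a two-element
+ * array pairing a private copy of the calendar state with a generation
+ * count, roughly:
+ *
+ *	static struct unlocked_clock_calend {
+ *		struct clock_calend	calend;	// copy of calendar state
+ *		uint32_t		gen;	// generation count; low bit set
+ *						// while an update is in progress
+ *	} flipflop[2];
+ *
+ * The writer below updates the two elements one at a time, so a reader that
+ * catches one element mid-update can retry against the other.
+ */
+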
+static void
+clock_track_calend_nowait(void)
+{
+	int		i;
+
+	for (i = 0; i < 2; i++) {
+		struct clock_calend tmp = clock_calend;
+
+		/*
+		 * Set the low bit of the generation count; since we use a
+		 * barrier instruction to do this, we are guaranteed that this
+		 * will flag an update in progress to an async caller trying
+		 * to examine the contents.
+		 */
+		(void)hw_atomic_or(&flipflop[i].gen, 1);
+
+		flipflop[i].calend = tmp;
+
+		/*
+		 * Increment the generation count to clear the low bit and
+		 * signal completion.  If a caller compares the generation
+		 * count after taking a copy while an update is in progress,
+		 * the count will be off by two.
+		 */
+		(void)hw_atomic_add(&flipflop[i].gen, 1);
+	}
+}
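+
+/*
+ * Minimal userspace analogue of the flipflop protocol above, as a sketch
+ * only: C11 atomics and fences stand in for the kernel's hw_atomic_*
+ * barrier primitives, and a plain two-word payload stands in for
+ * struct clock_calend.  The names "payload", "snap", "track" and
+ * "read_nowait" are hypothetical and not part of this change.
+ *
+ *	#include <stdatomic.h>
+ *	#include <stdint.h>
+ *
+ *	struct payload { uint64_t a, b; };	// stand-in for struct clock_calend
+ *
+ *	static struct snap {
+ *		struct payload	data;		// copy of the authoritative state
+ *		_Atomic uint32_t gen;		// generation; low bit = update in progress
+ *	} flipflop[2];
+ *
+ *	// Writer: update one element at a time, so a reader that catches an
+ *	// element mid-update can fall back to the other one.
+ *	static void
+ *	track(struct payload authoritative)
+ *	{
+ *		for (int i = 0; i < 2; i++) {
+ *			atomic_fetch_or(&flipflop[i].gen, 1);	// flag in progress
+ *			flipflop[i].data = authoritative;
+ *			atomic_fetch_add(&flipflop[i].gen, 1);	// clear bit, bump count
+ *		}
+ *	}
+ *
+ *	// Reader: accept a snapshot only if the generation count (with the
+ *	// in-progress bit masked off) is unchanged after the copy completes.
+ *	static struct payload
+ *	read_nowait(void)
+ *	{
+ *		int i = 0;
+ *		for (;;) {
+ *			uint32_t gen = atomic_load(&flipflop[i].gen) & ~(uint32_t)1;
+ *			struct payload copy = flipflop[i].data;
+ *			atomic_thread_fence(memory_order_acquire);
+ *			if (atomic_load(&flipflop[i].gen) == gen)
+ *				return copy;
+ *			i ^= 1;			// try the other element
+ *		}
+ *	}
+ *
+ * The data copies are shown as plain struct assignments for brevity; the
+ * kernel code instead relies on the barrier semantics of hw_atomic_or/and/add
+ * to order them with respect to the generation-count operations.
+ */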
+
+#endif /* CONFIG_DTRACE */
+