+
+#if DEVELOPMENT || DEBUG
+
+/*
+ * print_all_clock_variables_internal:
+ *
+ * DEVELOPMENT/DEBUG-only helper: dump every field of a clock_calend
+ * snapshot to the default os_log, each line tagged with the caller's
+ * function name.  Does nothing unless g_should_log_clock_adjustments
+ * is set.
+ */
+void
+print_all_clock_variables_internal(const char* func, struct clock_calend* clock_calend_cp)
+{
+ clock_sec_t offset_secs;
+ clock_usec_t offset_microsecs;
+ clock_sec_t bintime_secs;
+ clock_usec_t bintime_microsecs;
+ clock_sec_t bootime_secs;
+ clock_usec_t bootime_microsecs;
+
+ if (!g_should_log_clock_adjustments) {
+ return;
+ }
+
+ /* Convert the sec + 64-bit-fraction bintime fields to sec/usec for readable logs. */
+ bintime2usclock(&clock_calend_cp->offset, &offset_secs, &offset_microsecs);
+ bintime2usclock(&clock_calend_cp->bintime, &bintime_secs, &bintime_microsecs);
+ bintime2usclock(&clock_calend_cp->boottime, &bootime_secs, &bootime_microsecs);
+
+ os_log(OS_LOG_DEFAULT, "%s s_scale_ns %llu s_adj_nsx %lld tick_scale_x %llu offset_count %llu\n",
+ func, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx,
+ clock_calend_cp->tick_scale_x, clock_calend_cp->offset_count);
+ os_log(OS_LOG_DEFAULT, "%s offset.sec %ld offset.frac %llu offset_secs %lu offset_microsecs %d\n",
+ func, clock_calend_cp->offset.sec, clock_calend_cp->offset.frac,
+ (unsigned long)offset_secs, offset_microsecs);
+ os_log(OS_LOG_DEFAULT, "%s bintime.sec %ld bintime.frac %llu bintime_secs %lu bintime_microsecs %d\n",
+ func, clock_calend_cp->bintime.sec, clock_calend_cp->bintime.frac,
+ (unsigned long)bintime_secs, bintime_microsecs);
+ os_log(OS_LOG_DEFAULT, "%s bootime.sec %ld bootime.frac %llu bootime_secs %lu bootime_microsecs %d\n",
+ func, clock_calend_cp->boottime.sec, clock_calend_cp->boottime.frac,
+ (unsigned long)bootime_secs, bootime_microsecs);
+
+#if !HAS_CONTINUOUS_HWCLOCK
+ /* basesleep only exists on platforms without a continuous hardware clock. */
+ clock_sec_t basesleep_secs;
+ clock_usec_t basesleep_microsecs;
+
+ bintime2usclock(&clock_calend_cp->basesleep, &basesleep_secs, &basesleep_microsecs);
+ os_log(OS_LOG_DEFAULT, "%s basesleep.sec %ld basesleep.frac %llu basesleep_secs %lu basesleep_microsecs %d\n",
+ func, clock_calend_cp->basesleep.sec, clock_calend_cp->basesleep.frac,
+ (unsigned long)basesleep_secs, basesleep_microsecs);
+#endif
+}
+
+
+/*
+ * print_all_clock_variables:
+ *
+ * DEVELOPMENT/DEBUG-only helper: optionally log PMU and sys timestamps
+ * (skipped when the corresponding pointers are NULL), dump the given
+ * clock_calend snapshot, then log the wall time that snapshot yields at
+ * the current mach_absolute_time().  Does nothing unless
+ * g_should_log_clock_adjustments is set.
+ */
+void
+print_all_clock_variables(const char* func, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* clock_calend_cp)
+{
+ if (!g_should_log_clock_adjustments) {
+ return;
+ }
+
+ struct bintime bt;
+ clock_sec_t wall_secs;
+ clock_usec_t wall_microsecs;
+ uint64_t now;
+ uint64_t delta;
+
+ if (pmu_secs) {
+ os_log(OS_LOG_DEFAULT, "%s PMU %lu s %d u \n", func, (unsigned long)*pmu_secs, *pmu_usec);
+ }
+ if (sys_secs) {
+ os_log(OS_LOG_DEFAULT, "%s sys %lu s %d u \n", func, (unsigned long)*sys_secs, *sys_usec);
+ }
+
+ print_all_clock_variables_internal(func, clock_calend_cp);
+
+ /* Recompute the wall time implied by the snapshot: scale the ticks
+ * elapsed since the snapshot's offset_count, then add its bintime. */
+ now = mach_absolute_time();
+ delta = now - clock_calend_cp->offset_count;
+
+ bt = scale_delta(delta, clock_calend_cp->tick_scale_x, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx);
+ bintime_add(&bt, &clock_calend_cp->bintime);
+ bintime2usclock(&bt, &wall_secs, &wall_microsecs);
+
+ os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n",
+ func, (unsigned long)wall_secs, wall_microsecs, now);
+}
+
+
+#endif /* DEVELOPMENT || DEBUG */
+
+
+/*
+ * clock_initialize_calendar:
+ *
+ * Set the calendar and related clocks
+ * from the platform clock at boot.
+ *
+ * Also sends host notifications.
+ */
+void
+clock_initialize_calendar(void)
+{
+ clock_sec_t sys; // sleepless time since boot in seconds
+ clock_sec_t secs; // Current UTC time
+ clock_sec_t utc_offset_secs; // Difference in current UTC time and sleepless time since boot
+ clock_usec_t microsys;
+ clock_usec_t microsecs;
+ clock_usec_t utc_offset_microsecs;
+ spl_t s;
+ struct bintime bt;
+#if ENABLE_LEGACY_CLOCK_CODE
+ struct bintime monotonic_bt;
+ struct latched_time monotonic_time;
+ uint64_t monotonic_usec_total;
+ clock_sec_t sys2, monotonic_sec;
+ clock_usec_t microsys2, monotonic_usec;
+ size_t size;
+
+#endif /* ENABLE_LEGACY_CLOCK_CODE */
+ // Get the UTC time and corresponding sys time.
+ // (Fixed mis-encoded arguments: these must be the addresses of the
+ // locals, "&microsecs"/"&microsys", not the mojibake "µsecs"/"µsys".)
+ PEGetUTCTimeOfDay(&secs, &microsecs);
+ clock_get_system_microtime(&sys, &microsys);
+
+#if ENABLE_LEGACY_CLOCK_CODE
+ /*
+ * If the platform has a monotonic clock, use kern.monotonicclock_usecs
+ * to estimate the sleep/wake time, otherwise use the UTC time to estimate
+ * the sleep time.
+ */
+ size = sizeof(monotonic_time);
+ if (kernel_sysctlbyname("kern.monotonicclock_usecs", &monotonic_time, &size, NULL, 0) != 0) {
+ has_monotonic_clock = 0;
+ os_log(OS_LOG_DEFAULT, "%s system does not have monotonic clock\n", __func__);
+ } else {
+ has_monotonic_clock = 1;
+ monotonic_usec_total = monotonic_time.monotonic_time_usec;
+ absolutetime_to_microtime(monotonic_time.mach_time, &sys2, &microsys2);
+ os_log(OS_LOG_DEFAULT, "%s system has monotonic clock\n", __func__);
+ }
+#endif /* ENABLE_LEGACY_CLOCK_CODE */
+
+ s = splclock();
+ clock_lock();
+
+ commpage_disable_timestamp();
+
+ utc_offset_secs = secs;
+ utc_offset_microsecs = microsecs;
+
+ /*
+ * We normally expect the UTC clock to be always-on and produce
+ * greater readings than the tick counter. There may be corner cases
+ * due to differing clock resolutions (UTC clock is likely lower) and
+ * errors reading the UTC clock (some implementations return 0
+ * on error) in which that doesn't hold true. Bring the UTC measurements
+ * in-line with the tick counter measurements as a best effort in that case.
+ */
+ if ((sys > secs) || ((sys == secs) && (microsys > microsecs))) {
+ os_log(OS_LOG_DEFAULT, "%s WARNING: UTC time is less then sys time, (%lu s %d u) UTC (%lu s %d u) sys\n",
+ __func__, (unsigned long) secs, microsecs, (unsigned long)sys, microsys);
+ secs = utc_offset_secs = sys;
+ microsecs = utc_offset_microsecs = microsys;
+ }
+
+ // UTC - sys
+ // This macro stores the subtraction result in utc_offset_secs and utc_offset_microsecs
+ TIME_SUB(utc_offset_secs, sys, utc_offset_microsecs, microsys, USEC_PER_SEC);
+ // This function converts utc_offset_secs and utc_offset_microsecs in bintime
+ clock2bintime(&utc_offset_secs, &utc_offset_microsecs, &bt);
+
+ /*
+ * Initialize the boot time based on the platform clock.
+ */
+ clock_boottime = secs;
+ clock_boottime_usec = microsecs;
+ commpage_update_boottime(clock_boottime * USEC_PER_SEC + clock_boottime_usec);
+
+ nanoseconds_to_absolutetime((uint64_t)NSEC_PER_SEC, &ticks_per_sec);
+ clock_calend.boottime = bt;
+ clock_calend.bintime = bt;
+ clock_calend.offset.sec = 0;
+ clock_calend.offset.frac = 0;
+
+ // tick_scale_x = 2^64 / ticks_per_sec, computed as (2^63 / ticks_per_sec) * 2
+ // because 2^64 does not fit in a uint64_t.
+ clock_calend.tick_scale_x = (uint64_t)1 << 63;
+ clock_calend.tick_scale_x /= ticks_per_sec;
+ clock_calend.tick_scale_x *= 2;
+
+ clock_calend.s_scale_ns = NSEC_PER_SEC;
+ clock_calend.s_adj_nsx = 0;
+
+#if ENABLE_LEGACY_CLOCK_CODE
+ if (has_monotonic_clock) {
+ monotonic_sec = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC;
+ monotonic_usec = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC;
+
+ // monotonic clock - sys
+ // This macro stores the subtraction result in monotonic_sec and monotonic_usec
+ TIME_SUB(monotonic_sec, sys2, monotonic_usec, microsys2, USEC_PER_SEC);
+ clock2bintime(&monotonic_sec, &monotonic_usec, &monotonic_bt);
+
+ // set the basesleep as the difference between monotonic clock - sys
+ clock_calend.basesleep = monotonic_bt;
+ }
+#endif /* ENABLE_LEGACY_CLOCK_CODE */
+ commpage_update_mach_continuous_time(mach_absolutetime_asleep);
+
+#if DEVELOPMENT || DEBUG
+ struct clock_calend clock_calend_cp = clock_calend;
+#endif
+
+ clock_unlock();
+ splx(s);
+
+ // NOTE(review): clock_calend_cp only exists under DEVELOPMENT || DEBUG;
+ // this presumably compiles on release builds because
+ // print_all_clock_variables is a no-op macro there — confirm.
+ print_all_clock_variables(__func__, &secs, &microsecs, &sys, &microsys, &clock_calend_cp);
+
+ /*
+ * Send host notifications.
+ */
+ host_notify_calendar_change();
+
+#if CONFIG_DTRACE
+ clock_track_calend_nowait();
+#endif
+}
+
+#if HAS_CONTINUOUS_HWCLOCK
+
+/*
+ * scale_sleep_time:
+ *
+ * Fold the ticks spent in the most recent sleep
+ * (mach_absolutetime_last_sleep) into the calendar's offset and bintime.
+ * Called from clock_wakeup_calendar_hwclock() with the clock lock held.
+ */
+static void
+scale_sleep_time(void)
+{
+ /* Apply the current NTP frequency adjustment to the time slept.
+ * The frequency adjustment remains stable between calls to ntp_adjtime(),
+ * and should thus provide a reasonable approximation of the total adjustment
+ * required for the time slept. */
+ struct bintime sleep_time;
+ uint64_t tick_scale_x, s_scale_ns;
+ int64_t s_adj_nsx;
+ int64_t sleep_adj = ntp_get_freq();
+ if (sleep_adj) {
+ /* NTP is steering the clock: scale the slept ticks with adjusted factors. */
+ get_scale_factors_from_adj(sleep_adj, &tick_scale_x, &s_scale_ns, &s_adj_nsx);
+ sleep_time = scale_delta(mach_absolutetime_last_sleep, tick_scale_x, s_scale_ns, s_adj_nsx);
+ } else {
+ /* No NTP adjustment: convert ticks to bintime with the nominal scale
+ * 2^64 / ticks_per_sec, computed as (2^63 / ticks_per_sec) * 2 since
+ * 2^64 does not fit in a uint64_t. */
+ tick_scale_x = (uint64_t)1 << 63;
+ tick_scale_x /= ticks_per_sec;
+ tick_scale_x *= 2;
+ sleep_time.sec = mach_absolutetime_last_sleep / ticks_per_sec;
+ sleep_time.frac = (mach_absolutetime_last_sleep % ticks_per_sec) * tick_scale_x;
+ }
+ /* Advance both the UTC offset and the wall time by the time slept. */
+ bintime_add(&clock_calend.offset, &sleep_time);
+ bintime_add(&clock_calend.bintime, &sleep_time);
+}
+
+/*
+ * clock_wakeup_calendar_hwclock:
+ *
+ * Wake-from-sleep path for platforms with a continuous hardware clock:
+ * derive the time slept from mach_continuous_time() - mach_absolute_time(),
+ * fold it into the calendar (with NTP scaling via scale_sleep_time()), and
+ * publish the update (commpage, kdebug tracepoint, host notifications).
+ */
+static void
+clock_wakeup_calendar_hwclock(void)
+{
+ spl_t s;
+
+ s = splclock();
+ clock_lock();
+
+ commpage_disable_timestamp();
+
+ /* Continuous time keeps ticking across sleep; absolute time does not,
+ * so their difference is the total time ever spent asleep. */
+ uint64_t abstime = mach_absolute_time();
+ uint64_t total_sleep_time = mach_continuous_time() - abstime;
+
+ /* Time slept in the most recent sleep = new total - previous total. */
+ mach_absolutetime_last_sleep = total_sleep_time - mach_absolutetime_asleep;
+ mach_absolutetime_asleep = total_sleep_time;
+
+ scale_sleep_time();
+
+ /* Trace the epoch change; the 64-bit values are split into 32-bit halves. */
+ KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE),
+ (uintptr_t)mach_absolutetime_last_sleep,
+ (uintptr_t)mach_absolutetime_asleep,
+ (uintptr_t)(mach_absolutetime_last_sleep >> 32),
+ (uintptr_t)(mach_absolutetime_asleep >> 32));
+
+ commpage_update_mach_continuous_time(mach_absolutetime_asleep);
+#if HIBERNATION
+ commpage_update_mach_continuous_time_hw_offset(hwclock_conttime_offset);
+#endif
+ adjust_cont_time_thread_calls();
+
+ clock_unlock();
+ splx(s);
+
+ host_notify_calendar_change();
+
+#if CONFIG_DTRACE
+ clock_track_calend_nowait();
+#endif
+}
+
+#endif /* HAS_CONTINUOUS_HWCLOCK */
+
+#if ENABLE_LEGACY_CLOCK_CODE
+
+/*
+ * clock_wakeup_calendar_legacy:
+ *
+ * Adjust the calendar clock at wake from sleep on platforms without a
+ * continuous hardware clock: compute the time slept either from the
+ * platform monotonic clock (preferred) or from the UTC (PMU/RTC) clock,
+ * fold it into the calendar, and publish the update (commpage, kdebug
+ * tracepoint, host notifications).
+ */
+static void
+clock_wakeup_calendar_legacy(void)
+{
+ clock_sec_t wake_sys_sec;
+ clock_usec_t wake_sys_usec;
+ clock_sec_t wake_sec;
+ clock_usec_t wake_usec;
+ clock_sec_t wall_time_sec;
+ clock_usec_t wall_time_usec;
+ clock_sec_t diff_sec;
+ clock_usec_t diff_usec;
+ clock_sec_t var_s;
+ clock_usec_t var_us;
+ spl_t s;
+ struct bintime bt, last_sleep_bt;
+ struct latched_time monotonic_time;
+ uint64_t monotonic_usec_total;
+ uint64_t wake_abs;
+ size_t size;
+
+ /*
+ * If the platform has the monotonic clock use that to
+ * compute the sleep time. The monotonic clock does not have an offset
+ * that can be modified, so neither the kernel nor userspace can change
+ * the time of this clock; it can only monotonically increase over time.
+ * During sleep mach_absolute_time (sys time) does not tick,
+ * so the sleep time is the difference between the current monotonic time
+ * less the absolute time and the previous difference stored at wake time.
+ *
+ * basesleep = (monotonic - sys) ---> computed at last wake
+ * sleep_time = (monotonic - sys) - basesleep
+ *
+ * If the platform does not support monotonic clock we set the wall time to what the
+ * UTC clock returns us.
+ * Setting the wall time to UTC time implies that we lose all the adjustments
+ * done during wake time through adjtime/ntp_adjtime.
+ * The UTC time is the monotonic clock + an offset that can be set
+ * by kernel.
+ * The time slept in this case is the difference between wall time and UTC
+ * at wake.
+ *
+ * IMPORTANT:
+ * We assume that only the kernel is setting the offset of the PMU/RTC and that
+ * it is doing it only through the settimeofday interface.
+ */
+ if (has_monotonic_clock) {
+#if DEVELOPMENT || DEBUG
+ /*
+ * Just for debugging, get the wake UTC time.
+ */
+ PEGetUTCTimeOfDay(&var_s, &var_us);
+#endif
+ /*
+ * Get monotonic time with corresponding sys time
+ */
+ size = sizeof(monotonic_time);
+ if (kernel_sysctlbyname("kern.monotonicclock_usecs", &monotonic_time, &size, NULL, 0) != 0) {
+ panic("%s: could not call kern.monotonicclock_usecs", __func__);
+ }
+ wake_abs = monotonic_time.mach_time;
+ absolutetime_to_microtime(wake_abs, &wake_sys_sec, &wake_sys_usec);
+
+ monotonic_usec_total = monotonic_time.monotonic_time_usec;
+ wake_sec = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC;
+ wake_usec = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC;
+ } else {
+ /*
+ * Get UTC time and corresponding sys time
+ */
+ PEGetUTCTimeOfDay(&wake_sec, &wake_usec);
+ wake_abs = mach_absolute_time();
+ absolutetime_to_microtime(wake_abs, &wake_sys_sec, &wake_sys_usec);
+ }
+
+#if DEVELOPMENT || DEBUG
+ os_log(OS_LOG_DEFAULT, "time at wake %lu s %d u from %s clock, abs %llu\n", (unsigned long)wake_sec, wake_usec, (has_monotonic_clock)?"monotonic":"UTC", wake_abs);
+ if (has_monotonic_clock) {
+ os_log(OS_LOG_DEFAULT, "UTC time %lu s %d u\n", (unsigned long)var_s, var_us);
+ }
+#endif /* DEVELOPMENT || DEBUG */
+
+ s = splclock();
+ clock_lock();
+
+ commpage_disable_timestamp();
+
+#if DEVELOPMENT || DEBUG
+ /* Snapshot the calendar before the adjustment, for BEFORE/AFTER logging. */
+ struct clock_calend clock_calend_cp1 = clock_calend;
+#endif /* DEVELOPMENT || DEBUG */
+
+ /*
+ * We normally expect the UTC/monotonic clock to be always-on and produce
+ * greater readings than the sys counter. There may be corner cases
+ * due to differing clock resolutions (UTC/monotonic clock is likely lower)
+ * and errors reading the UTC/monotonic clock (some implementations return 0
+ * on error) in which that doesn't hold true.
+ */
+ if ((wake_sys_sec > wake_sec) || ((wake_sys_sec == wake_sec) && (wake_sys_usec > wake_usec))) {
+ os_log_error(OS_LOG_DEFAULT, "WARNING: %s clock is less then sys clock at wake: %lu s %d u vs %lu s %d u, defaulting sleep time to zero\n", (has_monotonic_clock)?"monotonic":"UTC", (unsigned long)wake_sec, wake_usec, (unsigned long)wake_sys_sec, wake_sys_usec);
+ mach_absolutetime_last_sleep = 0;
+ goto done;
+ }
+
+ if (has_monotonic_clock) {
+ /*
+ * compute the difference monotonic - sys
+ * we already checked that monotonic time is
+ * greater than sys.
+ */
+ diff_sec = wake_sec;
+ diff_usec = wake_usec;
+ // This macro stores the subtraction result in diff_sec and diff_usec
+ TIME_SUB(diff_sec, wake_sys_sec, diff_usec, wake_sys_usec, USEC_PER_SEC);
+ //This function converts diff_sec and diff_usec in bintime
+ clock2bintime(&diff_sec, &diff_usec, &bt);
+
+ /*
+ * Safety belt: the monotonic clock will likely have a lower resolution than the sys counter.
+ * It's also possible that the device didn't fully transition to the powered-off state on
+ * the most recent sleep, so the sys counter may not have reset or may have only briefly
+ * turned off. In that case it's possible for the difference between the monotonic clock and the
+ * sys counter to be less than the previously recorded value in clock_calend.basesleep.
+ * In that case simply record that we slept for 0 ticks.
+ */
+ if ((bt.sec > clock_calend.basesleep.sec) ||
+ ((bt.sec == clock_calend.basesleep.sec) && (bt.frac > clock_calend.basesleep.frac))) {
+ //last_sleep is the difference between (current monotonic - abs) and (last wake monotonic - abs)
+ last_sleep_bt = bt;
+ bintime_sub(&last_sleep_bt, &clock_calend.basesleep);
+
+ bintime2absolutetime(&last_sleep_bt, &mach_absolutetime_last_sleep);
+ mach_absolutetime_asleep += mach_absolutetime_last_sleep;
+
+ //set basesleep to current monotonic - abs
+ clock_calend.basesleep = bt;
+
+ //update wall time
+ bintime_add(&clock_calend.offset, &last_sleep_bt);
+ bintime_add(&clock_calend.bintime, &last_sleep_bt);
+
+ bintime2usclock(&last_sleep_bt, &var_s, &var_us);
+ os_log(OS_LOG_DEFAULT, "time_slept (%lu s %d u)\n", (unsigned long) var_s, var_us);
+ } else {
+ bintime2usclock(&clock_calend.basesleep, &var_s, &var_us);
+ os_log_error(OS_LOG_DEFAULT, "WARNING: last wake monotonic-sys time (%lu s %d u) is greater then current monotonic-sys time(%lu s %d u), defaulting sleep time to zero\n", (unsigned long) var_s, var_us, (unsigned long) diff_sec, diff_usec);
+
+ mach_absolutetime_last_sleep = 0;
+ }
+ } else {
+ /*
+ * set the wall time to UTC value
+ */
+ bt = get_scaled_time(wake_abs);
+ bintime_add(&bt, &clock_calend.bintime);
+ bintime2usclock(&bt, &wall_time_sec, &wall_time_usec);
+
+ if (wall_time_sec > wake_sec || (wall_time_sec == wake_sec && wall_time_usec > wake_usec)) {
+ os_log(OS_LOG_DEFAULT, "WARNING: wall time (%lu s %d u) is greater than current UTC time (%lu s %d u), defaulting sleep time to zero\n", (unsigned long) wall_time_sec, wall_time_usec, (unsigned long) wake_sec, wake_usec);
+
+ mach_absolutetime_last_sleep = 0;
+ } else {
+ diff_sec = wake_sec;
+ diff_usec = wake_usec;
+ // This macro stores the subtraction result in diff_sec and diff_usec
+ TIME_SUB(diff_sec, wall_time_sec, diff_usec, wall_time_usec, USEC_PER_SEC);
+ //This function converts diff_sec and diff_usec in bintime
+ clock2bintime(&diff_sec, &diff_usec, &bt);
+
+ //time slept in this case is the difference between PMU/RTC and wall time
+ last_sleep_bt = bt;
+
+ bintime2absolutetime(&last_sleep_bt, &mach_absolutetime_last_sleep);
+ mach_absolutetime_asleep += mach_absolutetime_last_sleep;
+
+ //update wall time
+ bintime_add(&clock_calend.offset, &last_sleep_bt);
+ bintime_add(&clock_calend.bintime, &last_sleep_bt);
+
+ bintime2usclock(&last_sleep_bt, &var_s, &var_us);
+ os_log(OS_LOG_DEFAULT, "time_slept (%lu s %d u)\n", (unsigned long)var_s, var_us);
+ }
+ }
+done:
+ /* Trace the epoch change; the 64-bit values are split into 32-bit halves. */
+ KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE),
+ (uintptr_t)mach_absolutetime_last_sleep,
+ (uintptr_t)mach_absolutetime_asleep,
+ (uintptr_t)(mach_absolutetime_last_sleep >> 32),
+ (uintptr_t)(mach_absolutetime_asleep >> 32));
+
+ commpage_update_mach_continuous_time(mach_absolutetime_asleep);
+ adjust_cont_time_thread_calls();
+
+#if DEVELOPMENT || DEBUG
+ /* Snapshot the calendar after the adjustment, for BEFORE/AFTER logging. */
+ struct clock_calend clock_calend_cp = clock_calend;
+#endif
+
+ clock_unlock();
+ splx(s);
+
+#if DEVELOPMENT || DEBUG
+ if (g_should_log_clock_adjustments) {
+ print_all_clock_variables("clock_wakeup_calendar: BEFORE", NULL, NULL, NULL, NULL, &clock_calend_cp1);
+ print_all_clock_variables("clock_wakeup_calendar: AFTER", NULL, NULL, NULL, NULL, &clock_calend_cp);
+ }
+#endif /* DEVELOPMENT || DEBUG */
+
+ host_notify_calendar_change();
+
+#if CONFIG_DTRACE
+ clock_track_calend_nowait();
+#endif
+}
+
+#endif /* ENABLE_LEGACY_CLOCK_CODE */
+
+/*
+ * clock_wakeup_calendar:
+ *
+ * Adjust the calendar clock at wake from sleep, dispatching to the
+ * continuous-hardware-clock path or the legacy path depending on platform
+ * configuration (and, under HIBERNATION_USES_LEGACY_CLOCK, on whether we
+ * are resuming from hibernation).
+ */
+void
+clock_wakeup_calendar(void)
+{
+#if HAS_CONTINUOUS_HWCLOCK
+#if HIBERNATION_USES_LEGACY_CLOCK
+ if (gIOHibernateState) {
+ // if we're resuming from hibernation, we have to take the legacy wakeup path
+ return clock_wakeup_calendar_legacy();
+ }
+#endif /* HIBERNATION_USES_LEGACY_CLOCK */
+ // use the hwclock wakeup path
+ return clock_wakeup_calendar_hwclock();
+#elif ENABLE_LEGACY_CLOCK_CODE
+ return clock_wakeup_calendar_legacy();
+#else
+#error "can't determine which clock code to run"
+#endif
+}
+
+/*
+ * clock_get_boottime_nanotime:
+ *
+ * Return the boottime, used by sysctl.
+ */
+void
+clock_get_boottime_nanotime(
+ clock_sec_t *secs,
+ clock_nsec_t *nanosecs)
+{
+ spl_t s;
+
+ s = splclock();
+ clock_lock();
+
+ /* Read both components under the clock lock so they are consistent;
+ * boot time is stored as sec + usec and scaled to nanoseconds here. */
+ *secs = (clock_sec_t)clock_boottime;
+ *nanosecs = (clock_nsec_t)clock_boottime_usec * NSEC_PER_USEC;
+
+ clock_unlock();
+ splx(s);
+}
+
+/*
+ * clock_get_boottime_microtime:
+ *
+ * Return the boottime, used by sysctl.
+ */
+void
+clock_get_boottime_microtime(
+ clock_sec_t *secs,
+ clock_usec_t *microsecs)
+{
+ spl_t s;
+
+ s = splclock();
+ clock_lock();
+
+ /* Read both components under the clock lock so they are consistent.
+ * Cast to clock_usec_t (was mistakenly clock_nsec_t, copy-pasted from
+ * the nanotime variant) to match the out-parameter's type. */
+ *secs = (clock_sec_t)clock_boottime;
+ *microsecs = (clock_usec_t)clock_boottime_usec;
+
+ clock_unlock();
+ splx(s);
+}
+
+
+/*
+ * Wait / delay routines.
+ */