+ tvp->tv_sec = tv_sec;
+ tvp->tv_usec = tv_usec;
+}
+
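+/*
+ * microuptime: return the system uptime as a struct timeval
+ * (microsecond resolution).
+ */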
+void
+microuptime(
+ struct timeval *tvp)
+{
+ clock_sec_t tv_sec;
+ clock_usec_t tv_usec;
+
+ clock_get_system_microtime(&tv_sec, &tv_usec);
+
+ tvp->tv_sec = tv_sec;
+ tvp->tv_usec = tv_usec;
+}
+
+/*
+ * Ditto for timespec: the nanosecond-resolution analogues of the
+ * timeval functions above.
+ */
+void
+nanotime(
+ struct timespec *tsp)
+{
+ clock_sec_t tv_sec;
+ clock_nsec_t tv_nsec;
+
+ clock_get_calendar_nanotime(&tv_sec, &tv_nsec);
+
+ tsp->tv_sec = tv_sec;
+ tsp->tv_nsec = tv_nsec;
+}
+
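+/*
+ * nanouptime: return the system uptime as a struct timespec
+ * (nanosecond resolution).
+ */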
+void
+nanouptime(
+ struct timespec *tsp)
+{
+ clock_sec_t tv_sec;
+ clock_nsec_t tv_nsec;
+
+ clock_get_system_nanotime(&tv_sec, &tv_nsec);
+
+ tsp->tv_sec = tv_sec;
+ tsp->tv_nsec = tv_nsec;
+}
+
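+/*
+ * tvtoabstime: convert a struct timeval interval into Mach
+ * absolute-time units.
+ */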
+uint64_t
+tvtoabstime(
+ struct timeval *tvp)
+{
+ uint64_t result, usresult;
+
+ clock_interval_to_absolutetime_interval(
+ tvp->tv_sec, NSEC_PER_SEC, &result);
+ clock_interval_to_absolutetime_interval(
+ tvp->tv_usec, NSEC_PER_USEC, &usresult);
+
+ return result + usresult;
+}
+
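+/*
+ * tstoabstime: as above, for a struct timespec; tv_nsec is already in
+ * nanoseconds, so it is scaled by 1.
+ */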
+uint64_t
+tstoabstime(
+ struct timespec *ts)
+{
+ uint64_t abstime_s, abstime_ns;
+
+ clock_interval_to_absolutetime_interval(
+ ts->tv_sec, NSEC_PER_SEC, &abstime_s);
+ clock_interval_to_absolutetime_interval(
+ ts->tv_nsec, 1, &abstime_ns);
+
+ return abstime_s + abstime_ns;
+}
+
+#if NETWORKING
+/*
+ * ratecheck(): simple time-based rate-limit checking.
+ */
+int
+ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
+{
+ struct timeval tv, delta;
+ int rv = 0;
+
+ net_uptime2timeval(&tv);
+ delta = tv;
+ timevalsub(&delta, lasttime);
+
+ /*
+ * The check for 0,0 is so that the message will be seen at least
+ * once, even if the interval is huge.
+ */
+ if (timevalcmp(&delta, mininterval, >=) ||
+ (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
+ *lasttime = tv;
+ rv = 1;
+ }
+
+ return rv;
+}
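+
+/*
+ * Usage sketch (hypothetical caller, not part of this change): emit a
+ * diagnostic at most once every 10 seconds.
+ *
+ *     static struct timeval last;
+ *     static const struct timeval one_per_10s = { 10, 0 };
+ *
+ *     if (ratecheck(&last, &one_per_10s))
+ *             printf("rate-limited message\n");
+ */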
+
+/*
+ * ppsratecheck(): packets (or events) per second limitation.
+ */
+int
+ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
+{
+ struct timeval tv, delta;
+ int rv;
+
+ net_uptime2timeval(&tv);
+
+ timersub(&tv, lasttime, &delta);
+
+ /*
+ * Check for 0,0 so that the message will be seen at least once.
+ * If more than one second has passed since the last update of
+ * lasttime, reset the counter.
+ *
+ * We increment *curpps even in the *curpps < maxpps case, as some
+ * callers may use *curpps for statistics as well.
+ */
+ if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
+ delta.tv_sec >= 1) {
+ *lasttime = tv;
+ *curpps = 0;
+ rv = 1;
+ } else if (maxpps < 0) {
+ rv = 1;
+ } else if (*curpps < maxpps) {
+ rv = 1;
+ } else {
+ rv = 0;
+ }
+
+#if 1 /* DIAGNOSTIC? */
+ /* be careful about wrap-around */
+ if (*curpps + 1 > 0) {
+ *curpps = *curpps + 1;
+ }
+#else
+ /*
+ * Assume that there are not too many calls to this function.
+ * It is not certain that the assumption holds, as it depends on
+ * the *caller's* behavior, not the behavior of this function.
+ * Arguably it is wrong to make assumptions about the caller's
+ * behavior, so the above #if is #if 1, not #ifdef DIAGNOSTIC.
+ */
+ *curpps = *curpps + 1;
+#endif
+
+ return rv;
+}
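+
+/*
+ * Usage sketch (hypothetical caller, not part of this change): allow at
+ * most 100 events per second; a negative maxpps disables the limit while
+ * events continue to be counted.
+ *
+ *     static struct timeval lasttime;
+ *     static int curpps;
+ *
+ *     if (ppsratecheck(&lasttime, &curpps, 100))
+ *             handle_event();
+ */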
+#endif /* NETWORKING */
+
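+/*
+ * time_zone_slock_init: allocate and initialize the time-zone spin lock,
+ * along with its lock group and attributes.
+ */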
+void
+time_zone_slock_init(void)
+{
+ /* allocate lock group attribute and group */
+ tz_slock_grp_attr = lck_grp_attr_alloc_init();
+
+ tz_slock_grp = lck_grp_alloc_init("tzlock", tz_slock_grp_attr);
+
+ /* Allocate lock attribute */
+ tz_slock_attr = lck_attr_alloc_init();
+
+ /* Allocate the spin lock */
+ tz_slock = lck_spin_alloc_init(tz_slock_grp, tz_slock_attr);
+}
+
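+/*
+ * __mach_bridge_remote_time: system call handler; converts the supplied
+ * local timestamp to the remote bridge's time base via
+ * mach_bridge_remote_time().
+ */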
+int
+__mach_bridge_remote_time(__unused struct proc *p, struct __mach_bridge_remote_time_args *mbrt_args, uint64_t *retval)
+{
+ *retval = mach_bridge_remote_time(mbrt_args->local_timestamp);
+ return 0;