+ }
+ }
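+ /* clear the per-pass marker so the next service pass revisits each prefix */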
+ LIST_FOREACH(pr, &nd_prefix, ndpr_entry) {
+ NDPR_LOCK(pr);
+ pr->ndpr_stateflags &= ~NDPRF_PROCESSED_SERVICE;
+ NDPR_UNLOCK(pr);
+ }
+ lck_mtx_unlock(nd6_mutex);
+
+ lck_mtx_lock(rnh_lock);
+ /* We're done; let others enter */
+ nd6_service_busy = FALSE;
+ if (nd6_service_waiters > 0) {
+ nd6_service_waiters = 0;
+ wakeup(nd6_service_wc);
+ }
+}
+
+
+static int nd6_need_draining = 0;
+
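+ /*
+  * Flag the ND6 state for draining and kick the service timer so the
+  * next nd6_timeout() pass invokes nd6_service() with draining set.
+  */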
+void
+nd6_drain(void *arg)
+{
+#pragma unused(arg)
+ nd6log2((LOG_DEBUG, "%s: draining ND6 entries\n", __func__));
+
+ lck_mtx_lock(rnh_lock);
+ nd6_need_draining = 1;
+ nd6_sched_timeout(NULL, NULL);
+ lck_mtx_unlock(rnh_lock);
+}
+
+/*
+ * We use the ``arg'' variable to decide whether or not the timer we're
+ * running is the fast timer. We do this to reset the nd6_fast_timer_on
+ * variable so that later we don't end up ignoring a ``fast timer''
+ * request if the 5 second timer is running (see nd6_sched_timeout).
+ */
+static void
+nd6_timeout(void *arg)
+{
+ struct nd6svc_arg sarg;
+ uint32_t buf;
+
+ lck_mtx_lock(rnh_lock);
+ bzero(&sarg, sizeof (sarg));
+ if (nd6_need_draining != 0) {
+ nd6_need_draining = 0;
+ sarg.draining = 1;
+ }
+ nd6_service(&sarg);
+ nd6log2((LOG_DEBUG, "%s: found %u, aging_lazy %u, aging %u, "
+ "sticky %u, killed %u\n", __func__, sarg.found, sarg.aging_lazy,
+ sarg.aging, sarg.sticky, sarg.killed));
+ /* re-arm the timer if there's work to do */
+ nd6_timeout_run--;
+ VERIFY(nd6_timeout_run >= 0 && nd6_timeout_run < 2);
+ if (arg == &nd6_fast_timer_on)
+ nd6_fast_timer_on = FALSE;
+ if (sarg.aging_lazy > 0 || sarg.aging > 0 || nd6_sched_timeout_want) {
+ struct timeval atv, ltv, *leeway;
+ int lazy = nd6_prune_lazy;
+
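+ /*
+  * Entries under active aging need the fast nd6_prune interval with
+  * no leeway; otherwise re-arm with the longer lazy interval plus a
+  * randomized leeway so the timeout can be coalesced.
+  */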
+ if (sarg.aging > 0 || lazy < 1) {
+ atv.tv_usec = 0;
+ atv.tv_sec = nd6_prune;
+ leeway = NULL;
+ } else {
+ VERIFY(lazy >= 1);
+ atv.tv_usec = 0;
+ atv.tv_sec = MAX(nd6_prune, lazy);
+ ltv.tv_usec = 0;
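+ /* even leeway between 2 and 2 * (lazy - 1) seconds */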
+ read_frandom(&buf, sizeof(buf));
+ ltv.tv_sec = MAX(buf % lazy, 1) * 2;
+ leeway = &ltv;
+ }
+ nd6_sched_timeout(&atv, leeway);
+ } else if (nd6_debug) {
+ nd6log2((LOG_DEBUG, "%s: not rescheduling timer\n", __func__));
+ }
+ lck_mtx_unlock(rnh_lock);
+}
+
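+ /*
+  * Arm the ND6 service timer.  A NULL ``atv'' requests the default
+  * interval of MAX(nd6_prune, 1) seconds with no leeway.  At most two
+  * timeouts may be pending at once: if only the lazy timer is armed,
+  * a request with no leeway arms an additional fast timer; any other
+  * request while timers are pending is logged and dropped.
+  */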
+void
+nd6_sched_timeout(struct timeval *atv, struct timeval *ltv)
+{
+ struct timeval tv;
+
+ LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
+ if (atv == NULL) {
+ tv.tv_usec = 0;
+ tv.tv_sec = MAX(nd6_prune, 1);
+ atv = &tv;
+ ltv = NULL; /* ignore leeway */
+ }
+ /* see comments on top of this file */
+ if (nd6_timeout_run == 0) {
+ if (ltv == NULL) {
+ nd6log2((LOG_DEBUG, "%s: timer scheduled in "
+ "T+%llus.%lluu (demand %d)\n", __func__,
+ (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec,
+ nd6_sched_timeout_want));
+ nd6_fast_timer_on = TRUE;
+ timeout(nd6_timeout, &nd6_fast_timer_on, tvtohz(atv));
+ } else {
+ nd6log2((LOG_DEBUG, "%s: timer scheduled in "
+ "T+%llus.%lluu with %llus.%lluu leeway "
+ "(demand %d)\n", __func__, (uint64_t)atv->tv_sec,
+ (uint64_t)atv->tv_usec, (uint64_t)ltv->tv_sec,
+ (uint64_t)ltv->tv_usec, nd6_sched_timeout_want));
+ nd6_fast_timer_on = FALSE;
+ timeout_with_leeway(nd6_timeout, NULL,
+ tvtohz(atv), tvtohz(ltv));
+ }
+ nd6_timeout_run++;
+ nd6_sched_timeout_want = 0;
+ } else if (nd6_timeout_run == 1 && ltv == NULL &&
+ nd6_fast_timer_on == FALSE) {
+ nd6log2((LOG_DEBUG, "%s: fast timer scheduled in "
+ "T+%llus.%lluu (demand %d)\n", __func__,
+ (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec,
+ nd6_sched_timeout_want));
+ nd6_fast_timer_on = TRUE;
+ nd6_sched_timeout_want = 0;
+ nd6_timeout_run++;
+ timeout(nd6_timeout, &nd6_fast_timer_on, tvtohz(atv));
+ } else {
+ if (ltv == NULL) {
+ nd6log2((LOG_DEBUG, "%s: not scheduling timer: "
+ "timers %d, fast_timer %d, T+%llus.%lluu\n",
+ __func__, nd6_timeout_run, nd6_fast_timer_on,
+ (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec));
+ } else {
+ nd6log2((LOG_DEBUG, "%s: not scheduling timer: "
+ "timers %d, fast_timer %d, T+%llus.%lluu "
+ "with %llus.%lluu leeway\n", __func__,
+ nd6_timeout_run, nd6_fast_timer_on,
+ (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec,
+ (uint64_t)ltv->tv_sec, (uint64_t)ltv->tv_usec));
+ }
+ }
+}
+
+/*
+ * ND6 router advertisement kernel notification
+ */
+void
+nd6_post_msg(u_int32_t code, struct nd_prefix_list *prefix_list,
+ u_int32_t list_length, u_int32_t mtu)
+{
+ struct kev_msg ev_msg;
+ struct kev_nd6_ra_data nd6_ra_msg_data;
+ struct nd_prefix_list *itr = prefix_list;
+
+ bzero(&ev_msg, sizeof (struct kev_msg));
+ ev_msg.vendor_code = KEV_VENDOR_APPLE;
+ ev_msg.kev_class = KEV_NETWORK_CLASS;
+ ev_msg.kev_subclass = KEV_ND6_SUBCLASS;
+ ev_msg.event_code = code;
+
+ bzero(&nd6_ra_msg_data, sizeof (nd6_ra_msg_data));
+
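+ /* only report an MTU that meets the IPv6 minimum (IPV6_MMTU) */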
+ if (mtu > 0 && mtu >= IPV6_MMTU) {
+ nd6_ra_msg_data.mtu = mtu;
+ nd6_ra_msg_data.flags |= KEV_ND6_DATA_VALID_MTU;
+ }
+
+ if (list_length > 0 && prefix_list != NULL) {
+ nd6_ra_msg_data.list_length = list_length;
+ nd6_ra_msg_data.flags |= KEV_ND6_DATA_VALID_PREFIX;
+ }
+
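+ /* post one kernel event per prefix in the list */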
+ while (itr != NULL && nd6_ra_msg_data.list_index < list_length) {
+ bcopy(&itr->pr.ndpr_prefix, &nd6_ra_msg_data.prefix.prefix,
+ sizeof (nd6_ra_msg_data.prefix.prefix));
+ nd6_ra_msg_data.prefix.raflags = itr->pr.ndpr_raf;
+ nd6_ra_msg_data.prefix.prefixlen = itr->pr.ndpr_plen;
+ nd6_ra_msg_data.prefix.origin = PR_ORIG_RA;
+ nd6_ra_msg_data.prefix.vltime = itr->pr.ndpr_vltime;
+ nd6_ra_msg_data.prefix.pltime = itr->pr.ndpr_pltime;
+ nd6_ra_msg_data.prefix.expire = ndpr_getexpire(&itr->pr);
+ nd6_ra_msg_data.prefix.flags = itr->pr.ndpr_stateflags;
+ nd6_ra_msg_data.prefix.refcnt = itr->pr.ndpr_addrcnt;
+ nd6_ra_msg_data.prefix.if_index = itr->pr.ndpr_ifp->if_index;
+
+ /* send the message up */
+ ev_msg.dv[0].data_ptr = &nd6_ra_msg_data;
+ ev_msg.dv[0].data_length = sizeof (nd6_ra_msg_data);
+ ev_msg.dv[1].data_length = 0;
+ dlil_post_complete_msg(NULL, &ev_msg);
+
+ /* clean up for the next prefix */
+ bzero(&nd6_ra_msg_data.prefix, sizeof (nd6_ra_msg_data.prefix));
+ itr = itr->next;
+ nd6_ra_msg_data.list_index++;