[apple/xnu.git] / bsd/netinet6/in6_rmx.c (blobdiff, xnu-4570.1.46)

diff --git a/bsd/netinet6/in6_rmx.c b/bsd/netinet6/in6_rmx.c
index f94f673dd3a58d27127979a60a39e065eb2c091c..7647eaf0cb202bc26ffeb50b0397c3cddd9048a2 100644
--- a/bsd/netinet6/in6_rmx.c
+++ b/bsd/netinet6/in6_rmx.c
@@ -1,4 +1,30 @@
-/*     $KAME: in6_rmx.c,v 1.6 2000/03/25 07:23:45 sumikawa Exp $       */
+/*
+ * Copyright (c) 2003-2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
 
 /*
  * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 #include <kern/queue.h>
 #include <sys/socket.h>
 #include <sys/socketvar.h>
+#include <sys/protosw.h>
 #include <sys/mbuf.h>
 #include <sys/syslog.h>
+#include <sys/mcache.h>
+#include <kern/locks.h>
 
 #include <net/if.h>
 #include <net/route.h>
 #include <netinet/in.h>
-#if defined(__APPLE__)
 #include <netinet/ip_var.h>
-#endif
 #include <netinet/in_var.h>
 
 #include <netinet/ip6.h>
 
 #include <netinet/icmp6.h>
 
-#if !defined(__APPLE__)
-#include <netinet6/tcp6.h>
-#include <netinet6/tcp6_seq.h>
-#include <netinet6/tcp6_timer.h>
-#include <netinet6/tcp6_var.h>
-#else
 #include <netinet/tcp.h>
 #include <netinet/tcp_seq.h>
 #include <netinet/tcp_timer.h>
 #include <netinet/tcp_var.h>
-#endif
 
-#if !defined(__APPLE__)
-#define tcp_sendspace tcp6_sendspace
-#define tcp_recvspace tcp6_recvspace
-#define time_second time.tv_sec
-#define tvtohz hzto
-#endif
+extern int     tvtohz(struct timeval *);
+
+static int in6_rtqtimo_run;            /* in6_rtqtimo is scheduled to run */
+static void in6_rtqtimo(void *);
+static void in6_sched_rtqtimo(struct timeval *);
 
-extern int     in6_inithead __P((void **head, int off));
+static struct radix_node *in6_addroute(void *, void *, struct radix_node_head *,
+    struct radix_node *);
+static struct radix_node *in6_deleteroute(void *, void *,
+    struct radix_node_head *);
+static struct radix_node *in6_matroute(void *, struct radix_node_head *);
+static struct radix_node *in6_matroute_args(void *, struct radix_node_head *,
+    rn_matchf_t *, void *);
+static void in6_clsroute(struct radix_node *, struct radix_node_head *);
+static int in6_rtqkill(struct radix_node *, void *);
 
-#define RTPRF_OURS             RTF_PROTO3      /* set on routes we manage */
+/*
+ * Accessed by in6_addroute(), in6_deleteroute() and in6_rtqkill(), during
+ * which the routing lock (rnh_lock) is held and thus protects the variable.
+ */
+static int in6dynroutes;
 
 /*
  * Do what we need to do when inserting a route.
  */
 static struct radix_node *
 in6_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
-           struct radix_node *treenodes)
+    struct radix_node *treenodes)
 {
        struct rtentry *rt = (struct rtentry *)treenodes;
-       struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)rt_key(rt);
+       struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)(void *)rt_key(rt);
        struct radix_node *ret;
+       char dbuf[MAX_IPv6_STR_LEN], gbuf[MAX_IPv6_STR_LEN];
+       uint32_t flags = rt->rt_flags;
+       boolean_t verbose = (rt_verbose > 1);
+
+       LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
+       RT_LOCK_ASSERT_HELD(rt);
+
+       if (verbose)
+               rt_str(rt, dbuf, sizeof (dbuf), gbuf, sizeof (gbuf));
+
+       /*
+        * If this is a dynamic route (which is created via Redirect) and
+        * we already have the maximum acceptable number of such route entries,
+        * reject creating a new one.  We could initiate garbage collection to
+        * make available space right now, but the benefit would probably not
+        * be worth the cleaning overhead; we only have to endure a slightly
+        * suboptimal path even without the redirected route.
+        */
+       if ((rt->rt_flags & RTF_DYNAMIC) &&
+           ip6_maxdynroutes >= 0 && in6dynroutes >= ip6_maxdynroutes)
+               return (NULL);
 
        /*
         * For IPv6, all unicast non-host routes are automatically cloning.
@@ -135,9 +187,8 @@ in6_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
        if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
                rt->rt_flags |= RTF_MULTICAST;
 
-       if (!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST))) {
+       if (!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST)))
                rt->rt_flags |= RTF_PRCLONING;
-       }
 
        /*
         * A little bit of help for both IPv6 output and input:
@@ -154,59 +205,70 @@ in6_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
         * should elaborate the code.
         */
        if (rt->rt_flags & RTF_HOST) {
-               if (IN6_ARE_ADDR_EQUAL(&satosin6(rt->rt_ifa->ifa_addr)
-                                       ->sin6_addr,
-                                      &sin6->sin6_addr)) {
+               IFA_LOCK_SPIN(rt->rt_ifa);
+               if (IN6_ARE_ADDR_EQUAL(&satosin6(rt->rt_ifa->ifa_addr)->
+                   sin6_addr, &sin6->sin6_addr)) {
                        rt->rt_flags |= RTF_LOCAL;
                }
+               IFA_UNLOCK(rt->rt_ifa);
        }
 
-       /*
-        * We also specify a send and receive pipe size for every
-        * route added, to help TCP a bit.  TCP doesn't actually
-        * want a true pipe size, which would be prohibitive in memory
-        * costs and is hard to compute anyway; it simply uses these
-        * values to size its buffers.  So, we fill them in with the
-        * same values that TCP would have used anyway, and allow the
-        * installing program or the link layer to override these values
-        * as it sees fit.  This will hopefully allow TCP more
-        * opportunities to save its ssthresh value.
-        */
-       if (!rt->rt_rmx.rmx_sendpipe && !(rt->rt_rmx.rmx_locks & RTV_SPIPE))
-               rt->rt_rmx.rmx_sendpipe = tcp_sendspace;
-
-       if (!rt->rt_rmx.rmx_recvpipe && !(rt->rt_rmx.rmx_locks & RTV_RPIPE))
-               rt->rt_rmx.rmx_recvpipe = tcp_recvspace;
-
-       if (!rt->rt_rmx.rmx_mtu && !(rt->rt_rmx.rmx_locks & RTV_MTU)
-           && rt->rt_ifp)
+       if (!rt->rt_rmx.rmx_mtu && !(rt->rt_rmx.rmx_locks & RTV_MTU) &&
+           rt->rt_ifp)
                rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;
 
        ret = rn_addroute(v_arg, n_arg, head, treenodes);
-       if (ret == NULL && rt->rt_flags & RTF_HOST) {
+       if (ret == NULL && (rt->rt_flags & RTF_HOST)) {
                struct rtentry *rt2;
                /*
                 * We are trying to add a host route, but can't.
                 * Find out if it is because of an
-                * ARP entry and delete it if so.
+                * ND6 entry and delete it if so.
                 */
-               rt2 = rtalloc1((struct sockaddr *)sin6, 0,
-                               RTF_CLONING | RTF_PRCLONING);
-               if (rt2) {
-                       if (rt2->rt_flags & RTF_LLINFO &&
-                               rt2->rt_flags & RTF_HOST &&
-                               rt2->rt_gateway &&
-                               rt2->rt_gateway->sa_family == AF_LINK) {
-                               rtrequest(RTM_DELETE,
-                                         (struct sockaddr *)rt_key(rt2),
-                                         rt2->rt_gateway,
-                                         rt_mask(rt2), rt2->rt_flags, 0);
+               rt2 = rtalloc1_scoped_locked((struct sockaddr *)sin6, 0,
+                   RTF_CLONING | RTF_PRCLONING, sin6_get_ifscope(rt_key(rt)));
+               if (rt2 != NULL) {
+                       char dbufc[MAX_IPv6_STR_LEN];
+
+                       RT_LOCK(rt2);
+                       if (verbose)
+                               rt_str(rt2, dbufc, sizeof (dbufc), NULL, 0);
+
+                       if ((rt2->rt_flags & RTF_LLINFO) &&
+                           (rt2->rt_flags & RTF_HOST) &&
+                           rt2->rt_gateway != NULL &&
+                           rt2->rt_gateway->sa_family == AF_LINK) {
+                               if (verbose) {
+                                       log(LOG_DEBUG, "%s: unable to insert "
+                                           "route to %s:%s, flags=%b, due to "
+                                           "existing ND6 route %s->%s "
+                                           "flags=%b, attempting to delete\n",
+                                           __func__, dbuf,
+                                           (rt->rt_ifp != NULL) ?
+                                           rt->rt_ifp->if_xname : "",
+                                           rt->rt_flags, RTF_BITS,
+                                           dbufc, (rt2->rt_ifp != NULL) ?
+                                           rt2->rt_ifp->if_xname : "",
+                                           rt2->rt_flags, RTF_BITS);
+                               }
+                               /*
+                                * Safe to drop rt_lock and use rt_key,
+                                * rt_gateway, since holding rnh_lock here
+                                * prevents another thread from calling
+                                * rt_setgate() on this route.
+                                */
+                               RT_UNLOCK(rt2);
+                               (void) rtrequest_locked(RTM_DELETE, rt_key(rt2),
+                                   rt2->rt_gateway, rt_mask(rt2),
+                                   rt2->rt_flags, NULL);
                                ret = rn_addroute(v_arg, n_arg, head,
-                                       treenodes);
+                                   treenodes);
+                       } else {
+                               RT_UNLOCK(rt2);
                        }
-                       RTFREE(rt2);
+                       rtfree_locked(rt2);
                }
-       } else if (ret == NULL && rt->rt_flags & RTF_CLONING) {
+       } else if (ret == NULL && (rt->rt_flags & RTF_CLONING)) {
                struct rtentry *rt2;
                /*
                 * We are trying to add a net route, but can't.
@@ -220,51 +282,158 @@ in6_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
                 *      net route entry, 3ffe:0501:: -> if0.
                 *      This case should not raise an error.
                 */
-               rt2 = rtalloc1((struct sockaddr *)sin6, 0,
-                               RTF_CLONING | RTF_PRCLONING);
-               if (rt2) {
-                       if ((rt2->rt_flags & (RTF_CLONING|RTF_HOST|RTF_GATEWAY))
-                                       == RTF_CLONING
-                        && rt2->rt_gateway
-                        && rt2->rt_gateway->sa_family == AF_LINK
-                        && rt2->rt_ifp == rt->rt_ifp) {
+               rt2 = rtalloc1_scoped_locked((struct sockaddr *)sin6, 0,
+                   RTF_CLONING | RTF_PRCLONING, sin6_get_ifscope(rt_key(rt)));
+               if (rt2 != NULL) {
+                       RT_LOCK(rt2);
+                       if ((rt2->rt_flags & (RTF_CLONING|RTF_HOST|
+                           RTF_GATEWAY)) == RTF_CLONING &&
+                           rt2->rt_gateway &&
+                           rt2->rt_gateway->sa_family == AF_LINK &&
+                           rt2->rt_ifp == rt->rt_ifp) {
                                ret = rt2->rt_nodes;
                        }
-                       RTFREE(rt2);
+                       RT_UNLOCK(rt2);
+                       rtfree_locked(rt2);
+               }
+       }
+
+       if (ret != NULL && (rt->rt_flags & RTF_DYNAMIC))
+               in6dynroutes++;
+
+       if (!verbose)
+               goto done;
+
+       if (ret != NULL) {
+               if (flags != rt->rt_flags) {
+                       log(LOG_DEBUG, "%s: route to %s->%s->%s inserted, "
+                           "oflags=%b, flags=%b\n", __func__,
+                           dbuf, gbuf, (rt->rt_ifp != NULL) ?
+                           rt->rt_ifp->if_xname : "", flags, RTF_BITS,
+                           rt->rt_flags, RTF_BITS);
+               } else {
+                       log(LOG_DEBUG, "%s: route to %s->%s->%s inserted, "
+                           "flags=%b\n", __func__, dbuf, gbuf,
+                           (rt->rt_ifp != NULL) ? rt->rt_ifp->if_xname : "",
+                           rt->rt_flags, RTF_BITS);
                }
+       } else {
+               log(LOG_DEBUG, "%s: unable to insert route to %s->%s->%s, "
+                   "flags=%b, already exists\n", __func__, dbuf, gbuf,
+                   (rt->rt_ifp != NULL) ? rt->rt_ifp->if_xname : "",
+                   rt->rt_flags, RTF_BITS);
        }
-       return ret;
+done:
+       return (ret);
+}
+
+static struct radix_node *
+in6_deleteroute(void *v_arg, void *netmask_arg, struct radix_node_head *head)
+{
+       struct radix_node *rn;
+
+       LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
+
+       rn = rn_delete(v_arg, netmask_arg, head);
+       if (rn != NULL) {
+               struct rtentry *rt = (struct rtentry *)rn;
+
+               RT_LOCK(rt);
+               if (rt->rt_flags & RTF_DYNAMIC)
+                       in6dynroutes--;
+               if (rt_verbose > 1) {
+                       char dbuf[MAX_IPv6_STR_LEN], gbuf[MAX_IPv6_STR_LEN];
+
+                       rt_str(rt, dbuf, sizeof (dbuf), gbuf, sizeof (gbuf));
+                       log(LOG_DEBUG, "%s: route to %s->%s->%s deleted, "
+                           "flags=%b\n", __func__, dbuf, gbuf,
+                           (rt->rt_ifp != NULL) ? rt->rt_ifp->if_xname : "",
+                           rt->rt_flags, RTF_BITS);
+               }
+               RT_UNLOCK(rt);
+       }
+       return (rn);
 }
 
 /*
- * This code is the inverse of in6_clsroute: on first reference, if we
- * were managing the route, stop doing so and set the expiration timer
- * back off again.
+ * Validate (unexpire) an expiring AF_INET6 route.
  */
-static struct radix_node *
-in6_matroute(void *v_arg, struct radix_node_head *head)
+struct radix_node *
+in6_validate(struct radix_node *rn)
 {
-       struct radix_node *rn = rn_match(v_arg, head);
        struct rtentry *rt = (struct rtentry *)rn;
 
-       if (rt && rt->rt_refcnt == 0) { /* this is first reference */
+       RT_LOCK_ASSERT_HELD(rt);
+
+       /* This is first reference? */
+       if (rt->rt_refcnt == 0) {
+               if (rt_verbose > 2) {
+                       char dbuf[MAX_IPv6_STR_LEN], gbuf[MAX_IPv6_STR_LEN];
+
+                       rt_str(rt, dbuf, sizeof (dbuf), gbuf, sizeof (gbuf));
+                       log(LOG_DEBUG, "%s: route to %s->%s->%s validated, "
+                           "flags=%b\n", __func__, dbuf, gbuf,
+                           (rt->rt_ifp != NULL) ? rt->rt_ifp->if_xname : "",
+                           rt->rt_flags, RTF_BITS);
+               }
+
+               /*
+                * It's one of ours; unexpire it.  If the timer is already
+                * scheduled, let it run later as it won't re-arm itself
+                * if there's nothing to do.
+                */
                if (rt->rt_flags & RTPRF_OURS) {
                        rt->rt_flags &= ~RTPRF_OURS;
-                       rt->rt_rmx.rmx_expire = 0;
+                       rt_setexpire(rt, 0);
                }
        }
-       return rn;
+       return (rn);
+}
+
+/*
+ * Similar to in6_matroute_args except without the leaf-matching parameters.
+ */
+static struct radix_node *
+in6_matroute(void *v_arg, struct radix_node_head *head)
+{
+       return (in6_matroute_args(v_arg, head, NULL, NULL));
+}
+
+/*
+ * This code is the inverse of in6_clsroute: on first reference, if we
+ * were managing the route, stop doing so and set the expiration timer
+ * back off again.
+ */
+static struct radix_node *
+in6_matroute_args(void *v_arg, struct radix_node_head *head,
+    rn_matchf_t *f, void *w)
+{
+       struct radix_node *rn = rn_match_args(v_arg, head, f, w);
+
+       if (rn != NULL) {
+               RT_LOCK_SPIN((struct rtentry *)rn);
+               in6_validate(rn);
+               RT_UNLOCK((struct rtentry *)rn);
+       }
+       return (rn);
 }
 
-static int rtq_reallyold = 60*60;
-       /* one hour is ``really old'' */
-                                  
-static int rtq_minreallyold = 10;
-       /* never automatically crank down to less */
-                                  
-static int rtq_toomany = 128;
-       /* 128 cached routes is ``too many'' */
-                                  
+SYSCTL_DECL(_net_inet6_ip6);
+
+/* one hour is ``really old'' */
+static uint32_t rtq_reallyold = 60*60;
+SYSCTL_UINT(_net_inet6_ip6, IPV6CTL_RTEXPIRE, rtexpire,
+       CTLFLAG_RW | CTLFLAG_LOCKED, &rtq_reallyold, 0, "");
+
+/* never automatically crank down to less */
+static uint32_t rtq_minreallyold = 10;
+SYSCTL_UINT(_net_inet6_ip6, IPV6CTL_RTMINEXPIRE, rtminexpire,
+       CTLFLAG_RW | CTLFLAG_LOCKED, &rtq_minreallyold, 0, "");
+
+/* 128 cached routes is ``too many'' */
+static uint32_t rtq_toomany = 128;
+SYSCTL_UINT(_net_inet6_ip6, IPV6CTL_RTMAXCACHE, rtmaxcache,
+       CTLFLAG_RW | CTLFLAG_LOCKED, &rtq_toomany, 0, "");
 
 /*
  * On last reference drop, mark the route as belong to us so that it can be
@@ -273,111 +442,231 @@ static int rtq_toomany = 128;
 static void
 in6_clsroute(struct radix_node *rn, struct radix_node_head *head)
 {
+#pragma unused(head)
+       char dbuf[MAX_IPv6_STR_LEN], gbuf[MAX_IPv6_STR_LEN];
        struct rtentry *rt = (struct rtentry *)rn;
+       boolean_t verbose = (rt_verbose > 1);
+
+       LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
+       RT_LOCK_ASSERT_HELD(rt);
 
        if (!(rt->rt_flags & RTF_UP))
-               return;         /* prophylactic measures */
+               return;         /* prophylactic measures */
 
        if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST)
                return;
 
-       if ((rt->rt_flags & (RTF_WASCLONED | RTPRF_OURS))
-          != RTF_WASCLONED)
+       if (rt->rt_flags & RTPRF_OURS)
                return;
 
+       if (!(rt->rt_flags & (RTF_WASCLONED | RTF_DYNAMIC)))
+               return;
+
+       if (verbose)
+               rt_str(rt, dbuf, sizeof (dbuf), gbuf, sizeof (gbuf));
+
        /*
-        * As requested by David Greenman:
-        * If rtq_reallyold is 0, just delete the route without
-        * waiting for a timeout cycle to kill it.
+        * Delete the route immediately if RTF_DELCLONE is set or
+        * if route caching is disabled (rtq_reallyold set to 0).
+        * Otherwise, let it expire and be deleted by in6_rtqkill().
         */
-       if (rtq_reallyold != 0) {
-               rt->rt_flags |= RTPRF_OURS;
-               rt->rt_rmx.rmx_expire = time_second + rtq_reallyold;
+       if ((rt->rt_flags & RTF_DELCLONE) || rtq_reallyold == 0) {
+               int err;
+
+               if (verbose) {
+                       log(LOG_DEBUG, "%s: deleting route to %s->%s->%s, "
+                           "flags=%b\n", __func__, dbuf, gbuf,
+                           (rt->rt_ifp != NULL) ? rt->rt_ifp->if_xname : "",
+                           rt->rt_flags, RTF_BITS);
+               }
+               /*
+                * Delete the route from the radix tree but since we are
+                * called when the route's reference count is 0, don't
+                * deallocate it until we return from this routine by
+                * telling rtrequest that we're interested in it.
+                * Safe to drop rt_lock and use rt_key, rt_gateway,
+                * since holding rnh_lock here prevents another thread
+                * from calling rt_setgate() on this route.
+                */
+               RT_UNLOCK(rt);
+               err = rtrequest_locked(RTM_DELETE, rt_key(rt),
+                   rt->rt_gateway, rt_mask(rt), rt->rt_flags, &rt);
+               if (err == 0) {
+                       /* Now let the caller free it */
+                       RT_LOCK(rt);
+                       RT_REMREF_LOCKED(rt);
+               } else {
+                       RT_LOCK(rt);
+                       if (!verbose)
+                               rt_str(rt, dbuf, sizeof (dbuf),
+                                   gbuf, sizeof (gbuf));
+                       log(LOG_ERR, "%s: error deleting route to "
+                           "%s->%s->%s, flags=%b, err=%d\n", __func__,
+                           dbuf, gbuf, (rt->rt_ifp != NULL) ?
+                           rt->rt_ifp->if_xname : "", rt->rt_flags,
+                           RTF_BITS, err);
+               }
        } else {
-               rtrequest(RTM_DELETE,
-                         (struct sockaddr *)rt_key(rt),
-                         rt->rt_gateway, rt_mask(rt),
-                         rt->rt_flags, 0);
+               uint64_t timenow;
+
+               timenow = net_uptime();
+               rt->rt_flags |= RTPRF_OURS;
+               rt_setexpire(rt, timenow + rtq_reallyold);
+
+               if (verbose) {
+                       log(LOG_DEBUG, "%s: route to %s->%s->%s invalidated, "
+                           "flags=%b, expire=T+%u\n", __func__, dbuf, gbuf,
+                           (rt->rt_ifp != NULL) ? rt->rt_ifp->if_xname : "",
+                           rt->rt_flags, RTF_BITS, rt->rt_expire - timenow);
+               }
+
+               /* We have at least one entry; arm the timer if not already */
+               in6_sched_rtqtimo(NULL);
        }
 }
 
 struct rtqk_arg {
        struct radix_node_head *rnh;
-       int mode;
        int updating;
        int draining;
-       int killed;
-       int found;
-       time_t nextstop;
+       uint32_t killed;
+       uint32_t found;
+       uint64_t nextstop;
 };
 
 /*
  * Get rid of old routes.  When draining, this deletes everything, even when
- * the timeout is not expired yet.  When updating, this makes sure that
- * nothing has a timeout longer than the current value of rtq_reallyold.
+ * the timeout is not expired yet.  This also applies if the route is dynamic
+ * and there are sufficiently large number of such routes (more than a half of
+ * maximum).  When updating, this makes sure that nothing has a timeout longer
+ * than the current value of rtq_reallyold.
  */
 static int
 in6_rtqkill(struct radix_node *rn, void *rock)
 {
        struct rtqk_arg *ap = rock;
        struct rtentry *rt = (struct rtentry *)rn;
+       boolean_t verbose = (rt_verbose > 1);
+       uint64_t timenow;
        int err;
 
+       timenow = net_uptime();
+       LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
+
+       RT_LOCK(rt);
        if (rt->rt_flags & RTPRF_OURS) {
-               ap->found++;
+               char dbuf[MAX_IPv6_STR_LEN], gbuf[MAX_IPv6_STR_LEN];
 
-               if (ap->draining || rt->rt_rmx.rmx_expire <= time_second) {
-                       if (rt->rt_refcnt > 0)
-                               panic("rtqkill route really not free");
+               if (verbose)
+                       rt_str(rt, dbuf, sizeof (dbuf), gbuf, sizeof (gbuf));
 
-                       err = rtrequest(RTM_DELETE,
-                                       (struct sockaddr *)rt_key(rt),
-                                       rt->rt_gateway, rt_mask(rt),
-                                       rt->rt_flags, 0);
-                       if (err) {
-                               log(LOG_WARNING, "in6_rtqkill: error %d", err);
+               ap->found++;
+               VERIFY(rt->rt_expire == 0 || rt->rt_rmx.rmx_expire != 0);
+               VERIFY(rt->rt_expire != 0 || rt->rt_rmx.rmx_expire == 0);
+               if (ap->draining || rt->rt_expire <= timenow ||
+                   ((rt->rt_flags & RTF_DYNAMIC) && ip6_maxdynroutes >= 0 &&
+                   in6dynroutes > ip6_maxdynroutes / 2)) {
+                       if (rt->rt_refcnt > 0) {
+                               panic("%s: route %p marked with RTPRF_OURS "
+                                   "with non-zero refcnt (%u)", __func__,
+                                   rt, rt->rt_refcnt);
+                               /* NOTREACHED */
+                       }
+
+                       if (verbose) {
+                               log(LOG_DEBUG, "%s: deleting route to "
+                                   "%s->%s->%s, flags=%b, draining=%d\n",
+                                   __func__, dbuf, gbuf, (rt->rt_ifp != NULL) ?
+                                   rt->rt_ifp->if_xname : "", rt->rt_flags,
+                                   RTF_BITS, ap->draining);
+                       }
+                       RT_ADDREF_LOCKED(rt);   /* for us to free below */
+                       /*
+                        * Delete this route since we're done with it;
+                        * the route may be freed afterwards, so we
+                        * can no longer refer to 'rt' upon returning
+                        * from rtrequest().  Safe to drop rt_lock and
+                        * use rt_key, rt_gateway, since holding rnh_lock
+                        * here prevents another thread from calling
+                        * rt_setgate() on this route.
+                        */
+                       RT_UNLOCK(rt);
+                       err = rtrequest_locked(RTM_DELETE, rt_key(rt),
+                           rt->rt_gateway, rt_mask(rt), rt->rt_flags, NULL);
+                       if (err != 0) {
+                               RT_LOCK(rt);
+                               if (!verbose)
+                                       rt_str(rt, dbuf, sizeof (dbuf),
+                                           gbuf, sizeof (gbuf));
+                               log(LOG_ERR, "%s: error deleting route to "
+                                   "%s->%s->%s, flags=%b, err=%d\n", __func__,
+                                   dbuf, gbuf, (rt->rt_ifp != NULL) ?
+                                   rt->rt_ifp->if_xname : "", rt->rt_flags,
+                                   RTF_BITS, err);
+                               RT_UNLOCK(rt);
                        } else {
                                ap->killed++;
                        }
+                       rtfree_locked(rt);
                } else {
-                       if (ap->updating
-                          && (rt->rt_rmx.rmx_expire - time_second
-                              > rtq_reallyold)) {
-                               rt->rt_rmx.rmx_expire = time_second
-                                       + rtq_reallyold;
+                       uint64_t expire = (rt->rt_expire - timenow);
+
+                       if (ap->updating && expire > rtq_reallyold) {
+                               rt_setexpire(rt, timenow + rtq_reallyold);
+                               if (verbose) {
+                                       log(LOG_DEBUG, "%s: route to "
+                                           "%s->%s->%s, flags=%b, adjusted "
+                                           "expire=T+%u (was T+%u)\n",
+                                           __func__, dbuf, gbuf,
+                                           (rt->rt_ifp != NULL) ?
+                                           rt->rt_ifp->if_xname : "",
+                                           rt->rt_flags, RTF_BITS,
+                                           (rt->rt_expire - timenow), expire);
+                               }
                        }
-                       ap->nextstop = lmin(ap->nextstop,
-                                           rt->rt_rmx.rmx_expire);
+                       ap->nextstop = lmin(ap->nextstop, rt->rt_expire);
+                       RT_UNLOCK(rt);
                }
+       } else {
+               RT_UNLOCK(rt);
        }
 
-       return 0;
+       return (0);
 }
 
-#define RTQ_TIMEOUT    60*10   /* run no less than once every ten minutes */
+#define        RTQ_TIMEOUT     60*10   /* run no less than once every ten minutes */
 static int rtq_timeout = RTQ_TIMEOUT;
 
 static void
-in6_rtqtimo(void *rock)
+in6_rtqtimo(void *targ)
 {
-       struct radix_node_head *rnh = rock;
+#pragma unused(targ)
+       struct radix_node_head *rnh;
        struct rtqk_arg arg;
        struct timeval atv;
-       static time_t last_adjusted_timeout = 0;
-       int s;
-#ifdef __APPLE__
-       boolean_t   funnel_state;
-       funnel_state = thread_set_funneled(TRUE);
-#endif
-
-       arg.found = arg.killed = 0;
+       static uint64_t last_adjusted_timeout = 0;
+       boolean_t verbose = (rt_verbose > 1);
+       uint64_t timenow;
+       uint32_t ours;
+
+       lck_mtx_lock(rnh_lock);
+       rnh = rt_tables[AF_INET6];
+       VERIFY(rnh != NULL);
+
+       /* Get the timestamp after we acquire the lock for better accuracy */
+       timenow = net_uptime();
+       if (verbose) {
+               log(LOG_DEBUG, "%s: initial nextstop is T+%u seconds\n",
+                   __func__, rtq_timeout);
+       }
+       bzero(&arg, sizeof (arg));
        arg.rnh = rnh;
-       arg.nextstop = time_second + rtq_timeout;
-       arg.draining = arg.updating = 0;
-       s = splnet();
+       arg.nextstop = timenow + rtq_timeout;
        rnh->rnh_walktree(rnh, in6_rtqkill, &arg);
-       splx(s);
-
+       if (verbose) {
+               log(LOG_DEBUG, "%s: found %u, killed %u\n", __func__,
+                   arg.found, arg.killed);
+       }
        /*
         * Attempt to be somewhat dynamic about this:
         * If there are ``too many'' routes sitting around taking up space,
@@ -386,113 +675,76 @@ in6_rtqtimo(void *rock)
         * than once in rtq_timeout seconds, to keep from cranking down too
         * hard.
         */
-       if ((arg.found - arg.killed > rtq_toomany)
-          && (time_second - last_adjusted_timeout >= rtq_timeout)
-          && rtq_reallyold > rtq_minreallyold) {
-               rtq_reallyold = 2*rtq_reallyold / 3;
-               if (rtq_reallyold < rtq_minreallyold) {
+       ours = (arg.found - arg.killed);
+       if (ours > rtq_toomany &&
+           ((timenow - last_adjusted_timeout) >= (uint64_t)rtq_timeout) &&
+           rtq_reallyold > rtq_minreallyold) {
+               rtq_reallyold = 2 * rtq_reallyold / 3;
+               if (rtq_reallyold < rtq_minreallyold)
                        rtq_reallyold = rtq_minreallyold;
-               }
 
-               last_adjusted_timeout = time_second;
-#if DIAGNOSTIC
-               log(LOG_DEBUG, "in6_rtqtimo: adjusted rtq_reallyold to %d",
-                   rtq_reallyold);
-#endif
+               last_adjusted_timeout = timenow;
+               if (verbose) {
+                       log(LOG_DEBUG, "%s: adjusted rtq_reallyold to %d "
+                           "seconds\n", __func__, rtq_reallyold);
+               }
                arg.found = arg.killed = 0;
                arg.updating = 1;
-               s = splnet();
                rnh->rnh_walktree(rnh, in6_rtqkill, &arg);
-               splx(s);
        }
 
        atv.tv_usec = 0;
-       atv.tv_sec = arg.nextstop;
-       timeout(in6_rtqtimo, rock, tvtohz(&atv));
-#ifdef __APPLE__
-       (void) thread_set_funneled(funnel_state);
-#endif
+       atv.tv_sec = arg.nextstop - timenow;
+       /* re-arm the timer only if there's work to do */
+       in6_rtqtimo_run = 0;
+       if (ours > 0)
+               in6_sched_rtqtimo(&atv);
+       else if (verbose)
+               log(LOG_DEBUG, "%s: not rescheduling timer\n", __func__);
+       lck_mtx_unlock(rnh_lock);
 }
 
-/*
- * Age old PMTUs.
- */
-struct mtuex_arg {
-       struct radix_node_head *rnh;
-       time_t nextstop;
-};
-
-static int
-in6_mtuexpire(struct radix_node *rn, void *rock)
-{
-       struct rtentry *rt = (struct rtentry *)rn;
-       struct mtuex_arg *ap = rock;
-
-       /* sanity */
-       if (!rt)
-               panic("rt == NULL in in6_mtuexpire");
-
-       if (rt->rt_rmx.rmx_expire && !(rt->rt_flags & RTF_PROBEMTU)) {
-               if (rt->rt_rmx.rmx_expire <= time_second) {
-                       rt->rt_flags |= RTF_PROBEMTU;
-               } else {
-                       ap->nextstop = lmin(ap->nextstop,
-                                       rt->rt_rmx.rmx_expire);
-               }
-       }
-
-       return 0;
-}
-
-#define        MTUTIMO_DEFAULT (60*1)
-
 static void
-in6_mtutimo(void *rock)
+in6_sched_rtqtimo(struct timeval *atv)
 {
-       struct radix_node_head *rnh = rock;
-       struct mtuex_arg arg;
-       struct timeval atv;
-       int s;
-#ifdef __APPLE__
-       boolean_t   funnel_state;
-       funnel_state = thread_set_funneled(TRUE);
-#endif
+       LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
 
-       arg.rnh = rnh;
-       arg.nextstop = time_second + MTUTIMO_DEFAULT;
-       s = splnet();
-       rnh->rnh_walktree(rnh, in6_mtuexpire, &arg);
-       splx(s);
+       if (!in6_rtqtimo_run) {
+               struct timeval tv;
 
-       atv.tv_usec = 0;
-       atv.tv_sec = arg.nextstop;
-       if (atv.tv_sec < time_second) {
-               printf("invalid mtu expiration time on routing table\n");
-               arg.nextstop = time_second + 30;        /*last resort*/
+               if (atv == NULL) {
+                       tv.tv_usec = 0;
+                       tv.tv_sec = MAX(rtq_timeout / 10, 1);
+                       atv = &tv;
+               }
+               if (rt_verbose > 1) {
+                       log(LOG_DEBUG, "%s: timer scheduled in "
+                           "T+%llus.%lluu\n", __func__,
+                           (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec);
+               }
+               in6_rtqtimo_run = 1;
+               timeout(in6_rtqtimo, NULL, tvtohz(atv));
        }
-       timeout(in6_mtutimo, rock, tvtohz(&atv));
-#ifdef __APPLE__
-       (void) thread_set_funneled(funnel_state);
-#endif
 }
 
-#if 0
 void
-in6_rtqdrain()
+in6_rtqdrain(void)
 {
-       struct radix_node_head *rnh = rt_tables[AF_INET6];
+       struct radix_node_head *rnh;
        struct rtqk_arg arg;
-       int s;
-       arg.found = arg.killed = 0;
+
+       if (rt_verbose > 1)
+               log(LOG_DEBUG, "%s: draining routes\n", __func__);
+
+       lck_mtx_lock(rnh_lock);
+       rnh = rt_tables[AF_INET6];
+       VERIFY(rnh != NULL);
+       bzero(&arg, sizeof (arg));
        arg.rnh = rnh;
-       arg.nextstop = 0;
        arg.draining = 1;
-       arg.updating = 0;
-       s = splnet();
        rnh->rnh_walktree(rnh, in6_rtqkill, &arg);
-       splx(s);
+       lck_mtx_unlock(rnh_lock);
 }
-#endif
 
 /*
  * Initialize our routing tree.
@@ -502,17 +754,26 @@ in6_inithead(void **head, int off)
 {
        struct radix_node_head *rnh;
 
+       /* If called from route_init(), make sure it is exactly once */
+       VERIFY(head != (void **)&rt_tables[AF_INET6] || *head == NULL);
+
        if (!rn_inithead(head, off))
-               return 0;
+               return (0);
 
-       if (head != (void **)&rt_tables[AF_INET6]) /* BOGUS! */
-               return 1;       /* only do this for the real routing table */
+       /*
+        * We can get here from nfs_subs.c as well, in which case this
+        * won't be for the real routing table and thus we're done;
+        * this also takes care of the case when we're called more than
+        * once from anywhere but route_init().
+        */
+       if (head != (void **)&rt_tables[AF_INET6])
+               return (1);     /* only do this for the real routing table */
 
        rnh = *head;
        rnh->rnh_addaddr = in6_addroute;
+       rnh->rnh_deladdr = in6_deleteroute;
        rnh->rnh_matchaddr = in6_matroute;
+       rnh->rnh_matchaddr_args = in6_matroute_args;
        rnh->rnh_close = in6_clsroute;
-       in6_rtqtimo(rnh);       /* kick off timeout first time */
-       in6_mtutimo(rnh);       /* kick off timeout first time */
-       return 1;
+       return (1);
 }