]> git.saurik.com Git - apple/xnu.git/blobdiff - bsd/netinet/in_rmx.c
xnu-1699.22.81.tar.gz
[apple/xnu.git] / bsd / netinet / in_rmx.c
index 371a6db643559bbeb5405d106f9e30e76fa5f889..2d0c2735d10d7fc46b8a703161e6c09c0e43e72c 100644 (file)
@@ -1,5 +1,5 @@
 /*
 /*
- * Copyright (c) 2000,2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
 #include <sys/sysctl.h>
 #include <sys/socket.h>
 #include <sys/mbuf.h>
 #include <sys/sysctl.h>
 #include <sys/socket.h>
 #include <sys/mbuf.h>
+#include <sys/protosw.h>
 #include <sys/syslog.h>
 #include <sys/syslog.h>
+#include <sys/mcache.h>
 #include <kern/lock.h>
 
 #include <net/if.h>
 #include <net/route.h>
 #include <netinet/in.h>
 #include <netinet/in_var.h>
 #include <kern/lock.h>
 
 #include <net/if.h>
 #include <net/route.h>
 #include <netinet/in.h>
 #include <netinet/in_var.h>
+#include <netinet/in_arp.h>
 
 extern int tvtohz(struct timeval *);
 extern int     in_inithead(void **head, int off);
 
 extern int tvtohz(struct timeval *);
 extern int     in_inithead(void **head, int off);
-extern u_long route_generation;
 
 #ifdef __APPLE__
 static void in_rtqtimo(void *rock);
 #endif
 
 
 #ifdef __APPLE__
 static void in_rtqtimo(void *rock);
 #endif
 
+static struct radix_node *in_matroute_args(void *, struct radix_node_head *,
+    rn_matchf_t *f, void *);
+
 #define RTPRF_OURS             RTF_PROTO3      /* set on routes we manage */
 
 /*
 #define RTPRF_OURS             RTF_PROTO3      /* set on routes we manage */
 
 /*
@@ -104,13 +109,16 @@ in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
        struct sockaddr_in *sin = (struct sockaddr_in *)rt_key(rt);
        struct radix_node *ret;
 
        struct sockaddr_in *sin = (struct sockaddr_in *)rt_key(rt);
        struct radix_node *ret;
 
+       lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+       RT_LOCK_ASSERT_HELD(rt);
+
        /*
         * For IP, all unicast non-host routes are automatically cloning.
         */
        /*
         * For IP, all unicast non-host routes are automatically cloning.
         */
-       if(IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
+       if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
                rt->rt_flags |= RTF_MULTICAST;
 
                rt->rt_flags |= RTF_MULTICAST;
 
-       if(!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST))) {
+       if (!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST))) {
                rt->rt_flags |= RTF_PRCLONING;
        }
 
                rt->rt_flags |= RTF_PRCLONING;
        }
 
@@ -134,11 +142,15 @@ in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
                if (in_broadcast(sin->sin_addr, rt->rt_ifp)) {
                        rt->rt_flags |= RTF_BROADCAST;
                } else {
                if (in_broadcast(sin->sin_addr, rt->rt_ifp)) {
                        rt->rt_flags |= RTF_BROADCAST;
                } else {
+                       /* Become a regular mutex */
+                       RT_CONVERT_LOCK(rt);
+                       IFA_LOCK_SPIN(rt->rt_ifa);
 #define satosin(sa) ((struct sockaddr_in *)sa)
                        if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr
                            == sin->sin_addr.s_addr)
                                rt->rt_flags |= RTF_LOCAL;
 #undef satosin
 #define satosin(sa) ((struct sockaddr_in *)sa)
                        if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr
                            == sin->sin_addr.s_addr)
                                rt->rt_flags |= RTF_LOCAL;
 #undef satosin
+                       IFA_UNLOCK(rt->rt_ifa);
                }
        }
 
                }
        }
 
@@ -154,19 +166,28 @@ in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
                 * Find out if it is because of an
                 * ARP entry and delete it if so.
                 */
                 * Find out if it is because of an
                 * ARP entry and delete it if so.
                 */
-               rt2 = rtalloc1_locked((struct sockaddr *)sin, 0,
-                               RTF_CLONING | RTF_PRCLONING);
+               rt2 = rtalloc1_scoped_locked(rt_key(rt), 0,
+                   RTF_CLONING | RTF_PRCLONING, sin_get_ifscope(rt_key(rt)));
                if (rt2) {
                if (rt2) {
-                       if (rt2->rt_flags & RTF_LLINFO &&
-                               rt2->rt_flags & RTF_HOST &&
-                               rt2->rt_gateway &&
-                               rt2->rt_gateway->sa_family == AF_LINK) {
-                               rtrequest_locked(RTM_DELETE,
-                                         (struct sockaddr *)rt_key(rt2),
-                                         rt2->rt_gateway,
-                                         rt_mask(rt2), rt2->rt_flags, 0);
+                       RT_LOCK(rt2);
+                       if ((rt2->rt_flags & RTF_LLINFO) &&
+                           (rt2->rt_flags & RTF_HOST) &&
+                           rt2->rt_gateway != NULL &&
+                           rt2->rt_gateway->sa_family == AF_LINK) {
+                               /*
+                                * Safe to drop rt_lock and use rt_key,
+                                * rt_gateway, since holding rnh_lock here
+                                * prevents another thread from calling
+                                * rt_setgate() on this route.
+                                */
+                               RT_UNLOCK(rt2);
+                               rtrequest_locked(RTM_DELETE, rt_key(rt2),
+                                   rt2->rt_gateway, rt_mask(rt2),
+                                   rt2->rt_flags, 0);
                                ret = rn_addroute(v_arg, n_arg, head,
                                        treenodes);
                                ret = rn_addroute(v_arg, n_arg, head,
                                        treenodes);
+                       } else {
+                               RT_UNLOCK(rt2);
                        }
                        rtfree_locked(rt2);
                }
                        }
                        rtfree_locked(rt2);
                }
@@ -174,41 +195,75 @@ in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
        return ret;
 }
 
        return ret;
 }
 
+/*
+ * Validate (unexpire) an expiring AF_INET route.
+ *
+ * Caller must hold the route's rt_lock (asserted below).  Returns the
+ * radix node unchanged; only the route's flags/expiry may be modified.
+ */
+struct radix_node *
+in_validate(struct radix_node *rn)
+{
+       struct rtentry *rt = (struct rtentry *)rn;
+
+       RT_LOCK_ASSERT_HELD(rt);
+
+       /* This is first reference? */
+       if (rt->rt_refcnt == 0) {
+               if (rt->rt_flags & RTPRF_OURS) {
+                       /* It's one of ours; unexpire it */
+                       rt->rt_flags &= ~RTPRF_OURS;
+                       rt_setexpire(rt, 0);
+               } else if ((rt->rt_flags & RTF_LLINFO) &&
+                   (rt->rt_flags & RTF_HOST) && rt->rt_gateway != NULL &&
+                   rt->rt_gateway->sa_family == AF_LINK) {
+                       /* It's ARP; let it be handled there */
+                       arp_validate(rt);
+               }
+       }
+       return (rn);
+}
+
+/*
+ * Similar to in_matroute_args except without the leaf-matching parameters.
+ * Installed as rnh_matchaddr; delegates with NULL match function/arg so
+ * both entry points share the same lookup-and-validate path.
+ */
+static struct radix_node *
+in_matroute(void *v_arg, struct radix_node_head *head)
+{
+       return (in_matroute_args(v_arg, head, NULL, NULL));
+}
+
 /*
  * This code is the inverse of in_clsroute: on first reference, if we
  * were managing the route, stop doing so and set the expiration timer
  * back off again.
  */
 static struct radix_node *
 /*
  * This code is the inverse of in_clsroute: on first reference, if we
  * were managing the route, stop doing so and set the expiration timer
  * back off again.
  */
 static struct radix_node *
-in_matroute(void *v_arg, struct radix_node_head *head)
+in_matroute_args(void *v_arg, struct radix_node_head *head,
+    rn_matchf_t *f, void *w)
 {
 {
-       struct radix_node *rn = rn_match(v_arg, head);
-       struct rtentry *rt = (struct rtentry *)rn;
+       struct radix_node *rn = rn_match_args(v_arg, head, f, w);
 
 
-       if(rt && rt->rt_refcnt == 0) { /* this is first reference */
-               if(rt->rt_flags & RTPRF_OURS) {
-                       rt->rt_flags &= ~RTPRF_OURS;
-                       rt->rt_rmx.rmx_expire = 0;
-               }
+       if (rn != NULL) {
+               RT_LOCK_SPIN((struct rtentry *)rn);
+               in_validate(rn);
+               RT_UNLOCK((struct rtentry *)rn);
        }
        }
-       return rn;
+       return (rn);
 }
 
 static int rtq_reallyold = 60*60;
        /* one hour is ``really old'' */
 }
 
 static int rtq_reallyold = 60*60;
        /* one hour is ``really old'' */
-SYSCTL_INT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire, CTLFLAG_RW, 
+SYSCTL_INT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire, CTLFLAG_RW | CTLFLAG_LOCKED,
    &rtq_reallyold , 0, 
    "Default expiration time on dynamically learned routes");
                                   
 static int rtq_minreallyold = 10;
        /* never automatically crank down to less */
     &rtq_reallyold , 0, 
     "Default expiration time on dynamically learned routes");
                                   
 static int rtq_minreallyold = 10;
        /* never automatically crank down to less */
-SYSCTL_INT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire, CTLFLAG_RW, 
+SYSCTL_INT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire, CTLFLAG_RW | CTLFLAG_LOCKED,
    &rtq_minreallyold , 0, 
    "Minimum time to attempt to hold onto dynamically learned routes");
                                   
 static int rtq_toomany = 128;
        /* 128 cached routes is ``too many'' */
     &rtq_minreallyold , 0, 
     "Minimum time to attempt to hold onto dynamically learned routes");
                                   
 static int rtq_toomany = 128;
        /* 128 cached routes is ``too many'' */
-SYSCTL_INT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache, CTLFLAG_RW, 
+SYSCTL_INT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache, CTLFLAG_RW | CTLFLAG_LOCKED,
    &rtq_toomany , 0, "Upper limit on dynamically learned routes");
 
 #ifdef __APPLE__
     &rtq_toomany , 0, "Upper limit on dynamically learned routes");
 
 #ifdef __APPLE__
@@ -225,12 +280,12 @@ SYSCTL_INT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache, CTLFLAG_RW,
  * If for some reason a circular route is needed, turn this sysctl (net.inet.ip.check_route_selfref) to zero.
  */
 int check_routeselfref = 1;
  * If for some reason a circular route is needed, turn this sysctl (net.inet.ip.check_route_selfref) to zero.
  */
 int check_routeselfref = 1;
-SYSCTL_INT(_net_inet_ip, OID_AUTO, check_route_selfref, CTLFLAG_RW
+SYSCTL_INT(_net_inet_ip, OID_AUTO, check_route_selfref, CTLFLAG_RW | CTLFLAG_LOCKED,
     &check_routeselfref , 0, "");
 #endif
 
     &check_routeselfref , 0, "");
 #endif
 
-__private_extern__ int use_routegenid = 1;
-SYSCTL_INT(_net_inet_ip, OID_AUTO, use_route_genid, CTLFLAG_RW
+int use_routegenid = 1;
+SYSCTL_INT(_net_inet_ip, OID_AUTO, use_route_genid, CTLFLAG_RW | CTLFLAG_LOCKED,
     &use_routegenid , 0, "");
 
 /*
     &use_routegenid , 0, "");
 
 /*
@@ -242,6 +297,9 @@ in_clsroute(struct radix_node *rn, __unused struct radix_node_head *head)
 {
        struct rtentry *rt = (struct rtentry *)rn;
 
 {
        struct rtentry *rt = (struct rtentry *)rn;
 
+       lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+       RT_LOCK_ASSERT_HELD(rt);
+
        if (!(rt->rt_flags & RTF_UP))
                return;         /* prophylactic measures */
 
        if (!(rt->rt_flags & RTF_UP))
                return;         /* prophylactic measures */
 
@@ -262,18 +320,26 @@ in_clsroute(struct radix_node *rn, __unused struct radix_node_head *head)
                 * called when the route's reference count is 0, don't
                 * deallocate it until we return from this routine by
                 * telling rtrequest that we're interested in it.
                 * called when the route's reference count is 0, don't
                 * deallocate it until we return from this routine by
                 * telling rtrequest that we're interested in it.
+                * Safe to drop rt_lock and use rt_key, rt_gateway since
+                * holding rnh_lock here prevents another thread from
+                * calling rt_setgate() on this route.
                 */
                 */
+               RT_UNLOCK(rt);
                if (rtrequest_locked(RTM_DELETE, (struct sockaddr *)rt_key(rt),
                    rt->rt_gateway, rt_mask(rt), rt->rt_flags, &rt) == 0) {
                        /* Now let the caller free it */
                if (rtrequest_locked(RTM_DELETE, (struct sockaddr *)rt_key(rt),
                    rt->rt_gateway, rt_mask(rt), rt->rt_flags, &rt) == 0) {
                        /* Now let the caller free it */
-                       rtunref(rt);
+                       RT_LOCK(rt);
+                       RT_REMREF_LOCKED(rt);
+               } else {
+                       RT_LOCK(rt);
                }
        } else {
                }
        } else {
-               struct timeval timenow;
+               uint64_t timenow;
 
 
-               getmicrotime(&timenow);
+               timenow = net_uptime();
                rt->rt_flags |= RTPRF_OURS;
                rt->rt_flags |= RTPRF_OURS;
-               rt->rt_rmx.rmx_expire = timenow.tv_sec + rtq_reallyold;
+               rt_setexpire(rt,
+                   rt_expiry(rt, timenow, rtq_reallyold));
        }
 }
 
        }
 }
 
@@ -283,7 +349,7 @@ struct rtqk_arg {
        int killed;
        int found;
        int updating;
        int killed;
        int found;
        int updating;
-       time_t nextstop;
+       uint64_t nextstop;
 };
 
 /*
 };
 
 /*
@@ -297,37 +363,51 @@ in_rtqkill(struct radix_node *rn, void *rock)
        struct rtqk_arg *ap = rock;
        struct rtentry *rt = (struct rtentry *)rn;
        int err;
        struct rtqk_arg *ap = rock;
        struct rtentry *rt = (struct rtentry *)rn;
        int err;
-       struct timeval timenow;
+       uint64_t timenow;
 
 
-       getmicrotime(&timenow);
-       lck_mtx_assert(rt_mtx, LCK_MTX_ASSERT_OWNED);
+       timenow = net_uptime();
+       lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
 
 
+       RT_LOCK(rt);
        if (rt->rt_flags & RTPRF_OURS) {
                ap->found++;
 
        if (rt->rt_flags & RTPRF_OURS) {
                ap->found++;
 
-               if (ap->draining || rt->rt_rmx.rmx_expire <= timenow.tv_sec) {
+               VERIFY(rt->rt_expire == 0 || rt->rt_rmx.rmx_expire != 0);
+               VERIFY(rt->rt_expire != 0 || rt->rt_rmx.rmx_expire == 0);
+               if (ap->draining || rt->rt_expire <= timenow) {
                        if (rt->rt_refcnt > 0)
                                panic("rtqkill route really not free");
 
                        if (rt->rt_refcnt > 0)
                                panic("rtqkill route really not free");
 
-                       err = rtrequest_locked(RTM_DELETE,
-                                       (struct sockaddr *)rt_key(rt),
-                                       rt->rt_gateway, rt_mask(rt),
-                                       rt->rt_flags, 0);
+                       /*
+                        * Delete this route since we're done with it;
+                        * the route may be freed afterwards, so we
+                        * can no longer refer to 'rt' upon returning
+                        * from rtrequest().  Safe to drop rt_lock and
+                        * use rt_key, rt_gateway since holding rnh_lock
+                        * here prevents another thread from calling
+                        * rt_setgate() on this route.
+                        */
+                       RT_UNLOCK(rt);
+                       err = rtrequest_locked(RTM_DELETE, rt_key(rt),
+                           rt->rt_gateway, rt_mask(rt), rt->rt_flags, 0);
                        if (err) {
                                log(LOG_WARNING, "in_rtqkill: error %d\n", err);
                        } else {
                                ap->killed++;
                        }
                } else {
                        if (err) {
                                log(LOG_WARNING, "in_rtqkill: error %d\n", err);
                        } else {
                                ap->killed++;
                        }
                } else {
-                       if (ap->updating
-                          && (rt->rt_rmx.rmx_expire - timenow.tv_sec
-                              > rtq_reallyold)) {
-                               rt->rt_rmx.rmx_expire = timenow.tv_sec
-                                       + rtq_reallyold;
+                       if (ap->updating &&
+                           (rt->rt_expire - timenow) >
+                           rt_expiry(rt, 0, rtq_reallyold)) {
+                               rt_setexpire(rt, rt_expiry(rt,
+                                   timenow, rtq_reallyold));
                        }
                        ap->nextstop = lmin(ap->nextstop,
                        }
                        ap->nextstop = lmin(ap->nextstop,
-                                           rt->rt_rmx.rmx_expire);
+                                           rt->rt_expire);
+                       RT_UNLOCK(rt);
                }
                }
+       } else {
+               RT_UNLOCK(rt);
        }
 
        return 0;
        }
 
        return 0;
@@ -348,16 +428,16 @@ in_rtqtimo(void *rock)
        struct radix_node_head *rnh = rock;
        struct rtqk_arg arg;
        struct timeval atv;
        struct radix_node_head *rnh = rock;
        struct rtqk_arg arg;
        struct timeval atv;
-       static time_t last_adjusted_timeout = 0;
-       struct timeval timenow;
+       static uint64_t last_adjusted_timeout = 0;
+       uint64_t timenow;
 
 
-       lck_mtx_lock(rt_mtx);
+       lck_mtx_lock(rnh_lock);
        /* Get the timestamp after we acquire the lock for better accuracy */
        /* Get the timestamp after we acquire the lock for better accuracy */
-       getmicrotime(&timenow);
+        timenow = net_uptime();
 
        arg.found = arg.killed = 0;
        arg.rnh = rnh;
 
        arg.found = arg.killed = 0;
        arg.rnh = rnh;
-       arg.nextstop = timenow.tv_sec + rtq_timeout;
+       arg.nextstop = timenow + rtq_timeout;
        arg.draining = arg.updating = 0;
        rnh->rnh_walktree(rnh, in_rtqkill, &arg);
 
        arg.draining = arg.updating = 0;
        rnh->rnh_walktree(rnh, in_rtqkill, &arg);
 
@@ -370,14 +450,14 @@ in_rtqtimo(void *rock)
         * hard.
         */
        if((arg.found - arg.killed > rtq_toomany)
         * hard.
         */
        if((arg.found - arg.killed > rtq_toomany)
-          && (timenow.tv_sec - last_adjusted_timeout >= rtq_timeout)
+          && ((timenow - last_adjusted_timeout) >= (uint64_t)rtq_timeout)
           && rtq_reallyold > rtq_minreallyold) {
                rtq_reallyold = 2*rtq_reallyold / 3;
                if(rtq_reallyold < rtq_minreallyold) {
                        rtq_reallyold = rtq_minreallyold;
                }
 
           && rtq_reallyold > rtq_minreallyold) {
                rtq_reallyold = 2*rtq_reallyold / 3;
                if(rtq_reallyold < rtq_minreallyold) {
                        rtq_reallyold = rtq_minreallyold;
                }
 
-               last_adjusted_timeout = timenow.tv_sec;
+               last_adjusted_timeout = timenow;
 #if DIAGNOSTIC
                log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
                    rtq_reallyold);
 #if DIAGNOSTIC
                log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
                    rtq_reallyold);
@@ -388,8 +468,8 @@ in_rtqtimo(void *rock)
        }
 
        atv.tv_usec = 0;
        }
 
        atv.tv_usec = 0;
-       atv.tv_sec = arg.nextstop - timenow.tv_sec;
-       lck_mtx_unlock(rt_mtx);
+       atv.tv_sec = arg.nextstop - timenow;
+       lck_mtx_unlock(rnh_lock);
        timeout(in_rtqtimo_funnel, rock, tvtohz(&atv));
 }
 
        timeout(in_rtqtimo_funnel, rock, tvtohz(&atv));
 }
 
@@ -403,9 +483,9 @@ in_rtqdrain(void)
        arg.nextstop = 0;
        arg.draining = 1;
        arg.updating = 0;
        arg.nextstop = 0;
        arg.draining = 1;
        arg.updating = 0;
-       lck_mtx_lock(rt_mtx);
+       lck_mtx_lock(rnh_lock);
        rnh->rnh_walktree(rnh, in_rtqkill, &arg);
        rnh->rnh_walktree(rnh, in_rtqkill, &arg);
-       lck_mtx_unlock(rt_mtx);
+       lck_mtx_unlock(rnh_lock);
 }
 
 /*
 }
 
 /*
@@ -430,6 +510,7 @@ in_inithead(void **head, int off)
        rnh = *head;
        rnh->rnh_addaddr = in_addroute;
        rnh->rnh_matchaddr = in_matroute;
        rnh = *head;
        rnh->rnh_addaddr = in_addroute;
        rnh->rnh_matchaddr = in_matroute;
+       rnh->rnh_matchaddr_args = in_matroute_args;
        rnh->rnh_close = in_clsroute;
        in_rtqtimo(rnh);        /* kick off timeout first time */
        return 1;
        rnh->rnh_close = in_clsroute;
        in_rtqtimo(rnh);        /* kick off timeout first time */
        return 1;
@@ -458,6 +539,7 @@ in_ifadownkill(struct radix_node *rn, void *xap)
        struct rtentry *rt = (struct rtentry *)rn;
        int err;
 
        struct rtentry *rt = (struct rtentry *)rn;
        int err;
 
+       RT_LOCK(rt);
        if (rt->rt_ifa == ap->ifa &&
            (ap->del || !(rt->rt_flags & RTF_STATIC))) {
                /*
        if (rt->rt_ifa == ap->ifa &&
            (ap->del || !(rt->rt_flags & RTF_STATIC))) {
                /*
@@ -466,14 +548,20 @@ in_ifadownkill(struct radix_node *rn, void *xap)
                 * away the pointers that rn_walktree() needs in order
                 * continue our descent.  We will end up deleting all
                 * the routes that rtrequest() would have in any case,
                 * away the pointers that rn_walktree() needs in order
                 * continue our descent.  We will end up deleting all
                 * the routes that rtrequest() would have in any case,
-                * so that behavior is not needed there.
+                * so that behavior is not needed there.  Safe to drop
+                * rt_lock and use rt_key, rt_gateway, since holding
+                * rnh_lock here prevents another thread from calling
+                * rt_setgate() on this route.
                 */
                rt->rt_flags &= ~(RTF_CLONING | RTF_PRCLONING);
                 */
                rt->rt_flags &= ~(RTF_CLONING | RTF_PRCLONING);
-               err = rtrequest_locked(RTM_DELETE, (struct sockaddr *)rt_key(rt),
-                               rt->rt_gateway, rt_mask(rt), rt->rt_flags, 0);
+               RT_UNLOCK(rt);
+               err = rtrequest_locked(RTM_DELETE, rt_key(rt),
+                   rt->rt_gateway, rt_mask(rt), rt->rt_flags, 0);
                if (err) {
                        log(LOG_WARNING, "in_ifadownkill: error %d\n", err);
                }
                if (err) {
                        log(LOG_WARNING, "in_ifadownkill: error %d\n", err);
                }
+       } else {
+               RT_UNLOCK(rt);
        }
        return 0;
 }
        }
        return 0;
 }
@@ -484,19 +572,26 @@ in_ifadown(struct ifaddr *ifa, int delete)
        struct in_ifadown_arg arg;
        struct radix_node_head *rnh;
 
        struct in_ifadown_arg arg;
        struct radix_node_head *rnh;
 
-       lck_mtx_assert(rt_mtx, LCK_MTX_ASSERT_OWNED);
+       lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
 
 
+       /*
+        * Holding rnh_lock here prevents the possibility of
+        * ifa from changing (e.g. in_ifinit), so it is safe
+        * to access its ifa_addr without locking.
+        */
        if (ifa->ifa_addr->sa_family != AF_INET)
        if (ifa->ifa_addr->sa_family != AF_INET)
-               return 1;
+               return (1);
 
        /* trigger route cache reevaluation */
 
        /* trigger route cache reevaluation */
-       if (use_routegenid) 
-               route_generation++;
+       if (use_routegenid)
+               routegenid_update();
 
        arg.rnh = rnh = rt_tables[AF_INET];
        arg.ifa = ifa;
        arg.del = delete;
        rnh->rnh_walktree(rnh, in_ifadownkill, &arg);
 
        arg.rnh = rnh = rt_tables[AF_INET];
        arg.ifa = ifa;
        arg.del = delete;
        rnh->rnh_walktree(rnh, in_ifadownkill, &arg);
+       IFA_LOCK_SPIN(ifa);
        ifa->ifa_flags &= ~IFA_ROUTE;
        ifa->ifa_flags &= ~IFA_ROUTE;
-       return 0;
+       IFA_UNLOCK(ifa);
+       return (0);
 }
 }