diff --git a/bsd/netinet/in_rmx.c b/bsd/netinet/in_rmx.c
index c307be910cbcb1f20dc27d95adc9699a89b5c135..2d0c2735d10d7fc46b8a703161e6c09c0e43e72c 100644
--- a/bsd/netinet/in_rmx.c
+++ b/bsd/netinet/in_rmx.c
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  * 
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * Copyright 1994, 1995 Massachusetts Institute of Technology
 #include <sys/sysctl.h>
 #include <sys/socket.h>
 #include <sys/mbuf.h>
+#include <sys/protosw.h>
 #include <sys/syslog.h>
+#include <sys/mcache.h>
+#include <kern/lock.h>
 
 #include <net/if.h>
 #include <net/route.h>
 #include <netinet/in.h>
 #include <netinet/in_var.h>
+#include <netinet/in_arp.h>
 
-extern int     in_inithead __P((void **head, int off));
+extern int tvtohz(struct timeval *);
+extern int     in_inithead(void **head, int off);
 
 #ifdef __APPLE__
 static void in_rtqtimo(void *rock);
 #endif
 
+static struct radix_node *in_matroute_args(void *, struct radix_node_head *,
+    rn_matchf_t *f, void *);
+
 #define RTPRF_OURS             RTF_PROTO3      /* set on routes we manage */
 
 /*
@@ -95,13 +109,16 @@ in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
        struct sockaddr_in *sin = (struct sockaddr_in *)rt_key(rt);
        struct radix_node *ret;
 
+       lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+       RT_LOCK_ASSERT_HELD(rt);
+
        /*
         * For IP, all unicast non-host routes are automatically cloning.
         */
-       if(IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
+       if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
                rt->rt_flags |= RTF_MULTICAST;
 
-       if(!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST))) {
+       if (!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST))) {
                rt->rt_flags |= RTF_PRCLONING;
        }
 
@@ -125,11 +142,15 @@ in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
                if (in_broadcast(sin->sin_addr, rt->rt_ifp)) {
                        rt->rt_flags |= RTF_BROADCAST;
                } else {
+                       /* Become a regular mutex */
+                       RT_CONVERT_LOCK(rt);
+                       IFA_LOCK_SPIN(rt->rt_ifa);
 #define satosin(sa) ((struct sockaddr_in *)sa)
                        if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr
                            == sin->sin_addr.s_addr)
                                rt->rt_flags |= RTF_LOCAL;
 #undef satosin
+                       IFA_UNLOCK(rt->rt_ifa);
                }
        }
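
The block above classifies a freshly added unicast host route: a destination matching the subnet broadcast address is flagged RTF_BROADCAST, and one matching the interface's own address is flagged RTF_LOCAL. A minimal user-space sketch of that decision, with plain in_addr comparisons standing in for the kernel's in_broadcast() and rt_ifa lookup (illustrative only, not the kernel code path):

#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>

enum dst_kind { DST_UNICAST, DST_BROADCAST, DST_LOCAL };

/* Mirrors the RTF_BROADCAST / RTF_LOCAL decision for a single interface. */
static enum dst_kind
classify_dst(struct in_addr dst, struct in_addr ifaddr, struct in_addr bcast)
{
	if (dst.s_addr == bcast.s_addr)
		return (DST_BROADCAST);
	if (dst.s_addr == ifaddr.s_addr)
		return (DST_LOCAL);
	return (DST_UNICAST);
}

int
main(void)
{
	struct in_addr dst, ifaddr, bcast;

	inet_pton(AF_INET, "192.168.1.10", &ifaddr);
	inet_pton(AF_INET, "192.168.1.255", &bcast);
	inet_pton(AF_INET, "192.168.1.10", &dst);
	printf("kind=%d\n", classify_dst(dst, ifaddr, bcast));	/* prints 2 (DST_LOCAL) */
	return (0);
}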
 
@@ -145,61 +166,104 @@ in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
                 * Find out if it is because of an
                 * ARP entry and delete it if so.
                 */
-               rt2 = rtalloc1((struct sockaddr *)sin, 0,
-                               RTF_CLONING | RTF_PRCLONING);
+               rt2 = rtalloc1_scoped_locked(rt_key(rt), 0,
+                   RTF_CLONING | RTF_PRCLONING, sin_get_ifscope(rt_key(rt)));
                if (rt2) {
-                       if (rt2->rt_flags & RTF_LLINFO &&
-                               rt2->rt_flags & RTF_HOST &&
-                               rt2->rt_gateway &&
-                               rt2->rt_gateway->sa_family == AF_LINK) {
-                               rtrequest(RTM_DELETE,
-                                         (struct sockaddr *)rt_key(rt2),
-                                         rt2->rt_gateway,
-                                         rt_mask(rt2), rt2->rt_flags, 0);
+                       RT_LOCK(rt2);
+                       if ((rt2->rt_flags & RTF_LLINFO) &&
+                           (rt2->rt_flags & RTF_HOST) &&
+                           rt2->rt_gateway != NULL &&
+                           rt2->rt_gateway->sa_family == AF_LINK) {
+                               /*
+                                * Safe to drop rt_lock and use rt_key,
+                                * rt_gateway, since holding rnh_lock here
+                                * prevents another thread from calling
+                                * rt_setgate() on this route.
+                                */
+                               RT_UNLOCK(rt2);
+                               rtrequest_locked(RTM_DELETE, rt_key(rt2),
+                                   rt2->rt_gateway, rt_mask(rt2),
+                                   rt2->rt_flags, 0);
                                ret = rn_addroute(v_arg, n_arg, head,
                                        treenodes);
+                       } else {
+                               RT_UNLOCK(rt2);
                        }
-                       rtfree(rt2);
+                       rtfree_locked(rt2);
                }
        }
        return ret;
 }
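
When rn_addroute() reports a collision above, the existing node is evicted only if it looks like a cloned ARP entry: a host route carrying link-layer information whose gateway is an AF_LINK sockaddr. A small sketch of that predicate; the struct and the X_* flag values are stand-ins for the kernel's rtentry and <net/route.h> definitions, not the real ones:

#include <stdbool.h>
#include <sys/socket.h>		/* AF_LINK, struct sockaddr */

#define X_RTF_HOST	0x4	/* stand-in for RTF_HOST */
#define X_RTF_LLINFO	0x400	/* stand-in for RTF_LLINFO */

struct toy_route {
	int		 flags;
	struct sockaddr	*gateway;
};

/* The test in_addroute() applies before deleting the colliding node. */
static bool
looks_like_arp_entry(const struct toy_route *rt)
{
	return ((rt->flags & X_RTF_LLINFO) && (rt->flags & X_RTF_HOST) &&
	    rt->gateway != NULL && rt->gateway->sa_family == AF_LINK);
}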
 
+/*
+ * Validate (unexpire) an expiring AF_INET route.
+ */
+struct radix_node *
+in_validate(struct radix_node *rn)
+{
+       struct rtentry *rt = (struct rtentry *)rn;
+
+       RT_LOCK_ASSERT_HELD(rt);
+
+       /* This is first reference? */
+       if (rt->rt_refcnt == 0) {
+               if (rt->rt_flags & RTPRF_OURS) {
+                       /* It's one of ours; unexpire it */
+                       rt->rt_flags &= ~RTPRF_OURS;
+                       rt_setexpire(rt, 0);
+               } else if ((rt->rt_flags & RTF_LLINFO) &&
+                   (rt->rt_flags & RTF_HOST) && rt->rt_gateway != NULL &&
+                   rt->rt_gateway->sa_family == AF_LINK) {
+                       /* It's ARP; let it be handled there */
+                       arp_validate(rt);
+               }
+       }
+       return (rn);
+}
+
+/*
+ * Similar to in_matroute_args except without the leaf-matching parameters.
+ */
+static struct radix_node *
+in_matroute(void *v_arg, struct radix_node_head *head)
+{
+       return (in_matroute_args(v_arg, head, NULL, NULL));
+}
+
 /*
  * This code is the inverse of in_clsroute: on first reference, if we
  * were managing the route, stop doing so and set the expiration timer
  * back off again.
  */
 static struct radix_node *
-in_matroute(void *v_arg, struct radix_node_head *head)
+in_matroute_args(void *v_arg, struct radix_node_head *head,
+    rn_matchf_t *f, void *w)
 {
-       struct radix_node *rn = rn_match(v_arg, head);
-       struct rtentry *rt = (struct rtentry *)rn;
+       struct radix_node *rn = rn_match_args(v_arg, head, f, w);
 
-       if(rt && rt->rt_refcnt == 0) { /* this is first reference */
-               if(rt->rt_flags & RTPRF_OURS) {
-                       rt->rt_flags &= ~RTPRF_OURS;
-                       rt->rt_rmx.rmx_expire = 0;
-               }
+       if (rn != NULL) {
+               RT_LOCK_SPIN((struct rtentry *)rn);
+               in_validate(rn);
+               RT_UNLOCK((struct rtentry *)rn);
        }
-       return rn;
+       return (rn);
 }
 
 static int rtq_reallyold = 60*60;
        /* one hour is ``really old'' */
-SYSCTL_INT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire, CTLFLAG_RW, 
+SYSCTL_INT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire, CTLFLAG_RW | CTLFLAG_LOCKED,
     &rtq_reallyold , 0, 
     "Default expiration time on dynamically learned routes");
                                   
 static int rtq_minreallyold = 10;
        /* never automatically crank down to less */
-SYSCTL_INT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire, CTLFLAG_RW, 
+SYSCTL_INT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire, CTLFLAG_RW | CTLFLAG_LOCKED,
     &rtq_minreallyold , 0, 
     "Minimum time to attempt to hold onto dynamically learned routes");
                                   
 static int rtq_toomany = 128;
        /* 128 cached routes is ``too many'' */
-SYSCTL_INT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache, CTLFLAG_RW, 
+SYSCTL_INT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache, CTLFLAG_RW | CTLFLAG_LOCKED,
     &rtq_toomany , 0, "Upper limit on dynamically learned routes");
 
 #ifdef __APPLE__
@@ -216,43 +280,66 @@ SYSCTL_INT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache, CTLFLAG_RW,
  * If for some reason a circular route is needed, turn this sysctl (net.inet.ip.check_route_selfref) to zero.
  */
 int check_routeselfref = 1;
-SYSCTL_INT(_net_inet_ip, OID_AUTO, check_route_selfref, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_ip, OID_AUTO, check_route_selfref, CTLFLAG_RW | CTLFLAG_LOCKED,
     &check_routeselfref , 0, "");
 #endif
 
+int use_routegenid = 1;
+SYSCTL_INT(_net_inet_ip, OID_AUTO, use_route_genid, CTLFLAG_RW | CTLFLAG_LOCKED,
+    &use_routegenid , 0, "");
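
The tunables above are reachable from user space as net.inet.ip.rtexpire, net.inet.ip.rtminexpire, net.inet.ip.rtmaxcache and net.inet.ip.use_route_genid. A minimal sketch that reads two of them with sysctlbyname(3); writing works the same way through the last two arguments, but needs privileges:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int rtexpire, rtmaxcache;
	size_t len;

	len = sizeof (rtexpire);
	if (sysctlbyname("net.inet.ip.rtexpire", &rtexpire, &len, NULL, 0) == -1)
		return (1);
	len = sizeof (rtmaxcache);
	if (sysctlbyname("net.inet.ip.rtmaxcache", &rtmaxcache, &len, NULL, 0) == -1)
		return (1);
	printf("rtexpire=%d s, rtmaxcache=%d routes\n", rtexpire, rtmaxcache);
	return (0);
}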
 
 /*
  * On last reference drop, mark the route as belonging to us so that it can be
  * timed out.
  */
 static void
-in_clsroute(struct radix_node *rn, struct radix_node_head *head)
+in_clsroute(struct radix_node *rn, __unused struct radix_node_head *head)
 {
        struct rtentry *rt = (struct rtentry *)rn;
 
-       if(!(rt->rt_flags & RTF_UP))
+       lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+       RT_LOCK_ASSERT_HELD(rt);
+
+       if (!(rt->rt_flags & RTF_UP))
                return;         /* prophylactic measures */
 
-       if((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST)
+       if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST)
                return;
 
-       if((rt->rt_flags & (RTF_WASCLONED | RTPRF_OURS))
-          != RTF_WASCLONED)
+       if ((rt->rt_flags & (RTF_WASCLONED | RTPRF_OURS)) != RTF_WASCLONED)
                return;
 
        /*
-        * As requested by David Greenman:
-        * If rtq_reallyold is 0, just delete the route without
-        * waiting for a timeout cycle to kill it.
+        * Delete the route immediately if RTF_DELCLONE is set or
+        * if route caching is disabled (rtq_reallyold set to 0).
+        * Otherwise, let it expire and be deleted by in_rtqkill().
         */
-       if(rtq_reallyold != 0) {
-               rt->rt_flags |= RTPRF_OURS;
-               rt->rt_rmx.rmx_expire = time_second + rtq_reallyold;
+       if ((rt->rt_flags & RTF_DELCLONE) || rtq_reallyold == 0) {
+               /*
+                * Delete the route from the radix tree but since we are
+                * called when the route's reference count is 0, don't
+                * deallocate it until we return from this routine by
+                * telling rtrequest that we're interested in it.
+                * Safe to drop rt_lock and use rt_key, rt_gateway since
+                * holding rnh_lock here prevents another thread from
+                * calling rt_setgate() on this route.
+                */
+               RT_UNLOCK(rt);
+               if (rtrequest_locked(RTM_DELETE, (struct sockaddr *)rt_key(rt),
+                   rt->rt_gateway, rt_mask(rt), rt->rt_flags, &rt) == 0) {
+                       /* Now let the caller free it */
+                       RT_LOCK(rt);
+                       RT_REMREF_LOCKED(rt);
+               } else {
+                       RT_LOCK(rt);
+               }
        } else {
-               rtrequest(RTM_DELETE,
-                         (struct sockaddr *)rt_key(rt),
-                         rt->rt_gateway, rt_mask(rt),
-                         rt->rt_flags, 0);
+               uint64_t timenow;
+
+               timenow = net_uptime();
+               rt->rt_flags |= RTPRF_OURS;
+               rt_setexpire(rt,
+                   rt_expiry(rt, timenow, rtq_reallyold));
        }
 }
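
Taken together, in_clsroute(), in_validate() and in_rtqkill() implement a simple mark/unexpire/reap cycle for cloned host routes: the last close tags the route RTPRF_OURS and stamps an expiration, the next lookup clears both, and the timer reaps whatever is still tagged and expired. A compressed user-space sketch of that state machine; the struct and field names are stand-ins for the rtentry members used above:

#include <stdint.h>
#include <stdbool.h>

#define F_OURS	0x1			/* stands in for RTPRF_OURS */

struct toy_rt {
	uint32_t flags;
	uint32_t refcnt;
	uint64_t expire;		/* 0 means "never expires" */
};

/* Last reference dropped: mark the route and stamp an expiration. */
static void
toy_close(struct toy_rt *rt, uint64_t now, uint64_t rtq_reallyold)
{
	rt->flags |= F_OURS;
	rt->expire = now + rtq_reallyold;
}

/* First reference taken again: unexpire it (cf. in_validate). */
static void
toy_validate(struct toy_rt *rt)
{
	if (rt->refcnt == 0 && (rt->flags & F_OURS)) {
		rt->flags &= ~F_OURS;
		rt->expire = 0;
	}
}

/* Periodic reaper decision (cf. in_rtqkill). */
static bool
toy_should_reap(const struct toy_rt *rt, uint64_t now, bool draining)
{
	return ((rt->flags & F_OURS) && (draining || rt->expire <= now));
}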
 
@@ -262,7 +349,7 @@ struct rtqk_arg {
        int killed;
        int found;
        int updating;
-       time_t nextstop;
+       uint64_t nextstop;
 };
 
 /*
@@ -276,33 +363,51 @@ in_rtqkill(struct radix_node *rn, void *rock)
        struct rtqk_arg *ap = rock;
        struct rtentry *rt = (struct rtentry *)rn;
        int err;
+       uint64_t timenow;
 
-       if(rt->rt_flags & RTPRF_OURS) {
+       timenow = net_uptime();
+       lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+
+       RT_LOCK(rt);
+       if (rt->rt_flags & RTPRF_OURS) {
                ap->found++;
 
-               if(ap->draining || rt->rt_rmx.rmx_expire <= time_second) {
-                       if(rt->rt_refcnt > 0)
+               VERIFY(rt->rt_expire == 0 || rt->rt_rmx.rmx_expire != 0);
+               VERIFY(rt->rt_expire != 0 || rt->rt_rmx.rmx_expire == 0);
+               if (ap->draining || rt->rt_expire <= timenow) {
+                       if (rt->rt_refcnt > 0)
                                panic("rtqkill route really not free");
 
-                       err = rtrequest(RTM_DELETE,
-                                       (struct sockaddr *)rt_key(rt),
-                                       rt->rt_gateway, rt_mask(rt),
-                                       rt->rt_flags, 0);
-                       if(err) {
+                       /*
+                        * Delete this route since we're done with it;
+                        * the route may be freed afterwards, so we
+                        * can no longer refer to 'rt' upon returning
+                        * from rtrequest().  Safe to drop rt_lock and
+                        * use rt_key, rt_gateway since holding rnh_lock
+                        * here prevents another thread from calling
+                        * rt_setgate() on this route.
+                        */
+                       RT_UNLOCK(rt);
+                       err = rtrequest_locked(RTM_DELETE, rt_key(rt),
+                           rt->rt_gateway, rt_mask(rt), rt->rt_flags, 0);
+                       if (err) {
                                log(LOG_WARNING, "in_rtqkill: error %d\n", err);
                        } else {
                                ap->killed++;
                        }
                } else {
-                       if(ap->updating
-                          && (rt->rt_rmx.rmx_expire - time_second
-                              > rtq_reallyold)) {
-                               rt->rt_rmx.rmx_expire = time_second
-                                       + rtq_reallyold;
+                       if (ap->updating &&
+                           (rt->rt_expire - timenow) >
+                           rt_expiry(rt, 0, rtq_reallyold)) {
+                               rt_setexpire(rt, rt_expiry(rt,
+                                   timenow, rtq_reallyold));
                        }
                        ap->nextstop = lmin(ap->nextstop,
-                                           rt->rt_rmx.rmx_expire);
+                                           rt->rt_expire);
+                       RT_UNLOCK(rt);
                }
+       } else {
+               RT_UNLOCK(rt);
        }
 
        return 0;
@@ -311,11 +416,7 @@ in_rtqkill(struct radix_node *rn, void *rock)
 static void
 in_rtqtimo_funnel(void *rock)
 {
-       boolean_t       funnel_state;
-
-       funnel_state = thread_funnel_set(network_flock, TRUE);
         in_rtqtimo(rock);
-       (void) thread_funnel_set(network_flock, FALSE);
 
 }
 #define RTQ_TIMEOUT    60*10   /* run no less than once every ten minutes */
@@ -327,16 +428,18 @@ in_rtqtimo(void *rock)
        struct radix_node_head *rnh = rock;
        struct rtqk_arg arg;
        struct timeval atv;
-       static time_t last_adjusted_timeout = 0;
-       int s;
+       static uint64_t last_adjusted_timeout = 0;
+       uint64_t timenow;
+
+       lck_mtx_lock(rnh_lock);
+       /* Get the timestamp after we acquire the lock for better accuracy */
+        timenow = net_uptime();
 
        arg.found = arg.killed = 0;
        arg.rnh = rnh;
-       arg.nextstop = time_second + rtq_timeout;
+       arg.nextstop = timenow + rtq_timeout;
        arg.draining = arg.updating = 0;
-       s = splnet();
        rnh->rnh_walktree(rnh, in_rtqkill, &arg);
-       splx(s);
 
        /*
         * Attempt to be somewhat dynamic about this:
@@ -347,27 +450,26 @@ in_rtqtimo(void *rock)
         * hard.
         */
        if((arg.found - arg.killed > rtq_toomany)
-          && (time_second - last_adjusted_timeout >= rtq_timeout)
+          && ((timenow - last_adjusted_timeout) >= (uint64_t)rtq_timeout)
           && rtq_reallyold > rtq_minreallyold) {
                rtq_reallyold = 2*rtq_reallyold / 3;
                if(rtq_reallyold < rtq_minreallyold) {
                        rtq_reallyold = rtq_minreallyold;
                }
 
-               last_adjusted_timeout = time_second;
+               last_adjusted_timeout = timenow;
 #if DIAGNOSTIC
                log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
                    rtq_reallyold);
 #endif
                arg.found = arg.killed = 0;
                arg.updating = 1;
-               s = splnet();
                rnh->rnh_walktree(rnh, in_rtqkill, &arg);
-               splx(s);
        }
 
        atv.tv_usec = 0;
-       atv.tv_sec = arg.nextstop - time_second;
+       atv.tv_sec = arg.nextstop - timenow;
+       lck_mtx_unlock(rnh_lock);
        timeout(in_rtqtimo_funnel, rock, tvtohz(&atv));
 }
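
The self-tuning block above shrinks rtq_reallyold by a third on each pass while more than rtq_toomany unreferenced clones remain, clamping at rtq_minreallyold. A few lines showing the resulting decay from the defaults (3600 s down to the 10 s floor):

#include <stdio.h>

int
main(void)
{
	int rtq_reallyold = 60 * 60;	/* default: one hour */
	int rtq_minreallyold = 10;	/* floor */

	/* 3600 -> 2400 -> 1600 -> 1066 -> ... -> 10 */
	while (rtq_reallyold > rtq_minreallyold) {
		rtq_reallyold = 2 * rtq_reallyold / 3;
		if (rtq_reallyold < rtq_minreallyold)
			rtq_reallyold = rtq_minreallyold;
		printf("%d\n", rtq_reallyold);
	}
	return (0);
}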
 
@@ -376,15 +478,14 @@ in_rtqdrain(void)
 {
        struct radix_node_head *rnh = rt_tables[AF_INET];
        struct rtqk_arg arg;
-       int s;
        arg.found = arg.killed = 0;
        arg.rnh = rnh;
        arg.nextstop = 0;
        arg.draining = 1;
        arg.updating = 0;
-       s = splnet();
+       lck_mtx_lock(rnh_lock);
        rnh->rnh_walktree(rnh, in_rtqkill, &arg);
-       splx(s);
+       lck_mtx_unlock(rnh_lock);
 }
 
 /*
@@ -409,6 +510,7 @@ in_inithead(void **head, int off)
        rnh = *head;
        rnh->rnh_addaddr = in_addroute;
        rnh->rnh_matchaddr = in_matroute;
+       rnh->rnh_matchaddr_args = in_matroute_args;
        rnh->rnh_close = in_clsroute;
        in_rtqtimo(rnh);        /* kick off timeout first time */
        return 1;
@@ -437,6 +539,7 @@ in_ifadownkill(struct radix_node *rn, void *xap)
        struct rtentry *rt = (struct rtentry *)rn;
        int err;
 
+       RT_LOCK(rt);
        if (rt->rt_ifa == ap->ifa &&
            (ap->del || !(rt->rt_flags & RTF_STATIC))) {
                /*
@@ -445,14 +548,20 @@ in_ifadownkill(struct radix_node *rn, void *xap)
                 * away the pointers that rn_walktree() needs in order
                 * continue our descent.  We will end up deleting all
                 * the routes that rtrequest() would have in any case,
-                * so that behavior is not needed there.
+                * so that behavior is not needed there.  Safe to drop
+                * rt_lock and use rt_key, rt_gateway, since holding
+                * rnh_lock here prevents another thread from calling
+                * rt_setgate() on this route.
                 */
                rt->rt_flags &= ~(RTF_CLONING | RTF_PRCLONING);
-               err = rtrequest(RTM_DELETE, (struct sockaddr *)rt_key(rt),
-                               rt->rt_gateway, rt_mask(rt), rt->rt_flags, 0);
+               RT_UNLOCK(rt);
+               err = rtrequest_locked(RTM_DELETE, rt_key(rt),
+                   rt->rt_gateway, rt_mask(rt), rt->rt_flags, 0);
                if (err) {
                        log(LOG_WARNING, "in_ifadownkill: error %d\n", err);
                }
+       } else {
+               RT_UNLOCK(rt);
        }
        return 0;
 }
@@ -463,13 +572,26 @@ in_ifadown(struct ifaddr *ifa, int delete)
        struct in_ifadown_arg arg;
        struct radix_node_head *rnh;
 
+       lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+
+       /*
+        * Holding rnh_lock here prevents the possibility of
+        * ifa from changing (e.g. in_ifinit), so it is safe
+        * to access its ifa_addr without locking.
+        */
        if (ifa->ifa_addr->sa_family != AF_INET)
-               return 1;
+               return (1);
+
+       /* trigger route cache reevaluation */
+       if (use_routegenid)
+               routegenid_update();
 
        arg.rnh = rnh = rt_tables[AF_INET];
        arg.ifa = ifa;
        arg.del = delete;
        rnh->rnh_walktree(rnh, in_ifadownkill, &arg);
+       IFA_LOCK_SPIN(ifa);
        ifa->ifa_flags &= ~IFA_ROUTE;
-       return 0;
+       IFA_UNLOCK(ifa);
+       return (0);
 }