+/*
+ * Copyright (c) 2003-2009 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
/* $FreeBSD: src/sys/netinet6/in6_rmx.c,v 1.1.2.2 2001/07/03 11:01:52 ume Exp $ */
/* $KAME: in6_rmx.c,v 1.10 2001/05/24 05:44:58 itojun Exp $ */
#include <sys/socketvar.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
+#include <kern/lock.h>
#include <net/if.h>
#include <net/route.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
-extern int in6_inithead __P((void **head, int off));
-static void in6_rtqtimo __P((void *rock));
-static void in6_mtutimo __P((void *rock));
+extern int in6_inithead(void **head, int off);
+static void in6_rtqtimo(void *rock);
+static void in6_mtutimo(void *rock);
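+/* tvtohz() converts a timeval delta into a tick count for timeout() */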
+extern int tvtohz(struct timeval *);
+
+static struct radix_node *in6_matroute_args(void *, struct radix_node_head *,
+ rn_matchf_t *, void *);
#define RTPRF_OURS RTF_PROTO3 /* set on routes we manage */
+/*
+ * Accessed by in6_addroute(), in6_deleteroute() and in6_rtqkill(), all of
+ * which hold the routing lock (rnh_lock); the lock thus protects the variable.
+ */
+static int in6dynroutes;
+
/*
* Do what we need to do when inserting a route.
*/
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)rt_key(rt);
struct radix_node *ret;
+ lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+ RT_LOCK_ASSERT_HELD(rt);
+
+ /*
+ * If this is a dynamic route (which is created via Redirect) and
+ * we already have the maximum acceptable number of such route entries,
+ * reject creating a new one. We could initiate garbage collection to
+ * make space available right now, but the benefit would probably not
+ * be worth the cleaning overhead; we only have to endure a slightly
+ * suboptimal path even without the redirected route.
+ */
+ if ((rt->rt_flags & RTF_DYNAMIC) != 0 &&
+ ip6_maxdynroutes >= 0 && in6dynroutes >= ip6_maxdynroutes)
+ return (NULL);
+
/*
* For IPv6, all unicast non-host routes are automatically cloning.
*/
rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;
ret = rn_addroute(v_arg, n_arg, head, treenodes);
- if (ret == NULL && rt->rt_flags & RTF_HOST) {
+ if (ret == NULL && (rt->rt_flags & RTF_HOST)) {
struct rtentry *rt2;
/*
* We are trying to add a host route, but can't.
* Find out if it is because of an
* ARP entry and delete it if so.
*/
- rt2 = rtalloc1((struct sockaddr *)sin6, 0,
+ rt2 = rtalloc1_locked((struct sockaddr *)sin6, 0,
RTF_CLONING | RTF_PRCLONING);
if (rt2) {
- if (rt2->rt_flags & RTF_LLINFO &&
- rt2->rt_flags & RTF_HOST &&
- rt2->rt_gateway &&
- rt2->rt_gateway->sa_family == AF_LINK) {
- rtrequest(RTM_DELETE,
- (struct sockaddr *)rt_key(rt2),
- rt2->rt_gateway,
- rt_mask(rt2), rt2->rt_flags, 0);
+ RT_LOCK(rt2);
+ if ((rt2->rt_flags & RTF_LLINFO) &&
+ (rt2->rt_flags & RTF_HOST) &&
+ rt2->rt_gateway != NULL &&
+ rt2->rt_gateway->sa_family == AF_LINK) {
+ /*
+ * Safe to drop rt_lock and use rt_key,
+ * rt_gateway, since holding rnh_lock here
+ * prevents another thread from calling
+ * rt_setgate() on this route.
+ */
+ RT_UNLOCK(rt2);
+ (void) rtrequest_locked(RTM_DELETE, rt_key(rt2),
+ rt2->rt_gateway, rt_mask(rt2),
+ rt2->rt_flags, 0);
ret = rn_addroute(v_arg, n_arg, head,
treenodes);
+ } else {
+ RT_UNLOCK(rt2);
}
- rtfree(rt2);
+ rtfree_locked(rt2);
}
- } else if (ret == NULL && rt->rt_flags & RTF_CLONING) {
+ } else if (ret == NULL && (rt->rt_flags & RTF_CLONING)) {
struct rtentry *rt2;
/*
* We are trying to add a net route, but can't.
* net route entry, 3ffe:0501:: -> if0.
* This case should not raise an error.
*/
- rt2 = rtalloc1((struct sockaddr *)sin6, 0,
+ rt2 = rtalloc1_locked((struct sockaddr *)sin6, 0,
RTF_CLONING | RTF_PRCLONING);
if (rt2) {
+ RT_LOCK(rt2);
if ((rt2->rt_flags & (RTF_CLONING|RTF_HOST|RTF_GATEWAY))
== RTF_CLONING
&& rt2->rt_gateway
&& rt2->rt_ifp == rt->rt_ifp) {
ret = rt2->rt_nodes;
}
- rtfree(rt2);
+ RT_UNLOCK(rt2);
+ rtfree_locked(rt2);
}
}
+
+ if (ret != NULL && (rt->rt_flags & RTF_DYNAMIC) != 0)
+ in6dynroutes++;
+
return ret;
}
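+/*
+ * Remove a route from the radix tree; called with rnh_lock held. Wraps
+ * rn_delete() so that the count of dynamic (redirect-created) routes is
+ * kept in sync when one of them goes away.
+ */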
+static struct radix_node *
+in6_deleteroute(void * v_arg, void *netmask_arg, struct radix_node_head *head)
+{
+ struct radix_node *rn;
+
+ lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+
+ rn = rn_delete(v_arg, netmask_arg, head);
+ if (rn != NULL) {
+ struct rtentry *rt = (struct rtentry *)rn;
+ RT_LOCK_SPIN(rt);
+ if ((rt->rt_flags & RTF_DYNAMIC) != 0)
+ in6dynroutes--;
+ RT_UNLOCK(rt);
+ }
+
+ return (rn);
+}
+
+/*
+ * Similar to in6_matroute_args except without the leaf-matching parameters.
+ */
+static struct radix_node *
+in6_matroute(void *v_arg, struct radix_node_head *head)
+{
+ return (in6_matroute_args(v_arg, head, NULL, NULL));
+}
+
/*
* This code is the inverse of in6_clsroute: on first reference, if we
* were managing the route, stop doing so and set the expiration timer
* back off again.
*/
static struct radix_node *
-in6_matroute(void *v_arg, struct radix_node_head *head)
+in6_matroute_args(void *v_arg, struct radix_node_head *head,
+ rn_matchf_t *f, void *w)
{
- struct radix_node *rn = rn_match(v_arg, head);
+ struct radix_node *rn = rn_match_args(v_arg, head, f, w);
struct rtentry *rt = (struct rtentry *)rn;
- if (rt && rt->rt_refcnt == 0) { /* this is first reference */
- if (rt->rt_flags & RTPRF_OURS) {
+	/* Is this the first reference to the route? */
+ if (rt != NULL) {
+ RT_LOCK_SPIN(rt);
+ if (rt->rt_refcnt == 0 && (rt->rt_flags & RTPRF_OURS)) {
rt->rt_flags &= ~RTPRF_OURS;
rt->rt_rmx.rmx_expire = 0;
}
+ RT_UNLOCK(rt);
}
- return rn;
+ return (rn);
}
SYSCTL_DECL(_net_inet6_ip6);
/* one hour is ``really old'' */
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_RTEXPIRE, rtexpire,
CTLFLAG_RW, &rtq_reallyold , 0, "");
-
+
static int rtq_minreallyold = 10;
/* never automatically crank down to less */
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_RTMINEXPIRE, rtminexpire,
CTLFLAG_RW, &rtq_minreallyold , 0, "");
-
+
static int rtq_toomany = 128;
/* 128 cached routes is ``too many'' */
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_RTMAXCACHE, rtmaxcache,
CTLFLAG_RW, &rtq_toomany , 0, "");
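+/*
+ * The three knobs above are run-time tunable, e.g. via sysctl(8)
+ * (assuming the standard net.inet6.ip6 MIB naming):
+ *	sysctl -w net.inet6.ip6.rtexpire=1800
+ */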
-
+
/*
 * On last reference drop, mark the route as belonging to us so that it can be
* timed out.
*/
static void
-in6_clsroute(struct radix_node *rn, struct radix_node_head *head)
+in6_clsroute(struct radix_node *rn, __unused struct radix_node_head *head)
{
struct rtentry *rt = (struct rtentry *)rn;
+ lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+ RT_LOCK_ASSERT_HELD(rt);
+
if (!(rt->rt_flags & RTF_UP))
return; /* prophylactic measures */
if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST)
return;
- if ((rt->rt_flags & (RTF_WASCLONED | RTPRF_OURS))
- != RTF_WASCLONED)
+ if (rt->rt_flags & RTPRF_OURS)
+ return;
+
+ if (!(rt->rt_flags & (RTF_WASCLONED | RTF_DYNAMIC)))
return;
/*
- * As requested by David Greenman:
- * If rtq_reallyold is 0, just delete the route without
- * waiting for a timeout cycle to kill it.
+ * Delete the route immediately if RTF_DELCLONE is set or
+ * if route caching is disabled (rtq_reallyold set to 0).
+ * Otherwise, let it expire and be deleted by in6_rtqkill().
*/
- if (rtq_reallyold != 0) {
- rt->rt_flags |= RTPRF_OURS;
- rt->rt_rmx.rmx_expire = time_second + rtq_reallyold;
+ if ((rt->rt_flags & RTF_DELCLONE) || rtq_reallyold == 0) {
+ /*
+ * Delete the route from the radix tree but since we are
+ * called when the route's reference count is 0, don't
+ * deallocate it until we return from this routine by
+ * telling rtrequest that we're interested in it.
+ * Safe to drop rt_lock and use rt_key, rt_gateway,
+ * since holding rnh_lock here prevents another thread
+ * from calling rt_setgate() on this route.
+ */
+ RT_UNLOCK(rt);
+ if (rtrequest_locked(RTM_DELETE, rt_key(rt),
+ rt->rt_gateway, rt_mask(rt), rt->rt_flags, &rt) == 0) {
+ /* Now let the caller free it */
+ RT_LOCK(rt);
+ RT_REMREF_LOCKED(rt);
+ } else {
+ RT_LOCK(rt);
+ }
} else {
- rtrequest(RTM_DELETE,
- (struct sockaddr *)rt_key(rt),
- rt->rt_gateway, rt_mask(rt),
- rt->rt_flags, 0);
+ struct timeval timenow;
+
+ getmicrotime(&timenow);
+ rt->rt_flags |= RTPRF_OURS;
+ rt->rt_rmx.rmx_expire =
+ rt_expiry(rt, timenow.tv_sec, rtq_reallyold);
}
}
/*
* Get rid of old routes. When draining, this deletes everything, even when
- * the timeout is not expired yet. When updating, this makes sure that
- * nothing has a timeout longer than the current value of rtq_reallyold.
+ * the timeout has not expired yet. This also applies to dynamic routes once
+ * there is a sufficiently large number of them (more than half of the
+ * maximum). When updating, this makes sure that nothing has a timeout longer
+ * than the current value of rtq_reallyold.
*/
static int
in6_rtqkill(struct radix_node *rn, void *rock)
struct rtqk_arg *ap = rock;
struct rtentry *rt = (struct rtentry *)rn;
int err;
+ struct timeval timenow;
+ getmicrotime(&timenow);
+ lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+
+ RT_LOCK(rt);
if (rt->rt_flags & RTPRF_OURS) {
ap->found++;
- if (ap->draining || rt->rt_rmx.rmx_expire <= time_second) {
+ if (ap->draining || rt->rt_rmx.rmx_expire <= timenow.tv_sec ||
+ ((rt->rt_flags & RTF_DYNAMIC) != 0 &&
+ ip6_maxdynroutes >= 0 &&
+ in6dynroutes > ip6_maxdynroutes / 2)) {
if (rt->rt_refcnt > 0)
panic("rtqkill route really not free");
- err = rtrequest(RTM_DELETE,
- (struct sockaddr *)rt_key(rt),
- rt->rt_gateway, rt_mask(rt),
- rt->rt_flags, 0);
+ /*
+ * Delete this route since we're done with it;
+ * the route may be freed afterwards, so we
+ * can no longer refer to 'rt' upon returning
+ * from rtrequest(). Safe to drop rt_lock and
+ * use rt_key, rt_gateway, since holding rnh_lock
+ * here prevents another thread from calling
+ * rt_setgate() on this route.
+ */
+ RT_UNLOCK(rt);
+ err = rtrequest_locked(RTM_DELETE, rt_key(rt),
+ rt->rt_gateway, rt_mask(rt), rt->rt_flags, 0);
if (err) {
log(LOG_WARNING, "in6_rtqkill: error %d", err);
} else {
ap->killed++;
}
} else {
- if (ap->updating
- && (rt->rt_rmx.rmx_expire - time_second
- > rtq_reallyold)) {
- rt->rt_rmx.rmx_expire = time_second
- + rtq_reallyold;
+ if (ap->updating &&
+ (unsigned)(rt->rt_rmx.rmx_expire - timenow.tv_sec) >
+ rt_expiry(rt, 0, rtq_reallyold)) {
+ rt->rt_rmx.rmx_expire = rt_expiry(rt,
+ timenow.tv_sec, rtq_reallyold);
}
ap->nextstop = lmin(ap->nextstop,
rt->rt_rmx.rmx_expire);
+ RT_UNLOCK(rt);
}
+ } else {
+ RT_UNLOCK(rt);
}
return 0;
#define RTQ_TIMEOUT 60*10 /* run no less than once every ten minutes */
static int rtq_timeout = RTQ_TIMEOUT;
-static void
-in6_rtqtimo_funneled(void *rock)
-{
-#ifdef __APPLE__
- boolean_t funnel_state;
- funnel_state = thread_funnel_set(network_flock, TRUE);
- in6_rtqtimo(rock);
-#endif
-#ifdef __APPLE__
- (void) thread_funnel_set(network_flock, FALSE);
-#endif
-}
-
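+/*
+ * Timer callback: prune expired cached routes via in6_rtqkill(), adaptively
+ * lowering rtq_reallyold when too many of them survive a pass, and then
+ * reschedule ourselves for the next expiration.
+ */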
static void
in6_rtqtimo(void *rock)
{
struct rtqk_arg arg;
struct timeval atv;
static time_t last_adjusted_timeout = 0;
- int s;
+ struct timeval timenow;
+
+ lck_mtx_lock(rnh_lock);
+ /* Get the timestamp after we acquire the lock for better accuracy */
+ getmicrotime(&timenow);
arg.found = arg.killed = 0;
arg.rnh = rnh;
- arg.nextstop = time_second + rtq_timeout;
+ arg.nextstop = timenow.tv_sec + rtq_timeout;
arg.draining = arg.updating = 0;
- s = splnet();
rnh->rnh_walktree(rnh, in6_rtqkill, &arg);
- splx(s);
/*
* Attempt to be somewhat dynamic about this:
* hard.
*/
if ((arg.found - arg.killed > rtq_toomany)
- && (time_second - last_adjusted_timeout >= rtq_timeout)
+ && (timenow.tv_sec - last_adjusted_timeout >= rtq_timeout)
&& rtq_reallyold > rtq_minreallyold) {
rtq_reallyold = 2*rtq_reallyold / 3;
if (rtq_reallyold < rtq_minreallyold) {
rtq_reallyold = rtq_minreallyold;
}
- last_adjusted_timeout = time_second;
+ last_adjusted_timeout = timenow.tv_sec;
#if DIAGNOSTIC
log(LOG_DEBUG, "in6_rtqtimo: adjusted rtq_reallyold to %d",
rtq_reallyold);
#endif
arg.found = arg.killed = 0;
arg.updating = 1;
- s = splnet();
rnh->rnh_walktree(rnh, in6_rtqkill, &arg);
- splx(s);
}
atv.tv_usec = 0;
- atv.tv_sec = arg.nextstop - time_second;
- timeout(in6_rtqtimo_funneled, rock, tvtohz(&atv));
+ atv.tv_sec = arg.nextstop - timenow.tv_sec;
+ lck_mtx_unlock(rnh_lock);
+ timeout(in6_rtqtimo, rock, tvtohz(&atv));
}
/*
{
struct rtentry *rt = (struct rtentry *)rn;
struct mtuex_arg *ap = rock;
+ struct timeval timenow;
+
+ getmicrotime(&timenow);
/* sanity */
if (!rt)
panic("rt == NULL in in6_mtuexpire");
+ RT_LOCK(rt);
if (rt->rt_rmx.rmx_expire && !(rt->rt_flags & RTF_PROBEMTU)) {
- if (rt->rt_rmx.rmx_expire <= time_second) {
+ if (rt->rt_rmx.rmx_expire <= timenow.tv_sec) {
rt->rt_flags |= RTF_PROBEMTU;
} else {
ap->nextstop = lmin(ap->nextstop,
rt->rt_rmx.rmx_expire);
}
}
+ RT_UNLOCK(rt);
return 0;
}
#define MTUTIMO_DEFAULT (60*1)
-static void
-in6_mtutimo_funneled(void *rock)
-{
-#ifdef __APPLE__
- boolean_t funnel_state;
- funnel_state = thread_funnel_set(network_flock, TRUE);
- in6_mtutimo(rock);
-#endif
-#ifdef __APPLE__
- (void) thread_funnel_set(network_flock, FALSE);
-#endif
-}
-
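+/*
+ * Timer callback: walk the tree with in6_mtuexpire() to flag routes whose
+ * path MTU discovery timeout has elapsed, then reschedule ourselves.
+ */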
static void
in6_mtutimo(void *rock)
{
struct radix_node_head *rnh = rock;
struct mtuex_arg arg;
struct timeval atv;
- int s;
+ struct timeval timenow;
+
+ getmicrotime(&timenow);
arg.rnh = rnh;
- arg.nextstop = time_second + MTUTIMO_DEFAULT;
- s = splnet();
+ arg.nextstop = timenow.tv_sec + MTUTIMO_DEFAULT;
+ lck_mtx_lock(rnh_lock);
rnh->rnh_walktree(rnh, in6_mtuexpire, &arg);
- splx(s);
atv.tv_usec = 0;
atv.tv_sec = arg.nextstop;
- if (atv.tv_sec < time_second) {
+ if (atv.tv_sec < timenow.tv_sec) {
#if DIAGNOSTIC
log(LOG_DEBUG, "IPv6: invalid mtu expiration time on routing table\n");
#endif
- arg.nextstop = time_second + 30; /*last resort*/
+ arg.nextstop = timenow.tv_sec + 30; /*last resort*/
}
- atv.tv_sec -= time_second;
- timeout(in6_mtutimo_funneled, rock, tvtohz(&atv));
+ atv.tv_sec -= timenow.tv_sec;
+ lck_mtx_unlock(rnh_lock);
+ timeout(in6_mtutimo, rock, tvtohz(&atv));
}
-#if 0
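+/*
+ * Drain the route cache: delete every route we manage (RTPRF_OURS),
+ * regardless of whether its expiration time has arrived.
+ */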
void
in6_rtqdrain()
{
struct radix_node_head *rnh = rt_tables[AF_INET6];
struct rtqk_arg arg;
- int s;
arg.found = arg.killed = 0;
arg.rnh = rnh;
arg.nextstop = 0;
arg.draining = 1;
arg.updating = 0;
- s = splnet();
+ lck_mtx_lock(rnh_lock);
rnh->rnh_walktree(rnh, in6_rtqkill, &arg);
- splx(s);
+ lck_mtx_unlock(rnh_lock);
}
-#endif
/*
* Initialize our routing tree.
rnh = *head;
rnh->rnh_addaddr = in6_addroute;
+ rnh->rnh_deladdr = in6_deleteroute;
rnh->rnh_matchaddr = in6_matroute;
+ rnh->rnh_matchaddr_args = in6_matroute_args;
rnh->rnh_close = in6_clsroute;
in6_rtqtimo(rnh); /* kick off timeout first time */
in6_mtutimo(rnh); /* kick off timeout first time */