#include <pexpert/pexpert.h>
+/*
+ * Synchronization notes:
+ *
+ * Routing entries fall under two locking domains: the global routing table
+ * lock (rnh_lock) and the per-entry lock (rt_lock); the latter is a mutex that
+ * resides (statically defined) in the rtentry structure.
+ *
+ * The locking domains for routing are defined as follows:
+ *
+ * The global routing lock is used to serialize all accesses to the radix
+ * trees defined by rt_tables[], as well as the tree of masks. This includes
+ * lookups, insertions and removals of nodes to/from the respective tree.
+ * It is also used to protect certain fields in the route entry that aren't
+ * often modified and/or require global serialization (more details below.)
+ *
+ * The per-route entry lock is used to serialize accesses to several routing
+ * entry fields (more details below.) Acquiring and releasing this lock is
+ * done via RT_LOCK() and RT_UNLOCK() routines.
+ *
+ * In cases where both rnh_lock and rt_lock must be held, the former must be
+ * acquired first in order to maintain this lock ordering. It is not required
+ * that rnh_lock be taken before every rt_lock acquisition, but whenever both
+ * locks must be held at once, they must be acquired in the above order.
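+ *
+ * For example (an illustrative sketch of the dance performed by
+ * rtfree_common() below), a caller holding only rt_lock that needs
+ * rnh_lock must drop rt_lock first, keeping a reference on the entry:
+ *
+ *	RT_ADDREF_LOCKED(rt);		(keep rt from being freed)
+ *	RT_UNLOCK(rt);
+ *	lck_mtx_lock(rnh_lock);
+ *	RT_LOCK(rt);
+ *	RT_REMREF_LOCKED(rt);		(both locks held, correct order)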
+ *
+ * The fields of the rtentry structure are protected in the following way:
+ *
+ * rt_nodes[]
+ *
+ * - Routing table lock (rnh_lock).
+ *
+ * rt_parent, rt_mask, rt_llinfo_free
+ *
+ *	- Set once during creation and never changed; no locks to read.
+ *
+ * rt_flags, rt_genmask, rt_llinfo, rt_rmx, rt_refcnt, rt_gwroute
+ *
+ * - Routing entry lock (rt_lock) for read/write access.
+ *
+ * - Some values of rt_flags are either set once at creation time,
+ * or aren't currently used, and thus checking against them can
+ * be done without rt_lock: RTF_GATEWAY, RTF_HOST, RTF_DYNAMIC,
+ * RTF_DONE, RTF_XRESOLVE, RTF_STATIC, RTF_BLACKHOLE, RTF_ANNOUNCE,
+ * RTF_USETRAILERS, RTF_WASCLONED, RTF_PINNED, RTF_LOCAL,
+ * RTF_BROADCAST, RTF_MULTICAST, RTF_IFSCOPE.
+ *
+ * rt_key, rt_gateway, rt_ifp, rt_ifa
+ *
+ * - Always written/modified with both rnh_lock and rt_lock held.
+ *
+ * - May be read freely with rnh_lock held, else must hold rt_lock
+ * for read access; holding both locks for read is also okay.
+ *
+ *	- In the event rnh_lock is not held, or cannot be held across the
+ *	  operation, setting RTF_CONDEMNED on a route entry will prevent
+ *	  its rt_key, rt_gateway, rt_ifp and rt_ifa from being modified.
+ *	  This is typically done on a route that has been chosen for
+ *	  removal (from the tree) prior to dropping the rt_lock, so that
+ *	  those values will remain the same until the route is freed.
+ *
+ *	  This works because rt_setgate(), rt_setif() and rtsetifa() all
+ *	  run with rnh_lock held and are therefore single-threaded, thus
+ *	  exclusive. The flag will also prevent the route from being
+ *	  looked up via rt_lookup().
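+ *
+ *	  As an illustrative sketch, the removal path in
+ *	  rtrequest_common_locked() below condemns a route roughly so:
+ *
+ *		RT_LOCK(rt);
+ *		rt->rt_flags &= ~RTF_UP;
+ *		rt->rt_flags |= RTF_CONDEMNED;
+ *		RT_UNLOCK(rt);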
+ *
+ * generation_id
+ *
+ * - Assumes that 32-bit writes are atomic; no locks.
+ *
+ * rt_dlt, rt_output
+ *
+ * - Currently unused; no locks.
+ *
+ * Operations on a route entry can be described as follows:
+ *
+ * CREATE an entry with reference count set to 0 as part of RTM_ADD/RESOLVE.
+ *
+ * INSERTION of an entry into the radix tree holds the rnh_lock, checks
+ * for duplicates and then adds the entry. rtrequest returns the entry
+ * after bumping up the reference count to 1 (for the caller).
+ *
+ * LOOKUP of an entry holds the rnh_lock and bumps up the reference count
+ * before returning; it is valid to also bump up the reference count using
+ * RT_ADDREF after the lookup has returned an entry.
+ *
+ * REMOVAL of an entry from the radix tree holds the rnh_lock, removes the
+ * entry but does not decrement the reference count. Removal happens when
+ * the route is explicitly deleted (RTM_DELETE) or when it is in the cached
+ * state and it expires. The route is said to be "down" when it is no
+ * longer present in the tree. Freeing the entry will happen on the last
+ * reference release of such a "down" route.
+ *
+ * RT_ADDREF/RT_REMREF operate on the routing entry, incrementing/
+ * decrementing its reference count, rt_refcnt, while rt_lock is held.
+ * rt_refcnt is modified only via these routines. The general rule is to
+ * do RT_ADDREF in the function that is passing the entry as an argument,
+ * in order to prevent the entry from being freed by the callee.
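+ *
+ * As a sketch (illustrative only), a typical caller therefore does:
+ *
+ *	rt = rtalloc1(dst, 1, 0);	(returns with a reference held)
+ *	if (rt != NULL) {
+ *		... use rt ...
+ *		rtfree(rt);		(drop reference; may free rt)
+ *	}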
+ */
+
#define equal(a1, a2) (bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0)
#define SA(p) ((struct sockaddr *)(p))
__private_extern__ struct rtstat rtstat = { 0, 0, 0, 0, 0 };
struct radix_node_head *rt_tables[AF_MAX+1];
-lck_mtx_t *rt_mtx; /*### global routing tables mutex for now */
-lck_attr_t *rt_mtx_attr;
-lck_grp_t *rt_mtx_grp;
-lck_grp_attr_t *rt_mtx_grp_attr;
+lck_mtx_t *rnh_lock; /* global routing tables mutex */
+static lck_attr_t *rnh_lock_attr;
+static lck_grp_t *rnh_lock_grp;
+static lck_grp_attr_t *rnh_lock_grp_attr;
+
+/* Lock group and attribute for routing entry locks */
+static lck_attr_t *rte_mtx_attr;
+static lck_grp_t *rte_mtx_grp;
+static lck_grp_attr_t *rte_mtx_grp_attr;
lck_mtx_t *route_domain_mtx; /* routing domain mutex */
int rttrash = 0; /* routes not in table but not freed */
-static unsigned int rte_debug;
+unsigned int rte_debug;
/* Possible flags for rte_debug */
#define RTD_DEBUG 0x1 /* enable or disable rtentry debug facility */
-#define RTD_TRACE 0x2 /* trace alloc, free and refcnt */
+#define RTD_TRACE 0x2 /* trace alloc, free, refcnt and lock */
#define RTD_NO_FREE 0x4 /* don't free (good to catch corruptions) */
+#define RTE_NAME "rtentry" /* name for zone and rt_lock */
+
static struct zone *rte_zone; /* special zone for rtentry */
#define RTE_ZONE_MAX 65536 /* maximum elements in zone */
-#define RTE_ZONE_NAME "rtentry" /* name of rtentry zone */
+#define RTE_ZONE_NAME RTE_NAME /* name of rtentry zone */
#define RTD_INUSE 0xFEEDFACE /* entry is in use */
#define RTD_FREED 0xDEADBEEF /* entry is freed */
-#define RTD_TRSTACK_SIZE 8 /* depth of stack trace */
-#define RTD_REFHIST_SIZE 4 /* refcnt history size */
+/* For gdb */
+__private_extern__ unsigned int ctrace_stack_size = CTRACE_STACK_SIZE;
+__private_extern__ unsigned int ctrace_hist_size = CTRACE_HIST_SIZE;
/*
 * Debug variant of rtentry structure.
 */
struct rtentry_dbg {
struct rtentry rtd_entry; /* rtentry */
struct rtentry rtd_entry_saved; /* saved rtentry */
- u_int32_t rtd_inuse; /* in use pattern */
- u_int16_t rtd_refhold_cnt; /* # of rtref */
- u_int16_t rtd_refrele_cnt; /* # of rtunref */
+ uint32_t rtd_inuse; /* in use pattern */
+ uint16_t rtd_refhold_cnt; /* # of rtref */
+ uint16_t rtd_refrele_cnt; /* # of rtunref */
+ uint32_t rtd_lock_cnt; /* # of locks */
+ uint32_t rtd_unlock_cnt; /* # of unlocks */
/*
- * Thread and PC stack trace up to RTD_TRSTACK_SIZE
- * deep during alloc and free.
+ * Alloc and free callers.
*/
- struct thread *rtd_alloc_thread;
- void *rtd_alloc_stk_pc[RTD_TRSTACK_SIZE];
- struct thread *rtd_free_thread;
- void *rtd_free_stk_pc[RTD_TRSTACK_SIZE];
+ ctrace_t rtd_alloc;
+ ctrace_t rtd_free;
/*
* Circular lists of rtref and rtunref callers.
*/
- u_int16_t rtd_refhold_next;
- u_int16_t rtd_refrele_next;
- struct {
- struct thread *th;
- void *pc[RTD_TRSTACK_SIZE];
- } rtd_refhold[RTD_REFHIST_SIZE];
- struct {
- struct thread *th;
- void *pc[RTD_TRSTACK_SIZE];
- } rtd_refrele[RTD_REFHIST_SIZE];
+ ctrace_t rtd_refhold[CTRACE_HIST_SIZE];
+ ctrace_t rtd_refrele[CTRACE_HIST_SIZE];
+ /*
+ * Circular lists of locks and unlocks.
+ */
+ ctrace_t rtd_lock[CTRACE_HIST_SIZE];
+ ctrace_t rtd_unlock[CTRACE_HIST_SIZE];
/*
* Trash list linkage
*/
TAILQ_ENTRY(rtentry_dbg) rtd_trash_link;
};
-/* List of trash route entries protected by rt_mtx */
+#define atomic_add_16_ov(a, n) \
+ ((uint16_t) OSAddAtomic16(n, (volatile SInt16 *)a))
+#define atomic_add_32_ov(a, n) \
+ ((uint32_t) OSAddAtomic(n, a))
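+
+/*
+ * Both macros return the counter value prior to the addition
+ * (OSAddAtomic and OSAddAtomic16 return the original value), which is
+ * what makes them usable as circular-history indices; a sketch of the
+ * pattern used by the audit routines below:
+ *
+ *	idx = atomic_add_16_ov(&cnt, 1) % CTRACE_HIST_SIZE;
+ *	ctrace_record(&hist[idx]);
+ */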
+
+/* List of trash route entries protected by rnh_lock */
static TAILQ_HEAD(, rtentry_dbg) rttrash_head;
+static void rte_lock_init(struct rtentry *);
+static void rte_lock_destroy(struct rtentry *);
static inline struct rtentry *rte_alloc_debug(void);
static inline void rte_free_debug(struct rtentry *);
+static inline void rte_lock_debug(struct rtentry_dbg *);
+static inline void rte_unlock_debug(struct rtentry_dbg *);
static void rt_maskedcopy(struct sockaddr *,
struct sockaddr *, struct sockaddr *);
static void rtable_init(void **);
static inline void rtref_audit(struct rtentry_dbg *);
static inline void rtunref_audit(struct rtentry_dbg *);
-static struct rtentry *rtalloc1_common_locked(struct sockaddr *, int, u_long,
+static struct rtentry *rtalloc1_common_locked(struct sockaddr *, int, uint32_t,
unsigned int);
static int rtrequest_common_locked(int, struct sockaddr *,
struct sockaddr *, struct sockaddr *, int, struct rtentry **,
unsigned int);
-static void rtalloc_ign_common_locked(struct route *, u_long, unsigned int);
+static void rtalloc_ign_common_locked(struct route *, uint32_t, unsigned int);
static inline void sa_set_ifscope(struct sockaddr *, unsigned int);
static struct sockaddr *sin_copy(struct sockaddr_in *, struct sockaddr_in *,
unsigned int);
static struct sockaddr *mask_copy(struct sockaddr *, struct sockaddr_in *,
unsigned int);
+static struct sockaddr *sa_trim(struct sockaddr *, int);
static struct radix_node *node_lookup(struct sockaddr *, struct sockaddr *,
unsigned int);
static struct radix_node *node_lookup_default(void);
static int rn_match_ifscope(struct radix_node *, void *);
static struct ifaddr *ifa_ifwithroute_common_locked(int,
const struct sockaddr *, const struct sockaddr *, unsigned int);
+static struct rtentry *rte_alloc(void);
+static void rte_free(struct rtentry *);
+static void rtfree_common(struct rtentry *, boolean_t);
-__private_extern__ u_long route_generation = 0;
-extern int use_routegenid;
+uint32_t route_generation = 0;
/*
* sockaddr_in with embedded interface scope; this is used internally
/*
* Interface index (scope) of the primary interface; determined at
* the time when the default, non-scoped route gets added, changed
- * or deleted. Protected by rt_mtx.
+ * or deleted. Protected by rnh_lock.
*/
static unsigned int primary_ifscope = IFSCOPE_NONE;
}
/*
- * Set the ifscope of the primary interface; caller holds rt_mtx.
+ * Set the ifscope of the primary interface; caller holds rnh_lock.
*/
void
set_primary_ifscope(unsigned int ifscope)
}
/*
- * Return the ifscope of the primary interface; caller holds rt_mtx.
+ * Return the ifscope of the primary interface; caller holds rnh_lock.
*/
unsigned int
get_primary_ifscope(void)
return (SA(dst));
}
+/*
+ * Trim trailing zeroes on a sockaddr and update its length.
+ */
+static struct sockaddr *
+sa_trim(struct sockaddr *sa, int skip)
+{
+ caddr_t cp, base = (caddr_t)sa + skip;
+
+ if (sa->sa_len <= skip)
+ return (sa);
+
+ for (cp = base + (sa->sa_len - skip); cp > base && cp[-1] == 0;)
+ cp--;
+
+ sa->sa_len = (cp - base) + skip;
+ if (sa->sa_len < skip) {
+ /* Must not happen, and if so, panic */
+		panic("%s: broken logic (sa_len %d < skip %d)", __func__,
+ sa->sa_len, skip);
+ /* NOTREACHED */
+ } else if (sa->sa_len == skip) {
+ /* If we end up with all zeroes, then there's no mask */
+ sa->sa_len = 0;
+ }
+
+ return (sa);
+}
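+
+/*
+ * Worked example (illustrative): with skip = offsetof(struct
+ * sockaddr_in, sin_addr) == 4, a 255.255.255.0 mask carries one
+ * trailing zero byte in sin_addr and is trimmed to sa_len = 4 + 3 = 7,
+ * while a mask that is all zeroes past skip ends up with sa_len = 0,
+ * i.e. no mask at all.
+ */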
+
+/*
+ * Called by rtm_msg{1,2} routines to "scrub" the embedded interface scope
+ * away from the socket address structure, so that clients of the routing
+ * socket will not be confused by the presence of the embedded scope, or the
+ * side effect of the increased length due to that. The source sockaddr is
+ * not modified; instead, the scrubbing happens on the destination sockaddr
+ * storage that is passed in by the caller.
+ */
+struct sockaddr *
+rtm_scrub_ifscope(int idx, struct sockaddr *hint, struct sockaddr *sa,
+ struct sockaddr_storage *ss)
+{
+ struct sockaddr *ret = sa;
+
+ switch (idx) {
+ case RTAX_DST:
+ /*
+ * If this is for an AF_INET destination address, call
+ * sin_copy() with IFSCOPE_NONE as it does what we need.
+ */
+ if (sa->sa_family == AF_INET &&
+ SINIFSCOPE(sa)->sin_ifscope != IFSCOPE_NONE) {
+ bzero(ss, sizeof (*ss));
+ ret = sin_copy(SIN(sa), SIN(ss), IFSCOPE_NONE);
+ }
+ break;
+
+ case RTAX_NETMASK: {
+ /*
+ * If this is for a mask, we can't tell whether or not
+ * there is an embedded interface scope, as the span of
+ * bytes between sa_len and the beginning of the mask
+ * (offset of sin_addr in the case of AF_INET) may be
+ * filled with all-ones by rn_addmask(), and hence we
+ * cannot rely on sa_family. Because of this, we use
+ * the sa_family of the hint sockaddr (RTAX_{DST,IFA})
+ * as indicator as to whether or not the mask is to be
+ * treated as one for AF_INET. Clearing the embedded
+ * scope involves setting it to IFSCOPE_NONE followed
+ * by calling sa_trim() to trim trailing zeroes from
+ * the storage sockaddr, which reverses what was done
+ * earlier by mask_copy() on the source sockaddr.
+ */
+ int skip = offsetof(struct sockaddr_in, sin_addr);
+ if (sa->sa_len > skip && sa->sa_len <= sizeof (*ss) &&
+ hint != NULL && hint->sa_family == AF_INET) {
+ bzero(ss, sizeof (*ss));
+ bcopy(sa, ss, sa->sa_len);
+ SINIFSCOPE(ss)->sin_ifscope = IFSCOPE_NONE;
+ ret = sa_trim(SA(ss), skip);
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ return (ret);
+}
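+
+/*
+ * Typical usage (hypothetical caller; the intended callers are the
+ * rtm_msg{1,2} routines):
+ *
+ *	struct sockaddr_storage ss;
+ *	sa = rtm_scrub_ifscope(RTAX_DST, NULL, sa, &ss);
+ *
+ * On return, sa points either at the original sockaddr (nothing to
+ * scrub) or at ss with the embedded scope cleared; hint is consulted
+ * only in the RTAX_NETMASK case.
+ */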
+
/*
* Callback leaf-matching routine for rn_matchaddr_args used
* for looking up an exact match for a scoped route entry.
if (rte_debug != 0)
rte_debug |= RTD_DEBUG;
- rt_mtx_grp_attr = lck_grp_attr_alloc_init();
-
- rt_mtx_grp = lck_grp_alloc_init("route", rt_mtx_grp_attr);
-
- rt_mtx_attr = lck_attr_alloc_init();
-
- if ((rt_mtx = lck_mtx_alloc_init(rt_mtx_grp, rt_mtx_attr)) == NULL) {
- printf("route_init: can't alloc rt_mtx\n");
+ rnh_lock_grp_attr = lck_grp_attr_alloc_init();
+ rnh_lock_grp = lck_grp_alloc_init("route", rnh_lock_grp_attr);
+ rnh_lock_attr = lck_attr_alloc_init();
+ if ((rnh_lock = lck_mtx_alloc_init(rnh_lock_grp,
+ rnh_lock_attr)) == NULL) {
+ printf("route_init: can't alloc rnh_lock\n");
return;
}
- lck_mtx_lock(rt_mtx);
+ rte_mtx_grp_attr = lck_grp_attr_alloc_init();
+ rte_mtx_grp = lck_grp_alloc_init(RTE_NAME, rte_mtx_grp_attr);
+ rte_mtx_attr = lck_attr_alloc_init();
+
+ lck_mtx_lock(rnh_lock);
rn_init(); /* initialize all zeroes, all ones, mask table */
- lck_mtx_unlock(rt_mtx);
+ lck_mtx_unlock(rnh_lock);
rtable_init((void **)rt_tables);
route_domain_mtx = routedomain.dom_mtx;
TAILQ_INIT(&rttrash_head);
}
+/*
+ * Atomically increment route generation counter
+ */
+void
+routegenid_update(void)
+{
+ (void) atomic_add_32_ov(&route_generation, 1);
+}
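+
+/*
+ * Cached routes are validated against this counter; a sketch of the
+ * check performed in rtalloc_ign_common_locked() below:
+ *
+ *	if (ro->ro_rt->generation_id != route_generation) {
+ *		rtfree_locked(ro->ro_rt);	(stale; re-resolve)
+ *		ro->ro_rt = NULL;
+ *	}
+ */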
+
/*
* Packet routing routines.
*/
void
rtalloc(struct route *ro)
{
- rtalloc_ign(ro, 0UL);
+ rtalloc_ign(ro, 0);
}
void
-rtalloc_ign_locked(struct route *ro, u_long ignore)
+rtalloc_ign_locked(struct route *ro, uint32_t ignore)
{
return (rtalloc_ign_common_locked(ro, ignore, IFSCOPE_NONE));
}
void
-rtalloc_scoped_ign_locked(struct route *ro, u_long ignore, unsigned int ifscope)
+rtalloc_scoped_ign_locked(struct route *ro, uint32_t ignore,
+ unsigned int ifscope)
{
return (rtalloc_ign_common_locked(ro, ignore, ifscope));
}
static void
-rtalloc_ign_common_locked(struct route *ro, u_long ignore,
+rtalloc_ign_common_locked(struct route *ro, uint32_t ignore,
unsigned int ifscope)
{
struct rtentry *rt;
if ((rt = ro->ro_rt) != NULL) {
- if (rt->rt_ifp != NULL && rt->rt_flags & RTF_UP)
+ RT_LOCK_SPIN(rt);
+ if (rt->rt_ifp != NULL && (rt->rt_flags & RTF_UP) &&
+ rt->generation_id == route_generation) {
+ RT_UNLOCK(rt);
return;
+ }
+ RT_UNLOCK(rt);
rtfree_locked(rt);
ro->ro_rt = NULL;
}
ro->ro_rt = rtalloc1_common_locked(&ro->ro_dst, 1, ignore, ifscope);
- if (ro->ro_rt)
+ if (ro->ro_rt != NULL) {
ro->ro_rt->generation_id = route_generation;
+ RT_LOCK_ASSERT_NOTHELD(ro->ro_rt);
+ }
}
+
void
-rtalloc_ign(struct route *ro, u_long ignore)
+rtalloc_ign(struct route *ro, uint32_t ignore)
{
- lck_mtx_assert(rt_mtx, LCK_MTX_ASSERT_NOTOWNED);
- lck_mtx_lock(rt_mtx);
+ lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
+ lck_mtx_lock(rnh_lock);
rtalloc_ign_locked(ro, ignore);
- lck_mtx_unlock(rt_mtx);
+ lck_mtx_unlock(rnh_lock);
+}
+
+void
+rtalloc_scoped_ign(struct route *ro, uint32_t ignore, unsigned int ifscope)
+{
+ lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
+ lck_mtx_lock(rnh_lock);
+ rtalloc_scoped_ign_locked(ro, ignore, ifscope);
+ lck_mtx_unlock(rnh_lock);
}
struct rtentry *
-rtalloc1_locked(struct sockaddr *dst, int report, u_long ignflags)
+rtalloc1_locked(struct sockaddr *dst, int report, uint32_t ignflags)
{
return (rtalloc1_common_locked(dst, report, ignflags, IFSCOPE_NONE));
}
struct rtentry *
-rtalloc1_scoped_locked(struct sockaddr *dst, int report, u_long ignflags,
+rtalloc1_scoped_locked(struct sockaddr *dst, int report, uint32_t ignflags,
unsigned int ifscope)
{
return (rtalloc1_common_locked(dst, report, ignflags, ifscope));
* Or, at least try.. Create a cloned route if needed.
*/
static struct rtentry *
-rtalloc1_common_locked(struct sockaddr *dst, int report, u_long ignflags,
+rtalloc1_common_locked(struct sockaddr *dst, int report, uint32_t ignflags,
unsigned int ifscope)
{
struct radix_node_head *rnh = rt_tables[dst->sa_family];
struct rtentry *rt, *newrt = NULL;
struct rt_addrinfo info;
- u_long nflags;
+ uint32_t nflags;
int err = 0, msgtype = RTM_MISS;
if (rnh == NULL)
if (rt == NULL)
goto unreachable;
+ RT_LOCK_SPIN(rt);
newrt = rt;
nflags = rt->rt_flags & ~ignflags;
+ RT_UNLOCK(rt);
if (report && (nflags & (RTF_CLONING | RTF_PRCLONING))) {
/*
* We are apparently adding (report = 0 in delete).
}
struct rtentry *
-rtalloc1(struct sockaddr *dst, int report, u_long ignflags)
+rtalloc1(struct sockaddr *dst, int report, uint32_t ignflags)
{
struct rtentry * entry;
- lck_mtx_assert(rt_mtx, LCK_MTX_ASSERT_NOTOWNED);
- lck_mtx_lock(rt_mtx);
+ lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
+ lck_mtx_lock(rnh_lock);
entry = rtalloc1_locked(dst, report, ignflags);
- lck_mtx_unlock(rt_mtx);
+ lck_mtx_unlock(rnh_lock);
+ return (entry);
+}
+
+struct rtentry *
+rtalloc1_scoped(struct sockaddr *dst, int report, uint32_t ignflags,
+ unsigned int ifscope)
+{
+ struct rtentry * entry;
+ lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
+ lck_mtx_lock(rnh_lock);
+ entry = rtalloc1_scoped_locked(dst, report, ignflags, ifscope);
+ lck_mtx_unlock(rnh_lock);
return (entry);
}
void
rtfree_locked(struct rtentry *rt)
{
- /*
- * find the tree for that address family
- * Note: in the case of igmp packets, there might not be an rnh
- */
- struct radix_node_head *rnh;
+ rtfree_common(rt, TRUE);
+}
- lck_mtx_assert(rt_mtx, LCK_MTX_ASSERT_OWNED);
+static void
+rtfree_common(struct rtentry *rt, boolean_t locked)
+{
+ struct radix_node_head *rnh;
- /* See 3582620 - We hit this during the transition from funnels to locks */
- if (rt == 0) {
- printf("rtfree - rt is NULL\n");
+ /*
+ * Atomically decrement the reference count and if it reaches 0,
+ * and there is a close function defined, call the close function.
+ */
+ RT_LOCK_SPIN(rt);
+ if (rtunref(rt) > 0) {
+ RT_UNLOCK(rt);
return;
}
-
- rnh = rt_tables[rt_key(rt)->sa_family];
/*
- * decrement the reference count by one and if it reaches 0,
- * and there is a close function defined, call the close function
+ * To avoid violating lock ordering, we must drop rt_lock before
+ * trying to acquire the global rnh_lock. If we are called with
+ * rnh_lock held, then we already have exclusive access; otherwise
+ * we do the lock dance.
*/
- rtunref(rt);
- if (rt->rt_refcnt > 0)
- return;
+ if (!locked) {
+ /*
+ * Note that we check it again below after grabbing rnh_lock,
+ * since it is possible that another thread doing a lookup wins
+ * the race, grabs the rnh_lock first, and bumps up the reference
+ * count in which case the route should be left alone as it is
+ * still in use. It's also possible that another thread frees
+ * the route after we drop rt_lock; to prevent the route from
+ * being freed, we hold an extra reference.
+ */
+ RT_ADDREF_LOCKED(rt);
+ RT_UNLOCK(rt);
+ lck_mtx_lock(rnh_lock);
+ RT_LOCK_SPIN(rt);
+ RT_REMREF_LOCKED(rt);
+ if (rt->rt_refcnt > 0) {
+ /* We've lost the race, so abort */
+ RT_UNLOCK(rt);
+ goto done;
+ }
+ }
+
+ /*
+ * We may be blocked on other lock(s) as part of freeing
+ * the entry below, so convert from spin to full mutex.
+ */
+ RT_CONVERT_LOCK(rt);
+
+ lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+
+ /* Negative refcnt must never happen */
+ if (rt->rt_refcnt != 0)
+ panic("rt %p invalid refcnt %d", rt, rt->rt_refcnt);
+
+ /*
+ * find the tree for that address family
+ * Note: in the case of igmp packets, there might not be an rnh
+ */
+ rnh = rt_tables[rt_key(rt)->sa_family];
/*
* On last reference give the "close method" a chance to cleanup
* close routine typically issues RTM_DELETE which clears the RTF_UP
* flag on the entry so that the code below reclaims the storage.
*/
- if (rnh && rnh->rnh_close && rt->rt_refcnt == 0)
+ if (rnh != NULL && rnh->rnh_close != NULL)
rnh->rnh_close((struct radix_node *)rt, rnh);
/*
- * If we are no longer "up" (and ref == 0)
- * then we can free the resources associated
- * with the route.
+ * If we are no longer "up" (and ref == 0) then we can free the
+ * resources associated with the route.
*/
if (!(rt->rt_flags & RTF_UP)) {
if (rt->rt_nodes->rn_flags & (RNF_ACTIVE | RNF_ROOT))
- panic ("rtfree 2");
+ panic("rt %p freed while in radix tree\n", rt);
/*
* the rtentry must have been removed from the routing table
- * so it is represented in rttrash.. remove that now.
+ * so it is represented in rttrash; remove that now.
*/
- (void) OSDecrementAtomic((SInt32 *)&rttrash);
+ (void) OSDecrementAtomic(&rttrash);
if (rte_debug & RTD_DEBUG) {
TAILQ_REMOVE(&rttrash_head, (struct rtentry_dbg *)rt,
rtd_trash_link);
}
-#ifdef DIAGNOSTIC
- if (rt->rt_refcnt < 0) {
- printf("rtfree: %p not freed (neg refs) cnt=%d\n",
- rt, rt->rt_refcnt);
- return;
- }
-#endif
+ /*
+ * Route is no longer in the tree and refcnt is 0;
+ * we have exclusive access, so destroy it.
+ */
+ RT_UNLOCK(rt);
/*
* release references on items we hold them on..
* e.g other routes and ifaddrs.
*/
- if (rt->rt_parent)
+ if (rt->rt_parent != NULL) {
rtfree_locked(rt->rt_parent);
+ rt->rt_parent = NULL;
+ }
- if(rt->rt_ifa) {
+ if (rt->rt_ifa != NULL) {
ifafree(rt->rt_ifa);
rt->rt_ifa = NULL;
}
/*
- * The key is separatly alloc'd so free it (see rt_setgate()).
+ * Now free any attached link-layer info.
+ */
+ if (rt->rt_llinfo != NULL) {
+ if (rt->rt_llinfo_free != NULL)
+ (*rt->rt_llinfo_free)(rt->rt_llinfo);
+ else
+ R_Free(rt->rt_llinfo);
+ rt->rt_llinfo = NULL;
+ }
+
+ /*
+ * The key is separately alloc'd so free it (see rt_setgate()).
* This also frees the gateway, as they are always malloc'd
* together.
*/
/*
* and the rtentry itself of course
*/
+ rte_lock_destroy(rt);
rte_free(rt);
+ } else {
+ /*
+ * The "close method" has been called, but the route is
+ * still in the radix tree with zero refcnt, i.e. "up"
+ * and in the cached state.
+ */
+ RT_UNLOCK(rt);
}
+done:
+ if (!locked)
+ lck_mtx_unlock(rnh_lock);
}
void
rtfree(struct rtentry *rt)
{
- lck_mtx_assert(rt_mtx, LCK_MTX_ASSERT_NOTOWNED);
- lck_mtx_lock(rt_mtx);
- rtfree_locked(rt);
- lck_mtx_unlock(rt_mtx);
+ rtfree_common(rt, FALSE);
}
/*
* the refcount reaches zero. Unless you have really good reason,
* use rtfree not rtunref.
*/
-void
+int
rtunref(struct rtentry *p)
{
- lck_mtx_assert(rt_mtx, LCK_MTX_ASSERT_OWNED);
+ RT_LOCK_ASSERT_HELD(p);
- if (p->rt_refcnt <= 0)
- panic("rtunref: bad refcnt %d for rt=%p\n", p->rt_refcnt, p);
+ if (p->rt_refcnt == 0)
+ panic("%s(%p) bad refcnt\n", __func__, p);
+
+ --p->rt_refcnt;
if (rte_debug & RTD_DEBUG)
rtunref_audit((struct rtentry_dbg *)p);
- p->rt_refcnt--;
+ /* Return new value */
+ return (p->rt_refcnt);
}
static inline void
rtunref_audit(struct rtentry_dbg *rte)
{
+ uint16_t idx;
+
if (rte->rtd_inuse != RTD_INUSE)
panic("rtunref: on freed rte=%p\n", rte);
- rte->rtd_refrele_cnt++;
-
- if (rte_debug & RTD_TRACE) {
- rte->rtd_refrele[rte->rtd_refrele_next].th = current_thread();
- bzero(rte->rtd_refrele[rte->rtd_refrele_next].pc,
- sizeof (rte->rtd_refrele[rte->rtd_refrele_next].pc));
- (void) OSBacktrace(rte->rtd_refrele[rte->rtd_refrele_next].pc,
- RTD_TRSTACK_SIZE);
-
- rte->rtd_refrele_next =
- (rte->rtd_refrele_next + 1) % RTD_REFHIST_SIZE;
- }
+ idx = atomic_add_16_ov(&rte->rtd_refrele_cnt, 1) % CTRACE_HIST_SIZE;
+ if (rte_debug & RTD_TRACE)
+ ctrace_record(&rte->rtd_refrele[idx]);
}
/*
void
rtref(struct rtentry *p)
{
- lck_mtx_assert(rt_mtx, LCK_MTX_ASSERT_OWNED);
+ RT_LOCK_ASSERT_HELD(p);
- if (p->rt_refcnt < 0)
- panic("rtref: bad refcnt %d for rt=%p\n", p->rt_refcnt, p);
+ if (++p->rt_refcnt == 0)
+ panic("%s(%p) bad refcnt\n", __func__, p);
if (rte_debug & RTD_DEBUG)
rtref_audit((struct rtentry_dbg *)p);
-
- p->rt_refcnt++;
}
static inline void
rtref_audit(struct rtentry_dbg *rte)
{
+ uint16_t idx;
+
if (rte->rtd_inuse != RTD_INUSE)
panic("rtref_audit: on freed rte=%p\n", rte);
- rte->rtd_refhold_cnt++;
-
- if (rte_debug & RTD_TRACE) {
- rte->rtd_refhold[rte->rtd_refhold_next].th = current_thread();
- bzero(rte->rtd_refhold[rte->rtd_refhold_next].pc,
- sizeof (rte->rtd_refhold[rte->rtd_refhold_next].pc));
- (void) OSBacktrace(rte->rtd_refhold[rte->rtd_refhold_next].pc,
- RTD_TRSTACK_SIZE);
-
- rte->rtd_refhold_next =
- (rte->rtd_refhold_next + 1) % RTD_REFHIST_SIZE;
- }
+ idx = atomic_add_16_ov(&rte->rtd_refhold_cnt, 1) % CTRACE_HIST_SIZE;
+ if (rte_debug & RTD_TRACE)
+ ctrace_record(&rte->rtd_refhold[idx]);
}
void
rtsetifa(struct rtentry *rt, struct ifaddr* ifa)
{
- if (rt == NULL)
- panic("rtsetifa");
+ lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+
+ RT_LOCK_ASSERT_HELD(rt);
if (rt->rt_ifa == ifa)
return;
ifaref(rt->rt_ifa);
}
-void
-ifafree(struct ifaddr *ifa)
-{
- int oldval;
-
- if (ifa == NULL)
- panic("ifafree");
-
- oldval = OSAddAtomic(-1, (SInt32 *)&ifa->ifa_refcnt);
-
- if (oldval == 0) {
- if ((ifa->ifa_debug & IFA_ATTACHED) != 0) {
- panic("ifa attached to ifp is being freed\n");
- }
- FREE(ifa, M_IFADDR);
- }
-}
-
-void
-ifaref(struct ifaddr *ifa)
-{
- if (ifa == NULL)
- panic("ifaref");
-
- if (OSAddAtomic(1, (SInt32 *)&ifa->ifa_refcnt) == 0xffffffff)
- panic("ifaref - reference count rolled over!");
-}
-
/*
* Force a routing table entry to the specified
* destination to go through the given gateway.
unsigned int ifscope = (ifp != NULL) ? ifp->if_index : IFSCOPE_NONE;
struct sockaddr_in sin;
- lck_mtx_assert(rt_mtx, LCK_MTX_ASSERT_NOTOWNED);
- lck_mtx_lock(rt_mtx);
+ lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
+ lck_mtx_lock(rnh_lock);
/*
* Verify the gateway is directly reachable; if scoped routing
/* Lookup route to the destination (from the original IP header) */
rt = rtalloc1_scoped_locked(dst, 0, RTF_CLONING|RTF_PRCLONING, ifscope);
+ if (rt != NULL)
+ RT_LOCK(rt);
/* Embed scope in src for comparison against rt_gateway below */
if (ip_doscopedroute && src->sa_family == AF_INET)
* we have a routing loop, perhaps as a result of an interface
* going down recently.
*/
- if (!(flags & RTF_DONE) && rt &&
+ if (!(flags & RTF_DONE) && rt != NULL &&
(!equal(src, rt->rt_gateway) || !equal(rt->rt_ifa->ifa_addr,
ifa->ifa_addr))) {
error = EINVAL;
error = EHOSTUNREACH;
}
}
-
+
if (ifa) {
ifafree(ifa);
ifa = NULL;
}
-
- if (error)
+
+ if (error) {
+ if (rt != NULL)
+ RT_UNLOCK(rt);
goto done;
+ }
+
/*
* Create a new entry if we just got back a wildcard entry
	 * or the lookup failed. This is necessary for hosts
* which use routing redirects generated by smart gateways
* to dynamically build the routing tables.
*/
- if ((rt == 0) || (rt_mask(rt) && rt_mask(rt)->sa_len < 2))
+ if ((rt == NULL) || (rt_mask(rt) != NULL && rt_mask(rt)->sa_len < 2))
goto create;
/*
* Don't listen to the redirect if it's
* for a route to an interface.
*/
+ RT_LOCK_ASSERT_HELD(rt);
if (rt->rt_flags & RTF_GATEWAY) {
if (((rt->rt_flags & RTF_HOST) == 0) && (flags & RTF_HOST)) {
/*
* to net; similar to cloned routes, the newly
* created host route is scoped as well.
*/
- create:
+create:
+ if (rt != NULL)
+ RT_UNLOCK(rt);
flags |= RTF_GATEWAY | RTF_DYNAMIC;
error = rtrequest_scoped_locked(RTM_ADD, dst,
gateway, netmask, flags, NULL, ifscope);
* add the key and gateway (in one malloc'd chunk).
*/
error = rt_setgate(rt, rt_key(rt), gateway);
+ RT_UNLOCK(rt);
}
} else {
+ RT_UNLOCK(rt);
error = EHOSTUNREACH;
}
done:
- if (rt) {
+ if (rt != NULL) {
+ RT_LOCK_ASSERT_NOTHELD(rt);
if (rtp && !error)
*rtp = rt;
else
if (stat != NULL)
(*stat)++;
if (use_routegenid)
- route_generation++;
+ routegenid_update();
}
+ lck_mtx_unlock(rnh_lock);
bzero((caddr_t)&info, sizeof(info));
info.rti_info[RTAX_DST] = dst;
info.rti_info[RTAX_GATEWAY] = gateway;
info.rti_info[RTAX_NETMASK] = netmask;
info.rti_info[RTAX_AUTHOR] = src;
rt_missmsg(RTM_REDIRECT, &info, flags, error);
- lck_mtx_unlock(rt_mtx);
}
/*
* Routing table ioctl interface.
*/
int
-rtioctl(int req, caddr_t data, struct proc *p)
+rtioctl(unsigned long req, caddr_t data, struct proc *p)
{
#pragma unused(p)
#if INET && MROUTING
{
struct ifaddr *ifa;
- lck_mtx_lock(rt_mtx);
+ lck_mtx_lock(rnh_lock);
ifa = ifa_ifwithroute_locked(flags, dst, gateway);
- lck_mtx_unlock(rt_mtx);
+ lck_mtx_unlock(rnh_lock);
return (ifa);
}
struct rtentry *rt = NULL;
struct sockaddr_in dst_in, gw_in;
- lck_mtx_assert(rt_mtx, LCK_MTX_ASSERT_OWNED);
+ lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
if (ip_doscopedroute) {
/*
if (ifa == NULL) {
/* Workaround to avoid gcc warning regarding const variable */
rt = rtalloc1_scoped_locked((struct sockaddr *)(size_t)dst,
- 0, 0UL, ifscope);
+ 0, 0, ifscope);
if (rt != NULL) {
+ RT_LOCK_SPIN(rt);
ifa = rt->rt_ifa;
if (ifa != NULL)
ifaref(ifa);
- rtunref(rt);
+ RT_REMREF_LOCKED(rt);
+ RT_UNLOCK(rt);
rt = NULL;
}
}
if ((ifa == NULL ||
!equal(ifa->ifa_addr, (struct sockaddr *)(size_t)gateway)) &&
(rt = rtalloc1_scoped_locked((struct sockaddr *)(size_t)gateway,
- 0, 0UL, ifscope)) != NULL) {
+ 0, 0, ifscope)) != NULL) {
if (ifa != NULL)
ifafree(ifa);
+ RT_LOCK_SPIN(rt);
ifa = rt->rt_ifa;
if (ifa != NULL)
ifaref(ifa);
- rtunref(rt);
+ RT_REMREF_LOCKED(rt);
+ RT_UNLOCK(rt);
}
/*
* If an interface scope was specified, the interface index of
return (ifa);
}
-#define ROUNDUP(a) (a>0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
-
-static int rt_fixdelete __P((struct radix_node *, void *));
-static int rt_fixchange __P((struct radix_node *, void *));
+static int rt_fixdelete(struct radix_node *, void *);
+static int rt_fixchange(struct radix_node *, void *);
struct rtfc_arg {
struct rtentry *rt0;
struct sockaddr_in sin, mask;
#define senderr(x) { error = x ; goto bad; }
- lck_mtx_assert(rt_mtx, LCK_MTX_ASSERT_OWNED);
+ lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
/*
* Find the correct routing tree to use for this Address Family
*/
* will decrement the reference via rtfree_locked() and then
* possibly deallocate it.
*/
- rtref(rt);
+ RT_LOCK(rt);
+ RT_ADDREF_LOCKED(rt);
rt->rt_flags &= ~RTF_UP;
+ /*
+ * For consistency, in case the caller didn't set the flag.
+ */
+ rt->rt_flags |= RTF_CONDEMNED;
+
/*
* Now search what's left of the subtree for any cloned
* routes which might have been formed from this node.
*/
if ((rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) &&
rt_mask(rt)) {
+ RT_UNLOCK(rt);
rnh->rnh_walktree_from(rnh, dst, rt_mask(rt),
rt_fixdelete, rt);
+ RT_LOCK(rt);
}
/*
* This might result in another rtentry being freed if
* we held its last reference.
*/
- if (rt->rt_gwroute) {
- rt = rt->rt_gwroute;
- rtfree_locked(rt);
- (rt = (struct rtentry *)rn)->rt_gwroute = 0;
+ if (rt->rt_gwroute != NULL) {
+ rtfree_locked(rt->rt_gwroute);
+ rt->rt_gwroute = NULL;
}
/*
* one more rtentry floating around that is not
* linked to the routing table.
*/
- (void) OSIncrementAtomic((SInt32 *)&rttrash);
+ (void) OSIncrementAtomic(&rttrash);
if (rte_debug & RTD_DEBUG) {
TAILQ_INSERT_TAIL(&rttrash_head,
(struct rtentry_dbg *)rt, rtd_trash_link);
if (rt_inet_default(rt, rt_key(rt)))
set_primary_ifscope(IFSCOPE_NONE);
+ RT_UNLOCK(rt);
+
/*
* If the caller wants it, then it can have it,
* but it's up to it to free the rtentry as we won't be
case RTM_RESOLVE:
if (ret_nrt == 0 || (rt = *ret_nrt) == 0)
senderr(EINVAL);
+ /*
+ * If cloning, we have the parent route given by the caller
+ * and will use its rt_gateway, rt_rmx as part of the cloning
+ * process below. Since rnh_lock is held at this point, the
+ * parent's rt_ifa and rt_gateway will not change, and its
+ * relevant rt_flags will not change as well. The only thing
+ * that could change are the metrics, and thus we hold the
+ * parent route's rt_lock later on during the actual copying
+ * of rt_rmx.
+ */
ifa = rt->rt_ifa;
ifaref(ifa);
flags = rt->rt_flags &
if ((rt = rte_alloc()) == NULL)
senderr(ENOBUFS);
Bzero(rt, sizeof(*rt));
+ rte_lock_init(rt);
+ RT_LOCK(rt);
rt->rt_flags = RTF_UP | flags;
/*
* also add the rt_gwroute if possible.
*/
if ((error = rt_setgate(rt, dst, gateway)) != 0) {
+ RT_UNLOCK(rt);
+ rte_lock_destroy(rt);
rte_free(rt);
senderr(error);
}
RTF_CLONING | RTF_PRCLONING);
}
if (rt2 && rt2->rt_parent) {
- rtrequest_locked(RTM_DELETE,
- (struct sockaddr *)rt_key(rt2),
- rt2->rt_gateway,
- rt_mask(rt2), rt2->rt_flags, 0);
+ /*
+ * rnh_lock is held here, so rt_key and
+ * rt_gateway of rt2 will not change.
+ */
+ (void) rtrequest_locked(RTM_DELETE, rt_key(rt2),
+ rt2->rt_gateway, rt_mask(rt2),
+ rt2->rt_flags, 0);
rtfree_locked(rt2);
rn = rnh->rnh_addaddr((caddr_t)ndst,
(caddr_t)netmask,
* then un-make it (this should be a function)
*/
if (rn == 0) {
- if (rt->rt_gwroute)
+ if (rt->rt_gwroute) {
rtfree_locked(rt->rt_gwroute);
+ rt->rt_gwroute = NULL;
+ }
if (rt->rt_ifa) {
ifafree(rt->rt_ifa);
+ rt->rt_ifa = NULL;
}
R_Free(rt_key(rt));
+ RT_UNLOCK(rt);
+ rte_lock_destroy(rt);
rte_free(rt);
senderr(EEXIST);
}
rt->rt_parent = 0;
/*
- * If we got here from RESOLVE, then we are cloning
- * so clone the rest, and note that we
- * are a clone (and increment the parent's references)
+ * If we got here from RESOLVE, then we are cloning so clone
+ * the rest, and note that we are a clone (and increment the
+ * parent's references). rnh_lock is still held, which prevents
+ * a lookup from returning the newly-created route. Hence
+ * holding and releasing the parent's rt_lock while still
+ * holding the route's rt_lock is safe since the new route
+ * is not yet externally visible.
*/
if (req == RTM_RESOLVE) {
+ RT_LOCK_SPIN(*ret_nrt);
rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */
if ((*ret_nrt)->rt_flags & (RTF_CLONING | RTF_PRCLONING)) {
rt->rt_parent = (*ret_nrt);
- rtref(*ret_nrt);
+ RT_ADDREF_LOCKED(*ret_nrt);
}
+ RT_UNLOCK(*ret_nrt);
}
/*
ifafree(ifa);
ifa = 0;
- /*
- * We repeat the same procedure from rt_setgate() here because
- * it doesn't fire when we call it there because the node
- * hasn't been added to the tree yet.
- */
- if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != 0) {
- struct rtfc_arg arg;
- arg.rnh = rnh;
- arg.rt0 = rt;
- rnh->rnh_walktree_from(rnh, rt_key(rt), rt_mask(rt),
- rt_fixchange, &arg);
- }
-
/*
* If this is the (non-scoped) default route, record
* the interface index used for the primary ifscope.
*/
if (ret_nrt) {
*ret_nrt = rt;
- rtref(rt);
+ RT_ADDREF_LOCKED(rt);
+ }
+
+ /*
+ * We repeat the same procedure from rt_setgate() here because
+ * it doesn't fire when we call it there because the node
+ * hasn't been added to the tree yet.
+ */
+ if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != 0) {
+ struct rtfc_arg arg;
+ arg.rnh = rnh;
+ arg.rt0 = rt;
+ RT_UNLOCK(rt);
+ rnh->rnh_walktree_from(rnh, rt_key(rt), rt_mask(rt),
+ rt_fixchange, &arg);
+ } else {
+ RT_UNLOCK(rt);
}
break;
}
struct rtentry **ret_nrt)
{
int error;
- lck_mtx_assert(rt_mtx, LCK_MTX_ASSERT_NOTOWNED);
- lck_mtx_lock(rt_mtx);
+ lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
+ lck_mtx_lock(rnh_lock);
error = rtrequest_locked(req, dst, gateway, netmask, flags, ret_nrt);
- lck_mtx_unlock(rt_mtx);
+ lck_mtx_unlock(rnh_lock);
return (error);
}
/*
struct rtentry *rt = (struct rtentry *)rn;
struct rtentry *rt0 = vp;
- lck_mtx_assert(rt_mtx, LCK_MTX_ASSERT_OWNED);
+ lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+ RT_LOCK(rt);
if (rt->rt_parent == rt0 &&
!(rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
- return rtrequest_locked(RTM_DELETE, rt_key(rt),
- (struct sockaddr *)0, rt_mask(rt),
- rt->rt_flags, (struct rtentry **)0);
+ /*
+ * Safe to drop rt_lock and use rt_key, since holding
+ * rnh_lock here prevents another thread from calling
+ * rt_setgate() on this route.
+ */
+ RT_UNLOCK(rt);
+ return (rtrequest_locked(RTM_DELETE, rt_key(rt), NULL,
+ rt_mask(rt), rt->rt_flags, NULL));
}
+ RT_UNLOCK(rt);
return 0;
}
u_char *xk1, *xm1, *xk2, *xmp;
int i, len, mlen;
- lck_mtx_assert(rt_mtx, LCK_MTX_ASSERT_OWNED);
+ lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+
+ RT_LOCK(rt);
if (!rt->rt_parent ||
- (rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING)))
+ (rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
+ RT_UNLOCK(rt);
return (0);
+ }
if (rt->rt_parent == rt0)
goto delete_rt;
/* avoid applying a less specific route */
xmp = (u_char *)rt_mask(rt->rt_parent);
mlen = rt_key(rt->rt_parent)->sa_len;
- if (mlen > rt_key(rt0)->sa_len)
+ if (mlen > rt_key(rt0)->sa_len) {
+ RT_UNLOCK(rt);
return (0);
+ }
for (i = rnh->rnh_treetop->rn_offset; i < mlen; i++) {
- if ((xmp[i] & ~(xmp[i] ^ xm1[i])) != xmp[i])
+ if ((xmp[i] & ~(xmp[i] ^ xm1[i])) != xmp[i]) {
+ RT_UNLOCK(rt);
return (0);
+ }
}
for (i = rnh->rnh_treetop->rn_offset; i < len; i++) {
- if ((xk2[i] & xm1[i]) != xk1[i])
+ if ((xk2[i] & xm1[i]) != xk1[i]) {
+ RT_UNLOCK(rt);
return (0);
+ }
}
/*
* changed/added under the node's mask. So, get rid of it.
*/
delete_rt:
+ /*
+ * Safe to drop rt_lock and use rt_key, since holding rnh_lock here
+ * prevents another thread from calling rt_setgate() on this route.
+ */
+ RT_UNLOCK(rt);
return (rtrequest_locked(RTM_DELETE, rt_key(rt), NULL,
rt_mask(rt), rt->rt_flags, NULL));
}
+/*
+ * Round up sockaddr len to a multiple of 32 bytes. This will reduce
+ * or even eliminate the need to re-allocate the chunk of memory used
+ * for rt_key and rt_gateway in the event the gateway portion changes.
+ * Certain code paths (e.g. IPSec) are notorious for caching the address
+ * of rt_gateway; this rounding-up would help ensure that the gateway
+ * portion never gets deallocated (though it may change contents) and
+ * thus greatly simplifies things.
+ */
+#define SA_SIZE(x) (-(-((uintptr_t)(x)) & -(32)))
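+
+/*
+ * For example, SA_SIZE(1) == 32, SA_SIZE(32) == 32, SA_SIZE(33) == 64,
+ * and SA_SIZE(0) == 0.
+ */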
+
+/*
+ * Sets the gateway and/or gateway route portion of a route; may be
+ * called on an existing route to modify the gateway portion. Both
+ * rt_key and rt_gateway are allocated out of the same memory chunk.
+ * Route entry lock must be held by caller; this routine will return
+ * with the lock held.
+ */
int
rt_setgate(struct rtentry *rt, struct sockaddr *dst, struct sockaddr *gate)
{
- int dlen = ROUNDUP(dst->sa_len), glen = ROUNDUP(gate->sa_len);
+ int dlen = SA_SIZE(dst->sa_len), glen = SA_SIZE(gate->sa_len);
struct radix_node_head *rnh = rt_tables[dst->sa_family];
- lck_mtx_assert(rt_mtx, LCK_MTX_ASSERT_OWNED);
+ lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+ RT_LOCK_ASSERT_HELD(rt);
+
+ /*
+ * If this is for a route that is on its way of being removed,
+ * or is temporarily frozen, reject the modification request.
+ */
+ if (rt->rt_flags & RTF_CONDEMNED)
+ return (EBUSY);
+
+ /* Add an extra ref for ourselves */
+ RT_ADDREF_LOCKED(rt);
/*
* A host route with the destination equal to the gateway
* The route might already exist if this is an RTM_CHANGE
* or a routing redirect, so try to delete it.
*/
- if (rt_key(rt))
- rtrequest_locked(RTM_DELETE, rt_key(rt),
+ if (rt_key(rt) != NULL) {
+ /*
+ * Safe to drop rt_lock and use rt_key, rt_gateway,
+ * since holding rnh_lock here prevents another thread
+ * from calling rt_setgate() on this route.
+ */
+ RT_UNLOCK(rt);
+ (void) rtrequest_locked(RTM_DELETE, rt_key(rt),
rt->rt_gateway, rt_mask(rt), rt->rt_flags, NULL);
+ RT_LOCK(rt);
+ }
+ /* Release extra ref */
+ RT_REMREF_LOCKED(rt);
return (EADDRNOTAVAIL);
}
ifscope = (dst->sa_family == AF_INET) ?
sa_get_ifscope(dst) : IFSCOPE_NONE;
+ RT_UNLOCK(rt);
gwrt = rtalloc1_scoped_locked(gate, 1, RTF_PRCLONING, ifscope);
+ if (gwrt != NULL)
+ RT_LOCK_ASSERT_NOTHELD(gwrt);
+ RT_LOCK(rt);
/*
* Cloning loop avoidance:
* for the gateway to be referred to by another route.
*/
if (gwrt == rt) {
- rtunref(gwrt);
+ RT_REMREF_LOCKED(gwrt);
+ /* Release extra ref */
+ RT_REMREF_LOCKED(rt);
return (EADDRINUSE); /* failure */
}
- /* If scoped, the gateway route must use the same interface */
+ /*
+ * If scoped, the gateway route must use the same interface;
+ * we're holding rnh_lock now, so rt_gateway and rt_ifp of gwrt
+ * should not change and are freely accessible.
+ */
if (ifscope != IFSCOPE_NONE && (rt->rt_flags & RTF_IFSCOPE) &&
gwrt != NULL && gwrt->rt_ifp != NULL &&
gwrt->rt_ifp->if_index != ifscope) {
- rtfree_locked(gwrt);
+ rtfree_locked(gwrt); /* rt != gwrt, no deadlock */
+ /* Release extra ref */
+ RT_REMREF_LOCKED(rt);
return ((rt->rt_flags & RTF_HOST) ?
EHOSTUNREACH : ENETUNREACH);
}
+ /* Check again since we dropped the lock above */
+ if (rt->rt_flags & RTF_CONDEMNED) {
+ if (gwrt != NULL)
+ rtfree_locked(gwrt);
+ /* Release extra ref */
+ RT_REMREF_LOCKED(rt);
+ return (EBUSY);
+ }
+
if (rt->rt_gwroute != NULL)
rtfree_locked(rt->rt_gwroute);
rt->rt_gwroute = gwrt;
* to the right place. Otherwise, malloc a new block and update
* the 'dst' address and point rt_gateway to the right place.
*/
- if (rt->rt_gateway == NULL || glen > ROUNDUP(rt->rt_gateway->sa_len)) {
+ if (rt->rt_gateway == NULL || glen > SA_SIZE(rt->rt_gateway->sa_len)) {
caddr_t new;
/* The underlying allocation is done with M_WAITOK set */
if (rt->rt_gwroute != NULL)
rtfree_locked(rt->rt_gwroute);
rt->rt_gwroute = NULL;
+ /* Release extra ref */
+ RT_REMREF_LOCKED(rt);
return (ENOBUFS);
}
* here to initialize a newly allocated route entry, in
* which case rt_key(rt) is NULL (and so does rt_gateway).
*/
- Bcopy(dst, new, dlen);
+ bzero(new, dlen + glen);
+ Bcopy(dst, new, dst->sa_len);
R_Free(rt_key(rt)); /* free old block; NULL is okay */
rt->rt_nodes->rn_key = new;
rt->rt_gateway = (struct sockaddr *)(new + dlen);
/*
* Copy the new gateway value into the memory chunk.
*/
- Bcopy(gate, rt->rt_gateway, glen);
+ Bcopy(gate, rt->rt_gateway, gate->sa_len);
/*
* For consistency between rt_gateway and rt_key(gwrt).
struct rtfc_arg arg;
arg.rnh = rnh;
arg.rt0 = rt;
+ RT_UNLOCK(rt);
rnh->rnh_walktree_from(rnh, rt_key(rt), rt_mask(rt),
rt_fixchange, &arg);
+ RT_LOCK(rt);
}
+ /* Release extra ref */
+ RT_REMREF_LOCKED(rt);
return (0);
}
+#undef SA_SIZE
+
static void
rt_maskedcopy(struct sockaddr *src, struct sockaddr *dst,
struct sockaddr *netmask)
struct radix_node *rn0, *rn;
boolean_t dontcare = (ifscope == IFSCOPE_NONE);
- lck_mtx_assert(rt_mtx, LCK_MTX_ASSERT_OWNED);
+ lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
if (!lookup_only)
netmask = NULL;
rn = rnh->rnh_lookup(dst, netmask, rnh);
else
rn = rnh->rnh_matchaddr(dst, rnh);
- goto done;
+
+ /*
+ * Don't return a root node; also, rnh_matchaddr callback
+ * would have done the necessary work to clear RTPRF_OURS
+ * for certain protocol families.
+ */
+ if (rn != NULL && (rn->rn_flags & RNF_ROOT))
+ rn = NULL;
+ if (rn != NULL) {
+ RT_LOCK_SPIN(RT(rn));
+ if (!(RT(rn)->rt_flags & RTF_CONDEMNED)) {
+ RT_ADDREF_LOCKED(RT(rn));
+ RT_UNLOCK(RT(rn));
+ } else {
+ RT_UNLOCK(RT(rn));
+ rn = NULL;
+ }
+ }
+ return (RT(rn));
}
/*
* any reason, there is no primary interface, return what we have.
*/
if (dontcare && (ifscope = get_primary_ifscope()) == IFSCOPE_NONE)
- goto validate;
+ goto done;
/*
* Keep the original result if either of the following is true:
* 2) The route uses the loopback interface, in which case the
* destination (host/net) is local/loopback.
*
- * Otherwise, do a more specified search using the scope.
+	 * Otherwise, do a more specific search using the scope;
+ * we're holding rnh_lock now, so rt_ifp should not change.
*/
if (rn != NULL) {
struct rtentry *rt = RT(rn);
RT(rn)->rt_ifp->if_index != ifscope)
rn = NULL;
-validate:
- if (rn != NULL && !lookup_only)
- (void) in_validate(rn);
-
done:
- if (rn != NULL && (rn->rn_flags & RNF_ROOT))
- rn = NULL;
- else if (rn != NULL)
- rtref(RT(rn));
+ if (rn != NULL) {
+ /*
+ * Manually clear RTPRF_OURS using in_validate() and
+ * bump up the reference count after, and not before;
+ * we only get here for AF_INET. node_lookup() has
+ * done the check against RNF_ROOT, so we can be sure
+ * that we're not returning a root node here.
+ */
+ RT_LOCK_SPIN(RT(rn));
+ if (!(RT(rn)->rt_flags & RTF_CONDEMNED)) {
+ if (!lookup_only)
+ (void) in_validate(rn);
+ RT_ADDREF_LOCKED(RT(rn));
+ RT_UNLOCK(RT(rn));
+ } else {
+ RT_UNLOCK(RT(rn));
+ rn = NULL;
+ }
+ }
return (RT(rn));
}
rtinit(struct ifaddr *ifa, int cmd, int flags)
{
int error;
- lck_mtx_assert(rt_mtx, LCK_MTX_ASSERT_NOTOWNED);
- lck_mtx_lock(rt_mtx);
+ lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
+ lck_mtx_lock(rnh_lock);
error = rtinit_locked(ifa, cmd, flags);
- lck_mtx_unlock(rt_mtx);
+ lck_mtx_unlock(rnh_lock);
return (error);
}
* We set "report" to FALSE so that if it doesn't exist,
* it doesn't report an error or clone a route, etc. etc.
*/
- rt = rtalloc1_locked(dst, 0, 0UL);
+ rt = rtalloc1_locked(dst, 0, 0);
if (rt) {
/*
* Ok so we found the rtentry. it has an extra reference
* for us at this stage. we won't need that so
* lop that off now.
*/
- rtunref(rt);
+ RT_LOCK_SPIN(rt);
if (rt->rt_ifa != ifa) {
+ RT_REMREF_LOCKED(rt);
+ RT_UNLOCK(rt);
/*
* If the interface in the rtentry doesn't match
* the interface we are using, then we don't
(void) m_free(m);
return (flags & RTF_HOST ? EHOSTUNREACH
: ENETUNREACH);
+ } else {
+ RT_REMREF_LOCKED(rt);
+ RT_UNLOCK(rt);
}
}
/* XXX */
* it doesn't exist, we could just return at this point
* with an "ELSE" clause, but apparently not..
*/
- lck_mtx_unlock(rt_mtx);
+ lck_mtx_unlock(rnh_lock);
return (flags & RTF_HOST ? EHOSTUNREACH
: ENETUNREACH);
}
*/
if (cmd == RTM_DELETE && error == 0 && (rt = nrt)) {
/*
- * notify any listenning routing agents of the change
+ * notify any listening routing agents of the change
*/
+ RT_LOCK(rt);
rt_newaddrmsg(cmd, ifa, error, nrt);
if (use_routegenid)
- route_generation++;
+ routegenid_update();
+ RT_UNLOCK(rt);
rtfree_locked(rt);
}
* We need to sanity check the result.
*/
if (cmd == RTM_ADD && error == 0 && (rt = nrt)) {
+ RT_LOCK(rt);
/*
* If it came back with an unexpected interface, then it must
* have already existed or something. (XXX)
*/
rt_newaddrmsg(cmd, ifa, error, nrt);
if (use_routegenid)
- route_generation++;
+ routegenid_update();
/*
* We just wanted to add it; we don't actually need a
* reference. This will result in a route that's added
* RTM_DELETE code will do the necessary step to adjust
* the reference count at deletion time.
*/
- rtunref(rt);
+ RT_REMREF_LOCKED(rt);
+ RT_UNLOCK(rt);
}
return (error);
}
-struct rtentry *
+static void
+rte_lock_init(struct rtentry *rt)
+{
+ lck_mtx_init(&rt->rt_lock, rte_mtx_grp, rte_mtx_attr);
+}
+
+static void
+rte_lock_destroy(struct rtentry *rt)
+{
+ RT_LOCK_ASSERT_NOTHELD(rt);
+ lck_mtx_destroy(&rt->rt_lock, rte_mtx_grp);
+}
+
+void
+rt_lock(struct rtentry *rt, boolean_t spin)
+{
+ RT_LOCK_ASSERT_NOTHELD(rt);
+ if (spin)
+ lck_mtx_lock_spin(&rt->rt_lock);
+ else
+ lck_mtx_lock(&rt->rt_lock);
+ if (rte_debug & RTD_DEBUG)
+ rte_lock_debug((struct rtentry_dbg *)rt);
+}
+
+void
+rt_unlock(struct rtentry *rt)
+{
+ RT_LOCK_ASSERT_HELD(rt);
+ if (rte_debug & RTD_DEBUG)
+ rte_unlock_debug((struct rtentry_dbg *)rt);
+ lck_mtx_unlock(&rt->rt_lock);
+}
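+
+/*
+ * Illustrative sketch, assuming the RT_LOCK* macros in route.h map onto
+ * rt_lock()/rt_unlock(): paths take the spin variant for short sections
+ * and convert to a full mutex hold before doing anything that may block:
+ *
+ *	RT_LOCK_SPIN(rt);
+ *	... quick, non-blocking checks ...
+ *	RT_CONVERT_LOCK(rt);
+ *	... calls that may block are safe from here on ...
+ *	RT_UNLOCK(rt);
+ */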
+
+static inline void
+rte_lock_debug(struct rtentry_dbg *rte)
+{
+ uint32_t idx;
+
+ idx = atomic_add_32_ov(&rte->rtd_lock_cnt, 1) % CTRACE_HIST_SIZE;
+ if (rte_debug & RTD_TRACE)
+ ctrace_record(&rte->rtd_lock[idx]);
+}
+
+static inline void
+rte_unlock_debug(struct rtentry_dbg *rte)
+{
+ uint32_t idx;
+
+ idx = atomic_add_32_ov(&rte->rtd_unlock_cnt, 1) % CTRACE_HIST_SIZE;
+ if (rte_debug & RTD_TRACE)
+ ctrace_record(&rte->rtd_unlock[idx]);
+}
+
+static struct rtentry *
rte_alloc(void)
{
if (rte_debug & RTD_DEBUG)
return ((struct rtentry *)zalloc(rte_zone));
}
-void
+static void
rte_free(struct rtentry *p)
{
if (rte_debug & RTD_DEBUG) {
rte = ((struct rtentry_dbg *)zalloc(rte_zone));
if (rte != NULL) {
bzero(rte, sizeof (*rte));
- if (rte_debug & RTD_TRACE) {
- rte->rtd_alloc_thread = current_thread();
- (void) OSBacktrace(rte->rtd_alloc_stk_pc,
- RTD_TRSTACK_SIZE);
- }
+ if (rte_debug & RTD_TRACE)
+ ctrace_record(&rte->rtd_alloc);
rte->rtd_inuse = RTD_INUSE;
}
return ((struct rtentry *)rte);
panic("rte_free: corrupted rte=%p\n", rte);
bcopy((caddr_t)p, (caddr_t)&rte->rtd_entry_saved, sizeof (*p));
- bzero((caddr_t)p, sizeof (*p));
+ /* Preserve rt_lock to help catch use-after-free cases */
+ bzero((caddr_t)p, offsetof(struct rtentry, rt_lock));
rte->rtd_inuse = RTD_FREED;
- if (rte_debug & RTD_TRACE) {
- rte->rtd_free_thread = current_thread();
- (void) OSBacktrace(rte->rtd_free_stk_pc, RTD_TRSTACK_SIZE);
- }
+ if (rte_debug & RTD_TRACE)
+ ctrace_record(&rte->rtd_free);
if (!(rte_debug & RTD_NO_FREE))
zfree(rte_zone, p);
}
+
+void
+ctrace_record(ctrace_t *tr)
+{
+ tr->th = current_thread();
+ bzero(tr->pc, sizeof (tr->pc));
+ (void) OSBacktrace(tr->pc, CTRACE_STACK_SIZE);
+}