/*
- * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/domain.h>
+#include <sys/stat.h>
+#include <sys/ubc.h>
+#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/queue.h>
#include <sys/mcache.h>
+#include <sys/priv.h>
#include <sys/protosw.h>
#include <sys/kernel.h>
#include <kern/locks.h>
#include <net/if.h>
#include <net/route.h>
#include <net/ntstat.h>
+#include <net/nwk_wq.h>
+#if NECP
+#include <net/necp.h>
+#endif /* NECP */
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
+#include <netinet/in_arp.h>
#if INET6
#include <netinet6/ip6_var.h>
extern void kdp_set_gateway_mac(void *gatewaymac);
-__private_extern__ struct rtstat rtstat = { 0, 0, 0, 0, 0 };
+__private_extern__ struct rtstat rtstat = { 0, 0, 0, 0, 0, 0 };
struct radix_node_head *rt_tables[AF_MAX+1];
decl_lck_mtx_data(, rnh_lock_data); /* global routing tables mutex */
int rttrash = 0; /* routes not in table but not freed */
-unsigned int rte_debug;
+unsigned int rte_debug = 0;
/* Possible flags for rte_debug */
#define RTD_DEBUG 0x1 /* enable or disable rtentry debug facility */
static void rte_if_ref(struct ifnet *, int);
static void rt_set_idleref(struct rtentry *);
static void rt_clear_idleref(struct rtentry *);
+static void route_event_callback(void *);
static void rt_str4(struct rtentry *, char *, uint32_t, char *, uint32_t);
#if INET6
static void rt_str6(struct rtentry *, char *, uint32_t, char *, uint32_t);
#define RN(r) ((struct radix_node *)r)
#define RT_HOST(r) (RT(r)->rt_flags & RTF_HOST)
+unsigned int rt_verbose = 0;
+#if (DEVELOPMENT || DEBUG)
SYSCTL_DECL(_net_route);
-
-unsigned int rt_verbose; /* verbosity level (0 to disable) */
SYSCTL_UINT(_net_route, OID_AUTO, verbose, CTLFLAG_RW | CTLFLAG_LOCKED,
&rt_verbose, 0, "");
+#endif /* (DEVELOPMENT || DEBUG) */
static void
rtable_init(void **table)
#if INET6
_CASSERT(offsetof(struct route, ro_rt) ==
offsetof(struct route_in6, ro_rt));
+ _CASSERT(offsetof(struct route, ro_lle) ==
+ offsetof(struct route_in6, ro_lle));
_CASSERT(offsetof(struct route, ro_srcia) ==
offsetof(struct route_in6, ro_srcia));
_CASSERT(offsetof(struct route, ro_flags) ==
}
break;
}
+ case RTAX_GATEWAY: {
+ /*
+ * Break if the gateway is not AF_LINK type (indirect routes)
+ *
+ * Else, if it is, check if it is resolved. If not yet resolved
+ * simply break else scrub the link layer address.
+ */
+ if ((sa->sa_family != AF_LINK) || (SDL(sa)->sdl_alen == 0))
+ break;
+ /* fallthrough */
+ }
case RTAX_IFP: {
if (sa->sa_family == AF_LINK && credp) {
struct sockaddr_dl *sdl = SDL(buf);
void
rtalloc_ign(struct route *ro, uint32_t ignore)
{
- lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
lck_mtx_lock(rnh_lock);
rtalloc_ign_common_locked(ro, ignore, IFSCOPE_NONE);
lck_mtx_unlock(rnh_lock);
void
rtalloc_scoped_ign(struct route *ro, uint32_t ignore, unsigned int ifscope)
{
- lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
lck_mtx_lock(rnh_lock);
rtalloc_ign_common_locked(ro, ignore, ifscope);
lck_mtx_unlock(rnh_lock);
return (rtalloc1_common_locked(dst, report, ignflags, ifscope));
}
-/*
- * Look up the route that matches the address given
- * Or, at least try.. Create a cloned route if needed.
- */
-static struct rtentry *
+struct rtentry *
rtalloc1_common_locked(struct sockaddr *dst, int report, uint32_t ignflags,
unsigned int ifscope)
{
* Which basically means "cant get there from here"
*/
rtstat.rts_unreach++;
+
miss:
if (report) {
/*
rtalloc1(struct sockaddr *dst, int report, uint32_t ignflags)
{
struct rtentry *entry;
- lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
lck_mtx_lock(rnh_lock);
entry = rtalloc1_locked(dst, report, ignflags);
lck_mtx_unlock(rnh_lock);
unsigned int ifscope)
{
struct rtentry *entry;
- lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
lck_mtx_lock(rnh_lock);
entry = rtalloc1_scoped_locked(dst, report, ignflags, ifscope);
lck_mtx_unlock(rnh_lock);
{
struct radix_node_head *rnh;
- lck_mtx_assert(rnh_lock, locked ?
+ LCK_MTX_ASSERT(rnh_lock, locked ?
LCK_MTX_ASSERT_OWNED : LCK_MTX_ASSERT_NOTOWNED);
/*
*/
RT_CONVERT_LOCK(rt);
- lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
/* Negative refcnt must never happen */
if (rt->rt_refcnt != 0) {
struct rtentry *rt_parent;
struct ifaddr *rt_ifa;
+ rt->rt_flags |= RTF_DEAD;
if (rt->rt_nodes->rn_flags & (RNF_ACTIVE | RNF_ROOT)) {
panic("rt %p freed while in radix tree\n", rt);
/* NOTREACHED */
rt->rt_llinfo = NULL;
}
+ /* Destroy eventhandler lists context */
+ eventhandler_lists_ctxt_destroy(&rt->rt_evhdlr_ctxt);
+
/*
* Route is no longer in the tree and refcnt is 0;
* we have exclusive access, so destroy it.
*/
RT_UNLOCK(rt);
+ rte_lock_destroy(rt);
if (rt_parent != NULL)
rtfree_locked(rt_parent);
/*
* and the rtentry itself of course
*/
- rte_lock_destroy(rt);
rte_free(rt);
} else {
/*
{
RT_LOCK_ASSERT_HELD(p);
+ VERIFY((p->rt_flags & RTF_DEAD) == 0);
if (++p->rt_refcnt == 0) {
panic("%s(%p) bad refcnt\n", __func__, p);
/* NOTREACHED */
void
rtsetifa(struct rtentry *rt, struct ifaddr *ifa)
{
- lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
RT_LOCK_ASSERT_HELD(rt);
struct sockaddr_storage ss;
int af = src->sa_family;
- lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
lck_mtx_lock(rnh_lock);
/*
* comparison against rt_gateway below.
*/
#if INET6
- if ((af == AF_INET && ip_doscopedroute) ||
- (af == AF_INET6 && ip6_doscopedroute))
+ if ((af == AF_INET) || (af == AF_INET6))
#else
- if (af == AF_INET && ip_doscopedroute)
+ if (af == AF_INET)
#endif /* !INET6 */
src = sa_copy(src, &ss, &ifscope);
done:
if (rt != NULL) {
RT_LOCK_ASSERT_NOTHELD(rt);
- if (rtp && !error)
- *rtp = rt;
+ if (!error) {
+ /* Enqueue event to refresh flow route entries */
+ route_event_enqueue_nwk_wq_entry(rt, NULL, ROUTE_ENTRY_REFRESH, NULL, FALSE);
+ if (rtp)
+ *rtp = rt;
+ else
+ rtfree_locked(rt);
+ }
else
rtfree_locked(rt);
}
struct rtentry *rt = NULL;
struct sockaddr_storage dst_ss, gw_ss;
- lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
/*
* Just in case the sockaddr passed in by the caller
*/
#if INET6
if (dst != NULL &&
- ((dst->sa_family == AF_INET && ip_doscopedroute) ||
- (dst->sa_family == AF_INET6 && ip6_doscopedroute)))
+ ((dst->sa_family == AF_INET) ||
+ (dst->sa_family == AF_INET6)))
#else
- if (dst != NULL && dst->sa_family == AF_INET && ip_doscopedroute)
+ if (dst != NULL && dst->sa_family == AF_INET)
#endif /* !INET6 */
dst = sa_copy(SA((uintptr_t)dst), &dst_ss, NULL);
#if INET6
if (gw != NULL &&
- ((gw->sa_family == AF_INET && ip_doscopedroute) ||
- (gw->sa_family == AF_INET6 && ip6_doscopedroute)))
+ ((gw->sa_family == AF_INET) ||
+ (gw->sa_family == AF_INET6)))
#else
- if (gw != NULL && gw->sa_family == AF_INET && ip_doscopedroute)
+ if (gw != NULL && gw->sa_family == AF_INET)
#endif /* !INET6 */
gw = sa_copy(SA((uintptr_t)gw), &gw_ss, NULL);
#define senderr(x) { error = x; goto bad; }
- lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
/*
* Find the correct routing tree to use for this Address Family
*/
* routing socket request.
*/
#if INET6
- if (req != RTM_RESOLVE &&
- ((af == AF_INET && ip_doscopedroute) ||
- (af == AF_INET6 && ip6_doscopedroute))) {
+ if (req != RTM_RESOLVE && ((af == AF_INET) || (af == AF_INET6))) {
#else
- if (req != RTM_RESOLVE && af == AF_INET && ip_doscopedroute) {
+ if (req != RTM_RESOLVE && af == AF_INET) {
#endif /* !INET6 */
/* Transform dst into the internal routing table form */
dst = sa_copy(dst, &ss, &ifscope);
if (ifscope != IFSCOPE_NONE)
flags |= RTF_IFSCOPE;
- } else {
- if ((flags & RTF_IFSCOPE) && (af != AF_INET && af != AF_INET6))
- senderr(EINVAL);
-
-#if INET6
- if ((af == AF_INET && !ip_doscopedroute) ||
- (af == AF_INET6 && !ip6_doscopedroute))
-#else
- if (af == AF_INET && !ip_doscopedroute)
-#endif /* !INET6 */
- ifscope = IFSCOPE_NONE;
+ } else if ((flags & RTF_IFSCOPE) &&
+ (af != AF_INET && af != AF_INET6)) {
+ senderr(EINVAL);
}
if (ifscope == IFSCOPE_NONE)
switch (req) {
case RTM_DELETE: {
struct rtentry *gwrt = NULL;
+ boolean_t was_router = FALSE;
+ uint32_t old_rt_refcnt = 0;
/*
* Remove the item from the tree and return it.
* Complain if it is not there and do no more processing.
rt = (struct rtentry *)rn;
RT_LOCK(rt);
+ old_rt_refcnt = rt->rt_refcnt;
rt->rt_flags &= ~RTF_UP;
/*
* Release any idle reference count held on the interface
* Clear RTF_ROUTER if it's set.
*/
if (rt->rt_flags & RTF_ROUTER) {
+ was_router = TRUE;
VERIFY(rt->rt_flags & RTF_HOST);
rt->rt_flags &= ~RTF_ROUTER;
}
+ /*
+ * Enqueue work item to invoke callback for this route entry
+ *
+ * If the old count is 0, it implies that last reference is being
+ * removed and there's no one listening for this route event.
+ */
+ if (old_rt_refcnt != 0)
+ route_event_enqueue_nwk_wq_entry(rt, NULL,
+ ROUTE_ENTRY_DELETED, NULL, TRUE);
+
/*
* Now search what's left of the subtree for any cloned
* routes which might have been formed from this node.
RT_LOCK(rt);
}
+ if (was_router) {
+ struct route_event rt_ev;
+ route_event_init(&rt_ev, rt, NULL, ROUTE_LLENTRY_DELETED);
+ RT_UNLOCK(rt);
+ (void) rnh->rnh_walktree(rnh,
+ route_event_walktree, (void *)&rt_ev);
+ RT_LOCK(rt);
+ }
+
/*
* Remove any external references we may have.
*/
IFSCOPE_NONE);
}
+#if NECP
+ /*
+ * If this is a change in a default route, update
+ * necp client watchers to re-evaluate
+ */
+ if (SA_DEFAULT(rt_key(rt))) {
+ necp_update_all_clients();
+ }
+#endif /* NECP */
+
RT_UNLOCK(rt);
/*
senderr(EINVAL);
/*
* According to the UNIX conformance tests, we need to return
- * ENETUNREACH when the parent route is RTF_REJECT.
+ * ENETUNREACH when the parent route is RTF_REJECT.
* However, there isn't any point in cloning RTF_REJECT
* routes, so we immediately return an error.
*/
flags |= RTF_HOST;
#if INET6
- if ((af != AF_INET && af != AF_INET6) ||
- (af == AF_INET && !ip_doscopedroute) ||
- (af == AF_INET6 && !ip6_doscopedroute))
+ if (af != AF_INET && af != AF_INET6)
#else
- if (af != AF_INET || !ip_doscopedroute)
+ if (af != AF_INET)
#endif /* !INET6 */
goto makeroute;
if (ifa == NULL)
senderr(ENETUNREACH);
makeroute:
+ /*
+ * We land up here for both RTM_RESOLVE and RTM_ADD
+ * when we decide to create a route.
+ */
if ((rt = rte_alloc()) == NULL)
senderr(ENOBUFS);
Bzero(rt, sizeof(*rt));
rte_lock_init(rt);
+ eventhandler_lists_ctxt_init(&rt->rt_evhdlr_ctxt);
getmicrotime(&caltime);
rt->base_calendartime = caltime.tv_sec;
rt->base_uptime = net_uptime();
rt->rt_ifp->if_index);
}
+#if NECP
+ /*
+ * If this is a change in a default route, update
+ * necp client watchers to re-evaluate
+ */
+ if (SA_DEFAULT(rt_key(rt))) {
+ necp_update_all_clients();
+ }
+#endif /* NECP */
+
/*
* actually return a resultant rtentry and
* give the caller a single reference.
struct sockaddr *netmask, int flags, struct rtentry **ret_nrt)
{
int error;
- lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
lck_mtx_lock(rnh_lock);
error = rtrequest_locked(req, dst, gateway, netmask, flags, ret_nrt);
lck_mtx_unlock(rnh_lock);
unsigned int ifscope)
{
int error;
- lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
lck_mtx_lock(rnh_lock);
error = rtrequest_scoped_locked(req, dst, gateway, netmask, flags,
ret_nrt, ifscope);
struct rtentry *rt = (struct rtentry *)rn;
struct rtentry *rt0 = vp;
- lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
RT_LOCK(rt);
if (rt->rt_parent == rt0 &&
u_char *xk1, *xm1, *xk2, *xmp;
int i, len;
- lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
RT_LOCK(rt);
}
rnh = rt_tables[dst->sa_family];
- lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
RT_LOCK_ASSERT_HELD(rt);
/*
rt->rt_ifp->if_index);
}
+#if NECP
+ /*
+ * If this is a change in a default route, update
+ * necp client watchers to re-evaluate
+ */
+ if (SA_DEFAULT(dst)) {
+ necp_update_all_clients();
+ }
+#endif /* NECP */
+
/*
* Tell the kernel debugger about the new default gateway
* if the gateway route uses the primary interface, or
{
boolean_t gwrt_isrouter;
- lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
RT_LOCK_ASSERT_HELD(rt);
if (gwrt != NULL)
rt_lookup_common(boolean_t lookup_only, boolean_t coarse, struct sockaddr *dst,
struct sockaddr *netmask, struct radix_node_head *rnh, unsigned int ifscope)
{
- struct radix_node *rn0, *rn;
- boolean_t dontcare;
+ struct radix_node *rn0, *rn = NULL;
int af = dst->sa_family;
- struct sockaddr_storage dst_ss, mask_ss;
- char s_dst[MAX_IPv6_STR_LEN], s_netmask[MAX_IPv6_STR_LEN];
+ struct sockaddr_storage dst_ss;
+ struct sockaddr_storage mask_ss;
+ boolean_t dontcare;
+#if (DEVELOPMENT || DEBUG)
char dbuf[MAX_SCOPE_ADDR_STR_LEN], gbuf[MAX_IPv6_STR_LEN];
-
+ char s_dst[MAX_IPv6_STR_LEN], s_netmask[MAX_IPv6_STR_LEN];
+#endif
VERIFY(!coarse || ifscope == IFSCOPE_NONE);
- lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
#if INET6
/*
* While we have rnh_lock held, see if we need to schedule the timer.
* Non-scoped route lookup.
*/
#if INET6
- if ((af != AF_INET && af != AF_INET6) ||
- (af == AF_INET && !ip_doscopedroute) ||
- (af == AF_INET6 && !ip6_doscopedroute)) {
+ if (af != AF_INET && af != AF_INET6) {
#else
- if (af != AF_INET || !ip_doscopedroute) {
+ if (af != AF_INET) {
#endif /* !INET6 */
rn = rnh->rnh_matchaddr(dst, rnh);
netmask = ma_copy(af, netmask, &mask_ss, ifscope);
dontcare = (ifscope == IFSCOPE_NONE);
+#if (DEVELOPMENT || DEBUG)
if (rt_verbose) {
if (af == AF_INET)
(void) inet_ntop(af, &SIN(dst)->sin_addr.s_addr,
printf("%s (%d, %d, %s, %s, %u)\n",
__func__, lookup_only, coarse, s_dst, s_netmask, ifscope);
}
+#endif
/*
* Scoped route lookup:
*/
if (rn != NULL) {
struct rtentry *rt = RT(rn);
-
+#if (DEVELOPMENT || DEBUG)
if (rt_verbose) {
rt_str(rt, dbuf, sizeof (dbuf), gbuf, sizeof (gbuf));
printf("%s unscoped search %p to %s->%s->%s ifa_ifp %s\n",
(rt->rt_ifa->ifa_ifp != NULL) ?
rt->rt_ifa->ifa_ifp->if_xname : "");
}
- if (!(rt->rt_ifp->if_flags & IFF_LOOPBACK)) {
+#endif
+ if (!(rt->rt_ifp->if_flags & IFF_LOOPBACK) ||
+ (rt->rt_flags & RTF_GATEWAY)) {
if (rt->rt_ifp->if_index != ifscope) {
/*
* Wrong interface; keep the original result
*/
if (rn == NULL) {
rn = node_lookup(dst, netmask, ifscope);
-
+#if (DEVELOPMENT || DEBUG)
if (rt_verbose && rn != NULL) {
struct rtentry *rt = RT(rn);
(rt->rt_ifa->ifa_ifp != NULL) ?
rt->rt_ifa->ifa_ifp->if_xname : "");
}
+#endif
}
/*
* Use the original result if either of the following is true:
rn = NULL;
}
}
-
+#if (DEVELOPMENT || DEBUG)
if (rt_verbose) {
if (rn == NULL)
printf("%s %u return NULL\n", __func__, ifscope);
rt->rt_ifa->ifa_ifp->if_xname : "");
}
}
-
+#endif
return (RT(rn));
}
{
int error;
- lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
lck_mtx_lock(rnh_lock);
error = rtinit_locked(ifa, cmd, flags);
{
struct radix_node_head *rnh;
uint8_t nbuf[128]; /* long enough for IPv6 */
+#if (DEVELOPMENT || DEBUG)
char dbuf[MAX_IPv6_STR_LEN], gbuf[MAX_IPv6_STR_LEN];
char abuf[MAX_IPv6_STR_LEN];
+#endif
struct rtentry *rt = NULL;
struct sockaddr *dst;
struct sockaddr *netmask;
* changing (e.g. in_ifinit), so it is safe to access its
* ifa_{dst}addr (here and down below) without locking.
*/
- lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
if (flags & RTF_HOST) {
dst = ifa->ifa_dstaddr;
goto done;
}
+#if (DEVELOPMENT || DEBUG)
if (dst->sa_family == AF_INET) {
(void) inet_ntop(AF_INET, &SIN(dst)->sin_addr.s_addr,
abuf, sizeof (abuf));
abuf, sizeof (abuf));
}
#endif /* INET6 */
+#endif /* (DEVELOPMENT || DEBUG) */
if ((rnh = rt_tables[dst->sa_family]) == NULL) {
error = EINVAL;
*/
rt = rt_lookup_coarse(TRUE, dst, NULL, rnh);
if (rt != NULL) {
+#if (DEVELOPMENT || DEBUG)
rt_str(rt, dbuf, sizeof (dbuf), gbuf, sizeof (gbuf));
+#endif
/*
* Ok so we found the rtentry. it has an extra reference
* for us at this stage. we won't need that so
* an error. This seems to be the only point
* of this whole RTM_DELETE clause.
*/
+#if (DEVELOPMENT || DEBUG)
if (rt_verbose) {
log(LOG_DEBUG, "%s: not removing "
"route to %s->%s->%s, flags %b, "
rt->rt_ifa),
(uint64_t)VM_KERNEL_ADDRPERM(ifa));
}
+#endif /* (DEVELOPMENT || DEBUG) */
RT_REMREF_LOCKED(rt);
RT_UNLOCK(rt);
rt = NULL;
* Don't remove the subnet/prefix route if
* this was manually added from above.
*/
+#if (DEVELOPMENT || DEBUG)
if (rt_verbose) {
log(LOG_DEBUG, "%s: not removing "
"static route to %s->%s->%s, "
rt->rt_ifp->if_xname : ""),
rt->rt_flags, RTF_BITS, abuf);
}
+#endif /* (DEVELOPMENT || DEBUG) */
RT_REMREF_LOCKED(rt);
RT_UNLOCK(rt);
rt = NULL;
error = EBUSY;
goto done;
}
+#if (DEVELOPMENT || DEBUG)
if (rt_verbose) {
log(LOG_DEBUG, "%s: removing route to "
"%s->%s->%s, flags %b, ifaddr %s\n",
rt->rt_ifp->if_xname : ""),
rt->rt_flags, RTF_BITS, abuf);
}
+#endif /* (DEVELOPMENT || DEBUG) */
RT_REMREF_LOCKED(rt);
RT_UNLOCK(rt);
rt = NULL;
goto done;
VERIFY(rt != NULL);
-
+#if (DEVELOPMENT || DEBUG)
rt_str(rt, dbuf, sizeof (dbuf), gbuf, sizeof (gbuf));
-
+#endif /* (DEVELOPMENT || DEBUG) */
switch (cmd) {
case RTM_DELETE:
/*
RT_LOCK(rt);
rt_newaddrmsg(cmd, ifa, error, rt);
RT_UNLOCK(rt);
+#if (DEVELOPMENT || DEBUG)
if (rt_verbose) {
log(LOG_DEBUG, "%s: removed route to %s->%s->%s, "
"flags %b, ifaddr %s\n", __func__, dbuf, gbuf,
((rt->rt_ifp != NULL) ? rt->rt_ifp->if_xname : ""),
rt->rt_flags, RTF_BITS, abuf);
}
+#endif /* (DEVELOPMENT || DEBUG) */
rtfree_locked(rt);
break;
if (rt->rt_ifa != ifa) {
void (*ifa_rtrequest)
(int, struct rtentry *, struct sockaddr *);
-
- if (!(rt->rt_ifa->ifa_ifp->if_flags &
- (IFF_POINTOPOINT|IFF_LOOPBACK))) {
- log(LOG_ERR, "%s: %s route to %s->%s->%s, "
- "flags %b, ifaddr %s, rt_ifa 0x%llx != "
- "ifa 0x%llx\n", __func__, rtm2str(cmd),
- dbuf, gbuf, ((rt->rt_ifp != NULL) ?
- rt->rt_ifp->if_xname : ""), rt->rt_flags,
- RTF_BITS, abuf,
- (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_ifa),
- (uint64_t)VM_KERNEL_ADDRPERM(ifa));
- }
-
+#if (DEVELOPMENT || DEBUG)
if (rt_verbose) {
+ if (!(rt->rt_ifa->ifa_ifp->if_flags &
+ (IFF_POINTOPOINT|IFF_LOOPBACK))) {
+ log(LOG_ERR, "%s: %s route to %s->%s->%s, "
+ "flags %b, ifaddr %s, rt_ifa 0x%llx != "
+ "ifa 0x%llx\n", __func__, rtm2str(cmd),
+ dbuf, gbuf, ((rt->rt_ifp != NULL) ?
+ rt->rt_ifp->if_xname : ""), rt->rt_flags,
+ RTF_BITS, abuf,
+ (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_ifa),
+ (uint64_t)VM_KERNEL_ADDRPERM(ifa));
+ }
+
log(LOG_DEBUG, "%s: %s route to %s->%s->%s, "
"flags %b, ifaddr %s, rt_ifa was 0x%llx "
"now 0x%llx\n", __func__, rtm2str(cmd),
(uint64_t)VM_KERNEL_ADDRPERM(rt->rt_ifa),
(uint64_t)VM_KERNEL_ADDRPERM(ifa));
}
+#endif /* (DEVELOPMENT || DEBUG) */
/*
* Ask that the protocol in question
if (ifa_rtrequest != NULL)
ifa_rtrequest(RTM_ADD, rt, NULL);
} else {
+#if (DEVELOPMENT || DEBUG)
if (rt_verbose) {
log(LOG_DEBUG, "%s: added route to %s->%s->%s, "
"flags %b, ifaddr %s\n", __func__, dbuf,
rt->rt_ifp->if_xname : ""), rt->rt_flags,
RTF_BITS, abuf);
}
+#endif /* (DEVELOPMENT || DEBUG) */
}
/*
 * notify any listening routing agents of the change
ev_msg.dv[0].data_length = sizeof (struct net_event_data);
ev_msg.dv[0].data_ptr = &ev_data;
- kev_post_msg(&ev_msg);
+ dlil_post_complete_msg(NULL, &ev_msg);
}
}
if (dst->ro_rt != NULL)
RT_ADDREF(dst->ro_rt);
+ /* Hold one reference for the local copy of struct lle */
+ if (dst->ro_lle != NULL)
+ LLE_ADDREF(dst->ro_lle);
+
/* Hold one reference for the local copy of struct ifaddr */
if (dst->ro_srcia != NULL)
IFA_ADDREF(dst->ro_srcia);
+/*
+ * route_copyin:
+ * Move the cached route state (ro_rt, ro_lle, ro_srcia, ro_flags) from
+ * src into dst, copying at most "length" bytes.  This function consumes
+ * the references held by src: each one is either transferred to dst or
+ * released here (when dst already holds a reference on the same object),
+ * and src's pointers are cleared before returning.
+ */
void
route_copyin(struct route *src, struct route *dst, size_t length)
{
-	/* No cached route at the destination? */
+	/*
+	 * No cached route at the destination?
+	 * If none, then remove old references if present
+	 * and copy entire src route.
+	 */
	if (dst->ro_rt == NULL) {
+		/*
+		 * Ditch the cached link layer reference (dst)
+		 * since we're about to take everything there is in src
+		 */
+		if (dst->ro_lle != NULL)
+			LLE_REMREF(dst->ro_lle);
		/*
		 * Ditch the address in the cached copy (dst) since
		 * we're about to take everything there is in src.
		if (dst->ro_srcia != NULL)
			IFA_REMREF(dst->ro_srcia);
		/*
-		 * Copy everything (rt, srcia, flags, dst) from src; the
+		 * Copy everything (rt, ro_lle, srcia, flags, dst) from src; the
		 * references to rt and/or srcia were held at the time
		 * of storage and are kept intact.
		 */
		bcopy(src, dst, length);
-	} else if (src->ro_rt != NULL) {
-		/*
-		 * If the same, update srcia and flags, and ditch the route
-		 * in the local copy.  Else ditch the one that is currently
-		 * cached, and cache the new route.
-		 */
-		if (dst->ro_rt == src->ro_rt) {
-			dst->ro_flags = src->ro_flags;
-			if (dst->ro_srcia != src->ro_srcia) {
-				if (dst->ro_srcia != NULL)
-					IFA_REMREF(dst->ro_srcia);
-				dst->ro_srcia = src->ro_srcia;
-			} else if (src->ro_srcia != NULL) {
-				IFA_REMREF(src->ro_srcia);
-			}
-			rtfree(src->ro_rt);
-		} else {
-			rtfree(dst->ro_rt);
+		goto done;
+	}
+
+	/*
+	 * We know dst->ro_rt is not NULL here.
+	 * If the src->ro_rt is the same, update ro_lle, srcia and flags
+	 * and ditch the route in the local copy.
+	 */
+	if (dst->ro_rt == src->ro_rt) {
+		dst->ro_flags = src->ro_flags;
+
+		if (dst->ro_lle != src->ro_lle) {
+			if (dst->ro_lle != NULL)
+				LLE_REMREF(dst->ro_lle);
+			dst->ro_lle = src->ro_lle;
+		} else if (src->ro_lle != NULL) {
+			LLE_REMREF(src->ro_lle);
+		}
+
+		if (dst->ro_srcia != src->ro_srcia) {
			if (dst->ro_srcia != NULL)
				IFA_REMREF(dst->ro_srcia);
-			bcopy(src, dst, length);
+			dst->ro_srcia = src->ro_srcia;
+		} else if (src->ro_srcia != NULL) {
+			IFA_REMREF(src->ro_srcia);
		}
-	} else if (src->ro_srcia != NULL) {
+		rtfree(src->ro_rt);
+		goto done;
+	}
+
+	/*
+	 * If dst's ro_rt is not equal to src's,
+	 * and src's rt is not NULL, then remove old references
+	 * if present and copy entire src route.
+	 */
+	if (src->ro_rt != NULL) {
+		rtfree(dst->ro_rt);
+
+		if (dst->ro_lle != NULL)
+			LLE_REMREF(dst->ro_lle);
+		if (dst->ro_srcia != NULL)
+			IFA_REMREF(dst->ro_srcia);
+		bcopy(src, dst, length);
+		goto done;
+	}
+
+	/*
+	 * Here, dst's cached route is not NULL but source's is.
+	 * Just get rid of all the other cached references in src.
+	 */
+	if (src->ro_srcia != NULL) {
		/*
		 * Ditch src address in the local copy (src) since we're
		 * not caching the route entry anyway (ro_rt is NULL).
		 */
		IFA_REMREF(src->ro_srcia);
	}
-
+	if (src->ro_lle != NULL) {
+		/*
+		 * Ditch cache lle in the local copy (src) since we're
+		 * not caching the route anyway (ro_rt is NULL).
+		 */
+		LLE_REMREF(src->ro_lle);
+	}
+done:
	/* This function consumes the references on src */
+	src->ro_lle = NULL;
	src->ro_rt = NULL;
	src->ro_srcia = NULL;
}
break;
}
}
+
+/*
+ * Populate a caller-supplied route_event descriptor with the affected
+ * route (rt), its gateway route if any (gwrt), and the event code.
+ * The structure is zeroed first; no references are taken here — the
+ * enqueue path (route_event_enqueue_nwk_wq_entry) manages refcounts.
+ */
+void route_event_init(struct route_event *p_route_ev, struct rtentry *rt,
+    struct rtentry *gwrt, int route_ev_code)
+{
+	VERIFY(p_route_ev != NULL);
+	bzero(p_route_ev, sizeof(*p_route_ev));
+
+	p_route_ev->rt = rt;
+	p_route_ev->gwrt = gwrt;
+	p_route_ev->route_event_code = route_ev_code;
+}
+
+/*
+ * Network work-queue callback for a queued route event (enqueued by
+ * route_event_enqueue_nwk_wq_entry).  For ROUTE_EVHDLR_DEREGISTER it
+ * tears down the event-handler registration identified by evtag;
+ * otherwise it invokes every handler registered on the route's
+ * eventhandler context.  In both cases the rtentry reference held by
+ * the enqueuer (or by the registration) is dropped here.
+ */
+static void
+route_event_callback(void *arg)
+{
+	struct route_event *p_rt_ev = (struct route_event *)arg;
+	struct rtentry *rt = p_rt_ev->rt;
+	eventhandler_tag evtag = p_rt_ev->evtag;
+	int route_ev_code = p_rt_ev->route_event_code;
+
+	if (route_ev_code == ROUTE_EVHDLR_DEREGISTER) {
+		VERIFY(evtag != NULL);
+		EVENTHANDLER_DEREGISTER(&rt->rt_evhdlr_ctxt, route_event,
+		    evtag);
+		/* Drop the reference the registration was holding */
+		rtfree(rt);
+		return;
+	}
+
+	/* rt_addr is the snapshot taken at enqueue time, not live state */
+	EVENTHANDLER_INVOKE(&rt->rt_evhdlr_ctxt, route_event, rt_key(rt),
+	    route_ev_code, (struct sockaddr *)&p_rt_ev->rt_addr,
+	    rt->rt_flags);
+
+	/* The code enqueuing the route event held a reference */
+	rtfree(rt);
+	/* XXX No reference is taken on gwrt */
+}
+
+/*
+ * rnh_walktree() callback: for every indirect (RTF_GATEWAY) route whose
+ * gateway route (rt_gwroute) matches p_route_ev->rt, enqueue the event
+ * carried in arg so listeners on the dependent route are notified.
+ * NOTE(review): in the route_event passed as arg, the "rt" field holds
+ * the *gateway* route being walked against.  Always returns 0 so the
+ * radix-tree walk continues over all entries.
+ */
+int
+route_event_walktree(struct radix_node *rn, void *arg)
+{
+	struct route_event *p_route_ev = (struct route_event *)arg;
+	struct rtentry *rt = (struct rtentry *)rn;
+	struct rtentry *gwrt = p_route_ev->rt;
+
+	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
+
+	RT_LOCK(rt);
+
+	/* Return if the entry is pending cleanup */
+	if (rt->rt_flags & RTPRF_OURS) {
+		RT_UNLOCK(rt);
+		return (0);
+	}
+
+	/* Return if it is not an indirect route */
+	if (!(rt->rt_flags & RTF_GATEWAY)) {
+		RT_UNLOCK(rt);
+		return (0);
+	}
+
+	/* Skip routes resolved through some other gateway route */
+	if (rt->rt_gwroute != gwrt) {
+		RT_UNLOCK(rt);
+		return (0);
+	}
+
+	/* rt is locked, hence rt_locked == TRUE */
+	route_event_enqueue_nwk_wq_entry(rt, gwrt, p_route_ev->route_event_code,
+	    NULL, TRUE);
+	RT_UNLOCK(rt);
+
+	return (0);
+}
+
+/*
+ * Work-queue wrapper: the generic nwk_wq_entry header followed by the
+ * route_event argument that route_event_callback() receives.  Allocated
+ * from M_NWKWQ in route_event_enqueue_nwk_wq_entry().
+ */
+struct route_event_nwk_wq_entry
+{
+	struct nwk_wq_entry nwk_wqe;
+	struct route_event rt_ev_arg;
+};
+
+/*
+ * Allocate a work-queue entry describing (rt, gwrt, route_event_code)
+ * and hand it to the network work queue; route_event_callback() runs it
+ * asynchronously.  Takes a reference on rt that the callback releases,
+ * except for ROUTE_EVHDLR_DEREGISTER where the registration already
+ * holds one.  The gateway address is copied into the entry because the
+ * route may change before the work item executes.  rt_locked tells us
+ * whether the caller already holds RT_LOCK on rt.
+ */
+void
+route_event_enqueue_nwk_wq_entry(struct rtentry *rt, struct rtentry *gwrt,
+    uint32_t route_event_code, eventhandler_tag evtag, boolean_t rt_locked)
+{
+	struct route_event_nwk_wq_entry *p_rt_ev = NULL;
+	struct sockaddr *p_gw_saddr = NULL;
+
+	/* M_WAITOK: blocks until memory is available; M_ZERO: zero-filled */
+	MALLOC(p_rt_ev, struct route_event_nwk_wq_entry *,
+	    sizeof(struct route_event_nwk_wq_entry),
+	    M_NWKWQ, M_WAITOK | M_ZERO);
+
+	/*
+	 * If the intent is to de-register, don't take
+	 * reference, route event registration already takes
+	 * a reference on route.
+	 */
+	if (route_event_code != ROUTE_EVHDLR_DEREGISTER) {
+		/* The reference is released by route_event_callback */
+		if (rt_locked)
+			RT_ADDREF_LOCKED(rt);
+		else
+			RT_ADDREF(rt);
+	}
+
+	p_rt_ev->rt_ev_arg.rt = rt;
+	p_rt_ev->rt_ev_arg.gwrt = gwrt;
+	p_rt_ev->rt_ev_arg.evtag = evtag;
+
+	/* Snapshot the gateway address: gwrt's if given, else rt's own */
+	if (gwrt != NULL)
+		p_gw_saddr = gwrt->rt_gateway;
+	else
+		p_gw_saddr = rt->rt_gateway;
+
+	VERIFY(p_gw_saddr->sa_len <= sizeof(p_rt_ev->rt_ev_arg.rt_addr));
+	bcopy(p_gw_saddr, &(p_rt_ev->rt_ev_arg.rt_addr), p_gw_saddr->sa_len);
+
+	p_rt_ev->rt_ev_arg.route_event_code = route_event_code;
+	p_rt_ev->nwk_wqe.func = route_event_callback;
+	p_rt_ev->nwk_wqe.is_arg_managed = TRUE;
+	p_rt_ev->nwk_wqe.arg = &p_rt_ev->rt_ev_arg;
+	nwk_wq_enqueue((struct nwk_wq_entry*)p_rt_ev);
+}
+
+/*
+ * Map a route event code to its symbolic name, for logging/debugging.
+ * Returns "ROUTE_EVENT_UNKNOWN" for unrecognized codes; never NULL.
+ */
+const char *
+route_event2str(int route_event)
+{
+	const char *route_event_str = "ROUTE_EVENT_UNKNOWN";
+	switch (route_event) {
+	case ROUTE_STATUS_UPDATE:
+		route_event_str = "ROUTE_STATUS_UPDATE";
+		break;
+	case ROUTE_ENTRY_REFRESH:
+		route_event_str = "ROUTE_ENTRY_REFRESH";
+		break;
+	case ROUTE_ENTRY_DELETED:
+		route_event_str = "ROUTE_ENTRY_DELETED";
+		break;
+	case ROUTE_LLENTRY_RESOLVED:
+		route_event_str = "ROUTE_LLENTRY_RESOLVED";
+		break;
+	case ROUTE_LLENTRY_UNREACH:
+		route_event_str = "ROUTE_LLENTRY_UNREACH";
+		break;
+	case ROUTE_LLENTRY_CHANGED:
+		route_event_str = "ROUTE_LLENTRY_CHANGED";
+		break;
+	case ROUTE_LLENTRY_STALE:
+		route_event_str = "ROUTE_LLENTRY_STALE";
+		break;
+	case ROUTE_LLENTRY_TIMEDOUT:
+		route_event_str = "ROUTE_LLENTRY_TIMEDOUT";
+		break;
+	case ROUTE_LLENTRY_DELETED:
+		route_event_str = "ROUTE_LLENTRY_DELETED";
+		break;
+	case ROUTE_LLENTRY_EXPIRED:
+		route_event_str = "ROUTE_LLENTRY_EXPIRED";
+		break;
+	case ROUTE_LLENTRY_PROBED:
+		route_event_str = "ROUTE_LLENTRY_PROBED";
+		break;
+	case ROUTE_EVHDLR_DEREGISTER:
+		route_event_str = "ROUTE_EVHDLR_DEREGISTER";
+		break;
+	default:
+		/* Init'd to ROUTE_EVENT_UNKNOWN */
+		break;
+	}
+	return route_event_str;
+}
+
+/*
+ * Check whether the caller may perform the given route operation on
+ * restricted routing-table state.  Returns 0 if permitted, -1 if not.
+ *
+ * The check runs against the socket's credential when "so" is non-NULL,
+ * otherwise against "cred".  Only ROUTE_OP_READ is currently granted,
+ * via the PRIV_NET_RESTRICTED_ROUTE_NC_READ privilege; every other
+ * operation type falls through to denial.
+ *
+ * NOTE(review): in the cred path, uid == 0 with allow_root == TRUE
+ * skips the privilege check and falls through to return -1 (denied),
+ * which reads inverted relative to the "allow_root" name — confirm
+ * intended behavior against callers.
+ */
+int
+route_op_entitlement_check(struct socket *so,
+    kauth_cred_t cred,
+    int route_op_type,
+    boolean_t allow_root)
+{
+	if (so != NULL) {
+		if (route_op_type == ROUTE_OP_READ) {
+			/*
+			 * If needed we can later extend this for more
+			 * granular entitlements and return a bit set of
+			 * allowed accesses.
+			 */
+			if (soopt_cred_check(so, PRIV_NET_RESTRICTED_ROUTE_NC_READ,
+			    allow_root) == 0)
+				return (0);
+			else
+				return (-1);
+		}
+	} else if (cred != NULL) {
+		uid_t uid = kauth_cred_getuid(cred);
+
+		/* uid is 0 for root */
+		if (uid != 0 || !allow_root) {
+			if (route_op_type == ROUTE_OP_READ) {
+				if (priv_check_cred(cred,
+				    PRIV_NET_RESTRICTED_ROUTE_NC_READ, 0) == 0)
+					return (0);
+				else
+					return (-1);
+			}
+		}
+	}
+	/* Default deny: unknown op type, or no usable credential */
+	return (-1);
+}