/*
- * Copyright (c) 2006 Apple Computer, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. The rights granted to you under the
- * License may not be used to create, or enable the creation or
- * redistribution of, unlawful or unlicensed copies of an Apple operating
- * system, or to circumvent, violate, or enable the circumvention or
- * violation of, any terms of an Apple operating system software license
- * agreement.
- *
- * Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
* limitations under the License.
- *
- * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* Copyright (c) 1988, 1991, 1993
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/syslog.h>
+#include <sys/mcache.h>
#include <kern/lock.h>
#include <net/if.h>
#include <net/route.h>
+#include <net/dlil.h>
#include <net/raw_cb.h>
#include <netinet/in.h>
+#include <netinet/in_var.h>
+#include <netinet/in_arp.h>
+#include <netinet6/nd6.h>
#include <machine/spl.h>
-extern void m_copydata(struct mbuf *, int, int, caddr_t);
-extern void m_copyback(struct mbuf *, int, int, caddr_t);
-
extern struct rtstat rtstat;
-extern int rttrash;
+extern int check_routeselfref;
+extern struct domain routedomain;
MALLOC_DEFINE(M_RTABLE, "routetbl", "routing tables");
-extern lck_mtx_t *rt_mtx;
static struct sockaddr route_dst = { 2, PF_ROUTE, { 0, } };
static struct sockaddr route_src = { 2, PF_ROUTE, { 0, } };
static struct sockaddr sa_zero = { sizeof(sa_zero), AF_INET, { 0, } };
-static struct sockproto route_proto = { PF_ROUTE, 0 };
struct walkarg {
int w_tmemsize;
struct sysctl_req *w_req;
};
-static struct mbuf *
- rt_msg1(int, struct rt_addrinfo *);
+static struct mbuf *rt_msg1(int, struct rt_addrinfo *);
static int rt_msg2(int, struct rt_addrinfo *, caddr_t, struct walkarg *);
static int rt_xaddrs(caddr_t, caddr_t, struct rt_addrinfo *);
static int sysctl_dumpentry(struct radix_node *rn, void *vw);
+static int sysctl_dumpentry_ext(struct radix_node *rn, void *vw);
static int sysctl_iflist(int af, struct walkarg *w);
static int sysctl_iflist2(int af, struct walkarg *w);
-static int route_output(struct mbuf *, struct socket *);
-static void rt_setmetrics(u_long, struct rt_metrics *, struct rt_metrics *);
+static int route_output(struct mbuf *, struct socket *);
+static void rt_setmetrics(u_int32_t, struct rt_metrics *, struct rtentry *);
+static void rt_getmetrics(struct rtentry *, struct rt_metrics *);
static void rt_setif(struct rtentry *, struct sockaddr *, struct sockaddr *,
- struct sockaddr *);
+ struct sockaddr *, unsigned int);
+static void rt_drainall(void);
+
+#ifndef SIN
+#define SIN(sa) ((struct sockaddr_in *)(size_t)(sa))
+#endif
+
+SYSCTL_NODE(_net, OID_AUTO, idle, CTLFLAG_RW|CTLFLAG_LOCKED, 0,
+ "idle network monitoring");
+
+static struct timeval last_ts;
-/* Sleazy use of local variables throughout file, warning!!!! */
-#define dst info.rti_info[RTAX_DST]
-#define gate info.rti_info[RTAX_GATEWAY]
-#define netmask info.rti_info[RTAX_NETMASK]
-#define genmask info.rti_info[RTAX_GENMASK]
-#define ifpaddr info.rti_info[RTAX_IFP]
-#define ifaaddr info.rti_info[RTAX_IFA]
-#define brdaddr info.rti_info[RTAX_BRD]
+SYSCTL_NODE(_net_idle, OID_AUTO, route, CTLFLAG_RW|CTLFLAG_LOCKED, 0,
+ "idle route monitoring");
+
+static int rt_if_idle_drain_interval = RT_IF_IDLE_DRAIN_INTERVAL;
+SYSCTL_INT(_net_idle_route, OID_AUTO, drain_interval, CTLFLAG_RW,
+ &rt_if_idle_drain_interval, 0, "Default interval for draining "
+ "routes when doing interface idle reference counting.");
+
+/*
+ * This macro calculates skew in wall clock, just in case the user changes the
+ * system time. This skew adjustment is required because we now keep the route
+ * expiration times in uptime terms in the kernel, but the userland still
+ * expects expiration times in terms of calendar times.
+ */
+#define CALCULATE_CLOCKSKEW(cc, ic, cu, iu)\
+ ((cc.tv_sec - ic) - (cu - iu))
/*
* It really doesn't make any sense at all for this code to share much
rp = sotorawcb(so);
if (error) {
FREE(rp, M_PCB);
- so->so_pcb = 0;
+ so->so_pcb = NULL;
so->so_flags |= SOF_PCBCLEARING;
return error;
}
static int
rts_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
{
- int s, error;
- s = splnet();
+ int error;
error = raw_usrreqs.pru_bind(so, nam, p); /* xxx just EINVAL */
- splx(s);
return error;
}
static int
rts_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
- int s, error;
- s = splnet();
+ int error;
error = raw_usrreqs.pru_connect(so, nam, p); /* XXX just EINVAL */
- splx(s);
return error;
}
rts_detach(struct socket *so)
{
struct rawcb *rp = sotorawcb(so);
- int s, error;
+ int error;
- s = splnet();
if (rp != 0) {
switch(rp->rcb_proto.sp_protocol) {
case AF_INET:
route_cb.any_count--;
}
error = raw_usrreqs.pru_detach(so);
- splx(s);
return error;
}
static int
rts_disconnect(struct socket *so)
{
- int s, error;
- s = splnet();
+ int error;
error = raw_usrreqs.pru_disconnect(so);
- splx(s);
return error;
}
static int
rts_peeraddr(struct socket *so, struct sockaddr **nam)
{
- int s, error;
- s = splnet();
+ int error;
error = raw_usrreqs.pru_peeraddr(so, nam);
- splx(s);
return error;
}
rts_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
struct mbuf *control, struct proc *p)
{
- int s, error;
- s = splnet();
+ int error;
error = raw_usrreqs.pru_send(so, flags, m, nam, control, p);
- splx(s);
return error;
}
static int
rts_shutdown(struct socket *so)
{
- int s, error;
- s = splnet();
+ int error;
error = raw_usrreqs.pru_shutdown(so);
- splx(s);
return error;
}
static int
rts_sockaddr(struct socket *so, struct sockaddr **nam)
{
- int s, error;
- s = splnet();
+ int error;
error = raw_usrreqs.pru_sockaddr(so, nam);
- splx(s);
return error;
}
/*ARGSUSED*/
static int
-route_output(m, so)
- struct mbuf *m;
- struct socket *so;
+route_output(struct mbuf *m, struct socket *so)
{
- struct rt_msghdr *rtm = 0;
- struct rtentry *rt = 0;
- struct rtentry *saved_nrt = 0;
+ struct rt_msghdr *rtm = NULL;
+ struct rtentry *rt = NULL;
+ struct rtentry *saved_nrt = NULL;
struct radix_node_head *rnh;
struct rt_addrinfo info;
int len, error = 0;
- struct ifnet *ifp = 0;
+ sa_family_t dst_sa_family = 0;
+ struct ifnet *ifp = NULL;
#ifndef __APPLE__
struct proc *curproc = current_proc();
#endif
+ struct sockaddr_in dst_in, gate_in;
int sendonlytoself = 0;
+ unsigned int ifscope = IFSCOPE_NONE;
-#define senderr(e) { error = e; goto flush;}
- if (m == 0 || ((m->m_len < sizeof(long)) && (m = m_pullup(m, sizeof(long))) == 0))
+#define senderr(e) { error = (e); goto flush;}
+ if (m == NULL ||
+ ((m->m_len < sizeof(intptr_t)) && (m = m_pullup(m, sizeof(intptr_t))) == 0))
return (ENOBUFS);
if ((m->m_flags & M_PKTHDR) == 0)
panic("route_output");
/* unlock the socket (but keep a reference) it won't be accessed until raw_input appends to it. */
socket_unlock(so, 0);
- lck_mtx_lock(rt_mtx);
+ lck_mtx_lock(rnh_lock);
len = m->m_pkthdr.len;
if (len < sizeof(*rtm) ||
len != mtod(m, struct rt_msghdr *)->rtm_msglen) {
- dst = 0;
+ info.rti_info[RTAX_DST] = NULL;
senderr(EINVAL);
}
R_Malloc(rtm, struct rt_msghdr *, len);
- if (rtm == 0) {
- dst = 0;
+ if (rtm == NULL) {
+ info.rti_info[RTAX_DST] = NULL;
senderr(ENOBUFS);
}
m_copydata(m, 0, len, (caddr_t)rtm);
if (rtm->rtm_version != RTM_VERSION) {
- dst = 0;
+ info.rti_info[RTAX_DST] = NULL;
senderr(EPROTONOSUPPORT);
}
-
+
/*
* Silent version of RTM_GET for Reachabiltiy APIs. We may change
* all RTM_GETs to be silent in the future, so this is private for now.
sendonlytoself = 1;
rtm->rtm_type = RTM_GET;
}
-
+
/*
* Perform permission checking, only privileged sockets
* may perform operations other than RTM_GET
*/
if (rtm->rtm_type != RTM_GET && (so->so_state & SS_PRIV) == 0) {
- dst = 0;
+ info.rti_info[RTAX_DST] = NULL;
senderr(EPERM);
}
rtm->rtm_pid = proc_selfpid();
info.rti_addrs = rtm->rtm_addrs;
if (rt_xaddrs((caddr_t)(rtm + 1), len + (caddr_t)rtm, &info)) {
- dst = 0;
+ info.rti_info[RTAX_DST] = NULL;
senderr(EINVAL);
}
- if (dst == 0 || (dst->sa_family >= AF_MAX)
- || (gate != 0 && (gate->sa_family >= AF_MAX))) {
+ if (info.rti_info[RTAX_DST] == NULL || (info.rti_info[RTAX_DST]->sa_family >= AF_MAX) ||
+ (info.rti_info[RTAX_GATEWAY] != NULL && (info.rti_info[RTAX_GATEWAY]->sa_family >= AF_MAX))) {
senderr(EINVAL);
}
- if (genmask) {
+
+ if (info.rti_info[RTAX_DST]->sa_family == AF_INET && info.rti_info[RTAX_DST]->sa_len != sizeof (dst_in)) {
+ /* At minimum, we need up to sin_addr */
+ if (info.rti_info[RTAX_DST]->sa_len < offsetof(struct sockaddr_in, sin_zero))
+ senderr(EINVAL);
+ bzero(&dst_in, sizeof (dst_in));
+ dst_in.sin_len = sizeof (dst_in);
+ dst_in.sin_family = AF_INET;
+ dst_in.sin_port = SIN(info.rti_info[RTAX_DST])->sin_port;
+ dst_in.sin_addr = SIN(info.rti_info[RTAX_DST])->sin_addr;
+ info.rti_info[RTAX_DST] = (struct sockaddr *)&dst_in;
+ dst_sa_family = info.rti_info[RTAX_DST]->sa_family;
+ }
+
+ if (info.rti_info[RTAX_GATEWAY] != NULL &&
+ info.rti_info[RTAX_GATEWAY]->sa_family == AF_INET && info.rti_info[RTAX_GATEWAY]->sa_len != sizeof (gate_in)) {
+ /* At minimum, we need up to sin_addr */
+ if (info.rti_info[RTAX_GATEWAY]->sa_len < offsetof(struct sockaddr_in, sin_zero))
+ senderr(EINVAL);
+ bzero(&gate_in, sizeof (gate_in));
+ gate_in.sin_len = sizeof (gate_in);
+ gate_in.sin_family = AF_INET;
+ gate_in.sin_port = SIN(info.rti_info[RTAX_GATEWAY])->sin_port;
+ gate_in.sin_addr = SIN(info.rti_info[RTAX_GATEWAY])->sin_addr;
+ info.rti_info[RTAX_GATEWAY] = (struct sockaddr *)&gate_in;
+ }
+
+ if (info.rti_info[RTAX_GENMASK]) {
struct radix_node *t;
- t = rn_addmask((caddr_t)genmask, 0, 1);
- if (t && Bcmp(genmask, t->rn_key, *(u_char *)genmask) == 0)
- genmask = (struct sockaddr *)(t->rn_key);
+ t = rn_addmask((caddr_t)info.rti_info[RTAX_GENMASK], 0, 1);
+ if (t && Bcmp(info.rti_info[RTAX_GENMASK], t->rn_key, *(u_char *)info.rti_info[RTAX_GENMASK]) == 0)
+ info.rti_info[RTAX_GENMASK] = (struct sockaddr *)(t->rn_key);
else
senderr(ENOBUFS);
}
+
+ /*
+ * If RTF_IFSCOPE flag is set, then rtm_index specifies the scope.
+ */
+ if (rtm->rtm_flags & RTF_IFSCOPE) {
+ if (info.rti_info[RTAX_DST]->sa_family != AF_INET && info.rti_info[RTAX_DST]->sa_family != AF_INET6)
+ senderr(EINVAL);
+ ifscope = rtm->rtm_index;
+ }
+
+ /*
+ * RTF_PROXY can only be set internally from within the kernel.
+ */
+ if (rtm->rtm_flags & RTF_PROXY)
+ senderr(EINVAL);
+
+ /*
+ * For AF_INET, always zero out the embedded scope ID. If this is
+ * a scoped request, it must be done explicitly by setting RTF_IFSCOPE
+ * flag and the corresponding rtm_index value. This is to prevent
+ * false interpretation of the scope ID because it's using the sin_zero
+ * field, which might not be properly cleared by the requestor.
+ */
+ if (info.rti_info[RTAX_DST]->sa_family == AF_INET)
+ sin_set_ifscope(info.rti_info[RTAX_DST], IFSCOPE_NONE);
+ if (info.rti_info[RTAX_GATEWAY] != NULL && info.rti_info[RTAX_GATEWAY]->sa_family == AF_INET)
+ sin_set_ifscope(info.rti_info[RTAX_GATEWAY], IFSCOPE_NONE);
+
switch (rtm->rtm_type) {
-
+
case RTM_ADD:
- if (gate == 0)
+ if (info.rti_info[RTAX_GATEWAY] == NULL)
senderr(EINVAL);
#ifdef __APPLE__
* confusing the routing table with a wrong route to the previous default gateway
*/
{
- extern int check_routeselfref;
-#define satosinaddr(sa) (((struct sockaddr_in *)sa)->sin_addr.s_addr)
+#define satosinaddr(sa) (((struct sockaddr_in *)(void *)sa)->sin_addr.s_addr)
- if (check_routeselfref && (dst && dst->sa_family == AF_INET) &&
- (netmask && satosinaddr(netmask) == INADDR_BROADCAST) &&
- (gate && satosinaddr(dst) == satosinaddr(gate))) {
+ if (check_routeselfref && (info.rti_info[RTAX_DST] && info.rti_info[RTAX_DST]->sa_family == AF_INET) &&
+ (info.rti_info[RTAX_NETMASK] && satosinaddr(info.rti_info[RTAX_NETMASK]) == INADDR_BROADCAST) &&
+ (info.rti_info[RTAX_GATEWAY] && satosinaddr(info.rti_info[RTAX_DST]) == satosinaddr(info.rti_info[RTAX_GATEWAY]))) {
log(LOG_WARNING, "route_output: circular route %ld.%ld.%ld.%ld/32 ignored\n",
- (ntohl(satosinaddr(gate)>>24))&0xff,
- (ntohl(satosinaddr(gate)>>16))&0xff,
- (ntohl(satosinaddr(gate)>>8))&0xff,
- (ntohl(satosinaddr(gate)))&0xff);
+ (ntohl(satosinaddr(info.rti_info[RTAX_GATEWAY])>>24))&0xff,
+ (ntohl(satosinaddr(info.rti_info[RTAX_GATEWAY])>>16))&0xff,
+ (ntohl(satosinaddr(info.rti_info[RTAX_GATEWAY])>>8))&0xff,
+ (ntohl(satosinaddr(info.rti_info[RTAX_GATEWAY])))&0xff);
senderr(EINVAL);
}
}
#endif
- error = rtrequest_locked(RTM_ADD, dst, gate, netmask,
- rtm->rtm_flags, &saved_nrt);
+ error = rtrequest_scoped_locked(RTM_ADD, info.rti_info[RTAX_DST], info.rti_info[RTAX_GATEWAY],
+ info.rti_info[RTAX_NETMASK], rtm->rtm_flags, &saved_nrt, ifscope);
if (error == 0 && saved_nrt) {
+ RT_LOCK(saved_nrt);
#ifdef __APPLE__
/*
* If the route request specified an interface with
* rarely encountered.
* dwiggins@bbn.com
*/
-
- rt_setif(saved_nrt, ifpaddr, ifaaddr, gate);
+
+ rt_setif(saved_nrt, info.rti_info[RTAX_IFP], info.rti_info[RTAX_IFA], info.rti_info[RTAX_GATEWAY],
+ ifscope);
#endif
rt_setmetrics(rtm->rtm_inits,
- &rtm->rtm_rmx, &saved_nrt->rt_rmx);
+ &rtm->rtm_rmx, saved_nrt);
saved_nrt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits);
saved_nrt->rt_rmx.rmx_locks |=
- (rtm->rtm_inits & rtm->rtm_rmx.rmx_locks);
- rtunref(saved_nrt);
- saved_nrt->rt_genmask = genmask;
+ (rtm->rtm_inits & rtm->rtm_rmx.rmx_locks);
+ saved_nrt->rt_genmask = info.rti_info[RTAX_GENMASK];
+ RT_REMREF_LOCKED(saved_nrt);
+ RT_UNLOCK(saved_nrt);
}
break;
case RTM_DELETE:
- error = rtrequest_locked(RTM_DELETE, dst, gate, netmask,
- rtm->rtm_flags, &saved_nrt);
+ error = rtrequest_scoped_locked(RTM_DELETE, info.rti_info[RTAX_DST],
+ info.rti_info[RTAX_GATEWAY], info.rti_info[RTAX_NETMASK], rtm->rtm_flags, &saved_nrt, ifscope);
if (error == 0) {
- if ((rt = saved_nrt))
- rtref(rt);
+ rt = saved_nrt;
+ RT_LOCK(rt);
goto report;
}
break;
case RTM_GET:
case RTM_CHANGE:
case RTM_LOCK:
- if ((rnh = rt_tables[dst->sa_family]) == 0) {
+ if ((rnh = rt_tables[info.rti_info[RTAX_DST]->sa_family]) == NULL)
senderr(EAFNOSUPPORT);
- } else if ((rt = (struct rtentry *)
- rnh->rnh_lookup(dst, netmask, rnh)) != NULL)
- rtref(rt);
- else
+
+ /*
+ * Lookup the best match based on the key-mask pair;
+ * callee adds a reference and checks for root node.
+ */
+ rt = rt_lookup(TRUE, info.rti_info[RTAX_DST], info.rti_info[RTAX_NETMASK], rnh, ifscope);
+ if (rt == NULL)
senderr(ESRCH);
+ RT_LOCK(rt);
+
+ /*
+ * Holding rnh_lock here prevents the possibility of
+ * ifa from changing (e.g. in_ifinit), so it is safe
+ * to access its ifa_addr (down below) without locking.
+ */
switch(rtm->rtm_type) {
case RTM_GET: {
struct ifaddr *ifa2;
report:
- dst = rt_key(rt);
- gate = rt->rt_gateway;
- netmask = rt_mask(rt);
- genmask = rt->rt_genmask;
+ ifa2 = NULL;
+ RT_LOCK_ASSERT_HELD(rt);
+ info.rti_info[RTAX_DST] = rt_key(rt);
+ dst_sa_family = info.rti_info[RTAX_DST]->sa_family;
+ info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
+ info.rti_info[RTAX_NETMASK] = rt_mask(rt);
+ info.rti_info[RTAX_GENMASK] = rt->rt_genmask;
if (rtm->rtm_addrs & (RTA_IFP | RTA_IFA)) {
ifp = rt->rt_ifp;
if (ifp) {
ifnet_lock_shared(ifp);
- ifa2 = ifp->if_addrhead.tqh_first;
- ifpaddr = ifa2->ifa_addr;
+ ifa2 = ifp->if_lladdr;
+ info.rti_info[RTAX_IFP] = ifa2->ifa_addr;
+ IFA_ADDREF(ifa2);
ifnet_lock_done(ifp);
- ifaaddr = rt->rt_ifa->ifa_addr;
+ info.rti_info[RTAX_IFA] = rt->rt_ifa->ifa_addr;
rtm->rtm_index = ifp->if_index;
} else {
- ifpaddr = 0;
- ifaaddr = 0;
+ info.rti_info[RTAX_IFP] = NULL;
+ info.rti_info[RTAX_IFA] = NULL;
}
+ } else if ((ifp = rt->rt_ifp) != NULL) {
+ rtm->rtm_index = ifp->if_index;
}
+ if (ifa2 != NULL)
+ IFA_LOCK(ifa2);
len = rt_msg2(rtm->rtm_type, &info, (caddr_t)0,
(struct walkarg *)0);
+ if (ifa2 != NULL)
+ IFA_UNLOCK(ifa2);
if (len > rtm->rtm_msglen) {
struct rt_msghdr *new_rtm;
R_Malloc(new_rtm, struct rt_msghdr *, len);
if (new_rtm == 0) {
+ RT_UNLOCK(rt);
+ if (ifa2 != NULL)
+ IFA_REMREF(ifa2);
senderr(ENOBUFS);
}
Bcopy(rtm, new_rtm, rtm->rtm_msglen);
R_Free(rtm); rtm = new_rtm;
}
+ if (ifa2 != NULL)
+ IFA_LOCK(ifa2);
(void)rt_msg2(rtm->rtm_type, &info, (caddr_t)rtm,
(struct walkarg *)0);
+ if (ifa2 != NULL)
+ IFA_UNLOCK(ifa2);
rtm->rtm_flags = rt->rt_flags;
- rtm->rtm_rmx = rt->rt_rmx;
+ rt_getmetrics(rt, &rtm->rtm_rmx);
rtm->rtm_addrs = info.rti_addrs;
+ if (ifa2 != NULL)
+ IFA_REMREF(ifa2);
}
break;
case RTM_CHANGE:
- if (gate && (error = rt_setgate(rt, rt_key(rt), gate)))
- senderr(error);
-
+ if (info.rti_info[RTAX_GATEWAY] && (error = rt_setgate(rt,
+ rt_key(rt), info.rti_info[RTAX_GATEWAY]))) {
+ int tmp = error;
+ RT_UNLOCK(rt);
+ senderr(tmp);
+ }
/*
* If they tried to change things but didn't specify
* the required gateway, then just use the old one.
* flags on the default route without changing the
* default gateway. Changing flags still doesn't work.
*/
- if ((rt->rt_flags & RTF_GATEWAY) && !gate)
- gate = rt->rt_gateway;
+ if ((rt->rt_flags & RTF_GATEWAY) && !info.rti_info[RTAX_GATEWAY])
+ info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
#ifdef __APPLE__
/*
* equivalent to the code found at this very spot
* in BSD.
*/
- rt_setif(rt, ifpaddr, ifaaddr, gate);
+ rt_setif(rt, info.rti_info[RTAX_IFP], info.rti_info[RTAX_IFA], info.rti_info[RTAX_GATEWAY],
+ ifscope);
#endif
rt_setmetrics(rtm->rtm_inits, &rtm->rtm_rmx,
- &rt->rt_rmx);
+ rt);
#ifndef __APPLE__
/* rt_setif, called above does this for us on darwin */
if (rt->rt_ifa && rt->rt_ifa->ifa_rtrequest)
- rt->rt_ifa->ifa_rtrequest(RTM_ADD, rt, gate);
+ rt->rt_ifa->ifa_rtrequest(RTM_ADD, rt, info.rti_info[RTAX_GATEWAY]);
#endif
- if (genmask)
- rt->rt_genmask = genmask;
+ if (info.rti_info[RTAX_GENMASK])
+ rt->rt_genmask = info.rti_info[RTAX_GENMASK];
/*
* Fall into
*/
(rtm->rtm_inits & rtm->rtm_rmx.rmx_locks);
break;
}
+ RT_UNLOCK(rt);
break;
default:
else
rtm->rtm_flags |= RTF_DONE;
}
- if (rt)
+ if (rt != NULL) {
+ RT_LOCK_ASSERT_NOTHELD(rt);
rtfree_locked(rt);
- lck_mtx_unlock(rt_mtx);
+ }
+ lck_mtx_unlock(rnh_lock);
socket_lock(so, 0); /* relock the socket now */
{
struct rawcb *rp = 0;
if (error)
return error;
} else {
+ struct sockproto route_proto = {PF_ROUTE, 0};
if (rp)
rp->rcb_proto.sp_family = 0; /* Avoid us */
- if (dst)
- route_proto.sp_protocol = dst->sa_family;
+ if (dst_sa_family != 0)
+ route_proto.sp_protocol = dst_sa_family;
if (m) {
socket_unlock(so, 0);
raw_input(m, &route_proto, &route_src, &route_dst);
return (error);
}
+void
+rt_setexpire(struct rtentry *rt, uint64_t expiry)
+{
+ /* set both rt_expire and rmx_expire */
+ rt->rt_expire = expiry;
+ if (expiry) {
+ rt->rt_rmx.rmx_expire = expiry + rt->base_calendartime -
+ rt->base_uptime;
+ } else
+ rt->rt_rmx.rmx_expire = 0;
+}
+
static void
-rt_setmetrics(which, in, out)
- u_long which;
- struct rt_metrics *in, *out;
+rt_setmetrics(u_int32_t which, struct rt_metrics *in, struct rtentry *out)
{
-#define metric(f, e) if (which & (f)) out->e = in->e;
+ struct timeval curr_calendar_time;
+ uint64_t curr_uptime;
+
+ getmicrotime(&curr_calendar_time);
+ curr_uptime = net_uptime();
+
+#define metric(f, e) if (which & (f)) out->rt_rmx.e = in->e;
metric(RTV_RPIPE, rmx_recvpipe);
metric(RTV_SPIPE, rmx_sendpipe);
metric(RTV_SSTHRESH, rmx_ssthresh);
metric(RTV_MTU, rmx_mtu);
metric(RTV_EXPIRE, rmx_expire);
#undef metric
+
+ if (out->rt_rmx.rmx_expire > 0) {
+ /* account for system time change */
+ curr_uptime = net_uptime();
+ getmicrotime(&curr_calendar_time);
+ out->base_calendartime +=
+ CALCULATE_CLOCKSKEW(curr_calendar_time,
+ out->base_calendartime,
+ curr_uptime, out->base_uptime);
+ rt_setexpire(out,
+ out->rt_rmx.rmx_expire -
+ out->base_calendartime +
+ out->base_uptime);
+ } else {
+ rt_setexpire(out, 0);
+ }
+
+ VERIFY(out->rt_expire == 0 || out->rt_rmx.rmx_expire != 0);
+ VERIFY(out->rt_expire != 0 || out->rt_rmx.rmx_expire == 0);
+}
+
+static void
+rt_getmetrics(struct rtentry *in, struct rt_metrics *out)
+{
+ struct timeval curr_calendar_time;
+ uint64_t curr_uptime;
+
+ VERIFY(in->rt_expire == 0 || in->rt_rmx.rmx_expire != 0);
+ VERIFY(in->rt_expire != 0 || in->rt_rmx.rmx_expire == 0);
+
+ *out = in->rt_rmx;
+
+ if (in->rt_expire) {
+ /* account for system time change */
+ getmicrotime(&curr_calendar_time);
+ curr_uptime = net_uptime();
+
+ in->base_calendartime +=
+ CALCULATE_CLOCKSKEW(curr_calendar_time,
+ in->base_calendartime,
+ curr_uptime, in->base_uptime);
+
+ out->rmx_expire = in->base_calendartime +
+ in->rt_expire - in->base_uptime;
+ } else
+ out->rmx_expire = 0;
}
/*
- * Set route's interface given ifpaddr, ifaaddr, and gateway.
+ * Set route's interface given info.rti_info[RTAX_IFP], info.rti_info[RTAX_IFA], and gateway.
*/
static void
-rt_setif(
- struct rtentry *rt,
- struct sockaddr *Ifpaddr,
- struct sockaddr *Ifaaddr,
- struct sockaddr *Gate)
+rt_setif(struct rtentry *rt, struct sockaddr *Ifpaddr, struct sockaddr *Ifaaddr,
+ struct sockaddr *Gate, unsigned int ifscope)
{
- struct ifaddr *ifa = 0;
- struct ifnet *ifp = 0;
+ struct ifaddr *ifa = NULL;
+ struct ifnet *ifp = NULL;
+ void (*ifa_rtrequest)
+ (int, struct rtentry *, struct sockaddr *);
+
+ lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
- lck_mtx_assert(rt_mtx, LCK_MTX_ASSERT_OWNED);
+ RT_LOCK_ASSERT_HELD(rt);
- /* new gateway could require new ifaddr, ifp;
- flags may also be different; ifp may be specified
- by ll sockaddr when protocol address is ambiguous */
- if (Ifpaddr && (ifa = ifa_ifwithnet(Ifpaddr)) &&
+ /* trigger route cache reevaluation */
+ if (use_routegenid)
+ routegenid_update();
+
+ /* Don't update a defunct route */
+ if (rt->rt_flags & RTF_CONDEMNED)
+ return;
+
+ /* Add an extra ref for ourselves */
+ RT_ADDREF_LOCKED(rt);
+
+ /* Become a regular mutex, just in case */
+ RT_CONVERT_LOCK(rt);
+
+ /*
+ * New gateway could require new ifaddr, ifp; flags may also
+ * be different; ifp may be specified by ll sockaddr when
+ * protocol address is ambiguous.
+ */
+ if (Ifpaddr && (ifa = ifa_ifwithnet_scoped(Ifpaddr, ifscope)) &&
(ifp = ifa->ifa_ifp) && (Ifaaddr || Gate)) {
- ifafree(ifa);
- ifa = ifaof_ifpforaddr(Ifaaddr ? Ifaaddr : Gate,
- ifp);
- }
- else
- {
+ IFA_REMREF(ifa);
+ ifa = ifaof_ifpforaddr(Ifaaddr ? Ifaaddr : Gate, ifp);
+ } else {
if (ifa) {
- ifafree(ifa);
+ IFA_REMREF(ifa);
ifa = 0;
}
if (Ifpaddr && (ifp = if_withname(Ifpaddr)) ) {
if (Gate) {
ifa = ifaof_ifpforaddr(Gate, ifp);
- }
- else {
+ } else {
ifnet_lock_shared(ifp);
ifa = TAILQ_FIRST(&ifp->if_addrhead);
- ifaref(ifa);
+ if (ifa != NULL)
+ IFA_ADDREF(ifa);
ifnet_lock_done(ifp);
}
- }
- else if (Ifaaddr && (ifa = ifa_ifwithaddr(Ifaaddr))) {
- ifp = ifa->ifa_ifp;
- }
- else if (Gate && (ifa = ifa_ifwithroute(rt->rt_flags,
- rt_key(rt), Gate))) {
+ } else if (Ifaaddr &&
+ (ifa = ifa_ifwithaddr_scoped(Ifaaddr, ifscope))) {
ifp = ifa->ifa_ifp;
+ } else if (Gate != NULL) {
+ /*
+ * Safe to drop rt_lock and use rt_key, since holding
+ * rnh_lock here prevents another thread from calling
+ * rt_setgate() on this route. We cannot hold the
+ * lock across ifa_ifwithroute since the lookup done
+ * by that routine may point to the same route.
+ */
+ RT_UNLOCK(rt);
+ if ((ifa = ifa_ifwithroute_scoped_locked(rt->rt_flags,
+ rt_key(rt), Gate, ifscope)) != NULL)
+ ifp = ifa->ifa_ifp;
+ RT_LOCK(rt);
+ /* Don't update a defunct route */
+ if (rt->rt_flags & RTF_CONDEMNED) {
+ if (ifa != NULL)
+ IFA_REMREF(ifa);
+ /* Release extra ref */
+ RT_REMREF_LOCKED(rt);
+ return;
+ }
}
}
if (ifa) {
struct ifaddr *oifa = rt->rt_ifa;
if (oifa != ifa) {
- if (oifa && oifa->ifa_rtrequest)
- oifa->ifa_rtrequest(RTM_DELETE,
- rt, Gate);
+ if (oifa != NULL) {
+ IFA_LOCK_SPIN(oifa);
+ ifa_rtrequest = oifa->ifa_rtrequest;
+ IFA_UNLOCK(oifa);
+ if (ifa_rtrequest != NULL)
+ ifa_rtrequest(RTM_DELETE, rt, Gate);
+ }
rtsetifa(rt, ifa);
- rt->rt_ifp = ifp;
- rt->rt_rmx.rmx_mtu = ifp->if_mtu;
- if (rt->rt_ifa && rt->rt_ifa->ifa_rtrequest)
- rt->rt_ifa->ifa_rtrequest(RTM_ADD, rt, Gate);
- } else {
- ifafree(ifa);
- goto call_ifareq;
+
+ if (rt->rt_ifp != ifp) {
+ /*
+ * Purge any link-layer info caching.
+ */
+ if (rt->rt_llinfo_purge != NULL)
+ rt->rt_llinfo_purge(rt);
+
+ /*
+ * Adjust route ref count for the interfaces.
+ */
+ if (rt->rt_if_ref_fn != NULL) {
+ rt->rt_if_ref_fn(ifp, 1);
+ rt->rt_if_ref_fn(rt->rt_ifp, -1);
+ }
+ }
+ rt->rt_ifp = ifp;
+ /*
+ * If this is the (non-scoped) default route, record
+ * the interface index used for the primary ifscope.
+ */
+ if (rt_primary_default(rt, rt_key(rt))) {
+ set_primary_ifscope(rt_key(rt)->sa_family,
+ rt->rt_ifp->if_index);
+ }
+ rt->rt_rmx.rmx_mtu = ifp->if_mtu;
+ if (rt->rt_ifa != NULL) {
+ IFA_LOCK_SPIN(rt->rt_ifa);
+ ifa_rtrequest = rt->rt_ifa->ifa_rtrequest;
+ IFA_UNLOCK(rt->rt_ifa);
+ if (ifa_rtrequest != NULL)
+ ifa_rtrequest(RTM_ADD, rt, Gate);
+ }
+ IFA_REMREF(ifa);
+ /* Release extra ref */
+ RT_REMREF_LOCKED(rt);
+ return;
}
- ifafree(ifa);
- return;
+ IFA_REMREF(ifa);
}
- call_ifareq:
+
/* XXX: to reset gateway to correct value, at RTM_CHANGE */
- if (rt->rt_ifa && rt->rt_ifa->ifa_rtrequest)
- rt->rt_ifa->ifa_rtrequest(RTM_ADD, rt, Gate);
-}
+ if (rt->rt_ifa != NULL) {
+ IFA_LOCK_SPIN(rt->rt_ifa);
+ ifa_rtrequest = rt->rt_ifa->ifa_rtrequest;
+ IFA_UNLOCK(rt->rt_ifa);
+ if (ifa_rtrequest != NULL)
+ ifa_rtrequest(RTM_ADD, rt, Gate);
+ }
+ /* Release extra ref */
+ RT_REMREF_LOCKED(rt);
+}
-#define ROUNDUP(a) \
- ((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
-#define ADVANCE(x, n) (x += ROUNDUP((n)->sa_len))
+#define ROUNDUP32(a) \
+ ((a) > 0 ? (1 + (((a) - 1) | (sizeof(uint32_t) - 1))) : sizeof(uint32_t))
+#define ADVANCE32(x, n) (x += ROUNDUP32((n)->sa_len))
/*
* This data is derived straight from userland.
*/
static int
-rt_xaddrs(cp, cplim, rtinfo)
- caddr_t cp, cplim;
- struct rt_addrinfo *rtinfo;
+rt_xaddrs(caddr_t cp, caddr_t cplim, struct rt_addrinfo *rtinfo)
{
struct sockaddr *sa;
int i;
/* accept it */
rtinfo->rti_info[i] = sa;
- ADVANCE(cp, sa);
+ ADVANCE32(cp, sa);
}
return (0);
}
static struct mbuf *
-rt_msg1(
- int type,
- struct rt_addrinfo *rtinfo)
+rt_msg1(int type, struct rt_addrinfo *rtinfo)
{
struct rt_msghdr *rtm;
struct mbuf *m;
int i;
- struct sockaddr *sa;
int len, dlen;
switch (type) {
rtm = mtod(m, struct rt_msghdr *);
bzero((caddr_t)rtm, len);
for (i = 0; i < RTAX_MAX; i++) {
+ struct sockaddr *sa, *hint;
+ struct sockaddr_storage ss;
+
if ((sa = rtinfo->rti_info[i]) == NULL)
continue;
+
+ switch (i) {
+ case RTAX_DST:
+ case RTAX_NETMASK:
+ if ((hint = rtinfo->rti_info[RTAX_DST]) == NULL)
+ hint = rtinfo->rti_info[RTAX_IFA];
+
+ /* Scrub away any trace of embedded interface scope */
+ sa = rtm_scrub_ifscope(type, i, hint, sa, &ss);
+ break;
+
+ default:
+ break;
+ }
+
rtinfo->rti_addrs |= (1 << i);
- dlen = ROUNDUP(sa->sa_len);
+ dlen = ROUNDUP32(sa->sa_len);
m_copyback(m, len, dlen, (caddr_t)sa);
len += dlen;
}
}
static int
-rt_msg2(type, rtinfo, cp, w)
- int type;
- struct rt_addrinfo *rtinfo;
- caddr_t cp;
- struct walkarg *w;
+rt_msg2(int type, struct rt_addrinfo *rtinfo, caddr_t cp, struct walkarg *w)
{
int i;
int len, dlen, second_time = 0;
len = sizeof(struct ifma_msghdr2);
break;
+ case RTM_GET_EXT:
+ len = sizeof (struct rt_msghdr_ext);
+ break;
+
case RTM_GET2:
len = sizeof(struct rt_msghdr2);
break;
if (cp0)
cp += len;
for (i = 0; i < RTAX_MAX; i++) {
- struct sockaddr *sa;
+ struct sockaddr *sa, *hint;
+ struct sockaddr_storage ss;
if ((sa = rtinfo->rti_info[i]) == 0)
continue;
+
+ switch (i) {
+ case RTAX_DST:
+ case RTAX_NETMASK:
+ if ((hint = rtinfo->rti_info[RTAX_DST]) == NULL)
+ hint = rtinfo->rti_info[RTAX_IFA];
+
+ /* Scrub away any trace of embedded interface scope */
+ sa = rtm_scrub_ifscope(type, i, hint, sa, &ss);
+ break;
+
+ default:
+ break;
+ }
+
rtinfo->rti_addrs |= (1 << i);
- dlen = ROUNDUP(sa->sa_len);
+ dlen = ROUNDUP32(sa->sa_len);
if (cp) {
bcopy((caddr_t)sa, cp, (unsigned)dlen);
cp += dlen;
if (rw->w_tmemsize < len) {
if (rw->w_tmem)
FREE(rw->w_tmem, M_RTABLE);
- rw->w_tmem = (caddr_t)
- _MALLOC(len, M_RTABLE, M_WAITOK); /*###LD0412 was NOWAIT */
+ rw->w_tmem = _MALLOC(len, M_RTABLE, M_WAITOK);
if (rw->w_tmem)
rw->w_tmemsize = len;
}
}
}
if (cp) {
- struct rt_msghdr *rtm = (struct rt_msghdr *)cp0;
+ struct rt_msghdr *rtm = (struct rt_msghdr *)(void *)cp0;
rtm->rtm_version = RTM_VERSION;
rtm->rtm_type = type;
* destination.
*/
void
-rt_missmsg(type, rtinfo, flags, error)
- int type, flags, error;
- struct rt_addrinfo *rtinfo;
+rt_missmsg(int type, struct rt_addrinfo *rtinfo, int flags, int error)
{
struct rt_msghdr *rtm;
struct mbuf *m;
struct sockaddr *sa = rtinfo->rti_info[RTAX_DST];
-
- lck_mtx_assert(rt_mtx, LCK_MTX_ASSERT_OWNED);
+ struct sockproto route_proto = {PF_ROUTE, 0};
if (route_cb.any_count == 0)
return;
rtm->rtm_flags = RTF_DONE | flags;
rtm->rtm_errno = error;
rtm->rtm_addrs = rtinfo->rti_addrs;
- route_proto.sp_protocol = sa ? sa->sa_family : 0;
+ route_proto.sp_family = sa ? sa->sa_family : 0;
raw_input(m, &route_proto, &route_src, &route_dst);
}
struct if_msghdr *ifm;
struct mbuf *m;
struct rt_addrinfo info;
+ struct sockproto route_proto = {PF_ROUTE, 0};
if (route_cb.any_count == 0)
return;
ifm = mtod(m, struct if_msghdr *);
ifm->ifm_index = ifp->if_index;
ifm->ifm_flags = (u_short)ifp->if_flags;
- if_data_internal_to_if_data(&ifp->if_data, &ifm->ifm_data);
+ if_data_internal_to_if_data(ifp, &ifp->if_data, &ifm->ifm_data);
ifm->ifm_addrs = 0;
- route_proto.sp_protocol = 0;
raw_input(m, &route_proto, &route_src, &route_dst);
}
* copies of it.
*
* Since this is coming from the interface, it is expected that the
- * interface will be locked.
+ * interface will be locked. Caller must hold rnh_lock and rt_lock.
*/
void
-rt_newaddrmsg(cmd, ifa, error, rt)
- int cmd, error;
- struct ifaddr *ifa;
- struct rtentry *rt;
+rt_newaddrmsg(int cmd, struct ifaddr *ifa, int error, struct rtentry *rt)
{
struct rt_addrinfo info;
struct sockaddr *sa = 0;
int pass;
struct mbuf *m = 0;
struct ifnet *ifp = ifa->ifa_ifp;
+ struct sockproto route_proto = {PF_ROUTE, 0};
+
+ lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+ RT_LOCK_ASSERT_HELD(rt);
if (route_cb.any_count == 0)
return;
+
+ /* Become a regular mutex, just in case */
+ RT_CONVERT_LOCK(rt);
for (pass = 1; pass < 3; pass++) {
bzero((caddr_t)&info, sizeof(info));
if ((cmd == RTM_ADD && pass == 1) ||
struct ifa_msghdr *ifam;
int ncmd = cmd == RTM_ADD ? RTM_NEWADDR : RTM_DELADDR;
- ifaaddr = sa = ifa->ifa_addr;
- ifpaddr = ifp->if_addrhead.tqh_first->ifa_addr;
- netmask = ifa->ifa_netmask;
- brdaddr = ifa->ifa_dstaddr;
- if ((m = rt_msg1(ncmd, &info)) == NULL)
+ /* Lock ifp for if_lladdr */
+ ifnet_lock_shared(ifp);
+ IFA_LOCK(ifa);
+ info.rti_info[RTAX_IFA] = sa = ifa->ifa_addr;
+ /*
+ * Holding ifnet lock here prevents the link address
+ * from changing contents, so no need to hold its
+ * lock. The link address is always present; it's
+ * never freed.
+ */
+ info.rti_info[RTAX_IFP] = ifp->if_lladdr->ifa_addr;
+ info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
+ info.rti_info[RTAX_BRD] = ifa->ifa_dstaddr;
+ if ((m = rt_msg1(ncmd, &info)) == NULL) {
+ IFA_UNLOCK(ifa);
+ ifnet_lock_done(ifp);
continue;
+ }
+ IFA_UNLOCK(ifa);
+ ifnet_lock_done(ifp);
ifam = mtod(m, struct ifa_msghdr *);
ifam->ifam_index = ifp->if_index;
+ IFA_LOCK_SPIN(ifa);
ifam->ifam_metric = ifa->ifa_metric;
ifam->ifam_flags = ifa->ifa_flags;
+ IFA_UNLOCK(ifa);
ifam->ifam_addrs = info.rti_addrs;
}
if ((cmd == RTM_ADD && pass == 2) ||
if (rt == 0)
continue;
- netmask = rt_mask(rt);
- dst = sa = rt_key(rt);
- gate = rt->rt_gateway;
+ info.rti_info[RTAX_NETMASK] = rt_mask(rt);
+ info.rti_info[RTAX_DST] = sa = rt_key(rt);
+ info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
if ((m = rt_msg1(cmd, &info)) == NULL)
continue;
rtm = mtod(m, struct rt_msghdr *);
* there is no route state to worry about.
*/
void
-rt_newmaddrmsg(cmd, ifma)
- int cmd;
- struct ifmultiaddr *ifma;
+rt_newmaddrmsg(int cmd, struct ifmultiaddr *ifma)
{
struct rt_addrinfo info;
struct mbuf *m = 0;
struct ifnet *ifp = ifma->ifma_ifp;
struct ifma_msghdr *ifmam;
+ struct sockproto route_proto = {PF_ROUTE, 0};
if (route_cb.any_count == 0)
return;
+ /* Lock ifp for if_lladdr */
+ ifnet_lock_shared(ifp);
bzero((caddr_t)&info, sizeof(info));
- ifaaddr = ifma->ifma_addr;
- if (ifp && ifp->if_addrhead.tqh_first)
- ifpaddr = ifp->if_addrhead.tqh_first->ifa_addr;
- else
- ifpaddr = NULL;
+ IFMA_LOCK(ifma);
+ info.rti_info[RTAX_IFA] = ifma->ifma_addr;
+ info.rti_info[RTAX_IFP] = ifp->if_lladdr->ifa_addr; /* lladdr doesn't need lock */
+
/*
* If a link-layer address is present, present it as a ``gateway''
* (similarly to how ARP entries, e.g., are presented).
*/
- gate = ifma->ifma_ll->ifma_addr;
- if ((m = rt_msg1(cmd, &info)) == NULL)
+ info.rti_info[RTAX_GATEWAY] = (ifma->ifma_ll != NULL) ? ifma->ifma_ll->ifma_addr : NULL;
+ if ((m = rt_msg1(cmd, &info)) == NULL) {
+ IFMA_UNLOCK(ifma);
+ ifnet_lock_done(ifp);
return;
+ }
ifmam = mtod(m, struct ifma_msghdr *);
- ifmam->ifmam_index = ifp ? ifp->if_index : 0;
+ ifmam->ifmam_index = ifp->if_index;
ifmam->ifmam_addrs = info.rti_addrs;
route_proto.sp_protocol = ifma->ifma_addr->sa_family;
+ IFMA_UNLOCK(ifma);
+ ifnet_lock_done(ifp);
raw_input(m, &route_proto, &route_src, &route_dst);
}
* This is used in dumping the kernel table via sysctl().
*/
int
-sysctl_dumpentry(rn, vw)
- struct radix_node *rn;
- void *vw;
+sysctl_dumpentry(struct radix_node *rn, void *vw)
{
struct walkarg *w = vw;
struct rtentry *rt = (struct rtentry *)rn;
int error = 0, size;
struct rt_addrinfo info;
- if (w->w_op == NET_RT_FLAGS && !(rt->rt_flags & w->w_arg))
+ RT_LOCK(rt);
+ if (w->w_op == NET_RT_FLAGS && !(rt->rt_flags & w->w_arg)) {
+ RT_UNLOCK(rt);
return 0;
+ }
bzero((caddr_t)&info, sizeof(info));
- dst = rt_key(rt);
- gate = rt->rt_gateway;
- netmask = rt_mask(rt);
- genmask = rt->rt_genmask;
+ info.rti_info[RTAX_DST] = rt_key(rt);
+ info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
+ info.rti_info[RTAX_NETMASK] = rt_mask(rt);
+ info.rti_info[RTAX_GENMASK] = rt->rt_genmask;
+
if (w->w_op != NET_RT_DUMP2) {
size = rt_msg2(RTM_GET, &info, 0, w);
if (w->w_req && w->w_tmem) {
- struct rt_msghdr *rtm = (struct rt_msghdr *)w->w_tmem;
+ struct rt_msghdr *rtm =
+ (struct rt_msghdr *)(void *)w->w_tmem;
rtm->rtm_flags = rt->rt_flags;
rtm->rtm_use = rt->rt_use;
- rtm->rtm_rmx = rt->rt_rmx;
+ rt_getmetrics(rt, &rtm->rtm_rmx);
rtm->rtm_index = rt->rt_ifp->if_index;
rtm->rtm_pid = 0;
- rtm->rtm_seq = 0;
- rtm->rtm_errno = 0;
+ rtm->rtm_seq = 0;
+ rtm->rtm_errno = 0;
rtm->rtm_addrs = info.rti_addrs;
error = SYSCTL_OUT(w->w_req, (caddr_t)rtm, size);
+ RT_UNLOCK(rt);
return (error);
}
} else {
- size = rt_msg2(RTM_GET2, &info, 0, w);
- if (w->w_req && w->w_tmem) {
- struct rt_msghdr2 *rtm = (struct rt_msghdr2 *)w->w_tmem;
-
- rtm->rtm_flags = rt->rt_flags;
- rtm->rtm_use = rt->rt_use;
- rtm->rtm_rmx = rt->rt_rmx;
- rtm->rtm_index = rt->rt_ifp->if_index;
- rtm->rtm_refcnt = rt->rt_refcnt;
+ size = rt_msg2(RTM_GET2, &info, 0, w);
+ if (w->w_req && w->w_tmem) {
+ struct rt_msghdr2 *rtm =
+ (struct rt_msghdr2 *)(void *)w->w_tmem;
+
+ rtm->rtm_flags = rt->rt_flags;
+ rtm->rtm_use = rt->rt_use;
+ rt_getmetrics(rt, &rtm->rtm_rmx);
+ rtm->rtm_index = rt->rt_ifp->if_index;
+ rtm->rtm_refcnt = rt->rt_refcnt;
if (rt->rt_parent)
rtm->rtm_parentflags = rt->rt_parent->rt_flags;
else
rtm->rtm_parentflags = 0;
- rtm->rtm_reserved = 0;
- rtm->rtm_addrs = info.rti_addrs;
- error = SYSCTL_OUT(w->w_req, (caddr_t)rtm, size);
- return (error);
+ rtm->rtm_reserved = 0;
+ rtm->rtm_addrs = info.rti_addrs;
+ error = SYSCTL_OUT(w->w_req, (caddr_t)rtm, size);
+ RT_UNLOCK(rt);
+ return (error);
+ }
+ }
+ RT_UNLOCK(rt);
+ return (error);
+}
+
+/*
+ * This is used for dumping extended information from route entries.
+ */
+int
+sysctl_dumpentry_ext(struct radix_node *rn, void *vw)
+{
+ struct walkarg *w = vw;
+ struct rtentry *rt = (struct rtentry *)rn;
+ int error = 0, size;
+ struct rt_addrinfo info;
+ RT_LOCK(rt);
+ if (w->w_op == NET_RT_DUMPX_FLAGS && !(rt->rt_flags & w->w_arg)) {
+ RT_UNLOCK(rt);
+ return (0);
+ }
+ bzero(&info, sizeof (info));
+ info.rti_info[RTAX_DST] = rt_key(rt);
+ info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
+ info.rti_info[RTAX_NETMASK] = rt_mask(rt);
+ info.rti_info[RTAX_GENMASK] = rt->rt_genmask;
+
+ size = rt_msg2(RTM_GET_EXT, &info, 0, w);
+ if (w->w_req && w->w_tmem) {
+ struct rt_msghdr_ext *ertm =
+ (struct rt_msghdr_ext *)(void *)w->w_tmem;
+
+ ertm->rtm_flags = rt->rt_flags;
+ ertm->rtm_use = rt->rt_use;
+ rt_getmetrics(rt, &ertm->rtm_rmx);
+ ertm->rtm_index = rt->rt_ifp->if_index;
+ ertm->rtm_pid = 0;
+ ertm->rtm_seq = 0;
+ ertm->rtm_errno = 0;
+ ertm->rtm_addrs = info.rti_addrs;
+ if (rt->rt_llinfo_get_ri == NULL) {
+ bzero(&ertm->rtm_ri, sizeof (ertm->rtm_ri));
+ ertm->rtm_ri.ri_rssi = IFNET_RSSI_UNKNOWN;
+ ertm->rtm_ri.ri_lqm = IFNET_LQM_THRESH_OFF;
+ ertm->rtm_ri.ri_npm = IFNET_NPM_THRESH_UNKNOWN;
}
+ else
+ rt->rt_llinfo_get_ri(rt, &ertm->rtm_ri);
+
+ error = SYSCTL_OUT(w->w_req, (caddr_t)ertm, size);
+ RT_UNLOCK(rt);
+ return (error);
}
+ RT_UNLOCK(rt);
return (error);
}
+/*
+ * rdar://9307819
+ * To avoid to call copyout() while holding locks and to cause problems
+ * in the paging path, sysctl_iflist() and sysctl_iflist2() construct
+ * the list in two passes. In the first pass we compute the total
+ * length of the data we are going to copyout, then we release
+ * all locks to allocate a temporary buffer that gets filled
+ * in the second pass.
+ *
+ * Note that we are verifying the assumption that _MALLOC returns a buffer
+ * that is at least 32 bits aligned and that the messages and addresses are
+ * 32 bits aligned.
+ */
+
int
-sysctl_iflist(
- int af,
- struct walkarg *w)
+sysctl_iflist(int af, struct walkarg *w)
{
struct ifnet *ifp;
struct ifaddr *ifa;
struct rt_addrinfo info;
int len, error = 0;
+ int pass = 0;
+ int total_len = 0, current_len = 0;
+ char *total_buffer = NULL, *cp = NULL;
bzero((caddr_t)&info, sizeof(info));
- ifnet_head_lock_shared();
- TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
- if (error)
- break;
- if (w->w_arg && w->w_arg != ifp->if_index)
- continue;
- ifnet_lock_shared(ifp);
- ifa = ifp->if_addrhead.tqh_first;
- ifpaddr = ifa->ifa_addr;
- len = rt_msg2(RTM_IFINFO, &info, (caddr_t)0, w);
- ifpaddr = 0;
- if (w->w_req && w->w_tmem) {
- struct if_msghdr *ifm;
-
- ifm = (struct if_msghdr *)w->w_tmem;
- ifm->ifm_index = ifp->if_index;
- ifm->ifm_flags = (u_short)ifp->if_flags;
- if_data_internal_to_if_data(&ifp->if_data, &ifm->ifm_data);
- ifm->ifm_addrs = info.rti_addrs;
- error = SYSCTL_OUT(w->w_req,(caddr_t)ifm, len);
- if (error) {
- ifnet_lock_done(ifp);
+
+ for (pass = 0; pass < 2; pass++) {
+ ifnet_head_lock_shared();
+
+ TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
+ if (error)
break;
- }
- }
- while ((ifa = ifa->ifa_link.tqe_next) != 0) {
- if (af && af != ifa->ifa_addr->sa_family)
+ if (w->w_arg && w->w_arg != ifp->if_index)
continue;
-#ifndef __APPLE__
- if (curproc->p_prison && prison_if(curproc, ifa->ifa_addr))
- continue;
-#endif
- ifaaddr = ifa->ifa_addr;
- netmask = ifa->ifa_netmask;
- brdaddr = ifa->ifa_dstaddr;
- len = rt_msg2(RTM_NEWADDR, &info, 0, w);
- if (w->w_req && w->w_tmem) {
- struct ifa_msghdr *ifam;
-
- ifam = (struct ifa_msghdr *)w->w_tmem;
- ifam->ifam_index = ifa->ifa_ifp->if_index;
- ifam->ifam_flags = ifa->ifa_flags;
- ifam->ifam_metric = ifa->ifa_metric;
- ifam->ifam_addrs = info.rti_addrs;
- error = SYSCTL_OUT(w->w_req, w->w_tmem, len);
- if (error)
+ ifnet_lock_shared(ifp);
+ /*
+ * Holding ifnet lock here prevents the link address from
+ * changing contents, so no need to hold the ifa lock.
+ * The link address is always present; it's never freed.
+ */
+ ifa = ifp->if_lladdr;
+ info.rti_info[RTAX_IFP] = ifa->ifa_addr;
+ len = rt_msg2(RTM_IFINFO, &info, (caddr_t)0, NULL);
+ if (pass == 0) {
+ total_len += len;
+ } else {
+ struct if_msghdr *ifm;
+
+ if (current_len + len > total_len) {
+ ifnet_lock_done(ifp);
+ printf("sysctl_iflist: current_len (%d) + len (%d) > total_len (%d)\n",
+ current_len, len, total_len);
+ error = ENOBUFS;
break;
+ }
+ info.rti_info[RTAX_IFP] = ifa->ifa_addr;
+ len = rt_msg2(RTM_IFINFO, &info, (caddr_t)cp, NULL);
+ info.rti_info[RTAX_IFP] = NULL;
+
+ ifm = (struct if_msghdr *)(void *)cp;
+ ifm->ifm_index = ifp->if_index;
+ ifm->ifm_flags = (u_short)ifp->if_flags;
+ if_data_internal_to_if_data(ifp, &ifp->if_data,
+ &ifm->ifm_data);
+ ifm->ifm_addrs = info.rti_addrs;
+
+ cp += len;
+ VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t)));
+ current_len += len;
+ }
+ while ((ifa = ifa->ifa_link.tqe_next) != 0) {
+ IFA_LOCK(ifa);
+ if (af && af != ifa->ifa_addr->sa_family) {
+ IFA_UNLOCK(ifa);
+ continue;
+ }
+ info.rti_info[RTAX_IFA] = ifa->ifa_addr;
+ info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
+ info.rti_info[RTAX_BRD] = ifa->ifa_dstaddr;
+ len = rt_msg2(RTM_NEWADDR, &info, 0, 0);
+ if (pass == 0) {
+ total_len += len;
+ } else {
+ struct ifa_msghdr *ifam;
+
+ if (current_len + len > total_len) {
+ IFA_UNLOCK(ifa);
+ printf("sysctl_iflist: current_len (%d) + len (%d) > total_len (%d)\n",
+ current_len, len, total_len);
+ error = ENOBUFS;
+ break;
+ }
+ len = rt_msg2(RTM_NEWADDR, &info, (caddr_t)cp, NULL);
+
+ ifam = (struct ifa_msghdr *)(void *)cp;
+ ifam->ifam_index = ifa->ifa_ifp->if_index;
+ ifam->ifam_flags = ifa->ifa_flags;
+ ifam->ifam_metric = ifa->ifa_metric;
+ ifam->ifam_addrs = info.rti_addrs;
+
+ cp += len;
+ VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t)));
+ current_len += len;
+ }
+ IFA_UNLOCK(ifa);
}
+ ifnet_lock_done(ifp);
+ info.rti_info[RTAX_IFA] = info.rti_info[RTAX_NETMASK] =
+ info.rti_info[RTAX_BRD] = NULL;
+ }
+
+ ifnet_head_done();
+
+ if (error)
+ break;
+
+ if (pass == 0) {
+ /* Better to return zero length buffer than ENOBUFS */
+ if (total_len == 0)
+ total_len = 1;
+ total_len += total_len >> 3;
+ total_buffer = _MALLOC(total_len, M_RTABLE, M_ZERO | M_WAITOK);
+ if (total_buffer == NULL) {
+ printf("sysctl_iflist: _MALLOC(%d) failed\n", total_len);
+ error = ENOBUFS;
+ break;
+ }
+ cp = total_buffer;
+ VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t)));
+ } else {
+ error = SYSCTL_OUT(w->w_req, total_buffer, current_len);
+ if (error)
+ break;
}
- ifnet_lock_done(ifp);
- ifaaddr = netmask = brdaddr = 0;
}
- ifnet_head_done();
+
+ if (total_buffer != NULL)
+ _FREE(total_buffer, M_RTABLE);
+
return error;
}
int
-sysctl_iflist2(
- int af,
- struct walkarg *w)
+sysctl_iflist2(int af, struct walkarg *w)
{
struct ifnet *ifp;
struct ifaddr *ifa;
struct rt_addrinfo info;
int len, error = 0;
-
+ int pass = 0;
+ int total_len = 0, current_len = 0;
+ char *total_buffer = NULL, *cp = NULL;
+
bzero((caddr_t)&info, sizeof(info));
- ifnet_head_lock_shared();
- TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
- if (error)
- break;
- if (w->w_arg && w->w_arg != ifp->if_index)
- continue;
- ifnet_lock_shared(ifp);
- ifa = ifp->if_addrhead.tqh_first;
- ifpaddr = ifa->ifa_addr;
- len = rt_msg2(RTM_IFINFO2, &info, (caddr_t)0, w);
- ifpaddr = 0;
- if (w->w_req && w->w_tmem) {
- struct if_msghdr2 *ifm;
-
- ifm = (struct if_msghdr2 *)w->w_tmem;
- ifm->ifm_addrs = info.rti_addrs;
- ifm->ifm_flags = (u_short)ifp->if_flags;
- ifm->ifm_index = ifp->if_index;
- ifm->ifm_snd_len = ifp->if_snd.ifq_len;
- ifm->ifm_snd_maxlen = ifp->if_snd.ifq_maxlen;
- ifm->ifm_snd_drops = ifp->if_snd.ifq_drops;
- ifm->ifm_timer = ifp->if_timer;
- if_data_internal_to_if_data64(&ifp->if_data, &ifm->ifm_data);
- error = SYSCTL_OUT(w->w_req, w->w_tmem, len);
- if (error) {
- ifnet_lock_done(ifp);
+
+ for (pass = 0; pass < 2; pass++) {
+ ifnet_head_lock_shared();
+
+ TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
+ if (error)
break;
- }
- }
- while ((ifa = ifa->ifa_link.tqe_next) != 0) {
- if (af && af != ifa->ifa_addr->sa_family)
+ if (w->w_arg && w->w_arg != ifp->if_index)
continue;
- ifaaddr = ifa->ifa_addr;
- netmask = ifa->ifa_netmask;
- brdaddr = ifa->ifa_dstaddr;
- len = rt_msg2(RTM_NEWADDR, &info, 0, w);
- if (w->w_req && w->w_tmem) {
- struct ifa_msghdr *ifam;
-
- ifam = (struct ifa_msghdr *)w->w_tmem;
- ifam->ifam_index = ifa->ifa_ifp->if_index;
- ifam->ifam_flags = ifa->ifa_flags;
- ifam->ifam_metric = ifa->ifa_metric;
- ifam->ifam_addrs = info.rti_addrs;
- error = SYSCTL_OUT(w->w_req, w->w_tmem, len);
- if (error)
+ ifnet_lock_shared(ifp);
+ /*
+ * Holding ifnet lock here prevents the link address from
+ * changing contents, so no need to hold the ifa lock.
+ * The link address is always present; it's never freed.
+ */
+ ifa = ifp->if_lladdr;
+ info.rti_info[RTAX_IFP] = ifa->ifa_addr;
+ len = rt_msg2(RTM_IFINFO2, &info, (caddr_t)0, NULL);
+ if (pass == 0) {
+ total_len += len;
+ } else {
+ struct if_msghdr2 *ifm;
+
+ if (current_len + len > total_len) {
+ ifnet_lock_done(ifp);
+ printf("sysctl_iflist2: current_len (%d) + len (%d) > total_len (%d)\n",
+ current_len, len, total_len);
+ error = ENOBUFS;
break;
+ }
+ info.rti_info[RTAX_IFP] = ifa->ifa_addr;
+ len = rt_msg2(RTM_IFINFO2, &info, (caddr_t)cp, NULL);
+ info.rti_info[RTAX_IFP] = NULL;
+
+ ifm = (struct if_msghdr2 *)(void *)cp;
+ ifm->ifm_addrs = info.rti_addrs;
+ ifm->ifm_flags = (u_short)ifp->if_flags;
+ ifm->ifm_index = ifp->if_index;
+ ifm->ifm_snd_len = IFCQ_LEN(&ifp->if_snd);
+ ifm->ifm_snd_maxlen = IFCQ_MAXLEN(&ifp->if_snd);
+ ifm->ifm_snd_drops =
+ ifp->if_snd.ifcq_dropcnt.packets;
+ ifm->ifm_timer = ifp->if_timer;
+ if_data_internal_to_if_data64(ifp, &ifp->if_data,
+ &ifm->ifm_data);
+
+ cp += len;
+ VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t)));
+ current_len += len;
}
- }
- if (error) {
- ifnet_lock_done(ifp);
- break;
- }
- {
- struct ifmultiaddr *ifma;
-
- for (ifma = ifp->if_multiaddrs.lh_first; ifma;
- ifma = ifma->ifma_link.le_next) {
- if (af && af != ifma->ifma_addr->sa_family)
+ while ((ifa = ifa->ifa_link.tqe_next) != 0) {
+ IFA_LOCK(ifa);
+ if (af && af != ifa->ifa_addr->sa_family) {
+ IFA_UNLOCK(ifa);
continue;
- bzero((caddr_t)&info, sizeof(info));
- ifaaddr = ifma->ifma_addr;
- if (ifp->if_addrhead.tqh_first)
- ifpaddr = ifp->if_addrhead.tqh_first->ifa_addr;
- if (ifma->ifma_ll)
- gate = ifma->ifma_ll->ifma_addr;
- len = rt_msg2(RTM_NEWMADDR2, &info, 0, w);
- if (w->w_req && w->w_tmem) {
- struct ifma_msghdr2 *ifmam;
-
- ifmam = (struct ifma_msghdr2 *)w->w_tmem;
- ifmam->ifmam_addrs = info.rti_addrs;
- ifmam->ifmam_flags = 0;
- ifmam->ifmam_index = ifma->ifma_ifp->if_index;
- ifmam->ifmam_refcount = ifma->ifma_refcount;
- error = SYSCTL_OUT(w->w_req, w->w_tmem, len);
- if (error)
+ }
+ info.rti_info[RTAX_IFA] = ifa->ifa_addr;
+ info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
+ info.rti_info[RTAX_BRD] = ifa->ifa_dstaddr;
+ len = rt_msg2(RTM_NEWADDR, &info, 0, 0);
+ if (pass == 0) {
+ total_len += len;
+ } else {
+ struct ifa_msghdr *ifam;
+
+ if (current_len + len > total_len) {
+ IFA_UNLOCK(ifa);
+ printf("sysctl_iflist2: current_len (%d) + len (%d) > total_len (%d)\n",
+ current_len, len, total_len);
+ error = ENOBUFS;
break;
+ }
+ len = rt_msg2(RTM_NEWADDR, &info, (caddr_t)cp, 0);
+
+ ifam = (struct ifa_msghdr *)(void *)cp;
+ ifam->ifam_index = ifa->ifa_ifp->if_index;
+ ifam->ifam_flags = ifa->ifa_flags;
+ ifam->ifam_metric = ifa->ifa_metric;
+ ifam->ifam_addrs = info.rti_addrs;
+
+ cp += len;
+ VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t)));
+ current_len += len;
+ }
+ IFA_UNLOCK(ifa);
+ }
+ if (error) {
+ ifnet_lock_done(ifp);
+ break;
+ }
+ {
+ struct ifmultiaddr *ifma;
+
+ for (ifma = LIST_FIRST(&ifp->if_multiaddrs);
+ ifma != NULL; ifma = LIST_NEXT(ifma, ifma_link)) {
+ struct ifaddr *ifa0;
+
+ IFMA_LOCK(ifma);
+ if (af && af != ifma->ifma_addr->sa_family) {
+ IFMA_UNLOCK(ifma);
+ continue;
+ }
+ bzero((caddr_t)&info, sizeof(info));
+ info.rti_info[RTAX_IFA] = ifma->ifma_addr;
+ /*
+ * Holding ifnet lock here prevents the link
+ * address from changing contents, so no need
+ * to hold the ifa0 lock. The link address is
+ * always present; it's never freed.
+ */
+ ifa0 = ifp->if_lladdr;
+ info.rti_info[RTAX_IFP] = ifa0->ifa_addr;
+ if (ifma->ifma_ll != NULL)
+ info.rti_info[RTAX_GATEWAY] = ifma->ifma_ll->ifma_addr;
+ len = rt_msg2(RTM_NEWMADDR2, &info, 0, 0);
+ if (pass == 0) {
+ total_len += len;
+ } else {
+ struct ifma_msghdr2 *ifmam;
+
+ if (current_len + len > total_len) {
+ IFMA_UNLOCK(ifma);
+ printf("sysctl_iflist2: current_len (%d) + len (%d) > total_len (%d)\n",
+ current_len, len, total_len);
+ error = ENOBUFS;
+ break;
+ }
+ len = rt_msg2(RTM_NEWMADDR2, &info, (caddr_t)cp, 0);
+
+ ifmam = (struct ifma_msghdr2 *)(void *)cp;
+ ifmam->ifmam_addrs = info.rti_addrs;
+ ifmam->ifmam_flags = 0;
+ ifmam->ifmam_index =
+ ifma->ifma_ifp->if_index;
+ ifmam->ifmam_refcount =
+ ifma->ifma_reqcnt;
+
+ cp += len;
+ VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t)));
+ current_len += len;
+ }
+ IFMA_UNLOCK(ifma);
}
}
+ ifnet_lock_done(ifp);
+ info.rti_info[RTAX_IFA] = info.rti_info[RTAX_NETMASK] =
+ info.rti_info[RTAX_BRD] = NULL;
+ }
+ ifnet_head_done();
+
+ if (error)
+ break;
+
+ if (pass == 0) {
+ /* Better to return zero length buffer than ENOBUFS */
+ if (total_len == 0)
+ total_len = 1;
+ total_len += total_len >> 3;
+ total_buffer = _MALLOC(total_len, M_RTABLE, M_ZERO | M_WAITOK);
+ if (total_buffer == NULL) {
+ printf("sysctl_iflist2: _MALLOC(%d) failed\n", total_len);
+ error = ENOBUFS;
+ break;
+ }
+ cp = total_buffer;
+ VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t)));
+ } else {
+ error = SYSCTL_OUT(w->w_req, total_buffer, current_len);
+ if (error)
+ break;
}
- ifnet_lock_done(ifp);
- ifaaddr = netmask = brdaddr = 0;
}
- ifnet_head_done();
+
+ if (total_buffer != NULL)
+ _FREE(total_buffer, M_RTABLE);
+
return error;
}
static int
sysctl_rttrash(struct sysctl_req *req)
{
- int error;
+ int error;
+
+ error = SYSCTL_OUT(req, &rttrash, sizeof(rttrash));
+ if (error)
+ return (error);
+
+ return 0;
+}
- error = SYSCTL_OUT(req, &rttrash, sizeof(rttrash));
- if (error)
- return (error);
+/*
+ * Called from pfslowtimo(), protected by domain_proto_mtx
+ */
+static void
+rt_drainall(void)
+{
+ struct timeval delta_ts, current_ts;
- return 0;
+ /*
+	 * This test is done without holding rnh_lock; in the event that
+ * we read stale value, it will only cause an extra (or miss)
+ * drain and is therefore harmless.
+ */
+ if (ifnet_aggressive_drainers == 0) {
+ if (timerisset(&last_ts))
+ timerclear(&last_ts);
+ return;
+ }
+
+	microuptime(&current_ts);
+	timersub(&current_ts, &last_ts, &delta_ts);
+
+ if (delta_ts.tv_sec >= rt_if_idle_drain_interval) {
+ timerclear(&last_ts);
+
+ in_rtqdrain(); /* protocol cloned routes: INET */
+ in_arpdrain(NULL); /* cloned routes: ARP */
+#if INET6
+ in6_rtqdrain(); /* protocol cloned routes: INET6 */
+ nd6_drain(NULL); /* cloned routes: ND6 */
+#endif /* INET6 */
+
+ last_ts.tv_sec = current_ts.tv_sec;
+ last_ts.tv_usec = current_ts.tv_usec;
+ }
}
+void
+rt_aggdrain(int on)
+{
+ lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+
+ if (on)
+ routedomain.dom_protosw->pr_flags |= PR_AGGDRAIN;
+ else
+ routedomain.dom_protosw->pr_flags &= ~PR_AGGDRAIN;
+}
static int
sysctl_rtsock SYSCTL_HANDLER_ARGS
{
+#pragma unused(oidp)
int *name = (int *)arg1;
u_int namelen = arg2;
struct radix_node_head *rnh;
case NET_RT_DUMP:
case NET_RT_DUMP2:
case NET_RT_FLAGS:
- lck_mtx_lock(rt_mtx);
+ lck_mtx_lock(rnh_lock);
+ for (i = 1; i <= AF_MAX; i++)
+ if ((rnh = rt_tables[i]) && (af == 0 || af == i) &&
+ (error = rnh->rnh_walktree(rnh,
+ sysctl_dumpentry, &w)))
+ break;
+ lck_mtx_unlock(rnh_lock);
+ break;
+ case NET_RT_DUMPX:
+ case NET_RT_DUMPX_FLAGS:
+ lck_mtx_lock(rnh_lock);
for (i = 1; i <= AF_MAX; i++)
if ((rnh = rt_tables[i]) && (af == 0 || af == i) &&
(error = rnh->rnh_walktree(rnh,
- sysctl_dumpentry, &w)))
+ sysctl_dumpentry_ext, &w)))
break;
- lck_mtx_unlock(rt_mtx);
+ lck_mtx_unlock(rnh_lock);
break;
case NET_RT_IFLIST:
error = sysctl_iflist(af, &w);
return (error);
}
-SYSCTL_NODE(_net, PF_ROUTE, routetable, CTLFLAG_RD, sysctl_rtsock, "");
+SYSCTL_NODE(_net, PF_ROUTE, routetable, CTLFLAG_RD | CTLFLAG_LOCKED, sysctl_rtsock, "");
/*
* Definitions of protocols supported in the ROUTE domain.
*/
-
-struct domain routedomain; /* or at least forward */
-
static struct protosw routesw[] = {
{ SOCK_RAW, &routedomain, 0, PR_ATOMIC|PR_ADDR,
0, route_output, raw_ctlinput, 0,
0,
- raw_init, 0, 0, 0,
+ raw_init, 0, 0, rt_drainall,
0,
&route_usrreqs,
0, 0, 0,
struct domain routedomain =
{ PF_ROUTE, "route", route_init, 0, 0,
routesw,
- 0, 0, 0, 0, 0, 0, 0, 0,
+ NULL, NULL, 0, 0, 0, 0, NULL, 0,
{ 0, 0 } };
DOMAIN_SET(route);