/*
- * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
- *
+ *
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @(#)rtsock.c 8.5 (Berkeley) 11/2/94
*/
-
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/syslog.h>
#include <sys/mcache.h>
-#include <kern/lock.h>
+#include <kern/locks.h>
+#include <sys/codesign.h>
#include <net/if.h>
#include <net/route.h>
#include <netinet/in_arp.h>
#include <netinet6/nd6.h>
-#include <machine/spl.h>
-
extern struct rtstat rtstat;
-extern int check_routeselfref;
-extern struct domain routedomain;
+extern struct domain routedomain_s;
+static struct domain *routedomain = NULL;
MALLOC_DEFINE(M_RTABLE, "routetbl", "routing tables");
-static struct sockaddr route_dst = { 2, PF_ROUTE, { 0, } };
-static struct sockaddr route_src = { 2, PF_ROUTE, { 0, } };
-static struct sockaddr sa_zero = { sizeof(sa_zero), AF_INET, { 0, } };
+static struct sockaddr route_dst = { 2, PF_ROUTE, { 0, } };
+static struct sockaddr route_src = { 2, PF_ROUTE, { 0, } };
+static struct sockaddr sa_zero = { sizeof (sa_zero), AF_INET, { 0, } };
+
+struct route_cb {
+ u_int32_t ip_count; /* attached w/ AF_INET */
+ u_int32_t ip6_count; /* attached w/ AF_INET6 */
+ u_int32_t any_count; /* total attached */
+};
+
+static struct route_cb route_cb;
struct walkarg {
int w_tmemsize;
struct sysctl_req *w_req;
};
+static void route_dinit(struct domain *);
+static int rts_abort(struct socket *);
+static int rts_attach(struct socket *, int, struct proc *);
+static int rts_bind(struct socket *, struct sockaddr *, struct proc *);
+static int rts_connect(struct socket *, struct sockaddr *, struct proc *);
+static int rts_detach(struct socket *);
+static int rts_disconnect(struct socket *);
+static int rts_peeraddr(struct socket *, struct sockaddr **);
+static int rts_send(struct socket *, int, struct mbuf *, struct sockaddr *,
+ struct mbuf *, struct proc *);
+static int rts_shutdown(struct socket *);
+static int rts_sockaddr(struct socket *, struct sockaddr **);
+
+static int route_output(struct mbuf *, struct socket *);
+static int rt_setmetrics(u_int32_t, struct rt_metrics *, struct rtentry *);
+static void rt_getmetrics(struct rtentry *, struct rt_metrics *);
+static void rt_setif(struct rtentry *, struct sockaddr *, struct sockaddr *,
+ struct sockaddr *, unsigned int);
+static int rt_xaddrs(caddr_t, caddr_t, struct rt_addrinfo *);
static struct mbuf *rt_msg1(int, struct rt_addrinfo *);
-static int rt_msg2(int, struct rt_addrinfo *, caddr_t, struct walkarg *);
-static int rt_xaddrs(caddr_t, caddr_t, struct rt_addrinfo *);
-static int sysctl_dumpentry(struct radix_node *rn, void *vw);
-static int sysctl_dumpentry_ext(struct radix_node *rn, void *vw);
-static int sysctl_iflist(int af, struct walkarg *w);
-static int sysctl_iflist2(int af, struct walkarg *w);
-static int route_output(struct mbuf *, struct socket *);
-static void rt_setmetrics(u_int32_t, struct rt_metrics *, struct rtentry *);
-static void rt_getmetrics(struct rtentry *, struct rt_metrics *);
-static void rt_setif(struct rtentry *, struct sockaddr *, struct sockaddr *,
- struct sockaddr *, unsigned int);
-static void rt_drainall(void);
-
-#ifndef SIN
-#define SIN(sa) ((struct sockaddr_in *)(size_t)(sa))
-#endif
-
-SYSCTL_NODE(_net, OID_AUTO, idle, CTLFLAG_RW|CTLFLAG_LOCKED, 0,
- "idle network monitoring");
-
-static struct timeval last_ts;
-
-SYSCTL_NODE(_net_idle, OID_AUTO, route, CTLFLAG_RW|CTLFLAG_LOCKED, 0,
- "idle route monitoring");
-
-static int rt_if_idle_drain_interval = RT_IF_IDLE_DRAIN_INTERVAL;
-SYSCTL_INT(_net_idle_route, OID_AUTO, drain_interval, CTLFLAG_RW,
- &rt_if_idle_drain_interval, 0, "Default interval for draining "
- "routes when doing interface idle reference counting.");
-
-/*
- * This macro calculates skew in wall clock, just in case the user changes the
- * system time. This skew adjustment is required because we now keep the route
- * expiration times in uptime terms in the kernel, but the userland still
- * expects expiration times in terms of calendar times.
- */
-#define CALCULATE_CLOCKSKEW(cc, ic, cu, iu)\
- ((cc.tv_sec - ic) - (cu - iu))
+static int rt_msg2(int, struct rt_addrinfo *, caddr_t, struct walkarg *,
+ kauth_cred_t *);
+static int sysctl_dumpentry(struct radix_node *rn, void *vw);
+static int sysctl_dumpentry_ext(struct radix_node *rn, void *vw);
+static int sysctl_iflist(int af, struct walkarg *w);
+static int sysctl_iflist2(int af, struct walkarg *w);
+static int sysctl_rtstat(struct sysctl_req *);
+static int sysctl_rttrash(struct sysctl_req *);
+static int sysctl_rtsock SYSCTL_HANDLER_ARGS;
+
+SYSCTL_NODE(_net, PF_ROUTE, routetable, CTLFLAG_RD | CTLFLAG_LOCKED,
+ sysctl_rtsock, "");
+
+SYSCTL_NODE(_net, OID_AUTO, route, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "routing");
+
+/* Align x down to a multiple of 1024 (a power of 2), assuming x is positive */
+#define ALIGN_BYTES(x) do { \
+ x = P2ALIGN(x, 1024); \
+} while(0)
+
+#define ROUNDUP32(a) \
+ ((a) > 0 ? (1 + (((a) - 1) | (sizeof (uint32_t) - 1))) : \
+ sizeof (uint32_t))
+
+#define ADVANCE32(x, n) \
+ (x += ROUNDUP32((n)->sa_len))
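/*
 * Editor's illustration (not part of the original file): ROUNDUP32()
 * pads a sockaddr length up to the next 32-bit boundary so that the
 * sockaddrs packed after a routing message header stay aligned, e.g.
 *
 *	ROUNDUP32(0)  == 4	ROUNDUP32(5)  == 8
 *	ROUNDUP32(16) == 16	ROUNDUP32(30) == 32
 *
 * ADVANCE32(cp, sa) then steps a cursor over one padded sockaddr; this
 * is the walk rt_xaddrs() below performs over the addresses appended
 * to an rt_msghdr, roughly:
 *
 *	caddr_t cp = (caddr_t)(rtm + 1);
 *	caddr_t cplim = (caddr_t)rtm + rtm->rtm_msglen;
 *	while (cp < cplim) {
 *		struct sockaddr *sa = (struct sockaddr *)(void *)cp;
 *		ADVANCE32(cp, sa);
 *	}
 */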
/*
* It really doesn't make any sense at all for this code to share much
static int
rts_abort(struct socket *so)
{
- int error;
-
- error = raw_usrreqs.pru_abort(so);
- return error;
+ return (raw_usrreqs.pru_abort(so));
}
/* pru_accept is EOPNOTSUPP */
static int
-rts_attach(struct socket *so, int proto, __unused struct proc *p)
+rts_attach(struct socket *so, int proto, struct proc *p)
{
+#pragma unused(p)
struct rawcb *rp;
int error;
- if (sotorawcb(so) != 0)
- return EISCONN; /* XXX panic? */
- MALLOC(rp, struct rawcb *, sizeof *rp, M_PCB, M_WAITOK); /* XXX */
- if (rp == 0)
- return ENOBUFS;
- bzero(rp, sizeof *rp);
+ VERIFY(so->so_pcb == NULL);
+
+ MALLOC(rp, struct rawcb *, sizeof (*rp), M_PCB, M_WAITOK | M_ZERO);
+ if (rp == NULL)
+ return (ENOBUFS);
- /*
- * The splnet() is necessary to block protocols from sending
- * error notifications (like RTM_REDIRECT or RTM_LOSING) while
- * this PCB is extant but incompletely initialized.
- * Probably we should try to do more of this work beforehand and
- * eliminate the spl.
- */
so->so_pcb = (caddr_t)rp;
- error = raw_attach(so, proto); /* don't use raw_usrreqs.pru_attach, it checks for SS_PRIV */
+ /* don't use raw_usrreqs.pru_attach, it checks for SS_PRIV */
+ error = raw_attach(so, proto);
rp = sotorawcb(so);
if (error) {
FREE(rp, M_PCB);
so->so_pcb = NULL;
so->so_flags |= SOF_PCBCLEARING;
- return error;
+ return (error);
}
- switch(rp->rcb_proto.sp_protocol) {
-//####LD route_cb needs looking
+ switch (rp->rcb_proto.sp_protocol) {
case AF_INET:
- route_cb.ip_count++;
+ atomic_add_32(&route_cb.ip_count, 1);
break;
case AF_INET6:
- route_cb.ip6_count++;
- break;
- case AF_IPX:
- route_cb.ipx_count++;
- break;
- case AF_NS:
- route_cb.ns_count++;
+ atomic_add_32(&route_cb.ip6_count, 1);
break;
}
rp->rcb_faddr = &route_src;
- route_cb.any_count++;
- /* the socket is already locked when we enter rts_attach */
+ atomic_add_32(&route_cb.any_count, 1);
+ /* the socket is already locked when we enter rts_attach */
soisconnected(so);
so->so_options |= SO_USELOOPBACK;
- return 0;
+ return (0);
}
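/*
 * Editor's note (illustrative sketch, not part of this change): the
 * attach path above services a userland routing socket opened as, e.g.,
 *
 *	int s = socket(PF_ROUTE, SOCK_RAW, AF_INET);
 *
 * The third argument becomes rcb_proto.sp_protocol and selects which
 * per-family counter is bumped here (ip_count for AF_INET, ip6_count
 * for AF_INET6); any_count is incremented for every attached routing
 * socket regardless of family.
 */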
static int
rts_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
{
- int error;
- error = raw_usrreqs.pru_bind(so, nam, p); /* xxx just EINVAL */
- return error;
+ return (raw_usrreqs.pru_bind(so, nam, p)); /* xxx just EINVAL */
}
static int
rts_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
- int error;
- error = raw_usrreqs.pru_connect(so, nam, p); /* XXX just EINVAL */
- return error;
+ return (raw_usrreqs.pru_connect(so, nam, p)); /* XXX just EINVAL */
}
/* pru_connect2 is EOPNOTSUPP */
rts_detach(struct socket *so)
{
struct rawcb *rp = sotorawcb(so);
- int error;
- if (rp != 0) {
- switch(rp->rcb_proto.sp_protocol) {
- case AF_INET:
- route_cb.ip_count--;
- break;
- case AF_INET6:
- route_cb.ip6_count--;
- break;
- case AF_IPX:
- route_cb.ipx_count--;
- break;
- case AF_NS:
- route_cb.ns_count--;
- break;
- }
- route_cb.any_count--;
+ VERIFY(rp != NULL);
+
+ switch (rp->rcb_proto.sp_protocol) {
+ case AF_INET:
+ atomic_add_32(&route_cb.ip_count, -1);
+ break;
+ case AF_INET6:
+ atomic_add_32(&route_cb.ip6_count, -1);
+ break;
}
- error = raw_usrreqs.pru_detach(so);
- return error;
+ atomic_add_32(&route_cb.any_count, -1);
+ return (raw_usrreqs.pru_detach(so));
}
static int
rts_disconnect(struct socket *so)
{
- int error;
- error = raw_usrreqs.pru_disconnect(so);
- return error;
+ return (raw_usrreqs.pru_disconnect(so));
}
/* pru_listen is EOPNOTSUPP */
static int
rts_peeraddr(struct socket *so, struct sockaddr **nam)
{
- int error;
- error = raw_usrreqs.pru_peeraddr(so, nam);
- return error;
+ return (raw_usrreqs.pru_peeraddr(so, nam));
}
/* pru_rcvd is EOPNOTSUPP */
static int
rts_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
- struct mbuf *control, struct proc *p)
+ struct mbuf *control, struct proc *p)
{
- int error;
- error = raw_usrreqs.pru_send(so, flags, m, nam, control, p);
- return error;
+ return (raw_usrreqs.pru_send(so, flags, m, nam, control, p));
}
/* pru_sense is null */
static int
rts_shutdown(struct socket *so)
{
- int error;
- error = raw_usrreqs.pru_shutdown(so);
- return error;
+ return (raw_usrreqs.pru_shutdown(so));
}
static int
rts_sockaddr(struct socket *so, struct sockaddr **nam)
{
- int error;
- error = raw_usrreqs.pru_sockaddr(so, nam);
- return error;
+ return (raw_usrreqs.pru_sockaddr(so, nam));
}
static struct pr_usrreqs route_usrreqs = {
- rts_abort, pru_accept_notsupp, rts_attach, rts_bind,
- rts_connect, pru_connect2_notsupp, pru_control_notsupp,
- rts_detach, rts_disconnect, pru_listen_notsupp, rts_peeraddr,
- pru_rcvd_notsupp, pru_rcvoob_notsupp, rts_send, pru_sense_null,
- rts_shutdown, rts_sockaddr, sosend, soreceive, pru_sopoll_notsupp
+ .pru_abort = rts_abort,
+ .pru_attach = rts_attach,
+ .pru_bind = rts_bind,
+ .pru_connect = rts_connect,
+ .pru_detach = rts_detach,
+ .pru_disconnect = rts_disconnect,
+ .pru_peeraddr = rts_peeraddr,
+ .pru_send = rts_send,
+ .pru_shutdown = rts_shutdown,
+ .pru_sockaddr = rts_sockaddr,
+ .pru_sosend = sosend,
+ .pru_soreceive = soreceive,
};
/*ARGSUSED*/
int len, error = 0;
sa_family_t dst_sa_family = 0;
struct ifnet *ifp = NULL;
-#ifndef __APPLE__
- struct proc *curproc = current_proc();
-#endif
struct sockaddr_in dst_in, gate_in;
int sendonlytoself = 0;
unsigned int ifscope = IFSCOPE_NONE;
-
-#define senderr(e) { error = (e); goto flush;}
- if (m == NULL ||
- ((m->m_len < sizeof(intptr_t)) && (m = m_pullup(m, sizeof(intptr_t))) == 0))
+ struct rawcb *rp = NULL;
+ boolean_t is_router = FALSE;
+#define senderr(e) { error = (e); goto flush; }
+ if (m == NULL || ((m->m_len < sizeof (intptr_t)) &&
+ (m = m_pullup(m, sizeof (intptr_t))) == NULL))
return (ENOBUFS);
- if ((m->m_flags & M_PKTHDR) == 0)
- panic("route_output");
+ VERIFY(m->m_flags & M_PKTHDR);
- /* unlock the socket (but keep a reference) it won't be accessed until raw_input appends to it. */
+ /*
+ * Unlock the socket (but keep a reference) it won't be
+ * accessed until raw_input appends to it.
+ */
socket_unlock(so, 0);
lck_mtx_lock(rnh_lock);
len = m->m_pkthdr.len;
- if (len < sizeof(*rtm) ||
+ if (len < sizeof (*rtm) ||
len != mtod(m, struct rt_msghdr *)->rtm_msglen) {
info.rti_info[RTAX_DST] = NULL;
senderr(EINVAL);
* all RTM_GETs to be silent in the future, so this is private for now.
*/
if (rtm->rtm_type == RTM_GET_SILENT) {
- if ((so->so_options & SO_USELOOPBACK) == 0)
+ if (!(so->so_options & SO_USELOOPBACK))
senderr(EINVAL);
sendonlytoself = 1;
rtm->rtm_type = RTM_GET;
* Perform permission checking, only privileged sockets
* may perform operations other than RTM_GET
*/
- if (rtm->rtm_type != RTM_GET && (so->so_state & SS_PRIV) == 0) {
+ if (rtm->rtm_type != RTM_GET && !(so->so_state & SS_PRIV)) {
info.rti_info[RTAX_DST] = NULL;
senderr(EPERM);
}
info.rti_info[RTAX_DST] = NULL;
senderr(EINVAL);
}
- if (info.rti_info[RTAX_DST] == NULL || (info.rti_info[RTAX_DST]->sa_family >= AF_MAX) ||
- (info.rti_info[RTAX_GATEWAY] != NULL && (info.rti_info[RTAX_GATEWAY]->sa_family >= AF_MAX))) {
+ if (info.rti_info[RTAX_DST] == NULL ||
+ info.rti_info[RTAX_DST]->sa_family >= AF_MAX ||
+ (info.rti_info[RTAX_GATEWAY] != NULL &&
+ info.rti_info[RTAX_GATEWAY]->sa_family >= AF_MAX))
senderr(EINVAL);
- }
- if (info.rti_info[RTAX_DST]->sa_family == AF_INET && info.rti_info[RTAX_DST]->sa_len != sizeof (dst_in)) {
+ if (info.rti_info[RTAX_DST]->sa_family == AF_INET &&
+ info.rti_info[RTAX_DST]->sa_len != sizeof (dst_in)) {
/* At minimum, we need up to sin_addr */
- if (info.rti_info[RTAX_DST]->sa_len < offsetof(struct sockaddr_in, sin_zero))
+ if (info.rti_info[RTAX_DST]->sa_len <
+ offsetof(struct sockaddr_in, sin_zero))
senderr(EINVAL);
bzero(&dst_in, sizeof (dst_in));
dst_in.sin_len = sizeof (dst_in);
}
if (info.rti_info[RTAX_GATEWAY] != NULL &&
- info.rti_info[RTAX_GATEWAY]->sa_family == AF_INET && info.rti_info[RTAX_GATEWAY]->sa_len != sizeof (gate_in)) {
+ info.rti_info[RTAX_GATEWAY]->sa_family == AF_INET &&
+ info.rti_info[RTAX_GATEWAY]->sa_len != sizeof (gate_in)) {
/* At minimum, we need up to sin_addr */
- if (info.rti_info[RTAX_GATEWAY]->sa_len < offsetof(struct sockaddr_in, sin_zero))
+ if (info.rti_info[RTAX_GATEWAY]->sa_len <
+ offsetof(struct sockaddr_in, sin_zero))
senderr(EINVAL);
bzero(&gate_in, sizeof (gate_in));
gate_in.sin_len = sizeof (gate_in);
if (info.rti_info[RTAX_GENMASK]) {
struct radix_node *t;
t = rn_addmask((caddr_t)info.rti_info[RTAX_GENMASK], 0, 1);
- if (t && Bcmp(info.rti_info[RTAX_GENMASK], t->rn_key, *(u_char *)info.rti_info[RTAX_GENMASK]) == 0)
- info.rti_info[RTAX_GENMASK] = (struct sockaddr *)(t->rn_key);
+ if (t != NULL && Bcmp(info.rti_info[RTAX_GENMASK],
+ t->rn_key, *(u_char *)info.rti_info[RTAX_GENMASK]) == 0)
+ info.rti_info[RTAX_GENMASK] =
+ (struct sockaddr *)(t->rn_key);
else
senderr(ENOBUFS);
}
* If RTF_IFSCOPE flag is set, then rtm_index specifies the scope.
*/
if (rtm->rtm_flags & RTF_IFSCOPE) {
- if (info.rti_info[RTAX_DST]->sa_family != AF_INET && info.rti_info[RTAX_DST]->sa_family != AF_INET6)
+ if (info.rti_info[RTAX_DST]->sa_family != AF_INET &&
+ info.rti_info[RTAX_DST]->sa_family != AF_INET6)
senderr(EINVAL);
ifscope = rtm->rtm_index;
}
+ /*
+ * Block changes on INTCOPROC interfaces.
+ */
+ if (ifscope) {
+ unsigned int intcoproc_scope = 0;
+ ifnet_head_lock_shared();
+ TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
+ if (IFNET_IS_INTCOPROC(ifp)) {
+ intcoproc_scope = ifp->if_index;
+ break;
+ }
+ }
+ ifnet_head_done();
+ if (intcoproc_scope == ifscope && current_proc()->p_pid != 0)
+ senderr(EINVAL);
+ }
/*
* RTF_PROXY can only be set internally from within the kernel.
*/
if (info.rti_info[RTAX_DST]->sa_family == AF_INET)
sin_set_ifscope(info.rti_info[RTAX_DST], IFSCOPE_NONE);
- if (info.rti_info[RTAX_GATEWAY] != NULL && info.rti_info[RTAX_GATEWAY]->sa_family == AF_INET)
+ if (info.rti_info[RTAX_GATEWAY] != NULL &&
+ info.rti_info[RTAX_GATEWAY]->sa_family == AF_INET)
sin_set_ifscope(info.rti_info[RTAX_GATEWAY], IFSCOPE_NONE);
switch (rtm->rtm_type) {
+ case RTM_ADD:
+ if (info.rti_info[RTAX_GATEWAY] == NULL)
+ senderr(EINVAL);
- case RTM_ADD:
- if (info.rti_info[RTAX_GATEWAY] == NULL)
- senderr(EINVAL);
-
-#ifdef __APPLE__
-/* XXX LD11JUL02 Special case for AOL 5.1.2 connectivity issue to AirPort BS (Radar 2969954)
- * AOL is adding a circular route ("10.0.1.1/32 10.0.1.1") when establishing its ppp tunnel
- * to the AP BaseStation by removing the default gateway and replacing it with their tunnel entry point.
- * There is no apparent reason to add this route as there is a valid 10.0.1.1/24 route to the BS.
- * That circular route was ignored on previous version of MacOS X because of a routing bug
- * corrected with the merge to FreeBSD4.4 (a route generated from an RTF_CLONING route had the RTF_WASCLONED
- * flag set but did not have a reference to the parent route) and that entry was left in the RT. This workaround is
- * made in order to provide binary compatibility with AOL.
- * If we catch a process adding a circular route with a /32 from the routing socket, we error it out instead of
- * confusing the routing table with a wrong route to the previous default gateway
- */
-{
-#define satosinaddr(sa) (((struct sockaddr_in *)(void *)sa)->sin_addr.s_addr)
-
- if (check_routeselfref && (info.rti_info[RTAX_DST] && info.rti_info[RTAX_DST]->sa_family == AF_INET) &&
- (info.rti_info[RTAX_NETMASK] && satosinaddr(info.rti_info[RTAX_NETMASK]) == INADDR_BROADCAST) &&
- (info.rti_info[RTAX_GATEWAY] && satosinaddr(info.rti_info[RTAX_DST]) == satosinaddr(info.rti_info[RTAX_GATEWAY]))) {
- log(LOG_WARNING, "route_output: circular route %ld.%ld.%ld.%ld/32 ignored\n",
- (ntohl(satosinaddr(info.rti_info[RTAX_GATEWAY])>>24))&0xff,
- (ntohl(satosinaddr(info.rti_info[RTAX_GATEWAY])>>16))&0xff,
- (ntohl(satosinaddr(info.rti_info[RTAX_GATEWAY])>>8))&0xff,
- (ntohl(satosinaddr(info.rti_info[RTAX_GATEWAY])))&0xff);
-
- senderr(EINVAL);
+ error = rtrequest_scoped_locked(RTM_ADD,
+ info.rti_info[RTAX_DST], info.rti_info[RTAX_GATEWAY],
+ info.rti_info[RTAX_NETMASK], rtm->rtm_flags, &saved_nrt,
+ ifscope);
+ if (error == 0 && saved_nrt != NULL) {
+ RT_LOCK(saved_nrt);
+ /*
+ * If the route request specified an interface with
+ * IFA and/or IFP, we set the requested interface on
+ * the route with rt_setif. It would be much better
+ * to do this inside rtrequest, but that would
+ * require passing the desired interface, in some
+ * form, to rtrequest. Since rtrequest is called in
+ * so many places (roughly 40 in our source), adding
+ * a parameter is too much for us to swallow; this is
+ * something for the FreeBSD developers to tackle.
+ * Instead, we let rtrequest compute whatever
+ * interface it wants, then come in behind it and
+ * stick in the interface that we really want. This
+ * works reasonably well except when rtrequest can't
+ * figure out what interface to use (with
+ * ifa_withroute) and returns ENETUNREACH. Ideally
+ * it shouldn't matter if rtrequest can't figure out
+ * the interface if we're going to explicitly set it
+ * ourselves anyway. But practically we can't
+ * recover here because rtrequest will not do any of
+ * the work necessary to add the route if it can't
+ * find an interface. As long as there is a default
+ * route that leads to some interface, rtrequest will
+ * find an interface, so this problem should be
+ * rarely encountered.
+ * dwiggins@bbn.com
+ */
+ rt_setif(saved_nrt,
+ info.rti_info[RTAX_IFP], info.rti_info[RTAX_IFA],
+ info.rti_info[RTAX_GATEWAY], ifscope);
+ (void)rt_setmetrics(rtm->rtm_inits, &rtm->rtm_rmx, saved_nrt);
+ saved_nrt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits);
+ saved_nrt->rt_rmx.rmx_locks |=
+ (rtm->rtm_inits & rtm->rtm_rmx.rmx_locks);
+ saved_nrt->rt_genmask = info.rti_info[RTAX_GENMASK];
+ RT_REMREF_LOCKED(saved_nrt);
+ RT_UNLOCK(saved_nrt);
+ }
+ break;
+
+ case RTM_DELETE:
+ error = rtrequest_scoped_locked(RTM_DELETE,
+ info.rti_info[RTAX_DST], info.rti_info[RTAX_GATEWAY],
+ info.rti_info[RTAX_NETMASK], rtm->rtm_flags, &saved_nrt,
+ ifscope);
+ if (error == 0) {
+ rt = saved_nrt;
+ RT_LOCK(rt);
+ goto report;
+ }
+ break;
+
+ case RTM_GET:
+ case RTM_CHANGE:
+ case RTM_LOCK:
+ rnh = rt_tables[info.rti_info[RTAX_DST]->sa_family];
+ if (rnh == NULL)
+ senderr(EAFNOSUPPORT);
+ /*
+ * Lookup the best match based on the key-mask pair;
+ * callee adds a reference and checks for root node.
+ */
+ rt = rt_lookup(TRUE, info.rti_info[RTAX_DST],
+ info.rti_info[RTAX_NETMASK], rnh, ifscope);
+ if (rt == NULL)
+ senderr(ESRCH);
+ RT_LOCK(rt);
+
+ /*
+ * Holding rnh_lock here prevents the possibility of
+ * ifa from changing (e.g. in_ifinit), so it is safe
+ * to access its ifa_addr (down below) without locking.
+ */
+ switch (rtm->rtm_type) {
+ case RTM_GET: {
+ kauth_cred_t cred;
+ struct ifaddr *ifa2;
+report:
+ cred = kauth_cred_proc_ref(current_proc());
+ ifa2 = NULL;
+ RT_LOCK_ASSERT_HELD(rt);
+ info.rti_info[RTAX_DST] = rt_key(rt);
+ dst_sa_family = info.rti_info[RTAX_DST]->sa_family;
+ info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
+ info.rti_info[RTAX_NETMASK] = rt_mask(rt);
+ info.rti_info[RTAX_GENMASK] = rt->rt_genmask;
+ if (rtm->rtm_addrs & (RTA_IFP | RTA_IFA)) {
+ ifp = rt->rt_ifp;
+ if (ifp != NULL) {
+ ifnet_lock_shared(ifp);
+ ifa2 = ifp->if_lladdr;
+ info.rti_info[RTAX_IFP] =
+ ifa2->ifa_addr;
+ IFA_ADDREF(ifa2);
+ ifnet_lock_done(ifp);
+ info.rti_info[RTAX_IFA] =
+ rt->rt_ifa->ifa_addr;
+ rtm->rtm_index = ifp->if_index;
+ } else {
+ info.rti_info[RTAX_IFP] = NULL;
+ info.rti_info[RTAX_IFA] = NULL;
+ }
+ } else if ((ifp = rt->rt_ifp) != NULL) {
+ rtm->rtm_index = ifp->if_index;
}
-}
-#endif
- error = rtrequest_scoped_locked(RTM_ADD, info.rti_info[RTAX_DST], info.rti_info[RTAX_GATEWAY],
- info.rti_info[RTAX_NETMASK], rtm->rtm_flags, &saved_nrt, ifscope);
- if (error == 0 && saved_nrt) {
- RT_LOCK(saved_nrt);
-#ifdef __APPLE__
- /*
- * If the route request specified an interface with
- * IFA and/or IFP, we set the requested interface on
- * the route with rt_setif. It would be much better
- * to do this inside rtrequest, but that would
- * require passing the desired interface, in some
- * form, to rtrequest. Since rtrequest is called in
- * so many places (roughly 40 in our source), adding
- * a parameter is to much for us to swallow; this is
- * something for the FreeBSD developers to tackle.
- * Instead, we let rtrequest compute whatever
- * interface it wants, then come in behind it and
- * stick in the interface that we really want. This
- * works reasonably well except when rtrequest can't
- * figure out what interface to use (with
- * ifa_withroute) and returns ENETUNREACH. Ideally
- * it shouldn't matter if rtrequest can't figure out
- * the interface if we're going to explicitly set it
- * ourselves anyway. But practically we can't
- * recover here because rtrequest will not do any of
- * the work necessary to add the route if it can't
- * find an interface. As long as there is a default
- * route that leads to some interface, rtrequest will
- * find an interface, so this problem should be
- * rarely encountered.
- * dwiggins@bbn.com
- */
-
- rt_setif(saved_nrt, info.rti_info[RTAX_IFP], info.rti_info[RTAX_IFA], info.rti_info[RTAX_GATEWAY],
- ifscope);
-#endif
- rt_setmetrics(rtm->rtm_inits,
- &rtm->rtm_rmx, saved_nrt);
- saved_nrt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits);
- saved_nrt->rt_rmx.rmx_locks |=
- (rtm->rtm_inits & rtm->rtm_rmx.rmx_locks);
- saved_nrt->rt_genmask = info.rti_info[RTAX_GENMASK];
- RT_REMREF_LOCKED(saved_nrt);
- RT_UNLOCK(saved_nrt);
+ if (ifa2 != NULL)
+ IFA_LOCK(ifa2);
+ len = rt_msg2(rtm->rtm_type, &info, NULL, NULL, &cred);
+ if (ifa2 != NULL)
+ IFA_UNLOCK(ifa2);
+ if (len > rtm->rtm_msglen) {
+ struct rt_msghdr *new_rtm;
+ R_Malloc(new_rtm, struct rt_msghdr *, len);
+ if (new_rtm == NULL) {
+ RT_UNLOCK(rt);
+ if (ifa2 != NULL)
+ IFA_REMREF(ifa2);
+ senderr(ENOBUFS);
+ }
+ Bcopy(rtm, new_rtm, rtm->rtm_msglen);
+ R_Free(rtm); rtm = new_rtm;
}
- break;
+ if (ifa2 != NULL)
+ IFA_LOCK(ifa2);
+ (void) rt_msg2(rtm->rtm_type, &info, (caddr_t)rtm,
+ NULL, &cred);
+ if (ifa2 != NULL)
+ IFA_UNLOCK(ifa2);
+ rtm->rtm_flags = rt->rt_flags;
+ rt_getmetrics(rt, &rtm->rtm_rmx);
+ rtm->rtm_addrs = info.rti_addrs;
+ if (ifa2 != NULL)
+ IFA_REMREF(ifa2);
- case RTM_DELETE:
- error = rtrequest_scoped_locked(RTM_DELETE, info.rti_info[RTAX_DST],
- info.rti_info[RTAX_GATEWAY], info.rti_info[RTAX_NETMASK], rtm->rtm_flags, &saved_nrt, ifscope);
- if (error == 0) {
- rt = saved_nrt;
- RT_LOCK(rt);
- goto report;
- }
+ kauth_cred_unref(&cred);
break;
+ }
- case RTM_GET:
case RTM_CHANGE:
- case RTM_LOCK:
- if ((rnh = rt_tables[info.rti_info[RTAX_DST]->sa_family]) == NULL)
- senderr(EAFNOSUPPORT);
-
+ is_router = (rt->rt_flags & RTF_ROUTER) ? TRUE : FALSE;
+
+ if (info.rti_info[RTAX_GATEWAY] != NULL &&
+ (error = rt_setgate(rt, rt_key(rt),
+ info.rti_info[RTAX_GATEWAY]))) {
+ int tmp = error;
+ RT_UNLOCK(rt);
+ senderr(tmp);
+ }
/*
- * Lookup the best match based on the key-mask pair;
- * callee adds a reference and checks for root node.
+ * If they tried to change things but didn't specify
+ * the required gateway, then just use the old one.
+ * This can happen if the user tries to change the
+ * flags on the default route without changing the
+ * default gateway. Changing flags still doesn't work.
*/
- rt = rt_lookup(TRUE, info.rti_info[RTAX_DST], info.rti_info[RTAX_NETMASK], rnh, ifscope);
- if (rt == NULL)
- senderr(ESRCH);
- RT_LOCK(rt);
+ if ((rt->rt_flags & RTF_GATEWAY) &&
+ info.rti_info[RTAX_GATEWAY] == NULL)
+ info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
/*
- * Holding rnh_lock here prevents the possibility of
- * ifa from changing (e.g. in_ifinit), so it is safe
- * to access its ifa_addr (down below) without locking.
+ * On Darwin, we call rt_setif which contains the
+ * equivalent to the code found at this very spot
+ * in BSD.
*/
- switch(rtm->rtm_type) {
-
- case RTM_GET: {
- struct ifaddr *ifa2;
- report:
- ifa2 = NULL;
- RT_LOCK_ASSERT_HELD(rt);
- info.rti_info[RTAX_DST] = rt_key(rt);
- dst_sa_family = info.rti_info[RTAX_DST]->sa_family;
- info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
- info.rti_info[RTAX_NETMASK] = rt_mask(rt);
- info.rti_info[RTAX_GENMASK] = rt->rt_genmask;
- if (rtm->rtm_addrs & (RTA_IFP | RTA_IFA)) {
- ifp = rt->rt_ifp;
- if (ifp) {
- ifnet_lock_shared(ifp);
- ifa2 = ifp->if_lladdr;
- info.rti_info[RTAX_IFP] = ifa2->ifa_addr;
- IFA_ADDREF(ifa2);
- ifnet_lock_done(ifp);
- info.rti_info[RTAX_IFA] = rt->rt_ifa->ifa_addr;
- rtm->rtm_index = ifp->if_index;
- } else {
- info.rti_info[RTAX_IFP] = NULL;
- info.rti_info[RTAX_IFA] = NULL;
- }
- } else if ((ifp = rt->rt_ifp) != NULL) {
- rtm->rtm_index = ifp->if_index;
- }
- if (ifa2 != NULL)
- IFA_LOCK(ifa2);
- len = rt_msg2(rtm->rtm_type, &info, (caddr_t)0,
- (struct walkarg *)0);
- if (ifa2 != NULL)
- IFA_UNLOCK(ifa2);
- if (len > rtm->rtm_msglen) {
- struct rt_msghdr *new_rtm;
- R_Malloc(new_rtm, struct rt_msghdr *, len);
- if (new_rtm == 0) {
- RT_UNLOCK(rt);
- if (ifa2 != NULL)
- IFA_REMREF(ifa2);
- senderr(ENOBUFS);
- }
- Bcopy(rtm, new_rtm, rtm->rtm_msglen);
- R_Free(rtm); rtm = new_rtm;
- }
- if (ifa2 != NULL)
- IFA_LOCK(ifa2);
- (void)rt_msg2(rtm->rtm_type, &info, (caddr_t)rtm,
- (struct walkarg *)0);
- if (ifa2 != NULL)
- IFA_UNLOCK(ifa2);
- rtm->rtm_flags = rt->rt_flags;
- rt_getmetrics(rt, &rtm->rtm_rmx);
- rtm->rtm_addrs = info.rti_addrs;
- if (ifa2 != NULL)
- IFA_REMREF(ifa2);
- }
- break;
+ rt_setif(rt,
+ info.rti_info[RTAX_IFP], info.rti_info[RTAX_IFA],
+ info.rti_info[RTAX_GATEWAY], ifscope);
+
+ if ((error = rt_setmetrics(rtm->rtm_inits,
+ &rtm->rtm_rmx, rt))) {
+ int tmp = error;
+ RT_UNLOCK(rt);
+ senderr(tmp);
+ }
+ if (info.rti_info[RTAX_GENMASK])
+ rt->rt_genmask = info.rti_info[RTAX_GENMASK];
- case RTM_CHANGE:
- if (info.rti_info[RTAX_GATEWAY] && (error = rt_setgate(rt,
- rt_key(rt), info.rti_info[RTAX_GATEWAY]))) {
- int tmp = error;
- RT_UNLOCK(rt);
- senderr(tmp);
- }
- /*
- * If they tried to change things but didn't specify
- * the required gateway, then just use the old one.
- * This can happen if the user tries to change the
- * flags on the default route without changing the
- * default gateway. Changing flags still doesn't work.
- */
- if ((rt->rt_flags & RTF_GATEWAY) && !info.rti_info[RTAX_GATEWAY])
- info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
-
-#ifdef __APPLE__
- /*
- * On Darwin, we call rt_setif which contains the
- * equivalent to the code found at this very spot
- * in BSD.
- */
- rt_setif(rt, info.rti_info[RTAX_IFP], info.rti_info[RTAX_IFA], info.rti_info[RTAX_GATEWAY],
- ifscope);
-#endif
-
- rt_setmetrics(rtm->rtm_inits, &rtm->rtm_rmx,
- rt);
-#ifndef __APPLE__
- /* rt_setif, called above does this for us on darwin */
- if (rt->rt_ifa && rt->rt_ifa->ifa_rtrequest)
- rt->rt_ifa->ifa_rtrequest(RTM_ADD, rt, info.rti_info[RTAX_GATEWAY]);
-#endif
- if (info.rti_info[RTAX_GENMASK])
- rt->rt_genmask = info.rti_info[RTAX_GENMASK];
- /*
- * Fall into
- */
- case RTM_LOCK:
- rt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits);
- rt->rt_rmx.rmx_locks |=
- (rtm->rtm_inits & rtm->rtm_rmx.rmx_locks);
- break;
- }
- RT_UNLOCK(rt);
+ /*
+ * Enqueue a work item to invoke the callback for this route entry.
+ * This may not always be needed, but for now issue it any time
+ * RTM_CHANGE gets called.
+ */
+ route_event_enqueue_nwk_wq_entry(rt, NULL, ROUTE_ENTRY_REFRESH, NULL, TRUE);
+ /*
+ * If the route is for a router, walk the tree to send a refresh
+ * event to the protocol-cloned entries.
+ */
+ if (is_router) {
+ struct route_event rt_ev;
+ route_event_init(&rt_ev, rt, NULL, ROUTE_ENTRY_REFRESH);
+ RT_UNLOCK(rt);
+ (void) rnh->rnh_walktree(rnh, route_event_walktree, (void *)&rt_ev);
+ RT_LOCK(rt);
+ }
+ /* FALLTHRU */
+ case RTM_LOCK:
+ rt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits);
+ rt->rt_rmx.rmx_locks |=
+ (rtm->rtm_inits & rtm->rtm_rmx.rmx_locks);
break;
-
- default:
- senderr(EOPNOTSUPP);
+ }
+ RT_UNLOCK(rt);
+ break;
+
+ default:
+ senderr(EOPNOTSUPP);
}
flush:
- if (rtm) {
+ if (rtm != NULL) {
if (error)
rtm->rtm_errno = error;
else
rtfree_locked(rt);
}
lck_mtx_unlock(rnh_lock);
- socket_lock(so, 0); /* relock the socket now */
- {
- struct rawcb *rp = 0;
+
+ /* relock the socket now */
+ socket_lock(so, 0);
/*
* Check to see if we don't want our own messages.
*/
- if ((so->so_options & SO_USELOOPBACK) == 0) {
+ if (!(so->so_options & SO_USELOOPBACK)) {
if (route_cb.any_count <= 1) {
- if (rtm)
+ if (rtm != NULL)
R_Free(rtm);
m_freem(m);
return (error);
/* There is another listener, so construct message */
rp = sotorawcb(so);
}
- if (rtm) {
+ if (rtm != NULL) {
m_copyback(m, 0, rtm->rtm_msglen, (caddr_t)rtm);
if (m->m_pkthdr.len < rtm->rtm_msglen) {
m_freem(m);
m = NULL;
- } else if (m->m_pkthdr.len > rtm->rtm_msglen)
+ } else if (m->m_pkthdr.len > rtm->rtm_msglen) {
m_adj(m, rtm->rtm_msglen - m->m_pkthdr.len);
+ }
R_Free(rtm);
}
- if (sendonlytoself && m) {
+ if (sendonlytoself && m != NULL) {
error = 0;
- if (sbappendaddr(&so->so_rcv, &route_src, m, (struct mbuf*)0, &error) != 0) {
+ if (sbappendaddr(&so->so_rcv, &route_src, m,
+ NULL, &error) != 0) {
sorwakeup(so);
}
if (error)
- return error;
+ return (error);
} else {
- struct sockproto route_proto = {PF_ROUTE, 0};
- if (rp)
+ struct sockproto route_proto = { PF_ROUTE, 0 };
+ if (rp != NULL)
rp->rcb_proto.sp_family = 0; /* Avoid us */
if (dst_sa_family != 0)
route_proto.sp_protocol = dst_sa_family;
- if (m) {
+ if (m != NULL) {
socket_unlock(so, 0);
raw_input(m, &route_proto, &route_src, &route_dst);
socket_lock(so, 0);
}
- if (rp)
+ if (rp != NULL)
rp->rcb_proto.sp_family = PF_ROUTE;
- }
}
return (error);
}
if (expiry) {
rt->rt_rmx.rmx_expire = expiry + rt->base_calendartime -
rt->base_uptime;
- } else
+ } else {
rt->rt_rmx.rmx_expire = 0;
+ }
}
-static void
+static int
rt_setmetrics(u_int32_t which, struct rt_metrics *in, struct rtentry *out)
{
- struct timeval curr_calendar_time;
- uint64_t curr_uptime;
-
- getmicrotime(&curr_calendar_time);
- curr_uptime = net_uptime();
-
-#define metric(f, e) if (which & (f)) out->rt_rmx.e = in->e;
- metric(RTV_RPIPE, rmx_recvpipe);
- metric(RTV_SPIPE, rmx_sendpipe);
- metric(RTV_SSTHRESH, rmx_ssthresh);
- metric(RTV_RTT, rmx_rtt);
- metric(RTV_RTTVAR, rmx_rttvar);
- metric(RTV_HOPCOUNT, rmx_hopcount);
- metric(RTV_MTU, rmx_mtu);
- metric(RTV_EXPIRE, rmx_expire);
+ if (!(which & RTV_REFRESH_HOST)) {
+ struct timeval caltime;
+ getmicrotime(&caltime);
+#define metric(f, e) if (which & (f)) out->rt_rmx.e = in->e;
+ metric(RTV_RPIPE, rmx_recvpipe);
+ metric(RTV_SPIPE, rmx_sendpipe);
+ metric(RTV_SSTHRESH, rmx_ssthresh);
+ metric(RTV_RTT, rmx_rtt);
+ metric(RTV_RTTVAR, rmx_rttvar);
+ metric(RTV_HOPCOUNT, rmx_hopcount);
+ metric(RTV_MTU, rmx_mtu);
+ metric(RTV_EXPIRE, rmx_expire);
#undef metric
-
- if (out->rt_rmx.rmx_expire > 0) {
- /* account for system time change */
- curr_uptime = net_uptime();
- getmicrotime(&curr_calendar_time);
- out->base_calendartime +=
- CALCULATE_CLOCKSKEW(curr_calendar_time,
- out->base_calendartime,
- curr_uptime, out->base_uptime);
- rt_setexpire(out,
- out->rt_rmx.rmx_expire -
- out->base_calendartime +
- out->base_uptime);
+ if (out->rt_rmx.rmx_expire > 0) {
+ /* account for system time change */
+ getmicrotime(&caltime);
+ out->base_calendartime +=
+ NET_CALCULATE_CLOCKSKEW(caltime,
+ out->base_calendartime,
+ net_uptime(), out->base_uptime);
+ rt_setexpire(out,
+ out->rt_rmx.rmx_expire -
+ out->base_calendartime +
+ out->base_uptime);
+ } else {
+ rt_setexpire(out, 0);
+ }
+
+ VERIFY(out->rt_expire == 0 || out->rt_rmx.rmx_expire != 0);
+ VERIFY(out->rt_expire != 0 || out->rt_rmx.rmx_expire == 0);
} else {
- rt_setexpire(out, 0);
+ /* Only RTV_REFRESH_HOST must be set */
+ if ((which & ~RTV_REFRESH_HOST) ||
+ (out->rt_flags & RTF_STATIC) ||
+ !(out->rt_flags & RTF_LLINFO)) {
+ return (EINVAL);
+ }
+
+ if (out->rt_llinfo_refresh == NULL) {
+ return (ENOTSUP);
+ }
+
+ out->rt_llinfo_refresh(out);
}
-
- VERIFY(out->rt_expire == 0 || out->rt_rmx.rmx_expire != 0);
- VERIFY(out->rt_expire != 0 || out->rt_rmx.rmx_expire == 0);
+ return (0);
}
static void
rt_getmetrics(struct rtentry *in, struct rt_metrics *out)
{
- struct timeval curr_calendar_time;
- uint64_t curr_uptime;
+ struct timeval caltime;
VERIFY(in->rt_expire == 0 || in->rt_rmx.rmx_expire != 0);
VERIFY(in->rt_expire != 0 || in->rt_rmx.rmx_expire == 0);
-
- *out = in->rt_rmx;
-
- if (in->rt_expire) {
+
+ *out = in->rt_rmx;
+
+ if (in->rt_expire != 0) {
/* account for system time change */
- getmicrotime(&curr_calendar_time);
- curr_uptime = net_uptime();
+ getmicrotime(&caltime);
in->base_calendartime +=
- CALCULATE_CLOCKSKEW(curr_calendar_time,
- in->base_calendartime,
- curr_uptime, in->base_uptime);
-
+ NET_CALCULATE_CLOCKSKEW(caltime,
+ in->base_calendartime, net_uptime(), in->base_uptime);
+
out->rmx_expire = in->base_calendartime +
in->rt_expire - in->base_uptime;
- } else
+ } else {
out->rmx_expire = 0;
+ }
}
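/*
 * Editor's sketch of the clock-skew handling in rt_setmetrics() and
 * rt_getmetrics() above; NET_CALCULATE_CLOCKSKEW is assumed to compute
 * the same difference the removed CALCULATE_CLOCKSKEW macro did:
 *
 *	skew = (caltime.tv_sec - base_calendartime)
 *	     - (net_uptime()  - base_uptime);
 *	base_calendartime += skew;
 *
 * Routes keep rt_expire as an uptime deadline while userland expects
 * rmx_expire in calendar time; folding the skew into base_calendartime
 * keeps "rmx_expire - base_calendartime + base_uptime" a valid uptime
 * value even if the wall clock was stepped after the route was created.
 */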
/*
- * Set route's interface given info.rti_info[RTAX_IFP], info.rti_info[RTAX_IFA], and gateway.
+ * Set route's interface given info.rti_info[RTAX_IFP],
+ * info.rti_info[RTAX_IFA], and gateway.
*/
static void
rt_setif(struct rtentry *rt, struct sockaddr *Ifpaddr, struct sockaddr *Ifaaddr,
{
struct ifaddr *ifa = NULL;
struct ifnet *ifp = NULL;
- void (*ifa_rtrequest)
- (int, struct rtentry *, struct sockaddr *);
+ void (*ifa_rtrequest)(int, struct rtentry *, struct sockaddr *);
- lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
RT_LOCK_ASSERT_HELD(rt);
- /* trigger route cache reevaluation */
- if (use_routegenid)
- routegenid_update();
-
/* Don't update a defunct route */
if (rt->rt_flags & RTF_CONDEMNED)
return;
IFA_REMREF(ifa);
ifa = ifaof_ifpforaddr(Ifaaddr ? Ifaaddr : Gate, ifp);
} else {
- if (ifa) {
+ if (ifa != NULL) {
IFA_REMREF(ifa);
- ifa = 0;
+ ifa = NULL;
}
- if (Ifpaddr && (ifp = if_withname(Ifpaddr)) ) {
+ if (Ifpaddr && (ifp = if_withname(Ifpaddr))) {
if (Gate) {
ifa = ifaof_ifpforaddr(Gate, ifp);
} else {
}
}
}
- if (ifa) {
+
+ /* trigger route cache reevaluation */
+ if (rt_key(rt)->sa_family == AF_INET)
+ routegenid_inet_update();
+#if INET6
+ else if (rt_key(rt)->sa_family == AF_INET6)
+ routegenid_inet6_update();
+#endif /* INET6 */
+
+ if (ifa != NULL) {
struct ifaddr *oifa = rt->rt_ifa;
if (oifa != ifa) {
if (oifa != NULL) {
set_primary_ifscope(rt_key(rt)->sa_family,
rt->rt_ifp->if_index);
}
- rt->rt_rmx.rmx_mtu = ifp->if_mtu;
+ /*
+ * If rmx_mtu is not locked, update it
+ * to the MTU used by the new interface.
+ */
+ if (!(rt->rt_rmx.rmx_locks & RTV_MTU))
+ rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;
+
if (rt->rt_ifa != NULL) {
IFA_LOCK_SPIN(rt->rt_ifa);
ifa_rtrequest = rt->rt_ifa->ifa_rtrequest;
return;
}
IFA_REMREF(ifa);
+ ifa = NULL;
}
/* XXX: to reset gateway to correct value, at RTM_CHANGE */
ifa_rtrequest(RTM_ADD, rt, Gate);
}
+ /*
+ * Workaround for local address routes pointing to the loopback
+ * interface added by configd, until <rdar://problem/12970142>.
+ */
+ if ((rt->rt_ifp->if_flags & IFF_LOOPBACK) &&
+ (rt->rt_flags & RTF_HOST) && rt->rt_ifa->ifa_ifp == rt->rt_ifp) {
+ ifa = ifa_ifwithaddr(rt_key(rt));
+ if (ifa != NULL) {
+ if (ifa != rt->rt_ifa)
+ rtsetifa(rt, ifa);
+ IFA_REMREF(ifa);
+ }
+ }
+
/* Release extra ref */
RT_REMREF_LOCKED(rt);
}
-#define ROUNDUP32(a) \
- ((a) > 0 ? (1 + (((a) - 1) | (sizeof(uint32_t) - 1))) : sizeof(uint32_t))
-#define ADVANCE32(x, n) (x += ROUNDUP32((n)->sa_len))
-
-
/*
* Extract the addresses of the passed sockaddrs.
* Do a little sanity checking so as to avoid bad memory references.
struct sockaddr *sa;
int i;
- bzero(rtinfo->rti_info, sizeof(rtinfo->rti_info));
+ bzero(rtinfo->rti_info, sizeof (rtinfo->rti_info));
for (i = 0; (i < RTAX_MAX) && (cp < cplim); i++) {
if ((rtinfo->rti_addrs & (1 << i)) == 0)
continue;
/*
* It won't fit.
*/
- if ( (cp + sa->sa_len) > cplim ) {
+ if ((cp + sa->sa_len) > cplim)
return (EINVAL);
- }
-
/*
* there are no more.. quit now
* If there are more bits, they are in error.
- * I've seen this. route(1) can evidently generate these.
+ * I've seen this. route(1) can evidently generate these.
* This causes the kernel to core dump.
* For compatibility, if we see this, point to a safe address.
*/
rtinfo->rti_info[i] = &sa_zero;
return (0); /* should be EINVAL but for compat */
}
-
/* accept it */
rtinfo->rti_info[i] = sa;
ADVANCE32(cp, sa);
struct rt_msghdr *rtm;
struct mbuf *m;
int i;
- int len, dlen;
+ int len, dlen, off;
switch (type) {
case RTM_DELADDR:
case RTM_NEWADDR:
- len = sizeof(struct ifa_msghdr);
+ len = sizeof (struct ifa_msghdr);
break;
case RTM_DELMADDR:
case RTM_NEWMADDR:
- len = sizeof(struct ifma_msghdr);
+ len = sizeof (struct ifma_msghdr);
break;
case RTM_IFINFO:
- len = sizeof(struct if_msghdr);
+ len = sizeof (struct if_msghdr);
break;
default:
- len = sizeof(struct rt_msghdr);
+ len = sizeof (struct rt_msghdr);
}
- if (len > MCLBYTES)
- panic("rt_msg1");
m = m_gethdr(M_DONTWAIT, MT_DATA);
if (m && len > MHLEN) {
MCLGET(m, M_DONTWAIT);
- if ((m->m_flags & M_EXT) == 0) {
+ if (!(m->m_flags & M_EXT)) {
m_free(m);
m = NULL;
}
}
- if (m == 0)
- return (m);
+ if (m == NULL)
+ return (NULL);
m->m_pkthdr.len = m->m_len = len;
- m->m_pkthdr.rcvif = 0;
+ m->m_pkthdr.rcvif = NULL;
rtm = mtod(m, struct rt_msghdr *);
bzero((caddr_t)rtm, len);
+ off = len;
for (i = 0; i < RTAX_MAX; i++) {
struct sockaddr *sa, *hint;
- struct sockaddr_storage ss;
+ uint8_t ssbuf[SOCK_MAXADDRLEN + 1];
+
+ /*
+ * Make sure to accommodate the largest possible size of sa_len.
+ */
+ _CASSERT(sizeof (ssbuf) == (SOCK_MAXADDRLEN + 1));
if ((sa = rtinfo->rti_info[i]) == NULL)
continue;
hint = rtinfo->rti_info[RTAX_IFA];
/* Scrub away any trace of embedded interface scope */
- sa = rtm_scrub_ifscope(type, i, hint, sa, &ss);
+ sa = rtm_scrub(type, i, hint, sa, &ssbuf,
+ sizeof (ssbuf), NULL);
break;
default:
}
rtinfo->rti_addrs |= (1 << i);
- dlen = ROUNDUP32(sa->sa_len);
- m_copyback(m, len, dlen, (caddr_t)sa);
- len += dlen;
+ dlen = sa->sa_len;
+ m_copyback(m, off, dlen, (caddr_t)sa);
+ len = off + dlen;
+ off += ROUNDUP32(dlen);
}
if (m->m_pkthdr.len != len) {
m_freem(m);
}
static int
-rt_msg2(int type, struct rt_addrinfo *rtinfo, caddr_t cp, struct walkarg *w)
+rt_msg2(int type, struct rt_addrinfo *rtinfo, caddr_t cp, struct walkarg *w,
+ kauth_cred_t* credp)
{
int i;
- int len, dlen, second_time = 0;
+ int len, dlen, rlen, second_time = 0;
caddr_t cp0;
rtinfo->rti_addrs = 0;
case RTM_DELADDR:
case RTM_NEWADDR:
- len = sizeof(struct ifa_msghdr);
+ len = sizeof (struct ifa_msghdr);
break;
case RTM_DELMADDR:
case RTM_NEWMADDR:
- len = sizeof(struct ifma_msghdr);
+ len = sizeof (struct ifma_msghdr);
break;
case RTM_IFINFO:
- len = sizeof(struct if_msghdr);
+ len = sizeof (struct if_msghdr);
break;
case RTM_IFINFO2:
- len = sizeof(struct if_msghdr2);
+ len = sizeof (struct if_msghdr2);
break;
case RTM_NEWMADDR2:
- len = sizeof(struct ifma_msghdr2);
+ len = sizeof (struct ifma_msghdr2);
break;
case RTM_GET_EXT:
break;
case RTM_GET2:
- len = sizeof(struct rt_msghdr2);
+ len = sizeof (struct rt_msghdr2);
break;
default:
- len = sizeof(struct rt_msghdr);
+ len = sizeof (struct rt_msghdr);
}
cp0 = cp;
if (cp0)
cp += len;
for (i = 0; i < RTAX_MAX; i++) {
struct sockaddr *sa, *hint;
- struct sockaddr_storage ss;
+ uint8_t ssbuf[SOCK_MAXADDRLEN + 1];
- if ((sa = rtinfo->rti_info[i]) == 0)
+ /*
+ * Make sure to accommodate the largest possible size of sa_len.
+ */
+ _CASSERT(sizeof (ssbuf) == (SOCK_MAXADDRLEN + 1));
+
+ if ((sa = rtinfo->rti_info[i]) == NULL)
continue;
switch (i) {
hint = rtinfo->rti_info[RTAX_IFA];
/* Scrub away any trace of embedded interface scope */
- sa = rtm_scrub_ifscope(type, i, hint, sa, &ss);
+ sa = rtm_scrub(type, i, hint, sa, &ssbuf,
+ sizeof (ssbuf), NULL);
+ break;
+ case RTAX_GATEWAY:
+ case RTAX_IFP:
+ sa = rtm_scrub(type, i, NULL, sa, &ssbuf,
+ sizeof (ssbuf), credp);
break;
default:
}
rtinfo->rti_addrs |= (1 << i);
- dlen = ROUNDUP32(sa->sa_len);
+ dlen = sa->sa_len;
+ rlen = ROUNDUP32(dlen);
if (cp) {
- bcopy((caddr_t)sa, cp, (unsigned)dlen);
- cp += dlen;
+ bcopy((caddr_t)sa, cp, (size_t)dlen);
+ if (dlen != rlen)
+ bzero(cp + dlen, rlen - dlen);
+ cp += rlen;
}
- len += dlen;
+ len += rlen;
}
- if (cp == 0 && w != NULL && !second_time) {
+ if (cp == NULL && w != NULL && !second_time) {
struct walkarg *rw = w;
- if (rw->w_req) {
+ if (rw->w_req != NULL) {
if (rw->w_tmemsize < len) {
- if (rw->w_tmem)
+ if (rw->w_tmem != NULL)
FREE(rw->w_tmem, M_RTABLE);
rw->w_tmem = _MALLOC(len, M_RTABLE, M_WAITOK);
- if (rw->w_tmem)
+ if (rw->w_tmem != NULL)
rw->w_tmemsize = len;
}
- if (rw->w_tmem) {
+ if (rw->w_tmem != NULL) {
cp = rw->w_tmem;
second_time = 1;
goto again;
struct rt_msghdr *rtm;
struct mbuf *m;
struct sockaddr *sa = rtinfo->rti_info[RTAX_DST];
- struct sockproto route_proto = {PF_ROUTE, 0};
+ struct sockproto route_proto = { PF_ROUTE, 0 };
if (route_cb.any_count == 0)
return;
m = rt_msg1(type, rtinfo);
- if (m == 0)
+ if (m == NULL)
return;
rtm = mtod(m, struct rt_msghdr *);
rtm->rtm_flags = RTF_DONE | flags;
* socket indicating that the status of a network interface has changed.
*/
void
-rt_ifmsg(
- struct ifnet *ifp)
+rt_ifmsg(struct ifnet *ifp)
{
struct if_msghdr *ifm;
struct mbuf *m;
struct rt_addrinfo info;
- struct sockproto route_proto = {PF_ROUTE, 0};
+ struct sockproto route_proto = { PF_ROUTE, 0 };
if (route_cb.any_count == 0)
return;
- bzero((caddr_t)&info, sizeof(info));
+ bzero((caddr_t)&info, sizeof (info));
m = rt_msg1(RTM_IFINFO, &info);
- if (m == 0)
+ if (m == NULL)
return;
ifm = mtod(m, struct if_msghdr *);
ifm->ifm_index = ifp->if_index;
int pass;
struct mbuf *m = 0;
struct ifnet *ifp = ifa->ifa_ifp;
- struct sockproto route_proto = {PF_ROUTE, 0};
+ struct sockproto route_proto = { PF_ROUTE, 0 };
- lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
RT_LOCK_ASSERT_HELD(rt);
if (route_cb.any_count == 0)
/* Become a regular mutex, just in case */
RT_CONVERT_LOCK(rt);
for (pass = 1; pass < 3; pass++) {
- bzero((caddr_t)&info, sizeof(info));
+ bzero((caddr_t)&info, sizeof (info));
if ((cmd == RTM_ADD && pass == 1) ||
(cmd == RTM_DELETE && pass == 2)) {
struct ifa_msghdr *ifam;
(cmd == RTM_DELETE && pass == 1)) {
struct rt_msghdr *rtm;
- if (rt == 0)
+ if (rt == NULL)
continue;
info.rti_info[RTAX_NETMASK] = rt_mask(rt);
info.rti_info[RTAX_DST] = sa = rt_key(rt);
struct mbuf *m = 0;
struct ifnet *ifp = ifma->ifma_ifp;
struct ifma_msghdr *ifmam;
- struct sockproto route_proto = {PF_ROUTE, 0};
+ struct sockproto route_proto = { PF_ROUTE, 0 };
if (route_cb.any_count == 0)
return;
/* Lock ifp for if_lladdr */
ifnet_lock_shared(ifp);
- bzero((caddr_t)&info, sizeof(info));
+ bzero((caddr_t)&info, sizeof (info));
IFMA_LOCK(ifma);
info.rti_info[RTAX_IFA] = ifma->ifma_addr;
- info.rti_info[RTAX_IFP] = ifp->if_lladdr->ifa_addr; /* lladdr doesn't need lock */
+ /* lladdr doesn't need lock */
+ info.rti_info[RTAX_IFP] = ifp->if_lladdr->ifa_addr;
/*
* If a link-layer address is present, present it as a ``gateway''
* (similarly to how ARP entries, e.g., are presented).
*/
- info.rti_info[RTAX_GATEWAY] = (ifma->ifma_ll != NULL) ? ifma->ifma_ll->ifma_addr : NULL;
+ info.rti_info[RTAX_GATEWAY] = (ifma->ifma_ll != NULL) ?
+ ifma->ifma_ll->ifma_addr : NULL;
if ((m = rt_msg1(cmd, &info)) == NULL) {
IFMA_UNLOCK(ifma);
ifnet_lock_done(ifp);
raw_input(m, &route_proto, &route_src, &route_dst);
}
+const char *
+rtm2str(int cmd)
+{
+ const char *c = "RTM_?";
+
+ switch (cmd) {
+ case RTM_ADD:
+ c = "RTM_ADD";
+ break;
+ case RTM_DELETE:
+ c = "RTM_DELETE";
+ break;
+ case RTM_CHANGE:
+ c = "RTM_CHANGE";
+ break;
+ case RTM_GET:
+ c = "RTM_GET";
+ break;
+ case RTM_LOSING:
+ c = "RTM_LOSING";
+ break;
+ case RTM_REDIRECT:
+ c = "RTM_REDIRECT";
+ break;
+ case RTM_MISS:
+ c = "RTM_MISS";
+ break;
+ case RTM_LOCK:
+ c = "RTM_LOCK";
+ break;
+ case RTM_OLDADD:
+ c = "RTM_OLDADD";
+ break;
+ case RTM_OLDDEL:
+ c = "RTM_OLDDEL";
+ break;
+ case RTM_RESOLVE:
+ c = "RTM_RESOLVE";
+ break;
+ case RTM_NEWADDR:
+ c = "RTM_NEWADDR";
+ break;
+ case RTM_DELADDR:
+ c = "RTM_DELADDR";
+ break;
+ case RTM_IFINFO:
+ c = "RTM_IFINFO";
+ break;
+ case RTM_NEWMADDR:
+ c = "RTM_NEWMADDR";
+ break;
+ case RTM_DELMADDR:
+ c = "RTM_DELMADDR";
+ break;
+ case RTM_GET_SILENT:
+ c = "RTM_GET_SILENT";
+ break;
+ case RTM_IFINFO2:
+ c = "RTM_IFINFO2";
+ break;
+ case RTM_NEWMADDR2:
+ c = "RTM_NEWMADDR2";
+ break;
+ case RTM_GET2:
+ c = "RTM_GET2";
+ break;
+ case RTM_GET_EXT:
+ c = "RTM_GET_EXT";
+ break;
+ }
+
+ return (c);
+}
+
/*
* This is used in dumping the kernel table via sysctl().
*/
-int
+static int
sysctl_dumpentry(struct radix_node *rn, void *vw)
{
struct walkarg *w = vw;
struct rtentry *rt = (struct rtentry *)rn;
int error = 0, size;
struct rt_addrinfo info;
+ kauth_cred_t cred;
+
+ cred = kauth_cred_proc_ref(current_proc());
RT_LOCK(rt);
- if (w->w_op == NET_RT_FLAGS && !(rt->rt_flags & w->w_arg)) {
- RT_UNLOCK(rt);
- return 0;
- }
- bzero((caddr_t)&info, sizeof(info));
+ if (w->w_op == NET_RT_FLAGS && !(rt->rt_flags & w->w_arg))
+ goto done;
+ bzero((caddr_t)&info, sizeof (info));
info.rti_info[RTAX_DST] = rt_key(rt);
info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
info.rti_info[RTAX_NETMASK] = rt_mask(rt);
info.rti_info[RTAX_GENMASK] = rt->rt_genmask;
if (w->w_op != NET_RT_DUMP2) {
- size = rt_msg2(RTM_GET, &info, 0, w);
- if (w->w_req && w->w_tmem) {
+ size = rt_msg2(RTM_GET, &info, NULL, w, &cred);
+ if (w->w_req != NULL && w->w_tmem != NULL) {
struct rt_msghdr *rtm =
(struct rt_msghdr *)(void *)w->w_tmem;
rtm->rtm_errno = 0;
rtm->rtm_addrs = info.rti_addrs;
error = SYSCTL_OUT(w->w_req, (caddr_t)rtm, size);
- RT_UNLOCK(rt);
- return (error);
}
} else {
- size = rt_msg2(RTM_GET2, &info, 0, w);
- if (w->w_req && w->w_tmem) {
+ size = rt_msg2(RTM_GET2, &info, NULL, w, &cred);
+ if (w->w_req != NULL && w->w_tmem != NULL) {
struct rt_msghdr2 *rtm =
(struct rt_msghdr2 *)(void *)w->w_tmem;
rtm->rtm_reserved = 0;
rtm->rtm_addrs = info.rti_addrs;
error = SYSCTL_OUT(w->w_req, (caddr_t)rtm, size);
- RT_UNLOCK(rt);
- return (error);
}
}
+
+done:
RT_UNLOCK(rt);
+ kauth_cred_unref(&cred);
return (error);
}
/*
* This is used for dumping extended information from route entries.
*/
-int
+static int
sysctl_dumpentry_ext(struct radix_node *rn, void *vw)
{
struct walkarg *w = vw;
struct rtentry *rt = (struct rtentry *)rn;
int error = 0, size;
struct rt_addrinfo info;
+ kauth_cred_t cred;
+
+ cred = kauth_cred_proc_ref(current_proc());
RT_LOCK(rt);
- if (w->w_op == NET_RT_DUMPX_FLAGS && !(rt->rt_flags & w->w_arg)) {
- RT_UNLOCK(rt);
- return (0);
- }
+ if (w->w_op == NET_RT_DUMPX_FLAGS && !(rt->rt_flags & w->w_arg))
+ goto done;
bzero(&info, sizeof (info));
info.rti_info[RTAX_DST] = rt_key(rt);
info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
info.rti_info[RTAX_NETMASK] = rt_mask(rt);
info.rti_info[RTAX_GENMASK] = rt->rt_genmask;
-
- size = rt_msg2(RTM_GET_EXT, &info, 0, w);
- if (w->w_req && w->w_tmem) {
+
+ size = rt_msg2(RTM_GET_EXT, &info, NULL, w, &cred);
+ if (w->w_req != NULL && w->w_tmem != NULL) {
struct rt_msghdr_ext *ertm =
(struct rt_msghdr_ext *)(void *)w->w_tmem;
ertm->rtm_ri.ri_rssi = IFNET_RSSI_UNKNOWN;
ertm->rtm_ri.ri_lqm = IFNET_LQM_THRESH_OFF;
ertm->rtm_ri.ri_npm = IFNET_NPM_THRESH_UNKNOWN;
- }
- else
+ } else {
rt->rt_llinfo_get_ri(rt, &ertm->rtm_ri);
-
+ }
error = SYSCTL_OUT(w->w_req, (caddr_t)ertm, size);
- RT_UNLOCK(rt);
- return (error);
}
+
+done:
RT_UNLOCK(rt);
+ kauth_cred_unref(&cred);
return (error);
}
/*
* rdar://9307819
- * To avoid to call copyout() while holding locks and to cause problems
- * in the paging path, sysctl_iflist() and sysctl_iflist2() contstruct
+ * To avoid calling copyout() while holding locks and causing problems
+ * in the paging path, sysctl_iflist() and sysctl_iflist2() construct
* the list in two passes. In the first pass we compute the total
* length of the data we are going to copyout, then we release
- * all locks to allocate a temporary buffer that gets filled
+ * all locks to allocate a temporary buffer that gets filled
* in the second pass.
*
- * Note that we are verifying the assumption that _MALLOC returns a buffer
- * that is at least 32 bits aligned and that the messages and addresses are
+ * Note that we are verifying the assumption that _MALLOC returns a buffer
+ * that is at least 32 bits aligned and that the messages and addresses are
* 32 bits aligned.
*/
-
-int
+static int
sysctl_iflist(int af, struct walkarg *w)
{
struct ifnet *ifp;
struct ifaddr *ifa;
struct rt_addrinfo info;
- int len, error = 0;
+ int len = 0, error = 0;
int pass = 0;
int total_len = 0, current_len = 0;
char *total_buffer = NULL, *cp = NULL;
+ kauth_cred_t cred;
+
+ cred = kauth_cred_proc_ref(current_proc());
+
+ bzero((caddr_t)&info, sizeof (info));
- bzero((caddr_t)&info, sizeof(info));
-
for (pass = 0; pass < 2; pass++) {
ifnet_head_lock_shared();
-
+
TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
if (error)
break;
continue;
ifnet_lock_shared(ifp);
/*
- * Holding ifnet lock here prevents the link address from
- * changing contents, so no need to hold the ifa lock.
- * The link address is always present; it's never freed.
+ * Holding ifnet lock here prevents the link address
+ * from changing contents, so no need to hold the ifa
+ * lock. The link address is always present; it's
+ * never freed.
*/
ifa = ifp->if_lladdr;
info.rti_info[RTAX_IFP] = ifa->ifa_addr;
- len = rt_msg2(RTM_IFINFO, &info, (caddr_t)0, NULL);
+ len = rt_msg2(RTM_IFINFO, &info, NULL, NULL, &cred);
if (pass == 0) {
total_len += len;
} else {
if (current_len + len > total_len) {
ifnet_lock_done(ifp);
- printf("sysctl_iflist: current_len (%d) + len (%d) > total_len (%d)\n",
- current_len, len, total_len);
error = ENOBUFS;
break;
}
info.rti_info[RTAX_IFP] = ifa->ifa_addr;
- len = rt_msg2(RTM_IFINFO, &info, (caddr_t)cp, NULL);
+ len = rt_msg2(RTM_IFINFO, &info,
+ (caddr_t)cp, NULL, &cred);
info.rti_info[RTAX_IFP] = NULL;
-
+
ifm = (struct if_msghdr *)(void *)cp;
ifm->ifm_index = ifp->if_index;
ifm->ifm_flags = (u_short)ifp->if_flags;
if_data_internal_to_if_data(ifp, &ifp->if_data,
- &ifm->ifm_data);
+ &ifm->ifm_data);
ifm->ifm_addrs = info.rti_addrs;
+ /*
+ * <rdar://problem/32940901>
+ * Round byte counters only for non-platform binaries
+ */
+ if (!csproc_get_platform_binary(w->w_req->p)) {
+ ALIGN_BYTES(ifm->ifm_data.ifi_ibytes);
+ ALIGN_BYTES(ifm->ifm_data.ifi_obytes);
+ }
cp += len;
- VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t)));
+ VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t)));
current_len += len;
}
- while ((ifa = ifa->ifa_link.tqe_next) != 0) {
+ while ((ifa = ifa->ifa_link.tqe_next) != NULL) {
IFA_LOCK(ifa);
if (af && af != ifa->ifa_addr->sa_family) {
IFA_UNLOCK(ifa);
info.rti_info[RTAX_IFA] = ifa->ifa_addr;
info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
info.rti_info[RTAX_BRD] = ifa->ifa_dstaddr;
- len = rt_msg2(RTM_NEWADDR, &info, 0, 0);
+ len = rt_msg2(RTM_NEWADDR, &info, NULL, NULL,
+ &cred);
if (pass == 0) {
total_len += len;
} else {
if (current_len + len > total_len) {
IFA_UNLOCK(ifa);
- printf("sysctl_iflist: current_len (%d) + len (%d) > total_len (%d)\n",
- current_len, len, total_len);
error = ENOBUFS;
break;
}
- len = rt_msg2(RTM_NEWADDR, &info, (caddr_t)cp, NULL);
-
+ len = rt_msg2(RTM_NEWADDR, &info,
+ (caddr_t)cp, NULL, &cred);
+
ifam = (struct ifa_msghdr *)(void *)cp;
- ifam->ifam_index = ifa->ifa_ifp->if_index;
+ ifam->ifam_index =
+ ifa->ifa_ifp->if_index;
ifam->ifam_flags = ifa->ifa_flags;
ifam->ifam_metric = ifa->ifa_metric;
ifam->ifam_addrs = info.rti_addrs;
cp += len;
- VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t)));
+ VERIFY(IS_P2ALIGNED(cp,
+ sizeof (u_int32_t)));
current_len += len;
}
IFA_UNLOCK(ifa);
}
ifnet_lock_done(ifp);
- info.rti_info[RTAX_IFA] = info.rti_info[RTAX_NETMASK] =
- info.rti_info[RTAX_BRD] = NULL;
+ info.rti_info[RTAX_IFA] = info.rti_info[RTAX_NETMASK] =
+ info.rti_info[RTAX_BRD] = NULL;
}
-
+
ifnet_head_done();
-
- if (error)
+
+ if (error != 0) {
+ if (error == ENOBUFS)
+ printf("%s: current_len (%d) + len (%d) > "
+ "total_len (%d)\n", __func__, current_len,
+ len, total_len);
break;
-
+ }
+
if (pass == 0) {
/* Better to return zero length buffer than ENOBUFS */
if (total_len == 0)
total_len = 1;
total_len += total_len >> 3;
- total_buffer = _MALLOC(total_len, M_RTABLE, M_ZERO | M_WAITOK);
+ total_buffer = _MALLOC(total_len, M_RTABLE,
+ M_ZERO | M_WAITOK);
if (total_buffer == NULL) {
- printf("sysctl_iflist: _MALLOC(%d) failed\n", total_len);
+ printf("%s: _MALLOC(%d) failed\n", __func__,
+ total_len);
error = ENOBUFS;
break;
}
cp = total_buffer;
- VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t)));
+ VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t)));
} else {
error = SYSCTL_OUT(w->w_req, total_buffer, current_len);
if (error)
break;
}
}
-
+
if (total_buffer != NULL)
_FREE(total_buffer, M_RTABLE);
-
- return error;
+
+ kauth_cred_unref(&cred);
+ return (error);
}
-int
+static int
sysctl_iflist2(int af, struct walkarg *w)
{
struct ifnet *ifp;
struct ifaddr *ifa;
struct rt_addrinfo info;
- int len, error = 0;
+ int len = 0, error = 0;
int pass = 0;
int total_len = 0, current_len = 0;
char *total_buffer = NULL, *cp = NULL;
+ kauth_cred_t cred;
- bzero((caddr_t)&info, sizeof(info));
+ cred = kauth_cred_proc_ref(current_proc());
+
+ bzero((caddr_t)&info, sizeof (info));
for (pass = 0; pass < 2; pass++) {
+ struct ifmultiaddr *ifma;
+
ifnet_head_lock_shared();
-
+
TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
if (error)
break;
continue;
ifnet_lock_shared(ifp);
/*
- * Holding ifnet lock here prevents the link address from
- * changing contents, so no need to hold the ifa lock.
- * The link address is always present; it's never freed.
+ * Holding ifnet lock here prevents the link address
+ * from changing contents, so no need to hold the ifa
+ * lock. The link address is always present; it's
+ * never freed.
*/
ifa = ifp->if_lladdr;
info.rti_info[RTAX_IFP] = ifa->ifa_addr;
- len = rt_msg2(RTM_IFINFO2, &info, (caddr_t)0, NULL);
+ len = rt_msg2(RTM_IFINFO2, &info, NULL, NULL, &cred);
if (pass == 0) {
total_len += len;
} else {
if (current_len + len > total_len) {
ifnet_lock_done(ifp);
- printf("sysctl_iflist2: current_len (%d) + len (%d) > total_len (%d)\n",
- current_len, len, total_len);
error = ENOBUFS;
break;
}
info.rti_info[RTAX_IFP] = ifa->ifa_addr;
- len = rt_msg2(RTM_IFINFO2, &info, (caddr_t)cp, NULL);
+ len = rt_msg2(RTM_IFINFO2, &info,
+ (caddr_t)cp, NULL, &cred);
info.rti_info[RTAX_IFP] = NULL;
-
+
ifm = (struct if_msghdr2 *)(void *)cp;
ifm->ifm_addrs = info.rti_addrs;
ifm->ifm_flags = (u_short)ifp->if_flags;
ifm->ifm_snd_drops =
ifp->if_snd.ifcq_dropcnt.packets;
ifm->ifm_timer = ifp->if_timer;
- if_data_internal_to_if_data64(ifp, &ifp->if_data,
- &ifm->ifm_data);
+ if_data_internal_to_if_data64(ifp,
+ &ifp->if_data, &ifm->ifm_data);
+ /*
+ * <rdar://problem/32940901>
+ * Round the byte counters only for non-platform binaries
+ */
+ if (!csproc_get_platform_binary(w->w_req->p)) {
+ ALIGN_BYTES(ifm->ifm_data.ifi_ibytes);
+ ALIGN_BYTES(ifm->ifm_data.ifi_obytes);
+ }
cp += len;
- VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t)));
+ VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t)));
current_len += len;
}
- while ((ifa = ifa->ifa_link.tqe_next) != 0) {
+ while ((ifa = ifa->ifa_link.tqe_next) != NULL) {
IFA_LOCK(ifa);
if (af && af != ifa->ifa_addr->sa_family) {
IFA_UNLOCK(ifa);
info.rti_info[RTAX_IFA] = ifa->ifa_addr;
info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
info.rti_info[RTAX_BRD] = ifa->ifa_dstaddr;
- len = rt_msg2(RTM_NEWADDR, &info, 0, 0);
+ len = rt_msg2(RTM_NEWADDR, &info, NULL, NULL,
+ &cred);
if (pass == 0) {
total_len += len;
} else {
struct ifa_msghdr *ifam;
-
+
if (current_len + len > total_len) {
IFA_UNLOCK(ifa);
- printf("sysctl_iflist2: current_len (%d) + len (%d) > total_len (%d)\n",
- current_len, len, total_len);
error = ENOBUFS;
break;
}
- len = rt_msg2(RTM_NEWADDR, &info, (caddr_t)cp, 0);
+ len = rt_msg2(RTM_NEWADDR, &info,
+ (caddr_t)cp, NULL, &cred);
ifam = (struct ifa_msghdr *)(void *)cp;
- ifam->ifam_index = ifa->ifa_ifp->if_index;
+ ifam->ifam_index =
+ ifa->ifa_ifp->if_index;
ifam->ifam_flags = ifa->ifa_flags;
ifam->ifam_metric = ifa->ifa_metric;
ifam->ifam_addrs = info.rti_addrs;
cp += len;
- VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t)));
+ VERIFY(IS_P2ALIGNED(cp,
+ sizeof (u_int32_t)));
current_len += len;
}
IFA_UNLOCK(ifa);
ifnet_lock_done(ifp);
break;
}
- {
- struct ifmultiaddr *ifma;
-
- for (ifma = LIST_FIRST(&ifp->if_multiaddrs);
- ifma != NULL; ifma = LIST_NEXT(ifma, ifma_link)) {
- struct ifaddr *ifa0;
-
- IFMA_LOCK(ifma);
- if (af && af != ifma->ifma_addr->sa_family) {
+
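+ /*
+ * One RTM_NEWMADDR2 record per multicast membership on
+ * this interface; the link-layer companion address, if
+ * any, rides along as RTAX_GATEWAY.
+ */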
+ for (ifma = LIST_FIRST(&ifp->if_multiaddrs);
+ ifma != NULL; ifma = LIST_NEXT(ifma, ifma_link)) {
+ struct ifaddr *ifa0;
+
+ IFMA_LOCK(ifma);
+ if (af && af != ifma->ifma_addr->sa_family) {
+ IFMA_UNLOCK(ifma);
+ continue;
+ }
+ bzero((caddr_t)&info, sizeof (info));
+ info.rti_info[RTAX_IFA] = ifma->ifma_addr;
+ /*
+ * Holding ifnet lock here prevents the link
+ * address from changing contents, so no need
+ * to hold the ifa0 lock. The link address is
+ * always present; it's never freed.
+ */
+ ifa0 = ifp->if_lladdr;
+ info.rti_info[RTAX_IFP] = ifa0->ifa_addr;
+ if (ifma->ifma_ll != NULL)
+ info.rti_info[RTAX_GATEWAY] =
+ ifma->ifma_ll->ifma_addr;
+ len = rt_msg2(RTM_NEWMADDR2, &info, NULL, NULL,
+ &cred);
+ if (pass == 0) {
+ total_len += len;
+ } else {
+ struct ifma_msghdr2 *ifmam;
+
+ if (current_len + len > total_len) {
IFMA_UNLOCK(ifma);
- continue;
- }
- bzero((caddr_t)&info, sizeof(info));
- info.rti_info[RTAX_IFA] = ifma->ifma_addr;
- /*
- * Holding ifnet lock here prevents the link
- * address from changing contents, so no need
- * to hold the ifa0 lock. The link address is
- * always present; it's never freed.
- */
- ifa0 = ifp->if_lladdr;
- info.rti_info[RTAX_IFP] = ifa0->ifa_addr;
- if (ifma->ifma_ll != NULL)
- info.rti_info[RTAX_GATEWAY] = ifma->ifma_ll->ifma_addr;
- len = rt_msg2(RTM_NEWMADDR2, &info, 0, 0);
- if (pass == 0) {
- total_len += len;
- } else {
- struct ifma_msghdr2 *ifmam;
-
- if (current_len + len > total_len) {
- IFMA_UNLOCK(ifma);
- printf("sysctl_iflist2: current_len (%d) + len (%d) > total_len (%d)\n",
- current_len, len, total_len);
- error = ENOBUFS;
- break;
- }
- len = rt_msg2(RTM_NEWMADDR2, &info, (caddr_t)cp, 0);
-
- ifmam = (struct ifma_msghdr2 *)(void *)cp;
- ifmam->ifmam_addrs = info.rti_addrs;
- ifmam->ifmam_flags = 0;
- ifmam->ifmam_index =
- ifma->ifma_ifp->if_index;
- ifmam->ifmam_refcount =
- ifma->ifma_reqcnt;
-
- cp += len;
- VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t)));
- current_len += len;
+ error = ENOBUFS;
+ break;
}
- IFMA_UNLOCK(ifma);
+ len = rt_msg2(RTM_NEWMADDR2, &info,
+ (caddr_t)cp, NULL, &cred);
+
+ ifmam =
+ (struct ifma_msghdr2 *)(void *)cp;
+ ifmam->ifmam_addrs = info.rti_addrs;
+ ifmam->ifmam_flags = 0;
+ ifmam->ifmam_index =
+ ifma->ifma_ifp->if_index;
+ ifmam->ifmam_refcount =
+ ifma->ifma_reqcnt;
+
+ cp += len;
+ VERIFY(IS_P2ALIGNED(cp,
+ sizeof (u_int32_t)));
+ current_len += len;
}
+ IFMA_UNLOCK(ifma);
}
ifnet_lock_done(ifp);
- info.rti_info[RTAX_IFA] = info.rti_info[RTAX_NETMASK] =
- info.rti_info[RTAX_BRD] = NULL;
+ info.rti_info[RTAX_IFA] = info.rti_info[RTAX_NETMASK] =
+ info.rti_info[RTAX_BRD] = NULL;
}
ifnet_head_done();
-
- if (error)
+
+ if (error) {
+ if (error == ENOBUFS)
+ printf("%s: current_len (%d) + len (%d) > "
+ "total_len (%d)\n", __func__, current_len,
+ len, total_len);
break;
-
+ }
+
if (pass == 0) {
/* Better to return zero length buffer than ENOBUFS */
if (total_len == 0)
total_len = 1;
total_len += total_len >> 3;
- total_buffer = _MALLOC(total_len, M_RTABLE, M_ZERO | M_WAITOK);
+ total_buffer = _MALLOC(total_len, M_RTABLE,
+ M_ZERO | M_WAITOK);
if (total_buffer == NULL) {
- printf("sysctl_iflist2: _MALLOC(%d) failed\n", total_len);
+ printf("%s: _MALLOC(%d) failed\n", __func__,
+ total_len);
error = ENOBUFS;
break;
}
cp = total_buffer;
- VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t)));
+ VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t)));
} else {
error = SYSCTL_OUT(w->w_req, total_buffer, current_len);
if (error)
break;
}
}
-
+
if (total_buffer != NULL)
_FREE(total_buffer, M_RTABLE);
-
- return error;
+
+ kauth_cred_unref(&cred);
+ return (error);
}
static int
sysctl_rtstat(struct sysctl_req *req)
{
- int error;
-
- error = SYSCTL_OUT(req, &rtstat, sizeof(struct rtstat));
- if (error)
- return (error);
-
- return 0;
+ return (SYSCTL_OUT(req, &rtstat, sizeof (struct rtstat)));
}
static int
sysctl_rttrash(struct sysctl_req *req)
{
- int error;
-
- error = SYSCTL_OUT(req, &rttrash, sizeof(rttrash));
- if (error)
- return (error);
-
- return 0;
-}
-
-/*
- * Called from pfslowtimo(), protected by domain_proto_mtx
- */
-static void
-rt_drainall(void)
-{
- struct timeval delta_ts, current_ts;
-
- /*
- * This test is done without holding rnh_lock; in the even that
- * we read stale value, it will only cause an extra (or miss)
- * drain and is therefore harmless.
- */
- if (ifnet_aggressive_drainers == 0) {
- if (timerisset(&last_ts))
- timerclear(&last_ts);
- return;
- }
-
- microuptime(&current_ts);
- timersub(&current_ts, &last_ts, &delta_ts);
-
- if (delta_ts.tv_sec >= rt_if_idle_drain_interval) {
- timerclear(&last_ts);
-
- in_rtqdrain(); /* protocol cloned routes: INET */
- in_arpdrain(NULL); /* cloned routes: ARP */
-#if INET6
- in6_rtqdrain(); /* protocol cloned routes: INET6 */
- nd6_drain(NULL); /* cloned routes: ND6 */
-#endif /* INET6 */
-
- last_ts.tv_sec = current_ts.tv_sec;
- last_ts.tv_usec = current_ts.tv_usec;
- }
-}
-
-void
-rt_aggdrain(int on)
-{
- lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
-
- if (on)
- routedomain.dom_protosw->pr_flags |= PR_AGGDRAIN;
- else
- routedomain.dom_protosw->pr_flags &= ~PR_AGGDRAIN;
+ return (SYSCTL_OUT(req, &rttrash, sizeof (rttrash)));
}
static int
if (namelen != 3)
return (EINVAL);
af = name[0];
- Bzero(&w, sizeof(w));
+ Bzero(&w, sizeof (w));
w.w_op = name[1];
w.w_arg = name[2];
w.w_req = req;
error = sysctl_rttrash(req);
break;
}
- if (w.w_tmem)
+ if (w.w_tmem != NULL)
FREE(w.w_tmem, M_RTABLE);
return (error);
}
-SYSCTL_NODE(_net, PF_ROUTE, routetable, CTLFLAG_RD | CTLFLAG_LOCKED, sysctl_rtsock, "");
-
/*
* Definitions of protocols supported in the ROUTE domain.
*/
static struct protosw routesw[] = {
-{ SOCK_RAW, &routedomain, 0, PR_ATOMIC|PR_ADDR,
- 0, route_output, raw_ctlinput, 0,
- 0,
- raw_init, 0, 0, rt_drainall,
- 0,
- &route_usrreqs,
- 0, 0, 0,
- { 0, 0 }, 0, { 0 }
+{
+ .pr_type = SOCK_RAW,
+ .pr_protocol = 0,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_output = route_output,
+ .pr_ctlinput = raw_ctlinput,
+ .pr_init = raw_init,
+ .pr_usrreqs = &route_usrreqs,
}
};
-struct domain routedomain =
- { PF_ROUTE, "route", route_init, 0, 0,
- routesw,
- NULL, NULL, 0, 0, 0, 0, NULL, 0,
- { 0, 0 } };
+static int route_proto_count = (sizeof (routesw) / sizeof (struct protosw));
+
+struct domain routedomain_s = {
+ .dom_family = PF_ROUTE,
+ .dom_name = "route",
+ .dom_init = route_dinit,
+};
+
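+/*
+ * Domain initializer, run when the route domain is attached: record
+ * the domain pointer, register each routesw entry via net_add_proto(),
+ * then call route_init().
+ */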
+static void
+route_dinit(struct domain *dp)
+{
+ struct protosw *pr;
+ int i;
+
+ VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
+ VERIFY(routedomain == NULL);
-DOMAIN_SET(route);
+
+ routedomain = dp;
+
+ for (i = 0, pr = &routesw[0]; i < route_proto_count; i++, pr++)
+ net_add_proto(pr, dp, 1);
+
+ route_init();
+}