/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
+#include <sys/mcache.h>
+#include <net/ntstat.h>
+
+#include <kern/zalloc.h>
#include <net/if.h>
#include <net/if_types.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#if INET6
+#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#endif
#include <netinet/ip_icmp.h>
#if IPSEC
#include <netinet6/ipsec.h>
+#include <netinet6/esp.h>
extern int ipsec_bypass;
-extern lck_mtx_t *sadb_mutex;
#endif /*IPSEC*/
#else
static int udpcksum = 0; /* XXX */
#endif
-SYSCTL_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum, CTLFLAG_RW | CTLFLAG_LOCKED,
&udpcksum, 0, "");
+static u_int32_t udps_in_sw_cksum;
+SYSCTL_UINT(_net_inet_udp, OID_AUTO, in_sw_cksum, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &udps_in_sw_cksum, 0,
+ "Number of received packets checksummed in software");
+
+static u_int64_t udps_in_sw_cksum_bytes;
+SYSCTL_QUAD(_net_inet_udp, OID_AUTO, in_sw_cksum_bytes, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &udps_in_sw_cksum_bytes,
+ "Amount of received data checksummed in software");
+
+static u_int32_t udps_out_sw_cksum;
+SYSCTL_UINT(_net_inet_udp, OID_AUTO, out_sw_cksum, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &udps_out_sw_cksum, 0,
+ "Number of transmitted packets checksummed in software");
+
+static u_int64_t udps_out_sw_cksum_bytes;
+SYSCTL_QUAD(_net_inet_udp, OID_AUTO, out_sw_cksum_bytes, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &udps_out_sw_cksum_bytes,
+ "Amount of transmitted data checksummed in software");
+
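/*
 * Illustrative userspace sketch, not part of this change: the four
 * counters above are exported read-only under net.inet.udp, so a
 * monitoring tool can sample them with sysctlbyname(3).  The sysctl
 * names follow directly from the SYSCTL declarations above; error
 * handling is minimal.
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int
main(void)
{
	uint32_t pkts = 0;
	uint64_t bytes = 0;
	size_t len;

	len = sizeof (pkts);
	if (sysctlbyname("net.inet.udp.in_sw_cksum", &pkts, &len, NULL, 0) == -1)
		perror("in_sw_cksum");
	len = sizeof (bytes);
	if (sysctlbyname("net.inet.udp.in_sw_cksum_bytes", &bytes, &len, NULL, 0) == -1)
		perror("in_sw_cksum_bytes");
	printf("udp rx sw-checksummed: %u packets, %llu bytes\n",
	    pkts, (unsigned long long)bytes);
	return (0);
}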
int log_in_vain = 0;
-SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW | CTLFLAG_LOCKED,
&log_in_vain, 0, "Log all incoming UDP packets");
static int blackhole = 0;
-SYSCTL_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_RW | CTLFLAG_LOCKED,
&blackhole, 0, "Do not send port unreachables for refused connects");
struct inpcbhead udb; /* from udp_var.h */
#define UDBHASHSIZE 16
#endif
-extern int apple_hwcksum_rx;
extern int esp_udp_encap_port;
-extern u_long route_generation;
-extern void ipfwsyslog( int level, char *format,...);
+extern void ipfwsyslog( int level, const char *format,...);
extern int fw_verbose;
+static int udp_gc_done = FALSE; /* Garbage collection performed last slowtimo */
+#if IPFIREWALL
#define log_in_vain_log( a ) { \
if ( (log_in_vain == 3 ) && (fw_verbose == 2)) { /* Apple logging, log to ipfw.log */ \
ipfwsyslog a ; \
} \
else log a ; \
}
+#else
+#define log_in_vain_log( a ) { log a; }
+#endif
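/*
 * Usage sketch, illustrative only: the macro forwards its single argument
 * "a" as a complete argument list to either ipfwsyslog() or log(), so a
 * caller wraps the level, format and arguments in one extra set of
 * parentheses, e.g.:
 *
 *	log_in_vain_log((LOG_INFO,
 *	    "Connection attempt to UDP %s:%d from %s:%d\n",
 *	    buf, ntohs(uh->uh_dport), buf2, ntohs(uh->uh_sport)));
 */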
struct udpstat udpstat; /* from udp_var.h */
-SYSCTL_STRUCT(_net_inet_udp, UDPCTL_STATS, stats, CTLFLAG_RD,
+SYSCTL_STRUCT(_net_inet_udp, UDPCTL_STATS, stats, CTLFLAG_RD | CTLFLAG_LOCKED,
&udpstat, udpstat, "UDP statistics (struct udpstat, netinet/udp_var.h)");
-SYSCTL_INT(_net_inet_udp, OID_AUTO, pcbcount, CTLFLAG_RD,
+SYSCTL_INT(_net_inet_udp, OID_AUTO, pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED,
&udbinfo.ipi_count, 0, "Number of active PCBs");
-static struct sockaddr_in udp_in = { sizeof(udp_in), AF_INET };
+__private_extern__ int udp_use_randomport = 1;
+SYSCTL_INT(_net_inet_udp, OID_AUTO, randomize_ports, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &udp_use_randomport, 0, "Randomize UDP port numbers");
+
#if INET6
struct udp_in6 {
struct sockaddr_in6 uin6_sin;
u_char uin6_init_done : 1;
-} udp_in6 = {
- { sizeof(udp_in6.uin6_sin), AF_INET6 },
- 0
};
struct udp_ip6 {
struct ip6_hdr uip6_ip6;
u_char uip6_init_done : 1;
-} udp_ip6;
-#endif /* INET6 */
-
-static void udp_append(struct inpcb *last, struct ip *ip,
- struct mbuf *n, int off);
-#if INET6
+};
static void ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip);
+static void udp_append(struct inpcb *last, struct ip *ip,
+ struct mbuf *n, int off, struct sockaddr_in *pudp_in,
+ struct udp_in6 *pudp_in6, struct udp_ip6 *pudp_ip6);
+#else
+static void udp_append(struct inpcb *last, struct ip *ip,
+ struct mbuf *n, int off, struct sockaddr_in *pudp_in);
#endif
static int udp_detach(struct socket *so);
* allocate lock group attribute and group for udp pcb mutexes
*/
pcbinfo->mtx_grp_attr = lck_grp_attr_alloc_init();
- lck_grp_attr_setdefault(pcbinfo->mtx_grp_attr);
pcbinfo->mtx_grp = lck_grp_alloc_init("udppcb", pcbinfo->mtx_grp_attr);
pcbinfo->mtx_attr = lck_attr_alloc_init();
- lck_attr_setdefault(pcbinfo->mtx_attr);
if ((pcbinfo->mtx = lck_rw_alloc_init(pcbinfo->mtx_grp, pcbinfo->mtx_attr)) == NULL)
return; /* pretty much dead if this fails... */
-
- in_pcb_nat_init(&udbinfo, AF_INET, IPPROTO_UDP, SOCK_DGRAM);
#else
udbinfo.ipi_zone = zinit("udpcb", sizeof(struct inpcb), maxsockets,
ZONE_INTERRUPT, 0);
#endif
-
-#if 0
- /* for pcb sharing testing only */
- stat = in_pcb_new_share_client(&udbinfo, &fake_owner);
- kprintf("udp_init in_pcb_new_share_client - stat = %d\n", stat);
-
- laddr.s_addr = 0x11646464;
- faddr.s_addr = 0x11646465;
-
- lport = 1500;
- in_pcb_grab_port(&udbinfo, 0, laddr, &lport, faddr, 1600, 0, fake_owner);
- kprintf("udp_init in_pcb_grab_port - stat = %d\n", stat);
-
- stat = in_pcb_rem_share_client(&udbinfo, fake_owner);
- kprintf("udp_init in_pcb_rem_share_client - stat = %d\n", stat);
-
- stat = in_pcb_new_share_client(&udbinfo, &fake_owner);
- kprintf("udp_init in_pcb_new_share_client(2) - stat = %d\n", stat);
-
- laddr.s_addr = 0x11646464;
- faddr.s_addr = 0x11646465;
-
- lport = 1500;
- stat = in_pcb_grab_port(&udbinfo, 0, laddr, &lport, faddr, 1600, 0, fake_owner);
- kprintf("udp_init in_pcb_grab_port(2) - stat = %d\n", stat);
-#endif
}
void
register struct udphdr *uh;
register struct inpcb *inp;
struct mbuf *opts = 0;
- int len;
+ int len, isbroadcast;
struct ip save_ip;
struct sockaddr *append_sa;
struct inpcbinfo *pcbinfo = &udbinfo;
+ struct sockaddr_in udp_in = {
+ sizeof (udp_in), AF_INET, 0, { 0 }, { 0, 0, 0, 0, 0, 0, 0, 0 }
+ };
+ struct ip_moptions *imo = NULL;
+ int foundmembership = 0, ret = 0;
+#if INET6
+ struct udp_in6 udp_in6 = {
+ { sizeof (udp_in6.uin6_sin), AF_INET6, 0, 0,
+ IN6ADDR_ANY_INIT, 0 },
+ 0
+ };
+ struct udp_ip6 udp_ip6;
+#endif /* INET6 */
+ struct ifnet *ifp = (m->m_pkthdr.rcvif != NULL) ? m->m_pkthdr.rcvif : NULL;
udpstat.udps_ipackets++;
if (m->m_pkthdr.csum_flags & CSUM_TCP_SUM16)
m->m_pkthdr.csum_flags = 0; /* invalidate hwcksum for UDP */
+ /* Expect 32-bit aligned data pointer on strict-align platforms */
+ MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
+
/*
* Strip IP options, if any; should skip this,
* make available to user, and use on returned packets,
}
ip = mtod(m, struct ip *);
}
- uh = (struct udphdr *)((caddr_t)ip + iphlen);
+ uh = (struct udphdr *)(void *)((caddr_t)ip + iphlen);
/* destination port of 0 is illegal, based on RFC768. */
- if (uh->uh_dport == 0)
+ if (uh->uh_dport == 0) {
+
+ if (ifp->if_udp_stat != NULL)
+ atomic_add_64(&ifp->if_udp_stat->port0, 1);
+
goto bad;
+ }
KERNEL_DEBUG(DBG_LAYER_IN_BEG, uh->uh_dport, uh->uh_sport,
ip->ip_src.s_addr, ip->ip_dst.s_addr, uh->uh_ulen);
if (ip->ip_len != len) {
if (len > ip->ip_len || len < sizeof(struct udphdr)) {
udpstat.udps_badlen++;
+
+ if (ifp->if_udp_stat != NULL)
+ atomic_add_64(&ifp->if_udp_stat->badlength, 1);
+
goto bad;
}
m_adj(m, len - ip->ip_len);
} else {
char b[9];
doudpcksum:
- *(uint32_t*)&b[0] = *(uint32_t*)&((struct ipovly *)ip)->ih_x1[0];
- *(uint32_t*)&b[4] = *(uint32_t*)&((struct ipovly *)ip)->ih_x1[4];
- *(uint8_t*)&b[8] = *(uint8_t*)&((struct ipovly *)ip)->ih_x1[8];
-
- bzero(((struct ipovly *)ip)->ih_x1, 9);
+ bcopy(((struct ipovly *)ip)->ih_x1, b,
+ sizeof (((struct ipovly *)ip)->ih_x1));
+ bzero(((struct ipovly *)ip)->ih_x1,
+ sizeof (((struct ipovly *)ip)->ih_x1));
((struct ipovly *)ip)->ih_len = uh->uh_ulen;
uh->uh_sum = in_cksum(m, len + sizeof (struct ip));
-
- *(uint32_t*)&((struct ipovly *)ip)->ih_x1[0] = *(uint32_t*)&b[0];
- *(uint32_t*)&((struct ipovly *)ip)->ih_x1[4] = *(uint32_t*)&b[4];
- *(uint8_t*)&((struct ipovly *)ip)->ih_x1[8] = *(uint8_t*)&b[8];
+ bcopy(b, ((struct ipovly *)ip)->ih_x1,
+ sizeof (((struct ipovly *)ip)->ih_x1));
+
+ udp_in_cksum_stats(len);
}
if (uh->uh_sum) {
udpstat.udps_badsum++;
+
+ if (ifp->if_udp_stat != NULL)
+ atomic_add_64(&ifp->if_udp_stat->badchksum, 1);
+
m_freem(m);
KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
return;
udpstat.udps_nosum++;
#endif
- if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
- in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
- struct inpcb *last;
+ isbroadcast = in_broadcast(ip->ip_dst, ifp);
+
+ if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || isbroadcast) {
+
+ int reuse_sock = 0, mcast_delivered = 0;
+
lck_rw_lock_shared(pcbinfo->mtx);
/*
* Deliver a multicast or broadcast datagram to *all* sockets
* Locate pcb(s) for datagram.
* (Algorithm copied from raw_intr().)
*/
- last = NULL;
#if INET6
udp_in6.uin6_init_done = udp_ip6.uip6_init_done = 0;
#endif
LIST_FOREACH(inp, &udb, inp_list) {
-#ifdef __APPLE__
- /* Ignore nat/SharedIP dummy pcbs */
- if (inp->inp_socket == &udbinfo.nat_dummy_socket)
- continue;
-#endif
if (inp->inp_socket == NULL)
continue;
if (inp != sotoinpcb(inp->inp_socket))
- panic("udp_input: bad so back ptr inp=%x\n", inp);
+ panic("udp_input: bad so back ptr inp=%p\n", inp);
#if INET6
if ((inp->inp_vflag & INP_IPV4) == 0)
continue;
#endif
+ if (ip_restrictrecvif && ifp != NULL &&
+ (ifp->if_eflags & IFEF_RESTRICTED_RECV) &&
+ !(inp->inp_flags & INP_RECV_ANYIF))
+ continue;
+
+ if ((inp->inp_moptions == NULL) &&
+ (ntohl(ip->ip_dst.s_addr) != INADDR_ALLHOSTS_GROUP) &&
+ (isbroadcast == 0) )
+ continue;
+
+
if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
continue;
}
}
}
- if (last != NULL) {
- struct mbuf *n;
+ if (isbroadcast == 0 && (ntohl(ip->ip_dst.s_addr) != INADDR_ALLHOSTS_GROUP)) {
+ if ((imo = inp->inp_moptions) == NULL) {
+ udp_unlock(inp->inp_socket, 1, 0);
+ continue;
+ } else {
+ struct sockaddr_in group;
+ int blocked;
+
+ IMO_LOCK(imo);
+
+ bzero(&group, sizeof(struct sockaddr_in));
+ group.sin_len = sizeof(struct sockaddr_in);
+ group.sin_family = AF_INET;
+ group.sin_addr = ip->ip_dst;
+
+ blocked = imo_multi_filter(imo, ifp,
+ (struct sockaddr *)&group,
+ (struct sockaddr *)&udp_in);
+ if (blocked == MCAST_PASS)
+ foundmembership = 1;
+
+ IMO_UNLOCK(imo);
+ if (!foundmembership) {
+ udp_unlock(inp->inp_socket, 1, 0);
+ continue;
+ }
+ foundmembership = 0;
+ }
+ }
+ reuse_sock = inp->inp_socket->so_options & (SO_REUSEPORT|SO_REUSEADDR);
+ {
#if IPSEC
int skipit = 0;
/* check AH/ESP integrity. */
if (ipsec_bypass == 0) {
- lck_mtx_lock(sadb_mutex);
- if (ipsec4_in_reject_so(m, last->inp_socket)) {
- ipsecstat.in_polvio++;
+ if (ipsec4_in_reject_so(m, inp->inp_socket)) {
+ IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
/* do not inject data to pcb */
skipit = 1;
}
- lck_mtx_unlock(sadb_mutex);
}
- if (skipit == 0)
+ if (skipit == 0)
#endif /*IPSEC*/
- if ((n = m_copy(m, 0, M_COPYALL)) != NULL) {
- udp_append(last, ip, n,
- iphlen +
- sizeof(struct udphdr));
+ {
+ struct mbuf *n = NULL;
+
+ if (reuse_sock)
+ n = m_copy(m, 0, M_COPYALL);
+#if INET6
+ udp_append(inp, ip, m,
+ iphlen + sizeof(struct udphdr),
+ &udp_in, &udp_in6, &udp_ip6);
+#else
+ udp_append(inp, ip, m,
+ iphlen + sizeof(struct udphdr),
+ &udp_in);
+#endif /* INET6 */
+ mcast_delivered++;
+
+ m = n;
}
- udp_unlock(last->inp_socket, 1, 0);
+ udp_unlock(inp->inp_socket, 1, 0);
}
- last = inp;
/*
* Don't look for additional matches if this one does
* not have either the SO_REUSEPORT or SO_REUSEADDR
* socket options set. This heuristic avoids searching
* through all pcbs in the common case of a non-shared
- * port. It * assumes that an application will never
+ * port. It assumes that an application will never
* clear these options after setting them.
*/
- if ((last->inp_socket->so_options&(SO_REUSEPORT|SO_REUSEADDR)) == 0)
+ if (reuse_sock == 0 || m == NULL)
break;
+
+ /*
+ * Expect 32-bit aligned data pointer on strict-align
+ * platforms.
+ */
+ MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
+ /*
+ * Recompute IP and UDP header pointers for new mbuf
+ */
+ ip = mtod(m, struct ip *);
+ uh = (struct udphdr *)(void *)((caddr_t)ip + iphlen);
}
lck_rw_done(pcbinfo->mtx);
- if (last == NULL) {
+ if (mcast_delivered == 0) {
/*
* No matching pcb found; discard datagram.
* (No need to send an ICMP Port Unreachable
* for a broadcast or multicast datagram.)
*/
udpstat.udps_noportbcast++;
+
+ if (ifp->if_udp_stat != NULL)
+ atomic_add_64(&ifp->if_udp_stat->port_unreach, 1);
+
goto bad;
}
-#if IPSEC
- /* check AH/ESP integrity. */
- if (ipsec_bypass == 0 && m) {
- lck_mtx_lock(sadb_mutex);
- if (ipsec4_in_reject_so(m, last->inp_socket)) {
- ipsecstat.in_polvio++;
- lck_mtx_unlock(sadb_mutex);
- udp_unlock(last->inp_socket, 1, 0);
- goto bad;
- }
- lck_mtx_unlock(sadb_mutex);
- }
-#endif /*IPSEC*/
- udp_append(last, ip, m, iphlen + sizeof(struct udphdr));
- udp_unlock(last->inp_socket, 1, 0);
+
+ if (m != NULL) /* free the extra copy of the mbuf, or one skipped by IPSec */
+ m_freem(m);
+ KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
return;
}
KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
return;
}
+ /*
+ * Expect 32-bit aligned data pointer on strict-align
+ * platforms.
+ */
+ MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
+
ip = mtod(m, struct ip *);
- uh = (struct udphdr *)((caddr_t)ip + iphlen);
+ uh = (struct udphdr *)(void *)((caddr_t)ip + iphlen);
}
/* Check for NAT keepalive packet */
if (payload_len == 1 && *(u_int8_t*)((caddr_t)uh + sizeof(struct udphdr)) == 0xFF) {
KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
return;
}
- else if (payload_len == 4 && *(u_int32_t*)((caddr_t)uh + sizeof(struct udphdr)) != 0) {
+ else if (payload_len == 4 && *(u_int32_t*)(void *)((caddr_t)uh + sizeof(struct udphdr)) != 0) {
/* UDP encapsulated IPSec packet to pass through NAT */
- size_t stripsiz;
-
- stripsiz = sizeof(struct udphdr);
-
- ip = mtod(m, struct ip *);
- ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), iphlen);
- m->m_data += stripsiz;
- m->m_len -= stripsiz;
- m->m_pkthdr.len -= stripsiz;
- ip = mtod(m, struct ip *);
- ip->ip_len = ip->ip_len - stripsiz;
- ip->ip_p = IPPROTO_ESP;
-
KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0);
- esp4_input(m, iphlen);
+ /* preserve the udp header */
+ esp4_input(m, iphlen + sizeof(struct udphdr));
return;
}
}
* Locate pcb for datagram.
*/
inp = in_pcblookup_hash(&udbinfo, ip->ip_src, uh->uh_sport,
- ip->ip_dst, uh->uh_dport, 1, m->m_pkthdr.rcvif);
+ ip->ip_dst, uh->uh_dport, 1, ifp);
if (inp == NULL) {
+
+ if (ifp->if_udp_stat != NULL)
+ atomic_add_64(&ifp->if_udp_stat->port_unreach, 1);
+
if (log_in_vain) {
char buf[MAX_IPv4_STR_LEN];
char buf2[MAX_IPv4_STR_LEN];
goto bad;
#endif
if (blackhole)
- if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_type != IFT_LOOP)
+ if (ifp && ifp->if_type != IFT_LOOP)
goto bad;
*ip = save_ip;
ip->ip_len += iphlen;
if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
udp_unlock(inp->inp_socket, 1, 0);
+
+ if (ifp->if_udp_stat != NULL)
+ atomic_add_64(&ifp->if_udp_stat->cleanup, 1);
+
goto bad;
}
#if IPSEC
if (ipsec_bypass == 0 && inp != NULL) {
- lck_mtx_lock(sadb_mutex);
if (ipsec4_in_reject_so(m, inp->inp_socket)) {
- ipsecstat.in_polvio++;
- lck_mtx_unlock(sadb_mutex);
+ IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
udp_unlock(inp->inp_socket, 1, 0);
+
+ if (ifp->if_udp_stat != NULL)
+ atomic_add_64(&ifp->if_udp_stat->badipsec, 1);
+
goto bad;
- }
- lck_mtx_unlock(sadb_mutex);
+ }
}
#endif /*IPSEC*/
*/
udp_in.sin_port = uh->uh_sport;
udp_in.sin_addr = ip->ip_src;
- if (inp->inp_flags & INP_CONTROLOPTS
- || inp->inp_socket->so_options & SO_TIMESTAMP) {
+ if ((inp->inp_flags & INP_CONTROLOPTS) != 0
+ || (inp->inp_socket->so_options & SO_TIMESTAMP) != 0
+ || (inp->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0) {
#if INET6
if (inp->inp_vflag & INP_IPV6) {
int savedflags;
ip_2_ip6_hdr(&udp_ip6.uip6_ip6, ip);
savedflags = inp->inp_flags;
inp->inp_flags &= ~INP_UNMAPPABLEOPTS;
- ip6_savecontrol(inp, &opts, &udp_ip6.uip6_ip6, m);
+ ret = ip6_savecontrol(inp, m, &opts);
inp->inp_flags = savedflags;
} else
#endif
- ip_savecontrol(inp, &opts, ip, m);
+ {
+ ret = ip_savecontrol(inp, &opts, ip, m);
+ }
+ if (ret != 0) {
+ udp_unlock(inp->inp_socket, 1, 0);
+ goto bad;
+ }
}
m_adj(m, iphlen + sizeof(struct udphdr));
#if INET6
if (inp->inp_vflag & INP_IPV6) {
in6_sin_2_v4mapsin6(&udp_in, &udp_in6.uin6_sin);
- append_sa = (struct sockaddr *)&udp_in6;
+ append_sa = (struct sockaddr *)&udp_in6.uin6_sin;
} else
#endif
append_sa = (struct sockaddr *)&udp_in;
+ if (nstat_collect) {
+ locked_add_64(&inp->inp_stat->rxpackets, 1);
+ locked_add_64(&inp->inp_stat->rxbytes, m->m_pkthdr.len);
+ }
+ so_recv_data_stat(inp->inp_socket, m, 0);
if (sbappendaddr(&inp->inp_socket->so_rcv, append_sa, m, opts, NULL) == 0) {
udpstat.udps_fullsock++;
- }
- else {
+ } else {
sorwakeup(inp->inp_socket);
}
udp_unlock(inp->inp_socket, 1, 0);
ip6->ip6_plen = ip->ip_len;
ip6->ip6_nxt = ip->ip_p;
ip6->ip6_hlim = ip->ip_ttl;
- ip6->ip6_src.s6_addr32[2] = ip6->ip6_dst.s6_addr32[2] =
- IPV6_ADDR_INT32_SMP;
- ip6->ip6_src.s6_addr32[3] = ip->ip_src.s_addr;
- ip6->ip6_dst.s6_addr32[3] = ip->ip_dst.s_addr;
+ if (ip->ip_src.s_addr) {
+ ip6->ip6_src.s6_addr32[2] = IPV6_ADDR_INT32_SMP;
+ ip6->ip6_src.s6_addr32[3] = ip->ip_src.s_addr;
+ }
+ if (ip->ip_dst.s_addr) {
+ ip6->ip6_dst.s6_addr32[2] = IPV6_ADDR_INT32_SMP;
+ ip6->ip6_dst.s6_addr32[3] = ip->ip_dst.s_addr;
+ }
}
#endif
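/*
 * Illustrative sketch, not part of this change: ip_2_ip6_hdr() above
 * synthesizes an IPv4-mapped IPv6 header, i.e. addresses of the form
 * ::ffff:a.b.c.d (RFC 4291), for sockets using v4-mapped addressing.
 * A standalone equivalent of just the address mapping (helper name is
 * hypothetical):
 */
#include <string.h>
#include <netinet/in.h>

static struct in6_addr
v4_to_v4mapped(struct in_addr a4)
{
	struct in6_addr a6 = IN6ADDR_ANY_INIT;

	a6.s6_addr[10] = 0xff;				/* ::ffff:0:0/96 prefix */
	a6.s6_addr[11] = 0xff;
	memcpy(&a6.s6_addr[12], &a4, sizeof (a4));	/* IPv4 address in the low 32 bits */
	return (a6);
}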
/*
* subroutine of udp_input(), mainly for source code readability.
- * caller must properly init udp_ip6 and udp_in6 beforehand.
*/
static void
-udp_append(last, ip, n, off)
- struct inpcb *last;
- struct ip *ip;
- struct mbuf *n;
- int off;
+#if INET6
+udp_append(struct inpcb *last, struct ip *ip, struct mbuf *n, int off,
+ struct sockaddr_in *pudp_in, struct udp_in6 *pudp_in6,
+ struct udp_ip6 *pudp_ip6)
+#else
+udp_append(struct inpcb *last, struct ip *ip, struct mbuf *n, int off,
+ struct sockaddr_in *pudp_in)
+#endif
{
struct sockaddr *append_sa;
struct mbuf *opts = 0;
+ int ret = 0;
- if (last->inp_flags & INP_CONTROLOPTS ||
- last->inp_socket->so_options & SO_TIMESTAMP) {
+#if CONFIG_MACF_NET
+ if (mac_inpcb_check_deliver(last, n, AF_INET, SOCK_DGRAM) != 0) {
+ m_freem(n);
+ return;
+ }
+#endif
+ if ((last->inp_flags & INP_CONTROLOPTS) != 0 ||
+ (last->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
+ (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0) {
#if INET6
if (last->inp_vflag & INP_IPV6) {
int savedflags;
- if (udp_ip6.uip6_init_done == 0) {
- ip_2_ip6_hdr(&udp_ip6.uip6_ip6, ip);
- udp_ip6.uip6_init_done = 1;
+ if (pudp_ip6->uip6_init_done == 0) {
+ ip_2_ip6_hdr(&pudp_ip6->uip6_ip6, ip);
+ pudp_ip6->uip6_init_done = 1;
}
savedflags = last->inp_flags;
last->inp_flags &= ~INP_UNMAPPABLEOPTS;
- ip6_savecontrol(last, &opts, &udp_ip6.uip6_ip6, n);
+ ret = ip6_savecontrol(last, n, &opts);
+ if (ret != 0) {
+ last->inp_flags = savedflags;
+ goto error;
+ }
last->inp_flags = savedflags;
} else
#endif
- ip_savecontrol(last, &opts, ip, n);
+ {
+ ret = ip_savecontrol(last, &opts, ip, n);
+ if (ret != 0) {
+ goto error;
+ }
+ }
}
#if INET6
if (last->inp_vflag & INP_IPV6) {
- if (udp_in6.uin6_init_done == 0) {
- in6_sin_2_v4mapsin6(&udp_in, &udp_in6.uin6_sin);
- udp_in6.uin6_init_done = 1;
+ if (pudp_in6->uin6_init_done == 0) {
+ in6_sin_2_v4mapsin6(pudp_in, &pudp_in6->uin6_sin);
+ pudp_in6->uin6_init_done = 1;
}
- append_sa = (struct sockaddr *)&udp_in6.uin6_sin;
+ append_sa = (struct sockaddr *)&pudp_in6->uin6_sin;
} else
#endif
- append_sa = (struct sockaddr *)&udp_in;
+ append_sa = (struct sockaddr *)pudp_in;
+ if (nstat_collect) {
+ locked_add_64(&last->inp_stat->rxpackets, 1);
+ locked_add_64(&last->inp_stat->rxbytes, n->m_pkthdr.len);
+ }
+ so_recv_data_stat(last->inp_socket, n, 0);
m_adj(n, off);
if (sbappendaddr(&last->inp_socket->so_rcv, append_sa, n, opts, NULL) == 0) {
udpstat.udps_fullsock++;
- } else
+ } else {
sorwakeup(last->inp_socket);
+ }
+ return;
+error:
+ m_freem(n);
+ m_freem(opts);
+ return;
}
/*
void *vip;
{
struct ip *ip = vip;
- struct udphdr *uh;
void (*notify)(struct inpcb *, int) = udp_notify;
struct in_addr faddr;
struct inpcb *inp;
- faddr = ((struct sockaddr_in *)sa)->sin_addr;
+ faddr = ((struct sockaddr_in *)(void *)sa)->sin_addr;
if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
- return;
+ return;
if (PRC_IS_REDIRECT(cmd)) {
ip = 0;
else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
return;
if (ip) {
- uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
- inp = in_pcblookup_hash(&udbinfo, faddr, uh->uh_dport,
- ip->ip_src, uh->uh_sport, 0, NULL);
+ struct udphdr uh;
+
+ bcopy(((caddr_t)ip + (ip->ip_hl << 2)), &uh, sizeof (uh));
+ inp = in_pcblookup_hash(&udbinfo, faddr, uh.uh_dport,
+ ip->ip_src, uh.uh_sport, 0, NULL);
if (inp != NULL && inp->inp_socket != NULL) {
udp_lock(inp->inp_socket, 1, 0);
- if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
+ if (in_pcb_checkstate(inp, WNT_RELEASE, 1) ==
+ WNT_STOPUSING) {
udp_unlock(inp->inp_socket, 1, 0);
return;
}
in_pcbnotifyall(&udbinfo, faddr, inetctlerrmap[cmd], notify);
}
+int
+udp_ctloutput(struct socket *so, struct sockopt *sopt)
+{
+ int error, optval;
+ struct inpcb *inp;
+
+ /* Allow <SOL_SOCKET,SO_FLUSH> at this level */
+ if (sopt->sopt_level != IPPROTO_UDP &&
+ !(sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_FLUSH))
+ return (ip_ctloutput(so, sopt));
+
+ error = 0;
+ inp = sotoinpcb(so);
+
+ switch (sopt->sopt_dir) {
+ case SOPT_SET:
+ switch (sopt->sopt_name) {
+ case UDP_NOCKSUM:
+ /* This option is settable only for UDP over IPv4 */
+ if (!(inp->inp_vflag & INP_IPV4)) {
+ error = EINVAL;
+ break;
+ }
+
+ if ((error = sooptcopyin(sopt, &optval, sizeof (optval),
+ sizeof (optval))) != 0)
+ break;
+
+ if (optval != 0)
+ inp->inp_flags |= INP_UDP_NOCKSUM;
+ else
+ inp->inp_flags &= ~INP_UDP_NOCKSUM;
+ break;
+
+ case SO_FLUSH:
+ if ((error = sooptcopyin(sopt, &optval, sizeof (optval),
+ sizeof (optval))) != 0)
+ break;
+
+ error = inp_flush(inp, optval);
+ break;
+
+ default:
+ error = ENOPROTOOPT;
+ break;
+ }
+ break;
+
+ case SOPT_GET:
+ switch (sopt->sopt_name) {
+ case UDP_NOCKSUM:
+ optval = inp->inp_flags & INP_UDP_NOCKSUM;
+ break;
+
+ default:
+ error = ENOPROTOOPT;
+ break;
+ }
+ if (error == 0)
+ error = sooptcopyout(sopt, &optval, sizeof (optval));
+ break;
+ }
+ return (error);
+}
+
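/*
 * Illustrative userspace sketch, not part of this change: UDP_NOCKSUM is
 * the IPPROTO_UDP-level option handled above; a non-zero value disables
 * outbound UDP checksums on an IPv4 socket.  This assumes the option is
 * exported to userspace via <netinet/udp.h>.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/udp.h>

int
main(void)
{
	int s = socket(AF_INET, SOCK_DGRAM, 0);
	int on = 1;

	if (s == -1 || setsockopt(s, IPPROTO_UDP, UDP_NOCKSUM,
	    &on, sizeof (on)) == -1)
		perror("UDP_NOCKSUM");
	if (s != -1)
		close(s);
	return (0);
}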
static int
udp_pcblist SYSCTL_HANDLER_ARGS
{
+#pragma unused(oidp, arg1, arg2)
int error, i, n;
struct inpcb *inp, **inp_list;
inp_gen_t gencnt;
return error;
}
-SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
+SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist, CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
udp_pcblist, "S,xinpcb", "List of active UDP sockets");
+#if !CONFIG_EMBEDDED
+
+static int
+udp_pcblist64 SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ int error, i, n;
+ struct inpcb *inp, **inp_list;
+ inp_gen_t gencnt;
+ struct xinpgen xig;
+
+ /*
+ * The process of preparing the PCB list is too time-consuming and
+ * resource-intensive to repeat twice on every request.
+ */
+ lck_rw_lock_shared(udbinfo.mtx);
+ if (req->oldptr == USER_ADDR_NULL) {
+ n = udbinfo.ipi_count;
+ req->oldidx = 2 * (sizeof xig)
+ + (n + n/8) * sizeof(struct xinpcb64);
+ lck_rw_done(udbinfo.mtx);
+ return 0;
+ }
+
+ if (req->newptr != USER_ADDR_NULL) {
+ lck_rw_done(udbinfo.mtx);
+ return EPERM;
+ }
+
+ /*
+ * OK, now we're committed to doing something.
+ */
+ gencnt = udbinfo.ipi_gencnt;
+ n = udbinfo.ipi_count;
+
+ bzero(&xig, sizeof(xig));
+ xig.xig_len = sizeof xig;
+ xig.xig_count = n;
+ xig.xig_gen = gencnt;
+ xig.xig_sogen = so_gencnt;
+ error = SYSCTL_OUT(req, &xig, sizeof xig);
+ if (error) {
+ lck_rw_done(udbinfo.mtx);
+ return error;
+ }
+ /*
+ * We are done if there is no pcb
+ */
+ if (n == 0) {
+ lck_rw_done(udbinfo.mtx);
+ return 0;
+ }
+ inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
+ if (inp_list == 0) {
+ lck_rw_done(udbinfo.mtx);
+ return ENOMEM;
+ }
+
+ for (inp = LIST_FIRST(udbinfo.listhead), i = 0; inp && i < n;
+ inp = LIST_NEXT(inp, inp_list)) {
+ if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
+ inp_list[i++] = inp;
+ }
+ n = i;
+
+ error = 0;
+ for (i = 0; i < n; i++) {
+ inp = inp_list[i];
+ if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
+ struct xinpcb64 xi;
+
+ bzero(&xi, sizeof(xi));
+ xi.xi_len = sizeof xi;
+ inpcb_to_xinpcb64(inp, &xi);
+ if (inp->inp_socket)
+ sotoxsocket64(inp->inp_socket, &xi.xi_socket);
+ error = SYSCTL_OUT(req, &xi, sizeof xi);
+ }
+ }
+ if (!error) {
+ /*
+ * Give the user an updated idea of our state.
+ * If the generation differs from what we told
+ * her before, she knows that something happened
+ * while we were processing this request, and it
+ * might be necessary to retry.
+ */
+ bzero(&xig, sizeof(xig));
+ xig.xig_len = sizeof xig;
+ xig.xig_gen = udbinfo.ipi_gencnt;
+ xig.xig_sogen = so_gencnt;
+ xig.xig_count = udbinfo.ipi_count;
+ error = SYSCTL_OUT(req, &xig, sizeof xig);
+ }
+ FREE(inp_list, M_TEMP);
+ lck_rw_done(udbinfo.mtx);
+ return error;
+}
+
+SYSCTL_PROC(_net_inet_udp, OID_AUTO, pcblist64, CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
+ udp_pcblist64, "S,xinpcb64", "List of active UDP sockets");
+
+#endif /* !CONFIG_EMBEDDED */
+
+static int
+udp_pcblist_n SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ int error = 0;
+
+ error = get_pcblist_n(IPPROTO_UDP, req, &udbinfo);
+
+ return error;
+}
+
+SYSCTL_PROC(_net_inet_udp, OID_AUTO, pcblist_n, CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
+ udp_pcblist_n, "S,xinpcb_n", "List of active UDP sockets");
+
+
+__private_extern__ void
+udp_get_ports_used(unsigned int ifindex, uint8_t *bitfield)
+{
+ inpcb_get_ports_used(ifindex, bitfield, &udbinfo);
+}
+
+__private_extern__ uint32_t
+udp_count_opportunistic(unsigned int ifindex, u_int32_t flags)
+{
+ return inpcb_count_opportunistic(ifindex, &udbinfo, flags);
+}
static __inline__ u_int16_t
get_socket_id(struct socket * s)
if (s == NULL) {
return (0);
}
- val = (u_int16_t)(((u_int32_t)s) / sizeof(struct socket));
+ val = (u_int16_t)(((uintptr_t)s) / sizeof(struct socket));
if (val == 0) {
val = 0xffff;
}
return (val);
}
+static int
+udp_check_pktinfo(struct mbuf *control, struct ifnet **outif, struct in_addr *laddr)
+{
+ struct cmsghdr *cm = 0;
+ struct in_pktinfo *pktinfo;
+ struct ifnet *ifp;
+
+ /*
+ * XXX: Currently, we assume all the optional information is stored
+ * in a single mbuf.
+ */
+ if (control->m_next)
+ return (EINVAL);
+
+ if (control->m_len < CMSG_LEN(0))
+ return (EINVAL);
+
+ for (cm = M_FIRST_CMSGHDR(control); cm; cm = M_NXT_CMSGHDR(control, cm)) {
+ if (cm->cmsg_len < sizeof(struct cmsghdr) || cm->cmsg_len > control->m_len)
+ return (EINVAL);
+
+ if (cm->cmsg_level != IPPROTO_IP || cm->cmsg_type != IP_PKTINFO)
+ continue;
+
+ if (cm->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
+ return (EINVAL);
+
+ pktinfo = (struct in_pktinfo *)(void *)CMSG_DATA(cm);
+
+ /* Check for a valid ifindex in pktinfo */
+ ifnet_head_lock_shared();
+
+ if (pktinfo->ipi_ifindex > if_index) {
+ ifnet_head_done();
+ return (ENXIO);
+ }
+
+ /* If ipi_ifindex is specified it takes precedence over ipi_spec_dst */
+
+ if (pktinfo->ipi_ifindex) {
+ ifp = ifindex2ifnet[pktinfo->ipi_ifindex];
+ if (ifp == NULL) {
+ ifnet_head_done();
+ return (ENXIO);
+ }
+
+ ifnet_head_done();
+
+ if (outif != NULL)
+ *outif = ifp;
+ laddr->s_addr = INADDR_ANY;
+ break;
+ }
+
+ ifnet_head_done();
+
+ /* Use the provided ipi_spec_dst address for temp source address */
+ if (outif != NULL)
+ *outif = NULL;
+ *laddr = pktinfo->ipi_spec_dst;
+ break;
+ }
+ return (0);
+}
+
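/*
 * Illustrative userspace sketch, not part of this change: this is the
 * shape of the IP_PKTINFO ancillary data that udp_check_pktinfo() parses.
 * A non-zero ipi_ifindex selects the outgoing interface and takes
 * precedence over ipi_spec_dst, mirroring the logic above.  The helper
 * name and parameters are hypothetical.
 */
#include <string.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/socket.h>
#include <netinet/in.h>

static ssize_t
send_with_pktinfo(int s, void *buf, size_t len, struct sockaddr_in *dst,
    unsigned int ifindex)
{
	union {
		char buf[CMSG_SPACE(sizeof (struct in_pktinfo))];
		struct cmsghdr align;	/* force cmsghdr alignment */
	} cbuf;
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg;
	struct cmsghdr *cm;
	struct in_pktinfo pi;

	memset(&msg, 0, sizeof (msg));
	memset(&cbuf, 0, sizeof (cbuf));
	memset(&pi, 0, sizeof (pi));
	pi.ipi_ifindex = ifindex;	/* 0 means "use ipi_spec_dst instead" */

	msg.msg_name = dst;
	msg.msg_namelen = sizeof (*dst);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf.buf;
	msg.msg_controllen = sizeof (cbuf.buf);

	cm = CMSG_FIRSTHDR(&msg);
	cm->cmsg_level = IPPROTO_IP;
	cm->cmsg_type = IP_PKTINFO;
	cm->cmsg_len = CMSG_LEN(sizeof (pi));
	memcpy(CMSG_DATA(cm), &pi, sizeof (pi));

	return (sendmsg(s, &msg, 0));
}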
static int
udp_output(inp, m, addr, control, p)
register struct inpcb *inp;
{
register struct udpiphdr *ui;
register int len = m->m_pkthdr.len;
- struct sockaddr_in *sin, src;
- struct in_addr origladdr, laddr, faddr;
+ struct sockaddr_in *sin;
+ struct in_addr origladdr, laddr, faddr, pi_laddr;
u_short lport, fport;
- struct sockaddr_in *ifaddr;
- int error = 0, udp_dodisconnect = 0;
-
+ struct sockaddr_in ifaddr;
+ int error = 0, udp_dodisconnect = 0, pktinfo = 0;
+ struct socket *so = inp->inp_socket;
+ int soopts = 0;
+ struct mbuf *inpopts;
+ struct ip_moptions *mopts;
+ struct route ro;
+ struct ip_out_args ipoa = { IFSCOPE_NONE, { 0 }, IPOAF_SELECT_SRCIF };
+ struct ifnet *outif = NULL;
+ struct flowadv *adv = &ipoa.ipoa_flowadv;
+ mbuf_svc_class_t msc = MBUF_SC_UNSPEC;
+ struct ifnet *origoutifp;
+ int flowadv = 0;
+
+ /* Enable flow advisory only when connected */
+ flowadv = (so->so_state & SS_ISCONNECTED) ? 1 : 0;
+
+ pi_laddr.s_addr = INADDR_ANY;
KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_START, 0,0,0,0,0);
- if (control)
- m_freem(control); /* XXX */
+ lck_mtx_assert(&inp->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
+ if (control != NULL) {
+ msc = mbuf_service_class_from_control(control);
+
+ error = udp_check_pktinfo(control, &outif, &pi_laddr);
+
+ m_freem(control);
+ if (error)
+ goto release;
+ pktinfo++;
+ if (outif != NULL)
+ ipoa.ipoa_boundif = outif->if_index;
+ }
KERNEL_DEBUG(DBG_LAYER_OUT_BEG, inp->inp_fport, inp->inp_lport,
inp->inp_laddr.s_addr, inp->inp_faddr.s_addr,
goto release;
}
+ if (flowadv && INP_WAIT_FOR_IF_FEEDBACK(inp)) {
+ /*
+ * The socket is flow-controlled; drop packets
+ * until the inp is no longer flow-controlled
+ */
+ error = ENOBUFS;
+ goto release;
+ }
+ /*
+ * If socket was bound to an ifindex, tell ip_output about it.
+ * If the ancillary IP_PKTINFO option contains an interface index,
+ * it takes precedence over the one specified by IP_BOUND_IF.
+ */
+ if (ipoa.ipoa_boundif == IFSCOPE_NONE &&
+ (inp->inp_flags & INP_BOUND_IF)) {
+ outif = inp->inp_boundifp;
+ ipoa.ipoa_boundif = outif->if_index;
+ }
+ if (inp->inp_flags & INP_NO_IFT_CELLULAR)
+ ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
+ soopts |= IP_OUTARGS;
+
/* If there was a routing change, discard cached route and check
* that we have a valid source address.
* Reacquire a new source address if INADDR_ANY was specified
*/
-
-#if 1
- lck_mtx_assert(inp->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
-#endif
-
- if (inp->inp_route.ro_rt && inp->inp_route.ro_rt->generation_id != route_generation) {
- if (ifa_foraddr(inp->inp_laddr.s_addr) == 0) { /* src address is gone */
- if (inp->inp_flags & INP_INADDR_ANY)
- inp->inp_faddr.s_addr = INADDR_ANY; /* new src will be set later */
- else {
+ if (inp->inp_route.ro_rt != NULL &&
+ inp->inp_route.ro_rt->generation_id != route_generation) {
+ struct in_ifaddr *ia;
+
+ /* src address is gone? */
+ if ((ia = ifa_foraddr(inp->inp_laddr.s_addr)) == NULL) {
+ if (((inp->inp_flags & INP_INADDR_ANY) == 0) ||
+ (so->so_state & SS_ISCONNECTED)) {
+ /* Rdar://5448998
+ * If the source address is gone, return an error if:
+ * - the source was specified
+ * - the socket was already connected
+ */
+ soevent(so,
+ (SO_FILT_HINT_LOCKED |
+ SO_FILT_HINT_NOSRCADDR));
error = EADDRNOTAVAIL;
goto release;
+ } else {
+ /* new src will be set later */
+ inp->inp_laddr.s_addr = INADDR_ANY;
+ inp->inp_last_outifp = NULL;
}
}
- rtfree(inp->inp_route.ro_rt);
- inp->inp_route.ro_rt = (struct rtentry *)0;
+ if (ia != NULL)
+ IFA_REMREF(&ia->ia_ifa);
+ if (inp->inp_route.ro_rt != NULL)
+ rtfree(inp->inp_route.ro_rt);
+ inp->inp_route.ro_rt = NULL;
+ }
+
+ origoutifp = inp->inp_last_outifp;
+
+ /* IP_PKTINFO option check.
+ * If a temporary scope or src address is provided, use it for this packet only
+ * and make sure we forget it after sending this datagram.
+ */
+
+ if (pi_laddr.s_addr != INADDR_ANY ||
+ (ipoa.ipoa_boundif != IFSCOPE_NONE && pktinfo)) {
+ laddr = pi_laddr; /* temp src address for this datagram only */
+ origladdr.s_addr = INADDR_ANY;
+ udp_dodisconnect = 1; /* we don't want to keep the laddr or route */
+ inp->inp_flags |= INP_INADDR_ANY; /* remember we don't care about src addr.*/
+ } else {
+ origladdr = laddr = inp->inp_laddr;
}
- origladdr= laddr = inp->inp_laddr;
+ origoutifp = inp->inp_last_outifp;
faddr = inp->inp_faddr;
lport = inp->inp_lport;
fport = inp->inp_fport;
if (addr) {
- sin = (struct sockaddr_in *)addr;
+ sin = (struct sockaddr_in *)(void *)addr;
if (faddr.s_addr != INADDR_ANY) {
error = EISCONN;
goto release;
* In case we don't have a local port set, go through the full connect.
* We don't have a local port yet (ie, we can't be looked up),
* so it's not an issue if the input runs at the same time we do this.
- */
- error = in_pcbconnect(inp, addr, p);
+ */
+
+ if (pi_laddr.s_addr != INADDR_ANY) /* if we have a source address specified, use that */
+ inp->inp_laddr = pi_laddr;
+ error = in_pcbconnect(inp, addr, p, &outif); /* if a scope is specified, use it */
if (error) {
goto release;
}
faddr = inp->inp_faddr;
fport = inp->inp_fport;
udp_dodisconnect = 1;
+ ipoa.ipoa_boundif = (outif != NULL) ?
+ outif->if_index : IFSCOPE_NONE;
}
- else {
+ else {
/* Fast path case
* we have a full address and a local port.
* use those info to build the packet without changing the pcb
* and interfering with the input path. See 3851370
+ * Note: we may have a scope from IP_PKTINFO, but priority
+ * is always given to the scope provided by INP_BOUND_IF.
*/
if (laddr.s_addr == INADDR_ANY) {
- if ((error = in_pcbladdr(inp, addr, &ifaddr)) != 0)
- goto release;
- laddr = ifaddr->sin_addr;
- inp->inp_flags |= INP_INADDR_ANY; /* from pcbconnect: remember we don't care about src addr.*/
+ if ((error = in_pcbladdr(inp, addr, &ifaddr, &outif)) != 0)
+ goto release;
+ laddr = ifaddr.sin_addr;
+ inp->inp_flags |= INP_INADDR_ANY; /* from pcbconnect: remember we don't care about src addr.*/
+ ipoa.ipoa_boundif = (outif != NULL) ?
+ outif->if_index : IFSCOPE_NONE;
}
-
+
faddr = sin->sin_addr;
fport = sin->sin_port;
}
}
}
+#if CONFIG_MACF_NET
+ mac_mbuf_label_associate_inpcb(inp, m);
+#endif
+ if (inp->inp_flowhash == 0)
+ inp->inp_flowhash = inp_calc_flowhash(inp);
/*
* Calculate data length and get a mbuf
/*
* Set up checksum and output datagram.
*/
- if (udpcksum) {
+ if (udpcksum && !(inp->inp_flags & INP_UDP_NOCKSUM)) {
ui->ui_sum = in_pseudo(ui->ui_src.s_addr, ui->ui_dst.s_addr,
htons((u_short)len + sizeof(struct udphdr) + IPPROTO_UDP));
m->m_pkthdr.csum_flags = CSUM_UDP;
goto abort;
}
#endif /*IPSEC*/
+
+ inpopts = inp->inp_options;
+ soopts |= (inp->inp_socket->so_options & (SO_DONTROUTE | SO_BROADCAST));
+ mopts = inp->inp_moptions;
+ if (mopts != NULL) {
+ IMO_LOCK(mopts);
+ IMO_ADDREF_LOCKED(mopts);
+ if (IN_MULTICAST(ntohl(ui->ui_dst.s_addr)) &&
+ mopts->imo_multicast_ifp != NULL) {
+ inp->inp_last_outifp = mopts->imo_multicast_ifp;
+ }
+ IMO_UNLOCK(mopts);
+ }
+
+ /* Copy the cached route and take an extra reference */
+ inp_route_copyout(inp, &ro);
+
+ set_packet_service_class(m, so, msc, 0);
m->m_pkthdr.socket_id = get_socket_id(inp->inp_socket);
- error = ip_output_list(m, 0, inp->inp_options, &inp->inp_route,
- (inp->inp_socket->so_options & (SO_DONTROUTE | SO_BROADCAST)),
- inp->inp_moptions);
+ m->m_pkthdr.m_flowhash = inp->inp_flowhash;
+ m->m_pkthdr.m_fhflags |= PF_TAG_FLOWHASH;
+ if (flowadv)
+ m->m_pkthdr.m_fhflags |= PF_TAG_FLOWADV;
+
+ if (ipoa.ipoa_boundif != IFSCOPE_NONE)
+ ipoa.ipoa_flags |= IPOAF_BOUND_IF;
+ if (laddr.s_addr != INADDR_ANY)
+ ipoa.ipoa_flags |= IPOAF_BOUND_SRCADDR;
+
+ inp->inp_sndinprog_cnt++;
+
+ socket_unlock(so, 0);
+ error = ip_output_list(m, 0, inpopts, &ro, soopts, mopts, &ipoa);
+ m = NULL;
+ socket_lock(so, 0);
+ if (mopts != NULL)
+ IMO_REMREF(mopts);
+
+ if (error == 0 && nstat_collect) {
+ locked_add_64(&inp->inp_stat->txpackets, 1);
+ locked_add_64(&inp->inp_stat->txbytes, len);
+ }
+
+ if (flowadv && (adv->code == FADV_FLOW_CONTROLLED ||
+ adv->code == FADV_SUSPENDED)) {
+ /* return a hint to the application that
+ * the packet has been dropped
+ */
+ error = ENOBUFS;
+ inp_set_fc_state(inp, adv->code);
+ }
+
+ VERIFY(inp->inp_sndinprog_cnt > 0);
+ if ( --inp->inp_sndinprog_cnt == 0)
+ inp->inp_flags &= ~(INP_FC_FEEDBACK);
+
+ /* Synchronize PCB cached route */
+ inp_route_copyin(inp, &ro);
+
+abort:
if (udp_dodisconnect) {
+ /* Always discard the cached route for unconnected socket */
+ if (inp->inp_route.ro_rt != NULL) {
+ rtfree(inp->inp_route.ro_rt);
+ inp->inp_route.ro_rt = NULL;
+ }
in_pcbdisconnect(inp);
inp->inp_laddr = origladdr; /* XXX rehash? */
- }
- KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_END, error, 0,0,0,0);
- return (error);
+ inp->inp_last_outifp = origoutifp;
+ } else if (inp->inp_route.ro_rt != NULL) {
+ struct rtentry *rt = inp->inp_route.ro_rt;
+ struct ifnet *outifp;
-abort:
- if (udp_dodisconnect) {
- in_pcbdisconnect(inp);
- inp->inp_laddr = origladdr; /* XXX rehash? */
- }
+ if (rt->rt_flags & (RTF_MULTICAST|RTF_BROADCAST))
+ rt = NULL; /* unusable */
+ /*
+ * Always discard if it is a multicast or broadcast route.
+ */
+ if (rt == NULL) {
+ rtfree(inp->inp_route.ro_rt);
+ inp->inp_route.ro_rt = NULL;
+ }
+ /*
+ * If the destination route is unicast, update outifp with
+ * that of the route interface used by IP.
+ */
+ if (rt != NULL && (outifp = rt->rt_ifp) != inp->inp_last_outifp)
+ inp->inp_last_outifp = outifp;
+ }
release:
- m_freem(m);
+ if (m != NULL)
+ m_freem(m);
KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_END, error, 0,0,0,0);
return (error);
}
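/*
 * Illustrative userspace sketch, not part of this change: with the flow
 * advisory handling above, a send on a connected UDP socket may now fail
 * with ENOBUFS while the interface queue is flow-controlled or suspended.
 * The helper below is a hypothetical, crude back-off; real code would
 * wait for writability rather than sleep.
 */
#include <errno.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>

static ssize_t
send_dgram(int s, const void *buf, size_t len)
{
	ssize_t n;

	while ((n = send(s, buf, len, 0)) == -1 && errno == ENOBUFS)
		usleep(1000);		/* back off briefly and retry */
	return (n);
}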
-u_long udp_sendspace = 9216; /* really max datagram size */
- /* 40 1K datagrams */
-SYSCTL_INT(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram, CTLFLAG_RW,
- &udp_sendspace, 0, "Maximum outgoing UDP datagram size");
-
-u_long udp_recvspace = 40 * (1024 +
+u_int32_t udp_sendspace = 9216; /* really max datagram size */
+/* 187 1K datagrams (approx 192 KB) */
+u_int32_t udp_recvspace = 187 * (1024 +
#if INET6
sizeof(struct sockaddr_in6)
#else
sizeof(struct sockaddr_in)
#endif
);
-SYSCTL_INT(_net_inet_udp, UDPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
- &udp_recvspace, 0, "Maximum incoming UDP datagram size");
+
+/* Check that the values of udp send and recv space do not exceed sb_max */
+static int
+sysctl_udp_sospace(struct sysctl_oid *oidp, __unused void *arg1,
+ __unused int arg2, struct sysctl_req *req) {
+ u_int32_t new_value = 0, *space_p = NULL;
+ int changed = 0, error = 0;
+ u_quad_t sb_effective_max = (sb_max/ (MSIZE+MCLBYTES)) * MCLBYTES;
+
+ switch (oidp->oid_number) {
+ case UDPCTL_RECVSPACE:
+ space_p = &udp_recvspace;
+ break;
+ case UDPCTL_MAXDGRAM:
+ space_p = &udp_sendspace;
+ break;
+ default:
+ return EINVAL;
+ }
+ error = sysctl_io_number(req, *space_p, sizeof(u_int32_t),
+ &new_value, &changed);
+ if (changed) {
+ if (new_value > 0 && new_value <= sb_effective_max) {
+ *space_p = new_value;
+ } else {
+ error = ERANGE;
+ }
+ }
+ return error;
+}
+
+SYSCTL_PROC(_net_inet_udp, UDPCTL_RECVSPACE, recvspace, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &udp_recvspace, 0, &sysctl_udp_sospace, "IU", "Maximum incoming UDP datagram size");
+
+SYSCTL_PROC(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &udp_sendspace, 0, &sysctl_udp_sospace, "IU", "Maximum outgoing UDP datagram size");
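/*
 * Illustrative userspace sketch, not part of this change: because
 * sysctl_udp_sospace() caps both values at the sb_max-derived limit, a
 * privileged tuning tool writing net.inet.udp.recvspace gets ERANGE for
 * oversized requests.  The helper name is hypothetical.
 */
#include <stdint.h>
#include <sys/types.h>
#include <sys/sysctl.h>

static int
set_udp_recvspace(uint32_t bytes)
{
	/* returns -1 with errno set, e.g. ERANGE if bytes exceeds the cap */
	return (sysctlbyname("net.inet.udp.recvspace", NULL, NULL,
	    &bytes, sizeof (bytes)));
}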
static int
udp_abort(struct socket *so)
inp = sotoinpcb(so);
if (inp == 0)
- panic("udp_abort: so=%x null inp\n", so); /* ??? possible? panic instead? */
+ panic("udp_abort: so=%p null inp\n", so); /* ??? possible? panic instead? */
soisdisconnected(so);
in_pcbdetach(inp);
return 0;
}
static int
-udp_attach(struct socket *so, int proto, struct proc *p)
+udp_attach(struct socket *so, __unused int proto, struct proc *p)
{
struct inpcb *inp;
int error;
inp = sotoinpcb(so);
if (inp != 0)
- panic ("udp_attach so=%x inp=%x\n", so, inp);
+ panic ("udp_attach so=%p inp=%p\n", so, inp);
error = in_pcballoc(so, &udbinfo, p);
if (error)
inp = (struct inpcb *)so->so_pcb;
inp->inp_vflag |= INP_IPV4;
inp->inp_ip_ttl = ip_defttl;
+ if (nstat_collect)
+ nstat_udp_new_pcb(inp);
return 0;
}
struct inpcb *inp;
int error;
+ if (nam->sa_family != 0 && nam->sa_family != AF_INET
+ && nam->sa_family != AF_INET6) {
+ return EAFNOSUPPORT;
+ }
inp = sotoinpcb(so);
if (inp == 0)
return EINVAL;
return EINVAL;
if (inp->inp_faddr.s_addr != INADDR_ANY)
return EISCONN;
- error = in_pcbconnect(inp, nam, p);
- if (error == 0)
+ error = in_pcbconnect(inp, nam, p, NULL);
+ if (error == 0) {
soisconnected(so);
+ if (inp->inp_flowhash == 0)
+ inp->inp_flowhash = inp_calc_flowhash(inp);
+ }
return error;
}
inp = sotoinpcb(so);
if (inp == 0)
- panic("udp_detach: so=%x null inp\n", so); /* ??? possible? panic instead? */
+ panic("udp_detach: so=%p null inp\n", so); /* ??? possible? panic instead? */
in_pcbdetach(inp);
inp->inp_state = INPCB_STATE_DEAD;
return 0;
return ENOTCONN;
in_pcbdisconnect(inp);
+
+ /* reset flow controlled state, just in case */
+ inp_reset_fc_state(inp);
+
inp->inp_laddr.s_addr = INADDR_ANY;
so->so_state &= ~SS_ISCONNECTED; /* XXX */
+ inp->inp_last_outifp = NULL;
return 0;
}
static int
-udp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
+udp_send(struct socket *so, __unused int flags, struct mbuf *m, struct sockaddr *addr,
struct mbuf *control, struct proc *p)
{
struct inpcb *inp;
m_freem(m);
return EINVAL;
}
+
return udp_output(inp, m, addr, control, p);
}
int
-udp_lock(so, refcount, debug)
- struct socket *so;
- int refcount, debug;
+udp_lock(struct socket *so, int refcount, void *debug)
{
- int lr_saved;
-#ifdef __ppc__
- if (debug == 0) {
- __asm__ volatile("mflr %0" : "=r" (lr_saved));
- }
- else lr_saved = debug;
-#endif
+ void *lr_saved;
+
+ if (debug == NULL)
+ lr_saved = __builtin_return_address(0);
+ else
+ lr_saved = debug;
if (so->so_pcb) {
- lck_mtx_assert(((struct inpcb *)so->so_pcb)->inpcb_mtx, LCK_MTX_ASSERT_NOTOWNED);
- lck_mtx_lock(((struct inpcb *)so->so_pcb)->inpcb_mtx);
- }
- else {
- panic("udp_lock: so=%x NO PCB! lr=%x\n", so, lr_saved);
- lck_mtx_assert(so->so_proto->pr_domain->dom_mtx, LCK_MTX_ASSERT_NOTOWNED);
- lck_mtx_lock(so->so_proto->pr_domain->dom_mtx);
+ lck_mtx_assert(&((struct inpcb *)so->so_pcb)->inpcb_mtx,
+ LCK_MTX_ASSERT_NOTOWNED);
+ lck_mtx_lock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
+ } else {
+ panic("udp_lock: so=%p NO PCB! lr=%p lrh= %s\n",
+ so, lr_saved, solockhistory_nr(so));
+ /* NOTREACHED */
}
-
- if (refcount)
+ if (refcount)
so->so_usecount++;
- so->reserved3= lr_saved;
+ so->lock_lr[so->next_lock_lr] = lr_saved;
+ so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
return (0);
}
int
-udp_unlock(so, refcount, debug)
- struct socket *so;
- int refcount;
- int debug;
+udp_unlock(struct socket *so, int refcount, void *debug)
{
- int lr_saved;
- struct inpcb *inp = sotoinpcb(so);
- struct inpcbinfo *pcbinfo = &udbinfo;
-#ifdef __ppc__
- if (debug == 0) {
- __asm__ volatile("mflr %0" : "=r" (lr_saved));
- }
- else lr_saved = debug;
-#endif
- if (refcount) {
+ void *lr_saved;
+
+ if (debug == NULL)
+ lr_saved = __builtin_return_address(0);
+ else
+ lr_saved = debug;
+
+ if (refcount)
so->so_usecount--;
-#if 0
- if (so->so_usecount == 0 && (inp->inp_wantcnt == WNT_STOPUSING)) {
- if (lck_rw_try_lock_exclusive(pcbinfo->mtx)) {
- in_pcbdispose(inp);
- lck_rw_done(pcbinfo->mtx);
- return(0);
- }
- }
-#endif
- }
+
if (so->so_pcb == NULL) {
- panic("udp_unlock: so=%x NO PCB! lr=%x\n", so, lr_saved);
- lck_mtx_assert(so->so_proto->pr_domain->dom_mtx, LCK_MTX_ASSERT_OWNED);
- lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
- }
- else {
- lck_mtx_assert(((struct inpcb *)so->so_pcb)->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
- lck_mtx_unlock(((struct inpcb *)so->so_pcb)->inpcb_mtx);
+ panic("udp_unlock: so=%p NO PCB! lr=%p lrh= %s\n",
+ so, lr_saved, solockhistory_nr(so));
+ /* NOTREACHED */
+ } else {
+ lck_mtx_assert(&((struct inpcb *)so->so_pcb)->inpcb_mtx,
+ LCK_MTX_ASSERT_OWNED);
+ so->unlock_lr[so->next_unlock_lr] = lr_saved;
+ so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
+ lck_mtx_unlock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
}
- so->reserved4 = lr_saved;
return (0);
}
lck_mtx_t *
-udp_getlock(so, locktype)
- struct socket *so;
- int locktype;
+udp_getlock(struct socket *so, __unused int locktype)
{
struct inpcb *inp = sotoinpcb(so);
if (so->so_pcb)
- return(inp->inpcb_mtx);
+ return(&inp->inpcb_mtx);
else {
- panic("udp_getlock: so=%x NULL so_pcb\n", so);
+ panic("udp_getlock: so=%p NULL so_pcb lrh= %s\n",
+ so, solockhistory_nr(so));
return (so->so_proto->pr_domain->dom_mtx);
}
}
struct socket *so;
struct inpcbinfo *pcbinfo = &udbinfo;
- lck_rw_lock_exclusive(pcbinfo->mtx);
+ if (lck_rw_try_lock_exclusive(pcbinfo->mtx) == FALSE) {
+ if (udp_gc_done == TRUE) {
+ udp_gc_done = FALSE;
+ return; /* couldn't get the lock; try again next time */
+ }
+ lck_rw_lock_exclusive(pcbinfo->mtx);
+ }
+
+ udp_gc_done = TRUE;
for (inp = udb.lh_first; inp != NULL; inp = inpnxt) {
inpnxt = inp->inp_list.le_next;
-
- /* Ignore nat/SharedIP dummy pcbs */
- if (inp->inp_socket == &udbinfo.nat_dummy_socket)
- continue;
if (inp->inp_wantcnt != WNT_STOPUSING)
continue;
so = inp->inp_socket;
- if (!lck_mtx_try_lock(inp->inpcb_mtx)) /* skip if busy, no hurry for cleanup... */
+ if (!lck_mtx_try_lock(&inp->inpcb_mtx)) /* skip if busy, no hurry for cleanup... */
continue;
- if (so->so_usecount == 0)
+ if (so->so_usecount == 0) {
+ if (inp->inp_state != INPCB_STATE_DEAD) {
+#if INET6
+ if (INP_CHECK_SOCKAF(so, AF_INET6))
+ in6_pcbdetach(inp);
+ else
+#endif /* INET6 */
+ in_pcbdetach(inp);
+ }
in_pcbdispose(inp);
- else
- lck_mtx_unlock(inp->inpcb_mtx);
+ } else {
+ lck_mtx_unlock(&inp->inpcb_mtx);
+ }
}
lck_rw_done(pcbinfo->mtx);
}
return 1;
}
+void
+udp_in_cksum_stats(u_int32_t len)
+{
+ udps_in_sw_cksum++;
+ udps_in_sw_cksum_bytes += len;
+}
+
+void
+udp_out_cksum_stats(u_int32_t len)
+{
+ udps_out_sw_cksum++;
+ udps_out_sw_cksum_bytes += len;
+}