X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/9bccf70c0258c7cac2dcb80011b2a964d884c552..d41d1dae2cd00cc08c7982087d1c445180cad9f5:/bsd/netinet/tcp_output.c diff --git a/bsd/netinet/tcp_output.c b/bsd/netinet/tcp_output.c index d3e5558d3..69a2c2aed 100644 --- a/bsd/netinet/tcp_output.c +++ b/bsd/netinet/tcp_output.c @@ -1,23 +1,29 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2010 Apple Inc. All rights reserved. * - * @APPLE_LICENSE_HEADER_START@ + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * - * The contents of this file constitute Original Code as defined in and - * are subject to the Apple Public Source License Version 1.1 (the - * "License"). You may not use this file except in compliance with the - * License. Please obtain a copy of the License at - * http://www.apple.com/publicsource and read it before using this file. + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. * - * This Original Code and all software distributed under the License are - * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the - * License for the specific language governing rights and limitations - * under the License. + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. * - * @APPLE_LICENSE_HEADER_END@ + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995 @@ -54,6 +60,12 @@ * @(#)tcp_output.c 8.4 (Berkeley) 5/24/95 * $FreeBSD: src/sys/netinet/tcp_output.c,v 1.39.2.10 2001/07/07 04:30:38 silby Exp $ */ +/* + * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce + * support for mandatory and extensible security protections. This notice + * is included in support of clause 2.2 (b) of the Apple Public License, + * Version 2.0. 
+ */ #define _IP_VHL @@ -69,9 +81,11 @@ #include #include +#include #include #include +#include #include #include #include @@ -96,6 +110,10 @@ #include #endif /*IPSEC*/ +#if CONFIG_MACF_NET +#include +#endif /* MAC_SOCKET */ + #define DBG_LAYER_BEG NETDBG_CODE(DBG_NETTCP, 1) #define DBG_LAYER_END NETDBG_CODE(DBG_NETTCP, 3) #define DBG_FNC_TCP_OUTPUT NETDBG_CODE(DBG_NETTCP, (4 << 8) | 1) @@ -105,7 +123,7 @@ extern struct mbuf *m_copypack(); #endif -static int path_mtu_discovery = 1; +int path_mtu_discovery = 1; SYSCTL_INT(_net_inet_tcp, OID_AUTO, path_mtu_discovery, CTLFLAG_RW, &path_mtu_discovery, 1, "Enable Path MTU Discovery"); @@ -113,7 +131,7 @@ int ss_fltsz = 1; SYSCTL_INT(_net_inet_tcp, OID_AUTO, slowstart_flightsize, CTLFLAG_RW, &ss_fltsz, 1, "Slow start flight size"); -int ss_fltsz_local = TCP_MAXWIN; /* something large */ +int ss_fltsz_local = 8; /* starts with eight segments max */ SYSCTL_INT(_net_inet_tcp, OID_AUTO, local_slowstart_flightsize, CTLFLAG_RW, &ss_fltsz_local, 1, "Slow start flight size for local networks"); @@ -121,24 +139,92 @@ int tcp_do_newreno = 0; SYSCTL_INT(_net_inet_tcp, OID_AUTO, newreno, CTLFLAG_RW, &tcp_do_newreno, 0, "Enable NewReno Algorithms"); -struct mbuf *m_copym_with_hdrs __P((struct mbuf*, int, int, int, struct mbuf**, int*)); +int tcp_do_tso = 1; +SYSCTL_INT(_net_inet_tcp, OID_AUTO, tso, CTLFLAG_RW, + &tcp_do_tso, 0, "Enable TCP Segmentation Offload"); + + +int tcp_ecn_outbound = 0; +SYSCTL_INT(_net_inet_tcp, OID_AUTO, ecn_initiate_out, CTLFLAG_RW, &tcp_ecn_outbound, + 0, "Initiate ECN for outbound connections"); +int tcp_ecn_inbound = 0; +SYSCTL_INT(_net_inet_tcp, OID_AUTO, ecn_negotiate_in, CTLFLAG_RW, &tcp_ecn_inbound, + 0, "Allow ECN negotiation for inbound connections"); + +int tcp_packet_chaining = 50; +SYSCTL_INT(_net_inet_tcp, OID_AUTO, packetchain, CTLFLAG_RW, &tcp_packet_chaining, + 0, "Enable TCP output packet chaining"); + +int tcp_output_unlocked = 1; +SYSCTL_INT(_net_inet_tcp, OID_AUTO, socket_unlocked_on_output, CTLFLAG_RW, &tcp_output_unlocked, + 0, "Unlock TCP when sending packets down to IP"); + +static int32_t packchain_newlist = 0; +static int32_t packchain_looped = 0; +static int32_t packchain_sent = 0; /* temporary: for testing */ #if IPSEC extern int ipsec_bypass; #endif +extern int slowlink_wsize; /* window correction for slow links */ +#if IPFIREWALL +extern int fw_enable; /* firewall check for packet chaining */ +extern int fw_bypass; /* firewall check: disable packet chaining if there is rules */ +#endif /* IPFIREWALL */ + +extern vm_size_t so_cache_zone_element_size; +#if RANDOM_IP_ID +extern int ip_use_randomid; +#endif /* RANDOM_IP_ID */ +extern u_int32_t dlil_filter_count; +extern u_int32_t kipf_count; + +static int tcp_ip_output(struct socket *, struct tcpcb *, struct mbuf *, int, + struct mbuf *, int, int, int32_t); + +static __inline__ u_int16_t +get_socket_id(struct socket * s) +{ + u_int16_t val; + + if (so_cache_zone_element_size == 0) { + return (0); + } + val = (u_int16_t)(((uintptr_t)s) / so_cache_zone_element_size); + if (val == 0) { + val = 0xffff; + } + return (val); +} + /* * Tcp output routine: figure out what should be sent and send it. + * + * Returns: 0 Success + * EADDRNOTAVAIL + * ENOBUFS + * EMSGSIZE + * EHOSTUNREACH + * ENETDOWN + * ip_output_list:ENOMEM + * ip_output_list:EADDRNOTAVAIL + * ip_output_list:ENETUNREACH + * ip_output_list:EHOSTUNREACH + * ip_output_list:EACCES + * ip_output_list:EMSGSIZE + * ip_output_list:ENOBUFS + * ip_output_list:??? 
[ignorable: mostly IPSEC/firewall/DLIL] + * ip6_output:??? [IPV6 only] */ int -tcp_output(tp) - register struct tcpcb *tp; +tcp_output(struct tcpcb *tp) { - register struct socket *so = tp->t_inpcb->inp_socket; - register long len, win; - int off, flags, error; + struct socket *so = tp->t_inpcb->inp_socket; + int32_t len, recwin, sendwin, off; + int flags, error; register struct mbuf *m; struct ip *ip = NULL; register struct ipovly *ipov = NULL; @@ -148,59 +234,46 @@ tcp_output(tp) register struct tcphdr *th; u_char opt[TCP_MAXOLEN]; unsigned ipoptlen, optlen, hdrlen; - int idle, sendalot; - int maxburst = TCP_MAXBURST; - struct rmxp_tao *taop; - struct rmxp_tao tao_noncached; -#if INET6 - int isipv6; + int idle, sendalot, lost = 0; + int i, sack_rxmit; + int tso = 0; + int sack_bytes_rxmt; + struct sackhole *p; +#ifdef IPSEC + unsigned ipsec_optlen = 0; #endif - int last_off; + int maxburst = TCP_MAXBURST; + int last_off = 0; int m_off; - struct mbuf *m_last = 0; - struct mbuf *m_head = 0; - - - KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_START, 0,0,0,0,0); + struct mbuf *m_last = NULL; + struct mbuf *m_head = NULL; + struct mbuf *packetlist = NULL; + struct mbuf *tp_inp_options = tp->t_inpcb->inp_depend4.inp4_options; #if INET6 - if (isipv6 = ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)) { - - KERNEL_DEBUG(DBG_LAYER_BEG, - ((tp->t_inpcb->inp_fport << 16) | tp->t_inpcb->inp_lport), - (((tp->t_inpcb->in6p_laddr.s6_addr16[0] & 0xffff) << 16) | - (tp->t_inpcb->in6p_faddr.s6_addr16[0] & 0xffff)), - 0,0,0); - } - else + int isipv6 = tp->t_inpcb->inp_vflag & INP_IPV6 ; + struct ip6_pktopts *inp6_pktopts = tp->t_inpcb->inp_depend6.inp6_outputopts; #endif + short packchain_listadd = 0; + u_int16_t socket_id = get_socket_id(so); + int so_options = so->so_options; + struct rtentry *rt; - { - KERNEL_DEBUG(DBG_LAYER_BEG, - ((tp->t_inpcb->inp_fport << 16) | tp->t_inpcb->inp_lport), - (((tp->t_inpcb->inp_laddr.s_addr & 0xffff) << 16) | - (tp->t_inpcb->inp_faddr.s_addr & 0xffff)), - 0,0,0); - } /* * Determine length of data that should be transmitted, * and flags that will be used. * If there is some data or critical controls (SYN, RST) * to send, then transmit; otherwise, investigate further. */ - idle = (tp->snd_max == tp->snd_una); -#ifdef __APPLE__ + idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); if (idle && tp->t_rcvtime >= tp->t_rxtcur) { -#else - if (idle && (ticks - tp->t_rcvtime) >= tp->t_rxtcur) { -#endif /* * We have been idle for "a while" and no acks are * expected to clock out any data we send -- * slow start to get ack "clock" running again. - * + * * Set the slow-start flight size depending on whether * this is a local network or not. 
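
The idle-restart branch above reseeds the congestion window before data starts flowing again. Below is a minimal standalone sketch of that choice, using the sysctl defaults introduced in this diff (ss_fltsz = 1 segment for remote peers, ss_fltsz_local = 8 segments for local ones); restart_cwnd and is_local are illustrative stand-ins, not kernel symbols.

    #include <stdint.h>

    /* Illustrative only: mirrors the idle-restart logic above with the
     * sysctl defaults from this change (ss_fltsz = 1, ss_fltsz_local = 8).
     * is_local stands in for the in_localaddr()/in6_localaddr() checks. */
    static uint32_t
    restart_cwnd(uint32_t t_maxseg, int is_local)
    {
        const uint32_t ss_fltsz = 1;        /* flight size for remote peers */
        const uint32_t ss_fltsz_local = 8;  /* eight segments on local nets */

        return t_maxseg * (is_local ? ss_fltsz_local : ss_fltsz);
    }

With a 1460-byte MSS this restarts a local connection at 11680 bytes of outstanding data and a remote one at a single segment.
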
- */ + */ if ( #if INET6 (isipv6 && in6_localaddr(&tp->t_inpcb->in6p_faddr)) || @@ -215,12 +288,186 @@ tcp_output(tp) else tp->snd_cwnd = tp->t_maxseg * ss_fltsz; } + tp->t_flags &= ~TF_LASTIDLE; + if (idle) { + if (tp->t_flags & TF_MORETOCOME) { + tp->t_flags |= TF_LASTIDLE; + idle = 0; + } + } again: + KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_START, 0,0,0,0,0); + +#if INET6 + if (isipv6) { + + KERNEL_DEBUG(DBG_LAYER_BEG, + ((tp->t_inpcb->inp_fport << 16) | tp->t_inpcb->inp_lport), + (((tp->t_inpcb->in6p_laddr.s6_addr16[0] & 0xffff) << 16) | + (tp->t_inpcb->in6p_faddr.s6_addr16[0] & 0xffff)), + sendalot,0,0); + } + else +#endif + + { + KERNEL_DEBUG(DBG_LAYER_BEG, + ((tp->t_inpcb->inp_fport << 16) | tp->t_inpcb->inp_lport), + (((tp->t_inpcb->inp_laddr.s_addr & 0xffff) << 16) | + (tp->t_inpcb->inp_faddr.s_addr & 0xffff)), + sendalot,0,0); + /* + * If the route generation id changed, we need to check that our + * local (source) IP address is still valid. If it isn't either + * return error or silently do nothing (assuming the address will + * come back before the TCP connection times out). + */ + rt = tp->t_inpcb->inp_route.ro_rt; + if (rt != NULL && (!(rt->rt_flags & RTF_UP) || + rt->generation_id != route_generation)) { + struct ifnet *ifp; + struct in_ifaddr *ia; + + /* disable multipages at the socket */ + somultipages(so, FALSE); + + /* Disable TSO for the socket until we know more */ + tp->t_flags &= ~TF_TSO; + + /* check that the source address is still valid */ + if ((ia = ifa_foraddr(tp->t_inpcb->inp_laddr.s_addr)) == NULL) { + + if (tp->t_state >= TCPS_CLOSE_WAIT) { + tcp_drop(tp, EADDRNOTAVAIL); + return(EADDRNOTAVAIL); + } + + /* set Retransmit timer if it wasn't set + * reset Persist timer and shift register as the + * adversed peer window may not be valid anymore + */ + + if (!tp->t_timer[TCPT_REXMT]) { + tp->t_timer[TCPT_REXMT] = tp->t_rxtcur; + if (tp->t_timer[TCPT_PERSIST]) { + tp->t_timer[TCPT_PERSIST] = 0; + tp->t_rxtshift = 0; + } + } + + if (tp->t_pktlist_head != NULL) + m_freem_list(tp->t_pktlist_head); + TCP_PKTLIST_CLEAR(tp); + + /* drop connection if source address isn't available */ + if (so->so_flags & SOF_NOADDRAVAIL) { + tcp_drop(tp, EADDRNOTAVAIL); + return(EADDRNOTAVAIL); + } + else + return(0); /* silently ignore, keep data in socket: address may be back */ + } + ifafree(&ia->ia_ifa); + + /* + * Address is still valid; check for multipages capability + * again in case the outgoing interface has changed. + */ + RT_LOCK(rt); + if ((ifp = rt->rt_ifp) != NULL) { + somultipages(so, (ifp->if_hwassist & IFNET_MULTIPAGES)); + tcp_set_tso(tp, ifp); + } + if (rt->rt_flags & RTF_UP) + rt->generation_id = route_generation; + /* + * See if we should do MTU discovery. Don't do it if: + * 1) it is disabled via the sysctl + * 2) the route isn't up + * 3) the MTU is locked (if it is, then discovery has been + * disabled) + */ + + if (!path_mtu_discovery || ((rt != NULL) && + (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU)))) + tp->t_flags &= ~TF_PMTUD; + else + tp->t_flags |= TF_PMTUD; + + RT_UNLOCK(rt); + } + } + + /* + * If we've recently taken a timeout, snd_max will be greater than + * snd_nxt. There may be SACK information that allows us to avoid + * resending already delivered data. Adjust snd_nxt accordingly. 
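
tcp_sack_adjust() itself lives in tcp_sack.c and is not part of this diff; the following is only a conceptual sketch of the idea the comment above describes, i.e. hopping snd_nxt over ranges the peer has already SACKed so they are not resent after a timeout. The range representation is hypothetical.

    #include <stdint.h>

    struct sacked_range { uint32_t start, end; };  /* [start, end) already SACKed */

    /* Conceptual only: advance the next-send point past data the peer has
     * already acknowledged via SACK (ranges assumed sorted; wrap-around
     * SEQ arithmetic omitted for brevity). */
    static uint32_t
    skip_sacked(uint32_t snd_nxt, const struct sacked_range *r, int nranges)
    {
        for (int i = 0; i < nranges; i++) {
            if (snd_nxt >= r[i].start && snd_nxt < r[i].end)
                snd_nxt = r[i].end;
        }
        return snd_nxt;
    }
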
+ */ + if (tp->sack_enable && SEQ_LT(tp->snd_nxt, tp->snd_max)) + tcp_sack_adjust(tp); sendalot = 0; off = tp->snd_nxt - tp->snd_una; - win = min(tp->snd_wnd, tp->snd_cwnd); + sendwin = min(tp->snd_wnd, tp->snd_cwnd); + + if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0) + sendwin = min(sendwin, slowlink_wsize); flags = tcp_outflags[tp->t_state]; + /* + * Send any SACK-generated retransmissions. If we're explicitly trying + * to send out new data (when sendalot is 1), bypass this function. + * If we retransmit in fast recovery mode, decrement snd_cwnd, since + * we're replacing a (future) new transmission with a retransmission + * now, and we previously incremented snd_cwnd in tcp_input(). + */ + /* + * Still in sack recovery , reset rxmit flag to zero. + */ + sack_rxmit = 0; + sack_bytes_rxmt = 0; + len = 0; + p = NULL; + if (tp->sack_enable && IN_FASTRECOVERY(tp) && + (p = tcp_sack_output(tp, &sack_bytes_rxmt))) { + int32_t cwin; + + cwin = min(tp->snd_wnd, tp->snd_cwnd) - sack_bytes_rxmt; + if (cwin < 0) + cwin = 0; + /* Do not retransmit SACK segments beyond snd_recover */ + if (SEQ_GT(p->end, tp->snd_recover)) { + /* + * (At least) part of sack hole extends beyond + * snd_recover. Check to see if we can rexmit data + * for this hole. + */ + if (SEQ_GEQ(p->rxmit, tp->snd_recover)) { + /* + * Can't rexmit any more data for this hole. + * That data will be rexmitted in the next + * sack recovery episode, when snd_recover + * moves past p->rxmit. + */ + p = NULL; + goto after_sack_rexmit; + } else + /* Can rexmit part of the current hole */ + len = ((int32_t)min(cwin, + tp->snd_recover - p->rxmit)); + } else + len = ((int32_t)min(cwin, p->end - p->rxmit)); + if (len > 0) { + off = p->rxmit - tp->snd_una; /* update off only if we really transmit SACK data */ + sack_rxmit = 1; + sendalot = 1; + tcpstat.tcps_sack_rexmits++; + tcpstat.tcps_sack_rexmit_bytes += + min(len, tp->t_maxseg); + } + else + len = 0; + } +after_sack_rexmit: /* * Get standard flags, and add SYN or FIN if requested by 'hidden' * state flags. @@ -237,7 +484,7 @@ again: * and go to transmit state. */ if (tp->t_force) { - if (win == 0) { + if (sendwin == 0) { /* * If we still have some data to send, then * clear the FIN bit. Usually this would @@ -256,18 +503,60 @@ again: */ if (off < so->so_snd.sb_cc) flags &= ~TH_FIN; - win = 1; + sendwin = 1; } else { tp->t_timer[TCPT_PERSIST] = 0; tp->t_rxtshift = 0; } } - len = (long)ulmin(so->so_snd.sb_cc, win) - off; - - if ((taop = tcp_gettaocache(tp->t_inpcb)) == NULL) { - taop = &tao_noncached; - bzero(taop, sizeof(*taop)); + /* + * If snd_nxt == snd_max and we have transmitted a FIN, the + * offset will be > 0 even if so_snd.sb_cc is 0, resulting in + * a negative length. This can also occur when TCP opens up + * its congestion window while receiving additional duplicate + * acks after fast-retransmit because TCP will reset snd_nxt + * to snd_max after the fast-retransmit. + * + * In the normal retransmit-FIN-only case, however, snd_nxt will + * be set to snd_una, the offset will be 0, and the length may + * wind up 0. + * + * If sack_rxmit is true we are retransmitting from the scoreboard + * in which case len is already set. + */ + if (sack_rxmit == 0) { + if (sack_bytes_rxmt == 0) + len = min(so->so_snd.sb_cc, sendwin) - off; + else { + int32_t cwin; + + /* + * We are inside of a SACK recovery episode and are + * sending new data, having retransmitted all the + * data possible in the scoreboard. 
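
A standalone restatement of the length computation that follows: new data is bounded by the send window, and the congestion window is charged for everything sent since recovery started (snd_nxt - sack_newdata) plus the bytes already retransmitted from the scoreboard. Parameter names mirror the tcpcb fields; the function is illustrative, not kernel code.

    #include <stdint.h>

    static int32_t imin32(int32_t a, int32_t b) { return a < b ? a : b; }

    /* Sketch of the "new data while in SACK recovery" length calculation. */
    static int32_t
    new_data_len(int32_t sb_cc, int32_t snd_wnd, int32_t off,
        int32_t snd_cwnd, int32_t snd_nxt, int32_t sack_newdata,
        int32_t sack_bytes_rxmt)
    {
        int32_t len = imin32(sb_cc, snd_wnd) - off;

        if (len > 0) {
            int32_t cwin = snd_cwnd - (snd_nxt - sack_newdata) - sack_bytes_rxmt;
            if (cwin < 0)
                cwin = 0;
            len = imin32(len, cwin);
        } else {
            len = 0;
        }
        return len;
    }
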
+ */ + len = min(so->so_snd.sb_cc, tp->snd_wnd) + - off; + /* + * Don't remove this (len > 0) check ! + * We explicitly check for len > 0 here (although it + * isn't really necessary), to work around a gcc + * optimization issue - to force gcc to compute + * len above. Without this check, the computation + * of len is bungled by the optimizer. + */ + if (len > 0) { + cwin = tp->snd_cwnd - + (tp->snd_nxt - tp->sack_newdata) - + sack_bytes_rxmt; + if (cwin < 0) + cwin = 0; + len = imin(len, cwin); + } + else + len = 0; + } } /* @@ -278,22 +567,39 @@ again: if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) { flags &= ~TH_SYN; off--, len++; - if (len > 0 && tp->t_state == TCPS_SYN_SENT && - taop->tao_ccsent == 0) { - KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0); - return 0; + if (len > 0 && tp->t_state == TCPS_SYN_SENT) { + while (!(tp->t_flags & TF_SENDINPROG) && + tp->t_pktlist_head != NULL) { + packetlist = tp->t_pktlist_head; + packchain_listadd = tp->t_lastchain; + packchain_sent++; + TCP_PKTLIST_CLEAR(tp); + tp->t_flags |= TF_SENDINPROG; + + error = tcp_ip_output(so, tp, packetlist, + packchain_listadd, tp_inp_options, + (so_options & SO_DONTROUTE), (sack_rxmit | (sack_bytes_rxmt != 0)), 0); + + tp->t_flags &= ~TF_SENDINPROG; + } + /* tcp was closed while we were in ip; resume close */ + if ((tp->t_flags & + (TF_CLOSING|TF_SENDINPROG)) == TF_CLOSING) { + tp->t_flags &= ~TF_CLOSING; + (void) tcp_close(tp); + } + KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, + 0,0,0,0,0); + return 0; } } /* - * Be careful not to send data and/or FIN on SYN segments - * in cases when no CC option will be sent. + * Be careful not to send data and/or FIN on SYN segments. * This measure is needed to prevent interoperability problems * with not fully conformant TCP implementations. */ - if ((flags & TH_SYN) && - ((tp->t_flags & TF_NOOPT) || !(tp->t_flags & TF_REQ_CC) || - ((flags & TH_ACK) && !(tp->t_flags & TF_RCVD_CC)))) { + if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { len = 0; flags &= ~TH_FIN; } @@ -302,7 +608,7 @@ again: /* * If FIN has been sent but not acked, * but we haven't been called to retransmit, - * len will be -1. Otherwise, window shrank + * len will be < 0. Otherwise, window shrank * after we sent into it. If window shrank to 0, * cancel pending retransmit, pull snd_nxt back * to (closed) window, and set the persist timer @@ -310,7 +616,7 @@ again: * close completely, just wait for an ACK. */ len = 0; - if (win == 0) { + if (sendwin == 0) { tp->t_timer[TCPT_REXMT] = 0; tp->t_rxtshift = 0; tp->snd_nxt = tp->snd_una; @@ -318,38 +624,103 @@ again: tcp_setpersist(tp); } } + + /* + * Truncate to the maximum segment length or enable TCP Segmentation + * Offloading (if supported by hardware) and ensure that FIN is removed + * if the length no longer contains the last data byte. + * + * TSO may only be used if we are in a pure bulk sending state. The + * presence of TCP-MD5, SACK retransmits, SACK advertizements, ipfw rules + * and IP options prevent using TSO. With TSO the TCP header is the same + * (except for the sequence number) for all generated packets. This + * makes it impossible to transmit any options which vary per generated + * segment or packet. + * + * The length of TSO bursts is limited to TCP_MAXWIN. That limit and + * removal of FIN (if not already catched here) are handled later after + * the exact length of the TCP options are known. 
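
A condensed sketch of the TSO decision made in the block that follows: offload is attempted only for a bulk send with no per-segment state to carry (no SACK activity, no IP options or IPsec headers, no interface/firewall filters that must see individual segments); otherwise the length is clamped to one MSS and the loop re-enters via sendalot. The boolean parameters fold together the individual kernel checks and are stand-ins.

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative condensation of the TSO gate; each bool folds together
     * several of the kernel-side checks named above. */
    static bool
    use_tso(int32_t len, int32_t t_maxseg, bool if_supports_tso,
        bool sack_activity, bool ip_or_ipsec_options, bool filters_present)
    {
        if (len <= t_maxseg)
            return false;   /* nothing to segment; send as a single packet */
        /* when this returns false and len > t_maxseg, the caller clamps
         * len to t_maxseg and sets sendalot, as the code below does */
        return if_supports_tso && !sack_activity &&
            !ip_or_ipsec_options && !filters_present;
    }
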
+ */ +#if IPSEC + /* + * Pre-calculate here as we save another lookup into the darknesses + * of IPsec that way and can actually decide if TSO is ok. + */ + if (ipsec_bypass == 0) + ipsec_optlen = ipsec_hdrsiz_tcp(tp); +#endif + if (len > tp->t_maxseg) { - len = tp->t_maxseg; - sendalot = 1; + if ((tp->t_flags & TF_TSO) && tcp_do_tso && +#if RANDOM_IP_ID + ip_use_randomid && +#endif /* RANDOM_IP_ID */ + kipf_count == 0 && dlil_filter_count == 0 && + tp->rcv_numsacks == 0 && sack_rxmit == 0 && sack_bytes_rxmt == 0 && + tp->t_inpcb->inp_options == NULL && + tp->t_inpcb->in6p_options == NULL +#if IPSEC + && ipsec_optlen == 0 +#endif +#if IPFIREWALL + && (fw_enable == 0 || fw_bypass) +#endif + ) { + tso = 1; + sendalot = 0; + } else { + len = tp->t_maxseg; + sendalot = 1; + tso = 0; + } + } + if (sack_rxmit) { + if (SEQ_LT(p->rxmit + len, tp->snd_una + so->so_snd.sb_cc)) + flags &= ~TH_FIN; + } else { + if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc)) + flags &= ~TH_FIN; } - if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc)) - flags &= ~TH_FIN; - win = sbspace(&so->so_rcv); + recwin = tcp_sbspace(tp); /* - * Sender silly window avoidance. If connection is idle - * and can send all data, a maximum segment, - * at least a maximum default-size segment do it, - * or are forced, do it; otherwise don't bother. - * If peer's buffer is tiny, then send - * when window is at least half open. - * If retransmitting (possibly after persist timer forced us - * to send into a small window), then must resend. + * Sender silly window avoidance. We transmit under the following + * conditions when len is non-zero: + * + * - We have a full segment (or more with TSO) + * - This is the last buffer in a write()/send() and we are + * either idle or running NODELAY + * - we've timed out (e.g. persist timer) + * - we have more then 1/2 the maximum send window's worth of + * data (receiver may be limited the window size) + * - we need to retransmit */ if (len) { - if (len == tp->t_maxseg) + if (len >= tp->t_maxseg) { + tp->t_flags |= TF_MAXSEGSNT; goto send; + } if (!(tp->t_flags & TF_MORETOCOME) && - (idle || tp->t_flags & TF_NODELAY) && + (idle || tp->t_flags & TF_NODELAY || tp->t_flags & TF_MAXSEGSNT) && (tp->t_flags & TF_NOPUSH) == 0 && - len + off >= so->so_snd.sb_cc) + len + off >= so->so_snd.sb_cc) { + tp->t_flags &= ~TF_MAXSEGSNT; goto send; - if (tp->t_force) + } + if (tp->t_force) { + tp->t_flags &= ~TF_MAXSEGSNT; goto send; - if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) + } + if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { + tp->t_flags &= ~TF_MAXSEGSNT; goto send; - if (SEQ_LT(tp->snd_nxt, tp->snd_max)) + } + if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { + tp->t_flags &= ~TF_MAXSEGSNT; + goto send; + } + if (sack_rxmit) goto send; } @@ -359,24 +730,35 @@ again: * next expected input). If the difference is at least two * max size segments, or at least 50% of the maximum possible * window, then want to send a window update to peer. + * Skip this if the connection is in T/TCP half-open state. */ - if (win > 0) { + if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN)) { /* * "adv" is the amount we can increase the window, * taking into account that we are limited by * TCP_MAXWIN << tp->rcv_scale. 
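
A standalone sketch of the window-update test that follows: "adv" is how much further the advertised window could be opened, and an update is sent once it grows by at least two full segments or by half of the receive buffer. The additional guard added in this diff for rdar://5640997 (only send if the scaled window value or the ACK sequence actually changed) is left out for brevity.

    #include <stdbool.h>
    #include <stdint.h>

    #define MAXWIN_SKETCH 65535   /* stands in for TCP_MAXWIN */

    static bool
    should_send_window_update(int32_t recwin, int rcv_scale,
        int32_t already_advertised,     /* rcv_adv - rcv_nxt */
        int32_t t_maxseg, int32_t sb_hiwat)
    {
        int32_t limit = MAXWIN_SKETCH << rcv_scale;
        int32_t adv = (recwin < limit ? recwin : limit) - already_advertised;

        if (adv >= 2 * t_maxseg)
            return true;
        if (2 * adv >= sb_hiwat)
            return true;
        return false;
    }
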
*/ - long adv = min(win, (long)TCP_MAXWIN << tp->rcv_scale) - + int32_t adv = imin(recwin, (int)TCP_MAXWIN << tp->rcv_scale) - (tp->rcv_adv - tp->rcv_nxt); - if (adv >= (long) (2 * tp->t_maxseg)) - goto send; - if (2 * adv >= (long) so->so_rcv.sb_hiwat) - goto send; + if (adv >= (int32_t) (2 * tp->t_maxseg)) { + + /* + * Update only if the resulting scaled value of the window changed, or + * if there is a change in the sequence since the last ack. + * This avoids what appears as dupe ACKS (see rdar://5640997) + */ + + if ((tp->last_ack_sent != tp->rcv_nxt) || (((recwin + adv) >> tp->rcv_scale) > recwin)) + goto send; + } + if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) + goto send; } /* - * Send if we owe peer an ACK. + * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW + * is also a catch-all for the retransmit timer timeout case. */ if (tp->t_flags & TF_ACKNOW) goto send; @@ -387,13 +769,22 @@ again: goto send; /* * If our state indicates that FIN should be sent - * and we have not yet done so, or we're retransmitting the FIN, - * then we need to send. + * and we have not yet done so, then we need to send. */ if (flags & TH_FIN && ((tp->t_flags & TF_SENTFIN) == 0 || tp->snd_nxt == tp->snd_una)) goto send; - + /* + * In SACK, it is possible for tcp_output to fail to send a segment + * after the retransmission timer has been turned off. Make sure + * that the retransmission timer is set. + */ + if (tp->sack_enable && (tp->t_state >= TCPS_ESTABLISHED) && SEQ_GT(tp->snd_max, tp->snd_una) && + tp->t_timer[TCPT_REXMT] == 0 && + tp->t_timer[TCPT_PERSIST] == 0) { + tp->t_timer[TCPT_REXMT] = tp->t_rxtcur; + goto just_return; + } /* * TCP window updates are not reliable, rather a polling protocol * using ``persist'' packets is used to insure receipt of window @@ -421,10 +812,28 @@ again: tp->t_rxtshift = 0; tcp_setpersist(tp); } - +just_return: /* - * No reason to send a segment, just return. + * If there is no reason to send a segment, just return. + * but if there is some packets left in the packet list, send them now. */ + while (!(tp->t_flags & TF_SENDINPROG) && tp->t_pktlist_head != NULL) { + packetlist = tp->t_pktlist_head; + packchain_listadd = tp->t_lastchain; + packchain_sent++; + TCP_PKTLIST_CLEAR(tp); + tp->t_flags |= TF_SENDINPROG; + + error = tcp_ip_output(so, tp, packetlist, packchain_listadd, + tp_inp_options, (so_options & SO_DONTROUTE), (sack_rxmit | (sack_bytes_rxmt != 0)), recwin); + + tp->t_flags &= ~TF_SENDINPROG; + } + /* tcp was closed while we were in ip; resume close */ + if ((tp->t_flags & (TF_CLOSING|TF_SENDINPROG)) == TF_CLOSING) { + tp->t_flags &= ~TF_CLOSING; + (void) tcp_close(tp); + } KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0); return (0); @@ -466,7 +875,90 @@ send: optlen += 4; } } + } + + /* + RFC 3168 states that: + - If you ever sent an ECN-setup SYN/SYN-ACK you must be prepared + to handle the TCP ECE flag, even if you also later send a + non-ECN-setup SYN/SYN-ACK. + - If you ever send a non-ECN-setup SYN/SYN-ACK, you must not set + the ip ECT flag. + + It is not clear how the ECE flag would ever be set if you never + set the IP ECT flag on outbound packets. All the same, we use + the TE_SETUPSENT to indicate that we have committed to handling + the TCP ECE flag correctly. We use the TE_SENDIPECT to indicate + whether or not we should set the IP ECT flag on outbound packets. 
+ */ + /* + * For a SYN-ACK, send an ECN setup SYN-ACK + */ + if (tcp_ecn_inbound && (flags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { + if ((tp->ecn_flags & TE_SETUPRECEIVED) != 0) { + if ((tp->ecn_flags & TE_SETUPSENT) == 0) { + /* Setting TH_ECE makes this an ECN-setup SYN-ACK */ + flags |= TH_ECE; + + /* + * Record that we sent the ECN-setup and default to + * setting IP ECT. + */ + tp->ecn_flags |= (TE_SETUPSENT | TE_SENDIPECT); + } + else { + /* + * We sent an ECN-setup SYN-ACK but it was dropped. + * Fallback to non-ECN-setup SYN-ACK and clear flag + * that to indicate we should not send data with IP ECT set. + * + * Pretend we didn't receive an ECN-setup SYN. + */ + tp->ecn_flags &= ~TE_SETUPRECEIVED; + } + } + } + else if (tcp_ecn_outbound && (flags & (TH_SYN | TH_ACK)) == TH_SYN) { + if ((tp->ecn_flags & TE_SETUPSENT) == 0) { + /* Setting TH_ECE and TH_CWR makes this an ECN-setup SYN */ + flags |= (TH_ECE | TH_CWR); + + /* + * Record that we sent the ECN-setup and default to + * setting IP ECT. + */ + tp->ecn_flags |= (TE_SETUPSENT | TE_SENDIPECT); + } + else { + /* + * We sent an ECN-setup SYN but it was dropped. + * Fall back to no ECN and clear flag indicating + * we should send data with IP ECT set. + */ + tp->ecn_flags &= ~TE_SENDIPECT; + } + } + + /* + * Check if we should set the TCP CWR flag. + * CWR flag is sent when we reduced the congestion window because + * we received a TCP ECE or we performed a fast retransmit. We + * never set the CWR flag on retransmitted packets. We only set + * the CWR flag on data packets. Pure acks don't have this set. + */ + if ((tp->ecn_flags & TE_SENDCWR) != 0 && len != 0 && + !SEQ_LT(tp->snd_nxt, tp->snd_max)) { + flags |= TH_CWR; + tp->ecn_flags &= ~TE_SENDCWR; + } + + /* + * Check if we should set the TCP ECE flag. + */ + if ((tp->ecn_flags & TE_SENDECE) != 0 && len == 0) { + flags |= TH_ECE; + } /* * Send a timestamp and echo-reply if this is a SYN and our side @@ -486,94 +978,106 @@ send: optlen += TCPOLEN_TSTAMP_APPA; } - /* - * Send `CC-family' options if our side wants to use them (TF_REQ_CC), - * options are allowed (!TF_NOOPT) and it's not a RST. - */ - if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC && - (flags & TH_RST) == 0) { - switch (flags & (TH_SYN|TH_ACK)) { - /* - * This is a normal ACK, send CC if we received CC before - * from our peer. + if (tp->sack_enable && ((tp->t_flags & TF_NOOPT) == 0)) { + /* + * Tack on the SACK permitted option *last*. + * And do padding of options after tacking this on. + * This is because of MSS, TS, WinScale and Signatures are + * all present, we have just 2 bytes left for the SACK + * permitted option, which is just enough. */ - case TH_ACK: - if (!(tp->t_flags & TF_RCVD_CC)) - break; - /*FALLTHROUGH*/ - /* - * We can only get here in T/TCP's SYN_SENT* state, when - * we're a sending a non-SYN segment without waiting for - * the ACK of our SYN. A check above assures that we only - * do this if our peer understands T/TCP. + * If this is the first SYN of connection (not a SYN + * ACK), include SACK permitted option. If this is a + * SYN ACK, include SACK permitted option if peer has + * already done so. This is only for active connect, + * since the syncache takes care of the passive connect. 
*/ - case 0: - opt[optlen++] = TCPOPT_NOP; - opt[optlen++] = TCPOPT_NOP; - opt[optlen++] = TCPOPT_CC; - opt[optlen++] = TCPOLEN_CC; - *(u_int32_t *)&opt[optlen] = htonl(tp->cc_send); - - optlen += 4; - break; + if ((flags & TH_SYN) && + (!(flags & TH_ACK) || (tp->t_flags & TF_SACK_PERMIT))) { + u_char *bp; + bp = (u_char *)opt + optlen; + + *bp++ = TCPOPT_SACK_PERMITTED; + *bp++ = TCPOLEN_SACK_PERMITTED; + optlen += TCPOLEN_SACK_PERMITTED; + } /* - * This is our initial SYN, check whether we have to use - * CC or CC.new. + * Send SACKs if necessary. This should be the last + * option processed. Only as many SACKs are sent as + * are permitted by the maximum options size. + * + * In general, SACK blocks consume 8*n+2 bytes. + * So a full size SACK blocks option is 34 bytes + * (to generate 4 SACK blocks). At a minimum, + * we need 10 bytes (to generate 1 SACK block). + * If TCP Timestamps (12 bytes) and TCP Signatures + * (18 bytes) are both present, we'll just have + * 10 bytes for SACK options 40 - (12 + 18). */ - case TH_SYN: - opt[optlen++] = TCPOPT_NOP; - opt[optlen++] = TCPOPT_NOP; - opt[optlen++] = tp->t_flags & TF_SENDCCNEW ? - TCPOPT_CCNEW : TCPOPT_CC; - opt[optlen++] = TCPOLEN_CC; - *(u_int32_t *)&opt[optlen] = htonl(tp->cc_send); - optlen += 4; - break; + if (TCPS_HAVEESTABLISHED(tp->t_state) && + (tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks > 0 && + MAX_TCPOPTLEN - optlen - 2 >= TCPOLEN_SACK) { + int nsack, sackoptlen, padlen; + u_char *bp = (u_char *)opt + optlen; + u_int32_t *lp; - /* - * This is a SYN,ACK; send CC and CC.echo if we received - * CC from our peer. - */ - case (TH_SYN|TH_ACK): - if (tp->t_flags & TF_RCVD_CC) { - opt[optlen++] = TCPOPT_NOP; - opt[optlen++] = TCPOPT_NOP; - opt[optlen++] = TCPOPT_CC; - opt[optlen++] = TCPOLEN_CC; - *(u_int32_t *)&opt[optlen] = - htonl(tp->cc_send); - optlen += 4; - opt[optlen++] = TCPOPT_NOP; - opt[optlen++] = TCPOPT_NOP; - opt[optlen++] = TCPOPT_CCECHO; - opt[optlen++] = TCPOLEN_CC; - *(u_int32_t *)&opt[optlen] = - htonl(tp->cc_recv); - optlen += 4; + nsack = (MAX_TCPOPTLEN - optlen - 2) / TCPOLEN_SACK; + nsack = min(nsack, tp->rcv_numsacks); + sackoptlen = (2 + nsack * TCPOLEN_SACK); + + /* + * First we need to pad options so that the + * SACK blocks can start at a 4-byte boundary + * (sack option and length are at a 2 byte offset). 
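
A worked, standalone version of the sizing arithmetic that follows: given the option bytes already queued and the number of SACK blocks held, compute how many blocks fit in the 40-byte option space, the option length (8*n + 2), and the NOP padding needed so the 32-bit block boundaries line up. sack_option_layout is illustrative only.

    #include <stdio.h>

    #define MAX_TCPOPTLEN 40   /* total TCP option space */
    #define TCPOLEN_SACK   8   /* one SACK block: two 32-bit sequence numbers */

    static void
    sack_option_layout(int optlen, int rcv_numsacks)
    {
        int nsack = (MAX_TCPOPTLEN - optlen - 2) / TCPOLEN_SACK;
        if (nsack > rcv_numsacks)
            nsack = rcv_numsacks;
        int sackoptlen = 2 + nsack * TCPOLEN_SACK;
        int padlen = (MAX_TCPOPTLEN - optlen - sackoptlen) % 4;

        printf("optlen=%d blocks=%d sackoptlen=%d padlen=%d total=%d\n",
            optlen, nsack, sackoptlen, padlen, optlen + padlen + sackoptlen);
    }

For example, with 12 bytes of timestamp options already queued and four blocks available, three blocks fit: 26 bytes of SACK option, 2 NOP bytes of padding, 40 bytes of options in total.
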
+ */ + padlen = (MAX_TCPOPTLEN - optlen - sackoptlen) % 4; + optlen += padlen; + while (padlen-- > 0) + *bp++ = TCPOPT_NOP; + + tcpstat.tcps_sack_send_blocks++; + *bp++ = TCPOPT_SACK; + *bp++ = sackoptlen; + lp = (u_int32_t *)bp; + for (i = 0; i < nsack; i++) { + struct sackblk sack = tp->sackblks[i]; + *lp++ = htonl(sack.start); + *lp++ = htonl(sack.end); } - break; + optlen += sackoptlen; } - } + } + + /* Pad TCP options to a 4 byte boundary */ + if (optlen < MAX_TCPOPTLEN && (optlen % sizeof(u_int32_t))) { + int pad = sizeof(u_int32_t) - (optlen % sizeof(u_int32_t)); + u_char *bp = (u_char *)opt + optlen; + + optlen += pad; + while (pad) { + *bp++ = TCPOPT_EOL; + pad--; + } + } + + hdrlen += optlen; - hdrlen += optlen; #if INET6 if (isipv6) ipoptlen = ip6_optlen(tp->t_inpcb); else #endif { - if (tp->t_inpcb->inp_options) { - ipoptlen = tp->t_inpcb->inp_options->m_len - + if (tp_inp_options) { + ipoptlen = tp_inp_options->m_len - offsetof(struct ipoption, ipopt_list); - } else { + } else ipoptlen = 0; - } } #if IPSEC - if (ipsec_bypass == 0) - ipoptlen += ipsec_hdrsiz_tcp(tp); + ipoptlen += ipsec_optlen; #endif /* @@ -581,14 +1085,34 @@ send: * bump the packet length beyond the t_maxopd length. * Clear the FIN bit because we cut off the tail of * the segment. + * + * When doing TSO limit a burst to TCP_MAXWIN minus the + * IP, TCP and Options length to keep ip->ip_len from + * overflowing. Prevent the last segment from being + * fractional thus making them all equal sized and set + * the flag to continue sending. TSO is disabled when + * IP options or IPSEC are present. */ if (len + optlen + ipoptlen > tp->t_maxopd) { /* * If there is still more to send, don't close the connection. */ flags &= ~TH_FIN; - len = tp->t_maxopd - optlen - ipoptlen; - sendalot = 1; + if (tso) { + int32_t tso_maxlen; + + tso_maxlen = tp->tso_max_segment_size ? tp->tso_max_segment_size : TCP_MAXWIN; + + if (len > tso_maxlen - hdrlen - optlen) { + len = tso_maxlen - hdrlen - optlen; + len = len - (len % (tp->t_maxopd - optlen)); + sendalot = 1; + } else if (tp->t_flags & TF_NEEDFIN) + sendalot = 1; + } else { + len = tp->t_maxopd - optlen - ipoptlen; + sendalot = 1; + } } /*#ifdef DIAGNOSTIC*/ @@ -609,7 +1133,7 @@ send: if (len) { if (tp->t_force && len == 1) tcpstat.tcps_sndprobe++; - else if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { + else if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) { tcpstat.tcps_sndrexmitpack++; tcpstat.tcps_sndrexmitbyte += len; } else { @@ -643,7 +1167,7 @@ send: m = NULL; #if INET6 if (MHLEN < hdrlen + max_linkhdr) { - MGETHDR(m, M_DONTWAIT, MT_HEADER); + MGETHDR(m, M_DONTWAIT, MT_HEADER); /* MAC-OK */ if (m == NULL) { error = ENOBUFS; goto out; @@ -660,7 +1184,7 @@ send: #endif if (len <= MHLEN - hdrlen - max_linkhdr) { if (m == NULL) { - MGETHDR(m, M_DONTWAIT, MT_HEADER); + MGETHDR(m, M_DONTWAIT, MT_HEADER); /* MAC-OK */ if (m == NULL) { error = ENOBUFS; goto out; @@ -668,6 +1192,12 @@ send: m->m_data += max_linkhdr; m->m_len = hdrlen; } + /* makes sure we still have data left to be sent at this point */ + if (so->so_snd.sb_mb == NULL || off < 0) { + if (m != NULL) m_freem(m); + error = 0; /* should we return an error? */ + goto out; + } m_copydata(so->so_snd.sb_mb, off, (int) len, mtod(m, caddr_t) + hdrlen); m->m_len += len; @@ -690,16 +1220,22 @@ send: * m_copym_with_hdrs to avoid rescanning from the beginning of the socket buffer mbuf list. * setting the mbuf pointer to NULL is sufficient to disable the hint mechanism. 
*/ - if (m_head != so->so_snd.sb_mb || last_off != off) + if (m_head != so->so_snd.sb_mb || sack_rxmit || last_off != off) m_last = NULL; last_off = off + len; m_head = so->so_snd.sb_mb; - + + /* makes sure we still have data left to be sent at this point */ + if (m_head == NULL) { + error = 0; /* should we return an error? */ + goto out; + } + /* * m_copym_with_hdrs will always return the last mbuf pointer and the offset into it that * it acted on to fullfill the current request, whether a valid 'hint' was passed in or not */ - if ((m = m_copym_with_hdrs(so->so_snd.sb_mb, off, (int) len, M_DONTWAIT, &m_last, &m_off)) == NULL) { + if ((m = m_copym_with_hdrs(so->so_snd.sb_mb, off, len, M_DONTWAIT, &m_last, &m_off)) == NULL) { error = ENOBUFS; goto out; } @@ -726,7 +1262,7 @@ send: else tcpstat.tcps_sndwinup++; - MGETHDR(m, M_DONTWAIT, MT_HEADER); + MGETHDR(m, M_DONTWAIT, MT_HEADER); /* MAC-OK */ if (m == NULL) { error = ENOBUFS; goto out; @@ -740,7 +1276,10 @@ send: m->m_data += max_linkhdr; m->m_len = hdrlen; } - m->m_pkthdr.rcvif = (struct ifnet *)0; + m->m_pkthdr.rcvif = 0; +#if CONFIG_MACF_NET + mac_mbuf_label_associate_inpcb(tp->t_inpcb, m); +#endif #if INET6 if (isipv6) { ip6 = mtod(m, struct ip6_hdr *); @@ -754,6 +1293,10 @@ send: th = (struct tcphdr *)(ip + 1); /* this picks up the pseudo header (w/o the length) */ tcp_fillheaders(tp, ip, th); + if ((tp->ecn_flags & TE_SENDIPECT) != 0 && len && + !SEQ_LT(tp->snd_nxt, tp->snd_max)) { + ip->ip_tos = IPTOS_ECN_ECT0; + } } /* @@ -777,11 +1320,19 @@ send: * case, since we know we aren't doing a retransmission. * (retransmit and persist are mutually exclusive...) */ - if (len || (flags & (TH_SYN|TH_FIN)) || tp->t_timer[TCPT_PERSIST]) - th->th_seq = htonl(tp->snd_nxt); - else - th->th_seq = htonl(tp->snd_max); + if (sack_rxmit == 0) { + if (len || (flags & (TH_SYN|TH_FIN)) || tp->t_timer[TCPT_PERSIST]) + th->th_seq = htonl(tp->snd_nxt); + else + th->th_seq = htonl(tp->snd_max); + } else { + th->th_seq = htonl(p->rxmit); + p->rxmit += len; + tp->sackhint.sack_bytes_rexmit += len; + } th->th_ack = htonl(tp->rcv_nxt); + tp->last_ack_sent = tp->rcv_nxt; + if (optlen) { bcopy(opt, th + 1, optlen); th->th_off = (sizeof (struct tcphdr) + optlen) >> 2; @@ -791,13 +1342,33 @@ send: * Calculate receive window. Don't shrink window, * but avoid silly window syndrome. */ - if (win < (long)(so->so_rcv.sb_hiwat / 4) && win < (long)tp->t_maxseg) - win = 0; - if (win < (long)(tp->rcv_adv - tp->rcv_nxt)) - win = (long)(tp->rcv_adv - tp->rcv_nxt); - if (win > (long)TCP_MAXWIN << tp->rcv_scale) - win = (long)TCP_MAXWIN << tp->rcv_scale; - th->th_win = htons((u_short) (win>>tp->rcv_scale)); + if (recwin < (int32_t)(so->so_rcv.sb_hiwat / 4) && recwin < (int)tp->t_maxseg) + recwin = 0; + if (recwin < (int32_t)(tp->rcv_adv - tp->rcv_nxt)) + recwin = (int32_t)(tp->rcv_adv - tp->rcv_nxt); + if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0) { + if (recwin > (int32_t)slowlink_wsize) + recwin = slowlink_wsize; + th->th_win = htons((u_short) (recwin>>tp->rcv_scale)); + } + else { + if (recwin > (int32_t)(TCP_MAXWIN << tp->rcv_scale)) + recwin = (int32_t)(TCP_MAXWIN << tp->rcv_scale); + th->th_win = htons((u_short) (recwin>>tp->rcv_scale)); + } + + /* + * Adjust the RXWIN0SENT flag - indicate that we have advertised + * a 0 window. This may cause the remote transmitter to stall. This + * flag tells soreceive() to disable delayed acknowledgements when + * draining the buffer. 
This can occur if the receiver is attempting + * to read more data then can be buffered prior to transmitting on + * the connection. + */ + if (recwin == 0) + tp->t_flags |= TF_RXWIN0SENT; + else + tp->t_flags &= ~TF_RXWIN0SENT; if (SEQ_GT(tp->snd_up, tp->snd_nxt)) { th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt)); th->th_flags |= TH_URG; @@ -831,11 +1402,25 @@ send: if (len + optlen) th->th_sum = in_addword(th->th_sum, htons((u_short)(optlen + len))); + } + + /* + * Enable TSO and specify the size of the segments. + * The TCP pseudo header checksum is always provided. + * XXX: Fixme: This is currently not the case for IPv6. + */ + if (tso) { +#if INET6 + if (isipv6) + m->m_pkthdr.csum_flags = CSUM_TSO_IPV6; + else +#endif /* INET6 */ + m->m_pkthdr.csum_flags = CSUM_TSO_IPV4; - /* IP version must be set here for ipv4/ipv6 checking later */ - KASSERT(ip->ip_v == IPVERSION, - ("%s: IP version incorrect: %d", __FUNCTION__, ip->ip_v)); - } + m->m_pkthdr.tso_segsz = tp->t_maxopd - optlen; + } + else + m->m_pkthdr.tso_segsz = 0; /* * In transmit state, time the transmission and arrange for @@ -855,6 +1440,8 @@ send: tp->t_flags |= TF_SENTFIN; } } + if (sack_rxmit) + goto timer; tp->snd_nxt += len; if (SEQ_GT(tp->snd_nxt, tp->snd_max)) { tp->snd_max = tp->snd_nxt; @@ -877,23 +1464,37 @@ send: * Initialize shift counter which is used for backoff * of retransmit time. */ +timer: if (tp->t_timer[TCPT_REXMT] == 0 && - tp->snd_nxt != tp->snd_una) { - tp->t_timer[TCPT_REXMT] = tp->t_rxtcur; + ((sack_rxmit && tp->snd_nxt != tp->snd_max) || + tp->snd_nxt != tp->snd_una)) { if (tp->t_timer[TCPT_PERSIST]) { tp->t_timer[TCPT_PERSIST] = 0; tp->t_rxtshift = 0; } + tp->t_timer[TCPT_REXMT] = tp->t_rxtcur; } - } else - if (SEQ_GT(tp->snd_nxt + len, tp->snd_max)) + } else { + /* + * Persist case, update snd_max but since we are in + * persist mode (no window) we do not update snd_nxt. + */ + int xlen = len; + if (flags & TH_SYN) + ++xlen; + if (flags & TH_FIN) { + ++xlen; + tp->t_flags |= TF_SENTFIN; + } + if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max)) tp->snd_max = tp->snd_nxt + len; + } #if TCPDEBUG /* * Trace. */ - if (so->so_options & SO_DEBUG) + if (so_options & SO_DEBUG) tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0); #endif @@ -928,17 +1529,21 @@ send: goto out; } #endif /*IPSEC*/ + m->m_pkthdr.socket_id = socket_id; + +#if PKT_PRIORITY + set_traffic_class(m, so, MBUF_TC_NONE); +#endif /* PKT_PRIORITY */ error = ip6_output(m, - tp->t_inpcb->in6p_outputopts, + inp6_pktopts, &tp->t_inpcb->in6p_route, - (so->so_options & SO_DONTROUTE), NULL, NULL); + (so_options & SO_DONTROUTE), NULL, NULL, 0); } else #endif /* INET6 */ { - struct rtentry *rt; ip->ip_len = m->m_pkthdr.len; #if INET6 - if (INP_CHECK_SOCKAF(so, AF_INET6)) + if (isipv6) ip->ip_ttl = in6_selecthlim(tp->t_inpcb, tp->t_inpcb->in6p_route.ro_rt ? tp->t_inpcb->in6p_route.ro_rt->rt_ifp @@ -946,7 +1551,7 @@ send: else #endif /* INET6 */ ip->ip_ttl = tp->t_inpcb->inp_ip_ttl; /* XXX */ - ip->ip_tos = tp->t_inpcb->inp_ip_tos; /* XXX */ + ip->ip_tos |= (tp->t_inpcb->inp_ip_tos & ~IPTOS_ECN_MASK); /* XXX */ #if INET6 @@ -968,45 +1573,123 @@ send: } /* - * See if we should do MTU discovery. We do it only if the following - * are true: - * 1) we have a valid route to the destination - * 2) the MTU is not locked (if it is, then discovery has been - * disabled) + * See if we should do MTU discovery. 
+ * Look at the flag updated on the following criterias: + * 1) Path MTU discovery is authorized by the sysctl + * 2) The route isn't set yet (unlikely but could happen) + * 3) The route is up + * 4) the MTU is not locked (if it is, then discovery has been + * disabled for that route) */ - if (path_mtu_discovery - && (rt = tp->t_inpcb->inp_route.ro_rt) - && rt->rt_flags & RTF_UP - && !(rt->rt_rmx.rmx_locks & RTV_MTU)) { + + if (path_mtu_discovery && (tp->t_flags & TF_PMTUD)) ip->ip_off |= IP_DF; - } + #if IPSEC if (ipsec_bypass == 0) ipsec_setsocket(m, so); #endif /*IPSEC*/ - error = ip_output(m, tp->t_inpcb->inp_options, &tp->t_inpcb->inp_route, - (so->so_options & SO_DONTROUTE), 0); - } - if (error) { + /* + * The socket is kept locked while sending out packets in ip_output, even if packet chaining is not active. + */ + lost = 0; + m->m_pkthdr.socket_id = socket_id; + m->m_nextpkt = NULL; +#if PKT_PRIORITY + set_traffic_class(m, so, MBUF_TC_NONE); +#endif /* PKT_PRIORITY */ + tp->t_pktlist_sentlen += len; + tp->t_lastchain++; + if (tp->t_pktlist_head != NULL) { + tp->t_pktlist_tail->m_nextpkt = m; + tp->t_pktlist_tail = m; + } else { + packchain_newlist++; + tp->t_pktlist_head = tp->t_pktlist_tail = m; + } + + if (sendalot == 0 || (tp->t_state != TCPS_ESTABLISHED) || + (tp->snd_cwnd <= (tp->snd_wnd / 8)) || + (tp->t_flags & (TH_PUSH | TF_ACKNOW)) || tp->t_force != 0 || + tp->t_lastchain >= tcp_packet_chaining) { + error = 0; + while (!(tp->t_flags & TF_SENDINPROG) && + tp->t_pktlist_head != NULL) { + packetlist = tp->t_pktlist_head; + packchain_listadd = tp->t_lastchain; + packchain_sent++; + lost = tp->t_pktlist_sentlen; + TCP_PKTLIST_CLEAR(tp); + tp->t_flags |= TF_SENDINPROG; + + error = tcp_ip_output(so, tp, packetlist, + packchain_listadd, tp_inp_options, + (so_options & SO_DONTROUTE), (sack_rxmit | (sack_bytes_rxmt != 0)), recwin); + + tp->t_flags &= ~TF_SENDINPROG; + if (error) { + /* + * Take into account the rest of unsent + * packets in the packet list for this tcp + * into "lost", since we're about to free + * the whole list below. + */ + lost += tp->t_pktlist_sentlen; + break; + } else { + lost = 0; + } + } + /* tcp was closed while we were in ip; resume close */ + if ((tp->t_flags & (TF_CLOSING|TF_SENDINPROG)) == TF_CLOSING) { + tp->t_flags &= ~TF_CLOSING; + (void) tcp_close(tp); + return (0); + } + } + else { + error = 0; + packchain_looped++; + tcpstat.tcps_sndtotal++; + + goto again; + } + } + if (error) { /* - * We know that the packet was lost, so back out the - * sequence number advance, if any. + * Assume that the packets were lost, so back out the + * sequence number advance, if any. Note that the "lost" + * variable represents the amount of user data sent during + * the recent call to ip_output_list() plus the amount of + * user data in the packet list for this tcp at the moment. */ - if (tp->t_force == 0 || !tp->t_timer[TCPT_PERSIST]) { + if (tp->t_force == 0 || tp->t_timer[TCPT_PERSIST] == 0) { /* * No need to check for TH_FIN here because * the TF_SENTFIN flag handles that case. 
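
A minimal sketch of the roll-back the error path below performs: the bytes counted in "lost" (this call to ip_output_list() plus anything still queued on the tcp packet list) are subtracted again so they will be retransmitted, either from the SACK hole being worked on or from snd_nxt. The helper is illustrative; the kernel additionally skips this for segments that carried only a SYN.

    #include <stdint.h>

    /* Illustrative only: undo the sequence-number advance for data that
     * never made it out, so the normal retransmission machinery resends it. */
    static void
    back_out_send(uint32_t *snd_nxt, uint32_t *hole_rxmit, int sack_rxmit,
        uint32_t lost)
    {
        if (sack_rxmit)
            *hole_rxmit -= lost;    /* retransmission point inside the hole */
        else
            *snd_nxt -= lost;       /* ordinary new-data transmission */
    }
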
*/ - if ((flags & TH_SYN) == 0) - tp->snd_nxt -= len; + if ((flags & TH_SYN) == 0) { + if (sack_rxmit) { + p->rxmit -= lost; + tp->sackhint.sack_bytes_rexmit -= lost; + } else + tp->snd_nxt -= lost; + } } out: + if (tp->t_pktlist_head != NULL) + m_freem_list(tp->t_pktlist_head); + TCP_PKTLIST_CLEAR(tp); + if (error == ENOBUFS) { - if (!tp->t_timer[TCPT_REXMT] && - !tp->t_timer[TCPT_PERSIST]) - tp->t_timer[TCPT_REXMT] = tp->t_rxtcur; - tcp_quench(tp->t_inpcb, 0); + if (!tp->t_timer[TCPT_REXMT] && + !tp->t_timer[TCPT_PERSIST]) + tp->t_timer[TCPT_REXMT] = tp->t_rxtcur; + + tp->snd_cwnd = tp->t_maxseg; + tp->t_bytes_acked = 0; + KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0); return (0); } @@ -1016,7 +1699,16 @@ out: * for us. tcp_mtudisc() will, as its last action, * initiate retransmission, so it is important to * not do so here. + * + * If TSO was active we either got an interface + * without TSO capabilits or TSO was turned off. + * Disable it for this connection as too and + * immediatly retry with MSS sized segments generated + * by this function. */ + if (tso) + tp->t_flags &= ~TF_TSO; + tcp_mtudisc(tp->t_inpcb, 0); KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0); return 0; @@ -1030,30 +1722,141 @@ out: KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0); return (error); } + tcpstat.tcps_sndtotal++; /* * Data sent (as far as we can tell). * If this advertises a larger window than any other segment, * then remember the size of the advertised window. - * Any pending ACK has now been sent. + * Make sure ACK/DELACK conditions are cleared before + * we unlock the socket. + * NOTE: for now, this is done in tcp_ip_output for IPv4 */ - if (win > 0 && SEQ_GT(tp->rcv_nxt+win, tp->rcv_adv)) - tp->rcv_adv = tp->rcv_nxt + win; - tp->last_ack_sent = tp->rcv_nxt; - tp->t_flags &= ~(TF_ACKNOW|TF_DELACK); - if (sendalot) +#if INET6 + if (isipv6) { + if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) + tp->rcv_adv = tp->rcv_nxt + recwin; + tp->last_ack_sent = tp->rcv_nxt; + tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); + } +#endif + + KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END,0,0,0,0,0); + if (sendalot && (!tcp_do_newreno || --maxburst)) goto again; - KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0); return (0); } +static int +tcp_ip_output(struct socket *so, struct tcpcb *tp, struct mbuf *pkt, + int cnt, struct mbuf *opt, int flags, int sack_in_progress, int recwin) +{ + int error = 0; + boolean_t chain; + boolean_t unlocked = FALSE; + struct inpcb *inp = tp->t_inpcb; + struct ip_out_args ipoa; + struct route ro; +#if CONFIG_OUT_IF + unsigned int outif; +#endif /* CONFIG_OUT_IF */ + + /* If socket was bound to an ifindex, tell ip_output about it */ + ipoa.ipoa_ifscope = (inp->inp_flags & INP_BOUND_IF) ? + inp->inp_boundif : IFSCOPE_NONE; + flags |= IP_OUTARGS; + + /* Copy the cached route and take an extra reference */ + inp_route_copyout(inp, &ro); + + /* + * Data sent (as far as we can tell). + * If this advertises a larger window than any other segment, + * then remember the size of the advertised window. + * Make sure ACK/DELACK conditions are cleared before + * we unlock the socket. + */ + if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) + tp->rcv_adv = tp->rcv_nxt + recwin; + tp->last_ack_sent = tp->rcv_nxt; + tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); + + /* + * If allowed, unlock TCP socket while in IP + * but only if the connection is established and + * if we're not sending from an upcall. 
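
A userland analogue of the tcp_output_unlocked path described above and implemented just below: the per-socket lock is dropped around the potentially long call into IP, then reacquired before socket state is touched again. pthread mutexes and the function names here are stand-ins for the kernel's socket_unlock()/socket_lock(); the may_unlock flag collapses the established-state, no-upcall and no-SACK-in-progress conditions into one parameter.

    #include <pthread.h>
    #include <stdbool.h>

    static int
    send_downstack(pthread_mutex_t *so_lock, bool may_unlock,
        int (*ip_send)(void *pkt), void *pkt)
    {
        bool unlocked = false;
        int error;

        if (may_unlock) {
            unlocked = true;
            pthread_mutex_unlock(so_lock);   /* let input and timers run */
        }

        error = ip_send(pkt);                /* the long downcall into IP */

        if (unlocked)
            pthread_mutex_lock(so_lock);     /* resync with socket state */
        return error;
    }
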
+ */ + if (tcp_output_unlocked && ((so->so_flags & SOF_UPCALLINUSE) == 0) && + (tp->t_state == TCPS_ESTABLISHED) && (sack_in_progress == 0)) { + unlocked = TRUE; + socket_unlock(so, 0); + } + + /* + * Don't send down a chain of packets when: + * - TCP chaining is disabled + * - there is an IPsec rule set + * - there is a non default rule set for the firewall + */ + + chain = tcp_packet_chaining > 1 +#if IPSEC + && ipsec_bypass +#endif +#if IPFIREWALL + && (fw_enable == 0 || fw_bypass) +#endif + ; // I'm important, not extraneous + + + while (pkt != NULL) { + struct mbuf *npkt = pkt->m_nextpkt; + + if (!chain) { + pkt->m_nextpkt = NULL; + /* + * If we are not chaining, make sure to set the packet + * list count to 0 so that IP takes the right path; + * this is important for cases such as IPSec where a + * single mbuf might result in multiple mbufs as part + * of the encapsulation. If a non-zero count is passed + * down to IP, the head of the chain might change and + * we could end up skipping it (thus generating bogus + * packets). Fixing it in IP would be desirable, but + * for now this would do it. + */ + cnt = 0; + } + + error = ip_output_list(pkt, cnt, opt, &ro, flags, 0, &ipoa); + if (chain || error) { + /* + * If we sent down a chain then we are done since + * the callee had taken care of everything; else + * we need to free the rest of the chain ourselves. + */ + if (!chain) + m_freem_list(npkt); + break; + } + pkt = npkt; + } + + if (unlocked) + socket_lock(so, 0); + + /* Synchronize cached PCB route */ + inp_route_copyin(inp, &ro); + + return (error); +} + void tcp_setpersist(tp) register struct tcpcb *tp; { int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1; - int tt; if (tp->t_timer[TCPT_REXMT]) panic("tcp_setpersist: retransmit pending");
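
A simplified model of the send loop at the bottom of tcp_ip_output() above: with chaining enabled the whole list is handed down in one call; otherwise each packet is unlinked and sent singly (with the count forced to 0 so the lower layer treats it as a single packet), and on error the unsent remainder is freed, as m_freem_list() does in the kernel. The struct and callbacks are illustrative stand-ins for mbufs and ip_output_list().

    #include <stddef.h>

    struct pkt {
        struct pkt *next;
        /* payload omitted */
    };

    static int
    send_list(struct pkt *head, int chain, int cnt,
        int (*output)(struct pkt *list, int cnt),
        void (*free_list)(struct pkt *list))    /* must accept NULL */
    {
        int error = 0;

        while (head != NULL) {
            struct pkt *next = head->next;

            if (!chain) {
                head->next = NULL;
                cnt = 0;            /* tell the lower layer: single packet */
            }
            error = output(head, cnt);
            if (chain || error) {
                if (!chain)
                    free_list(next);   /* drop what we could not send */
                break;
            }
            head = next;
        }
        return error;
    }
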