X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/ea3f04195ba4a5034c9c8e9b726d4f7ce96f1832..refs/heads/master:/bsd/netinet/tcp_output.c?ds=inline

diff --git a/bsd/netinet/tcp_output.c b/bsd/netinet/tcp_output.c
index 7c1988f1f..5eecbbbf5 100644
--- a/bsd/netinet/tcp_output.c
+++ b/bsd/netinet/tcp_output.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -67,7 +67,7 @@
  * Version 2.0.
  */

-#define _IP_VHL
+#define _IP_VHL
 #include
@@ -95,13 +95,11 @@
 #include
 #include
 #include
-#if INET6
 #include
 #include
 #include
-#endif
 #include
-#define TCPOUTFLAGS
+#define TCPOUTFLAGS
 #include
 #include
 #include
@@ -120,43 +118,47 @@
 #include
 #endif /*IPSEC*/

-#if CONFIG_MACF_NET
-#include
-#endif /* MAC_SOCKET */
-
-#include
 #if MPTCP
 #include
 #include
 #include
+#include
 #endif

 #include

-#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETTCP, 1)
-#define DBG_LAYER_END NETDBG_CODE(DBG_NETTCP, 3)
-#define DBG_FNC_TCP_OUTPUT NETDBG_CODE(DBG_NETTCP, (4 << 8) | 1)
+#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETTCP, 1)
+#define DBG_LAYER_END NETDBG_CODE(DBG_NETTCP, 3)
+#define DBG_FNC_TCP_OUTPUT NETDBG_CODE(DBG_NETTCP, (4 << 8) | 1)

 SYSCTL_SKMEM_TCP_INT(OID_AUTO, path_mtu_discovery,
-    CTLFLAG_RW | CTLFLAG_LOCKED, int, path_mtu_discovery, 1,
-    "Enable Path MTU Discovery");
-
-SYSCTL_SKMEM_TCP_INT(OID_AUTO, slowstart_flightsize,
-    CTLFLAG_RW | CTLFLAG_LOCKED, int, ss_fltsz, 1,
-    "Slow start flight size");
+    CTLFLAG_RW | CTLFLAG_LOCKED, int, path_mtu_discovery, 1,
+    "Enable Path MTU Discovery");

 SYSCTL_SKMEM_TCP_INT(OID_AUTO, local_slowstart_flightsize,
-    CTLFLAG_RW | CTLFLAG_LOCKED, int, ss_fltsz_local, 8,
-    "Slow start flight size for local networks");
+    CTLFLAG_RW | CTLFLAG_LOCKED, int, ss_fltsz_local, 8,
+    "Slow start flight size for local networks");

-int tcp_do_tso = 1;
+int tcp_do_tso = 1;
 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tso, CTLFLAG_RW | CTLFLAG_LOCKED,
-    &tcp_do_tso, 0, "Enable TCP Segmentation Offload");
+    &tcp_do_tso, 0, "Enable TCP Segmentation Offload");

 SYSCTL_SKMEM_TCP_INT(OID_AUTO, ecn_setup_percentage,
     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_ecn_setup_percentage, 100,
     "Max ECN setup percentage");

+SYSCTL_SKMEM_TCP_INT(OID_AUTO, do_ack_compression,
+    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_do_ack_compression, 1,
+    "Enable TCP ACK compression (on (cell only): 1, off: 0, on (all interfaces): 2)");
+
+SYSCTL_SKMEM_TCP_INT(OID_AUTO, ack_compression_rate,
+    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_ack_compression_rate, TCP_COMP_CHANGE_RATE,
+    "Rate at which we force sending new ACKs (in ms)");
+
+SYSCTL_SKMEM_TCP_INT(OID_AUTO, randomize_timestamps,
+    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_randomize_timestamps, 1,
+    "Randomize TCP timestamps to prevent tracking (on: 1, off: 0)");
+
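The three sysctls added above are consumed further down in this diff: the ACK
compression knobs drive the comp_gencnt stamping of pure ACKs, and the
timestamp knob pairs with the new "tcp_now + tp->t_ts_offset" TSval. A minimal
userland sketch of the randomization idea, assuming a per-connection offset
chosen once at connection setup; example_init_ts_offset() and its use of
random() are illustrative assumptions, not the xnu initialization code:

    /*
     * Hedged sketch: each connection gets a random constant added to its
     * TSval clock, so connections from one host no longer share a
     * recognizable timestamp base, while each connection still sees a
     * monotonically increasing clock.
     */
    #include <stdint.h>
    #include <stdlib.h>

    struct example_tcpcb {
        uint32_t t_ts_offset;   /* per-connection TSval offset */
    };

    extern int tcp_randomize_timestamps;  /* sysctl from this diff */
    extern uint32_t tcp_now;              /* coarse TCP clock tick */

    static void
    example_init_ts_offset(struct example_tcpcb *tp)
    {
        /* stand-in for the kernel RNG */
        tp->t_ts_offset = tcp_randomize_timestamps ? (uint32_t)random() : 0;
    }

    /* TSval actually placed in the option, as done later in this diff */
    static uint32_t
    example_tsval(const struct example_tcpcb *tp)
    {
        return tcp_now + tp->t_ts_offset;
    }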
 static int
 sysctl_change_ecn_setting SYSCTL_HANDLER_ARGS
 {
@@ -166,8 +168,9 @@ sysctl_change_ecn_setting SYSCTL_HANDLER_ARGS

     err = sysctl_io_number(req, tcp_ecn_outbound, sizeof(int32_t),
         &i, &changed);
-    if (err != 0 || req->newptr == USER_ADDR_NULL)
+    if (err != 0 || req->newptr == USER_ADDR_NULL) {
         return err;
+    }

     if (changed) {
         if ((tcp_ecn_outbound == 0 || tcp_ecn_outbound == 1) &&
@@ -185,10 +188,9 @@ sysctl_change_ecn_setting SYSCTL_HANDLER_ARGS
             ifnet_head_lock_shared();
             TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
                 if (!IFNET_IS_CELLULAR(ifp)) {
-                    ifnet_lock_exclusive(ifp);
-                    ifp->if_eflags &= ~IFEF_ECN_DISABLE;
-                    ifp->if_eflags &= ~IFEF_ECN_ENABLE;
-                    ifnet_lock_done(ifp);
+                    if_clear_eflags(ifp,
+                        IFEF_ECN_ENABLE |
+                        IFEF_ECN_DISABLE);
                 }
             }
             ifnet_head_done();
@@ -200,10 +202,8 @@ sysctl_change_ecn_setting SYSCTL_HANDLER_ARGS
             ifnet_head_lock_shared();
             TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
                 if (!IFNET_IS_CELLULAR(ifp)) {
-                    ifnet_lock_exclusive(ifp);
-                    ifp->if_eflags |= IFEF_ECN_ENABLE;
-                    ifp->if_eflags &= ~IFEF_ECN_DISABLE;
-                    ifnet_lock_done(ifp);
+                    if_set_eflags(ifp, IFEF_ECN_ENABLE);
+                    if_clear_eflags(ifp, IFEF_ECN_DISABLE);
                 }
             }
             ifnet_head_done();
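The two hunks above replace the open-coded lock/modify/unlock pattern with
if_set_eflags()/if_clear_eflags() helpers. One plausible shape of such
helpers is sketched below, with a pthread mutex standing in for the ifnet
lock; the example_ names and the lock type are assumptions, not the xnu
definitions:

    /*
     * Hedged sketch: fold ifnet_lock_exclusive() / flag update /
     * ifnet_lock_done() into one call, so every caller updates the
     * flags word under the same exclusive lock.
     */
    #include <pthread.h>
    #include <stdint.h>

    struct example_ifnet {
        pthread_mutex_t if_lock;   /* stands in for the ifnet lock */
        uint32_t        if_eflags;
    };

    static void
    example_if_set_eflags(struct example_ifnet *ifp, uint32_t flags)
    {
        pthread_mutex_lock(&ifp->if_lock);
        ifp->if_eflags |= flags;
        pthread_mutex_unlock(&ifp->if_lock);
    }

    static void
    example_if_clear_eflags(struct example_ifnet *ifp, uint32_t flags)
    {
        pthread_mutex_lock(&ifp->if_lock);
        ifp->if_eflags &= ~flags;
        pthread_mutex_unlock(&ifp->if_lock);
    }

A side benefit visible in the first hunk: one call can clear several bits at
once, which is why the two separate "&= ~" updates collapse into a single
if_clear_eflags(ifp, IFEF_ECN_ENABLE | IFEF_ECN_DISABLE).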
@@ -232,52 +232,40 @@ SYSCTL_PROC(_net_inet_tcp, OID_AUTO, ecn_negotiate_in,
     "Initiate ECN for inbound connections");

 SYSCTL_SKMEM_TCP_INT(OID_AUTO, packetchain,
-    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_packet_chaining, 50,
-    "Enable TCP output packet chaining");
+    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_packet_chaining, 50,
+    "Enable TCP output packet chaining");

 SYSCTL_SKMEM_TCP_INT(OID_AUTO, socket_unlocked_on_output,
-    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_output_unlocked, 1,
-    "Unlock TCP when sending packets down to IP");
-
-SYSCTL_SKMEM_TCP_INT(OID_AUTO, rfc3390,
-    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_do_rfc3390, 1,
-    "Calculate initial slowstart cwnd depending on MSS");
+    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_output_unlocked, 1,
+    "Unlock TCP when sending packets down to IP");

 SYSCTL_SKMEM_TCP_INT(OID_AUTO, min_iaj_win,
-    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_min_iaj_win, MIN_IAJ_WIN,
-    "Minimum recv win based on inter-packet arrival jitter");
+    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_min_iaj_win, MIN_IAJ_WIN,
+    "Minimum recv win based on inter-packet arrival jitter");

 SYSCTL_SKMEM_TCP_INT(OID_AUTO, acc_iaj_react_limit,
-    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_acc_iaj_react_limit,
-    ACC_IAJ_REACT_LIMIT, "Accumulated IAJ when receiver starts to react");
-
-SYSCTL_SKMEM_TCP_INT(OID_AUTO, doautosndbuf,
-    CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_do_autosendbuf, 1,
-    "Enable send socket buffer auto-tuning");
+    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_acc_iaj_react_limit,
+    ACC_IAJ_REACT_LIMIT, "Accumulated IAJ when receiver starts to react");

 SYSCTL_SKMEM_TCP_INT(OID_AUTO, autosndbufinc,
-    CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_autosndbuf_inc,
-    8 * 1024, "Increment in send socket buffer size");
+    CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_autosndbuf_inc,
+    8 * 1024, "Increment in send socket buffer size");

 SYSCTL_SKMEM_TCP_INT(OID_AUTO, autosndbufmax,
-    CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_autosndbuf_max, 512 * 1024,
-    "Maximum send socket buffer size");
-
-SYSCTL_SKMEM_TCP_INT(OID_AUTO, ack_prioritize,
-    CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_prioritize_acks, 1,
-    "Prioritize pure acks");
+    CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_autosndbuf_max, 2 * 1024 * 1024,
+    "Maximum send socket buffer size");

 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rtt_recvbg,
-    CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_use_rtt_recvbg, 1,
-    "Use RTT for bg recv algorithm");
+    CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_use_rtt_recvbg, 1,
+    "Use RTT for bg recv algorithm");

 SYSCTL_SKMEM_TCP_INT(OID_AUTO, recv_throttle_minwin,
-    CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_recv_throttle_minwin, 16 * 1024,
-    "Minimum recv win for throttling");
+    CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_recv_throttle_minwin, 16 * 1024,
+    "Minimum recv win for throttling");

 SYSCTL_SKMEM_TCP_INT(OID_AUTO, enable_tlp,
-    CTLFLAG_RW | CTLFLAG_LOCKED,
-    int32_t, tcp_enable_tlp, 1, "Enable Tail loss probe");
+    CTLFLAG_RW | CTLFLAG_LOCKED,
+    int32_t, tcp_enable_tlp, 1, "Enable Tail loss probe");

 static int32_t packchain_newlist = 0;
 static int32_t packchain_looped = 0;
@@ -288,28 +276,25 @@ static int32_t packchain_sent = 0;
 extern int ipsec_bypass;
 #endif

-extern int slowlink_wsize; /* window correction for slow links */
-#if IPFIREWALL
-extern int fw_enable; /* firewall check for packet chaining */
-extern int fw_bypass; /* firewall check: disable packet chaining if there are rules */
-#endif /* IPFIREWALL */
+extern int slowlink_wsize; /* window correction for slow links */

 extern u_int32_t dlil_filter_disable_tso_count;
 extern u_int32_t kipf_count;

 static int tcp_ip_output(struct socket *, struct tcpcb *, struct mbuf *,
     int, struct mbuf *, int, int, boolean_t);
-static struct mbuf* tcp_send_lroacks(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th);
 static int tcp_recv_throttle(struct tcpcb *tp);

-static int32_t tcp_tfo_check(struct tcpcb *tp, int32_t len)
+static int32_t
+tcp_tfo_check(struct tcpcb *tp, int32_t len)
 {
     struct socket *so = tp->t_inpcb->inp_socket;
     unsigned int optlen = 0;
     unsigned int cookie_len;

-    if (tp->t_flags & TF_NOOPT)
+    if (tp->t_flags & TF_NOOPT) {
         goto fallback;
+    }

     if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
         !tcp_heuristic_do_tfo(tp)) {
@@ -318,45 +303,54 @@ static int32_t tcp_tfo_check(struct tcpcb *tp, int32_t len)
         goto fallback;
     }

-    if (so->so_flags1 & SOF1_DATA_AUTHENTICATED)
+    if (so->so_flags1 & SOF1_DATA_AUTHENTICATED) {
         return len;
+    }

     optlen += TCPOLEN_MAXSEG;

-    if (tp->t_flags & TF_REQ_SCALE)
+    if (tp->t_flags & TF_REQ_SCALE) {
         optlen += 4;
+    }

 #if MPTCP
     if ((so->so_flags & SOF_MP_SUBFLOW) && mptcp_enable &&
         (tp->t_rxtshift <= mptcp_mpcap_retries ||
-        (tptomptp(tp)->mpt_mpte->mpte_flags & MPTE_FORCE_ENABLE)))
+        (tptomptp(tp)->mpt_mpte->mpte_flags & MPTE_FORCE_ENABLE))) {
         optlen += sizeof(struct mptcp_mpcapable_opt_common) + sizeof(mptcp_key_t);
+    }
 #endif /* MPTCP */

-    if (tp->t_flags & TF_REQ_TSTMP)
+    if (tp->t_flags & TF_REQ_TSTMP) {
         optlen += TCPOLEN_TSTAMP_APPA;
+    }

-    if (SACK_ENABLED(tp))
+    if (SACK_ENABLED(tp)) {
         optlen += TCPOLEN_SACK_PERMITTED;
+    }

     /* Now, decide whether to use TFO or not */

     /* Don't even bother trying if there is no space at all... */
-    if (MAX_TCPOPTLEN - optlen < TCPOLEN_FASTOPEN_REQ)
+    if (MAX_TCPOPTLEN - optlen < TCPOLEN_FASTOPEN_REQ) {
         goto fallback;
+    }

     cookie_len = tcp_cache_get_cookie_len(tp);
-    if (cookie_len == 0)
+    if (cookie_len == 0) {
         /* No cookie, so we request one */
         return 0;
+    }

     /* There is not enough space for the cookie, so we cannot do TFO */
-    if (MAX_TCPOPTLEN - optlen < cookie_len)
+    if (MAX_TCPOPTLEN - optlen < cookie_len) {
         goto fallback;
+    }

     /* Do not send SYN+data if there is more in the queue than MSS */
-    if (so->so_snd.sb_cc > (tp->t_maxopd - MAX_TCPOPTLEN))
+    if (so->so_snd.sb_cc > (tp->t_maxopd - MAX_TCPOPTLEN)) {
         goto fallback;
+    }

     /* Ok, everything looks good. We can go on and do TFO */
     return len;
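tcp_tfo_check() above is essentially option-space budgeting: add up the
options the SYN will carry, then see whether a Fast Open request (or a
previously cached cookie) still fits in the 40-byte TCP option space. A
self-contained sketch of that accounting; the EX_ constants mirror the names
used in the diff with the usual RFC values, which are assumptions here:

    #include <stdbool.h>

    #define EX_MAX_TCPOPTLEN          40  /* TCP option space, bytes */
    #define EX_TCPOLEN_MAXSEG          4
    #define EX_TCPOLEN_TSTAMP_APPA    12  /* timestamp + padding */
    #define EX_TCPOLEN_SACK_PERMITTED  2
    #define EX_TCPOLEN_FASTOPEN_REQ    2  /* kind + len, empty cookie */

    /* Returns true when TFO can be attempted on this SYN. */
    static bool
    example_tfo_fits(bool want_wscale, bool want_tstamp, bool want_sack,
        unsigned int cookie_len)
    {
        unsigned int optlen = EX_TCPOLEN_MAXSEG;

        if (want_wscale) {
            optlen += 4;
        }
        if (want_tstamp) {
            optlen += EX_TCPOLEN_TSTAMP_APPA;
        }
        if (want_sack) {
            optlen += EX_TCPOLEN_SACK_PERMITTED;
        }
        /* no room even for an empty TFO request: fall back */
        if (EX_MAX_TCPOPTLEN - optlen < EX_TCPOLEN_FASTOPEN_REQ) {
            return false;
        }
        /* cookie_len == 0 means "request a cookie", which fits here */
        return cookie_len == 0 || EX_MAX_TCPOPTLEN - optlen >= cookie_len;
    }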
@@ -367,16 +361,17 @@
 }

 /* Returns the number of bytes written to the TCP option-space */
-static unsigned
-tcp_tfo_write_cookie_rep(struct tcpcb *tp, unsigned optlen, u_char *opt)
+static unsigned int
+tcp_tfo_write_cookie_rep(struct tcpcb *tp, unsigned int optlen, u_char *opt)
 {
     u_char out[CCAES_BLOCK_SIZE];
     unsigned ret = 0;
     u_char *bp;

     if ((MAX_TCPOPTLEN - optlen) <
-        (TCPOLEN_FASTOPEN_REQ + TFO_COOKIE_LEN_DEFAULT))
+        (TCPOLEN_FASTOPEN_REQ + TFO_COOKIE_LEN_DEFAULT)) {
         return ret;
+    }

     tcp_tfo_gen_cookie(tp->t_inpcb, out, sizeof(out));

@@ -393,16 +388,21 @@ tcp_tfo_write_cookie_rep(struct tcpcb *tp, unsigned optlen, u_char *opt)
     return ret;
 }

-static unsigned
-tcp_tfo_write_cookie(struct tcpcb *tp, unsigned optlen, int32_t len,
-    u_char *opt)
+static unsigned int
+tcp_tfo_write_cookie(struct tcpcb *tp, unsigned int optlen, int32_t len,
+    u_char *opt)
 {
-    u_int8_t tfo_len = MAX_TCPOPTLEN - optlen - TCPOLEN_FASTOPEN_REQ;
+    uint8_t tfo_len;
     struct socket *so = tp->t_inpcb->inp_socket;
     unsigned ret = 0;
     int res;
     u_char *bp;

+    if (TCPOLEN_FASTOPEN_REQ > MAX_TCPOPTLEN - optlen) {
+        return 0;
+    }
+    tfo_len = (uint8_t)(MAX_TCPOPTLEN - optlen - TCPOLEN_FASTOPEN_REQ);
+
     if (so->so_flags1 & SOF1_DATA_AUTHENTICATED) {
         /* If there is some data, let's track it */
         if (len > 0) {
@@ -449,11 +449,9 @@ tcp_tfo_write_cookie(struct tcpcb *tp, unsigned optlen, int32_t len,
 }

 static inline bool
-tcp_send_ecn_flags_on_syn(struct tcpcb *tp, struct socket *so)
+tcp_send_ecn_flags_on_syn(struct tcpcb *tp)
 {
-    return !((tp->ecn_flags & TE_SETUPSENT ||
-        (so->so_flags & SOF_MP_SUBFLOW) ||
-        (tfo_enabled(tp))));
+    return !(tp->ecn_flags & TE_SETUPSENT);
 }

 void
@@ -502,8 +500,9 @@ tcp_set_ecn(struct tcpcb *tp, struct ifnet *ifp)
         return;

 check_heuristic:
-    if (!tcp_heuristic_do_ecn(tp))
+    if (!tcp_heuristic_do_ecn(tp)) {
         tp->ecn_flags &= ~TE_ENABLE_ECN;
+    }

     /*
      * If the interface setting, system-level setting and heuristics
@@ -516,11 +515,37 @@ check_heuristic:
          * Use the random value in iss for randomizing
          * this selection
          */
-        if ((tp->iss % 100) >= tcp_ecn_setup_percentage)
+        if ((tp->iss % 100) >= tcp_ecn_setup_percentage) {
             tp->ecn_flags &= ~TE_ENABLE_ECN;
+        }
     }
 }

+int
+tcp_flight_size(struct tcpcb *tp)
+{
+    int ret;
+
+    VERIFY(tp->sackhint.sack_bytes_acked >= 0);
+    VERIFY(tp->sackhint.sack_bytes_rexmit >= 0);
+
+    /*
+     * RFC6675, SetPipe(): SACK'd bytes are discounted. All the rest is still in-flight.
+     */
+    ret = tp->snd_nxt - tp->snd_una - tp->sackhint.sack_bytes_acked;
+
+    if (ret < 0) {
+        /*
+         * This happens when the RTO-timer fires because snd_nxt gets artificially
+         * decreased. If we then receive some SACK-blocks, sack_bytes_acked is
+         * going to be high.
+         */
+        ret = 0;
+    }
+
+    return ret;
+}
+
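A worked example for the new tcp_flight_size() above (a simplified RFC 6675
SetPipe()): bytes between snd_una and snd_nxt that the peer has not SACK'd
are assumed to still be in the network. Values below are made up:

    #include <assert.h>
    #include <stdint.h>

    static int
    example_flight_size(uint32_t snd_nxt, uint32_t snd_una,
        int sack_bytes_acked)
    {
        int ret = (int)(snd_nxt - snd_una) - sack_bytes_acked;

        /*
         * After an RTO, snd_nxt is pulled back while SACK state remains,
         * so the difference can go negative; clamp as the kernel does.
         */
        return ret < 0 ? 0 : ret;
    }

    static void
    example_flight_size_usage(void)
    {
        /* 30,000 bytes outstanding, 10,000 SACK'd: 20,000 in flight */
        assert(example_flight_size(130000, 100000, 10000) == 20000);
        /* post-RTO: snd_nxt rewound to snd_una, stale SACK count clamps */
        assert(example_flight_size(100000, 100000, 10000) == 0);
    }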
 /*
  * Tcp output routine: figure out what should be sent and send it.
  *
@@ -553,16 +578,14 @@ tcp_output(struct tcpcb *tp)
     struct inpcb *inp = tp->t_inpcb;
     struct socket *so = inp->inp_socket;
     int32_t len, recwin, sendwin, off;
-    int flags, error;
+    uint8_t flags;
+    int error;
     struct mbuf *m;
     struct ip *ip = NULL;
-    struct ipovly *ipov = NULL;
-#if INET6
     struct ip6_hdr *ip6 = NULL;
-#endif /* INET6 */
     struct tcphdr *th;
     u_char opt[TCP_MAXOLEN];
-    unsigned ipoptlen, optlen, hdrlen;
+    unsigned int ipoptlen, optlen, hdrlen;
     int idle, sendalot, lost = 0;
     int i, sack_rxmit;
     int tso = 0;
@@ -570,23 +593,17 @@ tcp_output(struct tcpcb *tp)
     tcp_seq old_snd_nxt = 0;
     struct sackhole *p;
 #if IPSEC
-    unsigned ipsec_optlen = 0;
+    unsigned int ipsec_optlen = 0;
 #endif /* IPSEC */
     int idle_time = 0;
     struct mbuf *packetlist = NULL;
     struct mbuf *tp_inp_options = inp->inp_depend4.inp4_options;
-#if INET6
-    int isipv6 = inp->inp_vflag & INP_IPV6 ;
-#else
-    int isipv6 = 0;
-#endif
-    short packchain_listadd = 0;
+    int isipv6 = inp->inp_vflag & INP_IPV6;
+    int packchain_listadd = 0;
     int so_options = so->so_options;
     struct rtentry *rt;
     u_int32_t svc_flags = 0, allocated_len;
-    u_int32_t lro_ackmore = (tp->t_lropktlen != 0) ? 1 : 0;
-    struct mbuf *mnext = NULL;
-    int sackoptlen = 0;
+    unsigned int sackoptlen = 0;
 #if MPTCP
     boolean_t mptcp_acknow;
 #endif /* MPTCP */
@@ -595,6 +612,8 @@ tcp_output(struct tcpcb *tp)
     boolean_t wired = FALSE;
     boolean_t sack_rescue_rxt = FALSE;
     int sotc = so->so_traffic_class;
+    boolean_t do_not_compress = FALSE;
+    boolean_t sack_rxmted = FALSE;

     /*
      * Determine length of data that should be transmitted,
@@ -620,11 +639,13 @@ tcp_output(struct tcpcb *tp)
          * Do some other tasks that need to be done after
          * idle time
          */
-        if (!SLIST_EMPTY(&tp->t_rxt_segments))
+        if (!SLIST_EMPTY(&tp->t_rxt_segments)) {
             tcp_rxtseg_clean(tp);
+        }

         /* If stretch ack was auto-disabled, re-evaluate it */
         tcp_cc_after_idle_stretchack(tp);
+        tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;
     }
     tp->t_flags &= ~TF_LASTIDLE;
     if (idle) {
@@ -649,25 +670,22 @@ again:
 #if MPTCP
     mptcp_acknow = FALSE;
 #endif
+    do_not_compress = FALSE;

-    KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_START, 0,0,0,0,0);
+    KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);

-#if INET6
     if (isipv6) {
         KERNEL_DEBUG(DBG_LAYER_BEG,
-            ((inp->inp_fport << 16) | inp->inp_lport),
-            (((inp->in6p_laddr.s6_addr16[0] & 0xffff) << 16) |
-            (inp->in6p_faddr.s6_addr16[0] & 0xffff)),
-            sendalot,0,0);
-    } else
-#endif
-
-    {
+            ((inp->inp_fport << 16) | inp->inp_lport),
+            (((inp->in6p_laddr.s6_addr16[0] & 0xffff) << 16) |
+            (inp->in6p_faddr.s6_addr16[0] & 0xffff)),
+            sendalot, 0, 0);
+    } else {
         KERNEL_DEBUG(DBG_LAYER_BEG,
-            ((inp->inp_fport << 16) | inp->inp_lport),
-            (((inp->inp_laddr.s_addr & 0xffff) << 16) |
-            (inp->inp_faddr.s_addr & 0xffff)),
-            sendalot,0,0);
+            ((inp->inp_fport << 16) | inp->inp_lport),
+            (((inp->inp_laddr.s_addr & 0xffff) << 16) |
+            (inp->inp_faddr.s_addr & 0xffff)),
+            sendalot, 0, 0);
     }
     /*
      * If the route generation id changed, we need to check that our
@@ -692,12 +710,14 @@ again:

         if (isipv6) {
             ia6 = ifa_foraddr6(&inp->in6p_laddr);
-            if (ia6 != NULL)
+            if (ia6 != NULL) {
                 found_srcaddr = 1;
+            }
         } else {
             ia = ifa_foraddr(inp->inp_laddr.s_addr);
-            if (ia != NULL)
+            if (ia != NULL) {
                 found_srcaddr = 1;
+            }
         }

         /* check that the source address is still valid */
@@ -725,8 +745,9 @@ again:
                 }
             }

-            if (tp->t_pktlist_head != NULL)
+            if (tp->t_pktlist_head != NULL) {
                 m_freem_list(tp->t_pktlist_head);
+            }
             TCP_PKTLIST_CLEAR(tp);

             /* drop connection if source address isn't available */
@@ -738,11 +759,13 @@ again:
                 return 0; /* silently ignore, keep data in socket: address may be back */
             }
         }
-        if (ia != NULL)
+        if (ia != NULL) {
             IFA_REMREF(&ia->ia_ifa);
+        }

-        if (ia6 != NULL)
+        if (ia6 != NULL) {
             IFA_REMREF(&ia6->ia_ifa);
+        }

         /*
          * Address is still valid; check for multipages capability
@@ -755,8 +778,9 @@ again:
             soif2kcl(so, (ifp->if_eflags & IFEF_2KCL));
             tcp_set_ecn(tp, ifp);
         }
-        if (rt->rt_flags & RTF_UP)
+        if (rt->rt_flags & RTF_UP) {
             RT_GENID_SYNC(rt);
+        }
         /*
          * See if we should do MTU discovery. Don't do it if:
          * 1) it is disabled via the sysctl
@@ -767,10 +791,11 @@ again:

         if (!path_mtu_discovery || ((rt != NULL) &&
             (!(rt->rt_flags & RTF_UP) ||
-            (rt->rt_rmx.rmx_locks & RTV_MTU))))
+            (rt->rt_rmx.rmx_locks & RTV_MTU)))) {
             tp->t_flags &= ~TF_PMTUD;
-        else
+        } else {
             tp->t_flags |= TF_PMTUD;
+        }

         RT_UNLOCK(rt);
     }
@@ -786,14 +811,16 @@ again:
      * snd_nxt. There may be SACK information that allows us to avoid
      * resending already delivered data. Adjust snd_nxt accordingly.
      */
-    if (SACK_ENABLED(tp) && SEQ_LT(tp->snd_nxt, tp->snd_max))
+    if (SACK_ENABLED(tp) && SEQ_LT(tp->snd_nxt, tp->snd_max)) {
         tcp_sack_adjust(tp);
+    }

     sendalot = 0;
     off = tp->snd_nxt - tp->snd_una;
     sendwin = min(tp->snd_wnd, tp->snd_cwnd);

-    if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0)
+    if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0) {
         sendwin = min(sendwin, slowlink_wsize);
+    }

     flags = tcp_outflags[tp->t_state];
     /*
@@ -815,9 +842,20 @@ again:
         (p = tcp_sack_output(tp, &sack_bytes_rxmt))) {
         int32_t cwin;

-        cwin = min(tp->snd_wnd, tp->snd_cwnd) - sack_bytes_rxmt;
-        if (cwin < 0)
+        if (tcp_do_better_lr) {
+            cwin = min(tp->snd_wnd, tp->snd_cwnd) - tcp_flight_size(tp);
+            if (cwin <= 0 && sack_rxmted == FALSE) {
+                /* Allow clocking out at least one segment per period */
+                cwin = tp->t_maxseg;
+            }
+
+            sack_rxmted = TRUE;
+        } else {
+            cwin = min(tp->snd_wnd, tp->snd_cwnd) - sack_bytes_rxmt;
+        }
+        if (cwin < 0) {
             cwin = 0;
+        }
         /* Do not retransmit SACK segments beyond snd_recover */
         if (SEQ_GT(p->end, tp->snd_recover)) {
             /*
@@ -834,10 +872,11 @@ again:
              */
             p = NULL;
             goto after_sack_rexmit;
-        } else
+        } else {
             /* Can rexmit part of the current hole */
             len = ((int32_t)min(cwin,
-                tp->snd_recover - p->rxmit));
+                tp->snd_recover - p->rxmit));
+        }
     } else {
         len = ((int32_t)min(cwin, p->end - p->rxmit));
     }
@@ -845,6 +884,9 @@ again:
         off = p->rxmit - tp->snd_una;
         sack_rxmit = 1;
         sendalot = 1;
+        /* Everything sent after snd_nxt will allow us to account for fast-retransmit of the retransmitted segment */
+        tp->send_highest_sack = tp->snd_nxt;
+        tp->t_new_dupacks = 0;
         tcpstat.tcps_sack_rexmits++;
         tcpstat.tcps_sack_rexmit_bytes +=
             min(len, tp->t_maxseg);
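The tcp_do_better_lr branch above changes how SACK retransmissions are
budgeted: instead of charging them against sack_bytes_rexmit, the quota is
the send window minus the RFC 6675 flight size, and at least one segment is
clocked out per output pass so recovery cannot stall when the pipe estimate
already fills the window. A hedged distillation of that logic:

    #include <stdbool.h>
    #include <stdint.h>

    static int32_t
    example_sack_rexmit_quota(uint32_t snd_wnd, uint32_t snd_cwnd,
        int flight_size, uint32_t maxseg, bool *already_rxmted)
    {
        int32_t cwin =
            (int32_t)(snd_wnd < snd_cwnd ? snd_wnd : snd_cwnd) - flight_size;

        if (cwin <= 0 && !*already_rxmted) {
            /* allow at least one segment per output pass */
            cwin = (int32_t)maxseg;
        }
        *already_rxmted = true;
        return cwin > 0 ? cwin : 0;
    }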
@@ -857,10 +899,9 @@ after_sack_rexmit:
      * Get standard flags, and add SYN or FIN if requested by 'hidden'
      * state flags.
      */
-    if (tp->t_flags & TF_NEEDFIN)
+    if (tp->t_flags & TF_NEEDFIN) {
         flags |= TH_FIN;
-    if (tp->t_flags & TF_NEEDSYN)
-        flags |= TH_SYN;
+    }

     /*
      * If in persist timeout with window of 0, send 1 byte.
@@ -886,8 +927,9 @@ after_sack_rexmit:
              * to send then the probe will be the FIN
              * itself.
              */
-            if (off < so->so_snd.sb_cc)
+            if (off < so->so_snd.sb_cc) {
                 flags &= ~TH_FIN;
+            }
             sendwin = 1;
         } else {
             tp->t_timer[TCPT_PERSIST] = 0;
@@ -917,18 +959,22 @@ after_sack_rexmit:
     } else {
         int32_t cwin;

-        cwin = tp->snd_cwnd -
-            (tp->snd_nxt - tp->sack_newdata) -
-            sack_bytes_rxmt;
-        if (cwin < 0)
+        if (tcp_do_better_lr) {
+            cwin = tp->snd_cwnd - tcp_flight_size(tp);
+        } else {
+            cwin = tp->snd_cwnd -
+                (tp->snd_nxt - tp->sack_newdata) -
+                sack_bytes_rxmt;
+        }
+        if (cwin < 0) {
             cwin = 0;
-        /*
+        }
+        /*
          * We are inside of a SACK recovery episode and are
          * sending new data, having retransmitted all the
          * data possible in the scoreboard.
          */
-        len = min(so->so_snd.sb_cc, tp->snd_wnd)
-            - off;
+        len = min(so->so_snd.sb_cc, tp->snd_wnd) - off;
         /*
          * Don't remove this (len > 0) check !
          * We explicitly check for len > 0 here (although it
@@ -962,8 +1008,9 @@ after_sack_rexmit:
          * must have been advanced to cover it.
          */
         if ((tp->t_flags & TF_SENTFIN) &&
-            tp->snd_max == tp->snd_recover)
+            tp->snd_max == tp->snd_recover) {
             tp->snd_nxt--;
+        }

         off = tp->snd_nxt - tp->snd_una;
         sendalot = 0;
@@ -991,7 +1038,7 @@ after_sack_rexmit:
         len++;
     if (len > 0 && tp->t_state == TCPS_SYN_SENT) {
         while (inp->inp_sndinprog_cnt == 0 &&
-            tp->t_pktlist_head != NULL) {
+            tp->t_pktlist_head != NULL) {
             packetlist = tp->t_pktlist_head;
             packchain_listadd = tp->t_lastchain;
             packchain_sent++;
@@ -1009,14 +1056,14 @@ after_sack_rexmit:
          * resume close
          */
         if (inp->inp_sndinprog_cnt == 0 &&
-            (tp->t_flags & TF_CLOSING)) {
+            (tp->t_flags & TF_CLOSING)) {
             tp->t_flags &= ~TF_CLOSING;
             (void) tcp_close(tp);
         } else {
             tcp_check_timer_state(tp);
         }
         KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END,
-            0,0,0,0,0);
+            0, 0, 0, 0, 0);
         return 0;
     }
 }
@@ -1039,11 +1086,13 @@ after_sack_rexmit:
     /*
      * Don't send a RST with data.
      */
-    if (flags & TH_RST)
+    if (flags & TH_RST) {
         len = 0;
+    }

-    if ((flags & TH_SYN) && tp->t_state <= TCPS_SYN_SENT && tfo_enabled(tp))
+    if ((flags & TH_SYN) && tp->t_state <= TCPS_SYN_SENT && tfo_enabled(tp)) {
         len = tcp_tfo_check(tp, len);
+    }

     /*
      * The check here used to be (len < 0). Sometimes len is zero
@@ -1069,8 +1118,9 @@ after_sack_rexmit:
         TCP_RESET_REXMT_STATE(tp);
         tp->snd_nxt = tp->snd_una;
         off = 0;
-        if (tp->t_timer[TCPT_PERSIST] == 0)
+        if (tp->t_timer[TCPT_PERSIST] == 0) {
             tcp_setpersist(tp);
+        }
     }
 }

@@ -1083,8 +1133,7 @@ after_sack_rexmit:
      * 3. our send window (slow start and congestion controlled) is
      * larger than sent but unacknowledged data in send buffer.
      */
-    if (tcp_do_autosendbuf == 1 &&
-        !INP_WAIT_FOR_IF_FEEDBACK(inp) && !IN_FASTRECOVERY(tp) &&
+    if (!INP_WAIT_FOR_IF_FEEDBACK(inp) && !IN_FASTRECOVERY(tp) &&
         (so->so_snd.sb_flags & (SB_AUTOSIZE | SB_TRIM)) == SB_AUTOSIZE &&
         tcp_cansbgrow(&so->so_snd)) {
         if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat &&
@@ -1105,7 +1154,7 @@ after_sack_rexmit:
      *
      * TSO may only be used if we are in a pure bulk sending state.
      * The presence of TCP-MD5, SACK retransmits, SACK advertisements,
-     * ipfw rules and IP options, as well as disabling hardware checksum
+     * filters and IP options, as well as disabling hardware checksum
      * offload prevent using TSO. With TSO the TCP header is the same
      * (except for the sequence number) for all generated packets. This
      * makes it impossible to transmit any options which vary per generated
@@ -1120,22 +1169,20 @@ after_sack_rexmit:
      * Pre-calculate here as we save another lookup into the darknesses
      * of IPsec that way and can actually decide if TSO is ok.
      */
 #if IPSEC
-    if (ipsec_bypass == 0)
-        ipsec_optlen = ipsec_hdrsiz_tcp(tp);
+    if (ipsec_bypass == 0) {
+        ipsec_optlen = (unsigned int)ipsec_hdrsiz_tcp(tp);
+    }
 #endif
     if (len > tp->t_maxseg) {
         if ((tp->t_flags & TF_TSO) && tcp_do_tso && hwcksum_tx &&
             ip_use_randomid && kipf_count == 0 &&
             dlil_filter_disable_tso_count == 0 &&
-            tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
+            tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
             sack_bytes_rxmt == 0 &&
             inp->inp_options == NULL &&
             inp->in6p_options == NULL
 #if IPSEC
             && ipsec_optlen == 0
-#endif
-#if IPFIREWALL
-            && (fw_enable == 0 || fw_bypass)
 #endif
             ) {
             tso = 1;
@@ -1145,6 +1192,8 @@ after_sack_rexmit:
             sendalot = 1;
             tso = 0;
         }
+    } else {
+        tso = 0;
     }

     /* Send one segment or less as a tail loss probe */
@@ -1166,19 +1215,21 @@ after_sack_rexmit:
         int newlen = len;
         if (tp->t_state >= TCPS_ESTABLISHED &&
             (tp->t_mpflags & TMPF_SND_MPPRIO ||
-            tp->t_mpflags & TMPF_SND_REM_ADDR ||
-            tp->t_mpflags & TMPF_SND_MPFAIL ||
-            tp->t_mpflags & TMPF_SND_KEYS ||
-            tp->t_mpflags & TMPF_SND_JACK)) {
+            tp->t_mpflags & TMPF_SND_REM_ADDR ||
+            tp->t_mpflags & TMPF_SND_MPFAIL ||
+            tp->t_mpflags & TMPF_SND_KEYS ||
+            tp->t_mpflags & TMPF_SND_JACK)) {
             if (len > 0) {
                 len = 0;
+                tso = 0;
             }
             /*
              * On a new subflow, don't try to send again, because
              * we are still waiting for the fourth ack.
              */
-            if (!(tp->t_mpflags & TMPF_PREESTABLISHED))
+            if (!(tp->t_mpflags & TMPF_PREESTABLISHED)) {
                 sendalot = 1;
+            }
             mptcp_acknow = TRUE;
         } else {
             mptcp_acknow = FALSE;
@@ -1195,41 +1246,21 @@ after_sack_rexmit:

         if (newlen < len) {
             len = newlen;
-        }
-    }
-#endif /* MPTCP */
-
-    /*
-     * If the socket is capable of doing unordered send,
-     * pull the amount of data that can be sent from the
-     * unordered priority queues to the serial queue in
-     * the socket buffer. If bytes are not yet available
-     * in the highest priority message, we may not be able
-     * to send any new data.
-     */
-    if (so->so_flags & SOF_ENABLE_MSGS) {
-        if ((off + len) >
-            so->so_msg_state->msg_serial_bytes) {
-            sbpull_unordered_data(so, off, len);
-
-            /* check if len needs to be modified */
-            if ((off + len) >
-                so->so_msg_state->msg_serial_bytes) {
-                len = so->so_msg_state->msg_serial_bytes - off;
-                if (len <= 0) {
-                    len = 0;
-                    tcpstat.tcps_msg_sndwaithipri++;
-                }
+            if (len <= tp->t_maxseg) {
+                tso = 0;
             }
         }
     }
+#endif /* MPTCP */

     if (sack_rxmit) {
-        if (SEQ_LT(p->rxmit + len, tp->snd_una + so->so_snd.sb_cc))
+        if (SEQ_LT(p->rxmit + len, tp->snd_una + so->so_snd.sb_cc)) {
             flags &= ~TH_FIN;
+        }
     } else {
-        if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc))
+        if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc)) {
             flags &= ~TH_FIN;
+        }
     }
     /*
      * Compare available window to amount of window
@@ -1260,18 +1291,21 @@ after_sack_rexmit:
         if (recwin > 0 && tcp_recv_throttle(tp)) {
             uint32_t min_iaj_win = tcp_min_iaj_win * tp->t_maxseg;
             uint32_t bg_rwintop = tp->rcv_adv;
-            if (SEQ_LT(bg_rwintop, tp->rcv_nxt + min_iaj_win))
+            if (SEQ_LT(bg_rwintop, tp->rcv_nxt + min_iaj_win)) {
                 bg_rwintop = tp->rcv_nxt + min_iaj_win;
+            }
             recwin = imin((int32_t)(bg_rwintop - tp->rcv_nxt),
                 recwin);
-            if (recwin < 0)
+            if (recwin < 0) {
                 recwin = 0;
+            }
         }
     }
 #endif /* TRAFFIC_MGT */

-    if (recwin > (int32_t)(TCP_MAXWIN << tp->rcv_scale))
+    if (recwin > (int32_t)(TCP_MAXWIN << tp->rcv_scale)) {
         recwin = (int32_t)(TCP_MAXWIN << tp->rcv_scale);
+    }

     if (!(so->so_flags & SOF_MP_SUBFLOW)) {
         if (recwin < (int32_t)(tp->rcv_adv - tp->rcv_nxt)) {
@@ -1279,10 +1313,12 @@ after_sack_rexmit:
         }
     } else {
         struct mptcb *mp_tp = tptomptp(tp);
+        int64_t recwin_announced = (int64_t)(mp_tp->mpt_rcvadv - mp_tp->mpt_rcvnxt);

         /* Don't remove what we announced at the MPTCP-layer */
-        if (recwin < (int32_t)(mp_tp->mpt_rcvadv - (uint32_t)mp_tp->mpt_rcvnxt)) {
-            recwin = (int32_t)(mp_tp->mpt_rcvadv - (uint32_t)mp_tp->mpt_rcvnxt);
+        VERIFY(recwin_announced < INT32_MAX && recwin_announced > INT32_MIN);
+        if (recwin < (int32_t)recwin_announced) {
+            recwin = (int32_t)recwin_announced;
         }
     }
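The subflow receive-window clamp above enforces an MPTCP invariant: a subflow
must never advertise less than what the MPTCP layer already announced
(mpt_rcvadv), or the peer could see the connection-level window shrink. A
hedged model using the same 64-bit sequence arithmetic as the diff:

    #include <assert.h>
    #include <stdint.h>

    static int32_t
    example_clamp_recwin(int32_t recwin, uint64_t mpt_rcvadv,
        uint64_t mpt_rcvnxt)
    {
        int64_t announced = (int64_t)(mpt_rcvadv - mpt_rcvnxt);

        /* mirrors the VERIFY() in the diff: must fit in 32 bits */
        assert(announced < INT32_MAX && announced > INT32_MIN);
        if (recwin < (int32_t)announced) {
            recwin = (int32_t)announced;
        }
        return recwin;
    }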
     /*
@@ -1299,12 +1335,15 @@ after_sack_rexmit:
      * data (receiver may have limited the window size)
      */
     if (len) {
-        if (tp->t_flagsext & TF_FORCE)
+        if (tp->t_flagsext & TF_FORCE) {
             goto send;
-        if (SEQ_LT(tp->snd_nxt, tp->snd_max))
+        }
+        if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
             goto send;
-        if (sack_rxmit)
+        }
+        if (sack_rxmit) {
             goto send;
+        }

         /*
          * If this here is the first segment after SYN/ACK and TFO
@@ -1313,8 +1352,9 @@ after_sack_rexmit:
         if (tp->t_state == TCPS_SYN_RECEIVED &&
             tfo_enabled(tp) &&
             (tp->t_tfo_flags & TFO_F_COOKIE_VALID) &&
-            tp->snd_nxt == tp->iss + 1)
+            tp->snd_nxt == tp->iss + 1) {
             goto send;
+        }

         /*
          * Send new data on the connection only if it is
@@ -1322,8 +1362,9 @@ after_sack_rexmit:
          */
         if (!INP_WAIT_FOR_IF_FEEDBACK(inp) ||
             tp->t_state != TCPS_ESTABLISHED) {
-            if (len >= tp->t_maxseg)
+            if (len >= tp->t_maxseg) {
                 goto send;
+            }

             if (!(tp->t_flags & TF_MORETOCOME) &&
                 (idle || tp->t_flags & TF_NODELAY ||
@@ -1331,22 +1372,24 @@ after_sack_rexmit:
                 ALLOW_LIMITED_TRANSMIT(tp)) &&
                 (tp->t_flags & TF_NOPUSH) == 0 &&
                 (len + off >= so->so_snd.sb_cc ||
-                /*
-                 * MPTCP needs to respect the DSS-mappings. So, it
-                 * may be sending data that *could* have been
-                 * coalesced, but cannot because of
-                 * mptcp_adj_sendlen().
-                 */
-                so->so_flags & SOF_MP_SUBFLOW))
+                /*
+                 * MPTCP needs to respect the DSS-mappings. So, it
+                 * may be sending data that *could* have been
+                 * coalesced, but cannot because of
+                 * mptcp_adj_sendlen().
+                 */
+                so->so_flags & SOF_MP_SUBFLOW)) {
                 goto send;
-            if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0)
+            }
+            if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) {
                 goto send;
+            }
         } else {
             tcpstat.tcps_fcholdpacket++;
         }
     }

-    if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN)) {
+    if (recwin > 0) {
         /*
          * "adv" is the amount we can increase the window,
          * taking into account that we are limited by
@@ -1354,43 +1397,79 @@ after_sack_rexmit:
          */
         int32_t adv, oldwin = 0;
         adv = imin(recwin, (int)TCP_MAXWIN << tp->rcv_scale) -
-            (tp->rcv_adv - tp->rcv_nxt);
+            (tp->rcv_adv - tp->rcv_nxt);

-        if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt))
+        if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
             oldwin = tp->rcv_adv - tp->rcv_nxt;
+        }

-        if (adv >= (int32_t) (2 * tp->t_maxseg)) {
-            /*
-             * Update only if the resulting scaled value of
-             * the window changed, or if there is a change in
-             * the sequence since the last ack. This avoids
-             * what appears as dupe ACKS (see rdar://5640997)
-             *
-             * If streaming is detected avoid sending too many
-             * window updates. We will depend on the delack
-             * timer to send a window update when needed.
-             */
-            if (!(tp->t_flags & TF_STRETCHACK) &&
-                (tp->last_ack_sent != tp->rcv_nxt ||
-                ((oldwin + adv) >> tp->rcv_scale) >
-                (oldwin >> tp->rcv_scale))) {
-                goto send;
+        if (tcp_ack_strategy == TCP_ACK_STRATEGY_LEGACY) {
+            if (adv >= (int32_t) (2 * tp->t_maxseg)) {
+                /*
+                 * Update only if the resulting scaled value of
+                 * the window changed, or if there is a change in
+                 * the sequence since the last ack. This avoids
+                 * what appears as dupe ACKS (see rdar://5640997)
+                 *
+                 * If streaming is detected avoid sending too many
+                 * window updates. We will depend on the delack
+                 * timer to send a window update when needed.
+                 *
+                 * If there is more data to read, don't send an ACK.
+                 * Otherwise we will end up sending many ACKs if the
+                 * application is doing micro-reads.
+                 */
+                if (!(tp->t_flags & TF_STRETCHACK) &&
+                    (tp->last_ack_sent != tp->rcv_nxt ||
+                    ((oldwin + adv) >> tp->rcv_scale) >
+                    (oldwin >> tp->rcv_scale))) {
+                    goto send;
+                }
+            }
+        } else {
+            if (adv >= (int32_t) (2 * tp->t_maxseg)) {
+                /*
+                 * ACK every second full-sized segment, if the
+                 * ACK is advancing or the window becomes bigger
+                 */
+                if (so->so_rcv.sb_cc < so->so_rcv.sb_lowat &&
+                    (tp->last_ack_sent != tp->rcv_nxt ||
+                    ((oldwin + adv) >> tp->rcv_scale) >
+                    (oldwin >> tp->rcv_scale))) {
+                    goto send;
+                }
+            } else if (tp->t_flags & TF_DELACK) {
+                /*
+                 * If we delayed the ACK and the window
+                 * is not advancing by a lot (< 2MSS), ACK
+                 * immediately if the last incoming packet had
+                 * the push flag set and we emptied the buffer.
+                 *
+                 * This takes care of a sender doing small
+                 * repeated writes with Nagle enabled.
+                 */
+                if (so->so_rcv.sb_cc == 0 &&
+                    tp->last_ack_sent != tp->rcv_nxt &&
+                    (tp->t_flagsext & TF_LAST_IS_PSH)) {
+                    goto send;
+                }
             }
-        }
-        if (4 * adv >= (int32_t) so->so_rcv.sb_hiwat)
+        }
+        if (4 * adv >= (int32_t) so->so_rcv.sb_hiwat) {
             goto send;
+        }

         /*
          * Make sure that the delayed ack timer is set if
          * we delayed sending a window update because of
          * streaming detection.
          */
-        if ((tp->t_flags & TF_STRETCHACK) &&
+        if (tcp_ack_strategy == TCP_ACK_STRATEGY_LEGACY &&
+            (tp->t_flags & TF_STRETCHACK) &&
             !(tp->t_flags & TF_DELACK)) {
             tp->t_flags |= TF_DELACK;
             tp->t_timer[TCPT_DELACK] =
-                OFFSET_FROM_START(tp, tcp_delack);
+                OFFSET_FROM_START(tp, tcp_delack);
         }
     }
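The non-legacy branch above implements a simpler ACK policy: ACK every second
full-sized segment while the application keeps up, and flush a pending
delayed ACK early when the last segment carried PSH and the receive buffer
drained, which keeps a small-write Nagle sender moving. A hedged distillation
of that decision (ack_advancing folds together "last_ack_sent != rcv_nxt" and
"the scaled window grew", an assumption made to keep the sketch small):

    #include <stdbool.h>
    #include <stdint.h>

    static bool
    example_should_ack_now(int32_t adv, uint32_t maxseg,
        uint32_t rcv_buffered, uint32_t rcv_lowat, bool ack_advancing,
        bool delack_pending, bool last_seg_was_psh)
    {
        if (adv >= (int32_t)(2 * maxseg)) {
            /* two full segments arrived and the app is keeping up */
            return rcv_buffered < rcv_lowat && ack_advancing;
        }
        if (delack_pending) {
            /* small writes: ACK once the buffer drained after a PSH */
            return rcv_buffered == 0 && ack_advancing && last_seg_was_psh;
        }
        return false;
    }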

     /*
      * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW
      * is also a catch-all for the retransmit timer timeout case.
      */
-    if (tp->t_flags & TF_ACKNOW)
+    if (tp->t_flags & TF_ACKNOW) {
+        if (tp->t_forced_acks > 0) {
+            tp->t_forced_acks--;
+        }
         goto send;
-    if ((flags & TH_RST) ||
-        ((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0))
+    }
+    if ((flags & TH_RST) || (flags & TH_SYN)) {
         goto send;
-    if (SEQ_GT(tp->snd_up, tp->snd_una))
+    }
+    if (SEQ_GT(tp->snd_up, tp->snd_una)) {
         goto send;
+    }
 #if MPTCP
-    if (mptcp_acknow)
+    if (mptcp_acknow) {
         goto send;
+    }
 #endif /* MPTCP */

     /*
      * If our state indicates that FIN should be sent
      * and we have not yet done so, then we need to send.
      */
     if ((flags & TH_FIN) &&
-        (!(tp->t_flags & TF_SENTFIN) || tp->snd_nxt == tp->snd_una))
+        (!(tp->t_flags & TF_SENTFIN) || tp->snd_nxt == tp->snd_una)) {
         goto send;
+    }
     /*
      * In SACK, it is possible for tcp_output to fail to send a segment
      * after the retransmission timer has been turned off. Make sure
@@ -1426,7 +1512,7 @@ after_sack_rexmit:
         tp->t_timer[TCPT_REXMT] == 0 &&
         tp->t_timer[TCPT_PERSIST] == 0) {
         tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp,
-            tp->t_rxtcur);
+            tp->t_rxtcur);
         goto just_return;
     }
     /*
@@ -1462,7 +1548,7 @@ just_return:
      * but if there are some packets left in the packet list, send them now.
      */
     while (inp->inp_sndinprog_cnt == 0 &&
-        tp->t_pktlist_head != NULL) {
+        tp->t_pktlist_head != NULL) {
         packetlist = tp->t_pktlist_head;
         packchain_listadd = tp->t_lastchain;
         packchain_sent++;
@@ -1475,13 +1561,13 @@ just_return:
     }
     /* tcp was closed while we were in ip; resume close */
     if (inp->inp_sndinprog_cnt == 0 &&
-        (tp->t_flags & TF_CLOSING)) {
+        (tp->t_flags & TF_CLOSING)) {
         tp->t_flags &= ~TF_CLOSING;
         (void) tcp_close(tp);
     } else {
         tcp_check_timer_state(tp);
     }
-    KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
+    KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
     return 0;

 send:
@@ -1490,10 +1576,13 @@ send:
      * the max segment size.
      */
     if (len > 0) {
-        if (len >= tp->t_maxseg)
+        do_not_compress = TRUE;
+
+        if (len >= tp->t_maxseg) {
             tp->t_flags |= TF_MAXSEGSNT;
-        else
+        } else {
             tp->t_flags &= ~TF_MAXSEGSNT;
+        }
     }
     /*
      * Before ESTABLISHED, force sending of initial options
@@ -1504,12 +1593,11 @@ send:
      * max_linkhdr + sizeof (struct tcpiphdr) + optlen <= MCLBYTES
      */
     optlen = 0;
-#if INET6
-    if (isipv6)
-        hdrlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
-    else
-#endif
-        hdrlen = sizeof (struct tcpiphdr);
+    if (isipv6) {
+        hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
+    } else {
+        hdrlen = sizeof(struct tcpiphdr);
+    }
     if (flags & TH_SYN) {
         tp->snd_nxt = tp->iss;
         if ((tp->t_flags & TF_NOOPT) == 0) {
@@ -1526,9 +1614,9 @@ send:
             (tp->t_flags & TF_RCVD_SCALE))) {
             *((u_int32_t *)(void *)(opt + optlen)) = htonl(
                 TCPOPT_NOP << 24 |
-                TCPOPT_WINDOW << 16 |
-                TCPOLEN_WINDOW << 8 |
-                tp->request_r_scale);
+                TCPOPT_WINDOW << 16 |
+                TCPOLEN_WINDOW << 8 |
+                tp->request_r_scale);
             optlen += 4;
         }
 #if MPTCP
@@ -1543,23 +1631,19 @@ send:
      * Send a timestamp and echo-reply if this is a SYN and our side
      * wants to use timestamps (TF_REQ_TSTMP is set) or both our side
      * and our peer have sent timestamps in our SYN's.
-     */
-    if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
-        (flags & TH_RST) == 0 &&
+     */
+    if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
+        (flags & TH_RST) == 0 &&
         ((flags & TH_ACK) == 0 ||
-        (tp->t_flags & TF_RCVD_TSTMP))) {
+        (tp->t_flags & TF_RCVD_TSTMP))) {
         u_int32_t *lp = (u_int32_t *)(void *)(opt + optlen);

-        /* Form timestamp option as shown in appendix A of RFC 1323. */
-        *lp++ = htonl(TCPOPT_TSTAMP_HDR);
-        *lp++ = htonl(tcp_now);
-        *lp = htonl(tp->ts_recent);
-        optlen += TCPOLEN_TSTAMP_APPA;
-    }
-
-    /* Note the timestamp for receive buffer autosizing */
-    if (tp->rfbuf_ts == 0 && (so->so_rcv.sb_flags & SB_AUTOSIZE))
-        tp->rfbuf_ts = tcp_now;
+        /* Form timestamp option as shown in appendix A of RFC 1323. */
+        *lp++ = htonl(TCPOPT_TSTAMP_HDR);
+        *lp++ = htonl(tcp_now + tp->t_ts_offset);
+        *lp = htonl(tp->ts_recent);
+        optlen += TCPOLEN_TSTAMP_APPA;
+    }
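The option code above packs options into big-endian 32-bit words: one byte
each for kind, length and payload, with NOPs keeping the next option aligned.
TCPOPT_TSTAMP_HDR is the analogous precombined word NOP, NOP, TIMESTAMP,
length 10 (0x0101080a). A worked example of the window-scale word, using the
standard RFC 793/1323 kind and length values:

    #include <stdint.h>

    #define EX_TCPOPT_NOP      1
    #define EX_TCPOPT_WINDOW   3
    #define EX_TCPOLEN_WINDOW  3

    /* NOP, WINDOW, len 3, shift; e.g. shift 6 packs to 0x01030306 */
    static uint32_t
    example_wscale_word(uint8_t shift)
    {
        return (uint32_t)(EX_TCPOPT_NOP << 24 | EX_TCPOPT_WINDOW << 16 |
            EX_TCPOLEN_WINDOW << 8 | shift);
    }

The word is then byte-swapped with htonl() before being copied into the
option buffer, exactly as the diff does for both the window-scale and the
timestamp words.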

     if (SACK_ENABLED(tp) && ((tp->t_flags & TF_NOOPT) == 0)) {
         /*
@@ -1601,19 +1685,21 @@ send:
             tp->t_mpflags |= TMPF_MPTCP_ACKNOW;
         }
         optlen = mptcp_setup_opts(tp, off, &opt[0], optlen, flags,
-            len, &mptcp_acknow);
+            len, &mptcp_acknow, &do_not_compress);
         tp->t_mpflags &= ~TMPF_SEND_DSN;
     }
 #endif /* MPTCP */

     if (tfo_enabled(tp) && !(tp->t_flags & TF_NOOPT) &&
-        (flags & (TH_SYN | TH_ACK)) == TH_SYN)
+        (flags & (TH_SYN | TH_ACK)) == TH_SYN) {
         optlen += tcp_tfo_write_cookie(tp, optlen, len, opt);
+    }

     if (tfo_enabled(tp) &&
         (flags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK) &&
-        (tp->t_tfo_flags & TFO_F_OFFER_COOKIE))
+        (tp->t_tfo_flags & TFO_F_OFFER_COOKIE)) {
         optlen += tcp_tfo_write_cookie_rep(tp, optlen, opt);
+    }

     if (SACK_ENABLED(tp) && ((tp->t_flags & TF_NOOPT) == 0)) {
         /*
@@ -1641,6 +1727,7 @@ send:
             nsack = min(nsack, (tp->rcv_numsacks +
                 (TCP_SEND_DSACK_OPT(tp) ? 1 : 0)));
             sackoptlen = (2 + nsack * TCPOLEN_SACK);
+            VERIFY(sackoptlen < UINT8_MAX);

             /*
              * First we need to pad options so that the
@@ -1649,12 +1736,13 @@ send:
              */
             padlen = (MAX_TCPOPTLEN - optlen - sackoptlen) % 4;
             optlen += padlen;
-            while (padlen-- > 0)
+            while (padlen-- > 0) {
                 *bp++ = TCPOPT_NOP;
+            }

             tcpstat.tcps_sack_send_blocks++;
             *bp++ = TCPOPT_SACK;
-            *bp++ = sackoptlen;
+            *bp++ = (uint8_t)sackoptlen;
             lp = (u_int32_t *)(void *)bp;

             /*
@@ -1712,7 +1800,7 @@ send:
     if ((flags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK) &&
         (tp->ecn_flags & TE_ENABLE_ECN)) {
         if (tp->ecn_flags & TE_SETUPRECEIVED) {
-            if (tcp_send_ecn_flags_on_syn(tp, so)) {
+            if (tcp_send_ecn_flags_on_syn(tp)) {
                 /*
                  * Setting TH_ECE makes this an ECN-setup
                  * SYN-ACK
@@ -1723,7 +1811,7 @@ send:
                  * Record that we sent the ECN-setup and
                  * default to setting IP ECT.
                  */
-                tp->ecn_flags |= (TE_SETUPSENT|TE_SENDIPECT);
+                tp->ecn_flags |= (TE_SETUPSENT | TE_SENDIPECT);
                 tcpstat.tcps_ecn_server_setup++;
                 tcpstat.tcps_ecn_server_success++;
             } else {
@@ -1754,7 +1842,7 @@ send:
         }
     } else if ((flags & (TH_SYN | TH_ACK)) == TH_SYN &&
         (tp->ecn_flags & TE_ENABLE_ECN)) {
-        if (tcp_send_ecn_flags_on_syn(tp, so)) {
+        if (tcp_send_ecn_flags_on_syn(tp)) {
             /*
              * Setting TH_ECE and TH_CWR makes this an
              * ECN-setup SYN
@@ -1810,21 +1898,18 @@ send:
     tp->t_dsack_lseq = 0;
     tp->t_dsack_rseq = 0;

-#if INET6
-    if (isipv6)
+    if (isipv6) {
         ipoptlen = ip6_optlen(inp);
-    else
-#endif
-    {
+    } else {
         if (tp_inp_options) {
             ipoptlen = tp_inp_options->m_len -
-                offsetof(struct ipoption, ipopt_list);
+                offsetof(struct ipoption, ipopt_list);
         } else {
             ipoptlen = 0;
         }
     }
 #if IPSEC
-    ipoptlen += ipsec_optlen;
+    ipoptlen += ipsec_optlen;
 #endif

     /*
@@ -1850,39 +1935,46 @@ send:
         int32_t tso_maxlen;

         tso_maxlen = tp->tso_max_segment_size ?
-            tp->tso_max_segment_size : TCP_MAXWIN;
+            tp->tso_max_segment_size : TCP_MAXWIN;

         if (len > tso_maxlen - hdrlen - optlen) {
             len = tso_maxlen - hdrlen - optlen;
-            len = len - (len % (tp->t_maxopd - optlen));
             sendalot = 1;
         } else if (tp->t_flags & TF_NEEDFIN) {
             sendalot = 1;
         }
+
+        if (len % (tp->t_maxopd - optlen) != 0) {
+            len = len - (len % (tp->t_maxopd - optlen));
+            sendalot = 1;
+        }
     } else {
         len = tp->t_maxopd - optlen - ipoptlen;
         sendalot = 1;
     }
 }
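A worked example of the TSO length trimming just above: the payload handed to
the NIC is first capped at the device limit, then cut back to a whole number
of MSS-sized segments so no runt segment is emitted mid-burst; any remainder
waits for the next pass (sendalot). All numbers below are illustrative:

    #include <assert.h>
    #include <stdint.h>

    static int32_t
    example_tso_trim(int32_t len, int32_t tso_maxlen, int32_t hdrlen,
        int32_t optlen, int32_t mss, int *sendalot)
    {
        if (len > tso_maxlen - hdrlen - optlen) {
            len = tso_maxlen - hdrlen - optlen;   /* device cap */
            *sendalot = 1;
        }
        if (len % mss != 0) {
            len -= len % mss;                     /* whole segments only */
            *sendalot = 1;
        }
        return len;
    }

    static void
    example_tso_trim_usage(void)
    {
        int sendalot = 0;
        /* 100,000 bytes queued, 64 KB device limit, 1448-byte MSS:
         * cap to 65,464 payload bytes, then round down to 45 segments */
        int32_t len = example_tso_trim(100000, 65536, 60, 12, 1448,
            &sendalot);
        assert(len == 45 * 1448 && sendalot == 1);
    }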

-    if (max_linkhdr + hdrlen > MCLBYTES)
+    if (max_linkhdr + hdrlen > MCLBYTES) {
         panic("tcphdr too big");
+    }

     /* Check if there is enough data in the send socket
      * buffer to start measuring bandwidth
      */
     if ((tp->t_flagsext & TF_MEASURESNDBW) != 0 &&
-        (tp->t_bwmeas != NULL) &&
-        (tp->t_flagsext & TF_BWMEAS_INPROGRESS) == 0) {
+        (tp->t_bwmeas != NULL) &&
+        (tp->t_flagsext & TF_BWMEAS_INPROGRESS) == 0) {
         tp->t_bwmeas->bw_size = min(min(
-            (so->so_snd.sb_cc - (tp->snd_max - tp->snd_una)),
-            tp->snd_cwnd), tp->snd_wnd);
+            (so->so_snd.sb_cc - (tp->snd_max - tp->snd_una)),
+            tp->snd_cwnd), tp->snd_wnd);
         if (tp->t_bwmeas->bw_minsize > 0 &&
-            tp->t_bwmeas->bw_size < tp->t_bwmeas->bw_minsize)
+            tp->t_bwmeas->bw_size < tp->t_bwmeas->bw_minsize) {
             tp->t_bwmeas->bw_size = 0;
-        if (tp->t_bwmeas->bw_maxsize > 0)
+        }
+        if (tp->t_bwmeas->bw_maxsize > 0) {
             tp->t_bwmeas->bw_size = min(tp->t_bwmeas->bw_size,
                 tp->t_bwmeas->bw_maxsize);
+        }
         if (tp->t_bwmeas->bw_size > 0) {
             tp->t_flagsext |= TF_BWMEAS_INPROGRESS;
             tp->t_bwmeas->bw_start = tp->snd_max;
@@ -1897,15 +1989,19 @@ send:
      * the template for sends on this connection.
      */
     if (len) {
-        tp->t_pmtud_lastseg_size = len + optlen + ipoptlen;
-        if ((tp->t_flagsext & TF_FORCE) && len == 1)
+        /* Remember what the last head-of-line packet-size was */
+        if (tp->t_pmtud_lastseg_size == 0 && tp->snd_nxt == tp->snd_una) {
+            ASSERT(len + optlen + ipoptlen <= IP_MAXPACKET);
+            tp->t_pmtud_lastseg_size = (uint16_t)(len + optlen + ipoptlen);
+        }
+        if ((tp->t_flagsext & TF_FORCE) && len == 1) {
             tcpstat.tcps_sndprobe++;
-        else if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
+        } else if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
             tcpstat.tcps_sndrexmitpack++;
             tcpstat.tcps_sndrexmitbyte += len;
             if (nstat_collect) {
                 nstat_route_tx(inp->inp_route.ro_rt, 1,
-                    len, NSTAT_TX_FLAG_RETRANSMIT);
+                    len, NSTAT_TX_FLAG_RETRANSMIT);
                 INP_ADD_STAT(inp, cell, wifi, wired,
                     txpackets, 1);
                 INP_ADD_STAT(inp, cell, wifi, wired,
@@ -1948,24 +2044,24 @@ send:
         /* minimum length we are going to allocate */
         allocated_len = MHLEN;
-        if (MHLEN < hdrlen + max_linkhdr) {
+        if (MHLEN < hdrlen + max_linkhdr) {
             MGETHDR(m, M_DONTWAIT, MT_HEADER);
             if (m == NULL) {
                 error = ENOBUFS;
                 goto out;
             }
-            MCLGET(m, M_DONTWAIT);
-            if ((m->m_flags & M_EXT) == 0) {
-                m_freem(m);
-                error = ENOBUFS;
-                goto out;
-            }
+            MCLGET(m, M_DONTWAIT);
+            if ((m->m_flags & M_EXT) == 0) {
+                m_freem(m);
+                error = ENOBUFS;
+                goto out;
+            }
             m->m_data += max_linkhdr;
             m->m_len = hdrlen;
             allocated_len = MCLBYTES;
         }
         if (len <= allocated_len - hdrlen - max_linkhdr) {
-            if (m == NULL) {
+            if (m == NULL) {
                 VERIFY(allocated_len <= MHLEN);
                 MGETHDR(m, M_DONTWAIT, MT_HEADER);
                 if (m == NULL) {
@@ -1977,7 +2073,9 @@ send:
             }
             /* makes sure we still have data left to be sent at this point */
             if (so->so_snd.sb_mb == NULL || off < 0) {
-                if (m != NULL) m_freem(m);
+                if (m != NULL) {
+                    m_freem(m);
+                }
                 error = 0; /* should we return an error? */
                 goto out;
             }
@@ -2021,10 +2119,10 @@ send:
          * it acted on to fulfill the current request,
          * whether a valid 'hint' was passed in or not.
          */
-        if ((m = m_copym_with_hdrs(so->so_snd.sb_mb,
+        if ((m = m_copym_with_hdrs(so->so_snd.sb_mb,
             off, len, M_DONTWAIT, NULL, NULL,
             copymode)) == NULL) {
-            error = ENOBUFS;
+            error = ENOBUFS;
             goto out;
         }
         m->m_data += max_linkhdr;
         m->m_len = hdrlen;
@@ -2039,19 +2137,21 @@ send:
          *
          * On SYN-segments we should not add the PUSH-flag.
          */
-        if (off + len == so->so_snd.sb_cc && !(flags & TH_SYN))
+        if (off + len == so->so_snd.sb_cc && !(flags & TH_SYN)) {
             flags |= TH_PUSH;
+        }
     } else {
-        if (tp->t_flags & TF_ACKNOW)
+        if (tp->t_flags & TF_ACKNOW) {
             tcpstat.tcps_sndacks++;
-        else if (flags & (TH_SYN|TH_FIN|TH_RST))
+        } else if (flags & (TH_SYN | TH_FIN | TH_RST)) {
             tcpstat.tcps_sndctrl++;
-        else if (SEQ_GT(tp->snd_up, tp->snd_una))
+        } else if (SEQ_GT(tp->snd_up, tp->snd_una)) {
             tcpstat.tcps_sndurg++;
-        else
+        } else {
             tcpstat.tcps_sndwinup++;
+        }

-        MGETHDR(m, M_DONTWAIT, MT_HEADER); /* MAC-OK */
+        MGETHDR(m, M_DONTWAIT, MT_HEADER); /* MAC-OK */
         if (m == NULL) {
             error = ENOBUFS;
             goto out;
@@ -2068,16 +2168,36 @@ send:
         m->m_len = hdrlen;
     }
     m->m_pkthdr.rcvif = 0;
-#if CONFIG_MACF_NET
-    mac_mbuf_label_associate_inpcb(inp, m);
-#endif
-#if INET6
+
+    /* Any flag other than pure-ACK: Do not compress! */
+    if (flags & ~(TH_ACK)) {
+        do_not_compress = TRUE;
+    }
+
+    if (tp->rcv_scale == 0) {
+        do_not_compress = TRUE;
+    }
+
+    if (do_not_compress || (tcp_do_ack_compression == 1 && !cell) || __improbable(!tcp_do_ack_compression)) {
+        m->m_pkthdr.comp_gencnt = 0;
+    } else {
+        if (TSTMP_LT(tp->t_comp_lastinc + tcp_ack_compression_rate, tcp_now)) {
+            tp->t_comp_gencnt++;
+            /* 0 means no compression, thus skip it */
+            if (tp->t_comp_gencnt <= TCP_ACK_COMPRESSION_DUMMY) {
+                tp->t_comp_gencnt = TCP_ACK_COMPRESSION_DUMMY + 1;
+            }
+            tp->t_comp_lastinc = tcp_now;
+        }
+        m->m_pkthdr.comp_gencnt = tp->t_comp_gencnt;
+    }
+
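The comp_gencnt stamping above is the sender half of ACK compression:
eligible pure ACKs are tagged with the connection's current generation
counter, and the counter only advances every tcp_ack_compression_rate ms, so
ACKs sharing a generation may later be collapsed with only the newest
surviving. A hedged model; the assumption that TCP_ACK_COMPRESSION_DUMMY is 0
("never compress") follows from the "0 means no compression" comment:

    #include <stdint.h>

    #define EX_ACK_COMPRESSION_DUMMY 0  /* assumed: 0 = do not compress */

    struct example_comp_state {
        uint32_t gencnt;
        uint32_t last_inc_ms;
    };

    static uint32_t
    example_stamp_ack(struct example_comp_state *st, uint32_t now_ms,
        uint32_t rate_ms, int do_not_compress)
    {
        if (do_not_compress) {
            return EX_ACK_COMPRESSION_DUMMY;
        }
        if (now_ms - st->last_inc_ms > rate_ms) {
            st->gencnt++;
            /* skip the reserved "never compress" value on wraparound */
            if (st->gencnt == EX_ACK_COMPRESSION_DUMMY) {
                st->gencnt = EX_ACK_COMPRESSION_DUMMY + 1;
            }
            st->last_inc_ms = now_ms;
        }
        return st->gencnt;
    }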
     if (isipv6) {
         ip6 = mtod(m, struct ip6_hdr *);
         th = (struct tcphdr *)(void *)(ip6 + 1);
         tcp_fillheaders(tp, ip6, th);
         if ((tp->ecn_flags & TE_SENDIPECT) != 0 && len &&
-            !SEQ_LT(tp->snd_nxt, tp->snd_max) && !sack_rxmit) {
+            !SEQ_LT(tp->snd_nxt, tp->snd_max) && !sack_rxmit) {
             ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20);
         }
         svc_flags |= PKT_SCF_IPV6;
@@ -2085,11 +2205,8 @@ send:
         m_pftag(m)->pftag_hdr = (void *)ip6;
         m_pftag(m)->pftag_flags |= PF_TAG_HDR_INET6;
 #endif /* PF_ECN */
-    } else
-#endif /* INET6 */
-    {
+    } else {
         ip = mtod(m, struct ip *);
-        ipov = (struct ipovly *)ip;
         th = (struct tcphdr *)(void *)(ip + 1);
         /* this picks up the pseudo header (w/o the length) */
         tcp_fillheaders(tp, ip, th);
@@ -2110,8 +2227,9 @@ send:
      * If resending a FIN, be sure not to use a new sequence number.
      */
     if ((flags & TH_FIN) && (tp->t_flags & TF_SENTFIN) &&
-        tp->snd_nxt == tp->snd_max)
+        tp->snd_nxt == tp->snd_max) {
         tp->snd_nxt--;
+    }
     /*
      * If we are doing retransmissions, then snd_nxt will
      * not reflect the first unsent octet. For ACK only
     * packets, we do not want the sequence number of the
     * retransmitted packet, we want the sequence number
     * of the next unsent octet. So, if there is no data
     * (and no SYN or FIN), use snd_max instead of snd_nxt
     * when filling in ti_seq. But if we are in persist
     * state, snd_max might reflect one byte sent or the FIN
@@ -2129,7 +2247,7 @@ send:
      * retransmissions.
      */
     if (sack_rxmit == 0) {
-        if (len || (flags & (TH_SYN|TH_FIN)) ||
+        if (len || (flags & (TH_SYN | TH_FIN)) ||
             tp->t_timer[TCPT_PERSIST]) {
             th->th_seq = htonl(tp->snd_nxt);
             if (len > 0) {
@@ -2141,9 +2259,10 @@ send:
                 tcp_rxtseg_insert(tp, tp->snd_nxt,
                     (tp->snd_nxt + len - 1));
             }
-            if (len > 0)
+            if (len > 0) {
                 m->m_pkthdr.pkt_flags |=
                     PKTF_TCP_REXMT;
+            }
         }
     } else {
         th->th_seq = htonl(tp->snd_max);
@@ -2163,10 +2282,11 @@ send:
     tp->last_ack_sent = tp->rcv_nxt;
     if (optlen) {
         bcopy(opt, th + 1, optlen);
-        th->th_off = (sizeof (struct tcphdr) + optlen) >> 2;
+        th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
     }
     th->th_flags = flags;
-    th->th_win = htons((u_short) (recwin>>tp->rcv_scale));
+    th->th_win = htons((u_short) (recwin >> tp->rcv_scale));
+    tp->t_last_recwin = recwin;
     if (!(so->so_flags & SOF_MP_SUBFLOW)) {
         if (recwin > 0 && SEQ_LT(tp->rcv_adv, tp->rcv_nxt + recwin)) {
             tp->rcv_adv = tp->rcv_nxt + recwin;
@@ -2177,8 +2297,8 @@ send:
             tp->rcv_adv = tp->rcv_nxt + recwin;
         }

-        if (recwin > 0 && SEQ_LT(mp_tp->mpt_rcvadv, (uint32_t)mp_tp->mpt_rcvnxt + recwin)) {
-            mp_tp->mpt_rcvadv = (uint32_t)mp_tp->mpt_rcvnxt + recwin;
+        if (recwin > 0 && MPTCP_SEQ_LT(mp_tp->mpt_rcvadv, mp_tp->mpt_rcvnxt + recwin)) {
+            mp_tp->mpt_rcvadv = mp_tp->mpt_rcvnxt + recwin;
         }
     }

@@ -2190,10 +2310,11 @@ send:
      * to read more data then can be buffered prior to transmitting on
      * the connection.
      */
-    if (th->th_win == 0)
+    if (th->th_win == 0) {
         tp->t_flags |= TF_RXWIN0SENT;
-    else
+    } else {
         tp->t_flags &= ~TF_RXWIN0SENT;
+    }

     if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
         th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt));
@@ -2205,7 +2326,7 @@ send:
          * so that it doesn't drift into the send window on sequence
          * number wraparound.
          */
-        tp->snd_up = tp->snd_una; /* drag it along */
+        tp->snd_up = tp->snd_una; /* drag it along */
     }

     /*
@@ -2221,10 +2342,10 @@ send:
      */
     if (tp->t_state != TCPS_ESTABLISHED &&
         (tp->t_state == TCPS_CLOSING || tp->t_state == TCPS_TIME_WAIT
-        || tp->t_state == TCPS_LAST_ACK || (th->th_flags & TH_RST)))
+        || tp->t_state == TCPS_LAST_ACK || (th->th_flags & TH_RST))) {
         m->m_pkthdr.pkt_flags |= PKTF_LAST_PKT;
+    }

-#if INET6
     if (isipv6) {
         /*
          * ip6_plen does not need to be filled now, and will be filled
@@ -2232,18 +2353,17 @@ send:
          */
         m->m_pkthdr.csum_flags = CSUM_TCPIPV6;
         m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
-        if (len + optlen)
+        if (len + optlen) {
             th->th_sum = in_addword(th->th_sum,
-                htons((u_short)(optlen + len)));
-    }
-    else
-#endif /* INET6 */
-    {
+                htons((u_short)(optlen + len)));
+        }
+    } else {
         m->m_pkthdr.csum_flags = CSUM_TCP;
         m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
-        if (len + optlen)
+        if (len + optlen) {
             th->th_sum = in_addword(th->th_sum,
-                htons((u_short)(optlen + len)));
+                htons((u_short)(optlen + len)));
+        }
     }

     /*
@@ -2251,12 +2371,11 @@ send:
      * The TCP pseudo header checksum is always provided.
      */
     if (tso) {
-#if INET6
-        if (isipv6)
+        if (isipv6) {
             m->m_pkthdr.csum_flags |= CSUM_TSO_IPV6;
-        else
-#endif /* INET6 */
+        } else {
             m->m_pkthdr.csum_flags |= CSUM_TSO_IPV4;
+        }

         m->m_pkthdr.tso_segsz = tp->t_maxopd - optlen;
     } else {
@@ -2274,17 +2393,19 @@ send:
     /*
      * Advance snd_nxt over sequence space of this segment.
      */
-    if (flags & (TH_SYN|TH_FIN)) {
-        if (flags & TH_SYN)
+    if (flags & (TH_SYN | TH_FIN)) {
+        if (flags & TH_SYN) {
             tp->snd_nxt++;
+        }
         if ((flags & TH_FIN) &&
-            !(tp->t_flags & TF_SENTFIN)) {
+            !(tp->t_flags & TF_SENTFIN)) {
             tp->snd_nxt++;
             tp->t_flags |= TF_SENTFIN;
         }
     }
-    if (sack_rxmit)
+    if (sack_rxmit) {
         goto timer;
+    }
     if (sack_rescue_rxt == TRUE) {
         tp->snd_nxt = old_snd_nxt;
         sack_rescue_rxt = FALSE;
@@ -2316,14 +2437,14 @@ timer:
     if (tp->t_timer[TCPT_REXMT] == 0 &&
         ((sack_rxmit && tp->snd_nxt != tp->snd_max) ||
-        tp->snd_nxt != tp->snd_una || (flags & TH_FIN))) {
+        tp->snd_nxt != tp->snd_una || (flags & TH_FIN))) {
         if (tp->t_timer[TCPT_PERSIST]) {
             tp->t_timer[TCPT_PERSIST] = 0;
             tp->t_persist_stop = 0;
             TCP_RESET_REXMT_STATE(tp);
         }
         tp->t_timer[TCPT_REXMT] =
-            OFFSET_FROM_START(tp, tp->t_rxtcur);
+            OFFSET_FROM_START(tp, tp->t_rxtcur);
     }

     /*
@@ -2338,27 +2459,39 @@ timer:
         tp->snd_nxt == tp->snd_max &&
         SEQ_GT(tp->snd_nxt, tp->snd_una) &&
         tp->t_rxtshift == 0 &&
-        (tp->t_flagsext & (TF_SENT_TLPROBE|TF_PKTS_REORDERED)) == 0) {
-        u_int32_t pto, srtt;
-
-        /*
-         * Using SRTT alone to set PTO can cause spurious
-         * retransmissions on wireless networks where there
-         * is a lot of variance in RTT. Taking variance
-         * into account will avoid this.
-         */
-        srtt = tp->t_srtt >> TCP_RTT_SHIFT;
-        pto = ((TCP_REXMTVAL(tp)) * 3) >> 1;
-        pto = max (2 * srtt, pto);
-        if ((tp->snd_max - tp->snd_una) == tp->t_maxseg)
-            pto = max(pto,
-                (((3 * pto) >> 2) + tcp_delack * 2));
-        else
-            pto = max(10, pto);
+        (tp->t_flagsext & (TF_SENT_TLPROBE | TF_PKTS_REORDERED)) == 0) {
+        uint32_t pto, srtt;
+
+        if (tcp_do_better_lr) {
+            srtt = tp->t_srtt >> TCP_RTT_SHIFT;
+            pto = 2 * srtt;
+            if ((tp->snd_max - tp->snd_una) <= tp->t_maxseg) {
+                pto += tcp_delack;
+            } else {
+                pto += 2;
+            }
+        } else {
+            /*
+             * Using SRTT alone to set PTO can cause spurious
+             * retransmissions on wireless networks where there
+             * is a lot of variance in RTT. Taking variance
+             * into account will avoid this.
+             */
+            srtt = tp->t_srtt >> TCP_RTT_SHIFT;
+            pto = ((TCP_REXMTVAL(tp)) * 3) >> 1;
+            pto = max(2 * srtt, pto);
+            if ((tp->snd_max - tp->snd_una) == tp->t_maxseg) {
+                pto = max(pto,
+                    (((3 * pto) >> 2) + tcp_delack * 2));
+            } else {
+                pto = max(10, pto);
+            }
+        }

         /* if RTO is less than PTO, choose RTO instead */
-        if (tp->t_rxtcur < pto)
+        if (tp->t_rxtcur < pto) {
             pto = tp->t_rxtcur;
+        }

         tp->t_timer[TCPT_PTO] = OFFSET_FROM_START(tp, pto);
     }
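A hedged restatement of the better-LR probe-timeout arithmetic above:
PTO = 2 * SRTT, plus the delayed-ACK allowance when at most one segment is
outstanding (the probe must outlive the peer's delayed ACK), plus a small
2-tick fudge otherwise, and never later than the RTO. Units are milliseconds
here for readability; the kernel uses its own tick representation:

    #include <stdint.h>

    static uint32_t
    example_pto_ms(uint32_t srtt_ms, uint32_t rto_ms, uint32_t delack_ms,
        int one_segment_outstanding)
    {
        uint32_t pto = 2 * srtt_ms;

        if (one_segment_outstanding) {
            pto += delack_ms;   /* outlive the peer's delayed ACK */
        } else {
            pto += 2;
        }
        /* if the retransmission timer would fire first, just use it */
        return pto < rto_ms ? pto : rto_ms;
    }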
@@ -2368,10 +2501,11 @@ timer:
      * persist mode (no window) we do not update snd_nxt.
      */
     int xlen = len;
-    if (flags & TH_SYN)
+    if (flags & TH_SYN) {
         ++xlen;
+    }
     if ((flags & TH_FIN) &&
-        !(tp->t_flags & TF_SENTFIN)) {
+        !(tp->t_flags & TF_SENTFIN)) {
         ++xlen;
         tp->t_flags |= TF_SENTFIN;
     }
@@ -2385,8 +2519,9 @@ timer:
     /*
      * Trace.
      */
-    if (so_options & SO_DEBUG)
+    if (so_options & SO_DEBUG) {
         tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0);
+    }
 #endif

     /*
@@ -2395,7 +2530,6 @@ timer:
      * to handle ttl and tos; we could keep them in
      * the template, but need a way to checksum without them.
      */
-#if INET6
     /*
      * m->m_pkthdr.len should have been set before cksum calculation,
      * because in6_cksum() needs it.
@@ -2415,18 +2549,17 @@
             ((inp->inp_fport << 16) | inp->inp_lport),
             (((inp->in6p_laddr.s6_addr16[0] & 0xffff) << 16) |
             (inp->in6p_faddr.s6_addr16[0] & 0xffff)),
-            sendalot,0,0);
-    } else
-#endif /* INET6 */
-    {
-        ip->ip_len = m->m_pkthdr.len;
-        ip->ip_ttl = inp->inp_ip_ttl; /* XXX */
+            sendalot, 0, 0);
+    } else {
+        ASSERT(m->m_pkthdr.len <= IP_MAXPACKET);
+        ip->ip_len = (u_short)m->m_pkthdr.len;
+        ip->ip_ttl = inp->inp_ip_ttl; /* XXX */
         ip->ip_tos |= (inp->inp_ip_tos & ~IPTOS_ECN_MASK); /* XXX */
-        KERNEL_DEBUG(DBG_LAYER_BEG,
-            ((inp->inp_fport << 16) | inp->inp_lport),
-            (((inp->inp_laddr.s_addr & 0xffff) << 16) |
-            (inp->inp_faddr.s_addr & 0xffff)), 0,0,0);
-    }
+        KERNEL_DEBUG(DBG_LAYER_BEG,
+            ((inp->inp_fport << 16) | inp->inp_lport),
+            (((inp->inp_laddr.s_addr & 0xffff) << 16) |
+            (inp->inp_faddr.s_addr & 0xffff)), 0, 0, 0);
+    }

     /*
      * See if we should do MTU discovery.
@@ -2437,35 +2570,36 @@ timer:
      * 4) the MTU is not locked (if it is, then discovery has been
      * disabled for that route)
      */
-#if INET6
-    if (!isipv6)
-#endif /* INET6 */
-        if (path_mtu_discovery && (tp->t_flags & TF_PMTUD))
+    if (!isipv6) {
+        if (path_mtu_discovery && (tp->t_flags & TF_PMTUD)) {
             ip->ip_off |= IP_DF;
+        }
+    }

 #if NECP
     {
         necp_kernel_policy_id policy_id;
         necp_kernel_policy_id skip_policy_id;
         u_int32_t route_rule_id;
-        if (!necp_socket_is_allowed_to_send_recv(inp, NULL, &policy_id, &route_rule_id, &skip_policy_id)) {
+        u_int32_t pass_flags;
+        if (!necp_socket_is_allowed_to_send_recv(inp, NULL, 0, &policy_id, &route_rule_id, &skip_policy_id, &pass_flags)) {
             TCP_LOG_DROP_NECP(isipv6 ? (void *)ip6 : (void *)ip, th, tp, true);
             m_freem(m);
             error = EHOSTUNREACH;
             goto out;
         }
-        necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id, skip_policy_id);
+        necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id, skip_policy_id, pass_flags);

         if (net_qos_policy_restricted != 0) {
-            necp_socket_update_qos_marking(inp, inp->inp_route.ro_rt,
-                NULL, route_rule_id);
+            necp_socket_update_qos_marking(inp, inp->inp_route.ro_rt, route_rule_id);
         }
     }
 #endif /* NECP */

 #if IPSEC
-    if (inp->inp_sp != NULL)
+    if (inp->inp_sp != NULL) {
         ipsec_setsocket(m, so);
+    }
 #endif /*IPSEC*/

     /*
@@ -2482,10 +2616,11 @@ timer:
         m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC | PKTF_FLOW_ADV);
         m->m_pkthdr.pkt_proto = IPPROTO_TCP;
         m->m_pkthdr.tx_tcp_pid = so->last_pid;
-        if (so->so_flags & SOF_DELEGATED)
+        if (so->so_flags & SOF_DELEGATED) {
             m->m_pkthdr.tx_tcp_e_pid = so->e_pid;
-        else
+        } else {
             m->m_pkthdr.tx_tcp_e_pid = 0;
+        }

     m->m_nextpkt = NULL;

@@ -2498,15 +2633,15 @@ timer:
      * 3. Only ACK flag is set.
      * 4. there is no outstanding data on this connection.
      */
-    if (tcp_prioritize_acks != 0 && len == 0 &&
-        (inp->inp_last_outifp->if_eflags &
-        (IFEF_TXSTART | IFEF_NOACKPRI)) == IFEF_TXSTART) {
+    if (len == 0 && (inp->inp_last_outifp->if_eflags & (IFEF_TXSTART | IFEF_NOACKPRI)) == IFEF_TXSTART) {
         if (th->th_flags == TH_ACK &&
             tp->snd_una == tp->snd_max &&
-            tp->t_timer[TCPT_REXMT] == 0)
+            tp->t_timer[TCPT_REXMT] == 0) {
             svc_flags |= PKT_SCF_TCP_ACK;
-        if (th->th_flags & TH_SYN)
+        }
+        if (th->th_flags & TH_SYN) {
             svc_flags |= PKT_SCF_TCP_SYN;
+        }
     }
     set_packet_service_class(m, so, sotc, svc_flags);
 } else {
@@ -2524,16 +2659,13 @@ timer:
     tp->t_pktlist_sentlen += len;
     tp->t_lastchain++;

-#if INET6
     if (isipv6) {
         DTRACE_TCP5(send, struct mbuf *, m, struct inpcb *, inp,
-            struct ip6 *, ip6, struct tcpcb *, tp, struct tcphdr *,
-            th);
-    } else
-#endif /* INET6 */
-    {
+            struct ip6 *, ip6, struct tcpcb *, tp, struct tcphdr *,
+            th);
+    } else {
         DTRACE_TCP5(send, struct mbuf *, m, struct inpcb *, inp,
-            struct ip *, ip, struct tcpcb *, tp, struct tcphdr *, th);
+            struct ip *, ip, struct tcpcb *, tp, struct tcphdr *, th);
     }

     if (tp->t_pktlist_head != NULL) {
@@ -2544,30 +2676,6 @@ timer:
         tp->t_pktlist_head = tp->t_pktlist_tail = m;
     }

-    if (lro_ackmore && !sackoptlen && tp->t_timer[TCPT_PERSIST] == 0 &&
-        (th->th_flags & TH_ACK) == TH_ACK && len == 0 &&
-        tp->t_state == TCPS_ESTABLISHED) {
-        /* For a pure ACK, see if you need to send more of them */
-        mnext = tcp_send_lroacks(tp, m, th);
-        if (mnext) {
-            tp->t_pktlist_tail->m_nextpkt = mnext;
-            if (mnext->m_nextpkt == NULL) {
-                tp->t_pktlist_tail = mnext;
-                tp->t_lastchain++;
-            } else {
-                struct mbuf *tail, *next;
-                next = mnext->m_nextpkt;
-                tail = next->m_nextpkt;
-                while (tail) {
-                    next = tail;
-                    tail = tail->m_nextpkt;
-                    tp->t_lastchain++;
-                }
-                tp->t_pktlist_tail = next;
-            }
-        }
-    }
-
     if (sendalot == 0 || (tp->t_state != TCPS_ESTABLISHED) ||
         (tp->snd_cwnd <= (tp->snd_wnd / 8)) ||
         (tp->t_flags & TF_ACKNOW) ||
@@ -2575,7 +2683,7 @@ timer:
         tp->t_lastchain >= tcp_packet_chaining) {
         error = 0;
         while (inp->inp_sndinprog_cnt == 0 &&
-            tp->t_pktlist_head != NULL) {
+            tp->t_pktlist_head != NULL) {
             packetlist = tp->t_pktlist_head;
             packchain_listadd = tp->t_lastchain;
             packchain_sent++;
@@ -2601,7 +2709,7 @@ timer:
         }
         /* tcp was closed while we were in ip; resume close */
         if (inp->inp_sndinprog_cnt == 0 &&
-            (tp->t_flags & TF_CLOSING)) {
+            (tp->t_flags & TF_CLOSING)) {
             tp->t_flags &= ~TF_CLOSING;
             (void) tcp_close(tp);
             return 0;
@@ -2632,23 +2740,36 @@ timer:

             if (SEQ_GT((p->rxmit - lost),
                 tp->snd_una)) {
                 p->rxmit -= lost;
+
+                if (SEQ_LT(p->rxmit, p->start)) {
+                    p->rxmit = p->start;
+                }
             } else {
                 lost = p->rxmit - tp->snd_una;
                 p->rxmit = tp->snd_una;
+
+                if (SEQ_LT(p->rxmit, p->start)) {
+                    p->rxmit = p->start;
+                }
             }
             tp->sackhint.sack_bytes_rexmit -= lost;
+            if (tp->sackhint.sack_bytes_rexmit < 0) {
+                tp->sackhint.sack_bytes_rexmit = 0;
+            }
         } else {
             if (SEQ_GT((tp->snd_nxt - lost),
-                tp->snd_una))
+                tp->snd_una)) {
                 tp->snd_nxt -= lost;
-            else
+            } else {
                 tp->snd_nxt = tp->snd_una;
+            }
         }
     }
 out:
-    if (tp->t_pktlist_head != NULL)
+    if (tp->t_pktlist_head != NULL) {
         m_freem_list(tp->t_pktlist_head);
+    }
     TCP_PKTLIST_CLEAR(tp);

     if (error == ENOBUFS) {
@@ -2660,13 +2781,14 @@ out:
         if (tp->t_timer[TCPT_REXMT] == 0 &&
             tp->t_timer[TCPT_PERSIST] == 0 &&
             (len != 0 || (flags & (TH_SYN | TH_FIN)) != 0 ||
-            so->so_snd.sb_cc > 0))
+            so->so_snd.sb_cc > 0)) {
             tp->t_timer[TCPT_REXMT] =
-                OFFSET_FROM_START(tp, tp->t_rxtcur);
+                OFFSET_FROM_START(tp, tp->t_rxtcur);
+        }
         tp->snd_cwnd = tp->t_maxseg;
         tp->t_bytes_acked = 0;
         tcp_check_timer_state(tp);
-        KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0);
+        KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);

         tcp_ccdbg_trace(tp, NULL, TCP_CC_OUTPUT_ERROR);
         return 0;
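The ENOBUFS path above treats local buffer exhaustion like congestion at the
bottleneck: the congestion window collapses to one segment and a
retransmission timer is armed so the queued data is retried later; nothing
was lost on the wire, so the data simply stays in the send buffer. A minimal
sketch of that backoff, under the assumptions named in the comments:

    #include <stdint.h>

    struct example_cc {
        uint32_t snd_cwnd;
        uint32_t t_maxseg;
        uint32_t t_bytes_acked;
        int      rexmt_armed;
    };

    static void
    example_on_enobufs(struct example_cc *cc, int have_unsent_or_unacked)
    {
        if (!cc->rexmt_armed && have_unsent_or_unacked) {
            cc->rexmt_armed = 1;        /* retry after the RTO fires */
        }
        cc->snd_cwnd = cc->t_maxseg;    /* restart from one segment */
        cc->t_bytes_acked = 0;
    }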
tcp_check_timer_state(tp); - KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0); tcp_ccdbg_trace(tp, NULL, TCP_CC_OUTPUT_ERROR); return 0; @@ -2684,35 +2806,37 @@ out: * immediatly retry with MSS sized segments generated * by this function. */ - if (tso) + if (tso) { tp->t_flags &= ~TF_TSO; + } tcp_mtudisc(inp, 0); tcp_check_timer_state(tp); - KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0); return 0; } /* * Unless this is due to interface restriction policy, - * treat EHOSTUNREACH/ENETDOWN as a soft error. + * treat EHOSTUNREACH/ENETDOWN/EADDRNOTAVAIL as a soft error. */ - if ((error == EHOSTUNREACH || error == ENETDOWN) && + if ((error == EHOSTUNREACH || error == ENETDOWN || error == EADDRNOTAVAIL) && TCPS_HAVERCVDSYN(tp->t_state) && !inp_restricted_send(inp, inp->inp_last_outifp)) { - tp->t_softerror = error; - error = 0; + tp->t_softerror = error; + error = 0; } tcp_check_timer_state(tp); - KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0); return error; } tcpstat.tcps_sndtotal++; - KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END,0,0,0,0,0); - if (sendalot) + KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0); + if (sendalot) { goto again; + } tcp_check_timer_state(tp); @@ -2731,13 +2855,13 @@ tcp_ip_output(struct socket *so, struct tcpcb *tp, struct mbuf *pkt, struct ip_out_args ipoa; struct route ro; struct ifnet *outif = NULL; + bool check_qos_marking_again = (so->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE) ? FALSE : TRUE; bzero(&ipoa, sizeof(ipoa)); ipoa.ipoa_boundif = IFSCOPE_NONE; ipoa.ipoa_flags = IPOAF_SELECT_SRCIF | IPOAF_BOUND_SRCADDR; ipoa.ipoa_sotc = SO_TC_UNSPEC; ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC; -#if INET6 struct ip6_out_args ip6oa; struct route_in6 ro6; @@ -2749,93 +2873,84 @@ tcp_ip_output(struct socket *so, struct tcpcb *tp, struct mbuf *pkt, struct flowadv *adv = (isipv6 ? 
&ip6oa.ip6oa_flowadv : &ipoa.ipoa_flowadv); -#else /* INET6 */ - struct flowadv *adv = &ipoa.ipoa_flowadv; -#endif /* !INET6 */ /* If socket was bound to an ifindex, tell ip_output about it */ if (inp->inp_flags & INP_BOUND_IF) { -#if INET6 if (isipv6) { ip6oa.ip6oa_boundif = inp->inp_boundifp->if_index; ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF; - } else -#endif /* INET6 */ - { + } else { ipoa.ipoa_boundif = inp->inp_boundifp->if_index; ipoa.ipoa_flags |= IPOAF_BOUND_IF; } } if (INP_NO_CELLULAR(inp)) { -#if INET6 - if (isipv6) + if (isipv6) { ip6oa.ip6oa_flags |= IP6OAF_NO_CELLULAR; - else -#endif /* INET6 */ + } else { ipoa.ipoa_flags |= IPOAF_NO_CELLULAR; + } } if (INP_NO_EXPENSIVE(inp)) { -#if INET6 - if (isipv6) + if (isipv6) { ip6oa.ip6oa_flags |= IP6OAF_NO_EXPENSIVE; - else -#endif /* INET6 */ + } else { ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE; - + } } if (INP_NO_CONSTRAINED(inp)) { -#if INET6 - if (isipv6) + if (isipv6) { ip6oa.ip6oa_flags |= IP6OAF_NO_CONSTRAINED; - else -#endif /* INET6 */ + } else { ipoa.ipoa_flags |= IPOAF_NO_CONSTRAINED; + } } if (INP_AWDL_UNRESTRICTED(inp)) { -#if INET6 - if (isipv6) + if (isipv6) { ip6oa.ip6oa_flags |= IP6OAF_AWDL_UNRESTRICTED; - else -#endif /* INET6 */ + } else { ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED; - + } } -#if INET6 if (INP_INTCOPROC_ALLOWED(inp) && isipv6) { ip6oa.ip6oa_flags |= IP6OAF_INTCOPROC_ALLOWED; } if (isipv6) { ip6oa.ip6oa_sotc = so->so_traffic_class; ip6oa.ip6oa_netsvctype = so->so_netsvctype; - } else -#endif /* INET6 */ - { + ip6oa.qos_marking_gencount = inp->inp_policyresult.results.qos_marking_gencount; + } else { ipoa.ipoa_sotc = so->so_traffic_class; ipoa.ipoa_netsvctype = so->so_netsvctype; + ipoa.qos_marking_gencount = inp->inp_policyresult.results.qos_marking_gencount; } if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) { -#if INET6 - if (isipv6) + if (isipv6) { ip6oa.ip6oa_flags |= IP6OAF_QOSMARKING_ALLOWED; - else -#endif /* INET6 */ + } else { ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED; + } } -#if INET6 - if (isipv6) + if (check_qos_marking_again) { + if (isipv6) { + ip6oa.ip6oa_flags |= IP6OAF_REDO_QOSMARKING_POLICY; + } else { + ipoa.ipoa_flags |= IPOAF_REDO_QOSMARKING_POLICY; + } + } + if (isipv6) { flags |= IPV6_OUTARGS; - else -#endif /* INET6 */ + } else { flags |= IP_OUTARGS; + } /* Copy the cached route and take an extra reference */ -#if INET6 - if (isipv6) + if (isipv6) { in6p_route_copyout(inp, &ro6); - else -#endif /* INET6 */ + } else { inp_route_copyout(inp, &ro); + } /* * Make sure ACK/DELACK conditions are cleared before @@ -2861,7 +2976,6 @@ tcp_ip_output(struct socket *so, struct tcpcb *tp, struct mbuf *pkt, if (tcp_output_unlocked && !so->so_upcallusecount && (tp->t_state == TCPS_ESTABLISHED) && (sack_in_progress == 0) && !IN_FASTRECOVERY(tp) && !(so->so_flags & SOF_MP_SUBFLOW)) { - unlocked = TRUE; socket_unlock(so, 0); } @@ -2875,12 +2989,9 @@ tcp_ip_output(struct socket *so, struct tcpcb *tp, struct mbuf *pkt, chain = tcp_packet_chaining > 1 #if IPSEC - && ipsec_bypass -#endif -#if IPFIREWALL - && (fw_enable == 0 || fw_bypass) + && ipsec_bypass #endif - ; // I'm important, not extraneous + ; // I'm important, not extraneous while (pkt != NULL) { struct mbuf *npkt = pkt->m_nextpkt; @@ -2900,14 +3011,12 @@ tcp_ip_output(struct socket *so, struct tcpcb *tp, struct mbuf *pkt, */ cnt = 0; } -#if INET6 if (isipv6) { error = ip6_output_list(pkt, cnt, inp->in6p_outputopts, &ro6, flags, NULL, NULL, &ip6oa); ifdenied = (ip6oa.ip6oa_retflags & IP6OARF_IFDENIED); } else { -#endif /* INET6 */ error = 
ip_output_list(pkt, cnt, opt, &ro, flags, NULL, &ipoa); ifdenied = (ipoa.ipoa_retflags & IPOARF_IFDENIED); @@ -2919,15 +3028,17 @@ tcp_ip_output(struct socket *so, struct tcpcb *tp, struct mbuf *pkt, * the callee had taken care of everything; else * we need to free the rest of the chain ourselves. */ - if (!chain) + if (!chain) { m_freem_list(npkt); + } break; } pkt = npkt; } - if (unlocked) + if (unlocked) { socket_lock(so, 0); + } /* * Enter flow controlled state if the connection is established @@ -2945,10 +3056,11 @@ tcp_ip_output(struct socket *so, struct tcpcb *tp, struct mbuf *pkt, int rc; rc = inp_set_fc_state(inp, adv->code); - if (rc == 1) + if (rc == 1) { tcp_ccdbg_trace(tp, NULL, ((adv->code == FADV_FLOW_CONTROLLED) ? TCP_CC_FLOW_CONTROL : TCP_CC_SUSPEND)); + } } /* @@ -2956,18 +3068,18 @@ tcp_ip_output(struct socket *so, struct tcpcb *tp, struct mbuf *pkt, * packets are dropped. Return ENOBUFS, to update the * pcb state. */ - if (adv->code == FADV_SUSPENDED) + if (adv->code == FADV_SUSPENDED) { error = ENOBUFS; + } VERIFY(inp->inp_sndinprog_cnt > 0); - if ( --inp->inp_sndinprog_cnt == 0) { + if (--inp->inp_sndinprog_cnt == 0) { inp->inp_flags &= ~(INP_FC_FEEDBACK); if (inp->inp_sndingprog_waiters > 0) { wakeup(&inp->inp_sndinprog_cnt); } } -#if INET6 if (isipv6) { /* * When an NECP IP tunnel policy forces the outbound interface, @@ -2979,10 +3091,28 @@ tcp_ip_output(struct socket *so, struct tcpcb *tp, struct mbuf *pkt, } else if (ro6.ro_rt != NULL) { outif = ro6.ro_rt->rt_ifp; } - } else -#endif /* INET6 */ - if (ro.ro_rt != NULL) + } else { + if (ro.ro_rt != NULL) { outif = ro.ro_rt->rt_ifp; + } + } + if (check_qos_marking_again) { + uint32_t qos_marking_gencount; + bool allow_qos_marking; + if (isipv6) { + qos_marking_gencount = ip6oa.qos_marking_gencount; + allow_qos_marking = ip6oa.ip6oa_flags & IP6OAF_QOSMARKING_ALLOWED ? TRUE : FALSE; + } else { + qos_marking_gencount = ipoa.qos_marking_gencount; + allow_qos_marking = ipoa.ipoa_flags & IPOAF_QOSMARKING_ALLOWED ? 
TRUE : FALSE; + } + inp->inp_policyresult.results.qos_marking_gencount = qos_marking_gencount; + if (allow_qos_marking == TRUE) { + inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED; + } else { + inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED; + } + } if (outif != NULL && outif != inp->inp_last_outifp) { /* Update the send byte count */ @@ -2992,24 +3122,23 @@ so->so_snd.sb_flags &= ~SB_SNDBYTE_CNT; } inp->inp_last_outifp = outif; - } if (error != 0 && ifdenied && - (INP_NO_CELLULAR(inp) || INP_NO_EXPENSIVE(inp) || INP_NO_CONSTRAINED(inp))) + (INP_NO_CELLULAR(inp) || INP_NO_EXPENSIVE(inp) || INP_NO_CONSTRAINED(inp))) { soevent(so, - (SO_FILT_HINT_LOCKED|SO_FILT_HINT_IFDENIED)); + (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED)); + } /* Synchronize cached PCB route & options */ -#if INET6 - if (isipv6) + if (isipv6) { in6p_route_copyin(inp, &ro6); - else -#endif /* INET6 */ + } else { inp_route_copyin(inp, &ro); + } if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift == 0 && - tp->t_inpcb->inp_route.ro_rt != NULL) { + tp->t_inpcb->inp_route.ro_rt != NULL) { /* If we found the route and there is an rtt on it * reset the retransmit timer */ @@ -3046,111 +3175,13 @@ tcp_setpersist(struct tcpcb *tp) tcptv_persmin_val, TCPTV_PERSMAX, 0); tp->t_timer[TCPT_PERSIST] = OFFSET_FROM_START(tp, tp->t_timer[TCPT_PERSIST]); - if (tp->t_rxtshift < TCP_MAXRXTSHIFT) + if (tp->t_rxtshift < TCP_MAXRXTSHIFT) { tp->t_rxtshift++; -} - -/* - * Send as many acks as data coalesced. Every other packet when stretch - * ACK is not enabled. Every 8 packets, if stretch ACK is enabled. - */ -static struct mbuf* -tcp_send_lroacks(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th) -{ - struct mbuf *mnext = NULL, *ack_chain = NULL, *tail = NULL; - int count = 0; - tcp_seq org_ack = ntohl(th->th_ack); - tcp_seq prev_ack = 0; - int tack_offset = 28; /* IPv6 and IP options not supported */ - int twin_offset = 34; /* IPv6 and IP options not supported */ - int ack_size = (tp->t_flags & TF_STRETCHACK) ? - (maxseg_unacked * tp->t_maxseg) : (tp->t_maxseg << 1); - int segs_acked = (tp->t_flags & TF_STRETCHACK) ? maxseg_unacked : 2; - struct mbuf *prev_ack_pkt = NULL; - struct socket *so = tp->t_inpcb->inp_socket; - unsigned short winsz = ntohs(th->th_win); - unsigned int scaled_win = winsz<<tp->rcv_scale; - tcp_seq win_rtedge = org_ack + scaled_win; - - count = tp->t_lropktlen/tp->t_maxseg; - - prev_ack = (org_ack - tp->t_lropktlen) + ack_size; - if (prev_ack < org_ack) { - ack_chain = m_dup(m, M_DONTWAIT); - if (ack_chain) { - th->th_ack = htonl(prev_ack); - /* Keep adv window constant for duplicated ACK packets */ - scaled_win = win_rtedge - prev_ack; - if (scaled_win > (int32_t)(TCP_MAXWIN << tp->rcv_scale)) - scaled_win = (int32_t)(TCP_MAXWIN << tp->rcv_scale); - th->th_win = htons(scaled_win>>tp->rcv_scale); - if (lrodebug == 5) { - printf("%s: win = %d winsz = %d sc = %d" - " lro_len %d %d\n", - __func__, scaled_win>>tp->rcv_scale, winsz, - tp->rcv_scale, tp->t_lropktlen, count); - } - tail = ack_chain; - count -= segs_acked; /* accounts for prev_ack packet */ - count = (count <= segs_acked) ?
0 : count - segs_acked; - tcpstat.tcps_sndacks++; - so_tc_update_stats(m, so, m_get_service_class(m)); - } else { - return NULL; - } - } - else { - tp->t_lropktlen = 0; - return NULL; } - - prev_ack_pkt = ack_chain; - - while (count > 0) { - if ((prev_ack + ack_size) < org_ack) { - prev_ack += ack_size; - } else { - /* - * The last ACK sent must have the ACK number that TCP - * thinks is the last sent ACK number. - */ - prev_ack = org_ack; - } - mnext = m_dup(prev_ack_pkt, M_DONTWAIT); - if (mnext) { - /* Keep adv window constant for duplicated ACK packets */ - scaled_win = win_rtedge - prev_ack; - if (scaled_win > (int32_t)(TCP_MAXWIN << tp->rcv_scale)) - scaled_win = (int32_t)(TCP_MAXWIN << tp->rcv_scale); - winsz = htons(scaled_win>>tp->rcv_scale); - if (lrodebug == 5) { - printf("%s: winsz = %d ack %x count %d\n", - __func__, scaled_win>>tp->rcv_scale, - prev_ack, count); - } - bcopy(&winsz, mtod(prev_ack_pkt, caddr_t) + twin_offset, 2); - HTONL(prev_ack); - bcopy(&prev_ack, mtod(prev_ack_pkt, caddr_t) + tack_offset, 4); - NTOHL(prev_ack); - tail->m_nextpkt = mnext; - tail = mnext; - count -= segs_acked; - tcpstat.tcps_sndacks++; - so_tc_update_stats(m, so, m_get_service_class(m)); - } else { - if (lrodebug == 5) { - printf("%s: failed to alloc mbuf.\n", __func__); - } - break; - } - prev_ack_pkt = mnext; - } - tp->t_lropktlen = 0; - return ack_chain; } static int -tcp_recv_throttle (struct tcpcb *tp) +tcp_recv_throttle(struct tcpcb *tp) { uint32_t base_rtt, newsize; struct sockbuf *sbrcv = &tp->t_inpcb->inp_socket->so_rcv; @@ -3166,8 +3197,9 @@ tcp_recv_throttle (struct tcpcb *tp) * If the connection is already being throttled, leave it * in that state until rtt comes closer to base rtt */ - if (tp->t_flagsext & TF_RECV_THROTTLE) + if (tp->t_flagsext & TF_RECV_THROTTLE) { return 1; + } base_rtt = get_base_rtt(tp); @@ -3178,8 +3210,9 @@ tcp_recv_throttle (struct tcpcb *tp) */ if (tp->t_rttcur > (base_rtt + target_qdelay)) { tp->t_flagsext |= TF_RECV_THROTTLE; - if (tp->t_recv_throttle_ts == 0) + if (tp->t_recv_throttle_ts == 0) { tp->t_recv_throttle_ts = tcp_now; + } /* * Reduce the recv socket buffer size to * minimize latecy. @@ -3204,8 +3237,9 @@ tcp_recv_throttle (struct tcpcb *tp) * Timestamps are not supported or there is no good RTT * measurement. Use IPDV in this case. */ - if (tp->acc_iaj > tcp_acc_iaj_react_limit) + if (tp->acc_iaj > tcp_acc_iaj_react_limit) { return 1; + } return 0; }
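[Aside on tcp_recv_throttle, shown above with its brace cleanups: it decides whether a background receiver should clamp its advertised window. The following is a minimal sketch of that decision restated as a standalone predicate, under stated assumptions: the struct fields are hypothetical flattenings of tcpcb state (TF_RECV_THROTTLE, t_rttcur, get_base_rtt(), target_qdelay, acc_iaj, tcp_acc_iaj_react_limit), and the side effects the kernel performs when throttling starts (stamping t_recv_throttle_ts and shrinking the receive socket buffer) are omitted.]

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical flattened inputs; the kernel reads these off the tcpcb. */
    struct recv_throttle_in {
            bool     already_throttled;   /* TF_RECV_THROTTLE already set */
            bool     rtt_usable;          /* timestamps gave a usable t_rttcur */
            uint32_t rtt_cur;             /* most recent RTT measurement */
            uint32_t base_rtt;            /* get_base_rtt(tp) */
            uint32_t target_qdelay;       /* allowed queueing-delay budget */
            uint32_t acc_iaj;             /* accumulated inter-arrival jitter */
            uint32_t iaj_react_limit;     /* tcp_acc_iaj_react_limit */
    };

    /*
     * Throttling is sticky: once set it stays until RTT comes back toward
     * the base RTT.  Otherwise throttle when the current RTT exceeds base
     * RTT plus the queueing-delay target; with no usable RTT, fall back
     * to the accumulated jitter (IPDV) signal.
     */
    static bool
    should_recv_throttle(const struct recv_throttle_in *in)
    {
            if (in->already_throttled) {
                    return true;
            }
            if (in->rtt_usable && in->rtt_cur > 0) {
                    return in->rtt_cur > in->base_rtt + in->target_qdelay;
            }
            return in->acc_iaj > in->iaj_react_limit;
    }

    int
    main(void)
    {
            struct recv_throttle_in in = {
                    .already_throttled = false, .rtt_usable = true,
                    .rtt_cur = 180, .base_rtt = 100, .target_qdelay = 50,
            };
            /* Prints "throttle=1", since 180 exceeds 100 + 50. */
            printf("throttle=%d\n", should_recv_throttle(&in));
            return 0;
    }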