X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/4b17d6b6e417f714551ec129064745ea9919780e..HEAD:/bsd/netinet/tcp_ledbat.c

diff --git a/bsd/netinet/tcp_ledbat.c b/bsd/netinet/tcp_ledbat.c
index d13dc50bf..f345934a7 100644
--- a/bsd/netinet/tcp_ledbat.c
+++ b/bsd/netinet/tcp_ledbat.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010-2011 Apple Inc. All rights reserved.
+ * Copyright (c) 2010-2020 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -37,9 +37,7 @@
 #include <netinet/in_systm.h>
 #include <netinet/ip.h>
 
-#if INET6
 #include <netinet/ip6.h>
-#endif
 
 #include <netinet/tcp.h>
 #include <netinet/tcp_fsm.h>
@@ -58,13 +56,13 @@
 int tcp_ledbat_init(struct tcpcb *tp);
 int tcp_ledbat_cleanup(struct tcpcb *tp);
 void tcp_ledbat_cwnd_init(struct tcpcb *tp);
-void tcp_ledbat_inseq_ack_rcvd(struct tcpcb *tp, struct tcphdr *th);
+void tcp_ledbat_congestion_avd(struct tcpcb *tp, struct tcphdr *th);
 void tcp_ledbat_ack_rcvd(struct tcpcb *tp, struct tcphdr *th);
 void tcp_ledbat_pre_fr(struct tcpcb *tp);
 void tcp_ledbat_post_fr(struct tcpcb *tp, struct tcphdr *th);
 void tcp_ledbat_after_idle(struct tcpcb *tp);
 void tcp_ledbat_after_timeout(struct tcpcb *tp);
-int tcp_ledbat_delay_ack(struct tcpcb *tp, struct tcphdr *th);
+static int tcp_ledbat_delay_ack(struct tcpcb *tp, struct tcphdr *th);
 void tcp_ledbat_switch_cc(struct tcpcb *tp, uint16_t old_cc_index);
 
 struct tcp_cc_algo tcp_cc_ledbat = {
@@ -72,7 +70,7 @@ struct tcp_cc_algo tcp_cc_ledbat = {
 	.init = tcp_ledbat_init,
 	.cleanup = tcp_ledbat_cleanup,
 	.cwnd_init = tcp_ledbat_cwnd_init,
-	.inseq_ack_rcvd = tcp_ledbat_inseq_ack_rcvd,
+	.congestion_avd = tcp_ledbat_congestion_avd,
 	.ack_rcvd = tcp_ledbat_ack_rcvd,
 	.pre_fr = tcp_ledbat_pre_fr,
 	.post_fr = tcp_ledbat_post_fr,
@@ -82,108 +80,102 @@ struct tcp_cc_algo tcp_cc_ledbat = {
 	.switch_to = tcp_ledbat_switch_cc
 };
 
-extern int tcp_do_rfc3465;
-extern int tcp_do_rfc3465_lim2;
-extern uint32_t get_base_rtt(struct tcpcb *tp);
-
-/* Target queuing delay in milliseconds. This includes the processing
- * and scheduling delay on both of the end-hosts. A LEDBAT sender tries
+/* Target queuing delay in milliseconds. This includes the processing
+ * and scheduling delay on both of the end-hosts. A LEDBAT sender tries
  * to keep queuing delay below this limit. When the queuing delay
- * goes above this limit, a LEDBAT sender will start reducing the
+ * goes above this limit, a LEDBAT sender will start reducing the
  * congestion window.
  *
- * The LEDBAT draft says that target queue delay MUST be 100 ms for
+ * The LEDBAT draft says that target queue delay MUST be 100 ms for
  * inter-operability.
  */
-int target_qdelay = 100;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, bg_target_qdelay, CTLFLAG_RW | CTLFLAG_LOCKED,
-	&target_qdelay , 100, "Target queuing delay");
+SYSCTL_SKMEM_TCP_INT(OID_AUTO, bg_target_qdelay, CTLFLAG_RW | CTLFLAG_LOCKED,
+	int, target_qdelay, 100, "Target queuing delay");
 
 /* Allowed increase and tether are used to place an upper bound on
  * congestion window based on the amount of data that is outstanding.
- * This will limit the congestion window when the amount of data in
+ * This will limit the congestion window when the amount of data in
  * flight is little because the application is writing to the socket
- * intermittently and is preventing the connection from becoming idle .
+ * intermittently and is preventing the connection from becoming idle .
  *
  * max_allowed_cwnd = allowed_increase + (tether * flight_size)
  * cwnd = min(cwnd, max_allowed_cwnd)
  *
  * 'Allowed_increase' parameter is set to 8. If the flight size is zero, then
  * we want the congestion window to be at least 8 packets to reduce the
- * delay induced by delayed ack. This helps when the receiver is acking
+ * delay induced by delayed ack. This helps when the receiver is acking
  * more than 2 packets at a time (stretching acks for better performance).
- *
+ *
  * 'Tether' is also set to 2. We do not want this to limit the growth of cwnd
  * during slow-start.
- */
-int allowed_increase = 8;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, bg_allowed_increase, CTLFLAG_RW | CTLFLAG_LOCKED,
-	&allowed_increase, 1, "Additive constant used to calculate max allowed congestion window");
+ */
+SYSCTL_SKMEM_TCP_INT(OID_AUTO, bg_allowed_increase, CTLFLAG_RW | CTLFLAG_LOCKED,
+	int, allowed_increase, 8,
+	"Additive constant used to calculate max allowed congestion window");
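The bound quoted in the comment above is plain integer arithmetic. A minimal
user-space sketch (the helper, its values, and the 1448-byte MSS are
illustrative assumptions, not kernel code) shows how the cap behaves at zero
and non-zero flight sizes:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the sysctl-backed globals above (illustrative only). */
static uint32_t allowed_increase = 8; /* segments */
static uint32_t tether_shift = 1;     /* left shift by 1 == tether of 2 */

/* Mirrors the bound applied at the end of update_cwnd(). */
static uint32_t
max_allowed_cwnd(uint32_t mss, uint32_t flight_size)
{
	return (allowed_increase * mss) + (flight_size << tether_shift);
}

int
main(void)
{
	uint32_t mss = 1448;

	/* No data in flight: the cap is 8 segments, enough that a
	 * stretch-acking receiver does not stall the sender. */
	printf("idle cap: %u bytes\n", (unsigned)max_allowed_cwnd(mss, 0));

	/* 10 segments in flight: the cap grows as 8*MSS + 2*flight_size,
	 * so it never throttles a connection that is filling the pipe. */
	printf("busy cap: %u bytes\n", (unsigned)max_allowed_cwnd(mss, 10 * mss));
	return 0;
}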
 
 /* Left shift for cwnd to get tether value of 2 */
-int tether_shift = 1;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, bg_tether_shift, CTLFLAG_RW | CTLFLAG_LOCKED,
-	&tether_shift, 1, "Tether shift for max allowed congestion window");
+SYSCTL_SKMEM_TCP_INT(OID_AUTO, bg_tether_shift, CTLFLAG_RW | CTLFLAG_LOCKED,
+	int, tether_shift, 1, "Tether shift for max allowed congestion window");
 
-/* Start with an initial window of 2. This will help to get more accurate
+/* Start with an initial window of 2. This will help to get more accurate
  * minimum RTT measurement in the beginning. It will help to probe
  * the path slowly and will not add to the existing delay if the path is
  * already congested. Using 2 packets will reduce the delay induced by delayed-ack.
  */
-uint32_t bg_ss_fltsz = 2;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, bg_ss_fltsz, CTLFLAG_RW | CTLFLAG_LOCKED,
-	&bg_ss_fltsz, 2, "Initial congestion window for background transport");
+SYSCTL_SKMEM_TCP_INT(OID_AUTO, bg_ss_fltsz, CTLFLAG_RW | CTLFLAG_LOCKED,
+	uint32_t, bg_ss_fltsz, 2, "Initial congestion window for background transport");
 
 extern int rtt_samples_per_slot;
 
-static void update_cwnd(struct tcpcb *tp, uint32_t incr) {
+static void
+update_cwnd(struct tcpcb *tp, uint32_t incr)
+{
 	uint32_t max_allowed_cwnd = 0, flight_size = 0;
-	uint32_t qdelay, base_rtt;
-	int32_t off_target;
+	uint32_t base_rtt;
 
 	base_rtt = get_base_rtt(tp);
 
 	/* If we do not have a good RTT measurement yet, increment
-	 * congestion window by the default value.
+	 * congestion window by the default value.
 	 */
 	if (base_rtt == 0 || tp->t_rttcur == 0) {
 		tp->snd_cwnd += incr;
 		goto check_max;
 	}
-
-	qdelay = tp->t_rttcur - base_rtt;
-	off_target = (int32_t)(target_qdelay - qdelay);
 
-	if (off_target >= 0) {
-		/* Delay decreased or remained the same, we can increase
+	if (tp->t_rttcur <= (base_rtt + target_qdelay)) {
+		/*
+		 * Delay decreased or remained the same, we can increase
 		 * the congestion window according to RFC 3465.
 		 *
 		 * Move background slow-start threshold to current
 		 * congestion window so that the next time (after some idle
-		 * period), we can attempt to do slow-start till here if there
+		 * period), we can attempt to do slow-start till here if there
 		 * is no increase in rtt
 		 */
-		if (tp->bg_ssthresh < tp->snd_cwnd)
+		if (tp->bg_ssthresh < tp->snd_cwnd) {
 			tp->bg_ssthresh = tp->snd_cwnd;
-		tp->snd_cwnd += incr;
-
+		}
+		tp->snd_cwnd += incr;
 	} else {
-		/* In response to an increase in rtt, reduce the congestion
-		 * window by one-eighth. This will help to yield immediately
+		/* In response to an increase in rtt, reduce the congestion
+		 * window by one-eighth. This will help to yield immediately
 		 * to a competing stream.
 		 */
 		uint32_t redwin;
 
-		redwin = tp->snd_cwnd >> 3;
+		redwin = tp->snd_cwnd >> 3;
 		tp->snd_cwnd -= redwin;
-		if (tp->snd_cwnd < bg_ss_fltsz * tp->t_maxseg)
+		if (tp->snd_cwnd < bg_ss_fltsz * tp->t_maxseg) {
 			tp->snd_cwnd = bg_ss_fltsz * tp->t_maxseg;
+		}
 
-		/* Lower background slow-start threshold so that the connection
+		/* Lower background slow-start threshold so that the connection
 		 * will go into congestion avoidance phase
 		 */
-		if (tp->bg_ssthresh > tp->snd_cwnd)
+		if (tp->bg_ssthresh > tp->snd_cwnd) {
 			tp->bg_ssthresh = tp->snd_cwnd;
+		}
 	}
 check_max:
 	/* Calculate the outstanding flight size and restrict the
@@ -191,44 +183,50 @@ check_max:
 	 */
 	flight_size = tp->snd_max - tp->snd_una;
 
-	max_allowed_cwnd = (allowed_increase * tp->t_maxseg)
-		+ (flight_size << tether_shift);
+	max_allowed_cwnd = (allowed_increase * tp->t_maxseg) +
+	    (flight_size << tether_shift);
 	tp->snd_cwnd = min(tp->snd_cwnd, max_allowed_cwnd);
 	return;
 }
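The rewritten branch above is the core LEDBAT control law: keep growing while
the current RTT stays within target_qdelay of the base RTT, otherwise shed
one-eighth of the window. A self-contained sketch of that decision (struct
conn, ledbat_update, and the constants are illustrative assumptions, not xnu
types; times are in milliseconds):

#include <stdint.h>

struct conn {
	uint32_t cwnd;     /* congestion window, bytes */
	uint32_t ssthresh; /* background slow-start threshold, bytes */
};

#define TARGET_QDELAY_MS 100         /* LEDBAT target queuing delay */
#define MIN_CWND_BYTES   (2 * 1448)  /* bg_ss_fltsz segments of MSS */

static void
ledbat_update(struct conn *c, uint32_t rtt_cur, uint32_t base_rtt,
    uint32_t incr)
{
	if (rtt_cur <= base_rtt + TARGET_QDELAY_MS) {
		/* Queuing delay at or under target: grow, and remember how
		 * far we grew so a later slow-start can return here. */
		if (c->ssthresh < c->cwnd) {
			c->ssthresh = c->cwnd;
		}
		c->cwnd += incr;
	} else {
		/* Delay overshoot: back off by one-eighth so the flow
		 * yields quickly to competing foreground traffic. */
		c->cwnd -= c->cwnd >> 3;
		if (c->cwnd < MIN_CWND_BYTES) {
			c->cwnd = MIN_CWND_BYTES;
		}
		if (c->ssthresh > c->cwnd) {
			c->ssthresh = c->cwnd;
		}
	}
}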
 
-int tcp_ledbat_init(struct tcpcb *tp) {
+int
+tcp_ledbat_init(struct tcpcb *tp)
+{
 #pragma unused(tp)
 	OSIncrementAtomic((volatile SInt32 *)&tcp_cc_ledbat.num_sockets);
 	return 0;
 }
 
-int tcp_ledbat_cleanup(struct tcpcb *tp) {
+int
+tcp_ledbat_cleanup(struct tcpcb *tp)
+{
 #pragma unused(tp)
 	OSDecrementAtomic((volatile SInt32 *)&tcp_cc_ledbat.num_sockets);
 	return 0;
 }
 
-/* Initialize the congestion window for a connection
- *
+/* Initialize the congestion window for a connection
+ *
  */
 void
-tcp_ledbat_cwnd_init(struct tcpcb *tp) {
+tcp_ledbat_cwnd_init(struct tcpcb *tp)
+{
 	tp->snd_cwnd = tp->t_maxseg * bg_ss_fltsz;
 	tp->bg_ssthresh = tp->snd_ssthresh;
 }
 
-/* Function to handle an in-sequence ack which is fast-path processing
- * of an in sequence ack in tcp_input function (called as header prediction).
+/* Function to handle an in-sequence ack which is fast-path processing
+ * of an in sequence ack in tcp_input function (called as header prediction).
  * This gets called only during congestion avoidance phase.
  */
 void
-tcp_ledbat_inseq_ack_rcvd(struct tcpcb *tp, struct tcphdr *th) {
+tcp_ledbat_congestion_avd(struct tcpcb *tp, struct tcphdr *th)
+{
 	int acked = 0;
 	u_int32_t incr = 0;
 
-	acked = th->th_ack - tp->snd_una;
+	acked = BYTES_ACKED(th, tp);
 	tp->t_bytes_acked += acked;
 	if (tp->t_bytes_acked > tp->snd_cwnd) {
 		tp->t_bytes_acked -= tp->snd_cwnd;
@@ -242,7 +240,8 @@ tcp_ledbat_inseq_ack_rcvd(struct tcpcb *tp, struct tcphdr *th) {
 
 /* Function to process an ack.
  */
 void
-tcp_ledbat_ack_rcvd(struct tcpcb *tp, struct tcphdr *th) {
+tcp_ledbat_ack_rcvd(struct tcpcb *tp, struct tcphdr *th)
+{
 	/*
 	 * RFC 3465 - Appropriate Byte Counting.
 	 *
@@ -256,11 +255,11 @@ tcp_ledbat_ack_rcvd(struct tcpcb *tp, struct tcphdr *th) {
 	 * greater than or equal to the congestion window.
 	 */
 
-	register u_int cw = tp->snd_cwnd;
-	register u_int incr = tp->t_maxseg;
-	int acked = 0;
+	uint32_t cw = tp->snd_cwnd;
+	uint32_t incr = tp->t_maxseg;
+	uint32_t acked = 0;
 
-	acked = th->th_ack - tp->snd_una;
+	acked = BYTES_ACKED(th, tp);
 	tp->t_bytes_acked += acked;
 	if (cw >= tp->bg_ssthresh) {
 		/* congestion-avoidance */
@@ -278,37 +277,46 @@ tcp_ledbat_ack_rcvd(struct tcpcb *tp, struct tcphdr *th) {
 		 */
 		u_int abc_lim;
 
-		abc_lim = (tcp_do_rfc3465_lim2 &&
-		    tp->snd_nxt == tp->snd_max) ? incr * 2 : incr;
+		abc_lim = (tp->snd_nxt == tp->snd_max) ? incr * 2 : incr;
 
-		incr = lmin(acked, abc_lim);
+		incr = ulmin(acked, abc_lim);
 	}
-	if (tp->t_bytes_acked >= cw)
+	if (tp->t_bytes_acked >= cw) {
 		tp->t_bytes_acked -= cw;
-	if (incr > 0)
+	}
+	if (incr > 0) {
 		update_cwnd(tp, incr);
+	}
 }
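The slow-start arm above applies RFC 3465 Appropriate Byte Counting with L=2:
an ack may grow the window by at most two segments, and only while all
outstanding data has been acked. A compact sketch of that cap (abc_increment
and umin are hypothetical names, not xnu functions):

#include <stdint.h>

static uint32_t
umin(uint32_t a, uint32_t b)
{
	return a < b ? a : b;
}

/* During congestion avoidance cwnd grows by one MSS per cwnd's worth of
 * acked bytes; during slow start it grows by the acked byte count,
 * capped at 2*MSS (L=2) while all sent data has been acknowledged. */
static uint32_t
abc_increment(uint32_t acked, uint32_t mss, int slow_start,
    int all_data_acked)
{
	if (!slow_start) {
		return mss; /* applied only once t_bytes_acked >= cwnd */
	}
	uint32_t abc_lim = all_data_acked ? mss * 2 : mss;
	return umin(acked, abc_lim);
}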
 
 void
-tcp_ledbat_pre_fr(struct tcpcb *tp) {
+tcp_ledbat_pre_fr(struct tcpcb *tp)
+{
 	uint32_t win;
 
-	win = min(tp->snd_wnd, tp->snd_cwnd) /
-		2 / tp->t_maxseg;
-	if ( win < 2 )
+	win = min(tp->snd_wnd, tp->snd_cwnd) /
+	    2 / tp->t_maxseg;
+	if (win < 2) {
 		win = 2;
-	tp->snd_ssthresh = win * tp->t_maxseg;
-	if (tp->bg_ssthresh > tp->snd_ssthresh)
+	}
+	tp->snd_ssthresh = win * tp->t_maxseg;
+	if (tp->bg_ssthresh > tp->snd_ssthresh) {
 		tp->bg_ssthresh = tp->snd_ssthresh;
+	}
 
 	tcp_cc_resize_sndbuf(tp);
 }
 
 void
-tcp_ledbat_post_fr(struct tcpcb *tp, struct tcphdr *th) {
+tcp_ledbat_post_fr(struct tcpcb *tp, struct tcphdr *th)
+{
 	int32_t ss;
 
-	ss = tp->snd_max - th->th_ack;
+	if (th) {
+		ss = tp->snd_max - th->th_ack;
+	} else {
+		ss = tp->snd_max - tp->snd_una;
+	}
 
 	/*
 	 * Complete ack. Inflate the congestion window to
@@ -318,11 +326,16 @@ tcp_ledbat_post_fr(struct tcpcb *tp, struct tcphdr *th) {
 	 * snd_ssthresh outstanding data. But in case we
 	 * would be inclined to send a burst, better to do
 	 * it via the slow start mechanism.
+	 *
+	 * If the flight size is zero, then make congestion
+	 * window to be worth at least 2 segments to avoid
+	 * delayed acknowledgement (draft-ietf-tcpm-rfc3782-bis-05).
 	 */
-	if (ss < (int32_t)tp->snd_ssthresh)
-		tp->snd_cwnd = ss + tp->t_maxseg;
-	else
+	if (ss < (int32_t)tp->snd_ssthresh) {
+		tp->snd_cwnd = max(ss, tp->t_maxseg) + tp->t_maxseg;
+	} else {
 		tp->snd_cwnd = tp->snd_ssthresh;
+	}
 	tp->t_bytes_acked = 0;
 }
 
@@ -332,35 +345,13 @@ tcp_ledbat_post_fr(struct tcpcb *tp, struct tcphdr *th) {
  * Clear base history after idle time.
  */
 void
-tcp_ledbat_after_idle(struct tcpcb *tp) {
-	int32_t n = N_RTT_BASE, i = (N_RTT_BASE - 1);
-
-	/* Decide how many base history entries have to be cleared
-	 * based on how long the connection has been idle.
-	 */
-
-	if (tp->t_rttcur > 0) {
-		int32_t nrtt, idle_time;
-
-		idle_time = tcp_now - tp->t_rcvtime;
-		nrtt = idle_time / tp->t_rttcur;
-		n = nrtt / rtt_samples_per_slot;
-		if (n > N_RTT_BASE)
-			n = N_RTT_BASE;
-	}
-	for (i = (N_RTT_BASE - 1); n > 0; --i, --n) {
-		tp->rtt_hist[i] = 0;
-	}
-	for (n = (N_RTT_BASE - 1); i >= 0; --i, --n) {
-		tp->rtt_hist[n] = tp->rtt_hist[i];
-		tp->rtt_hist[i] = 0;
-	}
-
+tcp_ledbat_after_idle(struct tcpcb *tp)
+{
 	/* Reset the congestion window */
 	tp->snd_cwnd = tp->t_maxseg * bg_ss_fltsz;
 }
 
-/* Function to change the congestion window when the retransmit
+/* Function to change the congestion window when the retransmit
  * timer fires. The behavior is the same as that for best-effort
  * TCP, reduce congestion window to one segment and start probing
  * the link using "slow start". The slow start threshold is set
@@ -368,19 +359,20 @@ tcp_ledbat_after_idle(struct tcpcb *tp) {
  * threshold also.
  */
 void
-tcp_ledbat_after_timeout(struct tcpcb *tp) {
-	if (tp->t_state >= TCPS_ESTABLISHED) {
+tcp_ledbat_after_timeout(struct tcpcb *tp)
+{
+	if (tp->t_state >= TCPS_ESTABLISHED) {
 		u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
-		if (win < 2)
+		if (win < 2) {
 			win = 2;
-		tp->snd_cwnd = tp->t_maxseg;
+		}
 		tp->snd_ssthresh = win * tp->t_maxseg;
-		tp->t_bytes_acked = 0;
-		tp->t_dupacks = 0;
-		if (tp->bg_ssthresh > tp->snd_ssthresh)
+		if (tp->bg_ssthresh > tp->snd_ssthresh) {
 			tp->bg_ssthresh = tp->snd_ssthresh;
+		}
 
+		tp->snd_cwnd = tp->t_maxseg;
 		tcp_cc_resize_sndbuf(tp);
 	}
 }
 
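After fast recovery, the window is deflated to just above the data still in
flight; the max() added in the hunk above guarantees at least two segments'
worth even when the flight size is zero. A small sketch of that calculation
(postfr_cwnd is a hypothetical stand-in for the logic, not kernel code):

#include <stdint.h>

/* Keep one MSS of headroom beyond the bytes still outstanding (ss), but
 * never let cwnd collapse below 2*MSS, so a delayed-acking receiver
 * cannot stall the connection; cap at ssthresh on a complete ack. */
static uint32_t
postfr_cwnd(int32_t ss, uint32_t mss, uint32_t ssthresh)
{
	if (ss < (int32_t)ssthresh) {
		int32_t floor_ss = ss > (int32_t)mss ? ss : (int32_t)mss;
		return (uint32_t)floor_ss + mss;
	}
	return ssthresh;
}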
@@ -389,45 +381,54 @@ tcp_ledbat_after_timeout(struct tcpcb *tp) {
  * Indicate whether this ack should be delayed.
  * We can delay the ack if:
  *	- our last ack wasn't a 0-sized window.
- *	- the peer hasn't sent us a TH_PUSH data packet: if he did, take this
- * as a clue that we need to ACK without any delay. This helps higher
- * level protocols who won't send us more data even if the window is
- * open because their last "segment" hasn't been ACKed
+ *	- the peer hasn't sent us a TH_PUSH data packet: if he did, take this
+ * as a clue that we need to ACK without any delay. This helps higher
+ * level protocols who won't send us more data even if the window is
+ * open because their last "segment" hasn't been ACKed
  * Otherwise the receiver will ack every other full-sized segment or when the
- * delayed ack timer fires. This will help to generate better rtt estimates for
+ * delayed ack timer fires. This will help to generate better rtt estimates for
  * the other end if it is a ledbat sender.
- *
+ *
  */
-int
-tcp_ledbat_delay_ack(struct tcpcb *tp, struct tcphdr *th) {
-	if ((tp->t_flags & TF_RXWIN0SENT) == 0 &&
-	    (th->th_flags & TH_PUSH) == 0 &&
-	    (tp->t_unacksegs == 1))
-		return(1);
-	return(0);
+static int
+tcp_ledbat_delay_ack(struct tcpcb *tp, struct tcphdr *th)
+{
+	if (tcp_ack_strategy == TCP_ACK_STRATEGY_MODERN) {
+		return tcp_cc_delay_ack(tp, th);
+	} else {
+		if ((tp->t_flags & TF_RXWIN0SENT) == 0 &&
+		    (th->th_flags & TH_PUSH) == 0 && (tp->t_unacksegs == 1)) {
+			return 1;
+		}
+		return 0;
+	}
 }
 
-/* Change a connection to use ledbat. First, lower bg_ssthresh value
- * if it needs to be.
+/* Change a connection to use ledbat. First, lower bg_ssthresh value
+ * if it needs to be.
  */
 void
-tcp_ledbat_switch_cc(struct tcpcb *tp, uint16_t old_cc_index) {
+tcp_ledbat_switch_cc(struct tcpcb *tp, uint16_t old_cc_index)
+{
 #pragma unused(old_cc_index)
 	uint32_t cwnd;
 
-	if (tp->bg_ssthresh == 0 || tp->bg_ssthresh > tp->snd_ssthresh)
+	if (tp->bg_ssthresh == 0 || tp->bg_ssthresh > tp->snd_ssthresh) {
 		tp->bg_ssthresh = tp->snd_ssthresh;
+	}
 
 	cwnd = min(tp->snd_wnd, tp->snd_cwnd);
-	if (tp->snd_cwnd > tp->bg_ssthresh)
+	if (tp->snd_cwnd > tp->bg_ssthresh) {
 		cwnd = cwnd / tp->t_maxseg;
-	else
+	} else {
 		cwnd = cwnd / 2 / tp->t_maxseg;
+	}
 
-	if (cwnd < bg_ss_fltsz)
+	if (cwnd < bg_ss_fltsz) {
 		cwnd = bg_ss_fltsz;
+	}
 
 	tp->snd_cwnd = cwnd * tp->t_maxseg;
 	tp->t_bytes_acked = 0;
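For reference, the recalculation in tcp_ledbat_switch_cc converts the window
inherited from the previous congestion-control module into whole segments,
halving it when the connection had not yet crossed bg_ssthresh, and floors it
at the initial background window. A user-space sketch (u32min and
switch_cc_cwnd are illustrative names, not xnu functions):

#include <stdint.h>

static uint32_t
u32min(uint32_t a, uint32_t b)
{
	return a < b ? a : b;
}

/* Round the carried-over window down to whole segments; connections
 * still at or below bg_ssthresh restart from half that window, and
 * nothing goes below the 2-segment initial window (bg_ss_fltsz). */
static uint32_t
switch_cc_cwnd(uint32_t snd_wnd, uint32_t snd_cwnd,
    uint32_t bg_ssthresh, uint32_t mss)
{
	uint32_t segs = u32min(snd_wnd, snd_cwnd);

	segs = (snd_cwnd > bg_ssthresh) ? segs / mss : segs / 2 / mss;
	if (segs < 2) {
		segs = 2;
	}
	return segs * mss;
}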