/*
- * Copyright (c) 2010 Apple Inc. All rights reserved.
+ * Copyright (c) 2010-2014 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
int tcp_ledbat_init(struct tcpcb *tp);
int tcp_ledbat_cleanup(struct tcpcb *tp);
void tcp_ledbat_cwnd_init(struct tcpcb *tp);
-void tcp_ledbat_inseq_ack_rcvd(struct tcpcb *tp, struct tcphdr *th);
+void tcp_ledbat_congestion_avd(struct tcpcb *tp, struct tcphdr *th);
void tcp_ledbat_ack_rcvd(struct tcpcb *tp, struct tcphdr *th);
-void tcp_ledbat_pre_fr(struct tcpcb *tp, struct tcphdr *th);
+void tcp_ledbat_pre_fr(struct tcpcb *tp);
void tcp_ledbat_post_fr(struct tcpcb *tp, struct tcphdr *th);
void tcp_ledbat_after_idle(struct tcpcb *tp);
void tcp_ledbat_after_timeout(struct tcpcb *tp);
.init = tcp_ledbat_init,
.cleanup = tcp_ledbat_cleanup,
.cwnd_init = tcp_ledbat_cwnd_init,
- .inseq_ack_rcvd = tcp_ledbat_inseq_ack_rcvd,
+ .congestion_avd = tcp_ledbat_congestion_avd,
.ack_rcvd = tcp_ledbat_ack_rcvd,
.pre_fr = tcp_ledbat_pre_fr,
.post_fr = tcp_ledbat_post_fr,
.switch_to = tcp_ledbat_switch_cc
};
-extern int tcp_do_rfc3465;
-extern int tcp_do_rfc3465_lim2;
-extern uint32_t get_base_rtt(struct tcpcb *tp);
-
/* Target queuing delay in milliseconds. This includes the processing
* and scheduling delay on both of the end-hosts. A LEDBAT sender tries
* to keep queuing delay below this limit. When the queuing delay
* goes above this limit, the sender starts reducing the congestion
* window.
*
* The LEDBAT draft says that target queue delay MUST be 100 ms for
* inter-operability.
*/
-int target_qdelay = 100;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, bg_target_qdelay, CTLFLAG_RW | CTLFLAG_LOCKED,
- &target_qdelay , 100, "Target queuing delay");
+SYSCTL_SKMEM_TCP_INT(OID_AUTO, bg_target_qdelay, CTLFLAG_RW | CTLFLAG_LOCKED,
+ int, target_qdelay, 100, "Target queuing delay");
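/* Illustrative sketch of how the target is applied: the estimated queuing
 * delay (t_rttcur - base_rtt) must stay at or below target_qdelay before
 * the window is allowed to grow, which update_cwnd() below expresses as a
 * comparison against (base_rtt + target_qdelay). The helper name here is
 * hypothetical and is not referenced by the code below.
 */
static inline int
ledbat_under_target(uint32_t rttcur, uint32_t base_rtt)
{
	return (rttcur <= (base_rtt + (uint32_t)target_qdelay));
}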
/* Allowed increase and tether are used to place an upper bound on
* congestion window based on the amount of data that is outstanding.
* max_allowed_cwnd = allowed_increase + (tether * flight_size)
* cwnd = min(cwnd, max_allowed_cwnd)
*
- * 'Allowed_increase' parameter is set to 2. If the flight size is zero, then
- * we want the congestion window to be at least 2 packets to reduce the
- * delay induced by delayed ack. This helps when the receiver is acking every
- * other packet.
+ * 'Allowed_increase' parameter is set to 8. If the flight size is zero, then
+ * we want the congestion window to be at least 8 packets to reduce the
+ * delay induced by delayed ack. This helps when the receiver is acking
+ * more than 2 packets at a time (stretching acks for better performance).
*
* 'Tether' is also set to 2. We do not want this to limit the growth of cwnd
* during slow-start.
*/
-int allowed_increase = 2;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, bg_allowed_increase, CTLFLAG_RW | CTLFLAG_LOCKED,
- &allowed_increase, 1, "Additive constant used to calculate max allowed congestion window");
+SYSCTL_SKMEM_TCP_INT(OID_AUTO, bg_allowed_increase, CTLFLAG_RW | CTLFLAG_LOCKED,
+ int, allowed_increase, 8,
+ "Additive constant used to calculate max allowed congestion window");
/* Left shift for cwnd to get tether value of 2 */
-int tether_shift = 1;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, bg_tether_shift, CTLFLAG_RW | CTLFLAG_LOCKED,
- &tether_shift, 1, "Tether shift for max allowed congestion window");
+SYSCTL_SKMEM_TCP_INT(OID_AUTO, bg_tether_shift, CTLFLAG_RW | CTLFLAG_LOCKED,
+ int, tether_shift, 1, "Tether shift for max allowed congestion window");
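/* Illustrative sketch of the bound described above:
 * max_allowed_cwnd = allowed_increase + (tether * flight_size), with
 * allowed_increase counted in full-sized segments and the tether of 2
 * applied as a left shift by tether_shift. The helper is hypothetical and
 * only approximates the clamp that update_cwnd() applies.
 */
static inline uint32_t
ledbat_max_allowed_cwnd(uint32_t flight_size, uint32_t maxseg)
{
	return ((uint32_t)allowed_increase * maxseg) +
	    (flight_size << tether_shift);
}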
/* Start with an initial window of 2. This helps to get a more accurate
* minimum RTT measurement in the beginning. It also probes the path slowly,
* without adding to the existing delay if the path is already congested.
* Using 2 packets will reduce the delay induced by delayed-ack.
*/
-uint32_t bg_ss_fltsz = 2;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, bg_ss_fltsz, CTLFLAG_RW | CTLFLAG_LOCKED,
- &bg_ss_fltsz, 2, "Initial congestion window for background transport");
+SYSCTL_SKMEM_TCP_INT(OID_AUTO, bg_ss_fltsz, CTLFLAG_RW | CTLFLAG_LOCKED,
+ uint32_t, bg_ss_fltsz, 2, "Initial congestion window for background transport");
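/* For reference, the initial background window amounts to bg_ss_fltsz
 * full-sized segments, the same expression that tcp_ledbat_after_idle()
 * below uses to reset the window:
 *
 *	tp->snd_cwnd = tp->t_maxseg * bg_ss_fltsz;
 */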
extern int rtt_samples_per_slot;
static void update_cwnd(struct tcpcb *tp, uint32_t incr) {
uint32_t max_allowed_cwnd = 0, flight_size = 0;
- uint32_t qdelay, base_rtt;
- int32_t off_target;
+ uint32_t base_rtt;
base_rtt = get_base_rtt(tp);
/* No usable RTT measurement yet: apply the default window increase */
if (base_rtt == 0 || tp->t_rttcur == 0) {
	tp->snd_cwnd += incr;
	goto check_max;
}
-
- qdelay = tp->t_rttcur - base_rtt;
- off_target = (int32_t)(target_qdelay - qdelay);
- if (off_target >= 0) {
- /* Delay decreased or remained the same, we can increase
+ if (tp->t_rttcur <= (base_rtt + target_qdelay)) {
+ /*
+ * Delay decreased or remained the same, so we can increase
* the congestion window according to RFC 3465.
*
* Move background slow-start threshold to current
* This gets called only during congestion avoidance phase.
*/
void
-tcp_ledbat_inseq_ack_rcvd(struct tcpcb *tp, struct tcphdr *th) {
+tcp_ledbat_congestion_avd(struct tcpcb *tp, struct tcphdr *th) {
int acked = 0;
u_int32_t incr = 0;
- acked = th->th_ack - tp->snd_una;
+ acked = BYTES_ACKED(th, tp);
tp->t_bytes_acked += acked;
if (tp->t_bytes_acked > tp->snd_cwnd) {
tp->t_bytes_acked -= tp->snd_cwnd;
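/* RFC 3465 style byte counting: t_bytes_acked is charged one full
 * congestion window before at most one segment of growth becomes due,
 * and the growth itself goes through update_cwnd(), which allows the
 * increase only while the measured delay stays within the target.
 */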
* greater than or equal to the congestion window.
*/
- register u_int cw = tp->snd_cwnd;
- register u_int incr = tp->t_maxseg;
+ u_int cw = tp->snd_cwnd;
+ u_int incr = tp->t_maxseg;
int acked = 0;
- acked = th->th_ack - tp->snd_una;
+ acked = BYTES_ACKED(th, tp);
tp->t_bytes_acked += acked;
if (cw >= tp->bg_ssthresh) {
/* congestion-avoidance */
}
void
-tcp_ledbat_pre_fr(struct tcpcb *tp, struct tcphdr *th) {
-#pragma unused(th)
-
+tcp_ledbat_pre_fr(struct tcpcb *tp) {
uint32_t win;
win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
if (win < 2)
	win = 2;
tp->snd_ssthresh = win * tp->t_maxseg;
if (tp->bg_ssthresh > tp->snd_ssthresh)
tp->bg_ssthresh = tp->snd_ssthresh;
+
+ tcp_cc_resize_sndbuf(tp);
}
void
* snd_ssthresh outstanding data. But in case we
* would be inclined to send a burst, better to do
* it via the slow start mechanism.
+ *
+ * If the flight size is zero, then make the congestion
+ * window worth at least 2 segments to avoid a delayed
+ * acknowledgement (draft-ietf-tcpm-rfc3782-bis-05).
*/
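/* For example (hypothetical numbers): with nothing left in flight
 * (ss == 0) and t_maxseg == 1448, snd_cwnd below becomes
 * max(0, 1448) + 1448 = 2896, i.e. two full-sized segments, so the
 * receiver can ack without waiting for the delayed-ack timer.
 */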
if (ss < (int32_t)tp->snd_ssthresh)
- tp->snd_cwnd = ss + tp->t_maxseg;
+ tp->snd_cwnd = max(ss, tp->t_maxseg) + tp->t_maxseg;
else
tp->snd_cwnd = tp->snd_ssthresh;
tp->t_bytes_acked = 0;
*/
void
tcp_ledbat_after_idle(struct tcpcb *tp) {
- int32_t n = N_RTT_BASE, i = (N_RTT_BASE - 1);
-
- /* Decide how many base history entries have to be cleared
- * based on how long the connection has been idle.
- */
-
- if (tp->t_rttcur > 0) {
- int32_t nrtt, idle_time;
-
- idle_time = tcp_now - tp->t_rcvtime;
- nrtt = idle_time / tp->t_rttcur;
- n = nrtt / rtt_samples_per_slot;
- if (n > N_RTT_BASE)
- n = N_RTT_BASE;
- }
- for (i = (N_RTT_BASE - 1); n > 0; --i, --n) {
- tp->rtt_hist[i] = 0;
- }
- for (n = (N_RTT_BASE - 1); i >= 0; --i, --n) {
- tp->rtt_hist[n] = tp->rtt_hist[i];
- tp->rtt_hist[i] = 0;
- }
/* Reset the congestion window */
tp->snd_cwnd = tp->t_maxseg * bg_ss_fltsz;
u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
if (win < 2)
win = 2;
- tp->snd_cwnd = tp->t_maxseg;
tp->snd_ssthresh = win * tp->t_maxseg;
- tp->t_bytes_acked = 0;
- tp->t_dupacks = 0;
if (tp->bg_ssthresh > tp->snd_ssthresh)
tp->bg_ssthresh = tp->snd_ssthresh;
+
+ tp->snd_cwnd = tp->t_maxseg;
+ tcp_cc_resize_sndbuf(tp);
}
}
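/* With the condition below, an ack is delayed only while the last
 * advertised window was not zero (TF_RXWIN0SENT clear), the segment does
 * not carry TH_PUSH, and exactly one received segment is pending an ack
 * (t_unacksegs == 1), so at most every other full-sized segment has its
 * ack delayed.
 */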
int
tcp_ledbat_delay_ack(struct tcpcb *tp, struct tcphdr *th) {
if ((tp->t_flags & TF_RXWIN0SENT) == 0 &&
- (th->th_flags & TH_PUSH) == 0 &&
- (tp->t_flags & TF_DELACK) == 0)
+ (th->th_flags & TH_PUSH) == 0 && (tp->t_unacksegs == 1))
return(1);
return(0);
}