/*
 * Copyright (c) 2010-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/protosw.h>
#include <sys/mcache.h>
#include <sys/sysctl.h>

#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#if INET6
#include <netinet/ip6.h>
#endif
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_cc.h>

#include <libkern/OSAtomic.h>

/* This file implements an alternate TCP congestion control algorithm
 * for background transport, developed by the LEDBAT working group at the
 * IETF and described in draft-ietf-ledbat-congestion-02.
 */
57
58 int tcp_ledbat_init(struct tcpcb *tp);
59 int tcp_ledbat_cleanup(struct tcpcb *tp);
60 void tcp_ledbat_cwnd_init(struct tcpcb *tp);
61 void tcp_ledbat_congestion_avd(struct tcpcb *tp, struct tcphdr *th);
62 void tcp_ledbat_ack_rcvd(struct tcpcb *tp, struct tcphdr *th);
63 void tcp_ledbat_pre_fr(struct tcpcb *tp);
64 void tcp_ledbat_post_fr(struct tcpcb *tp, struct tcphdr *th);
65 void tcp_ledbat_after_idle(struct tcpcb *tp);
66 void tcp_ledbat_after_timeout(struct tcpcb *tp);
67 int tcp_ledbat_delay_ack(struct tcpcb *tp, struct tcphdr *th);
68 void tcp_ledbat_switch_cc(struct tcpcb *tp, uint16_t old_cc_index);
69
70 struct tcp_cc_algo tcp_cc_ledbat = {
71 .name = "ledbat",
72 .init = tcp_ledbat_init,
73 .cleanup = tcp_ledbat_cleanup,
74 .cwnd_init = tcp_ledbat_cwnd_init,
75 .congestion_avd = tcp_ledbat_congestion_avd,
76 .ack_rcvd = tcp_ledbat_ack_rcvd,
77 .pre_fr = tcp_ledbat_pre_fr,
78 .post_fr = tcp_ledbat_post_fr,
79 .after_idle = tcp_ledbat_after_idle,
80 .after_timeout = tcp_ledbat_after_timeout,
81 .delay_ack = tcp_ledbat_delay_ack,
82 .switch_to = tcp_ledbat_switch_cc
83 };

/* Target queuing delay in milliseconds. This includes the processing
 * and scheduling delay on both of the end-hosts. A LEDBAT sender tries
 * to keep queuing delay below this limit. When the queuing delay
 * goes above this limit, a LEDBAT sender will start reducing the
 * congestion window.
 *
 * The LEDBAT draft says that the target queue delay MUST be 100 ms for
 * inter-operability.
 */
int target_qdelay = 100;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, bg_target_qdelay, CTLFLAG_RW | CTLFLAG_LOCKED,
	&target_qdelay, 100, "Target queuing delay");
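
/* Illustrative note (added, not from the original source): with these
 * defaults, a background flow whose base RTT is 40 ms keeps growing its
 * window as long as the measured RTT stays at or below 140 ms
 * (base_rtt + target_qdelay); above that, update_cwnd() below backs off.
 */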

/* Allowed increase and tether are used to place an upper bound on the
 * congestion window based on the amount of data that is outstanding.
 * This limits the congestion window when the amount of data in flight
 * is small because the application writes to the socket intermittently,
 * which prevents the connection from becoming idle.
 *
 * max_allowed_cwnd = (allowed_increase * maxseg) + (tether * flight_size)
 * cwnd = min(cwnd, max_allowed_cwnd)
 *
 * The 'allowed_increase' parameter is set to 8. If the flight size is zero,
 * we want the congestion window to be at least 8 packets to reduce the
 * delay induced by delayed acks. This helps when the receiver is acking
 * more than 2 packets at a time (stretching acks for better performance).
 *
 * The 'tether' parameter is set to 2. We do not want it to limit the growth
 * of cwnd during slow-start.
 */
int allowed_increase = 8;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, bg_allowed_increase, CTLFLAG_RW | CTLFLAG_LOCKED,
	&allowed_increase, 1, "Additive constant used to calculate max allowed congestion window");

/* Left shift for cwnd to get tether value of 2 */
int tether_shift = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, bg_tether_shift, CTLFLAG_RW | CTLFLAG_LOCKED,
	&tether_shift, 1, "Tether shift for max allowed congestion window");
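
/* Worked example (illustrative values, not from the source): with
 * t_maxseg = 1448 bytes, allowed_increase = 8, tether_shift = 1 and
 * 20000 bytes in flight, update_cwnd() bounds the congestion window to
 * (8 * 1448) + (20000 << 1) = 11584 + 40000 = 51584 bytes.
 */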

/* Start with an initial window of 2. This will help to get more accurate
 * minimum RTT measurement in the beginning. It will help to probe
 * the path slowly and will not add to the existing delay if the path is
 * already congested. Using 2 packets will reduce the delay induced by delayed-ack.
 */
uint32_t bg_ss_fltsz = 2;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, bg_ss_fltsz, CTLFLAG_RW | CTLFLAG_LOCKED,
	&bg_ss_fltsz, 2, "Initial congestion window for background transport");
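
/* For instance (illustrative), with a 1448-byte MSS the initial and
 * post-idle congestion window is 2 * 1448 = 2896 bytes.
 */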

extern int rtt_samples_per_slot;

static void update_cwnd(struct tcpcb *tp, uint32_t incr) {
	uint32_t max_allowed_cwnd = 0, flight_size = 0;
	uint32_t base_rtt;

	base_rtt = get_base_rtt(tp);

	/* If we do not have a good RTT measurement yet, increment the
	 * congestion window by the default value.
	 */
	if (base_rtt == 0 || tp->t_rttcur == 0) {
		tp->snd_cwnd += incr;
		goto check_max;
	}

	if (tp->t_rttcur <= (base_rtt + target_qdelay)) {
		/*
		 * Delay decreased or remained the same, we can increase
		 * the congestion window according to RFC 3465.
		 *
		 * Move the background slow-start threshold to the current
		 * congestion window so that the next time (after some idle
		 * period), we can attempt to slow-start up to this point if
		 * there is no increase in rtt.
		 */
		if (tp->bg_ssthresh < tp->snd_cwnd)
			tp->bg_ssthresh = tp->snd_cwnd;
		tp->snd_cwnd += incr;

	} else {
		/* In response to an increase in rtt, reduce the congestion
		 * window by one-eighth. This will help to yield immediately
		 * to a competing stream.
		 */
		uint32_t redwin;

		redwin = tp->snd_cwnd >> 3;
		tp->snd_cwnd -= redwin;
		if (tp->snd_cwnd < bg_ss_fltsz * tp->t_maxseg)
			tp->snd_cwnd = bg_ss_fltsz * tp->t_maxseg;
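
		/* Illustrative numbers (not from the original source): a
		 * 64000-byte cwnd is cut by redwin = 8000 to 56000 bytes,
		 * but never below bg_ss_fltsz * t_maxseg (2 segments by
		 * default).
		 */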

		/* Lower background slow-start threshold so that the connection
		 * will go into congestion avoidance phase
		 */
		if (tp->bg_ssthresh > tp->snd_cwnd)
			tp->bg_ssthresh = tp->snd_cwnd;
	}
check_max:
	/* Calculate the outstanding flight size and restrict the
	 * congestion window to a factor of flight size.
	 */
	flight_size = tp->snd_max - tp->snd_una;

	max_allowed_cwnd = (allowed_increase * tp->t_maxseg)
		+ (flight_size << tether_shift);
	tp->snd_cwnd = min(tp->snd_cwnd, max_allowed_cwnd);
	return;
}

int tcp_ledbat_init(struct tcpcb *tp) {
#pragma unused(tp)
	OSIncrementAtomic((volatile SInt32 *)&tcp_cc_ledbat.num_sockets);
	return 0;
}

int tcp_ledbat_cleanup(struct tcpcb *tp) {
#pragma unused(tp)
	OSDecrementAtomic((volatile SInt32 *)&tcp_cc_ledbat.num_sockets);
	return 0;
}

/* Initialize the congestion window for a connection.
 */
void
tcp_ledbat_cwnd_init(struct tcpcb *tp) {
	tp->snd_cwnd = tp->t_maxseg * bg_ss_fltsz;
	tp->bg_ssthresh = tp->snd_ssthresh;
}

/* Function to handle an in-sequence ack during fast-path (header
 * prediction) processing in tcp_input. This gets called only during
 * the congestion avoidance phase.
 */
void
tcp_ledbat_congestion_avd(struct tcpcb *tp, struct tcphdr *th) {
	int acked = 0;
	u_int32_t incr = 0;

	acked = BYTES_ACKED(th, tp);
	tp->t_bytes_acked += acked;
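	/* Note (added, illustrative): with Appropriate Byte Counting the
	 * window is bumped by one t_maxseg only after a full congestion
	 * window's worth of bytes has been acknowledged, i.e. roughly once
	 * per RTT.
	 */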
	if (tp->t_bytes_acked > tp->snd_cwnd) {
		tp->t_bytes_acked -= tp->snd_cwnd;
		incr = tp->t_maxseg;
	}

	if (tp->snd_cwnd < tp->snd_wnd && incr > 0) {
		update_cwnd(tp, incr);
	}
}

/* Function to process an ack.
 */
void
tcp_ledbat_ack_rcvd(struct tcpcb *tp, struct tcphdr *th) {
	/*
	 * RFC 3465 - Appropriate Byte Counting.
	 *
	 * If the window is currently less than ssthresh,
	 * open the window by the number of bytes ACKed by
	 * the last ACK, however clamp the window increase
	 * to an upper limit "L".
	 *
	 * In congestion avoidance phase, open the window by
	 * one segment each time "bytes_acked" grows to be
	 * greater than or equal to the congestion window.
	 */

	u_int cw = tp->snd_cwnd;
	u_int incr = tp->t_maxseg;
	int acked = 0;

	acked = BYTES_ACKED(th, tp);
	tp->t_bytes_acked += acked;
	if (cw >= tp->bg_ssthresh) {
		/* congestion-avoidance */
		if (tp->t_bytes_acked < cw) {
			/* No need to increase yet. */
			incr = 0;
		}
	} else {
		/*
		 * If the user explicitly enables RFC 3465,
		 * use 2*SMSS for the "L" param. Otherwise
		 * use the more conservative 1*SMSS.
		 *
		 * (See RFC 3465 2.3 Choosing the Limit)
		 */
		u_int abc_lim;

		abc_lim = (tcp_do_rfc3465_lim2 &&
		    tp->snd_nxt == tp->snd_max) ? incr * 2 : incr;

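		/* e.g. (illustrative values, added): with a 1448-byte MSS,
		 * abc_lim is 2896 bytes when RFC 3465's L = 2*SMSS applies
		 * (and we are not retransmitting), otherwise 1448 bytes.
		 */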
		incr = lmin(acked, abc_lim);
	}
	if (tp->t_bytes_acked >= cw)
		tp->t_bytes_acked -= cw;
	if (incr > 0)
		update_cwnd(tp, incr);
}

void
tcp_ledbat_pre_fr(struct tcpcb *tp) {
	uint32_t win;

	win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
	if (win < 2)
		win = 2;
	tp->snd_ssthresh = win * tp->t_maxseg;
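	/* Worked example (illustrative values, added): snd_cwnd = 40000,
	 * snd_wnd = 60000 and t_maxseg = 1448 give win = 20000 / 1448 = 13
	 * segments, so snd_ssthresh becomes 13 * 1448 = 18824 bytes.
	 */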
	if (tp->bg_ssthresh > tp->snd_ssthresh)
		tp->bg_ssthresh = tp->snd_ssthresh;

	tcp_cc_resize_sndbuf(tp);
}

void
tcp_ledbat_post_fr(struct tcpcb *tp, struct tcphdr *th) {
	int32_t ss;

	ss = tp->snd_max - th->th_ack;

	/*
	 * Complete ack. Inflate the congestion window to
	 * ssthresh and exit fast recovery.
	 *
	 * Window inflation should have left us with approx.
	 * snd_ssthresh outstanding data. But in case we
	 * would be inclined to send a burst, better to do
	 * it via the slow start mechanism.
	 *
	 * If the flight size is zero, then make the congestion
	 * window worth at least 2 segments to avoid
	 * delayed acknowledgement (draft-ietf-tcpm-rfc3782-bis-05).
	 */
	if (ss < (int32_t)tp->snd_ssthresh)
		tp->snd_cwnd = max(ss, tp->t_maxseg) + tp->t_maxseg;
	else
		tp->snd_cwnd = tp->snd_ssthresh;
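	/* Note (added): when the outstanding data is below ssthresh, capping
	 * cwnd at roughly one segment beyond it limits how much can be sent
	 * immediately, so further growth happens via slow start rather than
	 * as a burst.
	 */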
	tp->t_bytes_acked = 0;
}

/*
 * Function to handle connections that have been idle for
 * some time. Slow start to get ack "clock" running again.
 * Clear base history after idle time.
 */
void
tcp_ledbat_after_idle(struct tcpcb *tp) {
	/* Reset the congestion window */
	tp->snd_cwnd = tp->t_maxseg * bg_ss_fltsz;
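	/* With the default bg_ss_fltsz of 2 and a 1448-byte MSS
	 * (illustrative), this resets cwnd to 2896 bytes.
	 */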
}

/* Function to change the congestion window when the retransmit
 * timer fires. The behavior is the same as that for best-effort
 * TCP: reduce the congestion window to one segment and start probing
 * the link using "slow start". The slow-start threshold is set
 * to half of the current window. Also lower the background
 * slow-start threshold.
 */
void
tcp_ledbat_after_timeout(struct tcpcb *tp) {
	if (tp->t_state >= TCPS_ESTABLISHED) {
		u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
		if (win < 2)
			win = 2;
		tp->snd_ssthresh = win * tp->t_maxseg;

		if (tp->bg_ssthresh > tp->snd_ssthresh)
			tp->bg_ssthresh = tp->snd_ssthresh;

		tp->snd_cwnd = tp->t_maxseg;
		tcp_cc_resize_sndbuf(tp);
	}
}

/*
 * Indicate whether this ack should be delayed.
 * We can delay the ack if:
 * - our last ack wasn't a 0-sized window.
 * - the peer hasn't sent us a TH_PUSH data packet: if it did, take this
 *   as a clue that we need to ACK without any delay. This helps higher
 *   level protocols that won't send us more data even if the window is
 *   open because their last "segment" hasn't been ACKed.
 * Otherwise the receiver will ack every other full-sized segment or when the
 * delayed ack timer fires. This will help to generate better rtt estimates for
 * the other end if it is a ledbat sender.
 */
int
tcp_ledbat_delay_ack(struct tcpcb *tp, struct tcphdr *th) {
	if ((tp->t_flags & TF_RXWIN0SENT) == 0 &&
	    (th->th_flags & TH_PUSH) == 0 && (tp->t_unacksegs == 1))
		return(1);
	return(0);
}

/* Change a connection to use ledbat. First, lower the bg_ssthresh
 * value if needed.
 */
void
tcp_ledbat_switch_cc(struct tcpcb *tp, uint16_t old_cc_index) {
#pragma unused(old_cc_index)
	uint32_t cwnd;

	if (tp->bg_ssthresh == 0 || tp->bg_ssthresh > tp->snd_ssthresh)
		tp->bg_ssthresh = tp->snd_ssthresh;

	cwnd = min(tp->snd_wnd, tp->snd_cwnd);

	if (tp->snd_cwnd > tp->bg_ssthresh)
		cwnd = cwnd / tp->t_maxseg;
	else
		cwnd = cwnd / 2 / tp->t_maxseg;

	if (cwnd < bg_ss_fltsz)
		cwnd = bg_ss_fltsz;

	tp->snd_cwnd = cwnd * tp->t_maxseg;
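	/* Illustrative example (values not from the source): switching with
	 * snd_cwnd = 40000, snd_wnd = 50000, t_maxseg = 1448 and snd_cwnd at
	 * or below bg_ssthresh gives cwnd = 40000 / 2 / 1448 = 13 segments,
	 * i.e. 18824 bytes.
	 */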
	tp->t_bytes_acked = 0;

	OSIncrementAtomic((volatile SInt32 *)&tcp_cc_ledbat.num_sockets);
}