/*
 * Copyright (c) 2013-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/protosw.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>

#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#if INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_cc.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_seq.h>
#include <kern/task.h>
#include <libkern/OSAtomic.h>
static int tcp_cubic_init(struct tcpcb *tp);
static int tcp_cubic_cleanup(struct tcpcb *tp);
static void tcp_cubic_cwnd_init_or_reset(struct tcpcb *tp);
static void tcp_cubic_congestion_avd(struct tcpcb *tp, struct tcphdr *th);
static void tcp_cubic_ack_rcvd(struct tcpcb *tp, struct tcphdr *th);
static void tcp_cubic_pre_fr(struct tcpcb *tp);
static void tcp_cubic_post_fr(struct tcpcb *tp, struct tcphdr *th);
static void tcp_cubic_after_timeout(struct tcpcb *tp);
static int tcp_cubic_delay_ack(struct tcpcb *tp, struct tcphdr *th);
static void tcp_cubic_switch_cc(struct tcpcb *tp, u_int16_t old_index);
static uint32_t tcp_cubic_update(struct tcpcb *tp, u_int32_t rtt);
static uint32_t tcp_cubic_tcpwin(struct tcpcb *tp, struct tcphdr *th);
static inline void tcp_cubic_clear_state(struct tcpcb *tp);
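
/*
 * Cube root, used in tcp_cubic_update() to compute the epoch period K.
 * Declared extern here, presumably because the kernel has no libm
 * header; the implementation is provided elsewhere in the kernel.
 */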
extern float cbrtf(float x);

struct tcp_cc_algo tcp_cc_cubic = {
    .name = "cubic",
    .init = tcp_cubic_init,
    .cleanup = tcp_cubic_cleanup,
    .cwnd_init = tcp_cubic_cwnd_init_or_reset,
    .congestion_avd = tcp_cubic_congestion_avd,
    .ack_rcvd = tcp_cubic_ack_rcvd,
    .pre_fr = tcp_cubic_pre_fr,
    .post_fr = tcp_cubic_post_fr,
    .after_idle = tcp_cubic_cwnd_init_or_reset,
    .after_timeout = tcp_cubic_after_timeout,
    .delay_ack = tcp_cubic_delay_ack,
    .switch_to = tcp_cubic_switch_cc
};

const float tcp_cubic_backoff = 0.2; /* multiplicative decrease factor */
const float tcp_cubic_coeff = 0.4;
const float tcp_cubic_fast_convergence_factor = 0.875;
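
/*
 * In the terms of the CUBIC paper (Ha, Rhee & Xu), a multiplicative
 * decrease of 0.2 corresponds to beta = 0.8: the window keeps 80% of
 * its value after a loss, and tcp_cubic_coeff is the scaling constant
 * C of the cubic function. The fast-convergence factor of 0.875 is
 * close to the paper's suggested (2 - beta) / 2 scaling of W(last_max).
 */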

static int tcp_cubic_tcp_friendliness = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, cubic_tcp_friendliness,
    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_cubic_tcp_friendliness, 0,
    "Enable TCP friendliness");

static int tcp_cubic_fast_convergence = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, cubic_fast_convergence,
    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_cubic_fast_convergence, 0,
    "Enable fast convergence");

static int tcp_cubic_use_minrtt = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, cubic_use_minrtt,
    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_cubic_use_minrtt, 0,
    "Use a minimum RTT (the sysctl value, in ms) when nonzero");

static int tcp_cubic_init(struct tcpcb *tp)
{
    OSIncrementAtomic((volatile SInt32 *)&tcp_cc_cubic.num_sockets);

    VERIFY(tp->t_ccstate != NULL);
    tcp_cubic_clear_state(tp);
    return (0);
}

static int tcp_cubic_cleanup(struct tcpcb *tp)
{
#pragma unused(tp)
    OSDecrementAtomic((volatile SInt32 *)&tcp_cc_cubic.num_sockets);
    return (0);
}

/*
 * Initialize the congestion window at the beginning of a connection or
 * after idle time
 */
static void tcp_cubic_cwnd_init_or_reset(struct tcpcb *tp)
{
    VERIFY(tp->t_ccstate != NULL);

    tcp_cubic_clear_state(tp);
    tcp_cc_cwnd_init_or_reset(tp);

    /*
     * The slow-start threshold could have been initialized to a lower
     * value when there is a cached value in the route metrics. In that
     * case the connection can enter congestion avoidance without any
     * packet loss, and Cubic will enter steady state too early. It is
     * better to always probe to find the initial slow-start threshold.
     */
    if (tp->t_inpcb->inp_stat->txbytes <= TCP_CC_CWND_INIT_BYTES
        && tp->snd_ssthresh < (TCP_MAXWIN << TCP_MAX_WINSHIFT))
        tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;

    /* Initialize cubic last max to be same as ssthresh */
    tp->t_ccstate->cub_last_max = tp->snd_ssthresh;

    /* If stretch ack was auto-disabled, re-evaluate it */
    tcp_cc_after_idle_stretchack(tp);
}
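
/*
 * TCP_MAXWIN << TCP_MAX_WINSHIFT is 65535 << 14, the largest window
 * representable with RFC 1323 window scaling (about 1 GB), so the reset
 * above effectively discards any cached ssthresh until a loss is seen.
 */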

/*
 * Compute the target congestion window for the next RTT according to
 * the cubic equation when an ack is received.
 *
 * W(t) = C * (t - K)^3 + W(last_max)
 */
static uint32_t
tcp_cubic_update(struct tcpcb *tp, u_int32_t rtt)
{
    float K, var;
    u_int32_t elapsed_time, win;

    win = min(tp->snd_cwnd, tp->snd_wnd);
    if (tp->t_ccstate->cub_last_max == 0)
        tp->t_ccstate->cub_last_max = tp->snd_ssthresh;

    if (tp->t_ccstate->cub_epoch_start == 0) {
        /*
         * This is the beginning of a new epoch, initialize some of
         * the variables that we need to use for computing the
         * congestion window later.
         */
        tp->t_ccstate->cub_epoch_start = tcp_now;
        if (tp->t_ccstate->cub_epoch_start == 0)
            tp->t_ccstate->cub_epoch_start = 1;
        if (win < tp->t_ccstate->cub_last_max) {

            VERIFY(current_task() == kernel_task);

            /*
             * Compute the cubic epoch period; this is the time
             * period that the window will take to increase to
             * last_max again after backoff due to loss.
             */
            K = (tp->t_ccstate->cub_last_max - win)
                / tp->t_maxseg / tcp_cubic_coeff;
            K = cbrtf(K);
            tp->t_ccstate->cub_epoch_period = K * TCP_RETRANSHZ;
            /* Origin point */
            tp->t_ccstate->cub_origin_point =
                tp->t_ccstate->cub_last_max;
        } else {
            tp->t_ccstate->cub_epoch_period = 0;
            tp->t_ccstate->cub_origin_point = win;
        }
        tp->t_ccstate->cub_target_win = 0;
    }

    VERIFY(tp->t_ccstate->cub_origin_point > 0);
    /*
     * Compute the target window for the next RTT using smoothed RTT
     * as an estimate for next RTT.
     */
    elapsed_time = timer_diff(tcp_now, 0,
        tp->t_ccstate->cub_epoch_start, 0);

    if (tcp_cubic_use_minrtt)
        elapsed_time += max(tcp_cubic_use_minrtt, rtt);
    else
        elapsed_time += rtt;
    var = (elapsed_time - tp->t_ccstate->cub_epoch_period) / TCP_RETRANSHZ;
    var = var * var * var * (tcp_cubic_coeff * tp->t_maxseg);

    tp->t_ccstate->cub_target_win = tp->t_ccstate->cub_origin_point + var;
    return (tp->t_ccstate->cub_target_win);
}
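
/*
 * Worked example (hypothetical numbers): with a loss at last_max = 100
 * segments and a backed-off window of 80 segments,
 * K = cbrt((100 - 80) / 0.4) = cbrt(50), roughly 3.7 seconds. The target
 * then grows along W(t) = origin + 0.4 * maxseg * (t - K)^3: concave
 * until t reaches K (where W returns to last_max) and convex afterwards.
 */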

/*
 * Standard TCP utilizes bandwidth well in low-RTT and low-BDP connections
 * even when there is some packet loss. Enabling TCP mode helps Cubic to
 * achieve that kind of utilization.
 *
 * But if the path has a bottleneck link with a fixed-size queue and fixed
 * bandwidth, Cubic's steady-state behavior helps to reduce packet loss at
 * that link. Using the average and mean absolute deviation of W(last_max),
 * we try to detect whether the congestion window is close to the bottleneck
 * bandwidth. In that case, disabling TCP mode helps to minimize packet
 * loss at the link.
 *
 * Disable TCP mode if W(last_max) (the window at which the previous packet
 * loss happened) is within a small range of the average last max
 * calculated.
 */
#define TCP_CUBIC_ENABLE_TCPMODE(_tp_) \
    ((!soissrcrealtime((_tp_)->t_inpcb->inp_socket) && \
    (_tp_)->t_ccstate->cub_mean_dev > ((_tp_)->t_maxseg << 1)) ? 1 : 0)
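
/*
 * That is, TCP mode is used only while the loss window is still
 * fluctuating by more than two segments around its running average
 * (and never for realtime sources); a stable loss point suggests a
 * fixed-capacity bottleneck where Cubic's steady state loses less.
 */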

/*
 * Compute the window growth as if standard TCP (AIMD) were used, with
 * an additive increase of one packet per RTT and the multiplicative
 * backoff of tcp_cubic_backoff (0.2, i.e. beta = 0.8) that has already
 * been applied in tcp_cubic_pre_fr().
 *
 * For comparison, the CUBIC paper estimates the TCP-friendly window at
 * time t, with beta as 0.8, as
 *
 *	W(t) <- Wmax * beta + 3 * ((1 - beta) / (1 + beta)) * t/RTT
 *
 */
static uint32_t
tcp_cubic_tcpwin(struct tcpcb *tp, struct tcphdr *th)
{
    if (tp->t_ccstate->cub_tcp_win == 0) {
        tp->t_ccstate->cub_tcp_win = min(tp->snd_cwnd, tp->snd_wnd);
        tp->t_ccstate->cub_tcp_bytes_acked = 0;
    } else {
        tp->t_ccstate->cub_tcp_bytes_acked +=
            BYTES_ACKED(th, tp);
        if (tp->t_ccstate->cub_tcp_bytes_acked >=
            tp->t_ccstate->cub_tcp_win) {
            tp->t_ccstate->cub_tcp_bytes_acked -=
                tp->t_ccstate->cub_tcp_win;
            tp->t_ccstate->cub_tcp_win += tp->t_maxseg;
        }
    }
    return (tp->t_ccstate->cub_tcp_win);
}
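
/*
 * The byte counting above implements the additive-increase term: each
 * time a full window's worth of data is acked (roughly once per RTT),
 * the emulated TCP window grows by one segment.
 */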

/*
 * Handle an in-sequence ack during congestion avoidance phase.
 */
static void
tcp_cubic_congestion_avd(struct tcpcb *tp, struct tcphdr *th)
{
    u_int32_t cubic_target_win, tcp_win, rtt;

    tp->t_bytes_acked += BYTES_ACKED(th, tp);

    rtt = get_base_rtt(tp);
    /*
     * First compute the cubic window. If the cubic variables are not
     * initialized (after coming out of recovery), this call will
     * initialize them.
     */
    cubic_target_win = tcp_cubic_update(tp, rtt);

    /* Compute the TCP window if a multiplicative decrease of 0.2 is used */
    tcp_win = tcp_cubic_tcpwin(tp, th);

    if (tp->snd_cwnd < tcp_win &&
        (tcp_cubic_tcp_friendliness == 1 ||
        TCP_CUBIC_ENABLE_TCPMODE(tp))) {
        /* this connection is in the TCP-friendly region */
        if (tp->t_bytes_acked >= tp->snd_cwnd) {
            tp->t_bytes_acked -= tp->snd_cwnd;
            tp->snd_cwnd = min(tcp_win, TCP_MAXWIN << tp->snd_scale);
        }
    } else {
        if (cubic_target_win > tp->snd_cwnd) {
            /*
             * The target window is computed for the next RTT.
             * To reach this value, cwnd will have to be updated
             * one segment at a time. Compute how many bytes
             * need to be acknowledged before we can increase
             * the cwnd by one segment.
             */
            u_int64_t incr_win;
            incr_win = tp->snd_cwnd * tp->t_maxseg;
            incr_win /= (cubic_target_win - tp->snd_cwnd);
            if (incr_win > 0 &&
                tp->t_bytes_acked >= incr_win) {
                tp->t_bytes_acked -= incr_win;
                tp->snd_cwnd =
                    min((tp->snd_cwnd + tp->t_maxseg),
                    TCP_MAXWIN << tp->snd_scale);
            }
        }
    }
}
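
/*
 * Worked example (hypothetical numbers): if snd_cwnd is 100 segments and
 * the cubic target for the next RTT is 104 segments, then incr_win =
 * (100 * maxseg) / 4 = 25 segments' worth of acked bytes per one-segment
 * increase, so cwnd climbs by about 4 segments over the following RTT.
 */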

static void
tcp_cubic_ack_rcvd(struct tcpcb *tp, struct tcphdr *th)
{
    if (tp->snd_cwnd >= tp->snd_ssthresh) {
        /* Congestion avoidance phase */
        tcp_cubic_congestion_avd(tp, th);
    } else {
        /*
         * Use 2*SMSS as a limit on the increment as suggested
         * by RFC 3465 section 2.3
         */
        uint32_t acked, abc_lim, incr;
        acked = BYTES_ACKED(th, tp);
        abc_lim = (tcp_do_rfc3465_lim2 &&
            tp->snd_nxt == tp->snd_max) ?
            2 * tp->t_maxseg : tp->t_maxseg;
        incr = min(acked, abc_lim);

        tp->snd_cwnd += incr;
        tp->snd_cwnd = min(tp->snd_cwnd,
            TCP_MAXWIN << tp->snd_scale);
    }
}
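
/*
 * With appropriate byte counting (RFC 3465) and L = 2*SMSS, slow start
 * can grow cwnd by up to two segments per ack, which preserves the
 * roughly-doubling-per-RTT behavior even when the receiver delays acks.
 */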

static void
tcp_cubic_pre_fr(struct tcpcb *tp)
{
    uint32_t win, avg;
    int32_t dev;
    tp->t_ccstate->cub_epoch_start = 0;
    tp->t_ccstate->cub_tcp_win = 0;
    tp->t_ccstate->cub_target_win = 0;
    tp->t_ccstate->cub_tcp_bytes_acked = 0;

    win = min(tp->snd_cwnd, tp->snd_wnd);
    /*
     * Note the congestion window at which packet loss occurred as
     * cub_last_max.
     *
     * If the congestion window is less than the last max window when
     * loss occurred, it indicates that capacity available in the
     * network has gone down. This can happen if a new flow has started
     * and is capturing some of the bandwidth. To reach convergence
     * quickly, back off a little more. Disable fast convergence to
     * disable this behavior.
     */
    if (win < tp->t_ccstate->cub_last_max &&
        tcp_cubic_fast_convergence == 1)
        tp->t_ccstate->cub_last_max = win *
            tcp_cubic_fast_convergence_factor;
    else
        tp->t_ccstate->cub_last_max = win;

    if (tp->t_ccstate->cub_last_max == 0) {
        /*
         * If last_max is zero because snd_wnd is zero or for
         * any other reason, initialize it to the amount of data
         * in flight
         */
        tp->t_ccstate->cub_last_max = tp->snd_max - tp->snd_una;
    }

    /*
     * Compute the average and mean absolute deviation of the
     * window at which packet loss occurred.
     */
    if (tp->t_ccstate->cub_avg_lastmax == 0) {
        tp->t_ccstate->cub_avg_lastmax = tp->t_ccstate->cub_last_max;
    } else {
        /*
         * The average is computed by taking 63 parts of
         * history and one part of the most recent value
         */
        avg = tp->t_ccstate->cub_avg_lastmax;
        avg = (avg << 6) - avg;
        tp->t_ccstate->cub_avg_lastmax =
            (avg + tp->t_ccstate->cub_last_max) >> 6;
    }

    /* Calculate the deviation from the average */
    dev = tp->t_ccstate->cub_avg_lastmax - tp->t_ccstate->cub_last_max;

    /* Take the absolute value */
    if (dev < 0)
        dev = -dev;

    if (tp->t_ccstate->cub_mean_dev == 0) {
        tp->t_ccstate->cub_mean_dev = dev;
    } else {
        dev = dev + ((tp->t_ccstate->cub_mean_dev << 4)
            - tp->t_ccstate->cub_mean_dev);
        tp->t_ccstate->cub_mean_dev = dev >> 4;
    }
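
    /*
     * In closed form, the two filters above are
     *	avg_lastmax = (63 * avg_lastmax + last_max) / 64
     *	mean_dev    = (15 * mean_dev + |dev|) / 16
     * the same style of shift-based smoothing used by the
     * srtt/rttvar estimators.
     */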

    /* Back off the congestion window by the tcp_cubic_backoff factor */
    win = win - (win * tcp_cubic_backoff);
    win = (win / tp->t_maxseg);
    if (win < 2)
        win = 2;
    tp->snd_ssthresh = win * tp->t_maxseg;
    tcp_cc_resize_sndbuf(tp);
}
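
/*
 * For example (hypothetical numbers), a loss at win = 100 segments leaves
 * ssthresh at 80 segments after the 20% backoff, rounded down to a whole
 * number of segments with a floor of 2.
 */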

static void
tcp_cubic_post_fr(struct tcpcb *tp, struct tcphdr *th)
{
    uint32_t flight_size = 0;

    if (SEQ_LEQ(th->th_ack, tp->snd_max))
        flight_size = tp->snd_max - th->th_ack;
    /*
     * Complete ack. The current window was inflated for fast recovery
     * and has to be deflated post recovery.
     *
     * Window inflation should have left us with approximately
     * snd_ssthresh outstanding data. If the flight size is zero or one
     * segment, make the congestion window at least two segments to
     * avoid delayed acknowledgements, per RFC 6582.
     */
    if (flight_size < tp->snd_ssthresh)
        tp->snd_cwnd = max(flight_size, tp->t_maxseg)
            + tp->t_maxseg;
    else
        tp->snd_cwnd = tp->snd_ssthresh;
    tp->t_ccstate->cub_tcp_win = 0;
    tp->t_ccstate->cub_target_win = 0;
    tp->t_ccstate->cub_tcp_bytes_acked = 0;
}

static void
tcp_cubic_after_timeout(struct tcpcb *tp)
{
    VERIFY(tp->t_ccstate != NULL);
    if (!IN_FASTRECOVERY(tp)) {
        tcp_cubic_clear_state(tp);
        tcp_cubic_pre_fr(tp);
    }

    /*
     * Close the congestion window down to one segment as a retransmit
     * timeout might indicate severe congestion.
     */
    tp->snd_cwnd = tp->t_maxseg;
}

static int
tcp_cubic_delay_ack(struct tcpcb *tp, struct tcphdr *th)
{
    return (tcp_cc_delay_ack(tp, th));
}

/*
 * When switching from a different CC it is better for Cubic to start
 * fresh. The state required for Cubic calculation might be stale and it
 * might not represent the current state of the network. If it starts as
 * a new connection it will probe and learn the existing network conditions.
 */
static void
tcp_cubic_switch_cc(struct tcpcb *tp, u_int16_t old_cc_index)
{
#pragma unused(old_cc_index)
    tcp_cubic_cwnd_init_or_reset(tp);
    /* Start counting bytes for RFC 3465 again */
    tp->t_bytes_acked = 0;

    OSIncrementAtomic((volatile SInt32 *)&tcp_cc_cubic.num_sockets);
}

static inline void tcp_cubic_clear_state(struct tcpcb *tp)
{
    tp->t_ccstate->cub_last_max = 0;
    tp->t_ccstate->cub_epoch_start = 0;
    tp->t_ccstate->cub_origin_point = 0;
    tp->t_ccstate->cub_tcp_win = 0;
    tp->t_ccstate->cub_tcp_bytes_acked = 0;
    tp->t_ccstate->cub_epoch_period = 0;
    tp->t_ccstate->cub_target_win = 0;
}