]>
Commit | Line | Data |
---|---|---|
8ad349bb | 1 | /* |
39037602 | 2 | * Copyright (c) 2004-2016 Apple Inc. All rights reserved. |
5d5c5d0d | 3 | * |
2d21ac55 | 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
39037602 | 5 | * |
2d21ac55 A |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
39037602 | 14 | * |
2d21ac55 A |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
39037602 | 17 | * |
2d21ac55 A |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
8f6c56a5 A |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
2d21ac55 A |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
39037602 | 25 | * |
2d21ac55 | 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
8ad349bb A |
27 | */ |
28 | /* | |
29 | * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995 | |
30 | * The Regents of the University of California. All rights reserved. | |
31 | * | |
32 | * Redistribution and use in source and binary forms, with or without | |
33 | * modification, are permitted provided that the following conditions | |
34 | * are met: | |
35 | * 1. Redistributions of source code must retain the above copyright | |
f427ee49 | 36 | * notice, this list of conditions and the following disclaimer. |
8ad349bb | 37 | * 2. Redistributions in binary form must reproduce the above copyright |
f427ee49 A |
38 | * notice, this list of conditions and the following disclaimer in the |
39 | * documentation and/or other materials provided with the distribution. | |
8ad349bb | 40 | * 3. All advertising materials mentioning features or use of this software |
f427ee49 | 41 | * must display the following acknowledgement: |
8ad349bb A |
42 | * This product includes software developed by the University of |
43 | * California, Berkeley and its contributors. | |
44 | * 4. Neither the name of the University nor the names of its contributors | |
f427ee49 A |
45 | * may be used to endorse or promote products derived from this software |
46 | * without specific prior written permission. | |
8ad349bb A |
47 | * |
48 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
49 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
50 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
51 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
52 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
53 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
54 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
55 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
56 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
57 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
58 | * SUCH DAMAGE. | |
59 | * | |
60 | */ | |
61 | ||
0a7de745 | 62 | #define _IP_VHL |
8ad349bb A |
63 | |
64 | ||
65 | #include <sys/param.h> | |
66 | #include <sys/systm.h> | |
67 | #include <sys/kernel.h> | |
68 | #include <sys/sysctl.h> | |
69 | #include <sys/mbuf.h> | |
70 | #include <sys/domain.h> | |
71 | #include <sys/protosw.h> | |
72 | #include <sys/socket.h> | |
73 | #include <sys/socketvar.h> | |
74 | ||
2d21ac55 A |
75 | #include <kern/zalloc.h> |
76 | ||
8ad349bb A |
77 | #include <net/route.h> |
78 | ||
79 | #include <netinet/in.h> | |
80 | #include <netinet/in_systm.h> | |
81 | #include <netinet/ip.h> | |
82 | #include <netinet/in_pcb.h> | |
83 | #include <netinet/ip_var.h> | |
8ad349bb A |
84 | #include <netinet6/in6_pcb.h> |
85 | #include <netinet/ip6.h> | |
86 | #include <netinet6/ip6_var.h> | |
8ad349bb | 87 | #include <netinet/tcp.h> |
8ad349bb A |
88 | #include <netinet/tcp_fsm.h> |
89 | #include <netinet/tcp_seq.h> | |
90 | #include <netinet/tcp_timer.h> | |
91 | #include <netinet/tcp_var.h> | |
92 | #include <netinet/tcpip.h> | |
39037602 | 93 | #include <netinet/tcp_cache.h> |
8ad349bb A |
94 | #if TCPDEBUG |
95 | #include <netinet/tcp_debug.h> | |
96 | #endif | |
97 | #include <sys/kdebug.h> | |
98 | ||
99 | #if IPSEC | |
100 | #include <netinet6/ipsec.h> | |
101 | #endif /*IPSEC*/ | |
102 | ||
fe8ab488 A |
103 | #include <libkern/OSAtomic.h> |
104 | ||
/*
 * Tunables: net.inet.tcp.sack globally enables/disables SACK processing.
 * sack_maxholes bounds the scoreboard size per connection and
 * sack_globalmaxholes bounds it system-wide, so a flood of SACK options
 * cannot exhaust kernel memory.
 */
SYSCTL_SKMEM_TCP_INT(OID_AUTO, sack, CTLFLAG_RW | CTLFLAG_LOCKED,
    int, tcp_do_sack, 1, "Enable/Disable TCP SACK support");
SYSCTL_SKMEM_TCP_INT(OID_AUTO, sack_maxholes, CTLFLAG_RW | CTLFLAG_LOCKED,
    static int, tcp_sack_maxholes, 128,
    "Maximum number of TCP SACK holes allowed per connection");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, sack_globalmaxholes,
    CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_sack_globalmaxholes, 65536,
    "Global maximum number of TCP SACK holes");

/* Current system-wide hole count; updated with OSIncrement/DecrementAtomic. */
static SInt32 tcp_sack_globalholes = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sack_globalholes, CTLFLAG_RD | CTLFLAG_LOCKED,
    &tcp_sack_globalholes, 0,
    "Global number of TCP SACK holes currently allocated");

extern struct zone *sack_hole_zone;

/*
 * A received SACK block (_sb_) is acceptable only if it is non-empty
 * (end > start) and lies strictly inside the outstanding window: its
 * start is beyond both snd_una and the segment's cumulative ACK (_ack_),
 * and the whole block sits at or below snd_max.
 */
#define TCP_VALIDATE_SACK_SEQ_NUMBERS(_tp_, _sb_, _ack_) \
	(SEQ_GT((_sb_)->end, (_sb_)->start) && \
	SEQ_GT((_sb_)->start, (_tp_)->snd_una) && \
	SEQ_GT((_sb_)->start, (_ack_)) && \
	SEQ_LT((_sb_)->start, (_tp_)->snd_max) && \
	SEQ_GT((_sb_)->end, (_tp_)->snd_una) && \
	SEQ_LEQ((_sb_)->end, (_tp_)->snd_max))
129 | ||
8ad349bb A |
130 | /* |
131 | * This function is called upon receipt of new valid data (while not in header | |
132 | * prediction mode), and it updates the ordered list of sacks. | |
133 | */ | |
134 | void | |
135 | tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end) | |
136 | { | |
137 | /* | |
138 | * First reported block MUST be the most recent one. Subsequent | |
139 | * blocks SHOULD be in the order in which they arrived at the | |
140 | * receiver. These two conditions make the implementation fully | |
141 | * compliant with RFC 2018. | |
142 | */ | |
143 | struct sackblk head_blk, saved_blks[MAX_SACK_BLKS]; | |
144 | int num_head, num_saved, i; | |
145 | ||
146 | /* SACK block for the received segment. */ | |
147 | head_blk.start = rcv_start; | |
148 | head_blk.end = rcv_end; | |
149 | ||
150 | /* | |
151 | * Merge updated SACK blocks into head_blk, and | |
152 | * save unchanged SACK blocks into saved_blks[]. | |
153 | * num_saved will have the number of the saved SACK blocks. | |
154 | */ | |
155 | num_saved = 0; | |
156 | for (i = 0; i < tp->rcv_numsacks; i++) { | |
157 | tcp_seq start = tp->sackblks[i].start; | |
158 | tcp_seq end = tp->sackblks[i].end; | |
159 | if (SEQ_GEQ(start, end) || SEQ_LEQ(start, tp->rcv_nxt)) { | |
160 | /* | |
161 | * Discard this SACK block. | |
162 | */ | |
163 | } else if (SEQ_LEQ(head_blk.start, end) && | |
0a7de745 | 164 | SEQ_GEQ(head_blk.end, start)) { |
8ad349bb A |
165 | /* |
166 | * Merge this SACK block into head_blk. | |
167 | * This SACK block itself will be discarded. | |
168 | */ | |
0a7de745 | 169 | if (SEQ_GT(head_blk.start, start)) { |
8ad349bb | 170 | head_blk.start = start; |
0a7de745 A |
171 | } |
172 | if (SEQ_LT(head_blk.end, end)) { | |
8ad349bb | 173 | head_blk.end = end; |
0a7de745 | 174 | } |
8ad349bb A |
175 | } else { |
176 | /* | |
177 | * Save this SACK block. | |
178 | */ | |
179 | saved_blks[num_saved].start = start; | |
180 | saved_blks[num_saved].end = end; | |
181 | num_saved++; | |
182 | } | |
183 | } | |
184 | ||
185 | /* | |
186 | * Update SACK list in tp->sackblks[]. | |
187 | */ | |
188 | num_head = 0; | |
189 | if (SEQ_GT(head_blk.start, tp->rcv_nxt)) { | |
190 | /* | |
191 | * The received data segment is an out-of-order segment. | |
192 | * Put head_blk at the top of SACK list. | |
193 | */ | |
194 | tp->sackblks[0] = head_blk; | |
195 | num_head = 1; | |
196 | /* | |
197 | * If the number of saved SACK blocks exceeds its limit, | |
198 | * discard the last SACK block. | |
199 | */ | |
0a7de745 | 200 | if (num_saved >= MAX_SACK_BLKS) { |
8ad349bb | 201 | num_saved--; |
0a7de745 | 202 | } |
8ad349bb A |
203 | } |
204 | if (num_saved > 0) { | |
205 | /* | |
206 | * Copy the saved SACK blocks back. | |
207 | */ | |
f427ee49 | 208 | bcopy(saved_blks, &tp->sackblks[num_head], sizeof(struct sackblk) * num_saved); |
8ad349bb A |
209 | } |
210 | ||
211 | /* Save the number of SACK blocks. */ | |
212 | tp->rcv_numsacks = num_head + num_saved; | |
6d2010ae A |
213 | |
214 | /* If we are requesting SACK recovery, reset the stretch-ack state | |
215 | * so that connection will generate more acks after recovery and | |
216 | * sender's cwnd will open. | |
217 | */ | |
0a7de745 | 218 | if ((tp->t_flags & TF_STRETCHACK) != 0 && tp->rcv_numsacks > 0) { |
6d2010ae | 219 | tcp_reset_stretch_ack(tp); |
0a7de745 | 220 | } |
f427ee49 A |
221 | if (tp->rcv_numsacks > 0) { |
222 | tp->t_forced_acks = TCP_FORCED_ACKS_COUNT; | |
223 | } | |
6d2010ae A |
224 | |
225 | #if TRAFFIC_MGT | |
0a7de745 | 226 | if (tp->acc_iaj > 0 && tp->rcv_numsacks > 0) { |
6d2010ae | 227 | reset_acc_iaj(tp); |
0a7de745 | 228 | } |
6d2010ae | 229 | #endif /* TRAFFIC_MGT */ |
8ad349bb A |
230 | } |
231 | ||
232 | /* | |
233 | * Delete all receiver-side SACK information. | |
234 | */ | |
235 | void | |
236 | tcp_clean_sackreport( struct tcpcb *tp) | |
237 | { | |
8ad349bb | 238 | tp->rcv_numsacks = 0; |
0a7de745 | 239 | bzero(&tp->sackblks[0], sizeof(struct sackblk) * MAX_SACK_BLKS); |
8ad349bb A |
240 | } |
241 | ||
242 | /* | |
243 | * Allocate struct sackhole. | |
244 | */ | |
245 | static struct sackhole * | |
246 | tcp_sackhole_alloc(struct tcpcb *tp, tcp_seq start, tcp_seq end) | |
247 | { | |
248 | struct sackhole *hole; | |
249 | ||
250 | if (tp->snd_numholes >= tcp_sack_maxholes || | |
251 | tcp_sack_globalholes >= tcp_sack_globalmaxholes) { | |
252 | tcpstat.tcps_sack_sboverflow++; | |
253 | return NULL; | |
254 | } | |
255 | ||
fe8ab488 | 256 | hole = (struct sackhole *)zalloc(sack_hole_zone); |
0a7de745 | 257 | if (hole == NULL) { |
8ad349bb | 258 | return NULL; |
0a7de745 | 259 | } |
8ad349bb A |
260 | |
261 | hole->start = start; | |
262 | hole->end = end; | |
263 | hole->rxmit = start; | |
264 | ||
265 | tp->snd_numholes++; | |
fe8ab488 | 266 | OSIncrementAtomic(&tcp_sack_globalholes); |
8ad349bb A |
267 | |
268 | return hole; | |
269 | } | |
270 | ||
271 | /* | |
272 | * Free struct sackhole. | |
273 | */ | |
274 | static void | |
275 | tcp_sackhole_free(struct tcpcb *tp, struct sackhole *hole) | |
276 | { | |
277 | zfree(sack_hole_zone, hole); | |
278 | ||
279 | tp->snd_numholes--; | |
fe8ab488 | 280 | OSDecrementAtomic(&tcp_sack_globalholes); |
8ad349bb A |
281 | } |
282 | ||
283 | /* | |
284 | * Insert new SACK hole into scoreboard. | |
285 | */ | |
286 | static struct sackhole * | |
287 | tcp_sackhole_insert(struct tcpcb *tp, tcp_seq start, tcp_seq end, | |
0a7de745 | 288 | struct sackhole *after) |
8ad349bb A |
289 | { |
290 | struct sackhole *hole; | |
291 | ||
292 | /* Allocate a new SACK hole. */ | |
293 | hole = tcp_sackhole_alloc(tp, start, end); | |
0a7de745 | 294 | if (hole == NULL) { |
8ad349bb | 295 | return NULL; |
0a7de745 | 296 | } |
fe8ab488 | 297 | hole->rxmit_start = tcp_now; |
8ad349bb | 298 | /* Insert the new SACK hole into scoreboard */ |
0a7de745 | 299 | if (after != NULL) { |
8ad349bb | 300 | TAILQ_INSERT_AFTER(&tp->snd_holes, after, hole, scblink); |
0a7de745 | 301 | } else { |
8ad349bb | 302 | TAILQ_INSERT_TAIL(&tp->snd_holes, hole, scblink); |
0a7de745 | 303 | } |
8ad349bb A |
304 | |
305 | /* Update SACK hint. */ | |
0a7de745 | 306 | if (tp->sackhint.nexthole == NULL) { |
8ad349bb | 307 | tp->sackhint.nexthole = hole; |
0a7de745 | 308 | } |
8ad349bb | 309 | |
0a7de745 | 310 | return hole; |
8ad349bb A |
311 | } |
312 | ||
/*
 * Remove SACK hole from scoreboard.
 *
 * The hint must be advanced before the hole is unlinked, because
 * TAILQ_NEXT() is only meaningful while the hole is still on the list.
 */
static void
tcp_sackhole_remove(struct tcpcb *tp, struct sackhole *hole)
{
	/* Update SACK hint. */
	if (tp->sackhint.nexthole == hole) {
		tp->sackhint.nexthole = TAILQ_NEXT(hole, scblink);
	}

	/* Remove this SACK hole. */
	TAILQ_REMOVE(&tp->snd_holes, hole, scblink);

	/* Free this SACK hole. */
	tcp_sackhole_free(tp, hole);
}
fe8ab488 A |
/*
 * When a new ack with SACK is received, check if it indicates packet
 * reordering. If there is packet reordering, the socket is marked and
 * the late time offset by which the packet was reordered with
 * respect to its closest neighboring packets is computed.
 *
 * @param tp         connection being updated
 * @param s          the SACK hole the incoming SACK block touched
 * @param sacked_seq highest sequence the block newly sacked in this hole
 * @param snd_fack   snd_fack value from before this ACK was processed
 */
static void
tcp_sack_detect_reordering(struct tcpcb *tp, struct sackhole *s,
    tcp_seq sacked_seq, tcp_seq snd_fack)
{
	int32_t rext = 0, reordered = 0;

	/*
	 * If the SACK hole is past snd_fack, this is from new SACK
	 * information, so we can ignore it.
	 */
	if (SEQ_GT(s->end, snd_fack)) {
		return;
	}
	/*
	 * If there has been a retransmit timeout, then the timestamp on
	 * the SACK segment will be newer. This might lead to a
	 * false-positive. Avoid re-ordering detection in this case.
	 */
	if (tp->t_rxtshift > 0) {
		return;
	}

	/*
	 * Detect reordering from SACK information by checking
	 * if recently sacked data was never retransmitted from this hole.
	 *
	 * First, we look for the byte in the list of retransmitted segments. This one
	 * will contain even the segments that are retransmitted thanks to RTO/TLP.
	 *
	 * Then, we check the sackhole which indicates whether or not the sackhole
	 * was subject to retransmission.
	 */
	if (SEQ_LT(s->rxmit, sacked_seq) &&
	    (!tcp_do_better_lr || tcp_rxtseg_find(tp, sacked_seq - 1, sacked_seq - 1) == NULL)) {
		reordered = 1;
		tcpstat.tcps_avoid_rxmt++;
	}

	if (reordered) {
		/* Mark the connection only once; count every occurrence. */
		if (!(tp->t_flagsext & TF_PKTS_REORDERED)) {
			tp->t_flagsext |= TF_PKTS_REORDERED;
			tcpstat.tcps_detect_reordering++;
		}

		tcpstat.tcps_reordered_pkts++;
		tp->t_reordered_pkts++;

		/*
		 * If reordering is seen on a connection wth ECN enabled,
		 * increment the heuristic
		 */
		if (TCP_ECN_ENABLED(tp)) {
			INP_INC_IFNET_STAT(tp->t_inpcb, ecn_fallback_reorder);
			tcpstat.tcps_ecn_fallback_reorder++;
			tcp_heuristic_ecn_aggressive(tp);
		}

		VERIFY(SEQ_GEQ(snd_fack, s->rxmit));

		if (s->rxmit_start > 0) {
			/* Lateness of the reordered data, in ms since rxmit. */
			rext = timer_diff(tcp_now, 0, s->rxmit_start, 0);
			if (rext < 0) {
				return;
			}

			/*
			 * We take the maximum reorder window to schedule
			 * DELAYFR timer as that will take care of jitter
			 * on the network path.
			 *
			 * Computing average and standard deviation seems
			 * to cause unnecessary retransmissions when there
			 * is high jitter.
			 *
			 * We set a maximum of SRTT/2 and a minimum of
			 * 10 ms on the reorder window.
			 */
			tp->t_reorderwin = max(tp->t_reorderwin, rext);
			tp->t_reorderwin = min(tp->t_reorderwin,
			    (tp->t_srtt >> (TCP_RTT_SHIFT - 1)));
			tp->t_reorderwin = max(tp->t_reorderwin, 10);
		}
	}
}
8ad349bb | 420 | |
f427ee49 A |
421 | static void |
422 | tcp_sack_update_byte_counter(struct tcpcb *tp, uint32_t start, uint32_t end, | |
423 | uint32_t *newbytes_acked, uint32_t *towards_fr_acked) | |
424 | { | |
425 | *newbytes_acked += (end - start); | |
426 | if (SEQ_GEQ(start, tp->send_highest_sack)) { | |
427 | *towards_fr_acked += (end - start); | |
428 | } | |
429 | } | |
430 | ||
/*
 * Process cumulative ACK and the TCP SACK option to update the scoreboard.
 * tp->snd_holes is an ordered list of holes (oldest to newest, in terms of
 * the sequence space).
 *
 * @param tp                 connection whose scoreboard is updated
 * @param to                 parsed TCP options carrying the SACK blocks
 * @param th                 header of the incoming ACK segment
 * @param newbytes_acked     out: bytes newly SACKed by this segment
 * @param after_rexmit_acked out: newly SACKed bytes at/above
 *                           send_highest_sack (see
 *                           tcp_sack_update_byte_counter())
 */
void
tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th,
    u_int32_t *newbytes_acked, uint32_t *after_rexmit_acked)
{
	struct sackhole *cur, *temp;
	struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1], *sblkp;
	int i, j, num_sack_blks;
	tcp_seq old_snd_fack = 0, th_ack = th->th_ack;

	num_sack_blks = 0;
	/*
	 * If SND.UNA will be advanced by SEG.ACK, and if SACK holes exist,
	 * treat [SND.UNA, SEG.ACK) as if it is a SACK block.
	 */
	if (SEQ_LT(tp->snd_una, th_ack) && !TAILQ_EMPTY(&tp->snd_holes)) {
		sack_blocks[num_sack_blks].start = tp->snd_una;
		sack_blocks[num_sack_blks++].end = th_ack;
	}
	/*
	 * Append received valid SACK blocks to sack_blocks[].
	 * Check that the SACK block range is valid.
	 */
	for (i = 0; i < to->to_nsacks; i++) {
		/* bcopy: the option bytes may not be suitably aligned. */
		bcopy((to->to_sacks + i * TCPOLEN_SACK),
		    &sack, sizeof(sack));
		sack.start = ntohl(sack.start);
		sack.end = ntohl(sack.end);
		if (TCP_VALIDATE_SACK_SEQ_NUMBERS(tp, &sack, th_ack)) {
			sack_blocks[num_sack_blks++] = sack;
		}
	}

	/*
	 * Return if SND.UNA is not advanced and no valid SACK block
	 * is received.
	 */
	if (num_sack_blks == 0) {
		return;
	}

	VERIFY(num_sack_blks <= (TCP_MAX_SACK + 1));
	/*
	 * Sort the SACK blocks so we can update the scoreboard
	 * with just one pass. The overhead of sorting upto 4+1 elements
	 * is less than making upto 4+1 passes over the scoreboard.
	 */
	for (i = 0; i < num_sack_blks; i++) {
		for (j = i + 1; j < num_sack_blks; j++) {
			if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) {
				sack = sack_blocks[i];
				sack_blocks[i] = sack_blocks[j];
				sack_blocks[j] = sack;
			}
		}
	}
	if (TAILQ_EMPTY(&tp->snd_holes)) {
		/*
		 * Empty scoreboard. Need to initialize snd_fack (it may be
		 * uninitialized or have a bogus value). Scoreboard holes
		 * (from the sack blocks received) are created later below (in
		 * the logic that adds holes to the tail of the scoreboard).
		 */
		tp->snd_fack = SEQ_MAX(tp->snd_una, th_ack);
	}

	/* Keep the pre-update fack for reordering detection below. */
	old_snd_fack = tp->snd_fack;
	/*
	 * In the while-loop below, incoming SACK blocks (sack_blocks[])
	 * and SACK holes (snd_holes) are traversed from their tails with
	 * just one pass in order to reduce the number of compares especially
	 * when the bandwidth-delay product is large.
	 * Note: Typically, in the first RTT of SACK recovery, the highest
	 * three or four SACK blocks with the same ack number are received.
	 * In the second RTT, if retransmitted data segments are not lost,
	 * the highest three or four SACK blocks with ack number advancing
	 * are received.
	 */
	sblkp = &sack_blocks[num_sack_blks - 1];        /* Last SACK block */
	if (SEQ_LT(tp->snd_fack, sblkp->start)) {
		/*
		 * The highest SACK block is beyond fack.
		 * Append new SACK hole at the tail.
		 * If the second or later highest SACK blocks are also
		 * beyond the current fack, they will be inserted by
		 * way of hole splitting in the while-loop below.
		 */
		temp = tcp_sackhole_insert(tp, tp->snd_fack, sblkp->start, NULL);
		if (temp != NULL) {
			tp->snd_fack = sblkp->end;
			tcp_sack_update_byte_counter(tp, sblkp->start, sblkp->end, newbytes_acked, after_rexmit_acked);

			/* Go to the previous sack block. */
			sblkp--;
		} else {
			/*
			 * We failed to add a new hole based on the current
			 * sack block. Skip over all the sack blocks that
			 * fall completely to the right of snd_fack and proceed
			 * to trim the scoreboard based on the remaining sack
			 * blocks. This also trims the scoreboard for th_ack
			 * (which is sack_blocks[0]).
			 */
			while (sblkp >= sack_blocks &&
			    SEQ_LT(tp->snd_fack, sblkp->start)) {
				sblkp--;
			}
			if (sblkp >= sack_blocks &&
			    SEQ_LT(tp->snd_fack, sblkp->end)) {
				tcp_sack_update_byte_counter(tp, tp->snd_fack, sblkp->end, newbytes_acked, after_rexmit_acked);
				tp->snd_fack = sblkp->end;
			}
		}
	} else if (SEQ_LT(tp->snd_fack, sblkp->end)) {
		/* fack is advanced. */
		tcp_sack_update_byte_counter(tp, tp->snd_fack, sblkp->end, newbytes_acked, after_rexmit_acked);
		tp->snd_fack = sblkp->end;
	}
	/* We must have at least one SACK hole in scoreboard */
	cur = TAILQ_LAST(&tp->snd_holes, sackhole_head); /* Last SACK hole */
	/*
	 * Since the incoming sack blocks are sorted, we can process them
	 * making one sweep of the scoreboard.
	 */
	while (sblkp >= sack_blocks && cur != NULL) {
		if (SEQ_GEQ(sblkp->start, cur->end)) {
			/*
			 * SACKs data beyond the current hole.
			 * Go to the previous sack block.
			 */
			sblkp--;
			continue;
		}
		if (SEQ_LEQ(sblkp->end, cur->start)) {
			/*
			 * SACKs data before the current hole.
			 * Go to the previous hole.
			 */
			cur = TAILQ_PREV(cur, sackhole_head, scblink);
			continue;
		}
		/*
		 * The block overlaps this hole: withdraw the hole's rexmit
		 * bytes from the hint; they are re-added (possibly reduced)
		 * after the hole is trimmed/split below.
		 */
		tp->sackhint.sack_bytes_rexmit -= (cur->rxmit - cur->start);
		if (tp->sackhint.sack_bytes_rexmit < 0) {
			tp->sackhint.sack_bytes_rexmit = 0;
		}

		if (SEQ_LEQ(sblkp->start, cur->start)) {
			/* Data acks at least the beginning of hole */
			if (SEQ_GEQ(sblkp->end, cur->end)) {
				/* Acks entire hole, so delete hole */
				tcp_sack_update_byte_counter(tp, cur->start, cur->end, newbytes_acked, after_rexmit_acked);

				tcp_sack_detect_reordering(tp, cur,
				    cur->end, old_snd_fack);
				temp = cur;
				cur = TAILQ_PREV(cur, sackhole_head, scblink);
				tcp_sackhole_remove(tp, temp);
				/*
				 * The sack block may ack all or part of the next
				 * hole too, so continue onto the next hole.
				 */
				continue;
			} else {
				/* Move start of hole forward */
				tcp_sack_update_byte_counter(tp, cur->start, sblkp->end, newbytes_acked, after_rexmit_acked);
				tcp_sack_detect_reordering(tp, cur,
				    sblkp->end, old_snd_fack);
				cur->start = sblkp->end;
				cur->rxmit = SEQ_MAX(cur->rxmit, cur->start);
			}
		} else {
			/* Data acks at least the end of hole */
			if (SEQ_GEQ(sblkp->end, cur->end)) {
				/* Move end of hole backward */
				tcp_sack_update_byte_counter(tp, sblkp->start, cur->end, newbytes_acked, after_rexmit_acked);
				tcp_sack_detect_reordering(tp, cur,
				    cur->end, old_snd_fack);
				cur->end = sblkp->start;
				cur->rxmit = SEQ_MIN(cur->rxmit, cur->end);
			} else {
				/*
				 * ACKs some data in the middle of a hole;
				 * need to split current hole
				 */
				tcp_sack_detect_reordering(tp, cur,
				    sblkp->end, old_snd_fack);
				temp = tcp_sackhole_insert(tp, sblkp->end,
				    cur->end, cur);
				if (temp != NULL) {
					tcp_sack_update_byte_counter(tp, sblkp->start, sblkp->end, newbytes_acked, after_rexmit_acked);
					if (SEQ_GT(cur->rxmit, temp->rxmit)) {
						temp->rxmit = cur->rxmit;
						tp->sackhint.sack_bytes_rexmit
						    += (temp->rxmit
						    - temp->start);
					}
					cur->end = sblkp->start;
					cur->rxmit = SEQ_MIN(cur->rxmit,
					    cur->end);
					/*
					 * Reset the rxmit_start to that of
					 * the current hole as that will
					 * help to compute the reorder
					 * window correctly
					 */
					temp->rxmit_start = cur->rxmit_start;
				}
			}
		}
		/* Re-add this hole's (possibly reduced) rexmit bytes. */
		tp->sackhint.sack_bytes_rexmit += (cur->rxmit - cur->start);
		/*
		 * Testing sblkp->start against cur->start tells us whether
		 * we're done with the sack block or the sack hole.
		 * Accordingly, we advance one or the other.
		 */
		if (SEQ_LEQ(sblkp->start, cur->start)) {
			cur = TAILQ_PREV(cur, sackhole_head, scblink);
		} else {
			sblkp--;
		}
	}
}
657 | ||
658 | /* | |
659 | * Free all SACK holes to clear the scoreboard. | |
660 | */ | |
661 | void | |
662 | tcp_free_sackholes(struct tcpcb *tp) | |
663 | { | |
664 | struct sackhole *q; | |
665 | ||
0a7de745 | 666 | while ((q = TAILQ_FIRST(&tp->snd_holes)) != NULL) { |
8ad349bb | 667 | tcp_sackhole_remove(tp, q); |
0a7de745 | 668 | } |
8ad349bb | 669 | tp->sackhint.sack_bytes_rexmit = 0; |
f427ee49 A |
670 | tp->sackhint.sack_bytes_acked = 0; |
671 | tp->t_new_dupacks = 0; | |
b0d623f7 A |
672 | tp->sackhint.nexthole = NULL; |
673 | tp->sack_newdata = 0; | |
8ad349bb A |
674 | } |
675 | ||
/*
 * Partial ack handling within a sack recovery episode.
 * Keeping this very simple for now. When a partial ack
 * is received, force snd_cwnd to a value that will allow
 * the sender to transmit no more than 2 segments.
 * If necessary, a better scheme can be adopted at a
 * later point, but for now, the goal is to prevent the
 * sender from bursting a large amount of data in the midst
 * of sack recovery.
 */
void
tcp_sack_partialack(struct tcpcb *tp, struct tcphdr *th)
{
	int num_segs = 1;

	/* A partial ACK proves forward progress: cancel REXMT and RTT timing. */
	tp->t_timer[TCPT_REXMT] = 0;
	tp->t_rtttime = 0;
	/* send one or 2 segments based on how much new data was acked */
	if (((BYTES_ACKED(th, tp)) / tp->t_maxseg) > 2) {
		num_segs = 2;
	}
	if (tcp_do_better_lr) {
		tp->snd_cwnd = tcp_flight_size(tp) + num_segs * tp->t_maxseg;
	} else {
		/* Legacy clamp: rexmitted bytes + new data sent in recovery. */
		tp->snd_cwnd = (tp->sackhint.sack_bytes_rexmit +
		    (tp->snd_nxt - tp->sack_newdata) +
		    num_segs * tp->t_maxseg);
	}
	if (tp->snd_cwnd > tp->snd_ssthresh) {
		tp->snd_cwnd = tp->snd_ssthresh;
	}
	if (SEQ_LT(tp->snd_fack, tp->snd_recover) &&
	    tp->snd_fack == th->th_ack && TAILQ_EMPTY(&tp->snd_holes)) {
		struct sackhole *temp;
		/*
		 * we received a partial ack but there is no sack_hole
		 * that will cover the remaining seq space. In this case,
		 * create a hole from snd_fack to snd_recover so that
		 * the sack recovery will continue.
		 */
		temp = tcp_sackhole_insert(tp, tp->snd_fack,
		    tp->snd_recover, NULL);
		if (temp != NULL) {
			tp->snd_fack = tp->snd_recover;
		}
	}
	(void) tcp_output(tp);
}
724 | ||
725 | /* | |
726 | * Debug version of tcp_sack_output() that walks the scoreboard. Used for | |
727 | * now to sanity check the hint. | |
728 | */ | |
729 | static struct sackhole * | |
730 | tcp_sack_output_debug(struct tcpcb *tp, int *sack_bytes_rexmt) | |
731 | { | |
732 | struct sackhole *p; | |
733 | ||
734 | *sack_bytes_rexmt = 0; | |
735 | TAILQ_FOREACH(p, &tp->snd_holes, scblink) { | |
736 | if (SEQ_LT(p->rxmit, p->end)) { | |
737 | if (SEQ_LT(p->rxmit, tp->snd_una)) {/* old SACK hole */ | |
738 | continue; | |
739 | } | |
740 | *sack_bytes_rexmt += (p->rxmit - p->start); | |
741 | break; | |
742 | } | |
743 | *sack_bytes_rexmt += (p->rxmit - p->start); | |
744 | } | |
0a7de745 | 745 | return p; |
8ad349bb A |
746 | } |
747 | ||
/*
 * Returns the next hole to retransmit and the number of retransmitted bytes
 * from the scoreboard. We store both the next hole and the number of
 * retransmitted bytes as hints (and recompute these on the fly upon SACK/ACK
 * reception). This avoids scoreboard traversals completely.
 *
 * The loop here will traverse *at most* one link. Here's the argument.
 * For the loop to traverse more than 1 link before finding the next hole to
 * retransmit, we would need to have at least 1 node following the current hint
 * with (rxmit == end). But, for all holes following the current hint,
 * (start == rxmit), since we have not yet retransmitted from them. Therefore,
 * in order to traverse more 1 link in the loop below, we need to have at least
 * one node following the current hint with (start == rxmit == end).
 * But that can't happen, (start == end) means that all the data in that hole
 * has been sacked, in which case, the hole would have been removed from the
 * scoreboard.
 */
struct sackhole *
tcp_sack_output(struct tcpcb *tp, int *sack_bytes_rexmt)
{
	struct sackhole *hole = NULL, *dbg_hole = NULL;
	int dbg_bytes_rexmt;

	/* Full scoreboard walk, used to cross-check the cached hints below. */
	dbg_hole = tcp_sack_output_debug(tp, &dbg_bytes_rexmt);
	*sack_bytes_rexmt = tp->sackhint.sack_bytes_rexmit;
	hole = tp->sackhint.nexthole;
	/* Hint still has unretransmitted data (or no hole at all): use it. */
	if (hole == NULL || SEQ_LT(hole->rxmit, hole->end)) {
		goto out;
	}
	/* Hint exhausted: advance it to the next hole with data left. */
	while ((hole = TAILQ_NEXT(hole, scblink)) != NULL) {
		if (SEQ_LT(hole->rxmit, hole->end)) {
			tp->sackhint.nexthole = hole;
			break;
		}
	}
out:
	/* On mismatch, log and trust the recomputed values over the hints. */
	if (dbg_hole != hole) {
		printf("%s: Computed sack hole not the same as cached value\n", __func__);
		hole = dbg_hole;
	}
	if (*sack_bytes_rexmt != dbg_bytes_rexmt) {
		printf("%s: Computed sack_bytes_retransmitted (%d) not "
		    "the same as cached value (%d)\n",
		    __func__, dbg_bytes_rexmt, *sack_bytes_rexmt);
		*sack_bytes_rexmt = dbg_bytes_rexmt;
	}
	return hole;
}
796 | ||
f427ee49 A |
797 | void |
798 | tcp_sack_lost_rexmit(struct tcpcb *tp) | |
799 | { | |
800 | struct sackhole *hole = TAILQ_FIRST(&tp->snd_holes); | |
801 | ||
802 | while (hole) { | |
803 | hole->rxmit = hole->start; | |
804 | hole->rxmit_start = tcp_now; | |
805 | ||
806 | hole = TAILQ_NEXT(hole, scblink); | |
807 | } | |
808 | ||
809 | tp->sackhint.nexthole = TAILQ_FIRST(&tp->snd_holes); | |
810 | tp->sackhint.sack_bytes_rexmit = 0; | |
811 | tp->sack_newdata = tp->snd_nxt; | |
812 | } | |
813 | ||
8ad349bb A |
814 | /* |
815 | * After a timeout, the SACK list may be rebuilt. This SACK information | |
816 | * should be used to avoid retransmitting SACKed data. This function | |
817 | * traverses the SACK list to see if snd_nxt should be moved forward. | |
818 | */ | |
819 | void | |
820 | tcp_sack_adjust(struct tcpcb *tp) | |
821 | { | |
822 | struct sackhole *p, *cur = TAILQ_FIRST(&tp->snd_holes); | |
823 | ||
0a7de745 | 824 | if (cur == NULL) { |
8ad349bb | 825 | return; /* No holes */ |
0a7de745 A |
826 | } |
827 | if (SEQ_GEQ(tp->snd_nxt, tp->snd_fack)) { | |
8ad349bb | 828 | return; /* We're already beyond any SACKed blocks */ |
0a7de745 | 829 | } |
8ad349bb A |
830 | /* |
831 | * Two cases for which we want to advance snd_nxt: | |
832 | * i) snd_nxt lies between end of one hole and beginning of another | |
833 | * ii) snd_nxt lies between end of last hole and snd_fack | |
834 | */ | |
835 | while ((p = TAILQ_NEXT(cur, scblink)) != NULL) { | |
0a7de745 | 836 | if (SEQ_LT(tp->snd_nxt, cur->end)) { |
8ad349bb | 837 | return; |
0a7de745 A |
838 | } |
839 | if (SEQ_GEQ(tp->snd_nxt, p->start)) { | |
8ad349bb | 840 | cur = p; |
0a7de745 | 841 | } else { |
8ad349bb A |
842 | tp->snd_nxt = p->start; |
843 | return; | |
844 | } | |
845 | } | |
0a7de745 | 846 | if (SEQ_LT(tp->snd_nxt, cur->end)) { |
8ad349bb | 847 | return; |
0a7de745 | 848 | } |
8ad349bb A |
849 | tp->snd_nxt = tp->snd_fack; |
850 | return; | |
851 | } | |
fe8ab488 A |
852 | |
853 | /* | |
3e170ce0 | 854 | * This function returns TRUE if more than (tcprexmtthresh - 1) * SMSS |
0a7de745 | 855 | * bytes with sequence numbers greater than snd_una have been SACKed. |
fe8ab488 A |
856 | */ |
857 | boolean_t | |
858 | tcp_sack_byte_islost(struct tcpcb *tp) | |
859 | { | |
860 | u_int32_t unacked_bytes, sndhole_bytes = 0; | |
861 | struct sackhole *sndhole; | |
862 | if (!SACK_ENABLED(tp) || IN_FASTRECOVERY(tp) || | |
863 | TAILQ_EMPTY(&tp->snd_holes) || | |
0a7de745 A |
864 | (tp->t_flagsext & TF_PKTS_REORDERED)) { |
865 | return FALSE; | |
866 | } | |
fe8ab488 A |
867 | |
868 | unacked_bytes = tp->snd_max - tp->snd_una; | |
869 | ||
870 | TAILQ_FOREACH(sndhole, &tp->snd_holes, scblink) { | |
871 | sndhole_bytes += (sndhole->end - sndhole->start); | |
872 | } | |
873 | ||
874 | VERIFY(unacked_bytes >= sndhole_bytes); | |
0a7de745 A |
875 | return (unacked_bytes - sndhole_bytes) > |
876 | ((tcprexmtthresh - 1) * tp->t_maxseg); | |
fe8ab488 | 877 | } |
/*
 * Process any DSACK options that might be present on an input packet.
 *
 * Returns TRUE when a DSACK block was found (the block is consumed from
 * the option list so the caller will not treat it as an ordinary SACK,
 * and the ACK is not counted as a duplicate); returns FALSE when the
 * first SACK block is not a DSACK, so normal SACK processing proceeds.
 */

boolean_t
tcp_sack_process_dsack(struct tcpcb *tp, struct tcpopt *to,
    struct tcphdr *th)
{
	struct sackblk first_sack, second_sack;
	struct tcp_rxt_seg *rxseg;

	/* First SACK block, converted to host byte order. */
	bcopy(to->to_sacks, &first_sack, sizeof(first_sack));
	first_sack.start = ntohl(first_sack.start);
	first_sack.end = ntohl(first_sack.end);

	/* Second block (if any) is needed to recognize a DSACK that is
	 * above the cumulative ACK but covered by a later SACK block. */
	if (to->to_nsacks > 1) {
		bcopy((to->to_sacks + TCPOLEN_SACK), &second_sack,
		    sizeof(second_sack));
		second_sack.start = ntohl(second_sack.start);
		second_sack.end = ntohl(second_sack.end);
	}

	if (SEQ_LT(first_sack.start, th->th_ack) &&
	    SEQ_LEQ(first_sack.end, th->th_ack)) {
		/*
		 * There is a dsack option reporting a duplicate segment
		 * also covered by cumulative acknowledgement.
		 *
		 * Validate the sequence numbers before looking at dsack
		 * option. The duplicate notification can come after
		 * snd_una moves forward. In order to set a window of valid
		 * sequence numbers to look for, we set a maximum send
		 * window within which the DSACK option will be processed.
		 */
		if (!(TCP_DSACK_SEQ_IN_WINDOW(tp, first_sack.start, th->th_ack) &&
		    TCP_DSACK_SEQ_IN_WINDOW(tp, first_sack.end, th->th_ack))) {
			/* Stale DSACK: drop the block and count it. */
			to->to_nsacks--;
			to->to_sacks += TCPOLEN_SACK;
			tcpstat.tcps_dsack_recvd_old++;

			/*
			 * returning true here so that the ack will not be
			 * treated as duplicate ack.
			 */
			return TRUE;
		}
	} else if (to->to_nsacks > 1 &&
	    SEQ_LEQ(second_sack.start, first_sack.start) &&
	    SEQ_GEQ(second_sack.end, first_sack.end)) {
		/*
		 * there is a dsack option in the first block not
		 * covered by the cumulative acknowledgement but covered
		 * by the second sack block.
		 *
		 * verify the sequence numbers on the second sack block
		 * before processing the DSACK option. Returning false
		 * here will treat the ack as a duplicate ack.
		 */
		if (!TCP_VALIDATE_SACK_SEQ_NUMBERS(tp, &second_sack,
		    th->th_ack)) {
			to->to_nsacks--;
			to->to_sacks += TCPOLEN_SACK;
			tcpstat.tcps_dsack_recvd_old++;
			return TRUE;
		}
	} else {
		/* no dsack options, proceed with processing the sack */
		return FALSE;
	}

	/* Update the tcpopt pointer to exclude dsack block */
	to->to_nsacks--;
	to->to_sacks += TCPOLEN_SACK;
	tcpstat.tcps_dsack_recvd++;
	tp->t_dsack_recvd++;

	/* If the DSACK is for TLP mark it as such */
	if ((tp->t_flagsext & TF_SENT_TLPROBE) &&
	    first_sack.end == tp->t_tlphighrxt) {
		if ((rxseg = tcp_rxtseg_find(tp, first_sack.start,
		    (first_sack.end - 1))) != NULL) {
			rxseg->rx_flags |= TCP_RXT_DSACK_FOR_TLP;
		}
	}
	/* Update the sender's retransmit segment state */
	if (((tp->t_rxtshift == 1 && first_sack.start == tp->snd_una) ||
	    ((tp->t_flagsext & TF_SENT_TLPROBE) &&
	    first_sack.end == tp->t_tlphighrxt)) &&
	    TAILQ_EMPTY(&tp->snd_holes) &&
	    SEQ_GT(th->th_ack, tp->snd_una)) {
		/*
		 * If the dsack is for a retransmitted packet and one of
		 * the two cases is true, it indicates ack loss:
		 * - retransmit timeout and first_sack.start == snd_una
		 * - TLP probe and first_sack.end == tlphighrxt
		 *
		 * Ignore dsack and do not update state when there is
		 * ack loss
		 */
		tcpstat.tcps_dsack_ackloss++;

		return TRUE;
	} else {
		/* DSACK reports data we retransmitted unnecessarily. */
		tcp_rxtseg_set_spurious(tp, first_sack.start, (first_sack.end - 1));
	}
	return TRUE;
}