/*
 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *    The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *    @(#)tcp_input.c    8.12 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_input.c,v 1.107.2.16 2001/08/22 00:59:12 silby Exp $
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>        /* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>
#include <sys/mcache.h>
#if !CONFIG_EMBEDDED
#include <sys/kasl.h>
#endif
#include <sys/kauth.h>
#include <kern/cpu_number.h>    /* before tcp_seq.h, for tcp_random18() */

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/ntstat.h>
#include <net/dlil.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>    /* for ICMP_BANDLIM */
#include <netinet/in_var.h>
#include <netinet/icmp_var.h>   /* for ICMP_BANDLIM */
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <mach/sdt.h>
#if INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_cache.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_cc.h>
#include <dev/random/randomdev.h>
#include <kern/zalloc.h>
#if INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#if TCPDEBUG
#include <netinet/tcp_debug.h>
u_char tcp_saveipgen[40]; /* the size must be of max ip header, now IPv6 */
struct tcphdr tcp_savetcp;
#endif /* TCPDEBUG */

#if IPSEC
#include <netinet6/ipsec.h>
#if INET6
#include <netinet6/ipsec6.h>
#endif
#include <netkey/key.h>
#endif /* IPSEC */

#if CONFIG_MACF_NET || CONFIG_MACF_SOCKET
#include <security/mac_framework.h>
#endif /* CONFIG_MACF_NET || CONFIG_MACF_SOCKET */

#include <sys/kdebug.h>
#include <netinet/lro_ext.h>
#if MPTCP
#include <netinet/mptcp_var.h>
#include <netinet/mptcp.h>
#include <netinet/mptcp_opt.h>
#endif /* MPTCP */

#include <corecrypto/ccaes.h>

#define DBG_LAYER_BEG        NETDBG_CODE(DBG_NETTCP, 0)
#define DBG_LAYER_END        NETDBG_CODE(DBG_NETTCP, 2)
#define DBG_FNC_TCP_INPUT    NETDBG_CODE(DBG_NETTCP, (3 << 8))
#define DBG_FNC_TCP_NEWCONN  NETDBG_CODE(DBG_NETTCP, (7 << 8))

#define TCP_RTT_HISTORY_EXPIRE_TIME   (60 * TCP_RETRANSHZ)
#define TCP_RECV_THROTTLE_WIN         (5 * TCP_RETRANSHZ)
#define TCP_STRETCHACK_ENABLE_PKTCNT  2000

struct tcpstat tcpstat;

static int log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain,
    CTLFLAG_RW | CTLFLAG_LOCKED, &log_in_vain, 0,
    "Log all incoming TCP connections");

static int blackhole = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole,
    CTLFLAG_RW | CTLFLAG_LOCKED, &blackhole, 0,
    "Do not send RST when dropping refused connections");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, delayed_ack,
    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_delack_enabled, 3,
    "Delay ACK to try and piggyback it onto a data packet");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, tcp_lq_overflow,
    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_lq_overflow, 1,
    "Listen Queue Overflow");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, recvbg, CTLFLAG_RW | CTLFLAG_LOCKED,
    int, tcp_recv_bg, 0, "Receive background");

#if TCP_DROP_SYNFIN
SYSCTL_SKMEM_TCP_INT(OID_AUTO, drop_synfin,
    CTLFLAG_RW | CTLFLAG_LOCKED, static int, drop_synfin, 1,
    "Drop TCP packets with SYN+FIN set");
#endif

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "TCP Segment Reassembly Queue");

static int tcp_reass_overflows = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, overflows,
    CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_reass_overflows, 0,
    "Global number of TCP Segment Reassembly Queue Overflows");


SYSCTL_SKMEM_TCP_INT(OID_AUTO, slowlink_wsize, CTLFLAG_RW | CTLFLAG_LOCKED,
    __private_extern__ int, slowlink_wsize, 8192,
    "Maximum advertised window size for slowlink");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, maxseg_unacked,
    CTLFLAG_RW | CTLFLAG_LOCKED, int, maxseg_unacked, 8,
    "Maximum number of outstanding segments left unacked");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, rfc3465, CTLFLAG_RW | CTLFLAG_LOCKED,
    int, tcp_do_rfc3465, 1, "");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, rfc3465_lim2,
    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_do_rfc3465_lim2, 1,
    "Appropriate bytes counting w/ L=2*SMSS");

int rtt_samples_per_slot = 20;

int tcp_acc_iaj_high_thresh = ACC_IAJ_HIGH_THRESH;
u_int32_t tcp_autorcvbuf_inc_shift = 3;
SYSCTL_SKMEM_TCP_INT(OID_AUTO, recv_allowed_iaj,
    CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_allowed_iaj, ALLOWED_IAJ,
221 "Allowed inter-packet arrival jiter");
#if (DEVELOPMENT || DEBUG)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, acc_iaj_high_thresh,
    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_acc_iaj_high_thresh, 0,
    "Used in calculating maximum accumulated IAJ");

SYSCTL_INT(_net_inet_tcp, OID_AUTO, autorcvbufincshift,
    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_autorcvbuf_inc_shift, 0,
    "Shift for increment in receive socket buffer size");
#endif /* (DEVELOPMENT || DEBUG) */

SYSCTL_SKMEM_TCP_INT(OID_AUTO, doautorcvbuf,
    CTLFLAG_RW | CTLFLAG_LOCKED, u_int32_t, tcp_do_autorcvbuf, 1,
    "Enable automatic socket buffer tuning");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, autorcvbufmax,
    CTLFLAG_RW | CTLFLAG_LOCKED, u_int32_t, tcp_autorcvbuf_max, 512 * 1024,
    "Maximum receive socket buffer size");

u_int32_t tcp_autorcvbuf_max_ca = 512 * 1024;
#if (DEBUG || DEVELOPMENT)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, autorcvbufmaxca,
    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_autorcvbuf_max_ca, 0,
    "Maximum receive socket buffer size");
#endif /* (DEBUG || DEVELOPMENT) */

#if CONFIG_EMBEDDED
int sw_lro = 1;
#else
int sw_lro = 0;
#endif /* !CONFIG_EMBEDDED */
SYSCTL_INT(_net_inet_tcp, OID_AUTO, lro, CTLFLAG_RW | CTLFLAG_LOCKED,
    &sw_lro, 0, "Used to coalesce TCP packets");

int lrodebug = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, lrodbg,
    CTLFLAG_RW | CTLFLAG_LOCKED, &lrodebug, 0,
    "Used to debug SW LRO");

int lro_start = 4;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, lro_startcnt,
    CTLFLAG_RW | CTLFLAG_LOCKED, &lro_start, 0,
    "Segments for starting LRO computed as power of 2");

int limited_txmt = 1;
int early_rexmt = 1;
int sack_ackadv = 1;
int tcp_dsack_enable = 1;

#if (DEVELOPMENT || DEBUG)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, limited_transmit,
    CTLFLAG_RW | CTLFLAG_LOCKED, &limited_txmt, 0,
    "Enable limited transmit");

SYSCTL_INT(_net_inet_tcp, OID_AUTO, early_rexmt,
    CTLFLAG_RW | CTLFLAG_LOCKED, &early_rexmt, 0,
    "Enable Early Retransmit");

SYSCTL_INT(_net_inet_tcp, OID_AUTO, sack_ackadv,
    CTLFLAG_RW | CTLFLAG_LOCKED, &sack_ackadv, 0,
    "Use SACK with cumulative ack advancement as a dupack");

SYSCTL_INT(_net_inet_tcp, OID_AUTO, dsack_enable,
    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_dsack_enable, 0,
    "use DSACK TCP option to report duplicate segments");

#endif /* (DEVELOPMENT || DEBUG) */
int tcp_disable_access_to_stats = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, disable_access_to_stats,
    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_disable_access_to_stats, 0,
    "Disable access to tcpstat");


extern int tcp_TCPTV_MIN;
extern int tcp_acc_iaj_high;
extern int tcp_acc_iaj_react_limit;

int tcprexmtthresh = 3;

u_int32_t tcp_now;
struct timeval tcp_uptime;    /* uptime when tcp_now was last updated */
lck_spin_t *tcp_uptime_lock;  /* Used to synchronize updates to tcp_now */

struct inpcbhead tcb;
#define tcb6 tcb  /* for KAME src sync over BSD*'s */
struct inpcbinfo tcbinfo;

static void tcp_dooptions(struct tcpcb *, u_char *, int, struct tcphdr *,
    struct tcpopt *);
static void tcp_finalize_options(struct tcpcb *, struct tcpopt *, unsigned int);
static void tcp_pulloutofband(struct socket *,
    struct tcphdr *, struct mbuf *, int);
static int tcp_reass(struct tcpcb *, struct tcphdr *, int *, struct mbuf *,
    struct ifnet *);
static void tcp_xmit_timer(struct tcpcb *, int, u_int32_t, tcp_seq);
static inline unsigned int tcp_maxmtu(struct rtentry *);
static inline int tcp_stretch_ack_enable(struct tcpcb *tp, int thflags);
static inline void tcp_adaptive_rwtimo_check(struct tcpcb *, int);

#if TRAFFIC_MGT
static inline void update_iaj_state(struct tcpcb *tp, uint32_t tlen,
    int reset_size);
void compute_iaj(struct tcpcb *tp, int nlropkts, int lro_delay_factor);
static void compute_iaj_meat(struct tcpcb *tp, uint32_t cur_iaj);
#endif /* TRAFFIC_MGT */

#if INET6
static inline unsigned int tcp_maxmtu6(struct rtentry *);
#endif

unsigned int get_maxmtu(struct rtentry *);

static void tcp_sbrcv_grow(struct tcpcb *tp, struct sockbuf *sb,
    struct tcpopt *to, u_int32_t tlen, u_int32_t rcvbuf_max);
void tcp_sbrcv_trim(struct tcpcb *tp, struct sockbuf *sb);
static void tcp_sbsnd_trim(struct sockbuf *sbsnd);
static inline void tcp_sbrcv_tstmp_check(struct tcpcb *tp);
static inline void tcp_sbrcv_reserve(struct tcpcb *tp, struct sockbuf *sb,
    u_int32_t newsize, u_int32_t idealsize, u_int32_t rcvbuf_max);
static void tcp_bad_rexmt_restore_state(struct tcpcb *tp, struct tcphdr *th);
static void tcp_compute_rtt(struct tcpcb *tp, struct tcpopt *to,
    struct tcphdr *th);
static void tcp_early_rexmt_check(struct tcpcb *tp, struct tcphdr *th);
static void tcp_bad_rexmt_check(struct tcpcb *tp, struct tcphdr *th,
    struct tcpopt *to);
/*
 * Constants used for resizing receive socket buffer
 * when timestamps are not supported
 */
#define TCPTV_RCVNOTS_QUANTUM  100
#define TCP_RCVNOTS_BYTELEVEL  204800

/*
 * Constants used for limiting early retransmits
 * to 10 per minute.
 */
#define TCP_EARLY_REXMT_WIN    (60 * TCP_RETRANSHZ)  /* 60 seconds */
#define TCP_EARLY_REXMT_LIMIT  10

extern void ipfwsyslog(int level, const char *format, ...);
extern int fw_verbose;

#if IPFIREWALL
extern void ipfw_stealth_stats_incr_tcp(void);

#define log_in_vain_log(a) {                                        \
    if ((log_in_vain == 3) && (fw_verbose == 2)) {                  \
        /* Apple logging, log to ipfw.log */                        \
        ipfwsyslog a;                                               \
    } else if ((log_in_vain == 4) && (fw_verbose == 2)) {           \
        ipfw_stealth_stats_incr_tcp();                              \
    } else {                                                        \
        log a;                                                      \
    }                                                               \
}
#else
#define log_in_vain_log(a) { log a; }
#endif

int tcp_rcvunackwin = TCPTV_UNACKWIN;
int tcp_maxrcvidle = TCPTV_MAXRCVIDLE;
SYSCTL_SKMEM_TCP_INT(OID_AUTO, rcvsspktcnt, CTLFLAG_RW | CTLFLAG_LOCKED,
    int, tcp_rcvsspktcnt, TCP_RCV_SS_PKTCOUNT,
    "packets to be seen before receiver stretches acks");

#define DELAY_ACK(tp, th) \
    (CC_ALGO(tp)->delay_ack != NULL && CC_ALGO(tp)->delay_ack(tp, th))

static int tcp_dropdropablreq(struct socket *head);
static void tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th);
static void update_base_rtt(struct tcpcb *tp, uint32_t rtt);
void tcp_set_background_cc(struct socket *so);
void tcp_set_foreground_cc(struct socket *so);
static void tcp_set_new_cc(struct socket *so, uint16_t cc_index);
static void tcp_bwmeas_check(struct tcpcb *tp);

#if TRAFFIC_MGT
void
reset_acc_iaj(struct tcpcb *tp)
{
    tp->acc_iaj = 0;
    CLEAR_IAJ_STATE(tp);
}

static inline void
update_iaj_state(struct tcpcb *tp, uint32_t size, int rst_size)
{
    if (rst_size > 0)
        tp->iaj_size = 0;
    if (tp->iaj_size == 0 || size >= tp->iaj_size) {
        tp->iaj_size = size;
        tp->iaj_rcv_ts = tcp_now;
        tp->iaj_small_pkt = 0;
    }
}

/* For every 32-bit unsigned integer (v), this function will find the
 * largest integer n such that (n*n <= v). This takes at most 16 iterations
 * irrespective of the value of v and does not involve multiplications.
 */
static inline int
isqrt(unsigned int val)
{
    unsigned int sqrt_cache[11] = {0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100};
    unsigned int temp, g = 0, b = 0x8000, bshft = 15;
    if (val <= 100) {
        for (g = 0; g <= 10; ++g) {
            if (sqrt_cache[g] > val) {
                g--;
                break;
            } else if (sqrt_cache[g] == val) {
                break;
            }
        }
    } else {
        do {
            temp = (((g << 1) + b) << (bshft--));
            if (val >= temp) {
                g += b;
                val -= temp;
            }
            b >>= 1;
        } while (b > 0 && val > 0);
    }
    return (g);
}

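/*
 * Worked example (illustrative, not part of the original source): for
 * val <= 100 the cache is scanned directly, e.g. isqrt(80) stops when
 * sqrt_cache[9] == 81 > 80 and returns 8. For larger inputs the
 * shift-and-subtract loop converges, e.g. isqrt(300) returns 17 because
 * 17*17 = 289 <= 300 < 18*18 = 324.
 */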
/*
 * With LRO, roughly estimate the inter-arrival time between
 * each sub-coalesced packet as an average. Count the delay
 * cur_iaj to be the delay between the last packet received
 * and the first packet of the LRO stream. Due to round-off errors
 * cur_iaj may be the same as lro_delay_factor. Averaging has
 * round-off errors too. lro_delay_factor may be close to 0
 * in steady state, leading to lower values fed to compute_iaj_meat.
 */
void
compute_iaj(struct tcpcb *tp, int nlropkts, int lro_delay_factor)
{
    uint32_t cur_iaj = tcp_now - tp->iaj_rcv_ts;
    uint32_t timediff = 0;

    if (cur_iaj >= lro_delay_factor) {
        cur_iaj = cur_iaj - lro_delay_factor;
    }

    compute_iaj_meat(tp, cur_iaj);

    if (nlropkts <= 1)
        return;

    nlropkts--;

    timediff = lro_delay_factor / nlropkts;

    while (nlropkts > 0) {
        compute_iaj_meat(tp, timediff);
        nlropkts--;
    }
}

static void
compute_iaj_meat(struct tcpcb *tp, uint32_t cur_iaj)
{
    /* When accumulated IAJ reaches MAX_ACC_IAJ in milliseconds,
     * throttle the receive window to a minimum of MIN_IAJ_WIN packets
     */
#define MAX_ACC_IAJ        (tcp_acc_iaj_high_thresh + tcp_acc_iaj_react_limit)
#define IAJ_DIV_SHIFT      4
#define IAJ_ROUNDUP_CONST  (1 << (IAJ_DIV_SHIFT - 1))

    uint32_t allowed_iaj, acc_iaj = 0;

    uint32_t mean, temp;
    int32_t cur_iaj_dev;

    cur_iaj_dev = (cur_iaj - tp->avg_iaj);

    /* Allow a jitter of "allowed_iaj" milliseconds. Some connections
     * may have a constant jitter more than that. We detect this by
     * using standard deviation.
     */
    allowed_iaj = tp->avg_iaj + tp->std_dev_iaj;
    if (allowed_iaj < tcp_allowed_iaj)
        allowed_iaj = tcp_allowed_iaj;

    /* Initially, when the connection starts, the sender's congestion
     * window is small. During this period we avoid throttling a
     * connection because we do not have a good starting point for
     * allowed_iaj. IAJ_IGNORE_PKTCNT is used to quietly gloss over
     * the first few packets.
     */
    if (tp->iaj_pktcnt > IAJ_IGNORE_PKTCNT) {
        if (cur_iaj <= allowed_iaj) {
            if (tp->acc_iaj >= 2)
                acc_iaj = tp->acc_iaj - 2;
            else
                acc_iaj = 0;

        } else {
            acc_iaj = tp->acc_iaj + (cur_iaj - allowed_iaj);
        }

        if (acc_iaj > MAX_ACC_IAJ)
            acc_iaj = MAX_ACC_IAJ;
        tp->acc_iaj = acc_iaj;
    }

    /* Compute a weighted average where the history has a weight of
     * 15 out of 16 and the current value has a weight of 1 out of 16,
     * so a single short-term measurement moves the average only slightly.
     *
     * The addition of 8 helps to round the value up
     * instead of down.
     */
    tp->avg_iaj = (((tp->avg_iaj << IAJ_DIV_SHIFT) - tp->avg_iaj)
        + cur_iaj + IAJ_ROUNDUP_CONST) >> IAJ_DIV_SHIFT;

    /* Compute Root-mean-square of deviation where mean is a weighted
     * average as described above.
     */
    temp = tp->std_dev_iaj * tp->std_dev_iaj;
    mean = (((temp << IAJ_DIV_SHIFT) - temp)
        + (cur_iaj_dev * cur_iaj_dev)
        + IAJ_ROUNDUP_CONST) >> IAJ_DIV_SHIFT;

    tp->std_dev_iaj = isqrt(mean);
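    /*
     * Illustrative numbers (not from the original source): with
     * avg_iaj = 10, std_dev_iaj = 4 and cur_iaj = 26, the average
     * becomes ((10 << 4) - 10 + 26 + 8) >> 4 = 184 >> 4 = 11 and the
     * deviation becomes isqrt(((16 << 4) - 16 + 16*16 + 8) >> 4) =
     * isqrt(31) = 5, i.e. 15/16 history plus 1/16 new sample.
     */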

    DTRACE_TCP3(iaj, struct tcpcb *, tp, uint32_t, cur_iaj,
        uint32_t, allowed_iaj);

    return;
}
#endif /* TRAFFIC_MGT */

/* Check whether enough data has been acknowledged since the
 * bandwidth measurement was started
 */
static void
tcp_bwmeas_check(struct tcpcb *tp)
{
    int32_t bw_meas_bytes;
    uint32_t bw, bytes, elapsed_time;

    if (SEQ_LEQ(tp->snd_una, tp->t_bwmeas->bw_start))
        return;

    bw_meas_bytes = tp->snd_una - tp->t_bwmeas->bw_start;
    if ((tp->t_flagsext & TF_BWMEAS_INPROGRESS) &&
        bw_meas_bytes >= (int32_t)(tp->t_bwmeas->bw_size)) {
        bytes = bw_meas_bytes;
        elapsed_time = tcp_now - tp->t_bwmeas->bw_ts;
        if (elapsed_time > 0) {
            bw = bytes / elapsed_time;
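            /*
             * Sketch of the smoothing below with made-up numbers:
             * a previous bw_sndbw of 1600 bytes/tick and a new
             * sample bw of 2000 give
             * ((1600 << 3) - 1600 + 2000) >> 3 = 1650, i.e. the
             * filter keeps 7/8 of history and 1/8 of the sample.
             */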
            if (bw > 0) {
                if (tp->t_bwmeas->bw_sndbw > 0) {
                    tp->t_bwmeas->bw_sndbw =
                        (((tp->t_bwmeas->bw_sndbw << 3)
                        - tp->t_bwmeas->bw_sndbw)
                        + bw) >> 3;
                } else {
                    tp->t_bwmeas->bw_sndbw = bw;
                }

                /* Store the maximum value */
                if (tp->t_bwmeas->bw_sndbw_max == 0) {
                    tp->t_bwmeas->bw_sndbw_max =
                        tp->t_bwmeas->bw_sndbw;
                } else {
                    tp->t_bwmeas->bw_sndbw_max =
                        max(tp->t_bwmeas->bw_sndbw,
                        tp->t_bwmeas->bw_sndbw_max);
                }
            }
        }
        tp->t_flagsext &= ~(TF_BWMEAS_INPROGRESS);
    }
}

static int
tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m,
    struct ifnet *ifp)
{
    struct tseg_qent *q;
    struct tseg_qent *p = NULL;
    struct tseg_qent *nq;
    struct tseg_qent *te = NULL;
    struct inpcb *inp = tp->t_inpcb;
    struct socket *so = inp->inp_socket;
    int flags = 0;
    int dowakeup = 0;
    struct mbuf *oodata = NULL;
    int copy_oodata = 0;
    u_int16_t qlimit;
    boolean_t cell = IFNET_IS_CELLULAR(ifp);
    boolean_t wifi = (!cell && IFNET_IS_WIFI(ifp));
    boolean_t wired = (!wifi && IFNET_IS_WIRED(ifp));
    boolean_t dsack_set = FALSE;

    /*
     * Call with th==NULL after becoming established to
     * force pre-ESTABLISHED data up to the user socket.
     */
    if (th == NULL)
        goto present;

    /*
     * If the reassembly queue already has entries or if we are going
     * to add a new one, then the connection has reached a loss state.
     * Reset the stretch-ack algorithm at this point.
     */
    tcp_reset_stretch_ack(tp);

#if TRAFFIC_MGT
    if (tp->acc_iaj > 0)
        reset_acc_iaj(tp);
#endif /* TRAFFIC_MGT */

    /*
     * Limit the number of segments in the reassembly queue to prevent
     * holding on to too many segments (and thus running out of mbufs).
     * Make sure to let through the missing segment that caused this
     * queue to build up. Always keep one global queue entry spare to
     * be able to process the missing segment.
     */
    qlimit = min(max(100, so->so_rcv.sb_hiwat >> 10),
        (TCP_AUTORCVBUF_MAX(ifp) >> 10));
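    /*
     * Worked example (illustrative, not from the original source): with
     * so_rcv.sb_hiwat = 256 KB, sb_hiwat >> 10 = 256, so up to 256
     * queue entries are allowed (subject to the interface cap); a small
     * 64 KB buffer would still get the floor of 100 entries.
     */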
    if (th->th_seq != tp->rcv_nxt &&
        (tp->t_reassqlen + 1) >= qlimit) {
        tcp_reass_overflows++;
        tcpstat.tcps_rcvmemdrop++;
        m_freem(m);
        *tlenp = 0;
        return (0);
    }

    /* Allocate a new queue entry. If we can't, just drop the pkt. XXX */
    te = (struct tseg_qent *) zalloc(tcp_reass_zone);
    if (te == NULL) {
        tcpstat.tcps_rcvmemdrop++;
        m_freem(m);
        return (0);
    }
    tp->t_reassqlen++;

    /*
     * Find a segment which begins after this one does.
     */
    LIST_FOREACH(q, &tp->t_segq, tqe_q) {
        if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
            break;
        p = q;
    }

    /*
     * If there is a preceding segment, it may provide some of
     * our data already. If so, drop the data from the incoming
     * segment. If it provides all of our data, drop us.
     */
    if (p != NULL) {
        int i;
        /* conversion to int (in i) handles seq wraparound */
        i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
        if (i > 0) {
            if (TCP_DSACK_ENABLED(tp) && i > 1) {
                /*
                 * Note duplicate data sequence numbers
                 * to report in the DSACK option
                 */
                tp->t_dsack_lseq = th->th_seq;
                tp->t_dsack_rseq = th->th_seq +
                    min(i, *tlenp);

                /*
                 * Report only the first part of partial/
                 * non-contiguous duplicate sequence space
                 */
                dsack_set = TRUE;
            }
            if (i >= *tlenp) {
                tcpstat.tcps_rcvduppack++;
                tcpstat.tcps_rcvdupbyte += *tlenp;
                if (nstat_collect) {
                    nstat_route_rx(inp->inp_route.ro_rt,
                        1, *tlenp,
                        NSTAT_RX_FLAG_DUPLICATE);
                    INP_ADD_STAT(inp, cell, wifi, wired,
                        rxpackets, 1);
                    INP_ADD_STAT(inp, cell, wifi, wired,
                        rxbytes, *tlenp);
                    tp->t_stat.rxduplicatebytes += *tlenp;
                    inp_set_activity_bitmap(inp);
                }
                m_freem(m);
                zfree(tcp_reass_zone, te);
                te = NULL;
                tp->t_reassqlen--;
                /*
                 * Try to present any queued data
                 * at the left window edge to the user.
                 * This is needed after the 3-WHS
                 * completes.
                 */
                goto present;
            }
            m_adj(m, i);
            *tlenp -= i;
            th->th_seq += i;
        }
    }
    tp->t_rcvoopack++;
    tcpstat.tcps_rcvoopack++;
    tcpstat.tcps_rcvoobyte += *tlenp;
    if (nstat_collect) {
        nstat_route_rx(inp->inp_route.ro_rt, 1, *tlenp,
            NSTAT_RX_FLAG_OUT_OF_ORDER);
        INP_ADD_STAT(inp, cell, wifi, wired, rxpackets, 1);
        INP_ADD_STAT(inp, cell, wifi, wired, rxbytes, *tlenp);
        tp->t_stat.rxoutoforderbytes += *tlenp;
        inp_set_activity_bitmap(inp);
    }

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     */
    while (q) {
        int i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
        if (i <= 0)
            break;

        /*
         * Report only the first part of partial/non-contiguous
         * duplicate segment in dsack option. The variable
         * dsack_set will be true if a previous entry has some of
         * the duplicate sequence space.
         */
        if (TCP_DSACK_ENABLED(tp) && i > 1 && !dsack_set) {
            if (tp->t_dsack_lseq == 0) {
                tp->t_dsack_lseq = q->tqe_th->th_seq;
                tp->t_dsack_rseq =
                    tp->t_dsack_lseq + min(i, q->tqe_len);
            } else {
                /*
                 * this segment overlaps data in multiple
                 * entries in the reassembly queue, move
                 * the right sequence number further.
                 */
                tp->t_dsack_rseq =
                    tp->t_dsack_rseq + min(i, q->tqe_len);
            }
        }
        if (i < q->tqe_len) {
            q->tqe_th->th_seq += i;
            q->tqe_len -= i;
            m_adj(q->tqe_m, i);
            break;
        }

        nq = LIST_NEXT(q, tqe_q);
        LIST_REMOVE(q, tqe_q);
        m_freem(q->tqe_m);
        zfree(tcp_reass_zone, q);
        tp->t_reassqlen--;
        q = nq;
    }

    /* Insert the new segment queue entry into place. */
    te->tqe_m = m;
    te->tqe_th = th;
    te->tqe_len = *tlenp;

    if (p == NULL) {
        LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
    } else {
        LIST_INSERT_AFTER(p, te, tqe_q);
    }

    /*
     * New out-of-order data exists, and is pointed to by
     * queue entry te. Set copy_oodata to 1 so out-of-order data
     * can be copied off to sockbuf after in-order data
     * is copied off.
     */
    if (!(so->so_state & SS_CANTRCVMORE))
        copy_oodata = 1;

present:
    /*
     * Present data to user, advancing rcv_nxt through
     * completed sequence space.
     */
    if (!TCPS_HAVEESTABLISHED(tp->t_state))
        return (0);
    q = LIST_FIRST(&tp->t_segq);
    if (!q || q->tqe_th->th_seq != tp->rcv_nxt) {
        /* Stop using LRO once out of order packets arrive */
        if (tp->t_flagsext & TF_LRO_OFFLOADED) {
            tcp_lro_remove_state(inp->inp_laddr, inp->inp_faddr,
                th->th_dport, th->th_sport);
            tp->t_flagsext &= ~TF_LRO_OFFLOADED;
        }

        /*
         * continue processing if out-of-order data
         * can be delivered
         */
        if (q && (so->so_flags & SOF_ENABLE_MSGS))
            goto msg_unordered_delivery;

        return (0);
    }

    /*
     * If there is already another thread doing reassembly for this
     * connection, it is better to let it finish the job --
     * (radar 16316196)
     */
    if (tp->t_flagsext & TF_REASS_INPROG)
        return (0);

    tp->t_flagsext |= TF_REASS_INPROG;
    /* lost packet was recovered, so ooo data can be returned */
    tcpstat.tcps_recovered_pkts++;

    do {
        tp->rcv_nxt += q->tqe_len;
        flags = q->tqe_th->th_flags & TH_FIN;
        LIST_REMOVE(q, tqe_q);
        if (so->so_state & SS_CANTRCVMORE) {
            m_freem(q->tqe_m);
        } else {
            so_recv_data_stat(so, q->tqe_m, 0); /* XXXX */
            if (so->so_flags & SOF_ENABLE_MSGS) {
                /*
                 * Append the inorder data as a message to the
                 * receive socket buffer. Also check to see if
                 * the data we are about to deliver is the same
                 * data that we wanted to pass up to the user
                 * out of order. If so, reset copy_oodata --
                 * the received data filled a gap, and
                 * is now in order!
                 */
                if (q == te)
                    copy_oodata = 0;
            }
            if (sbappendstream_rcvdemux(so, q->tqe_m,
                q->tqe_th->th_seq - (tp->irs + 1), 0))
                dowakeup = 1;
            if (tp->t_flagsext & TF_LRO_OFFLOADED) {
                tcp_update_lro_seq(tp->rcv_nxt,
                    inp->inp_laddr, inp->inp_faddr,
                    th->th_dport, th->th_sport);
            }
        }
        zfree(tcp_reass_zone, q);
        tp->t_reassqlen--;
        q = LIST_FIRST(&tp->t_segq);
    } while (q && q->tqe_th->th_seq == tp->rcv_nxt);
    tp->t_flagsext &= ~TF_REASS_INPROG;

#if INET6
    if ((inp->inp_vflag & INP_IPV6) != 0) {

        KERNEL_DEBUG(DBG_LAYER_BEG,
            ((inp->inp_fport << 16) | inp->inp_lport),
            (((inp->in6p_laddr.s6_addr16[0] & 0xffff) << 16) |
            (inp->in6p_faddr.s6_addr16[0] & 0xffff)),
            0, 0, 0);
    }
    else
#endif
    {
        KERNEL_DEBUG(DBG_LAYER_BEG,
            ((inp->inp_fport << 16) | inp->inp_lport),
            (((inp->inp_laddr.s_addr & 0xffff) << 16) |
            (inp->inp_faddr.s_addr & 0xffff)),
            0, 0, 0);
    }

msg_unordered_delivery:
    /* Deliver out-of-order data as a message */
    if (te && (so->so_flags & SOF_ENABLE_MSGS) && copy_oodata && te->tqe_len) {
        /*
         * make a copy of the mbuf to be delivered up to
         * the user, and add it to the sockbuf
         */
        oodata = m_copym(te->tqe_m, 0, M_COPYALL, M_DONTWAIT);
        if (oodata != NULL) {
            if (sbappendmsgstream_rcv(&so->so_rcv, oodata,
                te->tqe_th->th_seq - (tp->irs + 1), 1)) {
                dowakeup = 1;
                tcpstat.tcps_msg_unopkts++;
            } else {
                tcpstat.tcps_msg_unoappendfail++;
            }
        }
    }

    if (dowakeup)
        sorwakeup(so); /* done with socket lock held */
    return (flags);
}

/*
 * Reduce congestion window -- used when ECN is seen or when a tail loss
 * probe recovers the last packet.
 */
static void
tcp_reduce_congestion_window(struct tcpcb *tp)
{
    /*
     * If the current tcp cc module has
     * defined a hook for tasks to run
     * before entering FR, call it
     */
    if (CC_ALGO(tp)->pre_fr != NULL)
        CC_ALGO(tp)->pre_fr(tp);
    ENTER_FASTRECOVERY(tp);
    if (tp->t_flags & TF_SENTFIN)
        tp->snd_recover = tp->snd_max - 1;
    else
        tp->snd_recover = tp->snd_max;
    tp->t_timer[TCPT_REXMT] = 0;
    tp->t_timer[TCPT_PTO] = 0;
    tp->t_rtttime = 0;
    if (tp->t_flagsext & TF_CWND_NONVALIDATED) {
        tcp_cc_adjust_nonvalidated_cwnd(tp);
    } else {
        tp->snd_cwnd = tp->snd_ssthresh +
            tp->t_maxseg * tcprexmtthresh;
    }
}

/*
 * This function is called upon reception of data on a socket. Its purpose is
 * to handle the adaptive keepalive timers that monitor whether the connection
 * is making progress. First the adaptive read-timer, second the TFO probe-timer.
 *
 * The application wants to get an event if there is a stall during read.
 * Set the initial keepalive timeout to be equal to twice RTO.
 *
 * If the outgoing interface is in marginal conditions, we need to
 * enable read probes for that too.
 */
static inline void
tcp_adaptive_rwtimo_check(struct tcpcb *tp, int tlen)
{
    struct ifnet *outifp = tp->t_inpcb->inp_last_outifp;

    if ((tp->t_adaptive_rtimo > 0 ||
        (outifp != NULL &&
        (outifp->if_eflags & IFEF_PROBE_CONNECTIVITY)))
        && tlen > 0 &&
        tp->t_state == TCPS_ESTABLISHED) {
        tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
            (TCP_REXMTVAL(tp) << 1));
        tp->t_flagsext |= TF_DETECT_READSTALL;
        tp->t_rtimo_probes = 0;
    }
}

inline void
tcp_keepalive_reset(struct tcpcb *tp)
{
    tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
        TCP_CONN_KEEPIDLE(tp));
    tp->t_flagsext &= ~(TF_DETECT_READSTALL);
    tp->t_rtimo_probes = 0;
}

/*
 * TCP input routine, follows pages 65-76 of the
 * protocol specification dated September, 1981 very closely.
 */
#if INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
#pragma unused(proto)
    struct mbuf *m = *mp;
    uint32_t ia6_flags;
    struct ifnet *ifp = m->m_pkthdr.rcvif;

    IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), return IPPROTO_DONE);

    /* Expect 32-bit aligned data pointer on strict-align platforms */
    MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

    /*
     * draft-itojun-ipv6-tcp-to-anycast
     * better place to put this in?
     */
    if (ip6_getdstifaddr_info(m, NULL, &ia6_flags) == 0) {
        if (ia6_flags & IN6_IFF_ANYCAST) {
            struct ip6_hdr *ip6;

            ip6 = mtod(m, struct ip6_hdr *);
            icmp6_error(m, ICMP6_DST_UNREACH,
                ICMP6_DST_UNREACH_ADDR,
                (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);

            IF_TCP_STATINC(ifp, icmp6unreach);

            return (IPPROTO_DONE);
        }
    }

    tcp_input(m, *offp);
    return (IPPROTO_DONE);
}
#endif

/* Depending on the usage of mbuf space in the system, this function
 * will return true or false. This is used to determine if a socket
 * buffer can take more memory from the system for auto-tuning or not.
 */
u_int8_t
tcp_cansbgrow(struct sockbuf *sb)
{
    /* Calculate the host level space limit in terms of MSIZE buffers.
     * We can use a maximum of half of the available mbuf space for
     * socket buffers.
     */
    u_int32_t mblim = ((nmbclusters >> 1) << (MCLSHIFT - MSIZESHIFT));

    /* Calculate per sb limit in terms of bytes. We optimize this limit
     * for up to 16 socket buffers.
     */

    u_int32_t sbspacelim = ((nmbclusters >> 4) << MCLSHIFT);
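    /*
     * Rough numbers for illustration (assuming, say, nmbclusters = 65536,
     * MCLSHIFT = 11 and MSIZESHIFT = 8, i.e. 2 KB clusters and 256-byte
     * mbufs): mblim = (65536 >> 1) << 3 = 262144 MSIZE units, and
     * sbspacelim = (65536 >> 4) << 11 = 8 MB per socket buffer.
     */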

    if ((total_sbmb_cnt < mblim) &&
        (sb->sb_hiwat < sbspacelim)) {
        return (1);
    } else {
        OSIncrementAtomic64(&sbmb_limreached);
    }
    return (0);
}

static void
tcp_sbrcv_reserve(struct tcpcb *tp, struct sockbuf *sbrcv,
    u_int32_t newsize, u_int32_t idealsize, u_int32_t rcvbuf_max)
{
    /* newsize should not exceed max */
    newsize = min(newsize, rcvbuf_max);

    /* The receive window scale negotiated at the
     * beginning of the connection will also set a
     * limit on the socket buffer size
     */
    newsize = min(newsize, TCP_MAXWIN << tp->rcv_scale);

    /* Set new socket buffer size */
    if (newsize > sbrcv->sb_hiwat &&
        (sbreserve(sbrcv, newsize) == 1)) {
        sbrcv->sb_idealsize = min(max(sbrcv->sb_idealsize,
            (idealsize != 0) ? idealsize : newsize), rcvbuf_max);

        /* Again check the limit set by the advertised
         * window scale
         */
        sbrcv->sb_idealsize = min(sbrcv->sb_idealsize,
            TCP_MAXWIN << tp->rcv_scale);
    }
}

/*
 * This function is used to grow a receive socket buffer. It
 * will take into account system-level memory usage and the
 * bandwidth available on the link to make a decision.
 */
static void
tcp_sbrcv_grow(struct tcpcb *tp, struct sockbuf *sbrcv,
    struct tcpopt *to, u_int32_t pktlen, u_int32_t rcvbuf_max)
{
    struct socket *so = sbrcv->sb_so;

    /*
     * Do not grow the receive socket buffer if
     * - auto resizing is disabled, globally or on this socket
     * - the high water mark already reached the maximum
     * - the stream is in background and receive side is being
     *   throttled
     * - there are segments in the reassembly queue indicating loss,
     *   in which case there is no need to increase the receive window
     *   during recovery, as more data is not going to be sent. A
     *   duplicate ack sent during recovery should not change the
     *   receive window
     */
    if (tcp_do_autorcvbuf == 0 ||
        (sbrcv->sb_flags & SB_AUTOSIZE) == 0 ||
        tcp_cansbgrow(sbrcv) == 0 ||
        sbrcv->sb_hiwat >= rcvbuf_max ||
        (tp->t_flagsext & TF_RECV_THROTTLE) ||
        (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) ||
        !LIST_EMPTY(&tp->t_segq)) {
        /* Can not resize the socket buffer, just return */
        goto out;
    }

    if (TSTMP_GT(tcp_now,
        tp->rfbuf_ts + TCPTV_RCVBUFIDLE)) {
        /* If there has been an idle period in the
         * connection, just restart the measurement
         */
        goto out;
    }

    if (!TSTMP_SUPPORTED(tp)) {
        /*
         * Timestamp option is not supported on this connection.
         * If the connection reached a state to indicate that
         * the receive socket buffer needs to grow, increase
         * the high water mark.
         */
        if (TSTMP_GEQ(tcp_now,
            tp->rfbuf_ts + TCPTV_RCVNOTS_QUANTUM)) {
            if (tp->rfbuf_cnt >= TCP_RCVNOTS_BYTELEVEL) {
                tcp_sbrcv_reserve(tp, sbrcv,
                    tcp_autorcvbuf_max, 0,
                    tcp_autorcvbuf_max);
            }
            goto out;
        } else {
            tp->rfbuf_cnt += pktlen;
            return;
        }
    } else if (to->to_tsecr != 0) {
        /*
         * If the timestamp shows that one RTT has
         * completed, we can stop counting the
         * bytes. Here we consider increasing
         * the socket buffer if the bandwidth measured in
         * the last rtt is more than half of sb_hiwat; this will
         * help to scale the buffer according to the bandwidth
         * on the link.
         */
        if (TSTMP_GEQ(to->to_tsecr, tp->rfbuf_ts)) {
            if (tp->rfbuf_cnt > (sbrcv->sb_hiwat -
                (sbrcv->sb_hiwat >> 1))) {
                int32_t rcvbuf_inc, min_incr;
                /*
                 * Increment the receive window by a
                 * multiple of maximum sized segments.
                 * This will prevent a connection from
                 * sending smaller segments on wire if it
                 * is limited by the receive window.
                 *
                 * Set the ideal size based on current
                 * bandwidth measurements. We set the
                 * ideal size on receive socket buffer to
                 * be twice the bandwidth delay product.
                 */
                rcvbuf_inc = (tp->rfbuf_cnt << 1)
                    - sbrcv->sb_hiwat;

                /*
                 * Make the increment equal to 8 segments
                 * at least
                 */
                min_incr = tp->t_maxseg << tcp_autorcvbuf_inc_shift;
                if (rcvbuf_inc < min_incr)
                    rcvbuf_inc = min_incr;

                rcvbuf_inc =
                    (rcvbuf_inc / tp->t_maxseg) * tp->t_maxseg;
                tcp_sbrcv_reserve(tp, sbrcv,
                    sbrcv->sb_hiwat + rcvbuf_inc,
                    (tp->rfbuf_cnt * 2), rcvbuf_max);
            }
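            /*
             * Worked example (illustrative, not from the original
             * source): with sb_hiwat = 128 KB and rfbuf_cnt = 90000
             * bytes seen in the last RTT, rcvbuf_inc = 180000 -
             * 131072 = 48928, rounded down to 33 * 1448 = 47784 for a
             * 1448-byte t_maxseg, and the ideal size is set toward
             * 2 * 90000 = 180000 bytes, i.e. twice the measured
             * bandwidth-delay product.
             */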
            /* Measure instantaneous receive bandwidth */
            if (tp->t_bwmeas != NULL && tp->rfbuf_cnt > 0 &&
                TSTMP_GT(tcp_now, tp->rfbuf_ts)) {
                u_int32_t rcv_bw;
                rcv_bw = tp->rfbuf_cnt /
                    (int)(tcp_now - tp->rfbuf_ts);
                if (tp->t_bwmeas->bw_rcvbw_max == 0) {
                    tp->t_bwmeas->bw_rcvbw_max = rcv_bw;
                } else {
                    tp->t_bwmeas->bw_rcvbw_max = max(
                        tp->t_bwmeas->bw_rcvbw_max, rcv_bw);
                }
            }
            goto out;
        } else {
            tp->rfbuf_cnt += pktlen;
            return;
        }
    }
out:
    /* Restart the measurement */
    tp->rfbuf_ts = 0;
    tp->rfbuf_cnt = 0;
    return;
}

/* This function will trim the excess space added to the socket buffer
 * to help a slow-reading app. The ideal size of a socket buffer depends
 * on the link bandwidth, or it is set by an application; we aim to
 * reach that size.
 */
void
tcp_sbrcv_trim(struct tcpcb *tp, struct sockbuf *sbrcv)
{
    if (tcp_do_autorcvbuf == 1 && sbrcv->sb_idealsize > 0 &&
        sbrcv->sb_hiwat > sbrcv->sb_idealsize) {
        int32_t trim;
        /* compute the difference between ideal and current sizes */
        u_int32_t diff = sbrcv->sb_hiwat - sbrcv->sb_idealsize;

        /* Compute the maximum advertised window for
         * this connection.
         */
        u_int32_t advwin = tp->rcv_adv - tp->rcv_nxt;

        /* How much can we trim the receive socket buffer?
         * 1. it can not be trimmed beyond the max rcv win advertised
         * 2. if possible, leave 1/16 of bandwidth*delay to
         * avoid closing the win completely
         */
        u_int32_t leave = max(advwin, (sbrcv->sb_idealsize >> 4));

        /* Sometimes leave can be zero, in that case leave at least
         * a few segments worth of space.
         */
        if (leave == 0)
            leave = tp->t_maxseg << tcp_autorcvbuf_inc_shift;

        trim = sbrcv->sb_hiwat - (sbrcv->sb_cc + leave);
        trim = imin(trim, (int32_t)diff);
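        /*
         * Example with made-up numbers: sb_hiwat = 256 KB, sb_idealsize
         * = 128 KB, advwin = 64 KB and sb_cc = 16 KB give trim =
         * 262144 - (16384 + 65536) = 180224, clamped to diff = 131072,
         * so the buffer shrinks back to its 128 KB ideal size.
         */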

        if (trim > 0)
            sbreserve(sbrcv, (sbrcv->sb_hiwat - trim));
    }
}

/* We may need to trim the send socket buffer size for two reasons:
 * 1. if the rtt seen on the connection is climbing up, we do not
 * want to fill the buffers any more.
 * 2. if the congestion win on the socket backed off, there is no need
 * to hold more mbufs for that connection than what the cwnd will allow.
 */
void
tcp_sbsnd_trim(struct sockbuf *sbsnd)
{
    if (tcp_do_autosendbuf == 1 &&
        ((sbsnd->sb_flags & (SB_AUTOSIZE | SB_TRIM)) ==
        (SB_AUTOSIZE | SB_TRIM)) &&
        (sbsnd->sb_idealsize > 0) &&
        (sbsnd->sb_hiwat > sbsnd->sb_idealsize)) {
        u_int32_t trim = 0;
        if (sbsnd->sb_cc <= sbsnd->sb_idealsize) {
            trim = sbsnd->sb_hiwat - sbsnd->sb_idealsize;
        } else {
            trim = sbsnd->sb_hiwat - sbsnd->sb_cc;
        }
        sbreserve(sbsnd, (sbsnd->sb_hiwat - trim));
    }
    if (sbsnd->sb_hiwat <= sbsnd->sb_idealsize)
        sbsnd->sb_flags &= ~(SB_TRIM);
}

/*
 * If timestamp option was not negotiated on this connection
 * and this connection is on the receiving side of a stream
 * then we can not measure the delay on the link accurately.
 * Instead of enabling automatic receive socket buffer
 * resizing, just give more space to the receive socket buffer.
 */
static inline void
tcp_sbrcv_tstmp_check(struct tcpcb *tp)
{
    struct socket *so = tp->t_inpcb->inp_socket;
    u_int32_t newsize = 2 * tcp_recvspace;
    struct sockbuf *sbrcv = &so->so_rcv;

    if ((tp->t_flags & (TF_REQ_TSTMP | TF_RCVD_TSTMP)) !=
        (TF_REQ_TSTMP | TF_RCVD_TSTMP) &&
        (sbrcv->sb_flags & SB_AUTOSIZE) != 0) {
        tcp_sbrcv_reserve(tp, sbrcv, newsize, 0, newsize);
    }
}

/* A receiver will evaluate the flow of packets on a connection
 * to see if it can reduce ack traffic. The receiver will start
 * stretching acks if all of the following conditions are met:
 * 1. tcp_delack_enabled is set to 3
 * 2. the bytes received in the last 100 ms exceed the threshold
 *    defined by maxseg_unacked
 * 3. the connection has not been idle for the tcp_maxrcvidle period
 * 4. the connection has seen enough packets to let the slow-start
 *    finish after connection establishment or after some packet loss.
 *
 * The receiver will stop stretching acks if there is congestion/reordering
 * as indicated by packets on the reassembly queue or an ECN. If the delayed-ack
 * timer fires while stretching acks, it means that the packet flow has gone
 * below the threshold defined by maxseg_unacked and the receiver will stop
 * stretching acks. The receiver gets no indication when slow-start is completed
 * or when the connection reaches an idle state. That is why we use
 * tcp_rcvsspktcnt to cover slow-start and tcp_maxrcvidle to identify idle
 * state.
 */
static inline int
tcp_stretch_ack_enable(struct tcpcb *tp, int thflags)
{
    if (tp->rcv_by_unackwin >= (maxseg_unacked * tp->t_maxseg) &&
        TSTMP_GEQ(tp->rcv_unackwin, tcp_now))
        tp->t_flags |= TF_STREAMING_ON;
    else
        tp->t_flags &= ~TF_STREAMING_ON;

    /* If there has been an idle time, reset streaming detection */
    if (TSTMP_GT(tcp_now, tp->rcv_unackwin + tcp_maxrcvidle))
        tp->t_flags &= ~TF_STREAMING_ON;

    /*
     * If there are flags other than TH_ACK set, reset streaming
     * detection
     */
    if (thflags & ~TH_ACK)
        tp->t_flags &= ~TF_STREAMING_ON;

    if (tp->t_flagsext & TF_DISABLE_STRETCHACK) {
        if (tp->rcv_nostrack_pkts >= TCP_STRETCHACK_ENABLE_PKTCNT) {
            tp->t_flagsext &= ~TF_DISABLE_STRETCHACK;
            tp->rcv_nostrack_pkts = 0;
            tp->rcv_nostrack_ts = 0;
        } else {
            tp->rcv_nostrack_pkts++;
        }
    }

    if (!(tp->t_flagsext & (TF_NOSTRETCHACK | TF_DISABLE_STRETCHACK)) &&
        (tp->t_flags & TF_STREAMING_ON) &&
        (!(tp->t_flagsext & TF_RCVUNACK_WAITSS) ||
        (tp->rcv_waitforss >= tcp_rcvsspktcnt))) {
        return (1);
    }

    return (0);
}

/*
 * Reset the state related to stretch-ack algorithm. This will make
 * the receiver generate an ack every other packet. The receiver
 * will start re-evaluating the rate at which packets come to decide
 * if it can benefit by lowering the ack traffic.
 */
void
tcp_reset_stretch_ack(struct tcpcb *tp)
{
    tp->t_flags &= ~(TF_STRETCHACK | TF_STREAMING_ON);
    tp->rcv_by_unackwin = 0;
    tp->rcv_unackwin = tcp_now + tcp_rcvunackwin;

    /*
     * When there is packet loss or packet re-ordering or CWR due to
     * ECN, the sender's congestion window is reduced. In these states,
     * generate an ack for every other packet for some time to allow
     * the sender's congestion window to grow.
     */
    tp->t_flagsext |= TF_RCVUNACK_WAITSS;
    tp->rcv_waitforss = 0;
}

/*
 * The last packet was a retransmission; check if this ack
 * indicates that the retransmission was spurious.
 *
 * If the connection supports timestamps, we could use them to
 * detect if the last retransmit was not needed. Otherwise,
 * we check whether the ACK arrived within an RTT/2 window; if so,
 * it was a mistake to do the retransmit in the first place.
 *
 * This function will return 1 if it is a spurious retransmit,
 * 0 otherwise.
 */
int
tcp_detect_bad_rexmt(struct tcpcb *tp, struct tcphdr *th,
    struct tcpopt *to, u_int32_t rxtime)
{
    int32_t tdiff, bad_rexmt_win;
    bad_rexmt_win = (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
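    /*
     * Note (illustrative; assumes the classic BSD TCP_RTT_SHIFT of 5,
     * i.e. t_srtt is kept scaled by 32): t_srtt >> 6 is half the
     * smoothed RTT in timer ticks, so a 200 ms smoothed RTT yields a
     * ~100 ms bad-retransmit window.
     */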

    /* If the ack has ECN CE bit, then cwnd has to be adjusted */
    if (TCP_ECN_ENABLED(tp) && (th->th_flags & TH_ECE))
        return (0);
    if (TSTMP_SUPPORTED(tp)) {
        if (rxtime > 0 && (to->to_flags & TOF_TS)
            && to->to_tsecr != 0
            && TSTMP_LT(to->to_tsecr, rxtime))
            return (1);
    } else {
        if ((tp->t_rxtshift == 1
            || (tp->t_flagsext & TF_SENT_TLPROBE))
            && rxtime > 0) {
            tdiff = (int32_t)(tcp_now - rxtime);
            if (tdiff < bad_rexmt_win)
                return (1);
        }
    }
    return (0);
}


/*
 * Restore congestion window state if a spurious timeout
 * was detected.
 */
static void
tcp_bad_rexmt_restore_state(struct tcpcb *tp, struct tcphdr *th)
{
    if (TSTMP_SUPPORTED(tp)) {
        u_int32_t fsize, acked;
        fsize = tp->snd_max - th->th_ack;
        acked = BYTES_ACKED(th, tp);

        /*
         * Implement bad retransmit recovery as
         * described in RFC 4015.
         */
        tp->snd_ssthresh = tp->snd_ssthresh_prev;

        /* Initialize cwnd to the initial window */
        if (CC_ALGO(tp)->cwnd_init != NULL)
            CC_ALGO(tp)->cwnd_init(tp);

        tp->snd_cwnd = fsize + min(acked, tp->snd_cwnd);

    } else {
        tp->snd_cwnd = tp->snd_cwnd_prev;
        tp->snd_ssthresh = tp->snd_ssthresh_prev;
        if (tp->t_flags & TF_WASFRECOVERY)
            ENTER_FASTRECOVERY(tp);

        /* Do not use the loss flight size in this case */
        tp->t_lossflightsize = 0;
    }
    tp->snd_cwnd = max(tp->snd_cwnd, TCP_CC_CWND_INIT_BYTES);
    tp->snd_recover = tp->snd_recover_prev;
    tp->snd_nxt = tp->snd_max;

    /* Fix send socket buffer to reflect the change in cwnd */
    tcp_bad_rexmt_fix_sndbuf(tp);

    /*
     * This RTT might reflect the extra delay induced
     * by the network. Skip using this sample for RTO
     * calculation and mark the connection so we can
     * recompute RTT when the next eligible sample is
     * found.
     */
    tp->t_flagsext |= TF_RECOMPUTE_RTT;
    tp->t_badrexmt_time = tcp_now;
    tp->t_rtttime = 0;
}

/*
 * If the previous packet was sent in retransmission timer, and it was
 * not needed, then restore the congestion window to the state before that
 * transmission.
 *
 * If the last packet was sent in tail loss probe timeout, check if that
 * recovered the last packet. If so, that will indicate a real loss and
 * the congestion window needs to be lowered.
 */
static void
tcp_bad_rexmt_check(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
{
    if (tp->t_rxtshift > 0 &&
        tcp_detect_bad_rexmt(tp, th, to, tp->t_rxtstart)) {
        ++tcpstat.tcps_sndrexmitbad;
        tcp_bad_rexmt_restore_state(tp, th);
        tcp_ccdbg_trace(tp, th, TCP_CC_BAD_REXMT_RECOVERY);
    } else if ((tp->t_flagsext & TF_SENT_TLPROBE)
        && tp->t_tlphighrxt > 0
        && SEQ_GEQ(th->th_ack, tp->t_tlphighrxt)
        && !tcp_detect_bad_rexmt(tp, th, to, tp->t_tlpstart)) {
        /*
         * check DSACK information also to make sure that
         * the TLP was indeed needed
         */
        if (tcp_rxtseg_dsack_for_tlp(tp)) {
            /*
             * received a DSACK to indicate that TLP was
             * not needed
             */
            tcp_rxtseg_clean(tp);
            goto out;
        }

        /*
         * The tail loss probe recovered the last packet and
         * we need to adjust the congestion window to take
         * this loss into account.
         */
        ++tcpstat.tcps_tlp_recoverlastpkt;
        if (!IN_FASTRECOVERY(tp)) {
            tcp_reduce_congestion_window(tp);
            EXIT_FASTRECOVERY(tp);
        }
        tcp_ccdbg_trace(tp, th, TCP_CC_TLP_RECOVER_LASTPACKET);
    } else if (tcp_rxtseg_detect_bad_rexmt(tp, th->th_ack)) {
        /*
         * All of the retransmitted segments were duplicated, this
         * can be an indication of bad fast retransmit.
         */
        tcpstat.tcps_dsack_badrexmt++;
        tcp_bad_rexmt_restore_state(tp, th);
        tcp_ccdbg_trace(tp, th, TCP_CC_DSACK_BAD_REXMT);
        tcp_rxtseg_clean(tp);
    }
out:
    tp->t_flagsext &= ~(TF_SENT_TLPROBE);
    tp->t_tlphighrxt = 0;
    tp->t_tlpstart = 0;

    /*
     * check if the latest ack was for a segment sent during PMTU
     * blackhole detection. If the timestamp on the ack is before
     * PMTU blackhole detection, then revert the size of the max
     * segment to previous size.
     */
    if (tp->t_rxtshift > 0 && (tp->t_flags & TF_BLACKHOLE) &&
        tp->t_pmtud_start_ts > 0 && TSTMP_SUPPORTED(tp)) {
        if ((to->to_flags & TOF_TS) && to->to_tsecr != 0
            && TSTMP_LT(to->to_tsecr, tp->t_pmtud_start_ts)) {
            tcp_pmtud_revert_segment_size(tp);
        }
    }
    if (tp->t_pmtud_start_ts > 0)
        tp->t_pmtud_start_ts = 0;
}

/*
 * Check if early retransmit can be attempted according to RFC 5827.
 *
 * If packet reordering is detected on a connection, fast recovery will
 * be delayed until it is clear that the packet was lost and not reordered.
 * But reordering detection is done only when SACK is enabled.
 *
 * On connections that do not support SACK, there is a limit on the number
 * of early retransmits that can be done per minute. This limit is needed
 * to make sure that too many packets are not retransmitted when there is
 * packet reordering.
 */
static void
tcp_early_rexmt_check(struct tcpcb *tp, struct tcphdr *th)
{
    u_int32_t obytes, snd_off;
    int32_t snd_len;
    struct socket *so = tp->t_inpcb->inp_socket;

    if (early_rexmt && (SACK_ENABLED(tp) ||
        tp->t_early_rexmt_count < TCP_EARLY_REXMT_LIMIT) &&
        SEQ_GT(tp->snd_max, tp->snd_una) &&
        (tp->t_dupacks == 1 ||
        (SACK_ENABLED(tp) &&
        !TAILQ_EMPTY(&tp->snd_holes)))) {
        /*
         * If there are only a few outstanding
         * segments on the connection, we might need
         * to lower the retransmit threshold. This
         * will allow us to do Early Retransmit as
         * described in RFC 5827.
         */
        if (SACK_ENABLED(tp) &&
            !TAILQ_EMPTY(&tp->snd_holes)) {
            obytes = (tp->snd_max - tp->snd_fack) +
                tp->sackhint.sack_bytes_rexmit;
        } else {
            obytes = (tp->snd_max - tp->snd_una);
        }

        /*
         * In order to lower the retransmit threshold, the
         * following two conditions must be met.
         * 1. the amount of outstanding data is less
         * than 4*SMSS bytes
         * 2. there is no unsent data ready for
         * transmission or the advertised window
         * will limit sending new segments.
         */
        snd_off = tp->snd_max - tp->snd_una;
        snd_len = min(so->so_snd.sb_cc, tp->snd_wnd) - snd_off;
        if (obytes < (tp->t_maxseg << 2) &&
            snd_len <= 0) {
            u_int32_t osegs;

            osegs = obytes / tp->t_maxseg;
            if ((osegs * tp->t_maxseg) < obytes)
                osegs++;

            /*
             * Since the connection might have already
             * received some dupacks, we add them to
             * the outstanding segments count to get
             * the correct retransmit threshold.
             *
             * By checking for early retransmit after
             * receiving some duplicate acks when SACK
             * is supported, the connection will
             * enter fast recovery even if multiple
             * segments are lost in the same window.
             */
            osegs += tp->t_dupacks;
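            /*
             * Worked example (illustrative numbers): obytes = 3000
             * outstanding bytes with t_maxseg = 1448 round up to
             * osegs = 3; with t_dupacks = 1 that is 4, so the
             * threshold is not lowered. With obytes = 2800, osegs =
             * 2 + 1 dupack = 3 < 4, and t_rexmtthresh drops from 3
             * to 2, letting fast recovery start one dupack earlier.
             */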
            if (osegs < 4) {
                tp->t_rexmtthresh =
                    ((osegs - 1) > 1) ? (osegs - 1) : 1;
                tp->t_rexmtthresh =
                    min(tp->t_rexmtthresh, tcprexmtthresh);
                tp->t_rexmtthresh =
                    max(tp->t_rexmtthresh, tp->t_dupacks);

                if (tp->t_early_rexmt_count == 0)
                    tp->t_early_rexmt_win = tcp_now;

                if (tp->t_flagsext & TF_SENT_TLPROBE) {
                    tcpstat.tcps_tlp_recovery++;
                    tcp_ccdbg_trace(tp, th,
                        TCP_CC_TLP_RECOVERY);
                } else {
                    tcpstat.tcps_early_rexmt++;
                    tp->t_early_rexmt_count++;
                    tcp_ccdbg_trace(tp, th,
                        TCP_CC_EARLY_RETRANSMIT);
                }
            }
        }
    }

    /*
     * If we ever sent a TLP probe, the acknowledgement will trigger
     * early retransmit because the value of snd_fack will be close
     * to snd_max. This will take care of adjustments to the
     * congestion window. So we can reset the TF_SENT_TLPROBE flag.
     */
    tp->t_flagsext &= ~(TF_SENT_TLPROBE);
    tp->t_tlphighrxt = 0;
    tp->t_tlpstart = 0;
}

static boolean_t
tcp_tfo_syn(struct tcpcb *tp, struct tcpopt *to)
{
    u_char out[CCAES_BLOCK_SIZE];
    unsigned char len;

    if (!(to->to_flags & (TOF_TFO | TOF_TFOREQ)) ||
        !(tcp_fastopen & TCP_FASTOPEN_SERVER))
        return (FALSE);

    if ((to->to_flags & TOF_TFOREQ)) {
        tp->t_tfo_flags |= TFO_F_OFFER_COOKIE;

        tp->t_tfo_stats |= TFO_S_COOKIEREQ_RECV;
        tcpstat.tcps_tfo_cookie_req_rcv++;
        return (FALSE);
    }

    /* Ok, then it must be an offered cookie. We need to check that ... */
    tcp_tfo_gen_cookie(tp->t_inpcb, out, sizeof(out));

    len = *to->to_tfo - TCPOLEN_FASTOPEN_REQ;
    to->to_tfo++;
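    /*
     * Layout reminder (illustrative): to_tfo points at the option's
     * length byte, so a TFO option carrying an 8-byte cookie has a
     * length byte of 2 + 8 = 10 and len = 10 - TCPOLEN_FASTOPEN_REQ = 8;
     * to_tfo now points at the first cookie byte for the comparison
     * below.
     */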
    if (memcmp(out, to->to_tfo, len)) {
        /* Cookies are different! Let's return and offer a new cookie */
        tp->t_tfo_flags |= TFO_F_OFFER_COOKIE;

        tp->t_tfo_stats |= TFO_S_COOKIE_INVALID;
        tcpstat.tcps_tfo_cookie_invalid++;
        return (FALSE);
    }

    if (OSIncrementAtomic(&tcp_tfo_halfcnt) >= tcp_tfo_backlog) {
        /* Need to decrement again as we just increased it... */
        OSDecrementAtomic(&tcp_tfo_halfcnt);
        return (FALSE);
    }

    tp->t_tfo_flags |= TFO_F_COOKIE_VALID;

    tp->t_tfo_stats |= TFO_S_SYNDATA_RCV;
    tcpstat.tcps_tfo_syn_data_rcv++;

    return (TRUE);
}

static void
tcp_tfo_synack(struct tcpcb *tp, struct tcpopt *to)
{
    if (to->to_flags & TOF_TFO) {
        unsigned char len = *to->to_tfo - TCPOLEN_FASTOPEN_REQ;

        /*
         * If this happens, things have gone terribly wrong. len should
         * have been checked in tcp_dooptions.
         */
        VERIFY(len <= TFO_COOKIE_LEN_MAX);

        to->to_tfo++;

        tcp_cache_set_cookie(tp, to->to_tfo, len);
        tcp_heuristic_tfo_success(tp);

        tp->t_tfo_stats |= TFO_S_COOKIE_RCV;
        tcpstat.tcps_tfo_cookie_rcv++;
        if (tp->t_tfo_flags & TFO_F_COOKIE_SENT) {
            tcpstat.tcps_tfo_cookie_wrong++;
            tp->t_tfo_stats |= TFO_S_COOKIE_WRONG;
        }
    } else {
        /*
         * Thus, no cookie in the response, but we either asked for one
         * or sent SYN+DATA. Now, we need to check whether we had to
         * rexmit the SYN. If that's the case, it's better to start
         * backing off TFO-cookie requests.
         */
1739 if (tp->t_tfo_flags & TFO_F_SYN_LOSS) {
1740 tp->t_tfo_stats |= TFO_S_SYN_LOSS;
1741 tcpstat.tcps_tfo_syn_loss++;
1742
1743 tcp_heuristic_tfo_loss(tp);
1744 } else {
1745 if (tp->t_tfo_flags & TFO_F_COOKIE_REQ) {
1746 tp->t_tfo_stats |= TFO_S_NO_COOKIE_RCV;
1747 tcpstat.tcps_tfo_no_cookie_rcv++;
1748 }
1749
1750 tcp_heuristic_tfo_success(tp);
1751 }
1752 }
1753}
1754
1755static void
1756tcp_tfo_rcv_probe(struct tcpcb *tp, int tlen)
1757{
1758 if (tlen != 0)
1759 return;
1760
1761 tp->t_tfo_probe_state = TFO_PROBE_PROBING;
1762
1763 /*
1764 * We send the probe out rather quickly (after one RTO). It does not
1765 * really hurt that much; it's only one additional segment on the wire.
1766 */
1767 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, (TCP_REXMTVAL(tp)));
1768}
1769
1770static void
1771tcp_tfo_rcv_data(struct tcpcb *tp)
1772{
1773 /* Transition from PROBING to NONE as data has been received */
1774 if (tp->t_tfo_probe_state >= TFO_PROBE_PROBING)
1775 tp->t_tfo_probe_state = TFO_PROBE_NONE;
1776}
1777
1778static void
1779tcp_tfo_rcv_ack(struct tcpcb *tp, struct tcphdr *th)
1780{
1781 if (tp->t_tfo_probe_state == TFO_PROBE_PROBING &&
1782 tp->t_tfo_probes > 0) {
1783 if (th->th_seq == tp->rcv_nxt) {
1784 /* No hole, so stop probing */
1785 tp->t_tfo_probe_state = TFO_PROBE_NONE;
1786 } else if (SEQ_GT(th->th_seq, tp->rcv_nxt)) {
1787 /* There is a hole! Wait a bit for data... */
1788 tp->t_tfo_probe_state = TFO_PROBE_WAIT_DATA;
1789 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
1790 TCP_REXMTVAL(tp));
1791 }
1792 }
1793}
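/*
 * Illustrative note: the probing logic above keys off where the peer's
 * segment lands relative to rcv_nxt:
 *
 *	seq == rcv_nxt  ->  nothing is missing, stop probing
 *	seq >  rcv_nxt  ->  a hole; wait in TFO_PROBE_WAIT_DATA for
 *	                    roughly one more RTO before giving up
 *
 * This appears intended to detect middleboxes that acknowledge
 * SYN+data but silently discard the data itself.
 */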
1794
1795/*
1796 * Update snd_wnd information.
1797 */
1798static inline bool
1799tcp_update_window(struct tcpcb *tp, int thflags, struct tcphdr * th,
1800 u_int32_t tiwin, int tlen)
1801{
1802 /* Don't look at the window if there is no ACK flag */
1803 if ((thflags & TH_ACK) &&
1804 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
1805 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
1806 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
1807 /* keep track of pure window updates */
1808 if (tlen == 0 &&
1809 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
1810 tcpstat.tcps_rcvwinupd++;
1811 tp->snd_wnd = tiwin;
1812 tp->snd_wl1 = th->th_seq;
1813 tp->snd_wl2 = th->th_ack;
1814 if (tp->snd_wnd > tp->max_sndwnd)
1815 tp->max_sndwnd = tp->snd_wnd;
1816
1817 if (tp->t_inpcb->inp_socket->so_flags & SOF_MP_SUBFLOW)
1818 mptcp_update_window_fallback(tp);
1819 return (true);
1820 }
1821 return (false);
1822}
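/*
 * Illustrative note: tcp_update_window() implements the RFC 793 test
 * for "is this segment's window information newer than what we have?":
 *
 *	SEG.SEQ > SND.WL1, or
 *	SEG.SEQ == SND.WL1 and SEG.ACK > SND.WL2, or
 *	SEG.SEQ == SND.WL1 and SEG.ACK == SND.WL2 and SEG.WND > SND.WND
 *
 * using the wraparound-safe comparisons from tcp_seq.h, e.g.
 * SEQ_LT(a, b) is ((int)((a) - (b)) < 0).  Worked example: with
 * snd_wl1 = 1000, snd_wl2 = 500 and snd_wnd = 8192, a zero-length
 * segment carrying seq 1000, ack 500 and an advertised window of 16384
 * matches the last clause, so it is counted as a pure window update
 * (tcps_rcvwinupd) and snd_wnd grows to 16384.
 */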
1823
1824void
1825tcp_input(struct mbuf *m, int off0)
1826{
1827 struct tcphdr *th;
1828 struct ip *ip = NULL;
1829 struct inpcb *inp;
1830 u_char *optp = NULL;
1831 int optlen = 0;
1832 int tlen, off;
1833 int drop_hdrlen;
1834 struct tcpcb *tp = 0;
1835 int thflags;
1836 struct socket *so = 0;
1837 int todrop, acked, ourfinisacked, needoutput = 0;
1838 struct in_addr laddr;
1839#if INET6
1840 struct in6_addr laddr6;
1841#endif
1842 int dropsocket = 0;
1843 int iss = 0, nosock = 0;
1844 u_int32_t tiwin, sack_bytes_acked = 0;
1845 struct tcpopt to; /* options in this segment */
1846#if TCPDEBUG
1847 short ostate = 0;
1848#endif
1849#if IPFIREWALL
1850 struct sockaddr_in *next_hop = NULL;
1851 struct m_tag *fwd_tag;
1852#endif /* IPFIREWALL */
1853 u_char ip_ecn = IPTOS_ECN_NOTECT;
1854 unsigned int ifscope;
1855 uint8_t isconnected, isdisconnected;
1856 struct ifnet *ifp = m->m_pkthdr.rcvif;
1857 int pktf_sw_lro_pkt = (m->m_pkthdr.pkt_flags & PKTF_SW_LRO_PKT) ? 1 : 0;
1858 int nlropkts = (pktf_sw_lro_pkt == 1) ? m->m_pkthdr.lro_npkts : 1;
1859 int turnoff_lro = 0, win;
1860#if MPTCP
1861 struct mptcb *mp_tp = NULL;
1862#endif /* MPTCP */
1863 boolean_t cell = IFNET_IS_CELLULAR(ifp);
1864 boolean_t wifi = (!cell && IFNET_IS_WIFI(ifp));
1865 boolean_t wired = (!wifi && IFNET_IS_WIRED(ifp));
1866 boolean_t recvd_dsack = FALSE;
1867 struct tcp_respond_args tra;
1868
1869#define TCP_INC_VAR(stat, npkts) do { \
1870 stat += npkts; \
1871} while (0)
1872
1873 TCP_INC_VAR(tcpstat.tcps_rcvtotal, nlropkts);
1874#if IPFIREWALL
1875 /* Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain. */
1876 if (!SLIST_EMPTY(&m->m_pkthdr.tags)) {
1877 fwd_tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
1878 KERNEL_TAG_TYPE_IPFORWARD, NULL);
1879 } else {
1880 fwd_tag = NULL;
1881 }
1882 if (fwd_tag != NULL) {
1883 struct ip_fwd_tag *ipfwd_tag =
1884 (struct ip_fwd_tag *)(fwd_tag+1);
1885
1886 next_hop = ipfwd_tag->next_hop;
1887 m_tag_delete(m, fwd_tag);
1888 }
1889#endif /* IPFIREWALL */
1890
1891#if INET6
1892 struct ip6_hdr *ip6 = NULL;
1893 int isipv6;
1894#endif /* INET6 */
1895 int rstreason; /* For badport_bandlim accounting purposes */
1896 struct proc *proc0 = current_proc();
1897
1898 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_START,0,0,0,0,0);
1899
1900#if INET6
1901 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
1902#endif
1903 bzero((char *)&to, sizeof(to));
1904
1905#if INET6
1906 if (isipv6) {
1907 /*
1908 * Expect 32-bit aligned data pointer on
1909 * strict-align platforms
1910 */
1911 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
1912
1913 /* IP6_EXTHDR_CHECK() is already done at tcp6_input() */
1914 ip6 = mtod(m, struct ip6_hdr *);
1915 tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
1916 th = (struct tcphdr *)(void *)((caddr_t)ip6 + off0);
1917
1918 if (tcp_input_checksum(AF_INET6, m, th, off0, tlen))
1919 goto dropnosock;
1920
1921 KERNEL_DEBUG(DBG_LAYER_BEG, ((th->th_dport << 16) | th->th_sport),
1922 (((ip6->ip6_src.s6_addr16[0]) << 16) | (ip6->ip6_dst.s6_addr16[0])),
1923 th->th_seq, th->th_ack, th->th_win);
1924 /*
1925 * Be proactive about unspecified IPv6 address in source.
1926 * As we use all-zero to indicate an unbound/unconnected pcb,
1927 * an unspecified IPv6 address can be used to confuse us.
1928 *
1929 * Note that packets with an unspecified IPv6 destination are
1930 * already dropped in ip6_input.
1931 */
1932 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
1933 /* XXX stat */
1934 IF_TCP_STATINC(ifp, unspecv6);
1935 goto dropnosock;
1936 }
1937 DTRACE_TCP5(receive, struct mbuf *, m, struct inpcb *, NULL,
1938 struct ip6_hdr *, ip6, struct tcpcb *, NULL,
1939 struct tcphdr *, th);
1940
1941 ip_ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
1942 } else
1943#endif /* INET6 */
1944 {
1945 /*
1946 * Get IP and TCP header together in first mbuf.
1947 * Note: IP leaves IP header in first mbuf.
1948 */
1949 if (off0 > sizeof (struct ip)) {
1950 ip_stripoptions(m);
1951 off0 = sizeof(struct ip);
1952 }
1953 if (m->m_len < sizeof (struct tcpiphdr)) {
1954 if ((m = m_pullup(m, sizeof (struct tcpiphdr))) == 0) {
1955 tcpstat.tcps_rcvshort++;
1956 return;
1957 }
1958 }
1959
1960 /* Expect 32-bit aligned data pointer on strict-align platforms */
1961 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
1962
1963 ip = mtod(m, struct ip *);
1964 th = (struct tcphdr *)(void *)((caddr_t)ip + off0);
1965 tlen = ip->ip_len;
1966
1967 if (tcp_input_checksum(AF_INET, m, th, off0, tlen))
1968 goto dropnosock;
1969
1970#if INET6
1971 /* Re-initialization for later version check */
1972 ip->ip_v = IPVERSION;
1973#endif
1974 ip_ecn = (ip->ip_tos & IPTOS_ECN_MASK);
1975
1976 DTRACE_TCP5(receive, struct mbuf *, m, struct inpcb *, NULL,
1977 struct ip *, ip, struct tcpcb *, NULL, struct tcphdr *, th);
1978
1979 KERNEL_DEBUG(DBG_LAYER_BEG, ((th->th_dport << 16) | th->th_sport),
1980 (((ip->ip_src.s_addr & 0xffff) << 16) | (ip->ip_dst.s_addr & 0xffff)),
1981 th->th_seq, th->th_ack, th->th_win);
1982
1983 }
1984
1985 /*
1986 * Check that TCP offset makes sense,
1987 * pull out TCP options and adjust length.
1988 */
1989 off = th->th_off << 2;
1990 if (off < sizeof (struct tcphdr) || off > tlen) {
1991 tcpstat.tcps_rcvbadoff++;
1992 IF_TCP_STATINC(ifp, badformat);
1993 goto dropnosock;
1994 }
1995 tlen -= off; /* tlen is used instead of ti->ti_len */
1996 if (off > sizeof (struct tcphdr)) {
1997#if INET6
1998 if (isipv6) {
1999 IP6_EXTHDR_CHECK(m, off0, off, return);
2000 ip6 = mtod(m, struct ip6_hdr *);
2001 th = (struct tcphdr *)(void *)((caddr_t)ip6 + off0);
2002 } else
2003#endif /* INET6 */
2004 {
2005 if (m->m_len < sizeof(struct ip) + off) {
2006 if ((m = m_pullup(m, sizeof (struct ip) + off)) == 0) {
2007 tcpstat.tcps_rcvshort++;
2008 return;
2009 }
2010 ip = mtod(m, struct ip *);
2011 th = (struct tcphdr *)(void *)((caddr_t)ip + off0);
2012 }
2013 }
2014 optlen = off - sizeof (struct tcphdr);
2015 optp = (u_char *)(th + 1);
2016 /*
2017 * Do quick retrieval of timestamp options ("options
2018 * prediction?"). If timestamp is the only option and it's
2019 * formatted as recommended in RFC 1323 appendix A, we
2020 * quickly get the values now and not bother calling
2021 * tcp_dooptions(), etc.
2022 */
2023 if ((optlen == TCPOLEN_TSTAMP_APPA ||
2024 (optlen > TCPOLEN_TSTAMP_APPA &&
2025 optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) &&
2026 *(u_int32_t *)(void *)optp == htonl(TCPOPT_TSTAMP_HDR) &&
2027 (th->th_flags & TH_SYN) == 0) {
2028 to.to_flags |= TOF_TS;
2029 to.to_tsval = ntohl(*(u_int32_t *)(void *)(optp + 4));
2030 to.to_tsecr = ntohl(*(u_int32_t *)(void *)(optp + 8));
2031 optp = NULL; /* we've parsed the options */
2032 }
2033 }
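/*
 * Illustrative note: the "options prediction" above matches the
 * RFC 1323 Appendix A layout, where the timestamp option is padded
 * with two NOPs so the two 32-bit timestamps stay aligned:
 *
 *	+--------+--------+--------+--------+
 *	|  NOP   |  NOP   | kind=8 | len=10 |   <- TCPOPT_TSTAMP_HDR
 *	+--------+--------+--------+--------+
 *	|          TS Value (tsval)         |
 *	+-----------------------------------+
 *	|       TS Echo Reply (tsecr)       |
 *	+-----------------------------------+
 *
 * i.e. TCPOLEN_TSTAMP_APPA is 12 bytes and the first word is the
 * constant 0x0101080a in network byte order, which is why a single
 * 32-bit compare against htonl(TCPOPT_TSTAMP_HDR) suffices.
 */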
2034 thflags = th->th_flags;
2035
2036#if TCP_DROP_SYNFIN
2037 /*
2038 * If the drop_synfin option is enabled, drop all packets with
2039 * both the SYN and FIN bits set. This prevents e.g. nmap from
2040 * identifying the TCP/IP stack.
2041 *
2042 * This is a violation of the TCP specification.
2043 */
2044 if (drop_synfin && (thflags & (TH_SYN|TH_FIN)) == (TH_SYN|TH_FIN)) {
2045 IF_TCP_STATINC(ifp, synfin);
2046 goto dropnosock;
2047 }
2048#endif
2049
2050 /*
2051 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options,
2052 * until after ip6_savecontrol() is called and before other functions
2053 * which don't want those proto headers.
2054 * Because ip6_savecontrol() is going to parse the mbuf to
2055 * search for data to be passed up to user-land, it wants mbuf
2056 * parameters to be unchanged.
2057 */
2058 drop_hdrlen = off0 + off;
2059
2060 /* Since this is an entry point for input processing of tcp packets, we
2061 * can update the tcp clock here.
2062 */
2063 calculate_tcp_clock();
2064
2065 /*
2066 * Record the interface on which this segment arrived; this does not
2067 * affect normal data output (for non-detached TCP) as it provides a
2068 * hint about which route and interface to use for sending in the
2069 * absence of a PCB, when scoped routing (and thus source interface
2070 * selection) are enabled.
2071 */
2072 if ((m->m_pkthdr.pkt_flags & PKTF_LOOP) || m->m_pkthdr.rcvif == NULL)
2073 ifscope = IFSCOPE_NONE;
2074 else
2075 ifscope = m->m_pkthdr.rcvif->if_index;
2076
2077 /*
2078 * Convert TCP protocol specific fields to host format.
2079 */
2080
2081#if BYTE_ORDER != BIG_ENDIAN
2082 NTOHL(th->th_seq);
2083 NTOHL(th->th_ack);
2084 NTOHS(th->th_win);
2085 NTOHS(th->th_urp);
2086#endif
2087
2088 /*
2089 * Locate pcb for segment.
2090 */
2091findpcb:
2092
2093 isconnected = FALSE;
2094 isdisconnected = FALSE;
2095
2096#if IPFIREWALL_FORWARD
2097 if (next_hop != NULL
2098#if INET6
2099 && isipv6 == 0 /* IPv6 support is not there yet */
2100#endif /* INET6 */
2101 ) {
2102 /*
2103 * Diverted. Pretend to be the destination.
2104 * Do we already have one like this?
2105 */
2106 inp = in_pcblookup_hash(&tcbinfo, ip->ip_src, th->th_sport,
2107 ip->ip_dst, th->th_dport, 0, m->m_pkthdr.rcvif);
2108 if (!inp) {
2109 /*
2110 * No, then it's new. Try to find the ambushing socket.
2111 */
2112 if (!next_hop->sin_port) {
2113 inp = in_pcblookup_hash(&tcbinfo, ip->ip_src,
2114 th->th_sport, next_hop->sin_addr,
2115 th->th_dport, 1, m->m_pkthdr.rcvif);
2116 } else {
2117 inp = in_pcblookup_hash(&tcbinfo,
2118 ip->ip_src, th->th_sport,
2119 next_hop->sin_addr,
2120 ntohs(next_hop->sin_port), 1,
2121 m->m_pkthdr.rcvif);
2122 }
2123 }
2124 } else
2125#endif /* IPFIREWALL_FORWARD */
2126 {
2127#if INET6
2128 if (isipv6)
2129 inp = in6_pcblookup_hash(&tcbinfo, &ip6->ip6_src, th->th_sport,
2130 &ip6->ip6_dst, th->th_dport, 1,
2131 m->m_pkthdr.rcvif);
2132 else
2133#endif /* INET6 */
2134 inp = in_pcblookup_hash(&tcbinfo, ip->ip_src, th->th_sport,
2135 ip->ip_dst, th->th_dport, 1, m->m_pkthdr.rcvif);
2136 }
2137
2138 /*
2139 * Use the interface scope information from the PCB for outbound
2140 * segments. If the PCB isn't present and if scoped routing is
2141 * enabled, tcp_respond will use the scope of the interface where
2142 * the segment arrived on.
2143 */
2144 if (inp != NULL && (inp->inp_flags & INP_BOUND_IF))
2145 ifscope = inp->inp_boundifp->if_index;
2146
2147 /*
2148 * If the state is CLOSED (i.e., TCB does not exist) then
2149 * all data in the incoming segment is discarded.
2150 * If the TCB exists but is in CLOSED state, it is embryonic,
2151 * but should either do a listen or a connect soon.
2152 */
2153 if (inp == NULL) {
2154 if (log_in_vain) {
2155#if INET6
2156 char dbuf[MAX_IPv6_STR_LEN], sbuf[MAX_IPv6_STR_LEN];
2157#else /* INET6 */
2158 char dbuf[MAX_IPv4_STR_LEN], sbuf[MAX_IPv4_STR_LEN];
2159#endif /* INET6 */
2160
2161#if INET6
2162 if (isipv6) {
2163 inet_ntop(AF_INET6, &ip6->ip6_dst, dbuf, sizeof(dbuf));
2164 inet_ntop(AF_INET6, &ip6->ip6_src, sbuf, sizeof(sbuf));
2165 } else
2166#endif
2167 {
2168 inet_ntop(AF_INET, &ip->ip_dst, dbuf, sizeof(dbuf));
2169 inet_ntop(AF_INET, &ip->ip_src, sbuf, sizeof(sbuf));
2170 }
2171 switch (log_in_vain) {
2172 case 1:
2173 if(thflags & TH_SYN)
2174 log(LOG_INFO,
2175 "Connection attempt to TCP %s:%d from %s:%d\n",
2176 dbuf, ntohs(th->th_dport),
2177 sbuf,
2178 ntohs(th->th_sport));
2179 break;
2180 case 2:
2181 log(LOG_INFO,
2182 "Connection attempt to TCP %s:%d from %s:%d flags:0x%x\n",
2183 dbuf, ntohs(th->th_dport), sbuf,
2184 ntohs(th->th_sport), thflags);
2185 break;
2186 case 3:
2187 case 4:
2188 if ((thflags & TH_SYN) && !(thflags & TH_ACK) &&
2189 !(m->m_flags & (M_BCAST | M_MCAST)) &&
2190#if INET6
2191 ((isipv6 && !IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) ||
2192 (!isipv6 && ip->ip_dst.s_addr != ip->ip_src.s_addr))
2193#else
2194 ip->ip_dst.s_addr != ip->ip_src.s_addr
2195#endif
2196 )
2197 log_in_vain_log((LOG_INFO,
2198 "Stealth Mode connection attempt to TCP %s:%d from %s:%d\n",
2199 dbuf, ntohs(th->th_dport),
2200 sbuf,
2201 ntohs(th->th_sport)));
2202 break;
2203 default:
2204 break;
2205 }
2206 }
2207 if (blackhole) {
2208 if (m->m_pkthdr.rcvif &&
2209 m->m_pkthdr.rcvif->if_type != IFT_LOOP)
2210 switch (blackhole) {
2211 case 1:
2212 if (thflags & TH_SYN)
2213 goto dropnosock;
2214 break;
2215 case 2:
2216 goto dropnosock;
2217 default:
2218 goto dropnosock;
2219 }
2220 }
2221 rstreason = BANDLIM_RST_CLOSEDPORT;
2222 IF_TCP_STATINC(ifp, noconnnolist);
2223 goto dropwithresetnosock;
2224 }
2225 so = inp->inp_socket;
2226 if (so == NULL) {
2227 /* This case shouldn't happen, as the socket shouldn't be NULL
2228 * unless inp_state is set to INPCB_STATE_DEAD.
2229 * But just in case, we pretend we didn't find the socket here,
2230 * as this isn't cause for a panic (the socket might be leaked, however)...
2231 */
2232 inp = NULL;
2233#if TEMPDEBUG
2234 printf("tcp_input: no more socket for inp=%x. This shouldn't happen\n", inp);
2235#endif
2236 goto dropnosock;
2237 }
2238
2239 socket_lock(so, 1);
2240 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
2241 socket_unlock(so, 1);
2242 inp = NULL; // pretend we didn't find it
2243 goto dropnosock;
2244 }
2245
2246#if NECP
2247 if (so->so_state & SS_ISCONNECTED) {
2248 // Connected TCP sockets have a fully-bound local and remote,
2249 // so the policy check doesn't need to override addresses
2250 if (!necp_socket_is_allowed_to_send_recv(inp, NULL, NULL)) {
2251 IF_TCP_STATINC(ifp, badformat);
2252 goto drop;
2253 }
2254 } else {
2255#if INET6
2256 if (isipv6) {
2257 if (!necp_socket_is_allowed_to_send_recv_v6(inp,
2258 th->th_dport, th->th_sport, &ip6->ip6_dst,
2259 &ip6->ip6_src, ifp, NULL, NULL)) {
2260 IF_TCP_STATINC(ifp, badformat);
2261 goto drop;
2262 }
2263 } else
2264#endif
2265 {
2266 if (!necp_socket_is_allowed_to_send_recv_v4(inp,
2267 th->th_dport, th->th_sport, &ip->ip_dst, &ip->ip_src,
2268 ifp, NULL, NULL)) {
2269 IF_TCP_STATINC(ifp, badformat);
2270 goto drop;
2271 }
2272 }
2273 }
2274#endif /* NECP */
2275
2276 tp = intotcpcb(inp);
2277 if (tp == 0) {
2278 rstreason = BANDLIM_RST_CLOSEDPORT;
2279 IF_TCP_STATINC(ifp, noconnlist);
2280 goto dropwithreset;
2281 }
2282 if (tp->t_state == TCPS_CLOSED)
2283 goto drop;
2284
2285 /* Unscale the window into a 32-bit value. */
2286 if ((thflags & TH_SYN) == 0)
2287 tiwin = th->th_win << tp->snd_scale;
2288 else
2289 tiwin = th->th_win;
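/*
 * Illustrative note: with window scaling (RFC 1323/7323), the 16-bit
 * window field advertised by the peer is shifted left by snd_scale.
 * E.g. with snd_scale = 6, a raw th_win of 1024 yields
 * tiwin = 1024 << 6 = 65536 bytes.  SYN segments are exempt: the
 * window in a SYN is never scaled, which is why the shift is skipped
 * when TH_SYN is set.
 */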
2290
2291#if CONFIG_MACF_NET
2292 if (mac_inpcb_check_deliver(inp, m, AF_INET, SOCK_STREAM))
2293 goto drop;
2294#endif
2295
2296 /* Avoid processing packets while closing a listen socket */
2297 if (tp->t_state == TCPS_LISTEN &&
2298 (so->so_options & SO_ACCEPTCONN) == 0)
2299 goto drop;
2300
2301 if (so->so_options & (SO_DEBUG|SO_ACCEPTCONN)) {
2302#if TCPDEBUG
2303 if (so->so_options & SO_DEBUG) {
2304 ostate = tp->t_state;
2305#if INET6
2306 if (isipv6)
2307 bcopy((char *)ip6, (char *)tcp_saveipgen,
2308 sizeof(*ip6));
2309 else
2310#endif /* INET6 */
2311 bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
2312 tcp_savetcp = *th;
2313 }
2314#endif
2315 if (so->so_options & SO_ACCEPTCONN) {
2316 struct tcpcb *tp0 = tp;
2317 struct socket *so2;
2318 struct socket *oso;
2319 struct sockaddr_storage from;
2320#if INET6
2321 struct inpcb *oinp = sotoinpcb(so);
2322#endif /* INET6 */
2323 struct ifnet *head_ifscope;
2324 unsigned int head_nocell, head_recvanyif,
2325 head_noexpensive, head_awdl_unrestricted,
2326 head_intcoproc_allowed;
2327
2328 /* Get listener's bound-to-interface, if any */
2329 head_ifscope = (inp->inp_flags & INP_BOUND_IF) ?
2330 inp->inp_boundifp : NULL;
2331 /* Get listener's no-cellular information, if any */
2332 head_nocell = INP_NO_CELLULAR(inp);
2333 /* Get listener's recv-any-interface, if any */
2334 head_recvanyif = (inp->inp_flags & INP_RECV_ANYIF);
2335 /* Get listener's no-expensive information, if any */
2336 head_noexpensive = INP_NO_EXPENSIVE(inp);
2337 head_awdl_unrestricted = INP_AWDL_UNRESTRICTED(inp);
2338 head_intcoproc_allowed = INP_INTCOPROC_ALLOWED(inp);
2339
2340 /*
2341 * If the state is LISTEN then ignore segment if it contains an RST.
2342 * If the segment contains an ACK then it is bad and send a RST.
2343 * If it does not contain a SYN then it is not interesting; drop it.
2344 * If it is from this socket, drop it, it must be forged.
2345 */
2346 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) != TH_SYN) {
2347 IF_TCP_STATINC(ifp, listbadsyn);
2348
2349 if (thflags & TH_RST) {
2350 goto drop;
2351 }
2352 if (thflags & TH_ACK) {
2353 tp = NULL;
2354 tcpstat.tcps_badsyn++;
2355 rstreason = BANDLIM_RST_OPENPORT;
2356 goto dropwithreset;
2357 }
2358
2359 /* We come here if there is no SYN set */
2360 tcpstat.tcps_badsyn++;
2361 goto drop;
2362 }
2363 KERNEL_DEBUG(DBG_FNC_TCP_NEWCONN | DBG_FUNC_START,0,0,0,0,0);
2364 if (th->th_dport == th->th_sport) {
2365#if INET6
2366 if (isipv6) {
2367 if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst,
2368 &ip6->ip6_src))
2369 goto drop;
2370 } else
2371#endif /* INET6 */
2372 if (ip->ip_dst.s_addr == ip->ip_src.s_addr)
2373 goto drop;
2374 }
2375 /*
2376 * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN
2377 * in_broadcast() should never return true on a received
2378 * packet with M_BCAST not set.
2379 *
2380 * Packets with a multicast source address should also
2381 * be discarded.
2382 */
2383 if (m->m_flags & (M_BCAST|M_MCAST))
2384 goto drop;
2385#if INET6
2386 if (isipv6) {
2387 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
2388 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
2389 goto drop;
2390 } else
2391#endif
2392 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
2393 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
2394 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
2395 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
2396 goto drop;
2397
2398
2399#if INET6
2400 /*
2401 * If deprecated addresses are forbidden, we do not
2402 * accept a SYN to a deprecated interface address, to
2403 * prevent any new inbound connection from getting
2404 * established.
2405 * When we refuse the SYN, we send a TCP RST with the
2406 * deprecated source address (instead of dropping the
2407 * segment silently). This is a compromise: it is much
2408 * better for the peer to receive a RST, since the RST
2409 * will be the final packet for the exchange.
2410 *
2411 * If we do not forbid deprecated addresses, we accept
2412 * the SYN packet. RFC 4862 forbids dropping SYN in
2413 * this case.
2414 */
2415 if (isipv6 && !ip6_use_deprecated) {
2416 uint32_t ia6_flags;
2417
2418 if (ip6_getdstifaddr_info(m, NULL,
2419 &ia6_flags) == 0) {
2420 if (ia6_flags & IN6_IFF_DEPRECATED) {
2421 tp = NULL;
2422 rstreason = BANDLIM_RST_OPENPORT;
2423 IF_TCP_STATINC(ifp, deprecate6);
2424 goto dropwithreset;
2425 }
2426 }
2427 }
2428#endif
2429 if (so->so_filt) {
2430#if INET6
2431 if (isipv6) {
2432 struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)&from;
2433
2434 sin6->sin6_len = sizeof(*sin6);
2435 sin6->sin6_family = AF_INET6;
2436 sin6->sin6_port = th->th_sport;
2437 sin6->sin6_flowinfo = 0;
2438 sin6->sin6_addr = ip6->ip6_src;
2439 sin6->sin6_scope_id = 0;
2440 }
2441 else
2442#endif
2443 {
2444 struct sockaddr_in *sin = (struct sockaddr_in*)&from;
2445
2446 sin->sin_len = sizeof(*sin);
2447 sin->sin_family = AF_INET;
2448 sin->sin_port = th->th_sport;
2449 sin->sin_addr = ip->ip_src;
2450 }
2451 so2 = sonewconn(so, 0, (struct sockaddr*)&from);
2452 } else {
2453 so2 = sonewconn(so, 0, NULL);
2454 }
2455 if (so2 == 0) {
2456 tcpstat.tcps_listendrop++;
2457 if (tcp_dropdropablreq(so)) {
2458 if (so->so_filt)
2459 so2 = sonewconn(so, 0, (struct sockaddr*)&from);
2460 else
2461 so2 = sonewconn(so, 0, NULL);
2462 }
2463 if (!so2)
2464 goto drop;
2465 }
2466
2467 /* Point "inp" and "tp" in tandem to new socket */
2468 inp = (struct inpcb *)so2->so_pcb;
2469 tp = intotcpcb(inp);
2470
2471 oso = so;
2472 socket_unlock(so, 0); /* Unlock but keep a reference on listener for now */
2473
2474 so = so2;
2475 socket_lock(so, 1);
2476 /*
2477 * Mark socket as temporary until we're
2478 * committed to keeping it. The code at
2479 * ``drop'' and ``dropwithreset'' checks the
2480 * flag dropsocket to see if the temporary
2481 * socket created here should be discarded.
2482 * We mark the socket as discardable until
2483 * we're committed to it below in TCPS_LISTEN.
2484 * There are some error conditions in which we
2485 * have to drop the temporary socket.
2486 */
2487 dropsocket++;
2488 /*
2489 * Inherit INP_BOUND_IF from listener; testing if
2490 * head_ifscope is non-NULL is sufficient, since it
2491 * can only be set to a non-zero value earlier if
2492 * the listener has such a flag set.
2493 */
2494 if (head_ifscope != NULL) {
2495 inp->inp_flags |= INP_BOUND_IF;
2496 inp->inp_boundifp = head_ifscope;
2497 } else {
2498 inp->inp_flags &= ~INP_BOUND_IF;
2499 }
2500 /*
2501 * Inherit restrictions from listener.
2502 */
2503 if (head_nocell)
2504 inp_set_nocellular(inp);
2505 if (head_noexpensive)
2506 inp_set_noexpensive(inp);
2507 if (head_awdl_unrestricted)
2508 inp_set_awdl_unrestricted(inp);
2509 if (head_intcoproc_allowed)
2510 inp_set_intcoproc_allowed(inp);
2511 /*
2512 * Inherit {IN,IN6}_RECV_ANYIF from listener.
2513 */
2514 if (head_recvanyif)
2515 inp->inp_flags |= INP_RECV_ANYIF;
2516 else
2517 inp->inp_flags &= ~INP_RECV_ANYIF;
2518#if INET6
2519 if (isipv6)
2520 inp->in6p_laddr = ip6->ip6_dst;
2521 else {
2522 inp->inp_vflag &= ~INP_IPV6;
2523 inp->inp_vflag |= INP_IPV4;
2524#endif /* INET6 */
2525 inp->inp_laddr = ip->ip_dst;
2526#if INET6
2527 }
2528#endif /* INET6 */
2529 inp->inp_lport = th->th_dport;
2530 if (in_pcbinshash(inp, 0) != 0) {
2531 /*
2532 * Undo the assignments above if we failed to
2533 * put the PCB on the hash lists.
2534 */
2535#if INET6
2536 if (isipv6)
2537 inp->in6p_laddr = in6addr_any;
2538 else
2539#endif /* INET6 */
2540 inp->inp_laddr.s_addr = INADDR_ANY;
2541 inp->inp_lport = 0;
2542 socket_lock(oso, 0); /* release ref on parent */
2543 socket_unlock(oso, 1);
2544 goto drop;
2545 }
2546#if INET6
2547 if (isipv6) {
2548 /*
2549 * Inherit socket options from the listening
2550 * socket.
2551 * Note that in6p_inputopts are not (even
2552 * should not be) copied, since it stores
2553 * previously received options and is used to
2554 * detect if each new option is different from
2555 * the previous one and hence should be passed
2556 * to a user.
2557 * If we copied in6p_inputopts, a user would
2558 * not be able to receive options just after
2559 * calling the accept system call.
2560 */
2561 inp->inp_flags |=
2562 oinp->inp_flags & INP_CONTROLOPTS;
2563 if (oinp->in6p_outputopts)
2564 inp->in6p_outputopts =
2565 ip6_copypktopts(oinp->in6p_outputopts,
2566 M_NOWAIT);
2567 } else
2568#endif /* INET6 */
2569 {
2570 inp->inp_options = ip_srcroute();
2571 inp->inp_ip_tos = oinp->inp_ip_tos;
2572 }
2573 socket_lock(oso, 0);
2574#if IPSEC
2575 /* copy old policy into new socket's */
2576 if (sotoinpcb(oso)->inp_sp)
2577 {
2578 int error = 0;
2579 /* Is it a security hole here to silently fail to copy the policy? */
2580 if (inp->inp_sp != NULL)
2581 error = ipsec_init_policy(so, &inp->inp_sp);
2582 if (error != 0 || ipsec_copy_policy(sotoinpcb(oso)->inp_sp, inp->inp_sp))
2583 printf("tcp_input: could not copy policy\n");
2584 }
2585#endif
2586 /* inherit states from the listener */
2587 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
2588 struct tcpcb *, tp, int32_t, TCPS_LISTEN);
2589 tp->t_state = TCPS_LISTEN;
2590 tp->t_flags |= tp0->t_flags & (TF_NOPUSH|TF_NOOPT|TF_NODELAY);
2591 tp->t_flagsext |= (tp0->t_flagsext & (TF_RXTFINDROP|TF_NOTIMEWAIT|TF_FASTOPEN));
2592 tp->t_keepinit = tp0->t_keepinit;
2593 tp->t_keepcnt = tp0->t_keepcnt;
2594 tp->t_keepintvl = tp0->t_keepintvl;
2595 tp->t_adaptive_wtimo = tp0->t_adaptive_wtimo;
2596 tp->t_adaptive_rtimo = tp0->t_adaptive_rtimo;
2597 tp->t_inpcb->inp_ip_ttl = tp0->t_inpcb->inp_ip_ttl;
2598 if ((so->so_flags & SOF_NOTSENT_LOWAT) != 0)
2599 tp->t_notsent_lowat = tp0->t_notsent_lowat;
2600 tp->t_inpcb->inp_flags2 |=
2601 tp0->t_inpcb->inp_flags2 & INP2_KEEPALIVE_OFFLOAD;
2602
2603 /* now drop the reference on the listener */
2604 socket_unlock(oso, 1);
2605
2606 tcp_set_max_rwinscale(tp, so, TCP_AUTORCVBUF_MAX(ifp));
2607
2608 KERNEL_DEBUG(DBG_FNC_TCP_NEWCONN | DBG_FUNC_END,0,0,0,0,0);
2609 }
2610 }
2611 socket_lock_assert_owned(so);
2612
2613 if (tp->t_state == TCPS_ESTABLISHED && tlen > 0) {
2614 /*
2615 * Evaluate the rate of arrival of packets to see if the
2616 * receiver can reduce the ack traffic. The algorithm to
2617 * stretch acks will be enabled if the connection meets
2618 * certain criteria defined in the tcp_stretch_ack_enable() function.
2619 */
2620 if ((tp->t_flagsext & TF_RCVUNACK_WAITSS) != 0) {
2621 TCP_INC_VAR(tp->rcv_waitforss, nlropkts);
2622 }
2623 if (tcp_stretch_ack_enable(tp, thflags)) {
2624 tp->t_flags |= TF_STRETCHACK;
2625 tp->t_flagsext &= ~(TF_RCVUNACK_WAITSS);
2626 tp->rcv_waitforss = 0;
2627 } else {
2628 tp->t_flags &= ~(TF_STRETCHACK);
2629 }
2630 if (TSTMP_GT(tp->rcv_unackwin, tcp_now)) {
2631 tp->rcv_by_unackwin += (tlen + off);
2632 } else {
2633 tp->rcv_unackwin = tcp_now + tcp_rcvunackwin;
2634 tp->rcv_by_unackwin = tlen + off;
2635 }
2636 }
2637
2638 /*
2639 * Keep track of how many bytes were received in the LRO packet
2640 */
2641 if ((pktf_sw_lro_pkt) && (nlropkts > 2)) {
2642 tp->t_lropktlen += tlen;
2643 }
2644 /*
2645 * Explicit Congestion Notification - Flag that we need to send ECE if
2646 * + The IP Congestion experienced flag was set.
2647 * + Socket is in established state
2648 * + We negotiated ECN in the TCP setup
2649 * + This isn't a pure ack (tlen > 0)
2650 * + The data is in the valid window
2651 *
2652 * TE_SENDECE will be cleared when we receive a packet with TH_CWR set.
2653 */
2654 if (ip_ecn == IPTOS_ECN_CE && tp->t_state == TCPS_ESTABLISHED &&
2655 TCP_ECN_ENABLED(tp) && tlen > 0 &&
2656 SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
2657 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
2658 tp->t_ecn_recv_ce++;
2659 tcpstat.tcps_ecn_recv_ce++;
2660 INP_INC_IFNET_STAT(inp, ecn_recv_ce);
2661 /* Mark this connection as it received CE from network */
2662 tp->ecn_flags |= TE_RECV_ECN_CE;
2663 tp->ecn_flags |= TE_SENDECE;
2664 }
2665
2666 /*
2667 * Clear TE_SENDECE if TH_CWR is set. This is harmless, so we don't
2668 * bother doing extensive checks for state and whatnot.
2669 */
2670 if (thflags & TH_CWR) {
2671 tp->ecn_flags &= ~TE_SENDECE;
2672 tp->t_ecn_recv_cwr++;
2673 }
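/*
 * Illustrative note: taken together, the two blocks above implement
 * the receiver half of the RFC 3168 feedback loop:
 *
 *	IP CE mark arrives  ->  set TE_SENDECE, echo ECE on every ACK
 *	TH_CWR seen         ->  clear TE_SENDECE, stop echoing ECE
 *
 * so ECE keeps being sent until the sender confirms (via CWR) that it
 * has reduced its congestion window.
 */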
2674
2675 /*
2676 * If we received an explicit notification of congestion in
2677 * the IP TOS ECN bits or by the CWR bit in the TCP header flags, reset
2678 * the ack-stretching state. We need to handle ECN notification if
2679 * an ECN setup SYN was sent even once.
2680 */
2681 if (tp->t_state == TCPS_ESTABLISHED
2682 && (tp->ecn_flags & TE_SETUPSENT)
2683 && (ip_ecn == IPTOS_ECN_CE || (thflags & TH_CWR))) {
2684 tcp_reset_stretch_ack(tp);
2685 CLEAR_IAJ_STATE(tp);
2686 }
2687
2688 if (ip_ecn == IPTOS_ECN_CE && tp->t_state == TCPS_ESTABLISHED &&
2689 !TCP_ECN_ENABLED(tp) && !(tp->ecn_flags & TE_CEHEURI_SET)) {
2690 tcpstat.tcps_ecn_fallback_ce++;
2691 tcp_heuristic_ecn_aggressive(tp);
2692 tp->ecn_flags |= TE_CEHEURI_SET;
2693 }
2694
2695 if (tp->t_state == TCPS_ESTABLISHED && TCP_ECN_ENABLED(tp) &&
2696 ip_ecn == IPTOS_ECN_CE && !(tp->ecn_flags & TE_CEHEURI_SET)) {
2697 if (inp->inp_stat->rxpackets < ECN_MIN_CE_PROBES) {
2698 tp->t_ecn_recv_ce_pkt++;
2699 } else if (tp->t_ecn_recv_ce_pkt > ECN_MAX_CE_RATIO) {
2700 tcpstat.tcps_ecn_fallback_ce++;
2701 tcp_heuristic_ecn_aggressive(tp);
2702 tp->ecn_flags |= TE_CEHEURI_SET;
2703 INP_INC_IFNET_STAT(inp, ecn_fallback_ce);
2704 } else {
2705 /* We tracked the first ECN_MIN_CE_PROBES segments, so we
2706 * now know that the path is good.
2707 */
2708 tp->ecn_flags |= TE_CEHEURI_SET;
2709 }
2710 }
2711
2712 /*
2713 * Try to determine if we are receiving a packet after a long time.
2714 * Use our own approximation of idletime to roughly measure remote
2715 * end's idle time. Since slowstart is used after an idle period
2716 * we want to avoid doing LRO if the remote end is not up to date
2717 * on initial window support and starts with 1 or 2 packets as its IW.
2718 */
2719 if (sw_lro && (tp->t_flagsext & TF_LRO_OFFLOADED) &&
2720 ((tcp_now - tp->t_rcvtime) >= (TCP_IDLETIMEOUT(tp)))) {
2721 turnoff_lro = 1;
2722 }
2723
2724 /* Update rcvtime as a new segment was received on the connection */
2725 tp->t_rcvtime = tcp_now;
2726
2727 /*
2728 * Segment received on connection.
2729 * Reset idle time and keep-alive timer.
2730 */
2731 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
2732 tcp_keepalive_reset(tp);
2733
2734 if (tp->t_mpsub)
2735 mptcp_reset_keepalive(tp);
2736 }
2737
2738 /*
2739 * Process options if not in LISTEN state,
2740 * else do it below (after getting remote address).
2741 */
2742 if (tp->t_state != TCPS_LISTEN && optp) {
2743 tcp_dooptions(tp, optp, optlen, th, &to);
2744#if MPTCP
2745 if (mptcp_input_preproc(tp, m, drop_hdrlen) != 0) {
2746 tp->t_flags |= TF_ACKNOW;
2747 (void) tcp_output(tp);
2748 tcp_check_timer_state(tp);
2749 socket_unlock(so, 1);
2750 KERNEL_DEBUG(DBG_FNC_TCP_INPUT |
2751 DBG_FUNC_END,0,0,0,0,0);
2752 return;
2753 }
2754#endif /* MPTCP */
2755 }
2756 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
2757 if (!(thflags & TH_ACK) ||
2758 (SEQ_GT(th->th_ack, tp->iss) &&
2759 SEQ_LEQ(th->th_ack, tp->snd_max)))
2760 tcp_finalize_options(tp, &to, ifscope);
2761 }
2762
2763#if TRAFFIC_MGT
2764 /*
2765 * Compute inter-packet arrival jitter. According to RFC 3550,
2766 * inter-packet arrival jitter is defined as the difference in
2767 * packet spacing at the receiver compared to the sender for a
2768 * pair of packets. When two packets of maximum segment size come
2769 * one after the other with consecutive sequence numbers, we
2770 * consider them as packets sent together at the sender and use
2771 * them as a pair to compute inter-packet arrival jitter. This
2772 * metric indicates the delay induced by the network components due
2773 * to queuing in edge/access routers.
2774 */
2775 if (tp->t_state == TCPS_ESTABLISHED &&
2776 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK|TH_ECE|TH_PUSH)) == TH_ACK &&
2777 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
2778 ((to.to_flags & TOF_TS) == 0 ||
2779 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) &&
2780 th->th_seq == tp->rcv_nxt && LIST_EMPTY(&tp->t_segq)) {
2781 int seg_size = tlen;
2782 if (tp->iaj_pktcnt <= IAJ_IGNORE_PKTCNT) {
2783 TCP_INC_VAR(tp->iaj_pktcnt, nlropkts);
2784 }
2785
2786 if (m->m_pkthdr.pkt_flags & PKTF_SW_LRO_PKT) {
2787 seg_size = m->m_pkthdr.lro_pktlen;
2788 }
2789 if (tp->iaj_size == 0 || seg_size > tp->iaj_size ||
2790 (seg_size == tp->iaj_size && tp->iaj_rcv_ts == 0)) {
2791 /*
2792 * State related to inter-arrival jitter is
2793 * uninitialized or we are trying to find a good
2794 * first packet to start computing the metric
2795 */
2796 update_iaj_state(tp, seg_size, 0);
2797 } else {
2798 if (seg_size == tp->iaj_size) {
2799 /*
2800 * Compute inter-arrival jitter taking
2801 * this packet as the second packet
2802 */
2803 if (pktf_sw_lro_pkt)
2804 compute_iaj(tp, nlropkts,
2805 m->m_pkthdr.lro_elapsed);
2806 else
2807 compute_iaj(tp, 1, 0);
2808 }
2809 if (seg_size < tp->iaj_size) {
2810 /*
2811 * There is a smaller packet in the stream.
2812 * Sometimes the maximum size supported
2813 * on a path can change if there is a new
2814 * link with smaller MTU. The receiver will
2815 * not know about this change. If there
2816 * are too many packets smaller than
2817 * iaj_size, we try to learn the iaj_size
2818 * again.
2819 */
2820 TCP_INC_VAR(tp->iaj_small_pkt, nlropkts);
2821 if (tp->iaj_small_pkt > RESET_IAJ_SIZE_THRESH) {
2822 update_iaj_state(tp, seg_size, 1);
2823 } else {
2824 CLEAR_IAJ_STATE(tp);
2825 }
2826 } else {
2827 update_iaj_state(tp, seg_size, 0);
2828 }
2829 }
2830 } else {
2831 CLEAR_IAJ_STATE(tp);
2832 }
2833#endif /* TRAFFIC_MGT */
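/*
 * Illustrative note: compute_iaj() feeds an estimator in the spirit of
 * the RFC 3550 inter-arrival jitter filter.  RFC 3550's reference form,
 * for consecutive packets i-1 and i with transit-time difference
 * D(i-1, i), is
 *
 *	J(i) = J(i-1) + (|D(i-1, i)| - J(i-1)) / 16
 *
 * i.e. an exponentially weighted moving average with gain 1/16.  The
 * details of xnu's variant (update_iaj_state()/compute_iaj()) may
 * differ from this textbook form.
 */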
2834
2835 /*
2836 * Header prediction: check for the two common cases
2837 * of a uni-directional data xfer. If the packet has
2838 * no control flags, is in-sequence, the window didn't
2839 * change and we're not retransmitting, it's a
2840 * candidate. If the length is zero and the ack moved
2841 * forward, we're the sender side of the xfer. Just
2842 * free the data acked & wake any higher level process
2843 * that was blocked waiting for space. If the length
2844 * is non-zero and the ack didn't move, we're the
2845 * receiver side. If we're getting packets in-order
2846 * (the reassembly queue is empty), add the data to
2847 * the socket buffer and note that we need a delayed ack.
2848 * Make sure that the hidden state-flags are also off.
2849 * Since we check for TCPS_ESTABLISHED above, it can only
2850 * be TH_NEEDSYN.
2851 */
2852 if (tp->t_state == TCPS_ESTABLISHED &&
2853 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK|TH_ECE|TH_CWR)) == TH_ACK &&
2854 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
2855 ((to.to_flags & TOF_TS) == 0 ||
2856 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) &&
2857 th->th_seq == tp->rcv_nxt &&
2858 tiwin && tiwin == tp->snd_wnd &&
2859 tp->snd_nxt == tp->snd_max) {
2860
2861 /*
2862 * If last ACK falls within this segment's sequence numbers,
2863 * record the timestamp.
2864 * NOTE that the test is modified according to the latest
2865 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
2866 */
2867 if ((to.to_flags & TOF_TS) != 0 &&
2868 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
2869 tp->ts_recent_age = tcp_now;
2870 tp->ts_recent = to.to_tsval;
2871 }
2872
2873 if (tlen == 0) {
2874 if (SEQ_GT(th->th_ack, tp->snd_una) &&
2875 SEQ_LEQ(th->th_ack, tp->snd_max) &&
2876 tp->snd_cwnd >= tp->snd_ssthresh &&
2877 (!IN_FASTRECOVERY(tp) &&
2878 ((!(SACK_ENABLED(tp)) &&
2879 tp->t_dupacks < tp->t_rexmtthresh) ||
2880 (SACK_ENABLED(tp) && to.to_nsacks == 0 &&
2881 TAILQ_EMPTY(&tp->snd_holes))))) {
2882 /*
2883 * this is a pure ack for outstanding data.
2884 */
2885 ++tcpstat.tcps_predack;
2886
2887 tcp_bad_rexmt_check(tp, th, &to);
2888
2889 /* Recalculate the RTT */
2890 tcp_compute_rtt(tp, &to, th);
2891
2892 VERIFY(SEQ_GEQ(th->th_ack, tp->snd_una));
2893 acked = BYTES_ACKED(th, tp);
2894 tcpstat.tcps_rcvackpack++;
2895 tcpstat.tcps_rcvackbyte += acked;
2896
2897 /*
2898 * Handle an ack that is in sequence during
2899 * congestion avoidance phase. The
2900 * calculations in this function
2901 * assume that snd_una is not updated yet.
2902 */
2903 if (CC_ALGO(tp)->congestion_avd != NULL)
2904 CC_ALGO(tp)->congestion_avd(tp, th);
2905 tcp_ccdbg_trace(tp, th, TCP_CC_INSEQ_ACK_RCVD);
2906 sbdrop(&so->so_snd, acked);
2907 if (so->so_flags & SOF_ENABLE_MSGS) {
2908 VERIFY(acked <= so->so_msg_state->msg_serial_bytes);
2909 so->so_msg_state->msg_serial_bytes -= acked;
2910 }
2911 tcp_sbsnd_trim(&so->so_snd);
2912
2913 if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
2914 SEQ_LEQ(th->th_ack, tp->snd_recover))
2915 tp->snd_recover = th->th_ack - 1;
2916 tp->snd_una = th->th_ack;
2917
2918 TCP_RESET_REXMT_STATE(tp);
2919
2920 /*
2921 * pull snd_wl2 up to prevent seq wrap relative
2922 * to th_ack.
2923 */
2924 tp->snd_wl2 = th->th_ack;
2925
2926 if (tp->t_dupacks > 0) {
2927 tp->t_dupacks = 0;
2928 tp->t_rexmtthresh = tcprexmtthresh;
2929 }
2930
2931 m_freem(m);
2932
2933 /*
2934 * If all outstanding data are acked, stop
2935 * retransmit timer, otherwise restart timer
2936 * using current (possibly backed-off) value.
2937 * If process is waiting for space,
2938 * wakeup/selwakeup/signal. If data
2939 * are ready to send, let tcp_output
2940 * decide between more output or persist.
2941 */
2942 if (tp->snd_una == tp->snd_max) {
2943 tp->t_timer[TCPT_REXMT] = 0;
2944 tp->t_timer[TCPT_PTO] = 0;
2945 } else if (tp->t_timer[TCPT_PERSIST] == 0) {
2946 tp->t_timer[TCPT_REXMT] =
2947 OFFSET_FROM_START(tp,
2948 tp->t_rxtcur);
2949 }
2950 if (!SLIST_EMPTY(&tp->t_rxt_segments) &&
2951 !TCP_DSACK_SEQ_IN_WINDOW(tp,
2952 tp->t_dsack_lastuna, tp->snd_una))
2953 tcp_rxtseg_clean(tp);
2954
2955 if ((tp->t_flagsext & TF_MEASURESNDBW) != 0 &&
2956 tp->t_bwmeas != NULL)
2957 tcp_bwmeas_check(tp);
2958
2959 sowwakeup(so); /* has to be done with socket lock held */
2960 if (!SLIST_EMPTY(&tp->t_notify_ack))
2961 tcp_notify_acknowledgement(tp, so);
2962
2963 if ((so->so_snd.sb_cc) || (tp->t_flags & TF_ACKNOW)) {
2964 (void) tcp_output(tp);
2965 }
2966
2967 tcp_tfo_rcv_ack(tp, th);
2968
2969 tcp_check_timer_state(tp);
2970 socket_unlock(so, 1);
2971 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0);
2972 return;
2973 }
2974 } else if (th->th_ack == tp->snd_una &&
2975 LIST_EMPTY(&tp->t_segq) &&
2976 tlen <= tcp_sbspace(tp)) {
2977 /*
2978 * this is a pure, in-sequence data packet
2979 * with nothing on the reassembly queue and
2980 * we have enough buffer space to take it.
2981 */
2982
2983 /*
2984 * If this is a connection in steady state, start
2985 * coalescing packets belonging to this flow.
2986 */
2987 if (turnoff_lro) {
2988 tcp_lro_remove_state(tp->t_inpcb->inp_laddr,
2989 tp->t_inpcb->inp_faddr,
2990 tp->t_inpcb->inp_lport,
2991 tp->t_inpcb->inp_fport);
2992 tp->t_flagsext &= ~TF_LRO_OFFLOADED;
2993 tp->t_idleat = tp->rcv_nxt;
2994 } else if (sw_lro && !pktf_sw_lro_pkt && !isipv6 &&
2995 (so->so_flags & SOF_USELRO) &&
2996 !IFNET_IS_CELLULAR(m->m_pkthdr.rcvif) &&
2997 (m->m_pkthdr.rcvif->if_type != IFT_LOOP) &&
2998 ((th->th_seq - tp->irs) >
2999 (tp->t_maxseg << lro_start)) &&
3000 ((tp->t_idleat == 0) || ((th->th_seq -
3001 tp->t_idleat) > (tp->t_maxseg << lro_start)))) {
3002 tp->t_flagsext |= TF_LRO_OFFLOADED;
3003 tcp_start_coalescing(ip, th, tlen);
3004 tp->t_idleat = 0;
3005 }
3006
3007 /* Clean receiver SACK report if present */
3008 if (SACK_ENABLED(tp) && tp->rcv_numsacks)
3009 tcp_clean_sackreport(tp);
3010 ++tcpstat.tcps_preddat;
3011 tp->rcv_nxt += tlen;
3012 /*
3013 * Pull snd_wl1 up to prevent seq wrap relative to
3014 * th_seq.
3015 */
3016 tp->snd_wl1 = th->th_seq;
3017 /*
3018 * Pull rcv_up up to prevent seq wrap relative to
3019 * rcv_nxt.
3020 */
3021 tp->rcv_up = tp->rcv_nxt;
3022 TCP_INC_VAR(tcpstat.tcps_rcvpack, nlropkts);
3023 tcpstat.tcps_rcvbyte += tlen;
3024 if (nstat_collect) {
3025 if (m->m_pkthdr.pkt_flags & PKTF_SW_LRO_PKT) {
3026 INP_ADD_STAT(inp, cell, wifi, wired,
3027 rxpackets, m->m_pkthdr.lro_npkts);
3028 } else {
3029 INP_ADD_STAT(inp, cell, wifi, wired,
3030 rxpackets, 1);
3031 }
3032 INP_ADD_STAT(inp, cell, wifi, wired, rxbytes,
3033 tlen);
3034 inp_set_activity_bitmap(inp);
3035 }
3036
3037 /*
3038 * Calculate the RTT on the receiver only if the
3039 * connection is in streaming mode and the last
3040 * packet was not an end-of-write
3041 */
3042 if (tp->t_flags & TF_STREAMING_ON)
3043 tcp_compute_rtt(tp, &to, th);
3044
3045 tcp_sbrcv_grow(tp, &so->so_rcv, &to, tlen,
3046 TCP_AUTORCVBUF_MAX(ifp));
3047
3048 /*
3049 * Add data to socket buffer.
3050 */
3051 so_recv_data_stat(so, m, 0);
3052 m_adj(m, drop_hdrlen); /* delayed header drop */
3053
3054 /*
3055 * If message delivery (SOF_ENABLE_MSGS) is enabled on
3056 * this socket, deliver the packet received as an
3057 * in-order message with sequence number attached to it.
3058 */
3059 if (sbappendstream_rcvdemux(so, m,
3060 th->th_seq - (tp->irs + 1), 0)) {
3061 sorwakeup(so);
3062 }
3063#if INET6
3064 if (isipv6) {
3065 KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport),
3066 (((ip6->ip6_src.s6_addr16[0]) << 16) | (ip6->ip6_dst.s6_addr16[0])),
3067 th->th_seq, th->th_ack, th->th_win);
3068 }
3069 else
3070#endif
3071 {
3072 KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport),
3073 (((ip->ip_src.s_addr & 0xffff) << 16) | (ip->ip_dst.s_addr & 0xffff)),
3074 th->th_seq, th->th_ack, th->th_win);
3075 }
3076 TCP_INC_VAR(tp->t_unacksegs, nlropkts);
3077 if (DELAY_ACK(tp, th)) {
3078 if ((tp->t_flags & TF_DELACK) == 0) {
3079 tp->t_flags |= TF_DELACK;
3080 tp->t_timer[TCPT_DELACK] = OFFSET_FROM_START(tp, tcp_delack);
3081 }
3082 } else {
3083 tp->t_flags |= TF_ACKNOW;
3084 tcp_output(tp);
3085 }
3086
3087 tcp_adaptive_rwtimo_check(tp, tlen);
3088
3089 if (tlen > 0)
3090 tcp_tfo_rcv_data(tp);
3091
3092 tcp_check_timer_state(tp);
3093 socket_unlock(so, 1);
3094 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0);
3095 return;
3096 }
3097 }
3098
3099 /*
3100 * Calculate amount of space in receive window,
3101 * and then do TCP input processing.
3102 * Receive window is amount of space in rcv queue,
3103 * but not less than advertised window.
3104 */
3105 socket_lock_assert_owned(so);
3106 win = tcp_sbspace(tp);
3107 if (win < 0)
3108 win = 0;
3109 else { /* clip rcv window to 4K for modems */
3110 if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0)
3111 win = min(win, slowlink_wsize);
3112 }
3113 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
3114#if MPTCP
3115 /*
3116 * Ensure that the subflow receive window isn't greater
3117 * than the connection level receive window.
3118 */
3119 if ((tp->t_mpflags & TMPF_MPTCP_TRUE) &&
3120 (mp_tp = tptomptp(tp))) {
3121 mpte_lock_assert_held(mp_tp->mpt_mpte);
3122 if (tp->rcv_wnd > mp_tp->mpt_rcvwnd) {
3123 tp->rcv_wnd = imax(mp_tp->mpt_rcvwnd, (int)(tp->rcv_adv - tp->rcv_nxt));
3124 tcpstat.tcps_mp_reducedwin++;
3125 }
3126 }
3127#endif /* MPTCP */
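/*
 * Illustrative note: the computation above never shrinks the advertised
 * window below what was already offered.  E.g. if the socket buffer
 * currently has 8 KB free (win = 8192) but we previously advertised up
 * to rcv_adv = rcv_nxt + 16384, then
 *
 *	rcv_wnd = imax(8192, 16384) = 16384
 *
 * keeping the window edge from moving left, as required by RFC 793's
 * prohibition on shrinking the window.
 */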
3128
3129 switch (tp->t_state) {
3130
3131 /*
3132 * Initialize tp->rcv_nxt, and tp->irs, select an initial
3133 * tp->iss, and send a segment:
3134 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
3135 * Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss.
3136 * Fill in remote peer address fields if not previously specified.
3137 * Enter SYN_RECEIVED state, and process any other fields of this
3138 * segment in this state.
3139 */
3140 case TCPS_LISTEN: {
3141 struct sockaddr_in *sin;
3142#if INET6
3143 struct sockaddr_in6 *sin6;
3144#endif
3145
3146 socket_lock_assert_owned(so);
3147#if INET6
3148 if (isipv6) {
3149 MALLOC(sin6, struct sockaddr_in6 *, sizeof *sin6,
3150 M_SONAME, M_NOWAIT);
3151 if (sin6 == NULL)
3152 goto drop;
3153 bzero(sin6, sizeof(*sin6));
3154 sin6->sin6_family = AF_INET6;
3155 sin6->sin6_len = sizeof(*sin6);
3156 sin6->sin6_addr = ip6->ip6_src;
3157 sin6->sin6_port = th->th_sport;
3158 laddr6 = inp->in6p_laddr;
3159 if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
3160 inp->in6p_laddr = ip6->ip6_dst;
3161 if (in6_pcbconnect(inp, (struct sockaddr *)sin6,
3162 proc0)) {
3163 inp->in6p_laddr = laddr6;
3164 FREE(sin6, M_SONAME);
3165 goto drop;
3166 }
3167 FREE(sin6, M_SONAME);
3168 } else
3169#endif
3170 {
3171 socket_lock_assert_owned(so);
3172 MALLOC(sin, struct sockaddr_in *, sizeof *sin, M_SONAME,
3173 M_NOWAIT);
3174 if (sin == NULL)
3175 goto drop;
3176 sin->sin_family = AF_INET;
3177 sin->sin_len = sizeof(*sin);
3178 sin->sin_addr = ip->ip_src;
3179 sin->sin_port = th->th_sport;
3180 bzero((caddr_t)sin->sin_zero, sizeof(sin->sin_zero));
3181 laddr = inp->inp_laddr;
3182 if (inp->inp_laddr.s_addr == INADDR_ANY)
3183 inp->inp_laddr = ip->ip_dst;
3184 if (in_pcbconnect(inp, (struct sockaddr *)sin, proc0,
3185 IFSCOPE_NONE, NULL)) {
3186 inp->inp_laddr = laddr;
3187 FREE(sin, M_SONAME);
3188 goto drop;
3189 }
3190 FREE(sin, M_SONAME);
3191 }
3192
3193 tcp_dooptions(tp, optp, optlen, th, &to);
3194 tcp_finalize_options(tp, &to, ifscope);
3195
3196 if (tfo_enabled(tp) && tcp_tfo_syn(tp, &to))
3197 isconnected = TRUE;
3198
3199 if (iss)
3200 tp->iss = iss;
3201 else {
3202 tp->iss = tcp_new_isn(tp);
3203 }
3204 tp->irs = th->th_seq;
3205 tcp_sendseqinit(tp);
3206 tcp_rcvseqinit(tp);
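/*
 * Illustrative note: the two initializers above are macros from
 * tcp_seq.h; they expand roughly to
 *
 *	tcp_sendseqinit(tp):
 *		snd_una = snd_nxt = snd_max = snd_up = iss
 *	tcp_rcvseqinit(tp):
 *		rcv_adv = rcv_nxt = irs + 1
 *
 * i.e. all send-side pointers start at our ISS and the receive side
 * starts one past the peer's SYN sequence number.
 */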
3207 tp->snd_recover = tp->snd_una;
3208 /*
3209 * Initialization of the tcpcb for transaction;
3210 * set SND.WND = SEG.WND,
3211 * initialize CCsend and CCrecv.
3212 */
3213 tp->snd_wnd = tiwin; /* initial send-window */
3214 tp->t_flags |= TF_ACKNOW;
3215 tp->t_unacksegs = 0;
3216 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
3217 struct tcpcb *, tp, int32_t, TCPS_SYN_RECEIVED);
3218 tp->t_state = TCPS_SYN_RECEIVED;
3219 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
3220 TCP_CONN_KEEPINIT(tp));
3221 dropsocket = 0; /* committed to socket */
3222
3223 if (inp->inp_flowhash == 0)
3224 inp->inp_flowhash = inp_calc_flowhash(inp);
3225#if INET6
3226 /* update flowinfo - RFC 6437 */
3227 if (inp->inp_flow == 0 &&
3228 inp->in6p_flags & IN6P_AUTOFLOWLABEL) {
3229 inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
3230 inp->inp_flow |=
3231 (htonl(inp->inp_flowhash) & IPV6_FLOWLABEL_MASK);
3232 }
3233#endif /* INET6 */
3234
3235 /* reset the incomp processing flag */
3236 so->so_flags &= ~(SOF_INCOMP_INPROGRESS);
3237 tcpstat.tcps_accepts++;
3238 if ((thflags & (TH_ECE | TH_CWR)) == (TH_ECE | TH_CWR)) {
3239 /* ECN-setup SYN */
3240 tp->ecn_flags |= (TE_SETUPRECEIVED | TE_SENDIPECT);
3241 }
3242
3243 goto trimthenstep6;
3244 }
3245
3246 /*
3247 * If the state is SYN_RECEIVED and the seg contains an ACK,
3248 * but not for our SYN/ACK, send a RST.
3249 */
3250 case TCPS_SYN_RECEIVED:
3251 if ((thflags & TH_ACK) &&
3252 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
3253 SEQ_GT(th->th_ack, tp->snd_max))) {
3254 rstreason = BANDLIM_RST_OPENPORT;
3255 IF_TCP_STATINC(ifp, ooopacket);
3256 goto dropwithreset;
3257 }
3258
3259 /*
3260 * In SYN_RECEIVED state, if we receive some SYNs with
3261 * window scale and others without, window scaling should
3262 * be disabled. Otherwise the window advertised will be
3263 * lower if we assume scaling and the other end does not.
3264 */
3265 if ((thflags & TH_SYN) &&
3266 (tp->irs == th->th_seq) &&
3267 !(to.to_flags & TOF_SCALE))
3268 tp->t_flags &= ~TF_RCVD_SCALE;
3269 break;
3270
3271 /*
3272 * If the state is SYN_SENT:
3273 * if seg contains an ACK, but not for our SYN, drop the input.
3274 * if seg contains a RST, then drop the connection.
3275 * if seg does not contain SYN, then drop it.
3276 * Otherwise this is an acceptable SYN segment
3277 * initialize tp->rcv_nxt and tp->irs
3278 * if seg contains ack then advance tp->snd_una
3279 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
3280 * arrange for segment to be acked (eventually)
3281 * continue processing rest of data/controls, beginning with URG
3282 */
3283 case TCPS_SYN_SENT:
3284 if ((thflags & TH_ACK) &&
3285 (SEQ_LEQ(th->th_ack, tp->iss) ||
3286 SEQ_GT(th->th_ack, tp->snd_max))) {
3287 rstreason = BANDLIM_UNLIMITED;
3288 IF_TCP_STATINC(ifp, ooopacket);
3289 goto dropwithreset;
3290 }
3291 if (thflags & TH_RST) {
3292 if ((thflags & TH_ACK) != 0) {
3293 if (tfo_enabled(tp))
3294 tcp_heuristic_tfo_rst(tp);
3295 if ((tp->ecn_flags & (TE_SETUPSENT | TE_RCVD_SYN_RST)) == TE_SETUPSENT) {
3296 /*
3297 * On local connections, send
3298 * non-ECN syn one time before
3299 * dropping the connection
3300 */
3301 if (tp->t_flags & TF_LOCAL) {
3302 tp->ecn_flags |= TE_RCVD_SYN_RST;
3303 goto drop;
3304 } else {
3305 tcp_heuristic_ecn_synrst(tp);
3306 }
3307 }
3308 soevent(so,
3309 (SO_FILT_HINT_LOCKED |
3310 SO_FILT_HINT_CONNRESET));
3311 tp = tcp_drop(tp, ECONNREFUSED);
3312 postevent(so, 0, EV_RESET);
3313 }
3314 goto drop;
3315 }
3316 if ((thflags & TH_SYN) == 0)
3317 goto drop;
3318 tp->snd_wnd = th->th_win; /* initial send window */
3319
3320 tp->irs = th->th_seq;
3321 tcp_rcvseqinit(tp);
3322 if (thflags & TH_ACK) {
3323 tcpstat.tcps_connects++;
3324
3325 if ((thflags & (TH_ECE | TH_CWR)) == (TH_ECE)) {
3326 /* ECN-setup SYN-ACK */
3327 tp->ecn_flags |= TE_SETUPRECEIVED;
3328 if (TCP_ECN_ENABLED(tp)) {
3329 tcp_heuristic_ecn_success(tp);
3330 tcpstat.tcps_ecn_client_success++;
3331 }
3332 } else {
3333 if (tp->ecn_flags & TE_SETUPSENT &&
3334 tp->t_rxtshift == 0) {
3335 tcp_heuristic_ecn_success(tp);
3336 tcpstat.tcps_ecn_not_supported++;
3337 }
3338 if (tp->ecn_flags & TE_SETUPSENT &&
3339 tp->t_rxtshift > 0)
3340 tcp_heuristic_ecn_loss(tp);
3341
3342 /* non-ECN-setup SYN-ACK */
3343 tp->ecn_flags &= ~TE_SENDIPECT;
3344 }
3345
3346#if CONFIG_MACF_NET && CONFIG_MACF_SOCKET
3347 /* XXXMAC: recursive lock: SOCK_LOCK(so); */
3348 mac_socketpeer_label_associate_mbuf(m, so);
3349 /* XXXMAC: SOCK_UNLOCK(so); */
3350#endif
3351 /* Do window scaling on this connection? */
3352 if (TCP_WINDOW_SCALE_ENABLED(tp)) {
3353 tp->snd_scale = tp->requested_s_scale;
3354 tp->rcv_scale = tp->request_r_scale;
3355 }
3356
3357 tp->rcv_adv += min(tp->rcv_wnd, TCP_MAXWIN << tp->rcv_scale);
3358 tp->snd_una++; /* SYN is acked */
3359 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
3360 tp->snd_nxt = tp->snd_una;
3361
3362 /*
3363 * We have sent more in the SYN than what is being
3364 * acked (e.g., TFO).
3365 * We should restart the sending from what the receiver
3366 * has acknowledged immediately.
3367 */
3368 if (SEQ_GT(tp->snd_nxt, th->th_ack)) {
3369 /*
3370 * rdar://problem/33214601
3371 * There is a middlebox that acks all but one
3372 * byte and still drops the data.
3373 */
3374 if ((tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) &&
3375 tp->snd_max == th->th_ack + 1 &&
3376 tp->snd_max > tp->snd_una + 1) {
3377 tcp_heuristic_tfo_middlebox(tp);
3378
3379 so->so_error = ENODATA;
3380
3381 tp->t_tfo_stats |= TFO_S_ONE_BYTE_PROXY;
3382 }
3383
3384 tp->snd_max = tp->snd_nxt = th->th_ack;
3385 }
3386
3387 /*
3388 * If there's data, delay ACK; if there's also a FIN
3389 * ACKNOW will be turned on later.
3390 */
3391 TCP_INC_VAR(tp->t_unacksegs, nlropkts);
3392 if (DELAY_ACK(tp, th) && tlen != 0) {
3393 if ((tp->t_flags & TF_DELACK) == 0) {
3394 tp->t_flags |= TF_DELACK;
3395 tp->t_timer[TCPT_DELACK] = OFFSET_FROM_START(tp, tcp_delack);
3396 }
3397 }
3398 else {
3399 tp->t_flags |= TF_ACKNOW;
3400 }
3401 /*
3402 * Received <SYN,ACK> in SYN_SENT[*] state.
3403 * Transitions:
3404 * SYN_SENT --> ESTABLISHED
3405 * SYN_SENT* --> FIN_WAIT_1
3406 */
3407 tp->t_starttime = tcp_now;
3408 tcp_sbrcv_tstmp_check(tp);
3409 if (tp->t_flags & TF_NEEDFIN) {
3410 DTRACE_TCP4(state__change, void, NULL,
3411 struct inpcb *, inp,
3412 struct tcpcb *, tp, int32_t,
3413 TCPS_FIN_WAIT_1);
3414 tp->t_state = TCPS_FIN_WAIT_1;
3415 tp->t_flags &= ~TF_NEEDFIN;
3416 thflags &= ~TH_SYN;
3417 } else {
3418 DTRACE_TCP4(state__change, void, NULL,
3419 struct inpcb *, inp, struct tcpcb *,
3420 tp, int32_t, TCPS_ESTABLISHED);
3421 tp->t_state = TCPS_ESTABLISHED;
3422 tp->t_timer[TCPT_KEEP] =
3423 OFFSET_FROM_START(tp,
3424 TCP_CONN_KEEPIDLE(tp));
3425 if (nstat_collect)
3426 nstat_route_connect_success(
3427 inp->inp_route.ro_rt);
3428 /*
3429 * The SYN is acknowledged but una is not
3430 * updated yet. So pass the value of
3431 * ack to compute sndbytes correctly
3432 */
3433 inp_count_sndbytes(inp, th->th_ack);
3434 }
3435#if MPTCP
3436 /*
3437 * Do not send the connect notification for additional
3438 * subflows until ACK for 3-way handshake arrives.
3439 */
3440 if ((!(tp->t_mpflags & TMPF_MPTCP_TRUE)) &&
3441 (tp->t_mpflags & TMPF_SENT_JOIN)) {
3442 isconnected = FALSE;
3443 } else
3444#endif /* MPTCP */
3445 isconnected = TRUE;
3446
3447 if ((tp->t_tfo_flags & (TFO_F_COOKIE_REQ | TFO_F_COOKIE_SENT)) ||
3448 (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT)) {
3449 tcp_tfo_synack(tp, &to);
3450
3451 if ((tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) &&
3452 SEQ_LT(tp->snd_una, th->th_ack)) {
3453 tp->t_tfo_stats |= TFO_S_SYN_DATA_ACKED;
3454 tcpstat.tcps_tfo_syn_data_acked++;
3455#if MPTCP
3456 if (so->so_flags & SOF_MP_SUBFLOW)
3457 so->so_flags1 |= SOF1_TFO_REWIND;
3458#endif
3459 tcp_tfo_rcv_probe(tp, tlen);
3460 }
3461 }
3462 } else {
3463 /*
3464 * Received initial SYN in SYN-SENT[*] state => simul-
3465 * taneous open. If segment contains CC option and there is
3466 * a cached CC, apply TAO test; if it succeeds, connection is
3467 * half-synchronized. Otherwise, do 3-way handshake:
3468 * SYN-SENT -> SYN-RECEIVED
3469 * SYN-SENT* -> SYN-RECEIVED*
3470 */
3471 tp->t_flags |= TF_ACKNOW;
3472 tp->t_timer[TCPT_REXMT] = 0;
3473 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
3474 struct tcpcb *, tp, int32_t, TCPS_SYN_RECEIVED);
3475 tp->t_state = TCPS_SYN_RECEIVED;
3476
3477 /*
3478 * During simultaneous open, TFO should not be used.
3479 * So, we disable it here, to prevent data from being
3480 * sent on the SYN/ACK.
3481 */
3482 tcp_disable_tfo(tp);
3483 }
3484
3485trimthenstep6:
3486 /*
3487 * Advance th->th_seq to correspond to first data byte.
3488 * If data, trim to stay within window,
3489 * dropping FIN if necessary.
3490 */
3491 th->th_seq++;
3492 if (tlen > tp->rcv_wnd) {
3493 todrop = tlen - tp->rcv_wnd;
3494 m_adj(m, -todrop);
3495 tlen = tp->rcv_wnd;
3496 thflags &= ~TH_FIN;
3497 tcpstat.tcps_rcvpackafterwin++;
3498 tcpstat.tcps_rcvbyteafterwin += todrop;
3499 }
3500 tp->snd_wl1 = th->th_seq - 1;
3501 tp->rcv_up = th->th_seq;
3502 /*
3503 * Client side of transaction: already sent SYN and data.
3504 * If the remote host used T/TCP to validate the SYN,
3505 * our data will be ACK'd; if so, enter normal data segment
3506 * processing in the middle of step 5, ack processing.
3507 * Otherwise, goto step 6.
3508 */
3509 if (thflags & TH_ACK)
3510 goto process_ACK;
3511 goto step6;
3512 /*
3513 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
3514 * do normal processing.
3515 *
3516 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later.
3517 */
3518 case TCPS_LAST_ACK:
3519 case TCPS_CLOSING:
3520 case TCPS_TIME_WAIT:
3521 break; /* continue normal processing */
3522
3523 /* Received a SYN while connection is already established.
3524 * This is a "half open connection and other anomalies" described
3525 * in RFC793 page 34; send an ACK so the remote end resets the
3526 * connection or recovers by adjusting its sequence numbering.
3527 */
3528 case TCPS_ESTABLISHED:
3529 if (thflags & TH_SYN)
3530 goto dropafterack;
3531 break;
3532 }
3533
3534 /*
3535 * States other than LISTEN or SYN_SENT.
3536 * First check the RST flag and sequence number since reset segments
3537 * are exempt from the timestamp and connection count tests. This
3538 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
3539 * below which allowed reset segments in half the sequence space
3540 * to fall through and be processed (which gives forged reset
3541 * segments with a random sequence number a 50 percent chance of
3542 * killing a connection).
3543 * Then check timestamp, if present.
3544 * Then check the connection count, if present.
3545 * Then check that at least some bytes of segment are within
3546 * receive window. If segment begins before rcv_nxt,
3547 * drop leading data (and SYN); if nothing left, just ack.
3548 *
3549 *
3550 * If the RST bit is set, check the sequence number to see
3551 * if this is a valid reset segment.
3552 * RFC 793 page 37:
3553 * In all states except SYN-SENT, all reset (RST) segments
3554 * are validated by checking their SEQ-fields. A reset is
3555 * valid if its sequence number is in the window.
3556 * Note: this does not take into account delayed ACKs, so
3557 * we should test against last_ack_sent instead of rcv_nxt.
3558 * The sequence number in the reset segment is normally an
3559 * echo of our outgoing acknowledgement numbers, but some hosts
3560 * send a reset with the sequence number at the rightmost edge
3561 * of our receive window, and we have to handle this case.
3562 * Note 2: Paul Watson's paper "Slipping in the Window" has shown
3563 * that brute force RST attacks are possible. To combat this,
3564 * we use a much stricter check while in the ESTABLISHED state,
3565 * only accepting RSTs where the sequence number is equal to
3566 * last_ack_sent. In all other states (the states in which a
3567 * RST is more likely), the more permissive check is used.
3568 * If we have multiple segments in flight, the initial reset
3569 * segment sequence numbers will be to the left of last_ack_sent,
3570 * but they will eventually catch up.
3571 * In any case, it never made sense to trim reset segments to
3572 * fit the receive window since RFC 1122 says:
3573 * 4.2.2.12 RST Segment: RFC-793 Section 3.4
3574 *
3575 * A TCP SHOULD allow a received RST segment to include data.
3576 *
3577 * DISCUSSION
3578 * It has been suggested that a RST segment could contain
3579 * ASCII text that encoded and explained the cause of the
3580 * RST. No standard has yet been established for such
3581 * data.
3582 *
3583 * If the reset segment passes the sequence number test examine
3584 * the state:
3585 * SYN_RECEIVED STATE:
3586 * If passive open, return to LISTEN state.
3587 * If active open, inform user that connection was refused.
3588 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
3589 * Inform user that connection was reset, and close tcb.
3590 * CLOSING, LAST_ACK STATES:
3591 * Close the tcb.
3592 * TIME_WAIT STATE:
3593 * Drop the segment - see Stevens, vol. 2, p. 964 and
3594 * RFC 1337.
3595 *
3596 * Radar 4803931: Allows for the case where we ACKed the FIN but
3597 * there is already a RST in flight from the peer.
3598 * In that case, accept the RST for non-established
3599 * state if it's one off from last_ack_sent.
3600 *
3601 */
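/*
 * Note that the SEQ_GEQ()/SEQ_LT() tests below are modular
 * comparisons (tcp_seq.h defines SEQ_LT(a, b) as
 * ((int)((a)-(b)) < 0)), so the in-window test stays valid across
 * 32-bit sequence wraparound; e.g. SEQ_LT(1, 0xffffffff) is false
 * because 1 is ahead of 0xffffffff in sequence space.
 */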
3602 if (thflags & TH_RST) {
3603 if ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
3604 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) ||
3605 (tp->rcv_wnd == 0 &&
3606 ((tp->last_ack_sent == th->th_seq) ||
3607 ((tp->last_ack_sent - 1) == th->th_seq)))) {
3608 switch (tp->t_state) {
3609
3610 case TCPS_SYN_RECEIVED:
3611 IF_TCP_STATINC(ifp, rstinsynrcv);
3612 so->so_error = ECONNREFUSED;
3613 goto close;
3614
3615 case TCPS_ESTABLISHED:
3616 if (tp->last_ack_sent != th->th_seq) {
3617 tcpstat.tcps_badrst++;
3618 goto drop;
3619 }
3620 if (TCP_ECN_ENABLED(tp) &&
3621 tp->snd_una == tp->iss + 1 &&
3622 SEQ_GT(tp->snd_max, tp->snd_una)) {
3623 /*
3624 * If the first data packet on an
3625 * ECN connection receives a RST,
3626 * increment the heuristic.
3627 */
3628 tcp_heuristic_ecn_droprst(tp);
3629 }
3630 case TCPS_FIN_WAIT_1:
3631 case TCPS_CLOSE_WAIT:
3632 /*
3633 * Drop through ...
3634 */
3635 case TCPS_FIN_WAIT_2:
3636 so->so_error = ECONNRESET;
3637 close:
3638 postevent(so, 0, EV_RESET);
3639 soevent(so,
3640 (SO_FILT_HINT_LOCKED |
3641 SO_FILT_HINT_CONNRESET));
3642
3643 tcpstat.tcps_drops++;
3644 tp = tcp_close(tp);
3645 break;
3646
3647 case TCPS_CLOSING:
3648 case TCPS_LAST_ACK:
3649 tp = tcp_close(tp);
3650 break;
3651
3652 case TCPS_TIME_WAIT:
3653 break;
3654 }
3655 }
3656 goto drop;
3657 }
3658
3659 /*
3660 * RFC 1323 PAWS: If we have a timestamp reply on this segment
3661 * and it's less than ts_recent, drop it.
3662 */
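/*
 * E.g. with ts_recent = 1000, a segment carrying tsval = 900
 * satisfies TSTMP_LT(900, 1000) and is treated as an old duplicate
 * below, unless ts_recent itself is more than TCP_PAWS_IDLE old
 * and therefore no longer trustworthy.
 */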
3663 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
3664 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
3665
3666 /* Check to see if ts_recent is over 24 days old. */
3667 if ((int)(tcp_now - tp->ts_recent_age) > TCP_PAWS_IDLE) {
3668 /*
3669 * Invalidate ts_recent. If this segment updates
3670 * ts_recent, the age will be reset later and ts_recent
3671 * will get a valid value. If it does not, setting
3672 * ts_recent to zero will at least satisfy the
3673 * requirement that zero be placed in the timestamp
3674 * echo reply when ts_recent isn't valid. The
3675 * age isn't reset until we get a valid ts_recent
3676 * because we don't want out-of-order segments to be
3677 * dropped when ts_recent is old.
3678 */
3679 tp->ts_recent = 0;
3680 } else {
3681 tcpstat.tcps_rcvduppack++;
3682 tcpstat.tcps_rcvdupbyte += tlen;
3683 tp->t_pawsdrop++;
3684 tcpstat.tcps_pawsdrop++;
3685
3686 /*
3687 * A PAWS-drop while ECN is being used indicates
3688 * that ECT-marked packets take a different path, with
3689 * different congestion characteristics.
3690 *
3691 * Only fall back if we have sent less than 2GB, as PAWS
3692 * really has no reason to kick in earlier.
3693 */
3694 if (TCP_ECN_ENABLED(tp) &&
3695 inp->inp_stat->rxbytes < 2147483648) {
3696 INP_INC_IFNET_STAT(inp, ecn_fallback_reorder);
3697 tcpstat.tcps_ecn_fallback_reorder++;
3698 tcp_heuristic_ecn_aggressive(tp);
3699 }
3700
3701 if (nstat_collect) {
3702 nstat_route_rx(tp->t_inpcb->inp_route.ro_rt,
3703 1, tlen, NSTAT_RX_FLAG_DUPLICATE);
3704 INP_ADD_STAT(inp, cell, wifi, wired,
3705 rxpackets, 1);
3706 INP_ADD_STAT(inp, cell, wifi, wired,
3707 rxbytes, tlen);
3708 tp->t_stat.rxduplicatebytes += tlen;
3709 inp_set_activity_bitmap(inp);
3710 }
3711 if (tlen > 0)
3712 goto dropafterack;
3713 goto drop;
3714 }
3715 }
3716
3717 /*
3718 * In the SYN-RECEIVED state, validate that the packet belongs to
3719 * this connection before trimming the data to fit the receive
3720 * window. Check the sequence number versus IRS since we know
3721 * the sequence numbers haven't wrapped. This is a partial fix
3722 * for the "LAND" DoS attack.
3723 */
3724 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
3725 rstreason = BANDLIM_RST_OPENPORT;
3726 IF_TCP_STATINC(ifp, dospacket);
3727 goto dropwithreset;
3728 }
3729
3730 todrop = tp->rcv_nxt - th->th_seq;
3731 if (todrop > 0) {
3732 if (thflags & TH_SYN) {
3733 thflags &= ~TH_SYN;
3734 th->th_seq++;
3735 if (th->th_urp > 1)
3736 th->th_urp--;
3737 else
3738 thflags &= ~TH_URG;
3739 todrop--;
3740 }
3741 /*
3742 * Following if statement from Stevens, vol. 2, p. 960.
3743 */
3744 if (todrop > tlen
3745 || (todrop == tlen && (thflags & TH_FIN) == 0)) {
3746 /*
3747 * Any valid FIN must be to the left of the window.
3748 * At this point the FIN must be a duplicate or out
3749 * of sequence; drop it.
3750 */
3751 thflags &= ~TH_FIN;
3752
3753 /*
3754 * Send an ACK to resynchronize and drop any data.
3755 * But keep on processing for RST or ACK.
3756 */
3757 tp->t_flags |= TF_ACKNOW;
3758 if (todrop == 1) {
3759 /* This could be a keepalive */
3760 soevent(so, SO_FILT_HINT_LOCKED |
3761 SO_FILT_HINT_KEEPALIVE);
3762 }
3763 todrop = tlen;
3764 tcpstat.tcps_rcvduppack++;
3765 tcpstat.tcps_rcvdupbyte += todrop;
3766 } else {
3767 tcpstat.tcps_rcvpartduppack++;
3768 tcpstat.tcps_rcvpartdupbyte += todrop;
3769 }
3770
3771 if (TCP_DSACK_ENABLED(tp) && todrop > 1) {
3772 /*
3773 * Note the duplicate data sequence space so that
3774 * it can be reported in DSACK option.
3775 */
3776 tp->t_dsack_lseq = th->th_seq;
3777 tp->t_dsack_rseq = th->th_seq + todrop;
3778 tp->t_flags |= TF_ACKNOW;
3779 }
3780 if (nstat_collect) {
3781 nstat_route_rx(tp->t_inpcb->inp_route.ro_rt, 1,
3782 todrop, NSTAT_RX_FLAG_DUPLICATE);
3783 INP_ADD_STAT(inp, cell, wifi, wired, rxpackets, 1);
3784 INP_ADD_STAT(inp, cell, wifi, wired, rxbytes, todrop);
3785 tp->t_stat.rxduplicatebytes += todrop;
3786 inp_set_activity_bitmap(inp);
3787 }
3788 drop_hdrlen += todrop; /* drop from the top afterwards */
3789 th->th_seq += todrop;
3790 tlen -= todrop;
3791 if (th->th_urp > todrop)
3792 th->th_urp -= todrop;
3793 else {
3794 thflags &= ~TH_URG;
3795 th->th_urp = 0;
3796 }
3797 }
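/*
 * Worked example of the trimming above: with rcv_nxt = 1000,
 * th_seq = 990 and tlen = 30, todrop is 10, so the 10 bytes we
 * already received are dropped (and noted for a DSACK if in use),
 * leaving th_seq = 1000 and tlen = 20 of new data for the checks
 * below.
 */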
3798
3799 /*
3800 * If new data are received on a connection after the user
3801 * processes are gone, then RST the other end.
3802 * Also send a RST when we receive a data segment after we've
3803 * sent our FIN and the socket is defunct.
3804 * Note that an MPTCP subflow socket would have SS_NOFDREF set
3805 * by default. So, if it's an MPTCP subflow, we check the
3806 * MPTCP-level socket state for SS_NOFDREF instead.
3807 */
3808 if (tlen) {
3809 boolean_t close_it = FALSE;
3810
3811 if (!(so->so_flags & SOF_MP_SUBFLOW) && (so->so_state & SS_NOFDREF) &&
3812 tp->t_state > TCPS_CLOSE_WAIT)
3813 close_it = TRUE;
3814
3815 if ((so->so_flags & SOF_MP_SUBFLOW) && (mptetoso(tptomptp(tp)->mpt_mpte)->so_state & SS_NOFDREF) &&
3816 tp->t_state > TCPS_CLOSE_WAIT)
3817 close_it = TRUE;
3818
3819 if ((so->so_flags & SOF_DEFUNCT) && tp->t_state > TCPS_FIN_WAIT_1)
3820 close_it = TRUE;
3821
3822 if (close_it) {
3823 tp = tcp_close(tp);
3824 tcpstat.tcps_rcvafterclose++;
3825 rstreason = BANDLIM_UNLIMITED;
3826 IF_TCP_STATINC(ifp, cleanup);
3827 goto dropwithreset;
3828 }
3829 }
3830
3831 /*
3832 * If segment ends after window, drop trailing data
3833 * (and PUSH and FIN); if nothing left, just ACK.
3834 */
3835 todrop = (th->th_seq+tlen) - (tp->rcv_nxt+tp->rcv_wnd);
3836 if (todrop > 0) {
3837 tcpstat.tcps_rcvpackafterwin++;
3838 if (todrop >= tlen) {
3839 tcpstat.tcps_rcvbyteafterwin += tlen;
3840 /*
3841 * If a new connection request is received
3842 * while in TIME_WAIT, drop the old connection
3843 * and start over if the sequence numbers
3844 * are above the previous ones.
3845 */
3846 if (thflags & TH_SYN &&
3847 tp->t_state == TCPS_TIME_WAIT &&
3848 SEQ_GT(th->th_seq, tp->rcv_nxt)) {
3849 iss = tcp_new_isn(tp);
3850 tp = tcp_close(tp);
3851 socket_unlock(so, 1);
3852 goto findpcb;
3853 }
3854 /*
3855 * If window is closed can only take segments at
3856 * window edge, and have to drop data and PUSH from
3857 * incoming segments. Continue processing, but
3858 * remember to ack. Otherwise, drop segment
3859 * and ack.
3860 */
3861 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
3862 tp->t_flags |= TF_ACKNOW;
3863 tcpstat.tcps_rcvwinprobe++;
3864 } else
3865 goto dropafterack;
3866 } else
3867 tcpstat.tcps_rcvbyteafterwin += todrop;
3868 m_adj(m, -todrop);
3869 tlen -= todrop;
3870 thflags &= ~(TH_PUSH|TH_FIN);
3871 }
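/*
 * E.g. rcv_nxt = 1000, rcv_wnd = 100, th_seq = 1050, tlen = 80:
 * todrop = (1050 + 80) - (1000 + 100) = 30, so the trailing 30
 * bytes (and PUSH/FIN) are dropped and tlen becomes 50.
 */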
3872
3873 /*
3874 * If last ACK falls within this segment's sequence numbers,
3875 * record its timestamp.
3876 * NOTE:
3877 * 1) That the test incorporates suggestions from the latest
3878 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
3879 * 2) That updating only on newer timestamps interferes with
3880 * our earlier PAWS tests, so this check should be solely
3881 * predicated on the sequence space of this segment.
3882 * 3) That we modify the segment boundary check to be
3883 * Last.ACK.Sent <= SEG.SEQ + SEG.Len
3884 * instead of RFC1323's
3885 * Last.ACK.Sent < SEG.SEQ + SEG.Len,
3886 * This modified check allows us to overcome RFC1323's
3887 * limitations as described in Stevens TCP/IP Illustrated
3888 * Vol. 2 p.869. In such cases, we can still calculate the
3889 * RTT correctly when RCV.NXT == Last.ACK.Sent.
3890 */
3891 if ((to.to_flags & TOF_TS) != 0 &&
3892 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
3893 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
3894 ((thflags & (TH_SYN|TH_FIN)) != 0))) {
3895 tp->ts_recent_age = tcp_now;
3896 tp->ts_recent = to.to_tsval;
3897 }
3898
3899 /*
3900 * If a SYN is in the window, then this is an
3901 * error and we send an RST and drop the connection.
3902 */
3903 if (thflags & TH_SYN) {
3904 tp = tcp_drop(tp, ECONNRESET);
3905 rstreason = BANDLIM_UNLIMITED;
3906 postevent(so, 0, EV_RESET);
3907 IF_TCP_STATINC(ifp, synwindow);
3908 goto dropwithreset;
3909 }
3910
3911 /*
3912 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN
3913 * flag is on (half-synchronized state), then queue data for
3914 * later processing; else drop segment and return.
3915 */
3916 if ((thflags & TH_ACK) == 0) {
3917 if (tp->t_state == TCPS_SYN_RECEIVED ||
3918 (tp->t_flags & TF_NEEDSYN)) {
3919 if ((tfo_enabled(tp))) {
3920 /*
3921 * So, we received a valid segment while in
3922 * SYN-RECEIVED (TF_NEEDSYN is actually never
3923 * set, so this is dead code).
3924 * As this cannot be an RST (see the check a bit
3925 * higher up), and it does not have the ACK flag
3926 * set, we want to retransmit the SYN/ACK.
3927 * Thus, we have to reset snd_nxt to snd_una so
3928 * that we go back to sending the SYN/ACK.
3929 * This is more consistent with the
3930 * behavior of tcp_output(), which expects
3931 * to send the segment that is pointed to by
3932 * snd_nxt.
3933 */
3934 tp->snd_nxt = tp->snd_una;
3935
3936 /*
3937 * We need to make absolutely sure that we are
3938 * going to reply upon a duplicate SYN-segment.
3939 */
3940 if (th->th_flags & TH_SYN)
3941 needoutput = 1;
3942 }
3943
3944 goto step6;
3945 } else if (tp->t_flags & TF_ACKNOW)
3946 goto dropafterack;
3947 else
3948 goto drop;
3949 }
3950
3951 /*
3952 * Ack processing.
3953 */
3954
3955 switch (tp->t_state) {
3956
3957 /*
3958 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
3959 * ESTABLISHED state and continue processing.
3960 * The ACK was checked above.
3961 */
3962 case TCPS_SYN_RECEIVED:
3963
3964 tcpstat.tcps_connects++;
3965
3966 /* Do window scaling? */
3967 if (TCP_WINDOW_SCALE_ENABLED(tp)) {
3968 tp->snd_scale = tp->requested_s_scale;
3969 tp->rcv_scale = tp->request_r_scale;
3970 tp->snd_wnd = th->th_win << tp->snd_scale;
3971 tiwin = tp->snd_wnd;
3972 }
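/*
 * E.g. a raw th_win of 500 with snd_scale = 3 yields an
 * effective send window of 500 << 3 = 4000 bytes.
 */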
3973 /*
3974 * Make transitions:
3975 * SYN-RECEIVED -> ESTABLISHED
3976 * SYN-RECEIVED* -> FIN-WAIT-1
3977 */
3978 tp->t_starttime = tcp_now;
3979 tcp_sbrcv_tstmp_check(tp);
3980 if (tp->t_flags & TF_NEEDFIN) {
3981 DTRACE_TCP4(state__change, void, NULL,
3982 struct inpcb *, inp,
3983 struct tcpcb *, tp, int32_t, TCPS_FIN_WAIT_1);
3984 tp->t_state = TCPS_FIN_WAIT_1;
3985 tp->t_flags &= ~TF_NEEDFIN;
3986 } else {
3987 DTRACE_TCP4(state__change, void, NULL,
3988 struct inpcb *, inp,
3989 struct tcpcb *, tp, int32_t, TCPS_ESTABLISHED);
3990 tp->t_state = TCPS_ESTABLISHED;
3991 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
3992 TCP_CONN_KEEPIDLE(tp));
3993 if (nstat_collect)
3994 nstat_route_connect_success(
3995 tp->t_inpcb->inp_route.ro_rt);
3996 /*
3997 * The SYN is acknowledged but una is not updated
3998 * yet. So pass the value of ack to compute
3999 * sndbytes correctly
4000 */
4001 inp_count_sndbytes(inp, th->th_ack);
4002 }
4003 /*
4004 * If segment contains data or ACK, will call tcp_reass()
4005 * later; if not, do so now to pass queued data to user.
4006 */
4007 if (tlen == 0 && (thflags & TH_FIN) == 0)
4008 (void) tcp_reass(tp, (struct tcphdr *)0, &tlen,
4009 NULL, ifp);
4010 tp->snd_wl1 = th->th_seq - 1;
4011
4012#if MPTCP
4013 /*
4014 * Do not send the connect notification for additional subflows
4015 * until ACK for 3-way handshake arrives.
4016 */
4017 if ((!(tp->t_mpflags & TMPF_MPTCP_TRUE)) &&
4018 (tp->t_mpflags & TMPF_SENT_JOIN)) {
4019 isconnected = FALSE;
4020 } else
4021#endif /* MPTCP */
4022 isconnected = TRUE;
4023 if ((tp->t_tfo_flags & TFO_F_COOKIE_VALID)) {
4024 /* We already did this when receiving the SYN */
4025 isconnected = FALSE;
4026
4027 OSDecrementAtomic(&tcp_tfo_halfcnt);
4028
4029 /* Panic if something has gone terribly wrong. */
4030 VERIFY(tcp_tfo_halfcnt >= 0);
4031
4032 tp->t_tfo_flags &= ~TFO_F_COOKIE_VALID;
4033 }
4034
4035 /*
4036 * If there is data in the send-queue (e.g., TFO is being
4037 * used, or connectx+data has been done) and we simply fell
4038 * through, we would handle this ACK as if data had been
4039 * acknowledged. We have to prevent this, and we do so
4040 * by increasing snd_una by 1, so that the
4041 * SYN is not considered as data (snd_una++ is actually also
4042 * done in SYN_SENT state as part of the regular TCP stack).
4043 *
4044 * If there is data on this ACK as well, it will be
4045 * handled at the label "dodata" right after step6.
4046 */
4047 if (so->so_snd.sb_cc) {
4048 tp->snd_una++; /* SYN is acked */
4049 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
4050 tp->snd_nxt = tp->snd_una;
4051
4052 /*
4053 * No duplicate-ACK handling is needed. So, we
4054 * directly advance to processing the ACK (aka,
4055 * updating the RTT estimation,...)
4056 *
4057 * But, we first need to handle any SACKs,
4058 * because TFO will start sending data with the
4059 * SYN/ACK, so the client may already
4060 * include a SACK with its ACK.
4061 */
4062 if (SACK_ENABLED(tp) &&
4063 (to.to_nsacks > 0 ||
4064 !TAILQ_EMPTY(&tp->snd_holes)))
4065 tcp_sack_doack(tp, &to, th,
4066 &sack_bytes_acked);
4067
4068 goto process_ACK;
4069 }
4070
4071 /* FALLTHROUGH */
4072
4073 /*
4074 * In ESTABLISHED state: drop duplicate ACKs; ACK out-of-range
4075 * ACKs. If the ack is in the range
4076 * tp->snd_una < th->th_ack <= tp->snd_max
4077 * then advance tp->snd_una to th->th_ack and drop
4078 * data from the retransmission queue. If this ACK reflects
4079 * more up-to-date window information, we update our window information.
4080 */
4081 case TCPS_ESTABLISHED:
4082 case TCPS_FIN_WAIT_1:
4083 case TCPS_FIN_WAIT_2:
4084 case TCPS_CLOSE_WAIT:
4085 case TCPS_CLOSING:
4086 case TCPS_LAST_ACK:
4087 case TCPS_TIME_WAIT:
4088 if (SEQ_GT(th->th_ack, tp->snd_max)) {
4089 tcpstat.tcps_rcvacktoomuch++;
4090 goto dropafterack;
4091 }
4092 if (SACK_ENABLED(tp) && to.to_nsacks > 0) {
4093 recvd_dsack = tcp_sack_process_dsack(tp, &to, th);
4094 /*
4095 * If DSACK is received and this packet has no
4096 * other SACK information, it can be dropped.
4097 * We do not want to treat it as a duplicate ack.
4098 */
4099 if (recvd_dsack &&
4100 SEQ_LEQ(th->th_ack, tp->snd_una) &&
4101 to.to_nsacks == 0) {
4102 tcp_bad_rexmt_check(tp, th, &to);
4103 goto drop;
4104 }
4105 }
4106
4107 if (SACK_ENABLED(tp) &&
4108 (to.to_nsacks > 0 || !TAILQ_EMPTY(&tp->snd_holes)))
4109 tcp_sack_doack(tp, &to, th, &sack_bytes_acked);
4110
4111#if MPTCP
4112 if (tp->t_mpuna && SEQ_GEQ(th->th_ack, tp->t_mpuna)) {
4113 if (tp->t_mpflags & TMPF_PREESTABLISHED) {
4114 /* MP TCP establishment succeeded */
4115 tp->t_mpuna = 0;
4116 if (tp->t_mpflags & TMPF_JOINED_FLOW) {
4117 if (tp->t_mpflags & TMPF_SENT_JOIN) {
4118 tp->t_mpflags &=
4119 ~TMPF_PREESTABLISHED;
4120 tp->t_mpflags |=
4121 TMPF_MPTCP_TRUE;
4122 so->so_flags |= SOF_MPTCP_TRUE;
4123 mptcplog((LOG_DEBUG, "MPTCP "
4124 "Sockets: %s \n",__func__),
4125 MPTCP_SOCKET_DBG,
4126 MPTCP_LOGLVL_LOG);
4127
4128 tp->t_timer[TCPT_JACK_RXMT] = 0;
4129 tp->t_mprxtshift = 0;
4130 isconnected = TRUE;
4131 } else {
4132 isconnected = FALSE;
4133 }
4134 } else {
4135 isconnected = TRUE;
4136 }
4137 }
4138 }
4139#endif /* MPTCP */
4140
4141 tcp_tfo_rcv_ack(tp, th);
4142
4143 /*
4144 * If we have outstanding data (other than
4145 * a window probe), this is a completely
4146 * duplicate ack and the ack is the biggest we've seen.
4147 *
4148 * Need to accommodate a change in window on duplicate acks
4149 * to allow operating systems that update window during
4150 * recovery with SACK
4151 */
4152 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
4153 if (tlen == 0 && (tiwin == tp->snd_wnd ||
4154 (to.to_nsacks > 0 && sack_bytes_acked > 0))) {
4155 /*
4156 * If both ends send FIN at the same time,
4157 * then the ack will be a duplicate ack
4158 * but we have to process the FIN. Check
4159 * for this condition and process the FIN
4160 * instead of the dupack
4161 */
4162 if ((thflags & TH_FIN) &&
4163 !TCPS_HAVERCVDFIN(tp->t_state))
4164 break;
4165process_dupack:
4166#if MPTCP
4167 /*
4168 * MPTCP options that are ignored must
4169 * not be treated as duplicate ACKs.
4170 */
4171 if (to.to_flags & TOF_MPTCP) {
4172 goto drop;
4173 }
4174
4175 if ((isconnected) && (tp->t_mpflags & TMPF_JOINED_FLOW)) {
4176 mptcplog((LOG_DEBUG, "MPTCP "
4177 "Sockets: bypass ack recovery\n"),
4178 MPTCP_SOCKET_DBG,
4179 MPTCP_LOGLVL_VERBOSE);
4180 break;
4181 }
4182#endif /* MPTCP */
4183 /*
4184 * If a duplicate acknowledgement was seen
4185 * after ECN, it indicates packet loss in
4186 * addition to ECN. Reset INRECOVERY flag
4187 * so that we can process partial acks
4188 * correctly
4189 */
4190 if (tp->ecn_flags & TE_INRECOVERY)
4191 tp->ecn_flags &= ~TE_INRECOVERY;
4192
4193 tcpstat.tcps_rcvdupack++;
4194 ++tp->t_dupacks;
4195
4196 /*
4197 * Check if we need to reset the limit on
4198 * early retransmit
4199 */
4200 if (tp->t_early_rexmt_count > 0 &&
4201 TSTMP_GEQ(tcp_now,
4202 (tp->t_early_rexmt_win +
4203 TCP_EARLY_REXMT_WIN)))
4204 tp->t_early_rexmt_count = 0;
4205
4206 /*
4207 * Is early retransmit needed? We check for
4208 * this when the connection is waiting for
4209 * duplicate acks to enter fast recovery.
4210 */
4211 if (!IN_FASTRECOVERY(tp))
4212 tcp_early_rexmt_check(tp, th);
4213
4214 /*
4215 * If we've seen exactly the rexmt threshold
4216 * of duplicate acks, assume a packet
4217 * has been dropped and retransmit it.
4218 * Kludge snd_nxt & the congestion
4219 * window so we send only this one
4220 * packet.
4221 *
4222 * We know we're losing at the current
4223 * window size so do congestion avoidance
4224 * (set ssthresh to half the current window
4225 * and pull our congestion window back to
4226 * the new ssthresh).
4227 *
4228 * Dup acks mean that packets have left the
4229 * network (they're now cached at the receiver)
4230 * so bump cwnd by the amount in the receiver
4231 * to keep a constant cwnd packets in the
4232 * network.
4233 */
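/*
 * With the default tcprexmtthresh of 3, the third
 * consecutive pure duplicate ACK takes the
 * t_dupacks == t_rexmtthresh branch below and enters
 * fast recovery.
 */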
4234 if (tp->t_timer[TCPT_REXMT] == 0 ||
4235 (th->th_ack != tp->snd_una
4236 && sack_bytes_acked == 0)) {
4237 tp->t_dupacks = 0;
4238 tp->t_rexmtthresh = tcprexmtthresh;
4239 } else if (tp->t_dupacks > tp->t_rexmtthresh ||
4240 IN_FASTRECOVERY(tp)) {
4241
4242 /*
4243 * If this connection was seeing packet
4244 * reordering, then recovery might be
4245 * delayed to disambiguate between
4246 * reordering and loss
4247 */
4248 if (SACK_ENABLED(tp) && !IN_FASTRECOVERY(tp) &&
4249 (tp->t_flagsext &
4250 (TF_PKTS_REORDERED|TF_DELAY_RECOVERY)) ==
4251 (TF_PKTS_REORDERED|TF_DELAY_RECOVERY)) {
4252 /*
4253 * Since the SACK information is already
4254 * updated, this ACK will be dropped
4255 */
4256 break;
4257 }
4258
4259 if (SACK_ENABLED(tp)
4260 && IN_FASTRECOVERY(tp)) {
4261 int awnd;
4262
4263 /*
4264 * Compute the amount of data in flight first.
4265 * We can inject new data into the pipe iff
4266 * we have less than 1/2 the original window's
4267 * worth of data in flight.
4268 */
4269 awnd = (tp->snd_nxt - tp->snd_fack) +
4270 tp->sackhint.sack_bytes_rexmit;
4271 if (awnd < tp->snd_ssthresh) {
4272 tp->snd_cwnd += tp->t_maxseg;
4273 if (tp->snd_cwnd > tp->snd_ssthresh)
4274 tp->snd_cwnd = tp->snd_ssthresh;
4275 }
4276 } else {
4277 tp->snd_cwnd += tp->t_maxseg;
4278 }
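/*
 * E.g. 10 MSS between snd_fack and snd_nxt plus
 * 2 MSS of retransmitted holes gives awnd = 12 MSS;
 * with ssthresh at 20 MSS the test above lets cwnd
 * grow by one MSS.
 */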
4279
4280 /* Process any window updates */
4281 if (tiwin > tp->snd_wnd)
4282 tcp_update_window(tp, thflags,
4283 th, tiwin, tlen);
4284 tcp_ccdbg_trace(tp, th,
4285 TCP_CC_IN_FASTRECOVERY);
4286
4287 (void) tcp_output(tp);
4288
4289 goto drop;
4290 } else if (tp->t_dupacks == tp->t_rexmtthresh) {
4291 tcp_seq onxt = tp->snd_nxt;
4292
4293 /*
4294 * If we're doing sack, check to
4295 * see if we're already in sack
4296 * recovery. If we're not doing sack,
4297 * check to see if we're in newreno
4298 * recovery.
4299 */
4300 if (SACK_ENABLED(tp)) {
4301 if (IN_FASTRECOVERY(tp)) {
4302 tp->t_dupacks = 0;
4303 break;
4304 } else if (tp->t_flagsext & TF_DELAY_RECOVERY) {
4305 break;
4306 }
4307 } else {
4308 if (SEQ_LEQ(th->th_ack,
4309 tp->snd_recover)) {
4310 tp->t_dupacks = 0;
4311 break;
4312 }
4313 }
4314 if (tp->t_flags & TF_SENTFIN)
4315 tp->snd_recover = tp->snd_max - 1;
4316 else
4317 tp->snd_recover = tp->snd_max;
4318 tp->t_timer[TCPT_PTO] = 0;
4319 tp->t_rtttime = 0;
4320
4321 /*
4322 * If the connection has seen pkt
4323 * reordering, delay recovery until
4324 * it is clear that the packet
4325 * was lost.
4326 */
4327 if (SACK_ENABLED(tp) &&
4328 (tp->t_flagsext &
4329 (TF_PKTS_REORDERED|TF_DELAY_RECOVERY))
4330 == TF_PKTS_REORDERED &&
4331 !IN_FASTRECOVERY(tp) &&
4332 tp->t_reorderwin > 0 &&
4333 (tp->t_state == TCPS_ESTABLISHED ||
4334 tp->t_state == TCPS_FIN_WAIT_1)) {
4335 tp->t_timer[TCPT_DELAYFR] =
4336 OFFSET_FROM_START(tp,
4337 tp->t_reorderwin);
4338 tp->t_flagsext |= TF_DELAY_RECOVERY;
4339 tcpstat.tcps_delay_recovery++;
4340 tcp_ccdbg_trace(tp, th,
4341 TCP_CC_DELAY_FASTRECOVERY);
4342 break;
4343 }
4344
4345 tcp_rexmt_save_state(tp);
4346 /*
4347 * If the current tcp cc module has
4348 * defined a hook for tasks to run
4349 * before entering FR, call it
4350 */
4351 if (CC_ALGO(tp)->pre_fr != NULL)
4352 CC_ALGO(tp)->pre_fr(tp);
4353 ENTER_FASTRECOVERY(tp);
4354 tp->t_timer[TCPT_REXMT] = 0;
4355 if (TCP_ECN_ENABLED(tp))
4356 tp->ecn_flags |= TE_SENDCWR;
4357
4358 if (SACK_ENABLED(tp)) {
4359 tcpstat.tcps_sack_recovery_episode++;
4360 tp->t_sack_recovery_episode++;
4361 tp->sack_newdata = tp->snd_nxt;
4362 tp->snd_cwnd = tp->t_maxseg;
4363 tp->t_flagsext &=
4364 ~TF_CWND_NONVALIDATED;
4365
4366 /* Process any window updates */
4367 if (tiwin > tp->snd_wnd)
4368 tcp_update_window(
4369 tp, thflags,
4370 th, tiwin, tlen);
4371
4372 tcp_ccdbg_trace(tp, th,
4373 TCP_CC_ENTER_FASTRECOVERY);
4374 (void) tcp_output(tp);
4375 goto drop;
4376 }
4377 tp->snd_nxt = th->th_ack;
4378 tp->snd_cwnd = tp->t_maxseg;
4379
4380 /* Process any window updates */
4381 if (tiwin > tp->snd_wnd)
4382 tcp_update_window(tp,
4383 thflags,
4384 th, tiwin, tlen);
4385
4386 (void) tcp_output(tp);
4387 if (tp->t_flagsext & TF_CWND_NONVALIDATED) {
4388 tcp_cc_adjust_nonvalidated_cwnd(tp);
4389 } else {
4390 tp->snd_cwnd = tp->snd_ssthresh +
4391 tp->t_maxseg * tp->t_dupacks;
4392 }
4393 if (SEQ_GT(onxt, tp->snd_nxt))
4394 tp->snd_nxt = onxt;
4395
4396 tcp_ccdbg_trace(tp, th,
4397 TCP_CC_ENTER_FASTRECOVERY);
4398 goto drop;
4399 } else if (limited_txmt &&
4400 ALLOW_LIMITED_TRANSMIT(tp) &&
4401 (!(SACK_ENABLED(tp)) || sack_bytes_acked > 0) &&
4402 (so->so_snd.sb_cc - (tp->snd_max - tp->snd_una)) > 0) {
4403 u_int32_t incr = (tp->t_maxseg * tp->t_dupacks);
4404
4405 /* Use Limited Transmit algorithm on the first two
4406 * duplicate acks when there is new data to transmit
4407 */
4408 tp->snd_cwnd += incr;
4409 tcpstat.tcps_limited_txt++;
4410 (void) tcp_output(tp);
4411
4412 tcp_ccdbg_trace(tp, th, TCP_CC_LIMITED_TRANSMIT);
4413
4414 /* Reset snd_cwnd back to normal */
4415 tp->snd_cwnd -= incr;
4416 }
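/*
 * This is RFC 3042 limited transmit: on the first
 * couple of duplicate ACKs, cwnd is inflated just
 * long enough for tcp_output() to send previously
 * unsent data, then deflated again, which keeps the
 * ACK clock running without permanently growing cwnd.
 */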
4417 }
4418 break;
4419 }
4420 /*
4421 * If the congestion window was inflated to account
4422 * for the other side's cached packets, retract it.
4423 */
4424 if (IN_FASTRECOVERY(tp)) {
4425 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
4426 /*
4427 * If we received an ECE and entered
4428 * recovery, the subsequent ACKs should
4429 * not be treated as partial acks.
4430 */
4431 if (tp->ecn_flags & TE_INRECOVERY)
4432 goto process_ACK;
4433
4434 if (SACK_ENABLED(tp))
4435 tcp_sack_partialack(tp, th);
4436 else
4437 tcp_newreno_partial_ack(tp, th);
4438 tcp_ccdbg_trace(tp, th, TCP_CC_PARTIAL_ACK);
4439 } else {
4440 EXIT_FASTRECOVERY(tp);
4441 if (CC_ALGO(tp)->post_fr != NULL)
4442 CC_ALGO(tp)->post_fr(tp, th);
4443 tp->t_pipeack = 0;
4444 tcp_clear_pipeack_state(tp);
4445 tcp_ccdbg_trace(tp, th,
4446 TCP_CC_EXIT_FASTRECOVERY);
4447 }
4448 } else if ((tp->t_flagsext &
4449 (TF_PKTS_REORDERED|TF_DELAY_RECOVERY))
4450 == (TF_PKTS_REORDERED|TF_DELAY_RECOVERY)) {
4451 /*
4452 * If the ack acknowledges up to snd_recover or if
4453 * it acknowledges all the snd holes, exit
4454 * recovery and cancel the timer. Otherwise,
4455 * this is a partial ack. Wait for recovery timer
4456 * to enter recovery. The snd_holes have already
4457 * been updated.
4458 */
4459 if (SEQ_GEQ(th->th_ack, tp->snd_recover) ||
4460 TAILQ_EMPTY(&tp->snd_holes)) {
4461 tp->t_timer[TCPT_DELAYFR] = 0;
4462 tp->t_flagsext &= ~TF_DELAY_RECOVERY;
4463 EXIT_FASTRECOVERY(tp);
4464 tcp_ccdbg_trace(tp, th,
4465 TCP_CC_EXIT_FASTRECOVERY);
4466 }
4467 } else {
4468 /*
4469 * We were not in fast recovery. Reset the
4470 * duplicate ack counter.
4471 */
4472 tp->t_dupacks = 0;
4473 tp->t_rexmtthresh = tcprexmtthresh;
4474 }
4475
4476
4477 /*
4478 * If we reach this point, ACK is not a duplicate,
4479 * i.e., it ACKs something we sent.
4480 */
4481 if (tp->t_flags & TF_NEEDSYN) {
4482 /*
4483 * T/TCP: Connection was half-synchronized, and our
4484 * SYN has been ACK'd (so connection is now fully
4485 * synchronized). Go to non-starred state,
4486 * increment snd_una for ACK of SYN, and check if
4487 * we can do window scaling.
4488 */
4489 tp->t_flags &= ~TF_NEEDSYN;
4490 tp->snd_una++;
4491 /* Do window scaling? */
4492 if (TCP_WINDOW_SCALE_ENABLED(tp)) {
4493 tp->snd_scale = tp->requested_s_scale;
4494 tp->rcv_scale = tp->request_r_scale;
4495 }
4496 }
4497
4498process_ACK:
4499 VERIFY(SEQ_GEQ(th->th_ack, tp->snd_una));
4500 acked = BYTES_ACKED(th, tp);
4501 tcpstat.tcps_rcvackpack++;
4502 tcpstat.tcps_rcvackbyte += acked;
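/*
 * BYTES_ACKED() is the modular difference th_ack - snd_una;
 * e.g. with snd_una = 1000 and th_ack = 1500 this ACK newly
 * acknowledges 500 bytes.
 */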
4503
4504 /*
4505 * If the last packet was a retransmit, make sure
4506 * it was not spurious.
4507 *
4508 * This will also take care of congestion window
4509 * adjustment if a last packet was recovered due to a
4510 * tail loss probe.
4511 */
4512 tcp_bad_rexmt_check(tp, th, &to);
4513
4514 /* Recalculate the RTT */
4515 tcp_compute_rtt(tp, &to, th);
4516
4517 /*
4518 * If all outstanding data is acked, stop retransmit
4519 * timer and remember to restart (more output or persist).
4520 * If there is more data to be acked, restart retransmit
4521 * timer, using current (possibly backed-off) value.
4522 */
4523 TCP_RESET_REXMT_STATE(tp);
4524 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
4525 tp->t_rttmin, TCPTV_REXMTMAX,
4526 TCP_ADD_REXMTSLOP(tp));
4527 if (th->th_ack == tp->snd_max) {
4528 tp->t_timer[TCPT_REXMT] = 0;
4529 tp->t_timer[TCPT_PTO] = 0;
4530 needoutput = 1;
4531 } else if (tp->t_timer[TCPT_PERSIST] == 0)
4532 tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp,
4533 tp->t_rxtcur);
4534
4535 /*
4536 * If no data (only SYN) was ACK'd, skip rest of ACK
4537 * processing.
4538 */
4539 if (acked == 0)
4540 goto step6;
4541
4542 /*
4543 * When outgoing data has been acked (except the SYN+data), we
4544 * mark this connection as "sending good" for TFO.
4545 */
4546 if ((tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) &&
4547 !(tp->t_tfo_flags & TFO_F_NO_SNDPROBING) &&
4548 !(th->th_flags & TH_SYN))
4549 tp->t_tfo_flags |= TFO_F_NO_SNDPROBING;
4550
4551 /*
4552 * If TH_ECE is received, make sure that ECN is enabled
4553 * on that connection and we have sent ECT on data packets.
4554 */
4555 if ((thflags & TH_ECE) != 0 && TCP_ECN_ENABLED(tp) &&
4556 (tp->ecn_flags & TE_SENDIPECT)) {
4557 /*
4558 * Reduce the congestion window if we haven't
4559 * done so.
4560 */
4561 if (!IN_FASTRECOVERY(tp)) {
4562 tcp_reduce_congestion_window(tp);
4563 tp->ecn_flags |= (TE_INRECOVERY|TE_SENDCWR);
4564 /*
4565 * Also note that the connection received
4566 * ECE at least once
4567 */
4568 tp->ecn_flags |= TE_RECV_ECN_ECE;
4569 INP_INC_IFNET_STAT(inp, ecn_recv_ece);
4570 tcpstat.tcps_ecn_recv_ece++;
4571 tcp_ccdbg_trace(tp, th, TCP_CC_ECN_RCVD);
4572 }
4573 }
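/*
 * Per RFC 3168 the receiver keeps echoing ECE until our CWR
 * reaches it, so the IN_FASTRECOVERY()/TE_INRECOVERY checks
 * above ensure we react to the congestion signal only about
 * once per window of data rather than on every ACK.
 */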
4574
4575 /*
4576 * When new data is acked, open the congestion window.
4577 * The specifics of how this is achieved are up to the
4578 * congestion control algorithm in use for this connection.
4579 *
4580 * The calculations in this function assume that snd_una is
4581 * not updated yet.
4582 */
4583 if (!IN_FASTRECOVERY(tp)) {
4584 if (CC_ALGO(tp)->ack_rcvd != NULL)
4585 CC_ALGO(tp)->ack_rcvd(tp, th);
4586 tcp_ccdbg_trace(tp, th, TCP_CC_ACK_RCVD);
4587 }
4588 if (acked > so->so_snd.sb_cc) {
4589 tp->snd_wnd -= so->so_snd.sb_cc;
4590 sbdrop(&so->so_snd, (int)so->so_snd.sb_cc);
4591 if (so->so_flags & SOF_ENABLE_MSGS) {
4592 so->so_msg_state->msg_serial_bytes -=
4593 (int)so->so_snd.sb_cc;
4594 }
4595 ourfinisacked = 1;
4596 } else {
4597 sbdrop(&so->so_snd, acked);
4598 if (so->so_flags & SOF_ENABLE_MSGS) {
4599 so->so_msg_state->msg_serial_bytes -=
4600 acked;
4601 }
4602 tcp_sbsnd_trim(&so->so_snd);
4603 tp->snd_wnd -= acked;
4604 ourfinisacked = 0;
4605 }
4606 /* detect una wraparound */
4607 if ( !IN_FASTRECOVERY(tp) &&
4608 SEQ_GT(tp->snd_una, tp->snd_recover) &&
4609 SEQ_LEQ(th->th_ack, tp->snd_recover))
4610 tp->snd_recover = th->th_ack - 1;
4611
4612 if (IN_FASTRECOVERY(tp) &&
4613 SEQ_GEQ(th->th_ack, tp->snd_recover))
4614 EXIT_FASTRECOVERY(tp);
4615
4616 tp->snd_una = th->th_ack;
4617
4618 if (SACK_ENABLED(tp)) {
4619 if (SEQ_GT(tp->snd_una, tp->snd_recover))
4620 tp->snd_recover = tp->snd_una;
4621 }
4622 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
4623 tp->snd_nxt = tp->snd_una;
4624 if (!SLIST_EMPTY(&tp->t_rxt_segments) &&
4625 !TCP_DSACK_SEQ_IN_WINDOW(tp, tp->t_dsack_lastuna,
4626 tp->snd_una))
4627 tcp_rxtseg_clean(tp);
4628 if ((tp->t_flagsext & TF_MEASURESNDBW) != 0 &&
4629 tp->t_bwmeas != NULL)
4630 tcp_bwmeas_check(tp);
4631
4632 /*
4633 * sowwakeup must happen after snd_una, et al. are
4634 * updated so that the sequence numbers are in sync with
4635 * so_snd
4636 */
4637 sowwakeup(so);
4638
4639 if (!SLIST_EMPTY(&tp->t_notify_ack))
4640 tcp_notify_acknowledgement(tp, so);
4641
4642 switch (tp->t_state) {
4643
4644 /*
4645 * In FIN_WAIT_1 STATE in addition to the processing
4646 * for the ESTABLISHED state if our FIN is now acknowledged
4647 * then enter FIN_WAIT_2.
4648 */
4649 case TCPS_FIN_WAIT_1:
4650 if (ourfinisacked) {
4651 /*
4652 * If we can't receive any more
4653 * data, then closing user can proceed.
4654 * Starting the TCPT_2MSL timer is contrary to the
4655 * specification, but if we don't get a FIN
4656 * we'll hang forever.
4657 */
4658 if (so->so_state & SS_CANTRCVMORE) {
4659 tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp,
4660 TCP_CONN_MAXIDLE(tp));
4661 isconnected = FALSE;
4662 isdisconnected = TRUE;
4663 }
4664 DTRACE_TCP4(state__change, void, NULL,
4665 struct inpcb *, inp,
4666 struct tcpcb *, tp,
4667 int32_t, TCPS_FIN_WAIT_2);
4668 tp->t_state = TCPS_FIN_WAIT_2;
4669 /* fall through and make sure we also recognize
4670 * data ACKed with the FIN
4671 */
4672 }
4673 break;
4674
4675 /*
4676 * In CLOSING STATE in addition to the processing for
4677 * the ESTABLISHED state if the ACK acknowledges our FIN
4678 * then enter the TIME-WAIT state, otherwise ignore
4679 * the segment.
4680 */
4681 case TCPS_CLOSING:
4682 if (ourfinisacked) {
4683 DTRACE_TCP4(state__change, void, NULL,
4684 struct inpcb *, inp,
4685 struct tcpcb *, tp,
4686 int32_t, TCPS_TIME_WAIT);
4687 tp->t_state = TCPS_TIME_WAIT;
4688 tcp_canceltimers(tp);
4689 if (tp->t_flagsext & TF_NOTIMEWAIT) {
4690 tp->t_flags |= TF_CLOSING;
4691 } else {
4692 add_to_time_wait(tp, 2 * tcp_msl);
4693 }
4694 isconnected = FALSE;
4695 isdisconnected = TRUE;
4696 }
4697 break;
4698
4699 /*
4700 * In LAST_ACK, we may still be waiting for data to drain
4701 * and/or to be acked, as well as for the ack of our FIN.
4702 * If our FIN is now acknowledged, delete the TCB,
4703 * enter the closed state and return.
4704 */
4705 case TCPS_LAST_ACK:
4706 if (ourfinisacked) {
4707 tp = tcp_close(tp);
4708 goto drop;
4709 }
4710 break;
4711
4712 /*
4713 * In TIME_WAIT state the only thing that should arrive
4714 * is a retransmission of the remote FIN. Acknowledge
4715 * it and restart the finack timer.
4716 */
4717 case TCPS_TIME_WAIT:
4718 add_to_time_wait(tp, 2 * tcp_msl);
4719 goto dropafterack;
4720 }
4721
4722 /*
4723 * If there is a SACK option on the ACK and we
4724 * haven't seen any duplicate acks before, count
4725 * it as a duplicate ack even if the cumulative
4726 * ack is advanced. If the receiver delayed an
4727 * ack and detected loss afterwards, then the ack
4728 * will advance cumulative ack and will also have
4729 * a SACK option. So counting it as one duplicate
4730 * ack is ok.
4731 */
4732 if (sack_ackadv == 1 &&
4733 tp->t_state == TCPS_ESTABLISHED &&
4734 SACK_ENABLED(tp) && sack_bytes_acked > 0 &&
4735 to.to_nsacks > 0 && tp->t_dupacks == 0 &&
4736 SEQ_LEQ(th->th_ack, tp->snd_una) && tlen == 0 &&
4737 !(tp->t_flagsext & TF_PKTS_REORDERED)) {
4738 tcpstat.tcps_sack_ackadv++;
4739 goto process_dupack;
4740 }
4741 }
4742
4743step6:
4744 /*
4745 * Update window information.
4746 */
4747 if (tcp_update_window(tp, thflags, th, tiwin, tlen))
4748 needoutput = 1;
4749
4750 /*
4751 * Process segments with URG.
4752 */
4753 if ((thflags & TH_URG) && th->th_urp &&
4754 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
4755 /*
4756 * This is a kludge, but if we receive and accept
4757 * random urgent pointers, we'll crash in
4758 * soreceive. It's hard to imagine someone
4759 * actually wanting to send this much urgent data.
4760 */
4761 if (th->th_urp + so->so_rcv.sb_cc > sb_max) {
4762 th->th_urp = 0; /* XXX */
4763 thflags &= ~TH_URG; /* XXX */
4764 goto dodata; /* XXX */
4765 }
4766 /*
4767 * If this segment advances the known urgent pointer,
4768 * then mark the data stream. This should not happen
4769 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
4770 * a FIN has been received from the remote side.
4771 * In these states we ignore the URG.
4772 *
4773 * According to RFC961 (Assigned Protocols),
4774 * the urgent pointer points to the last octet
4775 * of urgent data. We continue, however,
4776 * to consider it to indicate the first octet
4777 * of data past the urgent section as the original
4778 * spec states (in one of two places).
4779 */
4780 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
4781 tp->rcv_up = th->th_seq + th->th_urp;
4782 so->so_oobmark = so->so_rcv.sb_cc +
4783 (tp->rcv_up - tp->rcv_nxt) - 1;
4784 if (so->so_oobmark == 0) {
4785 so->so_state |= SS_RCVATMARK;
4786 postevent(so, 0, EV_OOB);
4787 }
4788 sohasoutofband(so);
4789 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
4790 }
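/*
 * E.g. with rcv_nxt = th_seq = 1000, th_urp = 10 and 50 bytes
 * already queued in so_rcv, rcv_up becomes 1010 and so_oobmark
 * is 50 + 10 - 1 = 59, the offset of the urgent byte within
 * the receive buffer.
 */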
4791 /*
4792 * Remove out of band data so doesn't get presented to user.
4793 * This can happen independent of advancing the URG pointer,
4794 * but if two URG's are pending at once, some out-of-band
4795 * data may creep in... ick.
4796 */
4797 if (th->th_urp <= (u_int32_t)tlen
4798#if SO_OOBINLINE
4799 && (so->so_options & SO_OOBINLINE) == 0
4800#endif
4801 )
4802 tcp_pulloutofband(so, th, m,
4803 drop_hdrlen); /* hdr drop is delayed */
4804 } else {
4805 /*
4806 * If no out of band data is expected,
4807 * pull receive urgent pointer along
4808 * with the receive window.
4809 */
4810 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
4811 tp->rcv_up = tp->rcv_nxt;
4812 }
4813dodata:
4814
4815 /* Set the socket's connect or disconnect state correctly before doing data.
4816 * The following might unlock the socket if there is an upcall or a socket
4817 * filter.
4818 */
4819 if (isconnected) {
4820 soisconnected(so);
4821 } else if (isdisconnected) {
4822 soisdisconnected(so);
4823 }
4824
4825 /* Check the state of the pcb just to make sure that it did not get closed
4826 * while the socket was unlocked above
4827 */
4828 if (inp->inp_state == INPCB_STATE_DEAD) {
4829 /* Just drop the packet that we are processing and return */
4830 goto drop;
4831 }
4832
4833 /*
4834 * Process the segment text, merging it into the TCP sequencing queue,
4835 * and arranging for acknowledgment of receipt if necessary.
4836 * This process logically involves adjusting tp->rcv_wnd as data
4837 * is presented to the user (this happens in tcp_usrreq.c,
4838 * case PRU_RCVD). If a FIN has already been received on this
4839 * connection then we just ignore the text.
4840 *
4841 * If we are in SYN-received state and got a valid TFO cookie, we want
4842 * to process the data.
4843 */
4844 if ((tlen || (thflags & TH_FIN)) &&
4845 TCPS_HAVERCVDFIN(tp->t_state) == 0 &&
4846 (TCPS_HAVEESTABLISHED(tp->t_state) ||
4847 (tp->t_state == TCPS_SYN_RECEIVED &&
4848 (tp->t_tfo_flags & TFO_F_COOKIE_VALID)))) {
4849 tcp_seq save_start = th->th_seq;
4850 tcp_seq save_end = th->th_seq + tlen;
4851 m_adj(m, drop_hdrlen); /* delayed header drop */
4852 /*
4853 * Insert segment which includes th into TCP reassembly queue
4854 * with control block tp. Set thflags to whether reassembly now
4855 * includes a segment with FIN. This handles the common case
4856 * inline (segment is the next to be received on an established
4857 * connection, and the queue is empty), avoiding linkage into
4858 * and removal from the queue and repetition of various
4859 * conversions.
4860 * Set DELACK for segments received in order, but ack
4861 * immediately when segments are out of order (so
4862 * fast retransmit can work).
4863 */
4864 if (th->th_seq == tp->rcv_nxt && LIST_EMPTY(&tp->t_segq)) {
4865 TCP_INC_VAR(tp->t_unacksegs, nlropkts);
4866 /*
4867 * Calculate the RTT on the receiver only if the
4868 * connection is in streaming mode and the last
4869 * packet was not an end-of-write
4870 */
4871 if (tp->t_flags & TF_STREAMING_ON)
4872 tcp_compute_rtt(tp, &to, th);
4873
4874 if (DELAY_ACK(tp, th) &&
4875 ((tp->t_flags & TF_ACKNOW) == 0) ) {
4876 if ((tp->t_flags & TF_DELACK) == 0) {
4877 tp->t_flags |= TF_DELACK;
4878 tp->t_timer[TCPT_DELACK] =
4879 OFFSET_FROM_START(tp, tcp_delack);
4880 }
4881 }
4882 else {
4883 tp->t_flags |= TF_ACKNOW;
4884 }
4885 tp->rcv_nxt += tlen;
4886 thflags = th->th_flags & TH_FIN;
4887 TCP_INC_VAR(tcpstat.tcps_rcvpack, nlropkts);
4888 tcpstat.tcps_rcvbyte += tlen;
4889 if (nstat_collect) {
4890 if (m->m_pkthdr.pkt_flags & PKTF_SW_LRO_PKT) {
4891 INP_ADD_STAT(inp, cell, wifi, wired,
4892 rxpackets, m->m_pkthdr.lro_npkts);
4893 } else {
4894 INP_ADD_STAT(inp, cell, wifi, wired,
4895 rxpackets, 1);
4896 }
4897 INP_ADD_STAT(inp, cell, wifi, wired,
4898 rxbytes, tlen);
4899 inp_set_activity_bitmap(inp);
4900 }
4901 tcp_sbrcv_grow(tp, &so->so_rcv, &to, tlen,
4902 TCP_AUTORCVBUF_MAX(ifp));
4903 so_recv_data_stat(so, m, drop_hdrlen);
4904
4905 if (sbappendstream_rcvdemux(so, m,
4906 th->th_seq - (tp->irs + 1), 0)) {
4907 sorwakeup(so);
4908 }
4909 } else {
4910 thflags = tcp_reass(tp, th, &tlen, m, ifp);
4911 tp->t_flags |= TF_ACKNOW;
4912 }
4913
4914 if ((tlen > 0 || (th->th_flags & TH_FIN)) && SACK_ENABLED(tp)) {
4915 if (th->th_flags & TH_FIN)
4916 save_end++;
4917 tcp_update_sack_list(tp, save_start, save_end);
4918 }
4919
4920 tcp_adaptive_rwtimo_check(tp, tlen);
4921
4922 if (tlen > 0)
4923 tcp_tfo_rcv_data(tp);
4924
4925 if (tp->t_flags & TF_DELACK)
4926 {
4927#if INET6
4928 if (isipv6) {
4929 KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport),
4930 (((ip6->ip6_src.s6_addr16[0]) << 16) | (ip6->ip6_dst.s6_addr16[0])),
4931 th->th_seq, th->th_ack, th->th_win);
4932 }
4933 else
4934#endif
4935 {
4936 KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport),
4937 (((ip->ip_src.s_addr & 0xffff) << 16) | (ip->ip_dst.s_addr & 0xffff)),
4938 th->th_seq, th->th_ack, th->th_win);
4939 }
4940
4941 }
4942 } else {
4943 m_freem(m);
4944 thflags &= ~TH_FIN;
4945 }
4946
4947 /*
4948 * If a FIN is received, ACK the FIN and let the user know
4949 * that the connection is closing.
4950 */
4951 if (thflags & TH_FIN) {
4952 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
4953 socantrcvmore(so);
4954 postevent(so, 0, EV_FIN);
4955 /*
4956 * If connection is half-synchronized
4957 * (ie NEEDSYN flag on) then delay ACK,
4958 * so it may be piggybacked when SYN is sent.
4959 * Otherwise, since we received a FIN then no
4960 * more input can be expected, send ACK now.
4961 */
4962 TCP_INC_VAR(tp->t_unacksegs, nlropkts);
4963 if (DELAY_ACK(tp, th) && (tp->t_flags & TF_NEEDSYN)) {
4964 if ((tp->t_flags & TF_DELACK) == 0) {
4965 tp->t_flags |= TF_DELACK;
4966 tp->t_timer[TCPT_DELACK] = OFFSET_FROM_START(tp, tcp_delack);
4967 }
4968 } else {
4969 tp->t_flags |= TF_ACKNOW;
4970 }
4971 tp->rcv_nxt++;
4972 }
4973 switch (tp->t_state) {
4974
4975 /*
4976 * In SYN_RECEIVED and ESTABLISHED STATES
4977 * enter the CLOSE_WAIT state.
4978 */
4979 case TCPS_SYN_RECEIVED:
4980 tp->t_starttime = tcp_now;
4981 case TCPS_ESTABLISHED:
4982 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
4983 struct tcpcb *, tp, int32_t, TCPS_CLOSE_WAIT);
4984 tp->t_state = TCPS_CLOSE_WAIT;
4985 break;
4986
4987 /*
4988 * If still in FIN_WAIT_1 STATE FIN has not been acked so
4989 * enter the CLOSING state.
4990 */
4991 case TCPS_FIN_WAIT_1:
4992 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
4993 struct tcpcb *, tp, int32_t, TCPS_CLOSING);
4994 tp->t_state = TCPS_CLOSING;
4995 break;
4996
4997 /*
4998 * In FIN_WAIT_2 state enter the TIME_WAIT state,
4999 * starting the time-wait timer, turning off the other
5000 * standard timers.
5001 */
5002 case TCPS_FIN_WAIT_2:
5003 DTRACE_TCP4(state__change, void, NULL,
5004 struct inpcb *, inp,
5005 struct tcpcb *, tp,
5006 int32_t, TCPS_TIME_WAIT);
5007 tp->t_state = TCPS_TIME_WAIT;
5008 tcp_canceltimers(tp);
5009 tp->t_flags |= TF_ACKNOW;
5010 if (tp->t_flagsext & TF_NOTIMEWAIT) {
5011 tp->t_flags |= TF_CLOSING;
5012 } else {
5013 add_to_time_wait(tp, 2 * tcp_msl);
5014 }
5015 soisdisconnected(so);
5016 break;
5017
5018 /*
5019 * In TIME_WAIT state restart the 2 MSL time_wait timer.
5020 */
5021 case TCPS_TIME_WAIT:
5022 add_to_time_wait(tp, 2 * tcp_msl);
5023 break;
5024 }
5025 }
5026#if TCPDEBUG
5027 if (so->so_options & SO_DEBUG)
5028 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen,
5029 &tcp_savetcp, 0);
5030#endif
5031
5032 /*
5033 * Return any desired output.
5034 */
5035 if (needoutput || (tp->t_flags & TF_ACKNOW)) {
5036 (void) tcp_output(tp);
5037 }
5038
5039 tcp_check_timer_state(tp);
5040
5041
5042 socket_unlock(so, 1);
5043 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0);
5044 return;
5045
5046dropafterack:
5047 /*
5048 * Generate an ACK, dropping the incoming segment if it occupies
5049 * sequence space, where the ACK reflects our state.
5050 *
5051 * We can now skip the test for the RST flag since all
5052 * paths to this code happen after packets containing
5053 * RST have been dropped.
5054 *
5055 * In the SYN-RECEIVED state, don't send an ACK unless the
5056 * segment we received passes the SYN-RECEIVED ACK test.
5057 * If it fails send a RST. This breaks the loop in the
5058 * "LAND" DoS attack, and also prevents an ACK storm
5059 * between two listening ports that have been sent forged
5060 * SYN segments, each with the source address of the other.
5061 */
5062 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
5063 (SEQ_GT(tp->snd_una, th->th_ack) ||
5064 SEQ_GT(th->th_ack, tp->snd_max)) ) {
5065 rstreason = BANDLIM_RST_OPENPORT;
5066 IF_TCP_STATINC(ifp, dospacket);
5067 goto dropwithreset;
5068 }
5069#if TCPDEBUG
5070 if (so->so_options & SO_DEBUG)
5071 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
5072 &tcp_savetcp, 0);
5073#endif
5074 m_freem(m);
5075 tp->t_flags |= TF_ACKNOW;
5076 (void) tcp_output(tp);
5077
5078 /* Don't need to check timer state as we should have done it during tcp_output */
5079 socket_unlock(so, 1);
5080 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0);
5081 return;
5082dropwithresetnosock:
5083 nosock = 1;
5084dropwithreset:
5085 /*
5086 * Generate a RST, dropping incoming segment.
5087 * Make ACK acceptable to originator of segment.
5088 * Don't bother to respond if destination was broadcast/multicast.
5089 */
5090 if ((thflags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
5091 goto drop;
5092#if INET6
5093 if (isipv6) {
5094 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
5095 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
5096 goto drop;
5097 } else
5098#endif /* INET6 */
5099 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
5100 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
5101 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
5102 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
5103 goto drop;
5104 /* IPv6 anycast check is done at tcp6_input() */
5105
5106 /*
5107 * Perform bandwidth limiting.
5108 */
5109#if ICMP_BANDLIM
5110 if (badport_bandlim(rstreason) < 0)
5111 goto drop;
5112#endif
5113
5114#if TCPDEBUG
5115 if (tp == 0 || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
5116 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
5117 &tcp_savetcp, 0);
5118#endif
5119 bzero(&tra, sizeof(tra));
5120 tra.ifscope = ifscope;
5121 tra.awdl_unrestricted = 1;
5122 tra.intcoproc_allowed = 1;
5123 if (thflags & TH_ACK)
5124 /* mtod() below is safe as long as hdr dropping is delayed */
5125 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, th->th_ack,
5126 TH_RST, &tra);
5127 else {
5128 if (thflags & TH_SYN)
5129 tlen++;
5130 /* mtod() below is safe as long as hdr dropping is delayed */
5131 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
5132 (tcp_seq)0, TH_RST|TH_ACK, &tra);
5133 }
5134 /* destroy temporarily created socket */
5135 if (dropsocket) {
5136 (void) soabort(so);
5137 socket_unlock(so, 1);
5138 } else if ((inp != NULL) && (nosock == 0)) {
5139 socket_unlock(so, 1);
5140 }
5141 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0);
5142 return;
5143dropnosock:
5144 nosock = 1;
5145drop:
5146 /*
5147 * Drop space held by incoming segment and return.
5148 */
5149#if TCPDEBUG
5150 if (tp == 0 || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
5151 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
5152 &tcp_savetcp, 0);
5153#endif
5154 m_freem(m);
5155 /* destroy temporarily created socket */
5156 if (dropsocket) {
5157 (void) soabort(so);
5158 socket_unlock(so, 1);
5159 }
5160 else if (nosock == 0) {
5161 socket_unlock(so, 1);
5162 }
5163 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0);
5164 return;
5165}
5166
5167/*
5168 * Parse TCP options and place in tcpopt.
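 * Each option is a (kind, length, data) triple, except for the
 * one-byte EOL and NOP kinds; e.g. an MSS option on a SYN is kind 2,
 * length 4, followed by the 16-bit MSS in network byte order.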
5169 */
5170static void
5171tcp_dooptions(struct tcpcb *tp, u_char *cp, int cnt, struct tcphdr *th,
5172 struct tcpopt *to)
5173{
5174 u_short mss = 0;
5175 int opt, optlen;
5176
5177 for (; cnt > 0; cnt -= optlen, cp += optlen) {
5178 opt = cp[0];
5179 if (opt == TCPOPT_EOL)
5180 break;
5181 if (opt == TCPOPT_NOP)
5182 optlen = 1;
5183 else {
5184 if (cnt < 2)
5185 break;
5186 optlen = cp[1];
5187 if (optlen < 2 || optlen > cnt)
5188 break;
5189 }
5190 switch (opt) {
5191
5192 default:
5193 continue;
5194
5195 case TCPOPT_MAXSEG:
5196 if (optlen != TCPOLEN_MAXSEG)
5197 continue;
5198 if (!(th->th_flags & TH_SYN))
5199 continue;
5200 bcopy((char *) cp + 2, (char *) &mss, sizeof(mss));
5201 NTOHS(mss);
5202 to->to_mss = mss;
5203 to->to_flags |= TOF_MSS;
5204 break;
5205
5206 case TCPOPT_WINDOW:
5207 if (optlen != TCPOLEN_WINDOW)
5208 continue;
5209 if (!(th->th_flags & TH_SYN))
5210 continue;
5211 to->to_flags |= TOF_SCALE;
5212 to->to_requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT);
5213 break;
5214
5215 case TCPOPT_TIMESTAMP:
5216 if (optlen != TCPOLEN_TIMESTAMP)
5217 continue;
5218 to->to_flags |= TOF_TS;
5219 bcopy((char *)cp + 2,
5220 (char *)&to->to_tsval, sizeof(to->to_tsval));
5221 NTOHL(to->to_tsval);
5222 bcopy((char *)cp + 6,
5223 (char *)&to->to_tsecr, sizeof(to->to_tsecr));
5224 NTOHL(to->to_tsecr);
5225 /* Re-enable sending Timestamps if we received them */
5226 if (!(tp->t_flags & TF_REQ_TSTMP) &&
5227 tcp_do_rfc1323 == 1)
5228 tp->t_flags |= TF_REQ_TSTMP;
5229 break;
5230 case TCPOPT_SACK_PERMITTED:
5231 if (!tcp_do_sack ||
5232 optlen != TCPOLEN_SACK_PERMITTED)
5233 continue;
5234 if (th->th_flags & TH_SYN)
5235 to->to_flags |= TOF_SACK;
5236 break;
5237 case TCPOPT_SACK:
5238 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
5239 continue;
5240 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
5241 to->to_sacks = cp + 2;
5242 tcpstat.tcps_sack_rcv_blocks++;
5243
5244 break;
5245 case TCPOPT_FASTOPEN:
5246 if (optlen == TCPOLEN_FASTOPEN_REQ) {
5247 if (tp->t_state != TCPS_LISTEN)
5248 continue;
5249
5250 to->to_flags |= TOF_TFOREQ;
5251 } else {
5252 if (optlen < TCPOLEN_FASTOPEN_REQ ||
5253 (optlen - TCPOLEN_FASTOPEN_REQ) > TFO_COOKIE_LEN_MAX ||
5254 (optlen - TCPOLEN_FASTOPEN_REQ) < TFO_COOKIE_LEN_MIN)
5255 continue;
5256 if (tp->t_state != TCPS_LISTEN &&
5257 tp->t_state != TCPS_SYN_SENT)
5258 continue;
5259
5260 to->to_flags |= TOF_TFO;
5261 to->to_tfo = cp + 1;
5262 }
5263
5264 break;
5265#if MPTCP
5266 case TCPOPT_MULTIPATH:
5267 tcp_do_mptcp_options(tp, cp, th, to, optlen);
5268 break;
5269#endif /* MPTCP */
5270 }
5271 }
5272}
5273
5274static void
5275tcp_finalize_options(struct tcpcb *tp, struct tcpopt *to, unsigned int ifscope)
5276{
5277 if (to->to_flags & TOF_TS) {
5278 tp->t_flags |= TF_RCVD_TSTMP;
5279 tp->ts_recent = to->to_tsval;
5280 tp->ts_recent_age = tcp_now;
5281
5282 }
5283 if (to->to_flags & TOF_MSS)
5284 tcp_mss(tp, to->to_mss, ifscope);
5285 if (SACK_ENABLED(tp)) {
5286 if (!(to->to_flags & TOF_SACK))
5287 tp->t_flagsext &= ~(TF_SACK_ENABLE);
5288 else
5289 tp->t_flags |= TF_SACK_PERMIT;
5290 }
5291 if (to->to_flags & TOF_SCALE) {
5292 tp->t_flags |= TF_RCVD_SCALE;
5293 tp->requested_s_scale = to->to_requested_s_scale;
5294
5295 /* Re-enable window scaling, if the option is received */
5296 if (tp->request_r_scale > 0)
5297 tp->t_flags |= TF_REQ_SCALE;
5298 }
5299}
5300
5301/*
5302 * Pull out of band byte out of a segment so
5303 * it doesn't appear in the user's data queue.
5304 * It is still reflected in the segment length for
5305 * sequencing purposes.
5306 *
5307 * @param off the header length whose dropping has been delayed
5308 */
5309static void
5310tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, int off)
5311{
5312 int cnt = off + th->th_urp - 1;
5313
5314 while (cnt >= 0) {
5315 if (m->m_len > cnt) {
5316 char *cp = mtod(m, caddr_t) + cnt;
5317 struct tcpcb *tp = sototcpcb(so);
5318
5319 tp->t_iobc = *cp;
5320 tp->t_oobflags |= TCPOOB_HAVEDATA;
5321 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
5322 m->m_len--;
5323 if (m->m_flags & M_PKTHDR)
5324 m->m_pkthdr.len--;
5325 return;
5326 }
5327 cnt -= m->m_len;
5328 m = m->m_next;
5329 if (m == 0)
5330 break;
5331 }
5332 panic("tcp_pulloutofband");
5333}
5334
5335uint32_t
5336get_base_rtt(struct tcpcb *tp)
5337{
5338 struct rtentry *rt = tp->t_inpcb->inp_route.ro_rt;
5339 return ((rt == NULL) ? 0 : rt->rtt_min);
5340}
5341
5342/* Each value of RTT base represents the minimum RTT seen in a minute.
5343 * We keep up to NRTT_HIST minutes' worth of history.
5344 */
5345void
5346update_base_rtt(struct tcpcb *tp, uint32_t rtt)
5347{
5348 u_int32_t base_rtt, i;
5349 struct rtentry *rt;
5350
5351 if ((rt = tp->t_inpcb->inp_route.ro_rt) == NULL)
5352 return;
5353 if (rt->rtt_expire_ts == 0) {
5354 RT_LOCK_SPIN(rt);
5355 if (rt->rtt_expire_ts != 0) {
5356 RT_UNLOCK(rt);
5357 goto update;
5358 }
5359 rt->rtt_expire_ts = tcp_now;
5360 rt->rtt_index = 0;
5361 rt->rtt_hist[0] = rtt;
5362 rt->rtt_min = rtt;
5363 RT_UNLOCK(rt);
5364 return;
5365 }
5366update:
5367#if TRAFFIC_MGT
5368 /*
5369 * If the recv side is being throttled, check if the
5370 * current RTT is closer to the base RTT seen in
5371 * the first (recent) two slots. If so, unthrottle the stream.
5372 */
5373 if ((tp->t_flagsext & TF_RECV_THROTTLE) &&
5374 (int)(tcp_now - tp->t_recv_throttle_ts) >= TCP_RECV_THROTTLE_WIN) {
5375 base_rtt = rt->rtt_min;
5376 if (tp->t_rttcur <= (base_rtt + target_qdelay)) {
5377 tp->t_flagsext &= ~TF_RECV_THROTTLE;
5378 tp->t_recv_throttle_ts = 0;
5379 }
5380 }
5381#endif /* TRAFFIC_MGT */
5382 if ((int)(tcp_now - rt->rtt_expire_ts) >=
5383 TCP_RTT_HISTORY_EXPIRE_TIME) {
5384 RT_LOCK_SPIN(rt);
5385 /* check the condition again to avoid race */
5386 if ((int)(tcp_now - rt->rtt_expire_ts) >=
5387 TCP_RTT_HISTORY_EXPIRE_TIME) {
5388 rt->rtt_index++;
5389 if (rt->rtt_index >= NRTT_HIST)
5390 rt->rtt_index = 0;
5391 rt->rtt_hist[rt->rtt_index] = rtt;
5392 rt->rtt_expire_ts = tcp_now;
5393 } else {
5394 rt->rtt_hist[rt->rtt_index] =
5395 min(rt->rtt_hist[rt->rtt_index], rtt);
5396 }
5397 /* forget the old value and update minimum */
5398 rt->rtt_min = 0;
5399 for (i = 0; i < NRTT_HIST; ++i) {
5400 if (rt->rtt_hist[i] != 0 &&
5401 (rt->rtt_min == 0 ||
5402 rt->rtt_hist[i] < rt->rtt_min))
5403 rt->rtt_min = rt->rtt_hist[i];
5404 }
5405 RT_UNLOCK(rt);
5406 } else {
5407 rt->rtt_hist[rt->rtt_index] =
5408 min(rt->rtt_hist[rt->rtt_index], rtt);
5409 if (rt->rtt_min == 0)
5410 rt->rtt_min = rtt;
5411 else
5412 rt->rtt_min = min(rt->rtt_min, rtt);
5413 }
5414}
5415
5416/*
5417 * If we have a timestamp reply, update smoothed RTT. If no timestamp is
5418 * present but transmit timer is running and timed sequence number was
5419 * acked, update smoothed RTT.
5420 *
5421 * If timestamps are supported, a receiver can update RTT even if
5422 * there is no outstanding data.
5423 *
5424 * Some boxes send broken timestamp replies during the SYN+ACK phase;
5425 * ignore timestamps of 0, or we could calculate a huge RTT and blow up
5426 * the retransmit timer.
5427 */
5428static void
5429tcp_compute_rtt(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th)
5430{
5431 int rtt = 0;
5432 VERIFY(to != NULL && th != NULL);
5433 if (tp->t_rtttime != 0 && SEQ_GT(th->th_ack, tp->t_rtseq)) {
5434 u_int32_t pipe_ack_val;
5435 rtt = tcp_now - tp->t_rtttime;
5436 /*
5437 * Compute pipe ack -- the amount of data acknowledged
5438 * in the last RTT
5439 */
5440 if (SEQ_GT(th->th_ack, tp->t_pipeack_lastuna)) {
5441 pipe_ack_val = th->th_ack - tp->t_pipeack_lastuna;
5442 /* Update the sample */
5443 tp->t_pipeack_sample[tp->t_pipeack_ind++] =
5444 pipe_ack_val;
5445 tp->t_pipeack_ind %= TCP_PIPEACK_SAMPLE_COUNT;
5446
5447 /* Compute the max of the pipeack samples */
5448 pipe_ack_val = tcp_get_max_pipeack(tp);
5449 tp->t_pipeack = (pipe_ack_val >
5450 TCP_CC_CWND_INIT_BYTES) ?
5451 pipe_ack_val : 0;
5452 }
5453 /* start another measurement */
5454 tp->t_rtttime = 0;
5455 }
5456 if (((to->to_flags & TOF_TS) != 0) &&
5457 (to->to_tsecr != 0) &&
5458 TSTMP_GEQ(tcp_now, to->to_tsecr)) {
5459 tcp_xmit_timer(tp, (tcp_now - to->to_tsecr),
5460 to->to_tsecr, th->th_ack);
5461 } else if (rtt > 0) {
5462 tcp_xmit_timer(tp, rtt, 0, th->th_ack);
5463 }
5464}
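/*
 * Worked example (illustrative): if a segment was sent when tcp_now
 * was 1000 and the peer echoes that value back in to_tsecr, receiving
 * the ACK at tcp_now == 1083 yields a sample of 1083 - 1000 = 83
 * ticks.  A to_tsecr of 0 is rejected above because a bogus echo of 0
 * would make the sample roughly equal to tcp_now itself.
 */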
5465
5466/*
5467 * Collect new round-trip time estimate and update averages and
5468 * current timeout.
5469 */
5470static void
5471tcp_xmit_timer(struct tcpcb *tp, int rtt,
5472 u_int32_t tsecr, tcp_seq th_ack)
5473{
5474 int delta;
5475
5476 /*
5477 * On AWDL interface, the initial RTT measurement on SYN
5478 * can be wrong due to peer caching. Avoid the first RTT
5479 * measurement as it might skew up the RTO.
5480 * <rdar://problem/28739046>
5481 */
5482 if (tp->t_inpcb->inp_last_outifp != NULL &&
5483 (tp->t_inpcb->inp_last_outifp->if_eflags & IFEF_AWDL) &&
5484 th_ack == tp->iss + 1)
5485 return;
5486
5487 if (tp->t_flagsext & TF_RECOMPUTE_RTT) {
5488 if (SEQ_GT(th_ack, tp->snd_una) &&
5489 SEQ_LEQ(th_ack, tp->snd_max) &&
5490 (tsecr == 0 ||
5491 TSTMP_GEQ(tsecr, tp->t_badrexmt_time))) {
5492 /*
5493 * We received a new ACK after a
5494 * spurious timeout. Adapt the retransmission
5495 * timer as described in RFC 4015.
5496 */
5497 tp->t_flagsext &= ~(TF_RECOMPUTE_RTT);
5498 tp->t_badrexmt_time = 0;
5499 tp->t_srtt = max(tp->t_srtt_prev, rtt);
5500 tp->t_srtt = tp->t_srtt << TCP_RTT_SHIFT;
5501 tp->t_rttvar = max(tp->t_rttvar_prev, (rtt >> 1));
5502 tp->t_rttvar = tp->t_rttvar << TCP_RTTVAR_SHIFT;
5503
5504 if (tp->t_rttbest > (tp->t_srtt + tp->t_rttvar))
5505 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
5506
5507 goto compute_rto;
5508 } else {
5509 return;
5510 }
5511 }
5512
5513 tcpstat.tcps_rttupdated++;
5514 tp->t_rttupdated++;
5515
5516 if (rtt > 0) {
5517 tp->t_rttcur = rtt;
5518 update_base_rtt(tp, rtt);
5519 }
5520
5521 if (tp->t_srtt != 0) {
5522 /*
5523 * srtt is stored as fixed point with 5 bits after the
5524 * binary point (i.e., scaled by 32). The following magic
5525 * is equivalent to the smoothing algorithm in rfc793 with
5526 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
5527 * point).
5528 *
5529 * FreeBSD adjusts rtt to origin 0 by subtracting 1
5530 * from the provided rtt value. This was required because
5531 * of the way t_rtttime was initialized to 1 before.
5532 * Since we changed t_rtttime to be based on
5533 * tcp_now, this extra adjustment is not needed.
5534 */
5535 delta = (rtt << TCP_DELTA_SHIFT)
5536 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
5537
5538 if ((tp->t_srtt += delta) <= 0)
5539 tp->t_srtt = 1;
5540
5541 /*
5542 * We accumulate a smoothed rtt variance (actually, a
5543 * smoothed mean difference), then set the retransmit
5544 * timer to smoothed rtt + 4 times the smoothed variance.
5545 * rttvar is stored as fixed point with 4 bits after the
5546 * binary point (scaled by 16). The following is
5547 * equivalent to rfc793 smoothing with an alpha of .75
5548 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
5549 * rfc793's wired-in beta.
5550 */
5551 if (delta < 0)
5552 delta = -delta;
5553 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
5554 if ((tp->t_rttvar += delta) <= 0)
5555 tp->t_rttvar = 1;
5556 if (tp->t_rttbest == 0 ||
5557 tp->t_rttbest > (tp->t_srtt + tp->t_rttvar))
5558 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
5559 } else {
5560 /*
5561 * No rtt measurement yet - use the unsmoothed rtt.
5562 * Set the variance to half the rtt (so our first
5563 * retransmit happens at 3*rtt).
5564 */
5565 tp->t_srtt = rtt << TCP_RTT_SHIFT;
5566 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
5567 }
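	/*
	 * Worked example (illustrative), with the scaling the comments
	 * above describe (TCP_RTT_SHIFT == 5, TCP_RTTVAR_SHIFT == 4,
	 * TCP_DELTA_SHIFT == 2): let t_srtt hold 100ms scaled by 32
	 * (3200), t_rttvar hold 20ms scaled by 16 (320), and take a
	 * new sample rtt == 120ms.
	 *
	 *	delta    = (120 << 2) - (3200 >> 3) = 480 - 400 = 80
	 *	t_srtt   = 3200 + 80 = 3280   -- 102.5ms == 7/8*100 + 1/8*120
	 *	delta    = |80| - (320 >> 2) = 0
	 *	t_rttvar = 320 + 0 = 320      -- 20ms == 3/4*20 + 1/4*|120-100|
	 */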
5568
5569compute_rto:
5570 nstat_route_rtt(tp->t_inpcb->inp_route.ro_rt, tp->t_srtt,
5571 tp->t_rttvar);
5572
5573 /*
5574 * the retransmit should happen at rtt + 4 * rttvar.
5575 * Because of the way we do the smoothing, srtt and rttvar
5576 * will each average +1/2 tick of bias. When we compute
5577 * the retransmit timer, we want 1/2 tick of rounding and
5578 * 1 extra tick because of +-1/2 tick uncertainty in the
5579 * firing of the timer. The bias will give us exactly the
5580 * 1.5 tick we need. But, because the bias is
5581 * statistical, we have to test that we don't drop below
5582 * the minimum feasible timer (which is 2 ticks).
5583 */
5584 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
5585 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX,
5586 TCP_ADD_REXMTSLOP(tp));
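	/*
	 * Continuing the example above (illustrative): TCP_REXMTVAL
	 * derives srtt + 4*rttvar from the scaled values, roughly
	 * 102.5ms + 4*20ms ~= 182ms here, and TCPT_RANGESET clamps the
	 * result to [max(t_rttmin, rtt + 2), TCPTV_REXMTMAX] before the
	 * per-connection slop is added.
	 */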
5587
5588 /*
5589 * We received an ack for a packet that wasn't retransmitted;
5590 * it is probably safe to discard any error indications we've
5591 * received recently. This isn't quite right, but close enough
5592 * for now (a route might have failed after we sent a segment,
5593 * and the return path might not be symmetrical).
5594 */
5595 tp->t_softerror = 0;
5596}
5597
5598static inline unsigned int
5599tcp_maxmtu(struct rtentry *rt)
5600{
5601 unsigned int maxmtu;
5602
5603 RT_LOCK_ASSERT_HELD(rt);
5604 if (rt->rt_rmx.rmx_mtu == 0)
5605 maxmtu = rt->rt_ifp->if_mtu;
5606 else
5607 maxmtu = MIN(rt->rt_rmx.rmx_mtu, rt->rt_ifp->if_mtu);
5608
5609 return (maxmtu);
5610}
5611
5612#if INET6
5613static inline unsigned int
5614tcp_maxmtu6(struct rtentry *rt)
5615{
5616 unsigned int maxmtu;
5617 struct nd_ifinfo *ndi = NULL;
5618
5619 RT_LOCK_ASSERT_HELD(rt);
5620 if ((ndi = ND_IFINFO(rt->rt_ifp)) != NULL && !ndi->initialized)
5621 ndi = NULL;
5622 if (ndi != NULL)
5623 lck_mtx_lock(&ndi->lock);
5624 if (rt->rt_rmx.rmx_mtu == 0)
5625 maxmtu = IN6_LINKMTU(rt->rt_ifp);
5626 else
5627 maxmtu = MIN(rt->rt_rmx.rmx_mtu, IN6_LINKMTU(rt->rt_ifp));
5628 if (ndi != NULL)
5629 lck_mtx_unlock(&ndi->lock);
5630
5631 return (maxmtu);
5632}
5633#endif
5634
5635unsigned int
5636get_maxmtu(struct rtentry *rt)
5637{
5638 unsigned int maxmtu = 0;
5639
5640 RT_LOCK_ASSERT_NOTHELD(rt);
5641
5642 RT_LOCK(rt);
5643
5644 if (rt_key(rt)->sa_family == AF_INET6) {
5645 maxmtu = tcp_maxmtu6(rt);
5646 } else {
5647 maxmtu = tcp_maxmtu(rt);
5648 }
5649
5650 RT_UNLOCK(rt);
5651
5652 return (maxmtu);
5653}
5654
5655/*
5656 * Determine a reasonable value for maxseg size.
5657 * If the route is known, check route for mtu.
5658 * If none, use an mss that can be handled on the outgoing
5659 * interface without forcing IP to fragment; if bigger than
5660 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
5661 * to utilize large mbufs. If no route is found, route has no mtu,
5662 * or the destination isn't local, use a default, hopefully conservative
5663 * size (usually 512 or the default IP max size, but no more than the mtu
5664 * of the interface), as we can't discover anything about intervening
5665 * gateways or networks. We also initialize the congestion/slow start
5666 * window. While looking at the routing entry, we also initialize
5667 * other path-dependent parameters from pre-set or cached values
5668 * in the routing entry.
5669 *
5670 * Also take into account the space needed for options that we
5671 * send regularly. Make maxseg shorter by that amount to assure
5672 * that we can send maxseg amount of data even when the options
5673 * are present. Store the upper limit of the length of options plus
5674 * data in maxopd.
5675 *
5676 * NOTE that this routine is only called when we process an incoming
5677 * segment; for outgoing segments only tcp_mssopt is called.
5678 *
5679 */
5680void
5681tcp_mss(struct tcpcb *tp, int offer, unsigned int input_ifscope)
5682{
5683 struct rtentry *rt;
5684 struct ifnet *ifp;
5685 int rtt, mss;
5686 u_int32_t bufsize;
5687 struct inpcb *inp;
5688 struct socket *so;
5689 struct rmxp_tao *taop;
5690 int origoffer = offer;
5691 u_int32_t sb_max_corrected;
5692 int isnetlocal = 0;
5693#if INET6
5694 int isipv6;
5695 int min_protoh;
5696#endif
5697
5698 inp = tp->t_inpcb;
5699#if INET6
5700 isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
5701 min_protoh = isipv6 ? sizeof (struct ip6_hdr) + sizeof (struct tcphdr)
5702 : sizeof (struct tcpiphdr);
5703#else
5704#define min_protoh (sizeof (struct tcpiphdr))
5705#endif
5706
5707#if INET6
5708 if (isipv6) {
5709 rt = tcp_rtlookup6(inp, input_ifscope);
5710 }
5711 else
5712#endif /* INET6 */
5713 {
5714 rt = tcp_rtlookup(inp, input_ifscope);
5715 }
5716 isnetlocal = (tp->t_flags & TF_LOCAL);
5717
5718 if (rt == NULL) {
5719 tp->t_maxopd = tp->t_maxseg =
5720#if INET6
5721 isipv6 ? tcp_v6mssdflt :
5722#endif /* INET6 */
5723 tcp_mssdflt;
5724 return;
5725 }
5726 ifp = rt->rt_ifp;
5727 /*
5728 * Slower link window correction:
5729 * If a value is specified for slowlink_wsize, use it for
5730 * PPP links believed to be on a serial modem (speed <128Kbps).
5731 * Excludes 9600bps as it is the default value advertised
5732 * by pseudo-devices over PPP.
5733 */
5734 if (ifp->if_type == IFT_PPP && slowlink_wsize > 0 &&
5735 ifp->if_baudrate > 9600 && ifp->if_baudrate <= 128000) {
5736 tp->t_flags |= TF_SLOWLINK;
5737 }
5738 so = inp->inp_socket;
5739
5740 taop = rmx_taop(rt->rt_rmx);
5741 /*
5742 * Offer == -1 means that we didn't receive SYN yet,
5743 * use the cached value in that case.
5744 */
5745 if (offer == -1)
5746 offer = taop->tao_mssopt;
5747 /*
5748 * Offer == 0 means that there was no MSS on the SYN segment,
5749 * in this case we use tcp_mssdflt.
5750 */
5751 if (offer == 0)
5752 offer =
5753#if INET6
5754 isipv6 ? tcp_v6mssdflt :
5755#endif /* INET6 */
5756 tcp_mssdflt;
5757 else {
5758 /*
5759 * Prevent DoS attack with too small MSS. Round up
5760 * to at least minmss.
5761 */
5762 offer = max(offer, tcp_minmss);
5763 /*
5764 * Sanity check: make sure that maxopd will be large
5765 * enough to allow some data on segments even if all
5766 * the option space is used (40 bytes). Otherwise
5767 * funny things may happen in tcp_output.
5768 */
5769 offer = max(offer, 64);
5770 }
5771 taop->tao_mssopt = offer;
5772
5773 /*
5774 * While we're here, check if there's an initial rtt
5775 * or rttvar. Convert from the route-table units
5776 * to scaled multiples of the slow timeout timer.
5777 */
5778 if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt) != 0) {
5779 tcp_getrt_rtt(tp, rt);
5780 } else {
5781 tp->t_rttmin = isnetlocal ? tcp_TCPTV_MIN : TCPTV_REXMTMIN;
5782 }
5783
5784#if INET6
5785 mss = (isipv6 ? tcp_maxmtu6(rt) : tcp_maxmtu(rt));
5786#else
5787 mss = tcp_maxmtu(rt);
5788#endif
5789
5790#if NECP
5791 // At this point, the mss is just the MTU. Adjust if necessary.
5792 mss = necp_socket_get_effective_mtu(inp, mss);
5793#endif /* NECP */
5794
5795 mss -= min_protoh;
5796
5797 if (rt->rt_rmx.rmx_mtu == 0) {
5798#if INET6
5799 if (isipv6) {
5800 if (!isnetlocal)
5801 mss = min(mss, tcp_v6mssdflt);
5802 } else
5803#endif /* INET6 */
5804 if (!isnetlocal)
5805 mss = min(mss, tcp_mssdflt);
5806 }
5807
5808 mss = min(mss, offer);
5809 /*
5810 * maxopd stores the maximum length of data AND options
5811 * in a segment; maxseg is the amount of data in a normal
5812 * segment. We need to store this value (maxopd) apart
5813 * from maxseg, because now every segment carries options
5814 * and thus we normally have somewhat less data in segments.
5815 */
5816 tp->t_maxopd = mss;
5817
5818 /*
5819 * origoffer == -1 indicates that no segments were received yet.
5820 * In this case we just guess.
5821 */
5822 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
5823 (origoffer == -1 ||
5824 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
5825 mss -= TCPOLEN_TSTAMP_APPA;
5826
5827#if MPTCP
5828 mss -= mptcp_adj_mss(tp, FALSE);
5829#endif /* MPTCP */
5830 tp->t_maxseg = mss;
5831
5832 /*
5833 * Calculate corrected value for sb_max; be sure to promote the
5834 * numerator to 64 bits for large sb_max values, else it will overflow.
5835 */
5836 sb_max_corrected = (sb_max * (u_int64_t)MCLBYTES) / (MSIZE + MCLBYTES);
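	/*
	 * Illustrative: assuming MSIZE == 256 and MCLBYTES == 2048,
	 * this scales sb_max by 2048/2304, i.e. roughly 8/9, to
	 * account for the mbuf header overhead that accompanies each
	 * cluster of buffered data.
	 */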
5837
5838 /*
5839 * If there's a pipesize (i.e., loopback), change the socket
5840 * buffer to that size only if it's bigger than the current
5841 * sockbuf size. Make the socket buffers an integral
5842 * number of mss units; if the mss is larger than
5843 * the socket buffer, decrease the mss.
5844 */
5845#if RTV_SPIPE
5846 bufsize = rt->rt_rmx.rmx_sendpipe;
5847 if (bufsize < so->so_snd.sb_hiwat)
5848#endif
5849 bufsize = so->so_snd.sb_hiwat;
5850 if (bufsize < mss)
5851 mss = bufsize;
5852 else {
5853 bufsize = (((bufsize + (u_int64_t)mss - 1) / (u_int64_t)mss) * (u_int64_t)mss);
5854 if (bufsize > sb_max_corrected)
5855 bufsize = sb_max_corrected;
5856 (void)sbreserve(&so->so_snd, bufsize);
5857 }
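	/*
	 * Illustrative: with mss == 1448 and a 131072-byte send buffer,
	 * the buffer is rounded up to the next multiple of mss,
	 * ((131072 + 1447) / 1448) * 1448 == 131768 bytes, so a full
	 * buffer always drains as whole segments.
	 */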
5858 tp->t_maxseg = mss;
5859
5860 /*
5861 * Update MSS using recommendation from link status report. This is
5862 * temporary
5863 */
5864 tcp_update_mss_locked(so, ifp);
5865
5866#if RTV_RPIPE
5867 bufsize = rt->rt_rmx.rmx_recvpipe;
5868 if (bufsize < so->so_rcv.sb_hiwat)
5869#endif
5870 bufsize = so->so_rcv.sb_hiwat;
5871 if (bufsize > mss) {
5872 bufsize = (((bufsize + (u_int64_t)mss - 1) / (u_int64_t)mss) * (u_int64_t)mss);
5873 if (bufsize > sb_max_corrected)
5874 bufsize = sb_max_corrected;
5875 (void)sbreserve(&so->so_rcv, bufsize);
5876 }
5877
5878 set_tcp_stream_priority(so);
5879
5880 if (rt->rt_rmx.rmx_ssthresh) {
5881 /*
5882 * There's some sort of gateway or interface
5883 * buffer limit on the path. Use this to set
5884 * slow-start threshold, but set the threshold to
5885 * no less than 2*mss.
5886 */
5887 tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh);
5888 tcpstat.tcps_usedssthresh++;
5889 } else {
5890 tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
5891 }
5892
5893 /*
5894 * Set the slow-start flight size depending on whether this
5895 * is a local network or not.
5896 */
5897 if (CC_ALGO(tp)->cwnd_init != NULL)
5898 CC_ALGO(tp)->cwnd_init(tp);
5899
5900 tcp_ccdbg_trace(tp, NULL, TCP_CC_CWND_INIT);
5901
5902 /* Route locked during lookup above */
5903 RT_UNLOCK(rt);
5904}
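/*
 * Worked example (illustrative): on an IPv4 path with a route MTU of
 * 1500, mss starts at 1500 - min_protoh (40) = 1460.  If both sides
 * negotiated timestamps, TCPOLEN_TSTAMP_APPA (12) is subtracted as
 * well, leaving t_maxseg at 1448 while t_maxopd stays at 1460.
 */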
5905
5906/*
5907 * Determine the MSS option to send on an outgoing SYN.
5908 */
5909int
5910tcp_mssopt(struct tcpcb *tp)
5911{
5912 struct rtentry *rt;
5913 int mss;
5914#if INET6
5915 int isipv6;
5916 int min_protoh;
5917#endif
5918
5919#if INET6
5920 isipv6 = ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
5921 min_protoh = isipv6 ? sizeof (struct ip6_hdr) + sizeof (struct tcphdr)
5922 : sizeof (struct tcpiphdr);
5923#else
5924#define min_protoh (sizeof (struct tcpiphdr))
5925#endif
5926
5927#if INET6
5928 if (isipv6)
5929 rt = tcp_rtlookup6(tp->t_inpcb, IFSCOPE_NONE);
5930 else
5931#endif /* INET6 */
5932 rt = tcp_rtlookup(tp->t_inpcb, IFSCOPE_NONE);
5933 if (rt == NULL) {
5934 return (
5935#if INET6
5936 isipv6 ? tcp_v6mssdflt :
5937#endif /* INET6 */
5938 tcp_mssdflt);
5939 }
5940 /*
5941 * Slower link window correction:
5942 * If a value is specified for slowlink_wsize, use it for PPP links
5943 * believed to be on a serial modem (speed <128Kbps). Excludes 9600bps as
5944 * it is the default value advertised by pseudo-devices over PPP.
5945 */
5946 if (rt->rt_ifp->if_type == IFT_PPP && slowlink_wsize > 0 &&
5947 rt->rt_ifp->if_baudrate > 9600 && rt->rt_ifp->if_baudrate <= 128000) {
5948 tp->t_flags |= TF_SLOWLINK;
5949 }
5950
5951#if INET6
5952 mss = (isipv6 ? tcp_maxmtu6(rt) : tcp_maxmtu(rt));
5953#else
5954 mss = tcp_maxmtu(rt);
5955#endif
5956 /* Route locked during lookup above */
5957 RT_UNLOCK(rt);
5958
5959#if NECP
5960 // At this point, the mss is just the MTU. Adjust if necessary.
5961 mss = necp_socket_get_effective_mtu(tp->t_inpcb, mss);
5962#endif /* NECP */
5963
5964 return (mss - min_protoh);
5965}
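/*
 * Illustrative: for an IPv6 connection min_protoh is
 * sizeof (struct ip6_hdr) + sizeof (struct tcphdr) == 60, so a
 * 1500-byte MTU yields an advertised MSS option of 1440, versus
 * 1460 for IPv4.
 */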
5966
5967/*
5968 * When a partial ack arrives, force the retransmission of the
5969 * next unacknowledged segment. Do not clear tp->t_dupacks.
5970 * By setting snd_nxt to th_ack, this forces the retransmission
5971 * timer to be started again.
5972 */
5973static void
5974tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
5975{
5976 tcp_seq onxt = tp->snd_nxt;
5977 u_int32_t ocwnd = tp->snd_cwnd;
5978 tp->t_timer[TCPT_REXMT] = 0;
5979 tp->t_timer[TCPT_PTO] = 0;
5980 tp->t_rtttime = 0;
5981 tp->snd_nxt = th->th_ack;
5982 /*
5983 * Set snd_cwnd to one segment beyond acknowledged offset
5984 * (tp->snd_una has not yet been updated when this function
5985 * is called)
5986 */
5987 tp->snd_cwnd = tp->t_maxseg + BYTES_ACKED(th, tp);
5988 tp->t_flags |= TF_ACKNOW;
5989 (void) tcp_output(tp);
5990 tp->snd_cwnd = ocwnd;
5991 if (SEQ_GT(onxt, tp->snd_nxt))
5992 tp->snd_nxt = onxt;
5993 /*
5994 * Partial window deflation. Relies on fact that tp->snd_una
5995 * not updated yet.
5996 */
5997 if (tp->snd_cwnd > BYTES_ACKED(th, tp))
5998 tp->snd_cwnd -= BYTES_ACKED(th, tp);
5999 else
6000 tp->snd_cwnd = 0;
6001 tp->snd_cwnd += tp->t_maxseg;
6002}
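/*
 * Worked example (illustrative, NewReno-style partial-window deflation
 * as in RFC 6582): with t_maxseg == 1448, an old snd_cwnd of 14480 and
 * a partial ACK covering 2896 bytes, the retransmit goes out under a
 * temporary cwnd of 1448 + 2896, after which the restored cwnd is
 * deflated to 14480 - 2896 + 1448 == 13032 bytes, keeping the amount
 * of data in flight roughly constant across the retransmission.
 */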
6003
6004/*
6005 * Drop a random TCP connection that hasn't been serviced yet and
6006 * is eligible for discard. There is a one in qlen chance that
6007 * we will return a null, saying that there are no droppable
6008 * requests. In this case, the protocol-specific code should drop
6009 * the new request. This ensures fairness.
6010 *
6011 * The listening TCP socket "head" must be locked
6012 */
6013static int
6014tcp_dropdropablreq(struct socket *head)
6015{
6016 struct socket *so, *sonext;
6017 unsigned int i, j, qlen;
6018 static u_int32_t rnd = 0;
6019 static u_int64_t old_runtime;
6020 static unsigned int cur_cnt, old_cnt;
6021 u_int64_t now_sec;
6022 struct inpcb *inp = NULL;
6023 struct tcpcb *tp;
6024
6025 if ((head->so_options & SO_ACCEPTCONN) == 0)
6026 return (0);
6027
6028 if (TAILQ_EMPTY(&head->so_incomp))
6029 return (0);
6030
6031 so_acquire_accept_list(head, NULL);
6032 socket_unlock(head, 0);
6033
6034 /*
6035 * Check if there is any socket in the incomp queue
6036 * that is closed because of a reset from the peer and is
6037 * waiting to be garbage collected. If so, pick that as
6038 * the victim.
6039 */
6040 TAILQ_FOREACH_SAFE(so, &head->so_incomp, so_list, sonext) {
6041 inp = sotoinpcb(so);
6042 tp = intotcpcb(inp);
6043 if (tp != NULL && tp->t_state == TCPS_CLOSED &&
6044 so->so_head != NULL &&
6045 (so->so_state & (SS_INCOMP|SS_CANTSENDMORE|SS_CANTRCVMORE)) ==
6046 (SS_INCOMP|SS_CANTSENDMORE|SS_CANTRCVMORE)) {
6047 /*
6048 * The listen socket is already locked but we
6049 * can lock this socket here without lock ordering
6050 * issues because it is in the incomp queue and
6051 * is not visible to others.
6052 */
6053 if (socket_try_lock(so)) {
6054 so->so_usecount++;
6055 goto found_victim;
6056 } else {
6057 continue;
6058 }
6059 }
6060 }
6061
6062 so = TAILQ_FIRST(&head->so_incomp);
6063
6064 now_sec = net_uptime();
6065 if ((i = (now_sec - old_runtime)) != 0) {
6066 old_runtime = now_sec;
6067 old_cnt = cur_cnt / i;
6068 cur_cnt = 0;
6069 }
6070
6071 qlen = head->so_incqlen;
6072 if (rnd == 0)
6073 rnd = RandomULong();
6074
6075 if (++cur_cnt > qlen || old_cnt > qlen) {
6076 rnd = (314159 * rnd + 66329) & 0xffff;
6077 j = ((qlen + 1) * rnd) >> 16;
6078
6079 while (j-- && so)
6080 so = TAILQ_NEXT(so, so_list);
6081 }
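	/*
	 * Illustrative: rnd above is a 16-bit linear congruential
	 * value, so ((qlen + 1) * rnd) >> 16 scales it into the range
	 * [0, qlen] without a division.  E.g. qlen == 9 and
	 * rnd == 0x8000 picks index (10 * 32768) >> 16 == 5.
	 */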
6082 /* Find a connection that is not already closing (or being served) */
6083 while (so) {
6084 inp = (struct inpcb *)so->so_pcb;
6085
6086 sonext = TAILQ_NEXT(so, so_list);
6087
6088 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) != WNT_STOPUSING) {
6089 /*
6090 * Avoid the issue of a socket being accepted
6091 * by one input thread and being dropped by
6092 * another input thread. If we can't get a hold
6093 * on this mutex, then grab the next socket in
6094 * line.
6095 */
6096 if (socket_try_lock(so)) {
6097 so->so_usecount++;
6098 if ((so->so_usecount == 2) &&
6099 (so->so_state & SS_INCOMP) &&
6100 !(so->so_flags & SOF_INCOMP_INPROGRESS)) {
6101 break;
6102 } else {
6103 /*
6104 * don't use if being accepted or
6105 * used in any other way
6106 */
6107 in_pcb_checkstate(inp, WNT_RELEASE, 1);
6108 socket_unlock(so, 1);
6109 }
6110 } else {
6111 /*
6112 * do not try to lock the inp in
6113 * in_pcb_checkstate because the lock
6114 * is already held by some other thread.
6115 * Only drop the inp_wantcnt reference.
6116 */
6117 in_pcb_checkstate(inp, WNT_RELEASE, 1);
6118 }
6119 }
6120 so = sonext;
6121 }
6122 if (so == NULL) {
6123 socket_lock(head, 0);
6124 so_release_accept_list(head);
6125 return (0);
6126 }
6127
6128 /* Make sure the socket is still in the right state to be discarded */
6129
6130 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
6131 socket_unlock(so, 1);
6132 socket_lock(head, 0);
6133 so_release_accept_list(head);
6134 return (0);
6135 }
6136
6137found_victim:
6138 if (so->so_usecount != 2 || !(so->so_state & SS_INCOMP)) {
6139 /* do not discard: that socket is being accepted */
6140 socket_unlock(so, 1);
6141 socket_lock(head, 0);
6142 so_release_accept_list(head);
6143 return (0);
6144 }
6145
6146 socket_lock(head, 0);
6147 TAILQ_REMOVE(&head->so_incomp, so, so_list);
6148 head->so_incqlen--;
6149 head->so_qlen--;
6150 so->so_state &= ~SS_INCOMP;
6151 so->so_flags |= SOF_OVERFLOW;
6152 so->so_head = NULL;
6153 so_release_accept_list(head);
6154 socket_unlock(head, 0);
6155
6156 socket_lock_assert_owned(so);
6157 tp = sototcpcb(so);
6158
6159 tcp_close(tp);
6160 if (inp->inp_wantcnt > 0 && inp->inp_wantcnt != WNT_STOPUSING) {
6161 /*
6162 * Someone has a wantcnt on this pcb. Since WNT_ACQUIRE
6163 * doesn't require a lock, it could have happened while
6164 * we were holding the lock. This pcb will have to
6165 * be garbage collected later.
6166 * Release the reference held for the so_incomp queue.
6167 */
6168 VERIFY(so->so_usecount > 0);
6169 so->so_usecount--;
6170 socket_unlock(so, 1);
6171 } else {
6172 /*
6173 * Unlock this socket and leave the reference on.
6174 * We need to acquire the pcbinfo lock in order to
6175 * fully dispose of it.
6176 */
6177 socket_unlock(so, 0);
6178
6179 lck_rw_lock_exclusive(tcbinfo.ipi_lock);
6180
6181 socket_lock(so, 0);
6182 /* Release the reference held for so_incomp queue */
6183 VERIFY(so->so_usecount > 0);
6184 so->so_usecount--;
6185
6186 if (so->so_usecount != 1 ||
6187 (inp->inp_wantcnt > 0 &&
6188 inp->inp_wantcnt != WNT_STOPUSING)) {
6189 /*
6190 * There is an extra wantcount or usecount
6191 * that must have been added when the socket
6192 * was unlocked. This socket will have to be
6193 * garbage collected later
6194 */
6195 socket_unlock(so, 1);
6196 } else {
6197 /* Drop the reference held for this function */
6198 VERIFY(so->so_usecount > 0);
6199 so->so_usecount--;
6200
6201 in_pcbdispose(inp);
6202 }
6203 lck_rw_done(tcbinfo.ipi_lock);
6204 }
6205 tcpstat.tcps_drops++;
6206
6207 socket_lock(head, 0);
6208 return (1);
6209}
6210
6211/* Set background congestion control on a socket */
6212void
6213tcp_set_background_cc(struct socket *so)
6214{
6215 tcp_set_new_cc(so, TCP_CC_ALGO_BACKGROUND_INDEX);
6216}
6217
6218/* Set foreground congestion control on a socket */
6219void
6220tcp_set_foreground_cc(struct socket *so)
6221{
6222 if (tcp_use_newreno)
6223 tcp_set_new_cc(so, TCP_CC_ALGO_NEWRENO_INDEX);
6224 else
6225 tcp_set_new_cc(so, TCP_CC_ALGO_CUBIC_INDEX);
6226}
6227
6228static void
6229tcp_set_new_cc(struct socket *so, uint16_t cc_index)
6230{
6231 struct inpcb *inp = sotoinpcb(so);
6232 struct tcpcb *tp = intotcpcb(inp);
6233 u_char old_cc_index = 0;
6234 if (tp->tcp_cc_index != cc_index) {
6235
6236 old_cc_index = tp->tcp_cc_index;
6237
6238 if (CC_ALGO(tp)->cleanup != NULL)
6239 CC_ALGO(tp)->cleanup(tp);
6240 tp->tcp_cc_index = cc_index;
6241
6242 tcp_cc_allocate_state(tp);
6243
6244 if (CC_ALGO(tp)->switch_to != NULL)
6245 CC_ALGO(tp)->switch_to(tp, old_cc_index);
6246
6247 tcp_ccdbg_trace(tp, NULL, TCP_CC_CHANGE_ALGO);
6248 }
6249}
6250
6251void
6252tcp_set_recv_bg(struct socket *so)
6253{
6254 if (!IS_TCP_RECV_BG(so))
6255 so->so_flags1 |= SOF1_TRAFFIC_MGT_TCP_RECVBG;
6256
6257 /* Unset Large Receive Offload on background sockets */
6258 so_set_lro(so, SO_TC_BK);
6259}
6260
6261void
6262tcp_clear_recv_bg(struct socket *so)
6263{
6264 if (IS_TCP_RECV_BG(so))
6265 so->so_flags1 &= ~(SOF1_TRAFFIC_MGT_TCP_RECVBG);
6266
6267 /*
6268 * Set/unset use of Large Receive Offload depending on
6269 * the traffic class
6270 */
6271 so_set_lro(so, so->so_traffic_class);
6272}
6273
6274void
6275inp_fc_unthrottle_tcp(struct inpcb *inp)
6276{
6277 struct tcpcb *tp = inp->inp_ppcb;
6278 /*
6279 * Back off the slow-start threshold and enter
6280 * congestion avoidance phase
6281 */
6282 if (CC_ALGO(tp)->pre_fr != NULL)
6283 CC_ALGO(tp)->pre_fr(tp);
6284
6285 tp->snd_cwnd = tp->snd_ssthresh;
6286 tp->t_flagsext &= ~TF_CWND_NONVALIDATED;
6287 /*
6288 * Restart counting for ABC as we changed the
6289 * congestion window just now.
6290 */
6291 tp->t_bytes_acked = 0;
6292
6293 /* Reset retransmit shift as we know that the reason
6294 * for delay in sending a packet is due to flow
6295 * control on the outgoing interface. There is no need
6296 * to backoff retransmit timer.
6297 */
6298 TCP_RESET_REXMT_STATE(tp);
6299
6300 /*
6301 * Start the output stream again. Since we are
6302 * not retransmitting data, do not reset the
6303 * retransmit timer or rtt calculation.
6304 */
6305 tcp_output(tp);
6306}
6307
6308static int
6309tcp_getstat SYSCTL_HANDLER_ARGS
6310{
6311#pragma unused(oidp, arg1, arg2)
6312
6313 int error;
6314 struct tcpstat *stat;
6315 stat = &tcpstat;
6316#if !CONFIG_EMBEDDED
6317 proc_t caller = PROC_NULL;
6318 proc_t caller_parent = PROC_NULL;
6319 char command_name[MAXCOMLEN + 1] = "";
6320 char parent_name[MAXCOMLEN + 1] = "";
6321 struct tcpstat zero_stat;
6322 if ((caller = proc_self()) != PROC_NULL) {
6323 /* get process name */
6324 strlcpy(command_name, caller->p_comm, sizeof(command_name));
6325
6326 /* get parent process name if possible */
6327 if ((caller_parent = proc_find(caller->p_ppid)) != PROC_NULL) {
6328 strlcpy(parent_name, caller_parent->p_comm,
6329 sizeof(parent_name));
6330 proc_rele(caller_parent);
6331 }
6332
6333 if ((escape_str(command_name, strlen(command_name),
6334 sizeof(command_name)) == 0) &&
6335 (escape_str(parent_name, strlen(parent_name),
6336 sizeof(parent_name)) == 0)) {
6337 kern_asl_msg(LOG_DEBUG, "messagetracer",
6338 5,
6339 "com.apple.message.domain",
6340 "com.apple.kernel.tcpstat", /* 1 */
6341 "com.apple.message.signature",
6342 "tcpstat", /* 2 */
6343 "com.apple.message.signature2", command_name, /* 3 */
6344 "com.apple.message.signature3", parent_name, /* 4 */
6345 "com.apple.message.summarize", "YES", /* 5 */
6346 NULL);
6347 }
6348 }
6349 if (caller != PROC_NULL)
6350 proc_rele(caller);
6351 if (tcp_disable_access_to_stats &&
6352 !kauth_cred_issuser(kauth_cred_get())) {
6353 bzero(&zero_stat, sizeof(zero_stat));
6354 stat = &zero_stat;
6355 }
6356
6357#endif /* !CONFIG_EMBEDDED */
6358
6359 if (req->oldptr == 0) {
6360 req->oldlen = (size_t)sizeof(struct tcpstat);
6361 }
6362
6363 error = SYSCTL_OUT(req, stat, MIN(sizeof (tcpstat), req->oldlen));
6364
6365 return (error);
6366
6367}
6368
6369/*
6370 * Checksum extended TCP header and data.
6371 */
6372int
6373tcp_input_checksum(int af, struct mbuf *m, struct tcphdr *th, int off, int tlen)
6374{
6375 struct ifnet *ifp = m->m_pkthdr.rcvif;
6376
6377 switch (af) {
6378 case AF_INET: {
6379 struct ip *ip = mtod(m, struct ip *);
6380 struct ipovly *ipov = (struct ipovly *)ip;
6381
6382 if (m->m_pkthdr.pkt_flags & PKTF_SW_LRO_DID_CSUM)
6383 return (0);
6384
6385 /* ip_stripoptions() must have been called before we get here */
6386 ASSERT((ip->ip_hl << 2) == sizeof (*ip));
6387
6388 if ((hwcksum_rx || (ifp->if_flags & IFF_LOOPBACK) ||
6389 (m->m_pkthdr.pkt_flags & PKTF_LOOP)) &&
6390 (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)) {
6391 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
6392 th->th_sum = m->m_pkthdr.csum_rx_val;
6393 } else {
6394 uint32_t sum = m->m_pkthdr.csum_rx_val;
6395 uint32_t start = m->m_pkthdr.csum_rx_start;
6396 int32_t trailer = (m_pktlen(m) - (off + tlen));
6397
6398 /*
6399 * Perform 1's complement adjustment of octets
6400 * that got included/excluded in the hardware-
6401 * calculated checksum value. Ignore cases
6402 * where the value already includes the entire
6403 * IP header span, as the sum for those octets
6404 * would already be 0 by the time we get here;
6405 * IP has already performed its header checksum
6406 * checks. If we do need to adjust, restore
6407 * the original fields in the IP header when
6408 * computing the adjustment value. Also take
6409 * care of any trailing bytes and subtract out
6410 * their partial sum.
6411 */
6412 ASSERT(trailer >= 0);
6413 if ((m->m_pkthdr.csum_flags & CSUM_PARTIAL) &&
6414 ((start != 0 && start != off) || trailer)) {
6415 uint32_t swbytes = (uint32_t)trailer;
6416
6417 if (start < off) {
6418 ip->ip_len += sizeof (*ip);
6419#if BYTE_ORDER != BIG_ENDIAN
6420 HTONS(ip->ip_len);
6421 HTONS(ip->ip_off);
6422#endif /* BYTE_ORDER != BIG_ENDIAN */
6423 }
6424 /* callee folds in sum */
6425 sum = m_adj_sum16(m, start, off,
6426 tlen, sum);
6427 if (off > start)
6428 swbytes += (off - start);
6429 else
6430 swbytes += (start - off);
6431
6432 if (start < off) {
6433#if BYTE_ORDER != BIG_ENDIAN
6434 NTOHS(ip->ip_off);
6435 NTOHS(ip->ip_len);
6436#endif /* BYTE_ORDER != BIG_ENDIAN */
6437 ip->ip_len -= sizeof (*ip);
6438 }
6439
6440 if (swbytes != 0)
6441 tcp_in_cksum_stats(swbytes);
6442 if (trailer != 0)
6443 m_adj(m, -trailer);
6444 }
6445
6446 /* callee folds in sum */
6447 th->th_sum = in_pseudo(ip->ip_src.s_addr,
6448 ip->ip_dst.s_addr,
6449 sum + htonl(tlen + IPPROTO_TCP));
6450 }
6451 th->th_sum ^= 0xffff;
6452 } else {
6453 uint16_t ip_sum;
6454 int len;
6455 char b[9];
6456
6457 bcopy(ipov->ih_x1, b, sizeof (ipov->ih_x1));
6458 bzero(ipov->ih_x1, sizeof (ipov->ih_x1));
6459 ip_sum = ipov->ih_len;
6460 ipov->ih_len = (u_short)tlen;
6461#if BYTE_ORDER != BIG_ENDIAN
6462 HTONS(ipov->ih_len);
6463#endif
6464 len = sizeof (struct ip) + tlen;
6465 th->th_sum = in_cksum(m, len);
6466 bcopy(b, ipov->ih_x1, sizeof (ipov->ih_x1));
6467 ipov->ih_len = ip_sum;
6468
6469 tcp_in_cksum_stats(len);
6470 }
6471 break;
6472 }
6473#if INET6
6474 case AF_INET6: {
6475 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
6476
6477 if (m->m_pkthdr.pkt_flags & PKTF_SW_LRO_DID_CSUM)
6478 return (0);
6479
6480 if ((hwcksum_rx || (ifp->if_flags & IFF_LOOPBACK) ||
6481 (m->m_pkthdr.pkt_flags & PKTF_LOOP)) &&
6482 (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)) {
6483 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
6484 th->th_sum = m->m_pkthdr.csum_rx_val;
6485 } else {
6486 uint32_t sum = m->m_pkthdr.csum_rx_val;
6487 uint32_t start = m->m_pkthdr.csum_rx_start;
6488 int32_t trailer = (m_pktlen(m) - (off + tlen));
6489
6490 /*
6491 * Perform 1's complement adjustment of octets
6492 * that got included/excluded in the hardware-
6493 * calculated checksum value. Also take care
6494 * of any trailing bytes and subtract out their
6495 * partial sum.
6496 */
6497 ASSERT(trailer >= 0);
6498 if ((m->m_pkthdr.csum_flags & CSUM_PARTIAL) &&
6499 (start != off || trailer != 0)) {
6500 uint16_t s = 0, d = 0;
6501 uint32_t swbytes = (uint32_t)trailer;
6502
6503 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
6504 s = ip6->ip6_src.s6_addr16[1];
6505 ip6->ip6_src.s6_addr16[1] = 0;
6506 }
6507 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
6508 d = ip6->ip6_dst.s6_addr16[1];
6509 ip6->ip6_dst.s6_addr16[1] = 0;
6510 }
6511
6512 /* callee folds in sum */
6513 sum = m_adj_sum16(m, start, off,
6514 tlen, sum);
6515 if (off > start)
6516 swbytes += (off - start);
6517 else
6518 swbytes += (start - off);
6519
6520 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src))
6521 ip6->ip6_src.s6_addr16[1] = s;
6522 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst))
6523 ip6->ip6_dst.s6_addr16[1] = d;
6524
6525 if (swbytes != 0)
6526 tcp_in6_cksum_stats(swbytes);
6527 if (trailer != 0)
6528 m_adj(m, -trailer);
6529 }
6530
6531 th->th_sum = in6_pseudo(
6532 &ip6->ip6_src, &ip6->ip6_dst,
6533 sum + htonl(tlen + IPPROTO_TCP));
6534 }
6535 th->th_sum ^= 0xffff;
6536 } else {
6537 tcp_in6_cksum_stats(tlen);
6538 th->th_sum = in6_cksum(m, IPPROTO_TCP, off, tlen);
6539 }
6540 break;
6541 }
6542#endif /* INET6 */
6543 default:
6544 VERIFY(0);
6545 /* NOTREACHED */
6546 }
6547
6548 if (th->th_sum != 0) {
6549 tcpstat.tcps_rcvbadsum++;
6550 IF_TCP_STATINC(ifp, badformat);
6551 return (-1);
6552 }
6553
6554 return (0);
6555}
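/*
 * Note (illustrative): in both address families the code above folds
 * the pseudo-header and payload sum against the checksum field that
 * the sender stored, so a valid segment leaves th_sum == 0; anything
 * non-zero is counted in tcps_rcvbadsum and the segment is dropped.
 */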
6556
6557SYSCTL_PROC(_net_inet_tcp, TCPCTL_STATS, stats,
6558 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, tcp_getstat,
6559 "S,tcpstat", "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
6560
6561static int
6562sysctl_rexmtthresh SYSCTL_HANDLER_ARGS
6563{
6564#pragma unused(arg1, arg2)
6565
6566 int error, val = tcprexmtthresh;
6567
6568 error = sysctl_handle_int(oidp, &val, 0, req);
6569 if (error || !req->newptr)
6570 return (error);
6571
6572 /*
6573 * Constrain the number of duplicate ACKs
6574 * to consider for TCP fast retransmit
6575 * to either 2 or 3
6576 */
6577
6578 if (val < 2 || val > 3)
6579 return (EINVAL);
6580
6581 tcprexmtthresh = val;
6582
6583 return (0);
6584}
6585
6586SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmt_thresh, CTLTYPE_INT | CTLFLAG_RW |
6587 CTLFLAG_LOCKED, &tcprexmtthresh, 0, &sysctl_rexmtthresh, "I",
6588 "Duplicate ACK Threshold for Fast Retransmit");