/*
 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 * $FreeBSD: src/sys/netinet/tcp_input.c,v 1.107.2.16 2001/08/22 00:59:12 silby Exp $
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>
#include <sys/mcache.h>
#include <sys/kasl.h>

#include <kern/cpu_number.h>	/* before tcp_seq.h, for tcp_random18() */

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/ntstat.h>
#include <net/dlil.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* for ICMP_BANDLIM */
#include <netinet/in_var.h>
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <mach/sdt.h>
#if INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_cc.h>
#include <dev/random/randomdev.h>
#include <kern/zalloc.h>
#if INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#if TCPDEBUG
#include <netinet/tcp_debug.h>
u_char tcp_saveipgen[40]; /* the size must be of max ip header, now IPv6 */
struct tcphdr tcp_savetcp;
#endif /* TCPDEBUG */

#if IPSEC
#include <netinet6/ipsec.h>
#if INET6
#include <netinet6/ipsec6.h>
#endif
#include <netkey/key.h>
#endif /*IPSEC*/

#if CONFIG_MACF_NET || CONFIG_MACF_SOCKET
#include <security/mac_framework.h>
#endif /* CONFIG_MACF_NET || CONFIG_MACF_SOCKET */

#include <sys/kdebug.h>
#include <netinet/lro_ext.h>
#if MPTCP
#include <netinet/mptcp_var.h>
#include <netinet/mptcp.h>
#include <netinet/mptcp_opt.h>
#endif /* MPTCP */

#define DBG_LAYER_BEG		NETDBG_CODE(DBG_NETTCP, 0)
#define DBG_LAYER_END		NETDBG_CODE(DBG_NETTCP, 2)
#define DBG_FNC_TCP_INPUT	NETDBG_CODE(DBG_NETTCP, (3 << 8))
#define DBG_FNC_TCP_NEWCONN	NETDBG_CODE(DBG_NETTCP, (7 << 8))

tcp_cc	tcp_ccgen;

#if IPSEC
extern int ipsec_bypass;
#endif

extern int32_t total_sbmb_cnt;

struct tcpstat tcpstat;

static int log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW | CTLFLAG_LOCKED,
	&log_in_vain, 0, "Log all incoming TCP connections");

static int blackhole = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW | CTLFLAG_LOCKED,
	&blackhole, 0, "Do not send RST when dropping refused connections");

int tcp_delack_enabled = 3;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_delack_enabled, 0,
	"Delay ACK to try and piggyback it onto a data packet");

int tcp_lq_overflow = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_lq_overflow, CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_lq_overflow, 0,
	"Listen Queue Overflow");

int tcp_recv_bg = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbg, CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_recv_bg, 0,
	"Receive background");

#if TCP_DROP_SYNFIN
static int drop_synfin = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW | CTLFLAG_LOCKED,
	&drop_synfin, 0, "Drop TCP packets with SYN+FIN set");
#endif

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW|CTLFLAG_LOCKED, 0,
	"TCP Segment Reassembly Queue");

__private_extern__ int tcp_reass_maxseg = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_reass_maxseg, 0,
	"Global maximum number of TCP Segments in Reassembly Queue");

__private_extern__ int tcp_reass_qsize = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, cursegments, CTLFLAG_RD | CTLFLAG_LOCKED,
	&tcp_reass_qsize, 0,
	"Global number of TCP Segments currently in Reassembly Queue");

static int tcp_reass_overflows = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, overflows, CTLFLAG_RD | CTLFLAG_LOCKED,
	&tcp_reass_overflows, 0,
	"Global number of TCP Segment Reassembly Queue Overflows");


__private_extern__ int slowlink_wsize = 8192;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, slowlink_wsize, CTLFLAG_RW | CTLFLAG_LOCKED,
	&slowlink_wsize, 0, "Maximum advertised window size for slowlink");

int maxseg_unacked = 8;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, maxseg_unacked, CTLFLAG_RW | CTLFLAG_LOCKED,
	&maxseg_unacked, 0, "Maximum number of outstanding segments left unacked");

int tcp_do_rfc3465 = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_do_rfc3465, 0, "");

int tcp_do_rfc3465_lim2 = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3465_lim2, CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_do_rfc3465_lim2, 0, "Appropriate bytes counting w/ L=2*SMSS");

int rtt_samples_per_slot = 20;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rtt_samples_per_slot, CTLFLAG_RW | CTLFLAG_LOCKED,
	&rtt_samples_per_slot, 0, "Number of RTT samples stored for rtt history");

int tcp_allowed_iaj = ALLOWED_IAJ;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recv_allowed_iaj, CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_allowed_iaj, 0, "Allowed inter-packet arrival jitter");

int tcp_acc_iaj_high_thresh = ACC_IAJ_HIGH_THRESH;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, acc_iaj_high_thresh, CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_acc_iaj_high_thresh, 0, "Used in calculating maximum accumulated IAJ");

u_int32_t tcp_do_autorcvbuf = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, doautorcvbuf, CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_do_autorcvbuf, 0, "Enable automatic socket buffer tuning");

u_int32_t tcp_autorcvbuf_inc_shift = 3;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, autorcvbufincshift, CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_autorcvbuf_inc_shift, 0, "Shift for increment in receive socket buffer size");

u_int32_t tcp_autorcvbuf_max = 512 * 1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, autorcvbufmax, CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_autorcvbuf_max, 0, "Maximum receive socket buffer size");

int sw_lro = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, lro, CTLFLAG_RW | CTLFLAG_LOCKED,
	&sw_lro, 0, "Used to coalesce TCP packets");

int lrodebug = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, lrodbg, CTLFLAG_RW | CTLFLAG_LOCKED,
	&lrodebug, 0, "Used to debug SW LRO");

int lro_start = 4;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, lro_startcnt, CTLFLAG_RW | CTLFLAG_LOCKED,
	&lro_start, 0, "Segments for starting LRO computed as power of 2");

extern int tcp_do_autosendbuf;

int limited_txmt = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, limited_transmit, CTLFLAG_RW | CTLFLAG_LOCKED,
	&limited_txmt, 0, "Enable limited transmit");

int early_rexmt = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, early_rexmt, CTLFLAG_RW | CTLFLAG_LOCKED,
	&early_rexmt, 0, "Enable Early Retransmit");

int sack_ackadv = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sack_ackadv, CTLFLAG_RW | CTLFLAG_LOCKED,
	&sack_ackadv, 0, "Use SACK with cumulative ack advancement as a dupack");

#if CONFIG_IFEF_NOWINDOWSCALE
int tcp_obey_ifef_nowindowscale = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, obey_ifef_nowindowscale, CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_obey_ifef_nowindowscale, 0, "");
#endif

/* This limit determines when receive socket buffer tuning kicks in.
 * Currently it starts when the bw*delay measured in the last RTT
 * is more than half of the current hiwat on the buffer.
 */
uint32_t tcp_rbuf_hiwat_shift = 1;

/* This limit determines when the socket buffer will be increased to
 * accommodate an application reading slowly: when the amount of space
 * left in the buffer is less than one fourth of the bw*delay measured
 * in the last RTT.
 */
uint32_t tcp_rbuf_win_shift = 2;
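
/*
 * Concretely: with tcp_rbuf_hiwat_shift = 1, tuning starts once the
 * bw*delay measured in the last RTT exceeds sb_hiwat >> 1, and with
 * tcp_rbuf_win_shift = 2 the buffer is grown once the free space drops
 * below (bw*delay) >> 2; see tcp_sbrcv_grow() below.
 */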

extern int tcp_TCPTV_MIN;
extern int tcp_acc_iaj_high;
extern int tcp_acc_iaj_react_limit;
extern struct zone *tcp_reass_zone;

int tcprexmtthresh = 3;

u_int32_t tcp_now;
struct timeval tcp_uptime;	/* uptime when tcp_now was last updated */
lck_spin_t *tcp_uptime_lock;	/* Used to synchronize updates to tcp_now */

struct inpcbhead tcb;
#define	tcb6	tcb  /* for KAME src sync over BSD*'s */
struct inpcbinfo tcbinfo;

static void	tcp_dooptions(struct tcpcb *, u_char *, int, struct tcphdr *,
    struct tcpopt *, unsigned int);
static void	tcp_pulloutofband(struct socket *,
    struct tcphdr *, struct mbuf *, int);
static int	tcp_reass(struct tcpcb *, struct tcphdr *, int *, struct mbuf *,
    struct ifnet *);
static void	tcp_xmit_timer(struct tcpcb *, int, u_int32_t, tcp_seq);
static inline unsigned int tcp_maxmtu(struct rtentry *);
static inline int tcp_stretch_ack_enable(struct tcpcb *tp);
static inline void tcp_adaptive_rwtimo_check(struct tcpcb *, int);

#if TRAFFIC_MGT
static inline void update_iaj_state(struct tcpcb *tp, uint32_t tlen, int reset_size);
void compute_iaj(struct tcpcb *tp, int nlropkts, int lro_delay_factor);
static void compute_iaj_meat(struct tcpcb *tp, uint32_t cur_iaj);
#endif /* TRAFFIC_MGT */

#if INET6
static inline unsigned int tcp_maxmtu6(struct rtentry *);
#endif

static void tcp_sbrcv_grow(struct tcpcb *tp, struct sockbuf *sb,
	struct tcpopt *to, u_int32_t tlen);

void tcp_sbrcv_trim(struct tcpcb *tp, struct sockbuf *sb);
static void tcp_sbsnd_trim(struct sockbuf *sbsnd);
static inline void tcp_sbrcv_tstmp_check(struct tcpcb *tp);
static inline void tcp_sbrcv_reserve(struct tcpcb *tp, struct sockbuf *sb,
	u_int32_t newsize, u_int32_t idealsize);
static void tcp_bad_rexmt_restore_state(struct tcpcb *tp, struct tcphdr *th);
static int tcp_detect_bad_rexmt(struct tcpcb *tp, struct tcpopt *to);
static void tcp_compute_rtt(struct tcpcb *tp, struct tcpopt *to,
    struct tcphdr *th);

/*
 * Constants used for resizing receive socket buffer
 * when timestamps are not supported
 */
#define TCPTV_RCVNOTS_QUANTUM 100
#define TCP_RCVNOTS_BYTELEVEL 204800

/*
 * Constants used for limiting early retransmits
 * to 10 per minute.
 */
#define TCP_EARLY_REXMT_WIN (60 * TCP_RETRANSHZ) /* 60 seconds */
#define TCP_EARLY_REXMT_LIMIT 10

extern void add_to_time_wait(struct tcpcb *, uint32_t delay);
extern void postevent(struct socket *, struct sockbuf *, int);

extern void ipfwsyslog( int level, const char *format,...);
extern int fw_verbose;

#if IPFIREWALL
extern void ipfw_stealth_stats_incr_tcp(void);

#define log_in_vain_log( a ) {						\
	if ( (log_in_vain == 3 ) && (fw_verbose == 2)) {	/* Apple logging, log to ipfw.log */ \
		ipfwsyslog a ;						\
	} else if ( (log_in_vain == 4 ) && (fw_verbose == 2)) {		\
		ipfw_stealth_stats_incr_tcp();				\
	}								\
	else log a ;							\
}
#else
#define log_in_vain_log( a ) { log a; }
#endif

int tcp_rcvunackwin = TCPTV_UNACKWIN;
int tcp_maxrcvidle = TCPTV_MAXRCVIDLE;
int tcp_rcvsspktcnt = TCP_RCV_SS_PKTCOUNT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rcvsspktcnt, CTLFLAG_RW | CTLFLAG_LOCKED,
	&tcp_rcvsspktcnt, 0, "packets to be seen before receiver stretches acks");

#define DELAY_ACK(tp, th) \
	(CC_ALGO(tp)->delay_ack != NULL && CC_ALGO(tp)->delay_ack(tp, th))
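
/*
 * The delayed-ACK decision is delegated to the congestion control
 * module in use: if the algorithm registers no delay_ack hook,
 * DELAY_ACK() evaluates to false and the ACK is sent without delay.
 */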

static int tcp_dropdropablreq(struct socket *head);
static void tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th);

static void update_base_rtt(struct tcpcb *tp, uint32_t rtt);
uint32_t get_base_rtt(struct tcpcb *tp);
void tcp_set_background_cc(struct socket *so);
void tcp_set_foreground_cc(struct socket *so);
static void tcp_set_new_cc(struct socket *so, uint16_t cc_index);
static void tcp_bwmeas_check(struct tcpcb *tp);

#if TRAFFIC_MGT
void
reset_acc_iaj(struct tcpcb *tp)
{
	tp->acc_iaj = 0;
	tp->iaj_rwintop = 0;
	CLEAR_IAJ_STATE(tp);
}

static inline void
update_iaj_state(struct tcpcb *tp, uint32_t size, int rst_size)
{
	if (rst_size > 0)
		tp->iaj_size = 0;
	if (tp->iaj_size == 0 || size >= tp->iaj_size) {
		tp->iaj_size = size;
		tp->iaj_rcv_ts = tcp_now;
		tp->iaj_small_pkt = 0;
	}
}

/* For every 32-bit unsigned integer (v), this function will find the
 * largest integer n such that (n*n <= v). This takes at most 16 iterations
 * irrespective of the value of v and does not involve multiplications.
 */
static inline int
isqrt(unsigned int val) {
	unsigned int sqrt_cache[11] = {0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100};
	unsigned int temp, g=0, b=0x8000, bshft=15;
	if ( val <= 100) {
		for (g = 0; g <= 10; ++g) {
			if (sqrt_cache[g] > val) {
				g--;
				break;
			} else if (sqrt_cache[g] == val) {
				break;
			}
		}
	} else {
		do {
			temp = (((g << 1) + b) << (bshft--));
			if (val >= temp) {
				g += b;
				val -= temp;
			}
			b >>= 1;
		} while ( b > 0 && val > 0);
	}
	return(g);
}
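
/*
 * Worked example for the shift-and-subtract loop above: isqrt(150)
 * accumulates set bits of the root from b = 0x8000 downward and returns
 * 12, since 12*12 = 144 <= 150 while 13*13 = 169 > 150. Arguments of
 * 100 or less are answered directly from sqrt_cache.
 */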

/*
 * With LRO, roughly estimate the inter arrival time between
 * each sub coalesced packet as an average. Count the delay
 * cur_iaj to be the delay between the last packet received
 * and the first packet of the LRO stream. Due to round off errors
 * cur_iaj may be the same as lro_delay_factor. Averaging has
 * round off errors too. lro_delay_factor may be close to 0
 * in steady state leading to lower values fed to compute_iaj_meat.
 */
void
compute_iaj(struct tcpcb *tp, int nlropkts, int lro_delay_factor)
{
	uint32_t cur_iaj = tcp_now - tp->iaj_rcv_ts;
	uint32_t timediff = 0;

	if (cur_iaj >= lro_delay_factor) {
		cur_iaj = cur_iaj - lro_delay_factor;
	}

	compute_iaj_meat(tp, cur_iaj);

	if (nlropkts <= 1)
		return;

	nlropkts--;

	timediff = lro_delay_factor/nlropkts;

	while (nlropkts > 0)
	{
		compute_iaj_meat(tp, timediff);
		nlropkts--;
	}
}

static
void compute_iaj_meat(struct tcpcb *tp, uint32_t cur_iaj)
{
	/* When accumulated IAJ reaches MAX_ACC_IAJ in milliseconds,
	 * throttle the receive window to a minimum of MIN_IAJ_WIN packets
	 */
#define MAX_ACC_IAJ (tcp_acc_iaj_high_thresh + tcp_acc_iaj_react_limit)
#define IAJ_DIV_SHIFT 4
#define IAJ_ROUNDUP_CONST (1 << (IAJ_DIV_SHIFT - 1))

	uint32_t allowed_iaj, acc_iaj = 0;

	uint32_t mean, temp;
	int32_t cur_iaj_dev;

	cur_iaj_dev = (cur_iaj - tp->avg_iaj);

	/* Allow a jitter of "allowed_iaj" milliseconds. Some connections
	 * may have a constant jitter more than that. We detect this by
	 * using standard deviation.
	 */
	allowed_iaj = tp->avg_iaj + tp->std_dev_iaj;
	if (allowed_iaj < tcp_allowed_iaj)
		allowed_iaj = tcp_allowed_iaj;

	/* Initially when the connection starts, the sender's congestion
	 * window is small. During this period we avoid throttling a
	 * connection because we do not have a good starting point for
	 * allowed_iaj. IAJ_IGNORE_PKTCNT is used to quietly gloss over
	 * the first few packets.
	 */
	if (tp->iaj_pktcnt > IAJ_IGNORE_PKTCNT) {
		if ( cur_iaj <= allowed_iaj ) {
			if (tp->acc_iaj >= 2)
				acc_iaj = tp->acc_iaj - 2;
			else
				acc_iaj = 0;

		} else {
			acc_iaj = tp->acc_iaj + (cur_iaj - allowed_iaj);
		}

		if (acc_iaj > MAX_ACC_IAJ)
			acc_iaj = MAX_ACC_IAJ;
		tp->acc_iaj = acc_iaj;
	}

	/* Compute weighted average where the history has a weight of
	 * 15 out of 16 and the current value has a weight of 1 out of 16,
	 * so the long-term history dominates the short-term measurement.
	 *
	 * The addition of 8 rounds the value up instead of down.
	 */
	tp->avg_iaj = (((tp->avg_iaj << IAJ_DIV_SHIFT) - tp->avg_iaj)
	    + cur_iaj + IAJ_ROUNDUP_CONST) >> IAJ_DIV_SHIFT;
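
	/*
	 * Worked example of the update above: with avg_iaj = 10 and
	 * cur_iaj = 26, the new average is
	 * ((10 << 4) - 10 + 26 + 8) >> 4 = 184 >> 4 = 11,
	 * i.e. (15 * avg + cur) / 16 with round-up.
	 */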

	/* Compute Root-mean-square of deviation where mean is a weighted
	 * average as described above.
	 */
	temp = tp->std_dev_iaj * tp->std_dev_iaj;
	mean = (((temp << IAJ_DIV_SHIFT) - temp)
	    + (cur_iaj_dev * cur_iaj_dev)
	    + IAJ_ROUNDUP_CONST) >> IAJ_DIV_SHIFT;

	tp->std_dev_iaj = isqrt(mean);

	DTRACE_TCP3(iaj, struct tcpcb *, tp, uint32_t, cur_iaj,
	    uint32_t, allowed_iaj);

	return;
}
#endif /* TRAFFIC_MGT */

/* Check whether enough data has been acknowledged since the
 * bandwidth measurement was started.
 */
static void
tcp_bwmeas_check(struct tcpcb *tp)
{
	int32_t bw_meas_bytes;
	uint32_t bw, bytes, elapsed_time;
	bw_meas_bytes = tp->snd_una - tp->t_bwmeas->bw_start;
	if ((tp->t_flagsext & TF_BWMEAS_INPROGRESS) != 0 &&
	    bw_meas_bytes >= (int32_t)(tp->t_bwmeas->bw_size)) {
		bytes = bw_meas_bytes;
		elapsed_time = tcp_now - tp->t_bwmeas->bw_ts;
		if (elapsed_time > 0) {
			bw = bytes / elapsed_time;
			if ( bw > 0) {
				if (tp->t_bwmeas->bw_sndbw > 0) {
					tp->t_bwmeas->bw_sndbw =
					    (((tp->t_bwmeas->bw_sndbw << 3) - tp->t_bwmeas->bw_sndbw) + bw) >> 3;
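					/* The line above keeps 7/8 of the
					 * old estimate and adds 1/8 of the
					 * new sample: (8*old - old + new) >> 3.
					 */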
				} else {
					tp->t_bwmeas->bw_sndbw = bw;
				}
			}
		}
		tp->t_flagsext &= ~(TF_BWMEAS_INPROGRESS);
	}
}

static int
tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m,
    struct ifnet *ifp)
{
	struct tseg_qent *q;
	struct tseg_qent *p = NULL;
	struct tseg_qent *nq;
	struct tseg_qent *te = NULL;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	int flags = 0;
	int dowakeup = 0;
	struct mbuf *oodata = NULL;
	int copy_oodata = 0;
	boolean_t cell = IFNET_IS_CELLULAR(ifp);
	boolean_t wifi = (!cell && IFNET_IS_WIFI(ifp));

	/*
	 * Call with th==0 after becoming established to
	 * force pre-ESTABLISHED data up to user socket.
	 */
	if (th == NULL)
		goto present;

	/* If the reassembly queue already has entries or if we are going to add
	 * a new one, then the connection has reached a loss state.
	 * Reset the stretch-ack algorithm at this point.
	 */
	if ((tp->t_flags & TF_STRETCHACK) != 0)
		tcp_reset_stretch_ack(tp);

	/* When the connection reaches a loss state, we need to send more acks
	 * for a period of time so that the sender's congestion window will
	 * open. Wait until we see some packets on the connection before
	 * stretching acks again.
	 */
	tp->t_flagsext |= TF_RCVUNACK_WAITSS;
	tp->rcv_waitforss = 0;


#if TRAFFIC_MGT
	if (tp->acc_iaj > 0)
		reset_acc_iaj(tp);
#endif /* TRAFFIC_MGT */

	/*
	 * Limit the number of segments in the reassembly queue to prevent
	 * holding on to too many segments (and thus running out of mbufs).
	 * Make sure to let through the missing segment that caused this
	 * queue.  Always keep one global queue entry spare to be able to
	 * process the missing segment.
	 */
	if (th->th_seq != tp->rcv_nxt &&
	    tcp_reass_qsize + 1 >= tcp_reass_maxseg) {
		tcp_reass_overflows++;
		tcpstat.tcps_rcvmemdrop++;
		m_freem(m);
		*tlenp = 0;
		return (0);
	}

	/* Allocate a new queue entry. If we can't, just drop the pkt. XXX */
	te = (struct tseg_qent *) zalloc_noblock(tcp_reass_zone);
	if (te == NULL) {
		tcpstat.tcps_rcvmemdrop++;
		m_freem(m);
		return (0);
	}
	tcp_reass_qsize++;

	/*
	 * Find a segment which begins after this one does.
	 */
	LIST_FOREACH(q, &tp->t_segq, tqe_q) {
		if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
			break;
		p = q;
	}

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us.
	 */
	if (p != NULL) {
		register int i;
		/* conversion to int (in i) handles seq wraparound */
		i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
		if (i > 0) {
			if (i >= *tlenp) {
				tcpstat.tcps_rcvduppack++;
				tcpstat.tcps_rcvdupbyte += *tlenp;
				if (nstat_collect) {
					nstat_route_rx(inp->inp_route.ro_rt, 1, *tlenp, NSTAT_RX_FLAG_DUPLICATE);
					INP_ADD_STAT(inp, cell, wifi, rxpackets, 1);
					INP_ADD_STAT(inp, cell, wifi, rxbytes, *tlenp);
					tp->t_stat.rxduplicatebytes += *tlenp;
				}
				m_freem(m);
				zfree(tcp_reass_zone, te);
				te = NULL;
				tcp_reass_qsize--;
				/*
				 * Try to present any queued data
				 * at the left window edge to the user.
				 * This is needed after the 3-WHS
				 * completes.
				 */
				goto present;
			}
			m_adj(m, i);
			*tlenp -= i;
			th->th_seq += i;
		}
	}
	tcpstat.tcps_rcvoopack++;
	tcpstat.tcps_rcvoobyte += *tlenp;
	if (nstat_collect) {
		nstat_route_rx(inp->inp_route.ro_rt, 1, *tlenp, NSTAT_RX_FLAG_OUT_OF_ORDER);
		INP_ADD_STAT(inp, cell, wifi, rxpackets, 1);
		INP_ADD_STAT(inp, cell, wifi, rxbytes, *tlenp);
		tp->t_stat.rxoutoforderbytes += *tlenp;
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	while (q) {
		register int i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
		if (i <= 0)
			break;
		if (i < q->tqe_len) {
			q->tqe_th->th_seq += i;
			q->tqe_len -= i;
			m_adj(q->tqe_m, i);
			break;
		}

		nq = LIST_NEXT(q, tqe_q);
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		zfree(tcp_reass_zone, q);
		tcp_reass_qsize--;
		q = nq;
	}

	/* Insert the new segment queue entry into place. */
	te->tqe_m = m;
	te->tqe_th = th;
	te->tqe_len = *tlenp;

	if (p == NULL) {
		LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
	} else {
		LIST_INSERT_AFTER(p, te, tqe_q);
	}

	/*
	 * New out-of-order data exists, and is pointed to by
	 * queue entry te. Set copy_oodata to 1 so out-of-order data
	 * can be copied off to sockbuf after in-order data
	 * is copied off.
	 */
	if (!(so->so_state & SS_CANTRCVMORE))
		copy_oodata = 1;

present:
	/*
	 * Present data to user, advancing rcv_nxt through
	 * completed sequence space.
	 */
	if (!TCPS_HAVEESTABLISHED(tp->t_state))
		return (0);
	q = LIST_FIRST(&tp->t_segq);
	if (!q || q->tqe_th->th_seq != tp->rcv_nxt) {
		/* Stop using LRO once out of order packets arrive */
		if (tp->t_flagsext & TF_LRO_OFFLOADED) {
			tcp_lro_remove_state(inp->inp_laddr, inp->inp_faddr,
			    th->th_dport, th->th_sport);
			tp->t_flagsext &= ~TF_LRO_OFFLOADED;
		}

		/*
		 * continue processing if out-of-order data
		 * can be delivered
		 */
		if (q && (so->so_flags & SOF_ENABLE_MSGS))
			goto msg_unordered_delivery;

		return (0);
	}
	do {
		tp->rcv_nxt += q->tqe_len;
		flags = q->tqe_th->th_flags & TH_FIN;
		nq = LIST_NEXT(q, tqe_q);
		LIST_REMOVE(q, tqe_q);
		if (so->so_state & SS_CANTRCVMORE) {
			m_freem(q->tqe_m);
		} else {
			so_recv_data_stat(so, q->tqe_m, 0); /* XXXX */
			if (so->so_flags & SOF_ENABLE_MSGS) {
				/*
				 * Append the inorder data as a message to the
				 * receive socket buffer. Also check to see if
				 * the data we are about to deliver is the same
				 * data that we wanted to pass up to the user
				 * out of order. If so, reset copy_oodata --
				 * the received data filled a gap, and
				 * is now in order!
				 */
				if (q == te)
					copy_oodata = 0;
			}
			if (sbappendstream_rcvdemux(so, q->tqe_m,
			    q->tqe_th->th_seq - (tp->irs + 1), 0))
				dowakeup = 1;
			if (tp->t_flagsext & TF_LRO_OFFLOADED) {
				tcp_update_lro_seq(tp->rcv_nxt,
				    inp->inp_laddr, inp->inp_faddr,
				    th->th_dport, th->th_sport);
			}
		}
		zfree(tcp_reass_zone, q);
		tcp_reass_qsize--;
		q = nq;
	} while (q && q->tqe_th->th_seq == tp->rcv_nxt);

#if INET6
	if ((inp->inp_vflag & INP_IPV6) != 0) {

		KERNEL_DEBUG(DBG_LAYER_BEG,
		    ((inp->inp_fport << 16) | inp->inp_lport),
		    (((inp->in6p_laddr.s6_addr16[0] & 0xffff) << 16) |
		    (inp->in6p_faddr.s6_addr16[0] & 0xffff)),
		    0,0,0);
	}
	else
#endif
	{
		KERNEL_DEBUG(DBG_LAYER_BEG,
		    ((inp->inp_fport << 16) | inp->inp_lport),
		    (((inp->inp_laddr.s_addr & 0xffff) << 16) |
		    (inp->inp_faddr.s_addr & 0xffff)),
		    0,0,0);
	}

msg_unordered_delivery:
	/* Deliver out-of-order data as a message */
	if (te && (so->so_flags & SOF_ENABLE_MSGS) && copy_oodata && te->tqe_len) {
		/*
		 * make a copy of the mbuf to be delivered up to
		 * the user, and add it to the sockbuf
		 */
		oodata = m_copym(te->tqe_m, 0, M_COPYALL, M_DONTWAIT);
		if (oodata != NULL) {
			if (sbappendmsgstream_rcv(&so->so_rcv, oodata,
			    te->tqe_th->th_seq - (tp->irs + 1), 1)) {
				dowakeup = 1;
				tcpstat.tcps_msg_unopkts++;
			} else {
				tcpstat.tcps_msg_unoappendfail++;
			}
		}
	}

	if (dowakeup)
		sorwakeup(so); /* done with socket lock held */
	return (flags);
}

/*
 * Reduce congestion window.
 */
static void
tcp_reduce_congestion_window(
	struct tcpcb *tp)
{
	/*
	 * If the current tcp cc module has
	 * defined a hook for tasks to run
	 * before entering FR, call it
	 */
	if (CC_ALGO(tp)->pre_fr != NULL)
		CC_ALGO(tp)->pre_fr(tp);
	ENTER_FASTRECOVERY(tp);
	tp->snd_recover = tp->snd_max;
	tp->t_timer[TCPT_REXMT] = 0;
	tp->t_rtttime = 0;
	tp->ecn_flags |= TE_SENDCWR;
	tp->snd_cwnd = tp->snd_ssthresh +
	    tp->t_maxseg * tcprexmtthresh;
}

/*
 * The application wants to get an event if there
 * is a stall during read. Set the initial keepalive
 * timeout to be equal to twice RTO.
 */
static inline void
tcp_adaptive_rwtimo_check(struct tcpcb *tp, int tlen)
{
	if (tp->t_adaptive_rtimo > 0 && tlen > 0 &&
	    tp->t_state == TCPS_ESTABLISHED) {
		tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
			(TCP_REXMTVAL(tp) << 1));
		tp->t_flagsext |= TF_DETECT_READSTALL;
		tp->t_rtimo_probes = 0;
	}
}

inline void
tcp_keepalive_reset(struct tcpcb *tp)
{
	tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
		TCP_CONN_KEEPIDLE(tp));
	tp->t_flagsext &= ~(TF_DETECT_READSTALL);
	tp->t_rtimo_probes = 0;
}

/*
 * TCP input routine, follows pages 65-76 of the
 * protocol specification dated September, 1981 very closely.
 */
#if INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
#pragma unused(proto)
	register struct mbuf *m = *mp;
	uint32_t ia6_flags;
	struct ifnet *ifp = m->m_pkthdr.rcvif;

	IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), return IPPROTO_DONE);

	/* Expect 32-bit aligned data pointer on strict-align platforms */
	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * better place to put this in?
	 */
	if (ip6_getdstifaddr_info(m, NULL, &ia6_flags) == 0) {
		if (ia6_flags & IN6_IFF_ANYCAST) {
			struct ip6_hdr *ip6;

			ip6 = mtod(m, struct ip6_hdr *);
			icmp6_error(m, ICMP6_DST_UNREACH,
			    ICMP6_DST_UNREACH_ADDR,
			    (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);

			IF_TCP_STATINC(ifp, icmp6unreach);

			return (IPPROTO_DONE);
		}
	}

	tcp_input(m, *offp);
	return (IPPROTO_DONE);
}
#endif

/* Depending on the usage of mbuf space in the system, this function
 * will return true or false. This is used to determine if a socket
 * buffer can take more memory from the system for auto-tuning or not.
 */
u_int8_t
tcp_cansbgrow(struct sockbuf *sb)
{
	/* Calculate the host level space limit in terms of MSIZE buffers.
	 * We can use a maximum of half of the available mbuf space for
	 * socket buffers.
	 */
	u_int32_t mblim = ((nmbclusters >> 1) << (MCLSHIFT - MSIZESHIFT));

	/* Calculate per sb limit in terms of bytes. We optimize this limit
	 * for up to 16 socket buffers.
	 */
	u_int32_t sbspacelim = ((nmbclusters >> 4) << MCLSHIFT);

	if ((total_sbmb_cnt < mblim) &&
		(sb->sb_hiwat < sbspacelim)) {
		return(1);
	}
	return(0);
}
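
/*
 * Example with assumed (machine-dependent) constants: on a configuration
 * with nmbclusters = 65536, MCLBYTES = 2048 (MCLSHIFT = 11) and
 * MSIZE = 256 (MSIZESHIFT = 8), mblim works out to
 * (65536 >> 1) << 3 = 262144 MSIZE buffers and sbspacelim to
 * (65536 >> 4) << 11 = 8 MB per socket buffer.
 */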

static void
tcp_sbrcv_reserve(struct tcpcb *tp, struct sockbuf *sbrcv,
	u_int32_t newsize, u_int32_t idealsize)
{
	/* newsize should not exceed max */
	newsize = min(newsize, tcp_autorcvbuf_max);

	/* The receive window scale negotiated at the
	 * beginning of the connection will also set a
	 * limit on the socket buffer size
	 */
	newsize = min(newsize, TCP_MAXWIN << tp->rcv_scale);

	/* Set new socket buffer size */
	if (newsize > sbrcv->sb_hiwat &&
		(sbreserve(sbrcv, newsize) == 1)) {
		sbrcv->sb_idealsize = min(max(sbrcv->sb_idealsize,
			(idealsize != 0) ? idealsize : newsize),
			tcp_autorcvbuf_max);

		/* Again check the limit set by the advertised
		 * window scale
		 */
		sbrcv->sb_idealsize = min(sbrcv->sb_idealsize,
			TCP_MAXWIN << tp->rcv_scale);
	}
}

/*
 * This function is used to grow a receive socket buffer. It
 * will take into account system-level memory usage and the
 * bandwidth available on the link to make a decision.
 */
static void
tcp_sbrcv_grow(struct tcpcb *tp, struct sockbuf *sbrcv,
	struct tcpopt *to, u_int32_t pktlen) {

	/*
	 * Do not grow the receive socket buffer if
	 * - auto resizing is disabled, globally or on this socket
	 * - the high water mark has already reached the maximum
	 * - the stream is in background and receive side is being
	 * throttled
	 * - if there are segments in reassembly queue indicating loss,
	 * do not need to increase recv window during recovery as more
	 * data is not going to be sent.
	 */
	if (tcp_do_autorcvbuf == 0 ||
		(sbrcv->sb_flags & SB_AUTOSIZE) == 0 ||
		tcp_cansbgrow(sbrcv) == 0 ||
		sbrcv->sb_hiwat >= tcp_autorcvbuf_max ||
		(tp->t_flagsext & TF_RECV_THROTTLE) ||
		!LIST_EMPTY(&tp->t_segq)) {
		/* Can not resize the socket buffer, just return */
		goto out;
	}

	if (TSTMP_GT(tcp_now,
		tp->rfbuf_ts + TCPTV_RCVBUFIDLE)) {
		/* If there has been an idle period in the
		 * connection, just restart the measurement
		 */
		goto out;
	}

	if (!TSTMP_SUPPORTED(tp)) {
		/*
		 * Timestamp option is not supported on this connection.
		 * If the connection reached a state to indicate that
		 * the receive socket buffer needs to grow, increase
		 * the high water mark.
		 */
		if (TSTMP_GEQ(tcp_now,
			tp->rfbuf_ts + TCPTV_RCVNOTS_QUANTUM)) {
			if (tp->rfbuf_cnt >= TCP_RCVNOTS_BYTELEVEL) {
				tcp_sbrcv_reserve(tp, sbrcv,
					tcp_autorcvbuf_max, 0);
			}
			goto out;
		} else {
			tp->rfbuf_cnt += pktlen;
			return;
		}
	} else if (to->to_tsecr != 0) {
		/* If the timestamp shows that one RTT has
		 * completed, we can stop counting the
		 * bytes. Here we consider increasing
		 * the socket buffer if it fits the following
		 * criteria:
		 * 1. the bandwidth measured in last rtt, is more
		 * than half of sb_hiwat, this will help to scale the
		 * buffer according to the bandwidth on the link.
		 * 2. the space left in sbrcv is less than
		 * one fourth of the bandwidth measured in last rtt, this
		 * will help to accommodate an application reading slowly.
		 */
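		/*
		 * Illustrative numbers for the two criteria: with
		 * sb_hiwat = 256KB and rfbuf_cnt = 160KB measured in the
		 * last RTT, criterion 1 holds (160KB > 128KB); with only
		 * 32KB of space left it would also satisfy criterion 2
		 * (32KB < 40KB), so the buffer is grown by
		 * t_maxseg << tcp_autorcvbuf_inc_shift bytes below.
		 */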
		if (TSTMP_GEQ(to->to_tsecr, tp->rfbuf_ts)) {
			if ((tp->rfbuf_cnt > (sbrcv->sb_hiwat -
				(sbrcv->sb_hiwat >> tcp_rbuf_hiwat_shift)) ||
				(sbrcv->sb_hiwat - sbrcv->sb_cc) <
				(tp->rfbuf_cnt >> tcp_rbuf_win_shift))) {
				u_int32_t rcvbuf_inc;
				/*
				 * Increment the receive window by a multiple of
				 * maximum sized segments. This will prevent a
				 * connection from sending smaller segments on
				 * wire if it is limited by the receive window.
				 *
				 * Set the ideal size based on current bandwidth
				 * measurements. We set the ideal size on receive
				 * socket buffer to be twice the bandwidth delay
				 * product.
				 */
				rcvbuf_inc = tp->t_maxseg << tcp_autorcvbuf_inc_shift;
				tcp_sbrcv_reserve(tp, sbrcv,
					sbrcv->sb_hiwat + rcvbuf_inc,
					(tp->rfbuf_cnt * 2));
			}
			goto out;
		} else {
			tp->rfbuf_cnt += pktlen;
			return;
		}
	}
out:
	/* Restart the measurement */
	tp->rfbuf_ts = 0;
	tp->rfbuf_cnt = 0;
	return;
}

/* This function will trim the excess space added to the socket buffer
 * to help a slow-reading app. The ideal-size of a socket buffer depends
 * on the link bandwidth or it is set by an application and we aim to
 * reach that size.
 */
void
tcp_sbrcv_trim(struct tcpcb *tp, struct sockbuf *sbrcv) {
	if (tcp_do_autorcvbuf == 1 && sbrcv->sb_idealsize > 0 &&
		sbrcv->sb_hiwat > sbrcv->sb_idealsize) {
		int32_t trim;
		/* compute the difference between ideal and current sizes */
		u_int32_t diff = sbrcv->sb_hiwat - sbrcv->sb_idealsize;

		/* Compute the maximum advertised window for
		 * this connection.
		 */
		u_int32_t advwin = tp->rcv_adv - tp->rcv_nxt;

		/* How much can we trim the receive socket buffer?
		 * 1. it can not be trimmed beyond the max rcv win advertised
		 * 2. if possible, leave 1/16 of bandwidth*delay to
		 * avoid closing the win completely
		 */
		u_int32_t leave = max(advwin, (sbrcv->sb_idealsize >> 4));

		/* Sometimes leave can be zero, in that case leave at least
		 * a few segments worth of space.
		 */
		if (leave == 0)
			leave = tp->t_maxseg << tcp_autorcvbuf_inc_shift;

		trim = sbrcv->sb_hiwat - (sbrcv->sb_cc + leave);
		trim = imin(trim, (int32_t)diff);

		if (trim > 0)
			sbreserve(sbrcv, (sbrcv->sb_hiwat - trim));
	}
}

/* We may need to trim the send socket buffer size for two reasons:
 * 1. if the rtt seen on the connection is climbing up, we do not
 * want to fill the buffers any more.
 * 2. if the congestion win on the socket backed off, there is no need
 * to hold more mbufs for that connection than what the cwnd will allow.
 */
void
tcp_sbsnd_trim(struct sockbuf *sbsnd) {
	if (tcp_do_autosendbuf == 1 &&
		((sbsnd->sb_flags & (SB_AUTOSIZE | SB_TRIM)) ==
			(SB_AUTOSIZE | SB_TRIM)) &&
		(sbsnd->sb_idealsize > 0) &&
		(sbsnd->sb_hiwat > sbsnd->sb_idealsize)) {
		u_int32_t trim = 0;
		if (sbsnd->sb_cc <= sbsnd->sb_idealsize) {
			trim = sbsnd->sb_hiwat - sbsnd->sb_idealsize;
		} else {
			trim = sbsnd->sb_hiwat - sbsnd->sb_cc;
		}
		sbreserve(sbsnd, (sbsnd->sb_hiwat - trim));
	}
	if (sbsnd->sb_hiwat <= sbsnd->sb_idealsize)
		sbsnd->sb_flags &= ~(SB_TRIM);
}

/*
 * If timestamp option was not negotiated on this connection
 * and this connection is on the receiving side of a stream
 * then we can not measure the delay on the link accurately.
 * Instead of enabling automatic receive socket buffer
 * resizing, just give more space to the receive socket buffer.
 */
static inline void
tcp_sbrcv_tstmp_check(struct tcpcb *tp) {
	struct socket *so = tp->t_inpcb->inp_socket;
	u_int32_t newsize = 2 * tcp_recvspace;
	struct sockbuf *sbrcv = &so->so_rcv;

	if ((tp->t_flags & (TF_REQ_TSTMP | TF_RCVD_TSTMP)) !=
		(TF_REQ_TSTMP | TF_RCVD_TSTMP) &&
		(sbrcv->sb_flags & SB_AUTOSIZE) != 0) {
		tcp_sbrcv_reserve(tp, sbrcv, newsize, 0);
	}
}

/* A receiver will evaluate the flow of packets on a connection
 * to see if it can reduce ack traffic. The receiver will start
 * stretching acks if all of the following conditions are met:
 * 1. tcp_delack_enabled is set to 3
 * 2. If the bytes received in the last 100ms are greater than a threshold
 * defined by maxseg_unacked
 * 3. If the connection has not been idle for tcp_maxrcvidle period.
 * 4. If the connection has seen enough packets to let the slow-start
 * finish after connection establishment or after some packet loss.
 *
 * The receiver will stop stretching acks if there is congestion/reordering
 * as indicated by packets on reassembly queue or an ECN. If the delayed-ack
 * timer fires while stretching acks, it means that the packet flow has gone
 * below the threshold defined by maxseg_unacked and the receiver will stop
 * stretching acks. The receiver gets no indication when slow-start is completed
 * or when the connection reaches an idle state. That is why we use
 * tcp_rcvsspktcnt to cover slow-start and tcp_maxrcvidle to identify idle
 * state.
 */
static inline int
tcp_stretch_ack_enable(struct tcpcb *tp)
{
	if (!(tp->t_flagsext & TF_NOSTRETCHACK) &&
		tp->rcv_by_unackwin >= (maxseg_unacked * tp->t_maxseg) &&
		TSTMP_GT(tp->rcv_unackwin + tcp_maxrcvidle, tcp_now) &&
		(!(tp->t_flagsext & TF_RCVUNACK_WAITSS) ||
		(tp->rcv_waitforss >= tcp_rcvsspktcnt))) {
		return(1);
	}

	return(0);
}

/* Reset the state related to stretch-ack algorithm. This will make
 * the receiver generate an ack every other packet. The receiver
 * will start re-evaluating the rate at which packets come to decide
 * if it can benefit by lowering the ack traffic.
 */
void
tcp_reset_stretch_ack(struct tcpcb *tp)
{
	tp->t_flags &= ~(TF_STRETCHACK);
	tp->rcv_by_unackwin = 0;
	tp->rcv_unackwin = tcp_now + tcp_rcvunackwin;
}

/*
 * The last packet was a retransmission, check if this ack
 * indicates that the retransmission was spurious.
 *
 * If the connection supports timestamps, we could use it to
 * detect if the last retransmit was not needed. Otherwise,
 * if the ACK arrived within half an RTT of the retransmit,
 * the retransmit was likely a mistake in the first place.
 *
 * This function will return 1 if it is a spurious retransmit,
 * 0 otherwise.
 */
static int
tcp_detect_bad_rexmt(struct tcpcb *tp, struct tcpopt *to)
{
	int32_t tdiff, bad_rexmt_win;
	tdiff = (int32_t)(tcp_now - tp->t_rxtstart);
	bad_rexmt_win = (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
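	/* t_srtt is kept scaled by TCP_RTT_SHIFT, so the shift above
	 * yields half of the smoothed RTT in unscaled clock ticks.
	 */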

	if (TSTMP_SUPPORTED(tp) && tp->t_rxtstart > 0 &&
		(to->to_flags & TOF_TS) != 0 &&
		to->to_tsecr != 0 &&
		TSTMP_LT(to->to_tsecr, tp->t_rxtstart)) {
		return (1);
	} else if (tp->t_rxtshift == 1 &&
		tdiff < bad_rexmt_win) {
		return(1);
	}
	return(0);
}


/*
 * Restore congestion window state if a spurious timeout
 * was detected.
 */
static void
tcp_bad_rexmt_restore_state(struct tcpcb *tp, struct tcphdr *th)
{
	if (TSTMP_SUPPORTED(tp)) {
		u_int32_t fsize, acked;
		fsize = tp->snd_max - th->th_ack;
		acked = BYTES_ACKED(th, tp);

		/*
		 * Implement bad retransmit recovery as
		 * described in RFC 4015.
		 */
		tp->snd_ssthresh = tp->snd_ssthresh_prev;

		/* Initialize cwnd to the initial window */
		if (CC_ALGO(tp)->cwnd_init != NULL)
			CC_ALGO(tp)->cwnd_init(tp);

		tp->snd_cwnd = fsize + min(acked, tp->snd_cwnd);

	} else {
		tp->snd_cwnd = tp->snd_cwnd_prev;
		tp->snd_ssthresh = tp->snd_ssthresh_prev;
		if (tp->t_flags & TF_WASFRECOVERY)
			ENTER_FASTRECOVERY(tp);
	}
	tp->snd_recover = tp->snd_recover_prev;
	tp->snd_nxt = tp->snd_max;
	tp->t_rxtshift = 0;
	tp->t_rxtstart = 0;

	/* Fix send socket buffer to reflect the change in cwnd */
	tcp_bad_rexmt_fix_sndbuf(tp);

	/*
	 * This RTT might reflect the extra delay induced
	 * by the network. Skip using this sample for RTO
	 * calculation and mark the connection so we can
	 * recompute RTT when the next eligible sample is
	 * found.
	 */
	tp->t_flagsext |= TF_RECOMPUTE_RTT;
	tp->t_badrexmt_time = tcp_now;
	tp->t_rtttime = 0;
}

void
tcp_input(m, off0)
	struct mbuf *m;
	int off0;
{
	register struct tcphdr *th;
	register struct ip *ip = NULL;
	register struct inpcb *inp;
	u_char *optp = NULL;
	int optlen = 0;
	int tlen, off;
	int drop_hdrlen;
	register struct tcpcb *tp = 0;
	register int thflags;
	struct socket *so = 0;
	int todrop, acked, ourfinisacked, needoutput = 0;
	struct in_addr laddr;
#if INET6
	struct in6_addr laddr6;
#endif
	int dropsocket = 0;
	int iss = 0, nosock = 0;
	u_int32_t tiwin, sack_bytes_acked = 0;
	struct tcpopt to;		/* options in this segment */
	struct sockaddr_in *next_hop = NULL;
#if TCPDEBUG
	short ostate = 0;
#endif
	struct m_tag *fwd_tag;
	u_char ip_ecn = IPTOS_ECN_NOTECT;
	unsigned int ifscope, nocell = 0;
	uint8_t isconnected, isdisconnected;
	struct ifnet *ifp = m->m_pkthdr.rcvif;
	int pktf_sw_lro_pkt = (m->m_pkthdr.pkt_flags & PKTF_SW_LRO_PKT) ? 1 : 0;
	int nlropkts = (pktf_sw_lro_pkt == 1) ? m->m_pkthdr.lro_npkts : 1;
	int turnoff_lro = 0, win;
#if MPTCP
	struct mptcb *mp_tp = NULL;
	uint16_t mptcp_csum = 0;
#endif /* MPTCP */
	boolean_t cell = IFNET_IS_CELLULAR(ifp);
	boolean_t wifi = (!cell && IFNET_IS_WIFI(ifp));

#define TCP_INC_VAR(stat, npkts) do {			\
		stat += npkts;				\
} while (0)

	TCP_INC_VAR(tcpstat.tcps_rcvtotal, nlropkts);

	/* Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain. */
	if (!SLIST_EMPTY(&m->m_pkthdr.tags)) {
		fwd_tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
		    KERNEL_TAG_TYPE_IPFORWARD, NULL);
	} else {
		fwd_tag = NULL;
	}
	if (fwd_tag != NULL) {
		struct ip_fwd_tag *ipfwd_tag =
			(struct ip_fwd_tag *)(fwd_tag+1);

		next_hop = ipfwd_tag->next_hop;
		m_tag_delete(m, fwd_tag);
	}

#if INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;
#endif /* INET6 */
	int rstreason; /* For badport_bandlim accounting purposes */
	struct proc *proc0=current_proc();

	KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_START,0,0,0,0,0);

#if INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#endif
	bzero((char *)&to, sizeof(to));

#if INET6
	if (isipv6) {
		/*
		 * Expect 32-bit aligned data pointer on
		 * strict-align platforms
		 */
		MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

		/* IP6_EXTHDR_CHECK() is already done at tcp6_input() */
		ip6 = mtod(m, struct ip6_hdr *);
		tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
		th = (struct tcphdr *)(void *)((caddr_t)ip6 + off0);

		if (tcp_input_checksum(AF_INET6, m, th, off0, tlen))
			goto dropnosock;

		KERNEL_DEBUG(DBG_LAYER_BEG, ((th->th_dport << 16) | th->th_sport),
		    (((ip6->ip6_src.s6_addr16[0]) << 16) | (ip6->ip6_dst.s6_addr16[0])),
		    th->th_seq, th->th_ack, th->th_win);
		/*
		 * Be proactive about unspecified IPv6 address in source.
		 * As we use all-zero to indicate unbounded/unconnected pcb,
		 * unspecified IPv6 address can be used to confuse us.
		 *
		 * Note that packets with unspecified IPv6 destination is
		 * already dropped in ip6_input.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			/* XXX stat */
			IF_TCP_STATINC(ifp, unspecv6);
			goto dropnosock;
		}
		DTRACE_TCP5(receive, struct mbuf *, m, struct inpcb *, NULL,
			struct ip6_hdr *, ip6, struct tcpcb *, NULL,
			struct tcphdr *, th);

		ip_ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
	} else
#endif /* INET6 */
	{
		/*
		 * Get IP and TCP header together in first mbuf.
		 * Note: IP leaves IP header in first mbuf.
		 */
		if (off0 > sizeof (struct ip)) {
			ip_stripoptions(m, (struct mbuf *)0);
			off0 = sizeof(struct ip);
		}
		if (m->m_len < sizeof (struct tcpiphdr)) {
			if ((m = m_pullup(m, sizeof (struct tcpiphdr))) == 0) {
				tcpstat.tcps_rcvshort++;
				return;
			}
		}

		/* Expect 32-bit aligned data pointer on strict-align platforms */
		MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(void *)((caddr_t)ip + off0);
		tlen = ip->ip_len;

		if (tcp_input_checksum(AF_INET, m, th, off0, tlen))
			goto dropnosock;

#if INET6
		/* Re-initialization for later version check */
		ip->ip_v = IPVERSION;
#endif
		ip_ecn = (ip->ip_tos & IPTOS_ECN_MASK);

		DTRACE_TCP5(receive, struct mbuf *, m, struct inpcb *, NULL,
			struct ip *, ip, struct tcpcb *, NULL, struct tcphdr *, th);

		KERNEL_DEBUG(DBG_LAYER_BEG, ((th->th_dport << 16) | th->th_sport),
		    (((ip->ip_src.s_addr & 0xffff) << 16) | (ip->ip_dst.s_addr & 0xffff)),
		    th->th_seq, th->th_ack, th->th_win);

	}
ac5ea4a9 1474 }
1c79356b
A
1475
1476 /*
1477 * Check that TCP offset makes sense,
1478 * pull out TCP options and adjust length. XXX
1479 */
9bccf70c
A
1480 off = th->th_off << 2;
1481 if (off < sizeof (struct tcphdr) || off > tlen) {
1c79356b 1482 tcpstat.tcps_rcvbadoff++;
39236c6e 1483 IF_TCP_STATINC(ifp, badformat);
91447636 1484 goto dropnosock;
1c79356b 1485 }
9bccf70c
A
1486 tlen -= off; /* tlen is used instead of ti->ti_len */
1487 if (off > sizeof (struct tcphdr)) {
1c79356b
A
1488#if INET6
1489 if (isipv6) {
91447636 1490 IP6_EXTHDR_CHECK(m, off0, off, return);
1c79356b 1491 ip6 = mtod(m, struct ip6_hdr *);
316670eb 1492 th = (struct tcphdr *)(void *)((caddr_t)ip6 + off0);
1c79356b
A
1493 } else
1494#endif /* INET6 */
2d21ac55
A
1495 {
1496 if (m->m_len < sizeof(struct ip) + off) {
1497 if ((m = m_pullup(m, sizeof (struct ip) + off)) == 0) {
1498 tcpstat.tcps_rcvshort++;
1499 return;
1500 }
1501 ip = mtod(m, struct ip *);
316670eb 1502 th = (struct tcphdr *)(void *)((caddr_t)ip + off0);
1c79356b
A
1503 }
1504 }
9bccf70c 1505 optlen = off - sizeof (struct tcphdr);
1c79356b
A
1506 optp = (u_char *)(th + 1);
1507 /*
1508 * Do quick retrieval of timestamp options ("options
1509 * prediction?"). If timestamp is the only option and it's
1510 * formatted as recommended in RFC 1323 appendix A, we
1511 * quickly get the values now and not bother calling
1512 * tcp_dooptions(), etc.
1513 */
1514 if ((optlen == TCPOLEN_TSTAMP_APPA ||
2d21ac55 1515 (optlen > TCPOLEN_TSTAMP_APPA &&
1c79356b 1516 optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) &&
316670eb 1517 *(u_int32_t *)(void *)optp == htonl(TCPOPT_TSTAMP_HDR) &&
2d21ac55 1518 (th->th_flags & TH_SYN) == 0) {
8ad349bb 1519 to.to_flags |= TOF_TS;
316670eb
A
1520 to.to_tsval = ntohl(*(u_int32_t *)(void *)(optp + 4));
1521 to.to_tsecr = ntohl(*(u_int32_t *)(void *)(optp + 8));
1c79356b
A
1522 optp = NULL; /* we've parsed the options */
1523 }
1524 }
1525 thflags = th->th_flags;
1526
9bccf70c
A
1527#if TCP_DROP_SYNFIN
1528 /*
1529 * If the drop_synfin option is enabled, drop all packets with
1530 * both the SYN and FIN bits set. This prevents e.g. nmap from
1531 * identifying the TCP/IP stack.
1532 *
8ad349bb 1533 * This is a violation of the TCP specification.
9bccf70c 1534 */
316670eb 1535 if (drop_synfin && (thflags & (TH_SYN|TH_FIN)) == (TH_SYN|TH_FIN)) {
39236c6e 1536 IF_TCP_STATINC(ifp, synfin);
91447636 1537 goto dropnosock;
316670eb 1538 }
b0d623f7 1539#endif

	/*
	 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options,
	 * until after ip6_savecontrol() is called and before other functions
	 * which don't want those proto headers.
	 * Because ip6_savecontrol() is going to parse the mbuf to
	 * search for data to be passed up to user-land, it wants mbuf
	 * parameters to be unchanged.
	 */
	drop_hdrlen = off0 + off;

	/* Since this is an entry point for input processing of tcp packets, we
	 * can update the tcp clock here.
	 */
	calculate_tcp_clock();

	/*
	 * Record the interface where this segment arrived on; this does not
	 * affect normal data output (for non-detached TCP) as it provides a
	 * hint about which route and interface to use for sending in the
	 * absence of a PCB, when scoped routing (and thus source interface
	 * selection) are enabled.
	 */
	if ((m->m_pkthdr.pkt_flags & PKTF_LOOP) || m->m_pkthdr.rcvif == NULL)
		ifscope = IFSCOPE_NONE;
	else
		ifscope = m->m_pkthdr.rcvif->if_index;

	/*
	 * Convert TCP protocol specific fields to host format.
	 */

#if BYTE_ORDER != BIG_ENDIAN
	NTOHL(th->th_seq);
	NTOHL(th->th_ack);
	NTOHS(th->th_win);
	NTOHS(th->th_urp);
#endif

	/*
	 * Locate pcb for segment.
	 */
findpcb:

	isconnected = FALSE;
	isdisconnected = FALSE;

#if IPFIREWALL_FORWARD
	if (next_hop != NULL
#if INET6
	    && isipv6 == 0 /* IPv6 support is not yet */
#endif /* INET6 */
	    ) {
		/*
		 * Diverted. Pretend to be the destination.
		 * already got one like this?
		 */
		inp = in_pcblookup_hash(&tcbinfo, ip->ip_src, th->th_sport,
			ip->ip_dst, th->th_dport, 0, m->m_pkthdr.rcvif);
		if (!inp) {
			/*
			 * No, then it's new. Try find the ambushing socket
			 */
			if (!next_hop->sin_port) {
				inp = in_pcblookup_hash(&tcbinfo, ip->ip_src,
				    th->th_sport, next_hop->sin_addr,
				    th->th_dport, 1, m->m_pkthdr.rcvif);
			} else {
				inp = in_pcblookup_hash(&tcbinfo,
				    ip->ip_src, th->th_sport,
				    next_hop->sin_addr,
				    ntohs(next_hop->sin_port), 1,
				    m->m_pkthdr.rcvif);
			}
		}
	} else
#endif /* IPFIREWALL_FORWARD */
	{
#if INET6
		if (isipv6)
			inp = in6_pcblookup_hash(&tcbinfo, &ip6->ip6_src, th->th_sport,
				&ip6->ip6_dst, th->th_dport, 1,
				m->m_pkthdr.rcvif);
		else
#endif /* INET6 */
			inp = in_pcblookup_hash(&tcbinfo, ip->ip_src, th->th_sport,
				ip->ip_dst, th->th_dport, 1, m->m_pkthdr.rcvif);
	}
1c79356b 1628
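	/*
	 * A note on the lookups above: the "wildcard" argument selects the
	 * fallback behavior.  With wildcard set to 1 the hash lookup first
	 * tries an exact four-tuple match and, failing that, falls back to
	 * a listening PCB bound to the local port (possibly with a wildcard
	 * local address), which is how an incoming SYN finds its listener.
	 */
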
	/*
	 * Use the interface scope information from the PCB for outbound
	 * segments.  If the PCB isn't present and if scoped routing is
	 * enabled, tcp_respond will use the scope of the interface where
	 * the segment arrived.
	 */
	if (inp != NULL && (inp->inp_flags & INP_BOUND_IF))
		ifscope = inp->inp_boundifp->if_index;

	/*
	 * If the PCB is present and the socket isn't allowed to use
	 * the cellular interface, indicate it as such for tcp_respond.
	 */
	if (inp != NULL && (inp->inp_flags & INP_NO_IFT_CELLULAR))
		nocell = 1;

#if IPSEC
	if (ipsec_bypass == 0) {
#if INET6
		if (isipv6) {
			if (inp != NULL && ipsec6_in_reject_so(m, inp->inp_socket)) {
				IPSEC_STAT_INCREMENT(ipsec6stat.in_polvio);
				if (in_pcb_checkstate(inp, WNT_RELEASE, 0) == WNT_STOPUSING)
					inp = NULL;	/* pretend we didn't find it */

				IF_TCP_STATINC(ifp, badformatipsec);

				goto dropnosock;
			}
		} else
#endif /* INET6 */
		if (inp != NULL && ipsec4_in_reject_so(m, inp->inp_socket)) {
			IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
			if (in_pcb_checkstate(inp, WNT_RELEASE, 0) == WNT_STOPUSING)
				inp = NULL;	/* pretend we didn't find it */

			IF_TCP_STATINC(ifp, badformatipsec);

			goto dropnosock;
		}
	}
#endif /* IPSEC */

	/*
	 * If the state is CLOSED (i.e., TCB does not exist) then
	 * all data in the incoming segment is discarded.
	 * If the TCB exists but is in CLOSED state, it is embryonic,
	 * but should either do a listen or a connect soon.
	 */
	if (inp == NULL) {
		if (log_in_vain) {
#if INET6
			char dbuf[MAX_IPv6_STR_LEN], sbuf[MAX_IPv6_STR_LEN];
#else /* INET6 */
			char dbuf[MAX_IPv4_STR_LEN], sbuf[MAX_IPv4_STR_LEN];
#endif /* INET6 */

#if INET6
			if (isipv6) {
				inet_ntop(AF_INET6, &ip6->ip6_dst, dbuf, sizeof(dbuf));
				inet_ntop(AF_INET6, &ip6->ip6_src, sbuf, sizeof(sbuf));
			} else
#endif
			{
				inet_ntop(AF_INET, &ip->ip_dst, dbuf, sizeof(dbuf));
				inet_ntop(AF_INET, &ip->ip_src, sbuf, sizeof(sbuf));
			}
			switch (log_in_vain) {
			case 1:
				if (thflags & TH_SYN)
					log(LOG_INFO,
					    "Connection attempt to TCP %s:%d from %s:%d\n",
					    dbuf, ntohs(th->th_dport),
					    sbuf,
					    ntohs(th->th_sport));
				break;
			case 2:
				log(LOG_INFO,
				    "Connection attempt to TCP %s:%d from %s:%d flags:0x%x\n",
				    dbuf, ntohs(th->th_dport), sbuf,
				    ntohs(th->th_sport), thflags);
				break;
			case 3:
			case 4:
				if ((thflags & TH_SYN) && !(thflags & TH_ACK) &&
				    !(m->m_flags & (M_BCAST | M_MCAST)) &&
#if INET6
				    ((isipv6 && !IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) ||
				    (!isipv6 && ip->ip_dst.s_addr != ip->ip_src.s_addr))
#else
				    ip->ip_dst.s_addr != ip->ip_src.s_addr
#endif
				    )
					log_in_vain_log((LOG_INFO,
					    "Stealth Mode connection attempt to TCP %s:%d from %s:%d\n",
					    dbuf, ntohs(th->th_dport),
					    sbuf,
					    ntohs(th->th_sport)));
				break;
			default:
				break;
			}
		}
		if (blackhole) {
			if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_type != IFT_LOOP)
				switch (blackhole) {
				case 1:
					if (thflags & TH_SYN)
						goto dropnosock;
					break;
				case 2:
					goto dropnosock;
				default:
					goto dropnosock;
				}
		}
		rstreason = BANDLIM_RST_CLOSEDPORT;
		IF_TCP_STATINC(ifp, noconnnolist);
		goto dropwithresetnosock;
	}
	so = inp->inp_socket;
	if (so == NULL) {
		/*
		 * This case shouldn't happen, as the socket should not be
		 * NULL unless inp_state is set to INPCB_STATE_DEAD.  But
		 * just in case, pretend we didn't find the socket: this is
		 * not cause for a panic (though the socket may be leaked).
		 */
#if TEMPDEBUG
		printf("tcp_input: no more socket for inp=%x. This shouldn't happen\n", inp);
#endif
		inp = NULL;
		goto dropnosock;
	}

	tcp_lock(so, 1, 0);
	if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
		tcp_unlock(so, 1, (void *)2);
		inp = NULL;	/* pretend we didn't find it */
		goto dropnosock;
	}

	tp = intotcpcb(inp);
	if (tp == 0) {
		rstreason = BANDLIM_RST_CLOSEDPORT;
		IF_TCP_STATINC(ifp, noconnlist);
		goto dropwithreset;
	}
	if (tp->t_state == TCPS_CLOSED)
		goto drop;

	/* Unscale the window into a 32-bit value. */
	if ((thflags & TH_SYN) == 0)
		tiwin = th->th_win << tp->snd_scale;
	else
		tiwin = th->th_win;

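	/*
	 * Worked example of the unscaling above, with hypothetical values:
	 * a raw 16-bit th_win of 1024 and a negotiated snd_scale of 7
	 * yield an effective window of 1024 << 7 = 131072 bytes.  SYN
	 * segments use the raw value because the scale factor is never
	 * applied until window scaling has been negotiated (RFC 1323).
	 */
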
#if CONFIG_MACF_NET
	if (mac_inpcb_check_deliver(inp, m, AF_INET, SOCK_STREAM))
		goto drop;
#endif

	/* Avoid processing packets while closing a listen socket */
	if (tp->t_state == TCPS_LISTEN &&
	    (so->so_options & SO_ACCEPTCONN) == 0)
		goto drop;

	if (so->so_options & (SO_DEBUG|SO_ACCEPTCONN)) {
#if TCPDEBUG
		if (so->so_options & SO_DEBUG) {
			ostate = tp->t_state;
#if INET6
			if (isipv6)
				bcopy((char *)ip6, (char *)tcp_saveipgen,
				    sizeof(*ip6));
			else
#endif /* INET6 */
			bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
			tcp_savetcp = *th;
		}
#endif
		if (so->so_options & SO_ACCEPTCONN) {
			register struct tcpcb *tp0 = tp;
			struct socket *so2;
			struct socket *oso;
			struct sockaddr_storage from;
#if INET6
			struct inpcb *oinp = sotoinpcb(so);
#endif /* INET6 */
			struct ifnet *head_ifscope;
			unsigned int head_nocell, head_recvanyif;

			/* Get listener's bound-to-interface, if any */
			head_ifscope = (inp->inp_flags & INP_BOUND_IF) ?
			    inp->inp_boundifp : NULL;
			/* Get listener's no-cellular information, if any */
			head_nocell = (inp->inp_flags & INP_NO_IFT_CELLULAR) ? 1 : 0;
			/* Get listener's recv-any-interface, if any */
			head_recvanyif = (inp->inp_flags & INP_RECV_ANYIF);

1c79356b 1828
9bccf70c 1829 /*
7e4a7d39
A
1830 * If the state is LISTEN then ignore segment if it contains an RST.
1831 * If the segment contains an ACK then it is bad and send a RST.
1832 * If it does not contain a SYN then it is not interesting; drop it.
1833 * If it is from this socket, drop it, it must be forged.
9bccf70c 1834 */
1c79356b 1835 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) != TH_SYN) {
39236c6e
A
1836 IF_TCP_STATINC(ifp, listbadsyn);
1837
7e4a7d39
A
1838 if (thflags & TH_RST) {
1839 goto drop;
1840 }
1c79356b 1841 if (thflags & TH_ACK) {
7e4a7d39 1842 tp = NULL;
1c79356b 1843 tcpstat.tcps_badsyn++;
9bccf70c 1844 rstreason = BANDLIM_RST_OPENPORT;
1c79356b
A
1845 goto dropwithreset;
1846 }
7e4a7d39
A
1847
1848 /* We come here if there is no SYN set */
1849 tcpstat.tcps_badsyn++;
1c79356b
A
1850 goto drop;
1851 }
			KERNEL_DEBUG(DBG_FNC_TCP_NEWCONN | DBG_FUNC_START,0,0,0,0,0);
			if (th->th_dport == th->th_sport) {
#if INET6
				if (isipv6) {
					if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst,
					    &ip6->ip6_src))
						goto drop;
				} else
#endif /* INET6 */
				if (ip->ip_dst.s_addr == ip->ip_src.s_addr)
					goto drop;
			}
			/*
			 * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN;
			 * in_broadcast() should never return true on a received
			 * packet with M_BCAST not set.
			 *
			 * Packets with a multicast source address should also
			 * be discarded.
			 */
			if (m->m_flags & (M_BCAST|M_MCAST))
				goto drop;
#if INET6
			if (isipv6) {
				if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
				    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
					goto drop;
			} else
#endif
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
			    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
			    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
			    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
				goto drop;


#if INET6
			/*
			 * If deprecated addresses are forbidden, we do not
			 * accept a SYN to a deprecated interface address, to
			 * prevent any new inbound connection from getting
			 * established.
			 * When we do not accept the SYN, we send a TCP RST
			 * with the deprecated source address (instead of
			 * dropping it silently).  This is a compromise: it is
			 * much better for the peer to receive a RST, since the
			 * RST is then the final packet of the exchange.
			 *
			 * If we do not forbid deprecated addresses, we accept
			 * the SYN packet.  RFC 4862 forbids dropping SYN in
			 * this case.
			 */
			if (isipv6 && !ip6_use_deprecated) {
				uint32_t ia6_flags;

				if (ip6_getdstifaddr_info(m, NULL,
				    &ia6_flags) == 0) {
					if (ia6_flags & IN6_IFF_DEPRECATED) {
						tp = NULL;
						rstreason = BANDLIM_RST_OPENPORT;
						IF_TCP_STATINC(ifp, deprecate6);
						goto dropwithreset;
					}
				}
			}
#endif
			if (so->so_filt) {
#if INET6
				if (isipv6) {
					struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)&from;

					sin6->sin6_len = sizeof(*sin6);
					sin6->sin6_family = AF_INET6;
					sin6->sin6_port = th->th_sport;
					sin6->sin6_flowinfo = 0;
					sin6->sin6_addr = ip6->ip6_src;
					sin6->sin6_scope_id = 0;
				}
				else
#endif
				{
					struct sockaddr_in *sin = (struct sockaddr_in*)&from;

					sin->sin_len = sizeof(*sin);
					sin->sin_family = AF_INET;
					sin->sin_port = th->th_sport;
					sin->sin_addr = ip->ip_src;
				}
				so2 = sonewconn(so, 0, (struct sockaddr*)&from);
			} else {
				so2 = sonewconn(so, 0, NULL);
			}
			if (so2 == 0) {
				tcpstat.tcps_listendrop++;
				if (tcp_dropdropablreq(so)) {
					if (so->so_filt)
						so2 = sonewconn(so, 0, (struct sockaddr*)&from);
					else
						so2 = sonewconn(so, 0, NULL);
				}
				if (!so2)
					goto drop;
			}

			/* Point "inp" and "tp" in tandem to new socket */
			inp = (struct inpcb *)so2->so_pcb;
			tp = intotcpcb(inp);

			oso = so;
			tcp_unlock(so, 0, 0);	/* Unlock but keep a reference on listener for now */

			so = so2;
			tcp_lock(so, 1, 0);
			/*
			 * Mark socket as temporary until we're
			 * committed to keeping it.  The code at
			 * ``drop'' and ``dropwithreset'' checks the
			 * flag dropsocket to see if the temporary
			 * socket created here should be discarded.
			 * We mark the socket as discardable until
			 * we're committed to it below in TCPS_LISTEN.
			 * There are some error conditions in which we
			 * have to drop the temporary socket.
			 */
			dropsocket++;
			/*
			 * Inherit INP_BOUND_IF from listener; testing if
			 * head_ifscope is non-NULL is sufficient, since it
			 * can only be set to a non-zero value earlier if
			 * the listener has such a flag set.
			 */
			if (head_ifscope != NULL) {
				inp->inp_flags |= INP_BOUND_IF;
				inp->inp_boundifp = head_ifscope;
			} else {
				inp->inp_flags &= ~INP_BOUND_IF;
			}
			/*
			 * Inherit INP_NO_IFT_CELLULAR from listener.
			 */
			if (head_nocell) {
				inp->inp_flags |= INP_NO_IFT_CELLULAR;
			}
			/*
			 * Inherit {IN,IN6}_RECV_ANYIF from listener.
			 */
			if (head_recvanyif)
				inp->inp_flags |= INP_RECV_ANYIF;
			else
				inp->inp_flags &= ~INP_RECV_ANYIF;
#if INET6
			if (isipv6)
				inp->in6p_laddr = ip6->ip6_dst;
			else {
				inp->inp_vflag &= ~INP_IPV6;
				inp->inp_vflag |= INP_IPV4;
#endif /* INET6 */
				inp->inp_laddr = ip->ip_dst;
#if INET6
			}
#endif /* INET6 */
			inp->inp_lport = th->th_dport;
			if (in_pcbinshash(inp, 0) != 0) {
				/*
				 * Undo the assignments above if we failed to
				 * put the PCB on the hash lists.
				 */
#if INET6
				if (isipv6)
					inp->in6p_laddr = in6addr_any;
				else
#endif /* INET6 */
					inp->inp_laddr.s_addr = INADDR_ANY;
				inp->inp_lport = 0;
				tcp_lock(oso, 0, 0);	/* release ref on parent */
				tcp_unlock(oso, 1, 0);
				goto drop;
			}
#if INET6
			if (isipv6) {
				/*
				 * Inherit socket options from the listening
				 * socket.
				 * Note that in6p_inputopts are not (and
				 * should not be) copied, since they store
				 * previously received options and are used to
				 * detect if each new option is different from
				 * the previous one and hence should be passed
				 * to a user.
				 * If we copied in6p_inputopts, a user would
				 * not be able to receive options just after
				 * calling the accept system call.
				 */
				inp->inp_flags |=
					oinp->inp_flags & INP_CONTROLOPTS;
				if (oinp->in6p_outputopts)
					inp->in6p_outputopts =
						ip6_copypktopts(oinp->in6p_outputopts,
							M_NOWAIT);
			} else
#endif /* INET6 */
				inp->inp_options = ip_srcroute();
			tcp_lock(oso, 0, 0);
#if IPSEC
			/* copy old policy into new socket's */
			if (sotoinpcb(oso)->inp_sp) {
				int error = 0;
				/* Is it a security hole here to silently fail to copy the policy? */
				if (inp->inp_sp != NULL)
					error = ipsec_init_policy(so, &inp->inp_sp);
				if (error != 0 || ipsec_copy_policy(sotoinpcb(oso)->inp_sp, inp->inp_sp))
					printf("tcp_input: could not copy policy\n");
			}
#endif
			/* inherit state from the listener */
			DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
				struct tcpcb *, tp, int32_t, TCPS_LISTEN);
			tp->t_state = TCPS_LISTEN;
			tp->t_flags |= tp0->t_flags & (TF_NOPUSH|TF_NOOPT|TF_NODELAY);
			tp->t_flagsext |= (tp0->t_flagsext & TF_RXTFINDROP);
			tp->t_keepinit = tp0->t_keepinit;
			tp->t_keepcnt = tp0->t_keepcnt;
			tp->t_keepintvl = tp0->t_keepintvl;
			tp->t_adaptive_wtimo = tp0->t_adaptive_wtimo;
			tp->t_adaptive_rtimo = tp0->t_adaptive_rtimo;
			tp->t_inpcb->inp_ip_ttl = tp0->t_inpcb->inp_ip_ttl;
			if ((so->so_flags & SOF_NOTSENT_LOWAT) != 0)
				tp->t_notsent_lowat = tp0->t_notsent_lowat;

			/* now drop the reference on the listener */
			tcp_unlock(oso, 1, 0);

			tcp_set_max_rwinscale(tp, so);

			KERNEL_DEBUG(DBG_FNC_TCP_NEWCONN | DBG_FUNC_END,0,0,0,0,0);
		}
	}
	lck_mtx_assert(&((struct inpcb *)so->so_pcb)->inpcb_mtx, LCK_MTX_ASSERT_OWNED);

	if (tp->t_state == TCPS_ESTABLISHED && tlen > 0) {
		/*
		 * Evaluate the rate of arrival of packets to see if the
		 * receiver can reduce the ACK traffic.  The algorithm to
		 * stretch ACKs will be enabled if the connection meets
		 * certain criteria defined in tcp_stretch_ack_enable().
		 */
		if ((tp->t_flagsext & TF_RCVUNACK_WAITSS) != 0) {
			TCP_INC_VAR(tp->rcv_waitforss, nlropkts);
		}
		if (tcp_stretch_ack_enable(tp)) {
			tp->t_flags |= TF_STRETCHACK;
			tp->t_flagsext &= ~(TF_RCVUNACK_WAITSS);
			tp->rcv_waitforss = 0;
		} else {
			tp->t_flags &= ~(TF_STRETCHACK);
		}
		if (TSTMP_GT(tp->rcv_unackwin, tcp_now)) {
			tp->rcv_by_unackwin += (tlen + off);
		} else {
			tp->rcv_unackwin = tcp_now + tcp_rcvunackwin;
			tp->rcv_by_unackwin = tlen + off;
		}
	}
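
	/*
	 * Background on the accounting above, assuming the usual policy in
	 * tcp_stretch_ack_enable(): a "stretch ACK" receiver acknowledges
	 * less often than the RFC 1122 rule of one ACK per two full-sized
	 * segments, reducing upstream ACK traffic on fast receive paths.
	 * rcv_by_unackwin accumulates the bytes that arrived within the
	 * current tcp_rcvunackwin interval so the enable check can judge
	 * whether the arrival rate justifies stretching, while
	 * rcv_waitforss delays the decision until the sender is past
	 * slow start.
	 */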

	/*
	 * Keep track of how many bytes were received in the LRO packet
	 */
	if ((pktf_sw_lro_pkt) && (nlropkts > 2)) {
		tp->t_lropktlen += tlen;
	}
	/*
	 * Explicit Congestion Notification - Flag that we need to send
	 * ECN-Echo (ECE) if:
	 *	+ The IP Congestion experienced flag was set.
	 *	+ Socket is in established state
	 *	+ We negotiated ECN in the TCP setup
	 *	+ This isn't a pure ack (tlen > 0)
	 *	+ The data is in the valid window
	 *
	 *	TE_SENDECE will be cleared when we receive a packet with TH_CWR set.
	 */
	if (ip_ecn == IPTOS_ECN_CE && tp->t_state == TCPS_ESTABLISHED &&
	    ((tp->ecn_flags & (TE_ECN_ON)) == (TE_ECN_ON)) && tlen > 0 &&
	    SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
	    SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
		tp->ecn_flags |= TE_SENDECE;
	}

	/*
	 * Clear TE_SENDECE if TH_CWR is set.  This is harmless, so we don't
	 * bother doing extensive checks for state and whatnot.
	 */
	if ((thflags & TH_CWR) == TH_CWR) {
		tp->ecn_flags &= ~TE_SENDECE;
	}

	/*
	 * If we received an explicit notification of congestion in
	 * the IP TOS ECN bits or by the CWR bit in the TCP header flags,
	 * reset the ack-stretching state.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (ip_ecn == IPTOS_ECN_CE ||
	    (thflags & TH_CWR)))
		tcp_reset_stretch_ack(tp);
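
	/*
	 * Recap of the ECN feedback loop these three blocks implement
	 * (RFC 3168): a congested router marks CE in the IP header instead
	 * of dropping; the receiver echoes ECE on its ACKs (TE_SENDECE)
	 * until the sender reduces its window and answers with CWR, which
	 * clears TE_SENDECE.  Resetting the ack-stretching state on CE or
	 * CWR keeps the congestion signal timely rather than letting it
	 * wait behind a stretched ACK.
	 */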

	/*
	 * Try to determine if we are receiving a packet after a long time.
	 * Use our own approximation of idletime to roughly measure the remote
	 * end's idle time.  Since slow start is used after an idle period,
	 * we want to avoid doing LRO if the remote end is not up to date
	 * on initial window support and starts with 1 or 2 packets as its IW.
	 */
	if (sw_lro && (tp->t_flagsext & TF_LRO_OFFLOADED) &&
	    ((tcp_now - tp->t_rcvtime) >= (TCP_IDLETIMEOUT(tp)))) {
		turnoff_lro = 1;
	}

	/* Update rcvtime as a new segment was received on the connection */
	tp->t_rcvtime = tcp_now;

	/*
	 * Segment received on connection.
	 * Reset idle time and keep-alive timer.
	 */
	if (TCPS_HAVEESTABLISHED(tp->t_state))
		tcp_keepalive_reset(tp);

	/*
	 * Process options if not in LISTEN state,
	 * else do it below (after getting the remote address).
	 */
	if (tp->t_state != TCPS_LISTEN && optp) {
		tcp_dooptions(tp, optp, optlen, th, &to, ifscope);
#if MPTCP
		mptcp_csum = mptcp_input_csum(tp, m, drop_hdrlen);
		if (mptcp_csum) {
			tp->t_mpflags |= TMPF_SND_MPFAIL;
			tp->t_mpflags &= ~TMPF_EMBED_DSN;
			mptcp_notify_mpfail(so);
			m_freem(m);
			tcpstat.tcps_mp_badcsum++;
			tcp_check_timer_state(tp);
			tcp_unlock(so, 1, 0);
			KERNEL_DEBUG(DBG_FNC_TCP_INPUT |
			    DBG_FUNC_END,0,0,0,0,0);
			return;
		}
		mptcp_insert_rmap(tp, m);
#endif /* MPTCP */
	}
	if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
		if (to.to_flags & TOF_TS) {
			tp->t_flags |= TF_RCVD_TSTMP;
			tp->ts_recent = to.to_tsval;
			tp->ts_recent_age = tcp_now;
		}
		if (to.to_flags & TOF_MSS)
			tcp_mss(tp, to.to_mss, ifscope);
		if (SACK_ENABLED(tp)) {
			if (!(to.to_flags & TOF_SACK))
				tp->t_flagsext &= ~(TF_SACK_ENABLE);
			else
				tp->t_flags |= TF_SACK_PERMIT;
		}
	}

#if TRAFFIC_MGT
	/*
	 * Compute inter-packet arrival jitter.  According to RFC 3550,
	 * inter-packet arrival jitter is defined as the difference in packet
	 * spacing at the receiver compared to the sender for a pair of
	 * packets.  When two packets of maximum segment size come one after
	 * the other with consecutive sequence numbers, we consider them as
	 * packets sent together at the sender and use them as a pair to
	 * compute inter-packet arrival jitter.  This metric indicates the
	 * delay induced by the network components due to queuing in
	 * edge/access routers.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK|TH_ECE|TH_PUSH)) == TH_ACK &&
	    ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
	    ((to.to_flags & TOF_TS) == 0 ||
	    TSTMP_GEQ(to.to_tsval, tp->ts_recent)) &&
	    th->th_seq == tp->rcv_nxt &&
	    LIST_EMPTY(&tp->t_segq)) {
		int seg_size = tlen;
		if (tp->iaj_pktcnt <= IAJ_IGNORE_PKTCNT) {
			TCP_INC_VAR(tp->iaj_pktcnt, nlropkts);
		}

		if (m->m_pkthdr.pkt_flags & PKTF_SW_LRO_PKT) {
			seg_size = m->m_pkthdr.lro_pktlen;
		}
		if (tp->iaj_size == 0 || seg_size > tp->iaj_size ||
		    (seg_size == tp->iaj_size && tp->iaj_rcv_ts == 0)) {
			/*
			 * State related to inter-arrival jitter is
			 * uninitialized or we are trying to find a good
			 * first packet to start computing the metric
			 */
			update_iaj_state(tp, seg_size, 0);
		} else {
			if (seg_size == tp->iaj_size) {
				/*
				 * Compute inter-arrival jitter taking
				 * this packet as the second packet
				 */
				if (pktf_sw_lro_pkt)
					compute_iaj(tp, nlropkts,
					    m->m_pkthdr.lro_elapsed);
				else
					compute_iaj(tp, 1, 0);
			}
			if (seg_size < tp->iaj_size) {
				/*
				 * There is a smaller packet in the stream.
				 * Sometimes the maximum size supported on a
				 * path can change if there is a new link with
				 * a smaller MTU.  The receiver will not know
				 * about this change.  If there are too many
				 * packets smaller than iaj_size, we try to
				 * learn the iaj_size again.
				 */
				TCP_INC_VAR(tp->iaj_small_pkt, nlropkts);
				if (tp->iaj_small_pkt > RESET_IAJ_SIZE_THRESH) {
					update_iaj_state(tp, seg_size, 1);
				} else {
					CLEAR_IAJ_STATE(tp);
				}
			} else {
				update_iaj_state(tp, seg_size, 0);
			}
		}
	} else {
		CLEAR_IAJ_STATE(tp);
	}
#endif /* TRAFFIC_MGT */
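
	/*
	 * For reference, RFC 3550 specifies the jitter estimator this
	 * scheme is modeled on: with D(i-1,i) the difference in packet
	 * spacing at the receiver versus the sender for a packet pair,
	 *
	 *	J(i) = J(i-1) + (|D(i-1,i)| - J(i-1)) / 16
	 *
	 * an exponentially weighted moving average whose small gain keeps
	 * a single outlier from moving the estimate much; compute_iaj()
	 * applies smoothing of this general form (the exact gain is an
	 * implementation detail).
	 */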

	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.  If the packet has
	 * no control flags, is in-sequence, the window didn't
	 * change and we're not retransmitting, it's a
	 * candidate.  If the length is zero and the ack moved
	 * forward, we're the sender side of the xfer.  Just
	 * free the data acked & wake any higher level process
	 * that was blocked waiting for space.  If the length
	 * is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to
	 * the socket buffer and note that we need a delayed ack.
	 * Make sure that the hidden state-flags are also off.
	 * Since we check for TCPS_ESTABLISHED above, it can only
	 * be TH_NEEDSYN.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK|TH_ECE)) == TH_ACK &&
	    ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
	    ((to.to_flags & TOF_TS) == 0 ||
	    TSTMP_GEQ(to.to_tsval, tp->ts_recent)) &&
	    th->th_seq == tp->rcv_nxt &&
	    tiwin && tiwin == tp->snd_wnd &&
	    tp->snd_nxt == tp->snd_max) {

		/*
		 * If last ACK falls within this segment's sequence numbers,
		 * record the timestamp.
		 * NOTE that the test is modified according to the latest
		 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
		 */
		if ((to.to_flags & TOF_TS) != 0 &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
			tp->ts_recent_age = tcp_now;
			tp->ts_recent = to.to_tsval;
		}

		/* Force acknowledgment if we received a FIN */
		if (thflags & TH_FIN)
			tp->t_flags |= TF_ACKNOW;

		if (tlen == 0) {
			if (SEQ_GT(th->th_ack, tp->snd_una) &&
			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
			    tp->snd_cwnd >= tp->snd_ssthresh &&
			    (!IN_FASTRECOVERY(tp) &&
			    ((!(SACK_ENABLED(tp)) && tp->t_dupacks < tp->t_rexmtthresh) ||
			    (SACK_ENABLED(tp) && to.to_nsacks == 0 &&
			    TAILQ_EMPTY(&tp->snd_holes))))) {
				/*
				 * this is a pure ack for outstanding data.
				 */
				++tcpstat.tcps_predack;

				/*
				 * "bad retransmit" recovery
				 */
				if (tp->t_rxtshift > 0 &&
				    tcp_detect_bad_rexmt(tp, &to)) {
					++tcpstat.tcps_sndrexmitbad;
					tcp_bad_rexmt_restore_state(tp, th);

					DTRACE_TCP5(cc, void, NULL,
					    struct inpcb *, tp->t_inpcb,
					    struct tcpcb *, tp, struct tcphdr *, th,
					    int32_t, TCP_CC_BAD_REXMT_RECOVERY);
				}
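
				/*
				 * The check above is Eifel-style spurious
				 * retransmit detection (cf. RFC 3522): if the
				 * echoed timestamp shows this ACK was sent for
				 * the original transmission rather than the
				 * retransmission, the congestion state saved
				 * before the timeout is restored instead of
				 * needlessly staying in recovery.
				 */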

				/* Recalculate the RTT */
				tcp_compute_rtt(tp, &to, th);

				acked = BYTES_ACKED(th, tp);
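				/*
				 * BYTES_ACKED() is simply th->th_ack minus
				 * snd_una in sequence space: the number of
				 * bytes this ACK newly acknowledges.
				 */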
				tcpstat.tcps_rcvackpack++;
				tcpstat.tcps_rcvackbyte += acked;

				/*
				 * Handle an ack that is in sequence during
				 * congestion avoidance phase.  The
				 * calculations in this function assume that
				 * snd_una is not updated yet.
				 */
				if (CC_ALGO(tp)->inseq_ack_rcvd != NULL)
					CC_ALGO(tp)->inseq_ack_rcvd(tp, th);

				DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
				    struct tcpcb *, tp, struct tcphdr *, th,
				    int32_t, TCP_CC_INSEQ_ACK_RCVD);

				sbdrop(&so->so_snd, acked);
				if (so->so_flags & SOF_ENABLE_MSGS) {
					VERIFY(acked <= so->so_msg_state->msg_serial_bytes);
					so->so_msg_state->msg_serial_bytes -= acked;
				}
				tcp_sbsnd_trim(&so->so_snd);

				if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
				    SEQ_LEQ(th->th_ack, tp->snd_recover))
					tp->snd_recover = th->th_ack - 1;
				tp->snd_una = th->th_ack;

				/*
				 * pull snd_wl2 up to prevent seq wrap relative
				 * to th_ack.
				 */
				tp->snd_wl2 = th->th_ack;

				if (tp->t_dupacks > 0) {
					tp->t_dupacks = 0;
					tp->t_rexmtthresh = tcprexmtthresh;
				}

				m_freem(m);

				/*
				 * If all outstanding data are acked, stop
				 * retransmit timer, otherwise restart timer
				 * using current (possibly backed-off) value.
				 * If process is waiting for space,
				 * wakeup/selwakeup/signal.  If data
				 * are ready to send, let tcp_output
				 * decide between more output or persist.
				 */
				if (tp->snd_una == tp->snd_max)
					tp->t_timer[TCPT_REXMT] = 0;
				else if (tp->t_timer[TCPT_PERSIST] == 0)
					tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur);

				if ((tp->t_flagsext & TF_MEASURESNDBW) != 0 &&
				    tp->t_bwmeas != NULL)
					tcp_bwmeas_check(tp);
				sowwakeup(so); /* has to be done with socket lock held */
				if ((so->so_snd.sb_cc) || (tp->t_flags & TF_ACKNOW)) {
					(void) tcp_output(tp);
				}

				tcp_check_timer_state(tp);
				tcp_unlock(so, 1, 0);
				KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0);
				return;
			}
		} else if (th->th_ack == tp->snd_una &&
		    LIST_EMPTY(&tp->t_segq) &&
		    tlen <= tcp_sbspace(tp)) {
			/*
			 * this is a pure, in-sequence data packet
			 * with nothing on the reassembly queue and
			 * we have enough buffer space to take it.
			 */

			/*
			 * If this is a connection in steady state, start
			 * coalescing packets belonging to this flow.
			 */
			if (turnoff_lro) {
				tcp_lro_remove_state(tp->t_inpcb->inp_laddr,
				    tp->t_inpcb->inp_faddr,
				    tp->t_inpcb->inp_lport,
				    tp->t_inpcb->inp_fport);
				tp->t_flagsext &= ~TF_LRO_OFFLOADED;
				tp->t_idleat = tp->rcv_nxt;
			} else if (sw_lro && !pktf_sw_lro_pkt && !isipv6 &&
			    (so->so_flags & SOF_USELRO) &&
			    !IFNET_IS_CELLULAR(m->m_pkthdr.rcvif) &&
			    (m->m_pkthdr.rcvif->if_type != IFT_LOOP) &&
			    ((th->th_seq - tp->irs) >
			    (tp->t_maxseg << lro_start)) &&
			    ((tp->t_idleat == 0) || ((th->th_seq -
			    tp->t_idleat) > (tp->t_maxseg << lro_start)))) {
				tp->t_flagsext |= TF_LRO_OFFLOADED;
				tcp_start_coalescing(ip, th, tlen);
				tp->t_idleat = 0;
			}

			/* Clean receiver SACK report if present */
			if (SACK_ENABLED(tp) && tp->rcv_numsacks)
				tcp_clean_sackreport(tp);
			++tcpstat.tcps_preddat;
			tp->rcv_nxt += tlen;
			/*
			 * Pull snd_wl1 up to prevent seq wrap relative to
			 * th_seq.
			 */
			tp->snd_wl1 = th->th_seq;
			/*
			 * Pull rcv_up up to prevent seq wrap relative to
			 * rcv_nxt.
			 */
			tp->rcv_up = tp->rcv_nxt;
			TCP_INC_VAR(tcpstat.tcps_rcvpack, nlropkts);
			tcpstat.tcps_rcvbyte += tlen;
			if (nstat_collect) {
				if (m->m_pkthdr.pkt_flags & PKTF_SW_LRO_PKT) {
					INP_ADD_STAT(inp, cell, wifi, rxpackets,
					    m->m_pkthdr.lro_npkts);
				} else {
					INP_ADD_STAT(inp, cell, wifi, rxpackets, 1);
				}
				INP_ADD_STAT(inp, cell, wifi, rxbytes, tlen);
			}

			/*
			 * Calculate the RTT on the receiver only if the
			 * connection is in streaming mode and the last
			 * packet was not an end-of-write
			 */
			if ((tp->t_flags & TF_STRETCHACK) &&
			    !(tp->t_flagsext & TF_STREAMEOW))
				tcp_compute_rtt(tp, &to, th);

			tcp_sbrcv_grow(tp, &so->so_rcv, &to, tlen);

			/*
			 * Add data to socket buffer.
			 */
			so_recv_data_stat(so, m, 0);
			m_adj(m, drop_hdrlen);	/* delayed header drop */

			/*
			 * If message delivery (SOF_ENABLE_MSGS) is enabled on
			 * this socket, deliver the packet received as an
			 * in-order message with sequence number attached to it.
			 */
			if (sbappendstream_rcvdemux(so, m,
			    th->th_seq - (tp->irs + 1), 0)) {
				sorwakeup(so);
			}
#if INET6
			if (isipv6) {
				KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport),
				    (((ip6->ip6_src.s6_addr16[0]) << 16) | (ip6->ip6_dst.s6_addr16[0])),
				    th->th_seq, th->th_ack, th->th_win);
			}
			else
#endif
			{
				KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport),
				    (((ip->ip_src.s_addr & 0xffff) << 16) | (ip->ip_dst.s_addr & 0xffff)),
				    th->th_seq, th->th_ack, th->th_win);
			}
			TCP_INC_VAR(tp->t_unacksegs, nlropkts);
			if (DELAY_ACK(tp, th)) {
				if ((tp->t_flags & TF_DELACK) == 0) {
					tp->t_flags |= TF_DELACK;
					tp->t_timer[TCPT_DELACK] = OFFSET_FROM_START(tp, tcp_delack);
				}
			} else {
				tp->t_flags |= TF_ACKNOW;
				tcp_output(tp);
			}

			tcp_adaptive_rwtimo_check(tp, tlen);

			tcp_check_timer_state(tp);
			tcp_unlock(so, 1, 0);
			KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0);
			return;
		}
	}
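
	/*
	 * A note on the DELAY_ACK() choice in the data fast path above:
	 * delaying the ACK lets it ride on a later segment or on
	 * reverse-direction data within the tcp_delack timer, roughly
	 * halving pure-ACK traffic, while the TF_ACKNOW branch answers
	 * immediately when prompt feedback matters more than the savings.
	 */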

	/*
	 * Calculate amount of space in receive window,
	 * and then do TCP input processing.
	 * Receive window is amount of space in rcv queue,
	 * but not less than advertised window.
	 */
	lck_mtx_assert(&((struct inpcb *)so->so_pcb)->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
	win = tcp_sbspace(tp);
	if (win < 0)
		win = 0;
	else {	/* clip rcv window to 4K for modems */
		if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0)
			win = min(win, slowlink_wsize);
	}
	tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
#if MPTCP
	/*
	 * Ensure that the subflow receive window isn't greater
	 * than the connection level receive window.
	 */
	if ((tp->t_mpflags & TMPF_MPTCP_TRUE) &&
	    (mp_tp = tptomptp(tp))) {
		MPT_LOCK(mp_tp);
		if (tp->rcv_wnd > mp_tp->mpt_rcvwnd) {
			tp->rcv_wnd = mp_tp->mpt_rcvwnd;
			tcpstat.tcps_mp_reducedwin++;
		}
		MPT_UNLOCK(mp_tp);
	}
#endif /* MPTCP */
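
	/*
	 * Why the clamp: in MPTCP the receive window is maintained at the
	 * connection (data sequence) level and shared by all subflows
	 * (RFC 6824), so a single subflow advertising more than mpt_rcvwnd
	 * could invite data the connection-level buffer has no room to
	 * accept.
	 */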

	switch (tp->t_state) {

	/*
	 * Initialize tp->rcv_nxt, and tp->irs, select an initial
	 * tp->iss, and send a segment:
	 *     <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
	 * Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss.
	 * Fill in remote peer address fields if not previously specified.
	 * Enter SYN_RECEIVED state, and process any other fields of this
	 * segment in this state.
	 */
	case TCPS_LISTEN: {
		register struct sockaddr_in *sin;
#if INET6
		register struct sockaddr_in6 *sin6;
#endif

		lck_mtx_assert(&((struct inpcb *)so->so_pcb)->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
#if INET6
		if (isipv6) {
			MALLOC(sin6, struct sockaddr_in6 *, sizeof *sin6,
			    M_SONAME, M_NOWAIT);
			if (sin6 == NULL)
				goto drop;
			bzero(sin6, sizeof(*sin6));
			sin6->sin6_family = AF_INET6;
			sin6->sin6_len = sizeof(*sin6);
			sin6->sin6_addr = ip6->ip6_src;
			sin6->sin6_port = th->th_sport;
			laddr6 = inp->in6p_laddr;
			if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
				inp->in6p_laddr = ip6->ip6_dst;
			if (in6_pcbconnect(inp, (struct sockaddr *)sin6,
			    proc0)) {
				inp->in6p_laddr = laddr6;
				FREE(sin6, M_SONAME);
				goto drop;
			}
			FREE(sin6, M_SONAME);
		} else
#endif
		{
			lck_mtx_assert(&((struct inpcb *)so->so_pcb)->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
			MALLOC(sin, struct sockaddr_in *, sizeof *sin, M_SONAME,
			    M_NOWAIT);
			if (sin == NULL)
				goto drop;
			sin->sin_family = AF_INET;
			sin->sin_len = sizeof(*sin);
			sin->sin_addr = ip->ip_src;
			sin->sin_port = th->th_sport;
			bzero((caddr_t)sin->sin_zero, sizeof(sin->sin_zero));
			laddr = inp->inp_laddr;
			if (inp->inp_laddr.s_addr == INADDR_ANY)
				inp->inp_laddr = ip->ip_dst;
			if (in_pcbconnect(inp, (struct sockaddr *)sin, proc0,
			    IFSCOPE_NONE, NULL)) {
				inp->inp_laddr = laddr;
				FREE(sin, M_SONAME);
				goto drop;
			}
			FREE(sin, M_SONAME);
		}

		tcp_dooptions(tp, optp, optlen, th, &to, ifscope);

		if (SACK_ENABLED(tp)) {
			if (!(to.to_flags & TOF_SACK))
				tp->t_flagsext &= ~(TF_SACK_ENABLE);
			else
				tp->t_flags |= TF_SACK_PERMIT;
		}

		if (iss)
			tp->iss = iss;
		else {
			tp->iss = tcp_new_isn(tp);
		}
		tp->irs = th->th_seq;
		tcp_sendseqinit(tp);
		tcp_rcvseqinit(tp);
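		/*
		 * For reference: tcp_sendseqinit() seeds the send-side
		 * sequence variables (snd_una, snd_nxt, snd_max, snd_up)
		 * from the chosen iss, while tcp_rcvseqinit() sets rcv_nxt
		 * and rcv_adv to irs + 1, the +1 accounting for the sequence
		 * number the peer's SYN itself consumes.
		 */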
		tp->snd_recover = tp->snd_una;
		/*
		 * Initialization of the tcpcb for transaction;
		 *   set SND.WND = SEG.WND,
		 *   initialize CCsend and CCrecv.
		 */
		tp->snd_wnd = tiwin;	/* initial send-window */
		tp->t_flags |= TF_ACKNOW;
		tp->t_unacksegs = 0;
		DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
			struct tcpcb *, tp, int32_t, TCPS_SYN_RECEIVED);
		tp->t_state = TCPS_SYN_RECEIVED;
		tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
		    TCP_CONN_KEEPINIT(tp));
		dropsocket = 0;		/* committed to socket */

		if (inp->inp_flowhash == 0)
			inp->inp_flowhash = inp_calc_flowhash(inp);
#if INET6
		/* update flowinfo - RFC 6437 */
		if (inp->inp_flow == 0 &&
		    inp->in6p_flags & IN6P_AUTOFLOWLABEL) {
			inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
			inp->inp_flow |=
			    (htonl(inp->inp_flowhash) & IPV6_FLOWLABEL_MASK);
		}
#endif /* INET6 */

		/* reset the incomp processing flag */
		so->so_flags &= ~(SOF_INCOMP_INPROGRESS);
		tcpstat.tcps_accepts++;
		if ((thflags & (TH_ECE | TH_CWR)) == (TH_ECE | TH_CWR)) {
			/* ECN-setup SYN */
			tp->ecn_flags |= (TE_SETUPRECEIVED | TE_SENDIPECT);
		}

#if CONFIG_IFEF_NOWINDOWSCALE
		if (tcp_obey_ifef_nowindowscale && m->m_pkthdr.rcvif != NULL &&
		    (m->m_pkthdr.rcvif->if_eflags & IFEF_NOWINDOWSCALE)) {
			/* Window scaling is not enabled on this interface */
			tp->t_flags &= ~TF_REQ_SCALE;
		}
#endif
		goto trimthenstep6;
	}

	/*
	 * If the state is SYN_RECEIVED:
	 *	if seg contains an ACK, but not for our SYN/ACK, send a RST.
	 */
	case TCPS_SYN_RECEIVED:
		if ((thflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->snd_una) ||
		    SEQ_GT(th->th_ack, tp->snd_max))) {
			rstreason = BANDLIM_RST_OPENPORT;
			IF_TCP_STATINC(ifp, ooopacket);
			goto dropwithreset;
		}

		/*
		 * In SYN_RECEIVED state, if we recv some SYNS with
		 * window scale and others without, window scaling should
		 * be disabled.  Otherwise the window advertised will be
		 * lower if we assume scaling and the other end does not.
		 */
		if ((thflags & TH_SYN) &&
		    !(to.to_flags & TOF_SCALE))
			tp->t_flags &= ~TF_RCVD_SCALE;
		break;

	/*
	 * If the state is SYN_SENT:
	 *	if seg contains an ACK, but not for our SYN, drop the input.
	 *	if seg contains a RST, then drop the connection.
	 *	if seg does not contain SYN, then drop it.
	 * Otherwise this is an acceptable SYN segment
	 *	initialize tp->rcv_nxt and tp->irs
	 *	if seg contains ack then advance tp->snd_una
	 *	if SYN has been acked change to ESTABLISHED else SYN_RCVD state
	 *	arrange for segment to be acked (eventually)
	 *	continue processing rest of data/controls, beginning with URG
	 */
	case TCPS_SYN_SENT:
		if ((thflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->iss) ||
		    SEQ_GT(th->th_ack, tp->snd_max))) {
			rstreason = BANDLIM_UNLIMITED;
			IF_TCP_STATINC(ifp, ooopacket);
			goto dropwithreset;
		}
		if (thflags & TH_RST) {
			if ((thflags & TH_ACK) != 0) {
				soevent(so,
				    (SO_FILT_HINT_LOCKED |
				    SO_FILT_HINT_CONNRESET));
				tp = tcp_drop(tp, ECONNREFUSED);
				postevent(so, 0, EV_RESET);
			}
			goto drop;
		}
		if ((thflags & TH_SYN) == 0)
			goto drop;
		tp->snd_wnd = th->th_win;	/* initial send window */

		tp->irs = th->th_seq;
		tcp_rcvseqinit(tp);
		if (thflags & TH_ACK) {
			tcpstat.tcps_connects++;

			if ((thflags & (TH_ECE | TH_CWR)) == (TH_ECE)) {
				/* ECN-setup SYN-ACK */
				tp->ecn_flags |= TE_SETUPRECEIVED;
			}
			else {
				/* non-ECN-setup SYN-ACK */
				tp->ecn_flags &= ~TE_SENDIPECT;
			}

#if CONFIG_MACF_NET && CONFIG_MACF_SOCKET
			/* XXXMAC: recursive lock: SOCK_LOCK(so); */
			mac_socketpeer_label_associate_mbuf(m, so);
			/* XXXMAC: SOCK_UNLOCK(so); */
#endif
			/* Do window scaling on this connection? */
			if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
			    (TF_RCVD_SCALE|TF_REQ_SCALE)) {
				tp->snd_scale = tp->requested_s_scale;
				tp->rcv_scale = tp->request_r_scale;
			}
			tp->rcv_adv += min(tp->rcv_wnd, TCP_MAXWIN << tp->rcv_scale);
			tp->snd_una++;		/* SYN is acked */
			/*
			 * If there's data, delay ACK; if there's also a FIN
			 * ACKNOW will be turned on later.
			 */
			TCP_INC_VAR(tp->t_unacksegs, nlropkts);
			if (DELAY_ACK(tp, th) && tlen != 0) {
				if ((tp->t_flags & TF_DELACK) == 0) {
					tp->t_flags |= TF_DELACK;
					tp->t_timer[TCPT_DELACK] = OFFSET_FROM_START(tp, tcp_delack);
				}
			}
			else {
				tp->t_flags |= TF_ACKNOW;
			}
			/*
			 * Received <SYN,ACK> in SYN_SENT[*] state.
			 * Transitions:
			 *	SYN_SENT  --> ESTABLISHED
			 *	SYN_SENT* --> FIN_WAIT_1
			 */
			tp->t_starttime = tcp_now;
			tcp_sbrcv_tstmp_check(tp);
			if (tp->t_flags & TF_NEEDFIN) {
				DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
					struct tcpcb *, tp, int32_t, TCPS_FIN_WAIT_1);
				tp->t_state = TCPS_FIN_WAIT_1;
				tp->t_flags &= ~TF_NEEDFIN;
				thflags &= ~TH_SYN;
			} else {
				DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
					struct tcpcb *, tp, int32_t, TCPS_ESTABLISHED);
				tp->t_state = TCPS_ESTABLISHED;
				tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
					TCP_CONN_KEEPIDLE(tp));
				if (nstat_collect)
					nstat_route_connect_success(tp->t_inpcb->inp_route.ro_rt);
			}
#if MPTCP
			/*
			 * Do not send the connect notification for additional
			 * subflows until ACK for 3-way handshake arrives.
			 */
			if ((!(tp->t_mpflags & TMPF_MPTCP_TRUE)) &&
			    (tp->t_mpflags & TMPF_SENT_JOIN)) {
				isconnected = FALSE;
			} else
#endif /* MPTCP */
				isconnected = TRUE;
		} else {
			/*
			 * Received initial SYN in SYN-SENT[*] state => simul-
			 * taneous open.  If segment contains CC option and there is
			 * a cached CC, apply TAO test; if it succeeds, connection is
			 * half-synchronized.  Otherwise, do 3-way handshake:
			 *	SYN-SENT  -> SYN-RECEIVED
			 *	SYN-SENT* -> SYN-RECEIVED*
			 */
			tp->t_flags |= TF_ACKNOW;
			tp->t_timer[TCPT_REXMT] = 0;
			DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
				struct tcpcb *, tp, int32_t, TCPS_SYN_RECEIVED);
			tp->t_state = TCPS_SYN_RECEIVED;
		}

trimthenstep6:
		/*
		 * Advance th->th_seq to correspond to first data byte.
		 * If data, trim to stay within window,
		 * dropping FIN if necessary.
		 */
		th->th_seq++;
		if (tlen > tp->rcv_wnd) {
			todrop = tlen - tp->rcv_wnd;
			m_adj(m, -todrop);
			tlen = tp->rcv_wnd;
			thflags &= ~TH_FIN;
			tcpstat.tcps_rcvpackafterwin++;
			tcpstat.tcps_rcvbyteafterwin += todrop;
		}
		tp->snd_wl1 = th->th_seq - 1;
		tp->rcv_up = th->th_seq;
		/*
		 * Client side of transaction: already sent SYN and data.
		 * If the remote host used T/TCP to validate the SYN,
		 * our data will be ACK'd; if so, enter normal data segment
		 * processing in the middle of step 5, ack processing.
		 * Otherwise, goto step 6.
		 */
		if (thflags & TH_ACK)
			goto process_ACK;
		goto step6;
	/*
	 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
	 *	do normal processing.
	 *
	 * NB: Leftover from RFC1644 T/TCP.  Cases to be reused later.
	 */
	case TCPS_LAST_ACK:
	case TCPS_CLOSING:
	case TCPS_TIME_WAIT:
		break;	/* continue normal processing */

	/*
	 * Received a SYN while the connection is already established.
	 * This is the "half-open connection and other anomalies" case
	 * described in RFC 793 page 34; send an ACK so the remote end
	 * either resets the connection or recovers by adjusting its
	 * sequence numbering.
	 */
	case TCPS_ESTABLISHED:
		if (thflags & TH_SYN)
			goto dropafterack;
		break;
	}

	/*
	 * States other than LISTEN or SYN_SENT.
	 * First check the RST flag and sequence number since reset segments
	 * are exempt from the timestamp and connection count tests.  This
	 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
	 * below which allowed reset segments in half the sequence space
	 * to fall through and be processed (which gives forged reset
	 * segments with a random sequence number a 50 percent chance of
	 * killing a connection).
	 * Then check timestamp, if present.
	 * Then check the connection count, if present.
	 * Then check that at least some bytes of segment are within
	 * receive window.  If segment begins before rcv_nxt,
	 * drop leading data (and SYN); if nothing left, just ack.
	 *
	 *
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 * RFC 793 page 37:
	 *	In all states except SYN-SENT, all reset (RST) segments
	 *	are validated by checking their SEQ-fields.  A reset is
	 *	valid if its sequence number is in the window.
	 * Note: this does not take into account delayed ACKs, so
	 *	we should test against last_ack_sent instead of rcv_nxt.
	 *	The sequence number in the reset segment is normally an
	 *	echo of our outgoing acknowledgement numbers, but some hosts
	 *	send a reset with the sequence number at the rightmost edge
	 *	of our receive window, and we have to handle this case.
	 * Note 2: Paul Watson's paper "Slipping in the Window" has shown
	 *	that brute force RST attacks are possible.  To combat this,
	 *	we use a much stricter check while in the ESTABLISHED state,
	 *	only accepting RSTs where the sequence number is equal to
	 *	last_ack_sent.  In all other states (the states in which a
	 *	RST is more likely), the more permissive check is used.
	 * If we have multiple segments in flight, the initial reset
	 * segment sequence numbers will be to the left of last_ack_sent,
	 * but they will eventually catch up.
	 * In any case, it never made sense to trim reset segments to
	 * fit the receive window since RFC 1122 says:
	 *   4.2.2.12  RST Segment: RFC-793 Section 3.4
	 *
	 *    A TCP SHOULD allow a received RST segment to include data.
	 *
	 *    DISCUSSION
	 *         It has been suggested that a RST segment could contain
	 *         ASCII text that encoded and explained the cause of the
	 *         RST.  No standard has yet been established for such
	 *         data.
	 *
	 * If the reset segment passes the sequence number test examine
	 * the state:
	 *    SYN_RECEIVED STATE:
	 *	If passive open, return to LISTEN state.
	 *	If active open, inform user that connection was refused.
	 *    ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
	 *	Inform user that connection was reset, and close tcb.
	 *    CLOSING, LAST_ACK STATES:
	 *	Close the tcb.
	 *    TIME_WAIT STATE:
	 *	Drop the segment - see Stevens, vol. 2, p. 964 and
	 *	RFC 1337.
	 *
	 * Radar 4803931: Allows for the case where we ACKed the FIN but
	 *                there is already a RST in flight from the peer.
	 *                In that case, accept the RST for non-established
	 *                state if it's one off from last_ack_sent.
	 */
	if (thflags & TH_RST) {
		if ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
		    SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) ||
		    (tp->rcv_wnd == 0 &&
		    ((tp->last_ack_sent == th->th_seq) ||
		    ((tp->last_ack_sent - 1) == th->th_seq)))) {
			switch (tp->t_state) {

			case TCPS_SYN_RECEIVED:
				IF_TCP_STATINC(ifp, rstinsynrcv);
				so->so_error = ECONNREFUSED;
				goto close;

			case TCPS_ESTABLISHED:
				if (tp->last_ack_sent != th->th_seq) {
					tcpstat.tcps_badrst++;
					goto drop;
				}
			case TCPS_FIN_WAIT_1:
			case TCPS_CLOSE_WAIT:
				/*
				 * Drop through ...
				 */
			case TCPS_FIN_WAIT_2:
				so->so_error = ECONNRESET;
			close:
				postevent(so, 0, EV_RESET);
				soevent(so,
				    (SO_FILT_HINT_LOCKED |
				    SO_FILT_HINT_CONNRESET));

				tcpstat.tcps_drops++;
				tp = tcp_close(tp);
				break;

			case TCPS_CLOSING:
			case TCPS_LAST_ACK:
				tp = tcp_close(tp);
				break;

			case TCPS_TIME_WAIT:
				break;
			}
		}
		goto drop;
	}

	/*
	 * RFC 1323 PAWS: If we have a timestamp reply on this segment
	 * and it's less than ts_recent, drop it.
	 */
	if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
	    TSTMP_LT(to.to_tsval, tp->ts_recent)) {

		/* Check to see if ts_recent is over 24 days old.  */
		if ((int)(tcp_now - tp->ts_recent_age) > TCP_PAWS_IDLE) {
			/*
			 * Invalidate ts_recent.  If this segment updates
			 * ts_recent, the age will be reset later and ts_recent
			 * will get a valid value.  If it does not, setting
			 * ts_recent to zero will at least satisfy the
			 * requirement that zero be placed in the timestamp
			 * echo reply when ts_recent isn't valid.  The
			 * age isn't reset until we get a valid ts_recent
			 * because we don't want out-of-order segments to be
			 * dropped when ts_recent is old.
			 */
			tp->ts_recent = 0;
		} else {
			tcpstat.tcps_rcvduppack++;
			tcpstat.tcps_rcvdupbyte += tlen;
			tcpstat.tcps_pawsdrop++;
			if (nstat_collect) {
				nstat_route_rx(tp->t_inpcb->inp_route.ro_rt,
				    1, tlen, NSTAT_RX_FLAG_DUPLICATE);
				INP_ADD_STAT(inp, cell, wifi, rxpackets, 1);
				INP_ADD_STAT(inp, cell, wifi, rxbytes, tlen);
				tp->t_stat.rxduplicatebytes += tlen;
			}
			if (tlen)
				goto dropafterack;
			goto drop;
		}
	}
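
	/*
	 * Note on the comparisons above: TSTMP_LT()/TSTMP_GEQ() are
	 * wraparound-safe, subtracting in 32-bit space and testing the
	 * sign (TSTMP_LT(a, b) is ((int)((a) - (b)) < 0)), so a timestamp
	 * that recently wrapped past 2^32 still compares as newer.  The
	 * 24-day TCP_PAWS_IDLE escape hatch covers the one case this
	 * cannot handle: a peer idle long enough for its timestamp clock
	 * to move more than half the space.
	 */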

	/*
	 * In the SYN-RECEIVED state, validate that the packet belongs to
	 * this connection before trimming the data to fit the receive
	 * window.  Check the sequence number versus IRS since we know
	 * the sequence numbers haven't wrapped.  This is a partial fix
	 * for the "LAND" DoS attack.
	 */
	if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
		rstreason = BANDLIM_RST_OPENPORT;
		IF_TCP_STATINC(ifp, dospacket);
		goto dropwithreset;
	}

	todrop = tp->rcv_nxt - th->th_seq;
	if (todrop > 0) {
		if (thflags & TH_SYN) {
			thflags &= ~TH_SYN;
			th->th_seq++;
			if (th->th_urp > 1)
				th->th_urp--;
			else
				thflags &= ~TH_URG;
			todrop--;
		}
		/*
		 * Following if statement from Stevens, vol. 2, p. 960.
		 */
		if (todrop > tlen
		    || (todrop == tlen && (thflags & TH_FIN) == 0)) {
			/*
			 * Any valid FIN must be to the left of the window.
			 * At this point the FIN must be a duplicate or out
			 * of sequence; drop it.
			 */
			thflags &= ~TH_FIN;

			/*
			 * Send an ACK to resynchronize and drop any data.
			 * But keep on processing for RST or ACK.
			 */
			tp->t_flags |= TF_ACKNOW;
			if (todrop == 1) {
				/* This could be a keepalive */
				soevent(so, SO_FILT_HINT_LOCKED |
				    SO_FILT_HINT_KEEPALIVE);
			}
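			/*
			 * Why todrop == 1 suggests a keepalive: the classic
			 * BSD keepalive probe carries SEG.SEQ = SND.NXT - 1
			 * with zero or one bytes of data, so from our side
			 * it shows up as a segment sitting exactly one byte
			 * below rcv_nxt.
			 */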
			todrop = tlen;
			tcpstat.tcps_rcvduppack++;
			tcpstat.tcps_rcvdupbyte += todrop;
		} else {
			tcpstat.tcps_rcvpartduppack++;
			tcpstat.tcps_rcvpartdupbyte += todrop;
		}
		if (nstat_collect) {
			nstat_route_rx(tp->t_inpcb->inp_route.ro_rt, 1,
			    todrop, NSTAT_RX_FLAG_DUPLICATE);
			INP_ADD_STAT(inp, cell, wifi, rxpackets, 1);
			INP_ADD_STAT(inp, cell, wifi, rxbytes, todrop);
			tp->t_stat.rxduplicatebytes += todrop;
		}
		drop_hdrlen += todrop;	/* drop from the top afterwards */
		th->th_seq += todrop;
		tlen -= todrop;
		if (th->th_urp > todrop)
			th->th_urp -= todrop;
		else {
			thflags &= ~TH_URG;
			th->th_urp = 0;
		}
	}

	/*
	 * If new data is received on a connection after the user processes
	 * are gone, then RST the other end.  Note that an MPTCP subflow
	 * socket would have SS_NOFDREF set by default, so we test for the
	 * SOF_MP_SUBFLOW socket flag (which is cleared when the socket is
	 * closed) to make sure we don't reset such a subflow.
	 */
	if (!(so->so_flags & SOF_MP_SUBFLOW) &&
	    (so->so_state & SS_NOFDREF) &&
	    tp->t_state > TCPS_CLOSE_WAIT && tlen) {
		tp = tcp_close(tp);
		tcpstat.tcps_rcvafterclose++;
		rstreason = BANDLIM_UNLIMITED;
		IF_TCP_STATINC(ifp, cleanup);
		goto dropwithreset;
	}

	/*
	 * If segment ends after window, drop trailing data
	 * (and PUSH and FIN); if nothing left, just ACK.
	 */
	todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
	if (todrop > 0) {
		tcpstat.tcps_rcvpackafterwin++;
		if (todrop >= tlen) {
			tcpstat.tcps_rcvbyteafterwin += tlen;
			/*
			 * If a new connection request is received
			 * while in TIME_WAIT, drop the old connection
			 * and start over if the sequence numbers
			 * are above the previous ones.
			 */
			if (thflags & TH_SYN &&
			    tp->t_state == TCPS_TIME_WAIT &&
			    SEQ_GT(th->th_seq, tp->rcv_nxt)) {
				iss = tcp_new_isn(tp);
				tp = tcp_close(tp);
				tcp_unlock(so, 1, 0);
				goto findpcb;
			}
			/*
			 * If window is closed can only take segments at
			 * window edge, and have to drop data and PUSH from
			 * incoming segments.  Continue processing, but
			 * remember to ack.  Otherwise, drop segment
			 * and ack.
			 */
			if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
				tp->t_flags |= TF_ACKNOW;
				tcpstat.tcps_rcvwinprobe++;
			} else
				goto dropafterack;
		} else
			tcpstat.tcps_rcvbyteafterwin += todrop;
		m_adj(m, -todrop);
		tlen -= todrop;
		thflags &= ~(TH_PUSH|TH_FIN);
	}
3183
3184 /*
3185 * If last ACK falls within this segment's sequence numbers,
3186 * record its timestamp.
8ad349bb
A
3187 * NOTE:
3188 * 1) That the test incorporates suggestions from the latest
3189 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
3190 * 2) That updating only on newer timestamps interferes with
3191 * our earlier PAWS tests, so this check should be solely
3192 * predicated on the sequence space of this segment.
3193 * 3) That we modify the segment boundary check to be
3194 * Last.ACK.Sent <= SEG.SEQ + SEG.Len
3195 * instead of RFC1323's
 3196 * Last.ACK.Sent < SEG.SEQ + SEG.Len.
3197 * This modified check allows us to overcome RFC1323's
3198 * limitations as described in Stevens TCP/IP Illustrated
3199 * Vol. 2 p.869. In such cases, we can still calculate the
3200 * RTT correctly when RCV.NXT == Last.ACK.Sent.
1c79356b 3201 */
3202 if ((to.to_flags & TOF_TS) != 0 &&
3203 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
3204 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
3205 ((thflags & (TH_SYN|TH_FIN)) != 0))) {
3206 tp->ts_recent_age = tcp_now;
3207 tp->ts_recent = to.to_tsval;
3208 }
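/*
 * Illustrative case (values assumed): for a pure ACK with tlen = 0
 * and th_seq equal to last_ack_sent, both SEQ_LEQ tests above hold
 * with equality, so ts_recent is refreshed even though the segment
 * carries no data; RFC 1323's strict '<' bound would have skipped it.
 */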
3209
3210 /*
3211 * If a SYN is in the window, then this is an
3212 * error and we send an RST and drop the connection.
3213 */
3214 if (thflags & TH_SYN) {
3215 tp = tcp_drop(tp, ECONNRESET);
9bccf70c 3216 rstreason = BANDLIM_UNLIMITED;
1c79356b 3217 postevent(so, 0, EV_RESET);
39236c6e 3218 IF_TCP_STATINC(ifp, synwindow);
3219 goto dropwithreset;
3220 }
3221
3222 /*
3223 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN
3224 * flag is on (half-synchronized state), then queue data for
3225 * later processing; else drop segment and return.
3226 */
3227 if ((thflags & TH_ACK) == 0) {
3228 if (tp->t_state == TCPS_SYN_RECEIVED ||
3229 (tp->t_flags & TF_NEEDSYN))
3230 goto step6;
3231 else if (tp->t_flags & TF_ACKNOW)
3232 goto dropafterack;
3233 else
3234 goto drop;
3235 }
3236
3237 /*
3238 * Ack processing.
3239 */
39236c6e 3240
3241 switch (tp->t_state) {
3242
3243 /*
3244 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
3245 * ESTABLISHED state and continue processing.
3246 * The ACK was checked above.
3247 */
3248 case TCPS_SYN_RECEIVED:
3249
3250 tcpstat.tcps_connects++;
3251
3252 /* Do window scaling? */
3253 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
3254 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
3255 tp->snd_scale = tp->requested_s_scale;
3256 tp->rcv_scale = tp->request_r_scale;
3257 tp->snd_wnd = th->th_win << tp->snd_scale;
3258 tiwin = tp->snd_wnd;
1c79356b 3259 }
3260 /*
3261 * Make transitions:
3262 * SYN-RECEIVED -> ESTABLISHED
3263 * SYN-RECEIVED* -> FIN-WAIT-1
3264 */
6d2010ae 3265 tp->t_starttime = tcp_now;
316670eb 3266 tcp_sbrcv_tstmp_check(tp);
1c79356b 3267 if (tp->t_flags & TF_NEEDFIN) {
3268 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
3269 struct tcpcb *, tp, int32_t, TCPS_FIN_WAIT_1);
3270 tp->t_state = TCPS_FIN_WAIT_1;
3271 tp->t_flags &= ~TF_NEEDFIN;
3272 } else {
3273 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
3274 struct tcpcb *, tp, int32_t, TCPS_ESTABLISHED);
1c79356b 3275 tp->t_state = TCPS_ESTABLISHED;
3276 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
3277 TCP_CONN_KEEPIDLE(tp));
3278 if (nstat_collect)
3279 nstat_route_connect_success(tp->t_inpcb->inp_route.ro_rt);
3280 }
3281 /*
3282 * If segment contains data or ACK, will call tcp_reass()
3283 * later; if not, do so now to pass queued data to user.
3284 */
9bccf70c 3285 if (tlen == 0 && (thflags & TH_FIN) == 0)
2d21ac55 3286 (void) tcp_reass(tp, (struct tcphdr *)0, &tlen,
39236c6e 3287 NULL, ifp);
1c79356b 3288 tp->snd_wl1 = th->th_seq - 1;
4a3eedf9 3289
8ad349bb 3290 /* FALLTHROUGH */
3291#if MPTCP
3292 /*
3293 * Do not send the connect notification for additional subflows
3294 * until ACK for 3-way handshake arrives.
3295 */
3296 if ((!(tp->t_mpflags & TMPF_MPTCP_TRUE)) &&
3297 (tp->t_mpflags & TMPF_SENT_JOIN)) {
3298 isconnected = FALSE;
3299 } else
3300#endif /* MPTCP */
3301 isconnected = TRUE;
4a3eedf9 3302
3303 /*
3304 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
3305 * ACKs. If the ack is in the range
3306 * tp->snd_una < th->th_ack <= tp->snd_max
3307 * then advance tp->snd_una to th->th_ack and drop
3308 * data from the retransmission queue. If this ACK reflects
 3309 * more up-to-date window information, we update our window information.
3310 */
3311 case TCPS_ESTABLISHED:
3312 case TCPS_FIN_WAIT_1:
3313 case TCPS_FIN_WAIT_2:
3314 case TCPS_CLOSE_WAIT:
3315 case TCPS_CLOSING:
3316 case TCPS_LAST_ACK:
3317 case TCPS_TIME_WAIT:
3318 if (SEQ_GT(th->th_ack, tp->snd_max)) {
3319 tcpstat.tcps_rcvacktoomuch++;
3320 goto dropafterack;
3321 }
39236c6e 3322 if (SACK_ENABLED(tp) &&
8ad349bb 3323 (to.to_nsacks > 0 || !TAILQ_EMPTY(&tp->snd_holes)))
3324 tcp_sack_doack(tp, &to, th->th_ack, &sack_bytes_acked);
3325#if MPTCP
3326 if ((tp->t_mpuna) && (SEQ_GEQ(th->th_ack, tp->t_mpuna))) {
3327#if 0
3328 if ((tp->t_mpflags & TMPF_MPTCP_TRUE) &&
3329 !(tp->t_mpflags & TMPF_MPTCP_READY)) {
3330 printf("%s: fallback? %x %x \n", __func__,
3331 th->th_ack, tp->t_mpuna);
3332 tp->t_mpuna = 0;
3333 }
3334#endif
3335 if (tp->t_mpflags & TMPF_PREESTABLISHED) {
3336 /* MP TCP establishment succeeded */
3337 tp->t_mpuna = 0;
3338 if (tp->t_mpflags & TMPF_JOINED_FLOW) {
3339 if (tp->t_mpflags & TMPF_SENT_JOIN) {
3340 tp->t_mpflags &=
3341 ~TMPF_PREESTABLISHED;
3342 tp->t_mpflags |=
3343 TMPF_MPTCP_TRUE;
3344 so->so_flags |= SOF_MPTCP_TRUE;
3345 if (mptcp_dbg >= MP_ERR_DEBUG)
3346 printf("MPTCP SUCCESS"
3347 "%s \n",__func__);
3348 tp->t_timer[TCPT_JACK_RXMT] = 0;
3349 tp->t_mprxtshift = 0;
3350 isconnected = TRUE;
3351 } else {
3352 isconnected = FALSE;
3353 }
3354 } else {
3355 isconnected = TRUE;
3356 tp->t_mpflags &= ~TMPF_SENT_KEYS;
3357
3358 }
3359 }
3360 }
3361#endif /* MPTCP */
3362 /*
3363 * If we have outstanding data (other than
3364 * a window probe), this is a completely
3365 * duplicate ack (ie, window info didn't
3366 * change) and the ack is the biggest we've seen.
3367 */
1c79356b 3368 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
9bccf70c 3369 if (tlen == 0 && tiwin == tp->snd_wnd) {
3370 /*
3371 * If both ends send FIN at the same time,
3372 * then the ack will be a duplicate ack
3373 * but we have to process the FIN. Check
3374 * for this condition and process the FIN
 3375 * instead of the dupack.
3376 */
3377 if ((thflags & TH_FIN) &&
3378 (tp->t_flags & TF_SENTFIN) &&
3379 !TCPS_HAVERCVDFIN(tp->t_state) &&
3380 (th->th_ack + 1) == tp->snd_max) {
3381 break;
3382 }
3383process_dupack:
3384#if MPTCP
3385 /*
3386 * MPTCP options that are ignored must
3387 * not be treated as duplicate ACKs.
3388 */
3389 if (to.to_flags & TOF_MPTCP) {
3390 goto drop;
3391 }
3392#endif /* MPTCP */
1c79356b 3393 tcpstat.tcps_rcvdupack++;
3394 ++tp->t_dupacks;
3395 /*
3396 * Check if we need to reset the limit on early
3397 * retransmit
3398 */
3399 if (TSTMP_GEQ(tcp_now,
3400 (tp->t_early_rexmt_win + TCP_EARLY_REXMT_WIN)))
3401 tp->t_early_rexmt_count = 0;
3402
3403 /*
3404 * Is early retransmit needed? We check for
3405 * this when the connection is waiting for
3406 * more duplicate acks to enter fast recovery.
3407 */
3408 if (early_rexmt &&
3409 tp->t_early_rexmt_count < TCP_EARLY_REXMT_LIMIT &&
3410 !IN_FASTRECOVERY(tp) &&
3411 SEQ_GT(tp->snd_max, tp->snd_una) &&
3412 (tp->t_dupacks == 1 ||
3413 (SACK_ENABLED(tp) &&
3414 !TAILQ_EMPTY(&tp->snd_holes)))) {
3415 /*
3416 * If there are only a few outstanding
3417 * segments on the connection, we might need
3418 * to lower the retransmit threshold. This
3419 * will allow us to do Early Retransmit as
3420 * described in RFC 5827.
3421 */
3422 u_int32_t obytes, snd_off;
3423 int32_t snd_len;
3424 if (SACK_ENABLED(tp) &&
3425 !TAILQ_EMPTY(&tp->snd_holes)) {
3426 obytes = (tp->snd_max - tp->snd_fack) +
3427 tp->sackhint.sack_bytes_rexmit;
3428 } else {
3429 obytes = (tp->snd_max - tp->snd_una);
3430 }
3431
 3432 /* In order to lower the retransmit threshold, the
 3433 * following two conditions must be met:
3434 * 1. the amount of outstanding data is less
3435 * than 4*SMSS bytes
3436 * 2. there is no unsent data ready for
3437 * transmission or the advertised window
3438 * will limit sending new segments.
3439 */
3440 snd_off = tp->snd_max - tp->snd_una;
3441 snd_len = min(so->so_snd.sb_cc, tp->snd_wnd) - snd_off;
3442 if (obytes < (tp->t_maxseg << 2) &&
3443 snd_len <= 0) {
3444 u_int32_t osegs;
3445
3446
3447 osegs = obytes / tp->t_maxseg;
3448 if ((osegs * tp->t_maxseg) < obytes)
3449 osegs++;
3450
3451 /*
3452 * Since the connection might have already
 3453 * received some dupacks, we add them to
 3454 * the outstanding segments count to get
3455 * the correct retransmit threshold.
3456 *
3457 * By checking for early retransmit after
3458 * receiving some duplicate acks when SACK
3459 * is supported, the connection will be able
3460 * to enter fast recovery even if multiple
3461 * segments are lost in the same window.
3462 */
3463 osegs += tp->t_dupacks;
3464 if (osegs < 4) {
3465 tcpstat.tcps_early_rexmt++;
3466 tp->t_rexmtthresh = ((osegs - 1) > 1) ?
3467 (osegs - 1) : 1;
3468 tp->t_rexmtthresh = min(tp->t_rexmtthresh,
3469 tcprexmtthresh);
3470 tp->t_rexmtthresh = max(tp->t_rexmtthresh,
3471 tp->t_dupacks);
3472 DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
3473 struct tcpcb *, tp, struct tcphdr *, th,
3474 int32_t, TCP_CC_EARLY_RETRANSMIT);
3475 if (tp->t_early_rexmt_count == 0)
3476 tp->t_early_rexmt_win = tcp_now;
3477 tp->t_early_rexmt_count++;
3478 }
3479 }
3480 }
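/*
 * Worked example of the threshold math above (values assumed):
 * with t_maxseg = 1448 and obytes = 2000, osegs = 2000 / 1448 = 1,
 * rounded up to 2; adding t_dupacks = 1 gives osegs = 3 < 4, so
 * t_rexmtthresh drops to osegs - 1 = 2 (clamped between t_dupacks
 * and tcprexmtthresh), letting fast recovery start one dupack early.
 */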
1c79356b 3481 /*
3482 * If we've seen exactly our rexmt threshold
3483 * of duplicate acks, assume a packet
3484 * has been dropped and retransmit it.
3485 * Kludge snd_nxt & the congestion
3486 * window so we send only this one
3487 * packet.
3488 *
3489 * We know we're losing at the current
3490 * window size so do congestion avoidance
3491 * (set ssthresh to half the current window
3492 * and pull our congestion window back to
3493 * the new ssthresh).
3494 *
3495 * Dup acks mean that packets have left the
3496 * network (they're now cached at the receiver)
3497 * so bump cwnd by the amount in the receiver
3498 * to keep a constant cwnd packets in the
3499 * network.
3500 */
3501 if (tp->t_timer[TCPT_REXMT] == 0 ||
39236c6e 3502 (th->th_ack != tp->snd_una && sack_bytes_acked == 0)) {
1c79356b 3503 tp->t_dupacks = 0;
3504 tp->t_rexmtthresh = tcprexmtthresh;
3505 } else if (tp->t_dupacks > tp->t_rexmtthresh ||
6d2010ae 3506 IN_FASTRECOVERY(tp)) {
39236c6e 3507 if (SACK_ENABLED(tp) && IN_FASTRECOVERY(tp)) {
3508 int awnd;
3509
3510 /*
3511 * Compute the amount of data in flight first.
3512 * We can inject new data into the pipe iff
3513 * we have less than 1/2 the original window's
3514 * worth of data in flight.
9bccf70c 3515 */
3516 awnd = (tp->snd_nxt - tp->snd_fack) +
3517 tp->sackhint.sack_bytes_rexmit;
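/*
 * Illustrative numbers (assumed): if snd_nxt - snd_fack = 4344 and
 * sack_bytes_rexmit = 1448, then awnd = 5792 bytes are in flight;
 * with snd_ssthresh = 8688 the test below still lets cwnd grow by
 * one t_maxseg, clamped to ssthresh.
 */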
3518 if (awnd < tp->snd_ssthresh) {
3519 tp->snd_cwnd += tp->t_maxseg;
3520 if (tp->snd_cwnd > tp->snd_ssthresh)
3521 tp->snd_cwnd = tp->snd_ssthresh;
3522 }
3523 } else
9bccf70c 3524 tp->snd_cwnd += tp->t_maxseg;
3525
3526 DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
3527 struct tcpcb *, tp, struct tcphdr *, th,
3528 int32_t, TCP_CC_IN_FASTRECOVERY);
3529
3530 (void) tcp_output(tp);
3531 goto drop;
39236c6e 3532 } else if (tp->t_dupacks == tp->t_rexmtthresh) {
8ad349bb 3533 tcp_seq onxt = tp->snd_nxt;
3534
3535 /*
3536 * If we're doing sack, check to
3537 * see if we're already in sack
3538 * recovery. If we're not doing sack,
3539 * check to see if we're in newreno
3540 * recovery.
3541 */
39236c6e 3542 if (SACK_ENABLED(tp)) {
3543 if (IN_FASTRECOVERY(tp)) {
3544 tp->t_dupacks = 0;
3545 break;
3546 }
6d2010ae 3547 } else {
3548 if (SEQ_LEQ(th->th_ack,
3549 tp->snd_recover)) {
3550 tp->t_dupacks = 0;
3551 break;
3552 }
9bccf70c 3553 }
3554
3555 /*
3556 * If the current tcp cc module has
3557 * defined a hook for tasks to run
3558 * before entering FR, call it
3559 */
3560 if (CC_ALGO(tp)->pre_fr != NULL)
316670eb 3561 CC_ALGO(tp)->pre_fr(tp);
8ad349bb 3562 ENTER_FASTRECOVERY(tp);
9bccf70c 3563 tp->snd_recover = tp->snd_max;
1c79356b 3564 tp->t_timer[TCPT_REXMT] = 0;
9bccf70c 3565 tp->t_rtttime = 0;
3566 if ((tp->ecn_flags & TE_ECN_ON) == TE_ECN_ON) {
3567 tp->ecn_flags |= TE_SENDCWR;
3568 }
39236c6e 3569 if (SACK_ENABLED(tp)) {
3570 tcpstat.tcps_sack_recovery_episode++;
3571 tp->sack_newdata = tp->snd_nxt;
3572 tp->snd_cwnd = tp->t_maxseg;
3573
3574 DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
3575 struct tcpcb *, tp, struct tcphdr *, th,
3576 int32_t, TCP_CC_ENTER_FASTRECOVERY);
3577
3578 (void) tcp_output(tp);
3579 goto drop;
3580 }
3581 tp->snd_nxt = th->th_ack;
3582 tp->snd_cwnd = tp->t_maxseg;
3583 (void) tcp_output(tp);
3584 tp->snd_cwnd = tp->snd_ssthresh +
8ad349bb 3585 tp->t_maxseg * tp->t_dupacks;
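/*
 * Worked example (values assumed): with t_maxseg = 1448,
 * t_dupacks = 3 and snd_ssthresh = 5792 after pre_fr, the lost
 * segment is resent with cwnd = 1448, after which cwnd becomes
 * 5792 + 3 * 1448 = 10136, crediting the segments that left the
 * network and generated the dupacks.
 */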
3586 if (SEQ_GT(onxt, tp->snd_nxt))
3587 tp->snd_nxt = onxt;
3588 DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
3589 struct tcpcb *, tp, struct tcphdr *, th,
3590 int32_t, TCP_CC_ENTER_FASTRECOVERY);
1c79356b 3591 goto drop;
3592 } else if (limited_txmt &&
3593 ALLOW_LIMITED_TRANSMIT(tp) &&
3594 (!(SACK_ENABLED(tp)) || sack_bytes_acked > 0) &&
3595 (so->so_snd.sb_cc - (tp->snd_max - tp->snd_una)) > 0) {
3596 u_int32_t incr = (tp->t_maxseg * tp->t_dupacks);
3597
3598 /* Use Limited Transmit algorithm on the first two
3599 * duplicate acks when there is new data to transmit
3600 */
3601 tp->snd_cwnd += incr;
3602 tcpstat.tcps_limited_txt++;
3603 (void) tcp_output(tp);
3604
3605 DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
3606 struct tcpcb *, tp, struct tcphdr *, th,
3607 int32_t, TCP_CC_LIMITED_TRANSMIT);
3608
3609 /* Reset snd_cwnd back to normal */
3610 tp->snd_cwnd -= incr;
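/*
 * Illustrative case (values assumed): on the second dupack with
 * t_maxseg = 1448, incr = 2896, so cwnd is enlarged just long
 * enough for the tcp_output() call above to send one new segment
 * (Limited Transmit, RFC 3042), then restored so the allowance
 * does not compound.
 */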
1c79356b 3611 }
39236c6e 3612 } else {
1c79356b 3613 tp->t_dupacks = 0;
3614 tp->t_rexmtthresh = tcprexmtthresh;
3615 }
3616 break;
3617 }
3618 /*
3619 * If the congestion window was inflated to account
3620 * for the other side's cached packets, retract it.
3621 */
3622 if (IN_FASTRECOVERY(tp)) {
3623 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
39236c6e 3624 if (SACK_ENABLED(tp))
3625 tcp_sack_partialack(tp, th);
3626 else
3627 tcp_newreno_partial_ack(tp, th);
3628
3629 DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
3630 struct tcpcb *, tp, struct tcphdr *, th,
3631 int32_t, TCP_CC_PARTIAL_ACK);
3632 } else {
3633 EXIT_FASTRECOVERY(tp);
3634 if (CC_ALGO(tp)->post_fr != NULL)
3635 CC_ALGO(tp)->post_fr(tp, th);
3636 DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
3637 struct tcpcb *, tp, struct tcphdr *, th,
3638 int32_t, TCP_CC_EXIT_FASTRECOVERY);
3639 }
3640 } else {
593a1d5f 3641 /*
6d2010ae 3642 * We were not in fast recovery. Reset the duplicate ack
3643 * counter.
3644 */
3645 tp->t_dupacks = 0;
39236c6e 3646 tp->t_rexmtthresh = tcprexmtthresh;
593a1d5f 3647 }
3648
3649
1c79356b 3650 /*
8ad349bb 3651 * If we reach this point, ACK is not a duplicate,
3652 * i.e., it ACKs something we sent.
3653 */
3654 if (tp->t_flags & TF_NEEDSYN) {
3655 /*
3656 * T/TCP: Connection was half-synchronized, and our
3657 * SYN has been ACK'd (so connection is now fully
3658 * synchronized). Go to non-starred state,
3659 * increment snd_una for ACK of SYN, and check if
3660 * we can do window scaling.
3661 */
3662 tp->t_flags &= ~TF_NEEDSYN;
3663 tp->snd_una++;
3664 /* Do window scaling? */
3665 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
3666 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
3667 tp->snd_scale = tp->requested_s_scale;
3668 tp->rcv_scale = tp->request_r_scale;
3669 }
3670 }
3671
3672process_ACK:
39236c6e 3673 acked = BYTES_ACKED(th, tp);
3674 tcpstat.tcps_rcvackpack++;
3675 tcpstat.tcps_rcvackbyte += acked;
3676
9bccf70c 3677 /*
3678 * If the last packet was a retransmit, make sure
3679 * it was not spurious.
3680 *
3681 * If the ack has ECE bit set, skip bad
3682 * retransmit recovery.
9bccf70c 3683 */
3684 if (tp->t_rxtshift > 0 &&
3685 (thflags & TH_ECE) == 0 &&
3686 tcp_detect_bad_rexmt(tp, &to)) {
2d21ac55 3687 ++tcpstat.tcps_sndrexmitbad;
39236c6e 3688 tcp_bad_rexmt_restore_state(tp, th);
3689
3690 DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
3691 struct tcpcb *, tp, struct tcphdr *, th,
3692 int32_t, TCP_CC_BAD_REXMT_RECOVERY);
3693 }
3694
3695 /* Recalculate the RTT */
3696 tcp_compute_rtt(tp, &to, th);
3697
3698 /*
3699 * If all outstanding data is acked, stop retransmit
3700 * timer and remember to restart (more output or persist).
3701 * If there is more data to be acked, restart retransmit
3702 * timer, using current (possibly backed-off) value.
3703 */
3704 if (th->th_ack == tp->snd_max) {
3705 tp->t_timer[TCPT_REXMT] = 0;
3706 needoutput = 1;
3707 } else if (tp->t_timer[TCPT_PERSIST] == 0)
6d2010ae 3708 tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur);
3709
3710 /*
3711 * If no data (only SYN) was ACK'd,
3712 * skip rest of ACK processing.
3713 */
3714 if (acked == 0)
3715 goto step6;
3716
2d21ac55 3717 if ((thflags & TH_ECE) != 0 &&
316670eb 3718 ((tp->ecn_flags & TE_ECN_ON) == TE_ECN_ON)) {
3719 /*
3720 * Reduce the congestion window if we haven't done so.
3721 */
39236c6e 3722 if (!SACK_ENABLED(tp) && !IN_FASTRECOVERY(tp) &&
6d2010ae 3723 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
316670eb 3724 tcp_reduce_congestion_window(tp);
3725 DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
3726 struct tcpcb *, tp, struct tcphdr *, th,
3727 int32_t, TCP_CC_ECN_RCVD);
2d21ac55 3728 }
6d2010ae 3729 }
b0d623f7 3730
3731 /*
3732 * When new data is acked, open the congestion window.
3733 * The specifics of how this is achieved are up to the
3734 * congestion control algorithm in use for this connection.
3735 *
3736 * The calculations in this function assume that snd_una is
3737 * not updated yet.
3738 */
3739 if (!IN_FASTRECOVERY(tp)) {
3740 if (CC_ALGO(tp)->ack_rcvd != NULL)
3741 CC_ALGO(tp)->ack_rcvd(tp, th);
3742
3743 DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
3744 struct tcpcb *, tp, struct tcphdr *, th,
3745 int32_t, TCP_CC_ACK_RCVD);
3746 }
3747 if (acked > so->so_snd.sb_cc) {
3748 tp->snd_wnd -= so->so_snd.sb_cc;
3749 sbdrop(&so->so_snd, (int)so->so_snd.sb_cc);
3750 if (so->so_flags & SOF_ENABLE_MSGS) {
3751 so->so_msg_state->msg_serial_bytes -=
3752 (int)so->so_snd.sb_cc;
3753 }
3754 ourfinisacked = 1;
3755 } else {
3756 sbdrop(&so->so_snd, acked);
3757 if (so->so_flags & SOF_ENABLE_MSGS) {
3758 so->so_msg_state->msg_serial_bytes -=
3759 acked;
3760 }
316670eb 3761 tcp_sbsnd_trim(&so->so_snd);
3762 tp->snd_wnd -= acked;
3763 ourfinisacked = 0;
3764 }
91447636 3765 /* detect una wraparound */
6d2010ae 3766 if ( !IN_FASTRECOVERY(tp) &&
3767 SEQ_GT(tp->snd_una, tp->snd_recover) &&
3768 SEQ_LEQ(th->th_ack, tp->snd_recover))
3769 tp->snd_recover = th->th_ack - 1;
3770
3771 if (IN_FASTRECOVERY(tp) &&
3772 SEQ_GEQ(th->th_ack, tp->snd_recover))
3773 EXIT_FASTRECOVERY(tp);
6d2010ae 3774
1c79356b 3775 tp->snd_una = th->th_ack;
39236c6e 3776 if (SACK_ENABLED(tp)) {
3777 if (SEQ_GT(tp->snd_una, tp->snd_recover))
3778 tp->snd_recover = tp->snd_una;
3779 }
3780 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
3781 tp->snd_nxt = tp->snd_una;
3782 if ((tp->t_flagsext & TF_MEASURESNDBW) != 0 &&
3783 tp->t_bwmeas != NULL)
3784 tcp_bwmeas_check(tp);
3785
3786 /*
3787 * sowwakeup must happen after snd_una, et al. are updated so that
3788 * the sequence numbers are in sync with so_snd
3789 */
3790 sowwakeup(so);
3791
3792 switch (tp->t_state) {
3793
3794 /*
3795 * In FIN_WAIT_1 STATE in addition to the processing
3796 * for the ESTABLISHED state if our FIN is now acknowledged
3797 * then enter FIN_WAIT_2.
3798 */
3799 case TCPS_FIN_WAIT_1:
3800 if (ourfinisacked) {
3801 /*
3802 * If we can't receive any more
3803 * data, then closing user can proceed.
39236c6e 3804 * Starting the TCPT_2MSL timer is contrary to the
3805 * specification, but if we don't get a FIN
3806 * we'll hang forever.
3807 */
3808 if (so->so_state & SS_CANTRCVMORE) {
3809 tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp,
3810 TCP_CONN_MAXIDLE(tp));
3811 isconnected = FALSE;
3812 isdisconnected = TRUE;
1c79356b 3813 }
3814 DTRACE_TCP4(state__change, void, NULL,
3815 struct inpcb *, inp,
3816 struct tcpcb *, tp,
3817 int32_t, TCPS_FIN_WAIT_2);
1c79356b 3818 tp->t_state = TCPS_FIN_WAIT_2;
3819 /* fall through and make sure we also recognize
3820 * data ACKed with the FIN
3821 */
1c79356b 3822 }
c910b4d9 3823 tp->t_flags |= TF_ACKNOW;
3824 break;
3825
3826 /*
3827 * In CLOSING STATE in addition to the processing for
3828 * the ESTABLISHED state if the ACK acknowledges our FIN
3829 * then enter the TIME-WAIT state, otherwise ignore
3830 * the segment.
3831 */
3832 case TCPS_CLOSING:
3833 if (ourfinisacked) {
3834 DTRACE_TCP4(state__change, void, NULL,
3835 struct inpcb *, inp,
3836 struct tcpcb *, tp,
3837 int32_t, TCPS_TIME_WAIT);
3838 tp->t_state = TCPS_TIME_WAIT;
3839 tcp_canceltimers(tp);
39236c6e 3840 add_to_time_wait(tp, 2 * tcp_msl);
3841 isconnected = FALSE;
3842 isdisconnected = TRUE;
1c79356b 3843 }
c910b4d9 3844 tp->t_flags |= TF_ACKNOW;
3845 break;
3846
3847 /*
3848 * In LAST_ACK, we may still be waiting for data to drain
3849 * and/or to be acked, as well as for the ack of our FIN.
3850 * If our FIN is now acknowledged, delete the TCB,
3851 * enter the closed state and return.
3852 */
3853 case TCPS_LAST_ACK:
3854 if (ourfinisacked) {
3855 tp = tcp_close(tp);
3856 goto drop;
3857 }
3858 break;
3859
3860 /*
3861 * In TIME_WAIT state the only thing that should arrive
3862 * is a retransmission of the remote FIN. Acknowledge
3863 * it and restart the finack timer.
3864 */
3865 case TCPS_TIME_WAIT:
6d2010ae 3866 add_to_time_wait(tp, 2 * tcp_msl);
3867 goto dropafterack;
3868 }
3869
3870 /*
3871 * If there is a SACK option on the ACK and we
3872 * haven't seen any duplicate acks before, count
3873 * it as a duplicate ack even if the cumulative
3874 * ack is advanced. If the receiver delayed an
3875 * ack and detected loss afterwards, then the ack
3876 * will advance cumulative ack and will also have
3877 * a SACK option. So counting it as one duplicate
3878 * ack is ok.
3879 */
3880 if (sack_ackadv == 1 &&
3881 tp->t_state == TCPS_ESTABLISHED &&
3882 SACK_ENABLED(tp) &&
3883 sack_bytes_acked > 0 &&
3884 tp->t_dupacks == 0 &&
3885 SEQ_LEQ(th->th_ack, tp->snd_una) && tlen == 0) {
3886 tcpstat.tcps_sack_ackadv++;
3887 goto process_dupack;
3888 }
3889 }
3890
3891step6:
3892 /*
3893 * Update window information.
3894 * Don't look at window if no ACK: TAC's send garbage on first SYN.
3895 */
3896 if ((thflags & TH_ACK) &&
3897 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
3898 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
3899 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
3900 /* keep track of pure window updates */
9bccf70c 3901 if (tlen == 0 &&
3902 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
3903 tcpstat.tcps_rcvwinupd++;
3904 tp->snd_wnd = tiwin;
3905 tp->snd_wl1 = th->th_seq;
3906 tp->snd_wl2 = th->th_ack;
3907 if (tp->snd_wnd > tp->max_sndwnd)
3908 tp->max_sndwnd = tp->snd_wnd;
3909 needoutput = 1;
3910 }
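/*
 * Illustrative case (values assumed): a segment carrying the same
 * th_seq as snd_wl1 but a newer th_ack, or the same th_ack with a
 * larger advertised window, passes the test above and refreshes
 * snd_wnd; older segments fail it, so a reordered stale window
 * cannot overwrite a newer one.
 */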
3911
3912 /*
3913 * Process segments with URG.
3914 */
3915 if ((thflags & TH_URG) && th->th_urp &&
3916 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
3917 /*
3918 * This is a kludge, but if we receive and accept
3919 * random urgent pointers, we'll crash in
3920 * soreceive. It's hard to imagine someone
3921 * actually wanting to send this much urgent data.
3922 */
3923 if (th->th_urp + so->so_rcv.sb_cc > sb_max) {
3924 th->th_urp = 0; /* XXX */
3925 thflags &= ~TH_URG; /* XXX */
3926 goto dodata; /* XXX */
3927 }
3928 /*
3929 * If this segment advances the known urgent pointer,
3930 * then mark the data stream. This should not happen
3931 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
3932 * a FIN has been received from the remote side.
3933 * In these states we ignore the URG.
3934 *
3935 * According to RFC961 (Assigned Protocols),
3936 * the urgent pointer points to the last octet
3937 * of urgent data. We continue, however,
3938 * to consider it to indicate the first octet
3939 * of data past the urgent section as the original
3940 * spec states (in one of two places).
3941 */
3942 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
3943 tp->rcv_up = th->th_seq + th->th_urp;
3944 so->so_oobmark = so->so_rcv.sb_cc +
3945 (tp->rcv_up - tp->rcv_nxt) - 1;
3946 if (so->so_oobmark == 0) {
3947 so->so_state |= SS_RCVATMARK;
3948 postevent(so, 0, EV_OOB);
3949 }
3950 sohasoutofband(so);
3951 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
3952 }
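/*
 * Worked example (values assumed): with 100 bytes already queued
 * (so_rcv.sb_cc = 100), rcv_nxt = 2000 and th_seq + th_urp placing
 * rcv_up at 2005, so_oobmark = 100 + (2005 - 2000) - 1 = 104, the
 * receive-buffer offset at which the out-of-band mark falls.
 */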
3953 /*
 3954 * Remove out of band data so it doesn't get presented to the user.
3955 * This can happen independent of advancing the URG pointer,
3956 * but if two URG's are pending at once, some out-of-band
3957 * data may creep in... ick.
3958 */
b0d623f7 3959 if (th->th_urp <= (u_int32_t)tlen
3960#if SO_OOBINLINE
3961 && (so->so_options & SO_OOBINLINE) == 0
3962#endif
3963 )
3964 tcp_pulloutofband(so, th, m,
3965 drop_hdrlen); /* hdr drop is delayed */
6d2010ae 3966 } else {
3967 /*
3968 * If no out of band data is expected,
3969 * pull receive urgent pointer along
3970 * with the receive window.
3971 */
3972 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
3973 tp->rcv_up = tp->rcv_nxt;
3974 }
3975dodata:
1c79356b 3976
 3977 /* Set socket's connect or disconnect state correctly before doing data.
3978 * The following might unlock the socket if there is an upcall or a socket
3979 * filter.
3980 */
3981 if (isconnected) {
3982 soisconnected(so);
3983 } else if (isdisconnected) {
3984 soisdisconnected(so);
3985 }
3986
3987 /* Let's check the state of pcb just to make sure that it did not get closed
3988 * when we unlocked above
3989 */
3990 if (inp->inp_state == INPCB_STATE_DEAD) {
3991 /* Just drop the packet that we are processing and return */
3992 goto drop;
3993 }
3994
3995 /*
3996 * Process the segment text, merging it into the TCP sequencing queue,
3997 * and arranging for acknowledgment of receipt if necessary.
3998 * This process logically involves adjusting tp->rcv_wnd as data
3999 * is presented to the user (this happens in tcp_usrreq.c,
4000 * case PRU_RCVD). If a FIN has already been received on this
4001 * connection then we just ignore the text.
4002 */
c910b4d9 4003 if ((tlen || (thflags & TH_FIN)) &&
1c79356b 4004 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
4005 tcp_seq save_start = th->th_seq;
4006 tcp_seq save_end = th->th_seq + tlen;
4007 m_adj(m, drop_hdrlen); /* delayed header drop */
4008 /*
4009 * Insert segment which includes th into TCP reassembly queue
4010 * with control block tp. Set thflags to whether reassembly now
4011 * includes a segment with FIN. This handles the common case
4012 * inline (segment is the next to be received on an established
4013 * connection, and the queue is empty), avoiding linkage into
4014 * and removal from the queue and repetition of various
4015 * conversions.
4016 * Set DELACK for segments received in order, but ack
4017 * immediately when segments are out of order (so
4018 * fast retransmit can work).
4019 */
4020 if (th->th_seq == tp->rcv_nxt &&
4021 LIST_EMPTY(&tp->t_segq) &&
4022 TCPS_HAVEESTABLISHED(tp->t_state)) {
316670eb 4023 TCP_INC_VAR(tp->t_unacksegs, nlropkts);
4024 /*
4025 * Calculate the RTT on the receiver only if the
4026 * connection is in streaming mode and the last
 4027 * packet was not an end-of-write.
4028 */
4029 if ((tp->t_flags & TF_STRETCHACK) &&
4030 !(tp->t_flagsext & TF_STREAMEOW))
4031 tcp_compute_rtt(tp, &to, th);
4032
4033 if (DELAY_ACK(tp, th) &&
4034 ((tp->t_flags & TF_ACKNOW) == 0) ) {
4035 if ((tp->t_flags & TF_DELACK) == 0) {
4036 tp->t_flags |= TF_DELACK;
4037 tp->t_timer[TCPT_DELACK] =
4038 OFFSET_FROM_START(tp, tcp_delack);
6d2010ae 4039 }
9bccf70c 4040 }
91447636 4041 else {
9bccf70c 4042 tp->t_flags |= TF_ACKNOW;
91447636 4043 }
4044 tp->rcv_nxt += tlen;
4045 thflags = th->th_flags & TH_FIN;
316670eb 4046 TCP_INC_VAR(tcpstat.tcps_rcvpack, nlropkts);
9bccf70c 4047 tcpstat.tcps_rcvbyte += tlen;
6d2010ae 4048 if (nstat_collect) {
4049 if (m->m_pkthdr.pkt_flags & PKTF_SW_LRO_PKT) {
4050 INP_ADD_STAT(inp, cell, wifi, rxpackets,
4051 m->m_pkthdr.lro_npkts);
4052 } else {
4053 INP_ADD_STAT(inp, cell, wifi, rxpackets, 1);
316670eb 4054 }
39236c6e 4055 INP_ADD_STAT(inp, cell, wifi, rxbytes, tlen);
6d2010ae 4056 }
316670eb 4057 tcp_sbrcv_grow(tp, &so->so_rcv, &to, tlen);
6d2010ae 4058 so_recv_data_stat(so, m, drop_hdrlen);
4059
4060 if (sbappendstream_rcvdemux(so, m,
4061 th->th_seq - (tp->irs + 1), 0)) {
91447636 4062 sorwakeup(so);
39236c6e 4063 }
9bccf70c 4064 } else {
39236c6e 4065 thflags = tcp_reass(tp, th, &tlen, m, ifp);
4066 tp->t_flags |= TF_ACKNOW;
4067 }
1c79356b 4068
39236c6e 4069 if (tlen > 0 && SACK_ENABLED(tp))
4070 tcp_update_sack_list(tp, save_start, save_end);
4071
4072 tcp_adaptive_rwtimo_check(tp, tlen);
4073
4074 if (tp->t_flags & TF_DELACK)
4075 {
4076#if INET6
4077 if (isipv6) {
4078 KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport),
4079 (((ip6->ip6_src.s6_addr16[0]) << 16) | (ip6->ip6_dst.s6_addr16[0])),
4080 th->th_seq, th->th_ack, th->th_win);
4081 }
4082 else
4083#endif
4084 {
4085 KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport),
4086 (((ip->ip_src.s_addr & 0xffff) << 16) | (ip->ip_dst.s_addr & 0xffff)),
4087 th->th_seq, th->th_ack, th->th_win);
4088 }
4089
1c79356b 4090 }
4091 } else {
4092 m_freem(m);
4093 thflags &= ~TH_FIN;
4094 }
4095
4096 /*
 4097 * If a FIN is received, ACK the FIN and let the user know
4098 * that the connection is closing.
4099 */
4100 if (thflags & TH_FIN) {
4101 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
4102 socantrcvmore(so);
4103 postevent(so, 0, EV_FIN);
4104 /*
4105 * If connection is half-synchronized
4106 * (ie NEEDSYN flag on) then delay ACK,
4107 * so it may be piggybacked when SYN is sent.
4108 * Otherwise, since we received a FIN then no
4109 * more input can be expected, send ACK now.
1c79356b 4110 */
316670eb 4111 TCP_INC_VAR(tp->t_unacksegs, nlropkts);
4112 if (DELAY_ACK(tp, th) && (tp->t_flags & TF_NEEDSYN)) {
4113 if ((tp->t_flags & TF_DELACK) == 0) {
4114 tp->t_flags |= TF_DELACK;
4115 tp->t_timer[TCPT_DELACK] = OFFSET_FROM_START(tp, tcp_delack);
4116 }
1c79356b 4117 }
91447636 4118 else {
1c79356b 4119 tp->t_flags |= TF_ACKNOW;
91447636 4120 }
4121 tp->rcv_nxt++;
4122 }
4123 switch (tp->t_state) {
4124
4125 /*
4126 * In SYN_RECEIVED and ESTABLISHED STATES
4127 * enter the CLOSE_WAIT state.
4128 */
4129 case TCPS_SYN_RECEIVED:
6d2010ae 4130 tp->t_starttime = tcp_now;
1c79356b 4131 case TCPS_ESTABLISHED:
4132 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
4133 struct tcpcb *, tp, int32_t, TCPS_CLOSE_WAIT);
4134 tp->t_state = TCPS_CLOSE_WAIT;
4135 break;
4136
4137 /*
4138 * If still in FIN_WAIT_1 STATE FIN has not been acked so
4139 * enter the CLOSING state.
4140 */
4141 case TCPS_FIN_WAIT_1:
4142 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
4143 struct tcpcb *, tp, int32_t, TCPS_CLOSING);
4144 tp->t_state = TCPS_CLOSING;
4145 break;
4146
4147 /*
4148 * In FIN_WAIT_2 state enter the TIME_WAIT state,
4149 * starting the time-wait timer, turning off the other
4150 * standard timers.
4151 */
4152 case TCPS_FIN_WAIT_2:
4153 DTRACE_TCP4(state__change, void, NULL,
4154 struct inpcb *, inp,
4155 struct tcpcb *, tp,
4156 int32_t, TCPS_TIME_WAIT);
4157 tp->t_state = TCPS_TIME_WAIT;
4158 tcp_canceltimers(tp);
1c79356b 4159 if (tp->cc_recv != 0 &&
6d2010ae 4160 ((int)(tcp_now - tp->t_starttime)) < tcp_msl) {
4161 /* For transaction client, force ACK now. */
4162 tp->t_flags |= TF_ACKNOW;
2d21ac55 4163 tp->t_unacksegs = 0;
1c79356b 4164 }
39236c6e 4165 add_to_time_wait(tp, 2 * tcp_msl);
4166 soisdisconnected(so);
4167 break;
4168
4169 /*
4170 * In TIME_WAIT state restart the 2 MSL time_wait timer.
4171 */
4172 case TCPS_TIME_WAIT:
6d2010ae 4173 add_to_time_wait(tp, 2 * tcp_msl);
4174 break;
4175 }
4176 }
4177#if TCPDEBUG
4178 if (so->so_options & SO_DEBUG)
4179 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen,
1c79356b 4180 &tcp_savetcp, 0);
4181#endif
4182
4183 /*
4184 * Return any desired output.
4185 */
2d21ac55 4186 if (needoutput || (tp->t_flags & TF_ACKNOW)) {
1c79356b 4187 (void) tcp_output(tp);
2d21ac55 4188 }
4189
4190 tcp_check_timer_state(tp);
4191
4192
91447636 4193 tcp_unlock(so, 1, 0);
4194 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0);
4195 return;
4196
4197dropafterack:
4198 /*
4199 * Generate an ACK dropping incoming segment if it occupies
4200 * sequence space, where the ACK reflects our state.
4201 *
4202 * We can now skip the test for the RST flag since all
4203 * paths to this code happen after packets containing
4204 * RST have been dropped.
4205 *
4206 * In the SYN-RECEIVED state, don't send an ACK unless the
4207 * segment we received passes the SYN-RECEIVED ACK test.
4208 * If it fails send a RST. This breaks the loop in the
4209 * "LAND" DoS attack, and also prevents an ACK storm
4210 * between two listening ports that have been sent forged
4211 * SYN segments, each with the source address of the other.
4212 */
4213 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
4214 (SEQ_GT(tp->snd_una, th->th_ack) ||
4215 SEQ_GT(th->th_ack, tp->snd_max)) ) {
4216 rstreason = BANDLIM_RST_OPENPORT;
39236c6e 4217 IF_TCP_STATINC(ifp, dospacket);
1c79356b 4218 goto dropwithreset;
9bccf70c 4219 }
1c79356b 4220#if TCPDEBUG
4221 if (so->so_options & SO_DEBUG)
4222 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
1c79356b 4223 &tcp_savetcp, 0);
4224#endif
4225 m_freem(m);
4226 tp->t_flags |= TF_ACKNOW;
4227 (void) tcp_output(tp);
4228
4229 /* Don't need to check timer state as we should have done it during tcp_output */
91447636 4230 tcp_unlock(so, 1, 0);
4231 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0);
4232 return;
4233dropwithresetnosock:
4234 nosock = 1;
4235dropwithreset:
4236 /*
4237 * Generate a RST, dropping incoming segment.
4238 * Make ACK acceptable to originator of segment.
4239 * Don't bother to respond if destination was broadcast/multicast.
4240 */
4241 if ((thflags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
4242 goto drop;
4243#if INET6
4244 if (isipv6) {
4245 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
4246 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
4247 goto drop;
4248 } else
4249#endif /* INET6 */
4250 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
4251 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
4252 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
4253 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
1c79356b 4254 goto drop;
4255 /* IPv6 anycast check is done at tcp6_input() */
4256
4257 /*
4258 * Perform bandwidth limiting.
4259 */
4260#if ICMP_BANDLIM
4261 if (badport_bandlim(rstreason) < 0)
4262 goto drop;
4263#endif
4264
1c79356b 4265#if TCPDEBUG
4266 if (tp == 0 || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
4267 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
1c79356b 4268 &tcp_savetcp, 0);
4269#endif
4270 if (thflags & TH_ACK)
4271 /* mtod() below is safe as long as hdr dropping is delayed */
4272 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, th->th_ack,
6d2010ae 4273 TH_RST, ifscope, nocell);
4274 else {
4275 if (thflags & TH_SYN)
4276 tlen++;
4277 /* mtod() below is safe as long as hdr dropping is delayed */
4278 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
6d2010ae 4279 (tcp_seq)0, TH_RST|TH_ACK, ifscope, nocell);
4280 }
4281 /* destroy temporarily created socket */
4282 if (dropsocket) {
4283 (void) soabort(so);
4284 tcp_unlock(so, 1, 0);
39236c6e 4285 } else if ((inp != NULL) && (nosock == 0)) {
4286 tcp_unlock(so, 1, 0);
4287 }
4288 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0);
4289 return;
4290dropnosock:
4291 nosock = 1;
4292drop:
4293 /*
4294 * Drop space held by incoming segment and return.
4295 */
4296#if TCPDEBUG
4297 if (tp == 0 || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
4298 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
1c79356b 4299 &tcp_savetcp, 0);
4300#endif
4301 m_freem(m);
1c79356b 4302 /* destroy temporarily created socket */
4303 if (dropsocket) {
4304 (void) soabort(so);
4305 tcp_unlock(so, 1, 0);
4306 }
4307 else if (nosock == 0) {
4308 tcp_unlock(so, 1, 0);
4309 }
4310 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0);
4311 return;
4312}
4313
4314 /*
4315 * Parse TCP options and place in tcpopt.
4316 */
4317 static void
4318 tcp_dooptions(tp, cp, cnt, th, to, input_ifscope)
4319 struct tcpcb *tp;
4320 u_char *cp;
4321 int cnt;
4322 struct tcphdr *th;
4323 struct tcpopt *to;
c910b4d9 4324 unsigned int input_ifscope;
4325{
4326 u_short mss = 0;
4327 int opt, optlen;
4328
4329 for (; cnt > 0; cnt -= optlen, cp += optlen) {
4330 opt = cp[0];
4331 if (opt == TCPOPT_EOL)
4332 break;
4333 if (opt == TCPOPT_NOP)
4334 optlen = 1;
4335 else {
4336 if (cnt < 2)
4337 break;
1c79356b 4338 optlen = cp[1];
9bccf70c 4339 if (optlen < 2 || optlen > cnt)
4340 break;
4341 }
4342 switch (opt) {
4343
4344 default:
4345 continue;
4346
4347 case TCPOPT_MAXSEG:
4348 if (optlen != TCPOLEN_MAXSEG)
4349 continue;
4350 if (!(th->th_flags & TH_SYN))
4351 continue;
4352 bcopy((char *) cp + 2, (char *) &mss, sizeof(mss));
9bccf70c 4353 NTOHS(mss);
4354 break;
4355
4356 case TCPOPT_WINDOW:
4357 if (optlen != TCPOLEN_WINDOW)
4358 continue;
4359 if (!(th->th_flags & TH_SYN))
4360 continue;
39236c6e 4361 to->to_flags |= TOF_SCALE;
4362 tp->t_flags |= TF_RCVD_SCALE;
4363 tp->requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT);
4364 break;
4365
4366 case TCPOPT_TIMESTAMP:
4367 if (optlen != TCPOLEN_TIMESTAMP)
4368 continue;
8ad349bb 4369 to->to_flags |= TOF_TS;
4370 bcopy((char *)cp + 2,
4371 (char *)&to->to_tsval, sizeof(to->to_tsval));
4372 NTOHL(to->to_tsval);
4373 bcopy((char *)cp + 6,
4374 (char *)&to->to_tsecr, sizeof(to->to_tsecr));
4375 NTOHL(to->to_tsecr);
4376 /*
4377 * A timestamp received in a SYN makes
4378 * it ok to send timestamp requests and replies.
4379 */
4380 if (th->th_flags & TH_SYN) {
4381 tp->t_flags |= TF_RCVD_TSTMP;
4382 tp->ts_recent = to->to_tsval;
4383 tp->ts_recent_age = tcp_now;
4384 }
4385 break;
4386 case TCPOPT_SACK_PERMITTED:
4387 if (!tcp_do_sack ||
4388 optlen != TCPOLEN_SACK_PERMITTED)
1c79356b 4389 continue;
1c79356b 4390 if (th->th_flags & TH_SYN)
8ad349bb 4391 to->to_flags |= TOF_SACK;
1c79356b 4392 break;
4393 case TCPOPT_SACK:
4394 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
1c79356b 4395 continue;
4396 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
4397 to->to_sacks = cp + 2;
4398 tcpstat.tcps_sack_rcv_blocks++;
4399
1c79356b 4400 break;
4401
4402#if MPTCP
4403 case TCPOPT_MULTIPATH:
4404 tcp_do_mptcp_options(tp, cp, th, to, optlen);
4405 break;
4406#endif /* MPTCP */
4407 }
4408 }
9bccf70c 4409 if (th->th_flags & TH_SYN)
c910b4d9 4410 tcp_mss(tp, mss, input_ifscope); /* sets t_maxseg */
4411}
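/*
 * Illustrative option bytes (assumed): an MSS option arriving as
 * {0x02, 0x04, 0x05, 0xb4} -- kind 2, length 4, value 0x05b4 --
 * makes the loop above copy and byte-swap 1460 into mss, and on a
 * SYN tcp_mss() then folds that into t_maxseg.
 */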
4412
4413/*
4414 * Pull out of band byte out of a segment so
4415 * it doesn't appear in the user's data queue.
4416 * It is still reflected in the segment length for
4417 * sequencing purposes.
4418 */
4419static void
9bccf70c 4420tcp_pulloutofband(so, th, m, off)
4421 struct socket *so;
4422 struct tcphdr *th;
4423 register struct mbuf *m;
9bccf70c 4424 int off; /* delayed-to-be-dropped hdr length */
1c79356b 4425{
9bccf70c 4426 int cnt = off + th->th_urp - 1;
4427
4428 while (cnt >= 0) {
4429 if (m->m_len > cnt) {
4430 char *cp = mtod(m, caddr_t) + cnt;
4431 struct tcpcb *tp = sototcpcb(so);
4432
4433 tp->t_iobc = *cp;
4434 tp->t_oobflags |= TCPOOB_HAVEDATA;
4435 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
4436 m->m_len--;
4437 if (m->m_flags & M_PKTHDR)
4438 m->m_pkthdr.len--;
4439 return;
4440 }
4441 cnt -= m->m_len;
4442 m = m->m_next;
4443 if (m == 0)
4444 break;
4445 }
4446 panic("tcp_pulloutofband");
4447}
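/*
 * Illustrative case (values assumed): with off = drop_hdrlen = 52
 * and th_urp = 3, cnt = 54, so the urgent byte is the 55th byte of
 * the mbuf chain; it is saved in t_iobc and squeezed out of the
 * chain so it never reaches the in-band receive queue.
 */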
4448
4449uint32_t
4450get_base_rtt(struct tcpcb *tp)
4451{
4452 uint32_t base_rtt = 0, i;
4453 for (i = 0; i < N_RTT_BASE; ++i) {
4454 if (tp->rtt_hist[i] != 0 &&
4455 (base_rtt == 0 || tp->rtt_hist[i] < base_rtt))
4456 base_rtt = tp->rtt_hist[i];
4457 }
4458 return base_rtt;
4459}
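/*
 * Illustrative history (values assumed, four slots shown): with
 * rtt_hist = {52, 48, 0, 60} the zero (unused) slots are skipped
 * and get_base_rtt() returns 48, the smallest RTT seen across the
 * kept minutes.
 */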
4460
4461 /* Each value of RTT base represents the minimum RTT seen in a minute.
4462 * We keep up to N_RTT_BASE minutes' worth of history.
4463 */
4464void
4465update_base_rtt(struct tcpcb *tp, uint32_t rtt)
4466{
4467 int32_t i, qdelay;
4468 u_int32_t base_rtt;
4469
6d2010ae 4470 if (++tp->rtt_count >= rtt_samples_per_slot) {
4471#if TRAFFIC_MGT
4472 /*
4473 * If the recv side is being throttled, check if the
4474 * current RTT is closer to the base RTT seen in
4475 * first (recent) two slots. If so, unthrottle the stream.
4476 */
4477 if (tp->t_flagsext & TF_RECV_THROTTLE) {
4478 base_rtt = min(tp->rtt_hist[0], tp->rtt_hist[1]);
4479 qdelay = tp->t_rttcur - base_rtt;
4480 if (qdelay < target_qdelay)
4481 tp->t_flagsext &= ~(TF_RECV_THROTTLE);
4482 }
4483#endif /* TRAFFIC_MGT */
4484
4485 for (i = (N_RTT_BASE-1); i > 0; --i) {
4486 tp->rtt_hist[i] = tp->rtt_hist[i-1];
4487 }
4488 tp->rtt_hist[0] = rtt;
4489 tp->rtt_count = 0;
4490 } else {
4491 tp->rtt_hist[0] = min(tp->rtt_hist[0], rtt);
4492 }
4493}
4494
4495/*
4496 * If we have a timestamp reply, update smoothed RTT. If no timestamp is
4497 * present but transmit timer is running and timed sequence number was
4498 * acked, update smoothed RTT.
4499 *
4500 * If timestamps are supported, a receiver can update RTT even if
4501 * there is no outstanding data.
4502 *
 4503 * Some boxes send broken timestamp replies during the SYN+ACK phase;
 4504 * ignore timestamps of 0 or we could calculate a huge RTT and blow up
 4505 * the retransmit timer.
4506 */
4507static void
4508tcp_compute_rtt(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th)
4509{
4510 VERIFY(to != NULL && th != NULL);
4511 if (((to->to_flags & TOF_TS) != 0) &&
4512 (to->to_tsecr != 0) &&
4513 TSTMP_GEQ(tcp_now, to->to_tsecr)) {
4514 tcp_xmit_timer(tp, tcp_now - to->to_tsecr,
4515 to->to_tsecr, th->th_ack);
4516 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
4517 tcp_xmit_timer(tp, tcp_now - tp->t_rtttime, 0,
4518 th->th_ack);
4519 }
4520}
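/*
 * Illustrative case (values assumed): if tcp_now = 5000 and the
 * echoed timestamp to_tsecr = 4940, the sample passed to
 * tcp_xmit_timer() is 60 ticks, even with no timed segment
 * outstanding; without timestamps, the t_rtttime branch fires only
 * when th_ack moves past the timed sequence number t_rtseq.
 */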
4521
4522/*
4523 * Collect new round-trip time estimate
4524 * and update averages and current timeout.
4525 */
4526static void
4527tcp_xmit_timer(register struct tcpcb *tp, int rtt,
4528 u_int32_t tsecr, tcp_seq th_ack)
4529{
4530 register int delta;
4531
4532 if (tp->t_flagsext & TF_RECOMPUTE_RTT) {
4533 if (SEQ_GT(th_ack, tp->snd_una) &&
4534 SEQ_LEQ(th_ack, tp->snd_max) &&
4535 (tsecr == 0 ||
4536 TSTMP_GEQ(tsecr, tp->t_badrexmt_time))) {
4537 /*
 4538 * We received a new ACK after a
 4539 * spurious timeout. Adapt the retransmission
 4540 * timer as described in RFC 4015.
4541 */
4542 tp->t_flagsext &= ~(TF_RECOMPUTE_RTT);
4543 tp->t_badrexmt_time = 0;
4544 tp->t_srtt = max(tp->t_srtt_prev, rtt);
4545 tp->t_srtt = tp->t_srtt << TCP_RTT_SHIFT;
4546 tp->t_rttvar = max(tp->t_rttvar_prev, (rtt >> 1));
4547 tp->t_rttvar = tp->t_rttvar << TCP_RTTVAR_SHIFT;
4548
4549 if (tp->t_rttbest > (tp->t_srtt + tp->t_rttvar))
4550 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
4551
4552 goto compute_rto;
4553 } else {
4554 return;
4555 }
4556 }
4557
4558 tcpstat.tcps_rttupdated++;
4559 tp->t_rttupdated++;
4560
4561 if (rtt > 0) {
4562 tp->t_rttcur = rtt;
4563 update_base_rtt(tp, rtt);
4564 }
4565
4566 if (tp->t_srtt != 0) {
4567 /*
4568 * srtt is stored as fixed point with 5 bits after the
6d2010ae 4569 * binary point (i.e., scaled by 32). The following magic
4570 * is equivalent to the smoothing algorithm in rfc793 with
4571 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
4572 * point).
4573 *
 4574 * FreeBSD adjusts rtt to origin 0 by subtracting 1
 4575 * from the provided rtt value. This was required because
 4576 * of the way t_rtttime was initialized to 1 before.
4577 * Since we changed t_rtttime to be based on
6d2010ae 4578 * tcp_now, this extra adjustment is not needed.
1c79356b 4579 */
6d2010ae 4580 delta = (rtt << TCP_DELTA_SHIFT)
4581 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
4582
4583 if ((tp->t_srtt += delta) <= 0)
4584 tp->t_srtt = 1;
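/*
 * Worked example, assuming the usual TCP_RTT_SHIFT = 5 and
 * TCP_DELTA_SHIFT = 2: with t_srtt = 3200 (100 ticks scaled by 32)
 * and a new rtt = 80 ticks, delta = (80 << 2) - (3200 >> 3) = -80,
 * so t_srtt becomes 3120, i.e. 97.5 ticks = 100 * 7/8 + 80 / 8.
 */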
4585
4586 /*
4587 * We accumulate a smoothed rtt variance (actually, a
4588 * smoothed mean difference), then set the retransmit
4589 * timer to smoothed rtt + 4 times the smoothed variance.
4590 * rttvar is stored as fixed point with 4 bits after the
4591 * binary point (scaled by 16). The following is
4592 * equivalent to rfc793 smoothing with an alpha of .75
4593 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
4594 * rfc793's wired-in beta.
4595 */
4596 if (delta < 0)
4597 delta = -delta;
4598 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
4599 if ((tp->t_rttvar += delta) <= 0)
4600 tp->t_rttvar = 1;
4601 if (tp->t_rttbest == 0 ||
4602 tp->t_rttbest > (tp->t_srtt + tp->t_rttvar))
4603 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
4604 } else {
4605 /*
4606 * No rtt measurement yet - use the unsmoothed rtt.
4607 * Set the variance to half the rtt (so our first
4608 * retransmit happens at 3*rtt).
4609 */
4610 tp->t_srtt = rtt << TCP_RTT_SHIFT;
4611 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
4612 }
4613
4614compute_rto:
4615 nstat_route_rtt(tp->t_inpcb->inp_route.ro_rt, tp->t_srtt,
4616 tp->t_rttvar);
9bccf70c 4617 tp->t_rtttime = 0;
1c79356b 4618 tp->t_rxtshift = 0;
39236c6e 4619 tp->t_rxtstart = 0;
4620
4621 /*
4622 * the retransmit should happen at rtt + 4 * rttvar.
4623 * Because of the way we do the smoothing, srtt and rttvar
4624 * will each average +1/2 tick of bias. When we compute
4625 * the retransmit timer, we want 1/2 tick of rounding and
4626 * 1 extra tick because of +-1/2 tick uncertainty in the
4627 * firing of the timer. The bias will give us exactly the
4628 * 1.5 tick we need. But, because the bias is
4629 * statistical, we have to test that we don't drop below
4630 * the minimum feasible timer (which is 2 ticks).
4631 */
4632 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
4633 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX,
4634 TCP_ADD_REXMTSLOP(tp));
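/*
 * Rough illustration (values assumed): with a smoothed rtt near
 * 100 ticks and rttvar near 12 ticks, TCP_REXMTVAL comes out
 * around 100 + 4 * 12 = 148 ticks, which TCPT_RANGESET then clamps
 * between the rtt + 2 floor and TCPTV_REXMTMAX plus the slop.
 */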
4635
4636 /*
4637 * We received an ack for a packet that wasn't retransmitted;
4638 * it is probably safe to discard any error indications we've
4639 * received recently. This isn't quite right, but close enough
4640 * for now (a route might have failed after we sent a segment,
4641 * and the return path might not be symmetrical).
4642 */
4643 tp->t_softerror = 0;
4644}
4645
4646static inline unsigned int
4647tcp_maxmtu(struct rtentry *rt)
4648{
4649 unsigned int maxmtu;
4650
b0d623f7 4651 RT_LOCK_ASSERT_HELD(rt);
4652 if (rt->rt_rmx.rmx_mtu == 0)
4653 maxmtu = rt->rt_ifp->if_mtu;
4654 else
4655 maxmtu = MIN(rt->rt_rmx.rmx_mtu, rt->rt_ifp->if_mtu);
4656
4657 return (maxmtu);
4658}
4659
4660#if INET6
4661static inline unsigned int
4662tcp_maxmtu6(struct rtentry *rt)
4663{
4664 unsigned int maxmtu;
316670eb 4665 struct nd_ifinfo *ndi;
2d21ac55 4666
4667 RT_LOCK_ASSERT_HELD(rt);
4668 lck_rw_lock_shared(nd_if_rwlock);
4669 if ((ndi = ND_IFINFO(rt->rt_ifp)) != NULL && !ndi->initialized)
4670 ndi = NULL;
4671 if (ndi != NULL)
4672 lck_mtx_lock(&ndi->lock);
4673 if (rt->rt_rmx.rmx_mtu == 0)
4674 maxmtu = IN6_LINKMTU(rt->rt_ifp);
4675 else
4676 maxmtu = MIN(rt->rt_rmx.rmx_mtu, IN6_LINKMTU(rt->rt_ifp));
4677 if (ndi != NULL)
4678 lck_mtx_unlock(&ndi->lock);
b0d623f7 4679 lck_rw_done(nd_if_rwlock);
4680
4681 return (maxmtu);
4682}
4683#endif
4684
4685/*
4686 * Determine a reasonable value for maxseg size.
4687 * If the route is known, check route for mtu.
4688 * If none, use an mss that can be handled on the outgoing
4689 * interface without forcing IP to fragment; if bigger than
4690 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
4691 * to utilize large mbufs. If no route is found, route has no mtu,
4692 * or the destination isn't local, use a default, hopefully conservative
4693 * size (usually 512 or the default IP max size, but no more than the mtu
4694 * of the interface), as we can't discover anything about intervening
4695 * gateways or networks. We also initialize the congestion/slow start
4696 * window to be a single segment if the destination isn't local.
4697 * While looking at the routing entry, we also initialize other path-dependent
4698 * parameters from pre-set or cached values in the routing entry.
4699 *
4700 * Also take into account the space needed for options that we
4701 * send regularly. Make maxseg shorter by that amount to assure
4702 * that we can send maxseg amount of data even when the options
4703 * are present. Store the upper limit of the length of options plus
4704 * data in maxopd.
4705 *
4706 * NOTE that this routine is only called when we process an incoming
 4707 * segment; for outgoing segments, only tcp_mssopt is called.
4708 *
4709 */
4710void
c910b4d9 4711tcp_mss(tp, offer, input_ifscope)
4712 struct tcpcb *tp;
4713 int offer;
c910b4d9 4714 unsigned int input_ifscope;
4715{
4716 register struct rtentry *rt;
4717 struct ifnet *ifp;
4718 register int rtt, mss;
b0d623f7 4719 u_int32_t bufsize;
4720 struct inpcb *inp;
4721 struct socket *so;
4722 struct rmxp_tao *taop;
4723 int origoffer = offer;
b0d623f7 4724 u_int32_t sb_max_corrected;
2d21ac55 4725 int isnetlocal = 0;
1c79356b 4726#if INET6
4727 int isipv6;
4728 int min_protoh;
4729#endif
4730
4731 inp = tp->t_inpcb;
4732#if INET6
4733 isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
4734 min_protoh = isipv6 ? sizeof (struct ip6_hdr) + sizeof (struct tcphdr)
4735 : sizeof (struct tcpiphdr);
4736#else
4737#define min_protoh (sizeof (struct tcpiphdr))
4738#endif
b0d623f7 4739
1c79356b 4740#if INET6
2d21ac55 4741 if (isipv6) {
6d2010ae 4742 rt = tcp_rtlookup6(inp, input_ifscope);
2d21ac55 4743 }
4744 else
4745#endif /* INET6 */
2d21ac55 4746 {
c910b4d9 4747 rt = tcp_rtlookup(inp, input_ifscope);
2d21ac55 4748 }
4749 isnetlocal = (tp->t_flags & TF_LOCAL);
4750
4751 if (rt == NULL) {
4752 tp->t_maxopd = tp->t_maxseg =
4753#if INET6
4754 isipv6 ? tcp_v6mssdflt :
4755#endif /* INET6 */
4756 tcp_mssdflt;
4757 return;
4758 }
4759 ifp = rt->rt_ifp;
4760 /*
4761 * Slower link window correction:
 4762 * If a value is specified for slowlink_wsize, use it for PPP links
 4763 * believed to be on a serial modem (speed < 128Kbps). Excludes 9600bps as
 4764 * it is the default value advertised by pseudo-devices over ppp.
4765 */
4766 if (ifp->if_type == IFT_PPP && slowlink_wsize > 0 &&
4767 ifp->if_baudrate > 9600 && ifp->if_baudrate <= 128000) {
4768 tp->t_flags |= TF_SLOWLINK;
4769 }
4770 so = inp->inp_socket;
4771
4772 taop = rmx_taop(rt->rt_rmx);
4773 /*
 4774 * Offer == -1 means that we didn't receive SYN yet;
 4775 * use the cached value in that case.
4776 */
4777 if (offer == -1)
4778 offer = taop->tao_mssopt;
4779 /*
 4780 * Offer == 0 means that there was no MSS on the SYN segment;
4781 * in this case we use tcp_mssdflt.
4782 */
4783 if (offer == 0)
4784 offer =
4785#if INET6
4786 isipv6 ? tcp_v6mssdflt :
4787#endif /* INET6 */
4788 tcp_mssdflt;
4789 else {
4790 /*
4791 * Prevent DoS attack with too small MSS. Round up
4792 * to at least minmss.
4793 */
4794 offer = max(offer, tcp_minmss);
4795 /*
4796 * Sanity check: make sure that maxopd will be large
 4797 * enough to allow some data on segments even if all
 4798 * the option space is used (40 bytes). Otherwise
4799 * funny things may happen in tcp_output.
4800 */
4801 offer = max(offer, 64);
e5568f75 4802 }
4803 taop->tao_mssopt = offer;
4804
4805 /*
4806 * While we're here, check if there's an initial rtt
4807 * or rttvar. Convert from the route-table units
4808 * to scaled multiples of the slow timeout timer.
4809 */
4810 if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt) != 0) {
4811 tcp_getrt_rtt(tp, rt);
4812 } else {
6d2010ae 4813 tp->t_rttmin = isnetlocal ? tcp_TCPTV_MIN : TCPTV_REXMTMIN;
316670eb 4814 }
2d21ac55 4815
9bccf70c 4816#if INET6
4817 mss = (isipv6 ? tcp_maxmtu6(rt) : tcp_maxmtu(rt));
4818#else
4819 mss = tcp_maxmtu(rt);
9bccf70c 4820#endif
4821 mss -= min_protoh;
4822
4823 if (rt->rt_rmx.rmx_mtu == 0) {
4824#if INET6
4825 if (isipv6) {
2d21ac55 4826 if (!isnetlocal)
4827 mss = min(mss, tcp_v6mssdflt);
4828 } else
4829#endif /* INET6 */
2d21ac55 4830 if (!isnetlocal)
4831 mss = min(mss, tcp_mssdflt);
4832 }
2d21ac55 4833
4834 mss = min(mss, offer);
4835 /*
4836 * maxopd stores the maximum length of data AND options
4837 * in a segment; maxseg is the amount of data in a normal
4838 * segment. We need to store this value (maxopd) apart
4839 * from maxseg, because now every segment carries options
4840 * and thus we normally have somewhat less data in segments.
4841 */
4842 tp->t_maxopd = mss;
4843
4844 /*
 4845 * origoffer==-1 indicates that no segments were received yet;
 4846 * in this case we just guess.
1c79356b 4847 */
8ad349bb 4848 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
4849 (origoffer == -1 ||
4850 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
4851 mss -= TCPOLEN_TSTAMP_APPA;
1c79356b 4852
4853#if MPTCP
4854 mss -= mptcp_adj_mss(tp, FALSE);
4855#endif /* MPTCP */
4856 tp->t_maxseg = mss;
4857
4858 /*
 4859 * Calculate corrected value for sb_max; be sure to promote the
 4860 * numerator to 64 bits for large sb_max values, else it will overflow.
4861 */
4862 sb_max_corrected = (sb_max * (u_int64_t)MCLBYTES) / (MSIZE + MCLBYTES);
4863
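/*
 * Worked example (typical MCLBYTES = 2048 and MSIZE = 256 assumed):
 * the correction factor is 2048 / 2304, so an 8 MB sb_max yields
 * sb_max_corrected of about 7.1 MB -- the share of the limit that
 * can hold payload rather than mbuf bookkeeping.
 */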
1c79356b 4864 /*
4865 * If there's a pipesize (ie loopback), change the socket
4866 * buffer to that size only if it's bigger than the current
4867 * sockbuf size. Make the socket buffers an integral
4868 * number of mss units; if the mss is larger than
4869 * the socket buffer, decrease the mss.
4870 */
4871#if RTV_SPIPE
4872 bufsize = rt->rt_rmx.rmx_sendpipe;
4873 if (bufsize < so->so_snd.sb_hiwat)
4874#endif
4875 bufsize = so->so_snd.sb_hiwat;
4876 if (bufsize < mss)
4877 mss = bufsize;
4878 else {
4879 bufsize = (((bufsize + (u_int64_t)mss - 1) / (u_int64_t)mss) * (u_int64_t)mss);
4880 if (bufsize > sb_max_corrected)
4881 bufsize = sb_max_corrected;
4882 (void)sbreserve(&so->so_snd, bufsize);
4883 }
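/*
 * Worked example (values assumed): with mss = 1448 and a 65536
 * byte send buffer, bufsize rounds up to 46 * 1448 = 66608 so the
 * buffer holds an integral number of segments, then is capped at
 * sb_max_corrected.
 */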
4884 tp->t_maxseg = mss;
4885
4886#if RTV_RPIPE
4887 bufsize = rt->rt_rmx.rmx_recvpipe;
4888 if (bufsize < so->so_rcv.sb_hiwat)
4889#endif
4890 bufsize = so->so_rcv.sb_hiwat;
4891 if (bufsize > mss) {
4892 bufsize = (((bufsize + (u_int64_t)mss - 1) / (u_int64_t)mss) * (u_int64_t)mss);
4893 if (bufsize > sb_max_corrected)
4894 bufsize = sb_max_corrected;
4895 (void)sbreserve(&so->so_rcv, bufsize);
4896 }
9bccf70c 4897
6d2010ae 4898 set_tcp_stream_priority(so);
4899
4900 if (rt->rt_rmx.rmx_ssthresh) {
4901 /*
4902 * There's some sort of gateway or interface
4903 * buffer limit on the path. Use this to set
 4904 * the slow start threshold, but set the
4905 * threshold to no less than 2*mss.
4906 */
4907 tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh);
4908 tcpstat.tcps_usedssthresh++;
b0d623f7 4909 } else {
cf7d32b8 4910 tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
b0d623f7 4911 }
cf7d32b8 4912
4913
4914 /*
4915 * Set the slow-start flight size depending on whether this
4916 * is a local network or not.
4917 */
4918 if (CC_ALGO(tp)->cwnd_init != NULL)
4919 CC_ALGO(tp)->cwnd_init(tp);
4920
4921 DTRACE_TCP5(cc, void, NULL, struct inpcb *, tp->t_inpcb, struct tcpcb *, tp,
4922 struct tcphdr *, NULL, int32_t, TCP_CC_CWND_INIT);
4923
b0d623f7
A
4924 /* Route locked during lookup above */
4925 RT_UNLOCK(rt);
1c79356b
A
4926}
4927
4928/*
4929 * Determine the MSS option to send on an outgoing SYN.
4930 */
4931int
9bccf70c 4932tcp_mssopt(struct tcpcb *tp)
1c79356b
A
4934{
4935 struct rtentry *rt;
8ad349bb 4936 int mss;
1c79356b 4937#if INET6
9bccf70c
A
4938 int isipv6;
4939 int min_protoh;
4940#endif
1c79356b 4941
9bccf70c
A
4942#if INET6
4943 isipv6 = ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
4944 min_protoh = isipv6 ? sizeof (struct ip6_hdr) + sizeof (struct tcphdr)
4945 : sizeof (struct tcpiphdr);
4946#else
4947#define min_protoh (sizeof (struct tcpiphdr))
4948#endif
b0d623f7 4949
1c79356b
A
4950#if INET6
4951 if (isipv6)
6d2010ae 4952 rt = tcp_rtlookup6(tp->t_inpcb, IFSCOPE_NONE);
1c79356b
A
4953 else
4954#endif /* INET6 */
c910b4d9 4955 rt = tcp_rtlookup(tp->t_inpcb, IFSCOPE_NONE);
2d21ac55 4956 if (rt == NULL) {
2d21ac55 4957 return (
1c79356b
A
4958#if INET6
4959 isipv6 ? tcp_v6mssdflt :
4960#endif /* INET6 */
2d21ac55
A
4961 tcp_mssdflt);
4962 }
d12e1678
A
4963 /*
4964 * Slower link window correction:
4965 * If a value is specified for slowlink_wsize, use it for PPP links
4966 * believed to be on a serial modem (speed < 128Kbps). Excludes 9600bps, as
4967 * it is the default value advertised by pseudo-devices over PPP.
4968 */
4969 if (rt->rt_ifp->if_type == IFT_PPP && slowlink_wsize > 0 &&
4970 rt->rt_ifp->if_baudrate > 9600 && rt->rt_ifp->if_baudrate <= 128000) {
4971 tp->t_flags |= TF_SLOWLINK;
4972 }
1c79356b 4973
8ad349bb 4974#if INET6
2d21ac55
A
4975 mss = (isipv6 ? tcp_maxmtu6(rt) : tcp_maxmtu(rt));
4976#else
4977 mss = tcp_maxmtu(rt);
8ad349bb 4978#endif
b0d623f7
A
4979 /* Route locked during lookup above */
4980 RT_UNLOCK(rt);
8ad349bb 4981 return (mss - min_protoh);
9bccf70c
A
4982}
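/*
 * Illustrative note (not part of the original source): the value
 * returned above is simply the path MTU minus the fixed IP+TCP header
 * overhead. For a hypothetical IPv4 route with a 1500-byte MTU,
 * 1500 - sizeof (struct tcpiphdr) == 1500 - 40 == 1460, the familiar
 * Ethernet MSS; an IPv6 route with the same MTU yields
 * 1500 - 60 == 1440.
 */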
4983
9bccf70c 4984/*
8ad349bb
A
4985 * When a partial ack arrives, force the retransmission of the
4986 * next unacknowledged segment. Do not clear tp->t_dupacks.
6d2010ae 4987 * By setting snd_nxt to th_ack, this forces the retransmission timer to
8ad349bb 4988 * be started again.
9bccf70c 4989 */
8ad349bb
A
4990static void
4991tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
9bccf70c
A
4994{
9bccf70c 4995 tcp_seq onxt = tp->snd_nxt;
b0d623f7 4996 u_int32_t ocwnd = tp->snd_cwnd;
9bccf70c 4997 tp->t_timer[TCPT_REXMT] = 0;
9bccf70c
A
4998 tp->t_rtttime = 0;
4999 tp->snd_nxt = th->th_ack;
5000 /*
5001 * Set snd_cwnd to one segment beyond acknowledged offset
5002 * (tp->snd_una has not yet been updated when this function
5003 * is called)
5004 */
39236c6e 5005 tp->snd_cwnd = tp->t_maxseg + BYTES_ACKED(th, tp);
91447636 5006 tp->t_flags |= TF_ACKNOW;
9bccf70c
A
5007 (void) tcp_output(tp);
5008 tp->snd_cwnd = ocwnd;
5009 if (SEQ_GT(onxt, tp->snd_nxt))
5010 tp->snd_nxt = onxt;
5011 /*
5012 * Partial window deflation. Relies on the fact that tp->snd_una
5013 * has not been updated yet.
5014 */
39236c6e
A
5015 if (tp->snd_cwnd > BYTES_ACKED(th, tp))
5016 tp->snd_cwnd -= BYTES_ACKED(th, tp);
2d21ac55
A
5017 else
5018 tp->snd_cwnd = 0;
5019 tp->snd_cwnd += tp->t_maxseg;
5020
1c79356b 5021}
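/*
 * Illustrative sketch (assumed values, not from the original source)
 * of the partial window deflation above: suppose t_maxseg == 1448,
 * snd_cwnd == 14480 (10 segments) and the partial ACK covers 2896
 * bytes, i.e. BYTES_ACKED(th, tp) == 2896. The retransmission is sent
 * with a temporary cwnd of 1448 + 2896, then the saved cwnd is
 * restored and deflated:
 *
 *	14480 - 2896 + 1448 == 13032
 *
 * the window shrinks by the amount acked but is re-inflated by one
 * segment so that new data can still be sent, as NewReno prescribes.
 */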
91447636
A
5022
5023/*
5024 * Drop a random TCP connection that hasn't been serviced yet and
5025 * is eligible for discard. There is a one in qlen chance that
5026 * we will return null, indicating that there are no droppable
5027 * requests. In this case, the protocol-specific code should drop
5028 * the new request. This ensures fairness.
5029 *
5030 * The listening TCP socket "head" must be locked
5031 */
5032static int
2d21ac55 5033tcp_dropdropablreq(struct socket *head)
91447636 5034{
2d21ac55 5035 struct socket *so, *sonext;
91447636 5036 unsigned int i, j, qlen;
39236c6e
A
5037 static u_int32_t rnd = 0;
5038 static u_int64_t old_runtime;
91447636 5039 static unsigned int cur_cnt, old_cnt;
39236c6e 5040 u_int64_t now_sec;
91447636 5041 struct inpcb *inp = NULL;
3a60a9f5 5042 struct tcpcb *tp;
2d21ac55
A
5043
5044 if ((head->so_options & SO_ACCEPTCONN) == 0)
39236c6e
A
5045 return (0);
5046
5047 if (TAILQ_EMPTY(&head->so_incomp))
5048 return (0);
5049
5050 /*
5051 * Check if there is any socket in the incomp queue
5052 * that is closed because of a reset from the peer and is
5053 * waiting to be garbage collected. If so, pick that as
5054 * the victim.
5055 */
5056 TAILQ_FOREACH_SAFE(so, &head->so_incomp, so_list, sonext) {
5057 inp = sotoinpcb(so);
5058 tp = intotcpcb(inp);
5059 if (tp != NULL && tp->t_state == TCPS_CLOSED &&
5060 so->so_head != NULL &&
5061 (so->so_state & (SS_INCOMP|SS_CANTSENDMORE|SS_CANTRCVMORE)) ==
5062 (SS_INCOMP|SS_CANTSENDMORE|SS_CANTRCVMORE)) {
5063 /*
5064 * The listen socket is already locked but we
5065 * can lock this socket here without lock ordering
5066 * issues because it is in the incomp queue and
5067 * is not visible to others.
5068 */
5069 if (lck_mtx_try_lock(&inp->inpcb_mtx)) {
5070 so->so_usecount++;
5071 goto found_victim;
5072 } else {
5073 continue;
5074 }
5075 }
5076 }
2d21ac55
A
5077
5078 so = TAILQ_FIRST(&head->so_incomp);
2d21ac55 5079
39236c6e
A
5080 now_sec = net_uptime();
5081 if ((i = (now_sec - old_runtime)) != 0) {
5082 old_runtime = now_sec;
91447636
A
5083 old_cnt = cur_cnt / i;
5084 cur_cnt = 0;
5085 }
5086
91447636
A
5087
5088 qlen = head->so_incqlen;
39236c6e
A
5089 if (rnd == 0)
5090 rnd = RandomULong();
5091
91447636
A
5092 if (++cur_cnt > qlen || old_cnt > qlen) {
5093 rnd = (314159 * rnd + 66329) & 0xffff;
5094 j = ((qlen + 1) * rnd) >> 16;
5095
5096 while (j-- && so)
5097 so = TAILQ_NEXT(so, so_list);
5098 }
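	/*
	 * Illustrative note (not part of the original source): the update
	 * above is a 16-bit linear congruential step, and
	 * ((qlen + 1) * rnd) >> 16 maps rnd into the range [0, qlen]. For
	 * a hypothetical queue of 8 entries and rnd == 0x8000, it skips
	 * j == (9 * 32768) >> 16 == 4 sockets before picking a victim.
	 */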
2d21ac55 5099 /* Find a connection that is not already closing (or being served) */
91447636
A
5100 while (so) {
5101 inp = (struct inpcb *)so->so_pcb;
5102
2d21ac55
A
5103 sonext = TAILQ_NEXT(so, so_list);
5104
39236c6e
A
5105 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0)
5106 != WNT_STOPUSING) {
5107 /*
5108 * Avoid the issue of a socket being accepted
5109 * by one input thread and being dropped by
5110 * another input thread. If we can't get a hold
5111 * on this mutex, then grab the next socket in
5112 * line.
2d21ac55 5113 */
6d2010ae 5114 if (lck_mtx_try_lock(&inp->inpcb_mtx)) {
2d21ac55 5115 so->so_usecount++;
6d2010ae 5116 if ((so->so_usecount == 2) &&
39236c6e
A
5117 (so->so_state & SS_INCOMP) &&
5118 !(so->so_flags & SOF_INCOMP_INPROGRESS)) {
2d21ac55 5119 break;
39236c6e
A
5120 } else {
5121 /*
5122 * don't use if being accepted or
5123 * used in any other way
5124 */
2d21ac55
A
5125 in_pcb_checkstate(inp, WNT_RELEASE, 1);
5126 tcp_unlock(so, 1, 0);
5127 }
39236c6e
A
5128 } else {
5129 /*
5130 * do not try to lock the inp in
5131 * in_pcb_checkstate because the lock
5132 * is already held by some other thread.
b0d623f7
A
5133 * Only drop the inp_wantcnt reference.
5134 */
5135 in_pcb_checkstate(inp, WNT_RELEASE, 1);
5136 }
2d21ac55
A
5137 }
5138 so = sonext;
91447636 5139
91447636 5140 }
39236c6e
A
5141 if (so == NULL) {
5142 return (0);
5143 }
2d21ac55 5144
2d21ac55
A
5145 /* Make sure the socket is still in the right state to be discarded */
5146
91447636
A
5147 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
5148 tcp_unlock(so, 1, 0);
39236c6e 5149 return (0);
91447636 5150 }
2d21ac55 5151
39236c6e 5152found_victim:
2d21ac55 5153 if (so->so_usecount != 2 || !(so->so_state & SS_INCOMP)) {
6d2010ae 5154 /* do not discard: that socket is being accepted */
2d21ac55 5155 tcp_unlock(so, 1, 0);
39236c6e 5156 return (0);
2d21ac55
A
5157 }
5158
6d2010ae
A
5159 TAILQ_REMOVE(&head->so_incomp, so, so_list);
5160 tcp_unlock(head, 0, 0);
91447636 5161
6d2010ae 5162 lck_mtx_assert(&inp->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
3a60a9f5 5163 tp = sototcpcb(so);
2d21ac55 5164 so->so_flags |= SOF_OVERFLOW;
6d2010ae
A
5165 so->so_head = NULL;
5166
5167 tcp_close(tp);
6d2010ae 5168 if (inp->inp_wantcnt > 0 && inp->inp_wantcnt != WNT_STOPUSING) {
39236c6e
A
5169 /*
5170 * Someone has a wantcnt on this pcb. Since WNT_ACQUIRE
6d2010ae
A
5171 * doesn't require a lock, it could have happened while
5172 * we were holding the lock. This pcb will have to
5173 * be garbage collected later.
5174 * Release the reference held for the so_incomp queue.
5175 */
5176 so->so_usecount--;
6d2010ae
A
5177 tcp_unlock(so, 1, 0);
5178 } else {
39236c6e
A
5179 /*
5180 * Unlock this socket and leave the reference on.
5181 * We need to acquire the pcbinfo lock in order to
5182 * fully dispose of it.
6d2010ae
A
5183 */
5184 tcp_unlock(so, 0, 0);
5185
39236c6e 5186 lck_rw_lock_exclusive(tcbinfo.ipi_lock);
6d2010ae
A
5187
5188 tcp_lock(so, 0, 0);
6d2010ae
A
5189 /* Release the reference held for so_incomp queue */
5190 so->so_usecount--;
5191
5192 if (so->so_usecount != 1 ||
39236c6e
A
5193 (inp->inp_wantcnt > 0 &&
5194 inp->inp_wantcnt != WNT_STOPUSING)) {
5195 /*
5196 * There is an extra wantcount or usecount
5197 * that must have been added when the socket
5198 * was unlocked. This socket will have to be
5199 * garbage collected later
6d2010ae
A
5200 */
5201 tcp_unlock(so, 1, 0);
5202 } else {
5203
5204 /* Drop the reference held for this function */
5205 so->so_usecount--;
5206
5207 in_pcbdispose(inp);
5208 }
39236c6e 5209 lck_rw_done(tcbinfo.ipi_lock);
6d2010ae 5210 }
3a60a9f5 5211 tcpstat.tcps_drops++;
6d2010ae 5212
3a60a9f5 5213 tcp_lock(head, 0, 0);
2d21ac55
A
5214 head->so_incqlen--;
5215 head->so_qlen--;
6d2010ae
A
5216 return(1);
5217}
5218
5219/* Set background congestion control on a socket */
5220void
5221tcp_set_background_cc(struct socket *so)
5222{
5223 tcp_set_new_cc(so, TCP_CC_ALGO_BACKGROUND_INDEX);
5224}
5225
5226/* Set foreground congestion control on a socket */
5227void
5228tcp_set_foreground_cc(struct socket *so)
5229{
5230 tcp_set_new_cc(so, TCP_CC_ALGO_NEWRENO_INDEX);
5231}
5232
5233static void
5234tcp_set_new_cc(struct socket *so, uint16_t cc_index)
5235{
5236 struct inpcb *inp = sotoinpcb(so);
5237 struct tcpcb *tp = intotcpcb(inp);
39236c6e 5238 u_char old_cc_index = 0;
6d2010ae
A
5239 if (tp->tcp_cc_index != cc_index) {
5240
5241 old_cc_index = tp->tcp_cc_index;
5242
5243 if (CC_ALGO(tp)->cleanup != NULL)
5244 CC_ALGO(tp)->cleanup(tp);
5245 tp->tcp_cc_index = cc_index;
5246
5247 /* Decide if the connection is just starting or if
5248 * we have sent some packets on it.
5249 */
5250 if (tp->snd_nxt > tp->iss) {
5251 /* Already sent some packets */
5252 if (CC_ALGO(tp)->switch_to != NULL)
5253 CC_ALGO(tp)->switch_to(tp, old_cc_index);
5254 } else {
5255 if (CC_ALGO(tp)->init != NULL)
5256 CC_ALGO(tp)->init(tp);
5257 }
5258 DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
5259 struct tcpcb *, tp, struct tcphdr *, NULL,
5260 int32_t, TCP_CC_CHANGE_ALGO);
5261 }
91447636
A
5262}
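/*
 * Usage sketch (not part of the original source): moving a socket to
 * the background congestion controller and back is a matter of calling
 * the wrappers above, e.g. tcp_set_background_cc(so) and later
 * tcp_set_foreground_cc(so). tcp_set_new_cc() handles calling cleanup
 * on the old algorithm and either init (nothing sent yet) or switch_to
 * (packets already sent) on the new one.
 */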
5263
316670eb
A
5264void
5265tcp_set_recv_bg(struct socket *so)
5266{
5267 if (!IS_TCP_RECV_BG(so))
5268 so->so_traffic_mgt_flags |= TRAFFIC_MGT_TCP_RECVBG;
39236c6e
A
5269
5270 /* Unset Large Receive Offload on background sockets */
5271 so_set_lro(so, SO_TC_BK);
316670eb
A
5272}
5273
5274void
5275tcp_clear_recv_bg(struct socket *so)
5276{
5277 if (IS_TCP_RECV_BG(so))
5278 so->so_traffic_mgt_flags &= ~(TRAFFIC_MGT_TCP_RECVBG);
39236c6e
A
5279
5280 /*
5281 * Set/unset use of Large Receive Offload depending on
5282 * the traffic class
5283 */
5284 so_set_lro(so, so->so_traffic_class);
316670eb
A
5285}
5286
5287void
5288inp_fc_unthrottle_tcp(struct inpcb *inp)
5289{
5290 struct tcpcb *tp = inp->inp_ppcb;
5291 /*
5292 * Back off the slow-start threshold and enter
5293 * congestion avoidance phase
5294 */
5295 if (CC_ALGO(tp)->pre_fr != NULL)
5296 CC_ALGO(tp)->pre_fr(tp);
5297
5298 tp->snd_cwnd = tp->snd_ssthresh;
5299
5300 /*
5301 * Restart counting for ABC as we changed the
5302 * congestion window just now.
5303 */
5304 tp->t_bytes_acked = 0;
5305
5306 /* Reset the retransmit shift as we know that the
5307 * delay in sending a packet is due to flow
5308 * control on the outgoing interface. There is no need
5309 * to back off the retransmit timer.
5310 */
5311 tp->t_rxtshift = 0;
5312
5313 /*
5314 * Start the output stream again. Since we are
5315 * not retransmitting data, do not reset the
5316 * retransmit timer or rtt calculation.
5317 */
5318 tcp_output(tp);
5319}
39236c6e 5320
8ad349bb
A
5321static int
5322tcp_getstat SYSCTL_HANDLER_ARGS
5323{
2d21ac55 5324#pragma unused(oidp, arg1, arg2)
8ad349bb
A
5325
5326 int error;
5327
39236c6e
A
5328 proc_t caller = PROC_NULL;
5329 proc_t caller_parent = PROC_NULL;
5330 char command_name[MAXCOMLEN + 1] = "";
5331 char parent_name[MAXCOMLEN + 1] = "";
5332
5333 if ((caller = proc_self()) != PROC_NULL) {
5334 /* get process name */
5335 strlcpy(command_name, caller->p_comm, sizeof(command_name));
5336
5337 /* get parent process name if possible */
5338 if ((caller_parent = proc_find(caller->p_ppid)) != PROC_NULL) {
5339 strlcpy(parent_name, caller_parent->p_comm,
5340 sizeof(parent_name));
5341 proc_rele(caller_parent);
5342 }
5343
5344 if ((escape_str(command_name, strlen(command_name),
5345 sizeof(command_name)) == 0) &&
5346 (escape_str(parent_name, strlen(parent_name),
5347 sizeof(parent_name)) == 0)) {
5348 kern_asl_msg(LOG_DEBUG, "messagetracer",
5349 5,
5350 "com.apple.message.domain",
5351 "com.apple.kernel.tcpstat", /* 1 */
5352 "com.apple.message.signature",
5353 "tcpstat", /* 2 */
5354 "com.apple.message.signature2", command_name, /* 3 */
5355 "com.apple.message.signature3", parent_name, /* 4 */
5356 "com.apple.message.summarize", "YES", /* 5 */
5357 NULL);
5358 }
5359 }
5360 if (caller != PROC_NULL)
5361 proc_rele(caller);
5362
8ad349bb
A
5363 if (req->oldptr == 0) {
5364 req->oldlen = (size_t)sizeof(struct tcpstat);
5365 }
5366
2d21ac55 5367 error = SYSCTL_OUT(req, &tcpstat, MIN(sizeof (tcpstat), req->oldlen));
8ad349bb
A
5368
5369 return (error);
5370
5371}
5372
39236c6e
A
5373/*
5374 * Checksum extended TCP header and data.
5375 */
5376int
5377tcp_input_checksum(int af, struct mbuf *m, struct tcphdr *th, int off, int tlen)
5378{
5379 struct ifnet *ifp = m->m_pkthdr.rcvif;
5380
5381 switch (af) {
5382 case AF_INET: {
5383 struct ip *ip = mtod(m, struct ip *);
5384 struct ipovly *ipov = (struct ipovly *)ip;
5385
5386 if (m->m_pkthdr.pkt_flags & PKTF_SW_LRO_DID_CSUM)
5387 return (0);
5388
5389 if ((hwcksum_rx || (ifp->if_flags & IFF_LOOPBACK) ||
5390 (m->m_pkthdr.pkt_flags & PKTF_LOOP)) &&
5391 (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)) {
5392 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5393 th->th_sum = m->m_pkthdr.csum_rx_val;
5394 } else {
5395 uint16_t sum = m->m_pkthdr.csum_rx_val;
5396 uint16_t start = m->m_pkthdr.csum_rx_start;
5397
5398 /*
5399 * Perform 1's complement adjustment of octets
5400 * that got included/excluded in the hardware-
5401 * calculated checksum value. Ignore cases
5402 * where the value includes or excludes the IP
5403 * header span, as the sum for those octets
5404 * would already be 0xffff and thus no-op.
5405 */
5406 if ((m->m_pkthdr.csum_flags & CSUM_PARTIAL) &&
5407 start != 0 && (off - start) != off) {
5408#if BYTE_ORDER != BIG_ENDIAN
5409 if (start < off) {
5410 HTONS(ip->ip_len);
5411 HTONS(ip->ip_off);
5412 }
5413#endif
5414 /* callee folds in sum */
5415 sum = m_adj_sum16(m, start, off, sum);
5416#if BYTE_ORDER != BIG_ENDIAN
5417 if (start < off) {
5418 NTOHS(ip->ip_off);
5419 NTOHS(ip->ip_len);
5420 }
5421#endif
5422 }
5423
5424 /* callee folds in sum */
5425 th->th_sum = in_pseudo(ip->ip_src.s_addr,
5426 ip->ip_dst.s_addr,
5427 sum + htonl(tlen + IPPROTO_TCP));
5428 }
5429 th->th_sum ^= 0xffff;
5430 } else {
5431 uint16_t ip_sum;
5432 int len;
5433 char b[9];
5434
5435 bcopy(ipov->ih_x1, b, sizeof (ipov->ih_x1));
5436 bzero(ipov->ih_x1, sizeof (ipov->ih_x1));
5437 ip_sum = ipov->ih_len;
5438 ipov->ih_len = (u_short)tlen;
5439#if BYTE_ORDER != BIG_ENDIAN
5440 HTONS(ipov->ih_len);
5441#endif
5442 len = sizeof (struct ip) + tlen;
5443 th->th_sum = in_cksum(m, len);
5444 bcopy(b, ipov->ih_x1, sizeof (ipov->ih_x1));
5445 ipov->ih_len = ip_sum;
5446
5447 tcp_in_cksum_stats(len);
5448 }
5449 break;
5450 }
5451#if INET6
5452 case AF_INET6: {
5453 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
5454
5455 if (m->m_pkthdr.pkt_flags & PKTF_SW_LRO_DID_CSUM)
5456 return (0);
5457
5458 if ((hwcksum_rx || (ifp->if_flags & IFF_LOOPBACK) ||
5459 (m->m_pkthdr.pkt_flags & PKTF_LOOP)) &&
5460 (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)) {
5461 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5462 th->th_sum = m->m_pkthdr.csum_rx_val;
5463 } else {
5464 uint16_t sum = m->m_pkthdr.csum_rx_val;
5465 uint16_t start = m->m_pkthdr.csum_rx_start;
5466
5467 /*
5468 * Perform 1's complement adjustment of octets
5469 * that got included/excluded in the hardware-
5470 * calculated checksum value.
5471 */
5472 if ((m->m_pkthdr.csum_flags & CSUM_PARTIAL) &&
5473 start != off) {
5474 uint16_t s, d;
5475
5476 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
5477 s = ip6->ip6_src.s6_addr16[1];
5478 ip6->ip6_src.s6_addr16[1] = 0;
5479 }
5480 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
5481 d = ip6->ip6_dst.s6_addr16[1];
5482 ip6->ip6_dst.s6_addr16[1] = 0;
5483 }
5484
5485 /* callee folds in sum */
5486 sum = m_adj_sum16(m, start, off, sum);
5487
5488 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src))
5489 ip6->ip6_src.s6_addr16[1] = s;
5490 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst))
5491 ip6->ip6_dst.s6_addr16[1] = d;
5492 }
5493
5494 th->th_sum = in6_pseudo(
5495 &ip6->ip6_src, &ip6->ip6_dst,
5496 sum + htonl(tlen + IPPROTO_TCP));
5497 }
5498 th->th_sum ^= 0xffff;
5499 } else {
5500 tcp_in6_cksum_stats(tlen);
5501 th->th_sum = in6_cksum(m, IPPROTO_TCP, off, tlen);
5502 }
5503 break;
5504 }
5505#endif /* INET6 */
5506 default:
5507 VERIFY(0);
5508 /* NOTREACHED */
5509 }
5510
5511 if (th->th_sum != 0) {
5512 tcpstat.tcps_rcvbadsum++;
5513 IF_TCP_STATINC(ifp, badformat);
5514 return (-1);
5515 }
5516
5517 return (0);
5518}
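/*
 * Illustrative sketch (not part of the original source): a minimal
 * user-space version of the ones'-complement sum that the software
 * fallback above relies on, assuming an even-length, 16-bit-aligned
 * buffer.
 */
#if 0
static uint16_t
cksum16(const uint16_t *buf, int nwords)
{
	uint32_t sum = 0;

	while (nwords-- > 0)
		sum += *buf++;
	/* fold the carries back into the low 16 bits */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (~sum & 0xffff);
}
#endif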
5519
6d2010ae 5520SYSCTL_PROC(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
8ad349bb
A
5521 tcp_getstat, "S,tcpstat", "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
5522
2d21ac55
A
5523static int
5524sysctl_rexmtthresh SYSCTL_HANDLER_ARGS
5525{
5526#pragma unused(arg1, arg2)
5527
5528 int error, val = tcprexmtthresh;
5529
5530 error = sysctl_handle_int(oidp, &val, 0, req);
5531 if (error || !req->newptr)
5532 return (error);
5533
5534 /*
5535 * Constrain the number of duplicate ACKs
5536 * to consider for TCP fast retransmit
5537 * to either 2 or 3
5538 */
5539
5540 if (val < 2 || val > 3)
5541 return (EINVAL);
5542
5543 tcprexmtthresh = val;
5544
5545 return (0);
5546}
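/*
 * Usage note (not part of the original source): this handler backs the
 * net.inet.tcp.rexmt_thresh sysctl declared below; for example,
 * "sysctl -w net.inet.tcp.rexmt_thresh=2" lowers the duplicate-ACK
 * threshold for fast retransmit from 3 to 2, and any value outside
 * [2, 3] is rejected with EINVAL.
 */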
91447636 5547
6d2010ae 5548SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmt_thresh, CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED,
2d21ac55 5549 &tcprexmtthresh, 0, &sysctl_rexmtthresh, "I", "Duplicate ACK Threshold for Fast Retransmit");