1 /*
2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)tcp_input.c 8.12 (Berkeley) 5/24/95
61 * $FreeBSD: src/sys/netinet/tcp_input.c,v 1.107.2.16 2001/08/22 00:59:12 silby Exp $
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/kernel.h>
73 #include <sys/sysctl.h>
74 #include <sys/malloc.h>
75 #include <sys/mbuf.h>
76 #include <sys/proc.h> /* for proc0 declaration */
77 #include <sys/protosw.h>
78 #include <sys/socket.h>
79 #include <sys/socketvar.h>
80 #include <sys/syslog.h>
81 #include <sys/mcache.h>
82
83 #include <kern/cpu_number.h> /* before tcp_seq.h, for tcp_random18() */
84
85 #include <machine/endian.h>
86
87 #include <net/if.h>
88 #include <net/if_types.h>
89 #include <net/route.h>
90 #include <net/ntstat.h>
91
92 #include <netinet/in.h>
93 #include <netinet/in_systm.h>
94 #include <netinet/ip.h>
95 #include <netinet/ip_icmp.h> /* for ICMP_BANDLIM */
96 #include <netinet/in_var.h>
97 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
98 #include <netinet/in_pcb.h>
99 #include <netinet/ip_var.h>
100 #include <mach/sdt.h>
101 #if INET6
102 #include <netinet/ip6.h>
103 #include <netinet/icmp6.h>
104 #include <netinet6/nd6.h>
105 #include <netinet6/ip6_var.h>
106 #include <netinet6/in6_pcb.h>
107 #endif
108 #include <netinet/tcp.h>
109 #include <netinet/tcp_fsm.h>
110 #include <netinet/tcp_seq.h>
111 #include <netinet/tcp_timer.h>
112 #include <netinet/tcp_var.h>
113 #include <netinet/tcp_cc.h>
114 #include <kern/zalloc.h>
115 #if INET6
116 #include <netinet6/tcp6_var.h>
117 #endif
118 #include <netinet/tcpip.h>
119 #if TCPDEBUG
120 #include <netinet/tcp_debug.h>
121 u_char tcp_saveipgen[40]; /* the size must be that of the max IP header, currently IPv6 */
122 struct tcphdr tcp_savetcp;
123 #endif /* TCPDEBUG */
124
125 #if IPSEC
126 #include <netinet6/ipsec.h>
127 #if INET6
128 #include <netinet6/ipsec6.h>
129 #endif
130 #include <netkey/key.h>
131 #endif /*IPSEC*/
132
133 #if CONFIG_MACF_NET || CONFIG_MACF_SOCKET
134 #include <security/mac_framework.h>
135 #endif /* CONFIG_MACF_NET || CONFIG_MACF_SOCKET */
136
137 #include <sys/kdebug.h>
138 #include <netinet/lro_ext.h>
139
140 #define DBG_LAYER_BEG NETDBG_CODE(DBG_NETTCP, 0)
141 #define DBG_LAYER_END NETDBG_CODE(DBG_NETTCP, 2)
142 #define DBG_FNC_TCP_INPUT NETDBG_CODE(DBG_NETTCP, (3 << 8))
143 #define DBG_FNC_TCP_NEWCONN NETDBG_CODE(DBG_NETTCP, (7 << 8))
144
145 static int tcprexmtthresh = 2;
146 tcp_cc tcp_ccgen;
147
148 #if IPSEC
149 extern int ipsec_bypass;
150 #endif
151
152 extern int32_t total_sbmb_cnt;
153
154 struct tcpstat tcpstat;
155
156 static int log_in_vain = 0;
157 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW | CTLFLAG_LOCKED,
158 &log_in_vain, 0, "Log all incoming TCP connections");
159
160 static int blackhole = 0;
161 SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW | CTLFLAG_LOCKED,
162 &blackhole, 0, "Do not send RST when dropping refused connections");
163
164 int tcp_delack_enabled = 3;
165 SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW | CTLFLAG_LOCKED,
166 &tcp_delack_enabled, 0,
167 "Delay ACK to try and piggyback it onto a data packet");
168
169 int tcp_lq_overflow = 1;
170 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_lq_overflow, CTLFLAG_RW | CTLFLAG_LOCKED,
171 &tcp_lq_overflow, 0,
172 "Listen Queue Overflow");
173
174 int tcp_recv_bg = 0;
175 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbg, CTLFLAG_RW | CTLFLAG_LOCKED,
176 &tcp_recv_bg, 0,
177 "Receive background");
178
179 #if TCP_DROP_SYNFIN
180 static int drop_synfin = 1;
181 SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW | CTLFLAG_LOCKED,
182 &drop_synfin, 0, "Drop TCP packets with SYN+FIN set");
183 #endif
184
185 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW|CTLFLAG_LOCKED, 0,
186 "TCP Segment Reassembly Queue");
187
188 __private_extern__ int tcp_reass_maxseg = 0;
189 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RW | CTLFLAG_LOCKED,
190 &tcp_reass_maxseg, 0,
191 "Global maximum number of TCP Segments in Reassembly Queue");
192
193 __private_extern__ int tcp_reass_qsize = 0;
194 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, cursegments, CTLFLAG_RD | CTLFLAG_LOCKED,
195 &tcp_reass_qsize, 0,
196 "Global number of TCP Segments currently in Reassembly Queue");
197
198 static int tcp_reass_overflows = 0;
199 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, overflows, CTLFLAG_RD | CTLFLAG_LOCKED,
200 &tcp_reass_overflows, 0,
201 "Global number of TCP Segment Reassembly Queue Overflows");
202
203
204 __private_extern__ int slowlink_wsize = 8192;
205 SYSCTL_INT(_net_inet_tcp, OID_AUTO, slowlink_wsize, CTLFLAG_RW | CTLFLAG_LOCKED,
206 &slowlink_wsize, 0, "Maximum advertised window size for slowlink");
207
208 int maxseg_unacked = 8;
209 SYSCTL_INT(_net_inet_tcp, OID_AUTO, maxseg_unacked, CTLFLAG_RW | CTLFLAG_LOCKED,
210 &maxseg_unacked, 0, "Maximum number of outstanding segments left unacked");
211
212 int tcp_do_rfc3465 = 1;
213 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_RW | CTLFLAG_LOCKED,
214 &tcp_do_rfc3465, 0, "");
215
216 int tcp_do_rfc3465_lim2 = 1;
217 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3465_lim2, CTLFLAG_RW | CTLFLAG_LOCKED,
218 &tcp_do_rfc3465_lim2, 0, "Appropriate bytes counting w/ L=2*SMSS");
219
220 int rtt_samples_per_slot = 20;
221 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rtt_samples_per_slot, CTLFLAG_RW | CTLFLAG_LOCKED,
222 &rtt_samples_per_slot, 0, "Number of RTT samples stored for rtt history");
223
224 int tcp_allowed_iaj = ALLOWED_IAJ;
225 SYSCTL_INT(_net_inet_tcp, OID_AUTO, recv_allowed_iaj, CTLFLAG_RW | CTLFLAG_LOCKED,
226 &tcp_allowed_iaj, 0, "Allowed inter-packet arrival jitter");
227
228 int tcp_acc_iaj_high_thresh = ACC_IAJ_HIGH_THRESH;
229 SYSCTL_INT(_net_inet_tcp, OID_AUTO, acc_iaj_high_thresh, CTLFLAG_RW | CTLFLAG_LOCKED,
230 &tcp_acc_iaj_high_thresh, 0, "Used in calculating maximum accumulated IAJ");
231
232 u_int32_t tcp_do_autorcvbuf = 1;
233 SYSCTL_INT(_net_inet_tcp, OID_AUTO, doautorcvbuf, CTLFLAG_RW | CTLFLAG_LOCKED,
234 &tcp_do_autorcvbuf, 0, "Enable automatic socket buffer tuning");
235
236 u_int32_t tcp_autorcvbuf_inc_shift = 3;
237 SYSCTL_INT(_net_inet_tcp, OID_AUTO, autorcvbufincshift, CTLFLAG_RW | CTLFLAG_LOCKED,
238 &tcp_autorcvbuf_inc_shift, 0, "Shift for increment in receive socket buffer size");
239
240 u_int32_t tcp_autorcvbuf_max = 512 * 1024;
241 SYSCTL_INT(_net_inet_tcp, OID_AUTO, autorcvbufmax, CTLFLAG_RW | CTLFLAG_LOCKED,
242 &tcp_autorcvbuf_max, 0, "Maximum receive socket buffer size");
243
244 int sw_lro = 1;
245 SYSCTL_INT(_net_inet_tcp, OID_AUTO, lro, CTLFLAG_RW | CTLFLAG_LOCKED,
246 &sw_lro, 0, "Used to coalesce TCP packets");
247
248 int lrodebug = 0;
249 SYSCTL_INT(_net_inet_tcp, OID_AUTO, lrodbg, CTLFLAG_RW | CTLFLAG_LOCKED,
250 &lrodebug, 0, "Used to debug SW LRO");
251
252 int lro_start = 3;
253 SYSCTL_INT(_net_inet_tcp, OID_AUTO, lro_startcnt, CTLFLAG_RW | CTLFLAG_LOCKED,
254 &lro_start, 0, "Segments for starting LRO computed as power of 2");
255
256 extern int tcp_do_autosendbuf;
257
258 #if CONFIG_IFEF_NOWINDOWSCALE
259 int tcp_obey_ifef_nowindowscale = 0;
260 SYSCTL_INT(_net_inet_tcp, OID_AUTO, obey_ifef_nowindowscale, CTLFLAG_RW | CTLFLAG_LOCKED,
261 &tcp_obey_ifef_nowindowscale, 0, "");
262 #endif
263 /* This limit will determine when the receive socket buffer tuning will
264 * kick in. Currently it will start when the bw*delay measured in the
265 * last RTT is more than half of the current hiwat on the buffer.
266 */
267 uint32_t tcp_rbuf_hiwat_shift = 1;
268
269 /* This limit will determine when the socket buffer will be increased
270 * to accommodate an application reading slowly: when the amount of
271 * space left in the buffer is less than one fourth of the bw*delay
272 * measured in the last RTT.
273 */
274 uint32_t tcp_rbuf_win_shift = 2;
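/*
 * Editor's note (not part of the original source): a quick numeric sketch of
 * the two shifts above, using assumed values. With sb_hiwat = 64 KB and
 * tcp_rbuf_hiwat_shift = 1, buffer tuning starts once the bytes seen in the
 * last RTT exceed 64 KB - (64 KB >> 1) = 32 KB. With tcp_rbuf_win_shift = 2,
 * the buffer is grown for a slow reader once the free space drops below one
 * fourth of those bytes, e.g. below 8 KB when 32 KB arrived in that RTT.
 */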
275
276 extern int tcp_TCPTV_MIN;
277 extern int tcp_acc_iaj_high;
278 extern int tcp_acc_iaj_react_limit;
279 extern struct zone *tcp_reass_zone;
280
281
282 u_int32_t tcp_now;
283 struct timeval tcp_uptime; /* uptime when tcp_now was last updated */
284 lck_spin_t *tcp_uptime_lock; /* Used to synchronize updates to tcp_now */
285
286 struct inpcbhead tcb;
287 #define tcb6 tcb /* for KAME src sync over BSD*'s */
288 struct inpcbinfo tcbinfo;
289
290 static void tcp_dooptions(struct tcpcb *, u_char *, int, struct tcphdr *,
291 struct tcpopt *, unsigned int);
292 static void tcp_pulloutofband(struct socket *,
293 struct tcphdr *, struct mbuf *, int);
294 static int tcp_reass(struct tcpcb *, struct tcphdr *, int *,
295 struct mbuf *);
296 static void tcp_xmit_timer(struct tcpcb *, int);
297 static inline unsigned int tcp_maxmtu(struct rtentry *);
298 static inline int tcp_stretch_ack_enable(struct tcpcb *tp);
299
300 #if TRAFFIC_MGT
301 static inline void update_iaj_state(struct tcpcb *tp, uint32_t tlen, int reset_size);
302 void compute_iaj(struct tcpcb *tp);
303 #endif /* TRAFFIC_MGT */
304
305 #if INET6
306 static inline unsigned int tcp_maxmtu6(struct rtentry *);
307 #endif
308
309 static void tcp_sbrcv_grow(struct tcpcb *tp, struct sockbuf *sb,
310 struct tcpopt *to, u_int32_t tlen);
311
312 void tcp_sbrcv_trim(struct tcpcb *tp, struct sockbuf *sb);
313 static void tcp_sbsnd_trim(struct sockbuf *sbsnd);
314 static inline void tcp_sbrcv_tstmp_check(struct tcpcb *tp);
315 static inline void tcp_sbrcv_reserve(struct tcpcb *tp, struct sockbuf *sb,
316 u_int32_t newsize, u_int32_t idealsize);
317
318 #define TCPTV_RCVNOTS_QUANTUM 100
319 #define TCP_RCVNOTS_BYTELEVEL 204800
320 /* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
321 #if INET6
322 #define ND6_HINT(tp) \
323 do { \
324 if ((tp) && (tp)->t_inpcb && \
325 ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0 && \
326 (tp)->t_inpcb->in6p_route.ro_rt) \
327 nd6_nud_hint((tp)->t_inpcb->in6p_route.ro_rt, NULL, 0); \
328 } while (0)
329 #else
330 #define ND6_HINT(tp)
331 #endif
332
333 extern void add_to_time_wait(struct tcpcb *, uint32_t delay);
334 extern void postevent(struct socket *, struct sockbuf *, int);
335
336 extern void ipfwsyslog( int level, const char *format,...);
337 extern int ChkAddressOK( __uint32_t dstaddr, __uint32_t srcaddr );
338 extern int fw_verbose;
339
340 #if IPFIREWALL
341 #define log_in_vain_log( a ) { \
342 if ( (log_in_vain == 3 ) && (fw_verbose == 2)) { /* Apple logging, log to ipfw.log */ \
343 ipfwsyslog a ; \
344 } \
345 else log a ; \
346 }
347 #else
348 #define log_in_vain_log( a ) { log a; }
349 #endif
350
351 int tcp_rcvunackwin = TCPTV_UNACKWIN;
352 int tcp_maxrcvidle = TCPTV_MAXRCVIDLE;
353 int tcp_rcvsspktcnt = TCP_RCV_SS_PKTCOUNT;
354 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rcvsspktcnt, CTLFLAG_RW | CTLFLAG_LOCKED,
355 &tcp_rcvsspktcnt, 0, "packets to be seen before receiver stretches acks");
356
357 #define DELAY_ACK(tp, th) (CC_ALGO(tp)->delay_ack != NULL && CC_ALGO(tp)->delay_ack(tp, th))
358
359 static int tcp_dropdropablreq(struct socket *head);
360 static void tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th);
361
362 static void update_base_rtt(struct tcpcb *tp, uint32_t rtt);
363 uint32_t get_base_rtt(struct tcpcb *tp);
364 void tcp_set_background_cc(struct socket *so);
365 void tcp_set_foreground_cc(struct socket *so);
366 static void tcp_set_new_cc(struct socket *so, uint16_t cc_index);
367 static void tcp_bwmeas_check(struct tcpcb *tp);
368
369 #if TRAFFIC_MGT
370 void
371 reset_acc_iaj(struct tcpcb *tp)
372 {
373 tp->acc_iaj = 0;
374 tp->iaj_rwintop = 0;
375 CLEAR_IAJ_STATE(tp);
376 }
377
378 static inline void
379 update_iaj_state(struct tcpcb *tp, uint32_t size, int rst_size)
380 {
381 if (rst_size > 0)
382 tp->iaj_size = 0;
383 if (tp->iaj_size == 0 || size >= tp->iaj_size) {
384 tp->iaj_size = size;
385 tp->iaj_rcv_ts = tcp_now;
386 tp->iaj_small_pkt = 0;
387 }
388 }
389
390 /* For every 32-bit unsigned integer (v), this function will find the
391 * largest integer n such that (n*n <= v). This takes at most 16 iterations
392 * irrespective of the value of v and does not involve multiplications.
393 */
394 static inline int
395 isqrt(unsigned int val) {
396 unsigned int sqrt_cache[11] = {0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100};
397 unsigned int temp, g=0, b=0x8000, bshft=15;
398 if ( val <= 100) {
399 for (g = 0; g <= 10; ++g) {
400 if (sqrt_cache[g] > val) {
401 g--;
402 break;
403 } else if (sqrt_cache[g] == val) {
404 break;
405 }
406 }
407 } else {
408 do {
409 temp = (((g << 1) + b) << (bshft--));
410 if (val >= temp) {
411 g += b;
412 val -= temp;
413 }
414 b >>= 1;
415 } while ( b > 0 && val > 0);
416 }
417 return(g);
418 }
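/*
 * Editor's illustration (not part of the original source): a few sanity
 * checks of isqrt(), runnable only in a test harness, hence kept under #if 0.
 */
#if 0
	/* table path (val <= 100) and shift-subtract path (val > 100) */
	assert(isqrt(0) == 0 && isqrt(1) == 1 && isqrt(19) == 4);
	assert(isqrt(99) == 9 && isqrt(100) == 10 && isqrt(65536) == 256);
#endif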
419
420 void
421 compute_iaj(struct tcpcb *tp)
422 {
423 /* When accumulated IAJ reaches MAX_ACC_IAJ in milliseconds, throttle the
424 * receive window to a minimum of MIN_IAJ_WIN packets
425 */
426 #define MAX_ACC_IAJ (tcp_acc_iaj_high_thresh + tcp_acc_iaj_react_limit)
427
428 uint32_t allowed_iaj, acc_iaj = 0;
429 uint32_t cur_iaj = tcp_now - tp->iaj_rcv_ts;
430
431 uint32_t mean, temp;
432 int32_t cur_iaj_dev;
433 cur_iaj_dev = (cur_iaj - tp->avg_iaj);
434
435 /* Allow a jitter of "allowed_iaj" milliseconds. Some connections may have a
436 * constant jitter larger than that. We detect this by using the
437 * standard deviation.
438 */
439 allowed_iaj = tp->avg_iaj + tp->std_dev_iaj;
440 if (allowed_iaj < tcp_allowed_iaj)
441 allowed_iaj = tcp_allowed_iaj;
442
443 /* Initially when the connection starts, the sender's congestion window
444 * is small. During this period we avoid throttling a connection because
445 * we do not have a good starting point for allowed_iaj. IAJ_IGNORE_PKTCNT
446 * is used to quietly gloss over the first few packets.
447 */
448 if (tp->iaj_pktcnt > IAJ_IGNORE_PKTCNT) {
449 if ( cur_iaj <= allowed_iaj ) {
450 if (tp->acc_iaj >= 2)
451 acc_iaj = tp->acc_iaj - 2;
452 else
453 acc_iaj = 0;
454 } else {
455 acc_iaj = tp->acc_iaj + (cur_iaj - allowed_iaj);
456 }
457
458 if (acc_iaj > MAX_ACC_IAJ)
459 acc_iaj = MAX_ACC_IAJ;
460 tp->acc_iaj = acc_iaj;
461 }
462
463 /* Compute weighted average where the history has a weight of
464 * 15 out of 16 and the current value has a weight of 1 out of 16.
465 * This smooths out short-term fluctuations so the long-term history dominates.
466 */
467 tp->avg_iaj = (((tp->avg_iaj << 4) - tp->avg_iaj) + cur_iaj) >> 4;
468
469 /* Compute the root-mean-square of the deviation, where the mean is the
470 * weighted average described above
471 */
472 temp = tp->std_dev_iaj * tp->std_dev_iaj;
473 mean = (((temp << 4) - temp) + (cur_iaj_dev * cur_iaj_dev)) >> 4;
474
475 tp->std_dev_iaj = isqrt(mean);
476
477 DTRACE_TCP3(iaj, struct tcpcb *, tp, uint32_t, cur_iaj, uint32_t, allowed_iaj);
478
479 return;
480 }
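/*
 * Editor's note (illustrative, not in the original source): with the 15/16
 * weighting used above, an avg_iaj of 8 ms and a new sample of 24 ms give
 * ((8 << 4) - 8 + 24) >> 4 = 9 ms, i.e. a single outlier moves the average
 * by only 1/16th of its deviation.
 */
#if 0
	uint32_t avg = 8, sample = 24;			/* milliseconds */
	avg = (((avg << 4) - avg) + sample) >> 4;	/* avg == 9 */
#endif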
481 #endif /* TRAFFIC_MGT */
482
483 /* Check whether enough data has been acknowledged since the
484 * bandwidth measurement was started
485 */
486 static void
487 tcp_bwmeas_check(struct tcpcb *tp)
488 {
489 int32_t bw_meas_bytes;
490 uint32_t bw, bytes, elapsed_time;
491 bw_meas_bytes = tp->snd_una - tp->t_bwmeas->bw_start;
492 if ((tp->t_flagsext & TF_BWMEAS_INPROGRESS) != 0 &&
493 bw_meas_bytes >= (int32_t)(tp->t_bwmeas->bw_size)) {
494 bytes = bw_meas_bytes;
495 elapsed_time = tcp_now - tp->t_bwmeas->bw_ts;
496 if (elapsed_time > 0) {
497 bw = bytes / elapsed_time;
498 if ( bw > 0) {
499 if (tp->t_bwmeas->bw_sndbw > 0) {
500 tp->t_bwmeas->bw_sndbw =
501 (((tp->t_bwmeas->bw_sndbw << 3) - tp->t_bwmeas->bw_sndbw) + bw) >> 3;
502 } else {
503 tp->t_bwmeas->bw_sndbw = bw;
504 }
505 }
506 }
507 tp->t_flagsext &= ~(TF_BWMEAS_INPROGRESS);
508 }
509 }
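/*
 * Editor's sketch (not part of the original source): assuming tcp_now ticks
 * in milliseconds (as the IAJ comments above also assume), "bw" above is
 * bytes per millisecond, and the running estimate is smoothed 7/8 old + 1/8 new:
 */
#if 0
	uint32_t old_bw = 800, new_bw = 1600;			/* bytes/ms */
	old_bw = (((old_bw << 3) - old_bw) + new_bw) >> 3;	/* == 900 */
#endif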
510
511 static int
512 tcp_reass(tp, th, tlenp, m)
513 register struct tcpcb *tp;
514 register struct tcphdr *th;
515 int *tlenp;
516 struct mbuf *m;
517 {
518 struct tseg_qent *q;
519 struct tseg_qent *p = NULL;
520 struct tseg_qent *nq;
521 struct tseg_qent *te = NULL;
522 struct socket *so = tp->t_inpcb->inp_socket;
523 int flags;
524 int dowakeup = 0;
525
526 /*
527 * Call with th==0 after the connection becomes established to
528 * force pre-ESTABLISHED data up to the user socket.
529 */
530 if (th == NULL)
531 goto present;
532
533 /* If the reassembly queue already has entries or if we are going to add
534 * a new one, then the connection has reached a loss state.
535 * Reset the stretch-ack algorithm at this point.
536 */
537 if ((tp->t_flags & TF_STRETCHACK) != 0)
538 tcp_reset_stretch_ack(tp);
539
540 /* When the connection reaches a loss state, we need to send more acks
541 * for a period of time so that the sender's congestion window will
542 * open. Wait until we see some packets on the connection before
543 * stretching acks again.
544 */
545 tp->t_flagsext |= TF_RCVUNACK_WAITSS;
546 tp->rcv_waitforss = 0;
547
548
549 #if TRAFFIC_MGT
550 if (tp->acc_iaj > 0)
551 reset_acc_iaj(tp);
552 #endif /* TRAFFIC_MGT */
553
554 /*
555 * Limit the number of segments in the reassembly queue to prevent
556 * holding on to too many segments (and thus running out of mbufs).
557 * Make sure to let through the missing segment that caused this
558 * queue to form. Always keep one global queue entry spare to be able to
559 * process the missing segment.
560 */
561 if (th->th_seq != tp->rcv_nxt &&
562 tcp_reass_qsize + 1 >= tcp_reass_maxseg) {
563 tcp_reass_overflows++;
564 tcpstat.tcps_rcvmemdrop++;
565 m_freem(m);
566 *tlenp = 0;
567 return (0);
568 }
569
570 /* Allocate a new queue entry. If we can't, just drop the pkt. XXX */
571 te = (struct tseg_qent *) zalloc_noblock(tcp_reass_zone);
572 if (te == NULL) {
573 tcpstat.tcps_rcvmemdrop++;
574 m_freem(m);
575 return (0);
576 }
577 tcp_reass_qsize++;
578
579 /*
580 * Find a segment which begins after this one does.
581 */
582 LIST_FOREACH(q, &tp->t_segq, tqe_q) {
583 if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
584 break;
585 p = q;
586 }
587
588 /*
589 * If there is a preceding segment, it may provide some of
590 * our data already. If so, drop the data from the incoming
591 * segment. If it provides all of our data, drop us.
592 */
593 if (p != NULL) {
594 register int i;
595 /* conversion to int (in i) handles seq wraparound */
596 i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
597 if (i > 0) {
598 if (i >= *tlenp) {
599 tcpstat.tcps_rcvduppack++;
600 tcpstat.tcps_rcvdupbyte += *tlenp;
601 if (nstat_collect) {
602 nstat_route_rx(tp->t_inpcb->inp_route.ro_rt, 1, *tlenp, NSTAT_RX_FLAG_DUPLICATE);
603 locked_add_64(&tp->t_inpcb->inp_stat->rxpackets, 1);
604 locked_add_64(&tp->t_inpcb->inp_stat->rxbytes, *tlenp);
605 tp->t_stat.rxduplicatebytes += *tlenp;
606 }
607 m_freem(m);
608 zfree(tcp_reass_zone, te);
609 tcp_reass_qsize--;
610 /*
611 * Try to present any queued data
612 * at the left window edge to the user.
613 * This is needed after the 3-WHS
614 * completes.
615 */
616 goto present; /* ??? */
617 }
618 m_adj(m, i);
619 *tlenp -= i;
620 th->th_seq += i;
621 }
622 }
623 tcpstat.tcps_rcvoopack++;
624 tcpstat.tcps_rcvoobyte += *tlenp;
625 if (nstat_collect) {
626 nstat_route_rx(tp->t_inpcb->inp_route.ro_rt, 1, *tlenp, NSTAT_RX_FLAG_OUT_OF_ORDER);
627 locked_add_64(&tp->t_inpcb->inp_stat->rxpackets, 1);
628 locked_add_64(&tp->t_inpcb->inp_stat->rxbytes, *tlenp);
629 tp->t_stat.rxoutoforderbytes += *tlenp;
630 }
631
632 /*
633 * While we overlap succeeding segments trim them or,
634 * if they are completely covered, dequeue them.
635 */
636 while (q) {
637 register int i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
638 if (i <= 0)
639 break;
640 if (i < q->tqe_len) {
641 q->tqe_th->th_seq += i;
642 q->tqe_len -= i;
643 m_adj(q->tqe_m, i);
644 break;
645 }
646
647 nq = LIST_NEXT(q, tqe_q);
648 LIST_REMOVE(q, tqe_q);
649 m_freem(q->tqe_m);
650 zfree(tcp_reass_zone, q);
651 tcp_reass_qsize--;
652 q = nq;
653 }
654
655 /* Insert the new segment queue entry into place. */
656 te->tqe_m = m;
657 te->tqe_th = th;
658 te->tqe_len = *tlenp;
659
660 if (p == NULL) {
661 LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
662 } else {
663 LIST_INSERT_AFTER(p, te, tqe_q);
664 }
665
666 present:
667 /*
668 * Present data to user, advancing rcv_nxt through
669 * completed sequence space.
670 */
671 if (!TCPS_HAVEESTABLISHED(tp->t_state))
672 return (0);
673 q = LIST_FIRST(&tp->t_segq);
674 if (!q || q->tqe_th->th_seq != tp->rcv_nxt) {
675 /* Stop using LRO once out of order packets arrive */
676 if (tp->t_flagsext & TF_LRO_OFFLOADED) {
677 tcp_lro_remove_state(tp->t_inpcb->inp_laddr,
678 tp->t_inpcb->inp_faddr,
679 th->th_dport, th->th_sport);
680 tp->t_flagsext &= ~TF_LRO_OFFLOADED;
681 }
682 return (0);
683 }
684 do {
685 tp->rcv_nxt += q->tqe_len;
686 flags = q->tqe_th->th_flags & TH_FIN;
687 nq = LIST_NEXT(q, tqe_q);
688 LIST_REMOVE(q, tqe_q);
689 if (so->so_state & SS_CANTRCVMORE)
690 m_freem(q->tqe_m);
691 else {
692 so_recv_data_stat(so, q->tqe_m, 0); /* XXXX */
693 if (sbappendstream(&so->so_rcv, q->tqe_m))
694 dowakeup = 1;
695 if (tp->t_flagsext & TF_LRO_OFFLOADED) {
696 tcp_update_lro_seq(tp->rcv_nxt,
697 tp->t_inpcb->inp_laddr,
698 tp->t_inpcb->inp_faddr, th->th_dport, th->th_sport);
699 }
700 }
701 zfree(tcp_reass_zone, q);
702 tcp_reass_qsize--;
703 q = nq;
704 } while (q && q->tqe_th->th_seq == tp->rcv_nxt);
705 ND6_HINT(tp);
706
707 #if INET6
708 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
709
710 KERNEL_DEBUG(DBG_LAYER_BEG,
711 ((tp->t_inpcb->inp_fport << 16) | tp->t_inpcb->inp_lport),
712 (((tp->t_inpcb->in6p_laddr.s6_addr16[0] & 0xffff) << 16) |
713 (tp->t_inpcb->in6p_faddr.s6_addr16[0] & 0xffff)),
714 0,0,0);
715 }
716 else
717 #endif
718 {
719 KERNEL_DEBUG(DBG_LAYER_BEG,
720 ((tp->t_inpcb->inp_fport << 16) | tp->t_inpcb->inp_lport),
721 (((tp->t_inpcb->inp_laddr.s_addr & 0xffff) << 16) |
722 (tp->t_inpcb->inp_faddr.s_addr & 0xffff)),
723 0,0,0);
724 }
725 if (dowakeup)
726 sorwakeup(so); /* done with socket lock held */
727 return (flags);
728
729 }
730
731 /*
732 * Reduce congestion window.
733 */
734 static void
735 tcp_reduce_congestion_window(
736 struct tcpcb *tp)
737 {
738 /*
739 * If the current tcp cc module has
740 * defined a hook for tasks to run
741 * before entering FR, call it
742 */
743 if (CC_ALGO(tp)->pre_fr != NULL)
744 CC_ALGO(tp)->pre_fr(tp);
745 ENTER_FASTRECOVERY(tp);
746 tp->snd_recover = tp->snd_max;
747 tp->t_timer[TCPT_REXMT] = 0;
748 tp->t_rtttime = 0;
749 tp->ecn_flags |= TE_SENDCWR;
750 tp->snd_cwnd = tp->snd_ssthresh +
751 tp->t_maxseg * tcprexmtthresh;
752 }
753
754
755 /*
756 * TCP input routine, follows pages 65-76 of the
757 * protocol specification dated September, 1981 very closely.
758 */
759 #if INET6
760 int
761 tcp6_input(struct mbuf **mp, int *offp, int proto)
762 {
763 #pragma unused(proto)
764 register struct mbuf *m = *mp;
765 struct in6_ifaddr *ia6;
766 struct ifnet *ifp = ((m->m_flags & M_PKTHDR) && m->m_pkthdr.rcvif != NULL) ? m->m_pkthdr.rcvif: NULL;
767
768 IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), return IPPROTO_DONE);
769
770 /* Expect 32-bit aligned data pointer on strict-align platforms */
771 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
772
773 /*
774 * draft-itojun-ipv6-tcp-to-anycast
775 * better place to put this in?
776 */
777 ia6 = ip6_getdstifaddr(m);
778 if (ia6 != NULL) {
779 IFA_LOCK_SPIN(&ia6->ia_ifa);
780 if (ia6->ia6_flags & IN6_IFF_ANYCAST) {
781 struct ip6_hdr *ip6;
782
783 IFA_UNLOCK(&ia6->ia_ifa);
784 IFA_REMREF(&ia6->ia_ifa);
785 ip6 = mtod(m, struct ip6_hdr *);
786 icmp6_error(m, ICMP6_DST_UNREACH,
787 ICMP6_DST_UNREACH_ADDR,
788 (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
789
790 if (ifp != NULL && ifp->if_tcp_stat != NULL)
791 atomic_add_64(&ifp->if_tcp_stat->icmp6unreach, 1);
792
793 return (IPPROTO_DONE);
794 }
795 IFA_UNLOCK(&ia6->ia_ifa);
796 IFA_REMREF(&ia6->ia_ifa);
797 }
798
799 tcp_input(m, *offp);
800 return (IPPROTO_DONE);
801 }
802 #endif
803
804 /* Depending on the usage of mbuf space in the system, this function
805 * returns true or false. This is used to determine whether a socket
806 * buffer can take more memory from the system for auto-tuning.
807 */
808 u_int8_t
809 tcp_cansbgrow(struct sockbuf *sb)
810 {
811 /* Calculate the host level space limit in terms of MSIZE buffers.
812 * We can use a maximum of half of the available mbuf space for
813 * socket buffers.
814 */
815 u_int32_t mblim = ((nmbclusters >> 1) << (MCLSHIFT - MSIZESHIFT));
816
817 /* Calculate the per-socket-buffer limit in terms of bytes. We optimize
818 * this limit for up to 16 socket buffers.
819 */
820
821 u_int32_t sbspacelim = ((nmbclusters >> 4) << MCLSHIFT);
822
823 if ((total_sbmb_cnt < mblim) &&
824 (sb->sb_hiwat < sbspacelim)) {
825 return(1);
826 }
827 return(0);
828 }
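/*
 * Editor's worked example (assumed values, not from the source): with
 * nmbclusters = 65536, 2 KB clusters (MCLSHIFT = 11) and 256-byte mbufs
 * (MSIZESHIFT = 8), mblim = (65536 >> 1) << 3 = 262144 MSIZE units, i.e.
 * half of the cluster pool, and sbspacelim = (65536 >> 4) << 11 = 8 MB,
 * i.e. each socket buffer may use at most 1/16th of the cluster memory.
 */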
829
830 void
831 tcp_sbrcv_reserve(struct tcpcb *tp,
832 struct sockbuf *sbrcv,
833 u_int32_t newsize,
834 u_int32_t idealsize) {
835
836 /* newsize should not exceed max */
837 newsize = min(newsize, tcp_autorcvbuf_max);
838
839 /* The receive window scale negotiated at the
840 * beginning of the connection will also set a
841 * limit on the socket buffer size
842 */
843 newsize = min(newsize, TCP_MAXWIN << tp->rcv_scale);
844
845 /* Set new socket buffer size */
846 if (newsize > sbrcv->sb_hiwat &&
847 (sbreserve(sbrcv, newsize) == 1)) {
848 sbrcv->sb_idealsize = min(max(sbrcv->sb_idealsize,
849 (idealsize != 0) ? idealsize : newsize),
850 tcp_autorcvbuf_max);
851
852 /* Again check the limit set by the advertised
853 * window scale
854 */
855 sbrcv->sb_idealsize = min(sbrcv->sb_idealsize,
856 TCP_MAXWIN << tp->rcv_scale);
857 }
858 }
859
860 /*
861 * This function is used to grow a receive socket buffer. It
862 * will take into account system-level memory usage and the
863 * bandwidth available on the link to make a decision.
864 */
865 static void
866 tcp_sbrcv_grow(struct tcpcb *tp, struct sockbuf *sbrcv,
867 struct tcpopt *to, u_int32_t pktlen) {
868
869 if (tcp_do_autorcvbuf == 0 ||
870 (sbrcv->sb_flags & SB_AUTOSIZE) == 0 ||
871 tcp_cansbgrow(sbrcv) == 0 ||
872 sbrcv->sb_hiwat >= tcp_autorcvbuf_max) {
873 /* Can not resize the socket buffer, just return */
874 goto out;
875 }
876
877 if (TSTMP_GT(tcp_now,
878 tp->rfbuf_ts + TCPTV_RCVBUFIDLE)) {
879 /* If there has been an idle period in the
880 * connection, just restart the measurement
881 */
882 goto out;
883 }
884
885 if ((tp->t_flags & (TF_REQ_TSTMP | TF_RCVD_TSTMP)) !=
886 (TF_REQ_TSTMP | TF_RCVD_TSTMP)) {
887 /*
888 * Timestamp option is not supported on this connection.
889 * If the connection reached a state to indicate that
890 * the receive socket buffer needs to grow, increase
891 * the high water mark.
892 */
893 if (TSTMP_GEQ(tcp_now,
894 tp->rfbuf_ts + TCPTV_RCVNOTS_QUANTUM)) {
895 if (tp->rfbuf_cnt >= TCP_RCVNOTS_BYTELEVEL) {
896 tcp_sbrcv_reserve(tp, sbrcv,
897 tcp_autorcvbuf_max, 0);
898 }
899 goto out;
900 } else {
901 tp->rfbuf_cnt += pktlen;
902 return;
903 }
904 } else if (to->to_tsecr != 0) {
905 /* If the timestamp shows that one RTT has
906 * completed, we can stop counting the
907 * bytes. Here we consider increasing
908 * the socket buffer if it meets the following
909 * criteria:
910 * 1. the bandwidth measured in the last RTT is more
911 * than half of sb_hiwat; this will help to scale the
912 * buffer according to the bandwidth on the link.
913 * 2. the space left in sbrcv is less than
914 * one fourth of the bandwidth measured in the last RTT; this
915 * will help to accommodate an application reading slowly.
916 */
917 if (TSTMP_GEQ(to->to_tsecr, tp->rfbuf_ts)) {
918 if ((tp->rfbuf_cnt > (sbrcv->sb_hiwat -
919 (sbrcv->sb_hiwat >> tcp_rbuf_hiwat_shift)) ||
920 (sbrcv->sb_hiwat - sbrcv->sb_cc) <
921 (tp->rfbuf_cnt >> tcp_rbuf_win_shift))) {
922 u_int32_t rcvbuf_inc;
923 /*
924 * Increment the receive window by a multiple of
925 * maximum-sized segments. This will prevent a
926 * connection from sending smaller segments on
927 * the wire if it is limited by the receive window.
928 *
929 * Set the ideal size based on current bandwidth
930 * measurements. We set the ideal size of the receive
931 * socket buffer to be twice the bandwidth-delay
932 * product.
933 */
934 rcvbuf_inc = tp->t_maxseg << tcp_autorcvbuf_inc_shift;
935 tcp_sbrcv_reserve(tp, sbrcv,
936 sbrcv->sb_hiwat + rcvbuf_inc,
937 (tp->rfbuf_cnt * 2));
938 }
939 goto out;
940 } else {
941 tp->rfbuf_cnt += pktlen;
942 return;
943 }
944 }
945 out:
946 /* Restart the measurement */
947 tp->rfbuf_ts = 0;
948 tp->rfbuf_cnt = 0;
949 return;
950 }
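/*
 * Editor's sketch (assumed MSS, not from the source): with t_maxseg = 1448
 * and tcp_autorcvbuf_inc_shift = 3, each growth step above requests
 * sb_hiwat + (1448 << 3) = sb_hiwat + 11584 bytes, with the ideal size set
 * to twice the bytes received in the last RTT (roughly 2 * bandwidth*delay),
 * all capped by tcp_autorcvbuf_max and the negotiated window scale.
 */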
951
952 /* This function will trim the excess space added to the socket buffer
953 * to help a slow-reading app. The ideal size of a socket buffer depends
954 * on the link bandwidth or is set by an application, and we aim to
955 * reach that size.
956 */
957 void
958 tcp_sbrcv_trim(struct tcpcb *tp, struct sockbuf *sbrcv) {
959 if (tcp_do_autorcvbuf == 1 && sbrcv->sb_idealsize > 0 &&
960 sbrcv->sb_hiwat > sbrcv->sb_idealsize) {
961 int32_t trim;
962 /* compute the difference between ideal and current sizes */
963 u_int32_t diff = sbrcv->sb_hiwat - sbrcv->sb_idealsize;
964
965 /* Compute the maximum advertised window for
966 * this connection.
967 */
968 u_int32_t advwin = tp->rcv_adv - tp->rcv_nxt;
969
970 /* How much can we trim the receive socket buffer?
971 * 1. it cannot be trimmed beyond the max rcv win advertised
972 * 2. if possible, leave 1/16 of bandwidth*delay to
973 * avoid closing the win completely
974 */
975 u_int32_t leave = max(advwin, (sbrcv->sb_idealsize >> 4));
976
977 /* Sometimes leave can be zero; in that case leave at least
978 * a few segments' worth of space.
979 */
980 if (leave == 0)
981 leave = tp->t_maxseg << tcp_autorcvbuf_inc_shift;
982
983 trim = sbrcv->sb_hiwat - (sbrcv->sb_cc + leave);
984 trim = imin(trim, (int32_t)diff);
985
986 if (trim > 0)
987 sbreserve(sbrcv, (sbrcv->sb_hiwat - trim));
988 }
989 }
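/*
 * Editor's worked example (assumed numbers, not from the source): with
 * sb_hiwat = 512 KB, sb_idealsize = 256 KB, sb_cc = 64 KB and an advertised
 * window of 128 KB: diff = 256 KB, leave = max(128 KB, 256 KB >> 4) = 128 KB,
 * trim = 512 - (64 + 128) = 320 KB, clipped to diff, so the buffer is
 * reserved back down to 512 KB - 256 KB = 256 KB.
 */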
990
991 /* We may need to trim the send socket buffer size for two reasons:
992 * 1. if the rtt seen on the connection is climbing up, we do not
993 * want to fill the buffers any more.
994 * 2. if the congestion win on the socket backed off, there is no need
995 * to hold more mbufs for that connection than what the cwnd will allow.
996 */
997 void
998 tcp_sbsnd_trim(struct sockbuf *sbsnd) {
999 if (tcp_do_autosendbuf == 1 &&
1000 ((sbsnd->sb_flags & (SB_AUTOSIZE | SB_TRIM)) ==
1001 (SB_AUTOSIZE | SB_TRIM)) &&
1002 (sbsnd->sb_idealsize > 0) &&
1003 (sbsnd->sb_hiwat > sbsnd->sb_idealsize)) {
1004 u_int32_t trim = 0;
1005 if (sbsnd->sb_cc <= sbsnd->sb_idealsize) {
1006 trim = sbsnd->sb_hiwat - sbsnd->sb_idealsize;
1007 } else {
1008 trim = sbsnd->sb_hiwat - sbsnd->sb_cc;
1009 }
1010 sbreserve(sbsnd, (sbsnd->sb_hiwat - trim));
1011 }
1012 if (sbsnd->sb_hiwat <= sbsnd->sb_idealsize)
1013 sbsnd->sb_flags &= ~(SB_TRIM);
1014 }
1015
1016 /*
1017 * If the timestamp option was not negotiated on this connection
1018 * and this connection is on the receiving side of a stream,
1019 * then we cannot measure the delay on the link accurately.
1020 * Instead of enabling automatic receive socket buffer
1021 * resizing, just give more space to the receive socket buffer.
1022 */
1023 static inline void
1024 tcp_sbrcv_tstmp_check(struct tcpcb *tp) {
1025 struct socket *so = tp->t_inpcb->inp_socket;
1026 u_int32_t newsize = 2 * tcp_recvspace;
1027 struct sockbuf *sbrcv = &so->so_rcv;
1028
1029 if ((tp->t_flags & (TF_REQ_TSTMP | TF_RCVD_TSTMP)) !=
1030 (TF_REQ_TSTMP | TF_RCVD_TSTMP) &&
1031 (sbrcv->sb_flags & SB_AUTOSIZE) != 0) {
1032 tcp_sbrcv_reserve(tp, sbrcv, newsize, 0);
1033 }
1034 }
1035
1036 /* A receiver will evaluate the flow of packets on a connection
1037 * to see if it can reduce ack traffic. The receiver will start
1038 * stretching acks if all of the following conditions are met:
1039 * 1. tcp_delack_enabled is set to 3
1040 * 2. the bytes received in the last 100ms are greater than a threshold
1041 * defined by maxseg_unacked
1042 * 3. the connection has not been idle for the tcp_maxrcvidle period.
1043 * 4. the connection has seen enough packets to let the slow-start
1044 * finish after connection establishment or after some packet loss.
1045 *
1046 * The receiver will stop stretching acks if there is congestion/reordering
1047 * as indicated by packets on the reassembly queue or by ECN. If the delayed-ack
1048 * timer fires while stretching acks, it means that the packet flow has gone
1049 * below the threshold defined by maxseg_unacked and the receiver will stop
1050 * stretching acks. The receiver gets no indication when slow-start is completed
1051 * or when the connection reaches an idle state. That is why we use
1052 * tcp_rcvsspktcnt to cover slow-start and tcp_maxrcvidle to identify idle
1053 * state.
1054 */
1055 static inline int
1056 tcp_stretch_ack_enable(struct tcpcb *tp) {
1057 if (tp->rcv_by_unackwin >= (maxseg_unacked * tp->t_maxseg) &&
1058 TSTMP_GT(tp->rcv_unackwin + tcp_maxrcvidle, tcp_now) &&
1059 (((tp->t_flagsext & TF_RCVUNACK_WAITSS) == 0) ||
1060 (tp->rcv_waitforss >= tcp_rcvsspktcnt))) {
1061 return(1);
1062 }
1063
1064 return(0);
1065 }
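/*
 * Editor's note (assumed MSS, not from the source): with maxseg_unacked = 8
 * and t_maxseg = 1448, ack stretching is considered once at least
 * 8 * 1448 = 11584 bytes have arrived within the current unack window,
 * provided the connection has not been idle for tcp_maxrcvidle and, when
 * TF_RCVUNACK_WAITSS is set, at least tcp_rcvsspktcnt packets have been seen.
 */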
1066
1067 /* Reset the state related to the stretch-ack algorithm. This will make
1068 * the receiver generate an ack every other packet. The receiver
1069 * will start re-evaluating the rate at which packets arrive to decide
1070 * if it can benefit from lowering the ack traffic.
1071 */
1072 void
1073 tcp_reset_stretch_ack(struct tcpcb *tp)
1074 {
1075 tp->t_flags &= ~(TF_STRETCHACK);
1076 tp->rcv_by_unackwin = 0;
1077 tp->rcv_unackwin = tcp_now + tcp_rcvunackwin;
1078 }
1079
1080 void
1081 tcp_input(m, off0)
1082 struct mbuf *m;
1083 int off0;
1084 {
1085 register struct tcphdr *th;
1086 register struct ip *ip = NULL;
1087 register struct ipovly *ipov;
1088 register struct inpcb *inp;
1089 u_char *optp = NULL;
1090 int optlen = 0;
1091 int len, tlen, off;
1092 int drop_hdrlen;
1093 register struct tcpcb *tp = 0;
1094 register int thflags;
1095 struct socket *so = 0;
1096 int todrop, acked, ourfinisacked, needoutput = 0;
1097 struct in_addr laddr;
1098 #if INET6
1099 struct in6_addr laddr6;
1100 #endif
1101 int dropsocket = 0;
1102 int iss = 0, nosock = 0;
1103 u_int32_t tiwin;
1104 struct tcpopt to; /* options in this segment */
1105 struct sockaddr_in *next_hop = NULL;
1106 #if TCPDEBUG
1107 short ostate = 0;
1108 #endif
1109 struct m_tag *fwd_tag;
1110 u_char ip_ecn = IPTOS_ECN_NOTECT;
1111 unsigned int ifscope, nocell = 0;
1112 uint8_t isconnected, isdisconnected;
1113 struct ifnet *ifp = ((m->m_flags & M_PKTHDR) && m->m_pkthdr.rcvif != NULL) ? m->m_pkthdr.rcvif: NULL;
1114 int nlropkts = m->m_pkthdr.lro_npkts;
1115 int mauxf_sw_lro_pkt = (m->m_pkthdr.aux_flags & MAUXF_SW_LRO_PKT) ? 1 : 0;
1116 int turnoff_lro = 0;
1117 #define TCP_INC_VAR(stat, npkts) do { \
1118 if (mauxf_sw_lro_pkt) { \
1119 stat += npkts; \
1120 } else { \
1121 stat++; \
1122 } \
1123 } while (0)
1124
1125 TCP_INC_VAR(tcpstat.tcps_rcvtotal, nlropkts);
1126
1127 /* Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain. */
1128 if (!SLIST_EMPTY(&m->m_pkthdr.tags)) {
1129 fwd_tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
1130 KERNEL_TAG_TYPE_IPFORWARD, NULL);
1131 } else {
1132 fwd_tag = NULL;
1133 }
1134 if (fwd_tag != NULL) {
1135 struct ip_fwd_tag *ipfwd_tag = (struct ip_fwd_tag *)(fwd_tag+1);
1136
1137 next_hop = ipfwd_tag->next_hop;
1138 m_tag_delete(m, fwd_tag);
1139 }
1140
1141 #if INET6
1142 struct ip6_hdr *ip6 = NULL;
1143 int isipv6;
1144 #endif /* INET6 */
1145 int rstreason; /* For badport_bandlim accounting purposes */
1146 struct proc *proc0=current_proc();
1147
1148 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_START,0,0,0,0,0);
1149
1150 #if INET6
1151 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
1152 #endif
1153 bzero((char *)&to, sizeof(to));
1154
1155 #if INET6
1156 if (isipv6) {
1157 /* Expect 32-bit aligned data pointer on strict-align platforms */
1158 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
1159
1160 /* IP6_EXTHDR_CHECK() is already done at tcp6_input() */
1161 ip6 = mtod(m, struct ip6_hdr *);
1162 tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
1163 th = (struct tcphdr *)(void *)((caddr_t)ip6 + off0);
1164
1165 if ((apple_hwcksum_rx != 0) && (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)) {
1166 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
1167 th->th_sum = m->m_pkthdr.csum_data;
1168 else {
1169 /*
1170 * There is no established protocol for the case
1171 * where the IPv6 pseudoheader checksum is not computed
1172 * with our current drivers. Current drivers set
1173 * CSUM_PSEUDO_HDR. So if we do get here, we should
1174 * recalculate the checksum.
1175 */
1176 if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) {
1177 th->th_sum = 0;
1178 } else {
1179 th->th_sum = 0xffff;
1180 }
1181 }
1182
1183 th->th_sum ^= 0xffff;
1184 if (th->th_sum) {
1185 tcpstat.tcps_rcvbadsum++;
1186
1187 if (ifp != NULL && ifp->if_tcp_stat != NULL)
1188 atomic_add_64(&ifp->if_tcp_stat->badformat, 1);
1189
1190 goto dropnosock;
1191 }
1192 }
1193 else {
1194 if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) {
1195 tcpstat.tcps_rcvbadsum++;
1196
1197 if (ifp != NULL && ifp->if_tcp_stat != NULL)
1198 atomic_add_64(&ifp->if_tcp_stat->badformat, 1);
1199
1200 goto dropnosock;
1201 }
1202 }
1203
1204 KERNEL_DEBUG(DBG_LAYER_BEG, ((th->th_dport << 16) | th->th_sport),
1205 (((ip6->ip6_src.s6_addr16[0]) << 16) | (ip6->ip6_dst.s6_addr16[0])),
1206 th->th_seq, th->th_ack, th->th_win);
1207 /*
1208 * Be proactive about an unspecified IPv6 address in the source.
1209 * As we use all-zero to indicate an unbound/unconnected pcb,
1210 * an unspecified IPv6 address can be used to confuse us.
1211 *
1212 * Note that packets with an unspecified IPv6 destination are
1213 * already dropped in ip6_input.
1214 */
1215 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
1216 /* XXX stat */
1217
1218 if (ifp != NULL && ifp->if_tcp_stat != NULL)
1219 atomic_add_64(&ifp->if_tcp_stat->unspecv6, 1);
1220
1221 goto dropnosock;
1222 }
1223 DTRACE_TCP5(receive, struct mbuf *, m, struct inpcb *, NULL,
1224 struct ip6_hdr *, ip6, struct tcpcb *, NULL,
1225 struct tcphdr *, th);
1226
1227 ip_ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
1228 } else
1229 #endif /* INET6 */
1230 {
1231 /*
1232 * Get IP and TCP header together in first mbuf.
1233 * Note: IP leaves IP header in first mbuf.
1234 */
1235 if (off0 > sizeof (struct ip)) {
1236 ip_stripoptions(m, (struct mbuf *)0);
1237 off0 = sizeof(struct ip);
1238 if (m->m_pkthdr.csum_flags & CSUM_TCP_SUM16)
1239 m->m_pkthdr.csum_flags = 0; /* invalidate hwcksuming */
1240
1241 }
1242 if (m->m_len < sizeof (struct tcpiphdr)) {
1243 if ((m = m_pullup(m, sizeof (struct tcpiphdr))) == 0) {
1244 tcpstat.tcps_rcvshort++;
1245 return;
1246 }
1247 }
1248
1249 /* Expect 32-bit aligned data pointer on strict-align platforms */
1250 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
1251
1252 ip = mtod(m, struct ip *);
1253 ipov = (struct ipovly *)ip;
1254 th = (struct tcphdr *)(void *)((caddr_t)ip + off0);
1255 tlen = ip->ip_len;
1256
1257 if (m->m_pkthdr.aux_flags & MAUXF_SW_LRO_DID_CSUM) {
1258 goto skip_checksum;
1259 }
1260 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
1261 if (m->m_pkthdr.csum_flags & CSUM_TCP_SUM16) {
1262 u_short pseudo;
1263 char b[9];
1264
1265 bcopy(ipov->ih_x1, b, sizeof (ipov->ih_x1));
1266 bzero(ipov->ih_x1, sizeof (ipov->ih_x1));
1267 ipov->ih_len = (u_short)tlen;
1268 #if BYTE_ORDER != BIG_ENDIAN
1269 HTONS(ipov->ih_len);
1270 #endif
1271 pseudo = in_cksum(m, sizeof (struct ip));
1272 bcopy(b, ipov->ih_x1, sizeof (ipov->ih_x1));
1273
1274 th->th_sum = in_addword(pseudo, (m->m_pkthdr.csum_data & 0xFFFF));
1275 } else {
1276 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
1277 th->th_sum = m->m_pkthdr.csum_data;
1278 else
1279 th->th_sum = in_pseudo(ip->ip_src.s_addr,
1280 ip->ip_dst.s_addr, htonl(m->m_pkthdr.csum_data +
1281 ip->ip_len + IPPROTO_TCP));
1282 }
1283 th->th_sum ^= 0xffff;
1284 } else {
1285 char b[9];
1286 /*
1287 * Checksum extended TCP header and data.
1288 */
1289 bcopy(ipov->ih_x1, b, sizeof (ipov->ih_x1));
1290 bzero(ipov->ih_x1, sizeof (ipov->ih_x1));
1291 ipov->ih_len = (u_short)tlen;
1292 #if BYTE_ORDER != BIG_ENDIAN
1293 HTONS(ipov->ih_len);
1294 #endif
1295 len = sizeof (struct ip) + tlen;
1296 th->th_sum = in_cksum(m, len);
1297 bcopy(b, ipov->ih_x1, sizeof (ipov->ih_x1));
1298
1299 tcp_in_cksum_stats(len);
1300 }
1301 if (th->th_sum) {
1302 tcpstat.tcps_rcvbadsum++;
1303
1304 if (ifp != NULL && ifp->if_tcp_stat != NULL)
1305 atomic_add_64(&ifp->if_tcp_stat->badformat, 1);
1306 if (lrodebug) printf("tcp_input: bad xsum len = %d, tlen = %d, flags = %x, csum_flags = %x.\n",len, tlen, m->m_flags, m->m_pkthdr.csum_flags);
1307 goto dropnosock;
1308 }
1309 skip_checksum:
1310 #if INET6
1311 /* Re-initialization for later version check */
1312 ip->ip_v = IPVERSION;
1313 #endif
1314 ip_ecn = (ip->ip_tos & IPTOS_ECN_MASK);
1315
1316 DTRACE_TCP5(receive, struct mbuf *, m, struct inpcb *, NULL,
1317 struct ip *, ip, struct tcpcb *, NULL, struct tcphdr *, th);
1318
1319 KERNEL_DEBUG(DBG_LAYER_BEG, ((th->th_dport << 16) | th->th_sport),
1320 (((ip->ip_src.s_addr & 0xffff) << 16) | (ip->ip_dst.s_addr & 0xffff)),
1321 th->th_seq, th->th_ack, th->th_win);
1322
1323 }
1324
1325 /*
1326 * Check that TCP offset makes sense,
1327 * pull out TCP options and adjust length. XXX
1328 */
1329 off = th->th_off << 2;
1330 if (off < sizeof (struct tcphdr) || off > tlen) {
1331 tcpstat.tcps_rcvbadoff++;
1332
1333 if (ifp != NULL && ifp->if_tcp_stat != NULL)
1334 atomic_add_64(&ifp->if_tcp_stat->badformat, 1);
1335
1336 goto dropnosock;
1337 }
1338 tlen -= off; /* tlen is used instead of ti->ti_len */
1339 if (off > sizeof (struct tcphdr)) {
1340 #if INET6
1341 if (isipv6) {
1342 IP6_EXTHDR_CHECK(m, off0, off, return);
1343 ip6 = mtod(m, struct ip6_hdr *);
1344 th = (struct tcphdr *)(void *)((caddr_t)ip6 + off0);
1345 } else
1346 #endif /* INET6 */
1347 {
1348 if (m->m_len < sizeof(struct ip) + off) {
1349 if ((m = m_pullup(m, sizeof (struct ip) + off)) == 0) {
1350 tcpstat.tcps_rcvshort++;
1351 return;
1352 }
1353 ip = mtod(m, struct ip *);
1354 ipov = (struct ipovly *)ip;
1355 th = (struct tcphdr *)(void *)((caddr_t)ip + off0);
1356 }
1357 }
1358 optlen = off - sizeof (struct tcphdr);
1359 optp = (u_char *)(th + 1);
1360 /*
1361 * Do quick retrieval of timestamp options ("options
1362 * prediction?"). If timestamp is the only option and it's
1363 * formatted as recommended in RFC 1323 appendix A, we
1364 * quickly get the values now and do not bother calling
1365 * tcp_dooptions(), etc.
1366 */
1367 if ((optlen == TCPOLEN_TSTAMP_APPA ||
1368 (optlen > TCPOLEN_TSTAMP_APPA &&
1369 optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) &&
1370 *(u_int32_t *)(void *)optp == htonl(TCPOPT_TSTAMP_HDR) &&
1371 (th->th_flags & TH_SYN) == 0) {
1372 to.to_flags |= TOF_TS;
1373 to.to_tsval = ntohl(*(u_int32_t *)(void *)(optp + 4));
1374 to.to_tsecr = ntohl(*(u_int32_t *)(void *)(optp + 8));
1375 optp = NULL; /* we've parsed the options */
1376 }
1377 }
1378 thflags = th->th_flags;
1379
1380 #if TCP_DROP_SYNFIN
1381 /*
1382 * If the drop_synfin option is enabled, drop all packets with
1383 * both the SYN and FIN bits set. This prevents e.g. nmap from
1384 * identifying the TCP/IP stack.
1385 *
1386 * This is a violation of the TCP specification.
1387 */
1388 if (drop_synfin && (thflags & (TH_SYN|TH_FIN)) == (TH_SYN|TH_FIN)) {
1389
1390 if (ifp != NULL && ifp->if_tcp_stat != NULL)
1391 atomic_add_64(&ifp->if_tcp_stat->synfin, 1);
1392
1393 goto dropnosock;
1394 }
1395 #endif
1396
1397 /*
1398 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options,
1399 * until after ip6_savecontrol() is called and before other functions
1400 * which don't want those proto headers.
1401 * Because ip6_savecontrol() is going to parse the mbuf to
1402 * search for data to be passed up to user-land, it wants mbuf
1403 * parameters to be unchanged.
1404 */
1405 drop_hdrlen = off0 + off;
1406
1407 /* Since this is an entry point for input processing of tcp packets, we
1408 * can update the tcp clock here.
1409 */
1410 calculate_tcp_clock();
1411
1412 /*
1413 * Record the interface where this segment arrived on; this does not
1414 * affect normal data output (for non-detached TCP) as it provides a
1415 * hint about which route and interface to use for sending in the
1416 * absence of a PCB, when scoped routing (and thus source interface
1417 * selection) are enabled.
1418 */
1419 if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.rcvif != NULL)
1420 ifscope = m->m_pkthdr.rcvif->if_index;
1421 else
1422 ifscope = IFSCOPE_NONE;
1423
1424 /*
1425 * Convert TCP protocol specific fields to host format.
1426 */
1427
1428 #if BYTE_ORDER != BIG_ENDIAN
1429 NTOHL(th->th_seq);
1430 NTOHL(th->th_ack);
1431 NTOHS(th->th_win);
1432 NTOHS(th->th_urp);
1433 #endif
1434
1435 /*
1436 * Locate pcb for segment.
1437 */
1438 findpcb:
1439
1440 isconnected = FALSE;
1441 isdisconnected = FALSE;
1442
1443 #if IPFIREWALL_FORWARD
1444 if (next_hop != NULL
1445 #if INET6
1446 && isipv6 == 0 /* IPv6 support is not yet */
1447 #endif /* INET6 */
1448 ) {
1449 /*
1450 * Diverted. Pretend to be the destination.
1451 * already got one like this?
1452 */
1453 inp = in_pcblookup_hash(&tcbinfo, ip->ip_src, th->th_sport,
1454 ip->ip_dst, th->th_dport, 0, m->m_pkthdr.rcvif);
1455 if (!inp) {
1456 /*
1457 * No, then it's new. Try to find the ambushing socket
1458 */
1459 if (!next_hop->sin_port) {
1460 inp = in_pcblookup_hash(&tcbinfo, ip->ip_src,
1461 th->th_sport, next_hop->sin_addr,
1462 th->th_dport, 1, m->m_pkthdr.rcvif);
1463 } else {
1464 inp = in_pcblookup_hash(&tcbinfo,
1465 ip->ip_src, th->th_sport,
1466 next_hop->sin_addr,
1467 ntohs(next_hop->sin_port), 1,
1468 m->m_pkthdr.rcvif);
1469 }
1470 }
1471 } else
1472 #endif /* IPFIREWALL_FORWARD */
1473 {
1474 #if INET6
1475 if (isipv6)
1476 inp = in6_pcblookup_hash(&tcbinfo, &ip6->ip6_src, th->th_sport,
1477 &ip6->ip6_dst, th->th_dport, 1,
1478 m->m_pkthdr.rcvif);
1479 else
1480 #endif /* INET6 */
1481 inp = in_pcblookup_hash(&tcbinfo, ip->ip_src, th->th_sport,
1482 ip->ip_dst, th->th_dport, 1, m->m_pkthdr.rcvif);
1483 }
1484
1485 /*
1486 * Use the interface scope information from the PCB for outbound
1487 * segments. If the PCB isn't present and if scoped routing is
1488 * enabled, tcp_respond will use the scope of the interface where
1489 * the segment arrived on.
1490 */
1491 if (inp != NULL && (inp->inp_flags & INP_BOUND_IF))
1492 ifscope = inp->inp_boundifp->if_index;
1493
1494 /*
1495 * If the PCB is present and the socket isn't allowed to use
1496 * the cellular interface, indicate it as such for tcp_respond.
1497 */
1498 if (inp != NULL && (inp->inp_flags & INP_NO_IFT_CELLULAR))
1499 nocell = 1;
1500
1501 #if IPSEC
1502 if (ipsec_bypass == 0) {
1503 #if INET6
1504 if (isipv6) {
1505 if (inp != NULL && ipsec6_in_reject_so(m, inp->inp_socket)) {
1506 IPSEC_STAT_INCREMENT(ipsec6stat.in_polvio);
1507 if (in_pcb_checkstate(inp, WNT_RELEASE, 0) == WNT_STOPUSING)
1508 inp = NULL; // pretend we didn't find it
1509
1510 if (ifp != NULL && ifp->if_tcp_stat != NULL)
1511 atomic_add_64(&ifp->if_tcp_stat->badformatipsec, 1);
1512
1513 goto dropnosock;
1514 }
1515 } else
1516 #endif /* INET6 */
1517 if (inp != NULL && ipsec4_in_reject_so(m, inp->inp_socket)) {
1518 IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
1519 if (in_pcb_checkstate(inp, WNT_RELEASE, 0) == WNT_STOPUSING)
1520 inp = NULL; // pretend we didn't find it
1521
1522 if (ifp != NULL && ifp->if_tcp_stat != NULL)
1523 atomic_add_64(&ifp->if_tcp_stat->badformatipsec, 1);
1524
1525 goto dropnosock;
1526 }
1527 }
1528 #endif /*IPSEC*/
1529
1530 /*
1531 * If the state is CLOSED (i.e., TCB does not exist) then
1532 * all data in the incoming segment is discarded.
1533 * If the TCB exists but is in CLOSED state, it is embryonic,
1534 * but should either do a listen or a connect soon.
1535 */
1536 if (inp == NULL) {
1537 if (log_in_vain) {
1538 #if INET6
1539 char dbuf[MAX_IPv6_STR_LEN], sbuf[MAX_IPv6_STR_LEN];
1540 #else /* INET6 */
1541 char dbuf[MAX_IPv4_STR_LEN], sbuf[MAX_IPv4_STR_LEN];
1542 #endif /* INET6 */
1543
1544 #if INET6
1545 if (isipv6) {
1546 inet_ntop(AF_INET6, &ip6->ip6_dst, dbuf, sizeof(dbuf));
1547 inet_ntop(AF_INET6, &ip6->ip6_src, sbuf, sizeof(sbuf));
1548 } else
1549 #endif
1550 {
1551 inet_ntop(AF_INET, &ip->ip_dst, dbuf, sizeof(dbuf));
1552 inet_ntop(AF_INET, &ip->ip_src, sbuf, sizeof(sbuf));
1553 }
1554 switch (log_in_vain) {
1555 case 1:
1556 if(thflags & TH_SYN)
1557 log(LOG_INFO,
1558 "Connection attempt to TCP %s:%d from %s:%d\n",
1559 dbuf, ntohs(th->th_dport),
1560 sbuf,
1561 ntohs(th->th_sport));
1562 break;
1563 case 2:
1564 log(LOG_INFO,
1565 "Connection attempt to TCP %s:%d from %s:%d flags:0x%x\n",
1566 dbuf, ntohs(th->th_dport), sbuf,
1567 ntohs(th->th_sport), thflags);
1568 break;
1569 case 3:
1570 if ((thflags & TH_SYN) && !(thflags & TH_ACK) &&
1571 !(m->m_flags & (M_BCAST | M_MCAST)) &&
1572 #if INET6
1573 ((isipv6 && !IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) ||
1574 (!isipv6 && ip->ip_dst.s_addr != ip->ip_src.s_addr))
1575 #else
1576 ip->ip_dst.s_addr != ip->ip_src.s_addr
1577 #endif
1578 )
1579 log_in_vain_log((LOG_INFO,
1580 "Stealth Mode connection attempt to TCP %s:%d from %s:%d\n",
1581 dbuf, ntohs(th->th_dport),
1582 sbuf,
1583 ntohs(th->th_sport)));
1584 break;
1585 default:
1586 break;
1587 }
1588 }
1589 if (blackhole) {
1590 if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_type != IFT_LOOP)
1591
1592 switch (blackhole) {
1593 case 1:
1594 if (thflags & TH_SYN)
1595 goto dropnosock;
1596 break;
1597 case 2:
1598 goto dropnosock;
1599 default:
1600 goto dropnosock;
1601 }
1602 }
1603 rstreason = BANDLIM_RST_CLOSEDPORT;
1604
1605 if (ifp != NULL && ifp->if_tcp_stat != NULL)
1606 atomic_add_64(&ifp->if_tcp_stat->noconnnolist, 1);
1607
1608 goto dropwithresetnosock;
1609 }
1610 so = inp->inp_socket;
1611 if (so == NULL) {
1612 /* This case shouldn't happen as the socket shouldn't be null
1613 * if inp_state isn't set to INPCB_STATE_DEAD.
1614 * But just in case, we pretend we didn't find the socket if we hit this case,
1615 * as this isn't cause for a panic (the socket might be leaked, however)...
1616 */
1617 inp = NULL;
1618 #if TEMPDEBUG
1619 printf("tcp_input: no more socket for inp=%x. This shouldn't happen\n", inp);
1620 #endif
1621 goto dropnosock;
1622 }
1623
1624 tcp_lock(so, 1, 0);
1625 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
1626 tcp_unlock(so, 1, (void *)2);
1627 inp = NULL; // pretend we didn't find it
1628 goto dropnosock;
1629 }
1630
1631 tp = intotcpcb(inp);
1632 if (tp == 0) {
1633 rstreason = BANDLIM_RST_CLOSEDPORT;
1634
1635 if (ifp != NULL && ifp->if_tcp_stat != NULL)
1636 atomic_add_64(&ifp->if_tcp_stat->noconnlist, 1);
1637
1638 goto dropwithreset;
1639 }
1640 if (tp->t_state == TCPS_CLOSED)
1641 goto drop;
1642
1643 /* Unscale the window into a 32-bit value. */
1644 if ((thflags & TH_SYN) == 0)
1645 tiwin = th->th_win << tp->snd_scale;
1646 else
1647 tiwin = th->th_win;
1648
1649 #if CONFIG_MACF_NET
1650 if (mac_inpcb_check_deliver(inp, m, AF_INET, SOCK_STREAM))
1651 goto drop;
1652 #endif
1653
1654 /* Radar 7377561: Avoid processing packets while closing a listen socket */
1655 if (tp->t_state == TCPS_LISTEN && (so->so_options & SO_ACCEPTCONN) == 0)
1656 goto drop;
1657
1658 if (so->so_options & (SO_DEBUG|SO_ACCEPTCONN)) {
1659 #if TCPDEBUG
1660 if (so->so_options & SO_DEBUG) {
1661 ostate = tp->t_state;
1662 #if INET6
1663 if (isipv6)
1664 bcopy((char *)ip6, (char *)tcp_saveipgen,
1665 sizeof(*ip6));
1666 else
1667 #endif /* INET6 */
1668 bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
1669 tcp_savetcp = *th;
1670 }
1671 #endif
1672 if (so->so_options & SO_ACCEPTCONN) {
1673 register struct tcpcb *tp0 = tp;
1674 struct socket *so2;
1675 struct socket *oso;
1676 struct sockaddr_storage from;
1677 #if INET6
1678 struct inpcb *oinp = sotoinpcb(so);
1679 #endif /* INET6 */
1680 struct ifnet *head_ifscope;
1681 unsigned int head_nocell, head_recvanyif;
1682
1683 /* Get listener's bound-to-interface, if any */
1684 head_ifscope = (inp->inp_flags & INP_BOUND_IF) ?
1685 inp->inp_boundifp : NULL;
1686 /* Get listener's no-cellular information, if any */
1687 head_nocell = (inp->inp_flags & INP_NO_IFT_CELLULAR) ? 1 : 0;
1688 /* Get listener's recv-any-interface, if any */
1689 head_recvanyif = (inp->inp_flags & INP_RECV_ANYIF);
1690
1691 /*
1692 * If the state is LISTEN then ignore segment if it contains an RST.
1693 * If the segment contains an ACK then it is bad and send a RST.
1694 * If it does not contain a SYN then it is not interesting; drop it.
1695 * If it is from this socket, drop it, it must be forged.
1696 */
1697 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) != TH_SYN) {
1698
1699 if (ifp != NULL && ifp->if_tcp_stat != NULL)
1700 atomic_add_64(&ifp->if_tcp_stat->listbadsyn, 1);
1701
1702 if (thflags & TH_RST) {
1703 goto drop;
1704 }
1705 if (thflags & TH_ACK) {
1706 tp = NULL;
1707 tcpstat.tcps_badsyn++;
1708 rstreason = BANDLIM_RST_OPENPORT;
1709 goto dropwithreset;
1710 }
1711
1712 /* We come here if there is no SYN set */
1713 tcpstat.tcps_badsyn++;
1714 goto drop;
1715 }
1716 KERNEL_DEBUG(DBG_FNC_TCP_NEWCONN | DBG_FUNC_START,0,0,0,0,0);
1717 if (th->th_dport == th->th_sport) {
1718 #if INET6
1719 if (isipv6) {
1720 if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst,
1721 &ip6->ip6_src))
1722 goto drop;
1723 } else
1724 #endif /* INET6 */
1725 if (ip->ip_dst.s_addr == ip->ip_src.s_addr)
1726 goto drop;
1727 }
1728 /*
1729 * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN
1730 * in_broadcast() should never return true on a received
1731 * packet with M_BCAST not set.
1732 *
1733 * Packets with a multicast source address should also
1734 * be discarded.
1735 */
1736 if (m->m_flags & (M_BCAST|M_MCAST))
1737 goto drop;
1738 #if INET6
1739 if (isipv6) {
1740 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
1741 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
1742 goto drop;
1743 } else
1744 #endif
1745 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
1746 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
1747 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
1748 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
1749 goto drop;
1750
1751
1752 #if INET6
1753 /*
1754 * If deprecated address is forbidden,
1755 * we do not accept SYN to deprecated interface
1756 * address to prevent any new inbound connection from
1757 * getting established.
1758 * When we do not accept the SYN, we send a TCP RST
1759 * with the deprecated source address (instead of
1760 * dropping the segment). This is a compromise: it is
1761 * much better for the peer to receive a RST, and the
1762 * RST will be the final packet of the exchange.
1763 *
1764 * If we do not forbid deprecated addresses, we accept
1765 * the SYN packet. RFC2462 does not suggest dropping
1766 * SYN in this case.
1767 * If we read RFC2462 5.5.4 carefully, it says this:
1768 * 1. use of deprecated addr with existing
1769 * communication is okay - "SHOULD continue to be
1770 * used"
1771 * 2. use of it with new communication:
1772 * (2a) "SHOULD NOT be used if alternate address
1773 * with sufficient scope is available"
1774 * (2b) nothing mentioned otherwise.
1775 * Here we fall into (2b) case as we have no choice in
1776 * our source address selection - we must obey the peer.
1777 *
1778 * The wording in RFC2462 is confusing, and there are
1779 * multiple descriptions of deprecated address
1780 * handling - worse, they are not exactly the same.
1781 * I believe 5.5.4 is the best one, so we follow 5.5.4.
1782 */
1783 if (isipv6 && !ip6_use_deprecated) {
1784 struct in6_ifaddr *ia6;
1785
1786 ia6 = ip6_getdstifaddr(m);
1787 if (ia6 != NULL) {
1788 IFA_LOCK_SPIN(&ia6->ia_ifa);
1789 if (ia6->ia6_flags & IN6_IFF_DEPRECATED) {
1790 IFA_UNLOCK(&ia6->ia_ifa);
1791 IFA_REMREF(&ia6->ia_ifa);
1792 tp = NULL;
1793 rstreason = BANDLIM_RST_OPENPORT;
1794
1795 if (ifp != NULL && ifp->if_tcp_stat != NULL)
1796 atomic_add_64(&ifp->if_tcp_stat->deprecate6, 1);
1797
1798 goto dropwithreset;
1799 }
1800 IFA_UNLOCK(&ia6->ia_ifa);
1801 IFA_REMREF(&ia6->ia_ifa);
1802 }
1803 }
1804 #endif
1805 if (so->so_filt) {
1806 #if INET6
1807 if (isipv6) {
1808 struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)&from;
1809
1810 sin6->sin6_len = sizeof(*sin6);
1811 sin6->sin6_family = AF_INET6;
1812 sin6->sin6_port = th->th_sport;
1813 sin6->sin6_flowinfo = 0;
1814 sin6->sin6_addr = ip6->ip6_src;
1815 sin6->sin6_scope_id = 0;
1816 }
1817 else
1818 #endif
1819 {
1820 struct sockaddr_in *sin = (struct sockaddr_in*)&from;
1821
1822 sin->sin_len = sizeof(*sin);
1823 sin->sin_family = AF_INET;
1824 sin->sin_port = th->th_sport;
1825 sin->sin_addr = ip->ip_src;
1826 }
1827 so2 = sonewconn(so, 0, (struct sockaddr*)&from);
1828 } else {
1829 so2 = sonewconn(so, 0, NULL);
1830 }
1831 if (so2 == 0) {
1832 tcpstat.tcps_listendrop++;
1833 if (tcp_dropdropablreq(so)) {
1834 if (so->so_filt)
1835 so2 = sonewconn(so, 0, (struct sockaddr*)&from);
1836 else
1837 so2 = sonewconn(so, 0, NULL);
1838 }
1839 if (!so2)
1840 goto drop;
1841 }
1842
1843 /* Point "inp" and "tp" in tandem to new socket */
1844 inp = (struct inpcb *)so2->so_pcb;
1845 tp = intotcpcb(inp);
1846
1847 oso = so;
1848 tcp_unlock(so, 0, 0); /* Unlock but keep a reference on listener for now */
1849
1850 so = so2;
1851 tcp_lock(so, 1, 0);
1852 /*
1853 * Mark socket as temporary until we're
1854 * committed to keeping it. The code at
1855 * ``drop'' and ``dropwithreset'' check the
1856 * flag dropsocket to see if the temporary
1857 * socket created here should be discarded.
1858 * We mark the socket as discardable until
1859 * we're committed to it below in TCPS_LISTEN.
1860 * There are some error conditions in which we
1861 * have to drop the temporary socket.
1862 */
1863 dropsocket++;
1864 /*
1865 * Inherit INP_BOUND_IF from listener; testing if
1866 * head_ifscope is non-NULL is sufficient, since it
1867 * can only be set to a non-zero value earlier if
1868 * the listener has such a flag set.
1869 */
1870 if (head_ifscope != NULL) {
1871 inp->inp_flags |= INP_BOUND_IF;
1872 inp->inp_boundifp = head_ifscope;
1873 } else {
1874 inp->inp_flags &= ~INP_BOUND_IF;
1875 }
1876 /*
1877 * Inherit INP_NO_IFT_CELLULAR from listener.
1878 */
1879 if (head_nocell) {
1880 inp->inp_flags |= INP_NO_IFT_CELLULAR;
1881 }
1882 /*
1883 * Inherit INP_RECV_ANYIF from listener.
1884 */
1885 if (head_recvanyif)
1886 inp->inp_flags |= INP_RECV_ANYIF;
1887 else
1888 inp->inp_flags &= ~INP_RECV_ANYIF;
1889 #if INET6
1890 if (isipv6)
1891 inp->in6p_laddr = ip6->ip6_dst;
1892 else {
1893 inp->inp_vflag &= ~INP_IPV6;
1894 inp->inp_vflag |= INP_IPV4;
1895 #endif /* INET6 */
1896 inp->inp_laddr = ip->ip_dst;
1897 #if INET6
1898 }
1899 #endif /* INET6 */
1900 inp->inp_lport = th->th_dport;
1901 if (in_pcbinshash(inp, 0) != 0) {
1902 /*
1903 * Undo the assignments above if we failed to
1904 * put the PCB on the hash lists.
1905 */
1906 #if INET6
1907 if (isipv6)
1908 inp->in6p_laddr = in6addr_any;
1909 else
1910 #endif /* INET6 */
1911 inp->inp_laddr.s_addr = INADDR_ANY;
1912 inp->inp_lport = 0;
1913 tcp_lock(oso, 0, 0); /* release ref on parent */
1914 tcp_unlock(oso, 1, 0);
1915 goto drop;
1916 }
1917 #if INET6
1918 if (isipv6) {
1919 /*
1920 * Inherit socket options from the listening
1921 * socket.
1922 * Note that in6p_inputopts are not (even
1923 * should not be) copied, since it stores
1924 * previously received options and is used to
1925 * detect if each new option is different than
1926 * the previous one and hence should be passed
1927 * to a user.
1928 * If we copied in6p_inputopts, a user would
1929 * not be able to receive options just after
1930 * calling the accept system call.
1931 */
1932 inp->inp_flags |=
1933 oinp->inp_flags & INP_CONTROLOPTS;
1934 if (oinp->in6p_outputopts)
1935 inp->in6p_outputopts =
1936 ip6_copypktopts(oinp->in6p_outputopts,
1937 M_NOWAIT);
1938 } else
1939 #endif /* INET6 */
1940 inp->inp_options = ip_srcroute();
1941 tcp_lock(oso, 0, 0);
1942 #if IPSEC
1943 /* copy old policy into new socket's */
1944 if (sotoinpcb(oso)->inp_sp)
1945 {
1946 int error = 0;
1947 /* Is it a security hole here to silently fail to copy the policy? */
1948 if (inp->inp_sp != NULL)
1949 error = ipsec_init_policy(so, &inp->inp_sp);
1950 if (error != 0 || ipsec_copy_policy(sotoinpcb(oso)->inp_sp, inp->inp_sp))
1951 printf("tcp_input: could not copy policy\n");
1952 }
1953 #endif
1954 /* inherit states from the listener */
1955 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
1956 struct tcpcb *, tp, int32_t, TCPS_LISTEN);
1957 tp->t_state = TCPS_LISTEN;
1958 tp->t_flags |= tp0->t_flags & (TF_NOPUSH|TF_NOOPT|TF_NODELAY);
1959 tp->t_flagsext |= (tp0->t_flagsext & TF_RXTFINDROP);
1960 tp->t_keepinit = tp0->t_keepinit;
1961 tp->t_inpcb->inp_ip_ttl = tp0->t_inpcb->inp_ip_ttl;
1962 if ((so->so_flags & SOF_NOTSENT_LOWAT) != 0)
1963 tp->t_notsent_lowat = tp0->t_notsent_lowat;
1964
1965 /* now drop the reference on the listener */
1966 tcp_unlock(oso, 1, 0);
1967
1968 tcp_set_max_rwinscale(tp, so);
1969
1970 KERNEL_DEBUG(DBG_FNC_TCP_NEWCONN | DBG_FUNC_END,0,0,0,0,0);
1971 }
1972 }
1973 lck_mtx_assert(&((struct inpcb *)so->so_pcb)->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
1974
1975 /*
1976 * Radar 3529618
1977 * This is the second part of the MSS DoS prevention code (after
1978 * minmss on the sending side) and it deals with too many too small
1979 * tcp packets in a too short timeframe (1 second).
1980 *
1981 * For every full second we count the number of received packets
1982 * and bytes. If we get a lot of packets per second for this connection
1983 * (tcp_minmssoverload) we take a closer look at it and compute the
1984 * average packet size for the past second. If that is less than
1985 * tcp_minmss we get too many packets with very small payload which
1986 * is not good and burdens our system (and every packet generates
1987 * a wakeup to the process connected to our socket). We can reasonably
1988 * expect this to be a small-packet DoS attack meant to exhaust our CPU
1989 * cycles.
1990 *
1991 * Care has to be taken for the minimum packet overload value. This
1992 * value defines the minimum number of packets per second before we
1993 * start to worry. It must not be set too low, or we would end up
1994 * killing interactive connections that legitimately send many small
1995 * packets, such as telnet or SSH.
1996 *
1997 * Setting either tcp_minmssoverload or tcp_minmss to "0" disables
1998 * this check.
1999 *
2000 * Account for packet if payload packet, skip over ACK, etc.
2001 */
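	/*
	 * Rough sketch of the check below (values are purely illustrative and
	 * depend on the tcp_minmss and tcp_minmssoverload sysctls, whose
	 * platform defaults are not shown here):
	 *
	 *     rcv_pps  = 1200 packets seen in the current 1-second window
	 *     rcv_byps = 120000 bytes in the same window
	 *     average  = rcv_byps / rcv_pps = 100 bytes/packet
	 *
	 * If rcv_pps exceeds tcp_minmssoverload and the average payload is
	 * below tcp_minmss, the connection is dropped with ECONNRESET.
	 */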
2002 if (tp->t_state == TCPS_ESTABLISHED && tlen > 0) {
2003 if (TSTMP_GT(tp->rcv_reset, tcp_now)) {
2004 tp->rcv_pps++;
2005 tp->rcv_byps += tlen + off;
2006 if (tp->rcv_byps > tp->rcv_maxbyps)
2007 tp->rcv_maxbyps = tp->rcv_byps;
2008 /*
2009 * Setting either tcp_minmssoverload or tcp_minmss to "0" disables
2010 * the check.
2011 */
2012 if (tcp_minmss && tcp_minmssoverload && tp->rcv_pps > tcp_minmssoverload) {
2013 if ((tp->rcv_byps / tp->rcv_pps) < tcp_minmss) {
2014 char ipstrbuf[MAX_IPv6_STR_LEN];
2015 printf("too many small tcp packets from "
2016 "%s:%u, av. %ubyte/packet, "
2017 "dropping connection\n",
2018 #if INET6
2019 isipv6 ?
2020 inet_ntop(AF_INET6, &inp->in6p_faddr, ipstrbuf,
2021 sizeof(ipstrbuf)) :
2022 #endif
2023 inet_ntop(AF_INET, &inp->inp_faddr, ipstrbuf,
2024 sizeof(ipstrbuf)),
2025 inp->inp_fport,
2026 tp->rcv_byps / tp->rcv_pps);
2027 tp = tcp_drop(tp, ECONNRESET);
2028 /* tcpstat.tcps_minmssdrops++; */
2029 goto drop;
2030 }
2031 }
2032 } else {
2033 tp->rcv_reset = tcp_now + TCP_RETRANSHZ;
2034 tp->rcv_pps = 1;
2035 tp->rcv_byps = tlen + off;
2036 }
2037
2038 /* Evaluate the rate of arrival of packets to see if the
2039 * receiver can reduce the ack traffic. The algorithm to
2040 * stretch acks will be enabled if the connection meets
2041 * certain criteria defined in the tcp_stretch_ack_enable() function.
2042 */
2043 if ((tp->t_flagsext & TF_RCVUNACK_WAITSS) != 0) {
2044 TCP_INC_VAR(tp->rcv_waitforss, nlropkts);
2045 }
2046 if (tcp_stretch_ack_enable(tp)) {
2047 tp->t_flags |= TF_STRETCHACK;
2048 tp->t_flagsext &= ~(TF_RCVUNACK_WAITSS);
2049 tp->rcv_waitforss = 0;
2050 } else {
2051 tp->t_flags &= ~(TF_STRETCHACK);
2052 }
2053 if (TSTMP_GT(tp->rcv_unackwin, tcp_now)) {
2054 tp->rcv_by_unackwin += (tlen + off);
2055 } else {
2056 tp->rcv_unackwin = tcp_now + tcp_rcvunackwin;
2057 tp->rcv_by_unackwin = tlen + off;
2058 }
2059 }
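	/*
	 * Sketch of the ack-stretching decision above (the exact thresholds
	 * live in tcp_stretch_ack_enable() and are an assumption here): once
	 * the connection has seen enough back-to-back full-sized segments in
	 * slow start (the TF_RCVUNACK_WAITSS accounting), TF_STRETCHACK is set
	 * and the receiver acks less often than the usual every-other-segment,
	 * while rcv_by_unackwin keeps counting bytes received in the current
	 * unacked window; otherwise the flag is cleared and normal delayed-ack
	 * behaviour resumes.
	 */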
2060
2061 /*
2062 * Keep track of how many bytes were received in the LRO packet
2063 */
2064 if ((mauxf_sw_lro_pkt) && (nlropkts > 2)) {
2065 tp->t_lropktlen += tlen;
2066 }
2067 /*
2068 Explicit Congestion Notification - Flag that we need to send ECE (ECN-Echo) if
2069 + The IP Congestion Experienced (CE) codepoint was set.
2070 + Socket is in established state
2071 + We negotiated ECN in the TCP setup
2072 + This isn't a pure ack (tlen > 0)
2073 + The data is in the valid window
2074
2075 TE_SENDECE will be cleared when we receive a packet with TH_CWR set.
2076 */
2077 if (ip_ecn == IPTOS_ECN_CE && tp->t_state == TCPS_ESTABLISHED &&
2078 ((tp->ecn_flags & (TE_ECN_ON)) == (TE_ECN_ON)) && tlen > 0 &&
2079 SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
2080 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
2081 tp->ecn_flags |= TE_SENDECE;
2082 }
2083
2084 /*
2085 Clear TE_SENDECE if TH_CWR is set. This is harmless, so we don't
2086 bother doing extensive checks for state and whatnot.
2087 */
2088 if ((thflags & TH_CWR) == TH_CWR) {
2089 tp->ecn_flags &= ~TE_SENDECE;
2090 }
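	/*
	 * Hedged sketch of the RFC 3168 echo exchange implemented above:
	 *
	 *   1. A router marks a data segment CE in the IP header
	 *      (ip_ecn == IPTOS_ECN_CE).
	 *   2. This receiver sets TE_SENDECE, so every subsequent ACK carries
	 *      TH_ECE until the sender reacts.
	 *   3. The sender reduces its congestion window and sets TH_CWR on the
	 *      next new data segment.
	 *   4. Seeing TH_CWR here clears TE_SENDECE and the ACKs stop carrying
	 *      ECE.
	 */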
2091
2092 /* If we received an explicit notification of congestion in
2093 * the IP TOS ECN bits or by the CWR bit in the TCP header flags, reset
2094 * the ack-stretching state.
2095 */
2096 if (tp->t_state == TCPS_ESTABLISHED && (tp->t_flags & TF_STRETCHACK) != 0 &&
2097 ((ip_ecn == IPTOS_ECN_CE) || ((thflags & TH_CWR) == TH_CWR)))
2098 tcp_reset_stretch_ack(tp);
2099
2100 /*
2101 * Try to determine if we are receiving a packet after a long time.
2102 * Use our own approximation of idletime to roughly measure remote
2103 * end's idle time. Since slowstart is used after an idle period
2104 * we want to avoid doing LRO if the remote end is not up to date
2105 * on initial window support and starts with 1 or 2 packets as its IW.
2106 */
2107 if (sw_lro && (tp->t_flagsext & TF_LRO_OFFLOADED) &&
2108 ((tcp_now - tp->t_rcvtime) >= (TCP_IDLETIMEOUT(tp)))) {
2109 turnoff_lro = 1;
2110 }
2111
2112 /*
2113 * Segment received on connection.
2114 * Reset idle time and keep-alive timer.
2115 */
2116 tp->t_rcvtime = tcp_now;
2117 if (TCPS_HAVEESTABLISHED(tp->t_state))
2118 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, TCP_KEEPIDLE(tp));
2119
2120 /*
2121 * Process options if not in LISTEN state,
2122 * else do it below (after getting remote address).
2123 */
2124 if (tp->t_state != TCPS_LISTEN && optp)
2125 tcp_dooptions(tp, optp, optlen, th, &to, ifscope);
2126
2127 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
2128 if (to.to_flags & TOF_SCALE) {
2129 tp->t_flags |= TF_RCVD_SCALE;
2130 tp->requested_s_scale = to.to_requested_s_scale;
2131 tp->snd_wnd = th->th_win << tp->snd_scale;
2132 tiwin = tp->snd_wnd;
2133 }
2134 if (to.to_flags & TOF_TS) {
2135 tp->t_flags |= TF_RCVD_TSTMP;
2136 tp->ts_recent = to.to_tsval;
2137 tp->ts_recent_age = tcp_now;
2138 }
2139 if (to.to_flags & TOF_MSS)
2140 tcp_mss(tp, to.to_mss, ifscope);
2141 if (tp->sack_enable) {
2142 if (!(to.to_flags & TOF_SACK))
2143 tp->sack_enable = 0;
2144 else
2145 tp->t_flags |= TF_SACK_PERMIT;
2146 }
2147 }
2148
2149 #if TRAFFIC_MGT
2150 /* Compute inter-packet arrival jitter. According to RFC 3550, inter-packet
2151 * arrival jitter is defined as the difference in packet spacing at the
2152 * receiver compared to the sender for a pair of packets. When two packets
2153 * of maximum segment size come one after the other with consecutive
2154 * sequence numbers, we consider them as packets sent together at the
2155 * sender and use them as a pair to compute inter-packet arrival jitter.
2156 * This metric indicates the delay induced by the network components due
2157 * to queuing in edge/access routers.
2158 */
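	/*
	 * For reference, the RFC 3550 estimator that compute_iaj() is modeled
	 * on (a sketch; the in-kernel variant works in tcp_now ticks and only
	 * feeds it pairs of equally sized, in-order segments):
	 *
	 *     D = (R2 - R1) - (S2 - S1)     relative transit-time difference
	 *     J = J + (|D| - J) / 16        exponentially smoothed jitter
	 *
	 * Since consecutive full-sized segments are assumed to leave the
	 * sender back to back, S2 - S1 is treated as ~0 and D collapses to the
	 * local inter-arrival gap R2 - R1.
	 */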
2159 if (tp->t_state == TCPS_ESTABLISHED &&
2160 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK|TH_ECE|TH_PUSH)) == TH_ACK &&
2161 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
2162 ((to.to_flags & TOF_TS) == 0 ||
2163 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) &&
2164 th->th_seq == tp->rcv_nxt &&
2165 LIST_EMPTY(&tp->t_segq)) {
2166 int seg_size = tlen;
2167 if (tp->iaj_pktcnt <= IAJ_IGNORE_PKTCNT) {
2168 TCP_INC_VAR(tp->iaj_pktcnt, nlropkts);
2169 }
2170
2171 if (m->m_pkthdr.aux_flags & MAUXF_SW_LRO_PKT) {
2172 seg_size = m->m_pkthdr.lro_pktlen;
2173 }
2174 if ( tp->iaj_size == 0 || seg_size > tp->iaj_size ||
2175 (seg_size == tp->iaj_size && tp->iaj_rcv_ts == 0)) {
2176 /* State related to inter-arrival jitter is uninitialized
2177 * or we are trying to find a good first packet to start
2178 * computing the metric
2179 */
2180 update_iaj_state(tp, seg_size, 0);
2181 } else {
2182 if (seg_size == tp->iaj_size) {
2183 /* Compute inter-arrival jitter taking this packet
2184 * as the second packet
2185 */
2186 compute_iaj(tp);
2187 }
2188 if (seg_size < tp->iaj_size) {
2189 /* There is a smaller packet in the stream.
2190 * Sometimes the maximum packet size supported on a path can
2191 * change if there is a new link with a smaller MTU.
2192 * The receiver will not know about this change.
2193 * If there are too many packets smaller than iaj_size,
2194 * we try to learn the iaj_size again.
2195 */
2196 tp->iaj_small_pkt++;
2197 if (tp->iaj_small_pkt > RESET_IAJ_SIZE_THRESH) {
2198 update_iaj_state(tp, seg_size, 1);
2199 } else {
2200 CLEAR_IAJ_STATE(tp);
2201 }
2202 } else {
2203 update_iaj_state(tp, seg_size, 0);
2204 }
2205 }
2206 } else {
2207 CLEAR_IAJ_STATE(tp);
2208 }
2209 #endif /* TRAFFIC_MGT */
2210
2211 /*
2212 * Header prediction: check for the two common cases
2213 * of a uni-directional data xfer. If the packet has
2214 * no control flags, is in-sequence, the window didn't
2215 * change and we're not retransmitting, it's a
2216 * candidate. If the length is zero and the ack moved
2217 * forward, we're the sender side of the xfer. Just
2218 * free the data acked & wake any higher level process
2219 * that was blocked waiting for space. If the length
2220 * is non-zero and the ack didn't move, we're the
2221 * receiver side. If we're getting packets in-order
2222 * (the reassembly queue is empty), add the data to
2223 * the socket buffer and note that we need a delayed ack.
2224 * Make sure that the hidden state-flags are also off.
2225 * Since we check for TCPS_ESTABLISHED above, it can only
2226 * be TH_NEEDSYN.
2227 */
2228 if (tp->t_state == TCPS_ESTABLISHED &&
2229 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK|TH_ECE)) == TH_ACK &&
2230 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
2231 ((to.to_flags & TOF_TS) == 0 ||
2232 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) &&
2233 th->th_seq == tp->rcv_nxt &&
2234 tiwin && tiwin == tp->snd_wnd &&
2235 tp->snd_nxt == tp->snd_max) {
2236
2237 /*
2238 * If last ACK falls within this segment's sequence numbers,
2239 * record the timestamp.
2240 * NOTE that the test is modified according to the latest
2241 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
2242 */
2243 if ((to.to_flags & TOF_TS) != 0 &&
2244 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
2245 tp->ts_recent_age = tcp_now;
2246 tp->ts_recent = to.to_tsval;
2247 }
2248
2249 /* Force acknowledgment if we received a FIN */
2250
2251 if (thflags & TH_FIN)
2252 tp->t_flags |= TF_ACKNOW;
2253
2254 if (tlen == 0) {
2255 if (SEQ_GT(th->th_ack, tp->snd_una) &&
2256 SEQ_LEQ(th->th_ack, tp->snd_max) &&
2257 tp->snd_cwnd >= tp->snd_ssthresh &&
2258 (!IN_FASTRECOVERY(tp) &&
2259 ((!tp->sack_enable && tp->t_dupacks < tcprexmtthresh) ||
2260 (tp->sack_enable && to.to_nsacks == 0 &&
2261 TAILQ_EMPTY(&tp->snd_holes))))) {
2262 /*
2263 * this is a pure ack for outstanding data.
2264 */
2265 ++tcpstat.tcps_predack;
2266 /*
2267 * "bad retransmit" recovery
2268 */
2269 if (tp->t_rxtshift == 1 &&
2270 TSTMP_LT(tcp_now, tp->t_badrxtwin)) {
2271 ++tcpstat.tcps_sndrexmitbad;
2272 tp->snd_cwnd = tp->snd_cwnd_prev;
2273 tp->snd_ssthresh =
2274 tp->snd_ssthresh_prev;
2275 tp->snd_recover = tp->snd_recover_prev;
2276 if (tp->t_flags & TF_WASFRECOVERY)
2277 ENTER_FASTRECOVERY(tp);
2278 tp->snd_nxt = tp->snd_max;
2279 tp->t_badrxtwin = 0;
2280 tp->t_rxtshift = 0;
2281 tp->rxt_start = 0;
2282 tcp_bad_rexmt_fix_sndbuf(tp);
2283 DTRACE_TCP5(cc, void, NULL, struct inpcb *, tp->t_inpcb,
2284 struct tcpcb *, tp, struct tcphdr *, th,
2285 int32_t, TCP_CC_BAD_REXMT_RECOVERY);
2286 }
2287 /*
2288 * Recalculate the transmit timer / rtt.
2289 *
2290 * Some boxes send broken timestamp replies
2291 * during the SYN+ACK phase, ignore
2292 * timestamps of 0 or we could calculate a
2293 * huge RTT and blow up the retransmit timer.
2294 */
2295 if (((to.to_flags & TOF_TS) != 0) && (to.to_tsecr != 0) &&
2296 TSTMP_GEQ(tcp_now, to.to_tsecr)) {
2297 tcp_xmit_timer(tp,
2298 tcp_now - to.to_tsecr);
2299 } else if (tp->t_rtttime &&
2300 SEQ_GT(th->th_ack, tp->t_rtseq)) {
2301 tcp_xmit_timer(tp, tcp_now - tp->t_rtttime);
2302 }
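	/*
	 * Worked example of the timestamp-based RTT sample above (the tick
	 * rate is an assumption; tcp_now advances at TCP_RETRANSHZ ticks per
	 * second on this stack):
	 *
	 *     tcp_now     = 120500
	 *     to.to_tsecr = 120450   (our own clock echoed back by the peer)
	 *     rtt sample  = 120500 - 120450 = 50 ticks
	 *
	 * A tsecr of 0 is ignored because some stacks echo broken timestamps
	 * during the SYN exchange, which would otherwise yield an absurd RTT.
	 */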
2303 acked = th->th_ack - tp->snd_una;
2304 tcpstat.tcps_rcvackpack++;
2305 tcpstat.tcps_rcvackbyte += acked;
2306
2307 /* Handle an ack that is in sequence during congestion
2308 * avoidance phase. The calculations in this function
2309 * assume that snd_una is not updated yet.
2310 */
2311 if (CC_ALGO(tp)->inseq_ack_rcvd != NULL)
2312 CC_ALGO(tp)->inseq_ack_rcvd(tp, th);
2313
2314 DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
2315 struct tcpcb *, tp, struct tcphdr *, th,
2316 int32_t, TCP_CC_INSEQ_ACK_RCVD);
2317
2318 sbdrop(&so->so_snd, acked);
2319 tcp_sbsnd_trim(&so->so_snd);
2320
2321 if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
2322 SEQ_LEQ(th->th_ack, tp->snd_recover))
2323 tp->snd_recover = th->th_ack - 1;
2324 tp->snd_una = th->th_ack;
2325
2326 /*
2327 * pull snd_wl2 up to prevent seq wrap relative
2328 * to th_ack.
2329 */
2330 tp->snd_wl2 = th->th_ack;
2331 tp->t_dupacks = 0;
2332 m_freem(m);
2333 ND6_HINT(tp); /* some progress has been done */
2334
2335 /*
2336 * If all outstanding data are acked, stop
2337 * retransmit timer, otherwise restart timer
2338 * using current (possibly backed-off) value.
2339 * If process is waiting for space,
2340 * wakeup/selwakeup/signal. If data
2341 * are ready to send, let tcp_output
2342 * decide between more output or persist.
2343 */
2344 if (tp->snd_una == tp->snd_max)
2345 tp->t_timer[TCPT_REXMT] = 0;
2346 else if (tp->t_timer[TCPT_PERSIST] == 0)
2347 tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur);
2348
2349 if ((tp->t_flagsext & TF_MEASURESNDBW) != 0 &&
2350 tp->t_bwmeas != NULL)
2351 tcp_bwmeas_check(tp);
2352 sowwakeup(so); /* has to be done with socket lock held */
2353 if ((so->so_snd.sb_cc) || (tp->t_flags & TF_ACKNOW)) {
2354 (void) tcp_output(tp);
2355 }
2356
2357 tcp_check_timer_state(tp);
2358 tcp_unlock(so, 1, 0);
2359 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0);
2360 return;
2361 }
2362 } else if (th->th_ack == tp->snd_una &&
2363 LIST_EMPTY(&tp->t_segq) &&
2364 tlen <= tcp_sbspace(tp)) {
2365 /*
2366 * this is a pure, in-sequence data packet
2367 * with nothing on the reassembly queue and
2368 * we have enough buffer space to take it.
2369 */
2370
2371 /*
2372 * If this is a connection in steady state, start
2373 * coalescing packets belonging to this flow.
2374 */
2375 if (turnoff_lro) {
2376 tcp_lro_remove_state(tp->t_inpcb->inp_laddr,
2377 tp->t_inpcb->inp_faddr,
2378 tp->t_inpcb->inp_lport,
2379 tp->t_inpcb->inp_fport);
2380 tp->t_flagsext &= ~TF_LRO_OFFLOADED;
2381 tp->t_idleat = tp->rcv_nxt;
2382 } else if (sw_lro && !mauxf_sw_lro_pkt && !isipv6 &&
2383 (so->so_flags & SOF_USELRO) &&
2384 (m->m_pkthdr.rcvif->if_type != IFT_CELLULAR) &&
2385 (m->m_pkthdr.rcvif->if_type != IFT_LOOP) &&
2386 ((th->th_seq - tp->irs) >
2387 (tp->t_maxseg << lro_start)) &&
2388 ((tp->t_idleat == 0) || ((th->th_seq -
2389 tp->t_idleat) > (tp->t_maxseg << lro_start)))) {
2390 tp->t_flagsext |= TF_LRO_OFFLOADED;
2391 tcp_start_coalescing(ip, th, tlen);
2392 tp->t_idleat = 0;
2393 }
2394
2395 /* Clean receiver SACK report if present */
2396 if (tp->sack_enable && tp->rcv_numsacks)
2397 tcp_clean_sackreport(tp);
2398 ++tcpstat.tcps_preddat;
2399 tp->rcv_nxt += tlen;
2400 /*
2401 * Pull snd_wl1 up to prevent seq wrap relative to
2402 * th_seq.
2403 */
2404 tp->snd_wl1 = th->th_seq;
2405 /*
2406 * Pull rcv_up up to prevent seq wrap relative to
2407 * rcv_nxt.
2408 */
2409 tp->rcv_up = tp->rcv_nxt;
2410 TCP_INC_VAR(tcpstat.tcps_rcvpack, nlropkts);
2411 tcpstat.tcps_rcvbyte += tlen;
2412 if (nstat_collect) {
2413 if (m->m_pkthdr.aux_flags & MAUXF_SW_LRO_PKT) {
2414 locked_add_64(&inp->inp_stat->rxpackets, m->m_pkthdr.lro_npkts);
2415 }
2416 else {
2417 locked_add_64(&inp->inp_stat->rxpackets, 1);
2418 }
2419 locked_add_64(&inp->inp_stat->rxbytes, tlen);
2420 }
2421 ND6_HINT(tp); /* some progress has been done */
2422
2423 tcp_sbrcv_grow(tp, &so->so_rcv, &to, tlen);
2424
2425 /*
2426 * Add data to socket buffer.
2427 */
2428 so_recv_data_stat(so, m, 0);
2429 m_adj(m, drop_hdrlen); /* delayed header drop */
2430 if (sbappendstream(&so->so_rcv, m))
2431 sorwakeup(so);
2432 #if INET6
2433 if (isipv6) {
2434 KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport),
2435 (((ip6->ip6_src.s6_addr16[0]) << 16) | (ip6->ip6_dst.s6_addr16[0])),
2436 th->th_seq, th->th_ack, th->th_win);
2437 }
2438 else
2439 #endif
2440 {
2441 KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport),
2442 (((ip->ip_src.s_addr & 0xffff) << 16) | (ip->ip_dst.s_addr & 0xffff)),
2443 th->th_seq, th->th_ack, th->th_win);
2444 }
2445 TCP_INC_VAR(tp->t_unacksegs, nlropkts);
2446 if (DELAY_ACK(tp, th)) {
2447 if ((tp->t_flags & TF_DELACK) == 0) {
2448 tp->t_flags |= TF_DELACK;
2449 tp->t_timer[TCPT_DELACK] = OFFSET_FROM_START(tp, tcp_delack);
2450 }
2451 } else {
2452 tp->t_flags |= TF_ACKNOW;
2453 tcp_output(tp);
2454 }
2455 tcp_check_timer_state(tp);
2456 tcp_unlock(so, 1, 0);
2457 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0);
2458 return;
2459 }
2460 }
2461
2462 /*
2463 * Calculate amount of space in receive window,
2464 * and then do TCP input processing.
2465 * Receive window is amount of space in rcv queue,
2466 * but not less than advertised window.
2467 */
2468 lck_mtx_assert(&((struct inpcb *)so->so_pcb)->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
2469
2470 { int win;
2471
2472 win = tcp_sbspace(tp);
2473
2474 if (win < 0)
2475 win = 0;
2476 else { /* clip rcv window to 4K for modems */
2477 if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0)
2478 win = min(win, slowlink_wsize);
2479 }
2480 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
2481 }
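	/*
	 * Sketch of the clamp just computed (numbers are illustrative):
	 *
	 *     tcp_sbspace(tp)           = 131072   free space in so_rcv
	 *     tp->rcv_adv - tp->rcv_nxt =  65536   window already advertised
	 *     tp->rcv_wnd = imax(131072, 65536) = 131072
	 *
	 * The imax() keeps us from shrinking a window we have already offered
	 * to the peer, and the slowlink case caps the offer at slowlink_wsize
	 * (nominally 4 KB) for connections flagged TF_SLOWLINK.
	 */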
2482
2483 switch (tp->t_state) {
2484
2485 /*
2486 * Initialize tp->rcv_nxt, and tp->irs, select an initial
2487 * tp->iss, and send a segment:
2488 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
2489 * Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss.
2490 * Fill in remote peer address fields if not previously specified.
2491 * Enter SYN_RECEIVED state, and process any other fields of this
2492 * segment in this state.
2493 */
2494 case TCPS_LISTEN: {
2495 register struct sockaddr_in *sin;
2496 #if INET6
2497 register struct sockaddr_in6 *sin6;
2498 #endif
2499
2500 lck_mtx_assert(&((struct inpcb *)so->so_pcb)->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
2501 #if INET6
2502 if (isipv6) {
2503 MALLOC(sin6, struct sockaddr_in6 *, sizeof *sin6,
2504 M_SONAME, M_NOWAIT);
2505 if (sin6 == NULL)
2506 goto drop;
2507 bzero(sin6, sizeof(*sin6));
2508 sin6->sin6_family = AF_INET6;
2509 sin6->sin6_len = sizeof(*sin6);
2510 sin6->sin6_addr = ip6->ip6_src;
2511 sin6->sin6_port = th->th_sport;
2512 laddr6 = inp->in6p_laddr;
2513 if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
2514 inp->in6p_laddr = ip6->ip6_dst;
2515 if (in6_pcbconnect(inp, (struct sockaddr *)sin6,
2516 proc0)) {
2517 inp->in6p_laddr = laddr6;
2518 FREE(sin6, M_SONAME);
2519 goto drop;
2520 }
2521 FREE(sin6, M_SONAME);
2522 } else
2523 #endif
2524 {
2525 lck_mtx_assert(&((struct inpcb *)so->so_pcb)->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
2526 MALLOC(sin, struct sockaddr_in *, sizeof *sin, M_SONAME,
2527 M_NOWAIT);
2528 if (sin == NULL)
2529 goto drop;
2530 sin->sin_family = AF_INET;
2531 sin->sin_len = sizeof(*sin);
2532 sin->sin_addr = ip->ip_src;
2533 sin->sin_port = th->th_sport;
2534 bzero((caddr_t)sin->sin_zero, sizeof(sin->sin_zero));
2535 laddr = inp->inp_laddr;
2536 if (inp->inp_laddr.s_addr == INADDR_ANY)
2537 inp->inp_laddr = ip->ip_dst;
2538 if (in_pcbconnect(inp, (struct sockaddr *)sin, proc0, NULL)) {
2539 inp->inp_laddr = laddr;
2540 FREE(sin, M_SONAME);
2541 goto drop;
2542 }
2543 FREE(sin, M_SONAME);
2544 }
2545
2546 tcp_dooptions(tp, optp, optlen, th, &to, ifscope);
2547
2548 if (tp->sack_enable) {
2549 if (!(to.to_flags & TOF_SACK))
2550 tp->sack_enable = 0;
2551 else
2552 tp->t_flags |= TF_SACK_PERMIT;
2553 }
2554
2555 if (iss)
2556 tp->iss = iss;
2557 else {
2558 tp->iss = tcp_new_isn(tp);
2559 }
2560 tp->irs = th->th_seq;
2561 tcp_sendseqinit(tp);
2562 tcp_rcvseqinit(tp);
2563 tp->snd_recover = tp->snd_una;
2564 /*
2565 * Initialization of the tcpcb for transaction;
2566 * set SND.WND = SEG.WND,
2567 * initialize CCsend and CCrecv.
2568 */
2569 tp->snd_wnd = tiwin; /* initial send-window */
2570 tp->t_flags |= TF_ACKNOW;
2571 tp->t_unacksegs = 0;
2572 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
2573 struct tcpcb *, tp, int32_t, TCPS_SYN_RECEIVED);
2574 tp->t_state = TCPS_SYN_RECEIVED;
2575 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
2576 tp->t_keepinit ? tp->t_keepinit : tcp_keepinit);
2577 dropsocket = 0; /* committed to socket */
2578
2579 if (inp->inp_flowhash == 0)
2580 inp->inp_flowhash = inp_calc_flowhash(inp);
2581
2582 /* reset the incomp processing flag */
2583 so->so_flags &= ~(SOF_INCOMP_INPROGRESS);
2584 tcpstat.tcps_accepts++;
2585 if ((thflags & (TH_ECE | TH_CWR)) == (TH_ECE | TH_CWR)) {
2586 /* ECN-setup SYN */
2587 tp->ecn_flags |= (TE_SETUPRECEIVED | TE_SENDIPECT);
2588 }
2589
2590 #if CONFIG_IFEF_NOWINDOWSCALE
2591 if (tcp_obey_ifef_nowindowscale && m->m_pkthdr.rcvif != NULL &&
2592 (m->m_pkthdr.rcvif->if_eflags & IFEF_NOWINDOWSCALE)) {
2593 /* Window scaling is not enabled on this interface */
2594 tp->t_flags &= ~TF_REQ_SCALE;
2595 }
2596 #endif
2597 goto trimthenstep6;
2598 }
2599
2600 /*
2601 * If the state is SYN_RECEIVED:
2602 * if seg contains an ACK, but not for our SYN/ACK, send a RST.
2603 */
2604 case TCPS_SYN_RECEIVED:
2605 if ((thflags & TH_ACK) &&
2606 (SEQ_LEQ(th->th_ack, tp->snd_una) ||
2607 SEQ_GT(th->th_ack, tp->snd_max))) {
2608 rstreason = BANDLIM_RST_OPENPORT;
2609
2610 if (ifp != NULL && ifp->if_tcp_stat != NULL)
2611 atomic_add_64(&ifp->if_tcp_stat->ooopacket, 1);
2612
2613 goto dropwithreset;
2614 }
2615 break;
2616
2617 /*
2618 * If the state is SYN_SENT:
2619 * if seg contains an ACK, but not for our SYN, drop the input.
2620 * if seg contains a RST, then drop the connection.
2621 * if seg does not contain SYN, then drop it.
2622 * Otherwise this is an acceptable SYN segment
2623 * initialize tp->rcv_nxt and tp->irs
2624 * if seg contains ack then advance tp->snd_una
2625 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
2626 * arrange for segment to be acked (eventually)
2627 * continue processing rest of data/controls, beginning with URG
2628 */
2629 case TCPS_SYN_SENT:
2630 if ((thflags & TH_ACK) &&
2631 (SEQ_LEQ(th->th_ack, tp->iss) ||
2632 SEQ_GT(th->th_ack, tp->snd_max))) {
2633 rstreason = BANDLIM_UNLIMITED;
2634
2635 if (ifp != NULL && ifp->if_tcp_stat != NULL)
2636 atomic_add_64(&ifp->if_tcp_stat->ooopacket, 1);
2637
2638 goto dropwithreset;
2639 }
2640 if (thflags & TH_RST) {
2641 if ((thflags & TH_ACK) != 0) {
2642 soevent(so,
2643 (SO_FILT_HINT_LOCKED |
2644 SO_FILT_HINT_CONNRESET));
2645 tp = tcp_drop(tp, ECONNREFUSED);
2646 postevent(so, 0, EV_RESET);
2647 }
2648 goto drop;
2649 }
2650 if ((thflags & TH_SYN) == 0)
2651 goto drop;
2652 tp->snd_wnd = th->th_win; /* initial send window */
2653
2654 tp->irs = th->th_seq;
2655 tcp_rcvseqinit(tp);
2656 if (thflags & TH_ACK) {
2657 tcpstat.tcps_connects++;
2658
2659 if ((thflags & (TH_ECE | TH_CWR)) == (TH_ECE)) {
2660 /* ECN-setup SYN-ACK */
2661 tp->ecn_flags |= TE_SETUPRECEIVED;
2662 }
2663 else {
2664 /* non-ECN-setup SYN-ACK */
2665 tp->ecn_flags &= ~TE_SENDIPECT;
2666 }
2667
2668 #if CONFIG_MACF_NET && CONFIG_MACF_SOCKET
2669 /* XXXMAC: recursive lock: SOCK_LOCK(so); */
2670 mac_socketpeer_label_associate_mbuf(m, so);
2671 /* XXXMAC: SOCK_UNLOCK(so); */
2672 #endif
2673 /* Do window scaling on this connection? */
2674 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2675 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2676 tp->snd_scale = tp->requested_s_scale;
2677 tp->rcv_scale = tp->request_r_scale;
2678 }
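			/*
			 * Illustrative example: window scaling only takes effect when
			 * both sides sent the option.  If the peer asked for wscale 6
			 * and we asked for wscale 8, then from here on
			 *
			 *     snd_scale = 6   applied to windows the peer advertises
			 *     rcv_scale = 8   applied to windows we advertise
			 *
			 * and a raw th_win of 0x2000 from the peer is interpreted as
			 * 8192 << 6 = 524288 bytes.
			 */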
2679 tp->rcv_adv += min(tp->rcv_wnd, TCP_MAXWIN << tp->rcv_scale);
2680 tp->snd_una++; /* SYN is acked */
2681 /*
2682 * If there's data, delay ACK; if there's also a FIN
2683 * ACKNOW will be turned on later.
2684 */
2685 TCP_INC_VAR(tp->t_unacksegs, nlropkts);
2686 if (DELAY_ACK(tp, th) && tlen != 0 ) {
2687 if ((tp->t_flags & TF_DELACK) == 0) {
2688 tp->t_flags |= TF_DELACK;
2689 tp->t_timer[TCPT_DELACK] = OFFSET_FROM_START(tp, tcp_delack);
2690 }
2691 }
2692 else {
2693 tp->t_flags |= TF_ACKNOW;
2694 }
2695 /*
2696 * Received <SYN,ACK> in SYN_SENT[*] state.
2697 * Transitions:
2698 * SYN_SENT --> ESTABLISHED
2699 * SYN_SENT* --> FIN_WAIT_1
2700 */
2701 tp->t_starttime = tcp_now;
2702 tcp_sbrcv_tstmp_check(tp);
2703 if (tp->t_flags & TF_NEEDFIN) {
2704 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
2705 struct tcpcb *, tp, int32_t, TCPS_FIN_WAIT_1);
2706 tp->t_state = TCPS_FIN_WAIT_1;
2707 tp->t_flags &= ~TF_NEEDFIN;
2708 thflags &= ~TH_SYN;
2709 } else {
2710 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
2711 struct tcpcb *, tp, int32_t, TCPS_ESTABLISHED);
2712 tp->t_state = TCPS_ESTABLISHED;
2713 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, TCP_KEEPIDLE(tp));
2714 if (nstat_collect)
2715 nstat_route_connect_success(tp->t_inpcb->inp_route.ro_rt);
2716 }
2717 isconnected = TRUE;
2718 } else {
2719 /*
2720 * Received initial SYN in SYN-SENT[*] state => simul-
2721 * taneous open. If segment contains CC option and there is
2722 * a cached CC, apply TAO test; if it succeeds, connection is
2723 * half-synchronized. Otherwise, do 3-way handshake:
2724 * SYN-SENT -> SYN-RECEIVED
2725 * SYN-SENT* -> SYN-RECEIVED*
2726 */
2727 tp->t_flags |= TF_ACKNOW;
2728 tp->t_timer[TCPT_REXMT] = 0;
2729 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
2730 struct tcpcb *, tp, int32_t, TCPS_SYN_RECEIVED);
2731 tp->t_state = TCPS_SYN_RECEIVED;
2732
2733 }
2734
2735 trimthenstep6:
2736 /*
2737 * Advance th->th_seq to correspond to first data byte.
2738 * If data, trim to stay within window,
2739 * dropping FIN if necessary.
2740 */
2741 th->th_seq++;
2742 if (tlen > tp->rcv_wnd) {
2743 todrop = tlen - tp->rcv_wnd;
2744 m_adj(m, -todrop);
2745 tlen = tp->rcv_wnd;
2746 thflags &= ~TH_FIN;
2747 tcpstat.tcps_rcvpackafterwin++;
2748 tcpstat.tcps_rcvbyteafterwin += todrop;
2749 }
2750 tp->snd_wl1 = th->th_seq - 1;
2751 tp->rcv_up = th->th_seq;
2752 /*
2753 * Client side of transaction: already sent SYN and data.
2754 * If the remote host used T/TCP to validate the SYN,
2755 * our data will be ACK'd; if so, enter normal data segment
2756 * processing in the middle of step 5, ack processing.
2757 * Otherwise, goto step 6.
2758 */
2759 if (thflags & TH_ACK)
2760 goto process_ACK;
2761 goto step6;
2762 /*
2763 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
2764 * do normal processing.
2765 *
2766 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later.
2767 */
2768 case TCPS_LAST_ACK:
2769 case TCPS_CLOSING:
2770 case TCPS_TIME_WAIT:
2771 break; /* continue normal processing */
2772
2773 /* Received a SYN while connection is already established.
2774 * This is a "half open connection and other anomalies" described
2775 * in RFC793 page 34; send an ACK so the remote end resets the connection
2776 * or recovers by adjusting its sequence numbering
2777 */
2778 case TCPS_ESTABLISHED:
2779 if (thflags & TH_SYN)
2780 goto dropafterack;
2781 break;
2782 }
2783
2784 /*
2785 * States other than LISTEN or SYN_SENT.
2786 * First check the RST flag and sequence number since reset segments
2787 * are exempt from the timestamp and connection count tests. This
2788 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
2789 * below which allowed reset segments in half the sequence space
2790 * to fall through and be processed (which gives forged reset
2791 * segments with a random sequence number a 50 percent chance of
2792 * killing a connection).
2793 * Then check timestamp, if present.
2794 * Then check the connection count, if present.
2795 * Then check that at least some bytes of segment are within
2796 * receive window. If segment begins before rcv_nxt,
2797 * drop leading data (and SYN); if nothing left, just ack.
2798 *
2799 *
2800 * If the RST bit is set, check the sequence number to see
2801 * if this is a valid reset segment.
2802 * RFC 793 page 37:
2803 * In all states except SYN-SENT, all reset (RST) segments
2804 * are validated by checking their SEQ-fields. A reset is
2805 * valid if its sequence number is in the window.
2806 * Note: this does not take into account delayed ACKs, so
2807 * we should test against last_ack_sent instead of rcv_nxt.
2808 * The sequence number in the reset segment is normally an
2809 * echo of our outgoing acknowledgement numbers, but some hosts
2810 * send a reset with the sequence number at the rightmost edge
2811 * of our receive window, and we have to handle this case.
2812 * Note 2: Paul Watson's paper "Slipping in the Window" has shown
2813 * that brute force RST attacks are possible. To combat this,
2814 * we use a much stricter check while in the ESTABLISHED state,
2815 * only accepting RSTs where the sequence number is equal to
2816 * last_ack_sent. In all other states (the states in which a
2817 * RST is more likely), the more permissive check is used.
2818 * If we have multiple segments in flight, the initial reset
2819 * segment sequence numbers will be to the left of last_ack_sent,
2820 * but they will eventually catch up.
2821 * In any case, it never made sense to trim reset segments to
2822 * fit the receive window since RFC 1122 says:
2823 * 4.2.2.12 RST Segment: RFC-793 Section 3.4
2824 *
2825 * A TCP SHOULD allow a received RST segment to include data.
2826 *
2827 * DISCUSSION
2828 * It has been suggested that a RST segment could contain
2829 * ASCII text that encoded and explained the cause of the
2830 * RST. No standard has yet been established for such
2831 * data.
2832 *
2833 * If the reset segment passes the sequence number test examine
2834 * the state:
2835 * SYN_RECEIVED STATE:
2836 * If passive open, return to LISTEN state.
2837 * If active open, inform user that connection was refused.
2838 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
2839 * Inform user that connection was reset, and close tcb.
2840 * CLOSING, LAST_ACK STATES:
2841 * Close the tcb.
2842 * TIME_WAIT STATE:
2843 * Drop the segment - see Stevens, vol. 2, p. 964 and
2844 * RFC 1337.
2845 *
2846 * Radar 4803931: Allows for the case where we ACKed the FIN but
2847 * there is already a RST in flight from the peer.
2848 * In that case, accept the RST for non-established
2849 * state if it's one off from last_ack_sent.
2850
2851 */
2852 if (thflags & TH_RST) {
2853 if ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) &&
2854 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) ||
2855 (tp->rcv_wnd == 0 &&
2856 ((tp->last_ack_sent == th->th_seq) || ((tp->last_ack_sent -1) == th->th_seq)))) {
2857 switch (tp->t_state) {
2858
2859 case TCPS_SYN_RECEIVED:
2860 if (ifp != NULL && ifp->if_tcp_stat != NULL)
2861 atomic_add_64(&ifp->if_tcp_stat->rstinsynrcv, 1);
2862 so->so_error = ECONNREFUSED;
2863 goto close;
2864
2865 case TCPS_ESTABLISHED:
2866 if (tp->last_ack_sent != th->th_seq) {
2867 tcpstat.tcps_badrst++;
2868 goto drop;
2869 }
2870 case TCPS_FIN_WAIT_1:
2871 case TCPS_CLOSE_WAIT:
2872 /*
2873 Drop through ...
2874 */
2875 case TCPS_FIN_WAIT_2:
2876 so->so_error = ECONNRESET;
2877 close:
2878 postevent(so, 0, EV_RESET);
2879 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
2880 struct tcpcb *, tp, int32_t, TCPS_CLOSED);
2881
2882 soevent(so,
2883 (SO_FILT_HINT_LOCKED |
2884 SO_FILT_HINT_CONNRESET));
2885
2886 tp->t_state = TCPS_CLOSED;
2887 tcpstat.tcps_drops++;
2888 tp = tcp_close(tp);
2889 break;
2890
2891 case TCPS_CLOSING:
2892 case TCPS_LAST_ACK:
2893 tp = tcp_close(tp);
2894 break;
2895
2896 case TCPS_TIME_WAIT:
2897 break;
2898 }
2899 }
2900 goto drop;
2901 }
2902
2903 /*
2904 * RFC 1323 PAWS: If we have a timestamp reply on this segment
2905 * and it's less than ts_recent, drop it.
2906 */
2907 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
2908 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
2909
2910 /* Check to see if ts_recent is over 24 days old. */
2911 if ((int)(tcp_now - tp->ts_recent_age) > TCP_PAWS_IDLE) {
2912 /*
2913 * Invalidate ts_recent. If this segment updates
2914 * ts_recent, the age will be reset later and ts_recent
2915 * will get a valid value. If it does not, setting
2916 * ts_recent to zero will at least satisfy the
2917 * requirement that zero be placed in the timestamp
2918 * echo reply when ts_recent isn't valid. The
2919 * age isn't reset until we get a valid ts_recent
2920 * because we don't want out-of-order segments to be
2921 * dropped when ts_recent is old.
2922 */
2923 tp->ts_recent = 0;
2924 } else {
2925 tcpstat.tcps_rcvduppack++;
2926 tcpstat.tcps_rcvdupbyte += tlen;
2927 tcpstat.tcps_pawsdrop++;
2928 if (nstat_collect) {
2929 nstat_route_rx(tp->t_inpcb->inp_route.ro_rt, 1, tlen, NSTAT_RX_FLAG_DUPLICATE);
2930 locked_add_64(&inp->inp_stat->rxpackets, 1);
2931 locked_add_64(&inp->inp_stat->rxbytes, tlen);
2932 tp->t_stat.rxduplicatebytes += tlen;
2933 }
2934 if (tlen)
2935 goto dropafterack;
2936 goto drop;
2937 }
2938 }
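	/*
	 * PAWS in action (illustrative values; TCP_PAWS_IDLE is roughly 24
	 * days worth of tcp_now ticks, as noted above):
	 *
	 *     tp->ts_recent = 4000000   last in-window timestamp seen
	 *     to.to_tsval   = 3999000   older timestamp on this segment
	 *
	 * The segment is presumed to be an old duplicate and is dropped
	 * (after acking, if it carried data).  The only escape hatch is a
	 * ts_recent more than TCP_PAWS_IDLE old, in which case ts_recent is
	 * invalidated rather than trusted across a long idle period.
	 */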
2939
2940 /*
2941 * In the SYN-RECEIVED state, validate that the packet belongs to
2942 * this connection before trimming the data to fit the receive
2943 * window. Check the sequence number versus IRS since we know
2944 * the sequence numbers haven't wrapped. This is a partial fix
2945 * for the "LAND" DoS attack.
2946 */
2947 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
2948 rstreason = BANDLIM_RST_OPENPORT;
2949
2950 if (ifp != NULL && ifp->if_tcp_stat != NULL)
2951 atomic_add_64(&ifp->if_tcp_stat->dospacket, 1);
2952
2953 goto dropwithreset;
2954 }
2955
2956 todrop = tp->rcv_nxt - th->th_seq;
2957 if (todrop > 0) {
2958 if (thflags & TH_SYN) {
2959 thflags &= ~TH_SYN;
2960 th->th_seq++;
2961 if (th->th_urp > 1)
2962 th->th_urp--;
2963 else
2964 thflags &= ~TH_URG;
2965 todrop--;
2966 }
2967 /*
2968 * Following if statement from Stevens, vol. 2, p. 960.
2969 */
2970 if (todrop > tlen
2971 || (todrop == tlen && (thflags & TH_FIN) == 0)) {
2972 /*
2973 * Any valid FIN must be to the left of the window.
2974 * At this point the FIN must be a duplicate or out
2975 * of sequence; drop it.
2976 */
2977 thflags &= ~TH_FIN;
2978
2979 /*
2980 * Send an ACK to resynchronize and drop any data.
2981 * But keep on processing for RST or ACK.
2982 */
2983 tp->t_flags |= TF_ACKNOW;
2984 if (todrop == 1) {
2985 /* This could be a keepalive */
2986 soevent(so, SO_FILT_HINT_LOCKED |
2987 SO_FILT_HINT_KEEPALIVE);
2988 }
2989 todrop = tlen;
2990 tcpstat.tcps_rcvduppack++;
2991 tcpstat.tcps_rcvdupbyte += todrop;
2992 } else {
2993 tcpstat.tcps_rcvpartduppack++;
2994 tcpstat.tcps_rcvpartdupbyte += todrop;
2995 }
2996 if (nstat_collect) {
2997 nstat_route_rx(tp->t_inpcb->inp_route.ro_rt, 1, todrop, NSTAT_RX_FLAG_DUPLICATE);
2998 locked_add_64(&inp->inp_stat->rxpackets, 1);
2999 locked_add_64(&inp->inp_stat->rxbytes, todrop);
3000 tp->t_stat.rxduplicatebytes += todrop;
3001 }
3002 drop_hdrlen += todrop; /* drop from the top afterwards */
3003 th->th_seq += todrop;
3004 tlen -= todrop;
3005 if (th->th_urp > todrop)
3006 th->th_urp -= todrop;
3007 else {
3008 thflags &= ~TH_URG;
3009 th->th_urp = 0;
3010 }
3011 }
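	/*
	 * Example of the leading-edge trim above (illustrative numbers):
	 *
	 *     tp->rcv_nxt = 5000   next byte we expect
	 *     th->th_seq  = 4900   segment starts 100 bytes in the past
	 *     tlen        = 300
	 *
	 * todrop = 100, so drop_hdrlen grows by 100, th_seq becomes 5000 and
	 * tlen becomes 200; only the genuinely new 200 bytes survive.  A
	 * segment that is entirely old is counted as a duplicate and merely
	 * triggers an ACK to resynchronize.
	 */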
3012
3013 /*
3014 * If new data are received on a connection after the
3015 * user processes are gone, then RST the other end.
3016 */
3017 if ((so->so_state & SS_NOFDREF) &&
3018 tp->t_state > TCPS_CLOSE_WAIT && tlen) {
3019 tp = tcp_close(tp);
3020 tcpstat.tcps_rcvafterclose++;
3021 rstreason = BANDLIM_UNLIMITED;
3022
3023 if (ifp != NULL && ifp->if_tcp_stat != NULL)
3024 atomic_add_64(&ifp->if_tcp_stat->cleanup, 1);
3025
3026 goto dropwithreset;
3027 }
3028
3029 /*
3030 * If segment ends after window, drop trailing data
3031 * (and PUSH and FIN); if nothing left, just ACK.
3032 */
3033 todrop = (th->th_seq+tlen) - (tp->rcv_nxt+tp->rcv_wnd);
3034 if (todrop > 0) {
3035 tcpstat.tcps_rcvpackafterwin++;
3036 if (todrop >= tlen) {
3037 tcpstat.tcps_rcvbyteafterwin += tlen;
3038 /*
3039 * If a new connection request is received
3040 * while in TIME_WAIT, drop the old connection
3041 * and start over if the sequence numbers
3042 * are above the previous ones.
3043 */
3044 if (thflags & TH_SYN &&
3045 tp->t_state == TCPS_TIME_WAIT &&
3046 SEQ_GT(th->th_seq, tp->rcv_nxt)) {
3047 iss = tcp_new_isn(tp);
3048 tp = tcp_close(tp);
3049 tcp_unlock(so, 1, 0);
3050 goto findpcb;
3051 }
3052 /*
3053 * If window is closed can only take segments at
3054 * window edge, and have to drop data and PUSH from
3055 * incoming segments. Continue processing, but
3056 * remember to ack. Otherwise, drop segment
3057 * and ack.
3058 */
3059 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
3060 tp->t_flags |= TF_ACKNOW;
3061 tcpstat.tcps_rcvwinprobe++;
3062 } else
3063 goto dropafterack;
3064 } else
3065 tcpstat.tcps_rcvbyteafterwin += todrop;
3066 m_adj(m, -todrop);
3067 tlen -= todrop;
3068 thflags &= ~(TH_PUSH|TH_FIN);
3069 }
3070
3071 /*
3072 * If last ACK falls within this segment's sequence numbers,
3073 * record its timestamp.
3074 * NOTE:
3075 * 1) That the test incorporates suggestions from the latest
3076 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
3077 * 2) That updating only on newer timestamps interferes with
3078 * our earlier PAWS tests, so this check should be solely
3079 * predicated on the sequence space of this segment.
3080 * 3) That we modify the segment boundary check to be
3081 * Last.ACK.Sent <= SEG.SEQ + SEG.Len
3082 * instead of RFC1323's
3083 * Last.ACK.Sent < SEG.SEQ + SEG.Len,
3084 * This modified check allows us to overcome RFC1323's
3085 * limitations as described in Stevens TCP/IP Illustrated
3086 * Vol. 2 p.869. In such cases, we can still calculate the
3087 * RTT correctly when RCV.NXT == Last.ACK.Sent.
3088 */
3089 if ((to.to_flags & TOF_TS) != 0 &&
3090 SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
3091 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
3092 ((thflags & (TH_SYN|TH_FIN)) != 0))) {
3093 tp->ts_recent_age = tcp_now;
3094 tp->ts_recent = to.to_tsval;
3095 }
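	/*
	 * Example of the modified boundary test above (illustrative values):
	 *
	 *     tp->last_ack_sent = 7000
	 *     th->th_seq = 6500, tlen = 500   =>  6500 <= 7000 <= 7000
	 *
	 * RFC1323's strict "<" would reject this edge case, yet it is exactly
	 * the RCV.NXT == Last.ACK.Sent situation described in the note, so
	 * the timestamp is recorded and RTT can still be measured from it.
	 */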
3096
3097 /*
3098 * If a SYN is in the window, then this is an
3099 * error and we send an RST and drop the connection.
3100 */
3101 if (thflags & TH_SYN) {
3102 tp = tcp_drop(tp, ECONNRESET);
3103 rstreason = BANDLIM_UNLIMITED;
3104 postevent(so, 0, EV_RESET);
3105
3106 if (ifp != NULL && ifp->if_tcp_stat != NULL)
3107 atomic_add_64(&ifp->if_tcp_stat->synwindow, 1);
3108
3109 goto dropwithreset;
3110 }
3111
3112 /*
3113 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN
3114 * flag is on (half-synchronized state), then queue data for
3115 * later processing; else drop segment and return.
3116 */
3117 if ((thflags & TH_ACK) == 0) {
3118 if (tp->t_state == TCPS_SYN_RECEIVED ||
3119 (tp->t_flags & TF_NEEDSYN))
3120 goto step6;
3121 else if (tp->t_flags & TF_ACKNOW)
3122 goto dropafterack;
3123 else
3124 goto drop;
3125 }
3126
3127 /*
3128 * Ack processing.
3129 */
3130 switch (tp->t_state) {
3131
3132 /*
3133 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
3134 * ESTABLISHED state and continue processing.
3135 * The ACK was checked above.
3136 */
3137 case TCPS_SYN_RECEIVED:
3138
3139 tcpstat.tcps_connects++;
3140
3141 /* Do window scaling? */
3142 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
3143 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
3144 tp->snd_scale = tp->requested_s_scale;
3145 tp->rcv_scale = tp->request_r_scale;
3146 tp->snd_wnd = th->th_win << tp->snd_scale;
3147 tiwin = tp->snd_wnd;
3148 }
3149 /*
3150 * Make transitions:
3151 * SYN-RECEIVED -> ESTABLISHED
3152 * SYN-RECEIVED* -> FIN-WAIT-1
3153 */
3154 tp->t_starttime = tcp_now;
3155 tcp_sbrcv_tstmp_check(tp);
3156 if (tp->t_flags & TF_NEEDFIN) {
3157 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
3158 struct tcpcb *, tp, int32_t, TCPS_FIN_WAIT_1);
3159 tp->t_state = TCPS_FIN_WAIT_1;
3160 tp->t_flags &= ~TF_NEEDFIN;
3161 } else {
3162 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
3163 struct tcpcb *, tp, int32_t, TCPS_ESTABLISHED);
3164 tp->t_state = TCPS_ESTABLISHED;
3165 tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, TCP_KEEPIDLE(tp));
3166 if (nstat_collect)
3167 nstat_route_connect_success(tp->t_inpcb->inp_route.ro_rt);
3168 }
3169 /*
3170 * If segment contains data or ACK, will call tcp_reass()
3171 * later; if not, do so now to pass queued data to user.
3172 */
3173 if (tlen == 0 && (thflags & TH_FIN) == 0)
3174 (void) tcp_reass(tp, (struct tcphdr *)0, &tlen,
3175 (struct mbuf *)0);
3176 tp->snd_wl1 = th->th_seq - 1;
3177
3178 /* FALLTHROUGH */
3179
3180 isconnected = TRUE;
3181
3182 /*
3183 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
3184 * ACKs. If the ack is in the range
3185 * tp->snd_una < th->th_ack <= tp->snd_max
3186 * then advance tp->snd_una to th->th_ack and drop
3187 * data from the retransmission queue. If this ACK reflects
3188 * more up-to-date window information, we update our window.
3189 */
3190 case TCPS_ESTABLISHED:
3191 case TCPS_FIN_WAIT_1:
3192 case TCPS_FIN_WAIT_2:
3193 case TCPS_CLOSE_WAIT:
3194 case TCPS_CLOSING:
3195 case TCPS_LAST_ACK:
3196 case TCPS_TIME_WAIT:
3197 if (SEQ_GT(th->th_ack, tp->snd_max)) {
3198 tcpstat.tcps_rcvacktoomuch++;
3199 goto dropafterack;
3200 }
3201 if (tp->sack_enable &&
3202 (to.to_nsacks > 0 || !TAILQ_EMPTY(&tp->snd_holes)))
3203 tcp_sack_doack(tp, &to, th->th_ack);
3204 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
3205 if (tlen == 0 && tiwin == tp->snd_wnd) {
3206 tcpstat.tcps_rcvdupack++;
3207 /*
3208 * If we have outstanding data (other than
3209 * a window probe), this is a completely
3210 * duplicate ack (ie, window info didn't
3211 * change), the ack is the biggest we've
3212 * seen and we've seen exactly our rexmt
3213 * threshold of them, assume a packet
3214 * has been dropped and retransmit it.
3215 * Kludge snd_nxt & the congestion
3216 * window so we send only this one
3217 * packet.
3218 *
3219 * We know we're losing at the current
3220 * window size so do congestion avoidance
3221 * (set ssthresh to half the current window
3222 * and pull our congestion window back to
3223 * the new ssthresh).
3224 *
3225 * Dup acks mean that packets have left the
3226 * network (they're now cached at the receiver)
3227 * so bump cwnd by the amount in the receiver
3228 * to keep a constant cwnd packets in the
3229 * network.
3230 */
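				/*
				 * Hedged sketch of what follows (tcprexmtthresh is
				 * nominally 3):
				 *
				 *   dup ack #1, #2 : only counted
				 *   dup ack #3     : enter fast recovery; snd_recover is
				 *                    set to snd_max, cwnd is collapsed to
				 *                    one segment so only the presumed-lost
				 *                    segment is retransmitted, then cwnd
				 *                    becomes ssthresh + 3 * t_maxseg
				 *   dup ack #4...  : each one inflates cwnd by t_maxseg
				 *                    (with SACK, only while less than
				 *                    ssthresh worth of data is in flight),
				 *                    letting new data trickle out while
				 *                    the hole is repaired.
				 */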
3231 if (tp->t_timer[TCPT_REXMT] == 0 ||
3232 th->th_ack != tp->snd_una)
3233 tp->t_dupacks = 0;
3234 else if (++tp->t_dupacks > tcprexmtthresh ||
3235 IN_FASTRECOVERY(tp)) {
3236 if (tp->sack_enable && IN_FASTRECOVERY(tp)) {
3237 int awnd;
3238
3239 /*
3240 * Compute the amount of data in flight first.
3241 * We can inject new data into the pipe iff
3242 * we have less than 1/2 the original window's
3243 * worth of data in flight.
3244 */
3245 awnd = (tp->snd_nxt - tp->snd_fack) +
3246 tp->sackhint.sack_bytes_rexmit;
3247 if (awnd < tp->snd_ssthresh) {
3248 tp->snd_cwnd += tp->t_maxseg;
3249 if (tp->snd_cwnd > tp->snd_ssthresh)
3250 tp->snd_cwnd = tp->snd_ssthresh;
3251 }
3252 } else
3253 tp->snd_cwnd += tp->t_maxseg;
3254
3255 DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
3256 struct tcpcb *, tp, struct tcphdr *, th,
3257 int32_t, TCP_CC_IN_FASTRECOVERY);
3258
3259 (void) tcp_output(tp);
3260 goto drop;
3261 } else if (tp->t_dupacks == tcprexmtthresh) {
3262 tcp_seq onxt = tp->snd_nxt;
3263
3264 /*
3265 * If we're doing sack, check to
3266 * see if we're already in sack
3267 * recovery. If we're not doing sack,
3268 * check to see if we're in newreno
3269 * recovery.
3270 */
3271 if (tp->sack_enable) {
3272 if (IN_FASTRECOVERY(tp)) {
3273 tp->t_dupacks = 0;
3274 break;
3275 }
3276 } else {
3277 if (SEQ_LEQ(th->th_ack,
3278 tp->snd_recover)) {
3279 tp->t_dupacks = 0;
3280 break;
3281 }
3282 }
3283
3284 /*
3285 * If the current tcp cc module has
3286 * defined a hook for tasks to run
3287 * before entering FR, call it
3288 */
3289 if (CC_ALGO(tp)->pre_fr != NULL)
3290 CC_ALGO(tp)->pre_fr(tp);
3291 ENTER_FASTRECOVERY(tp);
3292 tp->snd_recover = tp->snd_max;
3293 tp->t_timer[TCPT_REXMT] = 0;
3294 tp->t_rtttime = 0;
3295 if ((tp->ecn_flags & TE_ECN_ON) == TE_ECN_ON) {
3296 tp->ecn_flags |= TE_SENDCWR;
3297 }
3298 if (tp->sack_enable) {
3299 tcpstat.tcps_sack_recovery_episode++;
3300 tp->sack_newdata = tp->snd_nxt;
3301 tp->snd_cwnd = tp->t_maxseg;
3302
3303 DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
3304 struct tcpcb *, tp, struct tcphdr *, th,
3305 int32_t, TCP_CC_ENTER_FASTRECOVERY);
3306
3307 (void) tcp_output(tp);
3308 goto drop;
3309 }
3310 tp->snd_nxt = th->th_ack;
3311 tp->snd_cwnd = tp->t_maxseg;
3312 (void) tcp_output(tp);
3313 tp->snd_cwnd = tp->snd_ssthresh +
3314 tp->t_maxseg * tp->t_dupacks;
3315 if (SEQ_GT(onxt, tp->snd_nxt))
3316 tp->snd_nxt = onxt;
3317 DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
3318 struct tcpcb *, tp, struct tcphdr *, th,
3319 int32_t, TCP_CC_ENTER_FASTRECOVERY);
3320 goto drop;
3321 }
3322 } else
3323 tp->t_dupacks = 0;
3324 break;
3325 }
3326 /*
3327 * If the congestion window was inflated to account
3328 * for the other side's cached packets, retract it.
3329 */
3330 if (IN_FASTRECOVERY(tp)) {
3331 if (SEQ_LT(th->th_ack, tp->snd_recover)) {
3332 if (tp->sack_enable)
3333 tcp_sack_partialack(tp, th);
3334 else
3335 tcp_newreno_partial_ack(tp, th);
3336
3337 DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
3338 struct tcpcb *, tp, struct tcphdr *, th,
3339 int32_t, TCP_CC_PARTIAL_ACK);
3340 } else {
3341 EXIT_FASTRECOVERY(tp);
3342 if (CC_ALGO(tp)->post_fr != NULL)
3343 CC_ALGO(tp)->post_fr(tp, th);
3344 tp->t_dupacks = 0;
3345
3346 DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
3347 struct tcpcb *, tp, struct tcphdr *, th,
3348 int32_t, TCP_CC_EXIT_FASTRECOVERY);
3349 }
3350 } else {
3351 /*
3352 * We were not in fast recovery. Reset the duplicate ack
3353 * counter.
3354 */
3355 tp->t_dupacks = 0;
3356 }
3357
3358
3359 /*
3360 * If we reach this point, ACK is not a duplicate,
3361 * i.e., it ACKs something we sent.
3362 */
3363 if (tp->t_flags & TF_NEEDSYN) {
3364 /*
3365 * T/TCP: Connection was half-synchronized, and our
3366 * SYN has been ACK'd (so connection is now fully
3367 * synchronized). Go to non-starred state,
3368 * increment snd_una for ACK of SYN, and check if
3369 * we can do window scaling.
3370 */
3371 tp->t_flags &= ~TF_NEEDSYN;
3372 tp->snd_una++;
3373 /* Do window scaling? */
3374 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
3375 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
3376 tp->snd_scale = tp->requested_s_scale;
3377 tp->rcv_scale = tp->request_r_scale;
3378 }
3379 }
3380
3381 process_ACK:
3382 acked = th->th_ack - tp->snd_una;
3383 tcpstat.tcps_rcvackpack++;
3384 tcpstat.tcps_rcvackbyte += acked;
3385
3386 /*
3387 * If we just performed our first retransmit, and the ACK
3388 * arrives within our recovery window, then it was a mistake
3389 * to do the retransmit in the first place. Recover our
3390 * original cwnd and ssthresh, and proceed to transmit where
3391 * we left off.
3392 */
3393 if (tp->t_rxtshift == 1 &&
3394 TSTMP_LT(tcp_now, tp->t_badrxtwin)) {
3395 ++tcpstat.tcps_sndrexmitbad;
3396 tp->snd_cwnd = tp->snd_cwnd_prev;
3397 tp->snd_ssthresh = tp->snd_ssthresh_prev;
3398 tp->snd_recover = tp->snd_recover_prev;
3399 if (tp->t_flags & TF_WASFRECOVERY)
3400 ENTER_FASTRECOVERY(tp);
3401 tp->snd_nxt = tp->snd_max;
3402 tp->t_badrxtwin = 0; /* XXX probably not required */
3403 tp->t_rxtshift = 0;
3404 tp->rxt_start = 0;
3405 tcp_bad_rexmt_fix_sndbuf(tp);
3406
3407 DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
3408 struct tcpcb *, tp, struct tcphdr *, th,
3409 int32_t, TCP_CC_BAD_REXMT_RECOVERY);
3410 }
3411
3412 /*
3413 * If we have a timestamp reply, update smoothed
3414 * round trip time. If no timestamp is present but
3415 * transmit timer is running and timed sequence
3416 * number was acked, update smoothed round trip time.
3417 * Since we now have an rtt measurement, cancel the
3418 * timer backoff (cf., Phil Karn's retransmit alg.).
3419 * Recompute the initial retransmit timer.
3420 * Also makes sure we have a valid time stamp in hand
3421 *
3422 * Some boxes send broken timestamp replies
3423 * during the SYN+ACK phase, ignore
3424 * timestamps of 0 or we could calculate a
3425 * huge RTT and blow up the retransmit timer.
3426 */
3427 if (((to.to_flags & TOF_TS) != 0) && (to.to_tsecr != 0) &&
3428 TSTMP_GEQ(tcp_now, to.to_tsecr)) {
3429 tcp_xmit_timer(tp, tcp_now - to.to_tsecr);
3430 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
3431 tcp_xmit_timer(tp, tcp_now - tp->t_rtttime);
3432 }
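		/*
		 * What tcp_xmit_timer() does with the sample is not shown in this
		 * file; as a rough sketch of the classic Van Jacobson estimator
		 * (the scaling factors are an assumption, and the kernel keeps
		 * srtt/rttvar in fixed point):
		 *
		 *     delta   = rtt - srtt
		 *     srtt   += delta / 8
		 *     rttvar += (|delta| - rttvar) / 4
		 *     rxtcur  = srtt + 4 * rttvar   (clamped between min and max)
		 *
		 * Karn's rule applies: samples are taken only from segments that
		 * were never retransmitted, which is why t_rtseq is checked above.
		 */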
3433
3434 /*
3435 * If all outstanding data is acked, stop retransmit
3436 * timer and remember to restart (more output or persist).
3437 * If there is more data to be acked, restart retransmit
3438 * timer, using current (possibly backed-off) value.
3439 */
3440 if (th->th_ack == tp->snd_max) {
3441 tp->t_timer[TCPT_REXMT] = 0;
3442 needoutput = 1;
3443 } else if (tp->t_timer[TCPT_PERSIST] == 0)
3444 tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur);
3445
3446 /*
3447 * If no data (only SYN) was ACK'd,
3448 * skip rest of ACK processing.
3449 */
3450 if (acked == 0)
3451 goto step6;
3452
3453 if ((thflags & TH_ECE) != 0 &&
3454 ((tp->ecn_flags & TE_ECN_ON) == TE_ECN_ON)) {
3455 /*
3456 * Reduce the congestion window if we haven't done so.
3457 */
3458 if (!tp->sack_enable && !IN_FASTRECOVERY(tp) &&
3459 SEQ_GEQ(th->th_ack, tp->snd_recover)) {
3460 tcp_reduce_congestion_window(tp);
3461 DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
3462 struct tcpcb *, tp, struct tcphdr *, th,
3463 int32_t, TCP_CC_ECN_RCVD);
3464 }
3465 }
3466
3467 /*
3468 * When new data is acked, open the congestion window.
3469 * The specifics of how this is achieved are up to the
3470 * congestion control algorithm in use for this connection.
3471 *
3472 * The calculations in this function assume that snd_una is
3473 * not updated yet.
3474 */
3475 if (!IN_FASTRECOVERY(tp)) {
3476 if (CC_ALGO(tp)->ack_rcvd != NULL)
3477 CC_ALGO(tp)->ack_rcvd(tp, th);
3478
3479 DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
3480 struct tcpcb *, tp, struct tcphdr *, th,
3481 int32_t, TCP_CC_ACK_RCVD);
3482 }
3483 if (acked > so->so_snd.sb_cc) {
3484 tp->snd_wnd -= so->so_snd.sb_cc;
3485 sbdrop(&so->so_snd, (int)so->so_snd.sb_cc);
3486 ourfinisacked = 1;
3487 } else {
3488 sbdrop(&so->so_snd, acked);
3489 tcp_sbsnd_trim(&so->so_snd);
3490 tp->snd_wnd -= acked;
3491 ourfinisacked = 0;
3492 }
3493 /* detect una wraparound */
3494 if ( !IN_FASTRECOVERY(tp) &&
3495 SEQ_GT(tp->snd_una, tp->snd_recover) &&
3496 SEQ_LEQ(th->th_ack, tp->snd_recover))
3497 tp->snd_recover = th->th_ack - 1;
3498
3499 if (IN_FASTRECOVERY(tp) &&
3500 SEQ_GEQ(th->th_ack, tp->snd_recover))
3501 EXIT_FASTRECOVERY(tp);
3502
3503 tp->snd_una = th->th_ack;
3504 if (tp->sack_enable) {
3505 if (SEQ_GT(tp->snd_una, tp->snd_recover))
3506 tp->snd_recover = tp->snd_una;
3507 }
3508 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
3509 tp->snd_nxt = tp->snd_una;
3510 if ((tp->t_flagsext & TF_MEASURESNDBW) != 0 &&
3511 tp->t_bwmeas != NULL)
3512 tcp_bwmeas_check(tp);
3513
3514 /*
3515 * sowwakeup must happen after snd_una, et al. are updated so that
3516 * the sequence numbers are in sync with so_snd
3517 */
3518 sowwakeup(so);
3519
3520 switch (tp->t_state) {
3521
3522 /*
3523 * In FIN_WAIT_1 STATE in addition to the processing
3524 * for the ESTABLISHED state if our FIN is now acknowledged
3525 * then enter FIN_WAIT_2.
3526 */
3527 case TCPS_FIN_WAIT_1:
3528 if (ourfinisacked) {
3529 /*
3530 * If we can't receive any more
3531 * data, then closing user can proceed.
3532 * Starting the timer is contrary to the
3533 * specification, but if we don't get a FIN
3534 * we'll hang forever.
3535 */
3536 if (so->so_state & SS_CANTRCVMORE) {
3537 add_to_time_wait(tp, tcp_maxidle);
3538 isconnected = FALSE;
3539 isdisconnected = TRUE;
3540 }
3541 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
3542 struct tcpcb *, tp, int32_t, TCPS_FIN_WAIT_2);
3543 tp->t_state = TCPS_FIN_WAIT_2;
3544 /* fall through and make sure we also recognize data ACKed with the FIN */
3545 }
3546 tp->t_flags |= TF_ACKNOW;
3547 break;
3548
3549 /*
3550 * In CLOSING STATE in addition to the processing for
3551 * the ESTABLISHED state if the ACK acknowledges our FIN
3552 * then enter the TIME-WAIT state, otherwise ignore
3553 * the segment.
3554 */
3555 case TCPS_CLOSING:
3556 if (ourfinisacked) {
3557 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
3558 struct tcpcb *, tp, int32_t, TCPS_TIME_WAIT);
3559 tp->t_state = TCPS_TIME_WAIT;
3560 tcp_canceltimers(tp);
3561 /* Shorten TIME_WAIT [RFC-1644, p.28] */
3562 if (tp->cc_recv != 0 &&
3563 ((int)(tcp_now - tp->t_starttime)) < tcp_msl)
3564 add_to_time_wait(tp, tp->t_rxtcur * TCPTV_TWTRUNC);
3565 else
3566 add_to_time_wait(tp, 2 * tcp_msl);
3567 isconnected = FALSE;
3568 isdisconnected = TRUE;
3569 }
3570 tp->t_flags |= TF_ACKNOW;
3571 break;
3572
3573 /*
3574 * In LAST_ACK, we may still be waiting for data to drain
3575 * and/or to be acked, as well as for the ack of our FIN.
3576 * If our FIN is now acknowledged, delete the TCB,
3577 * enter the closed state and return.
3578 */
3579 case TCPS_LAST_ACK:
3580 if (ourfinisacked) {
3581 tp = tcp_close(tp);
3582 goto drop;
3583 }
3584 break;
3585
3586 /*
3587 * In TIME_WAIT state the only thing that should arrive
3588 * is a retransmission of the remote FIN. Acknowledge
3589 * it and restart the finack timer.
3590 */
3591 case TCPS_TIME_WAIT:
3592 add_to_time_wait(tp, 2 * tcp_msl);
3593 goto dropafterack;
3594 }
3595 }
3596
3597 step6:
3598 /*
3599 * Update window information.
3600 * Don't look at window if no ACK: TAC's send garbage on first SYN.
3601 */
3602 if ((thflags & TH_ACK) &&
3603 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
3604 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
3605 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
3606 /* keep track of pure window updates */
3607 if (tlen == 0 &&
3608 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
3609 tcpstat.tcps_rcvwinupd++;
3610 tp->snd_wnd = tiwin;
3611 tp->snd_wl1 = th->th_seq;
3612 tp->snd_wl2 = th->th_ack;
3613 if (tp->snd_wnd > tp->max_sndwnd)
3614 tp->max_sndwnd = tp->snd_wnd;
3615 needoutput = 1;
3616 }
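 /*
 * Worked example (illustrative, with made-up numbers): with snd_wl1 == 100 and
 * snd_wl2 == 900, a segment carrying an ACK and th_seq == 150 always updates
 * the window; one with th_seq == 100 and th_ack == 950 updates it as well; one
 * with th_seq == 100 and th_ack == 900 updates it only if it advertises a
 * window larger than the current snd_wnd. Only that last case, when it carries
 * no data, is counted as a pure window update above.
 */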
3617
3618 /*
3619 * Process segments with URG.
3620 */
3621 if ((thflags & TH_URG) && th->th_urp &&
3622 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
3623 /*
3624 * This is a kludge, but if we receive and accept
3625 * random urgent pointers, we'll crash in
3626 * soreceive. It's hard to imagine someone
3627 * actually wanting to send this much urgent data.
3628 */
3629 if (th->th_urp + so->so_rcv.sb_cc > sb_max) {
3630 th->th_urp = 0; /* XXX */
3631 thflags &= ~TH_URG; /* XXX */
3632 goto dodata; /* XXX */
3633 }
3634 /*
3635 * If this segment advances the known urgent pointer,
3636 * then mark the data stream. This should not happen
3637 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
3638 * a FIN has been received from the remote side.
3639 * In these states we ignore the URG.
3640 *
3641 * According to RFC961 (Assigned Protocols),
3642 * the urgent pointer points to the last octet
3643 * of urgent data. We continue, however,
3644 * to consider it to indicate the first octet
3645 * of data past the urgent section as the original
3646 * spec states (in one of two places).
3647 */
3648 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
3649 tp->rcv_up = th->th_seq + th->th_urp;
3650 so->so_oobmark = so->so_rcv.sb_cc +
3651 (tp->rcv_up - tp->rcv_nxt) - 1;
3652 if (so->so_oobmark == 0) {
3653 so->so_state |= SS_RCVATMARK;
3654 postevent(so, 0, EV_OOB);
3655 }
3656 sohasoutofband(so);
3657 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
3658 }
3659 /*
3660 * Remove out-of-band data so it doesn't get presented to the user.
3661 * This can happen independent of advancing the URG pointer,
3662 * but if two URG's are pending at once, some out-of-band
3663 * data may creep in... ick.
3664 */
3665 if (th->th_urp <= (u_int32_t)tlen
3666 #if SO_OOBINLINE
3667 && (so->so_options & SO_OOBINLINE) == 0
3668 #endif
3669 )
3670 tcp_pulloutofband(so, th, m,
3671 drop_hdrlen); /* hdr drop is delayed */
3672 } else {
3673 /*
3674 * If no out of band data is expected,
3675 * pull receive urgent pointer along
3676 * with the receive window.
3677 */
3678 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
3679 tp->rcv_up = tp->rcv_nxt;
3680 }
3681 dodata:
3682
3683 /* Set socket's connect or disconnect state correctly before doing data.
3684 * The following might unlock the socket if there is an upcall or a socket
3685 * filter.
3686 */
3687 if (isconnected) {
3688 soisconnected(so);
3689 } else if (isdisconnected) {
3690 soisdisconnected(so);
3691 }
3692
3693 /* Let's check the state of the pcb just to make sure that it did not get closed
3694 * when we unlocked above
3695 */
3696 if (inp->inp_state == INPCB_STATE_DEAD) {
3697 /* Just drop the packet that we are processing and return */
3698 goto drop;
3699 }
3700
3701 /*
3702 * Process the segment text, merging it into the TCP sequencing queue,
3703 * and arranging for acknowledgment of receipt if necessary.
3704 * This process logically involves adjusting tp->rcv_wnd as data
3705 * is presented to the user (this happens in tcp_usrreq.c,
3706 * case PRU_RCVD). If a FIN has already been received on this
3707 * connection then we just ignore the text.
3708 */
3709 if ((tlen || (thflags & TH_FIN)) &&
3710 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
3711 tcp_seq save_start = th->th_seq;
3712 tcp_seq save_end = th->th_seq + tlen;
3713 m_adj(m, drop_hdrlen); /* delayed header drop */
3714 /*
3715 * Insert segment which includes th into TCP reassembly queue
3716 * with control block tp. Set thflags to whether reassembly now
3717 * includes a segment with FIN. This handles the common case
3718 * inline (segment is the next to be received on an established
3719 * connection, and the queue is empty), avoiding linkage into
3720 * and removal from the queue and repetition of various
3721 * conversions.
3722 * Set DELACK for segments received in order, but ack
3723 * immediately when segments are out of order (so
3724 * fast retransmit can work).
3725 */
3726 if (th->th_seq == tp->rcv_nxt &&
3727 LIST_EMPTY(&tp->t_segq) &&
3728 TCPS_HAVEESTABLISHED(tp->t_state)) {
3729 TCP_INC_VAR(tp->t_unacksegs, nlropkts);
3730 if (DELAY_ACK(tp, th) &&
3731 ((tp->t_flags & TF_ACKNOW) == 0) ) {
3732 if ((tp->t_flags & TF_DELACK) == 0) {
3733 tp->t_flags |= TF_DELACK;
3734 tp->t_timer[TCPT_DELACK] = OFFSET_FROM_START(tp, tcp_delack);
3735 }
3736 }
3737 else {
3738 tp->t_flags |= TF_ACKNOW;
3739 }
3740 tp->rcv_nxt += tlen;
3741 thflags = th->th_flags & TH_FIN;
3742 TCP_INC_VAR(tcpstat.tcps_rcvpack, nlropkts);
3743 tcpstat.tcps_rcvbyte += tlen;
3744 if (nstat_collect) {
3745 if (m->m_pkthdr.aux_flags & MAUXF_SW_LRO_PKT) {
3746 locked_add_64(&inp->inp_stat->rxpackets, m->m_pkthdr.lro_npkts);
3747 } else {
3748 locked_add_64(&inp->inp_stat->rxpackets, 1);
3749 }
3750 locked_add_64(&inp->inp_stat->rxbytes, tlen);
3751 }
3752 ND6_HINT(tp);
3753
3754 tcp_sbrcv_grow(tp, &so->so_rcv, &to, tlen);
3755 so_recv_data_stat(so, m, drop_hdrlen);
3756 if (sbappendstream(&so->so_rcv, m))
3757 sorwakeup(so);
3758 } else {
3759 thflags = tcp_reass(tp, th, &tlen, m);
3760 tp->t_flags |= TF_ACKNOW;
3761 }
3762
3763 if (tlen > 0 && tp->sack_enable)
3764 tcp_update_sack_list(tp, save_start, save_end);
3765
3766 if (tp->t_flags & TF_DELACK)
3767 {
3768 #if INET6
3769 if (isipv6) {
3770 KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport),
3771 (((ip6->ip6_src.s6_addr16[0]) << 16) | (ip6->ip6_dst.s6_addr16[0])),
3772 th->th_seq, th->th_ack, th->th_win);
3773 }
3774 else
3775 #endif
3776 {
3777 KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport),
3778 (((ip->ip_src.s_addr & 0xffff) << 16) | (ip->ip_dst.s_addr & 0xffff)),
3779 th->th_seq, th->th_ack, th->th_win);
3780 }
3781
3782 }
3783 } else {
3784 m_freem(m);
3785 thflags &= ~TH_FIN;
3786 }
3787
3788 /*
3789 * If FIN is received ACK the FIN and let the user know
3790 * that the connection is closing.
3791 */
3792 if (thflags & TH_FIN) {
3793 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
3794 socantrcvmore(so);
3795 postevent(so, 0, EV_FIN);
3796 /*
3797 * If connection is half-synchronized
3798 * (ie NEEDSYN flag on) then delay ACK,
3799 * so it may be piggybacked when SYN is sent.
3800 * Otherwise, since we received a FIN then no
3801 * more input can be expected, send ACK now.
3802 */
3803 TCP_INC_VAR(tp->t_unacksegs, nlropkts);
3804 if (DELAY_ACK(tp, th) && (tp->t_flags & TF_NEEDSYN)) {
3805 if ((tp->t_flags & TF_DELACK) == 0) {
3806 tp->t_flags |= TF_DELACK;
3807 tp->t_timer[TCPT_DELACK] = OFFSET_FROM_START(tp, tcp_delack);
3808 }
3809 }
3810 else {
3811 tp->t_flags |= TF_ACKNOW;
3812 }
3813 tp->rcv_nxt++;
3814 }
3815 switch (tp->t_state) {
3816
3817 /*
3818 * In SYN_RECEIVED and ESTABLISHED STATES
3819 * enter the CLOSE_WAIT state.
3820 */
3821 case TCPS_SYN_RECEIVED:
3822 tp->t_starttime = tcp_now;
3823 case TCPS_ESTABLISHED:
3824 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
3825 struct tcpcb *, tp, int32_t, TCPS_CLOSE_WAIT);
3826 tp->t_state = TCPS_CLOSE_WAIT;
3827 break;
3828
3829 /*
3830 * If still in FIN_WAIT_1 STATE FIN has not been acked so
3831 * enter the CLOSING state.
3832 */
3833 case TCPS_FIN_WAIT_1:
3834 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
3835 struct tcpcb *, tp, int32_t, TCPS_CLOSING);
3836 tp->t_state = TCPS_CLOSING;
3837 break;
3838
3839 /*
3840 * In FIN_WAIT_2 state enter the TIME_WAIT state,
3841 * starting the time-wait timer, turning off the other
3842 * standard timers.
3843 */
3844 case TCPS_FIN_WAIT_2:
3845 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
3846 struct tcpcb *, tp, int32_t, TCPS_TIME_WAIT);
3847 tp->t_state = TCPS_TIME_WAIT;
3848 tcp_canceltimers(tp);
3849 /* Shorten TIME_WAIT [RFC-1644, p.28] */
3850 if (tp->cc_recv != 0 &&
3851 ((int)(tcp_now - tp->t_starttime)) < tcp_msl) {
3852 add_to_time_wait(tp, tp->t_rxtcur * TCPTV_TWTRUNC);
3853 /* For transaction client, force ACK now. */
3854 tp->t_flags |= TF_ACKNOW;
3855 tp->t_unacksegs = 0;
3856 }
3857 else
3858 add_to_time_wait(tp, 2 * tcp_msl);
3859 soisdisconnected(so);
3860 break;
3861
3862 /*
3863 * In TIME_WAIT state restart the 2 MSL time_wait timer.
3864 */
3865 case TCPS_TIME_WAIT:
3866 add_to_time_wait(tp, 2 * tcp_msl);
3867 break;
3868 }
3869 }
3870 #if TCPDEBUG
3871 if (so->so_options & SO_DEBUG)
3872 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen,
3873 &tcp_savetcp, 0);
3874 #endif
3875
3876 /*
3877 * Return any desired output.
3878 */
3879 if (needoutput || (tp->t_flags & TF_ACKNOW)) {
3880 (void) tcp_output(tp);
3881 }
3882
3883 tcp_check_timer_state(tp);
3884
3885
3886 tcp_unlock(so, 1, 0);
3887 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0);
3888 return;
3889
3890 dropafterack:
3891 /*
3892 * Generate an ACK dropping incoming segment if it occupies
3893 * sequence space, where the ACK reflects our state.
3894 *
3895 * We can now skip the test for the RST flag since all
3896 * paths to this code happen after packets containing
3897 * RST have been dropped.
3898 *
3899 * In the SYN-RECEIVED state, don't send an ACK unless the
3900 * segment we received passes the SYN-RECEIVED ACK test.
3901 * If it fails send a RST. This breaks the loop in the
3902 * "LAND" DoS attack, and also prevents an ACK storm
3903 * between two listening ports that have been sent forged
3904 * SYN segments, each with the source address of the other.
3905 */
3906 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
3907 (SEQ_GT(tp->snd_una, th->th_ack) ||
3908 SEQ_GT(th->th_ack, tp->snd_max)) ) {
3909 rstreason = BANDLIM_RST_OPENPORT;
3910
3911 if (ifp != NULL && ifp->if_tcp_stat != NULL)
3912 atomic_add_64(&ifp->if_tcp_stat->dospacket, 1);
3913
3914 goto dropwithreset;
3915 }
3916 #if TCPDEBUG
3917 if (so->so_options & SO_DEBUG)
3918 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3919 &tcp_savetcp, 0);
3920 #endif
3921 m_freem(m);
3922 tp->t_flags |= TF_ACKNOW;
3923 (void) tcp_output(tp);
3924
3925 /* Don't need to check timer state as we should have done it during tcp_output */
3926 tcp_unlock(so, 1, 0);
3927 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0);
3928 return;
3929 dropwithresetnosock:
3930 nosock = 1;
3931 dropwithreset:
3932 /*
3933 * Generate a RST, dropping incoming segment.
3934 * Make ACK acceptable to originator of segment.
3935 * Don't bother to respond if destination was broadcast/multicast.
3936 */
3937 if ((thflags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
3938 goto drop;
3939 #if INET6
3940 if (isipv6) {
3941 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
3942 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
3943 goto drop;
3944 } else
3945 #endif /* INET6 */
3946 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
3947 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
3948 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
3949 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
3950 goto drop;
3951 /* IPv6 anycast check is done at tcp6_input() */
3952
3953 /*
3954 * Perform bandwidth limiting.
3955 */
3956 #if ICMP_BANDLIM
3957 if (badport_bandlim(rstreason) < 0)
3958 goto drop;
3959 #endif
3960
3961 #if TCPDEBUG
3962 if (tp == 0 || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
3963 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3964 &tcp_savetcp, 0);
3965 #endif
3966 if (thflags & TH_ACK)
3967 /* mtod() below is safe as long as hdr dropping is delayed */
3968 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, th->th_ack,
3969 TH_RST, ifscope, nocell);
3970 else {
3971 if (thflags & TH_SYN)
3972 tlen++;
3973 /* mtod() below is safe as long as hdr dropping is delayed */
3974 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
3975 (tcp_seq)0, TH_RST|TH_ACK, ifscope, nocell);
3976 }
3977 /* destroy temporarily created socket */
3978 if (dropsocket) {
3979 (void) soabort(so);
3980 tcp_unlock(so, 1, 0);
3981 }
3982 else if ((inp != NULL) && (nosock == 0)) {
3983 tcp_unlock(so, 1, 0);
3984 }
3985 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0);
3986 return;
3987 dropnosock:
3988 nosock = 1;
3989 drop:
3990 /*
3991 * Drop space held by incoming segment and return.
3992 */
3993 #if TCPDEBUG
3994 if (tp == 0 || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
3995 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
3996 &tcp_savetcp, 0);
3997 #endif
3998 m_freem(m);
3999 /* destroy temporarily created socket */
4000 if (dropsocket) {
4001 (void) soabort(so);
4002 tcp_unlock(so, 1, 0);
4003 }
4004 else if (nosock == 0) {
4005 tcp_unlock(so, 1, 0);
4006 }
4007 KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0);
4008 return;
4009 }
4010
4011 static void
4012 tcp_dooptions(tp, cp, cnt, th, to, input_ifscope)
4013 /*
4014 * Parse TCP options and place in tcpopt.
4015 */
4016 struct tcpcb *tp;
4017 u_char *cp;
4018 int cnt;
4019 struct tcphdr *th;
4020 struct tcpopt *to;
4021 unsigned int input_ifscope;
4022 {
4023 u_short mss = 0;
4024 int opt, optlen;
4025
4026 for (; cnt > 0; cnt -= optlen, cp += optlen) {
4027 opt = cp[0];
4028 if (opt == TCPOPT_EOL)
4029 break;
4030 if (opt == TCPOPT_NOP)
4031 optlen = 1;
4032 else {
4033 if (cnt < 2)
4034 break;
4035 optlen = cp[1];
4036 if (optlen < 2 || optlen > cnt)
4037 break;
4038 }
4039 switch (opt) {
4040
4041 default:
4042 continue;
4043
4044 case TCPOPT_MAXSEG:
4045 if (optlen != TCPOLEN_MAXSEG)
4046 continue;
4047 if (!(th->th_flags & TH_SYN))
4048 continue;
4049 bcopy((char *) cp + 2, (char *) &mss, sizeof(mss));
4050
4051 #if BYTE_ORDER != BIG_ENDIAN
4052 NTOHS(mss);
4053 #endif
4054
4055 break;
4056
4057 case TCPOPT_WINDOW:
4058 if (optlen != TCPOLEN_WINDOW)
4059 continue;
4060 if (!(th->th_flags & TH_SYN))
4061 continue;
4062 tp->t_flags |= TF_RCVD_SCALE;
4063 tp->requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT);
4064 break;
4065
4066 case TCPOPT_TIMESTAMP:
4067 if (optlen != TCPOLEN_TIMESTAMP)
4068 continue;
4069 to->to_flags |= TOF_TS;
4070 bcopy((char *)cp + 2,
4071 (char *)&to->to_tsval, sizeof(to->to_tsval));
4072
4073 #if BYTE_ORDER != BIG_ENDIAN
4074 NTOHL(to->to_tsval);
4075 #endif
4076
4077 bcopy((char *)cp + 6,
4078 (char *)&to->to_tsecr, sizeof(to->to_tsecr));
4079
4080 #if BYTE_ORDER != BIG_ENDIAN
4081 NTOHL(to->to_tsecr);
4082 #endif
4083
4084 /*
4085 * A timestamp received in a SYN makes
4086 * it ok to send timestamp requests and replies.
4087 */
4088 if (th->th_flags & TH_SYN) {
4089 tp->t_flags |= TF_RCVD_TSTMP;
4090 tp->ts_recent = to->to_tsval;
4091 tp->ts_recent_age = tcp_now;
4092 }
4093 break;
4094 case TCPOPT_SACK_PERMITTED:
4095 if (!tcp_do_sack ||
4096 optlen != TCPOLEN_SACK_PERMITTED)
4097 continue;
4098 if (th->th_flags & TH_SYN)
4099 to->to_flags |= TOF_SACK;
4100 break;
4101 case TCPOPT_SACK:
4102 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
4103 continue;
4104 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
4105 to->to_sacks = cp + 2;
4106 tcpstat.tcps_sack_rcv_blocks++;
4107
4108 break;
4109 }
4110 }
4111 if (th->th_flags & TH_SYN)
4112 tcp_mss(tp, mss, input_ifscope); /* sets t_maxseg */
4113 }
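
/*
 * Illustrative sketch (not part of the original file): the generic
 * kind/length walk performed by tcp_dooptions() above, written as a
 * standalone routine. Option kind 0 (EOL) ends the list, kind 1 (NOP) is a
 * one-byte pad, and every other option carries a length byte that covers the
 * kind, the length byte itself and the payload. The function and callback
 * names are hypothetical.
 */
#if 0 /* illustrative only */
static void
walk_tcp_options_example(const unsigned char *cp, int cnt,
    void (*cb)(int kind, const unsigned char *val, int vlen, void *arg),
    void *arg)
{
	int opt, optlen;

	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[0];
		if (opt == 0)			/* TCPOPT_EOL */
			break;
		if (opt == 1) {			/* TCPOPT_NOP */
			optlen = 1;
			continue;
		}
		if (cnt < 2)
			break;			/* truncated option header */
		optlen = cp[1];
		if (optlen < 2 || optlen > cnt)
			break;			/* malformed length byte */
		cb(opt, cp + 2, optlen - 2, arg);
	}
}
#endif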
4114
4115 /*
4116 * Pull out of band byte out of a segment so
4117 * it doesn't appear in the user's data queue.
4118 * It is still reflected in the segment length for
4119 * sequencing purposes.
4120 */
4121 static void
4122 tcp_pulloutofband(so, th, m, off)
4123 struct socket *so;
4124 struct tcphdr *th;
4125 register struct mbuf *m;
4126 int off; /* delayed-to-be-dropped hdrlen */
4127 {
4128 int cnt = off + th->th_urp - 1;
4129
4130 while (cnt >= 0) {
4131 if (m->m_len > cnt) {
4132 char *cp = mtod(m, caddr_t) + cnt;
4133 struct tcpcb *tp = sototcpcb(so);
4134
4135 tp->t_iobc = *cp;
4136 tp->t_oobflags |= TCPOOB_HAVEDATA;
4137 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
4138 m->m_len--;
4139 if (m->m_flags & M_PKTHDR)
4140 m->m_pkthdr.len--;
4141 return;
4142 }
4143 cnt -= m->m_len;
4144 m = m->m_next;
4145 if (m == 0)
4146 break;
4147 }
4148 panic("tcp_pulloutofband");
4149 }
4150
4151 uint32_t
4152 get_base_rtt(struct tcpcb *tp)
4153 {
4154 uint32_t base_rtt = 0, i;
4155 for (i = 0; i < N_RTT_BASE; ++i) {
4156 if (tp->rtt_hist[i] != 0 &&
4157 (base_rtt == 0 || tp->rtt_hist[i] < base_rtt))
4158 base_rtt = tp->rtt_hist[i];
4159 }
4160 return base_rtt;
4161 }
4162
4163 /* Each value of RTT base represents the minimum RTT seen in a minute.
4164 * We keep up to N_RTT_BASE minutes' worth of history.
4165 */
4166 void
4167 update_base_rtt(struct tcpcb *tp, uint32_t rtt)
4168 {
4169 if (++tp->rtt_count >= rtt_samples_per_slot) {
4170 int i=0;
4171 for (i = (N_RTT_BASE-1); i > 0; --i) {
4172 tp->rtt_hist[i] = tp->rtt_hist[i-1];
4173 }
4174 tp->rtt_hist[0] = rtt;
4175 tp->rtt_count = 0;
4176 } else {
4177 tp->rtt_hist[0] = min(tp->rtt_hist[0], rtt);
4178 }
4179 }
4180
4181 /*
4182 * Collect new round-trip time estimate
4183 * and update averages and current timeout.
4184 */
4185 static void
4186 tcp_xmit_timer(tp, rtt)
4187 register struct tcpcb *tp;
4188 int rtt;
4189 {
4190 register int delta;
4191
4192 tcpstat.tcps_rttupdated++;
4193 tp->t_rttupdated++;
4194
4195 if (rtt > 0) {
4196 tp->t_rttcur = rtt;
4197 update_base_rtt(tp, rtt);
4198 }
4199
4200 if (tp->t_srtt != 0) {
4201 /*
4202 * srtt is stored as fixed point with 5 bits after the
4203 * binary point (i.e., scaled by 32). The following magic
4204 * is equivalent to the smoothing algorithm in rfc793 with
4205 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
4206 * point).
4207 *
4208 * FreeBSD adjusts rtt to origin 0 by subtracting 1 from the provided
4209 * rtt value. This was required because of the way t_rtttime was
4210 * initialized to 1 before. Since we changed t_rtttime to be based on
4211 * tcp_now, this extra adjustment is not needed.
4212 */
4213 delta = (rtt << TCP_DELTA_SHIFT)
4214 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
4215
4216 if ((tp->t_srtt += delta) <= 0)
4217 tp->t_srtt = 1;
4218
4219 /*
4220 * We accumulate a smoothed rtt variance (actually, a
4221 * smoothed mean difference), then set the retransmit
4222 * timer to smoothed rtt + 4 times the smoothed variance.
4223 * rttvar is stored as fixed point with 4 bits after the
4224 * binary point (scaled by 16). The following is
4225 * equivalent to rfc793 smoothing with an alpha of .75
4226 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
4227 * rfc793's wired-in beta.
4228 */
4229 if (delta < 0)
4230 delta = -delta;
4231 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
4232 if ((tp->t_rttvar += delta) <= 0)
4233 tp->t_rttvar = 1;
4234 if (tp->t_rttbest == 0 ||
4235 tp->t_rttbest > (tp->t_srtt + tp->t_rttvar))
4236 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
4237 } else {
4238 /*
4239 * No rtt measurement yet - use the unsmoothed rtt.
4240 * Set the variance to half the rtt (so our first
4241 * retransmit happens at 3*rtt).
4242 */
4243 tp->t_srtt = rtt << TCP_RTT_SHIFT;
4244 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
4245 }
4246 nstat_route_rtt(tp->t_inpcb->inp_route.ro_rt, tp->t_srtt, tp->t_rttvar);
4247 tp->t_rtttime = 0;
4248 tp->t_rxtshift = 0;
4249 tp->rxt_start = 0;
4250
4251 /*
4252 * the retransmit should happen at rtt + 4 * rttvar.
4253 * Because of the way we do the smoothing, srtt and rttvar
4254 * will each average +1/2 tick of bias. When we compute
4255 * the retransmit timer, we want 1/2 tick of rounding and
4256 * 1 extra tick because of +-1/2 tick uncertainty in the
4257 * firing of the timer. The bias will give us exactly the
4258 * 1.5 tick we need. But, because the bias is
4259 * statistical, we have to test that we don't drop below
4260 * the minimum feasible timer (which is 2 ticks).
4261 */
4262 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
4263 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX,
4264 TCP_ADD_REXMTSLOP(tp));
4265
4266 /*
4267 * We received an ack for a packet that wasn't retransmitted;
4268 * it is probably safe to discard any error indications we've
4269 * received recently. This isn't quite right, but close enough
4270 * for now (a route might have failed after we sent a segment,
4271 * and the return path might not be symmetrical).
4272 */
4273 tp->t_softerror = 0;
4274 }
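
/*
 * Illustrative sketch (not part of the original file): a floating-point
 * reference model of the smoothing done in tcp_xmit_timer(). The fixed-point
 * code keeps srtt scaled by 32 and rttvar scaled by 16; in unscaled terms it
 * computes srtt += (rtt - srtt)/8 and rttvar += (|rtt - srtt| - rttvar)/4,
 * and the retransmit timeout is based on srtt + 4*rttvar (before the
 * clamping done by TCPT_RANGESET). The names below are hypothetical.
 */
#if 0 /* illustrative only */
struct rtt_model_example {
	double srtt;	/* smoothed round-trip time */
	double rttvar;	/* smoothed mean deviation */
};

static double
rtt_update_example(struct rtt_model_example *m, double rtt)
{
	if (m->srtt == 0.0) {
		/* First sample: seed srtt and set the variance to rtt/2 so the
		 * first retransmit happens at roughly 3*rtt. */
		m->srtt = rtt;
		m->rttvar = rtt / 2.0;
	} else {
		double delta = rtt - m->srtt;

		m->srtt += delta / 8.0;			/* alpha = 7/8 */
		if (delta < 0.0)
			delta = -delta;
		m->rttvar += (delta - m->rttvar) / 4.0;	/* beta = 3/4 */
	}
	return (m->srtt + 4.0 * m->rttvar);		/* nominal retransmit timeout */
}
#endif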
4275
4276 static inline unsigned int
4277 tcp_maxmtu(struct rtentry *rt)
4278 {
4279 unsigned int maxmtu;
4280
4281 RT_LOCK_ASSERT_HELD(rt);
4282 if (rt->rt_rmx.rmx_mtu == 0)
4283 maxmtu = rt->rt_ifp->if_mtu;
4284 else
4285 maxmtu = MIN(rt->rt_rmx.rmx_mtu, rt->rt_ifp->if_mtu);
4286
4287 return (maxmtu);
4288 }
4289
4290 #if INET6
4291 static inline unsigned int
4292 tcp_maxmtu6(struct rtentry *rt)
4293 {
4294 unsigned int maxmtu;
4295 struct nd_ifinfo *ndi;
4296
4297 RT_LOCK_ASSERT_HELD(rt);
4298 lck_rw_lock_shared(nd_if_rwlock);
4299 if ((ndi = ND_IFINFO(rt->rt_ifp)) != NULL && !ndi->initialized)
4300 ndi = NULL;
4301 if (ndi != NULL)
4302 lck_mtx_lock(&ndi->lock);
4303 if (rt->rt_rmx.rmx_mtu == 0)
4304 maxmtu = IN6_LINKMTU(rt->rt_ifp);
4305 else
4306 maxmtu = MIN(rt->rt_rmx.rmx_mtu, IN6_LINKMTU(rt->rt_ifp));
4307 if (ndi != NULL)
4308 lck_mtx_unlock(&ndi->lock);
4309 lck_rw_done(nd_if_rwlock);
4310
4311 return (maxmtu);
4312 }
4313 #endif
4314
4315 /*
4316 * Determine a reasonable value for maxseg size.
4317 * If the route is known, check route for mtu.
4318 * If none, use an mss that can be handled on the outgoing
4319 * interface without forcing IP to fragment; if bigger than
4320 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
4321 * to utilize large mbufs. If no route is found, route has no mtu,
4322 * or the destination isn't local, use a default, hopefully conservative
4323 * size (usually 512 or the default IP max size, but no more than the mtu
4324 * of the interface), as we can't discover anything about intervening
4325 * gateways or networks. We also initialize the congestion/slow start
4326 * window to be a single segment if the destination isn't local.
4327 * While looking at the routing entry, we also initialize other path-dependent
4328 * parameters from pre-set or cached values in the routing entry.
4329 *
4330 * Also take into account the space needed for options that we
4331 * send regularly. Make maxseg shorter by that amount to assure
4332 * that we can send maxseg amount of data even when the options
4333 * are present. Store the upper limit of the length of options plus
4334 * data in maxopd.
4335 *
4336 * NOTE that this routine is only called when we process an incoming
4337 * segment, for outgoing segments only tcp_mssopt is called.
4338 *
4339 */
4340 void
4341 tcp_mss(tp, offer, input_ifscope)
4342 struct tcpcb *tp;
4343 int offer;
4344 unsigned int input_ifscope;
4345 {
4346 register struct rtentry *rt;
4347 struct ifnet *ifp;
4348 register int rtt, mss;
4349 u_int32_t bufsize;
4350 struct inpcb *inp;
4351 struct socket *so;
4352 struct rmxp_tao *taop;
4353 int origoffer = offer;
4354 u_int32_t sb_max_corrected;
4355 int isnetlocal = 0;
4356 #if INET6
4357 int isipv6;
4358 int min_protoh;
4359 #endif
4360
4361 inp = tp->t_inpcb;
4362 #if INET6
4363 isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
4364 min_protoh = isipv6 ? sizeof (struct ip6_hdr) + sizeof (struct tcphdr)
4365 : sizeof (struct tcpiphdr);
4366 #else
4367 #define min_protoh (sizeof (struct tcpiphdr))
4368 #endif
4369
4370 #if INET6
4371 if (isipv6) {
4372 rt = tcp_rtlookup6(inp, input_ifscope);
4373 if (rt != NULL &&
4374 (IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr) ||
4375 IN6_IS_ADDR_LINKLOCAL(&inp->in6p_faddr) ||
4376 rt->rt_gateway->sa_family == AF_LINK ||
4377 in6_localaddr(&inp->in6p_faddr))) {
4378 tp->t_flags |= TF_LOCAL;
4379 }
4380 }
4381 else
4382 #endif /* INET6 */
4383 {
4384 rt = tcp_rtlookup(inp, input_ifscope);
4385 if (rt != NULL &&
4386 (rt->rt_gateway->sa_family == AF_LINK ||
4387 rt->rt_ifp->if_flags & IFF_LOOPBACK ||
4388 in_localaddr(inp->inp_faddr))) {
4389 tp->t_flags |= TF_LOCAL;
4390 }
4391 }
4392 isnetlocal = (tp->t_flags & TF_LOCAL);
4393
4394 if (rt == NULL) {
4395 tp->t_maxopd = tp->t_maxseg =
4396 #if INET6
4397 isipv6 ? tcp_v6mssdflt :
4398 #endif /* INET6 */
4399 tcp_mssdflt;
4400 return;
4401 }
4402 ifp = rt->rt_ifp;
4403 /*
4404 * Slower link window correction:
4405 * If a value is specified for slowlink_wsize, use it for PPP links
4406 * believed to be on a serial modem (speed < 128Kbps). Excludes 9600bps, as
4407 * it is the default value advertised by pseudo-devices over ppp.
4408 */
4409 if (ifp->if_type == IFT_PPP && slowlink_wsize > 0 &&
4410 ifp->if_baudrate > 9600 && ifp->if_baudrate <= 128000) {
4411 tp->t_flags |= TF_SLOWLINK;
4412 }
4413 so = inp->inp_socket;
4414
4415 taop = rmx_taop(rt->rt_rmx);
4416 /*
4417 * Offer == -1 means that we didn't receive SYN yet;
4418 * use the cached value in that case.
4419 */
4420 if (offer == -1)
4421 offer = taop->tao_mssopt;
4422 /*
4423 * Offer == 0 means that there was no MSS on the SYN segment,
4424 * in this case we use tcp_mssdflt.
4425 */
4426 if (offer == 0)
4427 offer =
4428 #if INET6
4429 isipv6 ? tcp_v6mssdflt :
4430 #endif /* INET6 */
4431 tcp_mssdflt;
4432 else {
4433 /*
4434 * Prevent DoS attack with too small MSS. Round up
4435 * to at least minmss.
4436 */
4437 offer = max(offer, tcp_minmss);
4438 /*
4439 * Sanity check: make sure that maxopd will be large
4440 * enough to allow some data on segments even if all
4441 * the option space is used (40 bytes). Otherwise
4442 * funny things may happen in tcp_output.
4443 */
4444 offer = max(offer, 64);
4445 }
4446 taop->tao_mssopt = offer;
4447
4448 /*
4449 * While we're here, check if there's an initial rtt
4450 * or rttvar. Convert from the route-table units
4451 * to scaled multiples of the slow timeout timer.
4452 */
4453 if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt) != 0) {
4454 tcp_getrt_rtt(tp, rt);
4455 } else {
4456 tp->t_rttmin = isnetlocal ? tcp_TCPTV_MIN : TCPTV_REXMTMIN;
4457 }
4458
4459 #if INET6
4460 mss = (isipv6 ? tcp_maxmtu6(rt) : tcp_maxmtu(rt));
4461 #else
4462 mss = tcp_maxmtu(rt);
4463 #endif
4464 mss -= min_protoh;
4465
4466 if (rt->rt_rmx.rmx_mtu == 0) {
4467 #if INET6
4468 if (isipv6) {
4469 if (!isnetlocal)
4470 mss = min(mss, tcp_v6mssdflt);
4471 } else
4472 #endif /* INET6 */
4473 if (!isnetlocal)
4474 mss = min(mss, tcp_mssdflt);
4475 }
4476
4477 mss = min(mss, offer);
4478 /*
4479 * maxopd stores the maximum length of data AND options
4480 * in a segment; maxseg is the amount of data in a normal
4481 * segment. We need to store this value (maxopd) apart
4482 * from maxseg, because now every segment carries options
4483 * and thus we normally have somewhat less data in segments.
4484 */
4485 tp->t_maxopd = mss;
4486
4487 /*
4488 * origoffer == -1 indicates that no segments were received yet.
4489 * In this case we just guess.
4490 */
4491 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
4492 (origoffer == -1 ||
4493 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
4494 mss -= TCPOLEN_TSTAMP_APPA;
4495 tp->t_maxseg = mss;
4496
4497 /*
4498 * Calculate the corrected value for sb_max; promote the numerator
4499 * to 64 bits for large sb_max values, else the multiplication will overflow.
4500 */
4501 sb_max_corrected = (sb_max * (u_int64_t)MCLBYTES) / (MSIZE + MCLBYTES);
4502
4503 /*
4504 * If there's a pipesize (ie loopback), change the socket
4505 * buffer to that size only if it's bigger than the current
4506 * sockbuf size. Make the socket buffers an integral
4507 * number of mss units; if the mss is larger than
4508 * the socket buffer, decrease the mss.
4509 */
4510 #if RTV_SPIPE
4511 bufsize = rt->rt_rmx.rmx_sendpipe;
4512 if (bufsize < so->so_snd.sb_hiwat)
4513 #endif
4514 bufsize = so->so_snd.sb_hiwat;
4515 if (bufsize < mss)
4516 mss = bufsize;
4517 else {
4518 bufsize = (((bufsize + (u_int64_t)mss - 1) / (u_int64_t)mss) * (u_int64_t)mss);
4519 if (bufsize > sb_max_corrected)
4520 bufsize = sb_max_corrected;
4521 (void)sbreserve(&so->so_snd, bufsize);
4522 }
4523 tp->t_maxseg = mss;
4524
4525 #if RTV_RPIPE
4526 bufsize = rt->rt_rmx.rmx_recvpipe;
4527 if (bufsize < so->so_rcv.sb_hiwat)
4528 #endif
4529 bufsize = so->so_rcv.sb_hiwat;
4530 if (bufsize > mss) {
4531 bufsize = (((bufsize + (u_int64_t)mss - 1) / (u_int64_t)mss) * (u_int64_t)mss);
4532 if (bufsize > sb_max_corrected)
4533 bufsize = sb_max_corrected;
4534 (void)sbreserve(&so->so_rcv, bufsize);
4535 }
4536
4537 set_tcp_stream_priority(so);
4538
4539 if (rt->rt_rmx.rmx_ssthresh) {
4540 /*
4541 * There's some sort of gateway or interface
4542 * buffer limit on the path. Use this to set
4543 * the slow start threshold, but set the
4544 * threshold to no less than 2*mss.
4545 */
4546 tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh);
4547 tcpstat.tcps_usedssthresh++;
4548 } else {
4549 tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
4550 }
4551
4552
4553 /*
4554 * Set the slow-start flight size depending on whether this
4555 * is a local network or not.
4556 */
4557 if (CC_ALGO(tp)->cwnd_init != NULL)
4558 CC_ALGO(tp)->cwnd_init(tp);
4559
4560 DTRACE_TCP5(cc, void, NULL, struct inpcb *, tp->t_inpcb, struct tcpcb *, tp,
4561 struct tcphdr *, NULL, int32_t, TCP_CC_CWND_INIT);
4562
4563 /* Route locked during lookup above */
4564 RT_UNLOCK(rt);
4565 }
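
/*
 * Illustrative sketch (not part of the original file): the core arithmetic of
 * the MSS selection above, with the routing and socket-buffer details left
 * out. "mtu" is the path MTU, "hdrlen" the fixed IP+TCP header size, "offer"
 * the peer's MSS option (or a default when none was received) and "tsopt" is
 * nonzero when timestamps will be sent. The name is hypothetical.
 */
#if 0 /* illustrative only */
static int
derive_mss_example(int mtu, int hdrlen, int offer, int tsopt)
{
	int mss = mtu - hdrlen;		/* largest segment that avoids fragmentation */

	if (offer > 0 && offer < mss)
		mss = offer;		/* never exceed what the peer advertised */
	if (tsopt)
		mss -= 12;		/* TCPOLEN_TSTAMP_APPA: room for the timestamp option */
	return (mss);
}
#endif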
4566
4567 /*
4568 * Determine the MSS option to send on an outgoing SYN.
4569 */
4570 int
4571 tcp_mssopt(tp)
4572 struct tcpcb *tp;
4573 {
4574 struct rtentry *rt;
4575 int mss;
4576 #if INET6
4577 int isipv6;
4578 int min_protoh;
4579 #endif
4580
4581 #if INET6
4582 isipv6 = ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
4583 min_protoh = isipv6 ? sizeof (struct ip6_hdr) + sizeof (struct tcphdr)
4584 : sizeof (struct tcpiphdr);
4585 #else
4586 #define min_protoh (sizeof (struct tcpiphdr))
4587 #endif
4588
4589 #if INET6
4590 if (isipv6)
4591 rt = tcp_rtlookup6(tp->t_inpcb, IFSCOPE_NONE);
4592 else
4593 #endif /* INET6 */
4594 rt = tcp_rtlookup(tp->t_inpcb, IFSCOPE_NONE);
4595 if (rt == NULL) {
4596 return (
4597 #if INET6
4598 isipv6 ? tcp_v6mssdflt :
4599 #endif /* INET6 */
4600 tcp_mssdflt);
4601 }
4602 /*
4603 * Slower link window correction:
4604 * If a value is specified for slowlink_wsize, use it for PPP links
4605 * believed to be on a serial modem (speed < 128Kbps). Excludes 9600bps, as
4606 * it is the default value advertised by pseudo-devices over ppp.
4607 */
4608 if (rt->rt_ifp->if_type == IFT_PPP && slowlink_wsize > 0 &&
4609 rt->rt_ifp->if_baudrate > 9600 && rt->rt_ifp->if_baudrate <= 128000) {
4610 tp->t_flags |= TF_SLOWLINK;
4611 }
4612
4613 #if INET6
4614 mss = (isipv6 ? tcp_maxmtu6(rt) : tcp_maxmtu(rt));
4615 #else
4616 mss = tcp_maxmtu(rt);
4617 #endif
4618 /* Route locked during lookup above */
4619 RT_UNLOCK(rt);
4620 return (mss - min_protoh);
4621 }
4622
4623 /*
4624 * When a partial ack arrives, force the retransmission of the
4625 * next unacknowledged segment. Do not clear tp->t_dupacks.
4626 * By setting snd_nxt to th_ack, this forces the retransmission timer to
4627 * be started again.
4628 */
4629 static void
4630 tcp_newreno_partial_ack(tp, th)
4631 struct tcpcb *tp;
4632 struct tcphdr *th;
4633 {
4634 tcp_seq onxt = tp->snd_nxt;
4635 u_int32_t ocwnd = tp->snd_cwnd;
4636 tp->t_timer[TCPT_REXMT] = 0;
4637 tp->t_rtttime = 0;
4638 tp->snd_nxt = th->th_ack;
4639 /*
4640 * Set snd_cwnd to one segment beyond acknowledged offset
4641 * (tp->snd_una has not yet been updated when this function
4642 * is called)
4643 */
4644 tp->snd_cwnd = tp->t_maxseg + (th->th_ack - tp->snd_una);
4645 tp->t_flags |= TF_ACKNOW;
4646 (void) tcp_output(tp);
4647 tp->snd_cwnd = ocwnd;
4648 if (SEQ_GT(onxt, tp->snd_nxt))
4649 tp->snd_nxt = onxt;
4650 /*
4651 * Partial window deflation. Relies on fact that tp->snd_una
4652 * not updated yet.
4653 */
4654 if (tp->snd_cwnd > th->th_ack - tp->snd_una)
4655 tp->snd_cwnd -= th->th_ack - tp->snd_una;
4656 else
4657 tp->snd_cwnd = 0;
4658 tp->snd_cwnd += tp->t_maxseg;
4659
4660 }
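
/*
 * Illustrative sketch (not part of the original file): the partial-window
 * deflation at the end of the routine above, in isolation. "acked" stands for
 * th_ack - snd_una, the amount newly acknowledged by the partial ACK. The
 * name is hypothetical.
 */
#if 0 /* illustrative only */
static u_int32_t
deflate_cwnd_example(u_int32_t cwnd, u_int32_t acked, u_int32_t maxseg)
{
	/* Remove the newly acknowledged data from the window... */
	cwnd = (cwnd > acked) ? (cwnd - acked) : 0;
	/* ...then allow one segment for the forced retransmission. */
	return (cwnd + maxseg);
}
#endif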
4661
4662 /*
4663 * Drop a random TCP connection that hasn't been serviced yet and
4664 * is eligible for discard. There is a one in qlen chance that
4665 * we will return a null, saying that there are no droppable
4666 * requests. In this case, the protocol-specific code should drop
4667 * the new request. This ensures fairness.
4668 *
4669 * The listening TCP socket "head" must be locked
4670 */
4671 static int
4672 tcp_dropdropablreq(struct socket *head)
4673 {
4674 struct socket *so, *sonext;
4675 unsigned int i, j, qlen;
4676 static int rnd;
4677 static struct timeval old_runtime;
4678 static unsigned int cur_cnt, old_cnt;
4679 struct timeval tv;
4680 struct inpcb *inp = NULL;
4681 struct tcpcb *tp;
4682
4683 if ((head->so_options & SO_ACCEPTCONN) == 0)
4684 return 0;
4685
4686 so = TAILQ_FIRST(&head->so_incomp);
4687 if (!so)
4688 return 0;
4689
4690 microtime(&tv);
4691 if ((i = (tv.tv_sec - old_runtime.tv_sec)) != 0) {
4692 old_runtime = tv;
4693 old_cnt = cur_cnt / i;
4694 cur_cnt = 0;
4695 }
4696
4697
4698 qlen = head->so_incqlen;
4699 if (++cur_cnt > qlen || old_cnt > qlen) {
4700 rnd = (314159 * rnd + 66329) & 0xffff;
4701 j = ((qlen + 1) * rnd) >> 16;
4702
4703 while (j-- && so)
4704 so = TAILQ_NEXT(so, so_list);
4705 }
4706 /* Find a connection that is not already closing (or being served) */
4707 while (so) {
4708 inp = (struct inpcb *)so->so_pcb;
4709
4710 sonext = TAILQ_NEXT(so, so_list);
4711
4712 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) != WNT_STOPUSING) {
4713 /* Avoid the issue of a socket being accepted by one input thread
4714 * and being dropped by another input thread.
4715 * If we can't get a hold on this mutex, then grab the next socket in line.
4716 */
4717 if (lck_mtx_try_lock(&inp->inpcb_mtx)) {
4718 so->so_usecount++;
4719 if ((so->so_usecount == 2) &&
4720 (so->so_state & SS_INCOMP) != 0 &&
4721 (so->so_flags & SOF_INCOMP_INPROGRESS) == 0)
4722 break;
4723 else {/* don't use if being accepted or used in any other way */
4724 in_pcb_checkstate(inp, WNT_RELEASE, 1);
4725 tcp_unlock(so, 1, 0);
4726 }
4727 }
4728 else {
4729 /* do not try to lock the inp in in_pcb_checkstate
4730 * because the lock is already held in some other thread.
4731 * Only drop the inp_wantcnt reference.
4732 */
4733 in_pcb_checkstate(inp, WNT_RELEASE, 1);
4734 }
4735 }
4736 so = sonext;
4737
4738 }
4739 if (!so)
4740 return 0;
4741
4742 /* Make sure the socket is still in the right state to be discarded */
4743
4744 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
4745 tcp_unlock(so, 1, 0);
4746 return 0;
4747 }
4748
4749 if (so->so_usecount != 2 || !(so->so_state & SS_INCOMP)) {
4750 /* do not discard: that socket is being accepted */
4751 tcp_unlock(so, 1, 0);
4752 return 0;
4753 }
4754
4755 TAILQ_REMOVE(&head->so_incomp, so, so_list);
4756 tcp_unlock(head, 0, 0);
4757
4758 lck_mtx_assert(&inp->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
4759 tp = sototcpcb(so);
4760 so->so_flags |= SOF_OVERFLOW;
4761 so->so_head = NULL;
4762
4763 tcp_close(tp);
4764 tp->t_unacksegs = 0;
4765
4766 if (inp->inp_wantcnt > 0 && inp->inp_wantcnt != WNT_STOPUSING) {
4767 /* Someone has a wantcnt on this pcb. Since WNT_ACQUIRE
4768 * doesn't require a lock, it could have happened while
4769 * we are holding the lock. This pcb will have to
4770 * be garbage collected later.
4771 * Release the reference held for so_incomp queue
4772 */
4773 so->so_usecount--;
4774
4775 tcp_unlock(so, 1, 0);
4776 } else {
4777 /* Unlock this socket and leave the reference on. We need to
4778 * acquire the pcbinfo lock in order to fully dispose of it
4779 */
4780 tcp_unlock(so, 0, 0);
4781
4782 lck_rw_lock_exclusive(tcbinfo.mtx);
4783
4784 tcp_lock(so, 0, 0);
4785 /* Release the reference held for so_incomp queue */
4786 so->so_usecount--;
4787
4788 if (so->so_usecount != 1 ||
4789 (inp->inp_wantcnt > 0 && inp->inp_wantcnt != WNT_STOPUSING)) {
4790 /* There is an extra wantcount or usecount that must
4791 * have been added when the socket was unlocked. This
4792 * socket will have to be garbage collected later
4793 */
4794 tcp_unlock(so, 1, 0);
4795 } else {
4796
4797 /* Drop the reference held for this function */
4798 so->so_usecount--;
4799
4800 in_pcbdispose(inp);
4801 }
4802 lck_rw_done(tcbinfo.mtx);
4803 }
4804 tcpstat.tcps_drops++;
4805
4806 tcp_lock(head, 0, 0);
4807 head->so_incqlen--;
4808 head->so_qlen--;
4809 return(1);
4810 }
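
/*
 * Illustrative sketch (not part of the original file): the cheap random pick
 * used above. A 16-bit linear congruential step is scaled into [0, qlen],
 * which is why there is roughly a one-in-(qlen + 1) chance of walking past
 * the end of the incomplete-connection queue and dropping nothing. The name
 * is hypothetical.
 */
#if 0 /* illustrative only */
static unsigned int
random_queue_index_example(unsigned int *rnd_state, unsigned int qlen)
{
	*rnd_state = (314159 * (*rnd_state) + 66329) & 0xffff;	/* LCG step, 16 bits */
	return (((qlen + 1) * (*rnd_state)) >> 16);		/* scale into 0..qlen */
}
#endif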
4811
4812 /* Set background congestion control on a socket */
4813 void
4814 tcp_set_background_cc(struct socket *so)
4815 {
4816 tcp_set_new_cc(so, TCP_CC_ALGO_BACKGROUND_INDEX);
4817 }
4818
4819 /* Set foreground congestion control on a socket */
4820 void
4821 tcp_set_foreground_cc(struct socket *so)
4822 {
4823 tcp_set_new_cc(so, TCP_CC_ALGO_NEWRENO_INDEX);
4824 }
4825
4826 static void
4827 tcp_set_new_cc(struct socket *so, uint16_t cc_index)
4828 {
4829 struct inpcb *inp = sotoinpcb(so);
4830 struct tcpcb *tp = intotcpcb(inp);
4831 uint16_t old_cc_index = 0;
4832 if (tp->tcp_cc_index != cc_index) {
4833
4834 old_cc_index = tp->tcp_cc_index;
4835
4836 if (CC_ALGO(tp)->cleanup != NULL)
4837 CC_ALGO(tp)->cleanup(tp);
4838 tp->tcp_cc_index = cc_index;
4839
4840 /* Decide if the connection is just starting or if
4841 * we have sent some packets on it.
4842 */
4843 if (tp->snd_nxt > tp->iss) {
4844 /* Already sent some packets */
4845 if (CC_ALGO(tp)->switch_to != NULL)
4846 CC_ALGO(tp)->switch_to(tp, old_cc_index);
4847 } else {
4848 if (CC_ALGO(tp)->init != NULL)
4849 CC_ALGO(tp)->init(tp);
4850 }
4851 DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
4852 struct tcpcb *, tp, struct tcphdr *, NULL,
4853 int32_t, TCP_CC_CHANGE_ALGO);
4854 }
4855 }
4856
4857 void
4858 tcp_set_recv_bg(struct socket *so)
4859 {
4860 if (!IS_TCP_RECV_BG(so))
4861 so->so_traffic_mgt_flags |= TRAFFIC_MGT_TCP_RECVBG;
4862 }
4863
4864 void
4865 tcp_clear_recv_bg(struct socket *so)
4866 {
4867 if (IS_TCP_RECV_BG(so))
4868 so->so_traffic_mgt_flags &= ~(TRAFFIC_MGT_TCP_RECVBG);
4869 }
4870
4871 void
4872 inp_fc_unthrottle_tcp(struct inpcb *inp)
4873 {
4874 struct tcpcb *tp = inp->inp_ppcb;
4875 /*
4876 * Back off the slow-start threshold and enter
4877 * congestion avoidance phase
4878 */
4879 if (CC_ALGO(tp)->pre_fr != NULL)
4880 CC_ALGO(tp)->pre_fr(tp);
4881
4882 tp->snd_cwnd = tp->snd_ssthresh;
4883
4884 /*
4885 * Restart counting for ABC as we changed the
4886 * congestion window just now.
4887 */
4888 tp->t_bytes_acked = 0;
4889
4890 /* Reset retransmit shift as we know that the reason
4891 * for delay in sending a packet is due to flow
4892 * control on the outgoing interface. There is no need
4893 * to backoff retransmit timer.
4894 */
4895 tp->t_rxtshift = 0;
4896
4897 /*
4898 * Start the output stream again. Since we are
4899 * not retransmitting data, do not reset the
4900 * retransmit timer or rtt calculation.
4901 */
4902 tcp_output(tp);
4903 }
4904
4905 static int
4906 tcp_getstat SYSCTL_HANDLER_ARGS
4907 {
4908 #pragma unused(oidp, arg1, arg2)
4909
4910 int error;
4911
4912 if (req->oldptr == 0) {
4913 req->oldlen = (size_t)sizeof(struct tcpstat);
4914 }
4915
4916 error = SYSCTL_OUT(req, &tcpstat, MIN(sizeof (tcpstat), req->oldlen));
4917
4918 return (error);
4919
4920 }
4921
4922 SYSCTL_PROC(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
4923 tcp_getstat, "S,tcpstat", "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
4924
4925 static int
4926 sysctl_rexmtthresh SYSCTL_HANDLER_ARGS
4927 {
4928 #pragma unused(arg1, arg2)
4929
4930 int error, val = tcprexmtthresh;
4931
4932 error = sysctl_handle_int(oidp, &val, 0, req);
4933 if (error || !req->newptr)
4934 return (error);
4935
4936 /*
4937 * Constrain the number of duplicate ACKs
4938 * to consider for TCP fast retransmit
4939 * to either 2 or 3
4940 */
4941
4942 if (val < 2 || val > 3)
4943 return (EINVAL);
4944
4945 tcprexmtthresh = val;
4946
4947 return (0);
4948 }
4949
4950 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmt_thresh, CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED,
4951 &tcprexmtthresh, 0, &sysctl_rexmtthresh, "I", "Duplicate ACK Threshold for Fast Retransmit");