apple/xnu (xnu-6153.121.1) - bsd/netinet/tcp_subr.c
1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95
61 */
62 /*
63 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
64 * support for mandatory and extensible security protections. This notice
65 * is included in support of clause 2.2 (b) of the Apple Public License,
66 * Version 2.0.
67 */
68
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/callout.h>
72 #include <sys/kernel.h>
73 #include <sys/sysctl.h>
74 #include <sys/malloc.h>
75 #include <sys/mbuf.h>
76 #include <sys/domain.h>
77 #include <sys/proc.h>
78 #include <sys/kauth.h>
79 #include <sys/socket.h>
80 #include <sys/socketvar.h>
81 #include <sys/protosw.h>
82 #include <sys/random.h>
83 #include <sys/syslog.h>
84 #include <sys/mcache.h>
85 #include <kern/locks.h>
86 #include <kern/zalloc.h>
87
88 #include <dev/random/randomdev.h>
89
90 #include <net/route.h>
91 #include <net/if.h>
92 #include <net/content_filter.h>
93 #include <net/ntstat.h>
94 #include <net/multi_layer_pkt_log.h>
95
96 #define tcp_minmssoverload fring
97 #define _IP_VHL
98 #include <netinet/in.h>
99 #include <netinet/in_systm.h>
100 #include <netinet/ip.h>
101 #include <netinet/ip_icmp.h>
102 #if INET6
103 #include <netinet/ip6.h>
104 #include <netinet/icmp6.h>
105 #endif
106 #include <netinet/in_pcb.h>
107 #if INET6
108 #include <netinet6/in6_pcb.h>
109 #endif
110 #include <netinet/in_var.h>
111 #include <netinet/ip_var.h>
112 #include <netinet/icmp_var.h>
113 #if INET6
114 #include <netinet6/ip6_var.h>
115 #endif
116 #include <netinet/mptcp_var.h>
117 #include <netinet/tcp.h>
118 #include <netinet/tcp_fsm.h>
119 #include <netinet/tcp_seq.h>
120 #include <netinet/tcp_timer.h>
121 #include <netinet/tcp_var.h>
122 #include <netinet/tcp_cc.h>
123 #include <netinet/tcp_cache.h>
124 #include <kern/thread_call.h>
125
126 #if INET6
127 #include <netinet6/tcp6_var.h>
128 #endif
129 #include <netinet/tcpip.h>
130 #if TCPDEBUG
131 #include <netinet/tcp_debug.h>
132 #endif
133 #include <netinet/tcp_log.h>
134
135 #include <netinet6/ip6protosw.h>
136
137 #if IPSEC
138 #include <netinet6/ipsec.h>
139 #if INET6
140 #include <netinet6/ipsec6.h>
141 #endif
142 #endif /* IPSEC */
143
144 #if NECP
145 #include <net/necp.h>
146 #endif /* NECP */
147
148 #undef tcp_minmssoverload
149
150 #if CONFIG_MACF_NET
151 #include <security/mac_framework.h>
152 #endif /* CONFIG_MACF_NET */
153
154 #include <corecrypto/ccaes.h>
155 #include <libkern/crypto/aes.h>
156 #include <libkern/crypto/md5.h>
157 #include <sys/kdebug.h>
158 #include <mach/sdt.h>
159 #include <atm/atm_internal.h>
160 #include <pexpert/pexpert.h>
161
162 #include <netinet/lro_ext.h>
163
164 #define DBG_FNC_TCP_CLOSE NETDBG_CODE(DBG_NETTCP, ((5 << 8) | 2))
165
166 static tcp_cc tcp_ccgen;
167 extern int tcp_lq_overflow;
168
169 extern struct tcptimerlist tcp_timer_list;
170 extern struct tcptailq tcp_tw_tailq;
171
172 SYSCTL_SKMEM_TCP_INT(TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW | CTLFLAG_LOCKED,
173 int, tcp_mssdflt, TCP_MSS, "Default TCP Maximum Segment Size");
174
175 #if INET6
176 SYSCTL_SKMEM_TCP_INT(TCPCTL_V6MSSDFLT, v6mssdflt,
177 CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_v6mssdflt, TCP6_MSS,
178 "Default TCP Maximum Segment Size for IPv6");
179 #endif
180
181 int tcp_sysctl_fastopenkey(struct sysctl_oid *, void *, int,
182 struct sysctl_req *);
183 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, fastopen_key, CTLTYPE_STRING | CTLFLAG_WR,
184 0, 0, tcp_sysctl_fastopenkey, "S", "TCP Fastopen key");
185
186 /* Current count of half-open TFO connections */
187 int tcp_tfo_halfcnt = 0;
188
189 /* Maximum backlog of half-open TFO connections */
190 SYSCTL_SKMEM_TCP_INT(OID_AUTO, fastopen_backlog,
191 CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_tfo_backlog, 10,
192 "Backlog queue for half-open TFO connections");
193
194 SYSCTL_SKMEM_TCP_INT(OID_AUTO, fastopen, CTLFLAG_RW | CTLFLAG_LOCKED,
195 int, tcp_fastopen, TCP_FASTOPEN_CLIENT | TCP_FASTOPEN_SERVER,
196 "Enable TCP Fastopen (RFC 7413)");
197
198 SYSCTL_SKMEM_TCP_INT(OID_AUTO, now_init, CTLFLAG_RD | CTLFLAG_LOCKED,
199 uint32_t, tcp_now_init, 0, "Initial tcp now value");
200
201 SYSCTL_SKMEM_TCP_INT(OID_AUTO, microuptime_init, CTLFLAG_RD | CTLFLAG_LOCKED,
202 uint32_t, tcp_microuptime_init, 0, "Initial tcp uptime value in microseconds");
203
204 /*
205 * Minimum MSS we accept and use. This prevents DoS attacks where
206 * we are forced to a ridiculously low MSS like 20 and send hundreds
207 * of packets instead of one. The effect scales with the available
208 * bandwidth and quickly saturates the CPU and network interface
209 * with packet generation and sending. Set to zero to disable MINMSS
210 * checking. This setting prevents us from sending too small packets.
211 */
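/*
 * For example: with a 20-byte MSS, a 64 KB send turns into roughly
 * 65536 / 20 = ~3277 segments, versus ~45 segments at a typical
 * 1460-byte MSS.
 */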
212 SYSCTL_SKMEM_TCP_INT(OID_AUTO, minmss, CTLFLAG_RW | CTLFLAG_LOCKED,
213 int, tcp_minmss, TCP_MINMSS, "Minimum TCP Maximum Segment Size");
214 int tcp_do_rfc1323 = 1;
215 #if (DEVELOPMENT || DEBUG)
216 SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323,
217 CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_do_rfc1323, 0,
218 "Enable rfc1323 (high performance TCP) extensions");
219 #endif /* (DEVELOPMENT || DEBUG) */
220
221 // Not used
222 static int tcp_do_rfc1644 = 0;
223 SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644,
224 CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_do_rfc1644, 0,
225 "Enable rfc1644 (TTCP) extensions");
226
227 SYSCTL_SKMEM_TCP_INT(OID_AUTO, do_tcpdrain, CTLFLAG_RW | CTLFLAG_LOCKED,
228 static int, do_tcpdrain, 0,
229 "Enable tcp_drain routine for extra help when low on mbufs");
230
231 SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED,
232 &tcbinfo.ipi_count, 0, "Number of active PCBs");
233
234 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tw_pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED,
235 &tcbinfo.ipi_twcount, 0, "Number of pcbs in time-wait state");
236
237 SYSCTL_SKMEM_TCP_INT(OID_AUTO, icmp_may_rst, CTLFLAG_RW | CTLFLAG_LOCKED,
238 static int, icmp_may_rst, 1,
239 "Certain ICMP unreachable messages may abort connections in SYN_SENT");
240
241 static int tcp_strict_rfc1948 = 0;
242 static int tcp_isn_reseed_interval = 0;
243 #if (DEVELOPMENT || DEBUG)
244 SYSCTL_INT(_net_inet_tcp, OID_AUTO, strict_rfc1948, CTLFLAG_RW | CTLFLAG_LOCKED,
245 &tcp_strict_rfc1948, 0, "Determines if RFC1948 is followed exactly");
246
247 SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval,
248 CTLFLAG_RW | CTLFLAG_LOCKED,
249 &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");
250 #endif /* (DEVELOPMENT || DEBUG) */
251
252 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rtt_min, CTLFLAG_RW | CTLFLAG_LOCKED,
253 int, tcp_TCPTV_MIN, 100, "min rtt value allowed");
254
255 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rexmt_slop, CTLFLAG_RW,
256 int, tcp_rexmt_slop, TCPTV_REXMTSLOP, "Slop added to retransmit timeout");
257
258 SYSCTL_SKMEM_TCP_INT(OID_AUTO, randomize_ports, CTLFLAG_RW | CTLFLAG_LOCKED,
259 __private_extern__ int, tcp_use_randomport, 0,
260 "Randomize TCP port numbers");
261
262 SYSCTL_SKMEM_TCP_INT(OID_AUTO, win_scale_factor, CTLFLAG_RW | CTLFLAG_LOCKED,
263 __private_extern__ int, tcp_win_scale, 3, "Window scaling factor");
264
265 #if (DEVELOPMENT || DEBUG)
266 SYSCTL_SKMEM_TCP_INT(OID_AUTO, init_rtt_from_cache,
267 CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_init_rtt_from_cache, 1,
268 "Initalize RTT from route cache");
269 #else
270 SYSCTL_SKMEM_TCP_INT(OID_AUTO, init_rtt_from_cache,
271 CTLFLAG_RD | CTLFLAG_LOCKED, static int, tcp_init_rtt_from_cache, 1,
272 "Initalize RTT from route cache");
273 #endif /* (DEVELOPMENT || DEBUG) */
274
275 static void tcp_cleartaocache(void);
276 static void tcp_notify(struct inpcb *, int);
277
278 struct zone *sack_hole_zone;
279 struct zone *tcp_reass_zone;
280 struct zone *tcp_bwmeas_zone;
281 struct zone *tcp_rxt_seg_zone;
282
283 extern int slowlink_wsize; /* window correction for slow links */
284 extern int path_mtu_discovery;
285
286 static void tcp_sbrcv_grow_rwin(struct tcpcb *tp, struct sockbuf *sb);
287
288 #define TCP_BWMEAS_BURST_MINSIZE 6
289 #define TCP_BWMEAS_BURST_MAXSIZE 25
290
291 static uint32_t bwmeas_elm_size;
292
293 /*
294 * Target size of TCP PCB hash tables. Must be a power of two.
295 *
296 * Note that this can be overridden by the kernel environment
297 * variable net.inet.tcp.tcbhashsize
298 */
299 #ifndef TCBHASHSIZE
300 #define TCBHASHSIZE CONFIG_TCBHASHSIZE
301 #endif
302
303 __private_extern__ int tcp_tcbhashsize = TCBHASHSIZE;
304 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD | CTLFLAG_LOCKED,
305 &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");
306
307 /*
308 * This is the actual shape of what we allocate using the zone
309 * allocator. Doing it this way allows us to protect both structures
310 * using the same generation count, and also eliminates the overhead
311 * of allocating tcpcbs separately. By hiding the structure here,
312 * we avoid changing most of the rest of the code (although it needs
313 * to be changed, eventually, for greater efficiency).
314 */
315 #define ALIGNMENT 32
316 struct inp_tp {
317 struct inpcb inp;
318 struct tcpcb tcb __attribute__((aligned(ALIGNMENT)));
319 };
320 #undef ALIGNMENT
321
322 int get_inpcb_str_size(void);
323 int get_tcp_str_size(void);
324
325 os_log_t tcp_mpkl_log_object = NULL;
326
327 static void tcpcb_to_otcpcb(struct tcpcb *, struct otcpcb *);
328
329 static lck_attr_t *tcp_uptime_mtx_attr = NULL;
330 static lck_grp_t *tcp_uptime_mtx_grp = NULL;
331 static lck_grp_attr_t *tcp_uptime_mtx_grp_attr = NULL;
332 int tcp_notsent_lowat_check(struct socket *so);
333 static void tcp_flow_lim_stats(struct ifnet_stats_per_flow *ifs,
334 struct if_lim_perf_stat *stat);
335 static void tcp_flow_ecn_perf_stats(struct ifnet_stats_per_flow *ifs,
336 struct if_tcp_ecn_perf_stat *stat);
337
338 static aes_encrypt_ctx tfo_ctx; /* Crypto-context for TFO */
339
340 void
341 tcp_tfo_gen_cookie(struct inpcb *inp, u_char *out, size_t blk_size)
342 {
343 u_char in[CCAES_BLOCK_SIZE];
344 #if INET6
345 int isipv6 = inp->inp_vflag & INP_IPV6;
346 #endif
347
348 VERIFY(blk_size == CCAES_BLOCK_SIZE);
349
350 bzero(&in[0], CCAES_BLOCK_SIZE);
351 bzero(&out[0], CCAES_BLOCK_SIZE);
352
353 #if INET6
354 if (isipv6) {
355 memcpy(in, &inp->in6p_faddr, sizeof(struct in6_addr));
356 } else
357 #endif /* INET6 */
358 memcpy(in, &inp->inp_faddr, sizeof(struct in_addr));
359
360 aes_encrypt_cbc(in, NULL, 1, out, &tfo_ctx);
361 }
362
363 __private_extern__ int
364 tcp_sysctl_fastopenkey(__unused struct sysctl_oid *oidp, __unused void *arg1,
365 __unused int arg2, struct sysctl_req *req)
366 {
367 int error = 0;
368 /*
369 * TFO-key is expressed as a string in hex format
370 * (+1 to account for \0 char)
371 */
372 char keystring[TCP_FASTOPEN_KEYLEN * 2 + 1];
373 u_int32_t key[TCP_FASTOPEN_KEYLEN / sizeof(u_int32_t)];
374 int i;
375
376 /* -1, because newlen is len without the terminating \0 character */
377 if (req->newlen != (sizeof(keystring) - 1)) {
378 error = EINVAL;
379 goto exit;
380 }
381
382 /*
383 * sysctl_io_string copies keystring into the oldptr of the sysctl_req.
384 * Make sure everything is zero, to avoid putting garbage in there or
385 * leaking the stack.
386 */
387 bzero(keystring, sizeof(keystring));
388
389 error = sysctl_io_string(req, keystring, sizeof(keystring), 0, NULL);
390 if (error) {
391 goto exit;
392 }
393
394 for (i = 0; i < (TCP_FASTOPEN_KEYLEN / sizeof(u_int32_t)); i++) {
395 /*
396 * We walk the keystring in 8-character steps; each 8 hex
397 * characters encode one 4-byte word of the key.
398 */
399 if (sscanf(&keystring[i * 8], "%8x", &key[i]) != 1) {
400 error = EINVAL;
401 goto exit;
402 }
403 }
404
405 aes_encrypt_key128((u_char *)key, &tfo_ctx);
406
407 exit:
408 return error;
409 }
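/*
 * Usage sketch (hypothetical key value): the key is supplied as
 * 2 * TCP_FASTOPEN_KEYLEN hex characters, e.g. for a 16-byte key:
 *
 *     sysctl net.inet.tcp.fastopen_key=000102030405060708090a0b0c0d0e0f
 */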
410
411 int
412 get_inpcb_str_size(void)
413 {
414 return sizeof(struct inpcb);
415 }
416
417 int
418 get_tcp_str_size(void)
419 {
420 return sizeof(struct tcpcb);
421 }
422
423 static int scale_to_powerof2(int size);
424
425 /*
426 * This helper routine returns one of the following scaled values of size:
427 * 1. Rounded down power of two value of size if the size value passed as
428 * argument is not a power of two and the rounded up value overflows.
429 * OR
430 * 2. Rounded up power of two value of size if the size value passed as
431 * argument is not a power of two and the rounded up value does not overflow
432 * OR
433 * 3. Same value as argument size if it is already a power of two.
434 */
435 static int
436 scale_to_powerof2(int size)
437 {
438 /* Handle special case of size = 0 */
439 int ret = size ? size : 1;
440
441 if (!powerof2(ret)) {
442 while (!powerof2(size)) {
443 /*
444 * Clear the least significant set bit
445 * until size is left with only its
446 * highest set bit, at which point it
447 * is the rounded-down power of two.
448 */
449 size = size & (size - 1);
450 }
451
452 /* Check for overflow when rounding up */
453 if (0 == (size << 1)) {
454 ret = size;
455 } else {
456 ret = size << 1;
457 }
458 }
459
460 return ret;
461 }
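/*
 * A few sample values, assuming 32-bit int inputs well below overflow:
 *
 *     scale_to_powerof2(0)   -> 1     (special case)
 *     scale_to_powerof2(512) -> 512   (already a power of two)
 *     scale_to_powerof2(600) -> 1024  (rounded down to 512, then doubled)
 */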
462
463 static void
464 tcp_tfo_init(void)
465 {
466 u_char key[TCP_FASTOPEN_KEYLEN];
467
468 read_frandom(key, sizeof(key));
469 aes_encrypt_key128(key, &tfo_ctx);
470 }
471
472 /*
473 * TCP initialization
474 */
475 void
476 tcp_init(struct protosw *pp, struct domain *dp)
477 {
478 #pragma unused(dp)
479 static int tcp_initialized = 0;
480 vm_size_t str_size;
481 struct inpcbinfo *pcbinfo;
482 uint32_t logging_config;
483
484 VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED);
485
486 if (tcp_initialized) {
487 return;
488 }
489 tcp_initialized = 1;
490
491 tcp_ccgen = 1;
492 tcp_cleartaocache();
493
494 tcp_keepinit = TCPTV_KEEP_INIT;
495 tcp_keepidle = TCPTV_KEEP_IDLE;
496 tcp_keepintvl = TCPTV_KEEPINTVL;
497 tcp_keepcnt = TCPTV_KEEPCNT;
498 tcp_maxpersistidle = TCPTV_KEEP_IDLE;
499 tcp_msl = TCPTV_MSL;
500
501 microuptime(&tcp_uptime);
502 read_frandom(&tcp_now, sizeof(tcp_now));
503
504 /* Start the TCP internal clock at a random value */
505 tcp_now = tcp_now & 0x3fffffff;
506
507 /* expose initial uptime/now via sysctl for utcp to keep time in sync */
508 tcp_now_init = tcp_now;
509 tcp_microuptime_init =
510 tcp_uptime.tv_usec + (tcp_uptime.tv_sec * USEC_PER_SEC);
511 SYSCTL_SKMEM_UPDATE_FIELD(tcp.microuptime_init, tcp_microuptime_init);
512 SYSCTL_SKMEM_UPDATE_FIELD(tcp.now_init, tcp_now_init);
513
514 tcp_tfo_init();
515
516 LIST_INIT(&tcb);
517 tcbinfo.ipi_listhead = &tcb;
518
519 pcbinfo = &tcbinfo;
520 /*
521 * allocate lock group attribute and group for tcp pcb mutexes
522 */
523 pcbinfo->ipi_lock_grp_attr = lck_grp_attr_alloc_init();
524 pcbinfo->ipi_lock_grp = lck_grp_alloc_init("tcppcb",
525 pcbinfo->ipi_lock_grp_attr);
526
527 /*
528 * allocate the lock attribute for tcp pcb mutexes
529 */
530 pcbinfo->ipi_lock_attr = lck_attr_alloc_init();
531
532 if ((pcbinfo->ipi_lock = lck_rw_alloc_init(pcbinfo->ipi_lock_grp,
533 pcbinfo->ipi_lock_attr)) == NULL) {
534 panic("%s: unable to allocate PCB lock\n", __func__);
535 /* NOTREACHED */
536 }
537
538 if (tcp_tcbhashsize == 0) {
539 /* Set to default */
540 tcp_tcbhashsize = 512;
541 }
542
543 if (!powerof2(tcp_tcbhashsize)) {
544 int old_hash_size = tcp_tcbhashsize;
545 tcp_tcbhashsize = scale_to_powerof2(tcp_tcbhashsize);
546 /* Lower limit of 16 */
547 if (tcp_tcbhashsize < 16) {
548 tcp_tcbhashsize = 16;
549 }
550 printf("WARNING: TCB hash size not a power of 2, "
551 "scaled from %d to %d.\n",
552 old_hash_size,
553 tcp_tcbhashsize);
554 }
555
556 tcbinfo.ipi_hashbase = hashinit(tcp_tcbhashsize, M_PCB,
557 &tcbinfo.ipi_hashmask);
558 tcbinfo.ipi_porthashbase = hashinit(tcp_tcbhashsize, M_PCB,
559 &tcbinfo.ipi_porthashmask);
560 str_size = P2ROUNDUP(sizeof(struct inp_tp), sizeof(u_int64_t));
561 tcbinfo.ipi_zone = zinit(str_size, 120000 * str_size, 8192, "tcpcb");
562 zone_change(tcbinfo.ipi_zone, Z_CALLERACCT, FALSE);
563 zone_change(tcbinfo.ipi_zone, Z_EXPAND, TRUE);
564
565 tcbinfo.ipi_gc = tcp_gc;
566 tcbinfo.ipi_timer = tcp_itimer;
567 in_pcbinfo_attach(&tcbinfo);
568
569 str_size = P2ROUNDUP(sizeof(struct sackhole), sizeof(u_int64_t));
570 sack_hole_zone = zinit(str_size, 120000 * str_size, 8192,
571 "sack_hole zone");
572 zone_change(sack_hole_zone, Z_CALLERACCT, FALSE);
573 zone_change(sack_hole_zone, Z_EXPAND, TRUE);
574
575 str_size = P2ROUNDUP(sizeof(struct tseg_qent), sizeof(u_int64_t));
576 tcp_reass_zone = zinit(str_size, (nmbclusters >> 4) * str_size,
577 0, "tcp_reass_zone");
578 if (tcp_reass_zone == NULL) {
579 panic("%s: failed allocating tcp_reass_zone", __func__);
580 /* NOTREACHED */
581 }
582 zone_change(tcp_reass_zone, Z_CALLERACCT, FALSE);
583 zone_change(tcp_reass_zone, Z_EXPAND, TRUE);
584
585 bwmeas_elm_size = P2ROUNDUP(sizeof(struct bwmeas), sizeof(u_int64_t));
586 tcp_bwmeas_zone = zinit(bwmeas_elm_size, (100 * bwmeas_elm_size), 0,
587 "tcp_bwmeas_zone");
588 if (tcp_bwmeas_zone == NULL) {
589 panic("%s: failed allocating tcp_bwmeas_zone", __func__);
590 /* NOTREACHED */
591 }
592 zone_change(tcp_bwmeas_zone, Z_CALLERACCT, FALSE);
593 zone_change(tcp_bwmeas_zone, Z_EXPAND, TRUE);
594
595 str_size = P2ROUNDUP(sizeof(struct tcp_ccstate), sizeof(u_int64_t));
596 tcp_cc_zone = zinit(str_size, 20000 * str_size, 0, "tcp_cc_zone");
597 zone_change(tcp_cc_zone, Z_CALLERACCT, FALSE);
598 zone_change(tcp_cc_zone, Z_EXPAND, TRUE);
599
600 str_size = P2ROUNDUP(sizeof(struct tcp_rxt_seg), sizeof(u_int64_t));
601 tcp_rxt_seg_zone = zinit(str_size, 10000 * str_size, 0,
602 "tcp_rxt_seg_zone");
603 zone_change(tcp_rxt_seg_zone, Z_CALLERACCT, FALSE);
604 zone_change(tcp_rxt_seg_zone, Z_EXPAND, TRUE);
605
606 #if INET6
607 #define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
608 #else /* INET6 */
609 #define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
610 #endif /* INET6 */
611 if (max_protohdr < TCP_MINPROTOHDR) {
612 _max_protohdr = TCP_MINPROTOHDR;
613 _max_protohdr = max_protohdr; /* round it up */
614 }
615 if (max_linkhdr + max_protohdr > MCLBYTES) {
616 panic("tcp_init");
617 }
618 #undef TCP_MINPROTOHDR
619
620 /* Initialize time wait and timer lists */
621 TAILQ_INIT(&tcp_tw_tailq);
622
623 bzero(&tcp_timer_list, sizeof(tcp_timer_list));
624 LIST_INIT(&tcp_timer_list.lhead);
625 /*
626 * allocate lock group attribute, group and attribute for
627 * the tcp timer list
628 */
629 tcp_timer_list.mtx_grp_attr = lck_grp_attr_alloc_init();
630 tcp_timer_list.mtx_grp = lck_grp_alloc_init("tcptimerlist",
631 tcp_timer_list.mtx_grp_attr);
632 tcp_timer_list.mtx_attr = lck_attr_alloc_init();
633 if ((tcp_timer_list.mtx = lck_mtx_alloc_init(tcp_timer_list.mtx_grp,
634 tcp_timer_list.mtx_attr)) == NULL) {
635 panic("failed to allocate memory for tcp_timer_list.mtx\n");
636 }
637 ;
638 tcp_timer_list.call = thread_call_allocate(tcp_run_timerlist, NULL);
639 if (tcp_timer_list.call == NULL) {
640 panic("failed to allocate call entry 1 in tcp_init\n");
641 }
642
643 /*
644 * allocate lock group attribute, group and attribute for
645 * tcp_uptime_lock
646 */
647 tcp_uptime_mtx_grp_attr = lck_grp_attr_alloc_init();
648 tcp_uptime_mtx_grp = lck_grp_alloc_init("tcpuptime",
649 tcp_uptime_mtx_grp_attr);
650 tcp_uptime_mtx_attr = lck_attr_alloc_init();
651 tcp_uptime_lock = lck_spin_alloc_init(tcp_uptime_mtx_grp,
652 tcp_uptime_mtx_attr);
653
654 /* Initialize TCP LRO data structures */
655 tcp_lro_init();
656
657 /* Initialize TCP Cache */
658 tcp_cache_init();
659
660 tcp_mpkl_log_object = MPKL_CREATE_LOGOBJECT("com.apple.xnu.tcp");
661 if (tcp_mpkl_log_object == NULL) {
662 panic("MPKL_CREATE_LOGOBJECT failed");
663 }
664
665 logging_config = atm_get_diagnostic_config();
666 if (logging_config & 0x80000000) {
667 tcp_log_privacy = 1;
668 }
669
670 PE_parse_boot_argn("tcp_log", &tcp_log_enable_flags, sizeof(tcp_log_enable_flags));
671
672 /*
673 * If more than 60 MB of mbuf pool is available, increase the
674 * maximum allowed receive and send socket buffer size.
675 */
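/* 30720 clusters * 2 KB (assuming 2 KB mbuf clusters) = 60 MB */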
676 if (nmbclusters > 30720) {
677 tcp_autorcvbuf_max = 2 * 1024 * 1024;
678 tcp_autosndbuf_max = 2 * 1024 * 1024;
679
680 SYSCTL_SKMEM_UPDATE_FIELD(tcp.autorcvbufmax, tcp_autorcvbuf_max);
681 SYSCTL_SKMEM_UPDATE_FIELD(tcp.autosndbufmax, tcp_autosndbuf_max);
682 }
683 }
684
685 /*
686 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
687 * tcp_template used to store this data in mbufs, but we now recopy it out
688 * of the tcpcb each time to conserve mbufs.
689 */
690 void
691 tcp_fillheaders(struct tcpcb *tp, void *ip_ptr, void *tcp_ptr)
692 {
693 struct inpcb *inp = tp->t_inpcb;
694 struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr;
695
696 #if INET6
697 if ((inp->inp_vflag & INP_IPV6) != 0) {
698 struct ip6_hdr *ip6;
699
700 ip6 = (struct ip6_hdr *)ip_ptr;
701 ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
702 (inp->inp_flow & IPV6_FLOWINFO_MASK);
703 ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
704 (IPV6_VERSION & IPV6_VERSION_MASK);
705 ip6->ip6_plen = htons(sizeof(struct tcphdr));
706 ip6->ip6_nxt = IPPROTO_TCP;
707 ip6->ip6_hlim = 0;
708 ip6->ip6_src = inp->in6p_laddr;
709 ip6->ip6_dst = inp->in6p_faddr;
710 tcp_hdr->th_sum = in6_pseudo(&inp->in6p_laddr, &inp->in6p_faddr,
711 htonl(sizeof(struct tcphdr) + IPPROTO_TCP));
712 } else
713 #endif
714 {
715 struct ip *ip = (struct ip *) ip_ptr;
716
717 ip->ip_vhl = IP_VHL_BORING;
718 ip->ip_tos = 0;
719 ip->ip_len = 0;
720 ip->ip_id = 0;
721 ip->ip_off = 0;
722 ip->ip_ttl = 0;
723 ip->ip_sum = 0;
724 ip->ip_p = IPPROTO_TCP;
725 ip->ip_src = inp->inp_laddr;
726 ip->ip_dst = inp->inp_faddr;
727 tcp_hdr->th_sum =
728 in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
729 htons(sizeof(struct tcphdr) + IPPROTO_TCP));
730 }
731
732 tcp_hdr->th_sport = inp->inp_lport;
733 tcp_hdr->th_dport = inp->inp_fport;
734 tcp_hdr->th_seq = 0;
735 tcp_hdr->th_ack = 0;
736 tcp_hdr->th_x2 = 0;
737 tcp_hdr->th_off = 5;
738 tcp_hdr->th_flags = 0;
739 tcp_hdr->th_win = 0;
740 tcp_hdr->th_urp = 0;
741 }
742
743 /*
744 * Create template to be used to send tcp packets on a connection.
745 * Allocates an mbuf and fills in a skeletal tcp/ip header. The only
746 * use for this function is in keepalives, which use tcp_respond.
747 */
748 struct tcptemp *
749 tcp_maketemplate(struct tcpcb *tp)
750 {
751 struct mbuf *m;
752 struct tcptemp *n;
753
754 m = m_get(M_DONTWAIT, MT_HEADER);
755 if (m == NULL) {
756 return 0;
757 }
758 m->m_len = sizeof(struct tcptemp);
759 n = mtod(m, struct tcptemp *);
760
761 tcp_fillheaders(tp, (void *)&n->tt_ipgen, (void *)&n->tt_t);
762 return n;
763 }
764
765 /*
766 * Send a single message to the TCP at address specified by
767 * the given TCP/IP header. If m == 0, then we make a copy
768 * of the tcpiphdr at ti and send directly to the addressed host.
769 * This is used to force keep alive messages out using the TCP
770 * template for a connection. If flags are given then we send
771 * a message back to the TCP which originated the segment ti,
772 * and discard the mbuf containing it and any other attached mbufs.
773 *
774 * In any case the ack and sequence number of the transmitted
775 * segment are as specified by the parameters.
776 *
777 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
778 */
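/*
 * Keepalive sketch (illustrative only, t being the struct tcptemp
 * built by tcp_maketemplate()): a probe would be sent roughly as
 *
 *     tcp_respond(tp, t->tt_ipgen, &t->tt_t, NULL,
 *         tp->rcv_nxt, tp->snd_una - 1, 0, &tra);
 *
 * i.e. no flags and a sequence number one below snd_una, forcing the
 * peer to ACK.
 */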
779 void
780 tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
781 tcp_seq ack, tcp_seq seq, int flags, struct tcp_respond_args *tra)
782 {
783 int tlen;
784 int win = 0;
785 struct route *ro = 0;
786 struct route sro;
787 struct ip *ip;
788 struct tcphdr *nth;
789 #if INET6
790 struct route_in6 *ro6 = 0;
791 struct route_in6 sro6;
792 struct ip6_hdr *ip6;
793 int isipv6;
794 #endif /* INET6 */
795 struct ifnet *outif;
796 int sotc = SO_TC_UNSPEC;
797
798 #if INET6
799 isipv6 = IP_VHL_V(((struct ip *)ipgen)->ip_vhl) == 6;
800 ip6 = ipgen;
801 #endif /* INET6 */
802 ip = ipgen;
803
804 if (tp) {
805 if (!(flags & TH_RST)) {
806 win = tcp_sbspace(tp);
807 if (win > (int32_t)TCP_MAXWIN << tp->rcv_scale) {
808 win = (int32_t)TCP_MAXWIN << tp->rcv_scale;
809 }
810 }
811 #if INET6
812 if (isipv6) {
813 ro6 = &tp->t_inpcb->in6p_route;
814 } else
815 #endif /* INET6 */
816 ro = &tp->t_inpcb->inp_route;
817 } else {
818 #if INET6
819 if (isipv6) {
820 ro6 = &sro6;
821 bzero(ro6, sizeof(*ro6));
822 } else
823 #endif /* INET6 */
824 {
825 ro = &sro;
826 bzero(ro, sizeof(*ro));
827 }
828 }
829 if (m == 0) {
830 m = m_gethdr(M_DONTWAIT, MT_HEADER); /* MAC-OK */
831 if (m == NULL) {
832 return;
833 }
834 tlen = 0;
835 m->m_data += max_linkhdr;
836 #if INET6
837 if (isipv6) {
838 VERIFY((MHLEN - max_linkhdr) >=
839 (sizeof(*ip6) + sizeof(*nth)));
840 bcopy((caddr_t)ip6, mtod(m, caddr_t),
841 sizeof(struct ip6_hdr));
842 ip6 = mtod(m, struct ip6_hdr *);
843 nth = (struct tcphdr *)(void *)(ip6 + 1);
844 } else
845 #endif /* INET6 */
846 {
847 VERIFY((MHLEN - max_linkhdr) >=
848 (sizeof(*ip) + sizeof(*nth)));
849 bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
850 ip = mtod(m, struct ip *);
851 nth = (struct tcphdr *)(void *)(ip + 1);
852 }
853 bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
854 #if MPTCP
855 if ((tp) && (tp->t_mpflags & TMPF_RESET)) {
856 flags = (TH_RST | TH_ACK);
857 } else
858 #endif
859 flags = TH_ACK;
860 } else {
861 m_freem(m->m_next);
862 m->m_next = 0;
863 m->m_data = (caddr_t)ipgen;
864 /* m_len is set later */
865 tlen = 0;
866 #define xchg(a, b, type) { type t; t = a; a = b; b = t; }
867 #if INET6
868 if (isipv6) {
869 /* Expect 32-bit aligned IP on strict-align platforms */
870 IP6_HDR_STRICT_ALIGNMENT_CHECK(ip6);
871 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
872 nth = (struct tcphdr *)(void *)(ip6 + 1);
873 } else
874 #endif /* INET6 */
875 {
876 /* Expect 32-bit aligned IP on strict-align platforms */
877 IP_HDR_STRICT_ALIGNMENT_CHECK(ip);
878 xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
879 nth = (struct tcphdr *)(void *)(ip + 1);
880 }
881 if (th != nth) {
882 /*
883 * this is usually the case when an extension header
884 * exists between the IPv6 header and the
885 * TCP header.
886 */
887 nth->th_sport = th->th_sport;
888 nth->th_dport = th->th_dport;
889 }
890 xchg(nth->th_dport, nth->th_sport, n_short);
891 #undef xchg
892 }
893 #if INET6
894 if (isipv6) {
895 ip6->ip6_plen = htons((u_short)(sizeof(struct tcphdr) +
896 tlen));
897 tlen += sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
898 } else
899 #endif
900 {
901 tlen += sizeof(struct tcpiphdr);
902 ip->ip_len = tlen;
903 ip->ip_ttl = ip_defttl;
904 }
905 m->m_len = tlen;
906 m->m_pkthdr.len = tlen;
907 m->m_pkthdr.rcvif = 0;
908 if (tra->keep_alive) {
909 m->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE;
910 }
911 #if CONFIG_MACF_NET
912 if (tp != NULL && tp->t_inpcb != NULL) {
913 /*
914 * Packet is associated with a socket, so allow the
915 * label of the response to reflect the socket label.
916 */
917 mac_mbuf_label_associate_inpcb(tp->t_inpcb, m);
918 } else {
919 /*
920 * Packet is not associated with a socket, so possibly
921 * update the label in place.
922 */
923 mac_netinet_tcp_reply(m);
924 }
925 #endif
926
927 nth->th_seq = htonl(seq);
928 nth->th_ack = htonl(ack);
929 nth->th_x2 = 0;
930 nth->th_off = sizeof(struct tcphdr) >> 2;
931 nth->th_flags = flags;
932 if (tp) {
933 nth->th_win = htons((u_short) (win >> tp->rcv_scale));
934 } else {
935 nth->th_win = htons((u_short)win);
936 }
937 nth->th_urp = 0;
938 #if INET6
939 if (isipv6) {
940 nth->th_sum = 0;
941 nth->th_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst,
942 htonl((tlen - sizeof(struct ip6_hdr)) + IPPROTO_TCP));
943 m->m_pkthdr.csum_flags = CSUM_TCPIPV6;
944 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
945 ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
946 ro6 && ro6->ro_rt ? ro6->ro_rt->rt_ifp : NULL);
947 } else
948 #endif /* INET6 */
949 {
950 nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
951 htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
952 m->m_pkthdr.csum_flags = CSUM_TCP;
953 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
954 }
955 #if TCPDEBUG
956 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) {
957 tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
958 }
959 #endif
960
961 #if NECP
962 necp_mark_packet_from_socket(m, tp ? tp->t_inpcb : NULL, 0, 0, 0);
963 #endif /* NECP */
964
965 #if IPSEC
966 if (tp != NULL && tp->t_inpcb->inp_sp != NULL &&
967 ipsec_setsocket(m, tp ? tp->t_inpcb->inp_socket : NULL) != 0) {
968 m_freem(m);
969 return;
970 }
971 #endif
972
973 if (tp != NULL) {
974 u_int32_t svc_flags = 0;
975 if (isipv6) {
976 svc_flags |= PKT_SCF_IPV6;
977 }
978 sotc = tp->t_inpcb->inp_socket->so_traffic_class;
979 set_packet_service_class(m, tp->t_inpcb->inp_socket,
980 sotc, svc_flags);
981
982 /* Embed flowhash and flow control flags */
983 m->m_pkthdr.pkt_flowsrc = FLOWSRC_INPCB;
984 m->m_pkthdr.pkt_flowid = tp->t_inpcb->inp_flowhash;
985 m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC | PKTF_FLOW_ADV);
986 m->m_pkthdr.pkt_proto = IPPROTO_TCP;
987 m->m_pkthdr.tx_tcp_pid = tp->t_inpcb->inp_socket->last_pid;
988 m->m_pkthdr.tx_tcp_e_pid = tp->t_inpcb->inp_socket->e_pid;
989 }
990
991 #if INET6
992 if (isipv6) {
993 struct ip6_out_args ip6oa;
994 bzero(&ip6oa, sizeof(ip6oa));
995 ip6oa.ip6oa_boundif = tra->ifscope;
996 ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
997 ip6oa.ip6oa_sotc = SO_TC_UNSPEC;
998 ip6oa.ip6oa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
999
1000 if (tra->ifscope != IFSCOPE_NONE) {
1001 ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
1002 }
1003 if (tra->nocell) {
1004 ip6oa.ip6oa_flags |= IP6OAF_NO_CELLULAR;
1005 }
1006 if (tra->noexpensive) {
1007 ip6oa.ip6oa_flags |= IP6OAF_NO_EXPENSIVE;
1008 }
1009 if (tra->noconstrained) {
1010 ip6oa.ip6oa_flags |= IP6OAF_NO_CONSTRAINED;
1011 }
1012 if (tra->awdl_unrestricted) {
1013 ip6oa.ip6oa_flags |= IP6OAF_AWDL_UNRESTRICTED;
1014 }
1015 if (tra->intcoproc_allowed) {
1016 ip6oa.ip6oa_flags |= IP6OAF_INTCOPROC_ALLOWED;
1017 }
1018 ip6oa.ip6oa_sotc = sotc;
1019 if (tp != NULL) {
1020 if ((tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {
1021 ip6oa.ip6oa_flags |= IP6OAF_QOSMARKING_ALLOWED;
1022 }
1023 ip6oa.ip6oa_netsvctype = tp->t_inpcb->inp_socket->so_netsvctype;
1024 }
1025 (void) ip6_output(m, NULL, ro6, IPV6_OUTARGS, NULL,
1026 NULL, &ip6oa);
1027
1028 if (tp != NULL && ro6 != NULL && ro6->ro_rt != NULL &&
1029 (outif = ro6->ro_rt->rt_ifp) !=
1030 tp->t_inpcb->in6p_last_outifp) {
1031 tp->t_inpcb->in6p_last_outifp = outif;
1032 }
1033
1034 if (ro6 == &sro6) {
1035 ROUTE_RELEASE(ro6);
1036 }
1037 } else
1038 #endif /* INET6 */
1039 {
1040 struct ip_out_args ipoa;
1041 bzero(&ipoa, sizeof(ipoa));
1042 ipoa.ipoa_boundif = tra->ifscope;
1043 ipoa.ipoa_flags = IPOAF_SELECT_SRCIF | IPOAF_BOUND_SRCADDR;
1044 ipoa.ipoa_sotc = SO_TC_UNSPEC;
1045 ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
1046
1047 if (tra->ifscope != IFSCOPE_NONE) {
1048 ipoa.ipoa_flags |= IPOAF_BOUND_IF;
1049 }
1050 if (tra->nocell) {
1051 ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
1052 }
1053 if (tra->noexpensive) {
1054 ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE;
1055 }
1056 if (tra->noconstrained) {
1057 ipoa.ipoa_flags |= IPOAF_NO_CONSTRAINED;
1058 }
1059 if (tra->awdl_unrestricted) {
1060 ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED;
1061 }
1062 ipoa.ipoa_sotc = sotc;
1063 if (tp != NULL) {
1064 if ((tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {
1065 ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED;
1066 }
1067 ipoa.ipoa_netsvctype = tp->t_inpcb->inp_socket->so_netsvctype;
1068 }
1069 if (ro != &sro) {
1070 /* Copy the cached route and take an extra reference */
1071 inp_route_copyout(tp->t_inpcb, &sro);
1072 }
1073 /*
1074 * For consistency, pass a local route copy.
1075 */
1076 (void) ip_output(m, NULL, &sro, IP_OUTARGS, NULL, &ipoa);
1077
1078 if (tp != NULL && sro.ro_rt != NULL &&
1079 (outif = sro.ro_rt->rt_ifp) !=
1080 tp->t_inpcb->inp_last_outifp) {
1081 tp->t_inpcb->inp_last_outifp = outif;
1082 }
1083 if (ro != &sro) {
1084 /* Synchronize cached PCB route */
1085 inp_route_copyin(tp->t_inpcb, &sro);
1086 } else {
1087 ROUTE_RELEASE(&sro);
1088 }
1089 }
1090 }
1091
1092 /*
1093 * Create a new TCP control block, making an
1094 * empty reassembly queue and hooking it to the argument
1095 * protocol control block. The `inp' parameter must have
1096 * come from the zone allocator set up in tcp_init().
1097 */
1098 struct tcpcb *
1099 tcp_newtcpcb(struct inpcb *inp)
1100 {
1101 struct inp_tp *it;
1102 struct tcpcb *tp;
1103 struct socket *so = inp->inp_socket;
1104 #if INET6
1105 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
1106 #endif /* INET6 */
1107
1108 calculate_tcp_clock();
1109
1110 if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) {
1111 it = (struct inp_tp *)(void *)inp;
1112 tp = &it->tcb;
1113 } else {
1114 tp = (struct tcpcb *)(void *)inp->inp_saved_ppcb;
1115 }
1116
1117 bzero((char *) tp, sizeof(struct tcpcb));
1118 LIST_INIT(&tp->t_segq);
1119 tp->t_maxseg = tp->t_maxopd =
1120 #if INET6
1121 isipv6 ? tcp_v6mssdflt :
1122 #endif /* INET6 */
1123 tcp_mssdflt;
1124
1125 if (tcp_do_rfc1323) {
1126 tp->t_flags = (TF_REQ_SCALE | TF_REQ_TSTMP);
1127 }
1128 if (tcp_do_sack) {
1129 tp->t_flagsext |= TF_SACK_ENABLE;
1130 }
1131
1132 TAILQ_INIT(&tp->snd_holes);
1133 SLIST_INIT(&tp->t_rxt_segments);
1134 SLIST_INIT(&tp->t_notify_ack);
1135 tp->t_inpcb = inp;
1136 /*
1137 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
1138 * rtt estimate. Set rttvar so that srtt + 4 * rttvar gives
1139 * reasonable initial retransmit time.
1140 */
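/*
 * Worked out (assuming TCP_RTTVAR_SHIFT == 4 and TCP_RTTVAR_SCALE == 16):
 * the unscaled rttvar set below is ((TCPTV_RTOBASE << 4) / 4) / 16
 * = TCPTV_RTOBASE / 4, so srtt + 4 * rttvar = TCPTV_RTOBASE, i.e.
 * the initial retransmit timeout comes out to TCPTV_RTOBASE.
 */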
1141 tp->t_srtt = TCPTV_SRTTBASE;
1142 tp->t_rttvar =
1143 ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
1144 tp->t_rttmin = tcp_TCPTV_MIN;
1145 tp->t_rxtcur = TCPTV_RTOBASE;
1146
1147 if (tcp_use_newreno) {
1148 /* use newreno by default */
1149 tp->tcp_cc_index = TCP_CC_ALGO_NEWRENO_INDEX;
1150 } else {
1151 tp->tcp_cc_index = TCP_CC_ALGO_CUBIC_INDEX;
1152 }
1153
1154 tcp_cc_allocate_state(tp);
1155
1156 if (CC_ALGO(tp)->init != NULL) {
1157 CC_ALGO(tp)->init(tp);
1158 }
1159
1160 tp->snd_cwnd = TCP_CC_CWND_INIT_BYTES;
1161 tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
1162 tp->snd_ssthresh_prev = TCP_MAXWIN << TCP_MAX_WINSHIFT;
1163 tp->t_rcvtime = tcp_now;
1164 tp->tentry.timer_start = tcp_now;
1165 tp->rcv_unackwin = tcp_now;
1166 tp->t_persist_timeout = tcp_max_persist_timeout;
1167 tp->t_persist_stop = 0;
1168 tp->t_flagsext |= TF_RCVUNACK_WAITSS;
1169 tp->t_rexmtthresh = tcprexmtthresh;
1170
1171 /* Enable bandwidth measurement on this connection */
1172 tp->t_flagsext |= TF_MEASURESNDBW;
1173 if (tp->t_bwmeas == NULL) {
1174 tp->t_bwmeas = tcp_bwmeas_alloc(tp);
1175 if (tp->t_bwmeas == NULL) {
1176 tp->t_flagsext &= ~TF_MEASURESNDBW;
1177 }
1178 }
1179
1180 /* Clear time wait tailq entry */
1181 tp->t_twentry.tqe_next = NULL;
1182 tp->t_twentry.tqe_prev = NULL;
1183
1184 /*
1185 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
1186 * because the socket may be bound to an IPv6 wildcard address,
1187 * which may match an IPv4-mapped IPv6 address.
1188 */
1189 inp->inp_ip_ttl = ip_defttl;
1190 inp->inp_ppcb = (caddr_t)tp;
1191 return tp; /* XXX */
1192 }
1193
1194 /*
1195 * Drop a TCP connection, reporting
1196 * the specified error. If connection is synchronized,
1197 * then send a RST to peer.
1198 */
1199 struct tcpcb *
1200 tcp_drop(struct tcpcb *tp, int errno)
1201 {
1202 struct socket *so = tp->t_inpcb->inp_socket;
1203 #if CONFIG_DTRACE
1204 struct inpcb *inp = tp->t_inpcb;
1205 #endif
1206
1207 if (TCPS_HAVERCVDSYN(tp->t_state)) {
1208 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
1209 struct tcpcb *, tp, int32_t, TCPS_CLOSED);
1210 tp->t_state = TCPS_CLOSED;
1211 (void) tcp_output(tp);
1212 tcpstat.tcps_drops++;
1213 } else {
1214 tcpstat.tcps_conndrops++;
1215 }
1216 if (errno == ETIMEDOUT && tp->t_softerror) {
1217 errno = tp->t_softerror;
1218 }
1219 so->so_error = errno;
1220
1221 TCP_LOG_CONNECTION_SUMMARY(tp);
1222
1223 return tcp_close(tp);
1224 }
1225
1226 void
1227 tcp_getrt_rtt(struct tcpcb *tp, struct rtentry *rt)
1228 {
1229 u_int32_t rtt = rt->rt_rmx.rmx_rtt;
1230 int isnetlocal = (tp->t_flags & TF_LOCAL);
1231
1232 TCP_LOG_RTM_RTT(tp, rt);
1233
1234 if (rtt != 0 && tcp_init_rtt_from_cache != 0) {
1235 /*
1236 * XXX the lock bit for RTT indicates that the value
1237 * is also a minimum value; this is subject to time.
1238 */
1239 if (rt->rt_rmx.rmx_locks & RTV_RTT) {
1240 tp->t_rttmin = rtt / (RTM_RTTUNIT / TCP_RETRANSHZ);
1241 } else {
1242 tp->t_rttmin = isnetlocal ? tcp_TCPTV_MIN :
1243 TCPTV_REXMTMIN;
1244 }
1245
1246 tp->t_srtt =
1247 rtt / (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTT_SCALE));
1248 tcpstat.tcps_usedrtt++;
1249
1250 if (rt->rt_rmx.rmx_rttvar) {
1251 tp->t_rttvar = rt->rt_rmx.rmx_rttvar /
1252 (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTTVAR_SCALE));
1253 tcpstat.tcps_usedrttvar++;
1254 } else {
1255 /* default variation is +- 1 rtt */
1256 tp->t_rttvar =
1257 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
1258 }
1259
1260 /*
1261 * The RTO formula in the route metric case is based on:
1262 * 4 * srtt + 8 * rttvar
1263 * modulo the min, max and slop
1264 */
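/*
 * In scaled units (assuming TCP_RTT_SCALE == 32 and TCP_RTTVAR_SCALE == 16),
 * ((t_srtt >> 2) + t_rttvar) >> 1 == t_srtt/8 + t_rttvar/2
 * == (32*srtt)/8 + (16*rttvar)/2 == 4*srtt + 8*rttvar, as stated above.
 */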
1265 TCPT_RANGESET(tp->t_rxtcur,
1266 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
1267 tp->t_rttmin, TCPTV_REXMTMAX,
1268 TCP_ADD_REXMTSLOP(tp));
1269 }
1270
1271 TCP_LOG_RTT_INFO(tp);
1272 }
1273
1274 static inline void
1275 tcp_create_ifnet_stats_per_flow(struct tcpcb *tp,
1276 struct ifnet_stats_per_flow *ifs)
1277 {
1278 struct inpcb *inp;
1279 struct socket *so;
1280 if (tp == NULL || ifs == NULL) {
1281 return;
1282 }
1283
1284 bzero(ifs, sizeof(*ifs));
1285 inp = tp->t_inpcb;
1286 so = inp->inp_socket;
1287
1288 ifs->ipv4 = (inp->inp_vflag & INP_IPV6) ? 0 : 1;
1289 ifs->local = (tp->t_flags & TF_LOCAL) ? 1 : 0;
1290 ifs->connreset = (so->so_error == ECONNRESET) ? 1 : 0;
1291 ifs->conntimeout = (so->so_error == ETIMEDOUT) ? 1 : 0;
1292 ifs->ecn_flags = tp->ecn_flags;
1293 ifs->txretransmitbytes = tp->t_stat.txretransmitbytes;
1294 ifs->rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
1295 ifs->rxmitpkts = tp->t_stat.rxmitpkts;
1296 ifs->rcvoopack = tp->t_rcvoopack;
1297 ifs->pawsdrop = tp->t_pawsdrop;
1298 ifs->sack_recovery_episodes = tp->t_sack_recovery_episode;
1299 ifs->reordered_pkts = tp->t_reordered_pkts;
1300 ifs->dsack_sent = tp->t_dsack_sent;
1301 ifs->dsack_recvd = tp->t_dsack_recvd;
1302 ifs->srtt = tp->t_srtt;
1303 ifs->rttupdated = tp->t_rttupdated;
1304 ifs->rttvar = tp->t_rttvar;
1305 ifs->rttmin = get_base_rtt(tp);
1306 if (tp->t_bwmeas != NULL && tp->t_bwmeas->bw_sndbw_max > 0) {
1307 ifs->bw_sndbw_max = tp->t_bwmeas->bw_sndbw_max;
1308 } else {
1309 ifs->bw_sndbw_max = 0;
1310 }
1311 if (tp->t_bwmeas != NULL && tp->t_bwmeas->bw_rcvbw_max > 0) {
1312 ifs->bw_rcvbw_max = tp->t_bwmeas->bw_rcvbw_max;
1313 } else {
1314 ifs->bw_rcvbw_max = 0;
1315 }
1316 ifs->bk_txpackets = so->so_tc_stats[MBUF_TC_BK].txpackets;
1317 ifs->txpackets = inp->inp_stat->txpackets;
1318 ifs->rxpackets = inp->inp_stat->rxpackets;
1319 }
1320
1321 static inline void
1322 tcp_flow_ecn_perf_stats(struct ifnet_stats_per_flow *ifs,
1323 struct if_tcp_ecn_perf_stat *stat)
1324 {
1325 u_int64_t curval, oldval;
1326 stat->total_txpkts += ifs->txpackets;
1327 stat->total_rxpkts += ifs->rxpackets;
1328 stat->total_rxmitpkts += ifs->rxmitpkts;
1329 stat->total_oopkts += ifs->rcvoopack;
1330 stat->total_reorderpkts += (ifs->reordered_pkts +
1331 ifs->pawsdrop + ifs->dsack_sent + ifs->dsack_recvd);
1332
1333 /* Average RTT */
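/*
 * The running average below is an EWMA with gain 1/16:
 * new_avg = (15 * old + cur) / 16, computed as ((old << 4) - old + cur) >> 4.
 */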
1334 curval = ifs->srtt >> TCP_RTT_SHIFT;
1335 if (curval > 0 && ifs->rttupdated >= 16) {
1336 if (stat->rtt_avg == 0) {
1337 stat->rtt_avg = curval;
1338 } else {
1339 oldval = stat->rtt_avg;
1340 stat->rtt_avg = ((oldval << 4) - oldval + curval) >> 4;
1341 }
1342 }
1343
1344 /* RTT variance */
1345 curval = ifs->rttvar >> TCP_RTTVAR_SHIFT;
1346 if (curval > 0 && ifs->rttupdated >= 16) {
1347 if (stat->rtt_var == 0) {
1348 stat->rtt_var = curval;
1349 } else {
1350 oldval = stat->rtt_var;
1351 stat->rtt_var =
1352 ((oldval << 4) - oldval + curval) >> 4;
1353 }
1354 }
1355
1356 /* SACK episodes */
1357 stat->sack_episodes += ifs->sack_recovery_episodes;
1358 if (ifs->connreset) {
1359 stat->rst_drop++;
1360 }
1361 }
1362
1363 static inline void
1364 tcp_flow_lim_stats(struct ifnet_stats_per_flow *ifs,
1365 struct if_lim_perf_stat *stat)
1366 {
1367 u_int64_t curval, oldval;
1368
1369 stat->lim_total_txpkts += ifs->txpackets;
1370 stat->lim_total_rxpkts += ifs->rxpackets;
1371 stat->lim_total_retxpkts += ifs->rxmitpkts;
1372 stat->lim_total_oopkts += ifs->rcvoopack;
1373
1374 if (ifs->bw_sndbw_max > 0) {
1375 /* convert from bytes per ms to bits per second */
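/* e.g. 1250 bytes/ms * 8000 = 10,000,000 bit/s = 10 Mbit/s */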
1376 ifs->bw_sndbw_max *= 8000;
1377 stat->lim_ul_max_bandwidth = max(stat->lim_ul_max_bandwidth,
1378 ifs->bw_sndbw_max);
1379 }
1380
1381 if (ifs->bw_rcvbw_max > 0) {
1382 /* convert from bytes per ms to bits per second */
1383 ifs->bw_rcvbw_max *= 8000;
1384 stat->lim_dl_max_bandwidth = max(stat->lim_dl_max_bandwidth,
1385 ifs->bw_rcvbw_max);
1386 }
1387
1388 /* Average RTT */
1389 curval = ifs->srtt >> TCP_RTT_SHIFT;
1390 if (curval > 0 && ifs->rttupdated >= 16) {
1391 if (stat->lim_rtt_average == 0) {
1392 stat->lim_rtt_average = curval;
1393 } else {
1394 oldval = stat->lim_rtt_average;
1395 stat->lim_rtt_average =
1396 ((oldval << 4) - oldval + curval) >> 4;
1397 }
1398 }
1399
1400 /* RTT variance */
1401 curval = ifs->rttvar >> TCP_RTTVAR_SHIFT;
1402 if (curval > 0 && ifs->rttupdated >= 16) {
1403 if (stat->lim_rtt_variance == 0) {
1404 stat->lim_rtt_variance = curval;
1405 } else {
1406 oldval = stat->lim_rtt_variance;
1407 stat->lim_rtt_variance =
1408 ((oldval << 4) - oldval + curval) >> 4;
1409 }
1410 }
1411
1412 if (stat->lim_rtt_min == 0) {
1413 stat->lim_rtt_min = ifs->rttmin;
1414 } else {
1415 stat->lim_rtt_min = min(stat->lim_rtt_min, ifs->rttmin);
1416 }
1417
1418 /* connection timeouts */
1419 stat->lim_conn_attempts++;
1420 if (ifs->conntimeout) {
1421 stat->lim_conn_timeouts++;
1422 }
1423
1424 /* bytes sent using background delay-based algorithms */
1425 stat->lim_bk_txpkts += ifs->bk_txpackets;
1426 }
1427
1428 /*
1429 * Close a TCP control block:
1430 * discard all space held by the tcp
1431 * discard internet protocol block
1432 * wake up any sleepers
1433 */
1434 struct tcpcb *
1435 tcp_close(struct tcpcb *tp)
1436 {
1437 struct inpcb *inp = tp->t_inpcb;
1438 struct socket *so = inp->inp_socket;
1439 #if INET6
1440 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
1441 #endif /* INET6 */
1442 struct route *ro;
1443 struct rtentry *rt;
1444 int dosavessthresh;
1445 struct ifnet_stats_per_flow ifs;
1446
1447 /* tcp_close was called previously, bail */
1448 if (inp->inp_ppcb == NULL) {
1449 return NULL;
1450 }
1451
1452 tcp_canceltimers(tp);
1453 KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_START, tp, 0, 0, 0, 0);
1454
1455 /*
1456 * If another thread for this tcp is currently in ip (indicated by
1457 * the TF_SENDINPROG flag), defer the cleanup until after it returns
1458 * back to tcp. This is done to serialize the close until after all
1459 * pending output is finished, in order to avoid having the PCB be
1460 * detached and the cached route cleaned, only for ip to cache the
1461 * route back into the PCB again. Note that we've cleared all the
1462 * timers at this point. Set TF_CLOSING to indicate to tcp_output()
1463 * that it should call us again once it returns from ip; at that
1464 * point both flags should be cleared and we can proceed further
1465 * with the cleanup.
1466 */
1467 if ((tp->t_flags & TF_CLOSING) ||
1468 inp->inp_sndinprog_cnt > 0) {
1469 tp->t_flags |= TF_CLOSING;
1470 return NULL;
1471 }
1472
1473 TCP_LOG_CONNECTION_SUMMARY(tp);
1474
1475 DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
1476 struct tcpcb *, tp, int32_t, TCPS_CLOSED);
1477
1478 #if INET6
1479 ro = (isipv6 ? (struct route *)&inp->in6p_route : &inp->inp_route);
1480 #else
1481 ro = &inp->inp_route;
1482 #endif
1483 rt = ro->ro_rt;
1484 if (rt != NULL) {
1485 RT_LOCK_SPIN(rt);
1486 }
1487
1488 /*
1489 * If we got enough samples through the srtt filter,
1490 * save the rtt and rttvar in the routing entry.
1491 * 'Enough' is arbitrarily defined as 16 samples.
1492 * 16 samples is enough for the srtt filter to converge
1493 * to within 5% of the correct value; fewer samples and
1494 * we could save a very bogus rtt.
1495 *
1496 * Don't update the default route's characteristics and don't
1497 * update anything that the user "locked".
1498 */
1499 if (tp->t_rttupdated >= 16) {
1500 u_int32_t i = 0;
1501 bool log_rtt = false;
1502
1503 #if INET6
1504 if (isipv6) {
1505 struct sockaddr_in6 *sin6;
1506
1507 if (rt == NULL) {
1508 goto no_valid_rt;
1509 }
1510 sin6 = (struct sockaddr_in6 *)(void *)rt_key(rt);
1511 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1512 goto no_valid_rt;
1513 }
1514 } else
1515 #endif /* INET6 */
1516 if (ROUTE_UNUSABLE(ro) ||
1517 SIN(rt_key(rt))->sin_addr.s_addr == INADDR_ANY) {
1518 DTRACE_TCP4(state__change, void, NULL,
1519 struct inpcb *, inp, struct tcpcb *, tp,
1520 int32_t, TCPS_CLOSED);
1521 tp->t_state = TCPS_CLOSED;
1522 goto no_valid_rt;
1523 }
1524
1525 RT_LOCK_ASSERT_HELD(rt);
1526 if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
1527 i = tp->t_srtt *
1528 (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTT_SCALE));
1529 if (rt->rt_rmx.rmx_rtt && i) {
1530 /*
1531 * filter this update to half the old & half
1532 * the new values, converting scale.
1533 * See route.h and tcp_var.h for a
1534 * description of the scaling constants.
1535 */
1536 rt->rt_rmx.rmx_rtt =
1537 (rt->rt_rmx.rmx_rtt + i) / 2;
1538 } else {
1539 rt->rt_rmx.rmx_rtt = i;
1540 }
1541 tcpstat.tcps_cachedrtt++;
1542 log_rtt = true;
1543 }
1544 if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
1545 i = tp->t_rttvar *
1546 (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTTVAR_SCALE));
1547 if (rt->rt_rmx.rmx_rttvar && i) {
1548 rt->rt_rmx.rmx_rttvar =
1549 (rt->rt_rmx.rmx_rttvar + i) / 2;
1550 } else {
1551 rt->rt_rmx.rmx_rttvar = i;
1552 }
1553 tcpstat.tcps_cachedrttvar++;
1554 log_rtt = true;
1555 }
1556 if (log_rtt) {
1557 TCP_LOG_RTM_RTT(tp, rt);
1558 TCP_LOG_RTT_INFO(tp);
1559 }
1560 /*
1561 * The old comment here said:
1562 * update the pipelimit (ssthresh) if it has been updated
1563 * already or if a pipesize was specified & the threshold
1564 * got below half the pipesize. I.e., wait for bad news
1565 * before we start updating, then update on both good
1566 * and bad news.
1567 *
1568 * But we want to save the ssthresh even if no pipesize is
1569 * specified explicitly in the route, because such
1570 * connections still have an implicit pipesize specified
1571 * by the global tcp_sendspace. In the absence of a reliable
1572 * way to calculate the pipesize, it will have to do.
1573 */
1574 i = tp->snd_ssthresh;
1575 if (rt->rt_rmx.rmx_sendpipe != 0) {
1576 dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
1577 } else {
1578 dosavessthresh = (i < so->so_snd.sb_hiwat / 2);
1579 }
1580 if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
1581 i != 0 && rt->rt_rmx.rmx_ssthresh != 0) ||
1582 dosavessthresh) {
1583 /*
1584 * convert the limit from user data bytes to
1585 * packets then to packet data bytes.
1586 */
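/*
 * For example, an ssthresh of 8760 user-data bytes with a 1460-byte MSS
 * rounds to (8760 + 730) / 1460 = 6 packets (floor of 2) before being
 * converted back into packet data bytes below.
 */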
1587 i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
1588 if (i < 2) {
1589 i = 2;
1590 }
1591 i *= (u_int32_t)(tp->t_maxseg +
1592 #if INET6
1593 isipv6 ? sizeof(struct ip6_hdr) +
1594 sizeof(struct tcphdr) :
1595 #endif /* INET6 */
1596 sizeof(struct tcpiphdr));
1597 if (rt->rt_rmx.rmx_ssthresh) {
1598 rt->rt_rmx.rmx_ssthresh =
1599 (rt->rt_rmx.rmx_ssthresh + i) / 2;
1600 } else {
1601 rt->rt_rmx.rmx_ssthresh = i;
1602 }
1603 tcpstat.tcps_cachedssthresh++;
1604 }
1605 }
1606
1607 /*
1608 * Mark route for deletion if no information is cached.
1609 */
1610 if (rt != NULL && (so->so_flags & SOF_OVERFLOW) && tcp_lq_overflow) {
1611 if (!(rt->rt_rmx.rmx_locks & RTV_RTT) &&
1612 rt->rt_rmx.rmx_rtt == 0) {
1613 rt->rt_flags |= RTF_DELCLONE;
1614 }
1615 }
1616
1617 no_valid_rt:
1618 if (rt != NULL) {
1619 RT_UNLOCK(rt);
1620 }
1621
1622 /* free the reassembly queue, if any */
1623 (void) tcp_freeq(tp);
1624
1625 /* performance stats per interface */
1626 tcp_create_ifnet_stats_per_flow(tp, &ifs);
1627 tcp_update_stats_per_flow(&ifs, inp->inp_last_outifp);
1628
1629 tcp_free_sackholes(tp);
1630 tcp_notify_ack_free(tp);
1631
1632 inp_decr_sndbytes_allunsent(so, tp->snd_una);
1633
1634 if (tp->t_bwmeas != NULL) {
1635 tcp_bwmeas_free(tp);
1636 }
1637 tcp_rxtseg_clean(tp);
1638 /* Free the packet list */
1639 if (tp->t_pktlist_head != NULL) {
1640 m_freem_list(tp->t_pktlist_head);
1641 }
1642 TCP_PKTLIST_CLEAR(tp);
1643
1644 if (so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) {
1645 inp->inp_saved_ppcb = (caddr_t) tp;
1646 }
1647
1648 tp->t_state = TCPS_CLOSED;
1649
1650 /*
1651 * Issue a wakeup before detach so that we don't miss
1652 * a wakeup
1653 */
1654 sodisconnectwakeup(so);
1655
1656 /*
1657 * Clean up any LRO state
1658 */
1659 if (tp->t_flagsext & TF_LRO_OFFLOADED) {
1660 tcp_lro_remove_state(inp->inp_laddr, inp->inp_faddr,
1661 inp->inp_lport, inp->inp_fport);
1662 tp->t_flagsext &= ~TF_LRO_OFFLOADED;
1663 }
1664 /*
1665 * Make sure to clear the TCP Keep Alive Offload as it is
1666 * ref counted on the interface
1667 */
1668 tcp_clear_keep_alive_offload(so);
1669
1670 /*
1671 * If this is a socket that does not want to wake up the device
1672 * for its traffic, the application might need to know that the
1673 * socket is closed, so send a notification.
1674 */
1675 if ((so->so_options & SO_NOWAKEFROMSLEEP) &&
1676 inp->inp_state != INPCB_STATE_DEAD &&
1677 !(inp->inp_flags2 & INP2_TIMEWAIT)) {
1678 socket_post_kev_msg_closed(so);
1679 }
1680
1681 if (CC_ALGO(tp)->cleanup != NULL) {
1682 CC_ALGO(tp)->cleanup(tp);
1683 }
1684
1685 if (tp->t_ccstate != NULL) {
1686 zfree(tcp_cc_zone, tp->t_ccstate);
1687 tp->t_ccstate = NULL;
1688 }
1689 tp->tcp_cc_index = TCP_CC_ALGO_NONE;
1690
1691 /* Can happen if we close the socket before receiving the third ACK */
1692 if ((tp->t_tfo_flags & TFO_F_COOKIE_VALID)) {
1693 OSDecrementAtomic(&tcp_tfo_halfcnt);
1694
1695 /* Panic if something has gone terribly wrong. */
1696 VERIFY(tcp_tfo_halfcnt >= 0);
1697
1698 tp->t_tfo_flags &= ~TFO_F_COOKIE_VALID;
1699 }
1700
1701 #if INET6
1702 if (SOCK_CHECK_DOM(so, PF_INET6)) {
1703 in6_pcbdetach(inp);
1704 } else
1705 #endif /* INET6 */
1706 in_pcbdetach(inp);
1707
1708 /*
1709 * Call soisdisconnected after detach because it might unlock the socket
1710 */
1711 soisdisconnected(so);
1712 tcpstat.tcps_closed++;
1713 KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_END,
1714 tcpstat.tcps_closed, 0, 0, 0, 0);
1715 return NULL;
1716 }
1717
1718 int
1719 tcp_freeq(struct tcpcb *tp)
1720 {
1721 struct tseg_qent *q;
1722 int rv = 0;
1723
1724 while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
1725 LIST_REMOVE(q, tqe_q);
1726 m_freem(q->tqe_m);
1727 zfree(tcp_reass_zone, q);
1728 rv = 1;
1729 }
1730 tp->t_reassqlen = 0;
1731 return rv;
1732 }
1733
1734
1735 /*
1736 * Walk the tcpcbs, if any, and flush the reassembly queue,
1737 * if there is one, when do_tcpdrain is enabled.
1738 * Also defunct the extended background idle socket.
1739 * Do it next time if the pcbinfo lock is in use.
1740 */
1741 void
1742 tcp_drain(void)
1743 {
1744 struct inpcb *inp;
1745 struct tcpcb *tp;
1746
1747 if (!lck_rw_try_lock_exclusive(tcbinfo.ipi_lock)) {
1748 return;
1749 }
1750
1751 LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
1752 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) !=
1753 WNT_STOPUSING) {
1754 socket_lock(inp->inp_socket, 1);
1755 if (in_pcb_checkstate(inp, WNT_RELEASE, 1)
1756 == WNT_STOPUSING) {
1757 /* lost a race, try the next one */
1758 socket_unlock(inp->inp_socket, 1);
1759 continue;
1760 }
1761 tp = intotcpcb(inp);
1762
1763 if (do_tcpdrain) {
1764 tcp_freeq(tp);
1765 }
1766
1767 so_drain_extended_bk_idle(inp->inp_socket);
1768
1769 socket_unlock(inp->inp_socket, 1);
1770 }
1771 }
1772 lck_rw_done(tcbinfo.ipi_lock);
1773 }
1774
1775 /*
1776 * Notify a tcp user of an asynchronous error;
1777 * store error as soft error, but wake up user
1778 * (for now, won't do anything until can select for soft error).
1779 *
1780 * Do not wake up user since there currently is no mechanism for
1781 * reporting soft errors (yet - a kqueue filter may be added).
1782 */
1783 static void
1784 tcp_notify(struct inpcb *inp, int error)
1785 {
1786 struct tcpcb *tp;
1787
1788 if (inp == NULL || (inp->inp_state == INPCB_STATE_DEAD)) {
1789 return; /* pcb is gone already */
1790 }
1791 tp = (struct tcpcb *)inp->inp_ppcb;
1792
1793 VERIFY(tp != NULL);
1794 /*
1795 * Ignore some errors if we are hooked up.
1796 * If connection hasn't completed, has retransmitted several times,
1797 * and receives a second error, give up now. This is better
1798 * than waiting a long time to establish a connection that
1799 * can never complete.
1800 */
1801 if (tp->t_state == TCPS_ESTABLISHED &&
1802 (error == EHOSTUNREACH || error == ENETUNREACH ||
1803 error == EHOSTDOWN)) {
1804 if (inp->inp_route.ro_rt) {
1805 rtfree(inp->inp_route.ro_rt);
1806 inp->inp_route.ro_rt = (struct rtentry *)NULL;
1807 }
1808 } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
1809 tp->t_softerror) {
1810 tcp_drop(tp, error);
1811 } else {
1812 tp->t_softerror = error;
1813 }
1814 }
1815
1816 struct bwmeas *
1817 tcp_bwmeas_alloc(struct tcpcb *tp)
1818 {
1819 struct bwmeas *elm;
1820 elm = zalloc(tcp_bwmeas_zone);
1821 if (elm == NULL) {
1822 return elm;
1823 }
1824
1825 bzero(elm, bwmeas_elm_size);
1826 elm->bw_minsizepkts = TCP_BWMEAS_BURST_MINSIZE;
1827 elm->bw_minsize = elm->bw_minsizepkts * tp->t_maxseg;
1828 return elm;
1829 }
1830
1831 void
1832 tcp_bwmeas_free(struct tcpcb *tp)
1833 {
1834 zfree(tcp_bwmeas_zone, tp->t_bwmeas);
1835 tp->t_bwmeas = NULL;
1836 tp->t_flagsext &= ~(TF_MEASURESNDBW);
1837 }
1838
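/*
 * Collect up to n inpcb pointers into inp_list, first from the global
 * tcb list and then from the TIME_WAIT tailq, skipping pcbs that are
 * dead or newer than the caller's generation count. Returns the number
 * of entries filled in.
 */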
1839 int
1840 get_tcp_inp_list(struct inpcb **inp_list, int n, inp_gen_t gencnt)
1841 {
1842 struct tcpcb *tp;
1843 struct inpcb *inp;
1844 int i = 0;
1845
1846 LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
1847 if (inp->inp_gencnt <= gencnt &&
1848 inp->inp_state != INPCB_STATE_DEAD) {
1849 inp_list[i++] = inp;
1850 }
1851 if (i >= n) {
1852 break;
1853 }
1854 }
1855
1856 TAILQ_FOREACH(tp, &tcp_tw_tailq, t_twentry) {
1857 inp = tp->t_inpcb;
1858 if (inp->inp_gencnt <= gencnt &&
1859 inp->inp_state != INPCB_STATE_DEAD) {
1860 inp_list[i++] = inp;
1861 }
1862 if (i >= n) {
1863 break;
1864 }
1865 }
1866 return i;
1867 }
1868
1869 /*
1870 * tcpcb_to_otcpcb copies specific bits of a tcpcb into the otcpcb format.
1871 * The otcpcb data structure is passed to user space and must not change.
1872 */
1873 static void
1874 tcpcb_to_otcpcb(struct tcpcb *tp, struct otcpcb *otp)
1875 {
1876 otp->t_segq = (uint32_t)VM_KERNEL_ADDRPERM(tp->t_segq.lh_first);
1877 otp->t_dupacks = tp->t_dupacks;
1878 otp->t_timer[TCPT_REXMT_EXT] = tp->t_timer[TCPT_REXMT];
1879 otp->t_timer[TCPT_PERSIST_EXT] = tp->t_timer[TCPT_PERSIST];
1880 otp->t_timer[TCPT_KEEP_EXT] = tp->t_timer[TCPT_KEEP];
1881 otp->t_timer[TCPT_2MSL_EXT] = tp->t_timer[TCPT_2MSL];
1882 otp->t_inpcb =
1883 (_TCPCB_PTR(struct inpcb *))VM_KERNEL_ADDRPERM(tp->t_inpcb);
1884 otp->t_state = tp->t_state;
1885 otp->t_flags = tp->t_flags;
1886 otp->t_force = (tp->t_flagsext & TF_FORCE) ? 1 : 0;
1887 otp->snd_una = tp->snd_una;
1888 otp->snd_max = tp->snd_max;
1889 otp->snd_nxt = tp->snd_nxt;
1890 otp->snd_up = tp->snd_up;
1891 otp->snd_wl1 = tp->snd_wl1;
1892 otp->snd_wl2 = tp->snd_wl2;
1893 otp->iss = tp->iss;
1894 otp->irs = tp->irs;
1895 otp->rcv_nxt = tp->rcv_nxt;
1896 otp->rcv_adv = tp->rcv_adv;
1897 otp->rcv_wnd = tp->rcv_wnd;
1898 otp->rcv_up = tp->rcv_up;
1899 otp->snd_wnd = tp->snd_wnd;
1900 otp->snd_cwnd = tp->snd_cwnd;
1901 otp->snd_ssthresh = tp->snd_ssthresh;
1902 otp->t_maxopd = tp->t_maxopd;
1903 otp->t_rcvtime = tp->t_rcvtime;
1904 otp->t_starttime = tp->t_starttime;
1905 otp->t_rtttime = tp->t_rtttime;
1906 otp->t_rtseq = tp->t_rtseq;
1907 otp->t_rxtcur = tp->t_rxtcur;
1908 otp->t_maxseg = tp->t_maxseg;
1909 otp->t_srtt = tp->t_srtt;
1910 otp->t_rttvar = tp->t_rttvar;
1911 otp->t_rxtshift = tp->t_rxtshift;
1912 otp->t_rttmin = tp->t_rttmin;
1913 otp->t_rttupdated = tp->t_rttupdated;
1914 otp->max_sndwnd = tp->max_sndwnd;
1915 otp->t_softerror = tp->t_softerror;
1916 otp->t_oobflags = tp->t_oobflags;
1917 otp->t_iobc = tp->t_iobc;
1918 otp->snd_scale = tp->snd_scale;
1919 otp->rcv_scale = tp->rcv_scale;
1920 otp->request_r_scale = tp->request_r_scale;
1921 otp->requested_s_scale = tp->requested_s_scale;
1922 otp->ts_recent = tp->ts_recent;
1923 otp->ts_recent_age = tp->ts_recent_age;
1924 otp->last_ack_sent = tp->last_ack_sent;
1925 otp->cc_send = 0;
1926 otp->cc_recv = 0;
1927 otp->snd_recover = tp->snd_recover;
1928 otp->snd_cwnd_prev = tp->snd_cwnd_prev;
1929 otp->snd_ssthresh_prev = tp->snd_ssthresh_prev;
1930 otp->t_badrxtwin = 0;
1931 }
1932
1933 static int
1934 tcp_pcblist SYSCTL_HANDLER_ARGS
1935 {
1936 #pragma unused(oidp, arg1, arg2)
1937 int error, i = 0, n;
1938 struct inpcb **inp_list;
1939 inp_gen_t gencnt;
1940 struct xinpgen xig;
1941
1942 /*
1943 * The process of preparing the TCB list is too time-consuming and
1944 * resource-intensive to repeat twice on every request.
1945 */
1946 lck_rw_lock_shared(tcbinfo.ipi_lock);
1947 if (req->oldptr == USER_ADDR_NULL) {
1948 n = tcbinfo.ipi_count;
1949 req->oldidx = 2 * (sizeof(xig))
1950 + (n + n / 8) * sizeof(struct xtcpcb);
1951 lck_rw_done(tcbinfo.ipi_lock);
1952 return 0;
1953 }
1954
1955 if (req->newptr != USER_ADDR_NULL) {
1956 lck_rw_done(tcbinfo.ipi_lock);
1957 return EPERM;
1958 }
1959
1960 /*
1961 * OK, now we're committed to doing something.
1962 */
1963 gencnt = tcbinfo.ipi_gencnt;
1964 n = tcbinfo.ipi_count;
1965
1966 bzero(&xig, sizeof(xig));
1967 xig.xig_len = sizeof(xig);
1968 xig.xig_count = n;
1969 xig.xig_gen = gencnt;
1970 xig.xig_sogen = so_gencnt;
1971 error = SYSCTL_OUT(req, &xig, sizeof(xig));
1972 if (error) {
1973 lck_rw_done(tcbinfo.ipi_lock);
1974 return error;
1975 }
1976 /*
1977 * We are done if there is no pcb
1978 */
1979 if (n == 0) {
1980 lck_rw_done(tcbinfo.ipi_lock);
1981 return 0;
1982 }
1983
1984 inp_list = _MALLOC(n * sizeof(*inp_list), M_TEMP, M_WAITOK);
1985 if (inp_list == 0) {
1986 lck_rw_done(tcbinfo.ipi_lock);
1987 return ENOMEM;
1988 }
1989
1990 n = get_tcp_inp_list(inp_list, n, gencnt);
1991
1992 error = 0;
1993 for (i = 0; i < n; i++) {
1994 struct xtcpcb xt;
1995 caddr_t inp_ppcb;
1996 struct inpcb *inp;
1997
1998 inp = inp_list[i];
1999
2000 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
2001 continue;
2002 }
2003 socket_lock(inp->inp_socket, 1);
2004 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
2005 socket_unlock(inp->inp_socket, 1);
2006 continue;
2007 }
2008 if (inp->inp_gencnt > gencnt) {
2009 socket_unlock(inp->inp_socket, 1);
2010 continue;
2011 }
2012
2013 bzero(&xt, sizeof(xt));
2014 xt.xt_len = sizeof(xt);
2015 /* XXX should avoid extra copy */
2016 inpcb_to_compat(inp, &xt.xt_inp);
2017 inp_ppcb = inp->inp_ppcb;
2018 if (inp_ppcb != NULL) {
2019 tcpcb_to_otcpcb((struct tcpcb *)(void *)inp_ppcb,
2020 &xt.xt_tp);
2021 } else {
2022 bzero((char *) &xt.xt_tp, sizeof(xt.xt_tp));
2023 }
2024 if (inp->inp_socket) {
2025 sotoxsocket(inp->inp_socket, &xt.xt_socket);
2026 }
2027
2028 socket_unlock(inp->inp_socket, 1);
2029
2030 error = SYSCTL_OUT(req, &xt, sizeof(xt));
2031 }
2032 if (!error) {
2033 /*
2034 * Give the user an updated idea of our state.
2035 * If the generation differs from what we told
2036 * her before, she knows that something happened
2037 * while we were processing this request, and it
2038 * might be necessary to retry.
2039 */
2040 bzero(&xig, sizeof(xig));
2041 xig.xig_len = sizeof(xig);
2042 xig.xig_gen = tcbinfo.ipi_gencnt;
2043 xig.xig_sogen = so_gencnt;
2044 xig.xig_count = tcbinfo.ipi_count;
2045 error = SYSCTL_OUT(req, &xig, sizeof(xig));
2046 }
2047 FREE(inp_list, M_TEMP);
2048 lck_rw_done(tcbinfo.ipi_lock);
2049 return error;
2050 }
2051
2052 SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist,
2053 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
2054 tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
2055
2056 #if !CONFIG_EMBEDDED
2057
2058 static void
2059 tcpcb_to_xtcpcb64(struct tcpcb *tp, struct xtcpcb64 *otp)
2060 {
2061 otp->t_segq = (uint32_t)VM_KERNEL_ADDRPERM(tp->t_segq.lh_first);
2062 otp->t_dupacks = tp->t_dupacks;
2063 otp->t_timer[TCPT_REXMT_EXT] = tp->t_timer[TCPT_REXMT];
2064 otp->t_timer[TCPT_PERSIST_EXT] = tp->t_timer[TCPT_PERSIST];
2065 otp->t_timer[TCPT_KEEP_EXT] = tp->t_timer[TCPT_KEEP];
2066 otp->t_timer[TCPT_2MSL_EXT] = tp->t_timer[TCPT_2MSL];
2067 otp->t_state = tp->t_state;
2068 otp->t_flags = tp->t_flags;
2069 otp->t_force = (tp->t_flagsext & TF_FORCE) ? 1 : 0;
2070 otp->snd_una = tp->snd_una;
2071 otp->snd_max = tp->snd_max;
2072 otp->snd_nxt = tp->snd_nxt;
2073 otp->snd_up = tp->snd_up;
2074 otp->snd_wl1 = tp->snd_wl1;
2075 otp->snd_wl2 = tp->snd_wl2;
2076 otp->iss = tp->iss;
2077 otp->irs = tp->irs;
2078 otp->rcv_nxt = tp->rcv_nxt;
2079 otp->rcv_adv = tp->rcv_adv;
2080 otp->rcv_wnd = tp->rcv_wnd;
2081 otp->rcv_up = tp->rcv_up;
2082 otp->snd_wnd = tp->snd_wnd;
2083 otp->snd_cwnd = tp->snd_cwnd;
2084 otp->snd_ssthresh = tp->snd_ssthresh;
2085 otp->t_maxopd = tp->t_maxopd;
2086 otp->t_rcvtime = tp->t_rcvtime;
2087 otp->t_starttime = tp->t_starttime;
2088 otp->t_rtttime = tp->t_rtttime;
2089 otp->t_rtseq = tp->t_rtseq;
2090 otp->t_rxtcur = tp->t_rxtcur;
2091 otp->t_maxseg = tp->t_maxseg;
2092 otp->t_srtt = tp->t_srtt;
2093 otp->t_rttvar = tp->t_rttvar;
2094 otp->t_rxtshift = tp->t_rxtshift;
2095 otp->t_rttmin = tp->t_rttmin;
2096 otp->t_rttupdated = tp->t_rttupdated;
2097 otp->max_sndwnd = tp->max_sndwnd;
2098 otp->t_softerror = tp->t_softerror;
2099 otp->t_oobflags = tp->t_oobflags;
2100 otp->t_iobc = tp->t_iobc;
2101 otp->snd_scale = tp->snd_scale;
2102 otp->rcv_scale = tp->rcv_scale;
2103 otp->request_r_scale = tp->request_r_scale;
2104 otp->requested_s_scale = tp->requested_s_scale;
2105 otp->ts_recent = tp->ts_recent;
2106 otp->ts_recent_age = tp->ts_recent_age;
2107 otp->last_ack_sent = tp->last_ack_sent;
2108 otp->cc_send = 0;
2109 otp->cc_recv = 0;
2110 otp->snd_recover = tp->snd_recover;
2111 otp->snd_cwnd_prev = tp->snd_cwnd_prev;
2112 otp->snd_ssthresh_prev = tp->snd_ssthresh_prev;
2113 otp->t_badrxtwin = 0;
2114 }
2115
2116
2117 static int
2118 tcp_pcblist64 SYSCTL_HANDLER_ARGS
2119 {
2120 #pragma unused(oidp, arg1, arg2)
2121 int error, i = 0, n;
2122 struct inpcb **inp_list;
2123 inp_gen_t gencnt;
2124 struct xinpgen xig;
2125
2126 /*
2127 * The process of preparing the TCB list is too time-consuming and
2128 * resource-intensive to repeat twice on every request.
2129 */
2130 lck_rw_lock_shared(tcbinfo.ipi_lock);
2131 if (req->oldptr == USER_ADDR_NULL) {
2132 n = tcbinfo.ipi_count;
2133 req->oldidx = 2 * (sizeof(xig))
2134 + (n + n / 8) * sizeof(struct xtcpcb64);
2135 lck_rw_done(tcbinfo.ipi_lock);
2136 return 0;
2137 }
2138
2139 if (req->newptr != USER_ADDR_NULL) {
2140 lck_rw_done(tcbinfo.ipi_lock);
2141 return EPERM;
2142 }
2143
2144 /*
2145 * OK, now we're committed to doing something.
2146 */
2147 gencnt = tcbinfo.ipi_gencnt;
2148 n = tcbinfo.ipi_count;
2149
2150 bzero(&xig, sizeof(xig));
2151 xig.xig_len = sizeof(xig);
2152 xig.xig_count = n;
2153 xig.xig_gen = gencnt;
2154 xig.xig_sogen = so_gencnt;
2155 error = SYSCTL_OUT(req, &xig, sizeof(xig));
2156 if (error) {
2157 lck_rw_done(tcbinfo.ipi_lock);
2158 return error;
2159 }
2160 /*
2161 * We are done if there is no pcb
2162 */
2163 if (n == 0) {
2164 lck_rw_done(tcbinfo.ipi_lock);
2165 return 0;
2166 }
2167
2168 inp_list = _MALLOC(n * sizeof(*inp_list), M_TEMP, M_WAITOK);
2169 if (inp_list == 0) {
2170 lck_rw_done(tcbinfo.ipi_lock);
2171 return ENOMEM;
2172 }
2173
2174 n = get_tcp_inp_list(inp_list, n, gencnt);
2175
2176 error = 0;
2177 for (i = 0; i < n; i++) {
2178 struct xtcpcb64 xt;
2179 struct inpcb *inp;
2180
2181 inp = inp_list[i];
2182
2183 if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
2184 continue;
2185 }
2186 socket_lock(inp->inp_socket, 1);
2187 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
2188 socket_unlock(inp->inp_socket, 1);
2189 continue;
2190 }
2191 if (inp->inp_gencnt > gencnt) {
2192 socket_unlock(inp->inp_socket, 1);
2193 continue;
2194 }
2195
2196 bzero(&xt, sizeof(xt));
2197 xt.xt_len = sizeof(xt);
2198 inpcb_to_xinpcb64(inp, &xt.xt_inpcb);
2199 xt.xt_inpcb.inp_ppcb =
2200 (uint64_t)VM_KERNEL_ADDRPERM(inp->inp_ppcb);
2201 if (inp->inp_ppcb != NULL) {
2202 tcpcb_to_xtcpcb64((struct tcpcb *)inp->inp_ppcb,
2203 &xt);
2204 }
2205 if (inp->inp_socket) {
2206 sotoxsocket64(inp->inp_socket,
2207 &xt.xt_inpcb.xi_socket);
2208 }
2209
2210 socket_unlock(inp->inp_socket, 1);
2211
2212 error = SYSCTL_OUT(req, &xt, sizeof(xt));
2213 }
2214 if (!error) {
2215 /*
2216 * Give the user an updated idea of our state.
2217 * If the generation differs from what we told
2218 * her before, she knows that something happened
2219 * while we were processing this request, and it
2220 * might be necessary to retry.
2221 */
2222 bzero(&xig, sizeof(xig));
2223 xig.xig_len = sizeof(xig);
2224 xig.xig_gen = tcbinfo.ipi_gencnt;
2225 xig.xig_sogen = so_gencnt;
2226 xig.xig_count = tcbinfo.ipi_count;
2227 error = SYSCTL_OUT(req, &xig, sizeof(xig));
2228 }
2229 FREE(inp_list, M_TEMP);
2230 lck_rw_done(tcbinfo.ipi_lock);
2231 return error;
2232 }
2233
2234 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist64,
2235 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
2236 tcp_pcblist64, "S,xtcpcb64", "List of active TCP connections");
2237
2238 #endif /* !CONFIG_EMBEDDED */
2239
2240 static int
2241 tcp_pcblist_n SYSCTL_HANDLER_ARGS
2242 {
2243 #pragma unused(oidp, arg1, arg2)
2244 int error = 0;
2245
2246 error = get_pcblist_n(IPPROTO_TCP, req, &tcbinfo);
2247
2248 return error;
2249 }
2250
2251
2252 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist_n,
2253 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
2254 tcp_pcblist_n, "S,xtcpcb_n", "List of active TCP connections");
2255
2256 static int
2257 tcp_progress_indicators SYSCTL_HANDLER_ARGS
2258 {
2259 #pragma unused(oidp, arg1, arg2)
2260
2261 return ntstat_tcp_progress_indicators(req);
2262 }
2263
2264 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, progress,
2265 CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, 0,
2266 tcp_progress_indicators, "S", "Various items that indicate the current state of progress on the link");
2267
2268
2269 __private_extern__ void
2270 tcp_get_ports_used(uint32_t ifindex, int protocol, uint32_t flags,
2271 bitstr_t *bitfield)
2272 {
2273 inpcb_get_ports_used(ifindex, protocol, flags, bitfield,
2274 &tcbinfo);
2275 }
2276
2277 __private_extern__ uint32_t
2278 tcp_count_opportunistic(unsigned int ifindex, u_int32_t flags)
2279 {
2280 return inpcb_count_opportunistic(ifindex, &tcbinfo, flags);
2281 }
2282
2283 __private_extern__ uint32_t
2284 tcp_find_anypcb_byaddr(struct ifaddr *ifa)
2285 {
2286 return inpcb_find_anypcb_byaddr(ifa, &tcbinfo);
2287 }
2288
2289 static void
2290 tcp_handle_msgsize(struct ip *ip, struct inpcb *inp)
2291 {
2292 struct rtentry *rt = NULL;
2293 u_short ifscope = IFSCOPE_NONE;
2294 int mtu;
2295 struct sockaddr_in icmpsrc = {
2296 .sin_len = sizeof(struct sockaddr_in),
2297 .sin_family = AF_INET, .sin_port = 0, .sin_addr = { .s_addr = 0 },
2298 .sin_zero = { 0, 0, 0, 0, 0, 0, 0, 0 }
2299 };
2300 struct icmp *icp = NULL;
2301
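/*
 * The caller hands us a pointer to the copy of the offending IP header
 * embedded in the ICMP error; back up by offsetof(struct icmp, icmp_ip)
 * to recover the enclosing ICMP header, so that the suggested next-hop
 * MTU (icmp_nextmtu) can be read below.
 */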
2302 icp = (struct icmp *)(void *)
2303 ((caddr_t)ip - offsetof(struct icmp, icmp_ip));
2304
2305 icmpsrc.sin_addr = icp->icmp_ip.ip_dst;
2306
2307 /*
2308 * MTU discovery:
2309 * If we got a needfrag and there is a host route to the
2310 * original destination, and the MTU is not locked, then
2311 * set the MTU in the route to the suggested new value
2312 * (if given) and then notify as usual. The ULPs will
2313 * notice that the MTU has changed and adapt accordingly.
2314 * If no new MTU was suggested, then we guess a new one
2315 * less than the current value. If the new MTU is
2316 * unreasonably small (defined by sysctl tcp_minmss), then
2317 * we reset the MTU to the interface value and enable the
2318 * lock bit, indicating that we are no longer doing MTU
2319 * discovery.
2320 */
2321 if (ROUTE_UNUSABLE(&(inp->inp_route)) == false) {
2322 rt = inp->inp_route.ro_rt;
2323 }
2324
2325 /*
2326 * icmp6_mtudisc_update scopes the routing lookup
2327 * to the incoming interface (delivered from the mbuf
2328 * packet header).
2329 * That is mostly OK, but for asymmetric networks
2330 * it may be an issue.
2331 * A "Frag needed" or "Packet too big" message really communicates
2332 * the MTU for the outbound data path.
2333 * Take the interface scope from the cached route or
2334 * from the last outgoing interface of the inp.
2335 */
2336 if (rt != NULL) {
2337 ifscope = (rt->rt_ifp != NULL) ?
2338 rt->rt_ifp->if_index : IFSCOPE_NONE;
2339 } else {
2340 ifscope = (inp->inp_last_outifp != NULL) ?
2341 inp->inp_last_outifp->if_index : IFSCOPE_NONE;
2342 }
2343
2344 if ((rt == NULL) ||
2345 !(rt->rt_flags & RTF_HOST) ||
2346 (rt->rt_flags & (RTF_CLONING | RTF_PRCLONING))) {
2347 rt = rtalloc1_scoped((struct sockaddr *)&icmpsrc, 0,
2348 RTF_CLONING | RTF_PRCLONING, ifscope);
2349 } else if (rt) {
2350 RT_LOCK(rt);
2351 rtref(rt);
2352 RT_UNLOCK(rt);
2353 }
2354
2355 if (rt != NULL) {
2356 RT_LOCK(rt);
2357 if ((rt->rt_flags & RTF_HOST) &&
2358 !(rt->rt_rmx.rmx_locks & RTV_MTU)) {
2359 mtu = ntohs(icp->icmp_nextmtu);
2360 /*
2361 * XXX Stock BSD has changed the following
2362 * to compare with icp->icmp_ip.ip_len
2363 * to converge faster when sent packet
2364 * < route's MTU. We may want to adopt
2365 * that change.
2366 */
2367 if (mtu == 0) {
2368 mtu = ip_next_mtu(rt->rt_rmx.rmx_mtu, 1);
2370 }
2371 #if DEBUG_MTUDISC
2372 printf("MTU for %s reduced to %d\n",
2373 inet_ntop(AF_INET,
2374 &icmpsrc.sin_addr, ipv4str,
2375 sizeof(ipv4str)), mtu);
2376 #endif
2377 if (mtu < max(296, (tcp_minmss +
2378 sizeof(struct tcpiphdr)))) {
2379 rt->rt_rmx.rmx_locks |= RTV_MTU;
2380 } else if (rt->rt_rmx.rmx_mtu > mtu) {
2381 rt->rt_rmx.rmx_mtu = mtu;
2382 }
2383 }
2384 RT_UNLOCK(rt);
2385 rtfree(rt);
2386 }
2387 }
2388
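/*
 * Protocol control-input handler for ICMP errors. PRC_MSGSIZE triggers
 * path MTU discovery, certain unreachable/time-exceeded codes may drop a
 * connection that is still in SYN_SENT (when icmp_may_rst is set), and
 * redirects flush the cached route. The sequence number embedded in the
 * ICMP payload is validated against the send window before any
 * notification is delivered.
 */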
2389 void
2390 tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip, __unused struct ifnet *ifp)
2391 {
2392 tcp_seq icmp_tcp_seq;
2393 struct ip *ip = vip;
2394 struct in_addr faddr;
2395 struct inpcb *inp;
2396 struct tcpcb *tp;
2397 struct tcphdr *th;
2398 struct icmp *icp;
2399 void (*notify)(struct inpcb *, int) = tcp_notify;
2400
2401 faddr = ((struct sockaddr_in *)(void *)sa)->sin_addr;
2402 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY) {
2403 return;
2404 }
2405
2406 if ((unsigned)cmd >= PRC_NCMDS) {
2407 return;
2408 }
2409
2410 /* Source quench is deprecated */
2411 if (cmd == PRC_QUENCH) {
2412 return;
2413 }
2414
2415 if (cmd == PRC_MSGSIZE) {
2416 notify = tcp_mtudisc;
2417 } else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
2418 cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL ||
2419 cmd == PRC_TIMXCEED_INTRANS) && ip) {
2420 notify = tcp_drop_syn_sent;
2421 }
2422 /*
2423 * Hostdead is ugly because it goes linearly through all PCBs.
2424 * XXX: We never get this from ICMP, otherwise it makes an
2425 * excellent DoS attack on machines with many connections.
2426 */
2427 else if (cmd == PRC_HOSTDEAD) {
2428 ip = NULL;
2429 } else if (inetctlerrmap[cmd] == 0 && !PRC_IS_REDIRECT(cmd)) {
2430 return;
2431 }
2432
2433
2434 if (ip == NULL) {
2435 in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], notify);
2436 return;
2437 }
2438
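/*
 * An ICMP error carries the IP header of the offending packet plus at
 * least the first 8 bytes of its payload, which for TCP covers the
 * source/destination ports and the sequence number parsed below.
 */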
2439 icp = (struct icmp *)(void *)
2440 ((caddr_t)ip - offsetof(struct icmp, icmp_ip));
2441 th = (struct tcphdr *)(void *)((caddr_t)ip + (IP_VHL_HL(ip->ip_vhl) << 2));
2442 icmp_tcp_seq = ntohl(th->th_seq);
2443
2444 inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
2445 ip->ip_src, th->th_sport, 0, NULL);
2446
2447 if (inp == NULL ||
2448 inp->inp_socket == NULL) {
2449 return;
2450 }
2451
2452 socket_lock(inp->inp_socket, 1);
2453 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) ==
2454 WNT_STOPUSING) {
2455 socket_unlock(inp->inp_socket, 1);
2456 return;
2457 }
2458
2459 if (PRC_IS_REDIRECT(cmd)) {
2460 /* signal EHOSTDOWN, as it flushes the cached route */
2461 (*notify)(inp, EHOSTDOWN);
2462 } else {
2463 tp = intotcpcb(inp);
2464 if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) &&
2465 SEQ_LT(icmp_tcp_seq, tp->snd_max)) {
2466 if (cmd == PRC_MSGSIZE) {
2467 tcp_handle_msgsize(ip, inp);
2468 }
2469
2470 (*notify)(inp, inetctlerrmap[cmd]);
2471 }
2472 }
2473 socket_unlock(inp->inp_socket, 1);
2474 }
2475
2476 #if INET6
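/*
 * IPv6 counterpart of tcp_ctlinput(). It performs the same PRC_*
 * dispatch, additionally verifies that the mbuf is large enough before
 * copying the ports and sequence number out of it, and for PRC_MSGSIZE
 * only notifies when the offered MTU is smaller than the current maxseg
 * plus the TCP/IPv6 headers.
 */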
2477 void
2478 tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp)
2479 {
2480 tcp_seq icmp_tcp_seq;
2481 struct in6_addr *dst;
2482 void (*notify)(struct inpcb *, int) = tcp_notify;
2483 struct ip6_hdr *ip6;
2484 struct mbuf *m;
2485 struct inpcb *inp;
2486 struct tcpcb *tp;
2487 struct icmp6_hdr *icmp6;
2488 struct ip6ctlparam *ip6cp = NULL;
2489 const struct sockaddr_in6 *sa6_src = NULL;
2490 unsigned int mtu;
2491 unsigned int off;
2492
2493 struct tcp_ports {
2494 uint16_t th_sport;
2495 uint16_t th_dport;
2496 } t_ports;
2497
2498 if (sa->sa_family != AF_INET6 ||
2499 sa->sa_len != sizeof(struct sockaddr_in6)) {
2500 return;
2501 }
2502
2503 /* Source quench is deprecated */
2504 if (cmd == PRC_QUENCH) {
2505 return;
2506 }
2507
2508 if ((unsigned)cmd >= PRC_NCMDS) {
2509 return;
2510 }
2511
2512 /* if the parameter is from icmp6, decode it. */
2513 if (d != NULL) {
2514 ip6cp = (struct ip6ctlparam *)d;
2515 icmp6 = ip6cp->ip6c_icmp6;
2516 m = ip6cp->ip6c_m;
2517 ip6 = ip6cp->ip6c_ip6;
2518 off = ip6cp->ip6c_off;
2519 sa6_src = ip6cp->ip6c_src;
2520 dst = ip6cp->ip6c_finaldst;
2521 } else {
2522 m = NULL;
2523 ip6 = NULL;
2524 off = 0; /* fool gcc */
2525 sa6_src = &sa6_any;
2526 dst = NULL;
2527 }
2528
2529 if (cmd == PRC_MSGSIZE) {
2530 notify = tcp_mtudisc;
2531 } else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
2532 cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) &&
2533 ip6 != NULL) {
2534 notify = tcp_drop_syn_sent;
2535 }
2536 /*
2537 * Hostdead is ugly because it goes linearly through all PCBs.
2538 * XXX: We never get this from ICMP, otherwise it makes an
2539 * excellent DoS attack on machines with many connections.
2540 */
2541 else if (cmd == PRC_HOSTDEAD) {
2542 ip6 = NULL;
2543 } else if (inet6ctlerrmap[cmd] == 0 && !PRC_IS_REDIRECT(cmd)) {
2544 return;
2545 }
2546
2547
2548 if (ip6 == NULL) {
2549 in6_pcbnotify(&tcbinfo, sa, 0, (struct sockaddr *)(size_t)sa6_src,
2550 0, cmd, NULL, notify);
2551 return;
2552 }
2553
2554 /* Check if we can safely get the ports from the tcp hdr */
2555 if (m == NULL ||
2556 (m->m_pkthdr.len <
2557 (int32_t) (off + sizeof(struct tcp_ports)))) {
2558 return;
2559 }
2560 bzero(&t_ports, sizeof(struct tcp_ports));
2561 m_copydata(m, off, sizeof(struct tcp_ports), (caddr_t)&t_ports);
2562
2563 off += sizeof(struct tcp_ports);
2564 if (m->m_pkthdr.len < (int32_t) (off + sizeof(tcp_seq))) {
2565 return;
2566 }
2567 m_copydata(m, off, sizeof(tcp_seq), (caddr_t)&icmp_tcp_seq);
2568 icmp_tcp_seq = ntohl(icmp_tcp_seq);
2569
2570 if (cmd == PRC_MSGSIZE) {
2571 mtu = ntohl(icmp6->icmp6_mtu);
2572 /*
2573 * If no alternative MTU was proposed, or the proposed
2574 * MTU was too small, set to the min.
2575 */
2576 if (mtu < IPV6_MMTU) {
2577 mtu = IPV6_MMTU - 8;
2578 }
2579 }
2580
2581 inp = in6_pcblookup_hash(&tcbinfo, &ip6->ip6_dst, t_ports.th_dport,
2582 &ip6->ip6_src, t_ports.th_sport, 0, NULL);
2583
2584 if (inp == NULL ||
2585 inp->inp_socket == NULL) {
2586 return;
2587 }
2588
2589 socket_lock(inp->inp_socket, 1);
2590 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) ==
2591 WNT_STOPUSING) {
2592 socket_unlock(inp->inp_socket, 1);
2593 return;
2594 }
2595
2596 if (PRC_IS_REDIRECT(cmd)) {
2597 /* signal EHOSTDOWN, as it flushes the cached route */
2598 (*notify)(inp, EHOSTDOWN);
2599 } else {
2600 tp = intotcpcb(inp);
2601 if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) &&
2602 SEQ_LT(icmp_tcp_seq, tp->snd_max)) {
2603 if (cmd == PRC_MSGSIZE) {
2604 /*
2605 * Only process the offered MTU if it
2606 * is smaller than the current one.
2607 */
2608 if (mtu < tp->t_maxseg +
2609 (sizeof(struct tcphdr) + sizeof(struct ip6_hdr))) {
2610 (*notify)(inp, inetctlerrmap[cmd]);
2611 }
2612 } else {
2613 (*notify)(inp, inetctlerrmap[cmd]);
2614 }
2615 }
2616 }
2617 socket_unlock(inp->inp_socket, 1);
2618 }
2619 #endif /* INET6 */
2620
2621
2622 /*
2623 * Following is where TCP initial sequence number generation occurs.
2624 *
2625 * There are two places where we must use initial sequence numbers:
2626 * 1. In SYN-ACK packets.
2627 * 2. In SYN packets.
2628 *
2629 * The ISNs in SYN-ACK packets have no monotonicity requirement,
2630 * and should be as unpredictable as possible to avoid the possibility
2631 * of spoofing and/or connection hijacking. To satisfy this
2632 * requirement, SYN-ACK ISNs are generated via the arc4random()
2633 * function. If exact RFC 1948 compliance is requested via sysctl,
2634 * these ISNs will be generated just like those in SYN packets.
2635 *
2636 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
2637 * depends on this property. In addition, these ISNs should be
2638 * unguessable so as to prevent connection hijacking. To satisfy
2639 * the requirements of this situation, the algorithm outlined in
2640 * RFC 1948 is used to generate sequence numbers.
2641 *
2642 * For more information on the theory of operation, please see
2643 * RFC 1948.
2644 *
2645 * Implementation details:
2646 *
2647 * Time is based off the system timer, and is corrected so that it
2648 * increases by one megabyte per second. This allows for proper
2649 * recycling on high speed LANs while still leaving over an hour
2650 * before rollover.
2651 *
2652 * Two sysctls control the generation of ISNs:
2653 *
2654 * net.inet.tcp.isn_reseed_interval controls the number of seconds
2655 * between seeding of isn_secret. This is normally set to zero,
2656 * as reseeding should not be necessary.
2657 *
2658 * net.inet.tcp.strict_rfc1948 controls whether RFC 1948 is followed
2659 * strictly. When strict compliance is requested, reseeding is
2660 * disabled and SYN-ACKs will be generated in the same manner as
2661 * SYNs. Strict mode is disabled by default.
2662 *
2663 */
2664
2665 #define ISN_BYTES_PER_SECOND 1048576
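/*
 * A rough sketch of the RFC 1948 scheme implemented by tcp_new_isn()
 * below:
 *
 *	ISN = M + F(localhost, localport, remotehost, remoteport, secret)
 *
 * where M is a timer advancing at ISN_BYTES_PER_SECOND (so the 32-bit
 * sequence space wraps after roughly 2^32 / 2^20 = 4096 seconds, a
 * little over an hour) and F is an MD5 hash over the connection
 * identifiers and a random secret.
 */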
2666
2667 tcp_seq
2668 tcp_new_isn(struct tcpcb *tp)
2669 {
2670 u_int32_t md5_buffer[4];
2671 tcp_seq new_isn;
2672 struct timeval timenow;
2673 u_char isn_secret[32];
2674 int isn_last_reseed = 0;
2675 MD5_CTX isn_ctx;
2676
2677 /* Use arc4random for SYN-ACKs when not in exact RFC1948 mode. */
2678 if (((tp->t_state == TCPS_LISTEN) || (tp->t_state == TCPS_TIME_WAIT)) &&
2679 tcp_strict_rfc1948 == 0)
2680 #ifdef __APPLE__
2681 { return RandomULong(); }
2682 #else
2683 { return arc4random(); }
2684 #endif
2685 getmicrotime(&timenow);
2686
2687 /* Seed if this is the first use, reseed if requested. */
2688 if ((isn_last_reseed == 0) ||
2689 ((tcp_strict_rfc1948 == 0) && (tcp_isn_reseed_interval > 0) &&
2690 (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval * hz)
2691 < (u_int)timenow.tv_sec))) {
2692 #ifdef __APPLE__
2693 read_frandom(&isn_secret, sizeof(isn_secret));
2694 #else
2695 read_random_unlimited(&isn_secret, sizeof(isn_secret));
2696 #endif
2697 isn_last_reseed = timenow.tv_sec;
2698 }
2699
2700 /* Compute the md5 hash and return the ISN. */
2701 MD5Init(&isn_ctx);
2702 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport,
2703 sizeof(u_short));
2704 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport,
2705 sizeof(u_short));
2706 #if INET6
2707 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
2708 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
2709 sizeof(struct in6_addr));
2710 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
2711 sizeof(struct in6_addr));
2712 } else
2713 #endif
2714 {
2715 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
2716 sizeof(struct in_addr));
2717 MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
2718 sizeof(struct in_addr));
2719 }
2720 MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
2721 MD5Final((u_char *) &md5_buffer, &isn_ctx);
2722 new_isn = (tcp_seq) md5_buffer[0];
2723 new_isn += timenow.tv_sec * (ISN_BYTES_PER_SECOND / hz);
2724 return new_isn;
2725 }
2726
2727
2728 /*
2729 * When a specific ICMP unreachable message is received and the
2730 * connection state is SYN-SENT, drop the connection. This behavior
2731 * is controlled by the icmp_may_rst sysctl.
2732 */
2733 void
2734 tcp_drop_syn_sent(struct inpcb *inp, int errno)
2735 {
2736 struct tcpcb *tp = intotcpcb(inp);
2737
2738 if (tp && tp->t_state == TCPS_SYN_SENT) {
2739 tcp_drop(tp, errno);
2740 }
2741 }
2742
2743 /*
2744 * When `need fragmentation' ICMP is received, update our idea of the MSS
2745 * based on the new value in the route. Also nudge TCP to send something,
2746 * since we know the packet we just sent was dropped.
2747 * This duplicates some code in the tcp_mss() function in tcp_input.c.
2748 */
2749 void
2750 tcp_mtudisc(
2751 struct inpcb *inp,
2752 __unused int errno
2753 )
2754 {
2755 struct tcpcb *tp = intotcpcb(inp);
2756 struct rtentry *rt;
2757 struct rmxp_tao *taop;
2758 struct socket *so = inp->inp_socket;
2759 int offered;
2760 int mss;
2761 u_int32_t mtu;
2762 u_int32_t protoHdrOverhead = sizeof(struct tcpiphdr);
2763 #if INET6
2764 int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
2765
2766 /*
2767 * Nothing left to send after the socket is defunct or TCP is in the closed state
2768 */
2769 if ((so->so_state & SS_DEFUNCT) || (tp != NULL && tp->t_state == TCPS_CLOSED)) {
2770 return;
2771 }
2772
2773 if (isipv6) {
2774 protoHdrOverhead = sizeof(struct ip6_hdr) +
2775 sizeof(struct tcphdr);
2776 }
2777 #endif /* INET6 */
2778
2779 if (tp != NULL) {
2780 #if INET6
2781 if (isipv6) {
2782 rt = tcp_rtlookup6(inp, IFSCOPE_NONE);
2783 } else
2784 #endif /* INET6 */
2785 rt = tcp_rtlookup(inp, IFSCOPE_NONE);
2786 if (!rt || !rt->rt_rmx.rmx_mtu) {
2787 tp->t_maxopd = tp->t_maxseg =
2788 #if INET6
2789 isipv6 ? tcp_v6mssdflt :
2790 #endif /* INET6 */
2791 tcp_mssdflt;
2792
2793 /* Route locked during lookup above */
2794 if (rt != NULL) {
2795 RT_UNLOCK(rt);
2796 }
2797 return;
2798 }
2799 taop = rmx_taop(rt->rt_rmx);
2800 offered = taop->tao_mssopt;
2801 mtu = rt->rt_rmx.rmx_mtu;
2802
2803 /* Route locked during lookup above */
2804 RT_UNLOCK(rt);
2805
2806 #if NECP
2807 // Adjust MTU if necessary.
2808 mtu = necp_socket_get_effective_mtu(inp, mtu);
2809 #endif /* NECP */
2810 mss = mtu - protoHdrOverhead;
2811
2812 if (offered) {
2813 mss = min(mss, offered);
2814 }
2815 /*
2816 * XXX - The above conditional probably violates the TCP
2817 * spec. The problem is that, since we don't know the
2818 * other end's MSS, we are supposed to use a conservative
2819 * default. But, if we do that, then MTU discovery will
2820 * never actually take place, because the conservative
2821 * default is much less than the MTUs typically seen
2822 * on the Internet today. For the moment, we'll sweep
2823 * this under the carpet.
2824 *
2825 * The conservative default might not actually be a problem
2826 * if the only case this occurs is when sending an initial
2827 * SYN with options and data to a host we've never talked
2828 * to before. Then, they will reply with an MSS value which
2829 * will get recorded and the new parameters should get
2830 * recomputed. For Further Study.
2831 */
2832 if (tp->t_maxopd <= mss) {
2833 return;
2834 }
2835 tp->t_maxopd = mss;
2836
2837 if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
2838 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP) {
2839 mss -= TCPOLEN_TSTAMP_APPA;
2840 }
2841
2842 #if MPTCP
2843 mss -= mptcp_adj_mss(tp, TRUE);
2844 #endif
2845 if (so->so_snd.sb_hiwat < mss) {
2846 mss = so->so_snd.sb_hiwat;
2847 }
2848
2849 tp->t_maxseg = mss;
2850
2851 ASSERT(tp->t_maxseg);
2852
2853 /*
2854 * Reset the slow-start flight size, as it may depend on the
2855 * new MSS
2856 */
2857 if (CC_ALGO(tp)->cwnd_init != NULL) {
2858 CC_ALGO(tp)->cwnd_init(tp);
2859 }
2860 tcpstat.tcps_mturesent++;
2861 tp->t_rtttime = 0;
2862 tp->snd_nxt = tp->snd_una;
2863 tcp_output(tp);
2864 }
2865 }
2866
2867 /*
2868 * Look up the routing entry to the peer of this inpcb. If no route
2869 * is found and one cannot be allocated, then return NULL. This routine
2870 * is called by TCP routines that access the rmx structure and by tcp_mss
2871 * to get the interface MTU. If a route is found, this routine will
2872 * hold the rtentry lock; the caller is responsible for unlocking.
2873 */
2874 struct rtentry *
2875 tcp_rtlookup(struct inpcb *inp, unsigned int input_ifscope)
2876 {
2877 struct route *ro;
2878 struct rtentry *rt;
2879 struct tcpcb *tp;
2880
2881 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
2882
2883 ro = &inp->inp_route;
2884 if ((rt = ro->ro_rt) != NULL) {
2885 RT_LOCK(rt);
2886 }
2887
2888 if (ROUTE_UNUSABLE(ro)) {
2889 if (rt != NULL) {
2890 RT_UNLOCK(rt);
2891 rt = NULL;
2892 }
2893 ROUTE_RELEASE(ro);
2894 /* No route yet, so try to acquire one */
2895 if (inp->inp_faddr.s_addr != INADDR_ANY) {
2896 unsigned int ifscope;
2897
2898 ro->ro_dst.sa_family = AF_INET;
2899 ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
2900 ((struct sockaddr_in *)(void *)&ro->ro_dst)->sin_addr =
2901 inp->inp_faddr;
2902
2903 /*
2904 * If the socket was bound to an interface, then
2905 * the bound-to-interface takes precedence over
2906 * the inbound interface passed in by the caller
2907 * (if we get here as part of the output path then
2908 * input_ifscope is IFSCOPE_NONE).
2909 */
2910 ifscope = (inp->inp_flags & INP_BOUND_IF) ?
2911 inp->inp_boundifp->if_index : input_ifscope;
2912
2913 rtalloc_scoped(ro, ifscope);
2914 if ((rt = ro->ro_rt) != NULL) {
2915 RT_LOCK(rt);
2916 }
2917 }
2918 }
2919 if (rt != NULL) {
2920 RT_LOCK_ASSERT_HELD(rt);
2921 }
2922
2923 /*
2924 * Update MTU discovery determination. Don't do it if:
2925 * 1) it is disabled via the sysctl
2926 * 2) the route isn't up
2927 * 3) the MTU is locked (if it is, then discovery has been
2928 * disabled)
2929 */
2930
2931 tp = intotcpcb(inp);
2932
2933 if (!path_mtu_discovery || ((rt != NULL) &&
2934 (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU)))) {
2935 tp->t_flags &= ~TF_PMTUD;
2936 } else {
2937 tp->t_flags |= TF_PMTUD;
2938 }
2939
2940 if (rt != NULL && rt->rt_ifp != NULL) {
2941 somultipages(inp->inp_socket,
2942 (rt->rt_ifp->if_hwassist & IFNET_MULTIPAGES));
2943 tcp_set_tso(tp, rt->rt_ifp);
2944 soif2kcl(inp->inp_socket,
2945 (rt->rt_ifp->if_eflags & IFEF_2KCL));
2946 tcp_set_ecn(tp, rt->rt_ifp);
2947 if (inp->inp_last_outifp == NULL) {
2948 inp->inp_last_outifp = rt->rt_ifp;
2949 }
2950 }
2951
2952 /* Note if the peer is local */
2953 if (rt != NULL && !(rt->rt_ifp->if_flags & IFF_POINTOPOINT) &&
2954 (rt->rt_gateway->sa_family == AF_LINK ||
2955 rt->rt_ifp->if_flags & IFF_LOOPBACK ||
2956 in_localaddr(inp->inp_faddr))) {
2957 tp->t_flags |= TF_LOCAL;
2958 }
2959
2960 /*
2961 * Caller needs to call RT_UNLOCK(rt).
2962 */
2963 return rt;
2964 }
2965
2966 #if INET6
2967 struct rtentry *
2968 tcp_rtlookup6(struct inpcb *inp, unsigned int input_ifscope)
2969 {
2970 struct route_in6 *ro6;
2971 struct rtentry *rt;
2972 struct tcpcb *tp;
2973
2974 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
2975
2976 ro6 = &inp->in6p_route;
2977 if ((rt = ro6->ro_rt) != NULL) {
2978 RT_LOCK(rt);
2979 }
2980
2981 if (ROUTE_UNUSABLE(ro6)) {
2982 if (rt != NULL) {
2983 RT_UNLOCK(rt);
2984 rt = NULL;
2985 }
2986 ROUTE_RELEASE(ro6);
2987 /* No route yet, so try to acquire one */
2988 if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
2989 struct sockaddr_in6 *dst6;
2990 unsigned int ifscope;
2991
2992 dst6 = (struct sockaddr_in6 *)&ro6->ro_dst;
2993 dst6->sin6_family = AF_INET6;
2994 dst6->sin6_len = sizeof(*dst6);
2995 dst6->sin6_addr = inp->in6p_faddr;
2996
2997 /*
2998 * If the socket was bound to an interface, then
2999 * the bound-to-interface takes precedence over
3000 * the inbound interface passed in by the caller
3001 * (if we get here as part of the output path then
3002 * input_ifscope is IFSCOPE_NONE).
3003 */
3004 ifscope = (inp->inp_flags & INP_BOUND_IF) ?
3005 inp->inp_boundifp->if_index : input_ifscope;
3006
3007 rtalloc_scoped((struct route *)ro6, ifscope);
3008 if ((rt = ro6->ro_rt) != NULL) {
3009 RT_LOCK(rt);
3010 }
3011 }
3012 }
3013 if (rt != NULL) {
3014 RT_LOCK_ASSERT_HELD(rt);
3015 }
3016
3017 /*
3018 * Update MTU discovery determination. Don't do it if:
3019 * 1) it is disabled via the sysctl
3020 * 2) the route isn't up
3021 * 3) the MTU is locked (if it is, then discovery has been
3022 * disabled)
3023 */
3024
3025 tp = intotcpcb(inp);
3035
3036 if (!path_mtu_discovery || ((rt != NULL) &&
3037 (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU)))) {
3038 tp->t_flags &= ~TF_PMTUD;
3039 } else {
3040 tp->t_flags |= TF_PMTUD;
3041 }
3042
3043 if (rt != NULL && rt->rt_ifp != NULL) {
3044 somultipages(inp->inp_socket,
3045 (rt->rt_ifp->if_hwassist & IFNET_MULTIPAGES));
3046 tcp_set_tso(tp, rt->rt_ifp);
3047 soif2kcl(inp->inp_socket,
3048 (rt->rt_ifp->if_eflags & IFEF_2KCL));
3049 tcp_set_ecn(tp, rt->rt_ifp);
3050 if (inp->inp_last_outifp == NULL) {
3051 inp->inp_last_outifp = rt->rt_ifp;
3052 }
3053
3054 /* Note if the peer is local */
3055 if (!(rt->rt_ifp->if_flags & IFF_POINTOPOINT) &&
3056 (IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr) ||
3057 IN6_IS_ADDR_LINKLOCAL(&inp->in6p_faddr) ||
3058 rt->rt_gateway->sa_family == AF_LINK ||
3059 in6_localaddr(&inp->in6p_faddr))) {
3060 tp->t_flags |= TF_LOCAL;
3061 }
3062 }
3063
3064 /*
3065 * Caller needs to call RT_UNLOCK(rt).
3066 */
3067 return rt;
3068 }
3069 #endif /* INET6 */
3070
3071 #if IPSEC
3072 /* compute ESP/AH header size for TCP, including outer IP header. */
3073 size_t
3074 ipsec_hdrsiz_tcp(struct tcpcb *tp)
3075 {
3076 struct inpcb *inp;
3077 struct mbuf *m;
3078 size_t hdrsiz;
3079 struct ip *ip;
3080 #if INET6
3081 struct ip6_hdr *ip6 = NULL;
3082 #endif /* INET6 */
3083 struct tcphdr *th;
3084
3085 if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL)) {
3086 return 0;
3087 }
3088 MGETHDR(m, M_DONTWAIT, MT_DATA); /* MAC-OK */
3089 if (!m) {
3090 return 0;
3091 }
3092
3093 #if INET6
3094 if ((inp->inp_vflag & INP_IPV6) != 0) {
3095 ip6 = mtod(m, struct ip6_hdr *);
3096 th = (struct tcphdr *)(void *)(ip6 + 1);
3097 m->m_pkthdr.len = m->m_len =
3098 sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
3099 tcp_fillheaders(tp, ip6, th);
3100 hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
3101 } else
3102 #endif /* INET6 */
3103 {
3104 ip = mtod(m, struct ip *);
3105 th = (struct tcphdr *)(ip + 1);
3106 m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
3107 tcp_fillheaders(tp, ip, th);
3108 hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
3109 }
3110 m_free(m);
3111 return hdrsiz;
3112 }
3113 #endif /* IPSEC */
3114
3115 /*
3116 * Return a pointer to the cached information about the remote host.
3117 * The cached information is stored in the protocol specific part of
3118 * the route metrics.
3119 */
3120 struct rmxp_tao *
3121 tcp_gettaocache(struct inpcb *inp)
3122 {
3123 struct rtentry *rt;
3124 struct rmxp_tao *taop;
3125
3126 #if INET6
3127 if ((inp->inp_vflag & INP_IPV6) != 0) {
3128 rt = tcp_rtlookup6(inp, IFSCOPE_NONE);
3129 } else
3130 #endif /* INET6 */
3131 rt = tcp_rtlookup(inp, IFSCOPE_NONE);
3132
3133 /* Make sure this is a host route and is up. */
3134 if (rt == NULL ||
3135 (rt->rt_flags & (RTF_UP | RTF_HOST)) != (RTF_UP | RTF_HOST)) {
3136 /* Route locked during lookup above */
3137 if (rt != NULL) {
3138 RT_UNLOCK(rt);
3139 }
3140 return NULL;
3141 }
3142
3143 taop = rmx_taop(rt->rt_rmx);
3144 /* Route locked during lookup above */
3145 RT_UNLOCK(rt);
3146 return taop;
3147 }
3148
3149 /*
3150 * Clear all the TAO cache entries, called from tcp_init.
3151 *
3152 * XXX
3153 * This routine is just an empty one, because we assume that the
3154 * routing tables are initialized at the same time as TCP, so there is
3155 * nothing left over in the cache.
3156 */
3157 static void
3158 tcp_cleartaocache(void)
3159 {
3160 }
3161
3162 int
3163 tcp_lock(struct socket *so, int refcount, void *lr)
3164 {
3165 void *lr_saved;
3166
3167 if (lr == NULL) {
3168 lr_saved = __builtin_return_address(0);
3169 } else {
3170 lr_saved = lr;
3171 }
3172
3173 retry:
3174 if (so->so_pcb != NULL) {
3175 if (so->so_flags & SOF_MP_SUBFLOW) {
3176 struct mptcb *mp_tp = tptomptp(sototcpcb(so));
3177 struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);
3178
3179 socket_lock(mp_so, refcount);
3180
3181 /*
3182 * Check if we became non-MPTCP while waiting for the lock.
3183 * If yes, we have to retry to grab the right lock.
3184 */
3185 if (!(so->so_flags & SOF_MP_SUBFLOW)) {
3186 socket_unlock(mp_so, refcount);
3187 goto retry;
3188 }
3189 } else {
3190 lck_mtx_lock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
3191
3192 if (so->so_flags & SOF_MP_SUBFLOW) {
3193 /*
3194 * While waiting for the lock, we might have
3195 * become MPTCP-enabled (see mptcp_subflow_socreate).
3196 */
3197 lck_mtx_unlock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
3198 goto retry;
3199 }
3200 }
3201 } else {
3202 panic("tcp_lock: so=%p NO PCB! lr=%p lrh= %s\n",
3203 so, lr_saved, solockhistory_nr(so));
3204 /* NOTREACHED */
3205 }
3206
3207 if (so->so_usecount < 0) {
3208 panic("tcp_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
3209 so, so->so_pcb, lr_saved, so->so_usecount,
3210 solockhistory_nr(so));
3211 /* NOTREACHED */
3212 }
3213 if (refcount) {
3214 so->so_usecount++;
3215 }
3216 so->lock_lr[so->next_lock_lr] = lr_saved;
3217 so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
3218 return 0;
3219 }
3220
3221 int
3222 tcp_unlock(struct socket *so, int refcount, void *lr)
3223 {
3224 void *lr_saved;
3225
3226 if (lr == NULL) {
3227 lr_saved = __builtin_return_address(0);
3228 } else {
3229 lr_saved = lr;
3230 }
3231
3232 #ifdef MORE_TCPLOCK_DEBUG
3233 printf("tcp_unlock: so=0x%llx sopcb=0x%llx lock=0x%llx ref=%x "
3234 "lr=0x%llx\n", (uint64_t)VM_KERNEL_ADDRPERM(so),
3235 (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb),
3236 (uint64_t)VM_KERNEL_ADDRPERM(&(sotoinpcb(so)->inpcb_mtx)),
3237 so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
3238 #endif
3239 if (refcount) {
3240 so->so_usecount--;
3241 }
3242
3243 if (so->so_usecount < 0) {
3244 panic("tcp_unlock: so=%p usecount=%x lrh= %s\n",
3245 so, so->so_usecount, solockhistory_nr(so));
3246 /* NOTREACHED */
3247 }
3248 if (so->so_pcb == NULL) {
3249 panic("tcp_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
3250 so, so->so_usecount, lr_saved, solockhistory_nr(so));
3251 /* NOTREACHED */
3252 } else {
3253 so->unlock_lr[so->next_unlock_lr] = lr_saved;
3254 so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
3255
3256 if (so->so_flags & SOF_MP_SUBFLOW) {
3257 struct mptcb *mp_tp = tptomptp(sototcpcb(so));
3258 struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);
3259
3260 socket_lock_assert_owned(mp_so);
3261
3262 socket_unlock(mp_so, refcount);
3263 } else {
3264 LCK_MTX_ASSERT(&((struct inpcb *)so->so_pcb)->inpcb_mtx,
3265 LCK_MTX_ASSERT_OWNED);
3266 lck_mtx_unlock(&((struct inpcb *)so->so_pcb)->inpcb_mtx);
3267 }
3268 }
3269 return 0;
3270 }
3271
3272 lck_mtx_t *
3273 tcp_getlock(struct socket *so, int flags)
3274 {
3275 struct inpcb *inp = sotoinpcb(so);
3276
3277 if (so->so_pcb) {
3278 if (so->so_usecount < 0) {
3279 panic("tcp_getlock: so=%p usecount=%x lrh= %s\n",
3280 so, so->so_usecount, solockhistory_nr(so));
3281 }
3282
3283 if (so->so_flags & SOF_MP_SUBFLOW) {
3284 struct mptcb *mp_tp = tptomptp(sototcpcb(so));
3285 struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);
3286
3287 return mp_so->so_proto->pr_getlock(mp_so, flags);
3288 } else {
3289 return &inp->inpcb_mtx;
3290 }
3291 } else {
3292 panic("tcp_getlock: so=%p NULL so_pcb %s\n",
3293 so, solockhistory_nr(so));
3294 return so->so_proto->pr_domain->dom_mtx;
3295 }
3296 }
3297
3298 /*
3299 * Determine if we can grow the receive socket buffer to avoid sending
3300 * a zero window update to the peer. We allow even socket buffers that
3301 * have fixed size (set by the application) to grow if the resource
3302 * constraints are met. They will also be trimmed after the application
3303 * reads data.
3304 */
3305 static void
3306 tcp_sbrcv_grow_rwin(struct tcpcb *tp, struct sockbuf *sb)
3307 {
3308 u_int32_t rcvbufinc = tp->t_maxseg << 4;
3309 u_int32_t rcvbuf = sb->sb_hiwat;
3310 struct socket *so = tp->t_inpcb->inp_socket;
3311
3312 if (tcp_recv_bg == 1 || IS_TCP_RECV_BG(so)) {
3313 return;
3314 }
3315 /*
3316 * If message delivery is enabled, do not count
3317 * unordered bytes in receive buffer towards hiwat
3318 */
3319 if (so->so_flags & SOF_ENABLE_MSGS) {
3320 rcvbuf = rcvbuf - so->so_msg_state->msg_uno_bytes;
3321 }
3322
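/*
 * Grow the receive buffer by up to 16 * MSS (rcvbufinc) only when
 * auto-tuning is enabled, the buffer is allowed to grow, the link is
 * not marked slow, extended background idle mode is not wanted, the
 * remaining space has fallen below the increment, and the buffer is
 * still below both tcp_autorcvbuf_max and its ideal size plus the
 * increment.
 */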
3323 if (tcp_do_autorcvbuf == 1 &&
3324 tcp_cansbgrow(sb) &&
3325 (tp->t_flags & TF_SLOWLINK) == 0 &&
3326 (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) == 0 &&
3327 (rcvbuf - sb->sb_cc) < rcvbufinc &&
3328 rcvbuf < tcp_autorcvbuf_max &&
3329 (sb->sb_idealsize > 0 &&
3330 sb->sb_hiwat <= (sb->sb_idealsize + rcvbufinc))) {
3331 sbreserve(sb,
3332 min((sb->sb_hiwat + rcvbufinc), tcp_autorcvbuf_max));
3333 }
3334 }
3335
3336 int32_t
3337 tcp_sbspace(struct tcpcb *tp)
3338 {
3339 struct socket *so = tp->t_inpcb->inp_socket;
3340 struct sockbuf *sb = &so->so_rcv;
3341 u_int32_t rcvbuf;
3342 int32_t space;
3343 int32_t pending = 0;
3344
3345 if (so->so_flags & SOF_MP_SUBFLOW) {
3346 /* We still need to grow TCP's buffer to have a BDP-estimate */
3347 tcp_sbrcv_grow_rwin(tp, sb);
3348
3349 return mptcp_sbspace(tptomptp(tp));
3350 }
3351
3352 tcp_sbrcv_grow_rwin(tp, sb);
3353
3354 /* hiwat might have changed */
3355 rcvbuf = sb->sb_hiwat;
3356
3357 /*
3358 * If message delivery is enabled, do not count
3359 * unordered bytes in receive buffer towards hiwat mark.
3360 * This value is used to return correct rwnd that does
3361 * not reflect the extra unordered bytes added to the
3362 * receive socket buffer.
3363 */
3364 if (so->so_flags & SOF_ENABLE_MSGS) {
3365 rcvbuf = rcvbuf - so->so_msg_state->msg_uno_bytes;
3366 }
3367
3368 space = ((int32_t) imin((rcvbuf - sb->sb_cc),
3369 (sb->sb_mbmax - sb->sb_mbcnt)));
3370 if (space < 0) {
3371 space = 0;
3372 }
3373
3374 #if CONTENT_FILTER
3375 /* Compensate for data being processed by content filters */
3376 pending = cfil_sock_data_space(sb);
3377 #endif /* CONTENT_FILTER */
3378 if (pending > space) {
3379 space = 0;
3380 } else {
3381 space -= pending;
3382 }
3383
3384 /*
3385 * Avoid increasing the window size if the current window
3386 * is already very low; we could be in "persist" mode and
3387 * could break some apps (see rdar://5409343).
3388 */
3389
3390 if (space < tp->t_maxseg) {
3391 return space;
3392 }
3393
3394 /* Clip window size for slower link */
3395
3396 if (((tp->t_flags & TF_SLOWLINK) != 0) && slowlink_wsize > 0) {
3397 return imin(space, slowlink_wsize);
3398 }
3399
3400 return space;
3401 }
3402 /*
3403 * Checks TCP Segment Offloading capability for a given connection
3404 * and interface pair.
3405 */
3406 void
3407 tcp_set_tso(struct tcpcb *tp, struct ifnet *ifp)
3408 {
3409 #if INET6
3410 struct inpcb *inp;
3411 int isipv6;
3412 #endif /* INET6 */
3413 #if MPTCP
3414 /*
3415 * We can't use TSO if this tcpcb belongs to an MPTCP session.
3416 */
3417 if (tp->t_mpflags & TMPF_MPTCP_TRUE) {
3418 tp->t_flags &= ~TF_TSO;
3419 return;
3420 }
3421 #endif
3422 #if INET6
3423 inp = tp->t_inpcb;
3424 isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
3425
3426 if (isipv6) {
3427 if (ifp && (ifp->if_hwassist & IFNET_TSO_IPV6)) {
3428 tp->t_flags |= TF_TSO;
3429 if (ifp->if_tso_v6_mtu != 0) {
3430 tp->tso_max_segment_size = ifp->if_tso_v6_mtu;
3431 } else {
3432 tp->tso_max_segment_size = TCP_MAXWIN;
3433 }
3434 } else {
3435 tp->t_flags &= ~TF_TSO;
3436 }
3437 } else
3438 #endif /* INET6 */
3439
3440 {
3441 if (ifp && (ifp->if_hwassist & IFNET_TSO_IPV4)) {
3442 tp->t_flags |= TF_TSO;
3443 if (ifp->if_tso_v4_mtu != 0) {
3444 tp->tso_max_segment_size = ifp->if_tso_v4_mtu;
3445 } else {
3446 tp->tso_max_segment_size = TCP_MAXWIN;
3447 }
3448 } else {
3449 tp->t_flags &= ~TF_TSO;
3450 }
3451 }
3452 }
3453
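/*
 * Convert an elapsed timeval into TCP clock ticks; the TCP clock runs at
 * TCP_RETRANSHZ ticks per second (TCP_RETRANSHZ_TO_USEC microseconds per
 * tick).
 */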
3454 #define TIMEVAL_TO_TCPHZ(_tv_) ((_tv_).tv_sec * TCP_RETRANSHZ + \
3455 (_tv_).tv_usec / TCP_RETRANSHZ_TO_USEC)
3456
3457 /*
3458 * Function to calculate the tcp clock. The tcp clock will get updated
3459 * at the boundaries of the tcp layer. This is done at 3 places:
3460 * 1. Right before processing an input tcp packet
3461 * 2. Whenever a connection wants to access the network using tcp_usrreqs
3462 * 3. When a tcp timer fires or before tcp slow timeout
3463 *
3464 */
3465
3466 void
3467 calculate_tcp_clock(void)
3468 {
3469 struct timeval tv = tcp_uptime;
3470 struct timeval interval = {.tv_sec = 0, .tv_usec = TCP_RETRANSHZ_TO_USEC};
3471 struct timeval now, hold_now;
3472 uint32_t incr = 0;
3473
3474 microuptime(&now);
3475
3476 /*
3477 * Update coarse-grained networking timestamp (in sec.); the idea
3478 * is to update the counter returnable via net_uptime() when
3479 * we read time.
3480 */
3481 net_update_uptime_with_time(&now);
3482
3483 timevaladd(&tv, &interval);
3484 if (timevalcmp(&now, &tv, >)) {
3485 /* time to update the clock */
3486 lck_spin_lock(tcp_uptime_lock);
3487 if (timevalcmp(&tcp_uptime, &now, >=)) {
3488 /* clock got updated while waiting for the lock */
3489 lck_spin_unlock(tcp_uptime_lock);
3490 return;
3491 }
3492
3493 microuptime(&now);
3494 hold_now = now;
3495 tv = tcp_uptime;
3496 timevalsub(&now, &tv);
3497
3498 incr = TIMEVAL_TO_TCPHZ(now);
3499 if (incr > 0) {
3500 tcp_uptime = hold_now;
3501 tcp_now += incr;
3502 }
3503
3504 lck_spin_unlock(tcp_uptime_lock);
3505 }
3506 }
3507
3508 /*
3509 * Compute receive window scaling that we are going to request
3510 * for this connection based on sb_hiwat. Try to leave some
3511 * room to potentially increase the window size up to a maximum
3512 * defined by the constant tcp_autorcvbuf_max.
3513 */
3514 void
3515 tcp_set_max_rwinscale(struct tcpcb *tp, struct socket *so, struct ifnet *ifp)
3516 {
3517 uint32_t maxsockbufsize;
3518 uint32_t rcvbuf_max;
3519
3520 if (!tcp_do_rfc1323) {
3521 tp->request_r_scale = 0;
3522 return;
3523 }
3524
3525 /*
3526 * When we start a connection and don't know about the interface, set
3527 * the scaling factor simply to the max - we can always announce less.
3528 */
3529 if (!ifp || (IFNET_IS_CELLULAR(ifp) && (ifp->if_eflags & IFEF_3CA))) {
3530 rcvbuf_max = (tcp_autorcvbuf_max << 1);
3531 } else {
3532 rcvbuf_max = tcp_autorcvbuf_max;
3533 }
3534
3535 tp->request_r_scale = max(tcp_win_scale, tp->request_r_scale);
3536 maxsockbufsize = ((so->so_rcv.sb_flags & SB_USRSIZE) != 0) ?
3537 so->so_rcv.sb_hiwat : rcvbuf_max;
3538
3539 while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
3540 (TCP_MAXWIN << tp->request_r_scale) < maxsockbufsize) {
3541 tp->request_r_scale++;
3542 }
3543 tp->request_r_scale = min(tp->request_r_scale, TCP_MAX_WINSHIFT);
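/*
 * For example (hypothetical numbers): with a 4 MiB maxsockbufsize the
 * loop above settles on a scale of 7, since TCP_MAXWIN << 6 is still
 * below 4 MiB while TCP_MAXWIN << 7 is not.
 */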
3544 }
3545
3546 int
3547 tcp_notsent_lowat_check(struct socket *so)
3548 {
3549 struct inpcb *inp = sotoinpcb(so);
3550 struct tcpcb *tp = NULL;
3551 int notsent = 0;
3552
3553 if (inp != NULL) {
3554 tp = intotcpcb(inp);
3555 }
3556
3557 if (tp == NULL) {
3558 return 0;
3559 }
3560
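/*
 * sb_cc counts both unsent data and data that is still in flight;
 * subtracting the in-flight amount (snd_nxt - snd_una) leaves the
 * number of bytes that have not been sent yet.
 */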
3561 notsent = so->so_snd.sb_cc -
3562 (tp->snd_nxt - tp->snd_una);
3563
3564 /*
3565 * When we send a FIN or SYN, not_sent can be negative.
3566 * In that case we also need to send a write event to the
3567 * process if it is waiting. In the FIN case, it will
3568 * get an error from send because cantsendmore will be set.
3569 */
3570 if (notsent <= tp->t_notsent_lowat) {
3571 return 1;
3572 }
3573
3574 /*
3575 * When Nagle's algorithm is not disabled, it is better
3576 * to wake up the client until there is at least one
3577 * maxseg of data to write.
3578 */
3579 if ((tp->t_flags & TF_NODELAY) == 0 &&
3580 notsent > 0 && notsent < tp->t_maxseg) {
3581 return 1;
3582 }
3583 return 0;
3584 }
3585
3586 void
3587 tcp_rxtseg_insert(struct tcpcb *tp, tcp_seq start, tcp_seq end)
3588 {
3589 struct tcp_rxt_seg *rxseg = NULL, *prev = NULL, *next = NULL;
3590 u_int32_t rxcount = 0;
3591
3592 if (SLIST_EMPTY(&tp->t_rxt_segments)) {
3593 tp->t_dsack_lastuna = tp->snd_una;
3594 }
3595 /*
3596 * First check if there is a segment already existing for this
3597 * sequence space.
3598 */
3599
3600 SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
3601 if (SEQ_GT(rxseg->rx_start, start)) {
3602 break;
3603 }
3604 prev = rxseg;
3605 }
3606 next = rxseg;
3607
3608 /* check if prev seg is for this sequence */
3609 if (prev != NULL && SEQ_LEQ(prev->rx_start, start) &&
3610 SEQ_GEQ(prev->rx_end, end)) {
3611 prev->rx_count++;
3612 return;
3613 }
3614
3615 /*
3616 * There are a couple of possibilities at this point.
3617 * 1. prev overlaps with the beginning of this sequence
3618 * 2. next overlaps with the end of this sequence
3619 * 3. there is no overlap.
3620 */
3621
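/*
 * When a neighbour overlaps, either the new range is shrunk and the
 * neighbour's retransmit count is bumped, or the neighbour is trimmed
 * and its count is carried over (via rxcount) into the new entry
 * created below.
 */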
3622 if (prev != NULL && SEQ_GT(prev->rx_end, start)) {
3623 if (prev->rx_start == start && SEQ_GT(end, prev->rx_end)) {
3624 start = prev->rx_end + 1;
3625 prev->rx_count++;
3626 } else {
3627 prev->rx_end = (start - 1);
3628 rxcount = prev->rx_count;
3629 }
3630 }
3631
3632 if (next != NULL && SEQ_LT(next->rx_start, end)) {
3633 if (SEQ_LEQ(next->rx_end, end)) {
3634 end = next->rx_start - 1;
3635 next->rx_count++;
3636 } else {
3637 next->rx_start = end + 1;
3638 rxcount = next->rx_count;
3639 }
3640 }
3641 if (!SEQ_LT(start, end)) {
3642 return;
3643 }
3644
3645 rxseg = (struct tcp_rxt_seg *) zalloc(tcp_rxt_seg_zone);
3646 if (rxseg == NULL) {
3647 return;
3648 }
3649 bzero(rxseg, sizeof(*rxseg));
3650 rxseg->rx_start = start;
3651 rxseg->rx_end = end;
3652 rxseg->rx_count = rxcount + 1;
3653
3654 if (prev != NULL) {
3655 SLIST_INSERT_AFTER(prev, rxseg, rx_link);
3656 } else {
3657 SLIST_INSERT_HEAD(&tp->t_rxt_segments, rxseg, rx_link);
3658 }
3659 }
3660
3661 struct tcp_rxt_seg *
3662 tcp_rxtseg_find(struct tcpcb *tp, tcp_seq start, tcp_seq end)
3663 {
3664 struct tcp_rxt_seg *rxseg;
3665 if (SLIST_EMPTY(&tp->t_rxt_segments)) {
3666 return NULL;
3667 }
3668
3669 SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
3670 if (SEQ_LEQ(rxseg->rx_start, start) &&
3671 SEQ_GEQ(rxseg->rx_end, end)) {
3672 return rxseg;
3673 }
3674 if (SEQ_GT(rxseg->rx_start, start)) {
3675 break;
3676 }
3677 }
3678 return NULL;
3679 }
3680
3681 void
3682 tcp_rxtseg_clean(struct tcpcb *tp)
3683 {
3684 struct tcp_rxt_seg *rxseg, *next;
3685
3686 SLIST_FOREACH_SAFE(rxseg, &tp->t_rxt_segments, rx_link, next) {
3687 SLIST_REMOVE(&tp->t_rxt_segments, rxseg,
3688 tcp_rxt_seg, rx_link);
3689 zfree(tcp_rxt_seg_zone, rxseg);
3690 }
3691 tp->t_dsack_lastuna = tp->snd_max;
3692 }
3693
3694 boolean_t
3695 tcp_rxtseg_detect_bad_rexmt(struct tcpcb *tp, tcp_seq th_ack)
3696 {
3697 boolean_t bad_rexmt;
3698 struct tcp_rxt_seg *rxseg;
3699
3700 if (SLIST_EMPTY(&tp->t_rxt_segments)) {
3701 return FALSE;
3702 }
3703
3704 /*
3705 * If not all of the segments in this window have been cumulatively
3706 * acknowledged, there can still be undetected packet loss.
3707 * Do not restore the congestion window in that case.
3708 */
3709 if (SEQ_LT(th_ack, tp->snd_recover)) {
3710 return FALSE;
3711 }
3712
3713 bad_rexmt = TRUE;
3714 SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
3715 if (rxseg->rx_count > 1 ||
3716 !(rxseg->rx_flags & TCP_RXT_SPURIOUS)) {
3717 bad_rexmt = FALSE;
3718 break;
3719 }
3720 }
3721 return bad_rexmt;
3722 }
3723
3724 boolean_t
3725 tcp_rxtseg_dsack_for_tlp(struct tcpcb *tp)
3726 {
3727 boolean_t dsack_for_tlp = FALSE;
3728 struct tcp_rxt_seg *rxseg;
3729 if (SLIST_EMPTY(&tp->t_rxt_segments)) {
3730 return FALSE;
3731 }
3732
3733 SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
3734 if (rxseg->rx_count == 1 &&
3735 SLIST_NEXT(rxseg, rx_link) == NULL &&
3736 (rxseg->rx_flags & TCP_RXT_DSACK_FOR_TLP)) {
3737 dsack_for_tlp = TRUE;
3738 break;
3739 }
3740 }
3741 return dsack_for_tlp;
3742 }
3743
3744 u_int32_t
3745 tcp_rxtseg_total_size(struct tcpcb *tp)
3746 {
3747 struct tcp_rxt_seg *rxseg;
3748 u_int32_t total_size = 0;
3749
3750 SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
3751 total_size += (rxseg->rx_end - rxseg->rx_start) + 1;
3752 }
3753 return total_size;
3754 }
3755
3756 void
3757 tcp_get_connectivity_status(struct tcpcb *tp,
3758 struct tcp_conn_status *connstatus)
3759 {
3760 if (tp == NULL || connstatus == NULL) {
3761 return;
3762 }
3763 bzero(connstatus, sizeof(*connstatus));
3764 if (tp->t_rxtshift >= TCP_CONNECTIVITY_PROBES_MAX) {
3765 if (TCPS_HAVEESTABLISHED(tp->t_state)) {
3766 connstatus->write_probe_failed = 1;
3767 } else {
3768 connstatus->conn_probe_failed = 1;
3769 }
3770 }
3771 if (tp->t_rtimo_probes >= TCP_CONNECTIVITY_PROBES_MAX) {
3772 connstatus->read_probe_failed = 1;
3773 }
3774 if (tp->t_inpcb != NULL && tp->t_inpcb->inp_last_outifp != NULL &&
3775 (tp->t_inpcb->inp_last_outifp->if_eflags & IFEF_PROBE_CONNECTIVITY)) {
3776 connstatus->probe_activated = 1;
3777 }
3778 }
3779
3780 boolean_t
3781 tfo_enabled(const struct tcpcb *tp)
3782 {
3783 return (tp->t_flagsext & TF_FASTOPEN) ? TRUE : FALSE;
3784 }
3785
3786 void
3787 tcp_disable_tfo(struct tcpcb *tp)
3788 {
3789 tp->t_flagsext &= ~TF_FASTOPEN;
3790 }
3791
3792 static struct mbuf *
3793 tcp_make_keepalive_frame(struct tcpcb *tp, struct ifnet *ifp,
3794 boolean_t is_probe)
3795 {
3796 struct inpcb *inp = tp->t_inpcb;
3797 struct tcphdr *th;
3798 u_int8_t *data;
3799 int win = 0;
3800 struct mbuf *m;
3801
3802 /*
3803 * The code assumes the IP + TCP headers fit in an mbuf packet header
3804 */
3805 _CASSERT(sizeof(struct ip) + sizeof(struct tcphdr) <= _MHLEN);
3806 _CASSERT(sizeof(struct ip6_hdr) + sizeof(struct tcphdr) <= _MHLEN);
3807
3808 MGETHDR(m, M_WAIT, MT_HEADER);
3809 if (m == NULL) {
3810 return NULL;
3811 }
3812 m->m_pkthdr.pkt_proto = IPPROTO_TCP;
3813
3814 data = mbuf_datastart(m);
3815
3816 if (inp->inp_vflag & INP_IPV4) {
3817 bzero(data, sizeof(struct ip) + sizeof(struct tcphdr));
3818 th = (struct tcphdr *)(void *) (data + sizeof(struct ip));
3819 m->m_len = sizeof(struct ip) + sizeof(struct tcphdr);
3820 m->m_pkthdr.len = m->m_len;
3821 } else {
3822 VERIFY(inp->inp_vflag & INP_IPV6);
3823
3824 bzero(data, sizeof(struct ip6_hdr)
3825 + sizeof(struct tcphdr));
3826 th = (struct tcphdr *)(void *)(data + sizeof(struct ip6_hdr));
3827 m->m_len = sizeof(struct ip6_hdr) +
3828 sizeof(struct tcphdr);
3829 m->m_pkthdr.len = m->m_len;
3830 }
3831
3832 tcp_fillheaders(tp, data, th);
3833
3834 if (inp->inp_vflag & INP_IPV4) {
3835 struct ip *ip;
3836
3837 ip = (__typeof__(ip))(void *)data;
3838
3839 ip->ip_id = rfc6864 ? 0 : ip_randomid();
3840 ip->ip_off = htons(IP_DF);
3841 ip->ip_len = htons(sizeof(struct ip) + sizeof(struct tcphdr));
3842 ip->ip_ttl = inp->inp_ip_ttl;
3843 ip->ip_tos |= (inp->inp_ip_tos & ~IPTOS_ECN_MASK);
3844 ip->ip_sum = in_cksum_hdr(ip);
3845 } else {
3846 struct ip6_hdr *ip6;
3847
3848 ip6 = (__typeof__(ip6))(void *)data;
3849
3850 ip6->ip6_plen = htons(sizeof(struct tcphdr));
3851 ip6->ip6_hlim = in6_selecthlim(inp, ifp);
3852 ip6->ip6_flow = ip6->ip6_flow & ~IPV6_FLOW_ECN_MASK;
3853
3854 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
3855 ip6->ip6_src.s6_addr16[1] = 0;
3856 }
3857 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
3858 ip6->ip6_dst.s6_addr16[1] = 0;
3859 }
3860 }
3861 th->th_flags = TH_ACK;
3862
3863 win = tcp_sbspace(tp);
3864 if (win > ((int32_t)TCP_MAXWIN << tp->rcv_scale)) {
3865 win = (int32_t)TCP_MAXWIN << tp->rcv_scale;
3866 }
3867 th->th_win = htons((u_short) (win >> tp->rcv_scale));
3868
3869 if (is_probe) {
3870 th->th_seq = htonl(tp->snd_una - 1);
3871 } else {
3872 th->th_seq = htonl(tp->snd_una);
3873 }
3874 th->th_ack = htonl(tp->rcv_nxt);
3875
3876 /* Recompute the TCP checksum from scratch so the header carries the final value */
3877 th->th_sum = 0;
3878 if (inp->inp_vflag & INP_IPV4) {
3879 th->th_sum = inet_cksum(m, IPPROTO_TCP,
3880 sizeof(struct ip), sizeof(struct tcphdr));
3881 } else {
3882 th->th_sum = inet6_cksum(m, IPPROTO_TCP,
3883 sizeof(struct ip6_hdr), sizeof(struct tcphdr));
3884 }
3885
3886 return m;
3887 }
3888
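/*
 * Descriptive comment (added for clarity): walk the TCP PCB list and, for
 * every established connection on ifp that requested keepalive offload,
 * fill one ifnet_keepalive_offload_frame with the addresses, ports, timer
 * values, the probe packet to transmit and the reply to send when a probe
 * is received.  used_frames_count is updated to the next free index in
 * frames_array.
 */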
3889 void
3890 tcp_fill_keepalive_offload_frames(ifnet_t ifp,
3891 struct ifnet_keepalive_offload_frame *frames_array,
3892 u_int32_t frames_array_count, size_t frame_data_offset,
3893 u_int32_t *used_frames_count)
3894 {
3895 struct inpcb *inp;
3896 inp_gen_t gencnt;
3897 u_int32_t frame_index = *used_frames_count;
3898
3899 if (ifp == NULL || frames_array == NULL ||
3900 frames_array_count == 0 ||
3901 frame_index >= frames_array_count ||
3902 frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
3903 return;
3904 }
3905
3906 /*
3907 * This function is called outside the regular TCP processing path,
3908 * so the TCP clock needs to be updated here.
3909 */
3910 calculate_tcp_clock();
3911
3912 lck_rw_lock_shared(tcbinfo.ipi_lock);
3913 gencnt = tcbinfo.ipi_gencnt;
3914 LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
3915 struct socket *so;
3916 struct ifnet_keepalive_offload_frame *frame;
3917 struct mbuf *m = NULL;
3918 struct tcpcb *tp = intotcpcb(inp);
3919
3920 if (frame_index >= frames_array_count) {
3921 break;
3922 }
3923
3924 if (inp->inp_gencnt > gencnt ||
3925 inp->inp_state == INPCB_STATE_DEAD) {
3926 continue;
3927 }
3928
3929 if ((so = inp->inp_socket) == NULL ||
3930 (so->so_state & SS_DEFUNCT)) {
3931 continue;
3932 }
3933 /*
3934 * Check for the keepalive offload flag without taking the
3935 * socket lock to avoid a deadlock
3936 */
3937 if (!(inp->inp_flags2 & INP2_KEEPALIVE_OFFLOAD)) {
3938 continue;
3939 }
3940
3941 if (!(inp->inp_vflag & (INP_IPV4 | INP_IPV6))) {
3942 continue;
3943 }
3944 if (inp->inp_ppcb == NULL ||
3945 in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
3946 continue;
3947 }
3948 socket_lock(so, 1);
3949 /* Release the want count */
3950 if (inp->inp_ppcb == NULL ||
3951 (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING)) {
3952 socket_unlock(so, 1);
3953 continue;
3954 }
3955 if ((inp->inp_vflag & INP_IPV4) &&
3956 (inp->inp_laddr.s_addr == INADDR_ANY ||
3957 inp->inp_faddr.s_addr == INADDR_ANY)) {
3958 socket_unlock(so, 1);
3959 continue;
3960 }
3961 if ((inp->inp_vflag & INP_IPV6) &&
3962 (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) ||
3963 IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr))) {
3964 socket_unlock(so, 1);
3965 continue;
3966 }
3967 if (inp->inp_lport == 0 || inp->inp_fport == 0) {
3968 socket_unlock(so, 1);
3969 continue;
3970 }
3971 if (inp->inp_last_outifp == NULL ||
3972 inp->inp_last_outifp->if_index != ifp->if_index) {
3973 socket_unlock(so, 1);
3974 continue;
3975 }
3976 if ((inp->inp_vflag & INP_IPV4) && frame_data_offset +
3977 sizeof(struct ip) + sizeof(struct tcphdr) >
3978 IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
3979 socket_unlock(so, 1);
3980 continue;
3981 } else if (!(inp->inp_vflag & INP_IPV4) && frame_data_offset +
3982 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) >
3983 IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
3984 socket_unlock(so, 1);
3985 continue;
3986 }
3987 /*
3988 * There is no point in waking up the device for connections
3989 * that are not established. Long-lived connections are meant
3990 * for processes that will send and receive data.
3991 */
3992 if (tp->t_state != TCPS_ESTABLISHED) {
3993 socket_unlock(so, 1);
3994 continue;
3995 }
3996 /*
3997 * This inp has all the information that is needed to
3998 * generate an offload frame.
3999 */
4000 frame = &frames_array[frame_index];
4001 frame->type = IFNET_KEEPALIVE_OFFLOAD_FRAME_TCP;
4002 frame->ether_type = (inp->inp_vflag & INP_IPV4) ?
4003 IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4 :
4004 IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV6;
4005 frame->interval = tp->t_keepidle > 0 ? tp->t_keepidle :
4006 tcp_keepidle;
4007 frame->keep_cnt = TCP_CONN_KEEPCNT(tp);
4008 frame->keep_retry = TCP_CONN_KEEPINTVL(tp);
4009 if (so->so_options & SO_NOWAKEFROMSLEEP) {
4010 frame->flags |=
4011 IFNET_KEEPALIVE_OFFLOAD_FLAG_NOWAKEFROMSLEEP;
4012 }
4013 frame->local_port = ntohs(inp->inp_lport);
4014 frame->remote_port = ntohs(inp->inp_fport);
4015 frame->local_seq = tp->snd_nxt;
4016 frame->remote_seq = tp->rcv_nxt;
4017 if (inp->inp_vflag & INP_IPV4) {
4018 frame->length = frame_data_offset +
4019 sizeof(struct ip) + sizeof(struct tcphdr);
4020 frame->reply_length = frame->length;
4021
4022 frame->addr_length = sizeof(struct in_addr);
4023 bcopy(&inp->inp_laddr, frame->local_addr,
4024 sizeof(struct in_addr));
4025 bcopy(&inp->inp_faddr, frame->remote_addr,
4026 sizeof(struct in_addr));
4027 } else {
4028 struct in6_addr *ip6;
4029
4030 frame->length = frame_data_offset +
4031 sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
4032 frame->reply_length = frame->length;
4033
4034 frame->addr_length = sizeof(struct in6_addr);
4035 ip6 = (struct in6_addr *)(void *)frame->local_addr;
4036 bcopy(&inp->in6p_laddr, ip6, sizeof(struct in6_addr));
4037 if (IN6_IS_SCOPE_EMBED(ip6)) {
4038 ip6->s6_addr16[1] = 0;
4039 }
4040
4041 ip6 = (struct in6_addr *)(void *)frame->remote_addr;
4042 bcopy(&inp->in6p_faddr, ip6, sizeof(struct in6_addr));
4043 if (IN6_IS_SCOPE_EMBED(ip6)) {
4044 ip6->s6_addr16[1] = 0;
4045 }
4046 }
4047
4048 /*
4049 * First the probe
4050 */
4051 m = tcp_make_keepalive_frame(tp, ifp, TRUE);
4052 if (m == NULL) {
4053 socket_unlock(so, 1);
4054 continue;
4055 }
4056 bcopy(m->m_data, frame->data + frame_data_offset,
4057 m->m_len);
4058 m_freem(m);
4059
4060 /*
4061 * Now the response packet to incoming probes
4062 */
4063 m = tcp_make_keepalive_frame(tp, ifp, FALSE);
4064 if (m == NULL) {
4065 socket_unlock(so, 1);
4066 continue;
4067 }
4068 bcopy(m->m_data, frame->reply_data + frame_data_offset,
4069 m->m_len);
4070 m_freem(m);
4071
4072 frame_index++;
4073 socket_unlock(so, 1);
4074 }
4075 lck_rw_done(tcbinfo.ipi_lock);
4076 *used_frames_count = frame_index;
4077 }
4078
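/*
 * Descriptive comment (added for clarity): check whether the PCB still
 * belongs to ifp and whether its address and port tuple matches the given
 * keepalive offload frame.  The want count acquired by the caller is
 * released here.
 */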
4079 static bool
4080 inp_matches_kao_frame(ifnet_t ifp, struct ifnet_keepalive_offload_frame *frame,
4081 struct inpcb *inp)
4082 {
4083 if (inp->inp_ppcb == NULL) {
4084 return false;
4085 }
4086 /* Release the want count */
4087 if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
4088 return false;
4089 }
4090 if (inp->inp_last_outifp == NULL ||
4091 inp->inp_last_outifp->if_index != ifp->if_index) {
4092 return false;
4093 }
4094 if (frame->local_port != ntohs(inp->inp_lport) ||
4095 frame->remote_port != ntohs(inp->inp_fport)) {
4096 return false;
4097 }
4098 if (inp->inp_vflag & INP_IPV4) {
4099 if (memcmp(&inp->inp_laddr, frame->local_addr,
4100 sizeof(struct in_addr)) != 0 ||
4101 memcmp(&inp->inp_faddr, frame->remote_addr,
4102 sizeof(struct in_addr)) != 0) {
4103 return false;
4104 }
4105 } else if (inp->inp_vflag & INP_IPV6) {
4106 if (memcmp(&inp->inp_laddr, frame->local_addr,
4107 sizeof(struct in6_addr)) != 0 ||
4108 memcmp(&inp->inp_faddr, frame->remote_addr,
4109 sizeof(struct in6_addr)) != 0) {
4110 return false;
4111 }
4112 } else {
4113 return false;
4114 }
4115 return true;
4116 }
4117
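/*
 * Descriptive comment (added for clarity): handle a keepalive offload
 * timeout reported for a frame.  Locate the matching connection and drop
 * it with ETIMEDOUT, mirroring what the in-kernel keepalive timer does.
 */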
4118 int
4119 tcp_notify_kao_timeout(ifnet_t ifp,
4120 struct ifnet_keepalive_offload_frame *frame)
4121 {
4122 struct inpcb *inp = NULL;
4123 struct socket *so = NULL;
4124 bool found = false;
4125
4126 /*
4127 * Unlock the list before posting the event on the matching socket
4128 */
4129 lck_rw_lock_shared(tcbinfo.ipi_lock);
4130
4131 LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
4132 if ((so = inp->inp_socket) == NULL ||
4133 (so->so_state & SS_DEFUNCT)) {
4134 continue;
4135 }
4136 if (!(inp->inp_flags2 & INP2_KEEPALIVE_OFFLOAD)) {
4137 continue;
4138 }
4139 if (!(inp->inp_vflag & (INP_IPV4 | INP_IPV6))) {
4140 continue;
4141 }
4142 if (inp->inp_ppcb == NULL ||
4143 in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
4144 continue;
4145 }
4146 socket_lock(so, 1);
4147 if (inp_matches_kao_frame(ifp, frame, inp)) {
4148 /*
4149 * Keep the matching socket locked
4150 */
4151 found = true;
4152 break;
4153 }
4154 socket_unlock(so, 1);
4155 }
4156 lck_rw_done(tcbinfo.ipi_lock);
4157
4158 if (found) {
4159 ASSERT(inp != NULL);
4160 ASSERT(so != NULL);
4161 ASSERT(so == inp->inp_socket);
4162 /*
4163 * Drop the TCP connection like tcptimers() does
4164 */
4165 struct tcpcb *tp = inp->inp_ppcb;
4166
4167 tcpstat.tcps_keepdrops++;
4168 postevent(so, 0, EV_TIMEOUT);
4169 soevent(so,
4170 (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));
4171 tp = tcp_drop(tp, ETIMEDOUT);
4172
4173 tcpstat.tcps_ka_offload_drops++;
4174 os_log_info(OS_LOG_DEFAULT, "%s: dropped lport %u fport %u\n",
4175 __func__, frame->local_port, frame->remote_port);
4176
4177 socket_unlock(so, 1);
4178 }
4179
4180 return 0;
4181 }
4182
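/*
 * Descriptive comment (added for clarity): validate a notification id
 * before a marker is added.  There must be data in the send buffer, and
 * neither the id nor the marker position (the current end of the send
 * buffer) may duplicate an existing marker.
 */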
4183 errno_t
4184 tcp_notify_ack_id_valid(struct tcpcb *tp, struct socket *so,
4185 u_int32_t notify_id)
4186 {
4187 struct tcp_notify_ack_marker *elm;
4188
4189 if (so->so_snd.sb_cc == 0) {
4190 return ENOBUFS;
4191 }
4192
4193 SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) {
4194 /* Duplicate id is not allowed */
4195 if (elm->notify_id == notify_id) {
4196 return EINVAL;
4197 }
4198 /* Duplicate position is not allowed */
4199 if (elm->notify_snd_una == tp->snd_una + so->so_snd.sb_cc) {
4200 return EINVAL;
4201 }
4202 }
4203 return 0;
4204 }
4205
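/*
 * Descriptive comment (added for clarity): allocate a marker for the
 * sequence number corresponding to the current end of the send buffer and
 * link it into the connection's notify-ack list.
 */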
4206 errno_t
4207 tcp_add_notify_ack_marker(struct tcpcb *tp, u_int32_t notify_id)
4208 {
4209 struct tcp_notify_ack_marker *nm, *elm = NULL;
4210 struct socket *so = tp->t_inpcb->inp_socket;
4211
4212 MALLOC(nm, struct tcp_notify_ack_marker *, sizeof(*nm),
4213 M_TEMP, M_WAIT | M_ZERO);
4214 if (nm == NULL) {
4215 return ENOMEM;
4216 }
4217 nm->notify_id = notify_id;
4218 nm->notify_snd_una = tp->snd_una + so->so_snd.sb_cc;
4219
4220 SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) {
4221 if (SEQ_GT(nm->notify_snd_una, elm->notify_snd_una)) {
4222 break;
4223 }
4224 }
4225
4226 if (elm == NULL) {
4227 VERIFY(SLIST_EMPTY(&tp->t_notify_ack));
4228 SLIST_INSERT_HEAD(&tp->t_notify_ack, nm, notify_next);
4229 } else {
4230 SLIST_INSERT_AFTER(elm, nm, notify_next);
4231 }
4232 tp->t_notify_ack_count++;
4233 return 0;
4234 }
4235
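/*
 * Descriptive comment (added for clarity): release every notify-ack marker
 * attached to the connection and reset the marker count.
 */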
4236 void
4237 tcp_notify_ack_free(struct tcpcb *tp)
4238 {
4239 struct tcp_notify_ack_marker *elm, *next;
4240 if (SLIST_EMPTY(&tp->t_notify_ack)) {
4241 return;
4242 }
4243
4244 SLIST_FOREACH_SAFE(elm, &tp->t_notify_ack, notify_next, next) {
4245 SLIST_REMOVE(&tp->t_notify_ack, elm, tcp_notify_ack_marker,
4246 notify_next);
4247 FREE(elm, M_TEMP);
4248 }
4249 SLIST_INIT(&tp->t_notify_ack);
4250 tp->t_notify_ack_count = 0;
4251 }
4252
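/*
 * Descriptive comment (added for clarity): post SO_FILT_HINT_NOTIFY_ACK
 * once the first marker on the list is covered by the cumulative ACK.
 * The caller is expected to ensure the marker list is not empty.
 */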
4253 inline void
4254 tcp_notify_acknowledgement(struct tcpcb *tp, struct socket *so)
4255 {
4256 struct tcp_notify_ack_marker *elm;
4257
4258 elm = SLIST_FIRST(&tp->t_notify_ack);
4259 if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
4260 soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_NOTIFY_ACK);
4261 }
4262 }
4263
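/*
 * Descriptive comment (added for clarity): count how many markers have been
 * acknowledged so far and how many are still pending; the completed count
 * reported to the caller is capped at TCP_MAX_NOTIFY_ACK.
 */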
4264 void
4265 tcp_get_notify_ack_count(struct tcpcb *tp,
4266 struct tcp_notify_ack_complete *retid)
4267 {
4268 struct tcp_notify_ack_marker *elm;
4269 size_t complete = 0;
4270
4271 SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) {
4272 if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
4273 complete++;
4274 } else {
4275 break;
4276 }
4277 }
4278 retid->notify_pending = tp->t_notify_ack_count - complete;
4279 retid->notify_complete_count = min(TCP_MAX_NOTIFY_ACK, complete);
4280 }
4281
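/*
 * Descriptive comment (added for clarity): copy out the ids of acknowledged
 * markers, up to retid->notify_complete_count entries, removing each one
 * from the list as it is reported.
 */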
4282 void
4283 tcp_get_notify_ack_ids(struct tcpcb *tp,
4284 struct tcp_notify_ack_complete *retid)
4285 {
4286 size_t i = 0;
4287 struct tcp_notify_ack_marker *elm, *next;
4288
4289 SLIST_FOREACH_SAFE(elm, &tp->t_notify_ack, notify_next, next) {
4290 if (i >= retid->notify_complete_count) {
4291 break;
4292 }
4293 if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
4294 retid->notify_complete_id[i++] = elm->notify_id;
4295 SLIST_REMOVE(&tp->t_notify_ack, elm,
4296 tcp_notify_ack_marker, notify_next);
4297 FREE(elm, M_TEMP);
4298 tp->t_notify_ack_count--;
4299 } else {
4300 break;
4301 }
4302 }
4303 }
4304
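/*
 * Descriptive comment (added for clarity): return true when a TCP stream
 * socket has at least one notify-ack marker that has already been
 * acknowledged and is ready to be delivered.
 */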
4305 bool
4306 tcp_notify_ack_active(struct socket *so)
4307 {
4308 if ((SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) &&
4309 SOCK_TYPE(so) == SOCK_STREAM) {
4310 struct tcpcb *tp = intotcpcb(sotoinpcb(so));
4311
4312 if (!SLIST_EMPTY(&tp->t_notify_ack)) {
4313 struct tcp_notify_ack_marker *elm;
4314 elm = SLIST_FIRST(&tp->t_notify_ack);
4315 if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) {
4316 return true;
4317 }
4318 }
4319 }
4320 return false;
4321 }
4322
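/*
 * Descriptive comment (added for clarity): given the cumulative ACK th_ack,
 * return how many bytes sitting in the send buffer have not been
 * transmitted yet; a FIN that consumed a sequence number is excluded from
 * the sent-byte count.  Returns 0 unless the socket tracks send-byte counts.
 */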
4323 inline int32_t
4324 inp_get_sndbytes_allunsent(struct socket *so, u_int32_t th_ack)
4325 {
4326 struct inpcb *inp = sotoinpcb(so);
4327 struct tcpcb *tp = intotcpcb(inp);
4328
4329 if ((so->so_snd.sb_flags & SB_SNDBYTE_CNT) &&
4330 so->so_snd.sb_cc > 0) {
4331 int32_t unsent, sent;
4332 sent = tp->snd_max - th_ack;
4333 if (tp->t_flags & TF_SENTFIN) {
4334 sent--;
4335 }
4336 unsent = so->so_snd.sb_cc - sent;
4337 return unsent;
4338 }
4339 return 0;
4340 }
4341
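/*
 * Descriptive comment (added for clarity): helpers for
 * tcp_update_stats_per_flow().  IFP_PER_FLOW_STAT bumps the IPv4 or IPv6
 * variant of a per-interface counter, and FLOW_ECN_ENABLED tests whether
 * ECN was successfully negotiated on the flow.
 */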
4342 #define IFP_PER_FLOW_STAT(_ipv4_, _stat_) { \
4343 if (_ipv4_) { \
4344 ifp->if_ipv4_stat->_stat_++; \
4345 } else { \
4346 ifp->if_ipv6_stat->_stat_++; \
4347 } \
4348 }
4349
4350 #define FLOW_ECN_ENABLED(_flags_) \
4351 (((_flags_) & (TE_ECN_ON)) == (TE_ECN_ON))
4352
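/*
 * Descriptive comment (added for clarity): fold a flow's statistics
 * snapshot into the per-interface counters: ECN negotiation outcome, CE/ECE
 * observations, fallback reasons, the ECN-on/ECN-off performance buckets,
 * and the per-interface stats updated via tcp_flow_lim_stats().
 */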
4353 void
4354 tcp_update_stats_per_flow(struct ifnet_stats_per_flow *ifs,
4355 struct ifnet *ifp)
4356 {
4357 if (ifp == NULL || !IF_FULLY_ATTACHED(ifp)) {
4358 return;
4359 }
4360
4361 ifnet_lock_shared(ifp);
4362 if (ifs->ecn_flags & TE_SETUPSENT) {
4363 if (ifs->ecn_flags & TE_CLIENT_SETUP) {
4364 IFP_PER_FLOW_STAT(ifs->ipv4, ecn_client_setup);
4365 if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
4366 IFP_PER_FLOW_STAT(ifs->ipv4,
4367 ecn_client_success);
4368 } else if (ifs->ecn_flags & TE_LOST_SYN) {
4369 IFP_PER_FLOW_STAT(ifs->ipv4,
4370 ecn_syn_lost);
4371 } else {
4372 IFP_PER_FLOW_STAT(ifs->ipv4,
4373 ecn_peer_nosupport);
4374 }
4375 } else {
4376 IFP_PER_FLOW_STAT(ifs->ipv4, ecn_server_setup);
4377 if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
4378 IFP_PER_FLOW_STAT(ifs->ipv4,
4379 ecn_server_success);
4380 } else if (ifs->ecn_flags & TE_LOST_SYN) {
4381 IFP_PER_FLOW_STAT(ifs->ipv4,
4382 ecn_synack_lost);
4383 } else {
4384 IFP_PER_FLOW_STAT(ifs->ipv4,
4385 ecn_peer_nosupport);
4386 }
4387 }
4388 } else {
4389 IFP_PER_FLOW_STAT(ifs->ipv4, ecn_off_conn);
4390 }
4391 if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
4392 if (ifs->ecn_flags & TE_RECV_ECN_CE) {
4393 tcpstat.tcps_ecn_conn_recv_ce++;
4394 IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_recv_ce);
4395 }
4396 if (ifs->ecn_flags & TE_RECV_ECN_ECE) {
4397 tcpstat.tcps_ecn_conn_recv_ece++;
4398 IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_recv_ece);
4399 }
4400 if (ifs->ecn_flags & (TE_RECV_ECN_CE | TE_RECV_ECN_ECE)) {
4401 if (ifs->txretransmitbytes > 0 ||
4402 ifs->rxoutoforderbytes > 0) {
4403 tcpstat.tcps_ecn_conn_pl_ce++;
4404 IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_plce);
4405 } else {
4406 tcpstat.tcps_ecn_conn_nopl_ce++;
4407 IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_noplce);
4408 }
4409 } else {
4410 if (ifs->txretransmitbytes > 0 ||
4411 ifs->rxoutoforderbytes > 0) {
4412 tcpstat.tcps_ecn_conn_plnoce++;
4413 IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_plnoce);
4414 }
4415 }
4416 }
4417
4418 /* Other stats are interesting for non-local connections only */
4419 if (ifs->local) {
4420 ifnet_lock_done(ifp);
4421 return;
4422 }
4423
4424 if (ifs->ipv4) {
4425 ifp->if_ipv4_stat->timestamp = net_uptime();
4426 if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
4427 tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv4_stat->ecn_on);
4428 } else {
4429 tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv4_stat->ecn_off);
4430 }
4431 } else {
4432 ifp->if_ipv6_stat->timestamp = net_uptime();
4433 if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
4434 tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv6_stat->ecn_on);
4435 } else {
4436 tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv6_stat->ecn_off);
4437 }
4438 }
4439
4440 if (ifs->rxmit_drop) {
4441 if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
4442 IFP_PER_FLOW_STAT(ifs->ipv4, ecn_on.rxmit_drop);
4443 } else {
4444 IFP_PER_FLOW_STAT(ifs->ipv4, ecn_off.rxmit_drop);
4445 }
4446 }
4447 if (ifs->ecn_fallback_synloss) {
4448 IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_synloss);
4449 }
4450 if (ifs->ecn_fallback_droprst) {
4451 IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_droprst);
4452 }
4453 if (ifs->ecn_fallback_droprxmt) {
4454 IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_droprxmt);
4455 }
4456 if (ifs->ecn_fallback_ce) {
4457 IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_ce);
4458 }
4459 if (ifs->ecn_fallback_reorder) {
4460 IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_reorder);
4461 }
4462 if (ifs->ecn_recv_ce > 0) {
4463 IFP_PER_FLOW_STAT(ifs->ipv4, ecn_recv_ce);
4464 }
4465 if (ifs->ecn_recv_ece > 0) {
4466 IFP_PER_FLOW_STAT(ifs->ipv4, ecn_recv_ece);
4467 }
4468
4469 tcp_flow_lim_stats(ifs, &ifp->if_lim_stat);
4470 ifnet_lock_done(ifp);
4471 }