/*
 * Copyright (c) 2015-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* TCP-cache to store and retrieve TCP-related information */

#include <net/flowhash.h>
#include <net/route.h>
#include <net/necp.h>
#include <netinet/in_pcb.h>
#include <netinet/mptcp_var.h>
#include <netinet/tcp_cache.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>
#include <kern/locks.h>
#include <sys/queue.h>
#include <dev/random/randomdev.h>

typedef union {
	struct in_addr addr;
	struct in6_addr addr6;
} in_4_6_addr;

struct tcp_heuristic_key {
	union {
		uint8_t thk_net_signature[IFNET_SIGNATURELEN];
		in_4_6_addr thk_ip;
	};
	sa_family_t thk_family;
};

struct tcp_heuristic {
	SLIST_ENTRY(tcp_heuristic) list;

	uint32_t th_last_access;

	struct tcp_heuristic_key th_key;

	char th_val_start[0]; /* Marker for memsetting to 0 */

	uint8_t th_tfo_data_loss; /* The number of times a SYN+data has been lost */
	uint8_t th_tfo_req_loss; /* The number of times a SYN+cookie-req has been lost */
	uint8_t th_tfo_data_rst; /* The number of times a SYN+data has received a RST */
	uint8_t th_tfo_req_rst; /* The number of times a SYN+cookie-req has received a RST */
	uint8_t th_mptcp_loss; /* The number of times a SYN+MP_CAPABLE has been lost */
	uint8_t th_mptcp_success; /* The number of times MPTCP-negotiation has been successful */
	uint8_t th_ecn_loss; /* The number of times a SYN+ecn has been lost */
	uint8_t th_ecn_aggressive; /* The number of times we did an aggressive fallback */
	uint8_t th_ecn_droprst; /* The number of times ECN connections received a RST after first data pkt */
	uint8_t th_ecn_droprxmt; /* The number of times ECN connection is dropped after multiple retransmits */
	uint8_t th_ecn_synrst; /* number of times RST was received in response to an ECN enabled SYN */
	uint32_t th_tfo_enabled_time; /* The moment when we reenabled TFO after backing off */
	uint32_t th_tfo_backoff_until; /* Time until when we should not try out TFO */
	uint32_t th_tfo_backoff; /* Current backoff timer */
	uint32_t th_mptcp_backoff; /* Time until when we should not try out MPTCP */
	uint32_t th_ecn_backoff; /* Time until when we should not try out ECN */

	uint8_t th_tfo_in_backoff:1, /* Are we avoiding TFO due to the backoff timer? */
	    th_mptcp_in_backoff:1, /* Are we avoiding MPTCP due to the backoff timer? */
	    th_mptcp_heuristic_disabled:1; /* Are heuristics disabled? */

	char th_val_end[0]; /* Marker for memsetting to 0 */
};
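
/*
 * Note on th_val_start/th_val_end: the two zero-length arrays bracket the
 * per-destination counters so that recycling an entry can wipe all of them
 * with a single bzero() of (th_val_end - th_val_start) bytes, without
 * touching the list linkage, timestamp, or key above the first marker.
 * A minimal sketch of the same pattern (illustrative only, not part of this
 * file's build):
 */
#if 0
struct example_entry {
	uint64_t kept_key;   /* survives a reset */
	char val_start[0];   /* first byte to clear */
	uint32_t counter_a;
	uint32_t counter_b;
	char val_end[0];     /* one past the last byte to clear */
};

static void
example_reset(struct example_entry *e)
{
	/* Clears counter_a and counter_b, leaves kept_key intact */
	bzero(e->val_start, e->val_end - e->val_start);
}
#endif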

struct tcp_heuristics_head {
	SLIST_HEAD(tcp_heur_bucket, tcp_heuristic) tcp_heuristics;

	/* Per-hashbucket lock to avoid lock-contention */
	lck_mtx_t thh_mtx;
};

struct tcp_cache_key {
	sa_family_t tck_family;

	struct tcp_heuristic_key tck_src;
	in_4_6_addr tck_dst;
};

struct tcp_cache {
	SLIST_ENTRY(tcp_cache) list;

	uint32_t tc_last_access;

	struct tcp_cache_key tc_key;

	uint8_t tc_tfo_cookie[TFO_COOKIE_LEN_MAX];
	uint8_t tc_tfo_cookie_len;
};

struct tcp_cache_head {
	SLIST_HEAD(tcp_cache_bucket, tcp_cache) tcp_caches;

	/* Per-hashbucket lock to avoid lock-contention */
	lck_mtx_t tch_mtx;
};

struct tcp_cache_key_src {
	struct ifnet *ifp;
	in_4_6_addr laddr;
	in_4_6_addr faddr;
	int af;
};

static uint32_t tcp_cache_hash_seed;

size_t tcp_cache_size;

/*
 * The maximum depth of the hash-bucket. This way we limit the tcp_cache to
 * TCP_CACHE_BUCKET_SIZE * tcp_cache_size and have "natural" garbage collection
 */
#define TCP_CACHE_BUCKET_SIZE 5

static struct tcp_cache_head *tcp_cache;

decl_lck_mtx_data(, tcp_cache_mtx);

static lck_attr_t *tcp_cache_mtx_attr;
static lck_grp_t *tcp_cache_mtx_grp;
static lck_grp_attr_t *tcp_cache_mtx_grp_attr;

static struct tcp_heuristics_head *tcp_heuristics;

decl_lck_mtx_data(, tcp_heuristics_mtx);

static lck_attr_t *tcp_heuristic_mtx_attr;
static lck_grp_t *tcp_heuristic_mtx_grp;
static lck_grp_attr_t *tcp_heuristic_mtx_grp_attr;

static uint32_t tcp_backoff_maximum = 65536;

SYSCTL_UINT(_net_inet_tcp, OID_AUTO, backoff_maximum, CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_backoff_maximum, 0, "Maximum time for which we won't try TFO");

static uint32_t tcp_ecn_timeout = 60;

SYSCTL_UINT(_net_inet_tcp, OID_AUTO, ecn_timeout, CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_ecn_timeout, 60, "Initial minutes to wait before re-trying ECN");

static int disable_tcp_heuristics = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, disable_tcp_heuristics, CTLFLAG_RW | CTLFLAG_LOCKED,
    &disable_tcp_heuristics, 0, "Set to 1 to disable all TCP heuristics (TFO, ECN, MPTCP)");

static uint32_t
tcp_min_to_hz(uint32_t minutes)
{
	if (minutes > 65536) {
		return (uint32_t)65536 * 60 * TCP_RETRANSHZ;
	}

	return minutes * 60 * TCP_RETRANSHZ;
}
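
/*
 * Rough bounds check (illustrative sketch; assumes TCP_RETRANSHZ == 100):
 * the clamp above caps the result at 65536 * 60 * 100 = 393,216,000 ticks,
 * comfortably below UINT32_MAX (~4.29e9), so the multiplication cannot
 * overflow even for sysctl-supplied minute values.
 */
#if 0
static void
example_min_to_hz_bounds(void)
{
	uint32_t capped = tcp_min_to_hz(UINT32_MAX); /* == 393216000 */
	uint32_t hour = tcp_min_to_hz(60);           /* == 360000 ticks */
	(void)capped;
	(void)hour;
}
#endif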

/*
 * This number is coupled with tcp_ecn_timeout, because we want to prevent
 * integer overflow. We still need an inexpensive way to prevent integer
 * overflow while allowing a dynamic sysctl.
 */
#define TCP_CACHE_OVERFLOW_PROTECT 9

/* Number of SYN-losses we accept */
#define TFO_MAX_COOKIE_LOSS 2
#define ECN_MAX_SYN_LOSS 2
#define MPTCP_MAX_SYN_LOSS 2
#define MPTCP_SUCCESS_TRIGGER 10
#define ECN_MAX_DROPRST 1
#define ECN_MAX_DROPRXMT 4
#define ECN_MAX_SYNRST 4

/* Flags for setting/unsetting loss-heuristics, limited to 4 bytes */
#define TCPCACHE_F_TFO_REQ       0x01
#define TCPCACHE_F_TFO_DATA      0x02
#define TCPCACHE_F_ECN           0x04
#define TCPCACHE_F_MPTCP         0x08
#define TCPCACHE_F_ECN_DROPRST   0x10
#define TCPCACHE_F_ECN_DROPRXMT  0x20
#define TCPCACHE_F_TFO_REQ_RST   0x40
#define TCPCACHE_F_TFO_DATA_RST  0x80
#define TCPCACHE_F_ECN_SYNRST    0x100

/* Always retry ECN after backing off to this level for some heuristics */
#define ECN_RETRY_LIMIT 9
5ba3f43e
A
208#define TCP_CACHE_INC_IFNET_STAT(_ifp_, _af_, _stat_) { \
209 if ((_ifp_) != NULL) { \
0a7de745
A
210 if ((_af_) == AF_INET6) { \
211 (_ifp_)->if_ipv6_stat->_stat_++;\
212 } else { \
213 (_ifp_)->if_ipv4_stat->_stat_++;\
214 }\
5ba3f43e
A
215 }\
216}
217

/*
 * Round up to next higher power-of-2. See "Bit Twiddling Hacks".
 *
 * Might be worth moving this to a library so that others
 * (e.g., scale_to_powerof2()) can use this as well instead of a while-loop.
 */
static uint32_t
tcp_cache_roundup2(uint32_t a)
{
	a--;
	a |= a >> 1;
	a |= a >> 2;
	a |= a >> 4;
	a |= a >> 8;
	a |= a >> 16;
	a++;

	return a;
}
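
/*
 * Worked example (illustrative): tcp_cache_roundup2(600) first decrements to
 * 599 (0b10 0101 0111), then the shift/OR cascade smears the top bit across
 * all lower positions, giving 0b11 1111 1111 (1023); the final increment
 * yields 1024. Keeping tcp_cache_size a power of two is what lets the hash
 * functions below reduce a 32-bit flowhash with a cheap mask,
 * hash & (tcp_cache_size - 1), instead of a modulo.
 */
#if 0
static void
example_roundup2(void)
{
	VERIFY(tcp_cache_roundup2(600) == 1024);
	VERIFY(tcp_cache_roundup2(1024) == 1024); /* exact powers map to themselves */
	VERIFY(tcp_cache_roundup2(1) == 1);
}
#endif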

static void
tcp_cache_hash_src(struct tcp_cache_key_src *tcks, struct tcp_heuristic_key *key)
{
	struct ifnet *ifp = tcks->ifp;
	uint8_t len = sizeof(key->thk_net_signature);
	uint16_t flags;

	if (tcks->af == AF_INET6) {
		int ret;

		key->thk_family = AF_INET6;
		ret = ifnet_get_netsignature(ifp, AF_INET6, &len, &flags,
		    key->thk_net_signature);

		/*
		 * ifnet_get_netsignature only returns EINVAL if ifp is NULL
		 * (we made sure that in the other cases it does not). So,
		 * in this case we should take the connection's address.
		 */
		if (ret == ENOENT || ret == EINVAL) {
			memcpy(&key->thk_ip.addr6, &tcks->laddr.addr6, sizeof(struct in6_addr));
		}
	} else {
		int ret;

		key->thk_family = AF_INET;
		ret = ifnet_get_netsignature(ifp, AF_INET, &len, &flags,
		    key->thk_net_signature);

		/*
		 * ifnet_get_netsignature only returns EINVAL if ifp is NULL
		 * (we made sure that in the other cases it does not). So,
		 * in this case we should take the connection's address.
		 */
		if (ret == ENOENT || ret == EINVAL) {
			memcpy(&key->thk_ip.addr, &tcks->laddr.addr, sizeof(struct in_addr));
		}
	}
}

static uint16_t
tcp_cache_hash(struct tcp_cache_key_src *tcks, struct tcp_cache_key *key)
{
	uint32_t hash;

	bzero(key, sizeof(struct tcp_cache_key));

	tcp_cache_hash_src(tcks, &key->tck_src);

	if (tcks->af == AF_INET6) {
		key->tck_family = AF_INET6;
		memcpy(&key->tck_dst.addr6, &tcks->faddr.addr6,
		    sizeof(struct in6_addr));
	} else {
		key->tck_family = AF_INET;
		memcpy(&key->tck_dst.addr, &tcks->faddr.addr,
		    sizeof(struct in_addr));
	}

	hash = net_flowhash(key, sizeof(struct tcp_cache_key),
	    tcp_cache_hash_seed);

	return (uint16_t)(hash & (tcp_cache_size - 1));
}

static void
tcp_cache_unlock(struct tcp_cache_head *head)
{
	lck_mtx_unlock(&head->tch_mtx);
}

/*
 * Make sure that everything that happens after tcp_getcache_with_lock()
 * is short enough to justify that you hold the per-bucket lock!!!
 *
 * Otherwise, better build another lookup-function that does not hold the
 * lock and you copy out the bits and bytes.
 *
 * That's why we provide the head as a "return"-pointer so that the caller
 * can give it back to us for tcp_cache_unlock().
 */
static struct tcp_cache *
tcp_getcache_with_lock(struct tcp_cache_key_src *tcks,
    int create, struct tcp_cache_head **headarg)
{
	struct tcp_cache *tpcache = NULL;
	struct tcp_cache_head *head;
	struct tcp_cache_key key;
	uint16_t hash;
	int i = 0;

	hash = tcp_cache_hash(tcks, &key);
	head = &tcp_cache[hash];

	lck_mtx_lock(&head->tch_mtx);

	/*** First step: Look for the tcp_cache in our bucket ***/
	SLIST_FOREACH(tpcache, &head->tcp_caches, list) {
		if (memcmp(&tpcache->tc_key, &key, sizeof(key)) == 0) {
			break;
		}

		i++;
	}

	/*** Second step: If it's not there, create/recycle it ***/
	if ((tpcache == NULL) && create) {
		if (i >= TCP_CACHE_BUCKET_SIZE) {
			struct tcp_cache *oldest_cache = NULL;
			uint32_t max_age = 0;

			/* Look for the oldest tcp_cache in the bucket */
			SLIST_FOREACH(tpcache, &head->tcp_caches, list) {
				uint32_t age = tcp_now - tpcache->tc_last_access;
				if (age > max_age) {
					max_age = age;
					oldest_cache = tpcache;
				}
			}
			VERIFY(oldest_cache != NULL);

			tpcache = oldest_cache;

			/* We recycle, thus let's indicate that there is no cookie */
			tpcache->tc_tfo_cookie_len = 0;
		} else {
			/* Create a new cache and add it to the list */
			tpcache = _MALLOC(sizeof(struct tcp_cache), M_TEMP,
			    M_NOWAIT | M_ZERO);
			if (tpcache == NULL) {
				os_log_error(OS_LOG_DEFAULT, "%s could not allocate cache", __func__);
				goto out_null;
			}

			SLIST_INSERT_HEAD(&head->tcp_caches, tpcache, list);
		}

		memcpy(&tpcache->tc_key, &key, sizeof(key));
	}

	if (tpcache == NULL) {
		goto out_null;
	}

	/* Update timestamp for garbage collection purposes */
	tpcache->tc_last_access = tcp_now;
	*headarg = head;

	return tpcache;

out_null:
	tcp_cache_unlock(head);
	return NULL;
}
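
/*
 * Typical call pattern (illustrative sketch, not an actual call site): the
 * helper returns with the bucket mutex held, so the caller must keep the
 * critical section short and hand the head back for unlocking.
 */
#if 0
static void
example_touch_cache(struct tcp_cache_key_src *tcks)
{
	struct tcp_cache_head *head;
	struct tcp_cache *tpcache;

	tpcache = tcp_getcache_with_lock(tcks, 1, &head);
	if (tpcache == NULL) {
		return; /* lock already dropped on the failure path */
	}

	/* ... read/update a few fields, nothing that can block ... */

	tcp_cache_unlock(head);
}
#endif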

static void
tcp_cache_key_src_create(struct tcpcb *tp, struct tcp_cache_key_src *tcks)
{
	struct inpcb *inp = tp->t_inpcb;
	memset(tcks, 0, sizeof(*tcks));

	tcks->ifp = inp->inp_last_outifp;

	if (inp->inp_vflag & INP_IPV6) {
		memcpy(&tcks->laddr.addr6, &inp->in6p_laddr, sizeof(struct in6_addr));
		memcpy(&tcks->faddr.addr6, &inp->in6p_faddr, sizeof(struct in6_addr));
		tcks->af = AF_INET6;
	} else {
		memcpy(&tcks->laddr.addr, &inp->inp_laddr, sizeof(struct in_addr));
		memcpy(&tcks->faddr.addr, &inp->inp_faddr, sizeof(struct in_addr));
		tcks->af = AF_INET;
	}

	return;
}

static void
tcp_cache_set_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, uint8_t len)
{
	struct tcp_cache_head *head;
	struct tcp_cache *tpcache;

	/* Call lookup/create function */
	tpcache = tcp_getcache_with_lock(tcks, 1, &head);
	if (tpcache == NULL) {
		return;
	}

	tpcache->tc_tfo_cookie_len = len > TFO_COOKIE_LEN_MAX ?
	    TFO_COOKIE_LEN_MAX : len;
	memcpy(tpcache->tc_tfo_cookie, cookie, tpcache->tc_tfo_cookie_len);

	tcp_cache_unlock(head);
}

void
tcp_cache_set_cookie(struct tcpcb *tp, u_char *cookie, uint8_t len)
{
	struct tcp_cache_key_src tcks;

	tcp_cache_key_src_create(tp, &tcks);
	tcp_cache_set_cookie_common(&tcks, cookie, len);
}

static int
tcp_cache_get_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, uint8_t *len)
{
	struct tcp_cache_head *head;
	struct tcp_cache *tpcache;

	/* Call lookup/create function */
	tpcache = tcp_getcache_with_lock(tcks, 1, &head);
	if (tpcache == NULL) {
		return 0;
	}

	if (tpcache->tc_tfo_cookie_len == 0) {
		tcp_cache_unlock(head);
		return 0;
	}

	/*
	 * Not enough space - this should never happen as it has been checked
	 * in tcp_tfo_check. So, fail here!
	 */
	VERIFY(tpcache->tc_tfo_cookie_len <= *len);

	memcpy(cookie, tpcache->tc_tfo_cookie, tpcache->tc_tfo_cookie_len);
	*len = tpcache->tc_tfo_cookie_len;

	tcp_cache_unlock(head);

	return 1;
}

/*
 * Get the cookie related to 'tp', and copy it into 'cookie', provided that len
 * is big enough (len designates the available memory).
 * Upon return, 'len' is set to the cookie's length.
 *
 * Returns 0 if we should request a cookie.
 * Returns 1 if the cookie has been found and written.
 */
int
tcp_cache_get_cookie(struct tcpcb *tp, u_char *cookie, uint8_t *len)
{
	struct tcp_cache_key_src tcks;

	tcp_cache_key_src_create(tp, &tcks);
	return tcp_cache_get_cookie_common(&tcks, cookie, len);
}
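
/*
 * Illustrative use from connection setup (a sketch, not the actual call
 * sites): a TFO client asks for a cached cookie before building its SYN and
 * stores whatever the peer handed out once the handshake completes.
 */
#if 0
static void
example_tfo_cookie_roundtrip(struct tcpcb *tp)
{
	u_char cookie[TFO_COOKIE_LEN_MAX];
	uint8_t len = sizeof(cookie);

	if (tcp_cache_get_cookie(tp, cookie, &len)) {
		/* len bytes of cookie -> send SYN+data with it */
	} else {
		/* no cookie cached -> send a plain cookie request */
	}

	/* later, on receiving a cookie from the server: */
	/* tcp_cache_set_cookie(tp, received_cookie, received_len); */
}
#endif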

static unsigned int
tcp_cache_get_cookie_len_common(struct tcp_cache_key_src *tcks)
{
	struct tcp_cache_head *head;
	struct tcp_cache *tpcache;
	unsigned int cookie_len;

	/* Call lookup/create function */
	tpcache = tcp_getcache_with_lock(tcks, 1, &head);
	if (tpcache == NULL) {
		return 0;
	}

	cookie_len = tpcache->tc_tfo_cookie_len;

	tcp_cache_unlock(head);

	return cookie_len;
}

unsigned int
tcp_cache_get_cookie_len(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;

	tcp_cache_key_src_create(tp, &tcks);
	return tcp_cache_get_cookie_len_common(&tcks);
}

static uint16_t
tcp_heuristics_hash(struct tcp_cache_key_src *tcks, struct tcp_heuristic_key *key)
{
	uint32_t hash;

	bzero(key, sizeof(struct tcp_heuristic_key));

	tcp_cache_hash_src(tcks, key);

	hash = net_flowhash(key, sizeof(struct tcp_heuristic_key),
	    tcp_cache_hash_seed);

	return (uint16_t)(hash & (tcp_cache_size - 1));
}

static void
tcp_heuristic_unlock(struct tcp_heuristics_head *head)
{
	lck_mtx_unlock(&head->thh_mtx);
}

/*
 * Make sure that everything that happens after tcp_getheuristic_with_lock()
 * is short enough to justify that you hold the per-bucket lock!!!
 *
 * Otherwise, better build another lookup-function that does not hold the
 * lock and you copy out the bits and bytes.
 *
 * That's why we provide the head as a "return"-pointer so that the caller
 * can give it back to us for tcp_heuristic_unlock().
 *
 *
 * ToDo - way too much code-duplication. We should create an interface to handle
 * bucketized hashtables with recycling of the oldest element.
 */
static struct tcp_heuristic *
tcp_getheuristic_with_lock(struct tcp_cache_key_src *tcks,
    int create, struct tcp_heuristics_head **headarg)
{
	struct tcp_heuristic *tpheur = NULL;
	struct tcp_heuristics_head *head;
	struct tcp_heuristic_key key;
	uint16_t hash;
	int i = 0;

	hash = tcp_heuristics_hash(tcks, &key);
	head = &tcp_heuristics[hash];

	lck_mtx_lock(&head->thh_mtx);

	/*** First step: Look for the tcp_heur in our bucket ***/
	SLIST_FOREACH(tpheur, &head->tcp_heuristics, list) {
		if (memcmp(&tpheur->th_key, &key, sizeof(key)) == 0) {
			break;
		}

		i++;
	}

	/*** Second step: If it's not there, create/recycle it ***/
	if ((tpheur == NULL) && create) {
		if (i >= TCP_CACHE_BUCKET_SIZE) {
			struct tcp_heuristic *oldest_heur = NULL;
			uint32_t max_age = 0;

			/* Look for the oldest tcp_heur in the bucket */
			SLIST_FOREACH(tpheur, &head->tcp_heuristics, list) {
				uint32_t age = tcp_now - tpheur->th_last_access;
				if (age > max_age) {
					max_age = age;
					oldest_heur = tpheur;
				}
			}
			VERIFY(oldest_heur != NULL);

			tpheur = oldest_heur;

			/* We recycle - set everything to 0 */
			bzero(tpheur->th_val_start,
			    tpheur->th_val_end - tpheur->th_val_start);
		} else {
			/* Create a new heuristic and add it to the list */
			tpheur = _MALLOC(sizeof(struct tcp_heuristic), M_TEMP,
			    M_NOWAIT | M_ZERO);
			if (tpheur == NULL) {
				os_log_error(OS_LOG_DEFAULT, "%s could not allocate cache", __func__);
				goto out_null;
			}

			SLIST_INSERT_HEAD(&head->tcp_heuristics, tpheur, list);
		}

		/*
		 * Set to tcp_now, to make sure it won't be > than tcp_now in the
		 * near future.
		 */
		tpheur->th_ecn_backoff = tcp_now;
		tpheur->th_tfo_backoff_until = tcp_now;
		tpheur->th_mptcp_backoff = tcp_now;
		tpheur->th_tfo_backoff = tcp_min_to_hz(tcp_ecn_timeout);

		memcpy(&tpheur->th_key, &key, sizeof(key));
	}

	if (tpheur == NULL) {
		goto out_null;
	}

	/* Update timestamp for garbage collection purposes */
	tpheur->th_last_access = tcp_now;
	*headarg = head;

	return tpheur;

out_null:
	tcp_heuristic_unlock(head);
	return NULL;
}

static void
tcp_heuristic_reset_counters(struct tcp_cache_key_src *tcks, uint8_t flags)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;

	/*
	 * Always create heuristics here because MPTCP needs to write success
	 * into them. Thus, we always end up creating them.
	 */
	tpheur = tcp_getheuristic_with_lock(tcks, 1, &head);
	if (tpheur == NULL) {
		return;
	}

	if (flags & TCPCACHE_F_TFO_DATA) {
		if (tpheur->th_tfo_data_loss >= TFO_MAX_COOKIE_LOSS) {
			os_log(OS_LOG_DEFAULT, "%s: Resetting TFO-data loss to 0 from %u on heur %lx\n",
			    __func__, tpheur->th_tfo_data_loss, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
		}
		tpheur->th_tfo_data_loss = 0;
	}

	if (flags & TCPCACHE_F_TFO_REQ) {
		if (tpheur->th_tfo_req_loss >= TFO_MAX_COOKIE_LOSS) {
			os_log(OS_LOG_DEFAULT, "%s: Resetting TFO-req loss to 0 from %u on heur %lx\n",
			    __func__, tpheur->th_tfo_req_loss, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
		}
		tpheur->th_tfo_req_loss = 0;
	}

	if (flags & TCPCACHE_F_TFO_DATA_RST) {
		if (tpheur->th_tfo_data_rst >= TFO_MAX_COOKIE_LOSS) {
			os_log(OS_LOG_DEFAULT, "%s: Resetting TFO-data RST to 0 from %u on heur %lx\n",
			    __func__, tpheur->th_tfo_data_rst, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
		}
		tpheur->th_tfo_data_rst = 0;
	}

	if (flags & TCPCACHE_F_TFO_REQ_RST) {
		if (tpheur->th_tfo_req_rst >= TFO_MAX_COOKIE_LOSS) {
			os_log(OS_LOG_DEFAULT, "%s: Resetting TFO-req RST to 0 from %u on heur %lx\n",
			    __func__, tpheur->th_tfo_req_rst, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
		}
		tpheur->th_tfo_req_rst = 0;
	}

	if (flags & TCPCACHE_F_ECN) {
		if (tpheur->th_ecn_loss >= ECN_MAX_SYN_LOSS || tpheur->th_ecn_synrst >= ECN_MAX_SYNRST) {
			os_log(OS_LOG_DEFAULT, "%s: Resetting ECN-loss to 0 from %u and synrst from %u on heur %lx\n",
			    __func__, tpheur->th_ecn_loss, tpheur->th_ecn_synrst, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
		}
		tpheur->th_ecn_loss = 0;
		tpheur->th_ecn_synrst = 0;
	}

	if (flags & TCPCACHE_F_MPTCP) {
		tpheur->th_mptcp_loss = 0;
		if (tpheur->th_mptcp_success < MPTCP_SUCCESS_TRIGGER) {
			tpheur->th_mptcp_success++;

			if (tpheur->th_mptcp_success == MPTCP_SUCCESS_TRIGGER) {
				os_log(mptcp_log_handle, "%s disabling heuristics for 12 hours", __func__);
				tpheur->th_mptcp_heuristic_disabled = 1;
				/* Disable heuristics for 12 hours */
				tpheur->th_mptcp_backoff = tcp_now + tcp_min_to_hz(tcp_ecn_timeout * 12);
			}
		}
	}

	tcp_heuristic_unlock(head);
}
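
/*
 * Arithmetic behind the "12 hours" above (illustrative): with the default
 * tcp_ecn_timeout of 60 minutes, tcp_min_to_hz(60 * 12) converts 720 minutes
 * into retransmit-timer ticks, i.e. 12 hours during which MPTCP heuristics
 * stay disabled after MPTCP_SUCCESS_TRIGGER (10) successful negotiations.
 * Note that the window scales with the ecn_timeout sysctl, so "12 hours"
 * assumes the default setting.
 */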

void
tcp_heuristic_tfo_success(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;
	uint8_t flag = 0;

	tcp_cache_key_src_create(tp, &tcks);

	if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) {
		flag = (TCPCACHE_F_TFO_DATA | TCPCACHE_F_TFO_REQ |
		    TCPCACHE_F_TFO_DATA_RST | TCPCACHE_F_TFO_REQ_RST);
	}
	if (tp->t_tfo_stats & TFO_S_COOKIE_REQ) {
		flag = (TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_REQ_RST);
	}

	tcp_heuristic_reset_counters(&tcks, flag);
}

void
tcp_heuristic_mptcp_success(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;

	tcp_cache_key_src_create(tp, &tcks);
	tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_MPTCP);
}

void
tcp_heuristic_ecn_success(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;

	tcp_cache_key_src_create(tp, &tcks);
	tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_ECN);
}

static void
__tcp_heuristic_tfo_middlebox_common(struct tcp_heuristic *tpheur)
{
	if (tpheur->th_tfo_in_backoff) {
		return;
	}

	tpheur->th_tfo_in_backoff = 1;

	if (tpheur->th_tfo_enabled_time) {
		uint32_t old_backoff = tpheur->th_tfo_backoff;

		tpheur->th_tfo_backoff -= (tcp_now - tpheur->th_tfo_enabled_time);
		if (tpheur->th_tfo_backoff > old_backoff) {
			tpheur->th_tfo_backoff = tcp_min_to_hz(tcp_ecn_timeout);
		}
	}

	tpheur->th_tfo_backoff_until = tcp_now + tpheur->th_tfo_backoff;

	/* Then, increase the backoff time */
	tpheur->th_tfo_backoff *= 2;

	if (tpheur->th_tfo_backoff > tcp_min_to_hz(tcp_backoff_maximum)) {
		tpheur->th_tfo_backoff = tcp_min_to_hz(tcp_ecn_timeout);
	}

	os_log(OS_LOG_DEFAULT, "%s disable TFO until %u now %u on %lx\n", __func__,
	    tpheur->th_tfo_backoff_until, tcp_now, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
}
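
/*
 * Backoff progression (illustrative, default tcp_ecn_timeout of 60 minutes):
 * successive middlebox detections double th_tfo_backoff, so TFO is paused
 * for roughly 1h, 2h, 4h, ... until the doubled value would exceed
 * tcp_backoff_maximum, at which point it wraps back to the initial 1h.
 * Time spent successfully running TFO (since th_tfo_enabled_time) is
 * credited against the next pause; if that subtraction underflows, the
 * backoff restarts from the initial value as well.
 */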

static void
tcp_heuristic_tfo_middlebox_common(struct tcp_cache_key_src *tcks)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;

	tpheur = tcp_getheuristic_with_lock(tcks, 1, &head);
	if (tpheur == NULL) {
		return;
	}

	__tcp_heuristic_tfo_middlebox_common(tpheur);

	tcp_heuristic_unlock(head);
}

static void
tcp_heuristic_inc_counters(struct tcp_cache_key_src *tcks,
    uint32_t flags)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;

	tpheur = tcp_getheuristic_with_lock(tcks, 1, &head);
	if (tpheur == NULL) {
		return;
	}

	/* Limit to prevent integer-overflow during exponential backoff */
	if ((flags & TCPCACHE_F_TFO_DATA) && tpheur->th_tfo_data_loss < TCP_CACHE_OVERFLOW_PROTECT) {
		tpheur->th_tfo_data_loss++;

		if (tpheur->th_tfo_data_loss >= TFO_MAX_COOKIE_LOSS) {
			__tcp_heuristic_tfo_middlebox_common(tpheur);
		}
	}

	if ((flags & TCPCACHE_F_TFO_REQ) && tpheur->th_tfo_req_loss < TCP_CACHE_OVERFLOW_PROTECT) {
		tpheur->th_tfo_req_loss++;

		if (tpheur->th_tfo_req_loss >= TFO_MAX_COOKIE_LOSS) {
			__tcp_heuristic_tfo_middlebox_common(tpheur);
		}
	}

	if ((flags & TCPCACHE_F_TFO_DATA_RST) && tpheur->th_tfo_data_rst < TCP_CACHE_OVERFLOW_PROTECT) {
		tpheur->th_tfo_data_rst++;

		if (tpheur->th_tfo_data_rst >= TFO_MAX_COOKIE_LOSS) {
			__tcp_heuristic_tfo_middlebox_common(tpheur);
		}
	}

	if ((flags & TCPCACHE_F_TFO_REQ_RST) && tpheur->th_tfo_req_rst < TCP_CACHE_OVERFLOW_PROTECT) {
		tpheur->th_tfo_req_rst++;

		if (tpheur->th_tfo_req_rst >= TFO_MAX_COOKIE_LOSS) {
			__tcp_heuristic_tfo_middlebox_common(tpheur);
		}
	}

	if ((flags & TCPCACHE_F_ECN) &&
	    tpheur->th_ecn_loss < TCP_CACHE_OVERFLOW_PROTECT &&
	    TSTMP_LEQ(tpheur->th_ecn_backoff, tcp_now)) {
		tpheur->th_ecn_loss++;
		if (tpheur->th_ecn_loss >= ECN_MAX_SYN_LOSS) {
			tcpstat.tcps_ecn_fallback_synloss++;
			TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af, ecn_fallback_synloss);
			tpheur->th_ecn_backoff = tcp_now +
			    (tcp_min_to_hz(tcp_ecn_timeout) <<
			    (tpheur->th_ecn_loss - ECN_MAX_SYN_LOSS));

			os_log(OS_LOG_DEFAULT, "%s disable ECN until %u now %u on %lx for SYN-loss\n",
			    __func__, tpheur->th_ecn_backoff, tcp_now,
			    (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
		}
	}

	if ((flags & TCPCACHE_F_MPTCP) &&
	    tpheur->th_mptcp_loss < TCP_CACHE_OVERFLOW_PROTECT &&
	    tpheur->th_mptcp_heuristic_disabled == 0) {
		tpheur->th_mptcp_loss++;
		if (tpheur->th_mptcp_loss >= MPTCP_MAX_SYN_LOSS) {
			/*
			 * Yes, we take tcp_ecn_timeout, to avoid adding yet
			 * another sysctl that is just used for testing.
			 */
			tpheur->th_mptcp_backoff = tcp_now +
			    (tcp_min_to_hz(tcp_ecn_timeout) <<
			    (tpheur->th_mptcp_loss - MPTCP_MAX_SYN_LOSS));
			tpheur->th_mptcp_in_backoff = 1;

			os_log(OS_LOG_DEFAULT, "%s disable MPTCP until %u now %u on %lx\n",
			    __func__, tpheur->th_mptcp_backoff, tcp_now,
			    (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
		}
	}

	if ((flags & TCPCACHE_F_ECN_DROPRST) &&
	    tpheur->th_ecn_droprst < TCP_CACHE_OVERFLOW_PROTECT &&
	    TSTMP_LEQ(tpheur->th_ecn_backoff, tcp_now)) {
		tpheur->th_ecn_droprst++;
		if (tpheur->th_ecn_droprst >= ECN_MAX_DROPRST) {
			tcpstat.tcps_ecn_fallback_droprst++;
			TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af,
			    ecn_fallback_droprst);
			tpheur->th_ecn_backoff = tcp_now +
			    (tcp_min_to_hz(tcp_ecn_timeout) <<
			    (tpheur->th_ecn_droprst - ECN_MAX_DROPRST));

			os_log(OS_LOG_DEFAULT, "%s disable ECN until %u now %u on %lx for drop-RST\n",
			    __func__, tpheur->th_ecn_backoff, tcp_now,
			    (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
		}
	}

	if ((flags & TCPCACHE_F_ECN_DROPRXMT) &&
	    tpheur->th_ecn_droprxmt < TCP_CACHE_OVERFLOW_PROTECT &&
	    TSTMP_LEQ(tpheur->th_ecn_backoff, tcp_now)) {
		tpheur->th_ecn_droprxmt++;
		if (tpheur->th_ecn_droprxmt >= ECN_MAX_DROPRXMT) {
			tcpstat.tcps_ecn_fallback_droprxmt++;
			TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af,
			    ecn_fallback_droprxmt);
			tpheur->th_ecn_backoff = tcp_now +
			    (tcp_min_to_hz(tcp_ecn_timeout) <<
			    (tpheur->th_ecn_droprxmt - ECN_MAX_DROPRXMT));

			os_log(OS_LOG_DEFAULT, "%s disable ECN until %u now %u on %lx for drop-Rxmit\n",
			    __func__, tpheur->th_ecn_backoff, tcp_now,
			    (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
		}
	}
	if ((flags & TCPCACHE_F_ECN_SYNRST) &&
	    tpheur->th_ecn_synrst < TCP_CACHE_OVERFLOW_PROTECT) {
		tpheur->th_ecn_synrst++;
		if (tpheur->th_ecn_synrst >= ECN_MAX_SYNRST) {
			tcpstat.tcps_ecn_fallback_synrst++;
			TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af,
			    ecn_fallback_synrst);
			tpheur->th_ecn_backoff = tcp_now +
			    (tcp_min_to_hz(tcp_ecn_timeout) <<
			    (tpheur->th_ecn_synrst - ECN_MAX_SYNRST));

			os_log(OS_LOG_DEFAULT, "%s disable ECN until %u now %u on %lx for SYN-RST\n",
			    __func__, tpheur->th_ecn_backoff, tcp_now,
			    (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
		}
	}
	tcp_heuristic_unlock(head);
}
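
/*
 * Worked example for the shift-based backoff above (illustrative, default
 * tcp_ecn_timeout of 60 minutes): the first time th_ecn_loss reaches
 * ECN_MAX_SYN_LOSS (2), the shift amount is 2 - 2 = 0, so ECN is paused for
 * tcp_min_to_hz(60) = 1 hour. Each further loss doubles the pause (2h, 4h,
 * ...), and TCP_CACHE_OVERFLOW_PROTECT (9) bounds the counter so the shift
 * for this counter can never exceed 7 and overflow the 32-bit tick value.
 */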

void
tcp_heuristic_tfo_loss(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;
	uint32_t flag = 0;

	if (symptoms_is_wifi_lossy() &&
	    IFNET_IS_WIFI(tp->t_inpcb->inp_last_outifp)) {
		return;
	}

	tcp_cache_key_src_create(tp, &tcks);

	if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) {
		flag = (TCPCACHE_F_TFO_DATA | TCPCACHE_F_TFO_REQ);
	}
	if (tp->t_tfo_stats & TFO_S_COOKIE_REQ) {
		flag = TCPCACHE_F_TFO_REQ;
	}

	tcp_heuristic_inc_counters(&tcks, flag);
}

void
tcp_heuristic_tfo_rst(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;
	uint32_t flag = 0;

	tcp_cache_key_src_create(tp, &tcks);

	if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) {
		flag = (TCPCACHE_F_TFO_DATA_RST | TCPCACHE_F_TFO_REQ_RST);
	}
	if (tp->t_tfo_stats & TFO_S_COOKIE_REQ) {
		flag = TCPCACHE_F_TFO_REQ_RST;
	}

	tcp_heuristic_inc_counters(&tcks, flag);
}

void
tcp_heuristic_mptcp_loss(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;

	if (symptoms_is_wifi_lossy() &&
	    IFNET_IS_WIFI(tp->t_inpcb->inp_last_outifp)) {
		return;
	}

	tcp_cache_key_src_create(tp, &tcks);

	tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_MPTCP);
}

void
tcp_heuristic_ecn_loss(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;

	if (symptoms_is_wifi_lossy() &&
	    IFNET_IS_WIFI(tp->t_inpcb->inp_last_outifp)) {
		return;
	}

	tcp_cache_key_src_create(tp, &tcks);

	tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN);
}

void
tcp_heuristic_ecn_droprst(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;

	tcp_cache_key_src_create(tp, &tcks);

	tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_DROPRST);
}

void
tcp_heuristic_ecn_droprxmt(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;

	tcp_cache_key_src_create(tp, &tcks);

	tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_DROPRXMT);
}

void
tcp_heuristic_ecn_synrst(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;

	tcp_cache_key_src_create(tp, &tcks);

	tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_SYNRST);
}

void
tcp_heuristic_tfo_middlebox(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;

	tp->t_tfo_flags |= TFO_F_HEURISTIC_DONE;

	tcp_cache_key_src_create(tp, &tcks);
	tcp_heuristic_tfo_middlebox_common(&tcks);
}

static void
tcp_heuristic_ecn_aggressive_common(struct tcp_cache_key_src *tcks)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;

	tpheur = tcp_getheuristic_with_lock(tcks, 1, &head);
	if (tpheur == NULL) {
		return;
	}

	if (TSTMP_GT(tpheur->th_ecn_backoff, tcp_now)) {
		/* We are already in aggressive mode */
		tcp_heuristic_unlock(head);
		return;
	}

	/* Must be done before, otherwise we will start off with expo-backoff */
	tpheur->th_ecn_backoff = tcp_now +
	    (tcp_min_to_hz(tcp_ecn_timeout) << (tpheur->th_ecn_aggressive));

	/*
	 * Ugly way to prevent integer overflow: limit the counter so that the
	 * shift during exponential backoff cannot overflow.
	 */
	if (tpheur->th_ecn_aggressive < TCP_CACHE_OVERFLOW_PROTECT) {
		tpheur->th_ecn_aggressive++;
	}

	tcp_heuristic_unlock(head);

	os_log(OS_LOG_DEFAULT, "%s disable ECN until %u now %u on %lx\n", __func__,
	    tpheur->th_ecn_backoff, tcp_now, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
}

void
tcp_heuristic_ecn_aggressive(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;

	tcp_cache_key_src_create(tp, &tcks);
	tcp_heuristic_ecn_aggressive_common(&tcks);
}

static boolean_t
tcp_heuristic_do_tfo_common(struct tcp_cache_key_src *tcks)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;

	if (disable_tcp_heuristics) {
		return TRUE;
	}

	/* Get the tcp-heuristic. */
	tpheur = tcp_getheuristic_with_lock(tcks, 0, &head);
	if (tpheur == NULL) {
		return TRUE;
	}

	if (tpheur->th_tfo_in_backoff == 0) {
		goto tfo_ok;
	}

	if (TSTMP_GT(tcp_now, tpheur->th_tfo_backoff_until)) {
		tpheur->th_tfo_in_backoff = 0;
		tpheur->th_tfo_enabled_time = tcp_now;

		goto tfo_ok;
	}

	tcp_heuristic_unlock(head);
	return FALSE;

tfo_ok:
	tcp_heuristic_unlock(head);
	return TRUE;
}

boolean_t
tcp_heuristic_do_tfo(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;

	tcp_cache_key_src_create(tp, &tcks);
	if (tcp_heuristic_do_tfo_common(&tcks)) {
		return TRUE;
	}

	return FALSE;
}
/*
 * @return:
 * 0	Enable MPTCP (we are still discovering middleboxes)
 * -1	Enable MPTCP (heuristics have been temporarily disabled)
 * 1	Disable MPTCP
 */
int
tcp_heuristic_do_mptcp(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;
	struct tcp_heuristics_head *head = NULL;
	struct tcp_heuristic *tpheur;
	int ret = 0;

	if (disable_tcp_heuristics ||
	    (tptomptp(tp)->mpt_mpte->mpte_flags & MPTE_FORCE_ENABLE)) {
		return 0;
	}

	tcp_cache_key_src_create(tp, &tcks);

	/* Get the tcp-heuristic. */
	tpheur = tcp_getheuristic_with_lock(&tcks, 0, &head);
	if (tpheur == NULL) {
		return 0;
	}

	if (tpheur->th_mptcp_in_backoff == 0 ||
	    tpheur->th_mptcp_heuristic_disabled == 1) {
		goto mptcp_ok;
	}

	if (TSTMP_GT(tpheur->th_mptcp_backoff, tcp_now)) {
		goto fallback;
	}

	tpheur->th_mptcp_in_backoff = 0;

mptcp_ok:
	if (tpheur->th_mptcp_heuristic_disabled) {
		ret = -1;

		if (TSTMP_GT(tcp_now, tpheur->th_mptcp_backoff)) {
			tpheur->th_mptcp_heuristic_disabled = 0;
			tpheur->th_mptcp_success = 0;
		}
	}

	tcp_heuristic_unlock(head);
	return ret;

fallback:
	if (head) {
		tcp_heuristic_unlock(head);
	}

	if (tptomptp(tp)->mpt_mpte->mpte_flags & MPTE_FIRSTPARTY) {
		tcpstat.tcps_mptcp_fp_heuristic_fallback++;
	} else {
		tcpstat.tcps_mptcp_heuristic_fallback++;
	}

	return 1;
}
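
/*
 * Illustrative caller-side handling of the three return values (a sketch,
 * not the actual MPTCP call site):
 */
#if 0
static void
example_decide_mptcp(struct tcpcb *tp)
{
	switch (tcp_heuristic_do_mptcp(tp)) {
	case 0:  /* try MPTCP; losses keep training the heuristics */
	case -1: /* try MPTCP; heuristics are in their 12-hour snooze */
		break;
	case 1:  /* fall back to plain TCP for this destination */
		break;
	}
}
#endif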

static boolean_t
tcp_heuristic_do_ecn_common(struct tcp_cache_key_src *tcks)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;
	boolean_t ret = TRUE;

	if (disable_tcp_heuristics) {
		return TRUE;
	}

	/* Get the tcp-heuristic. */
	tpheur = tcp_getheuristic_with_lock(tcks, 0, &head);
	if (tpheur == NULL) {
		return ret;
	}

	if (TSTMP_GT(tpheur->th_ecn_backoff, tcp_now)) {
		ret = FALSE;
	} else {
		/* Reset the following counters to start re-evaluating */
		if (tpheur->th_ecn_droprst >= ECN_RETRY_LIMIT) {
			tpheur->th_ecn_droprst = 0;
		}
		if (tpheur->th_ecn_droprxmt >= ECN_RETRY_LIMIT) {
			tpheur->th_ecn_droprxmt = 0;
		}
		if (tpheur->th_ecn_synrst >= ECN_RETRY_LIMIT) {
			tpheur->th_ecn_synrst = 0;
		}

		/* Make sure it follows along */
		tpheur->th_ecn_backoff = tcp_now;
	}

	tcp_heuristic_unlock(head);

	return ret;
}

boolean_t
tcp_heuristic_do_ecn(struct tcpcb *tp)
{
	struct tcp_cache_key_src tcks;

	tcp_cache_key_src_create(tp, &tcks);
	return tcp_heuristic_do_ecn_common(&tcks);
}

boolean_t
tcp_heuristic_do_ecn_with_address(struct ifnet *ifp,
    union sockaddr_in_4_6 *local_address)
{
	struct tcp_cache_key_src tcks;

	memset(&tcks, 0, sizeof(tcks));
	tcks.ifp = ifp;

	calculate_tcp_clock();

	if (local_address->sa.sa_family == AF_INET6) {
		memcpy(&tcks.laddr.addr6, &local_address->sin6.sin6_addr, sizeof(struct in6_addr));
		tcks.af = AF_INET6;
	} else if (local_address->sa.sa_family == AF_INET) {
		memcpy(&tcks.laddr.addr, &local_address->sin.sin_addr, sizeof(struct in_addr));
		tcks.af = AF_INET;
	}

	return tcp_heuristic_do_ecn_common(&tcks);
}

void
tcp_heuristics_ecn_update(struct necp_tcp_ecn_cache *necp_buffer,
    struct ifnet *ifp, union sockaddr_in_4_6 *local_address)
{
	struct tcp_cache_key_src tcks;

	memset(&tcks, 0, sizeof(tcks));
	tcks.ifp = ifp;

	calculate_tcp_clock();

	if (local_address->sa.sa_family == AF_INET6) {
		memcpy(&tcks.laddr.addr6, &local_address->sin6.sin6_addr, sizeof(struct in6_addr));
		tcks.af = AF_INET6;
	} else if (local_address->sa.sa_family == AF_INET) {
		memcpy(&tcks.laddr.addr, &local_address->sin.sin_addr, sizeof(struct in_addr));
		tcks.af = AF_INET;
	}

	if (necp_buffer->necp_tcp_ecn_heuristics_success) {
		tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_ECN);
	} else if (necp_buffer->necp_tcp_ecn_heuristics_loss) {
		tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN);
	} else if (necp_buffer->necp_tcp_ecn_heuristics_drop_rst) {
		tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_DROPRST);
	} else if (necp_buffer->necp_tcp_ecn_heuristics_drop_rxmt) {
		tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_DROPRXMT);
	} else if (necp_buffer->necp_tcp_ecn_heuristics_syn_rst) {
		tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_SYNRST);
	} else if (necp_buffer->necp_tcp_ecn_heuristics_aggressive) {
		tcp_heuristic_ecn_aggressive_common(&tcks);
	}

	return;
}

boolean_t
tcp_heuristic_do_tfo_with_address(struct ifnet *ifp,
    union sockaddr_in_4_6 *local_address, union sockaddr_in_4_6 *remote_address,
    uint8_t *cookie, uint8_t *cookie_len)
{
	struct tcp_cache_key_src tcks;

	memset(&tcks, 0, sizeof(tcks));
	tcks.ifp = ifp;

	calculate_tcp_clock();

	if (remote_address->sa.sa_family == AF_INET6) {
		memcpy(&tcks.laddr.addr6, &local_address->sin6.sin6_addr, sizeof(struct in6_addr));
		memcpy(&tcks.faddr.addr6, &remote_address->sin6.sin6_addr, sizeof(struct in6_addr));
		tcks.af = AF_INET6;
	} else if (remote_address->sa.sa_family == AF_INET) {
		memcpy(&tcks.laddr.addr, &local_address->sin.sin_addr, sizeof(struct in_addr));
		memcpy(&tcks.faddr.addr, &remote_address->sin.sin_addr, sizeof(struct in_addr));
		tcks.af = AF_INET;
	}

	if (tcp_heuristic_do_tfo_common(&tcks)) {
		if (!tcp_cache_get_cookie_common(&tcks, cookie, cookie_len)) {
			*cookie_len = 0;
		}
		return TRUE;
	}

	return FALSE;
}

void
tcp_heuristics_tfo_update(struct necp_tcp_tfo_cache *necp_buffer,
    struct ifnet *ifp, union sockaddr_in_4_6 *local_address,
    union sockaddr_in_4_6 *remote_address)
{
	struct tcp_cache_key_src tcks;

	memset(&tcks, 0, sizeof(tcks));
	tcks.ifp = ifp;

	calculate_tcp_clock();

	if (remote_address->sa.sa_family == AF_INET6) {
		memcpy(&tcks.laddr.addr6, &local_address->sin6.sin6_addr, sizeof(struct in6_addr));
		memcpy(&tcks.faddr.addr6, &remote_address->sin6.sin6_addr, sizeof(struct in6_addr));
		tcks.af = AF_INET6;
	} else if (remote_address->sa.sa_family == AF_INET) {
		memcpy(&tcks.laddr.addr, &local_address->sin.sin_addr, sizeof(struct in_addr));
		memcpy(&tcks.faddr.addr, &remote_address->sin.sin_addr, sizeof(struct in_addr));
		tcks.af = AF_INET;
	}

	if (necp_buffer->necp_tcp_tfo_heuristics_success) {
		tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_DATA |
		    TCPCACHE_F_TFO_REQ_RST | TCPCACHE_F_TFO_DATA_RST);
	}

	if (necp_buffer->necp_tcp_tfo_heuristics_success_req) {
		tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_REQ_RST);
	}

	if (necp_buffer->necp_tcp_tfo_heuristics_loss) {
		tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_DATA);
	}

	if (necp_buffer->necp_tcp_tfo_heuristics_loss_req) {
		tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_TFO_REQ);
	}

	if (necp_buffer->necp_tcp_tfo_heuristics_rst_data) {
		tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_TFO_REQ_RST | TCPCACHE_F_TFO_DATA_RST);
	}

	if (necp_buffer->necp_tcp_tfo_heuristics_rst_req) {
		tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_TFO_REQ_RST);
	}

	if (necp_buffer->necp_tcp_tfo_heuristics_middlebox) {
		tcp_heuristic_tfo_middlebox_common(&tcks);
	}

	if (necp_buffer->necp_tcp_tfo_cookie_len != 0) {
		tcp_cache_set_cookie_common(&tcks,
		    necp_buffer->necp_tcp_tfo_cookie, necp_buffer->necp_tcp_tfo_cookie_len);
	}

	return;
}

static void
sysctl_cleartfocache(void)
{
	int i;

	for (i = 0; i < tcp_cache_size; i++) {
		struct tcp_cache_head *head = &tcp_cache[i];
		struct tcp_cache *tpcache, *tmp;
		struct tcp_heuristics_head *hhead = &tcp_heuristics[i];
		struct tcp_heuristic *tpheur, *htmp;

		lck_mtx_lock(&head->tch_mtx);
		SLIST_FOREACH_SAFE(tpcache, &head->tcp_caches, list, tmp) {
			SLIST_REMOVE(&head->tcp_caches, tpcache, tcp_cache, list);
			_FREE(tpcache, M_TEMP);
		}
		lck_mtx_unlock(&head->tch_mtx);

		lck_mtx_lock(&hhead->thh_mtx);
		SLIST_FOREACH_SAFE(tpheur, &hhead->tcp_heuristics, list, htmp) {
			SLIST_REMOVE(&hhead->tcp_heuristics, tpheur, tcp_heuristic, list);
			_FREE(tpheur, M_TEMP);
		}
		lck_mtx_unlock(&hhead->thh_mtx);
	}
}

/* This sysctl is useful for testing purposes only */
static int tcpcleartfo = 0;

static int sysctl_cleartfo SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error = 0, val, oldval = tcpcleartfo;

	val = oldval;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr) {
		if (error) {
			os_log_error(OS_LOG_DEFAULT, "%s could not parse int: %d", __func__, error);
		}
		return error;
	}

	/*
	 * The actual value does not matter. If the value is set, it triggers
	 * the clearing of the TFO cache. If a future implementation does not
	 * use the route entry to hold the TFO cache, replace the route sysctl.
	 */

	if (val != oldval) {
		sysctl_cleartfocache();
	}

	tcpcleartfo = val;

	return error;
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, clear_tfocache, CTLTYPE_INT | CTLFLAG_RW |
    CTLFLAG_LOCKED, &tcpcleartfo, 0, &sysctl_cleartfo, "I",
    "Toggle to clear the TFO destination based heuristic cache");
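
/*
 * Testing aid (illustrative): writing any value that differs from the current
 * one clears both hash tables, e.g. from user space:
 *
 *	sysctl -w net.inet.tcp.clear_tfocache=1
 */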

void
tcp_cache_init(void)
{
	uint64_t sane_size_meg = sane_size / 1024 / 1024;
	int i;

	/*
	 * On machines with <100MB of memory this will result in a (full) cache-size
	 * of 32 entries, thus 32 * 5 * 64bytes = 10KB. (about 0.01 %)
	 * On machines with > 4GB of memory, we have a cache-size of 1024 entries,
	 * thus about 327KB.
	 *
	 * Side-note: we convert to uint32_t. If sane_size is more than
	 * 16000 TB, we lose precision. But, who cares? :)
	 */
	tcp_cache_size = tcp_cache_roundup2((uint32_t)(sane_size_meg >> 2));
	if (tcp_cache_size < 32) {
		tcp_cache_size = 32;
	} else if (tcp_cache_size > 1024) {
		tcp_cache_size = 1024;
	}

	tcp_cache = _MALLOC(sizeof(struct tcp_cache_head) * tcp_cache_size,
	    M_TEMP, M_ZERO);
	if (tcp_cache == NULL) {
		panic("Allocating tcp_cache failed at boot-time!");
	}

	tcp_cache_mtx_grp_attr = lck_grp_attr_alloc_init();
	tcp_cache_mtx_grp = lck_grp_alloc_init("tcpcache", tcp_cache_mtx_grp_attr);
	tcp_cache_mtx_attr = lck_attr_alloc_init();

	tcp_heuristics = _MALLOC(sizeof(struct tcp_heuristics_head) * tcp_cache_size,
	    M_TEMP, M_ZERO);
	if (tcp_heuristics == NULL) {
		panic("Allocating tcp_heuristic failed at boot-time!");
	}

	tcp_heuristic_mtx_grp_attr = lck_grp_attr_alloc_init();
	tcp_heuristic_mtx_grp = lck_grp_alloc_init("tcpheuristic", tcp_heuristic_mtx_grp_attr);
	tcp_heuristic_mtx_attr = lck_attr_alloc_init();

	for (i = 0; i < tcp_cache_size; i++) {
		lck_mtx_init(&tcp_cache[i].tch_mtx, tcp_cache_mtx_grp,
		    tcp_cache_mtx_attr);
		SLIST_INIT(&tcp_cache[i].tcp_caches);

		lck_mtx_init(&tcp_heuristics[i].thh_mtx, tcp_heuristic_mtx_grp,
		    tcp_heuristic_mtx_attr);
		SLIST_INIT(&tcp_heuristics[i].tcp_heuristics);
	}

	tcp_cache_hash_seed = RandomULong();
}
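
/*
 * Sizing example (illustrative): on an 8 GB machine, sane_size_meg is 8192,
 * 8192 >> 2 = 2048, tcp_cache_roundup2(2048) = 2048, and the clamp brings it
 * down to the 1024-bucket maximum. With TCP_CACHE_BUCKET_SIZE of 5, each of
 * the two tables then holds at most 5 * 1024 entries.
 */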