/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* TCP-cache to store and retrieve TCP-related information */

#include <net/flowhash.h>
#include <net/route.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_cache.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>
#include <kern/locks.h>
#include <sys/queue.h>
#include <dev/random/randomdev.h>

struct tcp_heuristic_key {
	union {
		uint8_t thk_net_signature[IFNET_SIGNATURELEN];
		union {
			struct in_addr addr;
			struct in6_addr addr6;
		} thk_ip;
	};
	sa_family_t thk_family;
};

struct tcp_heuristic {
	SLIST_ENTRY(tcp_heuristic) list;

	u_int32_t	th_last_access;

	struct tcp_heuristic_key	th_key;

	char		th_val_start[0]; /* Marker for memsetting to 0 */

	u_int8_t	th_tfo_cookie_loss; /* The number of times a SYN+cookie has been lost */
	u_int8_t	th_ecn_loss; /* The number of times a SYN+ecn has been lost */
	u_int8_t	th_ecn_aggressive; /* The number of times we did an aggressive fallback */
	u_int32_t	th_tfo_fallback_trials; /* Number of times we did not try out TFO due to SYN-loss */
	u_int32_t	th_tfo_cookie_backoff; /* Time until when we should not try out TFO */
	u_int32_t	th_ecn_backoff; /* Time until when we should not try out ECN */

	u_int8_t	th_tfo_in_backoff:1, /* Are we avoiding TFO due to the backoff timer? */
			th_tfo_aggressive_fallback:1, /* Aggressive fallback due to nasty middlebox */
			th_tfo_snd_middlebox_supp:1, /* We are sure that the network supports TFO in upstream direction */
			th_tfo_rcv_middlebox_supp:1; /* We are sure that the network supports TFO in downstream direction */

	char		th_val_end[0]; /* Marker for memsetting to 0 */
};
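/*
 * th_val_start and th_val_end are zero-length markers: everything between
 * them can be wiped with a single bzero() when an entry is recycled (see
 * tcp_getheuristic_with_lock()), without touching the list linkage, the
 * access timestamp or the key.
 */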

struct tcp_heuristics_head {
	SLIST_HEAD(tcp_heur_bucket, tcp_heuristic) tcp_heuristics;

	/* Per-hashbucket lock to avoid lock-contention */
	lck_mtx_t	thh_mtx;
};

struct tcp_cache_key {
	sa_family_t	tck_family;

	struct tcp_heuristic_key tck_src;
	union {
		struct in_addr addr;
		struct in6_addr addr6;
	} tck_dst;
};

struct tcp_cache {
	SLIST_ENTRY(tcp_cache) list;

	u_int32_t	tc_last_access;

	struct tcp_cache_key tc_key;

	u_int8_t	tc_tfo_cookie[TFO_COOKIE_LEN_MAX];
	u_int8_t	tc_tfo_cookie_len;
};

struct tcp_cache_head {
	SLIST_HEAD(tcp_cache_bucket, tcp_cache) tcp_caches;

	/* Per-hashbucket lock to avoid lock-contention */
	lck_mtx_t	tch_mtx;
};

static u_int32_t tcp_cache_hash_seed;

size_t tcp_cache_size;

/*
 * The maximum depth of a hash-bucket. This way we limit the tcp_cache to
 * TCP_CACHE_BUCKET_SIZE * tcp_cache_size entries and get "natural" garbage
 * collection.
 */
#define TCP_CACHE_BUCKET_SIZE 5
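/*
 * With the bucket count chosen in tcp_cache_init() (between 32 and 1024),
 * this caps the cache at no more than 5 * 1024 = 5120 entries.
 */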

static struct tcp_cache_head *tcp_cache;

decl_lck_mtx_data(, tcp_cache_mtx);

static lck_attr_t	*tcp_cache_mtx_attr;
static lck_grp_t	*tcp_cache_mtx_grp;
static lck_grp_attr_t	*tcp_cache_mtx_grp_attr;

static struct tcp_heuristics_head *tcp_heuristics;

decl_lck_mtx_data(, tcp_heuristics_mtx);

static lck_attr_t	*tcp_heuristic_mtx_attr;
static lck_grp_t	*tcp_heuristic_mtx_grp;
static lck_grp_attr_t	*tcp_heuristic_mtx_grp_attr;

int tcp_ecn_timeout = 60;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, ecn_timeout, CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_ecn_timeout, 0, "Initial minutes to wait before re-trying ECN");

/*
 * Round up to the next higher power of 2. See "Bit Twiddling Hacks".
 *
 * Might be worth moving this to a library so that others
 * (e.g., scale_to_powerof2()) can use it as well instead of a while-loop.
 */
static u_int32_t tcp_cache_roundup2(u_int32_t a)
{
	a--;
	a |= a >> 1;
	a |= a >> 2;
	a |= a >> 4;
	a |= a >> 8;
	a |= a >> 16;
	a++;

	return a;
}
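/*
 * For example, tcp_cache_roundup2(600) returns 1024, and a value that is
 * already a power of two is returned unchanged. tcp_cache_size has to be a
 * power of two so that the bucket index can be computed with
 * "hash & (tcp_cache_size - 1)" in tcp_cache_hash() and tcp_heuristics_hash().
 */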

static void tcp_cache_hash_src(struct inpcb *inp, struct tcp_heuristic_key *key)
{
	struct ifnet *ifn = inp->inp_last_outifp;
	uint8_t len = sizeof(key->thk_net_signature);
	uint16_t flags;

	if (inp->inp_vflag & INP_IPV6) {
		int ret;

		key->thk_family = AF_INET6;
		ret = ifnet_get_netsignature(ifn, AF_INET6, &len, &flags,
		    key->thk_net_signature);

		/*
		 * ifnet_get_netsignature only returns EINVAL if ifn is NULL
		 * (we made sure the other error cases cannot occur here), so
		 * fall back to the connection's local address.
		 */
		if (ret == ENOENT || ret == EINVAL)
			memcpy(&key->thk_ip.addr6, &inp->in6p_laddr, sizeof(struct in6_addr));
	} else {
		int ret;

		key->thk_family = AF_INET;
		ret = ifnet_get_netsignature(ifn, AF_INET, &len, &flags,
		    key->thk_net_signature);

		/*
		 * ifnet_get_netsignature only returns EINVAL if ifn is NULL
		 * (we made sure the other error cases cannot occur here), so
		 * fall back to the connection's local address.
		 */
		if (ret == ENOENT || ret == EINVAL)
			memcpy(&key->thk_ip.addr, &inp->inp_laddr, sizeof(struct in_addr));
	}
}
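/*
 * Note that the source part of the key is the interface's network signature
 * whenever one is available; heuristics and cached cookies are thus shared
 * across connections on the same network rather than being tied to a single
 * local address.
 */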

static u_int16_t tcp_cache_hash(struct inpcb *inp, struct tcp_cache_key *key)
{
	u_int32_t hash;

	bzero(key, sizeof(struct tcp_cache_key));

	tcp_cache_hash_src(inp, &key->tck_src);

	if (inp->inp_vflag & INP_IPV6) {
		key->tck_family = AF_INET6;
		memcpy(&key->tck_dst.addr6, &inp->in6p_faddr,
		    sizeof(struct in6_addr));
	} else {
		key->tck_family = AF_INET;
		memcpy(&key->tck_dst.addr, &inp->inp_faddr,
		    sizeof(struct in_addr));
	}

	hash = net_flowhash(key, sizeof(struct tcp_cache_key),
	    tcp_cache_hash_seed);

	return (hash & (tcp_cache_size - 1));
}

static void tcp_cache_unlock(struct tcp_cache_head *head)
{
	lck_mtx_unlock(&head->tch_mtx);
}

/*
 * Make sure that everything done after tcp_getcache_with_lock() returns is
 * short enough to justify holding the per-bucket lock. Otherwise, better
 * build another lookup function that does not hold the lock and copies out
 * the bits and bytes.
 *
 * That is why we hand the bucket head back through a "return" pointer, so
 * that the caller can pass it to tcp_cache_unlock().
 */
static struct tcp_cache *tcp_getcache_with_lock(struct tcpcb *tp, int create,
    struct tcp_cache_head **headarg)
{
	struct inpcb *inp = tp->t_inpcb;
	struct tcp_cache *tpcache = NULL;
	struct tcp_cache_head *head;
	struct tcp_cache_key key;
	u_int16_t hash;
	int i = 0;

	hash = tcp_cache_hash(inp, &key);
	head = &tcp_cache[hash];

	lck_mtx_lock(&head->tch_mtx);

	/*** First step: Look for the tcp_cache in our bucket ***/
	SLIST_FOREACH(tpcache, &head->tcp_caches, list) {
		if (memcmp(&tpcache->tc_key, &key, sizeof(key)) == 0)
			break;

		i++;
	}

	/*** Second step: If it's not there, create/recycle it ***/
	if ((tpcache == NULL) && create) {
		if (i >= TCP_CACHE_BUCKET_SIZE) {
			struct tcp_cache *oldest_cache = NULL;
			u_int32_t max_age = 0;

			/* Look for the oldest tcp_cache in the bucket */
			SLIST_FOREACH(tpcache, &head->tcp_caches, list) {
				u_int32_t age = tcp_now - tpcache->tc_last_access;
				if (age > max_age) {
					max_age = age;
					oldest_cache = tpcache;
				}
			}
			VERIFY(oldest_cache != NULL);

			tpcache = oldest_cache;

			/* We recycle, thus let's indicate that there is no cookie */
			tpcache->tc_tfo_cookie_len = 0;
		} else {
			/* Create a new cache and add it to the list */
			tpcache = _MALLOC(sizeof(struct tcp_cache), M_TEMP,
			    M_NOWAIT | M_ZERO);
			if (tpcache == NULL)
				goto out_null;

			SLIST_INSERT_HEAD(&head->tcp_caches, tpcache, list);
		}

		memcpy(&tpcache->tc_key, &key, sizeof(key));
	}

	if (tpcache == NULL)
		goto out_null;

	/* Update timestamp for garbage collection purposes */
	tpcache->tc_last_access = tcp_now;
	*headarg = head;

	return (tpcache);

out_null:
	tcp_cache_unlock(head);
	return (NULL);
}
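/*
 * Typical usage, as in tcp_cache_set_cookie() below: look the entry up with
 * 'create' set (allocating or recycling an entry as needed), bail out if
 * that failed, update a few fields and immediately release the bucket again
 * with tcp_cache_unlock(head).
 */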

void tcp_cache_set_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t len)
{
	struct tcp_cache_head *head;
	struct tcp_cache *tpcache;

	/* Call lookup/create function */
	tpcache = tcp_getcache_with_lock(tp, 1, &head);
	if (tpcache == NULL)
		return;

	tpcache->tc_tfo_cookie_len = len;
	memcpy(tpcache->tc_tfo_cookie, cookie, len);

	tcp_cache_unlock(head);
}

/*
 * Get the cookie related to 'tp', and copy it into 'cookie', provided that
 * 'len' (the available space) is big enough. Upon return, 'len' is set to
 * the cookie's length.
 *
 * Returns 0 if we should request a cookie.
 * Returns 1 if the cookie has been found and written.
 */
int tcp_cache_get_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t *len)
{
	struct tcp_cache_head *head;
	struct tcp_cache *tpcache;

	/* Call lookup/create function */
	tpcache = tcp_getcache_with_lock(tp, 1, &head);
	if (tpcache == NULL)
		return (0);

	if (tpcache->tc_tfo_cookie_len == 0) {
		tcp_cache_unlock(head);
		return (0);
	}

	/*
	 * Not enough space - this should never happen as it has been checked
	 * in tcp_tfo_check. So, fail here!
	 */
	VERIFY(tpcache->tc_tfo_cookie_len <= *len);

	memcpy(cookie, tpcache->tc_tfo_cookie, tpcache->tc_tfo_cookie_len);
	*len = tpcache->tc_tfo_cookie_len;

	tcp_cache_unlock(head);

	return (1);
}

unsigned int tcp_cache_get_cookie_len(struct tcpcb *tp)
{
	struct tcp_cache_head *head;
	struct tcp_cache *tpcache;
	unsigned int cookie_len;

	/* Call lookup/create function */
	tpcache = tcp_getcache_with_lock(tp, 1, &head);
	if (tpcache == NULL)
		return (0);

	cookie_len = tpcache->tc_tfo_cookie_len;

	tcp_cache_unlock(head);

	return cookie_len;
}

static u_int16_t tcp_heuristics_hash(struct inpcb *inp,
    struct tcp_heuristic_key *key)
{
	u_int32_t hash;

	bzero(key, sizeof(struct tcp_heuristic_key));

	tcp_cache_hash_src(inp, key);

	hash = net_flowhash(key, sizeof(struct tcp_heuristic_key),
	    tcp_cache_hash_seed);

	return (hash & (tcp_cache_size - 1));
}

static void tcp_heuristic_unlock(struct tcp_heuristics_head *head)
{
	lck_mtx_unlock(&head->thh_mtx);
}

/*
 * Make sure that everything done after tcp_getheuristic_with_lock() returns
 * is short enough to justify holding the per-bucket lock. Otherwise, better
 * build another lookup function that does not hold the lock and copies out
 * the bits and bytes.
 *
 * That is why we hand the bucket head back through a "return" pointer, so
 * that the caller can pass it to tcp_heuristic_unlock().
 *
 * ToDo - way too much code-duplication. We should create an interface to
 * handle bucketized hashtables with recycling of the oldest element.
 */
static struct tcp_heuristic *tcp_getheuristic_with_lock(struct tcpcb *tp,
    int create, struct tcp_heuristics_head **headarg)
{
	struct inpcb *inp = tp->t_inpcb;
	struct tcp_heuristic *tpheur = NULL;
	struct tcp_heuristics_head *head;
	struct tcp_heuristic_key key;
	u_int16_t hash;
	int i = 0;

	hash = tcp_heuristics_hash(inp, &key);
	head = &tcp_heuristics[hash];

	lck_mtx_lock(&head->thh_mtx);

	/*** First step: Look for the tcp_heur in our bucket ***/
	SLIST_FOREACH(tpheur, &head->tcp_heuristics, list) {
		if (memcmp(&tpheur->th_key, &key, sizeof(key)) == 0)
			break;

		i++;
	}

	/*** Second step: If it's not there, create/recycle it ***/
	if ((tpheur == NULL) && create) {
		if (i >= TCP_CACHE_BUCKET_SIZE) {
			struct tcp_heuristic *oldest_heur = NULL;
			u_int32_t max_age = 0;

			/* Look for the oldest tcp_heur in the bucket */
			SLIST_FOREACH(tpheur, &head->tcp_heuristics, list) {
				u_int32_t age = tcp_now - tpheur->th_last_access;
				if (age > max_age) {
					max_age = age;
					oldest_heur = tpheur;
				}
			}
			VERIFY(oldest_heur != NULL);

			tpheur = oldest_heur;

			/* We recycle - set everything to 0 */
			bzero(tpheur->th_val_start,
			    tpheur->th_val_end - tpheur->th_val_start);
		} else {
			/* Create a new heuristic and add it to the list */
			tpheur = _MALLOC(sizeof(struct tcp_heuristic), M_TEMP,
			    M_NOWAIT | M_ZERO);
			if (tpheur == NULL)
				goto out_null;

			SLIST_INSERT_HEAD(&head->tcp_heuristics, tpheur, list);
		}

		/*
		 * Initialize the backoff deadlines to tcp_now, so that they
		 * are not in the future and thus do not block anything.
		 */
		tpheur->th_ecn_backoff = tcp_now;
		tpheur->th_tfo_cookie_backoff = tcp_now;

		memcpy(&tpheur->th_key, &key, sizeof(key));
	}

	if (tpheur == NULL)
		goto out_null;

	/* Update timestamp for garbage collection purposes */
	tpheur->th_last_access = tcp_now;
	*headarg = head;

	return (tpheur);

out_null:
	tcp_heuristic_unlock(head);
	return (NULL);
}

void tcp_heuristic_tfo_success(struct tcpcb *tp)
{
	struct tcp_heuristics_head *head;

	struct tcp_heuristic *tpheur = tcp_getheuristic_with_lock(tp, 1, &head);
	if (tpheur == NULL)
		return;

	tpheur->th_tfo_cookie_loss = 0;

	tcp_heuristic_unlock(head);
}

void tcp_heuristic_tfo_rcv_good(struct tcpcb *tp)
{
	struct tcp_heuristics_head *head;

	struct tcp_heuristic *tpheur = tcp_getheuristic_with_lock(tp, 1, &head);
	if (tpheur == NULL)
		return;

	tpheur->th_tfo_rcv_middlebox_supp = 1;

	tcp_heuristic_unlock(head);

	tp->t_tfo_flags |= TFO_F_NO_RCVPROBING;
}

void tcp_heuristic_tfo_snd_good(struct tcpcb *tp)
{
	struct tcp_heuristics_head *head;

	struct tcp_heuristic *tpheur = tcp_getheuristic_with_lock(tp, 1, &head);
	if (tpheur == NULL)
		return;

	tpheur->th_tfo_snd_middlebox_supp = 1;

	tcp_heuristic_unlock(head);

	tp->t_tfo_flags |= TFO_F_NO_SNDPROBING;
}

void tcp_heuristic_inc_loss(struct tcpcb *tp, int tfo, int ecn)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;

	tpheur = tcp_getheuristic_with_lock(tp, 1, &head);
	if (tpheur == NULL)
		return;

	/* Limit to 9 to prevent integer-overflow during exponential backoff */
	if (tfo && tpheur->th_tfo_cookie_loss < 9)
		tpheur->th_tfo_cookie_loss++;

	if (ecn && tpheur->th_ecn_loss < 9) {
		tpheur->th_ecn_loss++;
		if (tpheur->th_ecn_loss >= ECN_MAX_SYN_LOSS) {
			tcpstat.tcps_ecn_fallback_synloss++;
			INP_INC_IFNET_STAT(tp->t_inpcb, ecn_fallback_synloss);
			tpheur->th_ecn_backoff = tcp_now +
			    ((tcp_ecn_timeout * 60 * TCP_RETRANSHZ)
			    << (tpheur->th_ecn_loss - ECN_MAX_SYN_LOSS));
		}
	}

	tcp_heuristic_unlock(head);
}
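/*
 * For illustration: the first time th_ecn_loss reaches ECN_MAX_SYN_LOSS the
 * shift above is zero, so ECN is backed off for tcp_ecn_timeout (default 60)
 * minutes; every additional SYN-loss doubles that period. The cap of 9
 * losses bounds the shift so the computation cannot overflow.
 */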

void tcp_heuristic_tfo_middlebox(struct tcpcb *tp)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;

	tpheur = tcp_getheuristic_with_lock(tp, 1, &head);
	if (tpheur == NULL)
		return;

	tpheur->th_tfo_aggressive_fallback = 1;

	tcp_heuristic_unlock(head);
}

void tcp_heuristic_ecn_aggressive(struct tcpcb *tp)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;

	tpheur = tcp_getheuristic_with_lock(tp, 1, &head);
	if (tpheur == NULL)
		return;

	/*
	 * This must be done before the increment below, otherwise we would
	 * start off with an exponentially increased backoff.
	 */
	tpheur->th_ecn_backoff = tcp_now +
	    ((tcp_ecn_timeout * 60 * TCP_RETRANSHZ) << (tpheur->th_ecn_aggressive));

	/* Limit to 9 to prevent an integer overflow during the exponential backoff */
	if (tpheur->th_ecn_aggressive < 9)
		tpheur->th_ecn_aggressive++;

	tcp_heuristic_unlock(head);
}
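/*
 * In other words, each aggressive fallback doubles the ECN backoff:
 * tcp_ecn_timeout minutes after the first one, twice that after the second,
 * and so on (the limit of 9 above bounds the growth).
 */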

void tcp_heuristic_reset_loss(struct tcpcb *tp, int tfo, int ecn)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;

	/*
	 * Don't attempt to create it! Keep the heuristics clean if the
	 * server does not support TFO. This reduces the lookup-cost on
	 * our side.
	 */
	tpheur = tcp_getheuristic_with_lock(tp, 0, &head);
	if (tpheur == NULL)
		return;

	if (tfo)
		tpheur->th_tfo_cookie_loss = 0;

	if (ecn)
		tpheur->th_ecn_loss = 0;

	tcp_heuristic_unlock(head);
}

boolean_t tcp_heuristic_do_tfo(struct tcpcb *tp)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;

	/* Get the tcp-heuristic. */
	tpheur = tcp_getheuristic_with_lock(tp, 0, &head);
	if (tpheur == NULL)
		return (true);

	if (tpheur->th_tfo_aggressive_fallback) {
		/* Aggressive fallback - don't do TFO anymore... :'( */
		tcp_heuristic_unlock(head);
		return (false);
	}

	if (tpheur->th_tfo_cookie_loss >= TFO_MAX_COOKIE_LOSS &&
	    (tpheur->th_tfo_fallback_trials < tcp_tfo_fallback_min ||
	    TSTMP_GT(tpheur->th_tfo_cookie_backoff, tcp_now))) {
		/*
		 * When we are in SYN-loss mode we stop using TFO for the next
		 * 'tcp_tfo_fallback_min' connections. That way, we make sure
		 * that never more than 1 out of tcp_tfo_fallback_min
		 * connections suffers from our nice little middlebox.
		 *
		 * After that we first wait for 2 minutes. If we fail again,
		 * we wait for yet another 60 minutes.
		 */
		tpheur->th_tfo_fallback_trials++;
		if (tpheur->th_tfo_fallback_trials >= tcp_tfo_fallback_min &&
		    !tpheur->th_tfo_in_backoff) {
			if (tpheur->th_tfo_cookie_loss == TFO_MAX_COOKIE_LOSS)
				/* Backoff for 2 minutes */
				tpheur->th_tfo_cookie_backoff = tcp_now + (60 * 2 * TCP_RETRANSHZ);
			else
				/* Backoff for 60 minutes */
				tpheur->th_tfo_cookie_backoff = tcp_now + (60 * 60 * TCP_RETRANSHZ);

			tpheur->th_tfo_in_backoff = 1;
		}

		tcp_heuristic_unlock(head);
		return (false);
	}

	/*
	 * We give it a new shot, set trials back to 0. This allows us to
	 * start counting again from zero in case we get yet another SYN-loss.
	 */
	tpheur->th_tfo_fallback_trials = 0;
	tpheur->th_tfo_in_backoff = 0;

	if (tpheur->th_tfo_rcv_middlebox_supp)
		tp->t_tfo_flags |= TFO_F_NO_RCVPROBING;
	if (tpheur->th_tfo_snd_middlebox_supp)
		tp->t_tfo_flags |= TFO_F_NO_SNDPROBING;

	tcp_heuristic_unlock(head);

	return (true);
}
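/*
 * To summarize: after TFO_MAX_COOKIE_LOSS SYN+cookie losses, TFO is skipped
 * for the next tcp_tfo_fallback_min connections and then for a 2-minute
 * backoff period (60 minutes once further losses have accumulated). Only
 * when the trial quota is used up and the backoff has expired do we reset
 * the counters above and give TFO another try.
 */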

boolean_t tcp_heuristic_do_ecn(struct tcpcb *tp)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;
	boolean_t ret = true;

	/* Get the tcp-heuristic. */
	tpheur = tcp_getheuristic_with_lock(tp, 0, &head);
	if (tpheur == NULL)
		return ret;

	if (TSTMP_GT(tpheur->th_ecn_backoff, tcp_now))
		ret = false;

	tcp_heuristic_unlock(head);

	return (ret);
}

static void sysctl_cleartfocache(void)
{
	int i;

	for (i = 0; i < tcp_cache_size; i++) {
		struct tcp_cache_head *head = &tcp_cache[i];
		struct tcp_cache *tpcache, *tmp;
		struct tcp_heuristics_head *hhead = &tcp_heuristics[i];
		struct tcp_heuristic *tpheur, *htmp;

		lck_mtx_lock(&head->tch_mtx);
		SLIST_FOREACH_SAFE(tpcache, &head->tcp_caches, list, tmp) {
			SLIST_REMOVE(&head->tcp_caches, tpcache, tcp_cache, list);
			_FREE(tpcache, M_TEMP);
		}
		lck_mtx_unlock(&head->tch_mtx);

		lck_mtx_lock(&hhead->thh_mtx);
		SLIST_FOREACH_SAFE(tpheur, &hhead->tcp_heuristics, list, htmp) {
			SLIST_REMOVE(&hhead->tcp_heuristics, tpheur, tcp_heuristic, list);
			_FREE(tpheur, M_TEMP);
		}
		lck_mtx_unlock(&hhead->thh_mtx);
	}
}

/* This sysctl is useful for testing purposes only */
static int tcpcleartfo = 0;

static int sysctl_cleartfo SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error = 0, val, oldval = tcpcleartfo;

	val = oldval;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	/*
	 * The actual value does not matter. If the value is set, it triggers
	 * the clearing of the TFO cache. If a future implementation does not
	 * use the route entry to hold the TFO cache, replace the route sysctl.
	 */

	if (val != oldval)
		sysctl_cleartfocache();

	tcpcleartfo = val;

	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, clear_tfocache, CTLTYPE_INT | CTLFLAG_RW |
    CTLFLAG_LOCKED, &tcpcleartfo, 0, &sysctl_cleartfo, "I",
    "Toggle to clear the TFO destination based heuristic cache");

void tcp_cache_init(void)
{
	uint64_t sane_size_meg = sane_size / 1024 / 1024;
	int i;

	/*
	 * On machines with <100MB of memory this will result in a (full) cache-size
	 * of 32 entries, thus 32 * 5 * 64bytes = 10KB. (about 0.01 %)
	 * On machines with > 4GB of memory, we have a cache-size of 1024 entries,
	 * thus about 327KB.
	 *
	 * Side-note: we convert to u_int32_t. If sane_size is more than
	 * 16000 TB, we lose precision. But, who cares? :)
	 */
	tcp_cache_size = tcp_cache_roundup2((u_int32_t)(sane_size_meg >> 2));
	if (tcp_cache_size < 32)
		tcp_cache_size = 32;
	else if (tcp_cache_size > 1024)
		tcp_cache_size = 1024;

	tcp_cache = _MALLOC(sizeof(struct tcp_cache_head) * tcp_cache_size,
	    M_TEMP, M_ZERO);
	if (tcp_cache == NULL)
		panic("Allocating tcp_cache failed at boot-time!");

	tcp_cache_mtx_grp_attr = lck_grp_attr_alloc_init();
	tcp_cache_mtx_grp = lck_grp_alloc_init("tcpcache", tcp_cache_mtx_grp_attr);
	tcp_cache_mtx_attr = lck_attr_alloc_init();

	tcp_heuristics = _MALLOC(sizeof(struct tcp_heuristics_head) * tcp_cache_size,
	    M_TEMP, M_ZERO);
	if (tcp_heuristics == NULL)
		panic("Allocating tcp_heuristic failed at boot-time!");

	tcp_heuristic_mtx_grp_attr = lck_grp_attr_alloc_init();
	tcp_heuristic_mtx_grp = lck_grp_alloc_init("tcpheuristic", tcp_heuristic_mtx_grp_attr);
	tcp_heuristic_mtx_attr = lck_attr_alloc_init();

	for (i = 0; i < tcp_cache_size; i++) {
		lck_mtx_init(&tcp_cache[i].tch_mtx, tcp_cache_mtx_grp,
		    tcp_cache_mtx_attr);
		SLIST_INIT(&tcp_cache[i].tcp_caches);

		lck_mtx_init(&tcp_heuristics[i].thh_mtx, tcp_heuristic_mtx_grp,
		    tcp_heuristic_mtx_attr);
		SLIST_INIT(&tcp_heuristics[i].tcp_heuristics);
	}

	tcp_cache_hash_seed = RandomULong();
}