/*
 * Copyright (c) 2015-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* TCP-cache to store and retrieve TCP-related information */

#include <net/flowhash.h>
#include <net/route.h>
#include <netinet/in_pcb.h>
#include <netinet/mptcp_var.h>
#include <netinet/tcp_cache.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>
#include <kern/locks.h>
#include <sys/queue.h>
#include <dev/random/randomdev.h>

typedef union {
    struct in_addr addr;
    struct in6_addr addr6;
} in_4_6_addr;

struct tcp_heuristic_key {
    union {
        uint8_t thk_net_signature[IFNET_SIGNATURELEN];
        in_4_6_addr thk_ip;
    };
    sa_family_t thk_family;
};

struct tcp_heuristic {
    SLIST_ENTRY(tcp_heuristic) list;

    uint32_t th_last_access;

    struct tcp_heuristic_key th_key;

    char th_val_start[0]; /* Marker for memsetting to 0 */

    uint8_t th_tfo_data_loss;     /* The number of times a SYN+data has been lost */
    uint8_t th_tfo_req_loss;      /* The number of times a SYN+cookie-req has been lost */
    uint8_t th_tfo_data_rst;      /* The number of times a SYN+data has received a RST */
    uint8_t th_tfo_req_rst;       /* The number of times a SYN+cookie-req has received a RST */
    uint8_t th_mptcp_loss;        /* The number of times a SYN+MP_CAPABLE has been lost */
    uint8_t th_ecn_loss;          /* The number of times a SYN+ecn has been lost */
    uint8_t th_ecn_aggressive;    /* The number of times we did an aggressive fallback */
    uint8_t th_ecn_droprst;       /* The number of times an ECN connection received a RST after the first data packet */
    uint8_t th_ecn_droprxmt;      /* The number of times an ECN connection was dropped after multiple retransmits */
    uint8_t th_ecn_synrst;        /* The number of times a RST was received in response to an ECN-enabled SYN */
    uint32_t th_tfo_enabled_time; /* The moment when we re-enabled TFO after backing off */
    uint32_t th_tfo_backoff_until; /* Time until which we should not try TFO */
    uint32_t th_tfo_backoff;      /* Current backoff timer */
    uint32_t th_mptcp_backoff;    /* Time until which we should not try MPTCP */
    uint32_t th_ecn_backoff;      /* Time until which we should not try ECN */

    uint8_t th_tfo_in_backoff:1,  /* Are we avoiding TFO due to the backoff timer? */
        th_mptcp_in_backoff:1;    /* Are we avoiding MPTCP due to the backoff timer? */

    char th_val_end[0]; /* Marker for memsetting to 0 */
};

struct tcp_heuristics_head {
    SLIST_HEAD(tcp_heur_bucket, tcp_heuristic) tcp_heuristics;

    /* Per-hashbucket lock to avoid lock-contention */
    lck_mtx_t thh_mtx;
};

struct tcp_cache_key {
    sa_family_t tck_family;

    struct tcp_heuristic_key tck_src;
    in_4_6_addr tck_dst;
};

struct tcp_cache {
    SLIST_ENTRY(tcp_cache) list;

    u_int32_t tc_last_access;

    struct tcp_cache_key tc_key;

    u_int8_t tc_tfo_cookie[TFO_COOKIE_LEN_MAX];
    u_int8_t tc_tfo_cookie_len;
};

struct tcp_cache_head {
    SLIST_HEAD(tcp_cache_bucket, tcp_cache) tcp_caches;

    /* Per-hashbucket lock to avoid lock-contention */
    lck_mtx_t tch_mtx;
};

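/*
 * Two bucketized hash tables are kept below: tcp_cache stores the received
 * TFO cookie per (source, destination) pair, while tcp_heuristics tracks
 * loss/RST counters and backoff state keyed on the source side only
 * (network signature or local address).
 */
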
struct tcp_cache_key_src {
    struct ifnet *ifp;
    in_4_6_addr laddr;
    in_4_6_addr faddr;
    int af;
};

static u_int32_t tcp_cache_hash_seed;

size_t tcp_cache_size;

/*
 * The maximum depth of the hash-bucket. This way we limit the tcp_cache to
 * TCP_CACHE_BUCKET_SIZE * tcp_cache_size and have "natural" garbage collection.
 */
#define TCP_CACHE_BUCKET_SIZE 5

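/*
 * Example: with tcp_cache_size capped at 1024 buckets (see tcp_cache_init())
 * and a bucket depth of 5, each table holds at most 5120 entries; once a
 * bucket is full, the least-recently-accessed entry is recycled.
 */
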
static struct tcp_cache_head *tcp_cache;

decl_lck_mtx_data(, tcp_cache_mtx);

static lck_attr_t *tcp_cache_mtx_attr;
static lck_grp_t *tcp_cache_mtx_grp;
static lck_grp_attr_t *tcp_cache_mtx_grp_attr;

static struct tcp_heuristics_head *tcp_heuristics;

decl_lck_mtx_data(, tcp_heuristics_mtx);

static lck_attr_t *tcp_heuristic_mtx_attr;
static lck_grp_t *tcp_heuristic_mtx_grp;
static lck_grp_attr_t *tcp_heuristic_mtx_grp_attr;

static uint32_t tcp_backoff_maximum = 65536;

SYSCTL_UINT(_net_inet_tcp, OID_AUTO, backoff_maximum, CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_backoff_maximum, 0, "Maximum time for which we won't try TFO");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, ecn_timeout, CTLFLAG_RW | CTLFLAG_LOCKED,
    static int, tcp_ecn_timeout, 60, "Initial minutes to wait before re-trying ECN");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, disable_tcp_heuristics, CTLFLAG_RW | CTLFLAG_LOCKED,
    static int, disable_tcp_heuristics, 0, "Set to 1, to disable all TCP heuristics (TFO, ECN, MPTCP)");

static uint32_t
tcp_min_to_hz(uint32_t minutes)
{
    if (minutes > 65536) {
        return (uint32_t)65536 * 60 * TCP_RETRANSHZ;
    }

    return minutes * 60 * TCP_RETRANSHZ;
}

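/*
 * tcp_min_to_hz() converts minutes into TCP_RETRANSHZ ticks (assuming the
 * usual millisecond granularity of TCP_RETRANSHZ, 60 minutes become
 * 60 * 60 * 1000 = 3,600,000 ticks). The cap at 65536 minutes keeps the
 * multiplication from overflowing 32 bits.
 */
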
/*
 * This number is coupled with tcp_ecn_timeout, because we want to prevent
 * integer overflow. Need to find an inexpensive way to prevent integer overflow
 * while still allowing a dynamic sysctl.
 */
#define TCP_CACHE_OVERFLOW_PROTECT 9

/* Number of SYN-losses we accept */
#define TFO_MAX_COOKIE_LOSS 2
#define ECN_MAX_SYN_LOSS 2
#define MPTCP_MAX_SYN_LOSS 2
#define ECN_MAX_DROPRST 1
#define ECN_MAX_DROPRXMT 4
#define ECN_MAX_SYNRST 4

/* Flags for setting/unsetting loss-heuristics, limited to 4 bytes */
#define TCPCACHE_F_TFO_REQ      0x01
#define TCPCACHE_F_TFO_DATA     0x02
#define TCPCACHE_F_ECN          0x04
#define TCPCACHE_F_MPTCP        0x08
#define TCPCACHE_F_ECN_DROPRST  0x10
#define TCPCACHE_F_ECN_DROPRXMT 0x20
#define TCPCACHE_F_TFO_REQ_RST  0x40
#define TCPCACHE_F_TFO_DATA_RST 0x80
#define TCPCACHE_F_ECN_SYNRST   0x100

/* Always retry ECN after backing off to this level for some heuristics */
#define ECN_RETRY_LIMIT 9

#define TCP_CACHE_INC_IFNET_STAT(_ifp_, _af_, _stat_) { \
    if ((_ifp_) != NULL) { \
        if ((_af_) == AF_INET6) { \
            (_ifp_)->if_ipv6_stat->_stat_++;\
        } else { \
            (_ifp_)->if_ipv4_stat->_stat_++;\
        } \
    } \
}

/*
 * Round up to the next higher power of 2. See "Bit Twiddling Hacks".
 *
 * Might be worth moving this to a library so that others
 * (e.g., scale_to_powerof2()) can use it as well instead of a while-loop.
 */
static u_int32_t
tcp_cache_roundup2(u_int32_t a)
{
    a--;
    a |= a >> 1;
    a |= a >> 2;
    a |= a >> 4;
    a |= a >> 8;
    a |= a >> 16;
    a++;

    return a;
}

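/*
 * For example, tcp_cache_roundup2(33) and tcp_cache_roundup2(64) both
 * return 64; powers of two are returned unchanged.
 */
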
static void
tcp_cache_hash_src(struct tcp_cache_key_src *tcks, struct tcp_heuristic_key *key)
{
    struct ifnet *ifp = tcks->ifp;
    uint8_t len = sizeof(key->thk_net_signature);
    uint16_t flags;

    if (tcks->af == AF_INET6) {
        int ret;

        key->thk_family = AF_INET6;
        ret = ifnet_get_netsignature(ifp, AF_INET6, &len, &flags,
            key->thk_net_signature);

        /*
         * ifnet_get_netsignature only returns EINVAL if ifn is NULL
         * (we made sure that in the other cases it does not). So,
         * in this case we should take the connection's address.
         */
        if (ret == ENOENT || ret == EINVAL) {
            memcpy(&key->thk_ip.addr6, &tcks->laddr.addr6, sizeof(struct in6_addr));
        }
    } else {
        int ret;

        key->thk_family = AF_INET;
        ret = ifnet_get_netsignature(ifp, AF_INET, &len, &flags,
            key->thk_net_signature);

        /*
         * ifnet_get_netsignature only returns EINVAL if ifn is NULL
         * (we made sure that in the other cases it does not). So,
         * in this case we should take the connection's address.
         */
        if (ret == ENOENT || ret == EINVAL) {
            memcpy(&key->thk_ip.addr, &tcks->laddr.addr, sizeof(struct in_addr));
        }
    }
}

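/*
 * Note: keying on the interface's network signature (when available) means
 * the heuristics presumably follow the attached network rather than the
 * currently assigned local address; the local address is only the fallback.
 */
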
static u_int16_t
tcp_cache_hash(struct tcp_cache_key_src *tcks, struct tcp_cache_key *key)
{
    u_int32_t hash;

    bzero(key, sizeof(struct tcp_cache_key));

    tcp_cache_hash_src(tcks, &key->tck_src);

    if (tcks->af == AF_INET6) {
        key->tck_family = AF_INET6;
        memcpy(&key->tck_dst.addr6, &tcks->faddr.addr6,
            sizeof(struct in6_addr));
    } else {
        key->tck_family = AF_INET;
        memcpy(&key->tck_dst.addr, &tcks->faddr.addr,
            sizeof(struct in_addr));
    }

    hash = net_flowhash(key, sizeof(struct tcp_cache_key),
        tcp_cache_hash_seed);

    return hash & (tcp_cache_size - 1);
}

static void
tcp_cache_unlock(struct tcp_cache_head *head)
{
    lck_mtx_unlock(&head->tch_mtx);
}

/*
 * Make sure that everything that happens after tcp_getcache_with_lock()
 * is short enough to justify that you hold the per-bucket lock!
 *
 * Otherwise, better build another lookup-function that does not hold the
 * lock and copy out the bits and bytes.
 *
 * That's why we provide the head as a "return"-pointer so that the caller
 * can give it back to us for tcp_cache_unlock().
 */
static struct tcp_cache *
tcp_getcache_with_lock(struct tcp_cache_key_src *tcks,
    int create, struct tcp_cache_head **headarg)
{
    struct tcp_cache *tpcache = NULL;
    struct tcp_cache_head *head;
    struct tcp_cache_key key;
    u_int16_t hash;
    int i = 0;

    hash = tcp_cache_hash(tcks, &key);
    head = &tcp_cache[hash];

    lck_mtx_lock(&head->tch_mtx);

    /*** First step: Look for the tcp_cache in our bucket ***/
    SLIST_FOREACH(tpcache, &head->tcp_caches, list) {
        if (memcmp(&tpcache->tc_key, &key, sizeof(key)) == 0) {
            break;
        }

        i++;
    }

    /*** Second step: If it's not there, create/recycle it ***/
    if ((tpcache == NULL) && create) {
        if (i >= TCP_CACHE_BUCKET_SIZE) {
            struct tcp_cache *oldest_cache = NULL;
            u_int32_t max_age = 0;

            /* Look for the oldest tcp_cache in the bucket */
            SLIST_FOREACH(tpcache, &head->tcp_caches, list) {
                u_int32_t age = tcp_now - tpcache->tc_last_access;

                if (age > max_age) {
                    max_age = age;
                    oldest_cache = tpcache;
                }
            }
            VERIFY(oldest_cache != NULL);

            tpcache = oldest_cache;

            /* We recycle, thus let's indicate that there is no cookie */
            tpcache->tc_tfo_cookie_len = 0;
        } else {
            /* Create a new cache and add it to the list */
            tpcache = _MALLOC(sizeof(struct tcp_cache), M_TEMP,
                M_NOWAIT | M_ZERO);
            if (tpcache == NULL) {
                goto out_null;
            }

            SLIST_INSERT_HEAD(&head->tcp_caches, tpcache, list);
        }

        memcpy(&tpcache->tc_key, &key, sizeof(key));
    }

    if (tpcache == NULL) {
        goto out_null;
    }

    /* Update timestamp for garbage collection purposes */
    tpcache->tc_last_access = tcp_now;
    *headarg = head;

    return tpcache;

out_null:
    tcp_cache_unlock(head);
    return NULL;
}

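/*
 * Typical usage (as in the helpers below): look the entry up with the
 * bucket lock held, touch a few fields, and drop the lock right away, e.g.:
 *
 *	struct tcp_cache_head *head;
 *	struct tcp_cache *tpcache = tcp_getcache_with_lock(tcks, 1, &head);
 *	if (tpcache != NULL) {
 *		... short read/update of tpcache fields ...
 *		tcp_cache_unlock(head);
 *	}
 */
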
static void
tcp_cache_key_src_create(struct tcpcb *tp, struct tcp_cache_key_src *tcks)
{
    struct inpcb *inp = tp->t_inpcb;
    memset(tcks, 0, sizeof(*tcks));

    tcks->ifp = inp->inp_last_outifp;

    if (inp->inp_vflag & INP_IPV6) {
        memcpy(&tcks->laddr.addr6, &inp->in6p_laddr, sizeof(struct in6_addr));
        memcpy(&tcks->faddr.addr6, &inp->in6p_faddr, sizeof(struct in6_addr));
        tcks->af = AF_INET6;
    } else {
        memcpy(&tcks->laddr.addr, &inp->inp_laddr, sizeof(struct in_addr));
        memcpy(&tcks->faddr.addr, &inp->inp_faddr, sizeof(struct in_addr));
        tcks->af = AF_INET;
    }
}

static void
tcp_cache_set_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, u_int8_t len)
{
    struct tcp_cache_head *head;
    struct tcp_cache *tpcache;

    /* Call lookup/create function */
    tpcache = tcp_getcache_with_lock(tcks, 1, &head);
    if (tpcache == NULL) {
        return;
    }

    tpcache->tc_tfo_cookie_len = len > TFO_COOKIE_LEN_MAX ?
        TFO_COOKIE_LEN_MAX : len;
    memcpy(tpcache->tc_tfo_cookie, cookie, tpcache->tc_tfo_cookie_len);

    tcp_cache_unlock(head);
}

void
tcp_cache_set_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t len)
{
    struct tcp_cache_key_src tcks;

    tcp_cache_key_src_create(tp, &tcks);
    tcp_cache_set_cookie_common(&tcks, cookie, len);
}

static int
tcp_cache_get_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, u_int8_t *len)
{
    struct tcp_cache_head *head;
    struct tcp_cache *tpcache;

    /* Call lookup/create function */
    tpcache = tcp_getcache_with_lock(tcks, 1, &head);
    if (tpcache == NULL) {
        return 0;
    }

    if (tpcache->tc_tfo_cookie_len == 0) {
        tcp_cache_unlock(head);
        return 0;
    }

    /*
     * Not enough space - this should never happen as it has been checked
     * in tcp_tfo_check. So, fail here!
     */
    VERIFY(tpcache->tc_tfo_cookie_len <= *len);

    memcpy(cookie, tpcache->tc_tfo_cookie, tpcache->tc_tfo_cookie_len);
    *len = tpcache->tc_tfo_cookie_len;

    tcp_cache_unlock(head);

    return 1;
}

/*
 * Get the cookie related to 'tp', and copy it into 'cookie', provided that len
 * is big enough (len designates the available memory).
 * Upon return, 'len' is set to the cookie's length.
 *
 * Returns 0 if we should request a cookie.
 * Returns 1 if the cookie has been found and written.
 */
int
tcp_cache_get_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t *len)
{
    struct tcp_cache_key_src tcks;

    tcp_cache_key_src_create(tp, &tcks);
    return tcp_cache_get_cookie_common(&tcks, cookie, len);
}

static unsigned int
tcp_cache_get_cookie_len_common(struct tcp_cache_key_src *tcks)
{
    struct tcp_cache_head *head;
    struct tcp_cache *tpcache;
    unsigned int cookie_len;

    /* Call lookup/create function */
    tpcache = tcp_getcache_with_lock(tcks, 1, &head);
    if (tpcache == NULL) {
        return 0;
    }

    cookie_len = tpcache->tc_tfo_cookie_len;

    tcp_cache_unlock(head);

    return cookie_len;
}

unsigned int
tcp_cache_get_cookie_len(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;

    tcp_cache_key_src_create(tp, &tcks);
    return tcp_cache_get_cookie_len_common(&tcks);
}

static u_int16_t
tcp_heuristics_hash(struct tcp_cache_key_src *tcks, struct tcp_heuristic_key *key)
{
    u_int32_t hash;

    bzero(key, sizeof(struct tcp_heuristic_key));

    tcp_cache_hash_src(tcks, key);

    hash = net_flowhash(key, sizeof(struct tcp_heuristic_key),
        tcp_cache_hash_seed);

    return hash & (tcp_cache_size - 1);
}

static void
tcp_heuristic_unlock(struct tcp_heuristics_head *head)
{
    lck_mtx_unlock(&head->thh_mtx);
}

/*
 * Make sure that everything that happens after tcp_getheuristic_with_lock()
 * is short enough to justify that you hold the per-bucket lock!
 *
 * Otherwise, better build another lookup-function that does not hold the
 * lock and copy out the bits and bytes.
 *
 * That's why we provide the head as a "return"-pointer so that the caller
 * can give it back to us for tcp_heuristic_unlock().
 *
 * ToDo - way too much code-duplication. We should create an interface to handle
 * bucketized hashtables with recycling of the oldest element.
 */
static struct tcp_heuristic *
tcp_getheuristic_with_lock(struct tcp_cache_key_src *tcks,
    int create, struct tcp_heuristics_head **headarg)
{
    struct tcp_heuristic *tpheur = NULL;
    struct tcp_heuristics_head *head;
    struct tcp_heuristic_key key;
    u_int16_t hash;
    int i = 0;

    hash = tcp_heuristics_hash(tcks, &key);
    head = &tcp_heuristics[hash];

    lck_mtx_lock(&head->thh_mtx);

    /*** First step: Look for the tcp_heuristic in our bucket ***/
    SLIST_FOREACH(tpheur, &head->tcp_heuristics, list) {
        if (memcmp(&tpheur->th_key, &key, sizeof(key)) == 0) {
            break;
        }

        i++;
    }

    /*** Second step: If it's not there, create/recycle it ***/
    if ((tpheur == NULL) && create) {
        if (i >= TCP_CACHE_BUCKET_SIZE) {
            struct tcp_heuristic *oldest_heur = NULL;
            u_int32_t max_age = 0;

            /* Look for the oldest tcp_heuristic in the bucket */
            SLIST_FOREACH(tpheur, &head->tcp_heuristics, list) {
                u_int32_t age = tcp_now - tpheur->th_last_access;

                if (age > max_age) {
                    max_age = age;
                    oldest_heur = tpheur;
                }
            }
            VERIFY(oldest_heur != NULL);

            tpheur = oldest_heur;

            /* We recycle - set everything to 0 */
            bzero(tpheur->th_val_start,
                tpheur->th_val_end - tpheur->th_val_start);
        } else {
            /* Create a new heuristic and add it to the list */
            tpheur = _MALLOC(sizeof(struct tcp_heuristic), M_TEMP,
                M_NOWAIT | M_ZERO);
            if (tpheur == NULL) {
                goto out_null;
            }

            SLIST_INSERT_HEAD(&head->tcp_heuristics, tpheur, list);
        }

        /*
         * Set to tcp_now, to make sure it won't be > than tcp_now in the
         * near future.
         */
        tpheur->th_ecn_backoff = tcp_now;
        tpheur->th_tfo_backoff_until = tcp_now;
        tpheur->th_mptcp_backoff = tcp_now;
        tpheur->th_tfo_backoff = tcp_min_to_hz(tcp_ecn_timeout);

        memcpy(&tpheur->th_key, &key, sizeof(key));
    }

    if (tpheur == NULL) {
        goto out_null;
    }

    /* Update timestamp for garbage collection purposes */
    tpheur->th_last_access = tcp_now;
    *headarg = head;

    return tpheur;

out_null:
    tcp_heuristic_unlock(head);
    return NULL;
}

static void
tcp_heuristic_reset_counters(struct tcp_cache_key_src *tcks, u_int8_t flags)
{
    struct tcp_heuristics_head *head;
    struct tcp_heuristic *tpheur;

    /*
     * Don't attempt to create it! Keep the heuristics clean if the
     * server does not support TFO. This reduces the lookup-cost on
     * our side.
     */
    tpheur = tcp_getheuristic_with_lock(tcks, 0, &head);
    if (tpheur == NULL) {
        return;
    }

    if (flags & TCPCACHE_F_TFO_DATA) {
        tpheur->th_tfo_data_loss = 0;
    }

    if (flags & TCPCACHE_F_TFO_REQ) {
        tpheur->th_tfo_req_loss = 0;
    }

    if (flags & TCPCACHE_F_TFO_DATA_RST) {
        tpheur->th_tfo_data_rst = 0;
    }

    if (flags & TCPCACHE_F_TFO_REQ_RST) {
        tpheur->th_tfo_req_rst = 0;
    }

    if (flags & TCPCACHE_F_ECN) {
        tpheur->th_ecn_loss = 0;
        tpheur->th_ecn_synrst = 0;
    }

    if (flags & TCPCACHE_F_MPTCP) {
        tpheur->th_mptcp_loss = 0;
    }

    tcp_heuristic_unlock(head);
}

void
tcp_heuristic_tfo_success(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;
    uint8_t flag = 0;

    tcp_cache_key_src_create(tp, &tcks);

    if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) {
        flag = (TCPCACHE_F_TFO_DATA | TCPCACHE_F_TFO_REQ |
            TCPCACHE_F_TFO_DATA_RST | TCPCACHE_F_TFO_REQ_RST);
    }
    if (tp->t_tfo_stats & TFO_S_COOKIE_REQ) {
        flag = (TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_REQ_RST);
    }

    tcp_heuristic_reset_counters(&tcks, flag);
}

void
tcp_heuristic_mptcp_success(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;

    tcp_cache_key_src_create(tp, &tcks);
    tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_MPTCP);
}

void
tcp_heuristic_ecn_success(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;

    tcp_cache_key_src_create(tp, &tcks);
    tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_ECN);
}

static void
__tcp_heuristic_tfo_middlebox_common(struct tcp_heuristic *tpheur)
{
    if (tpheur->th_tfo_in_backoff) {
        return;
    }

    tpheur->th_tfo_in_backoff = 1;

    if (tpheur->th_tfo_enabled_time) {
        uint32_t old_backoff = tpheur->th_tfo_backoff;

        tpheur->th_tfo_backoff -= (tcp_now - tpheur->th_tfo_enabled_time);
        if (tpheur->th_tfo_backoff > old_backoff) {
            tpheur->th_tfo_backoff = tcp_min_to_hz(tcp_ecn_timeout);
        }
    }

    tpheur->th_tfo_backoff_until = tcp_now + tpheur->th_tfo_backoff;

    /* Then, increase the backoff time */
    tpheur->th_tfo_backoff *= 2;

    if (tpheur->th_tfo_backoff > tcp_min_to_hz(tcp_backoff_maximum)) {
        tpheur->th_tfo_backoff = tcp_min_to_hz(tcp_ecn_timeout);
    }
}

static void
tcp_heuristic_tfo_middlebox_common(struct tcp_cache_key_src *tcks)
{
    struct tcp_heuristics_head *head;
    struct tcp_heuristic *tpheur;

    tpheur = tcp_getheuristic_with_lock(tcks, 1, &head);
    if (tpheur == NULL) {
        return;
    }

    __tcp_heuristic_tfo_middlebox_common(tpheur);

    tcp_heuristic_unlock(head);
}

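/*
 * tcp_heuristic_inc_counters() below bumps the per-heuristic loss/RST
 * counters named in 'flags'. Each counter is clamped at
 * TCP_CACHE_OVERFLOW_PROTECT (9) so that the left-shifts used for the
 * exponential backoff cannot overflow.
 */
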
static void
tcp_heuristic_inc_counters(struct tcp_cache_key_src *tcks,
    u_int32_t flags)
{
    struct tcp_heuristics_head *head;
    struct tcp_heuristic *tpheur;

    tpheur = tcp_getheuristic_with_lock(tcks, 1, &head);
    if (tpheur == NULL) {
        return;
    }

    /* Limit to prevent integer-overflow during exponential backoff */
    if ((flags & TCPCACHE_F_TFO_DATA) && tpheur->th_tfo_data_loss < TCP_CACHE_OVERFLOW_PROTECT) {
        tpheur->th_tfo_data_loss++;

        if (tpheur->th_tfo_data_loss >= TFO_MAX_COOKIE_LOSS) {
            __tcp_heuristic_tfo_middlebox_common(tpheur);
        }
    }

    if ((flags & TCPCACHE_F_TFO_REQ) && tpheur->th_tfo_req_loss < TCP_CACHE_OVERFLOW_PROTECT) {
        tpheur->th_tfo_req_loss++;

        if (tpheur->th_tfo_req_loss >= TFO_MAX_COOKIE_LOSS) {
            __tcp_heuristic_tfo_middlebox_common(tpheur);
        }
    }

    if ((flags & TCPCACHE_F_TFO_DATA_RST) && tpheur->th_tfo_data_rst < TCP_CACHE_OVERFLOW_PROTECT) {
        tpheur->th_tfo_data_rst++;

        if (tpheur->th_tfo_data_rst >= TFO_MAX_COOKIE_LOSS) {
            __tcp_heuristic_tfo_middlebox_common(tpheur);
        }
    }

    if ((flags & TCPCACHE_F_TFO_REQ_RST) && tpheur->th_tfo_req_rst < TCP_CACHE_OVERFLOW_PROTECT) {
        tpheur->th_tfo_req_rst++;

        if (tpheur->th_tfo_req_rst >= TFO_MAX_COOKIE_LOSS) {
            __tcp_heuristic_tfo_middlebox_common(tpheur);
        }
    }

    if ((flags & TCPCACHE_F_ECN) && tpheur->th_ecn_loss < TCP_CACHE_OVERFLOW_PROTECT) {
        tpheur->th_ecn_loss++;
        if (tpheur->th_ecn_loss >= ECN_MAX_SYN_LOSS) {
            tcpstat.tcps_ecn_fallback_synloss++;
            TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af, ecn_fallback_synloss);
            tpheur->th_ecn_backoff = tcp_now +
                (tcp_min_to_hz(tcp_ecn_timeout) <<
                (tpheur->th_ecn_loss - ECN_MAX_SYN_LOSS));
        }
    }

    if ((flags & TCPCACHE_F_MPTCP) &&
        tpheur->th_mptcp_loss < TCP_CACHE_OVERFLOW_PROTECT) {
        tpheur->th_mptcp_loss++;
        if (tpheur->th_mptcp_loss >= MPTCP_MAX_SYN_LOSS) {
            /*
             * Yes, we take tcp_ecn_timeout, to avoid adding yet
             * another sysctl that is just used for testing.
             */
            tpheur->th_mptcp_backoff = tcp_now +
                (tcp_min_to_hz(tcp_ecn_timeout) <<
                (tpheur->th_mptcp_loss - MPTCP_MAX_SYN_LOSS));
        }
    }

    if ((flags & TCPCACHE_F_ECN_DROPRST) &&
        tpheur->th_ecn_droprst < TCP_CACHE_OVERFLOW_PROTECT) {
        tpheur->th_ecn_droprst++;
        if (tpheur->th_ecn_droprst >= ECN_MAX_DROPRST) {
            tcpstat.tcps_ecn_fallback_droprst++;
            TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af,
                ecn_fallback_droprst);
            tpheur->th_ecn_backoff = tcp_now +
                (tcp_min_to_hz(tcp_ecn_timeout) <<
                (tpheur->th_ecn_droprst - ECN_MAX_DROPRST));
        }
    }

    if ((flags & TCPCACHE_F_ECN_DROPRXMT) &&
        tpheur->th_ecn_droprxmt < TCP_CACHE_OVERFLOW_PROTECT) {
        tpheur->th_ecn_droprxmt++;
        if (tpheur->th_ecn_droprxmt >= ECN_MAX_DROPRXMT) {
            tcpstat.tcps_ecn_fallback_droprxmt++;
            TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af,
                ecn_fallback_droprxmt);
            tpheur->th_ecn_backoff = tcp_now +
                (tcp_min_to_hz(tcp_ecn_timeout) <<
                (tpheur->th_ecn_droprxmt - ECN_MAX_DROPRXMT));
        }
    }

    if ((flags & TCPCACHE_F_ECN_SYNRST) &&
        tpheur->th_ecn_synrst < TCP_CACHE_OVERFLOW_PROTECT) {
        tpheur->th_ecn_synrst++;
        if (tpheur->th_ecn_synrst >= ECN_MAX_SYNRST) {
            tcpstat.tcps_ecn_fallback_synrst++;
            TCP_CACHE_INC_IFNET_STAT(tcks->ifp, tcks->af,
                ecn_fallback_synrst);
            tpheur->th_ecn_backoff = tcp_now +
                (tcp_min_to_hz(tcp_ecn_timeout) <<
                (tpheur->th_ecn_synrst - ECN_MAX_SYNRST));
        }
    }

    tcp_heuristic_unlock(head);
}

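/*
 * Worked example for the ECN SYN-loss path: with the default tcp_ecn_timeout
 * of 60 minutes and ECN_MAX_SYN_LOSS of 2, the second lost ECN-setup SYN
 * backs ECN off for 60 minutes (shift of 0), the third for 120 minutes
 * (shift of 1), and so on up to the overflow-protect limit.
 */
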
void
tcp_heuristic_tfo_loss(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;
    uint32_t flag = 0;

    tcp_cache_key_src_create(tp, &tcks);

    if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) {
        flag = (TCPCACHE_F_TFO_DATA | TCPCACHE_F_TFO_REQ);
    }
    if (tp->t_tfo_stats & TFO_S_COOKIE_REQ) {
        flag = TCPCACHE_F_TFO_REQ;
    }

    tcp_heuristic_inc_counters(&tcks, flag);
}

void
tcp_heuristic_tfo_rst(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;
    uint32_t flag = 0;

    tcp_cache_key_src_create(tp, &tcks);

    if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) {
        flag = (TCPCACHE_F_TFO_DATA_RST | TCPCACHE_F_TFO_REQ_RST);
    }
    if (tp->t_tfo_stats & TFO_S_COOKIE_REQ) {
        flag = TCPCACHE_F_TFO_REQ_RST;
    }

    tcp_heuristic_inc_counters(&tcks, flag);
}

void
tcp_heuristic_mptcp_loss(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;

    tcp_cache_key_src_create(tp, &tcks);
    tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_MPTCP);
}

void
tcp_heuristic_ecn_loss(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;

    tcp_cache_key_src_create(tp, &tcks);
    tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN);
}

void
tcp_heuristic_ecn_droprst(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;

    tcp_cache_key_src_create(tp, &tcks);
    tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_DROPRST);
}

void
tcp_heuristic_ecn_droprxmt(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;

    tcp_cache_key_src_create(tp, &tcks);
    tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_DROPRXMT);
}

void
tcp_heuristic_ecn_synrst(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;

    tcp_cache_key_src_create(tp, &tcks);
    tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_SYNRST);
}

void
tcp_heuristic_tfo_middlebox(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;

    tp->t_tfo_flags |= TFO_F_HEURISTIC_DONE;

    tcp_cache_key_src_create(tp, &tcks);
    tcp_heuristic_tfo_middlebox_common(&tcks);
}

static void
tcp_heuristic_ecn_aggressive_common(struct tcp_cache_key_src *tcks)
{
    struct tcp_heuristics_head *head;
    struct tcp_heuristic *tpheur;

    tpheur = tcp_getheuristic_with_lock(tcks, 1, &head);
    if (tpheur == NULL) {
        return;
    }

    /* Must be done before, otherwise we will start off with expo-backoff */
    tpheur->th_ecn_backoff = tcp_now +
        (tcp_min_to_hz(tcp_ecn_timeout) << (tpheur->th_ecn_aggressive));

    /*
     * Ugly way to prevent integer overflow... limit the counter to avoid
     * overflowing during the exponential backoff.
     */
    if (tpheur->th_ecn_aggressive < TCP_CACHE_OVERFLOW_PROTECT) {
        tpheur->th_ecn_aggressive++;
    }

    tcp_heuristic_unlock(head);
}

void
tcp_heuristic_ecn_aggressive(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;

    tcp_cache_key_src_create(tp, &tcks);
    tcp_heuristic_ecn_aggressive_common(&tcks);
}

static boolean_t
tcp_heuristic_do_tfo_common(struct tcp_cache_key_src *tcks)
{
    struct tcp_heuristics_head *head;
    struct tcp_heuristic *tpheur;

    if (disable_tcp_heuristics) {
        return TRUE;
    }

    /* Get the tcp-heuristic. */
    tpheur = tcp_getheuristic_with_lock(tcks, 0, &head);
    if (tpheur == NULL) {
        return TRUE;
    }

    if (tpheur->th_tfo_in_backoff == 0) {
        goto tfo_ok;
    }

    if (TSTMP_GT(tcp_now, tpheur->th_tfo_backoff_until)) {
        tpheur->th_tfo_in_backoff = 0;
        tpheur->th_tfo_enabled_time = tcp_now;

        goto tfo_ok;
    }

    tcp_heuristic_unlock(head);
    return FALSE;

tfo_ok:
    tcp_heuristic_unlock(head);
    return TRUE;
}

boolean_t
tcp_heuristic_do_tfo(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;

    tcp_cache_key_src_create(tp, &tcks);
    if (tcp_heuristic_do_tfo_common(&tcks)) {
        return TRUE;
    }

    return FALSE;
}

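/*
 * Note: TRUE here means "go ahead and try TFO"; FALSE means we are still
 * within the TFO backoff window for this source/network heuristic.
 */
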
boolean_t
tcp_heuristic_do_mptcp(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;
    struct tcp_heuristics_head *head = NULL;
    struct tcp_heuristic *tpheur;

    if (disable_tcp_heuristics) {
        return TRUE;
    }

    tcp_cache_key_src_create(tp, &tcks);

    /* Get the tcp-heuristic. */
    tpheur = tcp_getheuristic_with_lock(&tcks, 0, &head);
    if (tpheur == NULL) {
        return TRUE;
    }

    if (TSTMP_GT(tpheur->th_mptcp_backoff, tcp_now)) {
        goto fallback;
    }

    tcp_heuristic_unlock(head);

    return TRUE;

fallback:
    if (head) {
        tcp_heuristic_unlock(head);
    }

    if (tptomptp(tp)->mpt_mpte->mpte_flags & MPTE_FIRSTPARTY) {
        tcpstat.tcps_mptcp_fp_heuristic_fallback++;
    } else {
        tcpstat.tcps_mptcp_heuristic_fallback++;
    }

    return FALSE;
}

static boolean_t
tcp_heuristic_do_ecn_common(struct tcp_cache_key_src *tcks)
{
    struct tcp_heuristics_head *head;
    struct tcp_heuristic *tpheur;
    boolean_t ret = TRUE;

    if (disable_tcp_heuristics) {
        return TRUE;
    }

    /* Get the tcp-heuristic. */
    tpheur = tcp_getheuristic_with_lock(tcks, 0, &head);
    if (tpheur == NULL) {
        return ret;
    }

    if (TSTMP_GT(tpheur->th_ecn_backoff, tcp_now)) {
        ret = FALSE;
    } else {
        /* Reset the following counters to start re-evaluating */
        if (tpheur->th_ecn_droprst >= ECN_RETRY_LIMIT) {
            tpheur->th_ecn_droprst = 0;
        }
        if (tpheur->th_ecn_droprxmt >= ECN_RETRY_LIMIT) {
            tpheur->th_ecn_droprxmt = 0;
        }
        if (tpheur->th_ecn_synrst >= ECN_RETRY_LIMIT) {
            tpheur->th_ecn_synrst = 0;
        }
    }

    tcp_heuristic_unlock(head);

    return ret;
}

boolean_t
tcp_heuristic_do_ecn(struct tcpcb *tp)
{
    struct tcp_cache_key_src tcks;

    tcp_cache_key_src_create(tp, &tcks);
    return tcp_heuristic_do_ecn_common(&tcks);
}

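/*
 * As above, TRUE means ECN may be negotiated. While the backoff timestamp
 * lies in the future the function keeps returning FALSE; once the backoff
 * has expired, counters that reached ECN_RETRY_LIMIT are cleared so ECN is
 * re-evaluated from scratch.
 */
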
boolean_t
tcp_heuristic_do_ecn_with_address(struct ifnet *ifp,
    union sockaddr_in_4_6 *local_address)
{
    struct tcp_cache_key_src tcks;

    memset(&tcks, 0, sizeof(tcks));
    tcks.ifp = ifp;

    calculate_tcp_clock();

    if (local_address->sa.sa_family == AF_INET6) {
        memcpy(&tcks.laddr.addr6, &local_address->sin6.sin6_addr, sizeof(struct in6_addr));
        tcks.af = AF_INET6;
    } else if (local_address->sa.sa_family == AF_INET) {
        memcpy(&tcks.laddr.addr, &local_address->sin.sin_addr, sizeof(struct in_addr));
        tcks.af = AF_INET;
    }

    return tcp_heuristic_do_ecn_common(&tcks);
}

void
tcp_heuristics_ecn_update(struct necp_tcp_ecn_cache *necp_buffer,
    struct ifnet *ifp, union sockaddr_in_4_6 *local_address)
{
    struct tcp_cache_key_src tcks;

    memset(&tcks, 0, sizeof(tcks));
    tcks.ifp = ifp;

    calculate_tcp_clock();

    if (local_address->sa.sa_family == AF_INET6) {
        memcpy(&tcks.laddr.addr6, &local_address->sin6.sin6_addr, sizeof(struct in6_addr));
        tcks.af = AF_INET6;
    } else if (local_address->sa.sa_family == AF_INET) {
        memcpy(&tcks.laddr.addr, &local_address->sin.sin_addr, sizeof(struct in_addr));
        tcks.af = AF_INET;
    }

    if (necp_buffer->necp_tcp_ecn_heuristics_success) {
        tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_ECN);
    } else if (necp_buffer->necp_tcp_ecn_heuristics_loss) {
        tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN);
    } else if (necp_buffer->necp_tcp_ecn_heuristics_drop_rst) {
        tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_DROPRST);
    } else if (necp_buffer->necp_tcp_ecn_heuristics_drop_rxmt) {
        tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_DROPRXMT);
    } else if (necp_buffer->necp_tcp_ecn_heuristics_syn_rst) {
        tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_SYNRST);
    } else if (necp_buffer->necp_tcp_ecn_heuristics_aggressive) {
        tcp_heuristic_ecn_aggressive_common(&tcks);
    }
}

boolean_t
tcp_heuristic_do_tfo_with_address(struct ifnet *ifp,
    union sockaddr_in_4_6 *local_address, union sockaddr_in_4_6 *remote_address,
    u_int8_t *cookie, u_int8_t *cookie_len)
{
    struct tcp_cache_key_src tcks;

    memset(&tcks, 0, sizeof(tcks));
    tcks.ifp = ifp;

    calculate_tcp_clock();

    if (remote_address->sa.sa_family == AF_INET6) {
        memcpy(&tcks.laddr.addr6, &local_address->sin6.sin6_addr, sizeof(struct in6_addr));
        memcpy(&tcks.faddr.addr6, &remote_address->sin6.sin6_addr, sizeof(struct in6_addr));
        tcks.af = AF_INET6;
    } else if (remote_address->sa.sa_family == AF_INET) {
        memcpy(&tcks.laddr.addr, &local_address->sin.sin_addr, sizeof(struct in_addr));
        memcpy(&tcks.faddr.addr, &remote_address->sin.sin_addr, sizeof(struct in_addr));
        tcks.af = AF_INET;
    }

    if (tcp_heuristic_do_tfo_common(&tcks)) {
        if (!tcp_cache_get_cookie_common(&tcks, cookie, cookie_len)) {
            *cookie_len = 0;
        }
        return TRUE;
    }

    return FALSE;
}

void
tcp_heuristics_tfo_update(struct necp_tcp_tfo_cache *necp_buffer,
    struct ifnet *ifp, union sockaddr_in_4_6 *local_address,
    union sockaddr_in_4_6 *remote_address)
{
    struct tcp_cache_key_src tcks;

    memset(&tcks, 0, sizeof(tcks));
    tcks.ifp = ifp;

    calculate_tcp_clock();

    if (remote_address->sa.sa_family == AF_INET6) {
        memcpy(&tcks.laddr.addr6, &local_address->sin6.sin6_addr, sizeof(struct in6_addr));
        memcpy(&tcks.faddr.addr6, &remote_address->sin6.sin6_addr, sizeof(struct in6_addr));
        tcks.af = AF_INET6;
    } else if (remote_address->sa.sa_family == AF_INET) {
        memcpy(&tcks.laddr.addr, &local_address->sin.sin_addr, sizeof(struct in_addr));
        memcpy(&tcks.faddr.addr, &remote_address->sin.sin_addr, sizeof(struct in_addr));
        tcks.af = AF_INET;
    }

    if (necp_buffer->necp_tcp_tfo_heuristics_success) {
        tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_DATA |
            TCPCACHE_F_TFO_REQ_RST | TCPCACHE_F_TFO_DATA_RST);
    }

    if (necp_buffer->necp_tcp_tfo_heuristics_success_req) {
        tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_REQ_RST);
    }

    if (necp_buffer->necp_tcp_tfo_heuristics_loss) {
        tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_DATA);
    }

    if (necp_buffer->necp_tcp_tfo_heuristics_loss_req) {
        tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_TFO_REQ);
    }

    if (necp_buffer->necp_tcp_tfo_heuristics_rst_data) {
        tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_TFO_REQ_RST | TCPCACHE_F_TFO_DATA_RST);
    }

    if (necp_buffer->necp_tcp_tfo_heuristics_rst_req) {
        tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_TFO_REQ_RST);
    }

    if (necp_buffer->necp_tcp_tfo_heuristics_middlebox) {
        tcp_heuristic_tfo_middlebox_common(&tcks);
    }

    if (necp_buffer->necp_tcp_tfo_cookie_len != 0) {
        tcp_cache_set_cookie_common(&tcks,
            necp_buffer->necp_tcp_tfo_cookie, necp_buffer->necp_tcp_tfo_cookie_len);
    }
}

static void
sysctl_cleartfocache(void)
{
    int i;

    for (i = 0; i < tcp_cache_size; i++) {
        struct tcp_cache_head *head = &tcp_cache[i];
        struct tcp_cache *tpcache, *tmp;
        struct tcp_heuristics_head *hhead = &tcp_heuristics[i];
        struct tcp_heuristic *tpheur, *htmp;

        lck_mtx_lock(&head->tch_mtx);
        SLIST_FOREACH_SAFE(tpcache, &head->tcp_caches, list, tmp) {
            SLIST_REMOVE(&head->tcp_caches, tpcache, tcp_cache, list);
            _FREE(tpcache, M_TEMP);
        }
        lck_mtx_unlock(&head->tch_mtx);

        lck_mtx_lock(&hhead->thh_mtx);
        SLIST_FOREACH_SAFE(tpheur, &hhead->tcp_heuristics, list, htmp) {
            SLIST_REMOVE(&hhead->tcp_heuristics, tpheur, tcp_heuristic, list);
            _FREE(tpheur, M_TEMP);
        }
        lck_mtx_unlock(&hhead->thh_mtx);
    }
}

/* This sysctl is useful for testing purposes only */
static int tcpcleartfo = 0;

static int sysctl_cleartfo SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int error = 0, val, oldval = tcpcleartfo;

    val = oldval;
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || !req->newptr) {
        goto exit;
    }

    /*
     * The actual value does not matter. If the value is set, it triggers
     * the clearing of the TFO cache. If a future implementation does not
     * use the route entry to hold the TFO cache, replace the route sysctl.
     */
    if (val != oldval) {
        sysctl_cleartfocache();
    }

    tcpcleartfo = val;

exit:
    return error;
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, clear_tfocache, CTLTYPE_INT | CTLFLAG_RW |
    CTLFLAG_LOCKED, &tcpcleartfo, 0, &sysctl_cleartfo, "I",
    "Toggle to clear the TFO destination based heuristic cache");

void
tcp_cache_init(void)
{
    uint64_t sane_size_meg = sane_size / 1024 / 1024;
    int i;

    /*
     * On machines with <100MB of memory this will result in a (full) cache-size
     * of 32 entries, thus 32 * 5 * 64bytes = 10KB. (about 0.01 %)
     * On machines with > 4GB of memory, we have a cache-size of 1024 entries,
     * thus about 327KB.
     *
     * Side-note: we convert to u_int32_t. If sane_size is more than
     * 16000 TB, we lose precision. But, who cares? :)
     */
    tcp_cache_size = tcp_cache_roundup2((u_int32_t)(sane_size_meg >> 2));
    if (tcp_cache_size < 32) {
        tcp_cache_size = 32;
    } else if (tcp_cache_size > 1024) {
        tcp_cache_size = 1024;
    }

    tcp_cache = _MALLOC(sizeof(struct tcp_cache_head) * tcp_cache_size,
        M_TEMP, M_ZERO);
    if (tcp_cache == NULL) {
        panic("Allocating tcp_cache failed at boot-time!");
    }

    tcp_cache_mtx_grp_attr = lck_grp_attr_alloc_init();
    tcp_cache_mtx_grp = lck_grp_alloc_init("tcpcache", tcp_cache_mtx_grp_attr);
    tcp_cache_mtx_attr = lck_attr_alloc_init();

    tcp_heuristics = _MALLOC(sizeof(struct tcp_heuristics_head) * tcp_cache_size,
        M_TEMP, M_ZERO);
    if (tcp_heuristics == NULL) {
        panic("Allocating tcp_heuristic failed at boot-time!");
    }

    tcp_heuristic_mtx_grp_attr = lck_grp_attr_alloc_init();
    tcp_heuristic_mtx_grp = lck_grp_alloc_init("tcpheuristic", tcp_heuristic_mtx_grp_attr);
    tcp_heuristic_mtx_attr = lck_attr_alloc_init();

    for (i = 0; i < tcp_cache_size; i++) {
        lck_mtx_init(&tcp_cache[i].tch_mtx, tcp_cache_mtx_grp,
            tcp_cache_mtx_attr);
        SLIST_INIT(&tcp_cache[i].tcp_caches);

        lck_mtx_init(&tcp_heuristics[i].thh_mtx, tcp_heuristic_mtx_grp,
            tcp_heuristic_mtx_attr);
        SLIST_INIT(&tcp_heuristics[i].tcp_heuristics);
    }

    tcp_cache_hash_seed = RandomULong();
}
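
/*
 * Sizing example: on a machine with 1 GB of memory, sane_size_meg >> 2 is
 * 256, which is already a power of two, so both tables get 256 buckets;
 * anything below 128 MB clamps to the 32-bucket minimum and anything above
 * 4 GB to the 1024-bucket maximum.
 */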