/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* TCP-cache to store and retrieve TCP-related information */
#include <net/flowhash.h>
#include <net/route.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_cache.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>
#include <kern/locks.h>
#include <sys/queue.h>
#include <dev/random/randomdev.h>
struct tcp_heuristic_key {
	union {
		uint8_t thk_net_signature[IFNET_SIGNATURELEN];
		union {
			struct in_addr addr;
			struct in6_addr addr6;
		} thk_ip;
	};
	sa_family_t	thk_family;
};
struct tcp_heuristic {
	SLIST_ENTRY(tcp_heuristic) list;

	u_int32_t	th_last_access;

	struct tcp_heuristic_key	th_key;

	/*
	 * If tfo_cookie_loss is changed to a smaller type, it might be worth
	 * checking for integer-overflow in tcp_heuristic_tfo_inc_loss.
	 */
	u_int32_t	th_tfo_cookie_loss;	/* The number of times a SYN+cookie has been lost */
	u_int32_t	th_tfo_fallback_trials;	/* Number of times we did not try out TFO due to SYN-loss */
	u_int32_t	th_tfo_cookie_backoff;	/* Time until when we should not try out TFO */

	u_int8_t	th_tfo_in_backoff:1,		/* Are we doing TFO due to the backoff timer? */
			th_tfo_aggressive_fallback:1,	/* Aggressive fallback due to nasty middlebox */
			th_tfo_snd_middlebox_supp:1,	/* We are sure that the network supports TFO in upstream direction */
			th_tfo_rcv_middlebox_supp:1;	/* We are sure that the network supports TFO in downstream direction */
};
struct tcp_heuristics_head {
	SLIST_HEAD(tcp_heur_bucket, tcp_heuristic) tcp_heuristics;

	/* Per-hashbucket lock to avoid lock-contention */
	lck_mtx_t	thh_mtx;
};
struct tcp_cache_key {
	sa_family_t	tck_family;

	struct tcp_heuristic_key tck_src;
	union {
		struct in_addr addr;
		struct in6_addr addr6;
	} tck_dst;
};
struct tcp_cache {
	SLIST_ENTRY(tcp_cache) list;

	u_int32_t	tc_last_access;

	struct tcp_cache_key	tc_key;

	u_int8_t	tc_tfo_cookie[TFO_COOKIE_LEN_MAX];
	u_int8_t	tc_tfo_cookie_len;
};
struct tcp_cache_head {
	SLIST_HEAD(tcp_cache_bucket, tcp_cache) tcp_caches;

	/* Per-hashbucket lock to avoid lock-contention */
	lck_mtx_t	tch_mtx;
};
static u_int32_t tcp_cache_hash_seed;

size_t tcp_cache_size;

/*
 * The maximum depth of the hash-bucket. This way we limit the tcp_cache to
 * TCP_CACHE_BUCKET_SIZE * tcp_cache_size and have "natural" garbage collection.
 */
#define	TCP_CACHE_BUCKET_SIZE	5
static struct tcp_cache_head *tcp_cache;

decl_lck_mtx_data(, tcp_cache_mtx);

static lck_attr_t	*tcp_cache_mtx_attr;
static lck_grp_t	*tcp_cache_mtx_grp;
static lck_grp_attr_t	*tcp_cache_mtx_grp_attr;

static struct tcp_heuristics_head *tcp_heuristics;

decl_lck_mtx_data(, tcp_heuristics_mtx);

static lck_attr_t	*tcp_heuristic_mtx_attr;
static lck_grp_t	*tcp_heuristic_mtx_grp;
static lck_grp_attr_t	*tcp_heuristic_mtx_grp_attr;
/* Number of SYN-losses we accept */
#define	TFO_MAX_COOKIE_LOSS	2
/*
 * Round up to the next higher power of 2. See "Bit Twiddling Hacks".
 *
 * Might be worth moving this to a library so that others
 * (e.g., scale_to_powerof2()) can use this as well instead of a while-loop.
 */
static u_int32_t tcp_cache_roundup2(u_int32_t a)
{
	a--;
	a |= a >> 1;
	a |= a >> 2;
	a |= a >> 4;
	a |= a >> 8;
	a |= a >> 16;
	a++;

	return (a);
}
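
/*
 * Illustrative values (not part of the original source): tcp_cache_roundup2(5) == 8,
 * tcp_cache_roundup2(32) == 32 and tcp_cache_roundup2(33) == 64. The result is
 * always a power of two, which is what lets the hash functions below mask with
 * (tcp_cache_size - 1) instead of computing a modulo.
 */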
static void tcp_cache_hash_src(struct inpcb *inp, struct tcp_heuristic_key *key)
{
	struct ifnet *ifn = inp->inp_last_outifp;
	uint8_t len = sizeof(key->thk_net_signature);
	uint16_t flags;
	int ret;

	if (inp->inp_vflag & INP_IPV6) {
		key->thk_family = AF_INET6;
		ret = ifnet_get_netsignature(ifn, AF_INET6, &len, &flags,
		    key->thk_net_signature);

		/*
		 * ifnet_get_netsignature only returns EINVAL if ifn is NULL
		 * (we made sure that in the other cases it does not). So,
		 * in this case we should take the connection's address.
		 */
		if (ret == ENOENT || ret == EINVAL)
			memcpy(&key->thk_ip.addr6, &inp->in6p_laddr,
			    sizeof(struct in6_addr));
	} else {
		key->thk_family = AF_INET;
		ret = ifnet_get_netsignature(ifn, AF_INET, &len, &flags,
		    key->thk_net_signature);

		/*
		 * ifnet_get_netsignature only returns EINVAL if ifn is NULL
		 * (we made sure that in the other cases it does not). So,
		 * in this case we should take the connection's address.
		 */
		if (ret == ENOENT || ret == EINVAL)
			memcpy(&key->thk_ip.addr, &inp->inp_laddr,
			    sizeof(struct in_addr));
	}
}
static u_int16_t tcp_cache_hash(struct inpcb *inp, struct tcp_cache_key *key)
{
	u_int32_t hash;

	bzero(key, sizeof(struct tcp_cache_key));

	tcp_cache_hash_src(inp, &key->tck_src);

	if (inp->inp_vflag & INP_IPV6) {
		key->tck_family = AF_INET6;
		memcpy(&key->tck_dst.addr6, &inp->in6p_faddr,
		    sizeof(struct in6_addr));
	} else {
		key->tck_family = AF_INET;
		memcpy(&key->tck_dst.addr, &inp->inp_faddr,
		    sizeof(struct in_addr));
	}

	hash = net_flowhash(key, sizeof(struct tcp_cache_key),
	    tcp_cache_hash_seed);

	return (hash & (tcp_cache_size - 1));
}
static void tcp_cache_unlock(struct tcp_cache_head *head)
{
	lck_mtx_unlock(&head->tch_mtx);
}
/*
 * Make sure that everything that happens after tcp_getcache_with_lock()
 * is short enough to justify that you hold the per-bucket lock!!!
 *
 * Otherwise, better build another lookup-function that does not hold the
 * lock and you copy out the bits and bytes.
 *
 * That's why we provide the head as a "return"-pointer so that the caller
 * can give it back to us for tcp_cache_unlock().
 */
static struct tcp_cache *tcp_getcache_with_lock(struct tcpcb *tp, int create,
    struct tcp_cache_head **headarg)
{
	struct inpcb *inp = tp->t_inpcb;
	struct tcp_cache *tpcache = NULL;
	struct tcp_cache_head *head;
	struct tcp_cache_key key;
	u_int16_t hash;
	int i = 0;

	hash = tcp_cache_hash(inp, &key);
	head = &tcp_cache[hash];

	lck_mtx_lock(&head->tch_mtx);

	/*** First step: Look for the tcp_cache in our bucket ***/
	SLIST_FOREACH(tpcache, &head->tcp_caches, list) {
		if (memcmp(&tpcache->tc_key, &key, sizeof(key)) == 0)
			break;

		i++;
	}

	/*** Second step: If it's not there, create/recycle it ***/
	if ((tpcache == NULL) && create) {
		if (i >= TCP_CACHE_BUCKET_SIZE) {
			struct tcp_cache *oldest_cache = NULL;
			u_int32_t max_age = 0;

			/* Look for the oldest tcp_cache in the bucket */
			SLIST_FOREACH(tpcache, &head->tcp_caches, list) {
				u_int32_t age = tcp_now - tpcache->tc_last_access;

				if (age > max_age) {
					max_age = age;
					oldest_cache = tpcache;
				}
			}
			VERIFY(oldest_cache != NULL);

			tpcache = oldest_cache;

			/* We recycle, thus let's indicate that there is no cookie */
			tpcache->tc_tfo_cookie_len = 0;
		} else {
			/* Create a new cache and add it to the list */
			tpcache = _MALLOC(sizeof(struct tcp_cache), M_TEMP,
			    M_NOWAIT | M_ZERO);
			if (tpcache == NULL)
				goto out_null;

			SLIST_INSERT_HEAD(&head->tcp_caches, tpcache, list);
		}

		memcpy(&tpcache->tc_key, &key, sizeof(key));
	}

	if (tpcache == NULL)
		goto out_null;

	/* Update timestamp for garbage collection purposes */
	tpcache->tc_last_access = tcp_now;
	*headarg = head;

	return (tpcache);

out_null:
	tcp_cache_unlock(head);
	return (NULL);
}
void tcp_cache_set_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t len)
{
	struct tcp_cache_head *head;
	struct tcp_cache *tpcache;

	/* Call lookup/create function */
	tpcache = tcp_getcache_with_lock(tp, 1, &head);
	if (tpcache == NULL)
		return;

	tpcache->tc_tfo_cookie_len = len;
	memcpy(tpcache->tc_tfo_cookie, cookie, len);

	tcp_cache_unlock(head);
}
/*
 * Get the cookie related to 'tp', and copy it into 'cookie', provided that len
 * is big enough (len designates the available memory).
 * Upon return, 'len' is set to the cookie's length.
 *
 * Returns 0 if we should request a cookie.
 * Returns 1 if the cookie has been found and written.
 */
int tcp_cache_get_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t *len)
{
	struct tcp_cache_head *head;
	struct tcp_cache *tpcache;

	/* Call lookup/create function */
	tpcache = tcp_getcache_with_lock(tp, 1, &head);
	if (tpcache == NULL)
		return (0);

	if (tpcache->tc_tfo_cookie_len == 0) {
		tcp_cache_unlock(head);
		return (0);
	}

	/*
	 * Not enough space - this should never happen as it has been checked
	 * in tcp_tfo_check. So, fail here!
	 */
	VERIFY(tpcache->tc_tfo_cookie_len <= *len);

	memcpy(cookie, tpcache->tc_tfo_cookie, tpcache->tc_tfo_cookie_len);
	*len = tpcache->tc_tfo_cookie_len;

	tcp_cache_unlock(head);

	return (1);
}
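
/*
 * Usage sketch (illustrative only, not taken from this file): a caller that has
 * received a TFO cookie from the peer would store it with
 *
 *	tcp_cache_set_cookie(tp, cookie, len);
 *
 * and a later connection to the same destination would retrieve it with
 *
 *	u_char cookie[TFO_COOKIE_LEN_MAX];
 *	u_int8_t len = sizeof(cookie);
 *
 *	if (tcp_cache_get_cookie(tp, cookie, &len) == 0) {
 *		// No cookie cached yet - request one in the SYN.
 *	}
 */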
unsigned int tcp_cache_get_cookie_len(struct tcpcb *tp)
{
	struct tcp_cache_head *head;
	struct tcp_cache *tpcache;
	unsigned int cookie_len;

	/* Call lookup/create function */
	tpcache = tcp_getcache_with_lock(tp, 1, &head);
	if (tpcache == NULL)
		return (0);

	cookie_len = tpcache->tc_tfo_cookie_len;

	tcp_cache_unlock(head);

	return (cookie_len);
}
static u_int16_t tcp_heuristics_hash(struct inpcb *inp,
    struct tcp_heuristic_key *key)
{
	u_int32_t hash;

	bzero(key, sizeof(struct tcp_heuristic_key));

	tcp_cache_hash_src(inp, key);

	hash = net_flowhash(key, sizeof(struct tcp_heuristic_key),
	    tcp_cache_hash_seed);

	return (hash & (tcp_cache_size - 1));
}
static void tcp_heuristic_unlock(struct tcp_heuristics_head *head)
{
	lck_mtx_unlock(&head->thh_mtx);
}
/*
 * Make sure that everything that happens after tcp_getheuristic_with_lock()
 * is short enough to justify that you hold the per-bucket lock!!!
 *
 * Otherwise, better build another lookup-function that does not hold the
 * lock and you copy out the bits and bytes.
 *
 * That's why we provide the head as a "return"-pointer so that the caller
 * can give it back to us for tcp_heuristic_unlock().
 *
 * ToDo - way too much code-duplication. We should create an interface to handle
 * bucketized hashtables with recycling of the oldest element.
 */
static struct tcp_heuristic *tcp_getheuristic_with_lock(struct tcpcb *tp,
    int create, struct tcp_heuristics_head **headarg)
{
	struct inpcb *inp = tp->t_inpcb;
	struct tcp_heuristic *tpheur = NULL;
	struct tcp_heuristics_head *head;
	struct tcp_heuristic_key key;
	u_int16_t hash;
	int i = 0;

	hash = tcp_heuristics_hash(inp, &key);
	head = &tcp_heuristics[hash];

	lck_mtx_lock(&head->thh_mtx);

	/*** First step: Look for the tcp_heuristic in our bucket ***/
	SLIST_FOREACH(tpheur, &head->tcp_heuristics, list) {
		if (memcmp(&tpheur->th_key, &key, sizeof(key)) == 0)
			break;

		i++;
	}

	/*** Second step: If it's not there, create/recycle it ***/
	if ((tpheur == NULL) && create) {
		if (i >= TCP_CACHE_BUCKET_SIZE) {
			struct tcp_heuristic *oldest_heur = NULL;
			u_int32_t max_age = 0;

			/* Look for the oldest tcp_heuristic in the bucket */
			SLIST_FOREACH(tpheur, &head->tcp_heuristics, list) {
				u_int32_t age = tcp_now - tpheur->th_last_access;

				if (age > max_age) {
					max_age = age;
					oldest_heur = tpheur;
				}
			}
			VERIFY(oldest_heur != NULL);

			tpheur = oldest_heur;

			/* We recycle - set everything to 0 */
			tpheur->th_tfo_cookie_loss = 0;
			tpheur->th_tfo_fallback_trials = 0;
			tpheur->th_tfo_cookie_backoff = 0;
			tpheur->th_tfo_in_backoff = 0;
			tpheur->th_tfo_aggressive_fallback = 0;
			tpheur->th_tfo_snd_middlebox_supp = 0;
			tpheur->th_tfo_rcv_middlebox_supp = 0;
		} else {
			/* Create a new heuristic and add it to the list */
			tpheur = _MALLOC(sizeof(struct tcp_heuristic), M_TEMP,
			    M_NOWAIT | M_ZERO);
			if (tpheur == NULL)
				goto out_null;

			SLIST_INSERT_HEAD(&head->tcp_heuristics, tpheur, list);
		}

		memcpy(&tpheur->th_key, &key, sizeof(key));
	}

	if (tpheur == NULL)
		goto out_null;

	/* Update timestamp for garbage collection purposes */
	tpheur->th_last_access = tcp_now;
	*headarg = head;

	return (tpheur);

out_null:
	tcp_heuristic_unlock(head);
	return (NULL);
}
void tcp_heuristic_tfo_success(struct tcpcb *tp)
{
	struct tcp_heuristics_head *head;

	struct tcp_heuristic *tpheur = tcp_getheuristic_with_lock(tp, 1, &head);
	if (tpheur == NULL)
		return;

	tpheur->th_tfo_cookie_loss = 0;

	tcp_heuristic_unlock(head);
}
void tcp_heuristic_tfo_rcv_good(struct tcpcb *tp)
{
	struct tcp_heuristics_head *head;

	struct tcp_heuristic *tpheur = tcp_getheuristic_with_lock(tp, 1, &head);
	if (tpheur == NULL)
		return;

	tpheur->th_tfo_rcv_middlebox_supp = 1;

	tcp_heuristic_unlock(head);

	tp->t_tfo_flags |= TFO_F_NO_RCVPROBING;
}
void tcp_heuristic_tfo_snd_good(struct tcpcb *tp)
{
	struct tcp_heuristics_head *head;

	struct tcp_heuristic *tpheur = tcp_getheuristic_with_lock(tp, 1, &head);
	if (tpheur == NULL)
		return;

	tpheur->th_tfo_snd_middlebox_supp = 1;

	tcp_heuristic_unlock(head);

	tp->t_tfo_flags |= TFO_F_NO_SNDPROBING;
}
void tcp_heuristic_tfo_inc_loss(struct tcpcb *tp)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;

	tpheur = tcp_getheuristic_with_lock(tp, 1, &head);
	if (tpheur == NULL)
		return;

	/* Potential integer overflow, but tfo_cookie_loss is 32-bits */
	tpheur->th_tfo_cookie_loss++;

	tcp_heuristic_unlock(head);
}
void tcp_heuristic_tfo_middlebox(struct tcpcb *tp)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;

	tpheur = tcp_getheuristic_with_lock(tp, 1, &head);
	if (tpheur == NULL)
		return;

	tpheur->th_tfo_aggressive_fallback = 1;

	tcp_heuristic_unlock(head);
}
void tcp_heuristic_tfo_reset_loss(struct tcpcb *tp)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;

	/*
	 * Don't attempt to create it! Keep the heuristics clean if the
	 * server does not support TFO. This reduces the lookup-cost on
	 * our side.
	 */
	tpheur = tcp_getheuristic_with_lock(tp, 0, &head);
	if (tpheur == NULL)
		return;

	tpheur->th_tfo_cookie_loss = 0;
	tpheur->th_tfo_aggressive_fallback = 0;

	tcp_heuristic_unlock(head);
}
boolean_t tcp_heuristic_do_tfo(struct tcpcb *tp)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;

	/* Get the tcp-heuristic. */
	tpheur = tcp_getheuristic_with_lock(tp, 0, &head);
	if (tpheur == NULL)
		return (TRUE);

	if (tpheur->th_tfo_aggressive_fallback) {
		/* Aggressive fallback - don't do TFO anymore... :'( */
		tcp_heuristic_unlock(head);
		return (FALSE);
	}

	if (tpheur->th_tfo_cookie_loss >= TFO_MAX_COOKIE_LOSS &&
	    (tpheur->th_tfo_fallback_trials < tcp_tfo_fallback_min ||
	    TSTMP_GT(tpheur->th_tfo_cookie_backoff, tcp_now))) {
		/*
		 * So, when we are in SYN-loss mode we try to stop using TFO
		 * for the next 'tcp_tfo_fallback_min' connections. That way,
		 * we are sure that never more than 1 out of tcp_tfo_fallback_min
		 * connections will suffer from our nice little middlebox.
		 *
		 * After that we first wait for 2 minutes. If we fail again,
		 * we wait for yet another 60 minutes.
		 */
		tpheur->th_tfo_fallback_trials++;
		if (tpheur->th_tfo_fallback_trials >= tcp_tfo_fallback_min &&
		    !tpheur->th_tfo_in_backoff) {
			if (tpheur->th_tfo_cookie_loss == TFO_MAX_COOKIE_LOSS)
				/* Backoff for 2 minutes */
				tpheur->th_tfo_cookie_backoff = tcp_now + (60 * 2 * TCP_RETRANSHZ);
			else
				/* Backoff for 60 minutes */
				tpheur->th_tfo_cookie_backoff = tcp_now + (60 * 60 * TCP_RETRANSHZ);

			tpheur->th_tfo_in_backoff = 1;
		}

		tcp_heuristic_unlock(head);
		return (FALSE);
	}

	/*
	 * We give it a new shot, set trials back to 0. This allows to
	 * start counting again from zero in case we get yet another SYN-loss.
	 */
	tpheur->th_tfo_fallback_trials = 0;
	tpheur->th_tfo_in_backoff = 0;

	if (tpheur->th_tfo_rcv_middlebox_supp)
		tp->t_tfo_flags |= TFO_F_NO_RCVPROBING;
	if (tpheur->th_tfo_snd_middlebox_supp)
		tp->t_tfo_flags |= TFO_F_NO_SNDPROBING;

	tcp_heuristic_unlock(head);

	return (TRUE);
}
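
/*
 * Worked example of the backoff logic above (informal, and assuming the external
 * tunable tcp_tfo_fallback_min is 10): after TFO_MAX_COOKIE_LOSS (2) consecutive
 * SYN+cookie losses, the next 10 connections to this source skip TFO. Once those
 * 10 fallback trials have been counted, TFO additionally stays disabled for a
 * 2-minute window (60 * 2 * TCP_RETRANSHZ ticks); if the loss counter has grown
 * beyond 2 by the time the next window is armed, that window is 60 minutes.
 * A single successful TFO connection (tcp_heuristic_tfo_success) resets the
 * loss counter and ends the cycle.
 */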
static void sysctl_cleartfocache(void)
{
	int i;

	for (i = 0; i < tcp_cache_size; i++) {
		struct tcp_cache_head *head = &tcp_cache[i];
		struct tcp_cache *tpcache, *tmp;
		struct tcp_heuristics_head *hhead = &tcp_heuristics[i];
		struct tcp_heuristic *tpheur, *htmp;

		lck_mtx_lock(&head->tch_mtx);
		SLIST_FOREACH_SAFE(tpcache, &head->tcp_caches, list, tmp) {
			SLIST_REMOVE(&head->tcp_caches, tpcache, tcp_cache, list);
			_FREE(tpcache, M_TEMP);
		}
		lck_mtx_unlock(&head->tch_mtx);

		lck_mtx_lock(&hhead->thh_mtx);
		SLIST_FOREACH_SAFE(tpheur, &hhead->tcp_heuristics, list, htmp) {
			SLIST_REMOVE(&hhead->tcp_heuristics, tpheur, tcp_heuristic, list);
			_FREE(tpheur, M_TEMP);
		}
		lck_mtx_unlock(&hhead->thh_mtx);
	}
}
/* This sysctl is useful for testing purposes only */
static int tcpcleartfo = 0;

static int sysctl_cleartfo SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error = 0, val, oldval = tcpcleartfo;

	val = oldval;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		goto exit;

	/*
	 * The actual value does not matter. If the value is set, it triggers
	 * the clearing of the TFO cache. If a future implementation does not
	 * use the route entry to hold the TFO cache, replace the route sysctl.
	 */

	if (val != oldval)
		sysctl_cleartfocache();

	tcpcleartfo = val;

exit:
	return (error);
}
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, clear_tfocache, CTLTYPE_INT | CTLFLAG_RW |
	CTLFLAG_LOCKED, &tcpcleartfo, 0, &sysctl_cleartfo, "I",
	"Toggle to clear the TFO destination based heuristic cache");
void tcp_cache_init(void)
{
	uint64_t sane_size_meg = sane_size / 1024 / 1024;
	int i;

	/*
	 * On machines with <100MB of memory this will result in a (full) cache-size
	 * of 32 entries, thus 32 * 5 * 64bytes = 10KB. (about 0.01 %)
	 * On machines with > 4GB of memory, we have a cache-size of 1024 entries,
	 * thus about 327KB.
	 *
	 * Side-note: we convert to u_int32_t. If sane_size is more than
	 * 16000 TB, we lose precision. But, who cares? :)
	 */
	tcp_cache_size = tcp_cache_roundup2((u_int32_t)(sane_size_meg >> 2));
	if (tcp_cache_size < 32)
		tcp_cache_size = 32;
	else if (tcp_cache_size > 1024)
		tcp_cache_size = 1024;
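
	/*
	 * Worked example (illustrative): on a machine with 1GB of memory,
	 * sane_size_meg is 1024, 1024 >> 2 = 256 and tcp_cache_roundup2(256)
	 * stays 256, so both hash tables get 256 buckets; with
	 * TCP_CACHE_BUCKET_SIZE 5 that bounds each table at 1280 entries.
	 */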
	tcp_cache = _MALLOC(sizeof(struct tcp_cache_head) * tcp_cache_size,
	    M_TEMP, M_ZERO);
	if (tcp_cache == NULL)
		panic("Allocating tcp_cache failed at boot-time!");

	tcp_cache_mtx_grp_attr = lck_grp_attr_alloc_init();
	tcp_cache_mtx_grp = lck_grp_alloc_init("tcpcache", tcp_cache_mtx_grp_attr);
	tcp_cache_mtx_attr = lck_attr_alloc_init();

	tcp_heuristics = _MALLOC(sizeof(struct tcp_heuristics_head) * tcp_cache_size,
	    M_TEMP, M_ZERO);
	if (tcp_heuristics == NULL)
		panic("Allocating tcp_heuristic failed at boot-time!");

	tcp_heuristic_mtx_grp_attr = lck_grp_attr_alloc_init();
	tcp_heuristic_mtx_grp = lck_grp_alloc_init("tcpheuristic", tcp_heuristic_mtx_grp_attr);
	tcp_heuristic_mtx_attr = lck_attr_alloc_init();

	for (i = 0; i < tcp_cache_size; i++) {
		lck_mtx_init(&tcp_cache[i].tch_mtx, tcp_cache_mtx_grp,
		    tcp_cache_mtx_attr);
		SLIST_INIT(&tcp_cache[i].tcp_caches);

		lck_mtx_init(&tcp_heuristics[i].thh_mtx, tcp_heuristic_mtx_grp,
		    tcp_heuristic_mtx_attr);
		SLIST_INIT(&tcp_heuristics[i].tcp_heuristics);
	}

	tcp_cache_hash_seed = RandomULong();
}