/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28 | ||
29 | /* TCP-cache to store and retrieve TCP-related information */ | |
30 | ||
31 | #include <net/flowhash.h> | |
32 | #include <net/route.h> | |
33 | #include <netinet/in_pcb.h> | |
34 | #include <netinet/tcp_cache.h> | |
35 | #include <netinet/tcp_seq.h> | |
36 | #include <netinet/tcp_var.h> | |
37 | #include <kern/locks.h> | |
38 | #include <sys/queue.h> | |
39 | #include <dev/random/randomdev.h> | |
40 | ||
struct tcp_heuristic_key {
	union {
		uint8_t thk_net_signature[IFNET_SIGNATURELEN];
		union {
			struct in_addr addr;
			struct in6_addr addr6;
		} thk_ip;
	};
	sa_family_t thk_family;
};

struct tcp_heuristic {
	SLIST_ENTRY(tcp_heuristic) list;

	u_int32_t	th_last_access;

	struct tcp_heuristic_key	th_key;

	/*
	 * If tfo_cookie_loss is changed to a smaller type, it might be worth
	 * checking for integer-overflow in tcp_heuristic_tfo_inc_loss
	 */
	u_int32_t	th_tfo_cookie_loss;	/* The number of times a SYN+cookie has been lost */
	u_int32_t	th_tfo_fallback_trials;	/* Number of times we did not try out TFO due to SYN-loss */
	u_int32_t	th_tfo_cookie_backoff;	/* Time until when we should not try out TFO */

	u_int8_t	th_tfo_in_backoff:1,	/* Are we doing TFO due to the backoff timer? */
			th_tfo_aggressive_fallback:1,	/* Aggressive fallback due to nasty middlebox */
			th_tfo_snd_middlebox_supp:1,	/* We are sure that the network supports TFO in upstream direction */
			th_tfo_rcv_middlebox_supp:1;	/* We are sure that the network supports TFO in downstream direction */
};

struct tcp_heuristics_head {
	SLIST_HEAD(tcp_heur_bucket, tcp_heuristic) tcp_heuristics;

	/* Per-hashbucket lock to avoid lock-contention */
	lck_mtx_t	thh_mtx;
};

struct tcp_cache_key {
	sa_family_t	tck_family;

	struct tcp_heuristic_key	tck_src;
	union {
		struct in_addr addr;
		struct in6_addr addr6;
	} tck_dst;
};

struct tcp_cache {
	SLIST_ENTRY(tcp_cache) list;

	u_int32_t	tc_last_access;

	struct tcp_cache_key	tc_key;

	u_int8_t	tc_tfo_cookie[TFO_COOKIE_LEN_MAX];
	u_int8_t	tc_tfo_cookie_len;
};

struct tcp_cache_head {
	SLIST_HEAD(tcp_cache_bucket, tcp_cache) tcp_caches;

	/* Per-hashbucket lock to avoid lock-contention */
	lck_mtx_t	tch_mtx;
};

static u_int32_t tcp_cache_hash_seed;

size_t tcp_cache_size;

/*
 * The maximum depth of a hash bucket. This limits the tcp_cache to
 * TCP_CACHE_BUCKET_SIZE * tcp_cache_size entries and gives us "natural"
 * garbage collection.
 */
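/*
 * With the largest table size of 1024 buckets (tcp_cache_size is clamped to
 * at most 1024 in tcp_cache_init()), this caps each table at
 * 5 * 1024 = 5120 entries.
 */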
#define TCP_CACHE_BUCKET_SIZE 5

static struct tcp_cache_head *tcp_cache;

decl_lck_mtx_data(, tcp_cache_mtx);

static lck_attr_t	*tcp_cache_mtx_attr;
static lck_grp_t	*tcp_cache_mtx_grp;
static lck_grp_attr_t	*tcp_cache_mtx_grp_attr;

static struct tcp_heuristics_head *tcp_heuristics;

decl_lck_mtx_data(, tcp_heuristics_mtx);

static lck_attr_t	*tcp_heuristic_mtx_attr;
static lck_grp_t	*tcp_heuristic_mtx_grp;
static lck_grp_attr_t	*tcp_heuristic_mtx_grp_attr;

/* Number of SYN-losses we accept */
#define TFO_MAX_COOKIE_LOSS 2

/*
 * Round up to the next higher power of 2. See "Bit Twiddling Hacks".
 *
 * Might be worth moving this to a library so that others
 * (e.g., scale_to_powerof2()) can use this as well instead of a while-loop.
 */
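/*
 * Illustrative values (not computed here): 600 -> 1024, 32 -> 32, 1 -> 1,
 * and 0 -> 0 (the initial decrement wraps around).
 */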
static u_int32_t tcp_cache_roundup2(u_int32_t a)
{
	a--;
	a |= a >> 1;
	a |= a >> 2;
	a |= a >> 4;
	a |= a >> 8;
	a |= a >> 16;
	a++;

	return a;
}

static void tcp_cache_hash_src(struct inpcb *inp, struct tcp_heuristic_key *key)
{
	struct ifnet *ifn = inp->inp_last_outifp;
	uint8_t len = sizeof(key->thk_net_signature);
	uint16_t flags;

	if (inp->inp_vflag & INP_IPV6) {
		int ret;

		key->thk_family = AF_INET6;
		ret = ifnet_get_netsignature(ifn, AF_INET6, &len, &flags,
		    key->thk_net_signature);

		/*
		 * ifnet_get_netsignature only returns EINVAL if ifn is NULL
		 * (we made sure it does not in the other cases). So, in this
		 * case we fall back to the connection's address.
		 */
		if (ret == ENOENT || ret == EINVAL)
			memcpy(&key->thk_ip.addr6, &inp->in6p_laddr, sizeof(struct in6_addr));
	} else {
		int ret;

		key->thk_family = AF_INET;
		ret = ifnet_get_netsignature(ifn, AF_INET, &len, &flags,
		    key->thk_net_signature);

		/*
		 * ifnet_get_netsignature only returns EINVAL if ifn is NULL
		 * (we made sure it does not in the other cases). So, in this
		 * case we fall back to the connection's address.
		 */
		if (ret == ENOENT || ret == EINVAL)
			memcpy(&key->thk_ip.addr, &inp->inp_laddr, sizeof(struct in_addr));
	}
}

static u_int16_t tcp_cache_hash(struct inpcb *inp, struct tcp_cache_key *key)
{
	u_int32_t hash;

	bzero(key, sizeof(struct tcp_cache_key));

	tcp_cache_hash_src(inp, &key->tck_src);

	if (inp->inp_vflag & INP_IPV6) {
		key->tck_family = AF_INET6;
		memcpy(&key->tck_dst.addr6, &inp->in6p_faddr,
		    sizeof(struct in6_addr));
	} else {
		key->tck_family = AF_INET;
		memcpy(&key->tck_dst.addr, &inp->inp_faddr,
		    sizeof(struct in_addr));
	}

	hash = net_flowhash(key, sizeof(struct tcp_cache_key),
	    tcp_cache_hash_seed);

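	/*
	 * Masking with (tcp_cache_size - 1) works because tcp_cache_size is a
	 * power of two (tcp_cache_init() rounds it with tcp_cache_roundup2()).
	 */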
	return (hash & (tcp_cache_size - 1));
}

static void tcp_cache_unlock(struct tcp_cache_head *head)
{
	lck_mtx_unlock(&head->tch_mtx);
}

/*
 * Make sure that everything that happens after tcp_getcache_with_lock()
 * is short enough to justify that you hold the per-bucket lock!!!
 *
 * Otherwise, better build another lookup-function that does not hold the
 * lock and copies out the bits and bytes.
 *
 * That's why we provide the head as a "return"-pointer so that the caller
 * can give it back to us for tcp_cache_unlock().
 */
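/*
 * Typical caller pattern (a sketch; tcp_cache_set_cookie() below is the
 * canonical example):
 *
 *	struct tcp_cache_head *head;
 *	struct tcp_cache *tpcache = tcp_getcache_with_lock(tp, 1, &head);
 *	if (tpcache != NULL) {
 *		... short, bucket-lock-held work on tpcache ...
 *		tcp_cache_unlock(head);
 *	}
 */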
static struct tcp_cache *tcp_getcache_with_lock(struct tcpcb *tp, int create,
    struct tcp_cache_head **headarg)
{
	struct inpcb *inp = tp->t_inpcb;
	struct tcp_cache *tpcache = NULL;
	struct tcp_cache_head *head;
	struct tcp_cache_key key;
	u_int16_t hash;
	int i = 0;

	hash = tcp_cache_hash(inp, &key);
	head = &tcp_cache[hash];

	lck_mtx_lock(&head->tch_mtx);

	/*** First step: Look for the tcp_cache in our bucket ***/
	SLIST_FOREACH(tpcache, &head->tcp_caches, list) {
		if (memcmp(&tpcache->tc_key, &key, sizeof(key)) == 0)
			break;

		i++;
	}

	/*** Second step: If it's not there, create/recycle it ***/
	if ((tpcache == NULL) && create) {
		if (i >= TCP_CACHE_BUCKET_SIZE) {
			struct tcp_cache *oldest_cache = NULL;
			u_int32_t max_age = 0;

			/* Look for the oldest tcp_cache in the bucket */
			SLIST_FOREACH(tpcache, &head->tcp_caches, list) {
				u_int32_t age = tcp_now - tpcache->tc_last_access;
				if (age > max_age) {
					max_age = age;
					oldest_cache = tpcache;
				}
			}
			VERIFY(oldest_cache != NULL);

			tpcache = oldest_cache;

			/* We recycle, thus let's indicate that there is no cookie */
			tpcache->tc_tfo_cookie_len = 0;
		} else {
			/* Create a new cache and add it to the list */
			tpcache = _MALLOC(sizeof(struct tcp_cache), M_TEMP,
			    M_NOWAIT | M_ZERO);
			if (tpcache == NULL)
				goto out_null;

			SLIST_INSERT_HEAD(&head->tcp_caches, tpcache, list);
		}

		memcpy(&tpcache->tc_key, &key, sizeof(key));
	}

	if (tpcache == NULL)
		goto out_null;

	/* Update timestamp for garbage collection purposes */
	tpcache->tc_last_access = tcp_now;
	*headarg = head;

	return (tpcache);

out_null:
	tcp_cache_unlock(head);
	return (NULL);
}

void tcp_cache_set_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t len)
{
	struct tcp_cache_head *head;
	struct tcp_cache *tpcache;

	/* Call lookup/create function */
	tpcache = tcp_getcache_with_lock(tp, 1, &head);
	if (tpcache == NULL)
		return;

	tpcache->tc_tfo_cookie_len = len;
	memcpy(tpcache->tc_tfo_cookie, cookie, len);

	tcp_cache_unlock(head);
}

/*
 * Get the cookie related to 'tp', and copy it into 'cookie', provided that len
 * is big enough (len designates the available memory).
 * Upon return, 'len' is set to the cookie's length.
 *
 * Returns 0 if we should request a cookie.
 * Returns 1 if the cookie has been found and written.
 */
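/*
 * Sketch of the expected call (the caller sizes the buffer up front; the
 * actual space check happens in tcp_tfo_check):
 *
 *	u_char cookie[TFO_COOKIE_LEN_MAX];
 *	u_int8_t len = TFO_COOKIE_LEN_MAX;
 *	if (tcp_cache_get_cookie(tp, cookie, &len))
 *		... send the 'len'-byte cookie on the SYN ...
 *	else
 *		... request a new cookie instead ...
 */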
int tcp_cache_get_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t *len)
{
	struct tcp_cache_head *head;
	struct tcp_cache *tpcache;

	/* Call lookup/create function */
	tpcache = tcp_getcache_with_lock(tp, 1, &head);
	if (tpcache == NULL)
		return (0);

	if (tpcache->tc_tfo_cookie_len == 0) {
		tcp_cache_unlock(head);
		return (0);
	}

	/*
	 * Not enough space - this should never happen as it has been checked
	 * in tcp_tfo_check. So, fail here!
	 */
	VERIFY(tpcache->tc_tfo_cookie_len <= *len);

	memcpy(cookie, tpcache->tc_tfo_cookie, tpcache->tc_tfo_cookie_len);
	*len = tpcache->tc_tfo_cookie_len;

	tcp_cache_unlock(head);

	return (1);
}

unsigned int tcp_cache_get_cookie_len(struct tcpcb *tp)
{
	struct tcp_cache_head *head;
	struct tcp_cache *tpcache;
	unsigned int cookie_len;

	/* Call lookup/create function */
	tpcache = tcp_getcache_with_lock(tp, 1, &head);
	if (tpcache == NULL)
		return (0);

	cookie_len = tpcache->tc_tfo_cookie_len;

	tcp_cache_unlock(head);

	return cookie_len;
}

static u_int16_t tcp_heuristics_hash(struct inpcb *inp,
    struct tcp_heuristic_key *key)
{
	u_int32_t hash;

	bzero(key, sizeof(struct tcp_heuristic_key));

	tcp_cache_hash_src(inp, key);

	hash = net_flowhash(key, sizeof(struct tcp_heuristic_key),
	    tcp_cache_hash_seed);

	return (hash & (tcp_cache_size - 1));
}

static void tcp_heuristic_unlock(struct tcp_heuristics_head *head)
{
	lck_mtx_unlock(&head->thh_mtx);
}

/*
 * Make sure that everything that happens after tcp_getheuristic_with_lock()
 * is short enough to justify that you hold the per-bucket lock!!!
 *
 * Otherwise, better build another lookup-function that does not hold the
 * lock and copies out the bits and bytes.
 *
 * That's why we provide the head as a "return"-pointer so that the caller
 * can give it back to us for tcp_heuristic_unlock().
 *
 * ToDo - way too much code-duplication. We should create an interface to handle
 * bucketized hashtables with recycling of the oldest element.
 */
static struct tcp_heuristic *tcp_getheuristic_with_lock(struct tcpcb *tp,
    int create, struct tcp_heuristics_head **headarg)
{
	struct inpcb *inp = tp->t_inpcb;
	struct tcp_heuristic *tpheur = NULL;
	struct tcp_heuristics_head *head;
	struct tcp_heuristic_key key;
	u_int16_t hash;
	int i = 0;

	hash = tcp_heuristics_hash(inp, &key);
	head = &tcp_heuristics[hash];

	lck_mtx_lock(&head->thh_mtx);

	/*** First step: Look for the tcp_heur in our bucket ***/
	SLIST_FOREACH(tpheur, &head->tcp_heuristics, list) {
		if (memcmp(&tpheur->th_key, &key, sizeof(key)) == 0)
			break;

		i++;
	}

	/*** Second step: If it's not there, create/recycle it ***/
	if ((tpheur == NULL) && create) {
		if (i >= TCP_CACHE_BUCKET_SIZE) {
			struct tcp_heuristic *oldest_heur = NULL;
			u_int32_t max_age = 0;

			/* Look for the oldest tcp_heur in the bucket */
			SLIST_FOREACH(tpheur, &head->tcp_heuristics, list) {
				u_int32_t age = tcp_now - tpheur->th_last_access;
				if (age > max_age) {
					max_age = age;
					oldest_heur = tpheur;
				}
			}
			VERIFY(oldest_heur != NULL);

			tpheur = oldest_heur;

			/* We recycle - set everything to 0 */
			tpheur->th_tfo_cookie_loss = 0;
			tpheur->th_tfo_fallback_trials = 0;
			tpheur->th_tfo_cookie_backoff = 0;
			tpheur->th_tfo_in_backoff = 0;
			tpheur->th_tfo_aggressive_fallback = 0;
			tpheur->th_tfo_snd_middlebox_supp = 0;
			tpheur->th_tfo_rcv_middlebox_supp = 0;
		} else {
			/* Create a new heuristic and add it to the list */
			tpheur = _MALLOC(sizeof(struct tcp_heuristic), M_TEMP,
			    M_NOWAIT | M_ZERO);
			if (tpheur == NULL)
				goto out_null;

			SLIST_INSERT_HEAD(&head->tcp_heuristics, tpheur, list);
		}

		memcpy(&tpheur->th_key, &key, sizeof(key));
	}

	if (tpheur == NULL)
		goto out_null;

	/* Update timestamp for garbage collection purposes */
	tpheur->th_last_access = tcp_now;
	*headarg = head;

	return (tpheur);

out_null:
	tcp_heuristic_unlock(head);
	return (NULL);
}

void tcp_heuristic_tfo_success(struct tcpcb *tp)
{
	struct tcp_heuristics_head *head;

	struct tcp_heuristic *tpheur = tcp_getheuristic_with_lock(tp, 1, &head);
	if (tpheur == NULL)
		return;

	tpheur->th_tfo_cookie_loss = 0;

	tcp_heuristic_unlock(head);
}

void tcp_heuristic_tfo_rcv_good(struct tcpcb *tp)
{
	struct tcp_heuristics_head *head;

	struct tcp_heuristic *tpheur = tcp_getheuristic_with_lock(tp, 1, &head);
	if (tpheur == NULL)
		return;

	tpheur->th_tfo_rcv_middlebox_supp = 1;

	tcp_heuristic_unlock(head);

	tp->t_tfo_flags |= TFO_F_NO_RCVPROBING;
}

void tcp_heuristic_tfo_snd_good(struct tcpcb *tp)
{
	struct tcp_heuristics_head *head;

	struct tcp_heuristic *tpheur = tcp_getheuristic_with_lock(tp, 1, &head);
	if (tpheur == NULL)
		return;

	tpheur->th_tfo_snd_middlebox_supp = 1;

	tcp_heuristic_unlock(head);

	tp->t_tfo_flags |= TFO_F_NO_SNDPROBING;
}

void tcp_heuristic_tfo_inc_loss(struct tcpcb *tp)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;

	tpheur = tcp_getheuristic_with_lock(tp, 1, &head);
	if (tpheur == NULL)
		return;

	/* Potential integer overflow, but tfo_cookie_loss is 32-bits */
	tpheur->th_tfo_cookie_loss++;

	tcp_heuristic_unlock(head);
}

void tcp_heuristic_tfo_middlebox(struct tcpcb *tp)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;

	tpheur = tcp_getheuristic_with_lock(tp, 1, &head);
	if (tpheur == NULL)
		return;

	tpheur->th_tfo_aggressive_fallback = 1;

	tcp_heuristic_unlock(head);
}

void tcp_heuristic_tfo_reset_loss(struct tcpcb *tp)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;

	/*
	 * Don't attempt to create it! Keep the heuristics clean if the
	 * server does not support TFO. This reduces the lookup-cost on
	 * our side.
	 */
	tpheur = tcp_getheuristic_with_lock(tp, 0, &head);
	if (tpheur == NULL)
		return;

	tpheur->th_tfo_cookie_loss = 0;
	tpheur->th_tfo_aggressive_fallback = 0;

	tcp_heuristic_unlock(head);
}

boolean_t tcp_heuristic_do_tfo(struct tcpcb *tp)
{
	struct tcp_heuristics_head *head;
	struct tcp_heuristic *tpheur;

	/* Get the tcp-heuristic. */
	tpheur = tcp_getheuristic_with_lock(tp, 0, &head);
	if (tpheur == NULL)
		return (true);

	if (tpheur->th_tfo_aggressive_fallback) {
		/* Aggressive fallback - don't do TFO anymore... :'( */
		tcp_heuristic_unlock(head);
		return (false);
	}

	if (tpheur->th_tfo_cookie_loss >= TFO_MAX_COOKIE_LOSS &&
	    (tpheur->th_tfo_fallback_trials < tcp_tfo_fallback_min ||
	     TSTMP_GT(tpheur->th_tfo_cookie_backoff, tcp_now))) {
		/*
		 * So, when we are in SYN-loss mode we try to stop using TFO
		 * for the next 'tcp_tfo_fallback_min' connections. That way,
		 * we are sure that never more than 1 out of tcp_tfo_fallback_min
		 * connections will suffer from our nice little middlebox.
		 *
		 * After that we first wait for 2 minutes. If we fail again,
		 * we wait for yet another 60 minutes.
		 */
		tpheur->th_tfo_fallback_trials++;
		if (tpheur->th_tfo_fallback_trials >= tcp_tfo_fallback_min &&
		    !tpheur->th_tfo_in_backoff) {
			if (tpheur->th_tfo_cookie_loss == TFO_MAX_COOKIE_LOSS)
				/* Backoff for 2 minutes */
				tpheur->th_tfo_cookie_backoff = tcp_now + (60 * 2 * TCP_RETRANSHZ);
			else
				/* Backoff for 60 minutes */
				tpheur->th_tfo_cookie_backoff = tcp_now + (60 * 60 * TCP_RETRANSHZ);

			tpheur->th_tfo_in_backoff = 1;
		}

		tcp_heuristic_unlock(head);
		return (false);
	}

	/*
	 * We give it a new shot, set trials back to 0. This allows us to
	 * start counting again from zero in case we get yet another SYN-loss.
	 */
	tpheur->th_tfo_fallback_trials = 0;
	tpheur->th_tfo_in_backoff = 0;

	if (tpheur->th_tfo_rcv_middlebox_supp)
		tp->t_tfo_flags |= TFO_F_NO_RCVPROBING;
	if (tpheur->th_tfo_snd_middlebox_supp)
		tp->t_tfo_flags |= TFO_F_NO_SNDPROBING;

	tcp_heuristic_unlock(head);

	return (true);
}

static void sysctl_cleartfocache(void)
{
	int i;

	for (i = 0; i < tcp_cache_size; i++) {
		struct tcp_cache_head *head = &tcp_cache[i];
		struct tcp_cache *tpcache, *tmp;
		struct tcp_heuristics_head *hhead = &tcp_heuristics[i];
		struct tcp_heuristic *tpheur, *htmp;

		lck_mtx_lock(&head->tch_mtx);
		SLIST_FOREACH_SAFE(tpcache, &head->tcp_caches, list, tmp) {
			SLIST_REMOVE(&head->tcp_caches, tpcache, tcp_cache, list);
			_FREE(tpcache, M_TEMP);
		}
		lck_mtx_unlock(&head->tch_mtx);

		lck_mtx_lock(&hhead->thh_mtx);
		SLIST_FOREACH_SAFE(tpheur, &hhead->tcp_heuristics, list, htmp) {
			SLIST_REMOVE(&hhead->tcp_heuristics, tpheur, tcp_heuristic, list);
			_FREE(tpheur, M_TEMP);
		}
		lck_mtx_unlock(&hhead->thh_mtx);
	}
}

/* This sysctl is useful for testing purposes only */
static int tcpcleartfo = 0;

static int sysctl_cleartfo SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error = 0, val, oldval = tcpcleartfo;

	val = oldval;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	/*
	 * The actual value does not matter. If the value is set, it triggers
	 * the clearing of the TFO cache. If a future implementation does not
	 * use the route entry to hold the TFO cache, replace the route sysctl.
	 */

	if (val != oldval)
		sysctl_cleartfocache();

	tcpcleartfo = val;

	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, clear_tfocache, CTLTYPE_INT | CTLFLAG_RW |
	CTLFLAG_LOCKED, &tcpcleartfo, 0, &sysctl_cleartfo, "I",
	"Toggle to clear the TFO destination based heuristic cache");

void tcp_cache_init(void)
{
	uint64_t sane_size_meg = sane_size / 1024 / 1024;
	int i;

	/*
	 * On machines with <100MB of memory this will result in a (full) cache-size
	 * of 32 entries, thus 32 * 5 * 64 bytes = 10KB. (about 0.01 %)
	 * On machines with > 4GB of memory, we have a cache-size of 1024 entries,
	 * thus about 327KB.
	 *
	 * Side-note: we convert to u_int32_t. If sane_size is more than
	 * 16000 TB, we lose precision. But, who cares? :)
	 */
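	/*
	 * Worked example: with 8GB of memory, sane_size_meg >> 2 == 2048,
	 * tcp_cache_roundup2(2048) == 2048, and the clamp below brings it
	 * down to 1024 buckets.
	 */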
	tcp_cache_size = tcp_cache_roundup2((u_int32_t)(sane_size_meg >> 2));
	if (tcp_cache_size < 32)
		tcp_cache_size = 32;
	else if (tcp_cache_size > 1024)
		tcp_cache_size = 1024;

	tcp_cache = _MALLOC(sizeof(struct tcp_cache_head) * tcp_cache_size,
	    M_TEMP, M_ZERO);
	if (tcp_cache == NULL)
		panic("Allocating tcp_cache failed at boot-time!");

	tcp_cache_mtx_grp_attr = lck_grp_attr_alloc_init();
	tcp_cache_mtx_grp = lck_grp_alloc_init("tcpcache", tcp_cache_mtx_grp_attr);
	tcp_cache_mtx_attr = lck_attr_alloc_init();

	tcp_heuristics = _MALLOC(sizeof(struct tcp_heuristics_head) * tcp_cache_size,
	    M_TEMP, M_ZERO);
	if (tcp_heuristics == NULL)
		panic("Allocating tcp_heuristic failed at boot-time!");

	tcp_heuristic_mtx_grp_attr = lck_grp_attr_alloc_init();
	tcp_heuristic_mtx_grp = lck_grp_alloc_init("tcpheuristic", tcp_heuristic_mtx_grp_attr);
	tcp_heuristic_mtx_attr = lck_attr_alloc_init();

	for (i = 0; i < tcp_cache_size; i++) {
		lck_mtx_init(&tcp_cache[i].tch_mtx, tcp_cache_mtx_grp,
		    tcp_cache_mtx_attr);
		SLIST_INIT(&tcp_cache[i].tcp_caches);

		lck_mtx_init(&tcp_heuristics[i].thh_mtx, tcp_heuristic_mtx_grp,
		    tcp_heuristic_mtx_attr);
		SLIST_INIT(&tcp_heuristics[i].tcp_heuristics);
	}

	tcp_cache_hash_seed = RandomULong();
}