X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/0a7de7458d150b5d4dffc935ba399be265ef0a1a..refs/heads/master:/bsd/netinet/tcp_cache.c

diff --git a/bsd/netinet/tcp_cache.c b/bsd/netinet/tcp_cache.c
index deaa3bcb2..8095d924c 100644
--- a/bsd/netinet/tcp_cache.c
+++ b/bsd/netinet/tcp_cache.c
@@ -67,6 +67,7 @@ struct tcp_heuristic {
     uint8_t th_tfo_data_rst; /* The number of times a SYN+data has received a RST */
     uint8_t th_tfo_req_rst; /* The number of times a SYN+cookie-req has received a RST */
     uint8_t th_mptcp_loss; /* The number of times a SYN+MP_CAPABLE has been lost */
+    uint8_t th_mptcp_success; /* The number of times MPTCP-negotiation has been successful */
     uint8_t th_ecn_loss; /* The number of times a SYN+ecn has been lost */
     uint8_t th_ecn_aggressive; /* The number of times we did an aggressive fallback */
     uint8_t th_ecn_droprst; /* The number of times ECN connections received a RST after first data pkt */
@@ -79,7 +80,8 @@ struct tcp_heuristic {
     uint32_t th_ecn_backoff; /* Time until when we should not try out ECN */
 
     uint8_t th_tfo_in_backoff:1, /* Are we avoiding TFO due to the backoff timer? */
-        th_mptcp_in_backoff:1; /* Are we avoiding MPTCP due to the backoff timer? */
+        th_mptcp_in_backoff:1, /* Are we avoiding MPTCP due to the backoff timer? */
+        th_mptcp_heuristic_disabled:1; /* Are heuristics disabled? */
 
     char th_val_end[0]; /* Marker for memsetting to 0 */
 };
@@ -101,12 +103,12 @@ struct tcp_cache_key {
 struct tcp_cache {
     SLIST_ENTRY(tcp_cache) list;
 
-    u_int32_t tc_last_access;
+    uint32_t tc_last_access;
 
     struct tcp_cache_key tc_key;
 
-    u_int8_t tc_tfo_cookie[TFO_COOKIE_LEN_MAX];
-    u_int8_t tc_tfo_cookie_len;
+    uint8_t tc_tfo_cookie[TFO_COOKIE_LEN_MAX];
+    uint8_t tc_tfo_cookie_len;
 };
 
 struct tcp_cache_head {
@@ -123,7 +125,7 @@ struct tcp_cache_key_src {
     int af;
 };
 
-static u_int32_t tcp_cache_hash_seed;
+static uint32_t tcp_cache_hash_seed;
 
 size_t tcp_cache_size;
@@ -154,11 +156,14 @@ static uint32_t tcp_backoff_maximum = 65536;
 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, backoff_maximum, CTLFLAG_RW | CTLFLAG_LOCKED,
     &tcp_backoff_maximum, 0, "Maximum time for which we won't try TFO");
 
-SYSCTL_SKMEM_TCP_INT(OID_AUTO, ecn_timeout, CTLFLAG_RW | CTLFLAG_LOCKED,
-    static int, tcp_ecn_timeout, 60, "Initial minutes to wait before re-trying ECN");
+static uint32_t tcp_ecn_timeout = 60;
 
-SYSCTL_SKMEM_TCP_INT(OID_AUTO, disable_tcp_heuristics, CTLFLAG_RW | CTLFLAG_LOCKED,
-    static int, disable_tcp_heuristics, 0, "Set to 1, to disable all TCP heuristics (TFO, ECN, MPTCP)");
+SYSCTL_UINT(_net_inet_tcp, OID_AUTO, ecn_timeout, CTLFLAG_RW | CTLFLAG_LOCKED,
+    &tcp_ecn_timeout, 60, "Initial minutes to wait before re-trying ECN");
+
+static int disable_tcp_heuristics = 0;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, disable_tcp_heuristics, CTLFLAG_RW | CTLFLAG_LOCKED,
+    &disable_tcp_heuristics, 0, "Set to 1, to disable all TCP heuristics (TFO, ECN, MPTCP)");
 
 static uint32_t
 tcp_min_to_hz(uint32_t minutes)
@@ -181,6 +186,7 @@ tcp_min_to_hz(uint32_t minutes)
 #define TFO_MAX_COOKIE_LOSS 2
 #define ECN_MAX_SYN_LOSS 2
 #define MPTCP_MAX_SYN_LOSS 2
+#define MPTCP_SUCCESS_TRIGGER 10
 #define ECN_MAX_DROPRST 1
 #define ECN_MAX_DROPRXMT 4
 #define ECN_MAX_SYNRST 4
@@ -215,8 +221,8 @@ tcp_min_to_hz(uint32_t minutes)
  * Might be worth moving this to a library so that others
  * (e.g., scale_to_powerof2()) can use this as well instead of a while-loop.
  */
-static u_int32_t
-tcp_cache_roundup2(u_int32_t a)
+static uint32_t
+tcp_cache_roundup2(uint32_t a)
 {
     a--;
     a |= a >> 1;
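The context above cuts off inside tcp_cache_roundup2(); only the first two OR-shift steps are visible. For reference, the classic round-up-to-the-next-power-of-two sequence that the function's comment alludes to looks like the following minimal sketch (illustrative, not a verbatim copy of the kernel's body):

    #include <stdint.h>

    /* Smear the highest set bit of (a - 1) into every lower bit,
     * then add one: 5 -> 8, 8 -> 8, 1000 -> 1024. */
    static uint32_t
    roundup2_u32(uint32_t a)
    {
        a--;
        a |= a >> 1;
        a |= a >> 2;
        a |= a >> 4;
        a |= a >> 8;
        a |= a >> 16;
        return a + 1;
    }

Keeping tcp_cache_size a power of two is what lets the hash functions below reduce a 32-bit flow hash to a bucket index with a simple mask, hash & (tcp_cache_size - 1), instead of a modulo.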
@@ -269,10 +275,10 @@ tcp_cache_hash_src(struct tcp_cache_key_src *tcks, struct tcp_heuristic_key *key
     }
 }
 
-static u_int16_t
+static uint16_t
 tcp_cache_hash(struct tcp_cache_key_src *tcks, struct tcp_cache_key *key)
 {
-    u_int32_t hash;
+    uint32_t hash;
 
     bzero(key, sizeof(struct tcp_cache_key));
@@ -291,7 +297,7 @@ tcp_cache_hash(struct tcp_cache_key_src *tcks, struct tcp_cache_key *key)
     hash = net_flowhash(key, sizeof(struct tcp_cache_key),
         tcp_cache_hash_seed);
 
-    return hash & (tcp_cache_size - 1);
+    return (uint16_t)(hash & (tcp_cache_size - 1));
 }
 
 static void
@@ -317,7 +323,7 @@ tcp_getcache_with_lock(struct tcp_cache_key_src *tcks,
     struct tcp_cache *tpcache = NULL;
     struct tcp_cache_head *head;
     struct tcp_cache_key key;
-    u_int16_t hash;
+    uint16_t hash;
     int i = 0;
 
     hash = tcp_cache_hash(tcks, &key);
@@ -338,11 +344,11 @@ tcp_getcache_with_lock(struct tcp_cache_key_src *tcks,
     if ((tpcache == NULL) && create) {
         if (i >= TCP_CACHE_BUCKET_SIZE) {
             struct tcp_cache *oldest_cache = NULL;
-            u_int32_t max_age = 0;
+            uint32_t max_age = 0;
 
             /* Look for the oldest tcp_cache in the bucket */
             SLIST_FOREACH(tpcache, &head->tcp_caches, list) {
-                u_int32_t age = tcp_now - tpcache->tc_last_access;
+                uint32_t age = tcp_now - tpcache->tc_last_access;
 
                 if (age > max_age) {
                     max_age = age;
                     oldest_cache = tpcache;
@@ -359,6 +365,7 @@ tcp_getcache_with_lock(struct tcp_cache_key_src *tcks,
         tpcache = _MALLOC(sizeof(struct tcp_cache), M_TEMP,
             M_NOWAIT | M_ZERO);
         if (tpcache == NULL) {
+            os_log_error(OS_LOG_DEFAULT, "%s could not allocate cache", __func__);
             goto out_null;
         }
@@ -405,7 +412,7 @@ tcp_cache_key_src_create(struct tcpcb *tp, struct tcp_cache_key_src *tcks)
 }
 
 static void
-tcp_cache_set_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, u_int8_t len)
+tcp_cache_set_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, uint8_t len)
 {
     struct tcp_cache_head *head;
     struct tcp_cache *tpcache;
@@ -424,7 +431,7 @@ tcp_cache_set_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, u_in
 }
 
 void
-tcp_cache_set_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t len)
+tcp_cache_set_cookie(struct tcpcb *tp, u_char *cookie, uint8_t len)
 {
     struct tcp_cache_key_src tcks;
 
@@ -433,7 +440,7 @@ tcp_cache_set_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t len)
 }
 
 static int
-tcp_cache_get_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, u_int8_t *len)
+tcp_cache_get_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, uint8_t *len)
 {
     struct tcp_cache_head *head;
     struct tcp_cache *tpcache;
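The retyped set/get pair above and the lookup function below are the external face of the TFO cookie cache. A hypothetical caller, to show the intended flow (everything except tcp_cache_set_cookie(), tcp_cache_get_cookie(), and TFO_COOKIE_LEN_MAX is invented for illustration):

    /* SYN-ACK processing: the server handed us a Fast-Open cookie,
     * remember it for the next connection to this destination. */
    static void
    example_remember_cookie(struct tcpcb *tp, u_char *opt, uint8_t optlen)
    {
        tcp_cache_set_cookie(tp, opt, optlen);
    }

    /* Connection setup: reuse a cached cookie, if any, for SYN+data. */
    static int
    example_have_cookie(struct tcpcb *tp)
    {
        u_char cookie[TFO_COOKIE_LEN_MAX];
        uint8_t len = sizeof(cookie);

        /* Per the comment below, returns 1 when a cookie was
         * found and written into the supplied buffer. */
        return tcp_cache_get_cookie(tp, cookie, &len);
    }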
@@ -472,7 +479,7 @@ tcp_cache_get_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, u_in
  * Returns 1 if the cookie has been found and written.
  */
 int
-tcp_cache_get_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t *len)
+tcp_cache_get_cookie(struct tcpcb *tp, u_char *cookie, uint8_t *len)
 {
     struct tcp_cache_key_src tcks;
 
@@ -509,10 +516,10 @@ tcp_cache_get_cookie_len(struct tcpcb *tp)
     return tcp_cache_get_cookie_len_common(&tcks);
 }
 
-static u_int16_t
+static uint16_t
 tcp_heuristics_hash(struct tcp_cache_key_src *tcks, struct tcp_heuristic_key *key)
 {
-    u_int32_t hash;
+    uint32_t hash;
 
     bzero(key, sizeof(struct tcp_heuristic_key));
@@ -521,7 +528,7 @@ tcp_heuristics_hash(struct tcp_cache_key_src *tcks, struct tcp_heuristic_key *ke
     hash = net_flowhash(key, sizeof(struct tcp_heuristic_key),
         tcp_cache_hash_seed);
 
-    return hash & (tcp_cache_size - 1);
+    return (uint16_t)(hash & (tcp_cache_size - 1));
 }
 
 static void
@@ -551,7 +558,7 @@ tcp_getheuristic_with_lock(struct tcp_cache_key_src *tcks,
     struct tcp_heuristic *tpheur = NULL;
     struct tcp_heuristics_head *head;
     struct tcp_heuristic_key key;
-    u_int16_t hash;
+    uint16_t hash;
     int i = 0;
 
     hash = tcp_heuristics_hash(tcks, &key);
@@ -572,11 +579,11 @@ tcp_getheuristic_with_lock(struct tcp_cache_key_src *tcks,
     if ((tpheur == NULL) && create) {
         if (i >= TCP_CACHE_BUCKET_SIZE) {
             struct tcp_heuristic *oldest_heur = NULL;
-            u_int32_t max_age = 0;
+            uint32_t max_age = 0;
 
             /* Look for the oldest tcp_heur in the bucket */
             SLIST_FOREACH(tpheur, &head->tcp_heuristics, list) {
-                u_int32_t age = tcp_now - tpheur->th_last_access;
+                uint32_t age = tcp_now - tpheur->th_last_access;
 
                 if (age > max_age) {
                     max_age = age;
                     oldest_heur = tpheur;
@@ -594,6 +601,7 @@ tcp_getheuristic_with_lock(struct tcp_cache_key_src *tcks,
         tpheur = _MALLOC(sizeof(struct tcp_heuristic), M_TEMP,
             M_NOWAIT | M_ZERO);
         if (tpheur == NULL) {
+            os_log_error(OS_LOG_DEFAULT, "%s could not allocate cache", __func__);
             goto out_null;
         }
@@ -628,44 +636,73 @@ out_null:
 }
 
 static void
-tcp_heuristic_reset_counters(struct tcp_cache_key_src *tcks, u_int8_t flags)
+tcp_heuristic_reset_counters(struct tcp_cache_key_src *tcks, uint8_t flags)
 {
     struct tcp_heuristics_head *head;
     struct tcp_heuristic *tpheur;
 
     /*
-     * Don't attempt to create it! Keep the heuristics clean if the
-     * server does not support TFO. This reduces the lookup-cost on
-     * our side.
+     * Always create heuristics here because MPTCP needs to write success
+     * into it. Thus, we always end up creating them.
      */
-    tpheur = tcp_getheuristic_with_lock(tcks, 0, &head);
+    tpheur = tcp_getheuristic_with_lock(tcks, 1, &head);
     if (tpheur == NULL) {
         return;
     }
 
     if (flags & TCPCACHE_F_TFO_DATA) {
+        if (tpheur->th_tfo_data_loss >= TFO_MAX_COOKIE_LOSS) {
+            os_log(OS_LOG_DEFAULT, "%s: Resetting TFO-data loss to 0 from %u on heur %lx\n",
+                __func__, tpheur->th_tfo_data_loss, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
+        }
         tpheur->th_tfo_data_loss = 0;
     }
 
     if (flags & TCPCACHE_F_TFO_REQ) {
+        if (tpheur->th_tfo_req_loss >= TFO_MAX_COOKIE_LOSS) {
+            os_log(OS_LOG_DEFAULT, "%s: Resetting TFO-req loss to 0 from %u on heur %lx\n",
+                __func__, tpheur->th_tfo_req_loss, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
+        }
         tpheur->th_tfo_req_loss = 0;
     }
 
     if (flags & TCPCACHE_F_TFO_DATA_RST) {
+        if (tpheur->th_tfo_data_rst >= TFO_MAX_COOKIE_LOSS) {
+            os_log(OS_LOG_DEFAULT, "%s: Resetting TFO-data RST to 0 from %u on heur %lx\n",
+                __func__, tpheur->th_tfo_data_rst, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
+        }
         tpheur->th_tfo_data_rst = 0;
     }
 
     if (flags & TCPCACHE_F_TFO_REQ_RST) {
+        if (tpheur->th_tfo_req_rst >= TFO_MAX_COOKIE_LOSS) {
+            os_log(OS_LOG_DEFAULT, "%s: Resetting TFO-req RST to 0 from %u on heur %lx\n",
+                __func__, tpheur->th_tfo_req_rst, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
+        }
         tpheur->th_tfo_req_rst = 0;
     }
 
     if (flags & TCPCACHE_F_ECN) {
+        if (tpheur->th_ecn_loss >= ECN_MAX_SYN_LOSS || tpheur->th_ecn_synrst >= ECN_MAX_SYNRST) {
+            os_log(OS_LOG_DEFAULT, "%s: Resetting ECN-loss to 0 from %u and synrst from %u on heur %lx\n",
+                __func__, tpheur->th_ecn_loss, tpheur->th_ecn_synrst, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
+        }
         tpheur->th_ecn_loss = 0;
         tpheur->th_ecn_synrst = 0;
     }
 
     if (flags & TCPCACHE_F_MPTCP) {
         tpheur->th_mptcp_loss = 0;
+        if (tpheur->th_mptcp_success < MPTCP_SUCCESS_TRIGGER) {
+            tpheur->th_mptcp_success++;
+
+            if (tpheur->th_mptcp_success == MPTCP_SUCCESS_TRIGGER) {
+                os_log(mptcp_log_handle, "%s disabling heuristics for 12 hours", __func__);
+                tpheur->th_mptcp_heuristic_disabled = 1;
+                /* Disable heuristics for 12 hours */
+                tpheur->th_mptcp_backoff = tcp_now + tcp_min_to_hz(tcp_ecn_timeout * 12);
+            }
+        }
     }
 
     tcp_heuristic_unlock(head);
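The TCPCACHE_F_MPTCP branch above implements the new success trigger: after MPTCP_SUCCESS_TRIGGER (ten) successful negotiations, loss heuristics for that destination are switched off, and th_mptcp_backoff doubles as the expiry date of the suspension. With the default tcp_ecn_timeout of 60 minutes, tcp_min_to_hz(tcp_ecn_timeout * 12) is 720 minutes, the "12 hours" in the log message. A minimal sketch of the mechanic in isolation (the tick conversion assumes a 1000 Hz tcp_now clock, i.e. TCP_RETRANSHZ = 1000; field and helper names are simplified):

    #include <stdint.h>
    #include <stdbool.h>

    #define MPTCP_SUCCESS_TRIGGER 10
    #define TICKS_PER_MIN (60u * 1000u)    /* assumed 1000 Hz tick */

    struct heur_sketch {
        uint8_t success;     /* successful MPTCP negotiations so far */
        bool disabled;       /* loss heuristics suspended? */
        uint32_t backoff;    /* tick at which the suspension expires */
    };

    static void
    note_mptcp_success(struct heur_sketch *h, uint32_t now, uint32_t timeout_min)
    {
        if (h->success >= MPTCP_SUCCESS_TRIGGER) {
            return;                 /* already proven, nothing to count */
        }
        if (++h->success == MPTCP_SUCCESS_TRIGGER) {
            h->disabled = true;     /* stop counting SYN losses ... */
            h->backoff = now + timeout_min * 12 * TICKS_PER_MIN; /* ... for 12h */
        }
    }

Once the trigger fires, tcp_heuristic_do_mptcp() (later in this diff) keeps returning -1, and when tcp_now passes th_mptcp_backoff it clears both the flag and the success counter, so the destination has to prove itself again.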
@@ -734,6 +771,9 @@ __tcp_heuristic_tfo_middlebox_common(struct tcp_heuristic *tpheur)
     if (tpheur->th_tfo_backoff > tcp_min_to_hz(tcp_backoff_maximum)) {
         tpheur->th_tfo_backoff = tcp_min_to_hz(tcp_ecn_timeout);
     }
+
+    os_log(OS_LOG_DEFAULT, "%s disable TFO until %u now %u on %lx\n", __func__,
+        tpheur->th_tfo_backoff_until, tcp_now, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
 }
 
 static void
@@ -754,7 +794,7 @@ static void
 tcp_heuristic_inc_counters(struct tcp_cache_key_src *tcks,
-    u_int32_t flags)
+    uint32_t flags)
 {
     struct tcp_heuristics_head *head;
     struct tcp_heuristic *tpheur;
@@ -797,7 +837,9 @@ tcp_heuristic_inc_counters(struct tcp_cache_key_src *tcks,
         }
     }
 
-    if ((flags & TCPCACHE_F_ECN) && tpheur->th_ecn_loss < TCP_CACHE_OVERFLOW_PROTECT) {
+    if ((flags & TCPCACHE_F_ECN) &&
+        tpheur->th_ecn_loss < TCP_CACHE_OVERFLOW_PROTECT &&
+        TSTMP_LEQ(tpheur->th_ecn_backoff, tcp_now)) {
         tpheur->th_ecn_loss++;
         if (tpheur->th_ecn_loss >= ECN_MAX_SYN_LOSS) {
             tcpstat.tcps_ecn_fallback_synloss++;
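All of these counters feed one backoff pattern: when a counter reaches its threshold, the feature is disabled for tcp_min_to_hz(tcp_ecn_timeout) << (counter - threshold) ticks, so each additional failure doubles the quiet period (60, 120, 240, ... minutes with the default tcp_ecn_timeout of 60). A worked sketch of that schedule (the helper is hypothetical; only the shift expression comes from the code):

    #include <stdint.h>

    /* Quiet period in minutes after the Nth failure, mirroring
     * tcp_min_to_hz(tcp_ecn_timeout) << (loss - threshold). */
    static uint32_t
    backoff_minutes(uint32_t loss, uint32_t threshold, uint32_t timeout_min)
    {
        if (loss < threshold) {
            return 0;                                /* still under the limit */
        }
        return timeout_min << (loss - threshold);    /* 60, 120, 240, ... */
    }

The TSTMP_LEQ(tpheur->th_ecn_backoff, tcp_now) terms added in these hunks stop the counters from climbing while a backoff is already in force, so the exponent grows once per backoff period instead of once per failed connection attempt.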
@@ -805,11 +847,16 @@ tcp_heuristic_inc_counters(struct tcp_cache_key_src *tcks,
             tpheur->th_ecn_backoff = tcp_now +
                 (tcp_min_to_hz(tcp_ecn_timeout) <<
                 (tpheur->th_ecn_loss - ECN_MAX_SYN_LOSS));
+
+            os_log(OS_LOG_DEFAULT, "%s disable ECN until %u now %u on %lx for SYN-loss\n",
+                __func__, tpheur->th_ecn_backoff, tcp_now,
+                (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
         }
     }
 
     if ((flags & TCPCACHE_F_MPTCP) &&
-        tpheur->th_mptcp_loss < TCP_CACHE_OVERFLOW_PROTECT) {
+        tpheur->th_mptcp_loss < TCP_CACHE_OVERFLOW_PROTECT &&
+        tpheur->th_mptcp_heuristic_disabled == 0) {
         tpheur->th_mptcp_loss++;
         if (tpheur->th_mptcp_loss >= MPTCP_MAX_SYN_LOSS) {
             /*
@@ -819,11 +866,17 @@ tcp_heuristic_inc_counters(struct tcp_cache_key_src *tcks,
             tpheur->th_mptcp_backoff = tcp_now +
                 (tcp_min_to_hz(tcp_ecn_timeout) <<
                 (tpheur->th_mptcp_loss - MPTCP_MAX_SYN_LOSS));
+            tpheur->th_mptcp_in_backoff = 1;
+
+            os_log(OS_LOG_DEFAULT, "%s disable MPTCP until %u now %u on %lx\n",
+                __func__, tpheur->th_mptcp_backoff, tcp_now,
+                (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
         }
     }
 
     if ((flags & TCPCACHE_F_ECN_DROPRST) &&
-        tpheur->th_ecn_droprst < TCP_CACHE_OVERFLOW_PROTECT) {
+        tpheur->th_ecn_droprst < TCP_CACHE_OVERFLOW_PROTECT &&
+        TSTMP_LEQ(tpheur->th_ecn_backoff, tcp_now)) {
         tpheur->th_ecn_droprst++;
         if (tpheur->th_ecn_droprst >= ECN_MAX_DROPRST) {
             tcpstat.tcps_ecn_fallback_droprst++;
@@ -832,11 +885,16 @@ tcp_heuristic_inc_counters(struct tcp_cache_key_src *tcks,
             tpheur->th_ecn_backoff = tcp_now +
                 (tcp_min_to_hz(tcp_ecn_timeout) <<
                 (tpheur->th_ecn_droprst - ECN_MAX_DROPRST));
+
+            os_log(OS_LOG_DEFAULT, "%s disable ECN until %u now %u on %lx for drop-RST\n",
+                __func__, tpheur->th_ecn_backoff, tcp_now,
+                (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
         }
     }
 
     if ((flags & TCPCACHE_F_ECN_DROPRXMT) &&
-        tpheur->th_ecn_droprxmt < TCP_CACHE_OVERFLOW_PROTECT) {
+        tpheur->th_ecn_droprxmt < TCP_CACHE_OVERFLOW_PROTECT &&
+        TSTMP_LEQ(tpheur->th_ecn_backoff, tcp_now)) {
         tpheur->th_ecn_droprxmt++;
         if (tpheur->th_ecn_droprxmt >= ECN_MAX_DROPRXMT) {
             tcpstat.tcps_ecn_fallback_droprxmt++;
@@ -845,6 +903,10 @@ tcp_heuristic_inc_counters(struct tcp_cache_key_src *tcks,
             tpheur->th_ecn_backoff = tcp_now +
                 (tcp_min_to_hz(tcp_ecn_timeout) <<
                 (tpheur->th_ecn_droprxmt - ECN_MAX_DROPRXMT));
+
+            os_log(OS_LOG_DEFAULT, "%s disable ECN until %u now %u on %lx for drop-Rxmit\n",
+                __func__, tpheur->th_ecn_backoff, tcp_now,
+                (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
         }
     }
     if ((flags & TCPCACHE_F_ECN_SYNRST) &&
@@ -857,6 +919,10 @@ tcp_heuristic_inc_counters(struct tcp_cache_key_src *tcks,
             tpheur->th_ecn_backoff = tcp_now +
                 (tcp_min_to_hz(tcp_ecn_timeout) <<
                 (tpheur->th_ecn_synrst - ECN_MAX_SYNRST));
+
+            os_log(OS_LOG_DEFAULT, "%s disable ECN until %u now %u on %lx for SYN-RST\n",
+                __func__, tpheur->th_ecn_backoff, tcp_now,
+                (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
         }
     }
     tcp_heuristic_unlock(head);
@@ -868,6 +934,11 @@ tcp_heuristic_tfo_loss(struct tcpcb *tp)
     struct tcp_cache_key_src tcks;
     uint32_t flag = 0;
 
+    if (symptoms_is_wifi_lossy() &&
+        IFNET_IS_WIFI(tp->t_inpcb->inp_last_outifp)) {
+        return;
+    }
+
     tcp_cache_key_src_create(tp, &tcks);
 
     if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) {
@@ -903,6 +974,11 @@ tcp_heuristic_mptcp_loss(struct tcpcb *tp)
 {
     struct tcp_cache_key_src tcks;
 
+    if (symptoms_is_wifi_lossy() &&
+        IFNET_IS_WIFI(tp->t_inpcb->inp_last_outifp)) {
+        return;
+    }
+
     tcp_cache_key_src_create(tp, &tcks);
 
     tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_MPTCP);
@@ -913,6 +989,11 @@ tcp_heuristic_ecn_loss(struct tcpcb *tp)
 {
     struct tcp_cache_key_src tcks;
 
+    if (symptoms_is_wifi_lossy() &&
+        IFNET_IS_WIFI(tp->t_inpcb->inp_last_outifp)) {
+        return;
+    }
+
     tcp_cache_key_src_create(tp, &tcks);
 
     tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN);
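Two details worth noting before the next hunk. First, the guards added to tcp_heuristic_{tfo,mptcp,ecn}_loss() above skip the bookkeeping entirely when the Symptoms framework reports the local Wi-Fi as lossy: a SYN lost on a bad link says nothing about the destination. Second, all of the deadline checks use TSTMP_GT()/TSTMP_LEQ() rather than plain comparisons, because tcp_now is a 32-bit tick counter that wraps; the usual serial-number idiom looks like this (a sketch of the conventional definition, in case XNU's exact macros differ):

    #include <stdint.h>

    /* Wraparound-safe ordering: the difference is interpreted as signed,
     * so timestamps less than 2^31 ticks apart compare correctly even
     * when the newer value has wrapped past zero. */
    #define TSTMP_GT(a, b)  ((int32_t)((a) - (b)) > 0)
    #define TSTMP_LEQ(a, b) ((int32_t)((a) - (b)) <= 0)

That is why an expression such as TSTMP_GT(tpheur->th_mptcp_backoff, tcp_now) stays correct even when the computed backoff deadline overflows the 32-bit counter.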
@@ -970,6 +1051,12 @@ tcp_heuristic_ecn_aggressive_common(struct tcp_cache_key_src *tcks)
         return;
     }
 
+    if (TSTMP_GT(tpheur->th_ecn_backoff,
+        tcp_now)) {
+        /* We are already in aggressive mode */
+        tcp_heuristic_unlock(head);
+        return;
+    }
+
     /* Must be done before, otherwise we will start off with expo-backoff */
     tpheur->th_ecn_backoff = tcp_now +
         (tcp_min_to_hz(tcp_ecn_timeout) << (tpheur->th_ecn_aggressive));
@@ -983,6 +1070,9 @@ tcp_heuristic_ecn_aggressive_common(struct tcp_cache_key_src *tcks)
     }
 
     tcp_heuristic_unlock(head);
+
+    os_log(OS_LOG_DEFAULT, "%s disable ECN until %u now %u on %lx\n", __func__,
+        tpheur->th_ecn_backoff, tcp_now, (unsigned long)VM_KERNEL_ADDRPERM(tpheur));
 }
 
 void
@@ -1041,16 +1131,23 @@ tcp_heuristic_do_tfo(struct tcpcb *tp)
     return FALSE;
 }
 
-
-boolean_t
+/*
+ * @return:
+ *  0 Enable MPTCP (we are still discovering middleboxes)
+ * -1 Enable MPTCP (heuristics have been temporarily disabled)
+ *  1 Disable MPTCP
+ */
+int
 tcp_heuristic_do_mptcp(struct tcpcb *tp)
 {
     struct tcp_cache_key_src tcks;
     struct tcp_heuristics_head *head = NULL;
     struct tcp_heuristic *tpheur;
+    int ret = 0;
 
-    if (disable_tcp_heuristics) {
-        return TRUE;
+    if (disable_tcp_heuristics ||
+        (tptomptp(tp)->mpt_mpte->mpte_flags & MPTE_FORCE_ENABLE)) {
+        return 0;
     }
 
     tcp_cache_key_src_create(tp, &tcks);
@@ -1058,16 +1155,32 @@ tcp_heuristic_do_mptcp(struct tcpcb *tp)
     /* Get the tcp-heuristic. */
     tpheur = tcp_getheuristic_with_lock(&tcks, 0, &head);
     if (tpheur == NULL) {
-        return TRUE;
+        return 0;
+    }
+
+    if (tpheur->th_mptcp_in_backoff == 0 ||
+        tpheur->th_mptcp_heuristic_disabled == 1) {
+        goto mptcp_ok;
     }
 
     if (TSTMP_GT(tpheur->th_mptcp_backoff, tcp_now)) {
         goto fallback;
     }
 
-    tcp_heuristic_unlock(head);
+    tpheur->th_mptcp_in_backoff = 0;
 
-    return TRUE;
+mptcp_ok:
+    if (tpheur->th_mptcp_heuristic_disabled) {
+        ret = -1;
+
+        if (TSTMP_GT(tcp_now, tpheur->th_mptcp_backoff)) {
+            tpheur->th_mptcp_heuristic_disabled = 0;
+            tpheur->th_mptcp_success = 0;
+        }
+    }
+
+    tcp_heuristic_unlock(head);
+    return ret;
 
 fallback:
     if (head) {
@@ -1080,7 +1193,7 @@ fallback:
         tcpstat.tcps_mptcp_heuristic_fallback++;
     }
 
-    return FALSE;
+    return 1;
 }
 
 static boolean_t
@@ -1113,6 +1226,9 @@ tcp_heuristic_do_ecn_common(struct tcp_cache_key_src *tcks)
         if (tpheur->th_ecn_synrst >= ECN_RETRY_LIMIT) {
             tpheur->th_ecn_synrst = 0;
         }
+
+        /* Make sure it follows along */
+        tpheur->th_ecn_backoff = tcp_now;
     }
     tcp_heuristic_unlock(head);
@@ -1190,7 +1306,7 @@ tcp_heuristics_ecn_update(struct necp_tcp_ecn_cache *necp_buffer,
 boolean_t
 tcp_heuristic_do_tfo_with_address(struct ifnet *ifp,
     union sockaddr_in_4_6 *local_address, union sockaddr_in_4_6 *remote_address,
-    u_int8_t *cookie, u_int8_t *cookie_len)
+    uint8_t *cookie, uint8_t *cookie_len)
 {
     struct tcp_cache_key_src tcks;
 
@@ -1316,6 +1432,9 @@ static int sysctl_cleartfo SYSCTL_HANDLER_ARGS
     val = oldval;
     error = sysctl_handle_int(oidp, &val, 0, req);
     if (error || !req->newptr) {
+        if (error) {
+            os_log_error(OS_LOG_DEFAULT, "%s could not parse int: %d", __func__, error);
+        }
         return error;
     }
@@ -1350,10 +1469,10 @@ tcp_cache_init(void)
      * On machines with > 4GB of memory, we have a cache-size of 1024 entries,
      * thus about 327KB.
      *
-     * Side-note: we convert to u_int32_t. If sane_size is more than
+     * Side-note: we convert to uint32_t. If sane_size is more than
      * 16000 TB, we loose precision. But, who cares? :)
      */
-    tcp_cache_size = tcp_cache_roundup2((u_int32_t)(sane_size_meg >> 2));
+    tcp_cache_size = tcp_cache_roundup2((uint32_t)(sane_size_meg >> 2));
     if (tcp_cache_size < 32) {
         tcp_cache_size = 32;
     } else if (tcp_cache_size > 1024) {