X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/fe8ab488e9161c46dd9885d58fc52996dc0249ff..94ff46dc2849db4d43eaaf144872decc522aafb4:/bsd/netinet/tcp_timer.c diff --git a/bsd/netinet/tcp_timer.c b/bsd/netinet/tcp_timer.c index aa2317164..ee69afd6f 100644 --- a/bsd/netinet/tcp_timer.c +++ b/bsd/netinet/tcp_timer.c @@ -1,8 +1,8 @@ /* - * Copyright (c) 2000-2014 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -74,7 +74,7 @@ #include #include #include -#include /* before tcp_seq.h, for tcp_random18() */ +#include /* before tcp_seq.h, for tcp_random18() */ #include #include @@ -84,11 +84,13 @@ #include #include #include +#include #if INET6 #include #endif #include #include +#include #include #include #include @@ -101,35 +103,20 @@ #if TCPDEBUG #include #endif +#include + #include #include #include -#define TIMERENTRY_TO_TP(te) ((struct tcpcb *)((uintptr_t)te - offsetof(struct tcpcb, tentry.le.le_next))) - -#define VERIFY_NEXT_LINK(elm,field) do { \ - if (LIST_NEXT((elm),field) != NULL && \ - LIST_NEXT((elm),field)->field.le_prev != \ - &((elm)->field.le_next)) \ - panic("Bad link elm %p next->prev != elm", (elm)); \ -} while(0) - -#define VERIFY_PREV_LINK(elm,field) do { \ - if (*(elm)->field.le_prev != (elm)) \ - panic("Bad link elm %p prev->next != elm", (elm)); \ -} while(0) - -#define TCP_SET_TIMER_MODE(mode, i) do { \ - if (IS_TIMER_HZ_10MS(i)) \ - (mode) |= TCP_TIMERLIST_10MS_MODE; \ - else if (IS_TIMER_HZ_100MS(i)) \ - (mode) |= TCP_TIMERLIST_100MS_MODE; \ - else \ - (mode) |= TCP_TIMERLIST_500MS_MODE; \ -} while(0) - /* Max number of times a stretch ack can be delayed on a connection */ -#define TCP_STRETCHACK_DELAY_THRESHOLD 5 +#define TCP_STRETCHACK_DELAY_THRESHOLD 5 + +/* + * If the host processor has been sleeping for too long, this is the threshold + * used to avoid sending stale retransmissions. 
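+ *
+ * A rough sketch of how this threshold is applied in the TCPT_REXMT
+ * handler below: the machine's accumulated sleep time is converted to
+ * milliseconds, and the delta since the last retransmission is compared
+ * against TCP_SLEEP_TOO_LONG.
+ *
+ *	u_int64_t accsleep_ms;
+ *	absolutetime_to_nanoseconds(mach_absolutetime_asleep, &accsleep_ms);
+ *	accsleep_ms = accsleep_ms / 1000000UL;
+ *	if (accsleep_ms - tp->t_accsleep_ms >= TCP_SLEEP_TOO_LONG)
+ *		... drop the connection instead of retransmitting stale data ...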
+ */ +#define TCP_SLEEP_TOO_LONG (10 * 60 * 1000) /* 10 minutes in ms */ /* tcp timer list */ struct tcptimerlist tcp_timer_list; @@ -140,50 +127,86 @@ struct tcptailq tcp_tw_tailq; static int sysctl_msec_to_ticks SYSCTL_HANDLER_ARGS { -#pragma unused(arg1, arg2) +#pragma unused(arg2) int error, s, tt; - tt = *(int *)oidp->oid_arg1; - s = tt * 1000 / TCP_RETRANSHZ;; + tt = *(int *)arg1; + if (tt < 0 || tt >= INT_MAX / 1000) { + return EINVAL; + } + s = tt * 1000 / TCP_RETRANSHZ; error = sysctl_handle_int(oidp, &s, 0, req); - if (error || !req->newptr) - return (error); + if (error || !req->newptr) { + return error; + } tt = s * TCP_RETRANSHZ / 1000; - if (tt < 1) - return (EINVAL); + if (tt < 1) { + return EINVAL; + } - *(int *)oidp->oid_arg1 = tt; - return (0); + *(int *)arg1 = tt; + SYSCTL_SKMEM_UPDATE_AT_OFFSET(arg2, *(int*)arg1); + return 0; } -int tcp_keepinit; +#if SYSCTL_SKMEM +int tcp_keepinit = TCPTV_KEEP_INIT; +SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &tcp_keepinit, offsetof(skmem_sysctl, tcp.keepinit), + sysctl_msec_to_ticks, "I", ""); + +int tcp_keepidle = TCPTV_KEEP_IDLE; +SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &tcp_keepidle, offsetof(skmem_sysctl, tcp.keepidle), + sysctl_msec_to_ticks, "I", ""); + +int tcp_keepintvl = TCPTV_KEEPINTVL; +SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &tcp_keepintvl, offsetof(skmem_sysctl, tcp.keepintvl), + sysctl_msec_to_ticks, "I", ""); + +SYSCTL_SKMEM_TCP_INT(OID_AUTO, keepcnt, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + int, tcp_keepcnt, TCPTV_KEEPCNT, "number of times to repeat keepalive"); + +int tcp_msl = TCPTV_MSL; +SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &tcp_msl, offsetof(skmem_sysctl, tcp.msl), + sysctl_msec_to_ticks, "I", "Maximum segment lifetime"); +#else /* SYSCTL_SKMEM */ +int tcp_keepinit; SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_keepinit, 0, sysctl_msec_to_ticks, "I", ""); -int tcp_keepidle; +int tcp_keepidle; SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_keepidle, 0, sysctl_msec_to_ticks, "I", ""); -int tcp_keepintvl; +int tcp_keepintvl; SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", ""); -int tcp_keepcnt; +int tcp_keepcnt; SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_keepcnt, 0, "number of times to repeat keepalive"); -int tcp_msl; +int tcp_msl; SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime"); +#endif /* SYSCTL_SKMEM */ -/* +/* * Avoid DoS via TCP Robustness in Persist Condition * (see http://www.ietf.org/id/draft-ananth-tcpm-persist-02.txt) * by allowing a system wide maximum persistence timeout value when in @@ -192,26 +215,32 @@ SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl, * Expressed in milliseconds to be consistent without timeout related * values, the TCP socket option is in seconds. 
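 *
 * For example, to cap zero-window probing at two minutes system-wide
 * (the value is taken in milliseconds; sysctl_msec_to_ticks() converts
 * it to TCP_RETRANSHZ ticks internally):
 *
 *	sysctl -w net.inet.tcp.max_persist_timeout=120000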
*/ +#if SYSCTL_SKMEM u_int32_t tcp_max_persist_timeout = 0; SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - &tcp_max_persist_timeout, 0, sysctl_msec_to_ticks, "I", + &tcp_max_persist_timeout, offsetof(skmem_sysctl, tcp.max_persist_timeout), + sysctl_msec_to_ticks, "I", "Maximum persistence timeout for ZWP"); +#else /* SYSCTL_SKMEM */ +u_int32_t tcp_max_persist_timeout = 0; +SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &tcp_max_persist_timeout, 0, sysctl_msec_to_ticks, "I", "Maximum persistence timeout for ZWP"); +#endif /* SYSCTL_SKMEM */ -static int always_keepalive = 0; -SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, - CTLFLAG_RW | CTLFLAG_LOCKED, - &always_keepalive , 0, "Assume SO_KEEPALIVE on all TCP connections"); +SYSCTL_SKMEM_TCP_INT(OID_AUTO, always_keepalive, + CTLFLAG_RW | CTLFLAG_LOCKED, static int, always_keepalive, 0, + "Assume SO_KEEPALIVE on all TCP connections"); /* * This parameter determines how long the timer list will stay in fast or - * quick mode even though all connections are idle. In this state, the + * quick mode even though all connections are idle. In this state, the * timer will run more frequently anticipating new data. */ -int timer_fastmode_idlemax = TCP_FASTMODE_IDLERUN_MAX; -SYSCTL_INT(_net_inet_tcp, OID_AUTO, timer_fastmode_idlemax, - CTLFLAG_RW | CTLFLAG_LOCKED, - &timer_fastmode_idlemax, 0, "Maximum idle generations in fast mode"); +SYSCTL_SKMEM_TCP_INT(OID_AUTO, timer_fastmode_idlemax, + CTLFLAG_RW | CTLFLAG_LOCKED, int, timer_fastmode_idlemax, + TCP_FASTMODE_IDLERUN_MAX, "Maximum idle generations in fast mode"); /* * See tcp_syn_backoff[] for interval values between SYN retransmits; @@ -220,18 +249,9 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, timer_fastmode_idlemax, * SYN retransmits. Setting it to 0 disables the dropping off of those * two options. 
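 *
 * The check itself, in the TCPT_REXMT handler below, boils down to:
 *
 *	if (tp->t_state == TCPS_SYN_SENT &&
 *	    tp->t_rxtshift == tcp_broken_peer_syn_rxmit_thres)
 *		tp->t_flags &= ~(TF_REQ_SCALE | TF_REQ_TSTMP | TF_REQ_CC);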
*/ -static int tcp_broken_peer_syn_rxmit_thres = 7; -SYSCTL_INT(_net_inet_tcp, OID_AUTO, broken_peer_syn_rxmit_thres, - CTLFLAG_RW | CTLFLAG_LOCKED, - &tcp_broken_peer_syn_rxmit_thres, 0, - "Number of retransmitted SYNs before " - "TCP disables rfc1323 and rfc1644 during the rest of attempts"); - -/* A higher threshold on local connections for disabling RFC 1323 options */ -static int tcp_broken_peer_syn_rxmit_thres_local = 10; -SYSCTL_INT(_net_inet_tcp, OID_AUTO, broken_peer_syn_rexmit_thres_local, - CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_broken_peer_syn_rxmit_thres_local, 0, - "Number of retransmitted SYNs before disabling RFC 1323 " +SYSCTL_SKMEM_TCP_INT(OID_AUTO, broken_peer_syn_rexmit_thres, + CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_broken_peer_syn_rxmit_thres, + 10, "Number of retransmitted SYNs before disabling RFC 1323 " "options on local connections"); static int tcp_timer_advanced = 0; @@ -241,61 +261,137 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_timer_advanced, static int tcp_resched_timerlist = 0; SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_resched_timerlist, - CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_resched_timerlist, 0, + CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_resched_timerlist, 0, "Number of times timer list was rescheduled as part of processing a packet"); -int tcp_pmtud_black_hole_detect = 1 ; -SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_detection, - CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_pmtud_black_hole_detect, 0, +SYSCTL_SKMEM_TCP_INT(OID_AUTO, pmtud_blackhole_detection, + CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_pmtud_black_hole_detect, 1, "Path MTU Discovery Black Hole Detection"); -int tcp_pmtud_black_hole_mss = 1200 ; -SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_mss, - CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_pmtud_black_hole_mss, 0, +SYSCTL_SKMEM_TCP_INT(OID_AUTO, pmtud_blackhole_mss, + CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_pmtud_black_hole_mss, 1200, "Path MTU Discovery Black Hole Detection lowered MSS"); +#if (DEBUG || DEVELOPMENT) +int tcp_probe_if_fix_port = 0; +SYSCTL_INT(_net_inet_tcp, OID_AUTO, probe_if_fix_port, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &tcp_probe_if_fix_port, 0, ""); +#endif /* (DEBUG || DEVELOPMENT) */ + +static u_int32_t tcp_mss_rec_medium = 1200; +static u_int32_t tcp_mss_rec_low = 512; + +#define TCP_REPORT_STATS_INTERVAL 43200 /* 12 hours, in seconds */ +int tcp_report_stats_interval = TCP_REPORT_STATS_INTERVAL; + /* performed garbage collection of "used" sockets */ static boolean_t tcp_gc_done = FALSE; /* max idle probes */ -int tcp_maxpersistidle; +int tcp_maxpersistidle = TCPTV_KEEP_IDLE; /* * TCP delack timer is set to 100 ms. Since the processing of timer list * in fast mode will happen no faster than 100 ms, the delayed ack timer * will fire some where between 100 and 200 ms. 
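 *
 * e.g. with TCP_RETRANSHZ at 1000 ticks per second, TCP_RETRANSHZ / 10
 * is a 100 ms timeout; a delayed ack armed just after one 100 ms list
 * run is only noticed on the next run, hence the 100-200 ms window.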
*/ -int tcp_delack = TCP_RETRANSHZ / 10; +int tcp_delack = TCP_RETRANSHZ / 10; #if MPTCP /* * MP_JOIN retransmission of 3rd ACK will be every 500 msecs without backoff */ -int tcp_jack_rxmt = TCP_RETRANSHZ / 2; +int tcp_jack_rxmt = TCP_RETRANSHZ / 2; #endif /* MPTCP */ +static boolean_t tcp_itimer_done = FALSE; + static void tcp_remove_timer(struct tcpcb *tp); static void tcp_sched_timerlist(uint32_t offset); -static u_int32_t tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *mode); -static void tcp_sched_timers(struct tcpcb *tp); +static u_int32_t tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *mode, + u_int16_t probe_if_index); static inline void tcp_set_lotimer_index(struct tcpcb *); -static void tcp_rexmt_save_state(struct tcpcb *tp); __private_extern__ void tcp_remove_from_time_wait(struct inpcb *inp); +static inline void tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp); __private_extern__ void tcp_report_stats(void); +static u_int64_t tcp_last_report_time; + /* - * Macro to compare two timers. If there is a reset of the sign bit, - * it is safe to assume that the timer has wrapped around. By doing - * signed comparision, we take care of wrap around such that the value - * with the sign bit reset is actually ahead of the other. + * Structure to store previously reported stats so that we can send + * incremental changes in each report interval. */ -inline int32_t -timer_diff(uint32_t t1, uint32_t toff1, uint32_t t2, uint32_t toff2) { - return (int32_t)((t1 + toff1) - (t2 + toff2)); +struct tcp_last_report_stats { + u_int32_t tcps_connattempt; + u_int32_t tcps_accepts; + u_int32_t tcps_ecn_client_setup; + u_int32_t tcps_ecn_server_setup; + u_int32_t tcps_ecn_client_success; + u_int32_t tcps_ecn_server_success; + u_int32_t tcps_ecn_not_supported; + u_int32_t tcps_ecn_lost_syn; + u_int32_t tcps_ecn_lost_synack; + u_int32_t tcps_ecn_recv_ce; + u_int32_t tcps_ecn_recv_ece; + u_int32_t tcps_ecn_sent_ece; + u_int32_t tcps_ecn_conn_recv_ce; + u_int32_t tcps_ecn_conn_recv_ece; + u_int32_t tcps_ecn_conn_plnoce; + u_int32_t tcps_ecn_conn_pl_ce; + u_int32_t tcps_ecn_conn_nopl_ce; + u_int32_t tcps_ecn_fallback_synloss; + u_int32_t tcps_ecn_fallback_reorder; + u_int32_t tcps_ecn_fallback_ce; + + /* TFO-related statistics */ + u_int32_t tcps_tfo_syn_data_rcv; + u_int32_t tcps_tfo_cookie_req_rcv; + u_int32_t tcps_tfo_cookie_sent; + u_int32_t tcps_tfo_cookie_invalid; + u_int32_t tcps_tfo_cookie_req; + u_int32_t tcps_tfo_cookie_rcv; + u_int32_t tcps_tfo_syn_data_sent; + u_int32_t tcps_tfo_syn_data_acked; + u_int32_t tcps_tfo_syn_loss; + u_int32_t tcps_tfo_blackhole; + u_int32_t tcps_tfo_cookie_wrong; + u_int32_t tcps_tfo_no_cookie_rcv; + u_int32_t tcps_tfo_heuristics_disable; + u_int32_t tcps_tfo_sndblackhole; + + /* MPTCP-related statistics */ + u_int32_t tcps_mptcp_handover_attempt; + u_int32_t tcps_mptcp_interactive_attempt; + u_int32_t tcps_mptcp_aggregate_attempt; + u_int32_t tcps_mptcp_fp_handover_attempt; + u_int32_t tcps_mptcp_fp_interactive_attempt; + u_int32_t tcps_mptcp_fp_aggregate_attempt; + u_int32_t tcps_mptcp_heuristic_fallback; + u_int32_t tcps_mptcp_fp_heuristic_fallback; + u_int32_t tcps_mptcp_handover_success_wifi; + u_int32_t tcps_mptcp_handover_success_cell; + u_int32_t tcps_mptcp_interactive_success; + u_int32_t tcps_mptcp_aggregate_success; + u_int32_t tcps_mptcp_fp_handover_success_wifi; + u_int32_t tcps_mptcp_fp_handover_success_cell; + u_int32_t tcps_mptcp_fp_interactive_success; + u_int32_t tcps_mptcp_fp_aggregate_success; + u_int32_t tcps_mptcp_handover_cell_from_wifi; 
+ u_int32_t tcps_mptcp_handover_wifi_from_cell; + u_int32_t tcps_mptcp_interactive_cell_from_wifi; + u_int64_t tcps_mptcp_handover_cell_bytes; + u_int64_t tcps_mptcp_interactive_cell_bytes; + u_int64_t tcps_mptcp_aggregate_cell_bytes; + u_int64_t tcps_mptcp_handover_all_bytes; + u_int64_t tcps_mptcp_interactive_all_bytes; + u_int64_t tcps_mptcp_aggregate_all_bytes; + u_int32_t tcps_mptcp_back_to_wifi; + u_int32_t tcps_mptcp_wifi_proxy; + u_int32_t tcps_mptcp_cell_proxy; + u_int32_t tcps_mptcp_triggered_cell; }; -static u_int64_t tcp_last_report_time; -#define TCP_REPORT_STATS_INTERVAL 345600 /* 4 days, in seconds */ /* Returns true if the timer is on the timer list */ #define TIMER_IS_ON_LIST(tp) ((tp)->t_flags & TF_TIMER_ONLIST) @@ -307,6 +403,94 @@ static u_int64_t tcp_last_report_time; static void add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay); static boolean_t tcp_garbage_collect(struct inpcb *, int); +#define TIMERENTRY_TO_TP(te) ((struct tcpcb *)((uintptr_t)te - offsetof(struct tcpcb, tentry.le.le_next))) + +#define VERIFY_NEXT_LINK(elm, field) do { \ + if (LIST_NEXT((elm),field) != NULL && \ + LIST_NEXT((elm),field)->field.le_prev != \ + &((elm)->field.le_next)) \ + panic("Bad link elm %p next->prev != elm", (elm)); \ +} while(0) + +#define VERIFY_PREV_LINK(elm, field) do { \ + if (*(elm)->field.le_prev != (elm)) \ + panic("Bad link elm %p prev->next != elm", (elm)); \ +} while(0) + +#define TCP_SET_TIMER_MODE(mode, i) do { \ + if (IS_TIMER_HZ_10MS(i)) \ + (mode) |= TCP_TIMERLIST_10MS_MODE; \ + else if (IS_TIMER_HZ_100MS(i)) \ + (mode) |= TCP_TIMERLIST_100MS_MODE; \ + else \ + (mode) |= TCP_TIMERLIST_500MS_MODE; \ +} while(0) + +#if (DEVELOPMENT || DEBUG) +SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mss_rec_medium, + CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_mss_rec_medium, 0, + "Medium MSS based on recommendation in link status report"); +SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mss_rec_low, + CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_mss_rec_low, 0, + "Low MSS based on recommendation in link status report"); + +static int32_t tcp_change_mss_recommended = 0; +static int +sysctl_change_mss_recommended SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg1, arg2) + int i, err = 0, changed = 0; + struct ifnet *ifp; + struct if_link_status ifsr; + struct if_cellular_status_v1 *new_cell_sr; + err = sysctl_io_number(req, tcp_change_mss_recommended, + sizeof(int32_t), &i, &changed); + if (changed) { + ifnet_head_lock_shared(); + TAILQ_FOREACH(ifp, &ifnet_head, if_link) { + if (IFNET_IS_CELLULAR(ifp)) { + bzero(&ifsr, sizeof(ifsr)); + new_cell_sr = &ifsr.ifsr_u.ifsr_cell.if_cell_u.if_status_v1; + ifsr.ifsr_version = IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION; + ifsr.ifsr_len = sizeof(*new_cell_sr); + + /* Set MSS recommended */ + new_cell_sr->valid_bitmask |= IF_CELL_UL_MSS_RECOMMENDED_VALID; + new_cell_sr->mss_recommended = i; + err = ifnet_link_status_report(ifp, new_cell_sr, sizeof(new_cell_sr)); + if (err == 0) { + tcp_change_mss_recommended = i; + } else { + break; + } + } + } + ifnet_head_done(); + } + return err; +} + +SYSCTL_PROC(_net_inet_tcp, OID_AUTO, change_mss_recommended, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_change_mss_recommended, + 0, sysctl_change_mss_recommended, "IU", "Change MSS recommended"); + +SYSCTL_INT(_net_inet_tcp, OID_AUTO, report_stats_interval, + CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_report_stats_interval, 0, + "Report stats interval"); +#endif /* (DEVELOPMENT || DEBUG) */ + +/* + * Macro to compare two timers. 
If there is a reset of the sign bit, + * it is safe to assume that the timer has wrapped around. By doing + * signed comparision, we take care of wrap around such that the value + * with the sign bit reset is actually ahead of the other. + */ +inline int32_t +timer_diff(uint32_t t1, uint32_t toff1, uint32_t t2, uint32_t toff2) +{ + return (int32_t)((t1 + toff1) - (t2 + toff2)); +} + /* * Add to tcp timewait list, delay is given in milliseconds. */ @@ -318,13 +502,13 @@ add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay) uint32_t timer; /* pcb list should be locked when we get here */ - lck_rw_assert(pcbinfo->ipi_lock, LCK_RW_ASSERT_EXCLUSIVE); + LCK_RW_ASSERT(pcbinfo->ipi_lock, LCK_RW_ASSERT_EXCLUSIVE); /* We may get here multiple times, so check */ if (!(inp->inp_flags2 & INP2_TIMEWAIT)) { pcbinfo->ipi_twcount++; inp->inp_flags2 |= INP2_TIMEWAIT; - + /* Remove from global inp list */ LIST_REMOVE(inp, inp_list); } else { @@ -333,11 +517,12 @@ add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay) /* Compute the time at which this socket can be closed */ timer = tcp_now + delay; - + /* We will use the TCPT_2MSL timer for tracking this delay */ - if (TIMER_IS_ON_LIST(tp)) + if (TIMER_IS_ON_LIST(tp)) { tcp_remove_timer(tp); + } tp->t_timer[TCPT_2MSL] = timer; TAILQ_INSERT_TAIL(&tcp_tw_tailq, tp, t_twentry); @@ -347,13 +532,17 @@ void add_to_time_wait(struct tcpcb *tp, uint32_t delay) { struct inpcbinfo *pcbinfo = &tcbinfo; - if (tp->t_inpcb->inp_socket->so_options & SO_NOWAKEFROMSLEEP) + if (tp->t_inpcb->inp_socket->so_options & SO_NOWAKEFROMSLEEP) { socket_post_kev_msg_closed(tp->t_inpcb->inp_socket); + } + + /* 19182803: Notify nstat that connection is closing before waiting. */ + nstat_pcb_detach(tp->t_inpcb); if (!lck_rw_try_lock_exclusive(pcbinfo->ipi_lock)) { - tcp_unlock(tp->t_inpcb->inp_socket, 0, 0); + socket_unlock(tp->t_inpcb->inp_socket, 0); lck_rw_lock_exclusive(pcbinfo->ipi_lock); - tcp_lock(tp->t_inpcb->inp_socket, 0, 0); + socket_lock(tp->t_inpcb->inp_socket, 0); } add_to_time_wait_locked(tp, delay); lck_rw_done(pcbinfo->ipi_lock); @@ -366,20 +555,43 @@ void tcp_remove_from_time_wait(struct inpcb *inp) { struct tcpcb *tp = intotcpcb(inp); - if (inp->inp_flags2 & INP2_TIMEWAIT) + if (inp->inp_flags2 & INP2_TIMEWAIT) { TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry); + } } static boolean_t tcp_garbage_collect(struct inpcb *inp, int istimewait) { boolean_t active = FALSE; - struct socket *so; + struct socket *so, *mp_so = NULL; struct tcpcb *tp; so = inp->inp_socket; tp = intotcpcb(inp); + if (so->so_flags & SOF_MP_SUBFLOW) { + mp_so = mptetoso(tptomptp(tp)->mpt_mpte); + if (!socket_try_lock(mp_so)) { + mp_so = NULL; + active = TRUE; + goto out; + } + if (mpsotomppcb(mp_so)->mpp_inside > 0) { + os_log(mptcp_log_handle, "%s - %lx: Still inside %d usecount %d\n", __func__, + (unsigned long)VM_KERNEL_ADDRPERM(mpsotompte(mp_so)), + mpsotomppcb(mp_so)->mpp_inside, + mp_so->so_usecount); + socket_unlock(mp_so, 0); + mp_so = NULL; + active = TRUE; + goto out; + } + /* We call socket_unlock with refcount further below */ + mp_so->so_usecount++; + tptomptp(tp)->mpt_mpte->mpte_mppcb->mpp_inside++; + } + /* * Skip if still in use or busy; it would have been more efficient * if we were to test so_usecount against 0, but this isn't possible @@ -387,20 +599,22 @@ tcp_garbage_collect(struct inpcb *inp, int istimewait) * overflow sockets that are eligible for garbage collection have * their usecounts set to 1. 
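 *
 * (Testing so_usecount without the PCB lock would race with the socket
 * being reused, so we only try-lock here and re-check the count under
 * the lock; if the try-lock fails, the socket is treated as active and
 * revisited on the next GC pass.)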
*/ - if (!lck_mtx_try_lock_spin(&inp->inpcb_mtx)) - return (TRUE); + if (!lck_mtx_try_lock_spin(&inp->inpcb_mtx)) { + active = TRUE; + goto out; + } /* Check again under the lock */ if (so->so_usecount > 1) { - if (inp->inp_wantcnt == WNT_STOPUSING) + if (inp->inp_wantcnt == WNT_STOPUSING) { active = TRUE; + } lck_mtx_unlock(&inp->inpcb_mtx); - return (active); + goto out; } - if (istimewait && - TSTMP_GEQ(tcp_now, tp->t_timer[TCPT_2MSL]) && - tp->t_state != TCPS_CLOSED) { + if (istimewait && TSTMP_GEQ(tcp_now, tp->t_timer[TCPT_2MSL]) && + tp->t_state != TCPS_CLOSED) { /* Become a regular mutex */ lck_mtx_convert_spin(&inp->inpcb_mtx); tcp_close(tp); @@ -419,48 +633,50 @@ tcp_garbage_collect(struct inpcb *inp, int istimewait) ((istimewait && (so->so_flags & SOF_OVERFLOW)) || ((tp != NULL) && (tp->t_state == TCPS_CLOSED) && (so->so_head != NULL) && - ((so->so_state & (SS_INCOMP|SS_CANTSENDMORE|SS_CANTRCVMORE)) == - (SS_INCOMP|SS_CANTSENDMORE|SS_CANTRCVMORE))))) { - + ((so->so_state & (SS_INCOMP | SS_CANTSENDMORE | SS_CANTRCVMORE)) == + (SS_INCOMP | SS_CANTSENDMORE | SS_CANTRCVMORE))))) { if (inp->inp_state != INPCB_STATE_DEAD) { /* Become a regular mutex */ lck_mtx_convert_spin(&inp->inpcb_mtx); #if INET6 - if (SOCK_CHECK_DOM(so, PF_INET6)) + if (SOCK_CHECK_DOM(so, PF_INET6)) { in6_pcbdetach(inp); - else + } else #endif /* INET6 */ - in_pcbdetach(inp); + in_pcbdetach(inp); } + VERIFY(so->so_usecount > 0); so->so_usecount--; - if (inp->inp_wantcnt == WNT_STOPUSING) + if (inp->inp_wantcnt == WNT_STOPUSING) { active = TRUE; + } lck_mtx_unlock(&inp->inpcb_mtx); - return (active); + goto out; } else if (inp->inp_wantcnt != WNT_STOPUSING) { lck_mtx_unlock(&inp->inpcb_mtx); - return (FALSE); + active = FALSE; + goto out; } /* - * We get here because the PCB is no longer searchable - * (WNT_STOPUSING); detach (if needed) and dispose if it is dead - * (usecount is 0). This covers all cases, including overflow - * sockets and those that are considered as "embryonic", - * i.e. created by sonewconn() in TCP input path, and have + * We get here because the PCB is no longer searchable + * (WNT_STOPUSING); detach (if needed) and dispose if it is dead + * (usecount is 0). This covers all cases, including overflow + * sockets and those that are considered as "embryonic", + * i.e. created by sonewconn() in TCP input path, and have * not yet been committed. For the former, we reduce the usecount - * to 0 as done by the code above. For the latter, the usecount + * to 0 as done by the code above. For the latter, the usecount * would have reduced to 0 as part calling soabort() when the * socket is dropped at the end of tcp_input(). 
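 *
 * (The disposal sequence below: take the tp off the timer list, detach
 * the PCB via in_pcbdetach()/in6_pcbdetach() if that has not happened
 * yet, unlink any MPTCP subflow while mp_so is still held, and finally
 * free the PCB with in_pcbdispose().)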
*/ if (so->so_usecount == 0) { DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp, - struct tcpcb *, tp, int32_t, TCPS_CLOSED); + struct tcpcb *, tp, int32_t, TCPS_CLOSED); /* Become a regular mutex */ lck_mtx_convert_spin(&inp->inpcb_mtx); /* - * If this tp still happens to be on the timer list, + * If this tp still happens to be on the timer list, * take it out */ if (TIMER_IS_ON_LIST(tp)) { @@ -469,18 +685,35 @@ tcp_garbage_collect(struct inpcb *inp, int istimewait) if (inp->inp_state != INPCB_STATE_DEAD) { #if INET6 - if (SOCK_CHECK_DOM(so, PF_INET6)) + if (SOCK_CHECK_DOM(so, PF_INET6)) { in6_pcbdetach(inp); - else + } else #endif /* INET6 */ - in_pcbdetach(inp); + in_pcbdetach(inp); } + + if (mp_so) { + mptcp_subflow_del(tptomptp(tp)->mpt_mpte, tp->t_mpsub); + + /* so is now unlinked from mp_so - let's drop the lock */ + socket_unlock(mp_so, 1); + mp_so = NULL; + } + in_pcbdispose(inp); - return (FALSE); + active = FALSE; + goto out; } lck_mtx_unlock(&inp->inpcb_mtx); - return (TRUE); + active = TRUE; + +out: + if (mp_so) { + socket_unlock(mp_so, 1); + } + + return active; } /* @@ -530,26 +763,28 @@ tcp_gc(struct inpcbinfo *ipi) tcp_gc_done = TRUE; LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) { - if (tcp_garbage_collect(inp, 0)) + if (tcp_garbage_collect(inp, 0)) { atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1); + } } /* Now cleanup the time wait ones */ TAILQ_FOREACH_SAFE(tw_tp, &tcp_tw_tailq, t_twentry, tw_ntp) { /* - * We check the timestamp here without holding the + * We check the timestamp here without holding the * socket lock for better performance. If there are * any pcbs in time-wait, the timer will get rescheduled. * Hence some error in this check can be tolerated. * * Sometimes a socket on time-wait queue can be closed if * 2MSL timer expired but the application still has a - * usecount on it. + * usecount on it. */ - if (tw_tp->t_state == TCPS_CLOSED || + if (tw_tp->t_state == TCPS_CLOSED || TSTMP_GEQ(tcp_now, tw_tp->t_timer[TCPT_2MSL])) { - if (tcp_garbage_collect(tw_tp->t_inpcb, 1)) + if (tcp_garbage_collect(tw_tp->t_inpcb, 1)) { atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1); + } } } @@ -559,8 +794,9 @@ tcp_gc(struct inpcbinfo *ipi) lck_rw_done(ipi->ipi_lock); /* Clean up the socache while we are here */ - if (so_cache_timer()) + if (so_cache_timer()) { atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1); + } KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END, tws_checked, cur_tw_slot, 0, 0, 0); @@ -572,32 +808,33 @@ tcp_gc(struct inpcbinfo *ipi) * Cancel all timers for TCP tp. 
*/ void -tcp_canceltimers(tp) - struct tcpcb *tp; +tcp_canceltimers(struct tcpcb *tp) { - register int i; + int i; tcp_remove_timer(tp); - for (i = 0; i < TCPT_NTIMERS; i++) + for (i = 0; i < TCPT_NTIMERS; i++) { tp->t_timer[i] = 0; + } tp->tentry.timer_start = tcp_now; tp->tentry.index = TCPT_NONE; } -int tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] = - { 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 }; +int tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] = +{ 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 }; -int tcp_backoff[TCP_MAXRXTSHIFT + 1] = - { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 }; +int tcp_backoff[TCP_MAXRXTSHIFT + 1] = +{ 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 }; -static int tcp_totbackoff = 511; /* sum of tcp_backoff[] */ +static int tcp_totbackoff = 511; /* sum of tcp_backoff[] */ -static void tcp_rexmt_save_state(struct tcpcb *tp) +void +tcp_rexmt_save_state(struct tcpcb *tp) { u_int32_t fsize; if (TSTMP_SUPPORTED(tp)) { /* - * Since timestamps are supported on the connection, + * Since timestamps are supported on the connection, * we can do recovery as described in rfc 4015. */ fsize = tp->snd_max - tp->snd_una; @@ -608,19 +845,20 @@ static void tcp_rexmt_save_state(struct tcpcb *tp) * Timestamp option is not supported on this connection. * Record ssthresh and cwnd so they can * be recovered if this turns out to be a "bad" retransmit. - * A retransmit is considered "bad" if an ACK for this + * A retransmit is considered "bad" if an ACK for this * segment is received within RTT/2 interval; the assumption - * here is that the ACK was already in flight. See + * here is that the ACK was already in flight. See * "On Estimating End-to-End Network Path Properties" by * Allman and Paxson for more details. */ tp->snd_cwnd_prev = tp->snd_cwnd; tp->snd_ssthresh_prev = tp->snd_ssthresh; tp->snd_recover_prev = tp->snd_recover; - if (IN_FASTRECOVERY(tp)) + if (IN_FASTRECOVERY(tp)) { tp->t_flags |= TF_WASFRECOVERY; - else + } else { tp->t_flags &= ~TF_WASFRECOVERY; + } } tp->t_srtt_prev = (tp->t_srtt >> TCP_RTT_SHIFT) + 2; tp->t_rttvar_prev = (tp->t_rttvar >> TCP_RTTVAR_SHIFT); @@ -631,33 +869,37 @@ static void tcp_rexmt_save_state(struct tcpcb *tp) * Revert to the older segment size if there is an indication that PMTU * blackhole detection was not needed. */ -void tcp_pmtud_revert_segment_size(struct tcpcb *tp) +void +tcp_pmtud_revert_segment_size(struct tcpcb *tp) { int32_t optlen; VERIFY(tp->t_pmtud_saved_maxopd > 0); - tp->t_flags |= TF_PMTUD; - tp->t_flags &= ~TF_BLACKHOLE; + tp->t_flags |= TF_PMTUD; + tp->t_flags &= ~TF_BLACKHOLE; optlen = tp->t_maxopd - tp->t_maxseg; tp->t_maxopd = tp->t_pmtud_saved_maxopd; tp->t_maxseg = tp->t_maxopd - optlen; + /* - * Reset the slow-start flight size as it + * Reset the slow-start flight size as it * may depend on the new MSS */ - if (CC_ALGO(tp)->cwnd_init != NULL) + if (CC_ALGO(tp)->cwnd_init != NULL) { CC_ALGO(tp)->cwnd_init(tp); + } tp->t_pmtud_start_ts = 0; tcpstat.tcps_pmtudbh_reverted++; + + /* change MSS according to recommendation, if there was one */ + tcp_update_mss_locked(tp->t_inpcb->inp_socket, NULL); } /* * TCP timer processing. 
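 *
 * Retransmit timeouts draw on the tcp_backoff[] table above: the Nth
 * retransmission waits roughly tcp_backoff[N] * RTO, clamped by
 * TCPTV_REXMTMAX. The table sums to tcp_totbackoff:
 *
 *	1 + 2 + 4 + 8 + 16 + 32 + 7 * 64 == 511
 *
 * which bounds the total probing time in the persist-drop check of the
 * TCPT_PERSIST case below.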
*/ struct tcpcb * -tcp_timers(tp, timer) - register struct tcpcb *tp; - int timer; +tcp_timers(struct tcpcb *tp, int timer) { int32_t rexmt, optlen = 0, idle_time = 0; struct socket *so; @@ -669,12 +911,13 @@ tcp_timers(tp, timer) #if INET6 int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV4) == 0; #endif /* INET6 */ + u_int64_t accsleep_ms; + u_int32_t last_sleep_ms = 0; so = tp->t_inpcb->inp_socket; idle_time = tcp_now - tp->t_rcvtime; switch (timer) { - /* * 2 MSL timeout in shutdown went off. If we're closed but * still waiting for peer to close and connection has been idle @@ -688,11 +931,11 @@ tcp_timers(tp, timer) if (tp->t_state != TCPS_TIME_WAIT && tp->t_state != TCPS_FIN_WAIT_2 && ((idle_time > 0) && (idle_time < TCP_CONN_MAXIDLE(tp)))) { - tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp, - (u_int32_t)TCP_CONN_KEEPINTVL(tp)); + tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp, + (u_int32_t)TCP_CONN_KEEPINTVL(tp)); } else { tp = tcp_close(tp); - return(tp); + return tp; } break; @@ -702,6 +945,12 @@ tcp_timers(tp, timer) * to a longer retransmit interval and retransmit one segment. */ case TCPT_REXMT: + absolutetime_to_nanoseconds(mach_absolutetime_asleep, + &accsleep_ms); + accsleep_ms = accsleep_ms / 1000000UL; + if (accsleep_ms > tp->t_accsleep_ms) { + last_sleep_ms = accsleep_ms - tp->t_accsleep_ms; + } /* * Drop a connection in the retransmit timer * 1. If we have retransmitted more than TCP_MAXRXTSHIFT @@ -714,57 +963,97 @@ tcp_timers(tp, timer) * receiving an ack */ if (++tp->t_rxtshift > TCP_MAXRXTSHIFT || - (tp->t_rxt_conndroptime > 0 - && tp->t_rxtstart > 0 && - (tcp_now - tp->t_rxtstart) >= tp->t_rxt_conndroptime) - || ((tp->t_flagsext & TF_RXTFINDROP) != 0 && - (tp->t_flags & TF_SENTFIN) != 0 && - tp->t_rxtshift >= 4)) { + (tp->t_rxt_conndroptime > 0 && tp->t_rxtstart > 0 && + (tcp_now - tp->t_rxtstart) >= tp->t_rxt_conndroptime) || + ((tp->t_flagsext & TF_RXTFINDROP) != 0 && + (tp->t_flags & TF_SENTFIN) != 0 && tp->t_rxtshift >= 4) || + (tp->t_rxtshift > 4 && last_sleep_ms >= TCP_SLEEP_TOO_LONG)) { + if (tp->t_state == TCPS_ESTABLISHED && + tp->t_rxt_minimum_timeout > 0) { + /* + * Avoid dropping a connection if minimum + * timeout is set and that time did not + * pass. We will retry sending + * retransmissions at the maximum interval + */ + if (TSTMP_LT(tcp_now, (tp->t_rxtstart + + tp->t_rxt_minimum_timeout))) { + tp->t_rxtshift = TCP_MAXRXTSHIFT - 1; + goto retransmit_packet; + } + } if ((tp->t_flagsext & TF_RXTFINDROP) != 0) { tcpstat.tcps_rxtfindrop++; + } else if (last_sleep_ms >= TCP_SLEEP_TOO_LONG) { + tcpstat.tcps_drop_after_sleep++; } else { tcpstat.tcps_timeoutdrop++; } + if (tp->t_rxtshift >= TCP_MAXRXTSHIFT) { + if (TCP_ECN_ENABLED(tp)) { + INP_INC_IFNET_STAT(tp->t_inpcb, + ecn_on.rxmit_drop); + } else { + INP_INC_IFNET_STAT(tp->t_inpcb, + ecn_off.rxmit_drop); + } + } tp->t_rxtshift = TCP_MAXRXTSHIFT; - postevent(so, 0, EV_TIMEOUT); - soevent(so, - (SO_FILT_HINT_LOCKED|SO_FILT_HINT_TIMEOUT)); + postevent(so, 0, EV_TIMEOUT); + soevent(so, + (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT)); + + if (TCP_ECN_ENABLED(tp) && + tp->t_state == TCPS_ESTABLISHED) { + tcp_heuristic_ecn_droprxmt(tp); + } + tp = tcp_drop(tp, tp->t_softerror ? tp->t_softerror : ETIMEDOUT); break; } - +retransmit_packet: tcpstat.tcps_rexmttimeo++; + tp->t_accsleep_ms = accsleep_ms; - if (tp->t_rxtshift == 1 && - tp->t_state == TCPS_ESTABLISHED) { + if (tp->t_rxtshift == 1 && + tp->t_state == TCPS_ESTABLISHED) { /* Set the time at which retransmission started. 
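 * (t_rxtstart recorded here also feeds the t_rxt_conndroptime and
 * t_rxt_minimum_timeout checks at the top of this case.)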
*/ tp->t_rxtstart = tcp_now; - /* + /* * if this is the first retransmit timeout, save * the state so that we can recover if the timeout * is spurious. - */ + */ tcp_rexmt_save_state(tp); + tcp_ccdbg_trace(tp, NULL, TCP_CC_FIRST_REXMT); } #if MPTCP if ((tp->t_rxtshift >= mptcp_fail_thresh) && (tp->t_state == TCPS_ESTABLISHED) && (tp->t_mpflags & TMPF_MPTCP_TRUE)) { mptcp_act_on_txfail(so); + } + if (TCPS_HAVEESTABLISHED(tp->t_state) && + (so->so_flags & SOF_MP_SUBFLOW)) { + struct mptses *mpte = tptomptp(tp)->mpt_mpte; + + if (mpte->mpte_svctype == MPTCP_SVCTYPE_HANDOVER) { + mptcp_check_subflows_and_add(mpte); + } } #endif /* MPTCP */ if (tp->t_adaptive_wtimo > 0 && - tp->t_rxtshift > tp->t_adaptive_wtimo && - TCPS_HAVEESTABLISHED(tp->t_state)) { + tp->t_rxtshift > tp->t_adaptive_wtimo && + TCPS_HAVEESTABLISHED(tp->t_state)) { /* Send an event to the application */ soevent(so, - (SO_FILT_HINT_LOCKED| - SO_FILT_HINT_ADAPTIVE_WTIMO)); + (SO_FILT_HINT_LOCKED | + SO_FILT_HINT_ADAPTIVE_WTIMO)); } /* @@ -788,32 +1077,90 @@ tcp_timers(tp, timer) tp->t_flagsext &= ~(TF_DELAY_RECOVERY); } + if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) && + tp->t_state == TCPS_SYN_RECEIVED) { + tcp_disable_tfo(tp); + } + + if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) && + !(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) && + (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) && + !(tp->t_tfo_flags & TFO_F_NO_SNDPROBING) && + ((tp->t_state != TCPS_SYN_SENT && tp->t_rxtshift > 1) || + tp->t_rxtshift > 4)) { + /* + * For regular retransmissions, a first one is being + * done for tail-loss probe. + * Thus, if rxtshift > 1, this means we have sent the segment + * a total of 3 times. + * + * If we are in SYN-SENT state, then there is no tail-loss + * probe thus we have to let rxtshift go up to 3. 
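+ *
+ * When this fires, tcp_heuristic_tfo_middlebox() records the failure so
+ * that later connections to this peer can avoid TFO, and the socket is
+ * woken up with ENODATA (see the soevent()/sorwakeup() calls below).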
+ */ + tcp_heuristic_tfo_middlebox(tp); + + so->so_error = ENODATA; + soevent(so, + (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MP_SUB_ERROR)); + sorwakeup(so); + sowwakeup(so); + + tp->t_tfo_stats |= TFO_S_SEND_BLACKHOLE; + tcpstat.tcps_tfo_sndblackhole++; + } + + if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) && + !(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) && + (tp->t_tfo_stats & TFO_S_SYN_DATA_ACKED) && + tp->t_rxtshift > 3) { + if (TSTMP_GT(tp->t_sndtime - 10 * TCP_RETRANSHZ, tp->t_rcvtime)) { + tcp_heuristic_tfo_middlebox(tp); + + so->so_error = ENODATA; + soevent(so, + (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MP_SUB_ERROR)); + sorwakeup(so); + sowwakeup(so); + } + } + if (tp->t_state == TCPS_SYN_SENT) { rexmt = TCP_REXMTVAL(tp) * tcp_syn_backoff[tp->t_rxtshift]; tp->t_stat.synrxtshift = tp->t_rxtshift; + tp->t_stat.rxmitsyns++; + + /* When retransmitting, disable TFO */ + if (tfo_enabled(tp) && + !(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE)) { + tcp_disable_tfo(tp); + tp->t_tfo_flags |= TFO_F_SYN_LOSS; + } } else { rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift]; } - TCPT_RANGESET(tp->t_rxtcur, rexmt, - tp->t_rttmin, TCPTV_REXMTMAX, - TCP_ADD_REXMTSLOP(tp)); + TCPT_RANGESET(tp->t_rxtcur, rexmt, tp->t_rttmin, TCPTV_REXMTMAX, + TCP_ADD_REXMTSLOP(tp)); tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur); - if (INP_WAIT_FOR_IF_FEEDBACK(tp->t_inpcb)) + TCP_LOG_RTT_INFO(tp); + + if (INP_WAIT_FOR_IF_FEEDBACK(tp->t_inpcb)) { goto fc_output; + } tcp_free_sackholes(tp); /* * Check for potential Path MTU Discovery Black Hole */ if (tcp_pmtud_black_hole_detect && - !(tp->t_flagsext & TF_NOBLACKHOLE_DETECTION) && - (tp->t_state == TCPS_ESTABLISHED)) { - if (((tp->t_flags & (TF_PMTUD|TF_MAXSEGSNT)) - == (TF_PMTUD|TF_MAXSEGSNT)) && - (tp->t_rxtshift == 2)) { - /* + !(tp->t_flagsext & TF_NOBLACKHOLE_DETECTION) && + (tp->t_state == TCPS_ESTABLISHED)) { + if ((tp->t_flags & TF_PMTUD) && + ((tp->t_flags & TF_MAXSEGSNT) + || tp->t_pmtud_lastseg_size > tcp_pmtud_black_hole_mss) && + tp->t_rxtshift == 2) { + /* * Enter Path MTU Black-hole Detection mechanism: * - Disable Path MTU Discovery (IP "DF" bit). * - Reduce MTU to lower value than what we @@ -827,26 +1174,29 @@ tcp_timers(tp, timer) /* Keep track of previous MSS */ tp->t_pmtud_saved_maxopd = tp->t_maxopd; tp->t_pmtud_start_ts = tcp_now; - if (tp->t_pmtud_start_ts == 0) + if (tp->t_pmtud_start_ts == 0) { tp->t_pmtud_start_ts++; + } /* Reduce the MSS to intermediary value */ if (tp->t_maxopd > tcp_pmtud_black_hole_mss) { tp->t_maxopd = tcp_pmtud_black_hole_mss; } else { - tp->t_maxopd = /* use the default MSS */ + tp->t_maxopd = /* use the default MSS */ #if INET6 - isipv6 ? tcp_v6mssdflt : + isipv6 ? tcp_v6mssdflt : #endif /* INET6 */ - tcp_mssdflt; + tcp_mssdflt; } tp->t_maxseg = tp->t_maxopd - optlen; /* - * Reset the slow-start flight size + * Reset the slow-start flight size * as it may depend on the new MSS - */ - if (CC_ALGO(tp)->cwnd_init != NULL) + */ + if (CC_ALGO(tp)->cwnd_init != NULL) { CC_ALGO(tp)->cwnd_init(tp); + } + tp->snd_cwnd = tp->t_maxseg; } /* * If further retransmissions are still @@ -855,10 +1205,10 @@ tcp_timers(tp, timer) * MSS and blackhole detection flags. */ else { - if ((tp->t_flags & TF_BLACKHOLE) && (tp->t_rxtshift > 4)) { tcp_pmtud_revert_segment_size(tp); + tp->snd_cwnd = tp->t_maxseg; } } } @@ -874,11 +1224,9 @@ tcp_timers(tp, timer) * Do this only on non-local connections. 
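 * Once the threshold is hit, TF_REQ_SCALE, TF_REQ_TSTMP and TF_REQ_CC
 * are cleared, so the remaining SYN retransmissions go out without
 * window scaling, timestamps or the RFC 1644 CC options.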
*/ if (tp->t_state == TCPS_SYN_SENT && - ((!(tp->t_flags & TF_LOCAL) && - tp->t_rxtshift == tcp_broken_peer_syn_rxmit_thres) || - ((tp->t_flags & TF_LOCAL) && - tp->t_rxtshift == tcp_broken_peer_syn_rxmit_thres_local))) - tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_REQ_CC); + tp->t_rxtshift == tcp_broken_peer_syn_rxmit_thres) { + tp->t_flags &= ~(TF_REQ_SCALE | TF_REQ_TSTMP | TF_REQ_CC); + } /* * If losing, let the lower level know and try for @@ -890,9 +1238,9 @@ tcp_timers(tp, timer) */ if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { #if INET6 - if (isipv6) + if (isipv6) { in6_losing(tp->t_inpcb); - else + } else #endif /* INET6 */ in_losing(tp->t_inpcb); tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT); @@ -912,8 +1260,9 @@ tcp_timers(tp, timer) /* If timing a segment in this window, stop the timer */ tp->t_rtttime = 0; - if (!IN_FASTRECOVERY(tp) && tp->t_rxtshift == 1) + if (!IN_FASTRECOVERY(tp) && tp->t_rxtshift == 1) { tcpstat.tcps_tailloss_rto++; + } /* @@ -922,18 +1271,25 @@ tcp_timers(tp, timer) * been retransmitted by way of the retransmission timer at * least once, the value of ssthresh is held constant */ - if (tp->t_rxtshift == 1 && - CC_ALGO(tp)->after_timeout != NULL) + if (tp->t_rxtshift == 1 && + CC_ALGO(tp)->after_timeout != NULL) { CC_ALGO(tp)->after_timeout(tp); + /* + * CWR notifications are to be sent on new data + * right after Fast Retransmits and ECE + * notification receipts. + */ + if (TCP_ECN_ENABLED(tp)) { + tp->ecn_flags |= TE_SENDCWR; + } + } EXIT_FASTRECOVERY(tp); - /* CWR notifications are to be sent on new data right after - * RTOs, Fast Retransmits and ECE notification receipts. - */ - if ((tp->ecn_flags & TE_ECN_ON) == TE_ECN_ON) { - tp->ecn_flags |= TE_SENDCWR; - } + /* Exit cwnd non validated phase */ + tp->t_flagsext &= ~TF_CWND_NONVALIDATED; + + fc_output: tcp_ccdbg_trace(tp, NULL, TCP_CC_REXMT_TIMEOUT); @@ -952,20 +1308,20 @@ fc_output: * backoff, drop the connection if the idle time * (no responses to probes) reaches the maximum * backoff that we would use if retransmitting. - * - * Drop the connection if we reached the maximum allowed time for - * Zero Window Probes without a non-zero update from the peer. + * + * Drop the connection if we reached the maximum allowed time for + * Zero Window Probes without a non-zero update from the peer. * See rdar://5805356 */ if ((tp->t_rxtshift == TCP_MAXRXTSHIFT && (idle_time >= tcp_maxpersistidle || - idle_time >= TCP_REXMTVAL(tp) * tcp_totbackoff)) || - ((tp->t_persist_stop != 0) && - TSTMP_LEQ(tp->t_persist_stop, tcp_now))) { + idle_time >= TCP_REXMTVAL(tp) * tcp_totbackoff)) || + ((tp->t_persist_stop != 0) && + TSTMP_LEQ(tp->t_persist_stop, tcp_now))) { tcpstat.tcps_persistdrop++; postevent(so, 0, EV_TIMEOUT); soevent(so, - (SO_FILT_HINT_LOCKED|SO_FILT_HINT_TIMEOUT)); + (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT)); tp = tcp_drop(tp, ETIMEDOUT); break; } @@ -986,23 +1342,27 @@ fc_output: * Regular TCP connections do not send keepalives after closing * MPTCP must not also, after sending Data FINs. 
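 *
 * (The probe built further below acks rcv_nxt and carries sequence
 * snd_una - 1, i.e. a byte the peer has already acknowledged, so a
 * reachable peer must answer with a pure ACK without accepting data.)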
*/ - struct mptcb *mp_tp = tp->t_mptcb; + struct mptcb *mp_tp = tptomptp(tp); if ((tp->t_mpflags & TMPF_MPTCP_TRUE) && (tp->t_state > TCPS_ESTABLISHED)) { goto dropit; } else if (mp_tp != NULL) { - if ((mptcp_ok_to_keepalive(mp_tp) == 0)) + if ((mptcp_ok_to_keepalive(mp_tp) == 0)) { goto dropit; + } } #endif /* MPTCP */ - if (tp->t_state < TCPS_ESTABLISHED) + if (tp->t_state < TCPS_ESTABLISHED) { goto dropit; + } if ((always_keepalive || (tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) || - (tp->t_flagsext & TF_DETECT_READSTALL)) && + (tp->t_flagsext & TF_DETECT_READSTALL) || + (tp->t_tfo_probe_state == TFO_PROBE_PROBING)) && (tp->t_state <= TCPS_CLOSING || tp->t_state == TCPS_FIN_WAIT_2)) { - if (idle_time >= TCP_CONN_KEEPIDLE(tp) + TCP_CONN_MAXIDLE(tp)) + if (idle_time >= TCP_CONN_KEEPIDLE(tp) + TCP_CONN_MAXIDLE(tp)) { goto dropit; + } /* * Send a packet designed to force a response * if the peer is up and reachable: @@ -1024,43 +1384,95 @@ fc_output: bzero(&tra, sizeof(tra)); tra.nocell = INP_NO_CELLULAR(inp); tra.noexpensive = INP_NO_EXPENSIVE(inp); + tra.noconstrained = INP_NO_CONSTRAINED(inp); tra.awdl_unrestricted = INP_AWDL_UNRESTRICTED(inp); - if (tp->t_inpcb->inp_flags & INP_BOUND_IF) + tra.intcoproc_allowed = INP_INTCOPROC_ALLOWED(inp); + tra.keep_alive = 1; + if (tp->t_inpcb->inp_flags & INP_BOUND_IF) { tra.ifscope = tp->t_inpcb->inp_boundifp->if_index; - else + } else { tra.ifscope = IFSCOPE_NONE; + } tcp_respond(tp, t_template->tt_ipgen, &t_template->tt_t, (struct mbuf *)NULL, tp->rcv_nxt, tp->snd_una - 1, 0, &tra); (void) m_free(dtom(t_template)); - if (tp->t_flagsext & TF_DETECT_READSTALL) + if (tp->t_flagsext & TF_DETECT_READSTALL) { tp->t_rtimo_probes++; + } } + + TCP_LOG_KEEP_ALIVE(tp, idle_time); + tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, - TCP_CONN_KEEPINTVL(tp)); + TCP_CONN_KEEPINTVL(tp)); } else { tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, - TCP_CONN_KEEPIDLE(tp)); + TCP_CONN_KEEPIDLE(tp)); } if (tp->t_flagsext & TF_DETECT_READSTALL) { - /* + struct ifnet *outifp = tp->t_inpcb->inp_last_outifp; + bool reenable_probe = false; + /* * The keep alive packets sent to detect a read - * stall did not get a response from the + * stall did not get a response from the * peer. Generate more keep-alives to confirm this. * If the number of probes sent reaches the limit, * generate an event. */ - if (tp->t_rtimo_probes > tp->t_adaptive_rtimo) { - /* Generate an event */ - soevent(so, - (SO_FILT_HINT_LOCKED| - SO_FILT_HINT_ADAPTIVE_RTIMO)); - tcp_keepalive_reset(tp); + if (tp->t_adaptive_rtimo > 0) { + if (tp->t_rtimo_probes > tp->t_adaptive_rtimo) { + /* Generate an event */ + soevent(so, + (SO_FILT_HINT_LOCKED | + SO_FILT_HINT_ADAPTIVE_RTIMO)); + tcp_keepalive_reset(tp); + } else { + reenable_probe = true; + } + } else if (outifp != NULL && + (outifp->if_eflags & IFEF_PROBE_CONNECTIVITY) && + tp->t_rtimo_probes <= TCP_CONNECTIVITY_PROBES_MAX) { + reenable_probe = true; } else { + tp->t_flagsext &= ~TF_DETECT_READSTALL; + } + if (reenable_probe) { + int ind = min(tp->t_rtimo_probes, + TCP_MAXRXTSHIFT); tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START( - tp, TCP_REXMTVAL(tp)); + tp, tcp_backoff[ind] * TCP_REXMTVAL(tp)); } } + if (tp->t_tfo_probe_state == TFO_PROBE_PROBING) { + int ind; + + tp->t_tfo_probes++; + ind = min(tp->t_tfo_probes, TCP_MAXRXTSHIFT); + + /* + * We take the minimum among the time set by true + * keepalive (see above) and the backoff'd RTO. 
That + * way we backoff in case of packet-loss but will never + * timeout slower than regular keepalive due to the + * backing off. + */ + tp->t_timer[TCPT_KEEP] = min(OFFSET_FROM_START( + tp, tcp_backoff[ind] * TCP_REXMTVAL(tp)), + tp->t_timer[TCPT_KEEP]); + } else if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) && + !(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) && + tp->t_tfo_probe_state == TFO_PROBE_WAIT_DATA) { + /* Still no data! Let's assume a TFO-error and err out... */ + tcp_heuristic_tfo_middlebox(tp); + + so->so_error = ENODATA; + soevent(so, + (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MP_SUB_ERROR)); + sorwakeup(so); + tp->t_tfo_stats |= TFO_S_RECV_BLACKHOLE; + tcpstat.tcps_tfo_blackhole++; + } break; case TCPT_DELACK: if (tcp_delack_enabled && (tp->t_flags & TF_DELACK)) { @@ -1071,7 +1483,7 @@ fc_output: /* * If delayed ack timer fired while stretching * acks, count the number of times the streaming - * detection was not correct. If this exceeds a + * detection was not correct. If this exceeds a * threshold, disable strech ack on this * connection * @@ -1079,11 +1491,12 @@ fc_output: */ if ((tp->t_flags & TF_STRETCHACK)) { if (tp->t_unacksegs > 1 && - tp->t_unacksegs < maxseg_unacked) + tp->t_unacksegs < maxseg_unacked) { tp->t_stretchack_delayed++; + } if (tp->t_stretchack_delayed > - TCP_STRETCHACK_DELAY_THRESHOLD) { + TCP_STRETCHACK_DELAY_THRESHOLD) { tp->t_flagsext |= TF_DISABLE_STRETCHACK; /* * Note the time at which stretch @@ -1092,6 +1505,7 @@ fc_output: tp->rcv_nostrack_ts = tcp_now; tcpstat.tcps_nostretchack++; tp->t_stretchack_delayed = 0; + tp->rcv_nostrack_pkts = 0; } tcp_reset_stretch_ack(tp); } @@ -1117,65 +1531,113 @@ fc_output: if (++tp->t_mprxtshift > TCP_MAXRXTSHIFT) { tcpstat.tcps_timeoutdrop++; postevent(so, 0, EV_TIMEOUT); - soevent(so, - (SO_FILT_HINT_LOCKED| + soevent(so, + (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT)); tp = tcp_drop(tp, tp->t_softerror ? - tp->t_softerror : ETIMEDOUT); + tp->t_softerror : ETIMEDOUT); break; } tcpstat.tcps_join_rxmts++; + tp->t_mpflags |= TMPF_SND_JACK; tp->t_flags |= TF_ACKNOW; /* - * No backoff is implemented for simplicity for this + * No backoff is implemented for simplicity for this * corner case. 
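 * (TCPT_JACK_RXMT simply refires every tcp_jack_rxmt, roughly 500 ms,
 * until the join ACK gets through or t_mprxtshift exceeds
 * TCP_MAXRXTSHIFT and the subflow is dropped, as above.)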
*/ (void) tcp_output(tp); } break; + case TCPT_CELLICON: + { + struct mptses *mpte = tptomptp(tp)->mpt_mpte; + + tp->t_timer[TCPT_CELLICON] = 0; + + if (mpte->mpte_cellicon_increments == 0) { + /* Cell-icon not set by this connection */ + break; + } + + if (TSTMP_LT(mpte->mpte_last_cellicon_set + MPTCP_CELLICON_TOGGLE_RATE, tcp_now)) { + mptcp_unset_cellicon(mpte, NULL, 1); + } + + if (mpte->mpte_cellicon_increments) { + tp->t_timer[TCPT_CELLICON] = OFFSET_FROM_START(tp, MPTCP_CELLICON_TOGGLE_RATE); + } + + break; + } #endif /* MPTCP */ case TCPT_PTO: { - tcp_seq old_snd_nxt; - int32_t snd_len; - boolean_t rescue_rxt = FALSE; - - tp->t_flagsext &= ~(TF_SENT_TLPROBE); + int32_t ret = 0; + if (!(tp->t_flagsext & TF_IF_PROBING)) { + tp->t_flagsext &= ~(TF_SENT_TLPROBE); + } /* * Check if the connection is in the right state to * send a probe */ - if (tp->t_state != TCPS_ESTABLISHED || - tp->t_rxtshift > 0 || tp->snd_max == tp->snd_una || - !SACK_ENABLED(tp) || TAILQ_EMPTY(&tp->snd_holes) || - (IN_FASTRECOVERY(tp) && - (SEQ_GEQ(tp->snd_fack, tp->snd_recover) || - SEQ_GT(tp->snd_nxt, tp->sack_newdata)))) + if ((tp->t_state != TCPS_ESTABLISHED || + tp->t_rxtshift > 0 || + tp->snd_max == tp->snd_una || + !SACK_ENABLED(tp) || + !TAILQ_EMPTY(&tp->snd_holes) || + IN_FASTRECOVERY(tp)) && + !(tp->t_flagsext & TF_IF_PROBING)) { break; + } - tcpstat.tcps_pto++; - - /* If timing a segment in this window, stop the timer */ - tp->t_rtttime = 0; + /* + * When the interface state is changed explicitly reset the retransmission + * timer state for both SYN and data packets because we do not want to + * wait unnecessarily or timeout too quickly if the link characteristics + * have changed drastically + */ + if (tp->t_flagsext & TF_IF_PROBING) { + tp->t_rxtshift = 0; + if (tp->t_state == TCPS_SYN_SENT) { + tp->t_stat.synrxtshift = tp->t_rxtshift; + } + /* + * Reset to the the default RTO + */ + tp->t_srtt = TCPTV_SRTTBASE; + tp->t_rttvar = + ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4; + tp->t_rttmin = tp->t_flags & TF_LOCAL ? tcp_TCPTV_MIN : + TCPTV_REXMTMIN; + TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), + tp->t_rttmin, TCPTV_REXMTMAX, TCP_ADD_REXMTSLOP(tp)); + TCP_LOG_RTT_INFO(tp); + } - if (IN_FASTRECOVERY(tp)) { + if (tp->t_state == TCPS_SYN_SENT) { + /* + * The PTO for SYN_SENT reinitializes TCP as if it was a fresh + * connection attempt + */ + tp->snd_nxt = tp->snd_una; /* - * Send a probe to detect tail loss in a - * recovery window when the connection is in - * fast_recovery. + * Note: We overload snd_recover to function also as the + * snd_last variable described in RFC 2582 */ - old_snd_nxt = tp->snd_nxt; - rescue_rxt = TRUE; - VERIFY(SEQ_GEQ(tp->snd_fack, tp->snd_una)); - snd_len = min((tp->snd_recover - tp->snd_fack), - tp->t_maxseg); - tp->snd_nxt = tp->snd_recover - snd_len; - tcpstat.tcps_pto_in_recovery++; - tcp_ccdbg_trace(tp, NULL, TCP_CC_TLP_IN_FASTRECOVERY); + tp->snd_recover = tp->snd_max; + /* + * Force a segment to be sent. 
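+ * (TF_ACKNOW guarantees that the tcp_output() call further below emits
+ * a segment even though snd_nxt was just rewound to snd_una.)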
+ */ + tp->t_flags |= TF_ACKNOW; + + /* If timing a segment in this window, stop the timer */ + tp->t_rtttime = 0; } else { + int32_t snd_len; + /* * If there is no new data to send or if the * connection is limited by receive window then @@ -1193,23 +1655,78 @@ fc_output: } } - /* Note that tail loss probe is being sent */ - tp->t_flagsext |= TF_SENT_TLPROBE; - tp->t_tlpstart = tcp_now; + tcpstat.tcps_pto++; + if (tp->t_flagsext & TF_IF_PROBING) { + tcpstat.tcps_probe_if++; + } + + /* If timing a segment in this window, stop the timer */ + tp->t_rtttime = 0; + /* Note that tail loss probe is being sent. Exclude IF probe */ + if (!(tp->t_flagsext & TF_IF_PROBING)) { + tp->t_flagsext |= TF_SENT_TLPROBE; + tp->t_tlpstart = tcp_now; + } tp->snd_cwnd += tp->t_maxseg; - (void )tcp_output(tp); - tp->snd_cwnd -= tp->t_maxseg; + /* + * When tail-loss-probe fires, we reset the RTO timer, because + * a probe just got sent, so we are good to push out the timer. + * + * Set to 0 to ensure that tcp_output() will reschedule it + */ + tp->t_timer[TCPT_REXMT] = 0; + ret = tcp_output(tp); + +#if (DEBUG || DEVELOPMENT) + if ((tp->t_flagsext & TF_IF_PROBING) && + ((IFNET_IS_COMPANION_LINK(tp->t_inpcb->inp_last_outifp)) || + tp->t_state == TCPS_SYN_SENT)) { + if (ret == 0 && tcp_probe_if_fix_port > 0 && + tcp_probe_if_fix_port <= IPPORT_HILASTAUTO) { + tp->t_timer[TCPT_REXMT] = 0; + tcp_set_lotimer_index(tp); + } - tp->t_tlphighrxt = tp->snd_nxt; + os_log(OS_LOG_DEFAULT, + "%s: sent %s probe for %u > %u on interface %s" + " (%u) %s(%d)", + __func__, + tp->t_state == TCPS_SYN_SENT ? "SYN" : "data", + ntohs(tp->t_inpcb->inp_lport), + ntohs(tp->t_inpcb->inp_fport), + if_name(tp->t_inpcb->inp_last_outifp), + tp->t_inpcb->inp_last_outifp->if_index, + ret == 0 ? "succeeded" :"failed", ret); + } +#endif /* DEBUG || DEVELOPMENT */ /* - * If a tail loss probe was sent after entering recovery, - * restore the old snd_nxt value so that other packets - * will get retransmitted correctly. 
+ * When the connection is not idle, make sure the retransmission timer + * is armed because it was set to zero above */ - if (rescue_rxt) - tp->snd_nxt = old_snd_nxt; + if ((tp->t_timer[TCPT_REXMT] == 0 || tp->t_timer[TCPT_PERSIST] == 0) && + (tp->t_inpcb->inp_socket->so_snd.sb_cc != 0 || tp->t_state == TCPS_SYN_SENT || + tp->t_state == TCPS_SYN_RECEIVED)) { + tp->t_timer[TCPT_REXMT] = + OFFSET_FROM_START(tp, tp->t_rxtcur); + + os_log(OS_LOG_DEFAULT, + "%s: tcp_output() returned %u with retransmission timer disabled " + "for %u > %u in state %d, reset timer to %d", + __func__, ret, + ntohs(tp->t_inpcb->inp_lport), + ntohs(tp->t_inpcb->inp_fport), + tp->t_state, + tp->t_timer[TCPT_REXMT]); + + tcp_check_timer_state(tp); + } + tp->snd_cwnd -= tp->t_maxseg; + + if (!(tp->t_flagsext & TF_IF_PROBING)) { + tp->t_tlphighrxt = tp->snd_nxt; + } break; } case TCPT_DELAYFR: @@ -1223,37 +1740,43 @@ fc_output: */ if (IN_FASTRECOVERY(tp) || SEQ_GEQ(tp->snd_una, tp->snd_recover) || - tp->t_rxtshift > 0) + tp->t_rxtshift > 0) { break; + } VERIFY(SACK_ENABLED(tp)); - if (CC_ALGO(tp)->pre_fr != NULL) + tcp_rexmt_save_state(tp); + if (CC_ALGO(tp)->pre_fr != NULL) { CC_ALGO(tp)->pre_fr(tp); + if (TCP_ECN_ENABLED(tp)) { + tp->ecn_flags |= TE_SENDCWR; + } + } ENTER_FASTRECOVERY(tp); - if ((tp->ecn_flags & TE_ECN_ON) == TE_ECN_ON) - tp->ecn_flags |= TE_SENDCWR; tp->t_timer[TCPT_REXMT] = 0; tcpstat.tcps_sack_recovery_episode++; + tp->t_sack_recovery_episode++; tp->sack_newdata = tp->snd_nxt; tp->snd_cwnd = tp->t_maxseg; tcp_ccdbg_trace(tp, NULL, TCP_CC_ENTER_FASTRECOVERY); (void) tcp_output(tp); break; - dropit: +dropit: tcpstat.tcps_keepdrops++; postevent(so, 0, EV_TIMEOUT); soevent(so, - (SO_FILT_HINT_LOCKED|SO_FILT_HINT_TIMEOUT)); + (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT)); tp = tcp_drop(tp, ETIMEDOUT); break; } #if TCPDEBUG - if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG) + if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG) { tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0, - PRU_SLOWTIMO); + PRU_SLOWTIMO); + } #endif - return (tp); + return tp; } /* Remove a timer entry from timer list */ @@ -1262,20 +1785,21 @@ tcp_remove_timer(struct tcpcb *tp) { struct tcptimerlist *listp = &tcp_timer_list; - lck_mtx_assert(&tp->t_inpcb->inpcb_mtx, LCK_MTX_ASSERT_OWNED); + socket_lock_assert_owned(tp->t_inpcb->inp_socket); if (!(TIMER_IS_ON_LIST(tp))) { return; } lck_mtx_lock(listp->mtx); - + /* Check if pcb is on timer list again after acquiring the lock */ if (!(TIMER_IS_ON_LIST(tp))) { lck_mtx_unlock(listp->mtx); return; } - - if (listp->next_te != NULL && listp->next_te == &tp->tentry) + + if (listp->next_te != NULL && listp->next_te == &tp->tentry) { listp->next_te = LIST_NEXT(&tp->tentry, le); + } LIST_REMOVE(&tp->tentry, le); tp->t_flags &= ~(TF_TIMER_ONLIST); @@ -1304,42 +1828,47 @@ need_to_resched_timerlist(u_int32_t runtime, u_int16_t mode) * in flux. In this case always acquire the lock and set the state * correctly. 
*/ - if (listp->running) - return (TRUE); + if (listp->running) { + return TRUE; + } - if (!listp->scheduled) - return (TRUE); + if (!listp->scheduled) { + return TRUE; + } diff = timer_diff(listp->runtime, 0, runtime, 0); if (diff <= 0) { /* The list is going to run before this timer */ - return (FALSE); + return FALSE; } else { if (mode & TCP_TIMERLIST_10MS_MODE) { - if (diff <= TCP_TIMER_10MS_QUANTUM) - return (FALSE); + if (diff <= TCP_TIMER_10MS_QUANTUM) { + return FALSE; + } } else if (mode & TCP_TIMERLIST_100MS_MODE) { - if (diff <= TCP_TIMER_100MS_QUANTUM) - return (FALSE); + if (diff <= TCP_TIMER_100MS_QUANTUM) { + return FALSE; + } } else { - if (diff <= TCP_TIMER_500MS_QUANTUM) - return (FALSE); + if (diff <= TCP_TIMER_500MS_QUANTUM) { + return FALSE; + } } } - return (TRUE); + return TRUE; } void -tcp_sched_timerlist(uint32_t offset) +tcp_sched_timerlist(uint32_t offset) { - uint64_t deadline = 0; struct tcptimerlist *listp = &tcp_timer_list; - lck_mtx_assert(listp->mtx, LCK_MTX_ASSERT_OWNED); + LCK_MTX_ASSERT(listp->mtx, LCK_MTX_ASSERT_OWNED); offset = min(offset, TCP_TIMERLIST_MAX_OFFSET); listp->runtime = tcp_now + offset; + listp->schedtime = tcp_now; if (listp->runtime == 0) { listp->runtime++; offset++; @@ -1354,15 +1883,16 @@ tcp_sched_timerlist(uint32_t offset) /* * Function to run the timers for a connection. * - * Returns the offset of next timer to be run for this connection which + * Returns the offset of next timer to be run for this connection which * can be used to reschedule the timerlist. * * te_mode is an out parameter that indicates the modes of active * timers for this connection. */ u_int32_t -tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode) { - +tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode, + u_int16_t probe_if_index) +{ struct socket *so; u_int16_t i = 0, index = TCPT_NONE, lo_index = TCPT_NONE; u_int32_t timer_val, offset = 0, lo_timer = 0; @@ -1374,30 +1904,42 @@ tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode) { bzero(needtorun, sizeof(needtorun)); *te_mode = 0; - tcp_lock(tp->t_inpcb->inp_socket, 1, 0); + socket_lock(tp->t_inpcb->inp_socket, 1); so = tp->t_inpcb->inp_socket; - /* Release the want count on inp */ + /* Release the want count on inp */ if (in_pcb_checkstate(tp->t_inpcb, WNT_RELEASE, 1) - == WNT_STOPUSING) { + == WNT_STOPUSING) { if (TIMER_IS_ON_LIST(tp)) { tcp_remove_timer(tp); } - /* Looks like the TCP connection got closed while we + /* Looks like the TCP connection got closed while we * were waiting for the lock.. Done */ goto done; } + /* + * If this connection is over an interface that needs to + * be probed, send probe packets to reinitiate communication. + */ + if (TCP_IF_STATE_CHANGED(tp, probe_if_index)) { + tp->t_flagsext |= TF_IF_PROBING; + tcp_timers(tp, TCPT_PTO); + tp->t_timer[TCPT_PTO] = 0; + tp->t_flagsext &= ~TF_IF_PROBING; + } + /* * Since the timer thread needs to wait for tcp lock, it may race * with another thread that can cancel or reschedule the timer * that is about to run. Check if we need to run anything. */ - if ((index = tp->tentry.index) == TCPT_NONE) + if ((index = tp->tentry.index) == TCPT_NONE) { goto done; - + } + timer_val = tp->t_timer[index]; diff = timer_diff(tp->tentry.runtime, 0, tcp_now, 0); @@ -1412,10 +1954,11 @@ tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode) { tp->t_timer[index] = 0; if (timer_val > 0) { tp = tcp_timers(tp, index); - if (tp == NULL) + if (tp == NULL) { goto done; + } } - + /* * Check if there are any other timers that need to be run. 
* While doing it, adjust the timer values wrt tcp_now. @@ -1424,7 +1967,7 @@ tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode) { for (i = 0; i < TCPT_NTIMERS; ++i) { if (tp->t_timer[i] != 0) { diff = timer_diff(tp->tentry.timer_start, - tp->t_timer[i], tcp_now, 0); + tp->t_timer[i], tcp_now, 0); if (diff <= 0) { needtorun[i] = TRUE; count++; @@ -1439,16 +1982,17 @@ tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode) { } } } - + tp->tentry.timer_start = tcp_now; tp->tentry.index = lo_index; VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0); if (tp->tentry.index != TCPT_NONE) { tp->tentry.runtime = tp->tentry.timer_start + - tp->t_timer[tp->tentry.index]; - if (tp->tentry.runtime == 0) + tp->t_timer[tp->tentry.index]; + if (tp->tentry.runtime == 0) { tp->tentry.runtime++; + } } if (count > 0) { @@ -1478,18 +2022,19 @@ done: offset = 0; } - tcp_unlock(so, 1, 0); - return(offset); + socket_unlock(so, 1); + return offset; } void -tcp_run_timerlist(void * arg1, void * arg2) { +tcp_run_timerlist(void * arg1, void * arg2) +{ #pragma unused(arg1, arg2) struct tcptimerentry *te, *next_te; struct tcptimerlist *listp = &tcp_timer_list; struct tcpcb *tp; uint32_t next_timer = 0; /* offset of the next timer on the list */ - u_int16_t te_mode = 0; /* modes of all active timers in a tcpcb */ + u_int16_t te_mode = 0; /* modes of all active timers in a tcpcb */ u_int16_t list_mode = 0; /* cumulative of modes of all tcpcbs */ uint32_t active_count = 0; @@ -1497,12 +2042,40 @@ tcp_run_timerlist(void * arg1, void * arg2) { lck_mtx_lock(listp->mtx); + int32_t drift = tcp_now - listp->runtime; + if (drift <= 1) { + tcpstat.tcps_timer_drift_le_1_ms++; + } else if (drift <= 10) { + tcpstat.tcps_timer_drift_le_10_ms++; + } else if (drift <= 20) { + tcpstat.tcps_timer_drift_le_20_ms++; + } else if (drift <= 50) { + tcpstat.tcps_timer_drift_le_50_ms++; + } else if (drift <= 100) { + tcpstat.tcps_timer_drift_le_100_ms++; + } else if (drift <= 200) { + tcpstat.tcps_timer_drift_le_200_ms++; + } else if (drift <= 500) { + tcpstat.tcps_timer_drift_le_500_ms++; + } else if (drift <= 1000) { + tcpstat.tcps_timer_drift_le_1000_ms++; + } else { + tcpstat.tcps_timer_drift_gt_1000_ms++; + } + listp->running = TRUE; - + LIST_FOREACH_SAFE(te, &listp->lhead, le, next_te) { uint32_t offset = 0; uint32_t runtime = te->runtime; - if (te->index < TCPT_NONE && TSTMP_GT(runtime, tcp_now)) { + + tp = TIMERENTRY_TO_TP(te); + + /* + * An interface probe may need to happen before the previously scheduled runtime + */ + if (te->index < TCPT_NONE && TSTMP_GT(runtime, tcp_now) && + !TCP_IF_STATE_CHANGED(tp, listp->probe_if_index)) { offset = timer_diff(runtime, 0, tcp_now, 0); if (next_timer == 0 || offset < next_timer) { next_timer = offset; @@ -1511,8 +2084,6 @@ tcp_run_timerlist(void * arg1, void * arg2) { continue; } - tp = TIMERENTRY_TO_TP(te); - /* * Acquire an inp wantcnt on the inpcb so that the socket * won't get detached even if tcp_close is called @@ -1523,7 +2094,7 @@ tcp_run_timerlist(void * arg1, void * arg2) { * Some how this pcb went into dead state while * on the timer list, just take it off the list. * Since the timer list entry pointers are - * protected by the timer list lock, we can + * protected by the timer list lock, we can * do it here without the socket lock. */ if (TIMER_IS_ON_LIST(tp)) { @@ -1544,15 +2115,16 @@ tcp_run_timerlist(void * arg1, void * arg2) { * release the lock, this pointer will be updated to the * element after that. 
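 */

/*
 * Editor's illustration, not part of this diff: tcp_run_timerlist() above
 * buckets how late each run fired (the tcps_timer_drift_* counters) into a
 * fixed histogram. A user-space sketch of the same idea; record_drift()
 * and the sample timestamps are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

/* Bucket upper bounds in ms: 1, 10, 20, 50, 100, 200, 500, 1000, >1000. */
static const int32_t bounds[] = { 1, 10, 20, 50, 100, 200, 500, 1000 };
static uint64_t buckets[sizeof(bounds) / sizeof(bounds[0]) + 1];

static void
record_drift(uint32_t scheduled, uint32_t actual)
{
	int32_t drift = (int32_t)(actual - scheduled);
	size_t i;

	for (i = 0; i < sizeof(bounds) / sizeof(bounds[0]); i++) {
		if (drift <= bounds[i])
			break;
	}
	buckets[i]++;	/* lands in the ">1000" slot if no bound matched */
}

int
main(void)
{
	record_drift(1000, 1003);	/* 3 ms late  -> "<= 10 ms" bucket */
	record_drift(1000, 2500);	/* 1.5 s late -> "> 1000 ms" bucket */
	printf("le10=%llu gt1000=%llu\n",
	    (unsigned long long)buckets[1], (unsigned long long)buckets[8]);
	return 0;
}

/*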
*/ - listp->next_te = next_te; + listp->next_te = next_te; VERIFY_NEXT_LINK(&tp->tentry, le); VERIFY_PREV_LINK(&tp->tentry, le); lck_mtx_unlock(listp->mtx); - offset = tcp_run_conn_timer(tp, &te_mode); - + offset = tcp_run_conn_timer(tp, &te_mode, + listp->probe_if_index); + lck_mtx_lock(listp->mtx); next_te = listp->next_te; @@ -1561,21 +2133,23 @@ tcp_run_timerlist(void * arg1, void * arg2) { if (offset > 0 && te_mode != 0) { list_mode |= te_mode; - if (next_timer == 0 || offset < next_timer) + if (next_timer == 0 || offset < next_timer) { next_timer = offset; + } } } if (!LIST_EMPTY(&listp->lhead)) { u_int16_t next_mode = 0; if ((list_mode & TCP_TIMERLIST_10MS_MODE) || - (listp->pref_mode & TCP_TIMERLIST_10MS_MODE)) + (listp->pref_mode & TCP_TIMERLIST_10MS_MODE)) { next_mode = TCP_TIMERLIST_10MS_MODE; - else if ((list_mode & TCP_TIMERLIST_100MS_MODE) || - (listp->pref_mode & TCP_TIMERLIST_100MS_MODE)) + } else if ((list_mode & TCP_TIMERLIST_100MS_MODE) || + (listp->pref_mode & TCP_TIMERLIST_100MS_MODE)) { next_mode = TCP_TIMERLIST_100MS_MODE; - else + } else { next_mode = TCP_TIMERLIST_500MS_MODE; + } if (next_mode != TCP_TIMERLIST_500MS_MODE) { listp->idleruns = 0; @@ -1598,12 +2172,14 @@ tcp_run_timerlist(void * arg1, void * arg2) { } } listp->mode = next_mode; - if (listp->pref_offset != 0) + if (listp->pref_offset != 0) { next_timer = min(listp->pref_offset, next_timer); + } - if (listp->mode == TCP_TIMERLIST_500MS_MODE) + if (listp->mode == TCP_TIMERLIST_500MS_MODE) { next_timer = max(next_timer, - TCP_TIMER_500MS_QUANTUM); + TCP_TIMER_500MS_QUANTUM); + } tcp_sched_timerlist(next_timer); } else { @@ -1617,16 +2193,17 @@ tcp_run_timerlist(void * arg1, void * arg2) { listp->running = FALSE; listp->pref_mode = 0; listp->pref_offset = 0; + listp->probe_if_index = 0; lck_mtx_unlock(listp->mtx); } /* - * Function to check if the timerlist needs to be reschduled to run this + * Function to check if the timerlist needs to be rescheduled to run this * connection's timers correctly. */ -void -tcp_sched_timers(struct tcpcb *tp) +void +tcp_sched_timers(struct tcpcb *tp) { struct tcptimerentry *te = &tp->tentry; u_int16_t index = te->index; @@ -1665,26 +2242,29 @@ tcp_sched_timers(struct tcpcb *tp) list_locked = TRUE; } - LIST_INSERT_HEAD(&listp->lhead, te, le); - tp->t_flags |= TF_TIMER_ONLIST; + if (!TIMER_IS_ON_LIST(tp)) { + LIST_INSERT_HEAD(&listp->lhead, te, le); + tp->t_flags |= TF_TIMER_ONLIST; - listp->entries++; - if (listp->entries > listp->maxentries) - listp->maxentries = listp->entries; + listp->entries++; + if (listp->entries > listp->maxentries) { + listp->maxentries = listp->entries; + } - /* if the list is not scheduled, just schedule it */ - if (!listp->scheduled) - goto schedule; + /* if the list is not scheduled, just schedule it */ + if (!listp->scheduled) { + goto schedule; + } + } } - /* * Timer entry is currently on the list, check if the list needs * to be rescheduled. 
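 */

/*
 * Editor's illustration, not part of this diff: the block above derives the
 * list's next mode from the union of every connection's timer modes, then
 * clamps the idle (500 ms) case to one full quantum. A simplified sketch
 * that ignores pref_mode and idleruns; the MODE_* and QUANTUM_500MS names
 * and pick_next_run() are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define MODE_10MS	0x01
#define MODE_100MS	0x02
#define MODE_500MS	0x04
#define QUANTUM_500MS	500	/* ms */

/*
 * Any 10 ms timer forces 10 ms mode, else any 100 ms timer forces 100 ms
 * mode; otherwise the list idles at 500 ms and never reschedules itself
 * sooner than one 500 ms quantum.
 */
static uint32_t
pick_next_run(uint16_t list_mode, uint32_t next_timer, uint16_t *next_mode)
{
	if (list_mode & MODE_10MS) {
		*next_mode = MODE_10MS;
	} else if (list_mode & MODE_100MS) {
		*next_mode = MODE_100MS;
	} else {
		*next_mode = MODE_500MS;
		if (next_timer < QUANTUM_500MS)
			next_timer = QUANTUM_500MS;
	}
	return next_timer;
}

int
main(void)
{
	uint16_t mode;
	uint32_t off = pick_next_run(MODE_500MS, 120, &mode);

	printf("mode=%#x offset=%u\n", (unsigned)mode, (unsigned)off);
	return 0;
}

/*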
*/ if (need_to_resched_timerlist(te->runtime, mode)) { tcp_resched_timerlist++; - + if (!list_locked) { lck_mtx_lock(listp->mtx); list_locked = TRUE; @@ -1696,7 +2276,7 @@ tcp_sched_timers(struct tcpcb *tp) if (listp->running) { listp->pref_mode |= mode; if (listp->pref_offset == 0 || - offset < listp->pref_offset) { + offset < listp->pref_offset) { listp->pref_offset = offset; } } else { @@ -1708,10 +2288,11 @@ tcp_sched_timers(struct tcpcb *tp) int32_t diff; diff = timer_diff(listp->runtime, 0, tcp_now, offset); - if (diff <= 0) + if (diff <= 0) { goto done; - else + } else { goto schedule; + } } else { goto schedule; } @@ -1730,22 +2311,25 @@ schedule: listp->idleruns = 0; offset = min(offset, TCP_TIMER_10MS_QUANTUM); } else if (mode & TCP_TIMERLIST_100MS_MODE) { - if (listp->mode > TCP_TIMERLIST_100MS_MODE) + if (listp->mode > TCP_TIMERLIST_100MS_MODE) { listp->mode = TCP_TIMERLIST_100MS_MODE; + } listp->idleruns = 0; offset = min(offset, TCP_TIMER_100MS_QUANTUM); } tcp_sched_timerlist(offset); done: - if (list_locked) + if (list_locked) { lck_mtx_unlock(listp->mtx); + } return; } - + static inline void -tcp_set_lotimer_index(struct tcpcb *tp) { +tcp_set_lotimer_index(struct tcpcb *tp) +{ uint16_t i, lo_index = TCPT_NONE, mode = 0; uint32_t lo_timer = 0; for (i = 0; i < TCPT_NTIMERS; ++i) { @@ -1762,20 +2346,22 @@ tcp_set_lotimer_index(struct tcpcb *tp) { VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0); if (tp->tentry.index != TCPT_NONE) { - tp->tentry.runtime = tp->tentry.timer_start + tp->tentry.runtime = tp->tentry.timer_start + tp->t_timer[tp->tentry.index]; - if (tp->tentry.runtime == 0) + if (tp->tentry.runtime == 0) { tp->tentry.runtime++; + } } } void -tcp_check_timer_state(struct tcpcb *tp) { - - lck_mtx_assert(&tp->t_inpcb->inpcb_mtx, LCK_MTX_ASSERT_OWNED); +tcp_check_timer_state(struct tcpcb *tp) +{ + socket_lock_assert_owned(tp->t_inpcb->inp_socket); - if (tp->t_inpcb->inp_flags2 & INP2_TIMEWAIT) + if (tp->t_inpcb->inp_flags2 & INP2_TIMEWAIT) { return; + } tcp_set_lotimer_index(tp); @@ -1783,6 +2369,34 @@ tcp_check_timer_state(struct tcpcb *tp) { return; } +static inline void +tcp_cumulative_stat(u_int32_t cur, u_int32_t *prev, u_int32_t *dest) +{ + /* handle wrap around */ + int32_t diff = (int32_t) (cur - *prev); + if (diff > 0) { + *dest = diff; + } else { + *dest = 0; + } + *prev = cur; + return; +} + +static inline void +tcp_cumulative_stat64(u_int64_t cur, u_int64_t *prev, u_int64_t *dest) +{ + /* handle wrap around */ + int64_t diff = (int64_t) (cur - *prev); + if (diff > 0) { + *dest = diff; + } else { + *dest = 0; + } + *prev = cur; + return; +} + __private_extern__ void tcp_report_stats(void) { @@ -1790,12 +2404,14 @@ tcp_report_stats(void) struct sockaddr_in dst; struct sockaddr_in6 dst6; struct rtentry *rt = NULL; - u_int64_t var, uptime; + static struct tcp_last_report_stats prev; + u_int64_t var, uptime; -#define stat data.u.tcp_stats +#define stat data.u.tcp_stats if (((uptime = net_uptime()) - tcp_last_report_time) < - TCP_REPORT_STATS_INTERVAL) + tcp_report_stats_interval) { return; + } tcp_last_report_time = uptime; @@ -1809,12 +2425,12 @@ tcp_report_stats(void) /* ipv4 avg rtt */ lck_mtx_lock(rnh_lock); rt = rt_lookup(TRUE, (struct sockaddr *)&dst, NULL, - rt_tables[AF_INET], IFSCOPE_NONE); + rt_tables[AF_INET], IFSCOPE_NONE); lck_mtx_unlock(rnh_lock); if (rt != NULL) { RT_LOCK(rt); if (rt_primary_default(rt, rt_key(rt)) && - rt->rt_stats != NULL) { + rt->rt_stats != NULL) { stat.ipv4_avgrtt = rt->rt_stats->nstat_avg_rtt; } RT_UNLOCK(rt); @@ 
-1828,13 +2444,13 @@ tcp_report_stats(void) dst6.sin6_family = AF_INET6; lck_mtx_lock(rnh_lock); - rt = rt_lookup(TRUE,(struct sockaddr *)&dst6, NULL, - rt_tables[AF_INET6], IFSCOPE_NONE); + rt = rt_lookup(TRUE, (struct sockaddr *)&dst6, NULL, + rt_tables[AF_INET6], IFSCOPE_NONE); lck_mtx_unlock(rnh_lock); if (rt != NULL) { RT_LOCK(rt); if (rt_primary_default(rt, rt_key(rt)) && - rt->rt_stats != NULL) { + rt->rt_stats != NULL) { stat.ipv6_avgrtt = rt->rt_stats->nstat_avg_rtt; } RT_UNLOCK(rt); @@ -1855,21 +2471,476 @@ tcp_report_stats(void) } /* RTO after tail loss, shift by 10 for precision */ - if (tcpstat.tcps_sndrexmitpack > 0 + if (tcpstat.tcps_sndrexmitpack > 0 && tcpstat.tcps_tailloss_rto > 0) { var = tcpstat.tcps_tailloss_rto << 10; stat.send_tlrto_rate = - (var * 100) / tcpstat.tcps_sndrexmitpack; + (var * 100) / tcpstat.tcps_sndrexmitpack; } - + /* packet reordering */ if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_reordered_pkts > 0) { var = tcpstat.tcps_reordered_pkts << 10; stat.send_reorder_rate = - (var * 100) / tcpstat.tcps_sndpack; + (var * 100) / tcpstat.tcps_sndpack; } + if (tcp_ecn_outbound == 1) { + stat.ecn_client_enabled = 1; + } + if (tcp_ecn_inbound == 1) { + stat.ecn_server_enabled = 1; + } + tcp_cumulative_stat(tcpstat.tcps_connattempt, + &prev.tcps_connattempt, &stat.connection_attempts); + tcp_cumulative_stat(tcpstat.tcps_accepts, + &prev.tcps_accepts, &stat.connection_accepts); + tcp_cumulative_stat(tcpstat.tcps_ecn_client_setup, + &prev.tcps_ecn_client_setup, &stat.ecn_client_setup); + tcp_cumulative_stat(tcpstat.tcps_ecn_server_setup, + &prev.tcps_ecn_server_setup, &stat.ecn_server_setup); + tcp_cumulative_stat(tcpstat.tcps_ecn_client_success, + &prev.tcps_ecn_client_success, &stat.ecn_client_success); + tcp_cumulative_stat(tcpstat.tcps_ecn_server_success, + &prev.tcps_ecn_server_success, &stat.ecn_server_success); + tcp_cumulative_stat(tcpstat.tcps_ecn_not_supported, + &prev.tcps_ecn_not_supported, &stat.ecn_not_supported); + tcp_cumulative_stat(tcpstat.tcps_ecn_lost_syn, + &prev.tcps_ecn_lost_syn, &stat.ecn_lost_syn); + tcp_cumulative_stat(tcpstat.tcps_ecn_lost_synack, + &prev.tcps_ecn_lost_synack, &stat.ecn_lost_synack); + tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ce, + &prev.tcps_ecn_recv_ce, &stat.ecn_recv_ce); + tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ece, + &prev.tcps_ecn_recv_ece, &stat.ecn_recv_ece); + tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ece, + &prev.tcps_ecn_recv_ece, &stat.ecn_recv_ece); + tcp_cumulative_stat(tcpstat.tcps_ecn_sent_ece, + &prev.tcps_ecn_sent_ece, &stat.ecn_sent_ece); + tcp_cumulative_stat(tcpstat.tcps_ecn_sent_ece, + &prev.tcps_ecn_sent_ece, &stat.ecn_sent_ece); + tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ce, + &prev.tcps_ecn_conn_recv_ce, &stat.ecn_conn_recv_ce); + tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ece, + &prev.tcps_ecn_conn_recv_ece, &stat.ecn_conn_recv_ece); + tcp_cumulative_stat(tcpstat.tcps_ecn_conn_plnoce, + &prev.tcps_ecn_conn_plnoce, &stat.ecn_conn_plnoce); + tcp_cumulative_stat(tcpstat.tcps_ecn_conn_pl_ce, + &prev.tcps_ecn_conn_pl_ce, &stat.ecn_conn_pl_ce); + tcp_cumulative_stat(tcpstat.tcps_ecn_conn_nopl_ce, + &prev.tcps_ecn_conn_nopl_ce, &stat.ecn_conn_nopl_ce); + tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_synloss, + &prev.tcps_ecn_fallback_synloss, &stat.ecn_fallback_synloss); + tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_reorder, + &prev.tcps_ecn_fallback_reorder, &stat.ecn_fallback_reorder); + tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_ce, + &prev.tcps_ecn_fallback_ce, 
&stat.ecn_fallback_ce); + tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_rcv, + &prev.tcps_tfo_syn_data_rcv, &stat.tfo_syn_data_rcv); + tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req_rcv, + &prev.tcps_tfo_cookie_req_rcv, &stat.tfo_cookie_req_rcv); + tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_sent, + &prev.tcps_tfo_cookie_sent, &stat.tfo_cookie_sent); + tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_invalid, + &prev.tcps_tfo_cookie_invalid, &stat.tfo_cookie_invalid); + tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req, + &prev.tcps_tfo_cookie_req, &stat.tfo_cookie_req); + tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_rcv, + &prev.tcps_tfo_cookie_rcv, &stat.tfo_cookie_rcv); + tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_sent, + &prev.tcps_tfo_syn_data_sent, &stat.tfo_syn_data_sent); + tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_acked, + &prev.tcps_tfo_syn_data_acked, &stat.tfo_syn_data_acked); + tcp_cumulative_stat(tcpstat.tcps_tfo_syn_loss, + &prev.tcps_tfo_syn_loss, &stat.tfo_syn_loss); + tcp_cumulative_stat(tcpstat.tcps_tfo_blackhole, + &prev.tcps_tfo_blackhole, &stat.tfo_blackhole); + tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_wrong, + &prev.tcps_tfo_cookie_wrong, &stat.tfo_cookie_wrong); + tcp_cumulative_stat(tcpstat.tcps_tfo_no_cookie_rcv, + &prev.tcps_tfo_no_cookie_rcv, &stat.tfo_no_cookie_rcv); + tcp_cumulative_stat(tcpstat.tcps_tfo_heuristics_disable, + &prev.tcps_tfo_heuristics_disable, &stat.tfo_heuristics_disable); + tcp_cumulative_stat(tcpstat.tcps_tfo_sndblackhole, + &prev.tcps_tfo_sndblackhole, &stat.tfo_sndblackhole); + + + tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_attempt, + &prev.tcps_mptcp_handover_attempt, &stat.mptcp_handover_attempt); + tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_attempt, + &prev.tcps_mptcp_interactive_attempt, &stat.mptcp_interactive_attempt); + tcp_cumulative_stat(tcpstat.tcps_mptcp_aggregate_attempt, + &prev.tcps_mptcp_aggregate_attempt, &stat.mptcp_aggregate_attempt); + tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_attempt, + &prev.tcps_mptcp_fp_handover_attempt, &stat.mptcp_fp_handover_attempt); + tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_interactive_attempt, + &prev.tcps_mptcp_fp_interactive_attempt, &stat.mptcp_fp_interactive_attempt); + tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_aggregate_attempt, + &prev.tcps_mptcp_fp_aggregate_attempt, &stat.mptcp_fp_aggregate_attempt); + tcp_cumulative_stat(tcpstat.tcps_mptcp_heuristic_fallback, + &prev.tcps_mptcp_heuristic_fallback, &stat.mptcp_heuristic_fallback); + tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_heuristic_fallback, + &prev.tcps_mptcp_fp_heuristic_fallback, &stat.mptcp_fp_heuristic_fallback); + tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_success_wifi, + &prev.tcps_mptcp_handover_success_wifi, &stat.mptcp_handover_success_wifi); + tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_success_cell, + &prev.tcps_mptcp_handover_success_cell, &stat.mptcp_handover_success_cell); + tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_success, + &prev.tcps_mptcp_interactive_success, &stat.mptcp_interactive_success); + tcp_cumulative_stat(tcpstat.tcps_mptcp_aggregate_success, + &prev.tcps_mptcp_aggregate_success, &stat.mptcp_aggregate_success); + tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_success_wifi, + &prev.tcps_mptcp_fp_handover_success_wifi, &stat.mptcp_fp_handover_success_wifi); + tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_success_cell, + &prev.tcps_mptcp_fp_handover_success_cell, &stat.mptcp_fp_handover_success_cell); + 
tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_interactive_success, + &prev.tcps_mptcp_fp_interactive_success, &stat.mptcp_fp_interactive_success); + tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_aggregate_success, + &prev.tcps_mptcp_fp_aggregate_success, &stat.mptcp_fp_aggregate_success); + tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_cell_from_wifi, + &prev.tcps_mptcp_handover_cell_from_wifi, &stat.mptcp_handover_cell_from_wifi); + tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_wifi_from_cell, + &prev.tcps_mptcp_handover_wifi_from_cell, &stat.mptcp_handover_wifi_from_cell); + tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_cell_from_wifi, + &prev.tcps_mptcp_interactive_cell_from_wifi, &stat.mptcp_interactive_cell_from_wifi); + tcp_cumulative_stat64(tcpstat.tcps_mptcp_handover_cell_bytes, + &prev.tcps_mptcp_handover_cell_bytes, &stat.mptcp_handover_cell_bytes); + tcp_cumulative_stat64(tcpstat.tcps_mptcp_interactive_cell_bytes, + &prev.tcps_mptcp_interactive_cell_bytes, &stat.mptcp_interactive_cell_bytes); + tcp_cumulative_stat64(tcpstat.tcps_mptcp_aggregate_cell_bytes, + &prev.tcps_mptcp_aggregate_cell_bytes, &stat.mptcp_aggregate_cell_bytes); + tcp_cumulative_stat64(tcpstat.tcps_mptcp_handover_all_bytes, + &prev.tcps_mptcp_handover_all_bytes, &stat.mptcp_handover_all_bytes); + tcp_cumulative_stat64(tcpstat.tcps_mptcp_interactive_all_bytes, + &prev.tcps_mptcp_interactive_all_bytes, &stat.mptcp_interactive_all_bytes); + tcp_cumulative_stat64(tcpstat.tcps_mptcp_aggregate_all_bytes, + &prev.tcps_mptcp_aggregate_all_bytes, &stat.mptcp_aggregate_all_bytes); + tcp_cumulative_stat(tcpstat.tcps_mptcp_back_to_wifi, + &prev.tcps_mptcp_back_to_wifi, &stat.mptcp_back_to_wifi); + tcp_cumulative_stat(tcpstat.tcps_mptcp_wifi_proxy, + &prev.tcps_mptcp_wifi_proxy, &stat.mptcp_wifi_proxy); + tcp_cumulative_stat(tcpstat.tcps_mptcp_cell_proxy, + &prev.tcps_mptcp_cell_proxy, &stat.mptcp_cell_proxy); + tcp_cumulative_stat(tcpstat.tcps_mptcp_triggered_cell, + &prev.tcps_mptcp_triggered_cell, &stat.mptcp_triggered_cell); + nstat_sysinfo_send_data(&data); -#undef stat +#undef stat +} + +void +tcp_interface_send_probe(u_int16_t probe_if_index) +{ + int32_t offset = 0; + struct tcptimerlist *listp = &tcp_timer_list; + + /* Make sure TCP clock is up to date */ + calculate_tcp_clock(); + + lck_mtx_lock(listp->mtx); + if (listp->probe_if_index > 0 && listp->probe_if_index != probe_if_index) { + tcpstat.tcps_probe_if_conflict++; + os_log(OS_LOG_DEFAULT, + "%s: probe_if_index %u conflicts with %u, tcps_probe_if_conflict %u\n", + __func__, probe_if_index, listp->probe_if_index, + tcpstat.tcps_probe_if_conflict); + goto done; + } + + listp->probe_if_index = probe_if_index; + if (listp->running) { + os_log(OS_LOG_DEFAULT, "%s: timer list already running for if_index %u\n", + __func__, probe_if_index); + goto done; + } + + /* + * Reschedule the timerlist to run within the next 10ms, which is + * the fastest that we can do. 
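 */

/*
 * Editor's illustration, not part of this diff: the code that follows only
 * reschedules the timer list if its existing deadline is further out than
 * the ~10 ms probe window; an earlier deadline is left alone. A minimal
 * sketch of that test; needs_resched() and the sample times are
 * hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * True if a list scheduled to fire at `runtime` must be pulled in so it
 * runs within `want_offset` ms of `now` (wraparound-safe comparison).
 */
static bool
needs_resched(uint32_t runtime, uint32_t now, uint32_t want_offset)
{
	int32_t diff = (int32_t)(runtime - (now + want_offset));
	return diff > 0;	/* current deadline is later than wanted */
}

int
main(void)
{
	printf("%d\n", needs_resched(1005, 1000, 10));	/* 0: fires in 5 ms */
	printf("%d\n", needs_resched(1300, 1000, 10));	/* 1: too far out */
	return 0;
}

/*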
+ */ + offset = TCP_TIMER_10MS_QUANTUM; + if (listp->scheduled) { + int32_t diff; + diff = timer_diff(listp->runtime, 0, tcp_now, offset); + if (diff <= 0) { + /* The timer will fire sooner than what's needed */ + os_log(OS_LOG_DEFAULT, + "%s: timer will fire sooner than needed for if_index %u\n", + __func__, probe_if_index); + goto done; + } + } + listp->mode = TCP_TIMERLIST_10MS_MODE; + listp->idleruns = 0; + + tcp_sched_timerlist(offset); + +done: + lck_mtx_unlock(listp->mtx); + return; +} + +/* + * Enable read probes on this connection, if: + * - it is in established state + * - doesn't have any data outstanding + * - the outgoing ifp matches + * - we have not already sent any read probes + */ +static void +tcp_enable_read_probe(struct tcpcb *tp, struct ifnet *ifp) +{ + if (tp->t_state == TCPS_ESTABLISHED && + tp->snd_max == tp->snd_una && + tp->t_inpcb->inp_last_outifp == ifp && + !(tp->t_flagsext & TF_DETECT_READSTALL) && + tp->t_rtimo_probes == 0) { + tp->t_flagsext |= TF_DETECT_READSTALL; + tp->t_rtimo_probes = 0; + tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, + TCP_TIMER_10MS_QUANTUM); + if (tp->tentry.index == TCPT_NONE) { + tp->tentry.index = TCPT_KEEP; + tp->tentry.runtime = tcp_now + + TCP_TIMER_10MS_QUANTUM; + } else { + int32_t diff = 0; + + /* Reset runtime to be in next 10ms */ + diff = timer_diff(tp->tentry.runtime, 0, + tcp_now, TCP_TIMER_10MS_QUANTUM); + if (diff > 0) { + tp->tentry.index = TCPT_KEEP; + tp->tentry.runtime = tcp_now + + TCP_TIMER_10MS_QUANTUM; + if (tp->tentry.runtime == 0) { + tp->tentry.runtime++; + } + } + } + } +} + +/* + * Disable read probe and reset the keep alive timer + */ +static void +tcp_disable_read_probe(struct tcpcb *tp) +{ + if (tp->t_adaptive_rtimo == 0 && + ((tp->t_flagsext & TF_DETECT_READSTALL) || + tp->t_rtimo_probes > 0)) { + tcp_keepalive_reset(tp); + + if (tp->t_mpsub) { + mptcp_reset_keepalive(tp); + } + } +} + +/* + * Reschedule the tcp timerlist in the next 10ms to re-enable read/write + * probes on connections going over a particular interface. + */ +void +tcp_probe_connectivity(struct ifnet *ifp, u_int32_t enable) +{ + int32_t offset; + struct tcptimerlist *listp = &tcp_timer_list; + struct inpcbinfo *pcbinfo = &tcbinfo; + struct inpcb *inp, *nxt; + + if (ifp == NULL) { + return; + } + + /* update clock */ + calculate_tcp_clock(); + + /* + * Enable keep alive timer on all connections that are + * active/established on this interface. 
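 */

/*
 * Editor's illustration, not part of this diff: tcp_enable_read_probe()
 * above arms probing only for connections that can be probed safely. A
 * stand-alone sketch of that gate; struct conn, ESTABLISHED, and
 * read_probe_eligible() are hypothetical, trimmed-down stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ESTABLISHED 4

struct conn {
	int		state;		/* TCP state */
	uint32_t	snd_max;	/* highest sequence sent */
	uint32_t	snd_una;	/* oldest unacknowledged sequence */
	int		ifindex;	/* interface last used for output */
	bool		probing;	/* read-stall detection already armed */
	int		probes_sent;	/* read probes already sent */
};

/*
 * Probe only established connections with nothing outstanding, on the
 * affected interface, that are not already being probed.
 */
static bool
read_probe_eligible(const struct conn *c, int probe_ifindex)
{
	return c->state == ESTABLISHED &&
	       c->snd_max == c->snd_una &&
	       c->ifindex == probe_ifindex &&
	       !c->probing &&
	       c->probes_sent == 0;
}

int
main(void)
{
	struct conn c = { ESTABLISHED, 100, 100, 3, false, 0 };

	printf("eligible=%d\n", read_probe_eligible(&c, 3));	/* 1 */
	return 0;
}

/*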
+ */ + lck_rw_lock_shared(pcbinfo->ipi_lock); + + LIST_FOREACH_SAFE(inp, pcbinfo->ipi_listhead, inp_list, nxt) { + struct tcpcb *tp = NULL; + if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == + WNT_STOPUSING) { + continue; + } + + /* Acquire lock to look at the state of the connection */ + socket_lock(inp->inp_socket, 1); + + /* Release the want count */ + if (inp->inp_ppcb == NULL || + (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING)) { + socket_unlock(inp->inp_socket, 1); + continue; + } + tp = intotcpcb(inp); + if (enable) { + tcp_enable_read_probe(tp, ifp); + } else { + tcp_disable_read_probe(tp); + } + + socket_unlock(inp->inp_socket, 1); + } + lck_rw_done(pcbinfo->ipi_lock); + + lck_mtx_lock(listp->mtx); + if (listp->running) { + listp->pref_mode |= TCP_TIMERLIST_10MS_MODE; + goto done; + } + + /* Reschedule within the next 10ms */ + offset = TCP_TIMER_10MS_QUANTUM; + if (listp->scheduled) { + int32_t diff; + diff = timer_diff(listp->runtime, 0, tcp_now, offset); + if (diff <= 0) { + /* The timer will fire sooner than what's needed */ + goto done; + } + } + listp->mode = TCP_TIMERLIST_10MS_MODE; + listp->idleruns = 0; + + tcp_sched_timerlist(offset); +done: + lck_mtx_unlock(listp->mtx); + return; +} + +inline void +tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp) +{ + struct if_cellular_status_v1 *ifsr; + u_int32_t optlen; + ifsr = &ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1; + if (ifsr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) { + optlen = tp->t_maxopd - tp->t_maxseg; + + if (ifsr->mss_recommended == + IF_CELL_UL_MSS_RECOMMENDED_NONE && + tp->t_cached_maxopd > 0 && + tp->t_maxopd < tp->t_cached_maxopd) { + tp->t_maxopd = tp->t_cached_maxopd; + tcpstat.tcps_mss_to_default++; + } else if (ifsr->mss_recommended == + IF_CELL_UL_MSS_RECOMMENDED_MEDIUM && + tp->t_maxopd > tcp_mss_rec_medium) { + tp->t_cached_maxopd = tp->t_maxopd; + tp->t_maxopd = tcp_mss_rec_medium; + tcpstat.tcps_mss_to_medium++; + } else if (ifsr->mss_recommended == + IF_CELL_UL_MSS_RECOMMENDED_LOW && + tp->t_maxopd > tcp_mss_rec_low) { + tp->t_cached_maxopd = tp->t_maxopd; + tp->t_maxopd = tcp_mss_rec_low; + tcpstat.tcps_mss_to_low++; + } + tp->t_maxseg = tp->t_maxopd - optlen; + + /* + * clear the cached value if it is same as the current + */ + if (tp->t_maxopd == tp->t_cached_maxopd) { + tp->t_cached_maxopd = 0; + } + } +} + +void +tcp_update_mss_locked(struct socket *so, struct ifnet *ifp) +{ + struct inpcb *inp = sotoinpcb(so); + struct tcpcb *tp = intotcpcb(inp); + + if (ifp == NULL && (ifp = inp->inp_last_outifp) == NULL) { + return; + } + + if (!IFNET_IS_CELLULAR(ifp)) { + /* + * This optimization is implemented for cellular + * networks only + */ + return; + } + if (tp->t_state <= TCPS_CLOSE_WAIT) { + /* + * If the connection is currently doing or has done PMTU + * blackhole detection, do not change the MSS + */ + if (tp->t_flags & TF_BLACKHOLE) { + return; + } + if (ifp->if_link_status == NULL) { + return; + } + tcp_update_mss_core(tp, ifp); + } +} + +void +tcp_itimer(struct inpcbinfo *ipi) +{ + struct inpcb *inp, *nxt; + + if (lck_rw_try_lock_exclusive(ipi->ipi_lock) == FALSE) { + if (tcp_itimer_done == TRUE) { + tcp_itimer_done = FALSE; + atomic_add_32(&ipi->ipi_timer_req.intimer_fast, 1); + return; + } + /* Upgrade failed, lost lock now take it again exclusive */ + lck_rw_lock_exclusive(ipi->ipi_lock); + } + tcp_itimer_done = TRUE; + + LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) { + struct socket *so; + struct ifnet *ifp; + + if (inp->inp_ppcb == NULL || + 
in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) { + continue; + } + so = inp->inp_socket; + ifp = inp->inp_last_outifp; + socket_lock(so, 1); + if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) { + socket_unlock(so, 1); + continue; + } + so_check_extended_bk_idle_time(so); + if (ipi->ipi_flags & INPCBINFO_UPDATE_MSS) { + tcp_update_mss_locked(so, NULL); + } + socket_unlock(so, 1); + + /* + * Defunct all system-initiated background sockets if the + * socket is using the cellular interface and the interface + * has its LQM set to abort. + */ + if ((ipi->ipi_flags & INPCBINFO_HANDLE_LQM_ABORT) && + IS_SO_TC_BACKGROUNDSYSTEM(so->so_traffic_class) && + ifp != NULL && IFNET_IS_CELLULAR(ifp) && + (ifp->if_interface_state.valid_bitmask & + IF_INTERFACE_STATE_LQM_STATE_VALID) && + ifp->if_interface_state.lqm_state == + IFNET_LQM_THRESH_ABORT) { + socket_defunct(current_proc(), so, + SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL); + } + } + + ipi->ipi_flags &= ~(INPCBINFO_UPDATE_MSS | INPCBINFO_HANDLE_LQM_ABORT); + lck_rw_done(ipi->ipi_lock); }
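
/*
 * Editor's illustration, not part of this diff: tcp_cumulative_stat()
 * earlier in this change reports per-interval deltas of 32-bit counters;
 * taking a signed view of the unsigned difference keeps a wrapped counter's
 * delta correct and clamps an apparent decrease (e.g. a reset) to zero. A
 * user-space sketch of the same arithmetic; counter_delta() and the sample
 * values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
counter_delta(uint32_t cur, uint32_t *prev)
{
	/* Unsigned subtraction, then a signed view, handles wraparound. */
	int32_t diff = (int32_t)(cur - *prev);

	*prev = cur;
	return diff > 0 ? (uint32_t)diff : 0;
}

int
main(void)
{
	uint32_t prev = 0xfffffffau;

	/* The counter advanced by 11 across the 32-bit wrap. */
	printf("delta=%u\n", (unsigned)counter_delta(5, &prev));	/* 11 */
	printf("delta=%u\n", (unsigned)counter_delta(5, &prev));	/* 0 */
	return 0;
}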