diff --git a/bsd/netinet/tcp_timer.c b/bsd/netinet/tcp_timer.c
index a8df38947..e50bab301 100644
--- a/bsd/netinet/tcp_timer.c
+++ b/bsd/netinet/tcp_timer.c
@@ -1,8 +1,8 @@
 /*
- * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
@@ -74,11 +74,12 @@
 #include <sys/mcache.h>
 #include <sys/queue.h>
 #include <kern/locks.h>
-
 #include <kern/cpu_number.h>	/* before tcp_seq.h, for tcp_random18() */
+#include <mach/boolean.h>
 
 #include <net/route.h>
 #include <net/if_var.h>
+#include <net/ntstat.h>
 
 #include <netinet/in.h>
 #include <netinet/in_systm.h>
@@ -88,6 +89,7 @@
 #endif
 #include <netinet/ip_var.h>
 #include <netinet/tcp.h>
+#include <netinet/tcp_cache.h>
 #include <netinet/tcp_fsm.h>
 #include <netinet/tcp_seq.h>
 #include <netinet/tcp_timer.h>
@@ -102,29 +104,22 @@
 #endif
 #include <sys/kdebug.h>
 #include <mach/sdt.h>
+#include <netinet/mptcp_var.h>
 
-extern void postevent(struct socket *, struct sockbuf *,
-                                               int);
-#define DBG_FNC_TCP_FAST	NETDBG_CODE(DBG_NETTCP, (5 << 8))
-#define DBG_FNC_TCP_SLOW	NETDBG_CODE(DBG_NETTCP, (5 << 8) | 1)
-
-#define TIMERENTRY_TO_TP(te) ((struct tcpcb *)((uintptr_t)te - offsetof(struct tcpcb, tentry.le.le_next))) 
+/* Max number of times a stretch ack can be delayed on a connection */
+#define	TCP_STRETCHACK_DELAY_THRESHOLD	5
 
-#define VERIFY_NEXT_LINK(elm,field) do {	\
-	if (LIST_NEXT((elm),field) != NULL && 	\
-	    LIST_NEXT((elm),field)->field.le_prev !=	\
-		&((elm)->field.le_next))	\
-		panic("Bad link elm %p next->prev != elm", (elm));	\
-} while(0)
+/*
+ * If the host processor has been sleeping for too long, this is the threshold
+ * used to avoid sending stale retransmissions.
+ */
+#define	TCP_SLEEP_TOO_LONG	(10 * 60 * 1000) /* 10 minutes in ms */
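
A minimal, standalone sketch of how this threshold is applied; the sleep
snapshots and values here are hypothetical (in the patch the delta comes from
mach_absolutetime_asleep converted by absolutetime_to_nanoseconds(); see the
TCPT_REXMT handler below):

    #include <stdint.h>
    #include <stdio.h>

    #define TCP_SLEEP_TOO_LONG  (10 * 60 * 1000)    /* 10 minutes in ms */

    int main(void)
    {
        /* hypothetical accumulated-sleep counters, in nanoseconds */
        uint64_t asleep_now_ns = 900ULL * 1000000000ULL;      /* 15 min */
        uint64_t asleep_at_rexmt_ns = 60ULL * 1000000000ULL;  /* 1 min */

        /* same ns -> ms conversion the patch performs */
        uint64_t last_sleep_ms =
            (asleep_now_ns - asleep_at_rexmt_ns) / 1000000ULL;

        if (last_sleep_ms >= TCP_SLEEP_TOO_LONG)
            printf("stale: host slept %llu ms since last rexmt\n",
                (unsigned long long)last_sleep_ms);
        return 0;
    }
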
 
-#define VERIFY_PREV_LINK(elm,field) do {	\
-	if (*(elm)->field.le_prev != (elm))	\
-		panic("Bad link elm %p prev->next != elm", (elm));	\
-} while(0)
+/* tcp timer list */
+struct tcptimerlist tcp_timer_list;
 
-static int 	background_io_trigger = 5;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, background_io_trigger, CTLFLAG_RW | CTLFLAG_LOCKED,
-    &background_io_trigger, 0, "Background IO Trigger Setting");
+/* List of pcbs in timewait state, protected by tcbinfo's ipi_lock */
+struct tcptailq tcp_tw_tailq;
 
 static int
 sysctl_msec_to_ticks SYSCTL_HANDLER_ARGS
@@ -148,41 +143,59 @@ sysctl_msec_to_ticks SYSCTL_HANDLER_ARGS
 }
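
The handler above stores tick counts internally while exposing milliseconds to
userland. A standalone model of that round-trip (assuming TCP_RETRANSHZ of
1000 ticks per second, as defined in tcp_timer.h):

    #include <stdio.h>

    #define TCP_RETRANSHZ   1000    /* assumed: TCP timer ticks per second */

    static int msec_to_ticks(int msec) { return msec * TCP_RETRANSHZ / 1000; }
    static int ticks_to_msec(int ticks) { return ticks * 1000 / TCP_RETRANSHZ; }

    int main(void)
    {
        int msl = msec_to_ticks(15000);  /* e.g. a 15 second MSL */
        printf("msl = %d ticks = %d ms\n", msl, ticks_to_msec(msl));
        return 0;
    }
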
 
 int	tcp_keepinit;
-SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
+    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
     &tcp_keepinit, 0, sysctl_msec_to_ticks, "I", "");
 
 int	tcp_keepidle;
-SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
+    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
     &tcp_keepidle, 0, sysctl_msec_to_ticks, "I", "");
 
 int	tcp_keepintvl;
-SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
+    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
     &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", "");
 
+int	tcp_keepcnt;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt,
+    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+    &tcp_keepcnt, 0, "number of times to repeat keepalive");
+
 int	tcp_msl;
-SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl,
+    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
     &tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
 
-/* 
- * Avoid DoS via TCP Robustness in Persist Condition (see http://www.ietf.org/id/draft-ananth-tcpm-persist-02.txt)
- * by allowing a system wide maximum persistence timeout value when in Zero Window Probe mode.
- * Expressed in milliseconds to be consistent without timeout related values, the TCP socket option is in seconds.
+/*
+ * Avoid DoS via TCP Robustness in Persist Condition
+ * (see http://www.ietf.org/id/draft-ananth-tcpm-persist-02.txt)
+ * by allowing a system wide maximum persistence timeout value when in
+ * Zero Window Probe mode.
+ *
+ * Expressed in milliseconds to be consistent with other timeout-related
+ * values; the TCP socket option is in seconds.
  */
 u_int32_t tcp_max_persist_timeout = 0;
-SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
-    &tcp_max_persist_timeout, 0, sysctl_msec_to_ticks, "I", "Maximum persistence timout for ZWP");
+SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout,
+    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+    &tcp_max_persist_timeout, 0, sysctl_msec_to_ticks, "I",
+    "Maximum persistence timeout for ZWP");
 
 static int	always_keepalive = 0;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_RW | CTLFLAG_LOCKED,
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive,
+    CTLFLAG_RW | CTLFLAG_LOCKED,
     &always_keepalive , 0, "Assume SO_KEEPALIVE on all TCP connections");
 
-/* This parameter determines how long the timer list will stay in fast mode even
- * though all connections are idle. In fast mode, the timer will fire more frequently
- * anticipating new data.
+/*
+ * This parameter determines how long the timer list will stay in fast or
+ * quick mode even though all connections are idle. In this state, the
+ * timer will run more frequently anticipating new data.
  */
-int timer_fastmode_idlemax = TCP_FASTMODE_IDLEGEN_MAX;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, timer_fastmode_idlemax, CTLFLAG_RW | CTLFLAG_LOCKED,
-	&timer_fastmode_idlemax, 0, "Maximum idle generations in fast mode");
+int timer_fastmode_idlemax = TCP_FASTMODE_IDLERUN_MAX;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, timer_fastmode_idlemax,
+    CTLFLAG_RW | CTLFLAG_LOCKED,
+    &timer_fastmode_idlemax, 0, "Maximum idle generations in fast mode");
 
 /*
  * See tcp_syn_backoff[] for interval values between SYN retransmits;
@@ -191,132 +204,283 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, timer_fastmode_idlemax, CTLFLAG_RW | CTLFLAG
  * SYN retransmits.  Setting it to 0 disables the dropping off of those
  * two options.
  */
-static int tcp_broken_peer_syn_rxmit_thres = 7;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, broken_peer_syn_rxmit_thres, CTLFLAG_RW | CTLFLAG_LOCKED,
-    &tcp_broken_peer_syn_rxmit_thres, 0, "Number of retransmitted SYNs before "
-    "TCP disables rfc1323 and rfc1644 during the rest of attempts");
+static int tcp_broken_peer_syn_rxmit_thres = 10;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, broken_peer_syn_rexmit_thres,
+    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_broken_peer_syn_rxmit_thres, 0,
+    "Number of retransmitted SYNs before disabling RFC 1323 "
+    "options on local connections");
 
 static int tcp_timer_advanced = 0;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_timer_advanced, CTLFLAG_RD | CTLFLAG_LOCKED,
-    &tcp_timer_advanced, 0, "Number of times one of the timers was advanced");
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_timer_advanced,
+    CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_timer_advanced, 0,
+    "Number of times one of the timers was advanced");
 
 static int tcp_resched_timerlist = 0;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_resched_timerlist, CTLFLAG_RD | CTLFLAG_LOCKED,
-    &tcp_resched_timerlist, 0, 
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_resched_timerlist,
+    CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_resched_timerlist, 0,
     "Number of times timer list was rescheduled as part of processing a packet");
 
 int	tcp_pmtud_black_hole_detect = 1 ;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_detection, CTLFLAG_RW | CTLFLAG_LOCKED,
-    &tcp_pmtud_black_hole_detect, 0, "Path MTU Discovery Black Hole Detection");
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_detection,
+    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_pmtud_black_hole_detect, 0,
+    "Path MTU Discovery Black Hole Detection");
 
 int	tcp_pmtud_black_hole_mss = 1200 ;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_mss, CTLFLAG_RW | CTLFLAG_LOCKED,
-    &tcp_pmtud_black_hole_mss, 0, "Path MTU Discovery Black Hole Detection lowered MSS");
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_mss,
+    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_pmtud_black_hole_mss, 0,
+    "Path MTU Discovery Black Hole Detection lowered MSS");
 
-static int	tcp_keepcnt = TCPTV_KEEPCNT;
-static int	tcp_gc_done = FALSE;	/* perfromed garbage collection of "used" sockets */
-	/* max idle probes */
+static u_int32_t tcp_mss_rec_medium = 1200;
+static u_int32_t tcp_mss_rec_low = 512;
+
+#define	TCP_REPORT_STATS_INTERVAL	43200 /* 12 hours, in seconds */
+int tcp_report_stats_interval = TCP_REPORT_STATS_INTERVAL;
+
+/* performed garbage collection of "used" sockets */
+static boolean_t tcp_gc_done = FALSE;
+
+/* max idle probes */
 int	tcp_maxpersistidle;
-	/* max idle time in persist */
-int	tcp_maxidle;
 
-/* TCP delack timer is set to 100 ms. Since the processing of timer list in fast
- * mode will happen no faster than 100 ms, the delayed ack timer will fire some where 
- * between 100 and 200 ms.
+/*
+ * TCP delack timer is set to 100 ms. Since the processing of timer list
+ * in fast mode will happen no faster than 100 ms, the delayed ack timer
+ * will fire somewhere between 100 and 200 ms.
  */
 int	tcp_delack = TCP_RETRANSHZ / 10;
 
-struct	inpcbhead	time_wait_slots[N_TIME_WAIT_SLOTS];
-int		cur_tw_slot = 0;
-
-/* tcp timer list */
-struct tcptimerlist tcp_timer_list;
-
-/* The frequency of running through the TCP timer list in 
- * fast and slow mode can be configured.
+#if MPTCP
+/*
+ * MP_JOIN retransmission of 3rd ACK will be every 500 msecs without backoff
  */
-SYSCTL_UINT(_net_inet_tcp, OID_AUTO, timer_fastquantum, CTLFLAG_RW | CTLFLAG_LOCKED,
-	&tcp_timer_list.fast_quantum, TCP_FASTTIMER_QUANTUM, 
-	"Frequency of running timer list in fast mode");
+int	tcp_jack_rxmt = TCP_RETRANSHZ / 2;
+#endif /* MPTCP */
 
-SYSCTL_UINT(_net_inet_tcp, OID_AUTO, timer_slowquantum, CTLFLAG_RW | CTLFLAG_LOCKED,
-	&tcp_timer_list.slow_quantum, TCP_SLOWTIMER_QUANTUM, 
-	"Frequency of running timer list in slow mode");
+static boolean_t tcp_itimer_done = FALSE;
 
 static void tcp_remove_timer(struct tcpcb *tp);
 static void tcp_sched_timerlist(uint32_t offset);
-static uint32_t tcp_run_conn_timer(struct tcpcb *tp, uint16_t *next_index);
+static u_int32_t tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *mode,
+    u_int16_t probe_if_index);
 static void tcp_sched_timers(struct tcpcb *tp);
 static inline void tcp_set_lotimer_index(struct tcpcb *);
+__private_extern__ void tcp_remove_from_time_wait(struct inpcb *inp);
+static inline void tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp);
+__private_extern__ void tcp_report_stats(void);
 
-/* Macro to compare two timers. If there is a reset of the sign bit, it is 
- * safe to assume that the timer has wrapped around. By doing signed comparision, 
- * we take care of wrap around such that the value with the sign bit reset is 
- * actually ahead of the other.
- */
+static	u_int64_t tcp_last_report_time;
 
-static inline int32_t
-timer_diff(uint32_t t1, uint32_t toff1, uint32_t t2, uint32_t toff2) { 
-	return (int32_t)((t1 + toff1) - (t2 + toff2));
+/*
+ * Structure to store previously reported stats so that we can send
+ * incremental changes in each report interval.
+ */
+struct tcp_last_report_stats {
+	u_int32_t	tcps_connattempt;
+	u_int32_t	tcps_accepts;
+	u_int32_t	tcps_ecn_client_setup;
+	u_int32_t	tcps_ecn_server_setup;
+	u_int32_t	tcps_ecn_client_success;
+	u_int32_t	tcps_ecn_server_success;
+	u_int32_t	tcps_ecn_not_supported;
+	u_int32_t	tcps_ecn_lost_syn;
+	u_int32_t	tcps_ecn_lost_synack;
+	u_int32_t	tcps_ecn_recv_ce;
+	u_int32_t	tcps_ecn_recv_ece;
+	u_int32_t	tcps_ecn_sent_ece;
+	u_int32_t	tcps_ecn_conn_recv_ce;
+	u_int32_t	tcps_ecn_conn_recv_ece;
+	u_int32_t	tcps_ecn_conn_plnoce;
+	u_int32_t	tcps_ecn_conn_pl_ce;
+	u_int32_t	tcps_ecn_conn_nopl_ce;
+	u_int32_t	tcps_ecn_fallback_synloss;
+	u_int32_t	tcps_ecn_fallback_reorder;
+	u_int32_t	tcps_ecn_fallback_ce;
+
+	/* TFO-related statistics */
+	u_int32_t	tcps_tfo_syn_data_rcv;
+	u_int32_t	tcps_tfo_cookie_req_rcv;
+	u_int32_t	tcps_tfo_cookie_sent;
+	u_int32_t	tcps_tfo_cookie_invalid;
+	u_int32_t	tcps_tfo_cookie_req;
+	u_int32_t	tcps_tfo_cookie_rcv;
+	u_int32_t	tcps_tfo_syn_data_sent;
+	u_int32_t	tcps_tfo_syn_data_acked;
+	u_int32_t	tcps_tfo_syn_loss;
+	u_int32_t	tcps_tfo_blackhole;
+	u_int32_t	tcps_tfo_cookie_wrong;
+	u_int32_t	tcps_tfo_no_cookie_rcv;
+	u_int32_t	tcps_tfo_heuristics_disable;
+	u_int32_t	tcps_tfo_sndblackhole;
 };
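
With this snapshot, tcp_report_stats() (declared above) can report only what
changed since the previous interval and then roll the snapshot forward. A
self-contained sketch of that delta scheme with two hypothetical counters:

    #include <stdint.h>
    #include <stdio.h>

    struct counters { uint32_t connattempt, accepts; };

    static void report_delta(const struct counters *now, struct counters *last)
    {
        printf("attempts +%u, accepts +%u\n",
            now->connattempt - last->connattempt,
            now->accepts - last->accepts);
        *last = *now;   /* snapshot for the next interval */
    }

    int main(void)
    {
        struct counters last = { 0, 0 }, now = { 10, 7 };
        report_delta(&now, &last);  /* attempts +10, accepts +7 */
        now.connattempt = 25; now.accepts = 19;
        report_delta(&now, &last);  /* attempts +15, accepts +12 */
        return 0;
    }
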
 
+
 /* Returns true if the timer is on the timer list */
 #define TIMER_IS_ON_LIST(tp) ((tp)->t_flags & TF_TIMER_ONLIST)
 
+/* Run the TCP timerlist at least once every hour */
+#define TCP_TIMERLIST_MAX_OFFSET (60 * 60 * TCP_RETRANSHZ)
+
+
+static void add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay);
+static boolean_t tcp_garbage_collect(struct inpcb *, int);
+
+#define TIMERENTRY_TO_TP(te) ((struct tcpcb *)((uintptr_t)te - offsetof(struct tcpcb, tentry.le.le_next)))
+
+#define VERIFY_NEXT_LINK(elm,field) do {	\
+	if (LIST_NEXT((elm),field) != NULL && 	\
+	    LIST_NEXT((elm),field)->field.le_prev !=	\
+		&((elm)->field.le_next))	\
+		panic("Bad link elm %p next->prev != elm", (elm));	\
+} while(0)
+
+#define VERIFY_PREV_LINK(elm,field) do {	\
+	if (*(elm)->field.le_prev != (elm))	\
+		panic("Bad link elm %p prev->next != elm", (elm));	\
+} while(0)
 
-void	add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay);
-void	add_to_time_wait(struct tcpcb *tp, uint32_t delay) ;
+#define TCP_SET_TIMER_MODE(mode, i) do { \
+	if (IS_TIMER_HZ_10MS(i)) \
+		(mode) |= TCP_TIMERLIST_10MS_MODE; \
+	else if (IS_TIMER_HZ_100MS(i)) \
+		(mode) |= TCP_TIMERLIST_100MS_MODE; \
+	else \
+		(mode) |= TCP_TIMERLIST_500MS_MODE; \
+} while(0)
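
The accumulated mode bits let the timer list run at the coarsest granularity
that still satisfies every active timer. A standalone sketch of that selection
(the mode bit values here are assumptions for illustration; the real ones live
in tcp_timer.h):

    #include <stdint.h>
    #include <stdio.h>

    #define TCP_TIMERLIST_10MS_MODE   0x1   /* assumed values */
    #define TCP_TIMERLIST_100MS_MODE  0x2
    #define TCP_TIMERLIST_500MS_MODE  0x4

    static uint32_t pick_quantum(uint16_t mode)
    {
        if (mode & TCP_TIMERLIST_10MS_MODE)
            return 10;      /* finest request wins */
        if (mode & TCP_TIMERLIST_100MS_MODE)
            return 100;
        return 500;
    }

    int main(void)
    {
        uint16_t mode = 0;
        mode |= TCP_TIMERLIST_100MS_MODE;   /* e.g. delayed ACK */
        mode |= TCP_TIMERLIST_500MS_MODE;   /* e.g. keepalive */
        printf("run every %u ms\n", pick_quantum(mode));    /* 100 */
        return 0;
    }
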
 
-static void tcp_garbage_collect(struct inpcb *, int);
+#if (DEVELOPMENT || DEBUG)
+SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mss_rec_medium,
+    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_mss_rec_medium, 0,
+    "Medium MSS based on recommendation in link status report");
+SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mss_rec_low,
+    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_mss_rec_low, 0,
+    "Low MSS based on recommendation in link status report");
 
-void	add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay) 
+static int32_t tcp_change_mss_recommended = 0;
+static int
+sysctl_change_mss_recommended SYSCTL_HANDLER_ARGS
 {
-	int		tw_slot;
-	struct inpcbinfo *pcbinfo	= &tcbinfo;
-	uint32_t timer;
+#pragma unused(oidp, arg1, arg2)
+	int i, err = 0, changed = 0;
+	struct ifnet *ifp;
+	struct if_link_status ifsr;
+	struct if_cellular_status_v1 *new_cell_sr;
+	err = sysctl_io_number(req, tcp_change_mss_recommended,
+	    sizeof (int32_t), &i, &changed);
+	if (changed) {
+		ifnet_head_lock_shared();
+		TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
+			if (IFNET_IS_CELLULAR(ifp)) {
+				bzero(&ifsr, sizeof (ifsr));
+				new_cell_sr = &ifsr.ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
+				ifsr.ifsr_version = IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION;
+				ifsr.ifsr_len = sizeof(*new_cell_sr);
+
+				/* Set MSS recommended */
+				new_cell_sr->valid_bitmask |= IF_CELL_UL_MSS_RECOMMENDED_VALID;
+				new_cell_sr->mss_recommended = i;
+				err = ifnet_link_status_report(ifp, new_cell_sr, sizeof (new_cell_sr));
+				if (err == 0) {
+					tcp_change_mss_recommended = i;
+				} else {
+					break;
+				}
+			}
+		}
+		ifnet_head_done();
+	}
+	return (err);
+}
+
+SYSCTL_PROC(_net_inet_tcp, OID_AUTO, change_mss_recommended,
+    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_change_mss_recommended,
+    0, sysctl_change_mss_recommended, "IU", "Change MSS recommended");
+
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, report_stats_interval,
+    CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_report_stats_interval, 0,
+    "Report stats interval");
+#endif /* (DEVELOPMENT || DEBUG) */
+
+/*
+ * Macro to compare two timers. If there is a reset of the sign bit,
+ * it is safe to assume that the timer has wrapped around. By doing
+ * signed comparison, we take care of wrap around such that the value
+ * with the sign bit reset is actually ahead of the other.
+ */
+inline int32_t
+timer_diff(uint32_t t1, uint32_t toff1, uint32_t t2, uint32_t toff2) {
+	return (int32_t)((t1 + toff1) - (t2 + toff2));
+}
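
The same function, exercised in user space with hypothetical tick values, shows
why the signed cast orders wrapped timestamps correctly:

    #include <stdint.h>
    #include <stdio.h>

    static int32_t
    timer_diff(uint32_t t1, uint32_t toff1, uint32_t t2, uint32_t toff2)
    {
        return (int32_t)((t1 + toff1) - (t2 + toff2));
    }

    int main(void)
    {
        uint32_t before_wrap = 0xfffffff0U;  /* about to wrap */
        uint32_t after_wrap = 0x10U;         /* wrapped past 2^32 */

        /* positive: after_wrap is ahead despite being numerically smaller */
        printf("%d\n", timer_diff(after_wrap, 0, before_wrap, 0));   /* 32 */
        printf("%d\n", timer_diff(before_wrap, 0, after_wrap, 0));   /* -32 */
        return 0;
    }
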
 
-	/* pcb list should be locked when we get here */	
-	lck_rw_assert(pcbinfo->mtx, LCK_RW_ASSERT_EXCLUSIVE);
+/*
+ * Add to tcp timewait list, delay is given in milliseconds.
+ */
+static void
+add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay)
+{
+	struct inpcbinfo *pcbinfo = &tcbinfo;
+	struct inpcb *inp = tp->t_inpcb;
+	uint32_t timer;
 
-	LIST_REMOVE(tp->t_inpcb, inp_list);
+	/* pcb list should be locked when we get here */
+	lck_rw_assert(pcbinfo->ipi_lock, LCK_RW_ASSERT_EXCLUSIVE);
 
-	/* if (tp->t_timer[TCPT_2MSL] <= 0) 
-	    tp->t_timer[TCPT_2MSL] = 1; */
+	/* We may get here multiple times, so check */
+	if (!(inp->inp_flags2 & INP2_TIMEWAIT)) {
+		pcbinfo->ipi_twcount++;
+		inp->inp_flags2 |= INP2_TIMEWAIT;
 
-	/*
-	 * Because we're pulling this pcb out of the main TCP pcb list,
-	 * we need to recalculate the TCPT_2MSL timer value for tcp_slowtimo
-	 * higher timer granularity.
-	 */
+		/* Remove from global inp list */
+		LIST_REMOVE(inp, inp_list);
+	} else {
+		TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry);
+	}
 
-	timer = (delay / TCP_RETRANSHZ) * PR_SLOWHZ;
-	tp->t_rcvtime = (tp->t_rcvtime / TCP_RETRANSHZ) * PR_SLOWHZ;
+	/* Compute the time at which this socket can be closed */
+	timer = tcp_now + delay;
 
-	tp->t_rcvtime += timer & (N_TIME_WAIT_SLOTS - 1); 
+	/* We will use the TCPT_2MSL timer for tracking this delay */
 
-	tw_slot = (timer & (N_TIME_WAIT_SLOTS - 1)) + cur_tw_slot; 
-	if (tw_slot >= N_TIME_WAIT_SLOTS)
-	    tw_slot -= N_TIME_WAIT_SLOTS;
+	if (TIMER_IS_ON_LIST(tp))
+		tcp_remove_timer(tp);
+	tp->t_timer[TCPT_2MSL] = timer;
 
-	LIST_INSERT_HEAD(&time_wait_slots[tw_slot], tp->t_inpcb, inp_list);
+	TAILQ_INSERT_TAIL(&tcp_tw_tailq, tp, t_twentry);
 }
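
Since every entry is stamped tcp_now + delay with the same 2MSL delay,
appending at the tail keeps tcp_tw_tailq ordered by expiry. A standalone
sketch using <sys/queue.h> (the expiry values are made up; the comparison is
the same wrap-safe idea as TSTMP_GEQ):

    #include <sys/queue.h>
    #include <stdint.h>
    #include <stdio.h>

    struct twtp {
        uint32_t expire;                 /* tcp_now + delay */
        TAILQ_ENTRY(twtp) t_twentry;
    };
    TAILQ_HEAD(twhead, twtp);

    int main(void)
    {
        struct twhead q = TAILQ_HEAD_INITIALIZER(q);
        struct twtp a = { .expire = 100 }, b = { .expire = 160 },
            c = { .expire = 220 };
        struct twtp *tp;
        uint32_t now = 170;

        TAILQ_INSERT_TAIL(&q, &a, t_twentry);   /* oldest first */
        TAILQ_INSERT_TAIL(&q, &b, t_twentry);
        TAILQ_INSERT_TAIL(&q, &c, t_twentry);

        TAILQ_FOREACH(tp, &q, t_twentry) {
            if ((int32_t)(now - tp->expire) >= 0)
                printf("expired at %u\n", tp->expire);  /* 100, 160 */
        }
        return 0;
    }
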
 
-void	add_to_time_wait(struct tcpcb *tp, uint32_t delay) 
+void
+add_to_time_wait(struct tcpcb *tp, uint32_t delay)
 {
-    	struct inpcbinfo *pcbinfo		= &tcbinfo;
-	
-	if (!lck_rw_try_lock_exclusive(pcbinfo->mtx)) {
+	struct inpcbinfo *pcbinfo = &tcbinfo;
+	if (tp->t_inpcb->inp_socket->so_options & SO_NOWAKEFROMSLEEP)
+		socket_post_kev_msg_closed(tp->t_inpcb->inp_socket);
+
+	/* 19182803: Notify nstat that connection is closing before waiting. */
+	nstat_pcb_detach(tp->t_inpcb);
+
+	if (!lck_rw_try_lock_exclusive(pcbinfo->ipi_lock)) {
 		tcp_unlock(tp->t_inpcb->inp_socket, 0, 0);
-		lck_rw_lock_exclusive(pcbinfo->mtx);
+		lck_rw_lock_exclusive(pcbinfo->ipi_lock);
 		tcp_lock(tp->t_inpcb->inp_socket, 0, 0);
 	}
 	add_to_time_wait_locked(tp, delay);
-	lck_rw_done(pcbinfo->mtx);
+	lck_rw_done(pcbinfo->ipi_lock);
+
+	inpcb_gc_sched(pcbinfo, INPCB_TIMER_LAZY);
 }
 
-static void
+/* If this is on time wait queue, remove it. */
+void
+tcp_remove_from_time_wait(struct inpcb *inp)
+{
+	struct tcpcb *tp = intotcpcb(inp);
+	if (inp->inp_flags2 & INP2_TIMEWAIT)
+		TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry);
+}
+
+static boolean_t
 tcp_garbage_collect(struct inpcb *inp, int istimewait)
 {
+	boolean_t active = FALSE;
 	struct socket *so;
 	struct tcpcb *tp;
 
@@ -330,13 +494,23 @@ tcp_garbage_collect(struct inpcb *inp, int istimewait)
 	 * overflow sockets that are eligible for garbage collection have
 	 * their usecounts set to 1.
 	 */
-	if (so->so_usecount > 1 || !lck_mtx_try_lock_spin(&inp->inpcb_mtx))
-		return;
+	if (!lck_mtx_try_lock_spin(&inp->inpcb_mtx))
+		return (TRUE);
 
 	/* Check again under the lock */
 	if (so->so_usecount > 1) {
+		if (inp->inp_wantcnt == WNT_STOPUSING)
+			active = TRUE;
 		lck_mtx_unlock(&inp->inpcb_mtx);
-		return;
+		return (active);
+	}
+
+	if (istimewait &&
+		TSTMP_GEQ(tcp_now, tp->t_timer[TCPT_2MSL]) &&
+		tp->t_state != TCPS_CLOSED) {
+		/* Become a regular mutex */
+		lck_mtx_convert_spin(&inp->inpcb_mtx);
+		tcp_close(tp);
 	}
 
 	/*
@@ -344,42 +518,46 @@ tcp_garbage_collect(struct inpcb *inp, int istimewait)
 	 * only if we are called to clean up the time wait slots, since
 	 * tcp_dropdropablreq() considers a socket to have been fully
 	 * dropped after add_to_time_wait() is finished.
-	 * Also handle the case of connections getting closed by the peer while in the queue as
-	 * seen with rdar://6422317
-	 * 
+	 * Also handle the case of connections getting closed by the peer
+	 * while in the queue as seen with rdar://6422317
+	 *
 	 */
-	if (so->so_usecount == 1 && 
+	if (so->so_usecount == 1 &&
 	    ((istimewait && (so->so_flags & SOF_OVERFLOW)) ||
-	    ((tp != NULL) && (tp->t_state == TCPS_CLOSED) && (so->so_head != NULL)
-	    	 && ((so->so_state & (SS_INCOMP|SS_CANTSENDMORE|SS_CANTRCVMORE)) ==
-			 (SS_INCOMP|SS_CANTSENDMORE|SS_CANTRCVMORE))))) {
+	    ((tp != NULL) && (tp->t_state == TCPS_CLOSED) &&
+	    (so->so_head != NULL) &&
+	    ((so->so_state & (SS_INCOMP|SS_CANTSENDMORE|SS_CANTRCVMORE)) ==
+	    (SS_INCOMP|SS_CANTSENDMORE|SS_CANTRCVMORE))))) {
 
 		if (inp->inp_state != INPCB_STATE_DEAD) {
 			/* Become a regular mutex */
 			lck_mtx_convert_spin(&inp->inpcb_mtx);
 #if INET6
-			if (INP_CHECK_SOCKAF(so, AF_INET6))
+			if (SOCK_CHECK_DOM(so, PF_INET6))
 				in6_pcbdetach(inp);
 			else
 #endif /* INET6 */
-			in_pcbdetach(inp);
+				in_pcbdetach(inp);
 		}
 		so->so_usecount--;
+		if (inp->inp_wantcnt == WNT_STOPUSING)
+			active = TRUE;
 		lck_mtx_unlock(&inp->inpcb_mtx);
-		return;
+		return (active);
 	} else if (inp->inp_wantcnt != WNT_STOPUSING) {
 		lck_mtx_unlock(&inp->inpcb_mtx);
-		return;
+		return (FALSE);
 	}
 
 	/*
-	 * We get here because the PCB is no longer searchable (WNT_STOPUSING);
-	 * detach (if needed) and dispose if it is dead (usecount is 0).  This
-	 * covers all cases, including overflow sockets and those that are
-	 * considered as "embryonic", i.e. created by sonewconn() in TCP input
-	 * path, and have not yet been committed.  For the former, we reduce
-	 * the usecount to 0 as done by the code above.  For the latter, the
-	 * usecount would have reduced to 0 as part calling soabort() when the
+	 * We get here because the PCB is no longer searchable
+	 * (WNT_STOPUSING); detach (if needed) and dispose if it is dead
+	 * (usecount is 0).  This covers all cases, including overflow
+	 * sockets and those that are considered as "embryonic",
+	 * i.e. created by sonewconn() in TCP input path, and have
+	 * not yet been committed.  For the former, we reduce the usecount
+	 * to 0 as done by the code above.  For the latter, the usecount
+	 * would have reduced to 0 as part of calling soabort() when the
 	 * socket is dropped at the end of tcp_input().
 	 */
 	if (so->so_usecount == 0) {
@@ -387,8 +565,9 @@ tcp_garbage_collect(struct inpcb *inp, int istimewait)
 			struct tcpcb *, tp, int32_t, TCPS_CLOSED);
 		/* Become a regular mutex */
 		lck_mtx_convert_spin(&inp->inpcb_mtx);
-		
-		/* If this tp still happens to be on the timer list, 
+
+		/*
+		 * If this tp still happens to be on the timer list,
 		 * take it out
 		 */
 		if (TIMER_IS_ON_LIST(tp)) {
@@ -397,121 +576,112 @@ tcp_garbage_collect(struct inpcb *inp, int istimewait)
 
 		if (inp->inp_state != INPCB_STATE_DEAD) {
 #if INET6
-			if (INP_CHECK_SOCKAF(so, AF_INET6))
+			if (SOCK_CHECK_DOM(so, PF_INET6))
 				in6_pcbdetach(inp);
 			else
 #endif /* INET6 */
-			in_pcbdetach(inp);
+				in_pcbdetach(inp);
 		}
 		in_pcbdispose(inp);
-	} else {
-		lck_mtx_unlock(&inp->inpcb_mtx);
+		return (FALSE);
 	}
+
+	lck_mtx_unlock(&inp->inpcb_mtx);
+	return (TRUE);
 }
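
The function above relies on a try-lock pattern: take the per-pcb lock
opportunistically as a spinlock, re-check state under it, and convert to a
full mutex only when expensive teardown is actually needed. A rough user-space
analogue of the first two steps (pthreads stand in for the xnu lck_mtx calls;
the usecount field is illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct pcb {
        pthread_mutex_t mtx;
        int usecount;
    };

    /* Returns true if the pcb is still active and must be revisited
     * on a later GC pass, false once it has been collected. */
    static bool gc_one(struct pcb *p)
    {
        if (pthread_mutex_trylock(&p->mtx) != 0)
            return true;        /* contended: try again later */

        if (p->usecount > 1) {  /* re-check under the lock */
            pthread_mutex_unlock(&p->mtx);
            return true;
        }

        /* ... detach and dispose would happen here ... */
        pthread_mutex_unlock(&p->mtx);
        return false;
    }

    int main(void)
    {
        struct pcb p = { PTHREAD_MUTEX_INITIALIZER, 1 };
        printf("active: %d\n", gc_one(&p));     /* 0: collected */
        return 0;
    }
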
 
+/*
+ * TCP garbage collector callback (inpcb_timer_func_t).
+ *
+ * Returns the number of pcbs that will need to be gc-ed soon,
+ * returning > 0 will keep the timer active.
+ */
 void
-tcp_slowtimo(void)
+tcp_gc(struct inpcbinfo *ipi)
 {
 	struct inpcb *inp, *nxt;
-	struct tcpcb *tp;
+	struct tcpcb *tw_tp, *tw_ntp;
 #if TCPDEBUG
 	int ostate;
 #endif
-
 #if  KDEBUG
 	static int tws_checked = 0;
 #endif
 
-	struct inpcbinfo *pcbinfo		= &tcbinfo;
-
-	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_START, 0,0,0,0,0);
-
-	tcp_maxidle = tcp_keepcnt * tcp_keepintvl;
+	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_START, 0, 0, 0, 0, 0);
 
-	/* Update tcp_now here as it may get used while processing the slow timer */
+	/*
+	 * Update tcp_now here as it may get used while
+	 * processing the slow timer.
+	 */
 	calculate_tcp_clock();
 
-	/* Garbage collect socket/tcpcb: We need to acquire the list lock 
+	/*
+	 * Garbage collect socket/tcpcb: We need to acquire the list lock
 	 * exclusively to do this
 	 */
 
-	if (lck_rw_try_lock_exclusive(pcbinfo->mtx) == FALSE) {
-		if (tcp_gc_done == TRUE) {	/* don't sweat it this time. cleanup was done last time */
+	if (lck_rw_try_lock_exclusive(ipi->ipi_lock) == FALSE) {
+		/* don't sweat it this time; cleanup was done last time */
+		if (tcp_gc_done == TRUE) {
 			tcp_gc_done = FALSE;
-			KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END, tws_checked, cur_tw_slot,0,0,0);
-			return; /* Upgrade failed and lost lock - give up this time. */
+			KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END,
+			    tws_checked, cur_tw_slot, 0, 0, 0);
+			/* Lock upgrade failed, give up this round */
+			atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
+			return;
 		}
-		lck_rw_lock_exclusive(pcbinfo->mtx);	/* Upgrade failed, lost lock now take it again exclusive */
+		/* Upgrade failed, lost lock now take it again exclusive */
+		lck_rw_lock_exclusive(ipi->ipi_lock);
 	}
 	tcp_gc_done = TRUE;
 
-	/*
-	 * Process the items in the current time-wait slot
-	 */
-#if  KDEBUG
-	tws_checked = 0;
-#endif
-	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_NONE, tws_checked,0,0,0,0);
-
-    	LIST_FOREACH(inp, &time_wait_slots[cur_tw_slot], inp_list) {
-#if KDEBUG
-	        tws_checked++;
-#endif
-
-		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) 
-			continue;
-
-		tcp_lock(inp->inp_socket, 1, 0);
-
-		if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) 
-			goto twunlock;
-
-		tp = intotcpcb(inp);
-		if (tp == NULL)  /* tp already closed, remove from list */
-			goto twunlock;
-
-		if (tp->t_timer[TCPT_2MSL] >= N_TIME_WAIT_SLOTS) {
-		    tp->t_timer[TCPT_2MSL] -= N_TIME_WAIT_SLOTS;
-		    tp->t_rcvtime += N_TIME_WAIT_SLOTS;
-		}
-		else
-		    tp->t_timer[TCPT_2MSL] = 0;
-
-		if (tp->t_timer[TCPT_2MSL] == 0)  {
+	LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
+		if (tcp_garbage_collect(inp, 0))
+			atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1);
+	}
 
-			/* That pcb is ready for a close */	
-			tcp_free_sackholes(tp);
-			tp = tcp_close(tp);
+	/* Now cleanup the time wait ones */
+	TAILQ_FOREACH_SAFE(tw_tp, &tcp_tw_tailq, t_twentry, tw_ntp) {
+		/*
+		 * We check the timestamp here without holding the
+		 * socket lock for better performance. If there are
+		 * any pcbs in time-wait, the timer will get rescheduled.
+		 * Hence some error in this check can be tolerated.
+		 *
+		 * Sometimes a socket on time-wait queue can be closed if
+		 * 2MSL timer expired but the application still has a
+		 * usecount on it.
+		 */
+		if (tw_tp->t_state == TCPS_CLOSED ||
+		    TSTMP_GEQ(tcp_now, tw_tp->t_timer[TCPT_2MSL])) {
+			if (tcp_garbage_collect(tw_tp->t_inpcb, 1))
+				atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1);
 		}
-twunlock:
-		tcp_unlock(inp->inp_socket, 1, 0);
 	}
 
+	/* take into account pcbs that are still in time_wait_slots */
+	atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, ipi->ipi_twcount);
 
-    	LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
-		tcp_garbage_collect(inp, 0);
-	}
+	lck_rw_done(ipi->ipi_lock);
 
-	/* Now cleanup the time wait ones */
-    	LIST_FOREACH_SAFE(inp, &time_wait_slots[cur_tw_slot], inp_list, nxt) {
-		tcp_garbage_collect(inp, 1);
-	}
+	/* Clean up the socache while we are here */
+	if (so_cache_timer())
+		atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1);
+
+	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END, tws_checked,
+	    cur_tw_slot, 0, 0, 0);
 
-	if (++cur_tw_slot >= N_TIME_WAIT_SLOTS)
-		cur_tw_slot = 0;
-	
-	lck_rw_done(pcbinfo->mtx);
-	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END, tws_checked, cur_tw_slot,0,0,0);
+	return;
 }
 
 /*
  * Cancel all timers for TCP tp.
  */
 void
-tcp_canceltimers(tp)
-	struct tcpcb *tp;
+tcp_canceltimers(struct tcpcb *tp)
 {
-	register int i;
+	int i;
 
 	tcp_remove_timer(tp);
 	for (i = 0; i < TCPT_NTIMERS; i++)
@@ -528,20 +698,76 @@ int	tcp_backoff[TCP_MAXRXTSHIFT + 1] =
 
 static int tcp_totbackoff = 511;	/* sum of tcp_backoff[] */
 
+void
+tcp_rexmt_save_state(struct tcpcb *tp)
+{
+	u_int32_t fsize;
+	if (TSTMP_SUPPORTED(tp)) {
+		/*
+		 * Since timestamps are supported on the connection,
+		 * we can do recovery as described in rfc 4015.
+		 */
+		fsize = tp->snd_max - tp->snd_una;
+		tp->snd_ssthresh_prev = max(fsize, tp->snd_ssthresh);
+		tp->snd_recover_prev = tp->snd_recover;
+	} else {
+		/*
+		 * Timestamp option is not supported on this connection.
+		 * Record ssthresh and cwnd so they can
+		 * be recovered if this turns out to be a "bad" retransmit.
+		 * A retransmit is considered "bad" if an ACK for this
+		 * segment is received within RTT/2 interval; the assumption
+		 * here is that the ACK was already in flight.  See
+		 * "On Estimating End-to-End Network Path Properties" by
+		 * Allman and Paxson for more details.
+		 */
+		tp->snd_cwnd_prev = tp->snd_cwnd;
+		tp->snd_ssthresh_prev = tp->snd_ssthresh;
+		tp->snd_recover_prev = tp->snd_recover;
+		if (IN_FASTRECOVERY(tp))
+			tp->t_flags |= TF_WASFRECOVERY;
+		else
+			tp->t_flags &= ~TF_WASFRECOVERY;
+	}
+	tp->t_srtt_prev = (tp->t_srtt >> TCP_RTT_SHIFT) + 2;
+	tp->t_rttvar_prev = (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
+	tp->t_flagsext &= ~(TF_RECOMPUTE_RTT);
+}
+
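
tcp_rexmt_save_state() snapshots the congestion state so a spurious timeout
can be undone. A self-contained model of the non-timestamp branch (field names
and the RTT/2 window mirror the comment above; all values are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    struct cc {
        uint32_t cwnd, ssthresh;
        uint32_t cwnd_prev, ssthresh_prev;
        uint32_t badrxtwin;     /* "bad rexmt" undo deadline */
    };

    static void on_first_rto(struct cc *s, uint32_t now, uint32_t srtt)
    {
        s->cwnd_prev = s->cwnd;
        s->ssthresh_prev = s->ssthresh;
        s->badrxtwin = now + srtt / 2;  /* ACK within RTT/2 => spurious */
    }

    static void on_ack(struct cc *s, uint32_t now)
    {
        if ((int32_t)(s->badrxtwin - now) > 0) {
            s->cwnd = s->cwnd_prev;     /* undo the collapse */
            s->ssthresh = s->ssthresh_prev;
        }
    }

    int main(void)
    {
        struct cc s = { .cwnd = 10, .ssthresh = 20 };
        on_first_rto(&s, 1000, 200);
        s.cwnd = 1;      /* the timeout collapsed the window */
        on_ack(&s, 1050);/* ACK arrived within srtt/2: roll back */
        printf("cwnd=%u ssthresh=%u\n", s.cwnd, s.ssthresh);
        return 0;
    }
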
+/*
+ * Revert to the older segment size if there is an indication that PMTU
+ * blackhole detection was not needed.
+ */
+void
+tcp_pmtud_revert_segment_size(struct tcpcb *tp)
+{
+	int32_t optlen;
+
+	VERIFY(tp->t_pmtud_saved_maxopd > 0);
+	tp->t_flags |= TF_PMTUD;
+	tp->t_flags &= ~TF_BLACKHOLE;
+	optlen = tp->t_maxopd - tp->t_maxseg;
+	tp->t_maxopd = tp->t_pmtud_saved_maxopd;
+	tp->t_maxseg = tp->t_maxopd - optlen;
+	/*
+	 * Reset the slow-start flight size as it
+	 * may depend on the new MSS
+	 */
+	if (CC_ALGO(tp)->cwnd_init != NULL)
+		CC_ALGO(tp)->cwnd_init(tp);
+	tp->t_pmtud_start_ts = 0;
+	tcpstat.tcps_pmtudbh_reverted++;
+}
+
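
The revert works because t_maxopd counts payload plus options while t_maxseg is
payload only, so saving t_maxopd alone is enough to rebuild both. A quick
standalone check of that arithmetic (sizes assume a 12-byte timestamp option):

    #include <stdio.h>

    int main(void)
    {
        int t_maxopd = 1200, t_maxseg = 1188;   /* blackhole-probe sizes */
        int saved_maxopd = 1460;                /* t_pmtud_saved_maxopd */

        int optlen = t_maxopd - t_maxseg;       /* 12 option bytes */
        t_maxopd = saved_maxopd;
        t_maxseg = t_maxopd - optlen;
        printf("maxopd=%d maxseg=%d\n", t_maxopd, t_maxseg);  /* 1460/1448 */
        return 0;
    }
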
 /*
  * TCP timer processing.
  */
 struct tcpcb *
-tcp_timers(tp, timer)
-	register struct tcpcb *tp;
-	int timer;
+tcp_timers(struct tcpcb *tp, int timer)
 {
-	register int rexmt;
+	int32_t rexmt, optlen = 0, idle_time = 0;
 	struct socket *so;
 	struct tcptemp *t_template;
-	int optlen = 0;
-	int idle_time = 0;
-
 #if TCPDEBUG
 	int ostate;
 #endif
@@ -549,6 +775,8 @@ tcp_timers(tp, timer)
 #if INET6
 	int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV4) == 0;
 #endif /* INET6 */
+	u_int64_t accsleep_ms;
+	u_int32_t last_sleep_ms = 0;
 
 	so = tp->t_inpcb->inp_socket;
 	idle_time = tcp_now - tp->t_rcvtime;
@@ -567,10 +795,10 @@ tcp_timers(tp, timer)
 		tcp_free_sackholes(tp);
 		if (tp->t_state != TCPS_TIME_WAIT &&
 		    tp->t_state != TCPS_FIN_WAIT_2 &&
-		    ((idle_time > 0) && (idle_time < tcp_maxidle))) {
-			tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp, (u_int32_t)tcp_keepintvl);
-		}
-		else {
+		    ((idle_time > 0) && (idle_time < TCP_CONN_MAXIDLE(tp)))) {
+			tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp,
+				(u_int32_t)TCP_CONN_KEEPINTVL(tp));
+		} else {
 			tp = tcp_close(tp);
 			return(tp);
 		}
@@ -582,67 +810,153 @@ tcp_timers(tp, timer)
 	 * to a longer retransmit interval and retransmit one segment.
 	 */
 	case TCPT_REXMT:
-		/* Drop a connection in the retransmit timer
-		 * 1. If we have retransmitted more than TCP_MAXRXTSHIFT times
-		 * 2. If the time spent in this retransmission episode is more than
-		 *    the time limit set with TCP_RXT_CONNDROPTIME socket option
-		 * 3. If TCP_RXT_FINDROP socket option was set and we have already
-		 *    retransmitted the FIN 3 times without receiving an ack
+		absolutetime_to_nanoseconds(mach_absolutetime_asleep,
+		    &accsleep_ms);
+		accsleep_ms = accsleep_ms / 1000000UL;
+		if (accsleep_ms > tp->t_accsleep_ms)
+			last_sleep_ms = accsleep_ms - tp->t_accsleep_ms;
+		/*
+		 * Drop a connection in the retransmit timer
+		 * 1. If we have retransmitted more than TCP_MAXRXTSHIFT
+		 * times
+		 * 2. If the time spent in this retransmission episode is
+		 * more than the time limit set with TCP_RXT_CONNDROPTIME
+		 * socket option
+		 * 3. If TCP_RXT_FINDROP socket option was set and
+		 * we have already retransmitted the FIN 3 times without
+		 * receiving an ack
 		 */
 		if (++tp->t_rxtshift > TCP_MAXRXTSHIFT ||
-			(tp->rxt_conndroptime > 0 && tp->rxt_start > 0 && 
-			(tcp_now - tp->rxt_start) >= tp->rxt_conndroptime) ||
-			((tp->t_flagsext & TF_RXTFINDROP) != 0 &&
-			(tp->t_flags & TF_SENTFIN) != 0 &&
-			tp->t_rxtshift >= 4)) {
-
+		    (tp->t_rxt_conndroptime > 0 && tp->t_rxtstart > 0 &&
+		    (tcp_now - tp->t_rxtstart) >= tp->t_rxt_conndroptime) ||
+		    ((tp->t_flagsext & TF_RXTFINDROP) != 0 &&
+		    (tp->t_flags & TF_SENTFIN) != 0 && tp->t_rxtshift >= 4) ||
+		    (tp->t_rxtshift > 4 && last_sleep_ms >= TCP_SLEEP_TOO_LONG)) {
 			if ((tp->t_flagsext & TF_RXTFINDROP) != 0) {
 				tcpstat.tcps_rxtfindrop++;
+			} else if (last_sleep_ms >= TCP_SLEEP_TOO_LONG) {
+				tcpstat.tcps_drop_after_sleep++;
 			} else {
 				tcpstat.tcps_timeoutdrop++;
 			}
+			if (tp->t_rxtshift >= TCP_MAXRXTSHIFT) {
+				if (TCP_ECN_ENABLED(tp)) {
+					INP_INC_IFNET_STAT(tp->t_inpcb,
+					    ecn_on.rxmit_drop);
+				} else {
+					INP_INC_IFNET_STAT(tp->t_inpcb,
+					    ecn_off.rxmit_drop);
+				}
+			}
 			tp->t_rxtshift = TCP_MAXRXTSHIFT;
-			postevent(so, 0, EV_TIMEOUT);			
-			soevent(so, 
+			postevent(so, 0, EV_TIMEOUT);
+			soevent(so,
 			    (SO_FILT_HINT_LOCKED|SO_FILT_HINT_TIMEOUT));
+
+			if (TCP_ECN_ENABLED(tp) &&
+			    tp->t_state == TCPS_ESTABLISHED)
+				tcp_heuristic_ecn_droprxmt(tp);
+
 			tp = tcp_drop(tp, tp->t_softerror ?
 			    tp->t_softerror : ETIMEDOUT);
 
 			break;
 		}
 
-		if (tp->t_rxtshift == 1) {
+		tcpstat.tcps_rexmttimeo++;
+		tp->t_accsleep_ms = accsleep_ms;
+
+		if (tp->t_rxtshift == 1 &&
+			tp->t_state == TCPS_ESTABLISHED) {
+			/* Set the time at which retransmission started. */
+			tp->t_rxtstart = tcp_now;
+
 			/*
-			 * first retransmit; record ssthresh and cwnd so they can
-			 * be recovered if this turns out to be a "bad" retransmit.
-			 * A retransmit is considered "bad" if an ACK for this 
-			 * segment is received within RTT/2 interval; the assumption
-			 * here is that the ACK was already in flight.  See 
-			 * "On Estimating End-to-End Network Path Properties" by
-			 * Allman and Paxson for more details.
+			 * if this is the first retransmit timeout, save
+			 * the state so that we can recover if the timeout
+			 * is spurious.
 			 */
-			tp->snd_cwnd_prev = tp->snd_cwnd;
-			tp->snd_ssthresh_prev = tp->snd_ssthresh;
-			tp->snd_recover_prev = tp->snd_recover;
-			if (IN_FASTRECOVERY(tp))
-				  tp->t_flags |= TF_WASFRECOVERY;
-			else
-				  tp->t_flags &= ~TF_WASFRECOVERY;
-			tp->t_badrxtwin = tcp_now  + (tp->t_srtt >> (TCP_RTT_SHIFT)); 
+			tcp_rexmt_save_state(tp);
+		}
+#if MPTCP
+		if ((tp->t_rxtshift >= mptcp_fail_thresh) &&
+		    (tp->t_state == TCPS_ESTABLISHED) &&
+		    (tp->t_mpflags & TMPF_MPTCP_TRUE)) {
+			mptcp_act_on_txfail(so);
+
+		}
+#endif /* MPTCP */
+
+		if (tp->t_adaptive_wtimo > 0 &&
+			tp->t_rxtshift > tp->t_adaptive_wtimo &&
+			TCPS_HAVEESTABLISHED(tp->t_state)) {
+			/* Send an event to the application */
+			soevent(so,
+				(SO_FILT_HINT_LOCKED|
+				SO_FILT_HINT_ADAPTIVE_WTIMO));
+		}
+
+		/*
+		 * If this is a retransmit timeout after PTO, the PTO
+		 * was not effective
+		 */
+		if (tp->t_flagsext & TF_SENT_TLPROBE) {
+			tp->t_flagsext &= ~(TF_SENT_TLPROBE);
+			tcpstat.tcps_rto_after_pto++;
+		}
 
-			/* Set the time at which retransmission on this 
-			 * connection started
+		if (tp->t_flagsext & TF_DELAY_RECOVERY) {
+			/*
+			 * Retransmit timer fired before entering recovery
+			 * on a connection with packet re-ordering. This
+			 * suggests that the reordering metrics computed
+			 * are not accurate.
 			 */
-			tp->rxt_start = tcp_now;
+			tp->t_reorderwin = 0;
+			tp->t_timer[TCPT_DELAYFR] = 0;
+			tp->t_flagsext &= ~(TF_DELAY_RECOVERY);
+		}
+
+		if (tp->t_state == TCPS_SYN_RECEIVED)
+			tcp_disable_tfo(tp);
+
+		if ((tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) &&
+		    !(tp->t_tfo_flags & TFO_F_NO_SNDPROBING) &&
+		    ((tp->t_state != TCPS_SYN_SENT && tp->t_rxtshift > 1) ||
+		     tp->t_rxtshift > 2)) {
+			/*
+			 * For regular retransmissions, the first one is
+			 * already done as a tail-loss probe.
+			 * Thus, if rxtshift > 1, the segment has been sent
+			 * a total of 3 times.
+			 *
+			 * In SYN-SENT state there is no tail-loss probe,
+			 * so we have to let rxtshift go up to 3.
+			 */
+			tcp_heuristic_tfo_middlebox(tp);
+
+			so->so_error = ENODATA;
+			sorwakeup(so);
+			sowwakeup(so);
+
+			tp->t_tfo_stats |= TFO_S_SEND_BLACKHOLE;
+			tcpstat.tcps_tfo_sndblackhole++;
 		}
-		tcpstat.tcps_rexmttimeo++;
 
-		if (tp->t_state == TCPS_SYN_SENT)
+		if (tp->t_state == TCPS_SYN_SENT) {
 			rexmt = TCP_REXMTVAL(tp) * tcp_syn_backoff[tp->t_rxtshift];
-		else
+			tp->t_stat.synrxtshift = tp->t_rxtshift;
+
+			/* When retransmitting, disable TFO */
+			if (tfo_enabled(tp)) {
+				tp->t_flagsext &= ~TF_FASTOPEN;
+				tp->t_tfo_flags |= TFO_F_SYN_LOSS;
+			}
+		} else {
 			rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
-		TCPT_RANGESET(tp->t_rxtcur, rexmt,
-			tp->t_rttmin, TCPTV_REXMTMAX, 
+		}
+
+		TCPT_RANGESET(tp->t_rxtcur, rexmt, tp->t_rttmin, TCPTV_REXMTMAX,
 			TCP_ADD_REXMTSLOP(tp));
 		tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur);
 
@@ -651,25 +965,35 @@ tcp_timers(tp, timer)
 
 		tcp_free_sackholes(tp);
 		/*
-		 * Check for potential Path MTU Discovery Black Hole 
+		 * Check for potential Path MTU Discovery Black Hole
 		 */
-
-		if (tcp_pmtud_black_hole_detect && (tp->t_state == TCPS_ESTABLISHED)) {
-			if (((tp->t_flags & (TF_PMTUD|TF_MAXSEGSNT)) == (TF_PMTUD|TF_MAXSEGSNT)) &&
-				 (tp->t_rxtshift == 2)) {
-				/* 
+		if (tcp_pmtud_black_hole_detect &&
+			!(tp->t_flagsext & TF_NOBLACKHOLE_DETECTION) &&
+			(tp->t_state == TCPS_ESTABLISHED)) {
+			if ((tp->t_flags & TF_PMTUD) &&
+			    ((tp->t_flags & TF_MAXSEGSNT)
+			    || tp->t_pmtud_lastseg_size > tcp_pmtud_black_hole_mss) &&
+			    tp->t_rxtshift == 2) {
+				/*
 				 * Enter Path MTU Black-hole Detection mechanism:
 				 * - Disable Path MTU Discovery (IP "DF" bit).
-				 * - Reduce MTU to lower value than what we negociated with peer.
+				 * - Reduce MTU to lower value than what we
+				 * negotiated with the peer.
 				 */
-
-				tp->t_flags &= ~TF_PMTUD; /* Disable Path MTU Discovery for now */
-				tp->t_flags |= TF_BLACKHOLE; /* Record that we may have found a black hole */
+				/* Disable Path MTU Discovery for now */
+				tp->t_flags &= ~TF_PMTUD;
+				/* Record that we may have found a black hole */
+				tp->t_flags |= TF_BLACKHOLE;
 				optlen = tp->t_maxopd - tp->t_maxseg;
-				tp->t_pmtud_saved_maxopd = tp->t_maxopd; /* Keep track of previous MSS */
-				if (tp->t_maxopd > tcp_pmtud_black_hole_mss)
-					tp->t_maxopd = tcp_pmtud_black_hole_mss; /* Reduce the MSS to intermediary value */
-				else {
+				/* Keep track of previous MSS */
+				tp->t_pmtud_saved_maxopd = tp->t_maxopd;
+				tp->t_pmtud_start_ts = tcp_now;
+				if (tp->t_pmtud_start_ts == 0)
+					tp->t_pmtud_start_ts++;
+				/* Reduce the MSS to intermediary value */
+				if (tp->t_maxopd > tcp_pmtud_black_hole_mss) {
+					tp->t_maxopd = tcp_pmtud_black_hole_mss;
+				} else {
 					tp->t_maxopd = 	/* use the default MSS */
 #if INET6
 						isipv6 ? tcp_v6mssdflt :
@@ -679,43 +1003,41 @@ tcp_timers(tp, timer)
 				tp->t_maxseg = tp->t_maxopd - optlen;
 
 				/*
-	 			 * Reset the slow-start flight size as it may depends on the new MSS
+	 			 * Reset the slow-start flight size
+				 * as it may depend on the new MSS
 	 			 */
 				if (CC_ALGO(tp)->cwnd_init != NULL)
 					CC_ALGO(tp)->cwnd_init(tp);
+				tp->snd_cwnd = tp->t_maxseg;
 			}
 			/*
-			 * If further retransmissions are still unsuccessful with a lowered MTU,
-			 * maybe this isn't a Black Hole and we restore the previous MSS and
-			 * blackhole detection flags.
+			 * If further retransmissions are still
+			 * unsuccessful with a lowered MTU, maybe this
+			 * isn't a Black Hole and we restore the previous
+			 * MSS and blackhole detection flags.
 			 */
 			else {
-	
-				if ((tp->t_flags & TF_BLACKHOLE) && (tp->t_rxtshift > 4)) {
-					tp->t_flags |= TF_PMTUD; 
-					tp->t_flags &= ~TF_BLACKHOLE; 
-					optlen = tp->t_maxopd - tp->t_maxseg;
-					tp->t_maxopd = tp->t_pmtud_saved_maxopd;
-					tp->t_maxseg = tp->t_maxopd - optlen;
-					/*
-	 			 	* Reset the slow-start flight size as it may depends on the new MSS
-	 			 	*/
-					if (CC_ALGO(tp)->cwnd_init != NULL)
-						CC_ALGO(tp)->cwnd_init(tp);
+
+				if ((tp->t_flags & TF_BLACKHOLE) &&
+				    (tp->t_rxtshift > 4)) {
+					tcp_pmtud_revert_segment_size(tp);
+					tp->snd_cwnd = tp->t_maxseg;
 				}
 			}
 		}
 
 
 		/*
-		 * Disable rfc1323 and rfc1644 if we haven't got any response to
-		 * our SYN (after we reach the threshold) to work-around some
-		 * broken terminal servers (most of which have hopefully been
-		 * retired) that have bad VJ header compression code which
-		 * trashes TCP segments containing unknown-to-them TCP options.
+		 * Disable rfc1323 and rfc1644 if we haven't got any
+		 * response to our SYN (after we reach the threshold)
+		 * to work-around some broken terminal servers (most of
+		 * which have hopefully been retired) that have bad VJ
+		 * header compression code which trashes TCP segments
+		 * containing unknown-to-them TCP options.
+		 * Do this only on non-local connections.
 		 */
-		if ((tp->t_state == TCPS_SYN_SENT) &&
-		    (tp->t_rxtshift == tcp_broken_peer_syn_rxmit_thres))
+		if (tp->t_state == TCPS_SYN_SENT &&
+		    tp->t_rxtshift == tcp_broken_peer_syn_rxmit_thres)
 			tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_REQ_CC);
 
 		/*
@@ -746,27 +1068,40 @@ tcp_timers(tp, timer)
 		 * Force a segment to be sent.
 		 */
 		tp->t_flags |= TF_ACKNOW;
-		/*
-		 * If timing a segment in this window, stop the timer.
-		 */
+
+		/* If timing a segment in this window, stop the timer */
 		tp->t_rtttime = 0;
 
-		if (CC_ALGO(tp)->after_timeout != NULL)
-			CC_ALGO(tp)->after_timeout(tp);
+		if (!IN_FASTRECOVERY(tp) && tp->t_rxtshift == 1)
+			tcpstat.tcps_tailloss_rto++;
 
-		tp->t_dupacks = 0;
-		EXIT_FASTRECOVERY(tp);
 
-		/* CWR notifications are to be sent on new data right after
-		 * RTOs, Fast Retransmits and ECE notification receipts.
+		/*
+		 * RFC 5681 says: when a TCP sender detects segment loss
+		 * using retransmit timer and the given segment has already
+		 * been retransmitted by way of the retransmission timer at
+		 * least once, the value of ssthresh is held constant
 		 */
-		if ((tp->ecn_flags & TE_ECN_ON) == TE_ECN_ON) {
-			tp->ecn_flags |= TE_SENDCWR;
+		if (tp->t_rxtshift == 1 &&
+		    CC_ALGO(tp)->after_timeout != NULL) {
+			CC_ALGO(tp)->after_timeout(tp);
+			/*
+			 * CWR notifications are to be sent on new data
+			 * right after Fast Retransmits and ECE
+			 * notification receipts.
+			 */
+			if (TCP_ECN_ENABLED(tp))
+				tp->ecn_flags |= TE_SENDCWR;
 		}
+
+		EXIT_FASTRECOVERY(tp);
+
+		/* Exit cwnd non validated phase */
+		tp->t_flagsext &= ~TF_CWND_NONVALIDATED;
+
+
 fc_output:
-		DTRACE_TCP5(cc, void, NULL, struct inpcb *, tp->t_inpcb,
-			struct tcpcb *, tp, struct tcphdr *, NULL,
-			int32_t, TCP_CC_REXMT_TIMEOUT);
+		tcp_ccdbg_trace(tp, NULL, TCP_CC_REXMT_TIMEOUT);
 
 		(void) tcp_output(tp);
 		break;
@@ -783,15 +1118,15 @@ fc_output:
 		 * backoff, drop the connection if the idle time
 		 * (no responses to probes) reaches the maximum
 		 * backoff that we would use if retransmitting.
-		 * 
-		 * Drop the connection if we reached the maximum allowed time for 
-		 * Zero Window Probes without a non-zero update from the peer. 
+		 *
+		 * Drop the connection if we reached the maximum allowed time for
+		 * Zero Window Probes without a non-zero update from the peer.
 		 * See rdar://5805356
 		 */
 		if ((tp->t_rxtshift == TCP_MAXRXTSHIFT &&
 		    (idle_time >= tcp_maxpersistidle ||
-		    idle_time >= TCP_REXMTVAL(tp) * tcp_totbackoff)) || 
-		    ((tp->t_persist_stop != 0) && 
+		    idle_time >= TCP_REXMTVAL(tp) * tcp_totbackoff)) ||
+		    ((tp->t_persist_stop != 0) &&
 			TSTMP_LEQ(tp->t_persist_stop, tcp_now))) {
 			tcpstat.tcps_persistdrop++;
 			postevent(so, 0, EV_TIMEOUT);
@@ -801,9 +1136,9 @@ fc_output:
 			break;
 		}
 		tcp_setpersist(tp);
-		tp->t_force = 1;
+		tp->t_flagsext |= TF_FORCE;
 		(void) tcp_output(tp);
-		tp->t_force = 0;
+		tp->t_flagsext &= ~TF_FORCE;
 		break;
 
 	/*
@@ -812,12 +1147,28 @@ fc_output:
 	 */
 	case TCPT_KEEP:
 		tcpstat.tcps_keeptimeo++;
+#if MPTCP
+		/*
+		 * Regular TCP connections do not send keepalives after closing.
+		 * MPTCP must not either, after sending Data FINs.
+		 */
+		struct mptcb *mp_tp = tp->t_mptcb;
+		if ((tp->t_mpflags & TMPF_MPTCP_TRUE) &&
+		    (tp->t_state > TCPS_ESTABLISHED)) {
+			goto dropit;
+		} else if (mp_tp != NULL) {
+			if ((mptcp_ok_to_keepalive(mp_tp) == 0))
+				goto dropit;
+		}
+#endif /* MPTCP */
 		if (tp->t_state < TCPS_ESTABLISHED)
 			goto dropit;
 		if ((always_keepalive ||
-		    tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) &&
+		    (tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) ||
+		    (tp->t_flagsext & TF_DETECT_READSTALL) ||
+		    (tp->t_tfo_probe_state == TFO_PROBE_PROBING)) &&
 		    (tp->t_state <= TCPS_CLOSING || tp->t_state == TCPS_FIN_WAIT_2)) {
-		    	if (idle_time >= TCP_KEEPIDLE(tp) + (u_int32_t)tcp_maxidle)
+		    	if (idle_time >= TCP_CONN_KEEPIDLE(tp) + TCP_CONN_MAXIDLE(tp))
 				goto dropit;
 			/*
 			 * Send a packet designed to force a response
@@ -834,29 +1185,90 @@ fc_output:
 			tcpstat.tcps_keepprobe++;
 			t_template = tcp_maketemplate(tp);
 			if (t_template) {
-				unsigned int ifscope, nocell = 0;
-
+				struct inpcb *inp = tp->t_inpcb;
+				struct tcp_respond_args tra;
+
+				bzero(&tra, sizeof(tra));
+				tra.nocell = INP_NO_CELLULAR(inp);
+				tra.noexpensive = INP_NO_EXPENSIVE(inp);
+				tra.awdl_unrestricted = INP_AWDL_UNRESTRICTED(inp);
+				tra.intcoproc_allowed = INP_INTCOPROC_ALLOWED(inp);
 				if (tp->t_inpcb->inp_flags & INP_BOUND_IF)
-					ifscope = tp->t_inpcb->inp_boundifp->if_index;
+					tra.ifscope = tp->t_inpcb->inp_boundifp->if_index;
 				else
-					ifscope = IFSCOPE_NONE;
-
-				/*
-				 * If the socket isn't allowed to use the
-				 * cellular interface, indicate it as such.
-				 */
-				if (tp->t_inpcb->inp_flags & INP_NO_IFT_CELLULAR)
-					nocell = 1;
-
+					tra.ifscope = IFSCOPE_NONE;
 				tcp_respond(tp, t_template->tt_ipgen,
 				    &t_template->tt_t, (struct mbuf *)NULL,
-				    tp->rcv_nxt, tp->snd_una - 1, 0, ifscope,
-				    nocell);
+				    tp->rcv_nxt, tp->snd_una - 1, 0, &tra);
 				(void) m_free(dtom(t_template));
+				if (tp->t_flagsext & TF_DETECT_READSTALL)
+					tp->t_rtimo_probes++;
+			}
+			tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
+			    TCP_CONN_KEEPINTVL(tp));
+		} else {
+			tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
+			    TCP_CONN_KEEPIDLE(tp));
+		}
+		if (tp->t_flagsext & TF_DETECT_READSTALL) {
+			struct ifnet *outifp = tp->t_inpcb->inp_last_outifp;
+			bool reenable_probe = false;
+			/*
+			 * The keep alive packets sent to detect a read
+			 * stall did not get a response from the
+			 * peer. Generate more keep-alives to confirm this.
+			 * If the number of probes sent reaches the limit,
+			 * generate an event.
+			 */
+			if (tp->t_adaptive_rtimo > 0) {
+				if (tp->t_rtimo_probes > tp->t_adaptive_rtimo) {
+					/* Generate an event */
+					soevent(so,
+					    (SO_FILT_HINT_LOCKED |
+					    SO_FILT_HINT_ADAPTIVE_RTIMO));
+					tcp_keepalive_reset(tp);
+				} else {
+					reenable_probe = true;
+				}
+			} else if (outifp != NULL &&
+			    (outifp->if_eflags & IFEF_PROBE_CONNECTIVITY) &&
+			    tp->t_rtimo_probes <= TCP_CONNECTIVITY_PROBES_MAX) {
+				reenable_probe = true;
+			} else {
+				tp->t_flagsext &= ~TF_DETECT_READSTALL;
+			}
+			if (reenable_probe) {
+				int ind = min(tp->t_rtimo_probes,
+				    TCP_MAXRXTSHIFT);
+				tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(
+				    tp, tcp_backoff[ind] * TCP_REXMTVAL(tp));
 			}
-			tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, tcp_keepintvl);
-		} else
-			tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, TCP_KEEPIDLE(tp));
+		}
+		if (tp->t_tfo_probe_state == TFO_PROBE_PROBING) {
+			int ind;
+
+			tp->t_tfo_probes++;
+			ind = min(tp->t_tfo_probes, TCP_MAXRXTSHIFT);
+
+			/*
+			 * We take the minimum of the regular keepalive
+			 * timeout (set above) and the backed-off RTO. That
+			 * way we back off in case of packet loss but never
+			 * time out slower than regular keepalive would,
+			 * despite the backing off.
+			 */
+			tp->t_timer[TCPT_KEEP] = min(OFFSET_FROM_START(
+			    tp, tcp_backoff[ind] * TCP_REXMTVAL(tp)),
+			    tp->t_timer[TCPT_KEEP]);
+		} else if (tp->t_tfo_probe_state == TFO_PROBE_WAIT_DATA) {
+			/* Still no data! Let's assume a TFO-error and err out... */
+			tcp_heuristic_tfo_middlebox(tp);
+
+			so->so_error = ENODATA;
+			sorwakeup(so);
+			tp->t_tfo_stats |= TFO_S_RECV_BLACKHOLE;
+			tcpstat.tcps_tfo_blackhole++;
+		}
 		break;
 	case TCPT_DELACK:
 		if (tcp_delack_enabled && (tp->t_flags & TF_DELACK)) {
@@ -864,16 +1276,40 @@ fc_output:
 			tp->t_timer[TCPT_DELACK] = 0;
 			tp->t_flags |= TF_ACKNOW;
 
-			/* If delayed ack timer fired while we are stretching acks, 
-			 * go back to acking every other packet
+			/*
+			 * If delayed ack timer fired while stretching
+			 * acks, count the number of times the streaming
+			 * detection was not correct. If this exceeds a
+			 * threshold, disable stretch ack on this
+			 * connection.
+			 *
+			 * Also, go back to acking every other packet.
 			 */
-			if ((tp->t_flags & TF_STRETCHACK) != 0)
+			if ((tp->t_flags & TF_STRETCHACK)) {
+				if (tp->t_unacksegs > 1 &&
+				    tp->t_unacksegs < maxseg_unacked)
+					tp->t_stretchack_delayed++;
+
+				if (tp->t_stretchack_delayed >
+					TCP_STRETCHACK_DELAY_THRESHOLD) {
+					tp->t_flagsext |= TF_DISABLE_STRETCHACK;
+					/*
+					 * Note the time at which stretch
+					 * ack was disabled automatically
+					 */
+					tp->rcv_nostrack_ts = tcp_now;
+					tcpstat.tcps_nostretchack++;
+					tp->t_stretchack_delayed = 0;
+					tp->rcv_nostrack_pkts = 0;
+				}
 				tcp_reset_stretch_ack(tp);
+			}
 
-			/* If we are measuring inter packet arrival jitter for 
-			 * throttling a connection, this delayed ack might be 
-			 * the reason for accumulating some jitter. So let's
-			 * restart the measurement.
+			/*
+			 * If we are measuring inter packet arrival jitter
+			 * for throttling a connection, this delayed ack
+			 * might be the reason for accumulating some
+			 * jitter. So let's restart the measurement.
 			 */
 			CLEAR_IAJ_STATE(tp);
 
@@ -882,19 +1318,126 @@ fc_output:
 		}
 		break;
 
-#if TCPDEBUG
-	if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
-		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
-			  PRU_SLOWTIMO);
-#endif
-	dropit:
-		tcpstat.tcps_keepdrops++;
-		postevent(so, 0, EV_TIMEOUT);
-		soevent(so,
+#if MPTCP
+	case TCPT_JACK_RXMT:
+		if ((tp->t_state == TCPS_ESTABLISHED) &&
+		    (tp->t_mpflags & TMPF_PREESTABLISHED) &&
+		    (tp->t_mpflags & TMPF_JOINED_FLOW)) {
+			if (++tp->t_mprxtshift > TCP_MAXRXTSHIFT) {
+				tcpstat.tcps_timeoutdrop++;
+				postevent(so, 0, EV_TIMEOUT);
+				soevent(so,
+			    	    (SO_FILT_HINT_LOCKED|
+				    SO_FILT_HINT_TIMEOUT));
+				tp = tcp_drop(tp, tp->t_softerror ?
+			    	    tp->t_softerror : ETIMEDOUT);
+				break;
+			}
+			tcpstat.tcps_join_rxmts++;
+			tp->t_flags |= TF_ACKNOW;
+
+			/*
+			 * No backoff is implemented for simplicity for this
+			 * corner case.
+			 */
+			(void) tcp_output(tp);
+		}
+		break;
+#endif /* MPTCP */
+
+	case TCPT_PTO:
+	{
+		int32_t snd_len;
+		tp->t_flagsext &= ~(TF_SENT_TLPROBE);
+
+		/*
+		 * Check if the connection is in the right state to
+		 * send a probe
+		 */
+		if (tp->t_state != TCPS_ESTABLISHED ||
+		    (tp->t_rxtshift > 0 && !(tp->t_flagsext & TF_PROBING))
+		    || tp->snd_max == tp->snd_una ||
+		    !SACK_ENABLED(tp) || !TAILQ_EMPTY(&tp->snd_holes) ||
+		    IN_FASTRECOVERY(tp))
+			break;
+
+		/*
+		 * If there is no new data to send or if the
+		 * connection is limited by receive window then
+		 * retransmit the last segment, otherwise send
+		 * new data.
+		 */
+		snd_len = min(so->so_snd.sb_cc, tp->snd_wnd)
+		    - (tp->snd_max - tp->snd_una);
+		if (snd_len > 0) {
+			tp->snd_nxt = tp->snd_max;
+		} else {
+			snd_len = min((tp->snd_max - tp->snd_una),
+			    tp->t_maxseg);
+			tp->snd_nxt = tp->snd_max - snd_len;
+		}
+
+		tcpstat.tcps_pto++;
+		if (tp->t_flagsext & TF_PROBING)
+			tcpstat.tcps_probe_if++;
+
+		/* If timing a segment in this window, stop the timer */
+		tp->t_rtttime = 0;
+		/* Note that tail loss probe is being sent */
+		tp->t_flagsext |= TF_SENT_TLPROBE;
+		tp->t_tlpstart = tcp_now;
+
+		tp->snd_cwnd += tp->t_maxseg;
+		(void )tcp_output(tp);
+		tp->snd_cwnd -= tp->t_maxseg;
+
+		tp->t_tlphighrxt = tp->snd_nxt;
+		break;
+	}
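
The probe-length logic in the TCPT_PTO case either sends one new segment or
re-sends the tail of the send queue. A standalone walk-through with made-up
sequence numbers:

    #include <stdio.h>

    static int imin(int a, int b) { return a < b ? a : b; }

    int main(void)
    {
        /* hypothetical sequence-space state at PTO expiry */
        int snd_una = 1000, snd_max = 3000, snd_wnd = 8000;
        int sb_cc = 2000;   /* bytes in the send buffer, all sent */
        int maxseg = 1448;
        int snd_nxt, snd_len;

        snd_len = imin(sb_cc, snd_wnd) - (snd_max - snd_una);
        if (snd_len > 0) {
            snd_nxt = snd_max;              /* probe with new data */
        } else {
            snd_len = imin(snd_max - snd_una, maxseg);
            snd_nxt = snd_max - snd_len;    /* re-send the tail */
        }
        printf("probe %d bytes from seq %d\n", snd_len, snd_nxt);
        return 0;
    }
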
+	case TCPT_DELAYFR:
+		tp->t_flagsext &= ~TF_DELAY_RECOVERY;
+
+		/*
+		 * Don't do anything if one of the following is true:
+		 * - the connection is already in recovery
+		 * - everything up to snd_recover has been acknowledged
+		 * - the retransmit timeout has fired
+		 */
+		if (IN_FASTRECOVERY(tp) ||
+		    SEQ_GEQ(tp->snd_una, tp->snd_recover) ||
+		    tp->t_rxtshift > 0)
+			break;
+
+		VERIFY(SACK_ENABLED(tp));
+		tcp_rexmt_save_state(tp);
+		if (CC_ALGO(tp)->pre_fr != NULL) {
+			CC_ALGO(tp)->pre_fr(tp);
+			if (TCP_ECN_ENABLED(tp))
+				tp->ecn_flags |= TE_SENDCWR;
+		}
+		ENTER_FASTRECOVERY(tp);
+
+		tp->t_timer[TCPT_REXMT] = 0;
+		tcpstat.tcps_sack_recovery_episode++;
+		tp->t_sack_recovery_episode++;
+		tp->sack_newdata = tp->snd_nxt;
+		tp->snd_cwnd = tp->t_maxseg;
+		tcp_ccdbg_trace(tp, NULL, TCP_CC_ENTER_FASTRECOVERY);
+		(void) tcp_output(tp);
+		break;
+	dropit:
+		tcpstat.tcps_keepdrops++;
+		postevent(so, 0, EV_TIMEOUT);
+		soevent(so,
 		    (SO_FILT_HINT_LOCKED|SO_FILT_HINT_TIMEOUT));
 		tp = tcp_drop(tp, ETIMEDOUT);
 		break;
 	}
+#if TCPDEBUG
+	if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
+		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
+			  PRU_SLOWTIMO);
+#endif
 	return (tp);
 }
 
@@ -909,13 +1452,13 @@ tcp_remove_timer(struct tcpcb *tp)
 		return;
 	}
 	lck_mtx_lock(listp->mtx);
-	
+
 	/* Check if pcb is on timer list again after acquiring the lock */
 	if (!(TIMER_IS_ON_LIST(tp))) {
 		lck_mtx_unlock(listp->mtx);
 		return;
 	}
-	
+
 	if (listp->next_te != NULL && listp->next_te == &tp->tentry)
 		listp->next_te = LIST_NEXT(&tp->tentry, le);
 
@@ -929,109 +1472,136 @@ tcp_remove_timer(struct tcpcb *tp)
 	lck_mtx_unlock(listp->mtx);
 }
 
-/* Function to check if the timerlist needs to be rescheduled to run
+/*
+ * Function to check if the timerlist needs to be rescheduled to run
  * the timer entry correctly. Basically, this is to check if we can avoid
  * taking the list lock.
  */
 
 static boolean_t
-need_to_resched_timerlist(uint32_t runtime, uint16_t index) {
+need_to_resched_timerlist(u_int32_t runtime, u_int16_t mode)
+{
 	struct tcptimerlist *listp = &tcp_timer_list;
 	int32_t diff;
-	boolean_t is_fast;
-
-	if (runtime == 0 || index == TCPT_NONE)
-		return FALSE;
-	is_fast = !(IS_TIMER_SLOW(index));
 
-	/* If the list is being processed then the state of the list is in flux.
-	 * In this case always acquire the lock and set the state correctly.
+	/*
+	 * If the list is being processed then the state of the list is
+	 * in flux. In this case always acquire the lock and set the state
+	 * correctly.
 	 */
-	if (listp->running) {
-		return TRUE;
-	}
+	if (listp->running)
+		return (TRUE);
+
+	if (!listp->scheduled)
+		return (TRUE);
 
 	diff = timer_diff(listp->runtime, 0, runtime, 0);
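+	/*
+	 * A positive diff means the list is scheduled to run after this
+	 * timer; tolerate a lateness of up to one quantum of the timer's
+	 * mode before rescheduling the list.
+	 */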
 	if (diff <= 0) {
 		/* The list is going to run before this timer */
-		return FALSE;
+		return (FALSE);
 	} else {
-		if (is_fast) {
-			if (diff <= listp->fast_quantum)
-				return FALSE;
+		if (mode & TCP_TIMERLIST_10MS_MODE) {
+			if (diff <= TCP_TIMER_10MS_QUANTUM)
+				return (FALSE);
+		} else if (mode & TCP_TIMERLIST_100MS_MODE) {
+			if (diff <= TCP_TIMER_100MS_QUANTUM)
+				return (FALSE);
 		} else {
-			if (diff <= listp->slow_quantum)
-				return FALSE;
+			if (diff <= TCP_TIMER_500MS_QUANTUM)
+				return (FALSE);
 		}
 	}
-	return TRUE;
+	return (TRUE);
 }
 
 void
-tcp_sched_timerlist(uint32_t offset) 
+tcp_sched_timerlist(uint32_t offset)
 {
-
 	uint64_t deadline = 0;
 	struct tcptimerlist *listp = &tcp_timer_list;
 
 	lck_mtx_assert(listp->mtx, LCK_MTX_ASSERT_OWNED);
 
+	offset = min(offset, TCP_TIMERLIST_MAX_OFFSET);
 	listp->runtime = tcp_now + offset;
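+	/*
+	 * A runtime of 0 appears to be reserved as the "unscheduled"
+	 * sentinel, so step over it if tcp_now + offset wraps to 0.
+	 */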
+	if (listp->runtime == 0) {
+		listp->runtime++;
+		offset++;
+	}
 
-	clock_interval_to_deadline(offset, NSEC_PER_SEC / TCP_RETRANSHZ,
-		&deadline);
+	clock_interval_to_deadline(offset, USEC_PER_SEC, &deadline);
 
 	thread_call_enter_delayed(listp->call, deadline);
+	listp->scheduled = TRUE;
 }
 
-/* Function to run the timers for a connection.
+/*
+ * Function to run the timers for a connection.
  *
- * Returns the offset of next timer to be run for this connection which 
+ * Returns the offset of next timer to be run for this connection which
  * can be used to reschedule the timerlist.
+ *
+ * te_mode is an out parameter that indicates the modes of active
+ * timers for this connection.
  */
-uint32_t
-tcp_run_conn_timer(struct tcpcb *tp, uint16_t *next_index) {
-
-        struct socket *so;
-        uint16_t i = 0, index = TCPT_NONE, lo_index = TCPT_NONE;
-        uint32_t timer_val, offset = 0, lo_timer = 0;
+u_int32_t
+tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode,
+	u_int16_t probe_if_index)
+{
+	struct socket *so;
+	u_int16_t i = 0, index = TCPT_NONE, lo_index = TCPT_NONE;
+	u_int32_t timer_val, offset = 0, lo_timer = 0;
 	int32_t diff;
 	boolean_t needtorun[TCPT_NTIMERS];
 	int count = 0;
 
-        VERIFY(tp != NULL);
-        bzero(needtorun, sizeof(needtorun));
+	VERIFY(tp != NULL);
+	bzero(needtorun, sizeof(needtorun));
+	*te_mode = 0;
 
-        tcp_lock(tp->t_inpcb->inp_socket, 1, 0);
+	tcp_lock(tp->t_inpcb->inp_socket, 1, 0);
 
-        so = tp->t_inpcb->inp_socket;
-	/* Release the want count on inp */ 
-	if (in_pcb_checkstate(tp->t_inpcb, WNT_RELEASE, 1) == WNT_STOPUSING) {
+	so = tp->t_inpcb->inp_socket;
+	/* Release the want count on inp */
+	if (in_pcb_checkstate(tp->t_inpcb, WNT_RELEASE, 1)
+		== WNT_STOPUSING) {
 		if (TIMER_IS_ON_LIST(tp)) {
 			tcp_remove_timer(tp);
 		}
 
-		/* Looks like the TCP connection got closed while we 
+		/* Looks like the TCP connection got closed while we
 		 * were waiting for the lock. Done.
 		 */
 		goto done;
 	}
 
-        /* Since the timer thread needs to wait for tcp lock, it may race
-         * with another thread that can cancel or reschedule the timer that is
-         * about to run. Check if we need to run anything.
-         */
-	index = tp->tentry.index;
-	timer_val = tp->t_timer[index];
+	/*
+	 * If this connection is over an interface that needs to
+	 * be probed, send probe packets to reinitiate communication.
+	 */
+	if (probe_if_index > 0 && tp->t_inpcb->inp_last_outifp != NULL &&
+	    tp->t_inpcb->inp_last_outifp->if_index == probe_if_index) {
+		tp->t_flagsext |= TF_PROBING;
+		tcp_timers(tp, TCPT_PTO);
+		tp->t_timer[TCPT_PTO] = 0;
+		tp->t_flagsext &= ~TF_PROBING;
+	}
 
-        if (index == TCPT_NONE || tp->tentry.runtime == 0) 
+	/*
+	 * Since the timer thread needs to wait for tcp lock, it may race
+	 * with another thread that can cancel or reschedule the timer
+	 * that is about to run. Check if we need to run anything.
+	 */
+	if ((index = tp->tentry.index) == TCPT_NONE)
 		goto done;
 
+	timer_val = tp->t_timer[index];
+
 	diff = timer_diff(tp->tentry.runtime, 0, tcp_now, 0);
 	if (diff > 0) {
 		if (tp->tentry.index != TCPT_NONE) {
 			offset = diff;
-			*(next_index) = tp->tentry.index;
+			*(te_mode) = tp->tentry.mode;
 		}
 		goto done;
 	}
@@ -1039,18 +1609,20 @@ tcp_run_conn_timer(struct tcpcb *tp, uint16_t *next_index) {
 	tp->t_timer[index] = 0;
 	if (timer_val > 0) {
 		tp = tcp_timers(tp, index);
-		if (tp == NULL) 
+		if (tp == NULL)
 			goto done;
 	}
-	
-	/* Check if there are any other timers that need to be run. While doing it,
-	 * adjust the timer values wrt tcp_now.
+
+	/*
+	 * Check if there are any other timers that need to be run.
+	 * While doing it, adjust the timer values wrt tcp_now.
 	 */
+	tp->tentry.mode = 0;
 	for (i = 0; i < TCPT_NTIMERS; ++i) {
 		if (tp->t_timer[i] != 0) {
-			diff = timer_diff(tp->tentry.timer_start, tp->t_timer[i], tcp_now, 0);
+			diff = timer_diff(tp->tentry.timer_start,
+				tp->t_timer[i], tcp_now, 0);
 			if (diff <= 0) {
-				tp->t_timer[i] = 0;
 				needtorun[i] = TRUE;
 				count++;
 			} else {
@@ -1060,26 +1632,33 @@ tcp_run_conn_timer(struct tcpcb *tp, uint16_t *next_index) {
 					lo_timer = diff;
 					lo_index = i;
 				}
+				TCP_SET_TIMER_MODE(tp->tentry.mode, i);
 			}
 		}
 	}
-	
+
 	tp->tentry.timer_start = tcp_now;
 	tp->tentry.index = lo_index;
-	if (lo_index != TCPT_NONE) {
-		tp->tentry.runtime = tp->tentry.timer_start + tp->t_timer[lo_index];
-	} else {
-		tp->tentry.runtime = 0;
+	VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);
+
+	if (tp->tentry.index != TCPT_NONE) {
+		tp->tentry.runtime = tp->tentry.timer_start +
+			tp->t_timer[tp->tentry.index];
+		if (tp->tentry.runtime == 0)
+			tp->tentry.runtime++;
 	}
 
 	if (count > 0) {
-		/* run any other timers that are also outstanding at this time. */
+		/* run any other timers outstanding at this time. */
 		for (i = 0; i < TCPT_NTIMERS; ++i) {
 			if (needtorun[i]) {
 				tp->t_timer[i] = 0;
 				tp = tcp_timers(tp, i);
-				if (tp == NULL) 
+				if (tp == NULL) {
+					offset = 0;
+					*(te_mode) = 0;
 					goto done;
+				}
 			}
 		}
 		tcp_set_lotimer_index(tp);
@@ -1087,58 +1666,63 @@ tcp_run_conn_timer(struct tcpcb *tp, uint16_t *next_index) {
 
 	if (tp->tentry.index < TCPT_NONE) {
 		offset = tp->t_timer[tp->tentry.index];
-		*(next_index) = tp->tentry.index;
+		*(te_mode) = tp->tentry.mode;
 	}
 
 done:
 	if (tp != NULL && tp->tentry.index == TCPT_NONE) {
 		tcp_remove_timer(tp);
+		offset = 0;
 	}
-        tcp_unlock(so, 1, 0);
-        return offset;
+
+	tcp_unlock(so, 1, 0);
+	return (offset);
 }
 
 void
-tcp_run_timerlist(void * arg1, void * arg2) {
-
+tcp_run_timerlist(void * arg1, void * arg2)
+{
 #pragma unused(arg1, arg2)
-	
 	struct tcptimerentry *te, *next_te;
 	struct tcptimerlist *listp = &tcp_timer_list;
 	struct tcpcb *tp;
-	uint32_t next_timer = 0;
-	uint16_t index = TCPT_NONE;
-	boolean_t need_fast = FALSE;
+	uint32_t next_timer = 0; /* offset of the next timer on the list */
+	u_int16_t te_mode = 0;	/* modes of all active timers in a tcpcb */
+	u_int16_t list_mode = 0; /* union of the modes of all tcpcbs */
 	uint32_t active_count = 0;
-	uint32_t mode = TCP_TIMERLIST_FASTMODE;
 
 	calculate_tcp_clock();
 
 	lck_mtx_lock(listp->mtx);
 
 	listp->running = TRUE;
-	
+
 	LIST_FOREACH_SAFE(te, &listp->lhead, le, next_te) {
 		uint32_t offset = 0;
 		uint32_t runtime = te->runtime;
-		if (TSTMP_GT(runtime, tcp_now)) {
+		if (te->index < TCPT_NONE && TSTMP_GT(runtime, tcp_now)) {
 			offset = timer_diff(runtime, 0, tcp_now, 0);
 			if (next_timer == 0 || offset < next_timer) {
 				next_timer = offset;
 			}
+			list_mode |= te->mode;
 			continue;
 		}
-		active_count++;
 
 		tp = TIMERENTRY_TO_TP(te);
 
-		/* Acquire an inp wantcnt on the inpcb so that the socket won't get
-		 * detached even if tcp_close is called
+		/*
+		 * Acquire an inp wantcnt on the inpcb so that the socket
+		 * won't get detached even if tcp_close is called
 		 */
-		if (in_pcb_checkstate(tp->t_inpcb, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
-			/* Some how this pcb went into dead state while on the timer list,
-			 * just take it off the list. Since the timer list entry pointers 
-			 * are protected by the timer list lock, we can do it here
+		if (in_pcb_checkstate(tp->t_inpcb, WNT_ACQUIRE, 0)
+		    == WNT_STOPUSING) {
+			/*
+			 * Somehow this pcb went into dead state while
+			 * on the timer list; just take it off the list.
+			 * Since the timer list entry pointers are
+			 * protected by the timer list lock, we can
+			 * do it here without the socket lock.
+			 */
 			if (TIMER_IS_ON_LIST(tp)) {
 				tp->t_flags &= ~(TF_TIMER_ONLIST);
@@ -1150,94 +1734,106 @@ tcp_run_timerlist(void * arg1, void * arg2) {
 			}
 			continue;
 		}
+		active_count++;
 
-		/* Store the next timerentry pointer before releasing the list lock.
-		 * If that entry has to be removed when we release the lock, this
-		 * pointer will be updated to the element after that.
+		/*
+		 * Store the next timerentry pointer before releasing the
+		 * list lock. If that entry has to be removed when we
+		 * release the lock, this pointer will be updated to the
+		 * element after that.
 		 */
-		listp->next_te = next_te; 
+		listp->next_te = next_te;
 
 		VERIFY_NEXT_LINK(&tp->tentry, le);
 		VERIFY_PREV_LINK(&tp->tentry, le);
 
 		lck_mtx_unlock(listp->mtx);
 
-		index = TCPT_NONE;
-		offset = tcp_run_conn_timer(tp, &index);
-		
+		offset = tcp_run_conn_timer(tp, &te_mode,
+		    listp->probe_if_index);
+
 		lck_mtx_lock(listp->mtx);
 
 		next_te = listp->next_te;
 		listp->next_te = NULL;
 
-		if (offset > 0) {
-			if (index < TCPT_NONE) {
-				/* Check if this is a fast_timer. */
-				if (!need_fast && !(IS_TIMER_SLOW(index))) {
-					need_fast = TRUE;
-				}
+		if (offset > 0 && te_mode != 0) {
+			list_mode |= te_mode;
 
-				if (next_timer == 0 || offset < next_timer) {
-					next_timer = offset;
-				}
-			}
+			if (next_timer == 0 || offset < next_timer)
+				next_timer = offset;
 		}
 	}
 
 	if (!LIST_EMPTY(&listp->lhead)) {
-		if (listp->mode == TCP_TIMERLIST_FASTMODE) {
-			if (need_fast || active_count > 0 || 
-				listp->pref_mode == TCP_TIMERLIST_FASTMODE) {
-				listp->idlegen = 0;
-			} else {
-				listp->idlegen++;
-				if (listp->idlegen > timer_fastmode_idlemax) {
-					mode = TCP_TIMERLIST_SLOWMODE;
-					listp->idlegen = 0;
-				}
-			}
-		} else {
-			if (!need_fast) {
-				mode = TCP_TIMERLIST_SLOWMODE;
-			}
-		}
+		u_int16_t next_mode = 0;
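+		/*
+		 * Pick the finest quantum required by any connection:
+		 * 10ms if anyone needs it, otherwise 100ms, otherwise
+		 * the 500ms slow mode.
+		 */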
+		if ((list_mode & TCP_TIMERLIST_10MS_MODE) ||
+			(listp->pref_mode & TCP_TIMERLIST_10MS_MODE))
+			next_mode = TCP_TIMERLIST_10MS_MODE;
+		else if ((list_mode & TCP_TIMERLIST_100MS_MODE) ||
+			(listp->pref_mode & TCP_TIMERLIST_100MS_MODE))
+			next_mode = TCP_TIMERLIST_100MS_MODE;
+		else
+			next_mode = TCP_TIMERLIST_500MS_MODE;
 
-		if (mode == TCP_TIMERLIST_FASTMODE || 
-			listp->pref_mode == TCP_TIMERLIST_FASTMODE) {
-			next_timer = listp->fast_quantum;
+		if (next_mode != TCP_TIMERLIST_500MS_MODE) {
+			listp->idleruns = 0;
 		} else {
-			if (listp->pref_offset != 0 && 
-				listp->pref_offset < next_timer)
-				next_timer = listp->pref_offset;
-			if (next_timer < listp->slow_quantum)
-				next_timer = listp->slow_quantum;
+			/*
+			 * The next required mode is slow mode, but if
+			 * the last one was a faster mode and we did not
+			 * have enough idle runs, repeat the last mode.
+			 *
+			 * We try to keep the timer list in fast mode for
+			 * some idle time in expectation of new data.
+			 */
+			if (listp->mode != next_mode &&
+			    listp->idleruns < timer_fastmode_idlemax) {
+				listp->idleruns++;
+				next_mode = listp->mode;
+				next_timer = TCP_TIMER_100MS_QUANTUM;
+			} else {
+				listp->idleruns = 0;
+			}
 		}
+		listp->mode = next_mode;
+		if (listp->pref_offset != 0)
+			next_timer = min(listp->pref_offset, next_timer);
 
-		listp->mode = mode;
+		if (listp->mode == TCP_TIMERLIST_500MS_MODE)
+			next_timer = max(next_timer,
+				TCP_TIMER_500MS_QUANTUM);
 
 		tcp_sched_timerlist(next_timer);
 	} else {
-		/* No need to reschedule this timer */
-		listp->runtime = 0;
+		/*
+		 * No need to reschedule this timer, but keep running
+		 * periodically at a much coarser granularity.
+		 */
+		tcp_sched_timerlist(TCP_TIMERLIST_MAX_OFFSET);
 	}
 
 	listp->running = FALSE;
 	listp->pref_mode = 0;
 	listp->pref_offset = 0;
+	listp->probe_if_index = 0;
 
 	lck_mtx_unlock(listp->mtx);
 }
 
-/* Function to verify if a change in timer state is required for a connection */
-void 
-tcp_sched_timers(struct tcpcb *tp) 
+/*
+ * Function to check if the timerlist needs to be rescheduled to run this
+ * connection's timers correctly.
+ */
+void
+tcp_sched_timers(struct tcpcb *tp)
 {
 	struct tcptimerentry *te = &tp->tentry;
-	uint16_t index = te->index;
+	u_int16_t index = te->index;
+	u_int16_t mode = te->mode;
 	struct tcptimerlist *listp = &tcp_timer_list;
-	uint32_t offset = 0;
-	boolean_t is_fast;
-	int list_locked = 0;
+	int32_t offset = 0;
+	boolean_t list_locked = FALSE;
 
 	if (tp->t_inpcb->inp_state == INPCB_STATE_DEAD) {
 		/* Just return without adding the dead pcb to the list */
@@ -1248,64 +1844,74 @@ tcp_sched_timers(struct tcpcb *tp)
 	}
 
 	if (index == TCPT_NONE) {
+		/* Nothing to run */
 		tcp_remove_timer(tp);
 		return;
 	}
 
-	is_fast = !(IS_TIMER_SLOW(index));
-	offset = te->runtime - tcp_now;
-	if (offset == 0) {
+	/*
+	 * Compute the offset at which the next timer for this connection
+	 * has to run.
+	 */
+	offset = timer_diff(te->runtime, 0, tcp_now, 0);
+	if (offset <= 0) {
 		offset = 1;
 		tcp_timer_advanced++;
 	}
-	if (is_fast)
-		offset = listp->fast_quantum;
 
 	if (!TIMER_IS_ON_LIST(tp)) {
 		if (!list_locked) {
 			lck_mtx_lock(listp->mtx);
-			list_locked = 1;
+			list_locked = TRUE;
 		}
 
 		LIST_INSERT_HEAD(&listp->lhead, te, le);
 		tp->t_flags |= TF_TIMER_ONLIST;
 
-        	listp->entries++;
-        	if (listp->entries > listp->maxentries)
-                	listp->maxentries = listp->entries;
+		listp->entries++;
+		if (listp->entries > listp->maxentries)
+			listp->maxentries = listp->entries;
 
 		/* if the list is not scheduled, just schedule it */
-		if (listp->runtime == 0)
+		if (!listp->scheduled)
 			goto schedule;
-
 	}
 
 
-	/* timer entry is currently on the list */
-	if (need_to_resched_timerlist(te->runtime, index)) {
+	/*
+	 * Timer entry is currently on the list, check if the list needs
+	 * to be rescheduled.
+	 */
+	if (need_to_resched_timerlist(te->runtime, mode)) {
 		tcp_resched_timerlist++;
-	
+
 		if (!list_locked) {
 			lck_mtx_lock(listp->mtx);
-			list_locked = 1;
+			list_locked = TRUE;
 		}
 
 		VERIFY_NEXT_LINK(te, le);
 		VERIFY_PREV_LINK(te, le);
 
 		if (listp->running) {
-			if (is_fast) {
-				listp->pref_mode = TCP_TIMERLIST_FASTMODE;
-			} else if (listp->pref_offset == 0 ||
-				((int)offset) < listp->pref_offset) {
+			listp->pref_mode |= mode;
+			if (listp->pref_offset == 0 ||
+				offset < listp->pref_offset) {
 				listp->pref_offset = offset;
 			}
 		} else {
-			int32_t diff;
-			diff = timer_diff(listp->runtime, 0, tcp_now, offset);
-			if (diff <= 0) {
-				/* The list is going to run before this timer */
-				goto done;
+			/*
+			 * The list could have been rescheduled while
+			 * this thread was waiting for the lock.
+			 */
+			if (listp->scheduled) {
+				int32_t diff;
+				diff = timer_diff(listp->runtime, 0,
+				    tcp_now, offset);
+				if (diff <= 0)
+					goto done;
+				else
+					goto schedule;
 			} else {
 				goto schedule;
 			}
@@ -1314,9 +1920,20 @@ tcp_sched_timers(struct tcpcb *tp)
 	goto done;
 
 schedule:
-	if (is_fast) {
-		listp->mode = TCP_TIMERLIST_FASTMODE;
-		listp->idlegen = 0;
+	/*
+	 * Since a connection with timers is getting scheduled, the timer
+	 * list moves from idle to active state and that is why idleruns
+	 * is reset.
+	 */
+	if (mode & TCP_TIMERLIST_10MS_MODE) {
+		listp->mode = TCP_TIMERLIST_10MS_MODE;
+		listp->idleruns = 0;
+		offset = min(offset, TCP_TIMER_10MS_QUANTUM);
+	} else if (mode & TCP_TIMERLIST_100MS_MODE) {
+		if (listp->mode > TCP_TIMERLIST_100MS_MODE)
+			listp->mode = TCP_TIMERLIST_100MS_MODE;
+		listp->idleruns = 0;
+		offset = min(offset, TCP_TIMER_100MS_QUANTUM);
 	}
 	tcp_sched_timerlist(offset);
 
@@ -1326,33 +1943,506 @@ done:
 
 	return;
 }
-		
-void
-tcp_set_lotimer_index(struct tcpcb *tp) {
-	uint16_t i, lo_index = TCPT_NONE;
+
+static inline void
+tcp_set_lotimer_index(struct tcpcb *tp)
+{
+	uint16_t i, lo_index = TCPT_NONE, mode = 0;
 	uint32_t lo_timer = 0;
 	for (i = 0; i < TCPT_NTIMERS; ++i) {
-		if (tp->t_timer[i] != 0 &&
-			(lo_timer == 0 || tp->t_timer[i] < lo_timer)) {
-			lo_timer = tp->t_timer[i];
-			lo_index = i;
+		if (tp->t_timer[i] != 0) {
+			TCP_SET_TIMER_MODE(mode, i);
+			if (lo_timer == 0 || tp->t_timer[i] < lo_timer) {
+				lo_timer = tp->t_timer[i];
+				lo_index = i;
+			}
 		}
 	}
 	tp->tentry.index = lo_index;
-	if (lo_index != TCPT_NONE) {
-		tp->tentry.runtime = tp->tentry.timer_start + tp->t_timer[lo_index];
-	} else {
-		tp->tentry.runtime = 0;
+	tp->tentry.mode = mode;
+	VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);
+
+	if (tp->tentry.index != TCPT_NONE) {
+		tp->tentry.runtime = tp->tentry.timer_start
+		    + tp->t_timer[tp->tentry.index];
+		if (tp->tentry.runtime == 0)
+			tp->tentry.runtime++;
 	}
 }
 
 void
-tcp_check_timer_state(struct tcpcb *tp) {
-
+tcp_check_timer_state(struct tcpcb *tp)
+{
 	lck_mtx_assert(&tp->t_inpcb->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
 
+	if (tp->t_inpcb->inp_flags2 & INP2_TIMEWAIT)
+		return;
+
 	tcp_set_lotimer_index(tp);
 
 	tcp_sched_timers(tp);
 	return;
 }
+
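+/*
+ * Compute how much a monotonically increasing counter has advanced
+ * since the last report and remember the current value for the next
+ * interval.
+ */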
+static inline void
+tcp_cumulative_stat(u_int32_t cur, u_int32_t *prev, u_int32_t *dest)
+{
+	/* handle wrap around */
+	int32_t diff = (int32_t) (cur - *prev);
+	if (diff > 0)
+		*dest = diff;
+	else
+		*dest = 0;
+	*prev = cur;
+	return;
+}
+
+__private_extern__ void
+tcp_report_stats(void)
+{
+	struct nstat_sysinfo_data data;
+	struct sockaddr_in dst;
+	struct sockaddr_in6 dst6;
+	struct rtentry *rt = NULL;
+	static struct tcp_last_report_stats prev;
+	u_int64_t var, uptime;
+
+#define	stat	data.u.tcp_stats
+	if (((uptime = net_uptime()) - tcp_last_report_time) <
+		tcp_report_stats_interval)
+		return;
+
+	tcp_last_report_time = uptime;
+
+	bzero(&data, sizeof(data));
+	data.flags = NSTAT_SYSINFO_TCP_STATS;
+
+	bzero(&dst, sizeof(dst));
+	dst.sin_len = sizeof(dst);
+	dst.sin_family = AF_INET;
+
+	/* ipv4 avg rtt */
+	lck_mtx_lock(rnh_lock);
+	rt = rt_lookup(TRUE, (struct sockaddr *)&dst, NULL,
+		rt_tables[AF_INET], IFSCOPE_NONE);
+	lck_mtx_unlock(rnh_lock);
+	if (rt != NULL) {
+		RT_LOCK(rt);
+		if (rt_primary_default(rt, rt_key(rt)) &&
+			rt->rt_stats != NULL) {
+			stat.ipv4_avgrtt = rt->rt_stats->nstat_avg_rtt;
+		}
+		RT_UNLOCK(rt);
+		rtfree(rt);
+		rt = NULL;
+	}
+
+	/* ipv6 avg rtt */
+	bzero(&dst6, sizeof(dst6));
+	dst6.sin6_len = sizeof(dst6);
+	dst6.sin6_family = AF_INET6;
+
+	lck_mtx_lock(rnh_lock);
+	rt = rt_lookup(TRUE, (struct sockaddr *)&dst6, NULL,
+		rt_tables[AF_INET6], IFSCOPE_NONE);
+	lck_mtx_unlock(rnh_lock);
+	if (rt != NULL) {
+		RT_LOCK(rt);
+		if (rt_primary_default(rt, rt_key(rt)) &&
+			rt->rt_stats != NULL) {
+			stat.ipv6_avgrtt = rt->rt_stats->nstat_avg_rtt;
+		}
+		RT_UNLOCK(rt);
+		rtfree(rt);
+		rt = NULL;
+	}
+
+	/* send packet loss rate, shift by 10 for precision */
+	if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_sndrexmitpack > 0) {
+		var = tcpstat.tcps_sndrexmitpack << 10;
+		stat.send_plr = (var * 100) / tcpstat.tcps_sndpack;
+	}
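+	/*
+	 * For example, 10 retransmissions out of 1000 packets sent give
+	 * send_plr = ((10 << 10) * 100) / 1000 = 1024, i.e. 1 percent
+	 * scaled by 2^10.
+	 */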
+
+	/* recv packet loss rate, shift by 10 for precision */
+	if (tcpstat.tcps_rcvpack > 0 && tcpstat.tcps_recovered_pkts > 0) {
+		var = tcpstat.tcps_recovered_pkts << 10;
+		stat.recv_plr = (var * 100) / tcpstat.tcps_rcvpack;
+	}
+
+	/* RTO after tail loss, shift by 10 for precision */
+	if (tcpstat.tcps_sndrexmitpack > 0
+	    && tcpstat.tcps_tailloss_rto > 0) {
+		var = tcpstat.tcps_tailloss_rto << 10;
+		stat.send_tlrto_rate =
+			(var * 100) / tcpstat.tcps_sndrexmitpack;
+	}
+
+	/* packet reordering */
+	if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_reordered_pkts > 0) {
+		var = tcpstat.tcps_reordered_pkts << 10;
+		stat.send_reorder_rate =
+			(var * 100) / tcpstat.tcps_sndpack;
+	}
+
+	if (tcp_ecn_outbound == 1)
+		stat.ecn_client_enabled = 1;
+	if (tcp_ecn_inbound == 1)
+		stat.ecn_server_enabled = 1;
+	tcp_cumulative_stat(tcpstat.tcps_connattempt,
+	    &prev.tcps_connattempt, &stat.connection_attempts);
+	tcp_cumulative_stat(tcpstat.tcps_accepts,
+	    &prev.tcps_accepts, &stat.connection_accepts);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_client_setup,
+	    &prev.tcps_ecn_client_setup, &stat.ecn_client_setup);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_server_setup,
+	    &prev.tcps_ecn_server_setup, &stat.ecn_server_setup);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_client_success,
+	    &prev.tcps_ecn_client_success, &stat.ecn_client_success);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_server_success,
+	    &prev.tcps_ecn_server_success, &stat.ecn_server_success);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_not_supported,
+	    &prev.tcps_ecn_not_supported, &stat.ecn_not_supported);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_lost_syn,
+	    &prev.tcps_ecn_lost_syn, &stat.ecn_lost_syn);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_lost_synack,
+	    &prev.tcps_ecn_lost_synack, &stat.ecn_lost_synack);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ce,
+	    &prev.tcps_ecn_recv_ce, &stat.ecn_recv_ce);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ece,
+	    &prev.tcps_ecn_recv_ece, &stat.ecn_recv_ece);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_sent_ece,
+	    &prev.tcps_ecn_sent_ece, &stat.ecn_sent_ece);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ce,
+	    &prev.tcps_ecn_conn_recv_ce, &stat.ecn_conn_recv_ce);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ece,
+	    &prev.tcps_ecn_conn_recv_ece, &stat.ecn_conn_recv_ece);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_plnoce,
+	    &prev.tcps_ecn_conn_plnoce, &stat.ecn_conn_plnoce);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_pl_ce,
+	    &prev.tcps_ecn_conn_pl_ce, &stat.ecn_conn_pl_ce);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_nopl_ce,
+	    &prev.tcps_ecn_conn_nopl_ce, &stat.ecn_conn_nopl_ce);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_synloss,
+	    &prev.tcps_ecn_fallback_synloss, &stat.ecn_fallback_synloss);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_reorder,
+	    &prev.tcps_ecn_fallback_reorder, &stat.ecn_fallback_reorder);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_ce,
+	    &prev.tcps_ecn_fallback_ce, &stat.ecn_fallback_ce);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_rcv,
+	    &prev.tcps_tfo_syn_data_rcv, &stat.tfo_syn_data_rcv);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req_rcv,
+	    &prev.tcps_tfo_cookie_req_rcv, &stat.tfo_cookie_req_rcv);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_sent,
+	    &prev.tcps_tfo_cookie_sent, &stat.tfo_cookie_sent);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_invalid,
+	    &prev.tcps_tfo_cookie_invalid, &stat.tfo_cookie_invalid);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req,
+	    &prev.tcps_tfo_cookie_req, &stat.tfo_cookie_req);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_rcv,
+	    &prev.tcps_tfo_cookie_rcv, &stat.tfo_cookie_rcv);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_sent,
+	    &prev.tcps_tfo_syn_data_sent, &stat.tfo_syn_data_sent);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_acked,
+	    &prev.tcps_tfo_syn_data_acked, &stat.tfo_syn_data_acked);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_loss,
+	    &prev.tcps_tfo_syn_loss, &stat.tfo_syn_loss);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_blackhole,
+	    &prev.tcps_tfo_blackhole, &stat.tfo_blackhole);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_wrong,
+	    &prev.tcps_tfo_cookie_wrong, &stat.tfo_cookie_wrong);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_no_cookie_rcv,
+	    &prev.tcps_tfo_no_cookie_rcv, &stat.tfo_no_cookie_rcv);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_heuristics_disable,
+	    &prev.tcps_tfo_heuristics_disable, &stat.tfo_heuristics_disable);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_sndblackhole,
+	    &prev.tcps_tfo_sndblackhole, &stat.tfo_sndblackhole);
+
+	nstat_sysinfo_send_data(&data);
+
+#undef	stat
+}
+
+void
+tcp_interface_send_probe(u_int16_t probe_if_index)
+{
+	int32_t offset = 0;
+	struct tcptimerlist *listp = &tcp_timer_list;
+
+	/* Make sure TCP clock is up to date */
+	calculate_tcp_clock();
+
+	lck_mtx_lock(listp->mtx);
+	if (listp->probe_if_index > 0) {
+		tcpstat.tcps_probe_if_conflict++;
+		goto done;
+	}
+
+	listp->probe_if_index = probe_if_index;
+	if (listp->running)
+		goto done;
+
+	/*
+	 * Reschedule the timerlist to run within the next 10ms, which is
+	 * the fastest that we can do.
+	 */
+	offset = TCP_TIMER_10MS_QUANTUM;
+	if (listp->scheduled) {
+		int32_t diff;
+		diff = timer_diff(listp->runtime, 0, tcp_now, offset);
+		if (diff <= 0) {
+			/* The timer will fire sooner than what's needed */
+			goto done;
+		}
+	}
+	listp->mode = TCP_TIMERLIST_10MS_MODE;
+	listp->idleruns = 0;
+
+	tcp_sched_timerlist(offset);
+
+done:
+	lck_mtx_unlock(listp->mtx);
+	return;
+}
+
+/*
+ * Enable read probes on this connection, if:
+ * - it is in established state
+ * - doesn't have any data outstanding
+ * - the outgoing ifp matches
+ * - we have not already sent any read probes
+ */
+static void
+tcp_enable_read_probe(struct tcpcb *tp, struct ifnet *ifp)
+{
+	if (tp->t_state == TCPS_ESTABLISHED &&
+	    tp->snd_max == tp->snd_una &&
+	    tp->t_inpcb->inp_last_outifp == ifp &&
+	    !(tp->t_flagsext & TF_DETECT_READSTALL) &&
+	    tp->t_rtimo_probes == 0) {
+		tp->t_flagsext |= TF_DETECT_READSTALL;
+		tp->t_rtimo_probes = 0;
+		tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
+		    TCP_TIMER_10MS_QUANTUM);
+		if (tp->tentry.index == TCPT_NONE) {
+			tp->tentry.index = TCPT_KEEP;
+			tp->tentry.runtime = tcp_now +
+			    TCP_TIMER_10MS_QUANTUM;
+		} else {
+			int32_t diff = 0;
+
+			/* Reset runtime to be within the next 10ms */
+			diff = timer_diff(tp->tentry.runtime, 0,
+			    tcp_now, TCP_TIMER_10MS_QUANTUM);
+			if (diff > 0) {
+				tp->tentry.index = TCPT_KEEP;
+				tp->tentry.runtime = tcp_now +
+				    TCP_TIMER_10MS_QUANTUM;
+				if (tp->tentry.runtime == 0)
+					tp->tentry.runtime++;
+			}
+		}
+	}
+}
+
+/*
+ * Disable read probe and reset the keep alive timer
+ */
+static void
+tcp_disable_read_probe(struct tcpcb *tp)
+{
+	if (tp->t_adaptive_rtimo == 0 &&
+	    ((tp->t_flagsext & TF_DETECT_READSTALL) ||
+	    tp->t_rtimo_probes > 0)) {
+		tcp_keepalive_reset(tp);
+	}
+}
+
+/*
+ * Reschedule the tcp timerlist in the next 10ms to re-enable read/write
+ * probes on connections going over a particular interface.
+ */
+void
+tcp_probe_connectivity(struct ifnet *ifp, u_int32_t enable)
+{
+	int32_t offset;
+	struct tcptimerlist *listp = &tcp_timer_list;
+	struct inpcbinfo *pcbinfo = &tcbinfo;
+	struct inpcb *inp, *nxt;
+
+	if (ifp == NULL)
+		return;
+
+	/* update clock */
+	calculate_tcp_clock();
+
+	/*
+	 * Enable or disable read probes on all established connections
+	 * that are using this interface.
+	 */
+	lck_rw_lock_shared(pcbinfo->ipi_lock);
+
+	LIST_FOREACH_SAFE(inp, pcbinfo->ipi_listhead, inp_list, nxt) {
+		struct tcpcb *tp = NULL;
+		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) ==
+		    WNT_STOPUSING)
+			continue;
+
+		/* Acquire lock to look at the state of the connection */
+		tcp_lock(inp->inp_socket, 1, 0);
+
+		/* Release the want count */
+		if (inp->inp_ppcb == NULL ||
+		    (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING)) {
+			tcp_unlock(inp->inp_socket, 1, 0);
+			continue;
+		}
+		tp = intotcpcb(inp);
+		if (enable)
+			tcp_enable_read_probe(tp, ifp);
+		else
+			tcp_disable_read_probe(tp);
+
+		tcp_unlock(inp->inp_socket, 1, 0);
+	}
+	lck_rw_done(pcbinfo->ipi_lock);
+
+	lck_mtx_lock(listp->mtx);
+	if (listp->running) {
+		listp->pref_mode |= TCP_TIMERLIST_10MS_MODE;
+		goto done;
+	}
+
+	/* Reschedule within the next 10ms */
+	offset = TCP_TIMER_10MS_QUANTUM;
+	if (listp->scheduled) {
+		int32_t diff;
+		diff = timer_diff(listp->runtime, 0, tcp_now, offset);
+		if (diff <= 0) {
+			/* The timer will fire sooner than what's needed */
+			goto done;
+		}
+	}
+	listp->mode = TCP_TIMERLIST_10MS_MODE;
+	listp->idleruns = 0;
+
+	tcp_sched_timerlist(offset);
+done:
+	lck_mtx_unlock(listp->mtx);
+	return;
+}
+
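+/*
+ * Apply the cellular driver's MSS recommendation: clamp t_maxopd to
+ * the recommended value while caching the previous one, and restore
+ * the cached value once the recommendation is lifted.
+ */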
+inline void
+tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp)
+{
+	struct if_cellular_status_v1 *ifsr;
+	u_int32_t optlen;
+	ifsr = &ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
+	if (ifsr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
+		optlen = tp->t_maxopd - tp->t_maxseg;
+
+		if (ifsr->mss_recommended ==
+		    IF_CELL_UL_MSS_RECOMMENDED_NONE &&
+		    tp->t_cached_maxopd > 0 &&
+		    tp->t_maxopd < tp->t_cached_maxopd) {
+			tp->t_maxopd = tp->t_cached_maxopd;
+			tcpstat.tcps_mss_to_default++;
+		} else if (ifsr->mss_recommended ==
+		    IF_CELL_UL_MSS_RECOMMENDED_MEDIUM &&
+		    tp->t_maxopd > tcp_mss_rec_medium) {
+			tp->t_cached_maxopd = tp->t_maxopd;
+			tp->t_maxopd = tcp_mss_rec_medium;
+			tcpstat.tcps_mss_to_medium++;
+		} else if (ifsr->mss_recommended ==
+		    IF_CELL_UL_MSS_RECOMMENDED_LOW &&
+		    tp->t_maxopd > tcp_mss_rec_low) {
+			tp->t_cached_maxopd = tp->t_maxopd;
+			tp->t_maxopd = tcp_mss_rec_low;
+			tcpstat.tcps_mss_to_low++;
+		}
+		tp->t_maxseg = tp->t_maxopd - optlen;
+
+		/*
+		 * Clear the cached value if it is the same as the
+		 * current one.
+		 */
+		if (tp->t_maxopd == tp->t_cached_maxopd)
+			tp->t_cached_maxopd = 0;
+	}
+}
+
+void
+tcp_update_mss_locked(struct socket *so, struct ifnet *ifp)
+{
+	struct inpcb *inp = sotoinpcb(so);
+	struct tcpcb *tp = intotcpcb(inp);
+
+	if (ifp == NULL && inp->inp_last_outifp == NULL)
+		return;
+
+	if (ifp == NULL)
+		ifp = inp->inp_last_outifp;
+
+	if (!IFNET_IS_CELLULAR(ifp)) {
+		/*
+		 * This optimization is implemented for cellular
+		 * networks only
+		 */
+		return;
+	}
+	if (tp->t_state <= TCPS_CLOSE_WAIT) {
+		/*
+		 * If the connection is currently doing or has done PMTU
+		 * blackhole detection, do not change the MSS
+		 */
+		if (tp->t_flags & TF_BLACKHOLE)
+			return;
+		if (ifp->if_link_status == NULL)
+			return;
+		tcp_update_mss_core(tp, ifp);
+	}
+}
+
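+/*
+ * Periodic inpcb timer callout for TCP: walk the list of pcbs to check
+ * the extended background-idle time of each socket and, when
+ * INPCBINFO_UPDATE_MSS is set, refresh the recommended MSS.
+ */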
+void
+tcp_itimer(struct inpcbinfo *ipi)
+{
+	struct inpcb *inp, *nxt;
+
+	if (lck_rw_try_lock_exclusive(ipi->ipi_lock) == FALSE) {
+		if (tcp_itimer_done == TRUE) {
+			tcp_itimer_done = FALSE;
+			atomic_add_32(&ipi->ipi_timer_req.intimer_fast, 1);
+			return;
+		}
+		/* The exclusive try-lock failed; wait for the lock */
+		lck_rw_lock_exclusive(ipi->ipi_lock);
+	}
+	tcp_itimer_done = TRUE;
+
+	LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
+		struct socket *so;
+
+		if (inp->inp_ppcb == NULL ||
+		    in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING)
+			continue;
+		so = inp->inp_socket;
+		tcp_lock(so, 1, 0);
+		if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
+			tcp_unlock(so, 1, 0);
+			continue;
+		}
+		so_check_extended_bk_idle_time(so);
+		if (ipi->ipi_flags & INPCBINFO_UPDATE_MSS) {
+			tcp_update_mss_locked(so, NULL);
+		}
+		tcp_unlock(so, 1, 0);
+	}
+
+	ipi->ipi_flags &= ~INPCBINFO_UPDATE_MSS;
+	lck_rw_done(ipi->ipi_lock);
+}