X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/6601e61aa18bf4f09af135ff61fc7f4771d23b06..813fb2f63a553c957e917ede5f119b021d6ce391:/bsd/netinet/tcp_timer.c diff --git a/bsd/netinet/tcp_timer.c b/bsd/netinet/tcp_timer.c index effeffde5..5c87a2d18 100644 --- a/bsd/netinet/tcp_timer.c +++ b/bsd/netinet/tcp_timer.c @@ -1,23 +1,29 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2017 Apple Inc. All rights reserved. * - * @APPLE_LICENSE_HEADER_START@ - * - * The contents of this file constitute Original Code as defined in and - * are subject to the Apple Public Source License Version 1.1 (the - * "License"). You may not use this file except in compliance with the - * License. Please obtain a copy of the License at - * http://www.apple.com/publicsource and read it before using this file. - * - * This Original Code and all software distributed under the License are - * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the - * License for the specific language governing rights and limitations - * under the License. - * - * @APPLE_LICENSE_HEADER_END@ + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995 @@ -64,11 +70,16 @@ #include #include #include +#include +#include +#include #include - #include /* before tcp_seq.h, for tcp_random18() */ +#include #include +#include +#include #include #include @@ -78,39 +89,52 @@ #endif #include #include +#include #include #include #include #include +#include +#if INET6 +#include +#endif #include #if TCPDEBUG #include #endif #include +#include +#include -#define DBG_FNC_TCP_FAST NETDBG_CODE(DBG_NETTCP, (5 << 8)) -#define DBG_FNC_TCP_SLOW NETDBG_CODE(DBG_NETTCP, (5 << 8) | 1) +/* Max number of times a stretch ack can be delayed on a connection */ +#define TCP_STRETCHACK_DELAY_THRESHOLD 5 /* - * NOTE - WARNING - * - * - * - * + * If the host processor has been sleeping for too long, this is the threshold + * used to avoid sending stale retransmissions. 
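+ *
+ * Illustrative sketch (not part of the change): the retransmit handler
+ * below computes how long the host slept since the last timeout,
+ *
+ *   last_sleep_ms = accsleep_ms - tp->t_accsleep_ms;
+ *   if (tp->t_rxtshift > 4 && last_sleep_ms >= TCP_SLEEP_TOO_LONG)
+ *           drop the connection instead of retransmitting;
+ *
+ * so a machine that slept for, say, 15 minutes does not wake up and
+ * send stale retransmissions to a peer that has long given up.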
*/ +#define TCP_SLEEP_TOO_LONG (10 * 60 * 1000) /* 10 minutes in ms */ + +/* tcp timer list */ +struct tcptimerlist tcp_timer_list; + +/* List of pcbs in timewait state, protected by tcbinfo's ipi_lock */ +struct tcptailq tcp_tw_tailq; + static int sysctl_msec_to_ticks SYSCTL_HANDLER_ARGS { +#pragma unused(arg1, arg2) int error, s, tt; tt = *(int *)oidp->oid_arg1; - s = tt * 1000 / hz; + s = tt * 1000 / TCP_RETRANSHZ;; error = sysctl_handle_int(oidp, &s, 0, req); if (error || !req->newptr) return (error); - tt = s * hz / 1000; + tt = s * TCP_RETRANSHZ / 1000; if (tt < 1) return (EINVAL); @@ -119,331 +143,552 @@ sysctl_msec_to_ticks SYSCTL_HANDLER_ARGS } int tcp_keepinit; -SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit, CTLTYPE_INT|CTLFLAG_RW, +SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_keepinit, 0, sysctl_msec_to_ticks, "I", ""); int tcp_keepidle; -SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle, CTLTYPE_INT|CTLFLAG_RW, +SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_keepidle, 0, sysctl_msec_to_ticks, "I", ""); int tcp_keepintvl; -SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl, CTLTYPE_INT|CTLFLAG_RW, +SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", ""); -int tcp_delacktime; -SYSCTL_PROC(_net_inet_tcp, TCPCTL_DELACKTIME, delacktime, - CTLTYPE_INT|CTLFLAG_RW, &tcp_delacktime, 0, sysctl_msec_to_ticks, "I", - "Time before a delayed ACK is sent"); - +int tcp_keepcnt; +SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &tcp_keepcnt, 0, "number of times to repeat keepalive"); + int tcp_msl; -SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl, CTLTYPE_INT|CTLFLAG_RW, +SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime"); +/* + * Avoid DoS via TCP Robustness in Persist Condition + * (see http://www.ietf.org/id/draft-ananth-tcpm-persist-02.txt) + * by allowing a system wide maximum persistence timeout value when in + * Zero Window Probe mode. + * + * Expressed in milliseconds to be consistent without timeout related + * values, the TCP socket option is in seconds. + */ +u_int32_t tcp_max_persist_timeout = 0; +SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &tcp_max_persist_timeout, 0, sysctl_msec_to_ticks, "I", + "Maximum persistence timeout for ZWP"); + static int always_keepalive = 0; -SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_RW, +SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, + CTLFLAG_RW | CTLFLAG_LOCKED, &always_keepalive , 0, "Assume SO_KEEPALIVE on all TCP connections"); -static int tcp_keepcnt = TCPTV_KEEPCNT; - /* max idle probes */ -int tcp_maxpersistidle; - /* max idle time in persist */ -int tcp_maxidle; - -struct inpcbhead time_wait_slots[N_TIME_WAIT_SLOTS]; -int cur_tw_slot = 0; - -u_long *delack_bitmask; - +/* + * This parameter determines how long the timer list will stay in fast or + * quick mode even though all connections are idle. In this state, the + * timer will run more frequently anticipating new data. 
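+ *
+ * For example: if the list last ran in 100ms mode and every remaining
+ * timer only needs the slow 500ms mode, tcp_run_timerlist() keeps
+ * re-arming at the faster quantum and increments idleruns; only once
+ * idleruns reaches this limit does it fall back to the 500ms quantum.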
+ */ +int timer_fastmode_idlemax = TCP_FASTMODE_IDLERUN_MAX; +SYSCTL_INT(_net_inet_tcp, OID_AUTO, timer_fastmode_idlemax, + CTLFLAG_RW | CTLFLAG_LOCKED, + &timer_fastmode_idlemax, 0, "Maximum idle generations in fast mode"); -void add_to_time_wait_locked(tp) - struct tcpcb *tp; -{ - int tw_slot; +/* + * See tcp_syn_backoff[] for interval values between SYN retransmits; + * the value set below defines the number of retransmits, before we + * disable the timestamp and window scaling options during subsequent + * SYN retransmits. Setting it to 0 disables the dropping off of those + * two options. + */ +static int tcp_broken_peer_syn_rxmit_thres = 10; +SYSCTL_INT(_net_inet_tcp, OID_AUTO, broken_peer_syn_rexmit_thres, + CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_broken_peer_syn_rxmit_thres, 0, + "Number of retransmitted SYNs before disabling RFC 1323 " + "options on local connections"); + +static int tcp_timer_advanced = 0; +SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_timer_advanced, + CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_timer_advanced, 0, + "Number of times one of the timers was advanced"); + +static int tcp_resched_timerlist = 0; +SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_resched_timerlist, + CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_resched_timerlist, 0, + "Number of times timer list was rescheduled as part of processing a packet"); + +int tcp_pmtud_black_hole_detect = 1 ; +SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_detection, + CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_pmtud_black_hole_detect, 0, + "Path MTU Discovery Black Hole Detection"); + +int tcp_pmtud_black_hole_mss = 1200 ; +SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_mss, + CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_pmtud_black_hole_mss, 0, + "Path MTU Discovery Black Hole Detection lowered MSS"); + +static u_int32_t tcp_mss_rec_medium = 1200; +static u_int32_t tcp_mss_rec_low = 512; + +#define TCP_REPORT_STATS_INTERVAL 43200 /* 12 hours, in seconds */ +int tcp_report_stats_interval = TCP_REPORT_STATS_INTERVAL; + +/* performed garbage collection of "used" sockets */ +static boolean_t tcp_gc_done = FALSE; + +/* max idle probes */ +int tcp_maxpersistidle; - /* pcb list should be locked when we get here */ -#if 0 - lck_mtx_assert(tp->t_inpcb->inpcb_mtx, LCK_MTX_ASSERT_OWNED); -#endif +/* + * TCP delack timer is set to 100 ms. Since the processing of timer list + * in fast mode will happen no faster than 100 ms, the delayed ack timer + * will fire some where between 100 and 200 ms. 
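+ *
+ * That is, tcp_delack below is TCP_RETRANSHZ / 10 ticks (100 ms); a
+ * delayed ack armed just after a fast-mode sweep waits one full
+ * quantum plus whatever remains of the current one, hence the
+ * 100-200 ms bound.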
+ */ +int tcp_delack = TCP_RETRANSHZ / 10; - LIST_REMOVE(tp->t_inpcb, inp_list); +#if MPTCP +/* + * MP_JOIN retransmission of 3rd ACK will be every 500 msecs without backoff + */ +int tcp_jack_rxmt = TCP_RETRANSHZ / 2; +#endif /* MPTCP */ - if (tp->t_timer[TCPT_2MSL] == 0) - tp->t_timer[TCPT_2MSL] = 1; +static boolean_t tcp_itimer_done = FALSE; - tp->t_rcvtime += tp->t_timer[TCPT_2MSL] & (N_TIME_WAIT_SLOTS - 1); - tw_slot = (tp->t_timer[TCPT_2MSL] & (N_TIME_WAIT_SLOTS - 1)) + cur_tw_slot; - if (tw_slot >= N_TIME_WAIT_SLOTS) - tw_slot -= N_TIME_WAIT_SLOTS; +static void tcp_remove_timer(struct tcpcb *tp); +static void tcp_sched_timerlist(uint32_t offset); +static u_int32_t tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *mode, + u_int16_t probe_if_index); +static void tcp_sched_timers(struct tcpcb *tp); +static inline void tcp_set_lotimer_index(struct tcpcb *); +__private_extern__ void tcp_remove_from_time_wait(struct inpcb *inp); +static inline void tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp); +__private_extern__ void tcp_report_stats(void); - LIST_INSERT_HEAD(&time_wait_slots[tw_slot], tp->t_inpcb, inp_list); -} +static u_int64_t tcp_last_report_time; -void add_to_time_wait(tp) - struct tcpcb *tp; +/* + * Structure to store previously reported stats so that we can send + * incremental changes in each report interval. + */ +struct tcp_last_report_stats { + u_int32_t tcps_connattempt; + u_int32_t tcps_accepts; + u_int32_t tcps_ecn_client_setup; + u_int32_t tcps_ecn_server_setup; + u_int32_t tcps_ecn_client_success; + u_int32_t tcps_ecn_server_success; + u_int32_t tcps_ecn_not_supported; + u_int32_t tcps_ecn_lost_syn; + u_int32_t tcps_ecn_lost_synack; + u_int32_t tcps_ecn_recv_ce; + u_int32_t tcps_ecn_recv_ece; + u_int32_t tcps_ecn_sent_ece; + u_int32_t tcps_ecn_conn_recv_ce; + u_int32_t tcps_ecn_conn_recv_ece; + u_int32_t tcps_ecn_conn_plnoce; + u_int32_t tcps_ecn_conn_pl_ce; + u_int32_t tcps_ecn_conn_nopl_ce; + u_int32_t tcps_ecn_fallback_synloss; + u_int32_t tcps_ecn_fallback_reorder; + u_int32_t tcps_ecn_fallback_ce; + + /* TFO-related statistics */ + u_int32_t tcps_tfo_syn_data_rcv; + u_int32_t tcps_tfo_cookie_req_rcv; + u_int32_t tcps_tfo_cookie_sent; + u_int32_t tcps_tfo_cookie_invalid; + u_int32_t tcps_tfo_cookie_req; + u_int32_t tcps_tfo_cookie_rcv; + u_int32_t tcps_tfo_syn_data_sent; + u_int32_t tcps_tfo_syn_data_acked; + u_int32_t tcps_tfo_syn_loss; + u_int32_t tcps_tfo_blackhole; + u_int32_t tcps_tfo_cookie_wrong; + u_int32_t tcps_tfo_no_cookie_rcv; + u_int32_t tcps_tfo_heuristics_disable; + u_int32_t tcps_tfo_sndblackhole; +}; + + +/* Returns true if the timer is on the timer list */ +#define TIMER_IS_ON_LIST(tp) ((tp)->t_flags & TF_TIMER_ONLIST) + +/* Run the TCP timerlist atleast once every hour */ +#define TCP_TIMERLIST_MAX_OFFSET (60 * 60 * TCP_RETRANSHZ) + + +static void add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay); +static boolean_t tcp_garbage_collect(struct inpcb *, int); + +#define TIMERENTRY_TO_TP(te) ((struct tcpcb *)((uintptr_t)te - offsetof(struct tcpcb, tentry.le.le_next))) + +#define VERIFY_NEXT_LINK(elm,field) do { \ + if (LIST_NEXT((elm),field) != NULL && \ + LIST_NEXT((elm),field)->field.le_prev != \ + &((elm)->field.le_next)) \ + panic("Bad link elm %p next->prev != elm", (elm)); \ +} while(0) + +#define VERIFY_PREV_LINK(elm,field) do { \ + if (*(elm)->field.le_prev != (elm)) \ + panic("Bad link elm %p prev->next != elm", (elm)); \ +} while(0) + +#define TCP_SET_TIMER_MODE(mode, i) do { \ + if (IS_TIMER_HZ_10MS(i)) \ + (mode) |= 
TCP_TIMERLIST_10MS_MODE; \ + else if (IS_TIMER_HZ_100MS(i)) \ + (mode) |= TCP_TIMERLIST_100MS_MODE; \ + else \ + (mode) |= TCP_TIMERLIST_500MS_MODE; \ +} while(0) + +#if (DEVELOPMENT || DEBUG) +SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mss_rec_medium, + CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_mss_rec_medium, 0, + "Medium MSS based on recommendation in link status report"); +SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mss_rec_low, + CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_mss_rec_low, 0, + "Low MSS based on recommendation in link status report"); + +static int32_t tcp_change_mss_recommended = 0; +static int +sysctl_change_mss_recommended SYSCTL_HANDLER_ARGS { - struct inpcbinfo *pcbinfo = &tcbinfo; - - if (!lck_rw_try_lock_exclusive(pcbinfo->mtx)) { - tcp_unlock(tp->t_inpcb->inp_socket, 0, 0); - lck_rw_lock_exclusive(pcbinfo->mtx); - tcp_lock(tp->t_inpcb->inp_socket, 0, 0); +#pragma unused(oidp, arg1, arg2) + int i, err = 0, changed = 0; + struct ifnet *ifp; + struct if_link_status ifsr; + struct if_cellular_status_v1 *new_cell_sr; + err = sysctl_io_number(req, tcp_change_mss_recommended, + sizeof (int32_t), &i, &changed); + if (changed) { + ifnet_head_lock_shared(); + TAILQ_FOREACH(ifp, &ifnet_head, if_link) { + if (IFNET_IS_CELLULAR(ifp)) { + bzero(&ifsr, sizeof (ifsr)); + new_cell_sr = &ifsr.ifsr_u.ifsr_cell.if_cell_u.if_status_v1; + ifsr.ifsr_version = IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION; + ifsr.ifsr_len = sizeof(*new_cell_sr); + + /* Set MSS recommended */ + new_cell_sr->valid_bitmask |= IF_CELL_UL_MSS_RECOMMENDED_VALID; + new_cell_sr->mss_recommended = i; + err = ifnet_link_status_report(ifp, new_cell_sr, sizeof (new_cell_sr)); + if (err == 0) { + tcp_change_mss_recommended = i; + } else { + break; + } + } + } + ifnet_head_done(); } - add_to_time_wait_locked(tp); - lck_rw_done(pcbinfo->mtx); + return (err); } +SYSCTL_PROC(_net_inet_tcp, OID_AUTO, change_mss_recommended, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_change_mss_recommended, + 0, sysctl_change_mss_recommended, "IU", "Change MSS recommended"); +SYSCTL_INT(_net_inet_tcp, OID_AUTO, report_stats_interval, + CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_report_stats_interval, 0, + "Report stats interval"); +#endif /* (DEVELOPMENT || DEBUG) */ +/* + * Macro to compare two timers. If there is a reset of the sign bit, + * it is safe to assume that the timer has wrapped around. By doing + * signed comparision, we take care of wrap around such that the value + * with the sign bit reset is actually ahead of the other. + */ +inline int32_t +timer_diff(uint32_t t1, uint32_t toff1, uint32_t t2, uint32_t toff2) { + return (int32_t)((t1 + toff1) - (t2 + toff2)); +}; /* - * Fast timeout routine for processing delayed acks + * Add to tcp timewait list, delay is given in milliseconds. 
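+ *
+ * Illustrative call (assumed typical usage, not shown in this hunk):
+ *
+ *   add_to_time_wait(tp, 2 * tcp_msl);
+ *
+ * The delay is folded into an absolute tcp_now-based expiry kept in
+ * t_timer[TCPT_2MSL], which the garbage collector later compares
+ * against tcp_now with TSTMP_GEQ().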
*/ -void -tcp_fasttimo() +static void +add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay) { - struct inpcb *inp, *inpnxt; - register struct tcpcb *tp; + struct inpcbinfo *pcbinfo = &tcbinfo; + struct inpcb *inp = tp->t_inpcb; + uint32_t timer; + + /* pcb list should be locked when we get here */ + lck_rw_assert(pcbinfo->ipi_lock, LCK_RW_ASSERT_EXCLUSIVE); + + /* We may get here multiple times, so check */ + if (!(inp->inp_flags2 & INP2_TIMEWAIT)) { + pcbinfo->ipi_twcount++; + inp->inp_flags2 |= INP2_TIMEWAIT; + + /* Remove from global inp list */ + LIST_REMOVE(inp, inp_list); + } else { + TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry); + } + /* Compute the time at which this socket can be closed */ + timer = tcp_now + delay; - struct inpcbinfo *pcbinfo = &tcbinfo; + /* We will use the TCPT_2MSL timer for tracking this delay */ - int delack_checked = 0, delack_done = 0; + if (TIMER_IS_ON_LIST(tp)) + tcp_remove_timer(tp); + tp->t_timer[TCPT_2MSL] = timer; - KERNEL_DEBUG(DBG_FNC_TCP_FAST | DBG_FUNC_START, 0,0,0,0,0); + TAILQ_INSERT_TAIL(&tcp_tw_tailq, tp, t_twentry); +} - if (tcp_delack_enabled == 0) - return; +void +add_to_time_wait(struct tcpcb *tp, uint32_t delay) +{ + struct inpcbinfo *pcbinfo = &tcbinfo; + if (tp->t_inpcb->inp_socket->so_options & SO_NOWAKEFROMSLEEP) + socket_post_kev_msg_closed(tp->t_inpcb->inp_socket); - lck_rw_lock_shared(pcbinfo->mtx); + /* 19182803: Notify nstat that connection is closing before waiting. */ + nstat_pcb_detach(tp->t_inpcb); - /* Walk the list of valid tcpcbs and send ACKS on the ones with DELACK bit set */ + if (!lck_rw_try_lock_exclusive(pcbinfo->ipi_lock)) { + tcp_unlock(tp->t_inpcb->inp_socket, 0, 0); + lck_rw_lock_exclusive(pcbinfo->ipi_lock); + tcp_lock(tp->t_inpcb->inp_socket, 0, 0); + } + add_to_time_wait_locked(tp, delay); + lck_rw_done(pcbinfo->ipi_lock); - for (inp = tcb.lh_first; inp != NULL; inp = inpnxt) { - inpnxt = inp->inp_list.le_next; - /* NOTE: it's OK to check the tp because the pcb can't be removed while we hold pcbinfo->mtx) */ - if ((tp = (struct tcpcb *)inp->inp_ppcb) && (tp->t_flags & TF_DELACK)) { - if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) - continue; - tcp_lock(inp->inp_socket, 1, 0); - if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) { - tcp_unlock(inp->inp_socket, 1, 0); - continue; - } - if (tp->t_flags & TF_DELACK) { - delack_done++; - tp->t_flags &= ~TF_DELACK; - tp->t_flags |= TF_ACKNOW; - tcpstat.tcps_delack++; - (void) tcp_output(tp); - } - tcp_unlock(inp->inp_socket, 1, 0); - } - } - KERNEL_DEBUG(DBG_FNC_TCP_FAST | DBG_FUNC_END, delack_checked, delack_done, tcpstat.tcps_delack,0,0); - lck_rw_done(pcbinfo->mtx); + inpcb_gc_sched(pcbinfo, INPCB_TIMER_LAZY); } -/* - * Tcp protocol timeout routine called every 500 ms. - * Updates the timers in all active tcb's and - * causes finite state machine actions if timers expire. - */ +/* If this is on time wait queue, remove it. 
*/ void -tcp_slowtimo() +tcp_remove_from_time_wait(struct inpcb *inp) { - struct inpcb *inp, *inpnxt; - struct tcpcb *tp; - struct socket *so; - int i; -#if TCPDEBUG - int ostate; -#endif -#if KDEBUG - static int tws_checked; -#endif - struct inpcbinfo *pcbinfo = &tcbinfo; - - KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_START, 0,0,0,0,0); + struct tcpcb *tp = intotcpcb(inp); + if (inp->inp_flags2 & INP2_TIMEWAIT) + TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry); +} - tcp_maxidle = tcp_keepcnt * tcp_keepintvl; +static boolean_t +tcp_garbage_collect(struct inpcb *inp, int istimewait) +{ + boolean_t active = FALSE; + struct socket *so; + struct tcpcb *tp; - lck_rw_lock_shared(pcbinfo->mtx); + so = inp->inp_socket; + tp = intotcpcb(inp); /* - * Search through tcb's and update active timers. + * Skip if still in use or busy; it would have been more efficient + * if we were to test so_usecount against 0, but this isn't possible + * due to the current implementation of tcp_dropdropablreq() where + * overflow sockets that are eligible for garbage collection have + * their usecounts set to 1. */ - for (inp = tcb.lh_first; inp != NULL; inp = inpnxt) { - inpnxt = inp->inp_list.le_next; - - so = inp->inp_socket; + if (!lck_mtx_try_lock_spin(&inp->inpcb_mtx)) + return (TRUE); + + /* Check again under the lock */ + if (so->so_usecount > 1) { + if (inp->inp_wantcnt == WNT_STOPUSING) + active = TRUE; + lck_mtx_unlock(&inp->inpcb_mtx); + return (active); + } - if (so == &tcbinfo.nat_dummy_socket) - continue; + if (istimewait && + TSTMP_GEQ(tcp_now, tp->t_timer[TCPT_2MSL]) && + tp->t_state != TCPS_CLOSED) { + /* Become a regular mutex */ + lck_mtx_convert_spin(&inp->inpcb_mtx); + tcp_close(tp); + } - if (in_pcb_checkstate(inp, WNT_ACQUIRE,0) == WNT_STOPUSING) - continue; + /* + * Overflowed socket dropped from the listening queue? Do this + * only if we are called to clean up the time wait slots, since + * tcp_dropdropablreq() considers a socket to have been fully + * dropped after add_to_time_wait() is finished. + * Also handle the case of connections getting closed by the peer + * while in the queue as seen with rdar://6422317 + * + */ + if (so->so_usecount == 1 && + ((istimewait && (so->so_flags & SOF_OVERFLOW)) || + ((tp != NULL) && (tp->t_state == TCPS_CLOSED) && + (so->so_head != NULL) && + ((so->so_state & (SS_INCOMP|SS_CANTSENDMORE|SS_CANTRCVMORE)) == + (SS_INCOMP|SS_CANTSENDMORE|SS_CANTRCVMORE))))) { + + if (inp->inp_state != INPCB_STATE_DEAD) { + /* Become a regular mutex */ + lck_mtx_convert_spin(&inp->inpcb_mtx); +#if INET6 + if (SOCK_CHECK_DOM(so, PF_INET6)) + in6_pcbdetach(inp); + else +#endif /* INET6 */ + in_pcbdetach(inp); + } + VERIFY(so->so_usecount > 0); + so->so_usecount--; + if (inp->inp_wantcnt == WNT_STOPUSING) + active = TRUE; + lck_mtx_unlock(&inp->inpcb_mtx); + return (active); + } else if (inp->inp_wantcnt != WNT_STOPUSING) { + lck_mtx_unlock(&inp->inpcb_mtx); + return (FALSE); + } - tcp_lock(so, 1, 0); + /* + * We get here because the PCB is no longer searchable + * (WNT_STOPUSING); detach (if needed) and dispose if it is dead + * (usecount is 0). This covers all cases, including overflow + * sockets and those that are considered as "embryonic", + * i.e. created by sonewconn() in TCP input path, and have + * not yet been committed. For the former, we reduce the usecount + * to 0 as done by the code above. For the latter, the usecount + * would have reduced to 0 as part calling soabort() when the + * socket is dropped at the end of tcp_input(). 
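+ *
+ * In short (illustrative summary): overflow and peer-closed sockets
+ * enter this function with so_usecount == 1 and are decremented in
+ * the branch above; embryonic sockets reach 0 through soabort(); in
+ * both cases a WNT_STOPUSING pcb with usecount 0 is detached here if
+ * needed and then disposed.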
+ */ + if (so->so_usecount == 0) { + DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp, + struct tcpcb *, tp, int32_t, TCPS_CLOSED); + /* Become a regular mutex */ + lck_mtx_convert_spin(&inp->inpcb_mtx); - if ((in_pcb_checkstate(inp, WNT_RELEASE,1) == WNT_STOPUSING) && so->so_usecount == 1) { - tcp_unlock(so, 1, 0); - continue; + /* + * If this tp still happens to be on the timer list, + * take it out + */ + if (TIMER_IS_ON_LIST(tp)) { + tcp_remove_timer(tp); } - tp = intotcpcb(inp); - if (tp == 0 || tp->t_state == TCPS_LISTEN) { - tcp_unlock(so, 1, 0); - continue; + + if (inp->inp_state != INPCB_STATE_DEAD) { +#if INET6 + if (SOCK_CHECK_DOM(so, PF_INET6)) + in6_pcbdetach(inp); + else +#endif /* INET6 */ + in_pcbdetach(inp); } + in_pcbdispose(inp); + return (FALSE); + } - for (i = 0; i < TCPT_NTIMERS; i++) { - if (tp->t_timer[i] && --tp->t_timer[i] == 0) { + lck_mtx_unlock(&inp->inpcb_mtx); + return (TRUE); +} + +/* + * TCP garbage collector callback (inpcb_timer_func_t). + * + * Returns the number of pcbs that will need to be gc-ed soon, + * returnining > 0 will keep timer active. + */ +void +tcp_gc(struct inpcbinfo *ipi) +{ + struct inpcb *inp, *nxt; + struct tcpcb *tw_tp, *tw_ntp; #if TCPDEBUG - ostate = tp->t_state; + int ostate; #endif - tp = tcp_timers(tp, i); - if (tp == NULL) - goto tpgone; -#if TCPDEBUG - if (tp->t_inpcb->inp_socket->so_options - & SO_DEBUG) - tcp_trace(TA_USER, ostate, tp, - (void *)0, - (struct tcphdr *)0, - PRU_SLOWTIMO); +#if KDEBUG + static int tws_checked = 0; #endif - } - } - tp->t_rcvtime++; - tp->t_starttime++; - if (tp->t_rtttime) - tp->t_rtttime++; -tpgone: - tcp_unlock(so, 1, 0); - } -#if KDEBUG - tws_checked = 0; -#endif - KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_NONE, tws_checked,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_START, 0, 0, 0, 0, 0); /* - * Process the items in the current time-wait slot + * Update tcp_now here as it may get used while + * processing the slow timer. 
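+ *
+ * Without this refresh, the TSTMP_GEQ(tcp_now, t_timer[TCPT_2MSL])
+ * checks on the time-wait queue below could compare expiry times
+ * against a stale clock and delay reclaiming those sockets.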
*/ + calculate_tcp_clock(); - for (inp = time_wait_slots[cur_tw_slot].lh_first; inp; inp = inpnxt) - { - inpnxt = inp->inp_list.le_next; -#if KDEBUG - tws_checked++; -#endif - - if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) - continue; - - tcp_lock(inp->inp_socket, 1, 0); - - if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) - goto twunlock; + /* + * Garbage collect socket/tcpcb: We need to acquire the list lock + * exclusively to do this + */ - tp = intotcpcb(inp); - if (tp == NULL) { /* tp already closed, remove from list */ -#if TEMPDEBUG - printf("tcp_slowtimo: tp is null in time-wait slot!\n"); -#endif - goto twunlock; + if (lck_rw_try_lock_exclusive(ipi->ipi_lock) == FALSE) { + /* don't sweat it this time; cleanup was done last time */ + if (tcp_gc_done == TRUE) { + tcp_gc_done = FALSE; + KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END, + tws_checked, cur_tw_slot, 0, 0, 0); + /* Lock upgrade failed, give up this round */ + atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1); + return; } - if (tp->t_timer[TCPT_2MSL] >= N_TIME_WAIT_SLOTS) { - tp->t_timer[TCPT_2MSL] -= N_TIME_WAIT_SLOTS; - tp->t_rcvtime += N_TIME_WAIT_SLOTS; - } - else - tp->t_timer[TCPT_2MSL] = 0; - - if (tp->t_timer[TCPT_2MSL] == 0) - tp = tcp_timers(tp, TCPT_2MSL); /* tp can be returned null if tcp_close is called */ -twunlock: - tcp_unlock(inp->inp_socket, 1, 0); + /* Upgrade failed, lost lock now take it again exclusive */ + lck_rw_lock_exclusive(ipi->ipi_lock); } + tcp_gc_done = TRUE; - if (lck_rw_lock_shared_to_exclusive(pcbinfo->mtx) != 0) - lck_rw_lock_exclusive(pcbinfo->mtx); /* Upgrade failed, lost lock no take it again exclusive */ - - - for (inp = tcb.lh_first; inp != NULL; inp = inpnxt) { - inpnxt = inp->inp_list.le_next; - /* Ignore nat/SharedIP dummy pcbs */ - if (inp->inp_socket == &tcbinfo.nat_dummy_socket) - continue; - - if (inp->inp_wantcnt != WNT_STOPUSING) - continue; - - so = inp->inp_socket; - if (!lck_mtx_try_lock(inp->inpcb_mtx)) {/* skip if in use */ -#if TEMPDEBUG - printf("tcp_slowtimo so=%x STOPUSING but locked...\n", so); -#endif - continue; - } + LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) { + if (tcp_garbage_collect(inp, 0)) + atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1); + } - if (so->so_usecount == 0) - in_pcbdispose(inp); - else { - tp = intotcpcb(inp); - /* Check for embryonic socket stuck on listener queue (4023660) */ - if ((so->so_usecount == 1) && (tp->t_state == TCPS_CLOSED) && - (so->so_head != NULL) && (so->so_state & SS_INCOMP)) { - so->so_usecount--; - in_pcbdispose(inp); - } else - lck_mtx_unlock(inp->inpcb_mtx); + /* Now cleanup the time wait ones */ + TAILQ_FOREACH_SAFE(tw_tp, &tcp_tw_tailq, t_twentry, tw_ntp) { + /* + * We check the timestamp here without holding the + * socket lock for better performance. If there are + * any pcbs in time-wait, the timer will get rescheduled. + * Hence some error in this check can be tolerated. + * + * Sometimes a socket on time-wait queue can be closed if + * 2MSL timer expired but the application still has a + * usecount on it. 
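+ *
+ * Worst case (illustrative): a racing close moves the tcpcb to
+ * TCPS_CLOSED right after we read the timer, costing one extra
+ * tcp_garbage_collect() call that re-validates everything under
+ * the inpcb lock.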
+ */ + if (tw_tp->t_state == TCPS_CLOSED || + TSTMP_GEQ(tcp_now, tw_tp->t_timer[TCPT_2MSL])) { + if (tcp_garbage_collect(tw_tp->t_inpcb, 1)) + atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1); } } - /* Now cleanup the time wait ones */ - for (inp = time_wait_slots[cur_tw_slot].lh_first; inp; inp = inpnxt) - { - inpnxt = inp->inp_list.le_next; + /* take into account pcbs that are still in time_wait_slots */ + atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, ipi->ipi_twcount); - if (inp->inp_wantcnt != WNT_STOPUSING) - continue; + lck_rw_done(ipi->ipi_lock); - so = inp->inp_socket; - if (!lck_mtx_try_lock(inp->inpcb_mtx)) /* skip if in use */ - continue; - if (so->so_usecount == 0) - in_pcbdispose(inp); - else { - tp = intotcpcb(inp); - /* Check for embryonic socket stuck on listener queue (4023660) */ - if ((so->so_usecount == 1) && (tp->t_state == TCPS_CLOSED) && - (so->so_head != NULL) && (so->so_state & SS_INCOMP)) { - so->so_usecount--; - in_pcbdispose(inp); - } else - lck_mtx_unlock(inp->inpcb_mtx); - } - } - - tcp_now++; - if (++cur_tw_slot >= N_TIME_WAIT_SLOTS) - cur_tw_slot = 0; - - lck_rw_done(pcbinfo->mtx); - KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END, tws_checked, cur_tw_slot,0,0,0); + /* Clean up the socache while we are here */ + if (so_cache_timer()) + atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1); + + KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END, tws_checked, + cur_tw_slot, 0, 0, 0); + + return; } /* * Cancel all timers for TCP tp. */ void -tcp_canceltimers(tp) - struct tcpcb *tp; +tcp_canceltimers(struct tcpcb *tp) { - register int i; + int i; + tcp_remove_timer(tp); for (i = 0; i < TCPT_NTIMERS; i++) tp->t_timer[i] = 0; + tp->tentry.timer_start = tcp_now; + tp->tentry.index = TCPT_NONE; } int tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] = @@ -454,18 +699,76 @@ int tcp_backoff[TCP_MAXRXTSHIFT + 1] = static int tcp_totbackoff = 511; /* sum of tcp_backoff[] */ +void +tcp_rexmt_save_state(struct tcpcb *tp) +{ + u_int32_t fsize; + if (TSTMP_SUPPORTED(tp)) { + /* + * Since timestamps are supported on the connection, + * we can do recovery as described in rfc 4015. + */ + fsize = tp->snd_max - tp->snd_una; + tp->snd_ssthresh_prev = max(fsize, tp->snd_ssthresh); + tp->snd_recover_prev = tp->snd_recover; + } else { + /* + * Timestamp option is not supported on this connection. + * Record ssthresh and cwnd so they can + * be recovered if this turns out to be a "bad" retransmit. + * A retransmit is considered "bad" if an ACK for this + * segment is received within RTT/2 interval; the assumption + * here is that the ACK was already in flight. See + * "On Estimating End-to-End Network Path Properties" by + * Allman and Paxson for more details. + */ + tp->snd_cwnd_prev = tp->snd_cwnd; + tp->snd_ssthresh_prev = tp->snd_ssthresh; + tp->snd_recover_prev = tp->snd_recover; + if (IN_FASTRECOVERY(tp)) + tp->t_flags |= TF_WASFRECOVERY; + else + tp->t_flags &= ~TF_WASFRECOVERY; + } + tp->t_srtt_prev = (tp->t_srtt >> TCP_RTT_SHIFT) + 2; + tp->t_rttvar_prev = (tp->t_rttvar >> TCP_RTTVAR_SHIFT); + tp->t_flagsext &= ~(TF_RECOMPUTE_RTT); +} + +/* + * Revert to the older segment size if there is an indication that PMTU + * blackhole detection was not needed. 
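+ *
+ * Worked example (numbers are illustrative only): if t_maxopd was
+ * clamped from a saved 1460 down to tcp_pmtud_black_hole_mss (1200)
+ * with optlen == 12 bytes of options, the code below restores
+ * t_maxopd to 1460 and t_maxseg to 1460 - 12 = 1448, then reruns
+ * cwnd_init() since the slow-start flight size depends on the MSS.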
+ */ +void +tcp_pmtud_revert_segment_size(struct tcpcb *tp) +{ + int32_t optlen; + + VERIFY(tp->t_pmtud_saved_maxopd > 0); + tp->t_flags |= TF_PMTUD; + tp->t_flags &= ~TF_BLACKHOLE; + optlen = tp->t_maxopd - tp->t_maxseg; + tp->t_maxopd = tp->t_pmtud_saved_maxopd; + tp->t_maxseg = tp->t_maxopd - optlen; + /* + * Reset the slow-start flight size as it + * may depend on the new MSS + */ + if (CC_ALGO(tp)->cwnd_init != NULL) + CC_ALGO(tp)->cwnd_init(tp); + tp->t_pmtud_start_ts = 0; + tcpstat.tcps_pmtudbh_reverted++; +} + /* * TCP timer processing. */ struct tcpcb * -tcp_timers(tp, timer) - register struct tcpcb *tp; - int timer; +tcp_timers(struct tcpcb *tp, int timer) { - register int rexmt; - struct socket *so_tmp; + int32_t rexmt, optlen = 0, idle_time = 0; + struct socket *so; struct tcptemp *t_template; - #if TCPDEBUG int ostate; #endif @@ -473,25 +776,30 @@ tcp_timers(tp, timer) #if INET6 int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV4) == 0; #endif /* INET6 */ + u_int64_t accsleep_ms; + u_int32_t last_sleep_ms = 0; - so_tmp = tp->t_inpcb->inp_socket; + so = tp->t_inpcb->inp_socket; + idle_time = tcp_now - tp->t_rcvtime; switch (timer) { /* * 2 MSL timeout in shutdown went off. If we're closed but * still waiting for peer to close and connection has been idle - * too long, or if 2MSL time is up from TIME_WAIT, delete connection - * control block. Otherwise, check again in a bit. + * too long, or if 2MSL time is up from TIME_WAIT or FIN_WAIT_2, + * delete connection control block. + * Otherwise, (this case shouldn't happen) check again in a bit + * we keep the socket in the main list in that case. */ case TCPT_2MSL: tcp_free_sackholes(tp); if (tp->t_state != TCPS_TIME_WAIT && - tp->t_rcvtime <= tcp_maxidle) { - tp->t_timer[TCPT_2MSL] = (unsigned long)tcp_keepintvl; - add_to_time_wait_locked(tp); - } - else { + tp->t_state != TCPS_FIN_WAIT_2 && + ((idle_time > 0) && (idle_time < TCP_CONN_MAXIDLE(tp)))) { + tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp, + (u_int32_t)TCP_CONN_KEEPINTVL(tp)); + } else { tp = tcp_close(tp); return(tp); } @@ -503,53 +811,236 @@ tcp_timers(tp, timer) * to a longer retransmit interval and retransmit one segment. */ case TCPT_REXMT: - tcp_free_sackholes(tp); - if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) { + absolutetime_to_nanoseconds(mach_absolutetime_asleep, + &accsleep_ms); + accsleep_ms = accsleep_ms / 1000000UL; + if (accsleep_ms > tp->t_accsleep_ms) + last_sleep_ms = accsleep_ms - tp->t_accsleep_ms; + /* + * Drop a connection in the retransmit timer + * 1. If we have retransmitted more than TCP_MAXRXTSHIFT + * times + * 2. If the time spent in this retransmission episode is + * more than the time limit set with TCP_RXT_CONNDROPTIME + * socket option + * 3. 
If TCP_RXT_FINDROP socket option was set and + * we have already retransmitted the FIN 3 times without + * receiving an ack + */ + if (++tp->t_rxtshift > TCP_MAXRXTSHIFT || + (tp->t_rxt_conndroptime > 0 && tp->t_rxtstart > 0 && + (tcp_now - tp->t_rxtstart) >= tp->t_rxt_conndroptime) || + ((tp->t_flagsext & TF_RXTFINDROP) != 0 && + (tp->t_flags & TF_SENTFIN) != 0 && tp->t_rxtshift >= 4) || + (tp->t_rxtshift > 4 && last_sleep_ms >= TCP_SLEEP_TOO_LONG)) { + if ((tp->t_flagsext & TF_RXTFINDROP) != 0) { + tcpstat.tcps_rxtfindrop++; + } else if (last_sleep_ms >= TCP_SLEEP_TOO_LONG) { + tcpstat.tcps_drop_after_sleep++; + } else { + tcpstat.tcps_timeoutdrop++; + } + if (tp->t_rxtshift >= TCP_MAXRXTSHIFT) { + if (TCP_ECN_ENABLED(tp)) { + INP_INC_IFNET_STAT(tp->t_inpcb, + ecn_on.rxmit_drop); + } else { + INP_INC_IFNET_STAT(tp->t_inpcb, + ecn_off.rxmit_drop); + } + } tp->t_rxtshift = TCP_MAXRXTSHIFT; - tcpstat.tcps_timeoutdrop++; + postevent(so, 0, EV_TIMEOUT); + soevent(so, + (SO_FILT_HINT_LOCKED|SO_FILT_HINT_TIMEOUT)); + + if (TCP_ECN_ENABLED(tp) && + tp->t_state == TCPS_ESTABLISHED) + tcp_heuristic_ecn_droprxmt(tp); + tp = tcp_drop(tp, tp->t_softerror ? tp->t_softerror : ETIMEDOUT); - postevent(so_tmp, 0, EV_TIMEOUT); + break; } - if (tp->t_rxtshift == 1) { + tcpstat.tcps_rexmttimeo++; + tp->t_accsleep_ms = accsleep_ms; + + if (tp->t_rxtshift == 1 && + tp->t_state == TCPS_ESTABLISHED) { + /* Set the time at which retransmission started. */ + tp->t_rxtstart = tcp_now; + /* - * first retransmit; record ssthresh and cwnd so they can - * be recovered if this turns out to be a "bad" retransmit. - * A retransmit is considered "bad" if an ACK for this - * segment is received within RTT/2 interval; the assumption - * here is that the ACK was already in flight. See - * "On Estimating End-to-End Network Path Properties" by - * Allman and Paxson for more details. + * if this is the first retransmit timeout, save + * the state so that we can recover if the timeout + * is spurious. */ - tp->snd_cwnd_prev = tp->snd_cwnd; - tp->snd_ssthresh_prev = tp->snd_ssthresh; - tp->snd_recover_prev = tp->snd_recover; - if (IN_FASTRECOVERY(tp)) - tp->t_flags |= TF_WASFRECOVERY; - else - tp->t_flags &= ~TF_WASFRECOVERY; - tp->t_badrxtwin = tcp_now + (tp->t_srtt >> (TCP_RTT_SHIFT + 1)); + tcp_rexmt_save_state(tp); } - tcpstat.tcps_rexmttimeo++; - if (tp->t_state == TCPS_SYN_SENT) +#if MPTCP + if ((tp->t_rxtshift >= mptcp_fail_thresh) && + (tp->t_state == TCPS_ESTABLISHED) && + (tp->t_mpflags & TMPF_MPTCP_TRUE)) { + mptcp_act_on_txfail(so); + + } +#endif /* MPTCP */ + + if (tp->t_adaptive_wtimo > 0 && + tp->t_rxtshift > tp->t_adaptive_wtimo && + TCPS_HAVEESTABLISHED(tp->t_state)) { + /* Send an event to the application */ + soevent(so, + (SO_FILT_HINT_LOCKED| + SO_FILT_HINT_ADAPTIVE_WTIMO)); + } + + /* + * If this is a retransmit timeout after PTO, the PTO + * was not effective + */ + if (tp->t_flagsext & TF_SENT_TLPROBE) { + tp->t_flagsext &= ~(TF_SENT_TLPROBE); + tcpstat.tcps_rto_after_pto++; + } + + if (tp->t_flagsext & TF_DELAY_RECOVERY) { + /* + * Retransmit timer fired before entering recovery + * on a connection with packet re-ordering. This + * suggests that the reordering metrics computed + * are not accurate. 
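+ *
+ * Resetting t_reorderwin below (and cancelling TCPT_DELAYFR) forces
+ * the reordering window to be rebuilt from fresh measurements before
+ * recovery is delayed again.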
+ */ + tp->t_reorderwin = 0; + tp->t_timer[TCPT_DELAYFR] = 0; + tp->t_flagsext &= ~(TF_DELAY_RECOVERY); + } + + if (tp->t_state == TCPS_SYN_RECEIVED) + tcp_disable_tfo(tp); + + if ((tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) && + !(tp->t_tfo_flags & TFO_F_NO_SNDPROBING) && + ((tp->t_state != TCPS_SYN_SENT && tp->t_rxtshift > 1) || + tp->t_rxtshift > 2)) { + /* + * For regular retransmissions, a first one is being + * done for tail-loss probe. + * Thus, if rxtshift > 1, this means we have sent the segment + * a total of 3 times. + * + * If we are in SYN-SENT state, then there is no tail-loss + * probe thus we have to let rxtshift go up to 3. + */ + tcp_heuristic_tfo_middlebox(tp); + + so->so_error = ENODATA; + sorwakeup(so); + sowwakeup(so); + + tp->t_tfo_stats |= TFO_S_SEND_BLACKHOLE; + tcpstat.tcps_tfo_sndblackhole++; + } + + if (tp->t_state == TCPS_SYN_SENT) { rexmt = TCP_REXMTVAL(tp) * tcp_syn_backoff[tp->t_rxtshift]; - else + tp->t_stat.synrxtshift = tp->t_rxtshift; + + /* When retransmitting, disable TFO */ + if (tfo_enabled(tp) && !(so->so_flags & SOF1_DATA_AUTHENTICATED)) { + tp->t_flagsext &= ~TF_FASTOPEN; + tp->t_tfo_flags |= TFO_F_SYN_LOSS; + } + } else { rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift]; - TCPT_RANGESET(tp->t_rxtcur, rexmt, - tp->t_rttmin, TCPTV_REXMTMAX); - tp->t_timer[TCPT_REXMT] = tp->t_rxtcur; + } + + TCPT_RANGESET(tp->t_rxtcur, rexmt, tp->t_rttmin, TCPTV_REXMTMAX, + TCP_ADD_REXMTSLOP(tp)); + tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur); + + if (INP_WAIT_FOR_IF_FEEDBACK(tp->t_inpcb)) + goto fc_output; + + tcp_free_sackholes(tp); + /* + * Check for potential Path MTU Discovery Black Hole + */ + if (tcp_pmtud_black_hole_detect && + !(tp->t_flagsext & TF_NOBLACKHOLE_DETECTION) && + (tp->t_state == TCPS_ESTABLISHED)) { + if ((tp->t_flags & TF_PMTUD) && + ((tp->t_flags & TF_MAXSEGSNT) + || tp->t_pmtud_lastseg_size > tcp_pmtud_black_hole_mss) && + tp->t_rxtshift == 2) { + /* + * Enter Path MTU Black-hole Detection mechanism: + * - Disable Path MTU Discovery (IP "DF" bit). + * - Reduce MTU to lower value than what we + * negotiated with the peer. + */ + /* Disable Path MTU Discovery for now */ + tp->t_flags &= ~TF_PMTUD; + /* Record that we may have found a black hole */ + tp->t_flags |= TF_BLACKHOLE; + optlen = tp->t_maxopd - tp->t_maxseg; + /* Keep track of previous MSS */ + tp->t_pmtud_saved_maxopd = tp->t_maxopd; + tp->t_pmtud_start_ts = tcp_now; + if (tp->t_pmtud_start_ts == 0) + tp->t_pmtud_start_ts++; + /* Reduce the MSS to intermediary value */ + if (tp->t_maxopd > tcp_pmtud_black_hole_mss) { + tp->t_maxopd = tcp_pmtud_black_hole_mss; + } else { + tp->t_maxopd = /* use the default MSS */ +#if INET6 + isipv6 ? tcp_v6mssdflt : +#endif /* INET6 */ + tcp_mssdflt; + } + tp->t_maxseg = tp->t_maxopd - optlen; + + /* + * Reset the slow-start flight size + * as it may depend on the new MSS + */ + if (CC_ALGO(tp)->cwnd_init != NULL) + CC_ALGO(tp)->cwnd_init(tp); + tp->snd_cwnd = tp->t_maxseg; + } + /* + * If further retransmissions are still + * unsuccessful with a lowered MTU, maybe this + * isn't a Black Hole and we restore the previous + * MSS and blackhole detection flags. 
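+ *
+ * Concretely: after more than 4 retransmits that failed even at the
+ * reduced MSS, tcp_pmtud_revert_segment_size() above re-enables
+ * TF_PMTUD, clears TF_BLACKHOLE and restores the saved segment size,
+ * and snd_cwnd restarts at one segment of that size.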
+ */ + else { + + if ((tp->t_flags & TF_BLACKHOLE) && + (tp->t_rxtshift > 4)) { + tcp_pmtud_revert_segment_size(tp); + tp->snd_cwnd = tp->t_maxseg; + } + } + } + /* - * Disable rfc1323 and rfc1644 if we havn't got any response to - * our third SYN to work-around some broken terminal servers - * (most of which have hopefully been retired) that have bad VJ - * header compression code which trashes TCP segments containing - * unknown-to-them TCP options. + * Disable rfc1323 and rfc1644 if we haven't got any + * response to our SYN (after we reach the threshold) + * to work-around some broken terminal servers (most of + * which have hopefully been retired) that have bad VJ + * header compression code which trashes TCP segments + * containing unknown-to-them TCP options. + * Do this only on non-local connections. */ - if ((tp->t_state == TCPS_SYN_SENT) && (tp->t_rxtshift == 3)) - tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_REQ_CC); + if (tp->t_state == TCPS_SYN_SENT && + tp->t_rxtshift == tcp_broken_peer_syn_rxmit_thres) + tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_REQ_CC); + /* * If losing, let the lower level know and try for * a better route. Also, if we backed off this far, @@ -578,43 +1069,41 @@ tcp_timers(tp, timer) * Force a segment to be sent. */ tp->t_flags |= TF_ACKNOW; - /* - * If timing a segment in this window, stop the timer. - */ + + /* If timing a segment in this window, stop the timer */ tp->t_rtttime = 0; + + if (!IN_FASTRECOVERY(tp) && tp->t_rxtshift == 1) + tcpstat.tcps_tailloss_rto++; + + /* - * Close the congestion window down to one segment - * (we'll open it by one segment for each ack we get). - * Since we probably have a window's worth of unacked - * data accumulated, this "slow start" keeps us from - * dumping all that data as back-to-back packets (which - * might overwhelm an intermediate gateway). - * - * There are two phases to the opening: Initially we - * open by one mss on each ack. This makes the window - * size increase exponentially with time. If the - * window is larger than the path can handle, this - * exponential growth results in dropped packet(s) - * almost immediately. To get more time between - * drops but still "push" the network to take advantage - * of improving conditions, we switch from exponential - * to linear window opening at some threshhold size. - * For a threshhold, we use half the current window - * size, truncated to a multiple of the mss. - * - * (the minimum cwnd that will give us exponential - * growth is 2 mss. We don't allow the threshhold - * to go below this.) + * RFC 5681 says: when a TCP sender detects segment loss + * using retransmit timer and the given segment has already + * been retransmitted by way of the retransmission timer at + * least once, the value of ssthresh is held constant */ - { - u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg; - if (win < 2) - win = 2; - tp->snd_cwnd = tp->t_maxseg; - tp->snd_ssthresh = win * tp->t_maxseg; - tp->t_dupacks = 0; + if (tp->t_rxtshift == 1 && + CC_ALGO(tp)->after_timeout != NULL) { + CC_ALGO(tp)->after_timeout(tp); + /* + * CWR notifications are to be sent on new data + * right after Fast Retransmits and ECE + * notification receipts. 
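+ *
+ * A timeout-driven cwnd reduction counts as such an event, so
+ * TE_SENDCWR is set here and the CWR flag goes out on the next
+ * new-data segment when ECN is enabled on the connection.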
+ */ + if (TCP_ECN_ENABLED(tp)) + tp->ecn_flags |= TE_SENDCWR; } + EXIT_FASTRECOVERY(tp); + + /* Exit cwnd non validated phase */ + tp->t_flagsext &= ~TF_CWND_NONVALIDATED; + + +fc_output: + tcp_ccdbg_trace(tp, NULL, TCP_CC_REXMT_TIMEOUT); + (void) tcp_output(tp); break; @@ -630,20 +1119,27 @@ tcp_timers(tp, timer) * backoff, drop the connection if the idle time * (no responses to probes) reaches the maximum * backoff that we would use if retransmitting. + * + * Drop the connection if we reached the maximum allowed time for + * Zero Window Probes without a non-zero update from the peer. + * See rdar://5805356 */ - if (tp->t_rxtshift == TCP_MAXRXTSHIFT && - (tp->t_rcvtime >= tcp_maxpersistidle || - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) { + if ((tp->t_rxtshift == TCP_MAXRXTSHIFT && + (idle_time >= tcp_maxpersistidle || + idle_time >= TCP_REXMTVAL(tp) * tcp_totbackoff)) || + ((tp->t_persist_stop != 0) && + TSTMP_LEQ(tp->t_persist_stop, tcp_now))) { tcpstat.tcps_persistdrop++; - so_tmp = tp->t_inpcb->inp_socket; + postevent(so, 0, EV_TIMEOUT); + soevent(so, + (SO_FILT_HINT_LOCKED|SO_FILT_HINT_TIMEOUT)); tp = tcp_drop(tp, ETIMEDOUT); - postevent(so_tmp, 0, EV_TIMEOUT); break; } tcp_setpersist(tp); - tp->t_force = 1; + tp->t_flagsext |= TF_FORCE; (void) tcp_output(tp); - tp->t_force = 0; + tp->t_flagsext &= ~TF_FORCE; break; /* @@ -652,12 +1148,28 @@ tcp_timers(tp, timer) */ case TCPT_KEEP: tcpstat.tcps_keeptimeo++; +#if MPTCP + /* + * Regular TCP connections do not send keepalives after closing + * MPTCP must not also, after sending Data FINs. + */ + struct mptcb *mp_tp = tp->t_mptcb; + if ((tp->t_mpflags & TMPF_MPTCP_TRUE) && + (tp->t_state > TCPS_ESTABLISHED)) { + goto dropit; + } else if (mp_tp != NULL) { + if ((mptcp_ok_to_keepalive(mp_tp) == 0)) + goto dropit; + } +#endif /* MPTCP */ if (tp->t_state < TCPS_ESTABLISHED) goto dropit; if ((always_keepalive || - tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) && - tp->t_state <= TCPS_CLOSING || tp->t_state == TCPS_FIN_WAIT_2) { - if (tp->t_rcvtime >= TCP_KEEPIDLE(tp) + (unsigned long)tcp_maxidle) + (tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) || + (tp->t_flagsext & TF_DETECT_READSTALL) || + (tp->t_tfo_probe_state == TFO_PROBE_PROBING)) && + (tp->t_state <= TCPS_CLOSING || tp->t_state == TCPS_FIN_WAIT_2)) { + if (idle_time >= TCP_CONN_KEEPIDLE(tp) + TCP_CONN_MAXIDLE(tp)) goto dropit; /* * Send a packet designed to force a response @@ -674,26 +1186,1265 @@ tcp_timers(tp, timer) tcpstat.tcps_keepprobe++; t_template = tcp_maketemplate(tp); if (t_template) { + struct inpcb *inp = tp->t_inpcb; + struct tcp_respond_args tra; + + bzero(&tra, sizeof(tra)); + tra.nocell = INP_NO_CELLULAR(inp); + tra.noexpensive = INP_NO_EXPENSIVE(inp); + tra.awdl_unrestricted = INP_AWDL_UNRESTRICTED(inp); + tra.intcoproc_allowed = INP_INTCOPROC_ALLOWED(inp); + if (tp->t_inpcb->inp_flags & INP_BOUND_IF) + tra.ifscope = tp->t_inpcb->inp_boundifp->if_index; + else + tra.ifscope = IFSCOPE_NONE; tcp_respond(tp, t_template->tt_ipgen, &t_template->tt_t, (struct mbuf *)NULL, - tp->rcv_nxt, tp->snd_una - 1, 0); + tp->rcv_nxt, tp->snd_una - 1, 0, &tra); (void) m_free(dtom(t_template)); + if (tp->t_flagsext & TF_DETECT_READSTALL) + tp->t_rtimo_probes++; } - tp->t_timer[TCPT_KEEP] = tcp_keepintvl; - } else - tp->t_timer[TCPT_KEEP] = TCP_KEEPIDLE(tp); - break; - -#if TCPDEBUG - if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG) - tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0, - PRU_SLOWTIMO); -#endif + tp->t_timer[TCPT_KEEP] = 
OFFSET_FROM_START(tp, + TCP_CONN_KEEPINTVL(tp)); + } else { + tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, + TCP_CONN_KEEPIDLE(tp)); + } + if (tp->t_flagsext & TF_DETECT_READSTALL) { + struct ifnet *outifp = tp->t_inpcb->inp_last_outifp; + bool reenable_probe = false; + /* + * The keep alive packets sent to detect a read + * stall did not get a response from the + * peer. Generate more keep-alives to confirm this. + * If the number of probes sent reaches the limit, + * generate an event. + */ + if (tp->t_adaptive_rtimo > 0) { + if (tp->t_rtimo_probes > tp->t_adaptive_rtimo) { + /* Generate an event */ + soevent(so, + (SO_FILT_HINT_LOCKED | + SO_FILT_HINT_ADAPTIVE_RTIMO)); + tcp_keepalive_reset(tp); + } else { + reenable_probe = true; + } + } else if (outifp != NULL && + (outifp->if_eflags & IFEF_PROBE_CONNECTIVITY) && + tp->t_rtimo_probes <= TCP_CONNECTIVITY_PROBES_MAX) { + reenable_probe = true; + } else { + tp->t_flagsext &= ~TF_DETECT_READSTALL; + } + if (reenable_probe) { + int ind = min(tp->t_rtimo_probes, + TCP_MAXRXTSHIFT); + tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START( + tp, tcp_backoff[ind] * TCP_REXMTVAL(tp)); + } + } + if (tp->t_tfo_probe_state == TFO_PROBE_PROBING) { + int ind; + + tp->t_tfo_probes++; + ind = min(tp->t_tfo_probes, TCP_MAXRXTSHIFT); + + /* + * We take the minimum among the time set by true + * keepalive (see above) and the backoff'd RTO. That + * way we backoff in case of packet-loss but will never + * timeout slower than regular keepalive due to the + * backing off. + */ + tp->t_timer[TCPT_KEEP] = min(OFFSET_FROM_START( + tp, tcp_backoff[ind] * TCP_REXMTVAL(tp)), + tp->t_timer[TCPT_KEEP]); + } else if (tp->t_tfo_probe_state == TFO_PROBE_WAIT_DATA) { + /* Still no data! Let's assume a TFO-error and err out... */ + tcp_heuristic_tfo_middlebox(tp); + + so->so_error = ENODATA; + sorwakeup(so); + tp->t_tfo_stats |= TFO_S_RECV_BLACKHOLE; + tcpstat.tcps_tfo_blackhole++; + } + break; + case TCPT_DELACK: + if (tcp_delack_enabled && (tp->t_flags & TF_DELACK)) { + tp->t_flags &= ~TF_DELACK; + tp->t_timer[TCPT_DELACK] = 0; + tp->t_flags |= TF_ACKNOW; + + /* + * If delayed ack timer fired while stretching + * acks, count the number of times the streaming + * detection was not correct. If this exceeds a + * threshold, disable strech ack on this + * connection + * + * Also, go back to acking every other packet. + */ + if ((tp->t_flags & TF_STRETCHACK)) { + if (tp->t_unacksegs > 1 && + tp->t_unacksegs < maxseg_unacked) + tp->t_stretchack_delayed++; + + if (tp->t_stretchack_delayed > + TCP_STRETCHACK_DELAY_THRESHOLD) { + tp->t_flagsext |= TF_DISABLE_STRETCHACK; + /* + * Note the time at which stretch + * ack was disabled automatically + */ + tp->rcv_nostrack_ts = tcp_now; + tcpstat.tcps_nostretchack++; + tp->t_stretchack_delayed = 0; + tp->rcv_nostrack_pkts = 0; + } + tcp_reset_stretch_ack(tp); + } + + /* + * If we are measuring inter packet arrival jitter + * for throttling a connection, this delayed ack + * might be the reason for accumulating some + * jitter. So let's restart the measurement. + */ + CLEAR_IAJ_STATE(tp); + + tcpstat.tcps_delack++; + (void) tcp_output(tp); + } + break; + +#if MPTCP + case TCPT_JACK_RXMT: + if ((tp->t_state == TCPS_ESTABLISHED) && + (tp->t_mpflags & TMPF_PREESTABLISHED) && + (tp->t_mpflags & TMPF_JOINED_FLOW)) { + if (++tp->t_mprxtshift > TCP_MAXRXTSHIFT) { + tcpstat.tcps_timeoutdrop++; + postevent(so, 0, EV_TIMEOUT); + soevent(so, + (SO_FILT_HINT_LOCKED| + SO_FILT_HINT_TIMEOUT)); + tp = tcp_drop(tp, tp->t_softerror ? 
+ tp->t_softerror : ETIMEDOUT); + break; + } + tcpstat.tcps_join_rxmts++; + tp->t_flags |= TF_ACKNOW; + + /* + * No backoff is implemented for simplicity for this + * corner case. + */ + (void) tcp_output(tp); + } + break; +#endif /* MPTCP */ + + case TCPT_PTO: + { + int32_t snd_len; + tp->t_flagsext &= ~(TF_SENT_TLPROBE); + + /* + * Check if the connection is in the right state to + * send a probe + */ + if (tp->t_state != TCPS_ESTABLISHED || + (tp->t_rxtshift > 0 && !(tp->t_flagsext & TF_PROBING)) + || tp->snd_max == tp->snd_una || + !SACK_ENABLED(tp) || !TAILQ_EMPTY(&tp->snd_holes) || + IN_FASTRECOVERY(tp)) + break; + + /* + * If there is no new data to send or if the + * connection is limited by receive window then + * retransmit the last segment, otherwise send + * new data. + */ + snd_len = min(so->so_snd.sb_cc, tp->snd_wnd) + - (tp->snd_max - tp->snd_una); + if (snd_len > 0) { + tp->snd_nxt = tp->snd_max; + } else { + snd_len = min((tp->snd_max - tp->snd_una), + tp->t_maxseg); + tp->snd_nxt = tp->snd_max - snd_len; + } + + tcpstat.tcps_pto++; + if (tp->t_flagsext & TF_PROBING) + tcpstat.tcps_probe_if++; + + /* If timing a segment in this window, stop the timer */ + tp->t_rtttime = 0; + /* Note that tail loss probe is being sent */ + tp->t_flagsext |= TF_SENT_TLPROBE; + tp->t_tlpstart = tcp_now; + + tp->snd_cwnd += tp->t_maxseg; + (void )tcp_output(tp); + tp->snd_cwnd -= tp->t_maxseg; + + tp->t_tlphighrxt = tp->snd_nxt; + break; + } + case TCPT_DELAYFR: + tp->t_flagsext &= ~TF_DELAY_RECOVERY; + + /* + * Don't do anything if one of the following is true: + * - the connection is already in recovery + * - sequence until snd_recover has been acknowledged. + * - retransmit timeout has fired + */ + if (IN_FASTRECOVERY(tp) || + SEQ_GEQ(tp->snd_una, tp->snd_recover) || + tp->t_rxtshift > 0) + break; + + VERIFY(SACK_ENABLED(tp)); + tcp_rexmt_save_state(tp); + if (CC_ALGO(tp)->pre_fr != NULL) { + CC_ALGO(tp)->pre_fr(tp); + if (TCP_ECN_ENABLED(tp)) + tp->ecn_flags |= TE_SENDCWR; + } + ENTER_FASTRECOVERY(tp); + + tp->t_timer[TCPT_REXMT] = 0; + tcpstat.tcps_sack_recovery_episode++; + tp->t_sack_recovery_episode++; + tp->sack_newdata = tp->snd_nxt; + tp->snd_cwnd = tp->t_maxseg; + tcp_ccdbg_trace(tp, NULL, TCP_CC_ENTER_FASTRECOVERY); + (void) tcp_output(tp); + break; dropit: tcpstat.tcps_keepdrops++; + postevent(so, 0, EV_TIMEOUT); + soevent(so, + (SO_FILT_HINT_LOCKED|SO_FILT_HINT_TIMEOUT)); tp = tcp_drop(tp, ETIMEDOUT); - postevent(so_tmp, 0, EV_TIMEOUT); break; } +#if TCPDEBUG + if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG) + tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0, + PRU_SLOWTIMO); +#endif return (tp); } + +/* Remove a timer entry from timer list */ +void +tcp_remove_timer(struct tcpcb *tp) +{ + struct tcptimerlist *listp = &tcp_timer_list; + + lck_mtx_assert(&tp->t_inpcb->inpcb_mtx, LCK_MTX_ASSERT_OWNED); + if (!(TIMER_IS_ON_LIST(tp))) { + return; + } + lck_mtx_lock(listp->mtx); + + /* Check if pcb is on timer list again after acquiring the lock */ + if (!(TIMER_IS_ON_LIST(tp))) { + lck_mtx_unlock(listp->mtx); + return; + } + + if (listp->next_te != NULL && listp->next_te == &tp->tentry) + listp->next_te = LIST_NEXT(&tp->tentry, le); + + LIST_REMOVE(&tp->tentry, le); + tp->t_flags &= ~(TF_TIMER_ONLIST); + + listp->entries--; + + tp->tentry.le.le_next = NULL; + tp->tentry.le.le_prev = NULL; + lck_mtx_unlock(listp->mtx); +} + +/* + * Function to check if the timerlist needs to be rescheduled to run + * the timer entry correctly. 
Basically, this is to check if we can avoid + * taking the list lock. + */ + +static boolean_t +need_to_resched_timerlist(u_int32_t runtime, u_int16_t mode) +{ + struct tcptimerlist *listp = &tcp_timer_list; + int32_t diff; + + /* + * If the list is being processed then the state of the list is + * in flux. In this case always acquire the lock and set the state + * correctly. + */ + if (listp->running) + return (TRUE); + + if (!listp->scheduled) + return (TRUE); + + diff = timer_diff(listp->runtime, 0, runtime, 0); + if (diff <= 0) { + /* The list is going to run before this timer */ + return (FALSE); + } else { + if (mode & TCP_TIMERLIST_10MS_MODE) { + if (diff <= TCP_TIMER_10MS_QUANTUM) + return (FALSE); + } else if (mode & TCP_TIMERLIST_100MS_MODE) { + if (diff <= TCP_TIMER_100MS_QUANTUM) + return (FALSE); + } else { + if (diff <= TCP_TIMER_500MS_QUANTUM) + return (FALSE); + } + } + return (TRUE); +} + +void +tcp_sched_timerlist(uint32_t offset) +{ + uint64_t deadline = 0; + struct tcptimerlist *listp = &tcp_timer_list; + + lck_mtx_assert(listp->mtx, LCK_MTX_ASSERT_OWNED); + + offset = min(offset, TCP_TIMERLIST_MAX_OFFSET); + listp->runtime = tcp_now + offset; + if (listp->runtime == 0) { + listp->runtime++; + offset++; + } + + clock_interval_to_deadline(offset, USEC_PER_SEC, &deadline); + + thread_call_enter_delayed(listp->call, deadline); + listp->scheduled = TRUE; +} + +/* + * Function to run the timers for a connection. + * + * Returns the offset of next timer to be run for this connection which + * can be used to reschedule the timerlist. + * + * te_mode is an out parameter that indicates the modes of active + * timers for this connection. + */ +u_int32_t +tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode, + u_int16_t probe_if_index) +{ + struct socket *so; + u_int16_t i = 0, index = TCPT_NONE, lo_index = TCPT_NONE; + u_int32_t timer_val, offset = 0, lo_timer = 0; + int32_t diff; + boolean_t needtorun[TCPT_NTIMERS]; + int count = 0; + + VERIFY(tp != NULL); + bzero(needtorun, sizeof(needtorun)); + *te_mode = 0; + + tcp_lock(tp->t_inpcb->inp_socket, 1, 0); + + so = tp->t_inpcb->inp_socket; + /* Release the want count on inp */ + if (in_pcb_checkstate(tp->t_inpcb, WNT_RELEASE, 1) + == WNT_STOPUSING) { + if (TIMER_IS_ON_LIST(tp)) { + tcp_remove_timer(tp); + } + + /* Looks like the TCP connection got closed while we + * were waiting for the lock.. Done + */ + goto done; + } + + /* + * If this connection is over an interface that needs to + * be probed, send probe packets to reinitiate communication. + */ + if (probe_if_index > 0 && tp->t_inpcb->inp_last_outifp != NULL && + tp->t_inpcb->inp_last_outifp->if_index == probe_if_index) { + tp->t_flagsext |= TF_PROBING; + tcp_timers(tp, TCPT_PTO); + tp->t_timer[TCPT_PTO] = 0; + tp->t_flagsext &= ~TF_PROBING; + } + + /* + * Since the timer thread needs to wait for tcp lock, it may race + * with another thread that can cancel or reschedule the timer + * that is about to run. Check if we need to run anything. + */ + if ((index = tp->tentry.index) == TCPT_NONE) + goto done; + + timer_val = tp->t_timer[index]; + + diff = timer_diff(tp->tentry.runtime, 0, tcp_now, 0); + if (diff > 0) { + if (tp->tentry.index != TCPT_NONE) { + offset = diff; + *(te_mode) = tp->tentry.mode; + } + goto done; + } + + tp->t_timer[index] = 0; + if (timer_val > 0) { + tp = tcp_timers(tp, index); + if (tp == NULL) + goto done; + } + + /* + * Check if there are any other timers that need to be run. + * While doing it, adjust the timer values wrt tcp_now. 
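+ *
+ * Worked example (illustrative values): with timer_start == 1000,
+ * t_timer[i] == 300 and tcp_now == 1100,
+ *
+ *   diff = (1000 + 300) - (1100 + 0) = 200
+ *
+ * so t_timer[i] is rebased to 200 relative to the new timer_start
+ * of tcp_now assigned after this loop.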
+ */ + tp->tentry.mode = 0; + for (i = 0; i < TCPT_NTIMERS; ++i) { + if (tp->t_timer[i] != 0) { + diff = timer_diff(tp->tentry.timer_start, + tp->t_timer[i], tcp_now, 0); + if (diff <= 0) { + needtorun[i] = TRUE; + count++; + } else { + tp->t_timer[i] = diff; + needtorun[i] = FALSE; + if (lo_timer == 0 || diff < lo_timer) { + lo_timer = diff; + lo_index = i; + } + TCP_SET_TIMER_MODE(tp->tentry.mode, i); + } + } + } + + tp->tentry.timer_start = tcp_now; + tp->tentry.index = lo_index; + VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0); + + if (tp->tentry.index != TCPT_NONE) { + tp->tentry.runtime = tp->tentry.timer_start + + tp->t_timer[tp->tentry.index]; + if (tp->tentry.runtime == 0) + tp->tentry.runtime++; + } + + if (count > 0) { + /* run any other timers outstanding at this time. */ + for (i = 0; i < TCPT_NTIMERS; ++i) { + if (needtorun[i]) { + tp->t_timer[i] = 0; + tp = tcp_timers(tp, i); + if (tp == NULL) { + offset = 0; + *(te_mode) = 0; + goto done; + } + } + } + tcp_set_lotimer_index(tp); + } + + if (tp->tentry.index < TCPT_NONE) { + offset = tp->t_timer[tp->tentry.index]; + *(te_mode) = tp->tentry.mode; + } + +done: + if (tp != NULL && tp->tentry.index == TCPT_NONE) { + tcp_remove_timer(tp); + offset = 0; + } + + tcp_unlock(so, 1, 0); + return(offset); +} + +void +tcp_run_timerlist(void * arg1, void * arg2) +{ +#pragma unused(arg1, arg2) + struct tcptimerentry *te, *next_te; + struct tcptimerlist *listp = &tcp_timer_list; + struct tcpcb *tp; + uint32_t next_timer = 0; /* offset of the next timer on the list */ + u_int16_t te_mode = 0; /* modes of all active timers in a tcpcb */ + u_int16_t list_mode = 0; /* cumulative of modes of all tcpcbs */ + uint32_t active_count = 0; + + calculate_tcp_clock(); + + lck_mtx_lock(listp->mtx); + + listp->running = TRUE; + + LIST_FOREACH_SAFE(te, &listp->lhead, le, next_te) { + uint32_t offset = 0; + uint32_t runtime = te->runtime; + if (te->index < TCPT_NONE && TSTMP_GT(runtime, tcp_now)) { + offset = timer_diff(runtime, 0, tcp_now, 0); + if (next_timer == 0 || offset < next_timer) { + next_timer = offset; + } + list_mode |= te->mode; + continue; + } + + tp = TIMERENTRY_TO_TP(te); + + /* + * Acquire an inp wantcnt on the inpcb so that the socket + * won't get detached even if tcp_close is called + */ + if (in_pcb_checkstate(tp->t_inpcb, WNT_ACQUIRE, 0) + == WNT_STOPUSING) { + /* + * Some how this pcb went into dead state while + * on the timer list, just take it off the list. + * Since the timer list entry pointers are + * protected by the timer list lock, we can + * do it here without the socket lock. + */ + if (TIMER_IS_ON_LIST(tp)) { + tp->t_flags &= ~(TF_TIMER_ONLIST); + LIST_REMOVE(&tp->tentry, le); + listp->entries--; + + tp->tentry.le.le_next = NULL; + tp->tentry.le.le_prev = NULL; + } + continue; + } + active_count++; + + /* + * Store the next timerentry pointer before releasing the + * list lock. If that entry has to be removed when we + * release the lock, this pointer will be updated to the + * element after that. 
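+ *
+ * tcp_remove_timer() cooperates with this: if it unlinks the entry
+ * that listp->next_te points at, it first advances next_te via
+ * LIST_NEXT(), so the traversal resumes safely once we re-acquire
+ * the lock.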
+void
+tcp_run_timerlist(void * arg1, void * arg2)
+{
+#pragma unused(arg1, arg2)
+	struct tcptimerentry *te, *next_te;
+	struct tcptimerlist *listp = &tcp_timer_list;
+	struct tcpcb *tp;
+	uint32_t next_timer = 0; /* offset of the next timer on the list */
+	u_int16_t te_mode = 0;	/* modes of all active timers in a tcpcb */
+	u_int16_t list_mode = 0; /* cumulative of modes of all tcpcbs */
+	uint32_t active_count = 0;
+
+	calculate_tcp_clock();
+
+	lck_mtx_lock(listp->mtx);
+
+	listp->running = TRUE;
+
+	LIST_FOREACH_SAFE(te, &listp->lhead, le, next_te) {
+		uint32_t offset = 0;
+		uint32_t runtime = te->runtime;
+		if (te->index < TCPT_NONE && TSTMP_GT(runtime, tcp_now)) {
+			offset = timer_diff(runtime, 0, tcp_now, 0);
+			if (next_timer == 0 || offset < next_timer) {
+				next_timer = offset;
+			}
+			list_mode |= te->mode;
+			continue;
+		}
+
+		tp = TIMERENTRY_TO_TP(te);
+
+		/*
+		 * Acquire an inp wantcnt on the inpcb so that the socket
+		 * won't get detached even if tcp_close is called.
+		 */
+		if (in_pcb_checkstate(tp->t_inpcb, WNT_ACQUIRE, 0)
+		    == WNT_STOPUSING) {
+			/*
+			 * Somehow this pcb went into a dead state while
+			 * on the timer list; just take it off the list.
+			 * Since the timer list entry pointers are
+			 * protected by the timer list lock, we can
+			 * do it here without the socket lock.
+			 */
+			if (TIMER_IS_ON_LIST(tp)) {
+				tp->t_flags &= ~(TF_TIMER_ONLIST);
+				LIST_REMOVE(&tp->tentry, le);
+				listp->entries--;
+
+				tp->tentry.le.le_next = NULL;
+				tp->tentry.le.le_prev = NULL;
+			}
+			continue;
+		}
+		active_count++;
+
+		/*
+		 * Store the next timer entry pointer before releasing the
+		 * list lock. If that entry has to be removed while the
+		 * lock is released, this pointer will be updated to the
+		 * element after it.
+		 */
+		listp->next_te = next_te;
+
+		VERIFY_NEXT_LINK(&tp->tentry, le);
+		VERIFY_PREV_LINK(&tp->tentry, le);
+
+		lck_mtx_unlock(listp->mtx);
+
+		offset = tcp_run_conn_timer(tp, &te_mode,
+		    listp->probe_if_index);
+
+		lck_mtx_lock(listp->mtx);
+
+		next_te = listp->next_te;
+		listp->next_te = NULL;
+
+		if (offset > 0 && te_mode != 0) {
+			list_mode |= te_mode;
+
+			if (next_timer == 0 || offset < next_timer)
+				next_timer = offset;
+		}
+	}
+
+	if (!LIST_EMPTY(&listp->lhead)) {
+		u_int16_t next_mode = 0;
+		if ((list_mode & TCP_TIMERLIST_10MS_MODE) ||
+		    (listp->pref_mode & TCP_TIMERLIST_10MS_MODE))
+			next_mode = TCP_TIMERLIST_10MS_MODE;
+		else if ((list_mode & TCP_TIMERLIST_100MS_MODE) ||
+		    (listp->pref_mode & TCP_TIMERLIST_100MS_MODE))
+			next_mode = TCP_TIMERLIST_100MS_MODE;
+		else
+			next_mode = TCP_TIMERLIST_500MS_MODE;
+
+		if (next_mode != TCP_TIMERLIST_500MS_MODE) {
+			listp->idleruns = 0;
+		} else {
+			/*
+			 * The next required mode is slow mode, but if
+			 * the last one was a faster mode and we did not
+			 * have enough idle runs, repeat the last mode.
+			 *
+			 * We try to keep the timer list in fast mode for
+			 * some idle time in expectation of new data.
+			 */
+			if (listp->mode != next_mode &&
+			    listp->idleruns < timer_fastmode_idlemax) {
+				listp->idleruns++;
+				next_mode = listp->mode;
+				next_timer = TCP_TIMER_100MS_QUANTUM;
+			} else {
+				listp->idleruns = 0;
+			}
+		}
+		listp->mode = next_mode;
+		if (listp->pref_offset != 0)
+			next_timer = min(listp->pref_offset, next_timer);
+
+		if (listp->mode == TCP_TIMERLIST_500MS_MODE)
+			next_timer = max(next_timer,
+			    TCP_TIMER_500MS_QUANTUM);
+
+		tcp_sched_timerlist(next_timer);
+	} else {
+		/*
+		 * No need to reschedule this timer, but always run
+		 * periodically, at a much coarser granularity.
+		 */
+		tcp_sched_timerlist(TCP_TIMERLIST_MAX_OFFSET);
+	}
+
+	listp->running = FALSE;
+	listp->pref_mode = 0;
+	listp->pref_offset = 0;
+	listp->probe_if_index = 0;
+
+	lck_mtx_unlock(listp->mtx);
+}
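+
+/*
+ * Illustrative sketch, not compiled: the mode selection in
+ * tcp_run_timerlist() above steps the list down from 10ms to 100ms to
+ * 500ms granularity, but holds on to a faster mode for up to
+ * timer_fastmode_idlemax idle runs in anticipation of new data. The
+ * hysteresis is assumed to behave like this helper (the caller also
+ * re-arms the list at TCP_TIMER_100MS_QUANTUM while holding the
+ * faster mode).
+ */
+#if 0
+static u_int16_t
+example_pick_mode(u_int16_t wanted, u_int16_t last, u_int32_t *idleruns)
+{
+	if (wanted != TCP_TIMERLIST_500MS_MODE) {
+		*idleruns = 0;
+		return (wanted);
+	}
+	if (last != wanted && *idleruns < timer_fastmode_idlemax) {
+		(*idleruns)++;	/* stay in the faster mode a while longer */
+		return (last);
+	}
+	*idleruns = 0;
+	return (wanted);
+}
+#endif
+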
+/*
+ * Function to check if the timerlist needs to be rescheduled to run this
+ * connection's timers correctly.
+ */
+void
+tcp_sched_timers(struct tcpcb *tp)
+{
+	struct tcptimerentry *te = &tp->tentry;
+	u_int16_t index = te->index;
+	u_int16_t mode = te->mode;
+	struct tcptimerlist *listp = &tcp_timer_list;
+	int32_t offset = 0;
+	boolean_t list_locked = FALSE;
+
+	if (tp->t_inpcb->inp_state == INPCB_STATE_DEAD) {
+		/* Just return without adding the dead pcb to the list */
+		if (TIMER_IS_ON_LIST(tp)) {
+			tcp_remove_timer(tp);
+		}
+		return;
+	}
+
+	if (index == TCPT_NONE) {
+		/* Nothing to run */
+		tcp_remove_timer(tp);
+		return;
+	}
+
+	/*
+	 * Compute the offset at which the next timer for this connection
+	 * has to run.
+	 */
+	offset = timer_diff(te->runtime, 0, tcp_now, 0);
+	if (offset <= 0) {
+		offset = 1;
+		tcp_timer_advanced++;
+	}
+
+	if (!TIMER_IS_ON_LIST(tp)) {
+		if (!list_locked) {
+			lck_mtx_lock(listp->mtx);
+			list_locked = TRUE;
+		}
+
+		if (!TIMER_IS_ON_LIST(tp)) {
+			LIST_INSERT_HEAD(&listp->lhead, te, le);
+			tp->t_flags |= TF_TIMER_ONLIST;
+
+			listp->entries++;
+			if (listp->entries > listp->maxentries)
+				listp->maxentries = listp->entries;
+
+			/* if the list is not scheduled, just schedule it */
+			if (!listp->scheduled)
+				goto schedule;
+		}
+	}
+
+	/*
+	 * The timer entry is currently on the list; check if the list
+	 * needs to be rescheduled.
+	 */
+	if (need_to_resched_timerlist(te->runtime, mode)) {
+		tcp_resched_timerlist++;
+
+		if (!list_locked) {
+			lck_mtx_lock(listp->mtx);
+			list_locked = TRUE;
+		}
+
+		VERIFY_NEXT_LINK(te, le);
+		VERIFY_PREV_LINK(te, le);
+
+		if (listp->running) {
+			listp->pref_mode |= mode;
+			if (listp->pref_offset == 0 ||
+			    offset < listp->pref_offset) {
+				listp->pref_offset = offset;
+			}
+		} else {
+			/*
+			 * The list could have got rescheduled while
+			 * this thread was waiting for the lock.
+			 */
+			if (listp->scheduled) {
+				int32_t diff;
+				diff = timer_diff(listp->runtime, 0,
+				    tcp_now, offset);
+				if (diff <= 0)
+					goto done;
+				else
+					goto schedule;
+			} else {
+				goto schedule;
+			}
+		}
+	}
+	goto done;
+
+schedule:
+	/*
+	 * Since a connection with timers is getting scheduled, the timer
+	 * list moves from idle to active state; that is why idleruns is
+	 * reset.
+	 */
+	if (mode & TCP_TIMERLIST_10MS_MODE) {
+		listp->mode = TCP_TIMERLIST_10MS_MODE;
+		listp->idleruns = 0;
+		offset = min(offset, TCP_TIMER_10MS_QUANTUM);
+	} else if (mode & TCP_TIMERLIST_100MS_MODE) {
+		if (listp->mode > TCP_TIMERLIST_100MS_MODE)
+			listp->mode = TCP_TIMERLIST_100MS_MODE;
+		listp->idleruns = 0;
+		offset = min(offset, TCP_TIMER_100MS_QUANTUM);
+	}
+	tcp_sched_timerlist(offset);
+
+done:
+	if (list_locked)
+		lck_mtx_unlock(listp->mtx);
+
+	return;
+}
+
+static inline void
+tcp_set_lotimer_index(struct tcpcb *tp)
+{
+	uint16_t i, lo_index = TCPT_NONE, mode = 0;
+	uint32_t lo_timer = 0;
+	for (i = 0; i < TCPT_NTIMERS; ++i) {
+		if (tp->t_timer[i] != 0) {
+			TCP_SET_TIMER_MODE(mode, i);
+			if (lo_timer == 0 || tp->t_timer[i] < lo_timer) {
+				lo_timer = tp->t_timer[i];
+				lo_index = i;
+			}
+		}
+	}
+	tp->tentry.index = lo_index;
+	tp->tentry.mode = mode;
+	VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);
+
+	if (tp->tentry.index != TCPT_NONE) {
+		tp->tentry.runtime = tp->tentry.timer_start +
+		    tp->t_timer[tp->tentry.index];
+		if (tp->tentry.runtime == 0)
+			tp->tentry.runtime++;
+	}
+}
+
+void
+tcp_check_timer_state(struct tcpcb *tp)
+{
+	lck_mtx_assert(&tp->t_inpcb->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
+
+	if (tp->t_inpcb->inp_flags2 & INP2_TIMEWAIT)
+		return;
+
+	tcp_set_lotimer_index(tp);
+
+	tcp_sched_timers(tp);
+	return;
+}
+
+static inline void
+tcp_cumulative_stat(u_int32_t cur, u_int32_t *prev, u_int32_t *dest)
+{
+	/* Handle wraparound of the 32-bit counter */
+	int32_t diff = (int32_t) (cur - *prev);
+	if (diff > 0)
+		*dest = diff;
+	else
+		*dest = 0;
+	*prev = cur;
+	return;
+}
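+
+/*
+ * Illustrative usage, not compiled: tcp_cumulative_stat() above reports
+ * the delta since the previous report and tolerates 32-bit counter
+ * wraparound. For example, cur == 5 after *prev == 0xfffffffb still
+ * yields a delta of 10, because the unsigned subtraction wraps before
+ * the signed cast.
+ */
+#if 0
+static void
+example_report_delta(void)
+{
+	static u_int32_t prev_connattempt;
+	u_int32_t delta;
+
+	tcp_cumulative_stat(tcpstat.tcps_connattempt,
+	    &prev_connattempt, &delta);
+	/* delta now holds the connection attempts since the last call */
+}
+#endif
+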
+__private_extern__ void
+tcp_report_stats(void)
+{
+	struct nstat_sysinfo_data data;
+	struct sockaddr_in dst;
+	struct sockaddr_in6 dst6;
+	struct rtentry *rt = NULL;
+	static struct tcp_last_report_stats prev;
+	u_int64_t var, uptime;
+
+#define	stat	data.u.tcp_stats
+	if (((uptime = net_uptime()) - tcp_last_report_time) <
+	    tcp_report_stats_interval)
+		return;
+
+	tcp_last_report_time = uptime;
+
+	bzero(&data, sizeof(data));
+	data.flags = NSTAT_SYSINFO_TCP_STATS;
+
+	bzero(&dst, sizeof(dst));
+	dst.sin_len = sizeof(dst);
+	dst.sin_family = AF_INET;
+
+	/* ipv4 avg rtt */
+	lck_mtx_lock(rnh_lock);
+	rt = rt_lookup(TRUE, (struct sockaddr *)&dst, NULL,
+	    rt_tables[AF_INET], IFSCOPE_NONE);
+	lck_mtx_unlock(rnh_lock);
+	if (rt != NULL) {
+		RT_LOCK(rt);
+		if (rt_primary_default(rt, rt_key(rt)) &&
+		    rt->rt_stats != NULL) {
+			stat.ipv4_avgrtt = rt->rt_stats->nstat_avg_rtt;
+		}
+		RT_UNLOCK(rt);
+		rtfree(rt);
+		rt = NULL;
+	}
+
+	/* ipv6 avg rtt */
+	bzero(&dst6, sizeof(dst6));
+	dst6.sin6_len = sizeof(dst6);
+	dst6.sin6_family = AF_INET6;
+
+	lck_mtx_lock(rnh_lock);
+	rt = rt_lookup(TRUE, (struct sockaddr *)&dst6, NULL,
+	    rt_tables[AF_INET6], IFSCOPE_NONE);
+	lck_mtx_unlock(rnh_lock);
+	if (rt != NULL) {
+		RT_LOCK(rt);
+		if (rt_primary_default(rt, rt_key(rt)) &&
+		    rt->rt_stats != NULL) {
+			stat.ipv6_avgrtt = rt->rt_stats->nstat_avg_rtt;
+		}
+		RT_UNLOCK(rt);
+		rtfree(rt);
+		rt = NULL;
+	}
+
+	/* send packet loss rate, shift by 10 for precision */
+	if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_sndrexmitpack > 0) {
+		var = tcpstat.tcps_sndrexmitpack << 10;
+		stat.send_plr = (var * 100) / tcpstat.tcps_sndpack;
+	}
+
+	/* recv packet loss rate, shift by 10 for precision */
+	if (tcpstat.tcps_rcvpack > 0 && tcpstat.tcps_recovered_pkts > 0) {
+		var = tcpstat.tcps_recovered_pkts << 10;
+		stat.recv_plr = (var * 100) / tcpstat.tcps_rcvpack;
+	}
+
+	/* RTO after tail loss, shift by 10 for precision */
+	if (tcpstat.tcps_sndrexmitpack > 0
+	    && tcpstat.tcps_tailloss_rto > 0) {
+		var = tcpstat.tcps_tailloss_rto << 10;
+		stat.send_tlrto_rate =
+		    (var * 100) / tcpstat.tcps_sndrexmitpack;
+	}
+
+	/* packet reordering */
+	if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_reordered_pkts > 0) {
+		var = tcpstat.tcps_reordered_pkts << 10;
+		stat.send_reorder_rate =
+		    (var * 100) / tcpstat.tcps_sndpack;
+	}
+
+	if (tcp_ecn_outbound == 1)
+		stat.ecn_client_enabled = 1;
+	if (tcp_ecn_inbound == 1)
+		stat.ecn_server_enabled = 1;
+	tcp_cumulative_stat(tcpstat.tcps_connattempt,
+	    &prev.tcps_connattempt, &stat.connection_attempts);
+	tcp_cumulative_stat(tcpstat.tcps_accepts,
+	    &prev.tcps_accepts, &stat.connection_accepts);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_client_setup,
+	    &prev.tcps_ecn_client_setup, &stat.ecn_client_setup);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_server_setup,
+	    &prev.tcps_ecn_server_setup, &stat.ecn_server_setup);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_client_success,
+	    &prev.tcps_ecn_client_success, &stat.ecn_client_success);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_server_success,
+	    &prev.tcps_ecn_server_success, &stat.ecn_server_success);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_not_supported,
+	    &prev.tcps_ecn_not_supported, &stat.ecn_not_supported);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_lost_syn,
+	    &prev.tcps_ecn_lost_syn, &stat.ecn_lost_syn);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_lost_synack,
+	    &prev.tcps_ecn_lost_synack, &stat.ecn_lost_synack);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ce,
+	    &prev.tcps_ecn_recv_ce, &stat.ecn_recv_ce);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ece,
+	    &prev.tcps_ecn_recv_ece, &stat.ecn_recv_ece);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_sent_ece,
+	    &prev.tcps_ecn_sent_ece, &stat.ecn_sent_ece);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ce,
+	    &prev.tcps_ecn_conn_recv_ce, &stat.ecn_conn_recv_ce);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ece,
+	    &prev.tcps_ecn_conn_recv_ece, &stat.ecn_conn_recv_ece);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_plnoce,
+	    &prev.tcps_ecn_conn_plnoce, &stat.ecn_conn_plnoce);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_pl_ce,
+	    &prev.tcps_ecn_conn_pl_ce, &stat.ecn_conn_pl_ce);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_nopl_ce,
+	    &prev.tcps_ecn_conn_nopl_ce, &stat.ecn_conn_nopl_ce);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_synloss,
+	    &prev.tcps_ecn_fallback_synloss, &stat.ecn_fallback_synloss);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_reorder,
+	    &prev.tcps_ecn_fallback_reorder, &stat.ecn_fallback_reorder);
+	tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_ce,
+	    &prev.tcps_ecn_fallback_ce, &stat.ecn_fallback_ce);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_rcv,
+	    &prev.tcps_tfo_syn_data_rcv, &stat.tfo_syn_data_rcv);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req_rcv,
+	    &prev.tcps_tfo_cookie_req_rcv, &stat.tfo_cookie_req_rcv);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_sent,
+	    &prev.tcps_tfo_cookie_sent, &stat.tfo_cookie_sent);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_invalid,
+	    &prev.tcps_tfo_cookie_invalid, &stat.tfo_cookie_invalid);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req,
+	    &prev.tcps_tfo_cookie_req, &stat.tfo_cookie_req);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_rcv,
+	    &prev.tcps_tfo_cookie_rcv, &stat.tfo_cookie_rcv);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_sent,
+	    &prev.tcps_tfo_syn_data_sent, &stat.tfo_syn_data_sent);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_acked,
+	    &prev.tcps_tfo_syn_data_acked, &stat.tfo_syn_data_acked);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_loss,
+	    &prev.tcps_tfo_syn_loss, &stat.tfo_syn_loss);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_blackhole,
+	    &prev.tcps_tfo_blackhole, &stat.tfo_blackhole);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_wrong,
+	    &prev.tcps_tfo_cookie_wrong, &stat.tfo_cookie_wrong);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_no_cookie_rcv,
+	    &prev.tcps_tfo_no_cookie_rcv, &stat.tfo_no_cookie_rcv);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_heuristics_disable,
+	    &prev.tcps_tfo_heuristics_disable, &stat.tfo_heuristics_disable);
+	tcp_cumulative_stat(tcpstat.tcps_tfo_sndblackhole,
+	    &prev.tcps_tfo_sndblackhole, &stat.tfo_sndblackhole);
+
+	nstat_sysinfo_send_data(&data);
+
+#undef	stat
+}
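+
+/*
+ * Worked example for the scaled rates above (illustrative): the loss and
+ * reordering rates are shifted left by 10 so that integer math preserves
+ * fractional precision. With 1000000 packets sent and 5000 retransmitted:
+ *
+ *	var = 5000 << 10 = 5120000
+ *	send_plr = (5120000 * 100) / 1000000 = 512
+ *
+ * A consumer divides by 1024 to recover the percentage, here ~0.5%.
+ */
+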
+void
+tcp_interface_send_probe(u_int16_t probe_if_index)
+{
+	int32_t offset = 0;
+	struct tcptimerlist *listp = &tcp_timer_list;
+
+	/* Make sure the TCP clock is up to date */
+	calculate_tcp_clock();
+
+	lck_mtx_lock(listp->mtx);
+	if (listp->probe_if_index > 0) {
+		tcpstat.tcps_probe_if_conflict++;
+		goto done;
+	}
+
+	listp->probe_if_index = probe_if_index;
+	if (listp->running)
+		goto done;
+
+	/*
+	 * Reschedule the timerlist to run within the next 10ms, which is
+	 * the fastest that we can do.
+	 */
+	offset = TCP_TIMER_10MS_QUANTUM;
+	if (listp->scheduled) {
+		int32_t diff;
+		diff = timer_diff(listp->runtime, 0, tcp_now, offset);
+		if (diff <= 0) {
+			/* The timer will fire sooner than what's needed */
+			goto done;
+		}
+	}
+	listp->mode = TCP_TIMERLIST_10MS_MODE;
+	listp->idleruns = 0;
+
+	tcp_sched_timerlist(offset);
+
+done:
+	lck_mtx_unlock(listp->mtx);
+	return;
+}
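+
+/*
+ * Illustrative usage, not compiled: a link layer that suspects its
+ * interface has stalled could kick the timer list as below. Only one
+ * interface can be probed per run of the list; a second request bumps
+ * tcps_probe_if_conflict and is dropped, as seen above.
+ */
+#if 0
+static void
+example_probe_interface(struct ifnet *ifp)
+{
+	/* Ask the TCP timer list to send probes on this interface ASAP */
+	tcp_interface_send_probe(ifp->if_index);
+}
+#endif
+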
+/*
+ * Enable read probes on this connection if:
+ * - it is in established state
+ * - it doesn't have any data outstanding
+ * - the outgoing ifp matches
+ * - we have not already sent any read probes
+ */
+static void
+tcp_enable_read_probe(struct tcpcb *tp, struct ifnet *ifp)
+{
+	if (tp->t_state == TCPS_ESTABLISHED &&
+	    tp->snd_max == tp->snd_una &&
+	    tp->t_inpcb->inp_last_outifp == ifp &&
+	    !(tp->t_flagsext & TF_DETECT_READSTALL) &&
+	    tp->t_rtimo_probes == 0) {
+		tp->t_flagsext |= TF_DETECT_READSTALL;
+		tp->t_rtimo_probes = 0;
+		tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
+		    TCP_TIMER_10MS_QUANTUM);
+		if (tp->tentry.index == TCPT_NONE) {
+			tp->tentry.index = TCPT_KEEP;
+			tp->tentry.runtime = tcp_now +
+			    TCP_TIMER_10MS_QUANTUM;
+		} else {
+			int32_t diff = 0;
+
+			/* Reset runtime to be within the next 10ms */
+			diff = timer_diff(tp->tentry.runtime, 0,
+			    tcp_now, TCP_TIMER_10MS_QUANTUM);
+			if (diff > 0) {
+				tp->tentry.index = TCPT_KEEP;
+				tp->tentry.runtime = tcp_now +
+				    TCP_TIMER_10MS_QUANTUM;
+				if (tp->tentry.runtime == 0)
+					tp->tentry.runtime++;
+			}
+		}
+	}
+}
+
+/*
+ * Disable read probes and reset the keepalive timer
+ */
+static void
+tcp_disable_read_probe(struct tcpcb *tp)
+{
+	if (tp->t_adaptive_rtimo == 0 &&
+	    ((tp->t_flagsext & TF_DETECT_READSTALL) ||
+	    tp->t_rtimo_probes > 0)) {
+		tcp_keepalive_reset(tp);
+	}
+}
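+
+/*
+ * Illustrative usage, not compiled: an interface-event handler can pair
+ * the two helpers above under the socket lock to toggle read probes for
+ * a connection, which is what tcp_probe_connectivity() below does for
+ * every connection on a given interface.
+ */
+#if 0
+static void
+example_toggle_read_probe(struct tcpcb *tp, struct ifnet *ifp,
+    u_int32_t enable)
+{
+	if (enable)
+		tcp_enable_read_probe(tp, ifp);	/* arm TCPT_KEEP in ~10ms */
+	else
+		tcp_disable_read_probe(tp);	/* restore keepalive state */
+}
+#endif
+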
+/*
+ * Reschedule the tcp timerlist in the next 10ms to re-enable read/write
+ * probes on connections going over a particular interface.
+ */
+void
+tcp_probe_connectivity(struct ifnet *ifp, u_int32_t enable)
+{
+	int32_t offset;
+	struct tcptimerlist *listp = &tcp_timer_list;
+	struct inpcbinfo *pcbinfo = &tcbinfo;
+	struct inpcb *inp, *nxt;
+
+	if (ifp == NULL)
+		return;
+
+	/* update clock */
+	calculate_tcp_clock();
+
+	/*
+	 * Enable the keepalive timer on all connections that are
+	 * active/established on this interface.
+	 */
+	lck_rw_lock_shared(pcbinfo->ipi_lock);
+
+	LIST_FOREACH_SAFE(inp, pcbinfo->ipi_listhead, inp_list, nxt) {
+		struct tcpcb *tp = NULL;
+		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) ==
+		    WNT_STOPUSING)
+			continue;
+
+		/* Acquire lock to look at the state of the connection */
+		tcp_lock(inp->inp_socket, 1, 0);
+
+		/* Release the want count */
+		if (inp->inp_ppcb == NULL ||
+		    (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING)) {
+			tcp_unlock(inp->inp_socket, 1, 0);
+			continue;
+		}
+		tp = intotcpcb(inp);
+		if (enable)
+			tcp_enable_read_probe(tp, ifp);
+		else
+			tcp_disable_read_probe(tp);
+
+		tcp_unlock(inp->inp_socket, 1, 0);
+	}
+	lck_rw_done(pcbinfo->ipi_lock);
+
+	lck_mtx_lock(listp->mtx);
+	if (listp->running) {
+		listp->pref_mode |= TCP_TIMERLIST_10MS_MODE;
+		goto done;
+	}
+
+	/* Reschedule within the next 10ms */
+	offset = TCP_TIMER_10MS_QUANTUM;
+	if (listp->scheduled) {
+		int32_t diff;
+		diff = timer_diff(listp->runtime, 0, tcp_now, offset);
+		if (diff <= 0) {
+			/* The timer will fire sooner than what's needed */
+			goto done;
+		}
+	}
+	listp->mode = TCP_TIMERLIST_10MS_MODE;
+	listp->idleruns = 0;
+
+	tcp_sched_timerlist(offset);
+done:
+	lck_mtx_unlock(listp->mtx);
+	return;
+}
+
+inline void
+tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp)
+{
+	struct if_cellular_status_v1 *ifsr;
+	u_int32_t optlen;
+	ifsr = &ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
+	if (ifsr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
+		optlen = tp->t_maxopd - tp->t_maxseg;
+
+		if (ifsr->mss_recommended ==
+		    IF_CELL_UL_MSS_RECOMMENDED_NONE &&
+		    tp->t_cached_maxopd > 0 &&
+		    tp->t_maxopd < tp->t_cached_maxopd) {
+			tp->t_maxopd = tp->t_cached_maxopd;
+			tcpstat.tcps_mss_to_default++;
+		} else if (ifsr->mss_recommended ==
+		    IF_CELL_UL_MSS_RECOMMENDED_MEDIUM &&
+		    tp->t_maxopd > tcp_mss_rec_medium) {
+			tp->t_cached_maxopd = tp->t_maxopd;
+			tp->t_maxopd = tcp_mss_rec_medium;
+			tcpstat.tcps_mss_to_medium++;
+		} else if (ifsr->mss_recommended ==
+		    IF_CELL_UL_MSS_RECOMMENDED_LOW &&
+		    tp->t_maxopd > tcp_mss_rec_low) {
+			tp->t_cached_maxopd = tp->t_maxopd;
+			tp->t_maxopd = tcp_mss_rec_low;
+			tcpstat.tcps_mss_to_low++;
+		}
+		tp->t_maxseg = tp->t_maxopd - optlen;
+
+		/*
+		 * Clear the cached value if it is the same as the
+		 * current one.
+		 */
+		if (tp->t_maxopd == tp->t_cached_maxopd)
+			tp->t_cached_maxopd = 0;
+	}
+}
+
+void
+tcp_update_mss_locked(struct socket *so, struct ifnet *ifp)
+{
+	struct inpcb *inp = sotoinpcb(so);
+	struct tcpcb *tp = intotcpcb(inp);
+
+	if (ifp == NULL && inp->inp_last_outifp == NULL)
+		return;
+
+	if (ifp == NULL)
+		ifp = inp->inp_last_outifp;
+
+	if (!IFNET_IS_CELLULAR(ifp)) {
+		/*
+		 * This optimization is implemented for cellular
+		 * networks only.
+		 */
+		return;
+	}
+	if (tp->t_state <= TCPS_CLOSE_WAIT) {
+		/*
+		 * If the connection is currently doing or has done PMTU
+		 * blackhole detection, do not change the MSS.
+		 */
+		if (tp->t_flags & TF_BLACKHOLE)
+			return;
+		if (ifp->if_link_status == NULL)
+			return;
+		tcp_update_mss_core(tp, ifp);
+	}
+}
+
+void
+tcp_itimer(struct inpcbinfo *ipi)
+{
+	struct inpcb *inp, *nxt;
+
+	if (lck_rw_try_lock_exclusive(ipi->ipi_lock) == FALSE) {
+		if (tcp_itimer_done == TRUE) {
+			tcp_itimer_done = FALSE;
+			atomic_add_32(&ipi->ipi_timer_req.intimer_fast, 1);
+			return;
+		}
+		/* The exclusive try failed; block until the lock is ours */
+		lck_rw_lock_exclusive(ipi->ipi_lock);
+	}
+	tcp_itimer_done = TRUE;
+
+	LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
+		struct socket *so;
+
+		if (inp->inp_ppcb == NULL ||
+		    in_pcb_checkstate(inp,
WNT_ACQUIRE, 0) == WNT_STOPUSING) + continue; + so = inp->inp_socket; + tcp_lock(so, 1, 0); + if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) { + tcp_unlock(so, 1, 0); + continue; + } + so_check_extended_bk_idle_time(so); + if (ipi->ipi_flags & INPCBINFO_UPDATE_MSS) { + tcp_update_mss_locked(so, NULL); + } + tcp_unlock(so, 1, 0); + } + + ipi->ipi_flags &= ~INPCBINFO_UPDATE_MSS; + lck_rw_done(ipi->ipi_lock); +}
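+
+/*
+ * Illustrative sketch, not compiled: tcp_itimer() above avoids blocking
+ * on the pcbinfo lock when possible. The pattern is assumed to be
+ * equivalent to this helper: if the exclusive try fails and a full pass
+ * has already completed, re-arm the fast timer and bail; otherwise block
+ * for the lock.
+ */
+#if 0
+static boolean_t
+example_itimer_trylock(struct inpcbinfo *ipi)
+{
+	if (lck_rw_try_lock_exclusive(ipi->ipi_lock))
+		return (TRUE);
+	if (tcp_itimer_done == TRUE) {
+		/* A full pass already ran; request another fast run */
+		tcp_itimer_done = FALSE;
+		atomic_add_32(&ipi->ipi_timer_req.intimer_fast, 1);
+		return (FALSE);
+	}
+	lck_rw_lock_exclusive(ipi->ipi_lock);	/* may block */
+	return (TRUE);
+}
+#endif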