+
/*
 * Report the increase of a monotonically increasing 32-bit counter since
 * the previous report, and remember the current value for next time.
 * The signed subtraction absorbs 32-bit counter wrap-around; a negative
 * or zero delta (e.g. after a counter reset) is reported as 0.
 */
static inline void
tcp_cumulative_stat(u_int32_t cur, u_int32_t *prev, u_int32_t *dest)
{
	/* handle wrap around */
	int32_t delta = (int32_t)(cur - *prev);

	*dest = (delta > 0) ? (u_int32_t)delta : 0;
	*prev = cur;
}
+
+__private_extern__ void
+tcp_report_stats(void)
+{
+ struct nstat_sysinfo_data data;
+ struct sockaddr_in dst;
+ struct sockaddr_in6 dst6;
+ struct rtentry *rt = NULL;
+ static struct tcp_last_report_stats prev;
+ u_int64_t var, uptime;
+
+#define stat data.u.tcp_stats
+ if (((uptime = net_uptime()) - tcp_last_report_time) <
+ tcp_report_stats_interval)
+ return;
+
+ tcp_last_report_time = uptime;
+
+ bzero(&data, sizeof(data));
+ data.flags = NSTAT_SYSINFO_TCP_STATS;
+
+ bzero(&dst, sizeof(dst));
+ dst.sin_len = sizeof(dst);
+ dst.sin_family = AF_INET;
+
+ /* ipv4 avg rtt */
+ lck_mtx_lock(rnh_lock);
+ rt = rt_lookup(TRUE, (struct sockaddr *)&dst, NULL,
+ rt_tables[AF_INET], IFSCOPE_NONE);
+ lck_mtx_unlock(rnh_lock);
+ if (rt != NULL) {
+ RT_LOCK(rt);
+ if (rt_primary_default(rt, rt_key(rt)) &&
+ rt->rt_stats != NULL) {
+ stat.ipv4_avgrtt = rt->rt_stats->nstat_avg_rtt;
+ }
+ RT_UNLOCK(rt);
+ rtfree(rt);
+ rt = NULL;
+ }
+
+ /* ipv6 avg rtt */
+ bzero(&dst6, sizeof(dst6));
+ dst6.sin6_len = sizeof(dst6);
+ dst6.sin6_family = AF_INET6;
+
+ lck_mtx_lock(rnh_lock);
+ rt = rt_lookup(TRUE,(struct sockaddr *)&dst6, NULL,
+ rt_tables[AF_INET6], IFSCOPE_NONE);
+ lck_mtx_unlock(rnh_lock);
+ if (rt != NULL) {
+ RT_LOCK(rt);
+ if (rt_primary_default(rt, rt_key(rt)) &&
+ rt->rt_stats != NULL) {
+ stat.ipv6_avgrtt = rt->rt_stats->nstat_avg_rtt;
+ }
+ RT_UNLOCK(rt);
+ rtfree(rt);
+ rt = NULL;
+ }
+
+ /* send packet loss rate, shift by 10 for precision */
+ if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_sndrexmitpack > 0) {
+ var = tcpstat.tcps_sndrexmitpack << 10;
+ stat.send_plr = (var * 100) / tcpstat.tcps_sndpack;
+ }
+
+ /* recv packet loss rate, shift by 10 for precision */
+ if (tcpstat.tcps_rcvpack > 0 && tcpstat.tcps_recovered_pkts > 0) {
+ var = tcpstat.tcps_recovered_pkts << 10;
+ stat.recv_plr = (var * 100) / tcpstat.tcps_rcvpack;
+ }
+
+ /* RTO after tail loss, shift by 10 for precision */
+ if (tcpstat.tcps_sndrexmitpack > 0
+ && tcpstat.tcps_tailloss_rto > 0) {
+ var = tcpstat.tcps_tailloss_rto << 10;
+ stat.send_tlrto_rate =
+ (var * 100) / tcpstat.tcps_sndrexmitpack;
+ }
+
+ /* packet reordering */
+ if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_reordered_pkts > 0) {
+ var = tcpstat.tcps_reordered_pkts << 10;
+ stat.send_reorder_rate =
+ (var * 100) / tcpstat.tcps_sndpack;
+ }
+
+ if (tcp_ecn_outbound == 1)
+ stat.ecn_client_enabled = 1;
+ if (tcp_ecn_inbound == 1)
+ stat.ecn_server_enabled = 1;
+ tcp_cumulative_stat(tcpstat.tcps_connattempt,
+ &prev.tcps_connattempt, &stat.connection_attempts);
+ tcp_cumulative_stat(tcpstat.tcps_accepts,
+ &prev.tcps_accepts, &stat.connection_accepts);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_client_setup,
+ &prev.tcps_ecn_client_setup, &stat.ecn_client_setup);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_server_setup,
+ &prev.tcps_ecn_server_setup, &stat.ecn_server_setup);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_client_success,
+ &prev.tcps_ecn_client_success, &stat.ecn_client_success);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_server_success,
+ &prev.tcps_ecn_server_success, &stat.ecn_server_success);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_not_supported,
+ &prev.tcps_ecn_not_supported, &stat.ecn_not_supported);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_lost_syn,
+ &prev.tcps_ecn_lost_syn, &stat.ecn_lost_syn);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_lost_synack,
+ &prev.tcps_ecn_lost_synack, &stat.ecn_lost_synack);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ce,
+ &prev.tcps_ecn_recv_ce, &stat.ecn_recv_ce);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ece,
+ &prev.tcps_ecn_recv_ece, &stat.ecn_recv_ece);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ece,
+ &prev.tcps_ecn_recv_ece, &stat.ecn_recv_ece);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_sent_ece,
+ &prev.tcps_ecn_sent_ece, &stat.ecn_sent_ece);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_sent_ece,
+ &prev.tcps_ecn_sent_ece, &stat.ecn_sent_ece);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ce,
+ &prev.tcps_ecn_conn_recv_ce, &stat.ecn_conn_recv_ce);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ece,
+ &prev.tcps_ecn_conn_recv_ece, &stat.ecn_conn_recv_ece);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_conn_plnoce,
+ &prev.tcps_ecn_conn_plnoce, &stat.ecn_conn_plnoce);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_conn_pl_ce,
+ &prev.tcps_ecn_conn_pl_ce, &stat.ecn_conn_pl_ce);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_conn_nopl_ce,
+ &prev.tcps_ecn_conn_nopl_ce, &stat.ecn_conn_nopl_ce);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_synloss,
+ &prev.tcps_ecn_fallback_synloss, &stat.ecn_fallback_synloss);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_reorder,
+ &prev.tcps_ecn_fallback_reorder, &stat.ecn_fallback_reorder);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_ce,
+ &prev.tcps_ecn_fallback_ce, &stat.ecn_fallback_ce);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_rcv,
+ &prev.tcps_tfo_syn_data_rcv, &stat.tfo_syn_data_rcv);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req_rcv,
+ &prev.tcps_tfo_cookie_req_rcv, &stat.tfo_cookie_req_rcv);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_sent,
+ &prev.tcps_tfo_cookie_sent, &stat.tfo_cookie_sent);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_invalid,
+ &prev.tcps_tfo_cookie_invalid, &stat.tfo_cookie_invalid);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req,
+ &prev.tcps_tfo_cookie_req, &stat.tfo_cookie_req);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_rcv,
+ &prev.tcps_tfo_cookie_rcv, &stat.tfo_cookie_rcv);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_sent,
+ &prev.tcps_tfo_syn_data_sent, &stat.tfo_syn_data_sent);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_acked,
+ &prev.tcps_tfo_syn_data_acked, &stat.tfo_syn_data_acked);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_syn_loss,
+ &prev.tcps_tfo_syn_loss, &stat.tfo_syn_loss);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_blackhole,
+ &prev.tcps_tfo_blackhole, &stat.tfo_blackhole);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_wrong,
+ &prev.tcps_tfo_cookie_wrong, &stat.tfo_cookie_wrong);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_no_cookie_rcv,
+ &prev.tcps_tfo_no_cookie_rcv, &stat.tfo_no_cookie_rcv);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_heuristics_disable,
+ &prev.tcps_tfo_heuristics_disable, &stat.tfo_heuristics_disable);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_sndblackhole,
+ &prev.tcps_tfo_sndblackhole, &stat.tfo_sndblackhole);
+
+
+
+
+ nstat_sysinfo_send_data(&data);
+
+#undef stat
+}
+
+void
+tcp_interface_send_probe(u_int16_t probe_if_index)
+{
+ int32_t offset = 0;
+ struct tcptimerlist *listp = &tcp_timer_list;
+
+ /* Make sure TCP clock is up to date */
+ calculate_tcp_clock();
+
+ lck_mtx_lock(listp->mtx);
+ if (listp->probe_if_index > 0) {
+ tcpstat.tcps_probe_if_conflict++;
+ goto done;
+ }
+
+ listp->probe_if_index = probe_if_index;
+ if (listp->running)
+ goto done;
+
+ /*
+ * Reschedule the timerlist to run within the next 10ms, which is
+ * the fastest that we can do.
+ */
+ offset = TCP_TIMER_10MS_QUANTUM;
+ if (listp->scheduled) {
+ int32_t diff;
+ diff = timer_diff(listp->runtime, 0, tcp_now, offset);
+ if (diff <= 0) {
+ /* The timer will fire sooner than what's needed */
+ goto done;
+ }
+ }
+ listp->mode = TCP_TIMERLIST_10MS_MODE;
+ listp->idleruns = 0;
+
+ tcp_sched_timerlist(offset);
+
+done:
+ lck_mtx_unlock(listp->mtx);
+ return;
+}
+
/*
 * Enable read probes on this connection, if:
 * - it is in established state
 * - doesn't have any data outstanding
 * - the outgoing ifp matches
 * - we have not already sent any read probes
 *
 * When armed, the keepalive timer is pulled in to fire within the next
 * 10ms quantum so the first probe goes out quickly; the connection's
 * timer entry is rescheduled only if it would otherwise fire later.
 */
static void
tcp_enable_read_probe(struct tcpcb *tp, struct ifnet *ifp)
{
	if (tp->t_state == TCPS_ESTABLISHED &&
	    tp->snd_max == tp->snd_una &&
	    tp->t_inpcb->inp_last_outifp == ifp &&
	    !(tp->t_flagsext & TF_DETECT_READSTALL) &&
	    tp->t_rtimo_probes == 0) {
		tp->t_flagsext |= TF_DETECT_READSTALL;
		tp->t_rtimo_probes = 0;	/* no probes sent yet */
		tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
		    TCP_TIMER_10MS_QUANTUM);
		if (tp->tentry.index == TCPT_NONE) {
			/* No timer pending: schedule keepalive 10ms out */
			tp->tentry.index = TCPT_KEEP;
			tp->tentry.runtime = tcp_now +
			    TCP_TIMER_10MS_QUANTUM;
		} else {
			int32_t diff = 0;

			/* Reset runtime to be in next 10ms */
			diff = timer_diff(tp->tentry.runtime, 0,
			    tcp_now, TCP_TIMER_10MS_QUANTUM);
			if (diff > 0) {
				/* Pending timer fires too late; pull it in */
				tp->tentry.index = TCPT_KEEP;
				tp->tentry.runtime = tcp_now +
				    TCP_TIMER_10MS_QUANTUM;
				/*
				 * NOTE(review): runtime 0 appears to be
				 * treated as "unset" elsewhere, so avoid
				 * it on clock wrap — confirm against the
				 * timer list code.
				 */
				if (tp->tentry.runtime == 0)
					tp->tentry.runtime++;
			}
		}
	}
}
+
+/*
+ * Disable read probe and reset the keep alive timer
+ */
+static void
+tcp_disable_read_probe(struct tcpcb *tp)
+{
+ if (tp->t_adaptive_rtimo == 0 &&
+ ((tp->t_flagsext & TF_DETECT_READSTALL) ||
+ tp->t_rtimo_probes > 0)) {
+ tcp_keepalive_reset(tp);
+ }
+}
+
/*
 * Reschedule the tcp timerlist in the next 10ms to re-enable read/write
 * probes on connections going over a particular interface.
 *
 * With 'enable' non-zero, read-stall detection is armed on every
 * established, idle connection whose last output interface is 'ifp';
 * with 'enable' zero it is disarmed again.  Afterwards the shared timer
 * list is nudged so that it runs within the next 10ms quantum.
 */
void
tcp_probe_connectivity(struct ifnet *ifp, u_int32_t enable)
{
	int32_t offset;
	struct tcptimerlist *listp = &tcp_timer_list;
	struct inpcbinfo *pcbinfo = &tcbinfo;
	struct inpcb *inp, *nxt;

	if (ifp == NULL)
		return;

	/* update clock */
	calculate_tcp_clock();

	/*
	 * Enable keep alive timer on all connections that are
	 * active/established on this interface.
	 */
	lck_rw_lock_shared(pcbinfo->ipi_lock);

	LIST_FOREACH_SAFE(inp, pcbinfo->ipi_listhead, inp_list, nxt) {
		struct tcpcb *tp = NULL;
		/* Skip PCBs that are on their way out */
		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) ==
		    WNT_STOPUSING)
			continue;

		/* Acquire lock to look at the state of the connection */
		tcp_lock(inp->inp_socket, 1, 0);

		/* Release the want count */
		if (inp->inp_ppcb == NULL ||
		    (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING)) {
			tcp_unlock(inp->inp_socket, 1, 0);
			continue;
		}
		tp = intotcpcb(inp);
		if (enable)
			tcp_enable_read_probe(tp, ifp);
		else
			tcp_disable_read_probe(tp);

		tcp_unlock(inp->inp_socket, 1, 0);
	}
	lck_rw_done(pcbinfo->ipi_lock);

	lck_mtx_lock(listp->mtx);
	if (listp->running) {
		/* List is executing now; ask it to prefer 10ms mode next */
		listp->pref_mode |= TCP_TIMERLIST_10MS_MODE;
		goto done;
	}

	/* Reschedule within the next 10ms */
	offset = TCP_TIMER_10MS_QUANTUM;
	if (listp->scheduled) {
		int32_t diff;
		diff = timer_diff(listp->runtime, 0, tcp_now, offset);
		if (diff <= 0) {
			/* The timer will fire sooner than what's needed */
			goto done;
		}
	}
	listp->mode = TCP_TIMERLIST_10MS_MODE;
	listp->idleruns = 0;

	tcp_sched_timerlist(offset);
done:
	lck_mtx_unlock(listp->mtx);
	return;
}
+
+inline void
+tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp)
+{
+ struct if_cellular_status_v1 *ifsr;
+ u_int32_t optlen;
+ ifsr = &ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
+ if (ifsr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
+ optlen = tp->t_maxopd - tp->t_maxseg;
+
+ if (ifsr->mss_recommended ==
+ IF_CELL_UL_MSS_RECOMMENDED_NONE &&
+ tp->t_cached_maxopd > 0 &&
+ tp->t_maxopd < tp->t_cached_maxopd) {
+ tp->t_maxopd = tp->t_cached_maxopd;
+ tcpstat.tcps_mss_to_default++;
+ } else if (ifsr->mss_recommended ==
+ IF_CELL_UL_MSS_RECOMMENDED_MEDIUM &&
+ tp->t_maxopd > tcp_mss_rec_medium) {
+ tp->t_cached_maxopd = tp->t_maxopd;
+ tp->t_maxopd = tcp_mss_rec_medium;
+ tcpstat.tcps_mss_to_medium++;
+ } else if (ifsr->mss_recommended ==
+ IF_CELL_UL_MSS_RECOMMENDED_LOW &&
+ tp->t_maxopd > tcp_mss_rec_low) {
+ tp->t_cached_maxopd = tp->t_maxopd;
+ tp->t_maxopd = tcp_mss_rec_low;
+ tcpstat.tcps_mss_to_low++;
+ }
+ tp->t_maxseg = tp->t_maxopd - optlen;
+
+ /*
+ * clear the cached value if it is same as the current
+ */
+ if (tp->t_maxopd == tp->t_cached_maxopd)
+ tp->t_cached_maxopd = 0;
+ }
+}
+
+void
+tcp_update_mss_locked(struct socket *so, struct ifnet *ifp)
+{
+ struct inpcb *inp = sotoinpcb(so);
+ struct tcpcb *tp = intotcpcb(inp);
+
+ if (ifp == NULL && inp->inp_last_outifp == NULL)
+ return;
+
+ if (ifp == NULL)
+ ifp = inp->inp_last_outifp;
+
+ if (!IFNET_IS_CELLULAR(ifp)) {
+ /*
+ * This optimization is implemented for cellular
+ * networks only
+ */
+ return;
+ }
+ if ( tp->t_state <= TCPS_CLOSE_WAIT) {
+ /*
+ * If the connection is currently doing or has done PMTU
+ * blackhole detection, do not change the MSS
+ */
+ if (tp->t_flags & TF_BLACKHOLE)
+ return;
+ if (ifp->if_link_status == NULL)
+ return;
+ tcp_update_mss_core(tp, ifp);
+ }
+}
+
/*
 * Periodic per-pcbinfo timer for TCP.  Walks every inpcb on the 'tcb'
 * list, checks extended background-idle time on each socket and, when
 * INPCBINFO_UPDATE_MSS is set, refreshes the connection's MSS from the
 * interface recommendation.  Requires the pcbinfo lock exclusively.
 */
void
tcp_itimer(struct inpcbinfo *ipi)
{
	struct inpcb *inp, *nxt;

	if (lck_rw_try_lock_exclusive(ipi->ipi_lock) == FALSE) {
		/*
		 * Lock is busy.  On first contention defer the work by
		 * requesting another fast-timer run and return; if the
		 * deferred retry (tcp_itimer_done already cleared) still
		 * finds the lock busy, block for it rather than deferring
		 * indefinitely.
		 */
		if (tcp_itimer_done == TRUE) {
			tcp_itimer_done = FALSE;
			atomic_add_32(&ipi->ipi_timer_req.intimer_fast, 1);
			return;
		}
		/* Second attempt: acquire the lock exclusively, blocking */
		lck_rw_lock_exclusive(ipi->ipi_lock);
	}
	tcp_itimer_done = TRUE;

	LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
		struct socket *so;

		/* Skip PCBs with no protocol block or being torn down */
		if (inp->inp_ppcb == NULL ||
		    in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING)
			continue;
		so = inp->inp_socket;
		tcp_lock(so, 1, 0);
		/* Drop the want count; bail if teardown raced us */
		if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
			tcp_unlock(so, 1, 0);
			continue;
		}
		so_check_extended_bk_idle_time(so);
		if (ipi->ipi_flags & INPCBINFO_UPDATE_MSS) {
			tcp_update_mss_locked(so, NULL);
		}
		tcp_unlock(so, 1, 0);
	}

	/* MSS update request has been serviced for this pass */
	ipi->ipi_flags &= ~INPCBINFO_UPDATE_MSS;
	lck_rw_done(ipi->ipi_lock);
}