+
+static inline void
+tcp_cumulative_stat(u_int32_t cur, u_int32_t *prev, u_int32_t *dest)
+{
+ /* handle wrap around */
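+ /*
+ * Example: prev = 0xfffffff0, cur = 0x10; the unsigned subtraction
+ * wraps to 0x20, so the reported delta is still correct as long as
+ * the true increment stays below 2^31.
+ */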
+ int32_t diff = (int32_t) (cur - *prev);
+ if (diff > 0)
+ *dest = diff;
+ else
+ *dest = 0;
+ *prev = cur;
+ return;
+}
+
+__private_extern__ void
+tcp_report_stats(void)
+{
+ struct nstat_sysinfo_data data;
+ struct sockaddr_in dst;
+ struct sockaddr_in6 dst6;
+ struct rtentry *rt = NULL;
+ static struct tcp_last_report_stats prev;
+ u_int64_t var, uptime;
+
+#define stat data.u.tcp_stats
+ if (((uptime = net_uptime()) - tcp_last_report_time) <
+ tcp_report_stats_interval)
+ return;
+
+ tcp_last_report_time = uptime;
+
+ bzero(&data, sizeof(data));
+ data.flags = NSTAT_SYSINFO_TCP_STATS;
+
+ bzero(&dst, sizeof(dst));
+ dst.sin_len = sizeof(dst);
+ dst.sin_family = AF_INET;
+
+ /* ipv4 avg rtt */
+ lck_mtx_lock(rnh_lock);
+ rt = rt_lookup(TRUE, (struct sockaddr *)&dst, NULL,
+ rt_tables[AF_INET], IFSCOPE_NONE);
+ lck_mtx_unlock(rnh_lock);
+ if (rt != NULL) {
+ RT_LOCK(rt);
+ if (rt_primary_default(rt, rt_key(rt)) &&
+ rt->rt_stats != NULL) {
+ stat.ipv4_avgrtt = rt->rt_stats->nstat_avg_rtt;
+ }
+ RT_UNLOCK(rt);
+ rtfree(rt);
+ rt = NULL;
+ }
+
+ /* ipv6 avg rtt */
+ bzero(&dst6, sizeof(dst6));
+ dst6.sin6_len = sizeof(dst6);
+ dst6.sin6_family = AF_INET6;
+
+ lck_mtx_lock(rnh_lock);
+ rt = rt_lookup(TRUE, (struct sockaddr *)&dst6, NULL,
+ rt_tables[AF_INET6], IFSCOPE_NONE);
+ lck_mtx_unlock(rnh_lock);
+ if (rt != NULL) {
+ RT_LOCK(rt);
+ if (rt_primary_default(rt, rt_key(rt)) &&
+ rt->rt_stats != NULL) {
+ stat.ipv6_avgrtt = rt->rt_stats->nstat_avg_rtt;
+ }
+ RT_UNLOCK(rt);
+ rtfree(rt);
+ rt = NULL;
+ }
+
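+ /*
+ * The rates below are percentages in fixed point, scaled by 1024
+ * (the << 10). For example, 5 retransmits out of 1000 packets gives
+ * (5 << 10) * 100 / 1000 = 512, i.e. 0.5% after dividing by 1024.
+ */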
+ /* send packet loss rate, shift by 10 for precision */
+ if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_sndrexmitpack > 0) {
+ var = tcpstat.tcps_sndrexmitpack << 10;
+ stat.send_plr = (var * 100) / tcpstat.tcps_sndpack;
+ }
+
+ /* recv packet loss rate, shift by 10 for precision */
+ if (tcpstat.tcps_rcvpack > 0 && tcpstat.tcps_recovered_pkts > 0) {
+ var = tcpstat.tcps_recovered_pkts << 10;
+ stat.recv_plr = (var * 100) / tcpstat.tcps_rcvpack;
+ }
+
+ /* RTO after tail loss, shift by 10 for precision */
+ if (tcpstat.tcps_sndrexmitpack > 0 &&
+ tcpstat.tcps_tailloss_rto > 0) {
+ var = tcpstat.tcps_tailloss_rto << 10;
+ stat.send_tlrto_rate =
+ (var * 100) / tcpstat.tcps_sndrexmitpack;
+ }
+
+ /* packet reordering */
+ if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_reordered_pkts > 0) {
+ var = tcpstat.tcps_reordered_pkts << 10;
+ stat.send_reorder_rate =
+ (var * 100) / tcpstat.tcps_sndpack;
+ }
+
+ if (tcp_ecn_outbound == 1)
+ stat.ecn_client_enabled = 1;
+ if (tcp_ecn_inbound == 1)
+ stat.ecn_server_enabled = 1;
+ tcp_cumulative_stat(tcpstat.tcps_connattempt,
+ &prev.tcps_connattempt, &stat.connection_attempts);
+ tcp_cumulative_stat(tcpstat.tcps_accepts,
+ &prev.tcps_accepts, &stat.connection_accepts);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_client_setup,
+ &prev.tcps_ecn_client_setup, &stat.ecn_client_setup);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_server_setup,
+ &prev.tcps_ecn_server_setup, &stat.ecn_server_setup);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_client_success,
+ &prev.tcps_ecn_client_success, &stat.ecn_client_success);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_server_success,
+ &prev.tcps_ecn_server_success, &stat.ecn_server_success);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_not_supported,
+ &prev.tcps_ecn_not_supported, &stat.ecn_not_supported);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_lost_syn,
+ &prev.tcps_ecn_lost_syn, &stat.ecn_lost_syn);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_lost_synack,
+ &prev.tcps_ecn_lost_synack, &stat.ecn_lost_synack);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ce,
+ &prev.tcps_ecn_recv_ce, &stat.ecn_recv_ce);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ece,
+ &prev.tcps_ecn_recv_ece, &stat.ecn_recv_ece);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_sent_ece,
+ &prev.tcps_ecn_sent_ece, &stat.ecn_sent_ece);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ce,
+ &prev.tcps_ecn_conn_recv_ce, &stat.ecn_conn_recv_ce);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ece,
+ &prev.tcps_ecn_conn_recv_ece, &stat.ecn_conn_recv_ece);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_conn_plnoce,
+ &prev.tcps_ecn_conn_plnoce, &stat.ecn_conn_plnoce);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_conn_pl_ce,
+ &prev.tcps_ecn_conn_pl_ce, &stat.ecn_conn_pl_ce);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_conn_nopl_ce,
+ &prev.tcps_ecn_conn_nopl_ce, &stat.ecn_conn_nopl_ce);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_rcv,
+ &prev.tcps_tfo_syn_data_rcv, &stat.tfo_syn_data_rcv);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req_rcv,
+ &prev.tcps_tfo_cookie_req_rcv, &stat.tfo_cookie_req_rcv);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_sent,
+ &prev.tcps_tfo_cookie_sent, &stat.tfo_cookie_sent);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_invalid,
+ &prev.tcps_tfo_cookie_invalid, &stat.tfo_cookie_invalid);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req,
+ &prev.tcps_tfo_cookie_req, &stat.tfo_cookie_req);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_rcv,
+ &prev.tcps_tfo_cookie_rcv, &stat.tfo_cookie_rcv);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_sent,
+ &prev.tcps_tfo_syn_data_sent, &stat.tfo_syn_data_sent);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_acked,
+ &prev.tcps_tfo_syn_data_acked, &stat.tfo_syn_data_acked);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_syn_loss,
+ &prev.tcps_tfo_syn_loss, &stat.tfo_syn_loss);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_blackhole,
+ &prev.tcps_tfo_blackhole, &stat.tfo_blackhole);
+
+ nstat_sysinfo_send_data(&data);
+
+#undef stat
+}
+
+void
+tcp_interface_send_probe(u_int16_t probe_if_index)
+{
+ int32_t offset = 0;
+ struct tcptimerlist *listp = &tcp_timer_list;
+
+ /* Make sure TCP clock is up to date */
+ calculate_tcp_clock();
+
+ lck_mtx_lock(listp->mtx);
+ if (listp->probe_if_index > 0) {
+ tcpstat.tcps_probe_if_conflict++;
+ goto done;
+ }
+
+ listp->probe_if_index = probe_if_index;
+ if (listp->running)
+ goto done;
+
+ /*
+ * Reschedule the timerlist to run within the next 10ms, which is
+ * the fastest that we can do.
+ */
+ offset = TCP_TIMER_10MS_QUANTUM;
+ if (listp->scheduled) {
+ int32_t diff;
+ diff = timer_diff(listp->runtime, 0, tcp_now, offset);
+ if (diff <= 0) {
+ /* The timer will fire sooner than what's needed */
+ goto done;
+ }
+ }
+ listp->mode = TCP_TIMERLIST_10MS_MODE;
+ listp->idleruns = 0;
+
+ tcp_sched_timerlist(offset);
+
+done:
+ lck_mtx_unlock(listp->mtx);
+ return;
+}
+
+/*
+ * Enable read probes on this connection if:
+ * - it is in the established state
+ * - it has no outstanding data
+ * - the outgoing ifp matches
+ * - no read probes have been sent yet
+ */
+static void
+tcp_enable_read_probe(struct tcpcb *tp, struct ifnet *ifp)
+{
+ if (tp->t_state == TCPS_ESTABLISHED &&
+ tp->snd_max == tp->snd_una &&
+ tp->t_inpcb->inp_last_outifp == ifp &&
+ !(tp->t_flagsext & TF_DETECT_READSTALL) &&
+ tp->t_rtimo_probes == 0) {
+ tp->t_flagsext |= TF_DETECT_READSTALL;
+ tp->t_rtimo_probes = 0;
+ tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
+ TCP_TIMER_10MS_QUANTUM);
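+ /*
+ * Also pull the connection's timer entry forward so that the
+ * keepalive deadline set above is noticed within the next 10ms.
+ */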
+ if (tp->tentry.index == TCPT_NONE) {
+ tp->tentry.index = TCPT_KEEP;
+ tp->tentry.runtime = tcp_now +
+ TCP_TIMER_10MS_QUANTUM;
+ } else {
+ int32_t diff = 0;
+
+ /* Reset runtime to be in next 10ms */
+ diff = timer_diff(tp->tentry.runtime, 0,
+ tcp_now, TCP_TIMER_10MS_QUANTUM);
+ if (diff > 0) {
+ tp->tentry.index = TCPT_KEEP;
+ tp->tentry.runtime = tcp_now +
+ TCP_TIMER_10MS_QUANTUM;
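+ /*
+ * A runtime of 0 seems to be reserved as an
+ * "unscheduled" sentinel, so avoid it.
+ */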
+ if (tp->tentry.runtime == 0)
+ tp->tentry.runtime++;
+ }
+ }
+ }
+}
+
+/*
+ * Disable read probes and reset the keepalive timer
+ */
+static void
+tcp_disable_read_probe(struct tcpcb *tp)
+{
+ if (tp->t_adaptive_rtimo == 0 &&
+ ((tp->t_flagsext & TF_DETECT_READSTALL) ||
+ tp->t_rtimo_probes > 0)) {
+ tcp_keepalive_reset(tp);
+ }
+}
+
+/*
+ * Enable or disable read probes on connections going over a particular
+ * interface, and reschedule the tcp timerlist to run within the next 10ms
+ * so that the change takes effect promptly.
+ */
+void
+tcp_probe_connectivity(struct ifnet *ifp, u_int32_t enable)
+{
+ int32_t offset;
+ struct tcptimerlist *listp = &tcp_timer_list;
+ struct inpcbinfo *pcbinfo = &tcbinfo;
+ struct inpcb *inp, *nxt;
+
+ if (ifp == NULL)
+ return;
+
+ /* update clock */
+ calculate_tcp_clock();
+
+ /*
+ * Walk all TCP connections and enable or disable read probes on
+ * those that are established over this interface.
+ */
+ lck_rw_lock_shared(pcbinfo->ipi_lock);
+
+ LIST_FOREACH_SAFE(inp, pcbinfo->ipi_listhead, inp_list, nxt) {
+ struct tcpcb *tp = NULL;
+ if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) ==
+ WNT_STOPUSING)
+ continue;
+
+ /* Acquire lock to look at the state of the connection */
+ tcp_lock(inp->inp_socket, 1, 0);
+
+ /* Release the want count */
+ if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
+ tcp_unlock(inp->inp_socket, 1, 0);
+ continue;
+ }
+
+ tp = intotcpcb(inp);
+ if (enable)
+ tcp_enable_read_probe(tp, ifp);
+ else
+ tcp_disable_read_probe(tp);
+
+ tcp_unlock(inp->inp_socket, 1, 0);
+ }
+ lck_rw_done(pcbinfo->ipi_lock);
+
+ lck_mtx_lock(listp->mtx);
+ if (listp->running) {
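+ /*
+ * The timer list is being processed right now; ask for 10ms mode
+ * via pref_mode and let the running pass reschedule the list when
+ * it completes.
+ */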
+ listp->pref_mode |= TCP_TIMERLIST_10MS_MODE;
+ goto done;
+ }
+
+ /* Reschedule within the next 10ms */
+ offset = TCP_TIMER_10MS_QUANTUM;
+ if (listp->scheduled) {
+ int32_t diff;
+ diff = timer_diff(listp->runtime, 0, tcp_now, offset);
+ if (diff <= 0) {
+ /* The timer will fire sooner than what's needed */
+ goto done;
+ }
+ }
+ listp->mode = TCP_TIMERLIST_10MS_MODE;
+ listp->idleruns = 0;
+
+ tcp_sched_timerlist(offset);
+done:
+ lck_mtx_unlock(listp->mtx);
+ return;
+}
+
+void
+tcp_itimer(struct inpcbinfo *ipi)
+{
+ struct inpcb *inp, *nxt;
+
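+ /*
+ * If the pcbinfo lock is contended, defer this scan once by
+ * requesting another fast timer run; tcp_itimer_done tracks whether
+ * the previous run already deferred.
+ */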
+ if (lck_rw_try_lock_exclusive(ipi->ipi_lock) == FALSE) {
+ if (tcp_itimer_done == TRUE) {
+ tcp_itimer_done = FALSE;
+ atomic_add_32(&ipi->ipi_timer_req.intimer_fast, 1);
+ return;
+ }
+ /* Already deferred once; block and take the lock exclusively */
+ lck_rw_lock_exclusive(ipi->ipi_lock);
+ }
+ tcp_itimer_done = TRUE;
+
+ LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
+ struct socket *so;
+
+ if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING)
+ continue;
+ so = inp->inp_socket;
+ tcp_lock(so, 1, 0);
+ if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
+ tcp_unlock(so, 1, 0);
+ continue;
+ }
+ so_check_extended_bk_idle_time(so);
+ tcp_unlock(so, 1, 0);
+ }
+
+ lck_rw_done(ipi->ipi_lock);
+}
+