+
+/* Remove a timer entry from the timer list */
+void
+tcp_remove_timer(struct tcpcb *tp)
+{
+ struct tcptimerlist *listp = &tcp_timer_list;
+
+ lck_mtx_assert(&tp->t_inpcb->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
+ if (!(TIMER_IS_ON_LIST(tp))) {
+ return;
+ }
+ lck_mtx_lock(listp->mtx);
+
+ /* Check again, now that we hold the lock, that the pcb is still on the timer list */
+ if (!(TIMER_IS_ON_LIST(tp))) {
+ lck_mtx_unlock(listp->mtx);
+ return;
+ }
+
+ if (listp->next_te != NULL && listp->next_te == &tp->tentry)
+ listp->next_te = LIST_NEXT(&tp->tentry, le);
+
+ LIST_REMOVE(&tp->tentry, le);
+ tp->t_flags &= ~(TF_TIMER_ONLIST);
+
+ listp->entries--;
+
+ tp->tentry.le.le_next = NULL;
+ tp->tentry.le.le_prev = NULL;
+ lck_mtx_unlock(listp->mtx);
+}
+
+/*
+ * Function to check if the timerlist needs to be rescheduled to run
+ * the timer entry correctly. Basically, this is to check if we can avoid
+ * taking the list lock.
+ */
+
+static boolean_t
+need_to_resched_timerlist(u_int32_t runtime, u_int16_t mode)
+{
+ struct tcptimerlist *listp = &tcp_timer_list;
+ int32_t diff;
+
+ /*
+ * If the list is being processed then the state of the list is
+ * in flux. In this case always acquire the lock and set the state
+ * correctly.
+ */
+ if (listp->running)
+ return (TRUE);
+
+ if (!listp->scheduled)
+ return (TRUE);
+
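+ /*
+ * Otherwise, reschedule only if the list would run later than this
+ * timer by more than the quantum of the requested mode.
+ */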
+ diff = timer_diff(listp->runtime, 0, runtime, 0);
+ if (diff <= 0) {
+ /* The list is going to run before this timer */
+ return (FALSE);
+ } else {
+ if (mode & TCP_TIMERLIST_10MS_MODE) {
+ if (diff <= TCP_TIMER_10MS_QUANTUM)
+ return (FALSE);
+ } else if (mode & TCP_TIMERLIST_100MS_MODE) {
+ if (diff <= TCP_TIMER_100MS_QUANTUM)
+ return (FALSE);
+ } else {
+ if (diff <= TCP_TIMER_500MS_QUANTUM)
+ return (FALSE);
+ }
+ }
+ return (TRUE);
+}
+
+void
+tcp_sched_timerlist(uint32_t offset)
+{
+ uint64_t deadline = 0;
+ struct tcptimerlist *listp = &tcp_timer_list;
+
+ lck_mtx_assert(listp->mtx, LCK_MTX_ASSERT_OWNED);
+
+ offset = min(offset, TCP_TIMERLIST_MAX_OFFSET);
+ listp->runtime = tcp_now + offset;
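+ /*
+ * Keep runtime non-zero (and bump offset with it), presumably
+ * because a runtime of 0 is treated as an unset value.
+ */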
+ if (listp->runtime == 0) {
+ listp->runtime++;
+ offset++;
+ }
+
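+ /*
+ * offset is in TCP timer ticks (nominally 1 ms); the scale factor
+ * of USEC_PER_SEC nanoseconds converts each tick to 1 ms when
+ * computing the absolute deadline.
+ */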
+ clock_interval_to_deadline(offset, USEC_PER_SEC, &deadline);
+
+ thread_call_enter_delayed(listp->call, deadline);
+ listp->scheduled = TRUE;
+}
+
+/*
+ * Function to run the timers for a connection.
+ *
+ * Returns the offset of the next timer to be run for this connection,
+ * which can be used to reschedule the timerlist.
+ *
+ * te_mode is an out parameter that indicates the modes of active
+ * timers for this connection.
+ */
+u_int32_t
+tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode,
+ u_int16_t probe_if_index)
+{
+ struct socket *so;
+ u_int16_t i = 0, index = TCPT_NONE, lo_index = TCPT_NONE;
+ u_int32_t timer_val, offset = 0, lo_timer = 0;
+ int32_t diff;
+ boolean_t needtorun[TCPT_NTIMERS];
+ int count = 0;
+
+ VERIFY(tp != NULL);
+ bzero(needtorun, sizeof(needtorun));
+ *te_mode = 0;
+
+ tcp_lock(tp->t_inpcb->inp_socket, 1, 0);
+
+ so = tp->t_inpcb->inp_socket;
+ /* Release the want count on inp */
+ if (in_pcb_checkstate(tp->t_inpcb, WNT_RELEASE, 1)
+ == WNT_STOPUSING) {
+ if (TIMER_IS_ON_LIST(tp)) {
+ tcp_remove_timer(tp);
+ }
+
+ /*
+ * Looks like the TCP connection got closed while we
+ * were waiting for the lock. Done.
+ */
+ goto done;
+ }
+
+ /*
+ * If this connection is over an interface that needs to
+ * be probed, send probe packets to reinitiate communication.
+ */
+ if (probe_if_index > 0 && tp->t_inpcb->inp_last_outifp != NULL &&
+ tp->t_inpcb->inp_last_outifp->if_index == probe_if_index) {
+ tp->t_flagsext |= TF_PROBING;
+ tcp_timers(tp, TCPT_PTO);
+ tp->t_timer[TCPT_PTO] = 0;
+ tp->t_flagsext &= ~TF_PROBING;
+ }
+
+ /*
+ * Since the timer thread needs to wait for the TCP lock, it may race
+ * with another thread that can cancel or reschedule the timer
+ * that is about to run. Check if we need to run anything.
+ */
+ if ((index = tp->tentry.index) == TCPT_NONE)
+ goto done;
+
+ timer_val = tp->t_timer[index];
+
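+ /*
+ * If the timer was pushed out while we waited for the lock, it has
+ * not expired yet; just report the remaining offset and mode.
+ */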
+ diff = timer_diff(tp->tentry.runtime, 0, tcp_now, 0);
+ if (diff > 0) {
+ if (tp->tentry.index != TCPT_NONE) {
+ offset = diff;
+ *(te_mode) = tp->tentry.mode;
+ }
+ goto done;
+ }
+
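+ /* Clear the timer before running it; the handler may rearm it */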
+ tp->t_timer[index] = 0;
+ if (timer_val > 0) {
+ tp = tcp_timers(tp, index);
+ if (tp == NULL)
+ goto done;
+ }
+
+ /*
+ * Check if there are any other timers that need to be run.
+ * While doing it, adjust the timer values wrt tcp_now.
+ */
+ tp->tentry.mode = 0;
+ for (i = 0; i < TCPT_NTIMERS; ++i) {
+ if (tp->t_timer[i] != 0) {
+ diff = timer_diff(tp->tentry.timer_start,
+ tp->t_timer[i], tcp_now, 0);
+ if (diff <= 0) {
+ needtorun[i] = TRUE;
+ count++;
+ } else {
+ tp->t_timer[i] = diff;
+ needtorun[i] = FALSE;
+ if (lo_timer == 0 || diff < lo_timer) {
+ lo_timer = diff;
+ lo_index = i;
+ }
+ TCP_SET_TIMER_MODE(tp->tentry.mode, i);
+ }
+ }
+ }
+
+ tp->tentry.timer_start = tcp_now;
+ tp->tentry.index = lo_index;
+ VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);
+
+ if (tp->tentry.index != TCPT_NONE) {
+ tp->tentry.runtime = tp->tentry.timer_start +
+ tp->t_timer[tp->tentry.index];
+ if (tp->tentry.runtime == 0)
+ tp->tentry.runtime++;
+ }
+
+ if (count > 0) {
+ /* run any other timers outstanding at this time. */
+ for (i = 0; i < TCPT_NTIMERS; ++i) {
+ if (needtorun[i]) {
+ tp->t_timer[i] = 0;
+ tp = tcp_timers(tp, i);
+ if (tp == NULL) {
+ offset = 0;
+ *(te_mode) = 0;
+ goto done;
+ }
+ }
+ }
+ tcp_set_lotimer_index(tp);
+ }
+
+ if (tp->tentry.index < TCPT_NONE) {
+ offset = tp->t_timer[tp->tentry.index];
+ *(te_mode) = tp->tentry.mode;
+ }
+
+done:
+ if (tp != NULL && tp->tentry.index == TCPT_NONE) {
+ tcp_remove_timer(tp);
+ offset = 0;
+ }
+
+ tcp_unlock(so, 1, 0);
+ return (offset);
+}
+
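+/*
+ * Thread-call handler for the TCP timer list. Walks the timer entries,
+ * runs the timers that are due (dropping the list lock while each
+ * connection's timers run under its socket lock) and reschedules the
+ * list based on the earliest remaining timer and the granularity mode
+ * required.
+ */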
+void
+tcp_run_timerlist(void * arg1, void * arg2)
+{
+#pragma unused(arg1, arg2)
+ struct tcptimerentry *te, *next_te;
+ struct tcptimerlist *listp = &tcp_timer_list;
+ struct tcpcb *tp;
+ uint32_t next_timer = 0; /* offset of the next timer on the list */
+ u_int16_t te_mode = 0; /* modes of all active timers in a tcpcb */
+ u_int16_t list_mode = 0; /* cumulative of modes of all tcpcbs */
+ uint32_t active_count = 0;
+
+ calculate_tcp_clock();
+
+ lck_mtx_lock(listp->mtx);
+
+ listp->running = TRUE;
+
+ LIST_FOREACH_SAFE(te, &listp->lhead, le, next_te) {
+ uint32_t offset = 0;
+ uint32_t runtime = te->runtime;
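+ /*
+ * This entry's timer has not fired yet; just track the
+ * earliest runtime and its mode.
+ */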
+ if (te->index < TCPT_NONE && TSTMP_GT(runtime, tcp_now)) {
+ offset = timer_diff(runtime, 0, tcp_now, 0);
+ if (next_timer == 0 || offset < next_timer) {
+ next_timer = offset;
+ }
+ list_mode |= te->mode;
+ continue;
+ }
+
+ tp = TIMERENTRY_TO_TP(te);
+
+ /*
+ * Acquire an inp wantcnt on the inpcb so that the socket
+ * won't get detached even if tcp_close is called
+ */
+ if (in_pcb_checkstate(tp->t_inpcb, WNT_ACQUIRE, 0)
+ == WNT_STOPUSING) {
+ /*
+ * Somehow this pcb went into a dead state while
+ * on the timer list; just take it off the list.
+ * Since the timer list entry pointers are
+ * protected by the timer list lock, we can
+ * do it here without the socket lock.
+ */
+ if (TIMER_IS_ON_LIST(tp)) {
+ tp->t_flags &= ~(TF_TIMER_ONLIST);
+ LIST_REMOVE(&tp->tentry, le);
+ listp->entries--;
+
+ tp->tentry.le.le_next = NULL;
+ tp->tentry.le.le_prev = NULL;
+ }
+ continue;
+ }
+ active_count++;
+
+ /*
+ * Store the next timerentry pointer before releasing the
+ * list lock. If that entry has to be removed when we
+ * release the lock, this pointer will be updated to the
+ * element after that.
+ */
+ listp->next_te = next_te;
+
+ VERIFY_NEXT_LINK(&tp->tentry, le);
+ VERIFY_PREV_LINK(&tp->tentry, le);
+
+ lck_mtx_unlock(listp->mtx);
+
+ offset = tcp_run_conn_timer(tp, &te_mode,
+ listp->probe_if_index);
+
+ lck_mtx_lock(listp->mtx);
+
+ next_te = listp->next_te;
+ listp->next_te = NULL;
+
+ if (offset > 0 && te_mode != 0) {
+ list_mode |= te_mode;
+
+ if (next_timer == 0 || offset < next_timer)
+ next_timer = offset;
+ }
+ }
+
+ if (!LIST_EMPTY(&listp->lhead)) {
+ u_int16_t next_mode = 0;
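+ /*
+ * Pick the finest granularity required by the remaining timers
+ * or by a preference recorded while the list was running.
+ */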
+ if ((list_mode & TCP_TIMERLIST_10MS_MODE) ||
+ (listp->pref_mode & TCP_TIMERLIST_10MS_MODE))
+ next_mode = TCP_TIMERLIST_10MS_MODE;
+ else if ((list_mode & TCP_TIMERLIST_100MS_MODE) ||
+ (listp->pref_mode & TCP_TIMERLIST_100MS_MODE))
+ next_mode = TCP_TIMERLIST_100MS_MODE;
+ else
+ next_mode = TCP_TIMERLIST_500MS_MODE;
+
+ if (next_mode != TCP_TIMERLIST_500MS_MODE) {
+ listp->idleruns = 0;
+ } else {
+ /*
+ * The next required mode is slow mode, but if
+ * the last one was a faster mode and we have not
+ * had enough idle runs, repeat the last mode.
+ *
+ * We try to keep the timer list in fast mode for
+ * some idle time in expectation of new data.
+ */
+ if (listp->mode != next_mode &&
+ listp->idleruns < timer_fastmode_idlemax) {
+ listp->idleruns++;
+ next_mode = listp->mode;
+ next_timer = TCP_TIMER_100MS_QUANTUM;
+ } else {
+ listp->idleruns = 0;
+ }
+ }
+ listp->mode = next_mode;
+ if (listp->pref_offset != 0)
+ next_timer = min(listp->pref_offset, next_timer);
+
+ if (listp->mode == TCP_TIMERLIST_500MS_MODE)
+ next_timer = max(next_timer,
+ TCP_TIMER_500MS_QUANTUM);
+
+ tcp_sched_timerlist(next_timer);
+ } else {
+ /*
+ * The list is empty, so there is no connection-driven deadline;
+ * still run periodically, but at a much coarser granularity.
+ */
+ tcp_sched_timerlist(TCP_TIMERLIST_MAX_OFFSET);
+ }
+
+ listp->running = FALSE;
+ listp->pref_mode = 0;
+ listp->pref_offset = 0;
+ listp->probe_if_index = 0;
+
+ lck_mtx_unlock(listp->mtx);
+}
+
+/*
+ * Function to add this connection to the timer list if needed and to
+ * check whether the timerlist has to be rescheduled to run its timers
+ * on time.
+ */
+void
+tcp_sched_timers(struct tcpcb *tp)
+{
+ struct tcptimerentry *te = &tp->tentry;
+ u_int16_t index = te->index;
+ u_int16_t mode = te->mode;
+ struct tcptimerlist *listp = &tcp_timer_list;
+ int32_t offset = 0;
+ boolean_t list_locked = FALSE;
+
+ if (tp->t_inpcb->inp_state == INPCB_STATE_DEAD) {
+ /* Just return without adding the dead pcb to the list */
+ if (TIMER_IS_ON_LIST(tp)) {
+ tcp_remove_timer(tp);
+ }
+ return;
+ }
+
+ if (index == TCPT_NONE) {
+ /* Nothing to run */
+ tcp_remove_timer(tp);
+ return;
+ }
+
+ /*
+ * compute the offset at which the next timer for this connection
+ * has to run.
+ */
+ offset = timer_diff(te->runtime, 0, tcp_now, 0);
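+ /*
+ * The timer is already due or overdue; schedule it 1 ms out and
+ * count the occurrence in tcp_timer_advanced.
+ */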
+ if (offset <= 0) {
+ offset = 1;
+ tcp_timer_advanced++;
+ }
+
+ if (!TIMER_IS_ON_LIST(tp)) {
+ if (!list_locked) {
+ lck_mtx_lock(listp->mtx);
+ list_locked = TRUE;
+ }
+
+ LIST_INSERT_HEAD(&listp->lhead, te, le);
+ tp->t_flags |= TF_TIMER_ONLIST;
+
+ listp->entries++;
+ if (listp->entries > listp->maxentries)
+ listp->maxentries = listp->entries;
+
+ /* if the list is not scheduled, just schedule it */
+ if (!listp->scheduled)
+ goto schedule;
+ }
+
+ /*
+ * Timer entry is currently on the list, check if the list needs
+ * to be rescheduled.
+ */
+ if (need_to_resched_timerlist(te->runtime, mode)) {
+ tcp_resched_timerlist++;
+
+ if (!list_locked) {
+ lck_mtx_lock(listp->mtx);
+ list_locked = TRUE;
+ }
+
+ VERIFY_NEXT_LINK(te, le);
+ VERIFY_PREV_LINK(te, le);
+
+ if (listp->running) {
+ listp->pref_mode |= mode;
+ if (listp->pref_offset == 0 ||
+ offset < listp->pref_offset) {
+ listp->pref_offset = offset;
+ }
+ } else {
+ /*
+ * The list could have been rescheduled while
+ * this thread was waiting for the lock.
+ */
+ if (listp->scheduled) {
+ int32_t diff;
+ diff = timer_diff(listp->runtime, 0,
+ tcp_now, offset);
+ if (diff <= 0)
+ goto done;
+ else
+ goto schedule;
+ } else {
+ goto schedule;
+ }
+ }
+ }
+ goto done;
+
+schedule:
+ /*
+ * Since a connection with timers is getting scheduled, the timer
+ * list moves from idle to active state, which is why idleruns is
+ * reset.
+ */
+ if (mode & TCP_TIMERLIST_10MS_MODE) {
+ listp->mode = TCP_TIMERLIST_10MS_MODE;
+ listp->idleruns = 0;
+ offset = min(offset, TCP_TIMER_10MS_QUANTUM);
+ } else if (mode & TCP_TIMERLIST_100MS_MODE) {
+ if (listp->mode > TCP_TIMERLIST_100MS_MODE)
+ listp->mode = TCP_TIMERLIST_100MS_MODE;
+ listp->idleruns = 0;
+ offset = min(offset, TCP_TIMER_100MS_QUANTUM);
+ }
+ tcp_sched_timerlist(offset);
+
+done:
+ if (list_locked)
+ lck_mtx_unlock(listp->mtx);
+
+ return;
+}
+
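+/*
+ * Set tentry.index to the timer that will fire first for this connection
+ * and record the union of the modes of all active timers.
+ */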
+static inline void
+tcp_set_lotimer_index(struct tcpcb *tp)
+{
+ uint16_t i, lo_index = TCPT_NONE, mode = 0;
+ uint32_t lo_timer = 0;
+ for (i = 0; i < TCPT_NTIMERS; ++i) {
+ if (tp->t_timer[i] != 0) {
+ TCP_SET_TIMER_MODE(mode, i);
+ if (lo_timer == 0 || tp->t_timer[i] < lo_timer) {
+ lo_timer = tp->t_timer[i];
+ lo_index = i;
+ }
+ }
+ }
+ tp->tentry.index = lo_index;
+ tp->tentry.mode = mode;
+ VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);
+
+ if (tp->tentry.index != TCPT_NONE) {
+ tp->tentry.runtime = tp->tentry.timer_start
+ + tp->t_timer[tp->tentry.index];
+ if (tp->tentry.runtime == 0)
+ tp->tentry.runtime++;
+ }
+}
+
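+/*
+ * Recompute the connection's lowest timer and reschedule the timer list
+ * if necessary. Sockets in TIME_WAIT are skipped.
+ */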
+void
+tcp_check_timer_state(struct tcpcb *tp)
+{
+ lck_mtx_assert(&tp->t_inpcb->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
+
+ if (tp->t_inpcb->inp_flags2 & INP2_TIMEWAIT)
+ return;
+
+ tcp_set_lotimer_index(tp);
+
+ tcp_sched_timers(tp);
+ return;
+}
+
+static inline void
+tcp_cumulative_stat(u_int32_t cur, u_int32_t *prev, u_int32_t *dest)
+{
+ /* handle wrap around */
+ int32_t diff = (int32_t) (cur - *prev);
+ if (diff > 0)
+ *dest = diff;
+ else
+ *dest = 0;
+ *prev = cur;
+ return;
+}
+
+__private_extern__ void
+tcp_report_stats(void)
+{
+ struct nstat_sysinfo_data data;
+ struct sockaddr_in dst;
+ struct sockaddr_in6 dst6;
+ struct rtentry *rt = NULL;
+ static struct tcp_last_report_stats prev;
+ u_int64_t var, uptime;
+
+#define stat data.u.tcp_stats
+ if (((uptime = net_uptime()) - tcp_last_report_time) <
+ tcp_report_stats_interval)
+ return;
+
+ tcp_last_report_time = uptime;
+
+ bzero(&data, sizeof(data));
+ data.flags = NSTAT_SYSINFO_TCP_STATS;
+
+ bzero(&dst, sizeof(dst));
+ dst.sin_len = sizeof(dst);
+ dst.sin_family = AF_INET;
+
+ /* ipv4 avg rtt */
+ lck_mtx_lock(rnh_lock);
+ rt = rt_lookup(TRUE, (struct sockaddr *)&dst, NULL,
+ rt_tables[AF_INET], IFSCOPE_NONE);
+ lck_mtx_unlock(rnh_lock);
+ if (rt != NULL) {
+ RT_LOCK(rt);
+ if (rt_primary_default(rt, rt_key(rt)) &&
+ rt->rt_stats != NULL) {
+ stat.ipv4_avgrtt = rt->rt_stats->nstat_avg_rtt;
+ }
+ RT_UNLOCK(rt);
+ rtfree(rt);
+ rt = NULL;
+ }
+
+ /* ipv6 avg rtt */
+ bzero(&dst6, sizeof(dst6));
+ dst6.sin6_len = sizeof(dst6);
+ dst6.sin6_family = AF_INET6;
+
+ lck_mtx_lock(rnh_lock);
+ rt = rt_lookup(TRUE,(struct sockaddr *)&dst6, NULL,
+ rt_tables[AF_INET6], IFSCOPE_NONE);
+ lck_mtx_unlock(rnh_lock);
+ if (rt != NULL) {
+ RT_LOCK(rt);
+ if (rt_primary_default(rt, rt_key(rt)) &&
+ rt->rt_stats != NULL) {
+ stat.ipv6_avgrtt = rt->rt_stats->nstat_avg_rtt;
+ }
+ RT_UNLOCK(rt);
+ rtfree(rt);
+ rt = NULL;
+ }
+
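+ /*
+ * The rates below are percentages scaled by 1024 (shifted left
+ * by 10) for extra precision.
+ */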
+ /* send packet loss rate, shift by 10 for precision */
+ if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_sndrexmitpack > 0) {
+ var = tcpstat.tcps_sndrexmitpack << 10;
+ stat.send_plr = (var * 100) / tcpstat.tcps_sndpack;
+ }
+
+ /* recv packet loss rate, shift by 10 for precision */
+ if (tcpstat.tcps_rcvpack > 0 && tcpstat.tcps_recovered_pkts > 0) {
+ var = tcpstat.tcps_recovered_pkts << 10;
+ stat.recv_plr = (var * 100) / tcpstat.tcps_rcvpack;
+ }
+
+ /* RTO after tail loss, shift by 10 for precision */
+ if (tcpstat.tcps_sndrexmitpack > 0
+ && tcpstat.tcps_tailloss_rto > 0) {
+ var = tcpstat.tcps_tailloss_rto << 10;
+ stat.send_tlrto_rate =
+ (var * 100) / tcpstat.tcps_sndrexmitpack;
+ }
+
+ /* packet reordering */
+ if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_reordered_pkts > 0) {
+ var = tcpstat.tcps_reordered_pkts << 10;
+ stat.send_reorder_rate =
+ (var * 100) / tcpstat.tcps_sndpack;
+ }
+
+ if (tcp_ecn_outbound == 1)
+ stat.ecn_client_enabled = 1;
+ if (tcp_ecn_inbound == 1)
+ stat.ecn_server_enabled = 1;
+ tcp_cumulative_stat(tcpstat.tcps_connattempt,
+ &prev.tcps_connattempt, &stat.connection_attempts);
+ tcp_cumulative_stat(tcpstat.tcps_accepts,
+ &prev.tcps_accepts, &stat.connection_accepts);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_client_setup,
+ &prev.tcps_ecn_client_setup, &stat.ecn_client_setup);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_server_setup,
+ &prev.tcps_ecn_server_setup, &stat.ecn_server_setup);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_client_success,
+ &prev.tcps_ecn_client_success, &stat.ecn_client_success);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_server_success,
+ &prev.tcps_ecn_server_success, &stat.ecn_server_success);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_not_supported,
+ &prev.tcps_ecn_not_supported, &stat.ecn_not_supported);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_lost_syn,
+ &prev.tcps_ecn_lost_syn, &stat.ecn_lost_syn);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_lost_synack,
+ &prev.tcps_ecn_lost_synack, &stat.ecn_lost_synack);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ce,
+ &prev.tcps_ecn_recv_ce, &stat.ecn_recv_ce);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ece,
+ &prev.tcps_ecn_recv_ece, &stat.ecn_recv_ece);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_sent_ece,
+ &prev.tcps_ecn_sent_ece, &stat.ecn_sent_ece);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ce,
+ &prev.tcps_ecn_conn_recv_ce, &stat.ecn_conn_recv_ce);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ece,
+ &prev.tcps_ecn_conn_recv_ece, &stat.ecn_conn_recv_ece);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_conn_plnoce,
+ &prev.tcps_ecn_conn_plnoce, &stat.ecn_conn_plnoce);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_conn_pl_ce,
+ &prev.tcps_ecn_conn_pl_ce, &stat.ecn_conn_pl_ce);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_conn_nopl_ce,
+ &prev.tcps_ecn_conn_nopl_ce, &stat.ecn_conn_nopl_ce);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_synloss,
+ &prev.tcps_ecn_fallback_synloss, &stat.ecn_fallback_synloss);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_reorder,
+ &prev.tcps_ecn_fallback_reorder, &stat.ecn_fallback_reorder);
+ tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_ce,
+ &prev.tcps_ecn_fallback_ce, &stat.ecn_fallback_ce);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_rcv,
+ &prev.tcps_tfo_syn_data_rcv, &stat.tfo_syn_data_rcv);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req_rcv,
+ &prev.tcps_tfo_cookie_req_rcv, &stat.tfo_cookie_req_rcv);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_sent,
+ &prev.tcps_tfo_cookie_sent, &stat.tfo_cookie_sent);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_invalid,
+ &prev.tcps_tfo_cookie_invalid, &stat.tfo_cookie_invalid);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req,
+ &prev.tcps_tfo_cookie_req, &stat.tfo_cookie_req);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_rcv,
+ &prev.tcps_tfo_cookie_rcv, &stat.tfo_cookie_rcv);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_sent,
+ &prev.tcps_tfo_syn_data_sent, &stat.tfo_syn_data_sent);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_acked,
+ &prev.tcps_tfo_syn_data_acked, &stat.tfo_syn_data_acked);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_syn_loss,
+ &prev.tcps_tfo_syn_loss, &stat.tfo_syn_loss);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_blackhole,
+ &prev.tcps_tfo_blackhole, &stat.tfo_blackhole);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_wrong,
+ &prev.tcps_tfo_cookie_wrong, &stat.tfo_cookie_wrong);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_no_cookie_rcv,
+ &prev.tcps_tfo_no_cookie_rcv, &stat.tfo_no_cookie_rcv);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_heuristics_disable,
+ &prev.tcps_tfo_heuristics_disable, &stat.tfo_heuristics_disable);
+ tcp_cumulative_stat(tcpstat.tcps_tfo_sndblackhole,
+ &prev.tcps_tfo_sndblackhole, &stat.tfo_sndblackhole);
+
+ nstat_sysinfo_send_data(&data);
+
+#undef stat
+}
+
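+/*
+ * Ask the timer list to send probes on connections that are using the
+ * given interface. Only one probe request can be outstanding at a time;
+ * a conflicting request is counted and dropped.
+ */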
+void
+tcp_interface_send_probe(u_int16_t probe_if_index)
+{
+ int32_t offset = 0;
+ struct tcptimerlist *listp = &tcp_timer_list;
+
+ /* Make sure TCP clock is up to date */
+ calculate_tcp_clock();
+
+ lck_mtx_lock(listp->mtx);
+ if (listp->probe_if_index > 0) {
+ tcpstat.tcps_probe_if_conflict++;
+ goto done;
+ }
+
+ listp->probe_if_index = probe_if_index;
+ if (listp->running)
+ goto done;
+
+ /*
+ * Reschedule the timerlist to run within the next 10ms, which is
+ * the fastest that we can do.
+ */
+ offset = TCP_TIMER_10MS_QUANTUM;
+ if (listp->scheduled) {
+ int32_t diff;
+ diff = timer_diff(listp->runtime, 0, tcp_now, offset);
+ if (diff <= 0) {
+ /* The timer will fire sooner than what's needed */
+ goto done;
+ }
+ }
+ listp->mode = TCP_TIMERLIST_10MS_MODE;
+ listp->idleruns = 0;
+
+ tcp_sched_timerlist(offset);
+
+done:
+ lck_mtx_unlock(listp->mtx);
+ return;
+}
+
+/*
+ * Enable read probes on this connection, if:
+ * - it is in established state
+ * - doesn't have any data outstanding
+ * - the outgoing ifp matches
+ * - we have not already sent any read probes
+ */
+static void
+tcp_enable_read_probe(struct tcpcb *tp, struct ifnet *ifp)
+{
+ if (tp->t_state == TCPS_ESTABLISHED &&
+ tp->snd_max == tp->snd_una &&
+ tp->t_inpcb->inp_last_outifp == ifp &&
+ !(tp->t_flagsext & TF_DETECT_READSTALL) &&
+ tp->t_rtimo_probes == 0) {
+ tp->t_flagsext |= TF_DETECT_READSTALL;
+ tp->t_rtimo_probes = 0;
+ tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
+ TCP_TIMER_10MS_QUANTUM);
+ if (tp->tentry.index == TCPT_NONE) {
+ tp->tentry.index = TCPT_KEEP;
+ tp->tentry.runtime = tcp_now +
+ TCP_TIMER_10MS_QUANTUM;
+ } else {
+ int32_t diff = 0;
+
+ /* Reset runtime to be in next 10ms */
+ diff = timer_diff(tp->tentry.runtime, 0,
+ tcp_now, TCP_TIMER_10MS_QUANTUM);
+ if (diff > 0) {
+ tp->tentry.index = TCPT_KEEP;
+ tp->tentry.runtime = tcp_now +
+ TCP_TIMER_10MS_QUANTUM;
+ if (tp->tentry.runtime == 0)
+ tp->tentry.runtime++;
+ }
+ }
+ }
+}
+
+/*
+ * Disable read probe and reset the keep alive timer
+ */
+static void
+tcp_disable_read_probe(struct tcpcb *tp)
+{
+ if (tp->t_adaptive_rtimo == 0 &&
+ ((tp->t_flagsext & TF_DETECT_READSTALL) ||
+ tp->t_rtimo_probes > 0)) {
+ tcp_keepalive_reset(tp);
+ }
+}
+
+/*
+ * Reschedule the tcp timerlist in the next 10ms to re-enable read/write
+ * probes on connections going over a particular interface.
+ */
+void
+tcp_probe_connectivity(struct ifnet *ifp, u_int32_t enable)
+{
+ int32_t offset;
+ struct tcptimerlist *listp = &tcp_timer_list;
+ struct inpcbinfo *pcbinfo = &tcbinfo;
+ struct inpcb *inp, *nxt;
+
+ if (ifp == NULL)
+ return;
+
+ /* update clock */
+ calculate_tcp_clock();
+
+ /*
+ * Enable keep alive timer on all connections that are
+ * active/established on this interface.
+ */
+ lck_rw_lock_shared(pcbinfo->ipi_lock);
+
+ LIST_FOREACH_SAFE(inp, pcbinfo->ipi_listhead, inp_list, nxt) {
+ struct tcpcb *tp = NULL;
+ if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) ==
+ WNT_STOPUSING)
+ continue;
+
+ /* Acquire lock to look at the state of the connection */
+ tcp_lock(inp->inp_socket, 1, 0);
+
+ /* Release the want count */
+ if (inp->inp_ppcb == NULL ||
+ (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING)) {
+ tcp_unlock(inp->inp_socket, 1, 0);
+ continue;
+ }
+ tp = intotcpcb(inp);
+ if (enable)
+ tcp_enable_read_probe(tp, ifp);
+ else
+ tcp_disable_read_probe(tp);
+
+ tcp_unlock(inp->inp_socket, 1, 0);
+ }
+ lck_rw_done(pcbinfo->ipi_lock);
+
+ lck_mtx_lock(listp->mtx);
+ if (listp->running) {
+ listp->pref_mode |= TCP_TIMERLIST_10MS_MODE;
+ goto done;
+ }
+
+ /* Reschedule within the next 10ms */
+ offset = TCP_TIMER_10MS_QUANTUM;
+ if (listp->scheduled) {
+ int32_t diff;
+ diff = timer_diff(listp->runtime, 0, tcp_now, offset);
+ if (diff <= 0) {
+ /* The timer will fire sooner than what's needed */
+ goto done;
+ }
+ }
+ listp->mode = TCP_TIMERLIST_10MS_MODE;
+ listp->idleruns = 0;
+
+ tcp_sched_timerlist(offset);
+done:
+ lck_mtx_unlock(listp->mtx);
+ return;
+}
+
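+/*
+ * Apply the uplink MSS recommendation advertised by a cellular interface:
+ * lower t_maxopd/t_maxseg to the recommended ceiling and cache the
+ * previous value so it can be restored once the recommendation is lifted.
+ */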
+inline void
+tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp)
+{
+ struct if_cellular_status_v1 *ifsr;
+ u_int32_t optlen;
+ ifsr = &ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
+ if (ifsr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
+ optlen = tp->t_maxopd - tp->t_maxseg;
+
+ if (ifsr->mss_recommended ==
+ IF_CELL_UL_MSS_RECOMMENDED_NONE &&
+ tp->t_cached_maxopd > 0 &&
+ tp->t_maxopd < tp->t_cached_maxopd) {
+ tp->t_maxopd = tp->t_cached_maxopd;
+ tcpstat.tcps_mss_to_default++;
+ } else if (ifsr->mss_recommended ==
+ IF_CELL_UL_MSS_RECOMMENDED_MEDIUM &&
+ tp->t_maxopd > tcp_mss_rec_medium) {
+ tp->t_cached_maxopd = tp->t_maxopd;
+ tp->t_maxopd = tcp_mss_rec_medium;
+ tcpstat.tcps_mss_to_medium++;
+ } else if (ifsr->mss_recommended ==
+ IF_CELL_UL_MSS_RECOMMENDED_LOW &&
+ tp->t_maxopd > tcp_mss_rec_low) {
+ tp->t_cached_maxopd = tp->t_maxopd;
+ tp->t_maxopd = tcp_mss_rec_low;
+ tcpstat.tcps_mss_to_low++;
+ }
+ tp->t_maxseg = tp->t_maxopd - optlen;
+
+ /*
+ * Clear the cached value if it is the same as the current one.
+ */
+ if (tp->t_maxopd == tp->t_cached_maxopd)
+ tp->t_cached_maxopd = 0;
+ }
+}
+
+void
+tcp_update_mss_locked(struct socket *so, struct ifnet *ifp)
+{
+ struct inpcb *inp = sotoinpcb(so);
+ struct tcpcb *tp = intotcpcb(inp);
+
+ if (ifp == NULL && inp->inp_last_outifp == NULL)
+ return;
+
+ if (ifp == NULL)
+ ifp = inp->inp_last_outifp;
+
+ if (!IFNET_IS_CELLULAR(ifp)) {
+ /*
+ * This optimization is implemented for cellular
+ * networks only
+ */
+ return;
+ }
+ if (tp->t_state <= TCPS_CLOSE_WAIT) {
+ /*
+ * If the connection is currently doing or has done PMTU
+ * blackhole detection, do not change the MSS
+ */
+ if (tp->t_flags & TF_BLACKHOLE)
+ return;
+ if (ifp->if_link_status == NULL)
+ return;
+ tcp_update_mss_core(tp, ifp);
+ }
+}
+
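+/*
+ * Interval timer for the TCP pcb list: checks the extended background
+ * idle time of each socket and, when INPCBINFO_UPDATE_MSS is set,
+ * updates the MSS of connections going over cellular interfaces.
+ */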
+void
+tcp_itimer(struct inpcbinfo *ipi)
+{
+ struct inpcb *inp, *nxt;
+
+ if (lck_rw_try_lock_exclusive(ipi->ipi_lock) == FALSE) {
+ if (tcp_itimer_done == TRUE) {
+ tcp_itimer_done = FALSE;
+ atomic_add_32(&ipi->ipi_timer_req.intimer_fast, 1);
+ return;
+ }
+ /* Could not get the lock without blocking; take it exclusively and wait */
+ lck_rw_lock_exclusive(ipi->ipi_lock);
+ }
+ tcp_itimer_done = TRUE;
+
+ LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
+ struct socket *so;
+
+ if (inp->inp_ppcb == NULL ||
+ in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING)
+ continue;
+ so = inp->inp_socket;
+ tcp_lock(so, 1, 0);
+ if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
+ tcp_unlock(so, 1, 0);
+ continue;
+ }
+ so_check_extended_bk_idle_time(so);
+ if (ipi->ipi_flags & INPCBINFO_UPDATE_MSS) {
+ tcp_update_mss_locked(so, NULL);
+ }
+ tcp_unlock(so, 1, 0);
+ }
+
+ ipi->ipi_flags &= ~INPCBINFO_UPDATE_MSS;
+ lck_rw_done(ipi->ipi_lock);
+}