+
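+/*
+ * Bump the named per-flow counter on the interface's IPv4 or IPv6
+ * stat block, depending on the flow's address family.
+ */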
+#define IFP_PER_FLOW_STAT(_ipv4_, _stat_) do { \
+ if (_ipv4_) { \
+ ifp->if_ipv4_stat->_stat_++; \
+ } else { \
+ ifp->if_ipv6_stat->_stat_++; \
+ } \
+} while (0)
+
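+/*
+ * A flow counts as ECN-enabled only if all bits in TE_ECN_ON are set
+ * in its ecn_flags.
+ */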
+#define FLOW_ECN_ENABLED(_flags_) \
+ (((_flags_) & (TE_ECN_ON)) == (TE_ECN_ON))
+
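+/*
+ * Fold the stats collected for a single TCP flow into the per-interface
+ * counters of the interface the flow ran over.
+ */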
+void
+tcp_update_stats_per_flow(struct ifnet_stats_per_flow *ifs,
+ struct ifnet *ifp)
+{
+ if (ifp == NULL || !IF_FULLY_ATTACHED(ifp))
+ return;
+
+ ifnet_lock_shared(ifp);
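+ /* Record the outcome of ECN negotiation, split by client/server role */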
+ if (ifs->ecn_flags & TE_SETUPSENT) {
+ if (ifs->ecn_flags & TE_CLIENT_SETUP) {
+ IFP_PER_FLOW_STAT(ifs->ipv4, ecn_client_setup);
+ if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
+ IFP_PER_FLOW_STAT(ifs->ipv4,
+ ecn_client_success);
+ } else if (ifs->ecn_flags & TE_LOST_SYN) {
+ IFP_PER_FLOW_STAT(ifs->ipv4,
+ ecn_syn_lost);
+ } else {
+ IFP_PER_FLOW_STAT(ifs->ipv4,
+ ecn_peer_nosupport);
+ }
+ } else {
+ IFP_PER_FLOW_STAT(ifs->ipv4, ecn_server_setup);
+ if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
+ IFP_PER_FLOW_STAT(ifs->ipv4,
+ ecn_server_success);
+ } else if (ifs->ecn_flags & TE_LOST_SYNACK) {
+ IFP_PER_FLOW_STAT(ifs->ipv4,
+ ecn_synack_lost);
+ } else {
+ IFP_PER_FLOW_STAT(ifs->ipv4,
+ ecn_peer_nosupport);
+ }
+ }
+ } else {
+ IFP_PER_FLOW_STAT(ifs->ipv4, ecn_off_conn);
+ }
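+ /* For ECN-enabled flows, correlate CE/ECE marks with loss and reordering */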
+ if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
+ if (ifs->ecn_flags & TE_RECV_ECN_CE) {
+ tcpstat.tcps_ecn_conn_recv_ce++;
+ IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_recv_ce);
+ }
+ if (ifs->ecn_flags & TE_RECV_ECN_ECE) {
+ tcpstat.tcps_ecn_conn_recv_ece++;
+ IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_recv_ece);
+ }
+ if (ifs->ecn_flags & (TE_RECV_ECN_CE | TE_RECV_ECN_ECE)) {
+ if (ifs->txretransmitbytes > 0 ||
+ ifs->rxoutoforderbytes > 0) {
+ tcpstat.tcps_ecn_conn_pl_ce++;
+ IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_plce);
+ } else {
+ tcpstat.tcps_ecn_conn_nopl_ce++;
+ IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_noplce);
+ }
+ } else {
+ if (ifs->txretransmitbytes > 0 ||
+ ifs->rxoutoforderbytes > 0) {
+ tcpstat.tcps_ecn_conn_plnoce++;
+ IFP_PER_FLOW_STAT(ifs->ipv4, ecn_conn_plnoce);
+ }
+ }
+ }
+
+ /* The remaining stats are meaningful only for non-local connections */
+ if (ifs->local) {
+ ifnet_lock_done(ifp);
+ return;
+ }
+
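+ /* Timestamp the address-family stat block and record ECN on/off perf stats */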
+ if (ifs->ipv4) {
+ ifp->if_ipv4_stat->timestamp = net_uptime();
+ if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
+ tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv4_stat->ecn_on);
+ } else {
+ tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv4_stat->ecn_off);
+ }
+ } else {
+ ifp->if_ipv6_stat->timestamp = net_uptime();
+ if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
+ tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv6_stat->ecn_on);
+ } else {
+ tcp_flow_ecn_perf_stats(ifs, &ifp->if_ipv6_stat->ecn_off);
+ }
+ }
+
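+ /* Count retransmit drops, ECN fallback events and CE/ECE receipt */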
+ if (ifs->rxmit_drop) {
+ if (FLOW_ECN_ENABLED(ifs->ecn_flags)) {
+ IFP_PER_FLOW_STAT(ifs->ipv4, ecn_on.rxmit_drop);
+ } else {
+ IFP_PER_FLOW_STAT(ifs->ipv4, ecn_off.rxmit_drop);
+ }
+ }
+ if (ifs->ecn_fallback_synloss)
+ IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_synloss);
+ if (ifs->ecn_fallback_droprst)
+ IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_droprst);
+ if (ifs->ecn_fallback_droprxmt)
+ IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_droprxmt);
+ if (ifs->ecn_fallback_ce)
+ IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_ce);
+ if (ifs->ecn_fallback_reorder)
+ IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_reorder);
+ if (ifs->ecn_recv_ce > 0)
+ IFP_PER_FLOW_STAT(ifs->ipv4, ecn_recv_ce);
+ if (ifs->ecn_recv_ece > 0)
+ IFP_PER_FLOW_STAT(ifs->ipv4, ecn_recv_ece);
+
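+ /* Aggregate the flow's performance samples into ifp->if_lim_stat */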
+ tcp_flow_lim_stats(ifs, &ifp->if_lim_stat);
+ ifnet_lock_done(ifp);
+}