/*
 * Record that the sequence range [start, end] (inclusive) has been
 * retransmitted, keeping tp->t_rxt_segments sorted by rx_start with a
 * per-range retransmit count.  Overlaps with existing entries are
 * merged/trimmed rather than stored twice.
 */
void
tcp_rxtseg_insert(struct tcpcb *tp, tcp_seq start, tcp_seq end) {
	struct tcp_rxt_seg *rxseg = NULL, *prev = NULL, *next = NULL;
	u_int32_t rxcount = 0;	/* count inherited from a trimmed neighbor */

	/*
	 * First range recorded since the list was last cleaned: remember
	 * the left edge (snd_una) of the window being tracked.
	 */
	if (SLIST_EMPTY(&tp->t_rxt_segments))
		tp->t_dsack_lastuna = tp->snd_una;
	/*
	 * First check if there is a segment already existing for this
	 * sequence space.
	 */

	/*
	 * Walk the sorted list.  Afterwards: prev is the last entry with
	 * rx_start <= start (or NULL), and next (== rxseg) is the first
	 * entry with rx_start > start, or NULL if the walk ran off the end.
	 */
	SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
		if (SEQ_GT(rxseg->rx_start, start))
			break;
		prev = rxseg;
	}
	next = rxseg;

	/* check if prev seg is for this sequence */
	if (prev != NULL && SEQ_LEQ(prev->rx_start, start) &&
	    SEQ_GEQ(prev->rx_end, end)) {
		/* [start, end] is already fully covered; just bump the count. */
		prev->rx_count++;
		return;
	}

	/*
	 * There are a couple of possibilities at this point.
	 * 1. prev overlaps with the beginning of this sequence
	 * 2. next overlaps with the end of this sequence
	 * 3. there is no overlap.
	 */

	if (prev != NULL && SEQ_GT(prev->rx_end, start)) {
		if (prev->rx_start == start && SEQ_GT(end, prev->rx_end)) {
			/*
			 * prev is a strict prefix of [start, end]: count the
			 * retransmit on prev and track only the remainder.
			 */
			start = prev->rx_end + 1;
			prev->rx_count++;
		} else {
			/*
			 * Trim prev back to just before start; the new entry
			 * inherits prev's count (incremented at allocation).
			 */
			prev->rx_end = (start - 1);
			rxcount = prev->rx_count;
		}
	}

	if (next != NULL && SEQ_LT(next->rx_start, end)) {
		if (SEQ_LEQ(next->rx_end, end)) {
			/*
			 * next lies within [start, end]: count the retransmit
			 * on next and keep only the bytes before it.
			 * NOTE(review): any bytes after next->rx_end up to end
			 * are dropped here (and any further overlapping
			 * entries beyond next are not visited) -- confirm
			 * this partial merge is intentional.
			 */
			end = next->rx_start - 1;
			next->rx_count++;
		} else {
			/*
			 * next extends past end: shrink next's head; the new
			 * entry inherits next's count (incremented below).
			 */
			next->rx_start = end + 1;
			rxcount = next->rx_count;
		}
	}
	/*
	 * Nothing (or too little) left to record after trimming.  Note this
	 * also skips the start == end case -- presumably a one-byte range is
	 * deliberately not tracked; verify against callers.
	 */
	if (!SEQ_LT(start, end))
		return;

	rxseg = (struct tcp_rxt_seg *) zalloc(tcp_rxt_seg_zone);
	if (rxseg == NULL) {
		return;
	}
	bzero(rxseg, sizeof(*rxseg));
	rxseg->rx_start = start;
	rxseg->rx_end = end;
	rxseg->rx_count = rxcount + 1;

	/* Insert after prev (or at the head) to keep the list sorted. */
	if (prev != NULL) {
		SLIST_INSERT_AFTER(prev, rxseg, rx_link);
	} else {
		SLIST_INSERT_HEAD(&tp->t_rxt_segments, rxseg, rx_link);
	}
	return;
}
+
+struct tcp_rxt_seg *
+tcp_rxtseg_find(struct tcpcb *tp, tcp_seq start, tcp_seq end)
+{
+ struct tcp_rxt_seg *rxseg;
+ if (SLIST_EMPTY(&tp->t_rxt_segments))
+ return (NULL);
+
+ SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
+ if (SEQ_LEQ(rxseg->rx_start, start) &&
+ SEQ_GEQ(rxseg->rx_end, end))
+ return (rxseg);
+ if (SEQ_GT(rxseg->rx_start, start))
+ break;
+ }
+ return (NULL);
+}
+
+void
+tcp_rxtseg_clean(struct tcpcb *tp)
+{
+ struct tcp_rxt_seg *rxseg, *next;
+
+ SLIST_FOREACH_SAFE(rxseg, &tp->t_rxt_segments, rx_link, next) {
+ SLIST_REMOVE(&tp->t_rxt_segments, rxseg,
+ tcp_rxt_seg, rx_link);
+ zfree(tcp_rxt_seg_zone, rxseg);
+ }
+ tp->t_dsack_lastuna = tp->snd_max;
+}
+
+boolean_t
+tcp_rxtseg_detect_bad_rexmt(struct tcpcb *tp, tcp_seq th_ack)
+{
+ boolean_t bad_rexmt;
+ struct tcp_rxt_seg *rxseg;
+
+ if (SLIST_EMPTY(&tp->t_rxt_segments))
+ return (FALSE);
+
+ /*
+ * If all of the segments in this window are not cumulatively
+ * acknowledged, then there can still be undetected packet loss.
+ * Do not restore congestion window in that case.
+ */
+ if (SEQ_LT(th_ack, tp->snd_recover))
+ return (FALSE);
+
+ bad_rexmt = TRUE;
+ SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
+ if (rxseg->rx_count > 1 ||
+ !(rxseg->rx_flags & TCP_RXT_SPURIOUS)) {
+ bad_rexmt = FALSE;
+ break;
+ }
+ }
+ return (bad_rexmt);
+}
+
+boolean_t
+tcp_rxtseg_dsack_for_tlp(struct tcpcb *tp)
+{
+ boolean_t dsack_for_tlp = FALSE;
+ struct tcp_rxt_seg *rxseg;
+ if (SLIST_EMPTY(&tp->t_rxt_segments))
+ return (FALSE);
+
+ SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
+ if (rxseg->rx_count == 1 &&
+ SLIST_NEXT(rxseg,rx_link) == NULL &&
+ (rxseg->rx_flags & TCP_RXT_DSACK_FOR_TLP)) {
+ dsack_for_tlp = TRUE;
+ break;
+ }
+ }
+ return (dsack_for_tlp);
+}
+
+u_int32_t
+tcp_rxtseg_total_size(struct tcpcb *tp) {
+ struct tcp_rxt_seg *rxseg;
+ u_int32_t total_size = 0;
+
+ SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
+ total_size += (rxseg->rx_end - rxseg->rx_start) + 1;
+ }
+ return (total_size);
+}
+
+void
+tcp_get_connectivity_status(struct tcpcb *tp,
+ struct tcp_conn_status *connstatus)
+{
+ if (tp == NULL || connstatus == NULL)
+ return;
+ bzero(connstatus, sizeof(*connstatus));
+ if (tp->t_rxtshift >= TCP_CONNECTIVITY_PROBES_MAX) {
+ if (TCPS_HAVEESTABLISHED(tp->t_state)) {
+ connstatus->write_probe_failed = 1;
+ } else {
+ connstatus->conn_probe_failed = 1;
+ }
+ }
+ if (tp->t_rtimo_probes >= TCP_CONNECTIVITY_PROBES_MAX)
+ connstatus->read_probe_failed = 1;
+ if (tp->t_inpcb != NULL && tp->t_inpcb->inp_last_outifp != NULL
+ && (tp->t_inpcb->inp_last_outifp->if_eflags & IFEF_PROBE_CONNECTIVITY))
+ connstatus->probe_activated = 1;
+ return;
+}
+
+boolean_t
+tfo_enabled(const struct tcpcb *tp)
+{
+ return !!(tp->t_flagsext & TF_FASTOPEN);
+}
+
+void
+tcp_disable_tfo(struct tcpcb *tp)
+{
+ tp->t_flagsext &= ~TF_FASTOPEN;
+}