+
+ tp->request_r_scale = max(tcp_win_scale, tp->request_r_scale);
+ maxsockbufsize = ((so->so_rcv.sb_flags & SB_USRSIZE) != 0) ?
+ so->so_rcv.sb_hiwat : tcp_autorcvbuf_max;
+
+ while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
+ (TCP_MAXWIN << tp->request_r_scale) < maxsockbufsize)
+ tp->request_r_scale++;
+ tp->request_r_scale = min(tp->request_r_scale, TCP_MAX_WINSHIFT);
+
+}
+
+int
+tcp_notsent_lowat_check(struct socket *so) {
+	struct inpcb *inp = sotoinpcb(so);
+	struct tcpcb *tp = NULL;
+	int notsent = 0;
+	if (inp != NULL) {
+		tp = intotcpcb(inp);
+	}
+
+	/* No TCP control block (socket detached or disconnected):
+	 * there is nothing pending to send, so report writable and
+	 * let the caller discover the socket error on the next send.
+	 * Previously tp was dereferenced unconditionally below, which
+	 * would fault here.
+	 */
+	if (tp == NULL) {
+		return(1);
+	}
+
+	/* Bytes sitting in the send buffer that have not yet been
+	 * handed to the network (buffered minus in-flight).
+	 */
+	notsent = so->so_snd.sb_cc -
+	    (tp->snd_nxt - tp->snd_una);
+
+	/* When we send a FIN or SYN, not_sent can be negative.
+	 * In that case also we need to send a write event to the
+	 * process if it is waiting. In the FIN case, it will
+	 * get an error from send because cantsendmore will be set.
+	 */
+	if (notsent <= tp->t_notsent_lowat) {
+		return(1);
+	}
+
+	/* When Nagle's algorithm is not disabled, it is better
+	 * to wakeup the client until there is atleast one
+	 * maxseg of data to write.
+	 */
+	if ((tp->t_flags & TF_NODELAY) == 0 &&
+	    notsent > 0 && notsent < tp->t_maxseg) {
+		return(1);
+	}
+	return(0);
+}
+
+/*
+ * Record that the sequence range [start, end] (endpoints inclusive,
+ * judging by the "- 1"/"+ 1" trimming below) has been retransmitted.
+ * tp->t_rxt_segments is maintained as a singly linked list of disjoint
+ * ranges sorted by rx_start; rx_count on each node counts how many
+ * times that range has been retransmitted.
+ */
+void
+tcp_rxtseg_insert(struct tcpcb *tp, tcp_seq start, tcp_seq end) {
+	struct tcp_rxt_seg *rxseg = NULL, *prev = NULL, *next = NULL;
+	u_int32_t rxcount = 0;
+
+	/* Starting with an empty list: remember the current left edge
+	 * of the send window as the reference point for later DSACK
+	 * processing.
+	 */
+	if (SLIST_EMPTY(&tp->t_rxt_segments))
+		tp->t_dsack_lastuna = tp->snd_una;
+	/*
+	 * First check if there is a segment already existing for this
+	 * sequence space.
+	 */
+
+	/* Position in the sorted list: prev ends up as the last node
+	 * whose rx_start <= start; next is the first node with
+	 * rx_start > start, or NULL if the walk ran off the end
+	 * (SLIST_FOREACH leaves the iterator NULL in that case).
+	 */
+	SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
+		if (SEQ_GT(rxseg->rx_start, start))
+			break;
+		prev = rxseg;
+	}
+	next = rxseg;
+
+	/* check if prev seg is for this sequence */
+	if (prev != NULL && SEQ_LEQ(prev->rx_start, start) &&
+	    SEQ_GEQ(prev->rx_end, end)) {
+		/* [start, end] is fully contained in prev: just bump
+		 * its retransmit count, nothing to insert.
+		 */
+		prev->rx_count++;
+		return;
+	}
+
+	/*
+	 * There are a couple of possibilities at this point.
+	 * 1. prev overlaps with the beginning of this sequence
+	 * 2. next overlaps with the end of this sequence
+	 * 3. there is no overlap.
+	 */
+
+	if (prev != NULL && SEQ_GT(prev->rx_end, start)) {
+		if (prev->rx_start == start && SEQ_GT(end, prev->rx_end)) {
+			/* New range starts exactly at prev and extends
+			 * beyond it: count the retransmit on prev and
+			 * insert only the portion past prev's end.
+			 */
+			start = prev->rx_end + 1;
+			prev->rx_count++;
+		} else {
+			/* Partial overlap: truncate prev so ranges stay
+			 * disjoint, and carry its count into the new
+			 * node allocated below.
+			 */
+			prev->rx_end = (start - 1);
+			rxcount = prev->rx_count;
+		}
+	}
+
+	if (next != NULL && SEQ_LT(next->rx_start, end)) {
+		if (SEQ_LEQ(next->rx_end, end)) {
+			/* next ends within the new range: bump next's
+			 * count and truncate the new range to end just
+			 * before next.
+			 */
+			end = next->rx_start - 1;
+			next->rx_count++;
+		} else {
+			/* New range overlaps next's front: trim next
+			 * forward so the list stays disjoint, and carry
+			 * its count.
+			 */
+			next->rx_start = end + 1;
+			rxcount = next->rx_count;
+		}
+	}
+	/* Nothing left to insert after the trimming above.
+	 * NOTE(review): this also discards the start == end case, i.e.
+	 * a single-byte leftover range — confirm that is intentional.
+	 */
+	if (!SEQ_LT(start, end))
+		return;
+
+	rxseg = (struct tcp_rxt_seg *) zalloc(tcp_rxt_seg_zone);
+	if (rxseg == NULL) {
+		/* Zone allocation failed: silently drop the record;
+		 * tracking is best-effort.
+		 */
+		return;
+	}
+	bzero(rxseg, sizeof(*rxseg));
+	rxseg->rx_start = start;
+	rxseg->rx_end = end;
+	/* Inherit the count carried over from a trimmed neighbor, plus
+	 * one for this retransmission.
+	 */
+	rxseg->rx_count = rxcount + 1;
+
+	/* Keep the list sorted: insert after prev, or at the head when
+	 * the new range precedes every existing node.
+	 */
+	if (prev != NULL) {
+		SLIST_INSERT_AFTER(prev, rxseg, rx_link);
+	} else {
+		SLIST_INSERT_HEAD(&tp->t_rxt_segments, rxseg, rx_link);
+	}
+	return;
+}
+
+struct tcp_rxt_seg *
+tcp_rxtseg_find(struct tcpcb *tp, tcp_seq start, tcp_seq end)
+{
+ struct tcp_rxt_seg *rxseg;
+ if (SLIST_EMPTY(&tp->t_rxt_segments))
+ return (NULL);
+
+ SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) {
+ if (SEQ_LEQ(rxseg->rx_start, start) &&
+ SEQ_GEQ(rxseg->rx_end, end))
+ return (rxseg);
+ if (SEQ_GT(rxseg->rx_start, start))
+ break;
+ }
+ return (NULL);