+/* Report whether a socket buffer may take more memory from the
+ * system for auto-tuning. Returns 1 when growth is allowed, 0 when
+ * overall mbuf usage or this buffer's size already hit the cap.
+ */
+u_int8_t
+tcp_cansbgrow(struct sockbuf *sb)
+{
+ /* Host-wide limit expressed in MSIZE buffers: socket buffers may
+ * consume at most half of the available mbuf cluster space.
+ */
+ u_int32_t host_limit = ((nmbclusters >> 1) << (MCLSHIFT - MSIZESHIFT));
+
+ /* Per-socket-buffer byte limit, optimized for up to 16 socket
+ * buffers sharing the mbuf pool.
+ */
+ u_int32_t per_sb_limit = ((nmbclusters >> 4) << MCLSHIFT);
+
+ if (total_sbmb_cnt >= host_limit || sb->sb_hiwat >= per_sb_limit)
+ return(0);
+ return(1);
+}
+
+/* Grow the receive socket buffer toward newsize (never shrink) and
+ * record the ideal size. Both values are clamped to the auto-tuning
+ * maximum and to the largest window representable under the receive
+ * window scale negotiated at connection setup.
+ */
+void
+tcp_sbrcv_reserve(struct tcpcb *tp,
+ struct sockbuf *sbrcv,
+ u_int32_t newsize,
+ u_int32_t idealsize) {
+
+ /* Clamp the request once: global auto-tuning cap, then the
+ * window-scale cap.
+ */
+ u_int32_t capped = min(min(newsize, tcp_autorcvbuf_max),
+ TCP_MAXWIN << tp->rcv_scale);
+
+ /* Only grow; if sbreserve() accepts the new size, update the
+ * ideal size as well.
+ */
+ if (capped > sbrcv->sb_hiwat &&
+ (sbreserve(sbrcv, capped) == 1)) {
+ u_int32_t ideal = (idealsize != 0) ? idealsize : capped;
+
+ sbrcv->sb_idealsize = min(max(sbrcv->sb_idealsize, ideal),
+ tcp_autorcvbuf_max);
+
+ /* Re-apply the limit imposed by the advertised
+ * window scale.
+ */
+ sbrcv->sb_idealsize = min(sbrcv->sb_idealsize,
+ TCP_MAXWIN << tp->rcv_scale);
+ }
+}
+
+/*
+ * This function is used to grow a receive socket buffer. It
+ * will take into account system-level memory usage and the
+ * bandwidth available on the link to make a decision.
+ *
+ * tp     - connection control block; rfbuf_ts/rfbuf_cnt hold the
+ *          current bandwidth measurement (start timestamp, bytes seen).
+ * sbrcv  - the receive socket buffer being tuned.
+ * to     - parsed TCP options of the incoming segment (to_tsecr used).
+ * pktlen - payload length of the incoming segment, accumulated into
+ *          the per-RTT byte count.
+ *
+ * Every path except the ones that keep accumulating bytes falls
+ * through to "out", which restarts the measurement.
+ */
+static void
+tcp_sbrcv_grow(struct tcpcb *tp, struct sockbuf *sbrcv,
+ struct tcpopt *to, u_int32_t pktlen) {
+
+ /* Growth is impossible when auto-tuning is off (globally or for
+ * this buffer), system mbuf usage forbids it, or the buffer is
+ * already at the auto-tuning maximum.
+ */
+ if (tcp_do_autorcvbuf == 0 ||
+ (sbrcv->sb_flags & SB_AUTOSIZE) == 0 ||
+ tcp_cansbgrow(sbrcv) == 0 ||
+ sbrcv->sb_hiwat >= tcp_autorcvbuf_max) {
+ /* Can not resize the socket buffer, just return */
+ goto out;
+ }
+
+ if (TSTMP_GT(tcp_now,
+ tp->rfbuf_ts + TCPTV_RCVBUFIDLE)) {
+ /* If there has been an idle period in the
+ * connection, just restart the measurement
+ */
+ goto out;
+ }
+
+ if ((tp->t_flags & (TF_REQ_TSTMP | TF_RCVD_TSTMP)) !=
+ (TF_REQ_TSTMP | TF_RCVD_TSTMP)) {
+ /*
+ * Timestamp option is not supported on this connection,
+ * so RTT cannot be measured. Instead, count bytes over a
+ * fixed quantum; if enough arrived, grow straight to the
+ * maximum high water mark.
+ */
+ if (TSTMP_GEQ(tcp_now,
+ tp->rfbuf_ts + TCPTV_RCVNOTS_QUANTUM)) {
+ if (tp->rfbuf_cnt >= TCP_RCVNOTS_BYTELEVEL) {
+ tcp_sbrcv_reserve(tp, sbrcv,
+ tcp_autorcvbuf_max, 0);
+ }
+ goto out;
+ } else {
+ /* Quantum not yet elapsed: keep counting. */
+ tp->rfbuf_cnt += pktlen;
+ return;
+ }
+ } else if (to->to_tsecr != 0) {
+ /* If the timestamp shows that one RTT has
+ * completed, we can stop counting the
+ * bytes. Here we consider increasing
+ * the socket buffer if it fits the following
+ * criteria:
+ * 1. the bandwidth measured in last rtt, is more
+ * than half of sb_hiwat, this will help to scale the
+ * buffer according to the bandwidth on the link.
+ * 2. the space left in sbrcv is less than
+ * one fourth of the bandwidth measured in last rtt, this
+ * will help to accommodate an application reading slowly.
+ */
+ if (TSTMP_GEQ(to->to_tsecr, tp->rfbuf_ts)) {
+ if ((tp->rfbuf_cnt > (sbrcv->sb_hiwat -
+ (sbrcv->sb_hiwat >> tcp_rbuf_hiwat_shift)) ||
+ (sbrcv->sb_hiwat - sbrcv->sb_cc) <
+ (tp->rfbuf_cnt >> tcp_rbuf_win_shift))) {
+ u_int32_t rcvbuf_inc;
+ /*
+ * Increment the receive window by a multiple of
+ * maximum sized segments. This will prevent a
+ * connection from sending smaller segments on
+ * wire if it is limited by the receive window.
+ *
+ * Set the ideal size based on current bandwidth
+ * measurements. We set the ideal size on receive
+ * socket buffer to be twice the bandwidth delay
+ * product.
+ */
+ rcvbuf_inc = tp->t_maxseg << tcp_autorcvbuf_inc_shift;
+ tcp_sbrcv_reserve(tp, sbrcv,
+ sbrcv->sb_hiwat + rcvbuf_inc,
+ (tp->rfbuf_cnt * 2));
+ }
+ goto out;
+ } else {
+ /* Echoed timestamp predates the measurement
+ * start: still inside the RTT, keep counting.
+ */
+ tp->rfbuf_cnt += pktlen;
+ return;
+ }
+ }
+ /* NOTE(review): when timestamps are negotiated but to_tsecr == 0
+ * we fall through and restart the measurement — presumably a
+ * segment without a valid echo; confirm against option parsing.
+ */
+out:
+ /* Restart the measurement */
+ tp->rfbuf_ts = 0;
+ tp->rfbuf_cnt = 0;
+ return;
+}
+
+/* This function will trim the excess space added to the socket buffer
+ * to help a slow-reading app. The ideal-size of a socket buffer depends
+ * on the link bandwidth or it is set by an application and we aim to
+ * reach that size.
+ *
+ * Trimming only happens when auto receive-buffer tuning is enabled,
+ * an ideal size has been established, and the current high water mark
+ * exceeds it. The buffer is shrunk via sbreserve(), never below the
+ * data currently queued plus a small headroom.
+ */
+void
+tcp_sbrcv_trim(struct tcpcb *tp, struct sockbuf *sbrcv) {
+ if (tcp_do_autorcvbuf == 1 && sbrcv->sb_idealsize > 0 &&
+ sbrcv->sb_hiwat > sbrcv->sb_idealsize) {
+ int32_t trim;
+ /* compute the difference between ideal and current sizes */
+ u_int32_t diff = sbrcv->sb_hiwat - sbrcv->sb_idealsize;
+
+ /* Compute the maximum advertised window for
+ * this connection.
+ * NOTE(review): rcv_adv - rcv_nxt is sequence-number
+ * arithmetic and assumes rcv_adv >= rcv_nxt; if that ever
+ * fails, advwin wraps to a huge value and blocks trimming —
+ * confirm callers maintain this invariant.
+ */
+ u_int32_t advwin = tp->rcv_adv - tp->rcv_nxt;
+
+ /* How much can we trim the receive socket buffer?
+ * 1. it can not be trimmed beyond the max rcv win advertised
+ * 2. if possible, leave 1/16 of bandwidth*delay to
+ * avoid closing the win completely
+ */
+ u_int32_t leave = max(advwin, (sbrcv->sb_idealsize >> 4));
+
+ /* Sometimes leave can be zero, in that case leave at least
+ * a few segments worth of space.
+ */
+ if (leave == 0)
+ leave = tp->t_maxseg << tcp_autorcvbuf_inc_shift;
+
+ /* trim may come out negative when queued data plus headroom
+ * already exceeds sb_hiwat; the (trim > 0) check below makes
+ * that a no-op. diff caps the trim so we never undershoot
+ * the ideal size.
+ */
+ trim = sbrcv->sb_hiwat - (sbrcv->sb_cc + leave);
+ trim = imin(trim, (int32_t)diff);
+
+ if (trim > 0)
+ sbreserve(sbrcv, (sbrcv->sb_hiwat - trim));
+ }
+}
+
+/* We may need to trim the send socket buffer size for two reasons:
+ * 1. if the rtt seen on the connection is climbing up, we do not
+ * want to fill the buffers any more.
+ * 2. if the congestion win on the socket backed off, there is no need
+ * to hold more mbufs for that connection than what the cwnd will allow.
+ *
+ * The trim target is the ideal size, but never below the data
+ * currently queued in the buffer. SB_TRIM is cleared once the high
+ * water mark is back at (or below) the ideal size.
+ */
+void
+tcp_sbsnd_trim(struct sockbuf *sbsnd) {
+ if (tcp_do_autosendbuf == 1 &&
+ ((sbsnd->sb_flags & (SB_AUTOSIZE | SB_TRIM)) ==
+ (SB_AUTOSIZE | SB_TRIM)) &&
+ (sbsnd->sb_idealsize > 0) &&
+ (sbsnd->sb_hiwat > sbsnd->sb_idealsize)) {
+ /* New high water mark: the ideal size, or the queued byte
+ * count when the application still has more than that in
+ * flight.
+ */
+ sbreserve(sbsnd, max(sbsnd->sb_idealsize, sbsnd->sb_cc));
+ }
+ if (sbsnd->sb_hiwat <= sbsnd->sb_idealsize)
+ sbsnd->sb_flags &= ~(SB_TRIM);
+}
+
+/*
+ * If timestamp option was not negotiated on this connection
+ * and this connection is on the receiving side of a stream
+ * then we can not measure the delay on the link accurately.
+ * Instead of enabling automatic receive socket buffer
+ * resizing, just give more space to the receive socket buffer.
+ */
+static inline void
+tcp_sbrcv_tstmp_check(struct tcpcb *tp) {
+ const u_int32_t ts_flags = TF_REQ_TSTMP | TF_RCVD_TSTMP;
+ struct sockbuf *sbrcv = &tp->t_inpcb->inp_socket->so_rcv;
+
+ /* Timestamps not fully negotiated and auto-sizing enabled:
+ * grant twice the default receive space up front.
+ */
+ if ((tp->t_flags & ts_flags) != ts_flags &&
+ (sbrcv->sb_flags & SB_AUTOSIZE) != 0) {
+ tcp_sbrcv_reserve(tp, sbrcv, 2 * tcp_recvspace, 0);
+ }
+}
+
+/* A receiver will evaluate the flow of packets on a connection
+ * to see if it can reduce ack traffic. The receiver will start
+ * stretching acks if all of the following conditions are met:
+ * 1. tcp_delack_enabled is set to 3
+ * 2. If the bytes received in the last 100ms is greater than a threshold
+ * defined by maxseg_unacked
+ * 3. If the connection has not been idle for tcp_maxrcvidle period.
+ * 4. If the connection has seen enough packets to let the slow-start
+ * finish after connection establishment or after some packet loss.
+ *
+ * The receiver will stop stretching acks if there is congestion/reordering
+ * as indicated by packets on reassembly queue or an ECN. If the delayed-ack
+ * timer fires while stretching acks, it means that the packet flow has gone
+ * below the threshold defined by maxseg_unacked and the receiver will stop
+ * stretching acks. The receiver gets no indication when slow-start is completed
+ * or when the connection reaches an idle state. That is why we use
+ * tcp_rcvsspktcnt to cover slow-start and tcp_maxrcvidle to identify idle
+ * state.
+ */
+static inline int
+tcp_stretch_ack_enable(struct tcpcb *tp) {
+ /* Enough bytes received in the current unacked window? */
+ int enough_traffic =
+ (tp->rcv_by_unackwin >= (maxseg_unacked * tp->t_maxseg));
+ /* Window measurement still fresh (connection not idle)? */
+ int not_idle = TSTMP_GT(tp->rcv_unackwin + tcp_maxrcvidle, tcp_now);
+ /* Either not waiting out slow-start, or enough packets seen. */
+ int past_slowstart =
+ (((tp->t_flagsext & TF_RCVUNACK_WAITSS) == 0) ||
+ (tp->rcv_waitforss >= tcp_rcvsspktcnt));
+
+ if (enough_traffic && not_idle && past_slowstart)
+ return(1);
+ return(0);
+}
+
+/* Reset the state related to stretch-ack algorithm. This will make
+ * the receiver generate an ack every other packet. The receiver
+ * will start re-evaluating the rate at which packets come to decide
+ * if it can benefit by lowering the ack traffic.
+ */
+void
+tcp_reset_stretch_ack(struct tcpcb *tp)
+{
+ /* Restart the measurement window and byte count, then clear
+ * the stretch-ack flag itself.
+ */
+ tp->rcv_unackwin = tcp_now + tcp_rcvunackwin;
+ tp->rcv_by_unackwin = 0;
+ tp->t_flags &= ~(TF_STRETCHACK);
+}
+