+ if (tp->snd_cwnd > th->th_ack - tp->snd_una)
+ tp->snd_cwnd -= th->th_ack - tp->snd_una;
+ else
+ tp->snd_cwnd = 0;
+ tp->snd_cwnd += tp->t_maxseg;
+
+}
+
+/*
+ * Drop a random TCP connection that hasn't been serviced yet and
+ * is eligible for discard. There is a one in qlen chance that
+ * we will return a null, saying that there are no droppable
+ * requests. In this case, the protocol-specific code should drop
+ * the new request. This ensures fairness.
+ *
+ * The listening TCP socket "head" must be locked
+ */
+static int
+tcp_dropdropablreq(struct socket *head)
+{
+	struct socket *so, *sonext;
+	unsigned int i, j, qlen;
+	static int rnd;
+	static struct timeval old_runtime;
+	static unsigned int cur_cnt, old_cnt;
+	struct timeval tv;
+	struct inpcb *inp = NULL;
+	struct tcpcb *tp;
+
+	/* Only a listening socket has an incomplete-connection queue to cull */
+	if ((head->so_options & SO_ACCEPTCONN) == 0)
+		return 0;
+
+	so = TAILQ_FIRST(&head->so_incomp);
+	if (!so)
+		return 0;
+
+	/* Once per elapsed second, fold the running call count (cur_cnt)
+	 * into a calls-per-second rate (old_cnt) and restart the count.
+	 */
+	microtime(&tv);
+	if ((i = (tv.tv_sec - old_runtime.tv_sec)) != 0) {
+		old_runtime = tv;
+		old_cnt = cur_cnt / i;
+		cur_cnt = 0;
+	}
+
+
+	qlen = head->so_incqlen;
+	if (++cur_cnt > qlen || old_cnt > qlen) {
+		/* Under pressure (drop rate exceeds queue length): advance a
+		 * simple 16-bit linear congruential generator and skip a
+		 * pseudo-random number of entries in [0, qlen].  Skipping
+		 * qlen entries walks off the end and leaves so == NULL --
+		 * the deliberate "one in qlen" null return described in the
+		 * header comment.
+		 */
+		rnd = (314159 * rnd + 66329) & 0xffff;
+		j = ((qlen + 1) * rnd) >> 16;
+
+		while (j-- && so)
+			so = TAILQ_NEXT(so, so_list);
+	}
+	/* Find a connection that is not already closing (or being served) */
+	while (so) {
+		inp = (struct inpcb *)so->so_pcb;
+
+		/* Capture the successor now: we may drop interest in 'so' below */
+		sonext = TAILQ_NEXT(so, so_list);
+
+		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) != WNT_STOPUSING) {
+			/* Avoid the issue of a socket being accepted by one input thread
+			 * and being dropped by another input thread.
+			 * If we can't get a hold on this mutex, then grab the next socket in line.
+			 */
+			if (lck_mtx_try_lock(&inp->inpcb_mtx)) {
+				so->so_usecount++;
+				/* usecount == 2 appears to mean: our reference plus
+				 * the so_incomp queue's only -- NOTE(review): confirm
+				 * against the matching check after the loop.
+				 */
+				if ((so->so_usecount == 2) &&
+				    (so->so_state & SS_INCOMP) != 0 &&
+				    (so->so_flags & SOF_INCOMP_INPROGRESS) == 0)
+					break;
+				else {/* don't use if being accepted or used in any other way */
+					in_pcb_checkstate(inp, WNT_RELEASE, 1);
+					tcp_unlock(so, 1, 0);
+				}
+			}
+			else {
+				/* do not try to lock the inp in in_pcb_checkstate
+				 * because the lock is already held in some other thread.
+				 * Only drop the inp_wntcnt reference.
+				 */
+				in_pcb_checkstate(inp, WNT_RELEASE, 1);
+			}
+		}
+		so = sonext;
+
+	}
+	if (!so)
+		return 0;
+
+	/* Makes sure socket is still in the right state to be discarded */
+
+	/* Release the WNT_ACQUIRE taken in the loop; bail if the pcb has
+	 * meanwhile entered the stop-using (dying) state.
+	 */
+	if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
+		tcp_unlock(so, 1, 0);
+		return 0;
+	}
+
+	if (so->so_usecount != 2 || !(so->so_state & SS_INCOMP)) {
+		/* do not discard: that socket is being accepted */
+		tcp_unlock(so, 1, 0);
+		return 0;
+	}
+
+	/* Victim chosen: unhook it from the listener's incomplete queue.
+	 * The head lock is dropped here and re-taken at the end; 'so' stays
+	 * locked (inpcb_mtx) throughout.
+	 */
+	TAILQ_REMOVE(&head->so_incomp, so, so_list);
+	tcp_unlock(head, 0, 0);
+
+	lck_mtx_assert(&inp->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
+	tp = sototcpcb(so);
+	so->so_flags |= SOF_OVERFLOW;
+	so->so_head = NULL;
+
+	/* NOTE(review): tp is written after tcp_close(); this relies on the
+	 * held usecount keeping the tcpcb alive here -- confirm.
+	 */
+	tcp_close(tp);
+	tp->t_unacksegs = 0;
+
+	if (inp->inp_wantcnt > 0 && inp->inp_wantcnt != WNT_STOPUSING) {
+		/* Some one has a wantcnt on this pcb. Since WNT_ACQUIRE
+		 * doesn't require a lock, it could have happened while
+		 * we are holding the lock. This pcb will have to
+		 * be garbage collected later.
+		 * Release the reference held for so_incomp queue
+		 */
+		so->so_usecount--;
+
+		tcp_unlock(so, 1, 0);
+	} else {
+		/* Unlock this socket and leave the reference on. We need to
+		 * acquire the pcbinfo lock in order to fully dispose it off
+		 */
+		tcp_unlock(so, 0, 0);
+
+		lck_rw_lock_exclusive(tcbinfo.mtx);
+
+		tcp_lock(so, 0, 0);
+
+		/* Release the reference held for so_incomp queue */
+		so->so_usecount--;
+
+		/* Re-check: while the socket was unlocked above, another
+		 * thread may have taken a use or want reference.
+		 */
+		if (so->so_usecount != 1 ||
+		    (inp->inp_wantcnt > 0 && inp->inp_wantcnt != WNT_STOPUSING)) {
+			/* There is an extra wantcount or usecount that must
+			 * have been added when the socket was unlocked. This
+			 * socket will have to be garbage collected later
+			 */
+			tcp_unlock(so, 1, 0);
+		} else {
+
+			/* Drop the reference held for this function */
+			so->so_usecount--;
+
+			in_pcbdispose(inp);
+		}
+		lck_rw_done(tcbinfo.mtx);
+	}
+	tcpstat.tcps_drops++;
+
+	/* Re-take the listener lock (caller expects it held) and account
+	 * for the entry removed from its queues.
+	 */
+	tcp_lock(head, 0, 0);
+	head->so_incqlen--;
+	head->so_qlen--;
+	return(1);
+}
+
+/* Set background congestion control on a socket.
+ * Thin wrapper: switches the connection's congestion-control algorithm
+ * to the background one via tcp_set_new_cc().
+ */
+void
+tcp_set_background_cc(struct socket *so)
+{
+	tcp_set_new_cc(so, TCP_CC_ALGO_BACKGROUND_INDEX);
+}
+
+/* Set foreground congestion control on a socket.
+ * Thin wrapper: switches the connection's congestion-control algorithm
+ * back to NewReno (the default foreground algorithm) via tcp_set_new_cc().
+ */
+void
+tcp_set_foreground_cc(struct socket *so)
+{
+	tcp_set_new_cc(so, TCP_CC_ALGO_NEWRENO_INDEX);
+}
+
+static void
+tcp_set_new_cc(struct socket *so, uint16_t cc_index)
+{
+ struct inpcb *inp = sotoinpcb(so);
+ struct tcpcb *tp = intotcpcb(inp);
+ uint16_t old_cc_index = 0;
+ if (tp->tcp_cc_index != cc_index) {
+
+ old_cc_index = tp->tcp_cc_index;
+
+ if (CC_ALGO(tp)->cleanup != NULL)
+ CC_ALGO(tp)->cleanup(tp);
+ tp->tcp_cc_index = cc_index;
+
+ /* Decide if the connection is just starting or if
+ * we have sent some packets on it.
+ */
+ if (tp->snd_nxt > tp->iss) {
+ /* Already sent some packets */
+ if (CC_ALGO(tp)->switch_to != NULL)
+ CC_ALGO(tp)->switch_to(tp, old_cc_index);
+ } else {
+ if (CC_ALGO(tp)->init != NULL)
+ CC_ALGO(tp)->init(tp);
+ }
+ DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
+ struct tcpcb *, tp, struct tcphdr *, NULL,
+ int32_t, TCP_CC_CHANGE_ALGO);