+ mpte_lock_assert_held(mpte);
+ mp_so = mptetoso(mpte);
+ mp_tp = mpte->mpte_mptcb;
+
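+ /*
+ * Mark the PCB as being within the write upcall; socket wakeups that
+ * fire while we are sending can then be deferred until the upcall is
+ * done (the deferral handling itself is outside this hunk).
+ */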
+ VERIFY(!(mpte->mpte_mppcb->mpp_flags & MPP_WUPCALL));
+ mpte->mpte_mppcb->mpp_flags |= MPP_WUPCALL;
+
+ mptcplog((LOG_DEBUG, "%s: snxt %u sndmax %u suna %u swnd %u reinjectq %u state %u\n",
+ __func__, (uint32_t)mp_tp->mpt_sndnxt, (uint32_t)mp_tp->mpt_sndmax,
+ (uint32_t)mp_tp->mpt_snduna, mp_tp->mpt_sndwnd,
+ mpte->mpte_reinjectq ? 1 : 0,
+ mp_tp->mpt_state),
+ MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
+
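+ /*
+ * Snapshot snd_nxt before entering the send loop; code later in the
+ * function (not shown in this hunk) presumably compares it against
+ * the updated value to tell whether the loop made progress.
+ */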
+ old_snd_nxt = mp_tp->mpt_sndnxt;
+ while (mptcp_can_send_more(mp_tp, FALSE)) {
+ /* get the "best" subflow to be used for transmission */
+ mpts = mptcp_get_subflow(mpte, NULL, &preferred_mpts);
+ if (mpts == NULL) {
+ mptcplog((LOG_INFO, "%s: no subflow\n", __func__),
+ MPTCP_SENDER_DBG, MPTCP_LOGLVL_LOG);
+ break;
+ }
+
+ mptcplog((LOG_DEBUG, "%s: using id %u\n", __func__, mpts->mpts_connid),
+ MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
+
+ /*
+ * If the best subflow is the one we already tried (or one that is
+ * failing over), there is no alternative right now; re-mark it
+ * active and let the retransmission timer trigger another attempt.
+ */
+ if (mpts_tried != NULL &&
+ (mpts == mpts_tried || (mpts->mpts_flags & MPTSF_FAILINGOVER))) {
+ mpts_tried->mpts_flags &= ~MPTSF_FAILINGOVER;
+ mpts_tried->mpts_flags |= MPTSF_ACTIVE;
+ mptcp_start_timer(mpte, MPTT_REXMT);
+ mptcplog((LOG_DEBUG, "%s: retry later\n", __func__),
+ MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
+ break;
+ }
+
+ /*
+ * Automatic sizing of the send socket buffer. Increase the send
+ * socket buffer size if all of the following criteria are met:
+ * 1. the receiver has enough buffer space for this data, and
+ * 2. the send buffer is filled to 7/8th with data (so we actually
+ * have data to make use of it).
+ */
+ if (tcp_do_autosendbuf == 1 &&
+ (mp_so->so_snd.sb_flags & (SB_AUTOSIZE | SB_TRIM)) == SB_AUTOSIZE &&
+ tcp_cansbgrow(&mp_so->so_snd)) {
+ if ((mp_tp->mpt_sndwnd / 4 * 5) >= mp_so->so_snd.sb_hiwat &&
+ mp_so->so_snd.sb_cc >= (mp_so->so_snd.sb_hiwat / 8 * 7)) {
+ if (sbreserve(&mp_so->so_snd,
+ min(mp_so->so_snd.sb_hiwat + tcp_autosndbuf_inc,
+ tcp_autosndbuf_max)) == 1) {
+ mp_so->so_snd.sb_idealsize = mp_so->so_snd.sb_hiwat;
+
+ mptcplog((LOG_DEBUG, "%s: increased snd hiwat to %u lowat %u\n",
+ __func__, mp_so->so_snd.sb_hiwat,
+ mp_so->so_snd.sb_lowat),
+ MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
+ }
+ }
+ }
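+ /*
+ * As a worked example (illustrative numbers, not from the code):
+ * with sb_hiwat = 64 KB, the buffer grows only once at least 56 KB
+ * (7/8 of 64 KB) is queued and the advertised window is at least
+ * ~51.2 KB, since mpt_sndwnd * 5/4 must reach sb_hiwat, i.e.
+ * mpt_sndwnd >= 4/5 of 64 KB.
+ */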
+
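+ /*
+ * DTrace probe point for MPTCP output, carrying the session, the
+ * chosen subflow and the MPTCP socket.
+ */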
+ DTRACE_MPTCP3(output, struct mptses *, mpte, struct mptsub *, mpts,
+ struct socket *, mp_so);
+ error = mptcp_subflow_output(mpte, mpts, 0);
+ if (error) {
+ /* The error can be a temporary loss of the source address or some other transient failure */
+ mpts->mpts_flags |= MPTSF_FAILINGOVER;
+ mpts->mpts_flags &= ~MPTSF_ACTIVE;
+ mpts_tried = mpts;
+ if (error != ECANCELED) {
+ mptcplog((LOG_ERR, "%s: Error = %d mpts_flags %#x\n", __func__,
+ error, mpts->mpts_flags),
+ MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR);
+ }
+ break;
+ }
+ /* The model is to have only one active flow at a time */
+ mpts->mpts_flags |= MPTSF_ACTIVE;
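+ /* Data was just sent on this subflow, so its RTT estimate is fresh; clear any pending probe state */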
+ mpts->mpts_probesoon = mpts->mpts_probecnt = 0;
+
+ /* Probe the preferred subflow periodically so its smoothed RTT stays current while we send on another subflow */
+ if (mptcp_probeto && mpts != preferred_mpts && preferred_mpts != NULL) {
+ if (preferred_mpts->mpts_probesoon) {
+ if ((tcp_now - preferred_mpts->mpts_probesoon) > mptcp_probeto) {
+ mptcp_subflow_output(mpte, preferred_mpts, MPTCP_SUBOUT_PROBING);
+ if (preferred_mpts->mpts_probecnt >= mptcp_probecnt) {
+ preferred_mpts->mpts_probesoon = 0;
+ preferred_mpts->mpts_probecnt = 0;
+ }
+ }
+ } else {
+ preferred_mpts->mpts_probesoon = tcp_now;
+ preferred_mpts->mpts_probecnt = 0;
+ }
+ }
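+ /*
+ * Probe cycle summary: the first pass through here arms the timer
+ * (mpts_probesoon = tcp_now); once more than mptcp_probeto ticks
+ * have elapsed, each pass sends one probe on the preferred subflow
+ * (mpts_probecnt is presumably advanced inside mptcp_subflow_output),
+ * and after mptcp_probecnt probes the state is cleared, which
+ * re-arms the cycle.
+ */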
+
+ if (mpte->mpte_active_sub == NULL) {
+ mpte->mpte_active_sub = mpts;
+ } else if (mpte->mpte_active_sub != mpts) {
+ struct tcpcb *tp = sototcpcb(mpts->mpts_socket);
+ struct tcpcb *acttp = sototcpcb(mpte->mpte_active_sub->mpts_socket);
+
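+ /* t_srtt is a fixed-point value scaled by TCP_RTT_SHIFT; shift it back down for logging */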
+ mptcplog((LOG_DEBUG, "%s: switch [%u, srtt %d] to [%u, srtt %d]\n", __func__,
+ mpte->mpte_active_sub->mpts_connid, acttp->t_srtt >> TCP_RTT_SHIFT,
+ mpts->mpts_connid, tp->t_srtt >> TCP_RTT_SHIFT),
+ (MPTCP_SENDER_DBG | MPTCP_SOCKET_DBG), MPTCP_LOGLVL_LOG);
+
+ mpte->mpte_active_sub->mpts_flags &= ~MPTSF_ACTIVE;
+ mpte->mpte_active_sub = mpts;
+
+ mptcpstats_inc_switch(mpte, mpts);
+ }