SYSCTL_INT(_net_inet_mptcp, OID_AUTO, enable, CTLFLAG_RW | CTLFLAG_LOCKED,
&mptcp_enable, 0, "Enable Multipath TCP Support");
-/* Number of times to try negotiating MPTCP on SYN retransmissions */
-int mptcp_mpcap_retries = MPTCP_CAPABLE_RETRIES;
+/*
+ * Number of times to try negotiating MPTCP on SYN retransmissions.
+ * We haven't seen any reports of a middlebox that is dropping all SYN-segments
+ * that have an MPTCP-option. Thus, let's be generous and retransmit it 4 times.
+ */
+int mptcp_mpcap_retries = 4;
SYSCTL_INT(_net_inet_mptcp, OID_AUTO, mptcp_cap_retr,
CTLFLAG_RW | CTLFLAG_LOCKED,
&mptcp_mpcap_retries, 0, "Number of MP Capable SYN Retries");
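
/*
 * Illustrative sketch, not part of the patch itself: the SYN-retransmit
 * path can consult mptcp_mpcap_retries and stop offering MP_CAPABLE once
 * the limit is reached.  The helper below is hypothetical; in XNU the
 * equivalent check lives in the TCP retransmit timer.
 */
static boolean_t
mptcp_syn_keeps_mpcapable(struct tcpcb *tp)
{
	/* Keep negotiating MPTCP only for the first few SYN rexmits. */
	return tp->t_rxtshift < mptcp_mpcap_retries;
}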
SYSCTL_INT(_net_inet_mptcp, OID_AUTO, fail, CTLFLAG_RW | CTLFLAG_LOCKED,
&mptcp_fail_thresh, 0, "Failover threshold");
-
/*
* MPTCP subflows have TCP keepalives set to ON. Set a conservative keeptime
* as carrier networks mostly have a 30 minute to 60 minute NAT Timeout.
SYSCTL_INT(_net_inet_mptcp, OID_AUTO, rtthist_thresh, CTLFLAG_RW | CTLFLAG_LOCKED,
&mptcp_rtthist_rtthresh, 0, "Rtt threshold");
-/*
- * Use RTO history for sending new data
- */
-int mptcp_use_rto = 1;
-SYSCTL_INT(_net_inet_mptcp, OID_AUTO, userto, CTLFLAG_RW | CTLFLAG_LOCKED,
- &mptcp_use_rto, 0, "Disable RTO for subflow selection");
-
int mptcp_rtothresh = 1500;
SYSCTL_INT(_net_inet_mptcp, OID_AUTO, rto_thresh, CTLFLAG_RW | CTLFLAG_LOCKED,
&mptcp_rtothresh, 0, "RTO threshold");
SYSCTL_UINT(_net_inet_mptcp, OID_AUTO, probecnt, CTLFLAG_RW | CTLFLAG_LOCKED,
&mptcp_probecnt, 0, "Number of probe writes");
-/*
- * Static declarations
- */
-static uint16_t mptcp_input_csum(struct tcpcb *, struct mbuf *, uint64_t,
- uint32_t, uint16_t, uint16_t, uint16_t);
-
static int
mptcp_reass_present(struct socket *mp_so)
{
- struct mptcb *mp_tp = mpsotomppcb(mp_so)->mpp_pcbe->mpte_mptcb;
+ struct mptses *mpte = mpsotompte(mp_so);
+ struct mptcb *mp_tp = mpte->mpte_mptcb;
struct tseg_qent *q;
int dowakeup = 0;
int flags = 0;
m_freem(q->tqe_m);
} else {
flags = !!(q->tqe_m->m_pkthdr.pkt_flags & PKTF_MPTCP_DFIN);
- if (sbappendstream_rcvdemux(mp_so, q->tqe_m, 0, 0)) {
+ if (sbappendstream_rcvdemux(mp_so, q->tqe_m)) {
dowakeup = 1;
}
}
struct tseg_qent *p = NULL;
struct tseg_qent *nq;
struct tseg_qent *te = NULL;
- u_int16_t qlimit;
+ uint32_t qlimit;
/*
* Limit the number of segments in the reassembly queue to prevent
* holding on to too many segments (and thus running out of mbufs).
* Make sure to let the missing segment through which caused this
* queue. Always keep one global queue entry spare to be able to
* process the missing segment.
*/
- qlimit = min(max(100, mp_so->so_rcv.sb_hiwat >> 10),
+ qlimit = MIN(MAX(100, mp_so->so_rcv.sb_hiwat >> 10),
(tcp_autorcvbuf_max >> 10));
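
/*
 * Worked example, illustrative: a 512 KB receive buffer gives
 * sb_hiwat >> 10 = 512 queue entries, and a 2 MB tcp_autorcvbuf_max caps
 * that at 2048, so qlimit = MIN(MAX(100, 512), 2048) = 512.  A small
 * buffer (64 KB -> 64) still gets the floor of 100 entries.
 */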
if (mb_dsn != mp_tp->mpt_rcvnxt &&
(mp_tp->mpt_reassqlen + 1) >= qlimit) {
*/
goto out;
}
- m_adj(m, i);
+ VERIFY(i <= INT_MAX);
+ m_adj(m, (int)i);
*tlenp -= i;
phdr->mp_dsn += i;
}
if (i < q->tqe_len) {
q->tqe_m->m_pkthdr.mp_dsn += i;
q->tqe_len -= i;
- m_adj(q->tqe_m, i);
+
+ VERIFY(i <= INT_MAX);
+ m_adj(q->tqe_m, (int)i);
break;
}
VERIFY(m->m_flags & M_PKTHDR);
- mpte_lock_assert_held(mpte); /* same as MP socket lock */
-
mp_so = mptetoso(mpte);
mp_tp = mpte->mpte_mptcb;
+ socket_lock_assert_owned(mp_so);
+
DTRACE_MPTCP(input);
mp_tp->mpt_rcvwnd = mptcp_sbspace(mp_tp);
* assume degraded flow as this may be the first packet
* without DSS, and the subflow state is not updated yet.
*/
- if (sbappendstream_rcvdemux(mp_so, m, 0, 0)) {
+ if (sbappendstream_rcvdemux(mp_so, m)) {
sorwakeup(mp_so);
}
mptcp_close_fsm(mp_tp, MPCE_RECV_DATA_FIN);
socantrcvmore(mp_so);
}
-
- mptcplog((LOG_DEBUG, "%s: Fallback read %d bytes\n", __func__,
- count), MPTCP_RECEIVER_DBG, MPTCP_LOGLVL_VERBOSE);
return;
}
int64_t todrop;
int mb_dfin = 0;
+ VERIFY(m->m_flags & M_PKTHDR);
+
/* If fallback occurs, mbufs will not have PKTF_MPTCP set */
if (!(m->m_pkthdr.pkt_flags & PKTF_MPTCP)) {
goto fallback;
if (todrop > 0) {
tcpstat.tcps_mptcp_rcvpackafterwin++;
+ os_log_info(mptcp_log_handle, "%s - %lx: dropping dsn %u dlen %u rcvnxt %u rcvwnd %u todrop %lld\n",
+ __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
+ (uint32_t)mb_dsn, mb_datalen, (uint32_t)mp_tp->mpt_rcvnxt,
+ mp_tp->mpt_rcvwnd, todrop);
+
if (todrop >= mb_datalen) {
if (freelist == NULL) {
freelist = m;
prev = save = NULL;
continue;
} else {
- m_adj(m, -todrop);
+ VERIFY(todrop <= INT_MAX);
+ m_adj(m, (int)-todrop);
mb_datalen -= todrop;
+ m->m_pkthdr.mp_rlen -= todrop;
}
/*
m->m_pkthdr.pkt_flags &= ~PKTF_MPTCP_DFIN;
}
-
if (MPTCP_SEQ_LT(mb_dsn, mp_tp->mpt_rcvnxt)) {
if (MPTCP_SEQ_LEQ((mb_dsn + mb_datalen),
mp_tp->mpt_rcvnxt)) {
prev = save = NULL;
continue;
} else {
- m_adj(m, (mp_tp->mpt_rcvnxt - mb_dsn));
+ VERIFY((mp_tp->mpt_rcvnxt - mb_dsn) <= INT_MAX);
+ m_adj(m, (int)(mp_tp->mpt_rcvnxt - mb_dsn));
+ mb_datalen -= (mp_tp->mpt_rcvnxt - mb_dsn);
+ mb_dsn = mp_tp->mpt_rcvnxt;
+ VERIFY(mb_datalen >= 0 && mb_datalen <= USHRT_MAX);
+ m->m_pkthdr.mp_rlen = (uint16_t)mb_datalen;
+ m->m_pkthdr.mp_dsn = mb_dsn;
}
- mptcplog((LOG_INFO, "%s: Left Edge %llu\n", __func__,
- mp_tp->mpt_rcvnxt),
- MPTCP_RECEIVER_DBG, MPTCP_LOGLVL_VERBOSE);
}
if (MPTCP_SEQ_GT(mb_dsn, mp_tp->mpt_rcvnxt) ||
mptcp_sbrcv_grow(mp_tp);
- if (sbappendstream_rcvdemux(mp_so, m, 0, 0)) {
+ if (sbappendstream_rcvdemux(mp_so, m)) {
wakeup = 1;
}
count = mp_so->so_rcv.sb_cc - count;
tcpstat.tcps_mp_rcvtotal++;
tcpstat.tcps_mp_rcvbytes += count;
- mptcplog((LOG_DEBUG, "%s: Read %d bytes\n", __func__, count),
- MPTCP_RECEIVER_DBG, MPTCP_LOGLVL_VERBOSE);
mp_tp->mpt_rcvnxt += count;
uint64_t old_snd_nxt;
int error = 0;
- mpte_lock_assert_held(mpte);
mp_so = mptetoso(mpte);
mp_tp = mpte->mpte_mptcb;
+ socket_lock_assert_owned(mp_so);
+
+ if (mp_so->so_flags & SOF_DEFUNCT) {
+ return 0;
+ }
+
VERIFY(!(mpte->mpte_mppcb->mpp_flags & MPP_WUPCALL));
mpte->mpte_mppcb->mpp_flags |= MPP_WUPCALL;
- mptcplog((LOG_DEBUG, "%s: snxt %u sndmax %u suna %u swnd %u reinjectq %u state %u\n",
- __func__, (uint32_t)mp_tp->mpt_sndnxt, (uint32_t)mp_tp->mpt_sndmax,
- (uint32_t)mp_tp->mpt_snduna, mp_tp->mpt_sndwnd,
- mpte->mpte_reinjectq ? 1 : 0,
- mp_tp->mpt_state),
- MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
-
old_snd_nxt = mp_tp->mpt_sndnxt;
while (mptcp_can_send_more(mp_tp, FALSE)) {
/* get the "best" subflow to be used for transmission */
- mpts = mptcp_get_subflow(mpte, NULL, &preferred_mpts);
+ mpts = mptcp_get_subflow(mpte, &preferred_mpts);
if (mpts == NULL) {
mptcplog((LOG_INFO, "%s: no subflow\n", __func__),
MPTCP_SENDER_DBG, MPTCP_LOGLVL_LOG);
break;
}
- mptcplog((LOG_DEBUG, "%s: using id %u\n", __func__, mpts->mpts_connid),
- MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
-
/* In case there's just one flow, we reattempt later */
if (mpts_tried != NULL &&
(mpts == mpts_tried || (mpts->mpts_flags & MPTSF_FAILINGOVER))) {
mpts_tried->mpts_flags &= ~MPTSF_FAILINGOVER;
mpts_tried->mpts_flags |= MPTSF_ACTIVE;
mptcp_start_timer(mpte, MPTT_REXMT);
- mptcplog((LOG_DEBUG, "%s: retry later\n", __func__),
- MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
break;
}
* 2. send buffer is filled to 7/8th with data (so we actually
* have data to make use of it);
*/
- if (tcp_do_autosendbuf == 1 &&
- (mp_so->so_snd.sb_flags & (SB_AUTOSIZE | SB_TRIM)) == SB_AUTOSIZE &&
+ if ((mp_so->so_snd.sb_flags & (SB_AUTOSIZE | SB_TRIM)) == SB_AUTOSIZE &&
tcp_cansbgrow(&mp_so->so_snd)) {
if ((mp_tp->mpt_sndwnd / 4 * 5) >= mp_so->so_snd.sb_hiwat &&
mp_so->so_snd.sb_cc >= (mp_so->so_snd.sb_hiwat / 8 * 7)) {
min(mp_so->so_snd.sb_hiwat + tcp_autosndbuf_inc,
tcp_autosndbuf_max)) == 1) {
mp_so->so_snd.sb_idealsize = mp_so->so_snd.sb_hiwat;
-
- mptcplog((LOG_DEBUG, "%s: increased snd hiwat to %u lowat %u\n",
- __func__, mp_so->so_snd.sb_hiwat,
- mp_so->so_snd.sb_lowat),
- MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
}
}
}
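
/*
 * Worked example, illustrative: with sb_hiwat = 64 KB the send buffer
 * grows only once the peer's window covers at least 4/5 of it
 * (mpt_sndwnd >= ~51 KB) and at least 7/8 of the buffer (56 KB) holds
 * queued data, so mostly-idle connections never inflate sb_hiwat.
 */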
mpts->mpts_flags &= ~MPTSF_ACTIVE;
mpts_tried = mpts;
if (error != ECANCELED) {
- mptcplog((LOG_ERR, "%s: Error = %d mpts_flags %#x\n", __func__,
- error, mpts->mpts_flags),
- MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR);
+ os_log_error(mptcp_log_handle, "%s - %lx: Error = %d mpts_flags %#x\n",
+ __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
+ error, mpts->mpts_flags);
}
break;
}
if (mpte->mpte_active_sub == NULL) {
mpte->mpte_active_sub = mpts;
} else if (mpte->mpte_active_sub != mpts) {
- struct tcpcb *tp = sototcpcb(mpts->mpts_socket);
- struct tcpcb *acttp = sototcpcb(mpte->mpte_active_sub->mpts_socket);
-
- mptcplog((LOG_DEBUG, "%s: switch [%u, srtt %d] to [%u, srtt %d]\n", __func__,
- mpte->mpte_active_sub->mpts_connid, acttp->t_srtt >> TCP_RTT_SHIFT,
- mpts->mpts_connid, tp->t_srtt >> TCP_RTT_SHIFT),
- (MPTCP_SENDER_DBG | MPTCP_SOCKET_DBG), MPTCP_LOGLVL_LOG);
-
mpte->mpte_active_sub->mpts_flags &= ~MPTSF_ACTIVE;
mpte->mpte_active_sub = mpts;
return mpts;
}
+static boolean_t
+mptcp_subflow_is_slow(struct mptses *mpte, struct mptsub *mpts)
+{
+ struct tcpcb *tp = sototcpcb(mpts->mpts_socket);
+ int fail_thresh = mptcp_fail_thresh;
+
+ if (mpte->mpte_svctype == MPTCP_SVCTYPE_HANDOVER || mpte->mpte_svctype == MPTCP_SVCTYPE_PURE_HANDOVER) {
+ fail_thresh *= 2;
+ }
+
+ return tp->t_rxtshift >= fail_thresh &&
+ (mptetoso(mpte)->so_snd.sb_cc || mpte->mpte_reinjectq);
+}
+
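/*
 * Illustrative usage sketch, not part of the patch: a handover-style
 * chooser that leaves the preferred subflow only when it is slow and an
 * alternate actually exists.  The helper is hypothetical; the real policy
 * below also consults the service type and Symptoms.
 */
static struct mptsub *
mptcp_handover_pick(struct mptses *mpte, struct mptsub *best,
    struct mptsub *second_best)
{
	if (second_best != NULL && mptcp_subflow_is_slow(mpte, best)) {
		return second_best;
	}
	return best;
}
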
/*
* Return the most eligible subflow to be used for sending data.
*/
struct mptsub *
-mptcp_get_subflow(struct mptses *mpte, struct mptsub *ignore, struct mptsub **preferred)
+mptcp_get_subflow(struct mptses *mpte, struct mptsub **preferred)
{
struct tcpcb *besttp, *secondtp;
struct inpcb *bestinp, *secondinp;
struct tcpcb *tp = sototcpcb(so);
struct inpcb *inp = sotoinpcb(so);
- mptcplog((LOG_DEBUG, "%s mpts %u ignore %d, mpts_flags %#x, suspended %u sostate %#x tpstate %u cellular %d rtt %u rxtshift %u cheap %u exp %u cwnd %d\n",
- __func__, mpts->mpts_connid, ignore ? ignore->mpts_connid : -1, mpts->mpts_flags,
+ mptcplog((LOG_DEBUG, "%s mpts %u mpts_flags %#x, suspended %u sostate %#x tpstate %u cellular %d rtt %u rxtshift %u cheap %u exp %u cwnd %d\n",
+ __func__, mpts->mpts_connid, mpts->mpts_flags,
INP_WAIT_FOR_IF_FEEDBACK(inp), so->so_state, tp->t_state,
inp->inp_last_outifp ? IFNET_IS_CELLULAR(inp->inp_last_outifp) : -1,
tp->t_srtt, tp->t_rxtshift, cheap_rtt, exp_rtt,
* First, the hard conditions to reject subflows
* (e.g., not connected,...)
*/
- if (mpts == ignore || inp->inp_last_outifp == NULL) {
+ if (inp->inp_last_outifp == NULL) {
continue;
}
* Second Step: Among best and second_best. Choose the one that is
* most appropriate for this particular service-type.
*/
- if (mpte->mpte_svctype == MPTCP_SVCTYPE_HANDOVER) {
+ if (mpte->mpte_svctype == MPTCP_SVCTYPE_PURE_HANDOVER) {
+ return mptcp_return_subflow(best);
+ } else if (mpte->mpte_svctype == MPTCP_SVCTYPE_HANDOVER) {
/*
* Only handover if Symptoms tells us to do so.
*/
if (!IFNET_IS_CELLULAR(bestinp->inp_last_outifp) &&
- mptcp_is_wifi_unusable(mpte) != 0 && mptcp_subflow_is_bad(mpte, best)) {
+ mptcp_is_wifi_unusable_for_session(mpte) != 0 && mptcp_subflow_is_slow(mpte, best)) {
return mptcp_return_subflow(second_best);
}
/* Adjust with symptoms information */
if (!IFNET_IS_CELLULAR(bestinp->inp_last_outifp) &&
- mptcp_is_wifi_unusable(mpte) != 0) {
+ mptcp_is_wifi_unusable_for_session(mpte) != 0) {
rtt_thresh /= 2;
rto_thresh /= 2;
}
return mptcp_return_subflow(second_best);
}
- if (mptcp_subflow_is_bad(mpte, best) &&
+ if (mptcp_subflow_is_slow(mpte, best) &&
secondtp->t_rxtshift == 0) {
return mptcp_return_subflow(second_best);
}
* has some space in the congestion-window.
*/
return mptcp_return_subflow(best);
- } else if (mpte->mpte_svctype == MPTCP_SVCTYPE_AGGREGATE) {
+ } else if (mpte->mpte_svctype >= MPTCP_SVCTYPE_AGGREGATE) {
struct mptsub *tmp;
/*
void
mptcp_close_fsm(struct mptcb *mp_tp, uint32_t event)
{
- mpte_lock_assert_held(mp_tp->mpt_mpte);
+ struct socket *mp_so = mptetoso(mp_tp->mpt_mpte);
+
+ socket_lock_assert_owned(mp_so);
+
mptcp_state_t old_state = mp_tp->mpt_state;
DTRACE_MPTCP2(state__change, struct mptcb *, mp_tp,
uint16_t csum)
{
if (mdss_data_len == 0) {
- mptcplog((LOG_INFO, "%s: Infinite Mapping.\n", __func__),
- MPTCP_RECEIVER_DBG, MPTCP_LOGLVL_LOG);
+ os_log_error(mptcp_log_handle, "%s - %lx: Infinite Mapping.\n",
+ __func__, (unsigned long)VM_KERNEL_ADDRPERM(mp_tp->mpt_mpte));
if ((mp_tp->mpt_flags & MPTCPF_CHECKSUM) && (csum != 0)) {
- mptcplog((LOG_ERR, "%s: Bad checksum %x \n", __func__,
- csum), MPTCP_RECEIVER_DBG, MPTCP_LOGLVL_ERR);
+ os_log_error(mptcp_log_handle, "%s - %lx: Bad checksum %x \n",
+ __func__, (unsigned long)VM_KERNEL_ADDRPERM(mp_tp->mpt_mpte), csum);
}
mptcp_notify_mpfail(tp->t_inpcb->inp_socket);
return;
}
- mptcplog((LOG_DEBUG,
- "%s: seqn = %u len = %u full = %u rcvnxt = %u \n", __func__,
- seqn, mdss_data_len, (uint32_t)full_dsn, (uint32_t)mp_tp->mpt_rcvnxt),
- MPTCP_RECEIVER_DBG, MPTCP_LOGLVL_VERBOSE);
mptcp_notify_mpready(tp->t_inpcb->inp_socket);
/* unacceptable DSS option, fallback to TCP */
if (m->m_pkthdr.len > ((int) datalen + hdrlen)) {
- mptcplog((LOG_ERR, "%s: mbuf len %d, MPTCP expected %d",
- __func__, m->m_pkthdr.len, datalen),
- MPTCP_RECEIVER_DBG, MPTCP_LOGLVL_LOG);
+ os_log_error(mptcp_log_handle, "%s - %lx: mbuf len %d, MPTCP expected %d",
+ __func__, (unsigned long)VM_KERNEL_ADDRPERM(tptomptp(tp)->mpt_mpte), m->m_pkthdr.len, datalen);
} else {
return 0;
}
return 0;
}
-/*
- * MPTCP Checksum support
- * The checksum is calculated whenever the MPTCP DSS option is included
- * in the TCP packet. The checksum includes the sum of the MPTCP psuedo
- * header and the actual data indicated by the length specified in the
- * DSS option.
- */
-
-int
-mptcp_validate_csum(struct tcpcb *tp, struct mbuf *m, uint64_t dsn,
- uint32_t sseq, uint16_t dlen, uint16_t csum, uint16_t dfin)
-{
- uint16_t mptcp_csum;
-
- mptcp_csum = mptcp_input_csum(tp, m, dsn, sseq, dlen, csum, dfin);
- if (mptcp_csum) {
- tp->t_mpflags |= TMPF_SND_MPFAIL;
- mptcp_notify_mpfail(tp->t_inpcb->inp_socket);
- m_freem(m);
- tcpstat.tcps_mp_badcsum++;
- return -1;
- }
- return 0;
-}
-
static uint16_t
mptcp_input_csum(struct tcpcb *tp, struct mbuf *m, uint64_t dsn, uint32_t sseq,
- uint16_t dlen, uint16_t csum, uint16_t dfin)
+ uint16_t dlen, uint16_t csum, int dfin)
{
struct mptcb *mp_tp = tptomptp(tp);
- uint16_t real_len = dlen - dfin;
+ int real_len = dlen - dfin;
uint32_t sum = 0;
+ VERIFY(real_len >= 0);
+
if (mp_tp == NULL) {
return 0;
}
sum += in_pseudo64(htonll(dsn), htonl(sseq), htons(dlen) + csum);
ADDCARRY(sum);
+
DTRACE_MPTCP3(checksum__result, struct tcpcb *, tp, struct mbuf *, m,
uint32_t, sum);
- mptcplog((LOG_DEBUG, "%s: sum = %x \n", __func__, sum),
- MPTCP_RECEIVER_DBG, MPTCP_LOGLVL_VERBOSE);
return ~sum & 0xffff;
}
-uint32_t
+/*
+ * MPTCP Checksum support
+ * The checksum is calculated whenever the MPTCP DSS option is included
+ * in the TCP packet. The checksum includes the sum of the MPTCP pseudo
+ * header and the actual data indicated by the length specified in the
+ * DSS option.
+ */
+
+int
+mptcp_validate_csum(struct tcpcb *tp, struct mbuf *m, uint64_t dsn,
+ uint32_t sseq, uint16_t dlen, uint16_t csum, int dfin)
+{
+ uint16_t mptcp_csum;
+
+ mptcp_csum = mptcp_input_csum(tp, m, dsn, sseq, dlen, csum, dfin);
+ if (mptcp_csum) {
+ tp->t_mpflags |= TMPF_SND_MPFAIL;
+ mptcp_notify_mpfail(tp->t_inpcb->inp_socket);
+ m_freem(m);
+ tcpstat.tcps_mp_badcsum++;
+ return -1;
+ }
+ return 0;
+}
+
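/*
 * Illustrative note, not part of the patch: the DSS checksum is a 16-bit
 * one's-complement sum over a pseudo-header of (64-bit DSN, subflow
 * sequence number, data-level length, DSS checksum field) plus the
 * payload, which is why both directions can fold it with the same
 * in_pseudo64() primitive.  Hypothetical sender-side use:
 *
 *	dss_csum = mptcp_output_csum(m, dsn, subflow_seq, dss_len);
 */
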
+uint16_t
mptcp_output_csum(struct mbuf *m, uint64_t dss_val, uint32_t sseq, uint16_t dlen)
{
uint32_t sum = 0;
mptcplog((LOG_DEBUG, "%s: sum = %x \n", __func__, sum),
MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
- return sum;
+ return (uint16_t)sum;
}
/*
sowwakeup(mpp->mpp_socket);
}
-
- if (mpp->mpp_flags & MPP_SET_CELLICON) {
- mpp->mpp_flags &= ~MPP_SET_CELLICON;
-
- mptcp_set_cellicon(mpp->mpp_pcbe);
- }
-
- if (mpp->mpp_flags & MPP_UNSET_CELLICON) {
- mpp->mpp_flags &= ~MPP_UNSET_CELLICON;
-
- mptcp_unset_cellicon();
- }
-}
-
-void
-mptcp_ask_for_nat64(struct ifnet *ifp)
-{
- in6_post_msg(ifp, KEV_INET6_REQUEST_NAT64_PREFIX, NULL, NULL);
-
- os_log_info(mptcp_log_handle,
- "%s: asked for NAT64-prefix on %s\n", __func__,
- ifp->if_name);
}
static void
mptcp_reset_itfinfo(struct mpt_itf_info *info)
{
- info->ifindex = 0;
- info->has_v4_conn = 0;
- info->has_v6_conn = 0;
- info->has_nat64_conn = 0;
+ memset(info, 0, sizeof(*info));
}
void
struct mptses *mpte = mptompte(mp);
struct socket *mp_so;
struct mptcb *mp_tp;
- int locked = 0;
uint32_t i, ifindex;
+ struct ifnet *ifp;
+ int locked = 0;
ifindex = interface_index;
VERIFY(ifindex != IFSCOPE_NONE);
return;
}
+ mp_so = mptetoso(mpte);
+
if (action != NECP_CLIENT_CBACTION_INITIAL) {
- mpte_lock(mpte);
+ socket_lock(mp_so, 1);
locked = 1;
/* Check again, because it might have changed while waiting */
}
}
- mpte_lock_assert_held(mpte);
+ socket_lock_assert_owned(mp_so);
mp_tp = mpte->mpte_mptcb;
- mp_so = mptetoso(mpte);
- os_log_info(mptcp_log_handle, "%s, action: %u ifindex %u usecount %u mpt_flags %#x state %u v4 %u v6 %u nat64 %u power %u\n",
- __func__, action, ifindex, mp->mpp_socket->so_usecount, mp_tp->mpt_flags, mp_tp->mpt_state,
+ ifnet_head_lock_shared();
+ ifp = ifindex2ifnet[ifindex];
+ ifnet_head_done();
+
+ os_log(mptcp_log_handle, "%s - %lx: action: %u ifindex %u delegated to %u usecount %u mpt_flags %#x state %u v4 %u v6 %u nat64 %u power %u\n",
+ __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte), action, ifindex,
+ ifp && ifp->if_delegated.ifp ? ifp->if_delegated.ifp->if_index : IFSCOPE_NONE,
+ mp->mpp_socket->so_usecount, mp_tp->mpt_flags, mp_tp->mpt_state,
has_v4, has_v6, has_nat64, low_power);
/* No need on fallen back sockets */
} else if (action == NECP_CLIENT_CBACTION_VIABLE ||
action == NECP_CLIENT_CBACTION_INITIAL) {
int found_slot = 0, slot_index = -1;
- struct ifnet *ifp;
-
- ifnet_head_lock_shared();
- ifp = ifindex2ifnet[ifindex];
- ifnet_head_done();
+ struct sockaddr *dst;
if (ifp == NULL) {
goto out;
}
+ if (IFNET_IS_COMPANION_LINK(ifp)) {
+ goto out;
+ }
+
if (IFNET_IS_EXPENSIVE(ifp) &&
(mp_so->so_restrictions & SO_RESTRICT_DENY_EXPENSIVE)) {
goto out;
}
+ if (IFNET_IS_CONSTRAINED(ifp) &&
+ (mp_so->so_restrictions & SO_RESTRICT_DENY_CONSTRAINED)) {
+ goto out;
+ }
+
if (IFNET_IS_CELLULAR(ifp) &&
(mp_so->so_restrictions & SO_RESTRICT_DENY_CELLULAR)) {
goto out;
}
}
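
/*
 * Illustrative sketch, not part of the patch: the expensive, constrained,
 * and cellular checks above share one shape; a hypothetical helper could
 * fold them into a single predicate.
 */
static boolean_t
mptcp_itf_denied_by_restrictions(struct ifnet *ifp, struct socket *mp_so)
{
	if (IFNET_IS_EXPENSIVE(ifp) &&
	    (mp_so->so_restrictions & SO_RESTRICT_DENY_EXPENSIVE)) {
		return TRUE;
	}
	if (IFNET_IS_CONSTRAINED(ifp) &&
	    (mp_so->so_restrictions & SO_RESTRICT_DENY_CONSTRAINED)) {
		return TRUE;
	}
	if (IFNET_IS_CELLULAR(ifp) &&
	    (mp_so->so_restrictions & SO_RESTRICT_DENY_CELLULAR)) {
		return TRUE;
	}
	return FALSE;
}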
- if ((mpte->mpte_dst.sa_family == AF_INET || mpte->mpte_dst.sa_family == 0) &&
- !has_nat64 && !has_v4) {
+ dst = mptcp_get_session_dst(mpte, has_v6, has_v4);
+ if (dst && dst->sa_family == AF_INET &&
+ has_v6 && !has_nat64 && !has_v4) {
if (found_slot) {
+ mpte->mpte_itfinfo[slot_index].ifindex = ifindex;
mpte->mpte_itfinfo[slot_index].has_v4_conn = has_v4;
mpte->mpte_itfinfo[slot_index].has_v6_conn = has_v6;
mpte->mpte_itfinfo[slot_index].has_nat64_conn = has_nat64;
}
- mptcp_ask_for_nat64(ifp);
goto out;
}
struct mpt_itf_info *info = _MALLOC(sizeof(*info) * new_size, M_TEMP, M_ZERO);
if (info == NULL) {
- os_log_error(mptcp_log_handle, "%s malloc failed for %u\n",
- __func__, new_size);
+ os_log_error(mptcp_log_handle, "%s - %lx: malloc failed for %u\n",
+ __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte), new_size);
goto out;
}
out:
if (locked) {
- mpte_unlock(mpte);
+ socket_unlock(mp_so, 1);
}
}
struct mptses *mpte = mpsotompte(mp_so);
uint32_t i;
- mpte_lock_assert_held(mpte);
+ socket_lock_assert_owned(mp_so);
ifnet_head_lock_shared();
info->ifindex = IFSCOPE_NONE;
}
+ if (IFNET_IS_CONSTRAINED(ifp) &&
+ (mp_so->so_restrictions & SO_RESTRICT_DENY_CONSTRAINED)) {
+ info->ifindex = IFSCOPE_NONE;
+ }
+
if (IFNET_IS_CELLULAR(ifp) &&
(mp_so->so_restrictions & SO_RESTRICT_DENY_CELLULAR)) {
info->ifindex = IFSCOPE_NONE;