* until the next round of events processing.
*/
typedef enum {
- MPTS_EVRET_DELETE = 1, /* delete this subflow */
- MPTS_EVRET_OK = 2, /* OK */
- MPTS_EVRET_CONNECT_PENDING = 3, /* resume pended connects */
- MPTS_EVRET_DISCONNECT_FALLBACK = 4, /* abort all but preferred */
+ MPTS_EVRET_DELETE = 1, /* delete this subflow */
+ MPTS_EVRET_OK = 2, /* OK */
+ MPTS_EVRET_CONNECT_PENDING = 3, /* resume pended connects */
+ MPTS_EVRET_DISCONNECT_FALLBACK = 4, /* abort all but preferred */
} ev_ret_t;
static ev_ret_t mptcp_subflow_events(struct mptses *, struct mptsub *, uint64_t *);
static void mptcp_do_sha1(mptcp_key_t *, char *);
static void mptcp_init_local_parms(struct mptses *);
-static unsigned int mptsub_zone_size; /* size of mptsub */
-static struct zone *mptsub_zone; /* zone for mptsub */
+static unsigned int mptsub_zone_size; /* size of mptsub */
+static struct zone *mptsub_zone; /* zone for mptsub */
-static unsigned int mptopt_zone_size; /* size of mptopt */
-static struct zone *mptopt_zone; /* zone for mptopt */
+static unsigned int mptopt_zone_size; /* size of mptopt */
+static struct zone *mptopt_zone; /* zone for mptopt */
-static unsigned int mpt_subauth_entry_size; /* size of subf auth entry */
-static struct zone *mpt_subauth_zone; /* zone of subf auth entry */
+static unsigned int mpt_subauth_entry_size; /* size of subf auth entry */
+static struct zone *mpt_subauth_zone; /* zone of subf auth entry */
struct mppcbinfo mtcbinfo;
-#define MPTCP_SUBFLOW_WRITELEN (8 * 1024) /* bytes to write each time */
-#define MPTCP_SUBFLOW_READLEN (8 * 1024) /* bytes to read each time */
+#define MPTCP_SUBFLOW_WRITELEN (8 * 1024) /* bytes to write each time */
+#define MPTCP_SUBFLOW_READLEN (8 * 1024) /* bytes to read each time */
SYSCTL_DECL(_net_inet);
-SYSCTL_NODE(_net_inet, OID_AUTO, mptcp, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "MPTCP");
+SYSCTL_NODE(_net_inet, OID_AUTO, mptcp, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "MPTCP");
-uint32_t mptcp_dbg_area = 31; /* more noise if greater than 1 */
-SYSCTL_UINT(_net_inet_mptcp, OID_AUTO, dbg_area, CTLFLAG_RW|CTLFLAG_LOCKED,
- &mptcp_dbg_area, 0, "MPTCP debug area");
+uint32_t mptcp_dbg_area = 31; /* more noise if greater than 1 */
+SYSCTL_UINT(_net_inet_mptcp, OID_AUTO, dbg_area, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &mptcp_dbg_area, 0, "MPTCP debug area");
uint32_t mptcp_dbg_level = 1;
SYSCTL_INT(_net_inet_mptcp, OID_AUTO, dbg_level, CTLFLAG_RW | CTLFLAG_LOCKED,
- &mptcp_dbg_level, 0, "MPTCP debug level");
+ &mptcp_dbg_level, 0, "MPTCP debug level");
-SYSCTL_UINT(_net_inet_mptcp, OID_AUTO, pcbcount, CTLFLAG_RD|CTLFLAG_LOCKED,
- &mtcbinfo.mppi_count, 0, "Number of active PCBs");
+SYSCTL_UINT(_net_inet_mptcp, OID_AUTO, pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &mtcbinfo.mppi_count, 0, "Number of active PCBs");
static int mptcp_alternate_port = 0;
SYSCTL_INT(_net_inet_mptcp, OID_AUTO, alternate_port, CTLFLAG_RW | CTLFLAG_LOCKED,
- &mptcp_alternate_port, 0, "Set alternate port for MPTCP connections");
+ &mptcp_alternate_port, 0, "Set alternate port for MPTCP connections");
static struct protosw mptcp_subflow_protosw;
static struct pr_usrreqs mptcp_subflow_usrreqs;
static struct pr_usrreqs mptcp_subflow_usrreqs6;
#endif /* INET6 */
-static uint8_t mptcp_create_subflows_scheduled;
+static uint8_t mptcp_create_subflows_scheduled;
typedef struct mptcp_subflow_event_entry {
uint64_t sofilt_hint_mask;
ev_ret_t (*sofilt_hint_ev_hdlr)(
- struct mptses *mpte,
- struct mptsub *mpts,
- uint64_t *p_mpsofilt_hint,
- uint64_t event);
+ struct mptses *mpte,
+ struct mptsub *mpts,
+ uint64_t *p_mpsofilt_hint,
+ uint64_t event);
} mptsub_ev_entry_t;
static uint8_t mptcp_cellicon_is_set;
static uint32_t mptcp_last_cellicon_set;
-#define MPTCP_CELLICON_TOGGLE_RATE (5 * TCP_RETRANSHZ) /* Only toggle every 5 seconds */
+#define MPTCP_CELLICON_TOGGLE_RATE (5 * TCP_RETRANSHZ) /* Only toggle every 5 seconds */
/*
* XXX The order of the event handlers below is really
* really important. Think twice before changing it.
*/
-static mptsub_ev_entry_t mpsub_ev_entry_tbl [] = {
+static mptsub_ev_entry_t mpsub_ev_entry_tbl[] = {
{
.sofilt_hint_mask = SO_FILT_HINT_MPCANTRCVMORE,
- .sofilt_hint_ev_hdlr = mptcp_subflow_mpcantrcvmore_ev,
+ .sofilt_hint_ev_hdlr = mptcp_subflow_mpcantrcvmore_ev,
},
{
.sofilt_hint_mask = SO_FILT_HINT_MPFAILOVER,
struct ip6protosw *prp6;
#endif /* INET6 */
- VERIFY((pp->pr_flags & (PR_INITIALIZED|PR_ATTACHED)) == PR_ATTACHED);
+ VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED);
/* do this only once */
- if (mptcp_initialized)
+ if (mptcp_initialized) {
return;
+ }
mptcp_initialized = 1;
/*
*/
prp = pffindproto_locked(PF_INET, IPPROTO_TCP, SOCK_STREAM);
VERIFY(prp != NULL);
- bcopy(prp, &mptcp_subflow_protosw, sizeof (*prp));
+ bcopy(prp, &mptcp_subflow_protosw, sizeof(*prp));
bcopy(prp->pr_usrreqs, &mptcp_subflow_usrreqs,
- sizeof (mptcp_subflow_usrreqs));
+ sizeof(mptcp_subflow_usrreqs));
mptcp_subflow_protosw.pr_entry.tqe_next = NULL;
mptcp_subflow_protosw.pr_entry.tqe_prev = NULL;
mptcp_subflow_protosw.pr_usrreqs = &mptcp_subflow_usrreqs;
prp6 = (struct ip6protosw *)pffindproto_locked(PF_INET6,
IPPROTO_TCP, SOCK_STREAM);
VERIFY(prp6 != NULL);
- bcopy(prp6, &mptcp_subflow_protosw6, sizeof (*prp6));
+ bcopy(prp6, &mptcp_subflow_protosw6, sizeof(*prp6));
bcopy(prp6->pr_usrreqs, &mptcp_subflow_usrreqs6,
- sizeof (mptcp_subflow_usrreqs6));
+ sizeof(mptcp_subflow_usrreqs6));
mptcp_subflow_protosw6.pr_entry.tqe_next = NULL;
mptcp_subflow_protosw6.pr_entry.tqe_prev = NULL;
mptcp_subflow_protosw6.pr_usrreqs = &mptcp_subflow_usrreqs6;
(struct socket_filter **)(uintptr_t)0xdeadbeefdeadbeef;
#endif /* INET6 */
- bzero(&mtcbinfo, sizeof (mtcbinfo));
+ bzero(&mtcbinfo, sizeof(mtcbinfo));
TAILQ_INIT(&mtcbinfo.mppi_pcbs);
- mtcbinfo.mppi_size = sizeof (struct mpp_mtp);
+ mtcbinfo.mppi_size = sizeof(struct mpp_mtp);
if ((mtcbinfo.mppi_zone = zinit(mtcbinfo.mppi_size,
1024 * mtcbinfo.mppi_size, 8192, "mptcb")) == NULL) {
panic("%s: unable to allocate MPTCP PCB zone\n", __func__);
/* attach to MP domain for garbage collection to take place */
mp_pcbinfo_attach(&mtcbinfo);
- mptsub_zone_size = sizeof (struct mptsub);
+ mptsub_zone_size = sizeof(struct mptsub);
if ((mptsub_zone = zinit(mptsub_zone_size, 1024 * mptsub_zone_size,
8192, "mptsub")) == NULL) {
panic("%s: unable to allocate MPTCP subflow zone\n", __func__);
zone_change(mptsub_zone, Z_CALLERACCT, FALSE);
zone_change(mptsub_zone, Z_EXPAND, TRUE);
- mptopt_zone_size = sizeof (struct mptopt);
+ mptopt_zone_size = sizeof(struct mptopt);
if ((mptopt_zone = zinit(mptopt_zone_size, 128 * mptopt_zone_size,
1024, "mptopt")) == NULL) {
panic("%s: unable to allocate MPTCP option zone\n", __func__);
zone_change(mptopt_zone, Z_CALLERACCT, FALSE);
zone_change(mptopt_zone, Z_EXPAND, TRUE);
- mpt_subauth_entry_size = sizeof (struct mptcp_subf_auth_entry);
+ mpt_subauth_entry_size = sizeof(struct mptcp_subf_auth_entry);
if ((mpt_subauth_zone = zinit(mpt_subauth_entry_size,
1024 * mpt_subauth_entry_size, 8192, "mptauth")) == NULL) {
panic("%s: unable to allocate MPTCP address auth zone \n",
if (ifp == NULL) {
mptcplog((LOG_ERR, "%s: no ifp on subflow\n", __func__),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
- return (-1);
+ MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
+ return -1;
}
for (i = 0; i < MPTCP_ITFSTATS_SIZE; i++) {
if (stats[i].ifindex == IFSCOPE_NONE) {
- if (index < 0)
+ if (index < 0) {
index = i;
+ }
continue;
}
if (stats[i].ifindex == ifp->if_index) {
index = i;
- return (index);
+ return index;
}
}
if (index != -1) {
stats[index].ifindex = ifp->if_index;
- if (stats[index].is_expensive == 0)
+ if (stats[index].is_expensive == 0) {
stats[index].is_expensive = IFNET_IS_CELLULAR(ifp);
+ }
}
- return (index);
+ return index;
}
void
index = mptcp_get_statsindex(mpte->mpte_itfstats, mpts);
- if (index != -1)
+ if (index != -1) {
mpte->mpte_itfstats[index].switches++;
+ }
}
/*
__IGNORE_WCASTALIGN(mp_tp = &((struct mpp_mtp *)mpp)->mtcb);
/* MPTCP Multipath PCB Extension */
- bzero(mpte, sizeof (*mpte));
+ bzero(mpte, sizeof(*mpte));
VERIFY(mpp->mpp_pcbe == NULL);
mpp->mpp_pcbe = mpte;
mpte->mpte_mppcb = mpp;
mpte->mpte_itfinfo = &mpte->_mpte_itfinfo[0];
mpte->mpte_itfinfo_size = MPTE_ITFINFO_SIZE;
- if (mptcp_alternate_port)
+ if (mptcp_alternate_port) {
mpte->mpte_alternate_port = htons(mptcp_alternate_port);
+ }
/* MPTCP Protocol Control Block */
- bzero(mp_tp, sizeof (*mp_tp));
+ bzero(mp_tp, sizeof(*mp_tp));
mp_tp->mpt_mpte = mpte;
mp_tp->mpt_state = MPTCPS_CLOSED;
DTRACE_MPTCP1(session__create, struct mppcb *, mpp);
- return (0);
+ return 0;
}
static void
mptcpstats_get_bytes(struct mptses *mpte, boolean_t initial_cell,
- uint64_t *cellbytes, uint64_t *allbytes)
+ uint64_t *cellbytes, uint64_t *allbytes)
{
int64_t mycellbytes = 0;
uint64_t myallbytes = 0;
if (mycellbytes < 0) {
mptcplog((LOG_ERR, "%s cellbytes is %d\n", __func__, mycellbytes),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
+ MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
*cellbytes = 0;
*allbytes = 0;
} else {
if (cell && mpte->mpte_handshake_success) {
tcpstat.tcps_mptcp_fp_handover_success_cell++;
- if (mpte->mpte_used_wifi)
+ if (mpte->mpte_used_wifi) {
tcpstat.tcps_mptcp_handover_wifi_from_cell++;
+ }
} else if (mpte->mpte_handshake_success) {
tcpstat.tcps_mptcp_fp_handover_success_wifi++;
- if (mpte->mpte_used_cell)
+ if (mpte->mpte_used_cell) {
tcpstat.tcps_mptcp_handover_cell_from_wifi++;
+ }
}
} else {
tcpstat.tcps_mptcp_handover_attempt++;
if (cell && mpte->mpte_handshake_success) {
tcpstat.tcps_mptcp_handover_success_cell++;
- if (mpte->mpte_used_wifi)
+ if (mpte->mpte_used_wifi) {
tcpstat.tcps_mptcp_handover_wifi_from_cell++;
+ }
} else if (mpte->mpte_handshake_success) {
tcpstat.tcps_mptcp_handover_success_wifi++;
- if (mpte->mpte_used_cell)
+ if (mpte->mpte_used_cell) {
tcpstat.tcps_mptcp_handover_cell_from_wifi++;
+ }
}
}
if (mpte->mpte_handshake_success) {
tcpstat.tcps_mptcp_fp_interactive_success++;
- if (!cell && mpte->mpte_used_cell)
+ if (!cell && mpte->mpte_used_cell) {
tcpstat.tcps_mptcp_interactive_cell_from_wifi++;
+ }
}
} else {
tcpstat.tcps_mptcp_interactive_attempt++;
if (mpte->mpte_handshake_success) {
tcpstat.tcps_mptcp_interactive_success++;
- if (!cell && mpte->mpte_used_cell)
+ if (!cell && mpte->mpte_used_cell) {
tcpstat.tcps_mptcp_interactive_cell_from_wifi++;
+ }
}
}
if (mpte->mpte_flags & MPTE_FIRSTPARTY) {
tcpstat.tcps_mptcp_fp_aggregate_attempt++;
- if (mpte->mpte_handshake_success)
+ if (mpte->mpte_handshake_success) {
tcpstat.tcps_mptcp_fp_aggregate_success++;
+ }
} else {
tcpstat.tcps_mptcp_aggregate_attempt++;
break;
}
- if (cell && mpte->mpte_handshake_success && mpte->mpte_used_wifi)
+ if (cell && mpte->mpte_handshake_success && mpte->mpte_used_wifi) {
tcpstat.tcps_mptcp_back_to_wifi++;
+ }
+
+ if (mpte->mpte_triggered_cell) {
+ tcpstat.tcps_mptcp_triggered_cell++;
+ }
}
/*
{
struct mptcb *mp_tp;
- mpte_lock_assert_held(mpte); /* same as MP socket lock */
+ mpte_lock_assert_held(mpte); /* same as MP socket lock */
mp_tp = mpte->mpte_mptcb;
VERIFY(mp_tp != NULL);
mptcp_flush_sopts(mpte);
VERIFY(TAILQ_EMPTY(&mpte->mpte_subflows) && mpte->mpte_numflows == 0);
- if (mpte->mpte_itfinfo_size > MPTE_ITFINFO_SIZE)
+ if (mpte->mpte_itfinfo_size > MPTE_ITFINFO_SIZE) {
_FREE(mpte->mpte_itfinfo, M_TEMP);
+ }
mpte->mpte_itfinfo = NULL;
static boolean_t
mptcp_ok_to_create_subflows(struct mptcb *mp_tp)
{
- return (mp_tp->mpt_state >= MPTCPS_ESTABLISHED &&
- mp_tp->mpt_state < MPTCPS_TIME_WAIT &&
- !(mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP));
+ return mp_tp->mpt_state >= MPTCPS_ESTABLISHED &&
+ mp_tp->mpt_state < MPTCPS_FIN_WAIT_1 &&
+ !(mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP);
}
static int
{
static const struct in6_addr well_known_prefix = {
.__u6_addr.__u6_addr8 = {0x00, 0x64, 0xff, 0x9b, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00},
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00},
};
char buf[MAX_IPv6_STR_LEN];
char *ptrv4 = (char *)addrv4;
char *ptr = (char *)addr;
- if (IN_ZERONET(addrv4->s_addr) || // 0.0.0.0/8 Source hosts on local network
- IN_LOOPBACK(addrv4->s_addr) || // 127.0.0.0/8 Loopback
- IN_LINKLOCAL(addrv4->s_addr) || // 169.254.0.0/16 Link Local
- IN_DS_LITE(addrv4->s_addr) || // 192.0.0.0/29 DS-Lite
- IN_6TO4_RELAY_ANYCAST(addrv4->s_addr) || // 192.88.99.0/24 6to4 Relay Anycast
- IN_MULTICAST(addrv4->s_addr) || // 224.0.0.0/4 Multicast
+ if (IN_ZERONET(ntohl(addrv4->s_addr)) || // 0.0.0.0/8 Source hosts on local network
+ IN_LOOPBACK(ntohl(addrv4->s_addr)) || // 127.0.0.0/8 Loopback
+ IN_LINKLOCAL(ntohl(addrv4->s_addr)) || // 169.254.0.0/16 Link Local
+ IN_DS_LITE(ntohl(addrv4->s_addr)) || // 192.0.0.0/29 DS-Lite
+ IN_6TO4_RELAY_ANYCAST(ntohl(addrv4->s_addr)) || // 192.88.99.0/24 6to4 Relay Anycast
+ IN_MULTICAST(ntohl(addrv4->s_addr)) || // 224.0.0.0/4 Multicast
INADDR_BROADCAST == addrv4->s_addr) { // 255.255.255.255/32 Limited Broadcast
- return (-1);
+ return -1;
}
/* Check for the well-known prefix */
if (len == NAT64_PREFIX_LEN_96 &&
IN6_ARE_ADDR_EQUAL(addr, &well_known_prefix)) {
- if (IN_PRIVATE(addrv4->s_addr) || // 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16 Private-Use
- IN_SHARED_ADDRESS_SPACE(addrv4->s_addr)) // 100.64.0.0/10 Shared Address Space
- return (-1);
+ if (IN_PRIVATE(ntohl(addrv4->s_addr)) || // 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16 Private-Use
+ IN_SHARED_ADDRESS_SPACE(ntohl(addrv4->s_addr))) { // 100.64.0.0/10 Shared Address Space
+ return -1;
+ }
}
switch (len) {
- case NAT64_PREFIX_LEN_96:
- memcpy(ptr + 12, ptrv4, 4);
- break;
- case NAT64_PREFIX_LEN_64:
- memcpy(ptr + 9, ptrv4, 4);
- break;
- case NAT64_PREFIX_LEN_56:
- memcpy(ptr + 7, ptrv4, 1);
- memcpy(ptr + 9, ptrv4 + 1, 3);
- break;
- case NAT64_PREFIX_LEN_48:
- memcpy(ptr + 6, ptrv4, 2);
- memcpy(ptr + 9, ptrv4 + 2, 2);
- break;
- case NAT64_PREFIX_LEN_40:
- memcpy(ptr + 5, ptrv4, 3);
- memcpy(ptr + 9, ptrv4 + 3, 1);
- break;
- case NAT64_PREFIX_LEN_32:
- memcpy(ptr + 4, ptrv4, 4);
- break;
- default:
- panic("NAT64-prefix len is wrong: %u\n", len);
+ case NAT64_PREFIX_LEN_96:
+ memcpy(ptr + 12, ptrv4, 4);
+ break;
+ case NAT64_PREFIX_LEN_64:
+ memcpy(ptr + 9, ptrv4, 4);
+ break;
+ case NAT64_PREFIX_LEN_56:
+ memcpy(ptr + 7, ptrv4, 1);
+ memcpy(ptr + 9, ptrv4 + 1, 3);
+ break;
+ case NAT64_PREFIX_LEN_48:
+ memcpy(ptr + 6, ptrv4, 2);
+ memcpy(ptr + 9, ptrv4 + 2, 2);
+ break;
+ case NAT64_PREFIX_LEN_40:
+ memcpy(ptr + 5, ptrv4, 3);
+ memcpy(ptr + 9, ptrv4 + 3, 1);
+ break;
+ case NAT64_PREFIX_LEN_32:
+ memcpy(ptr + 4, ptrv4, 4);
+ break;
+ default:
+ panic("NAT64-prefix len is wrong: %u\n", len);
}
os_log_info(mptcp_log_handle, "%s: nat64prefix-len %u synthesized %s\n",
- __func__, len,
- inet_ntop(AF_INET6, (void *)addr, buf, sizeof(buf)));
+ __func__, len,
+ inet_ntop(AF_INET6, (void *)addr, buf, sizeof(buf)));
- return (0);
+ return 0;
}
+static void
+mptcp_trigger_cell_bringup(struct mptses *mpte)
+{
+ struct socket *mp_so = mptetoso(mpte);
+
+ if (!uuid_is_null(mpsotomppcb(mp_so)->necp_client_uuid)) {
+ uuid_string_t uuidstr;
+ int err;
+
+ mpte_unlock(mpte);
+ err = necp_client_assert_bb_radio_manager(mpsotomppcb(mp_so)->necp_client_uuid,
+ TRUE);
+ mpte_lock(mpte);
+
+ if (err == 0) {
+ mpte->mpte_triggered_cell = 1;
+ }
+
+ uuid_unparse_upper(mpsotomppcb(mp_so)->necp_client_uuid, uuidstr);
+ os_log_info(mptcp_log_handle, "%s asked irat to bringup cell for uuid %s, err %d\n",
+ __func__, uuidstr, err);
+ } else {
+ os_log_info(mptcp_log_handle, "%s UUID is already null\n", __func__);
+ }
+}
+
+
void
mptcp_check_subflows_and_add(struct mptses *mpte)
{
struct mptcb *mp_tp = mpte->mpte_mptcb;
+ boolean_t cellular_viable = FALSE;
+ boolean_t want_cellular = TRUE;
uint32_t i;
- if (!mptcp_ok_to_create_subflows(mp_tp))
+ if (!mptcp_ok_to_create_subflows(mp_tp)) {
return;
+ }
for (i = 0; i < mpte->mpte_itfinfo_size; i++) {
struct mpt_itf_info *info;
struct mptsub *mpts;
+ struct ifnet *ifp;
uint32_t ifindex;
int found = 0;
info = &mpte->mpte_itfinfo[i];
- if (info->no_mptcp_support)
+ if (info->no_mptcp_support) {
continue;
+ }
ifindex = info->ifindex;
- if (ifindex == IFSCOPE_NONE)
+ if (ifindex == IFSCOPE_NONE) {
+ continue;
+ }
+
+ ifnet_head_lock_shared();
+ ifp = ifindex2ifnet[ifindex];
+ ifnet_head_done();
+
+ if (ifp == NULL) {
continue;
+ }
+
+ if (IFNET_IS_CELLULAR(ifp)) {
+ cellular_viable = TRUE;
+ }
TAILQ_FOREACH(mpts, &mpte->mpte_subflows, mpts_entry) {
- const struct ifnet *ifp = sotoinpcb(mpts->mpts_socket)->inp_last_outifp;
+ const struct ifnet *subifp = sotoinpcb(mpts->mpts_socket)->inp_last_outifp;
- if (ifp == NULL)
+ if (subifp == NULL) {
continue;
-
- if (ifp->if_index == ifindex &&
- !(mpts->mpts_socket->so_state & SS_ISDISCONNECTED) &&
- sototcpcb(mpts->mpts_socket)->t_state != TCPS_CLOSED) {
- /*
- * We found a subflow on this interface.
- * No need to create a new one.
- */
- found = 1;
- break;
}
/*
* good performance.
*/
if (mpte->mpte_svctype == MPTCP_SVCTYPE_HANDOVER &&
- !IFNET_IS_CELLULAR(ifp) &&
+ !IFNET_IS_CELLULAR(subifp) &&
!(mpts->mpts_flags & (MPTSF_DISCONNECTING | MPTSF_DISCONNECTED | MPTSF_CLOSE_REQD)) &&
- (!mptcp_is_wifi_unusable() ||
- (sototcpcb(mpts->mpts_socket)->t_rxtshift < mptcp_fail_thresh &&
- mptetoso(mpte)->so_snd.sb_cc))) {
- mptcplog((LOG_DEBUG, "%s handover, wifi state %u rxt %u ifindex %u this %u\n",
- __func__, mptcp_is_wifi_unusable(), sototcpcb(mpts->mpts_socket)->t_rxtshift, ifindex,
- ifp->if_index),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
+ (mptcp_is_wifi_unusable(mpte) == 0 ||
+ (sototcpcb(mpts->mpts_socket)->t_rxtshift < mptcp_fail_thresh * 2 &&
+ ((mpte->mpte_flags & MPTE_FIRSTPARTY) || mptetoso(mpte)->so_snd.sb_cc)))) {
+ os_log_debug(mptcp_log_handle, "%s handover, wifi state %d rxt %u first-party %u sb_cc %u ifindex %u this %u\n",
+ __func__, mptcp_is_wifi_unusable(mpte),
+ sototcpcb(mpts->mpts_socket)->t_rxtshift,
+ !!(mpte->mpte_flags & MPTE_FIRSTPARTY),
+ mptetoso(mpte)->so_snd.sb_cc,
+ ifindex, subifp->if_index);
+ found = 1;
+
+ /* We found a proper subflow on WiFi - no need for cell */
+ want_cellular = FALSE;
+ break;
+ } else {
+ os_log_debug(mptcp_log_handle, "%s svc %u cell %u flags %#x unusable %d rtx %u first %u sbcc %u\n",
+ __func__, mpte->mpte_svctype, IFNET_IS_CELLULAR(subifp), mpts->mpts_flags,
+ mptcp_is_wifi_unusable(mpte), sototcpcb(mpts->mpts_socket)->t_rxtshift,
+ !!(mpte->mpte_flags & MPTE_FIRSTPARTY), mptetoso(mpte)->so_snd.sb_cc);
+ }
+
+ if (subifp->if_index == ifindex &&
+ !(mpts->mpts_socket->so_state & SS_ISDISCONNECTED) &&
+ sototcpcb(mpts->mpts_socket)->t_state != TCPS_CLOSED) {
+ /*
+ * We found a subflow on this interface.
+ * No need to create a new one.
+ */
found = 1;
break;
}
struct sockaddr_in6 nat64pre;
if (mpte->mpte_dst.sa_family == AF_INET &&
- !info->has_v4_conn && info->has_v6_conn) {
+ !info->has_v4_conn && info->has_nat64_conn) {
struct ipv6_prefix nat64prefixes[NAT64_MAX_NUM_PREFIXES];
- struct ifnet *ifp;
int error, j;
bzero(&nat64pre, sizeof(struct sockaddr_in6));
- ifnet_head_lock_shared();
- ifp = ifindex2ifnet[ifindex];
- ifnet_head_done();
-
error = ifnet_get_nat64prefix(ifp, nat64prefixes);
if (error) {
- mptcplog((LOG_ERR, "%s: no NAT64-prefix on itf %s, error %d\n",
- __func__, ifp->if_name, error),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
+ os_log_error(mptcp_log_handle, "%s: no NAT64-prefix on itf %s, error %d\n",
+ __func__, ifp->if_name, error);
continue;
}
for (j = 0; j < NAT64_MAX_NUM_PREFIXES; j++) {
- if (nat64prefixes[j].prefix_len != 0)
+ if (nat64prefixes[j].prefix_len != 0) {
break;
+ }
}
VERIFY(j < NAT64_MAX_NUM_PREFIXES);
error = mptcp_synthesize_nat64(&nat64prefixes[j].ipv6_prefix,
- nat64prefixes[j].prefix_len,
- &mpte->__mpte_dst_v4.sin_addr);
+ nat64prefixes[j].prefix_len,
+ &mpte->__mpte_dst_v4.sin_addr);
if (error != 0) {
- mptcplog((LOG_INFO, "%s: cannot synthesize this addr\n", __func__),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_LOG);
+ os_log_info(mptcp_log_handle, "%s: cannot synthesize this addr\n",
+ __func__);
continue;
}
memcpy(&nat64pre.sin6_addr,
- &nat64prefixes[j].ipv6_prefix,
- sizeof(nat64pre.sin6_addr));
+ &nat64prefixes[j].ipv6_prefix,
+ sizeof(nat64pre.sin6_addr));
nat64pre.sin6_len = sizeof(struct sockaddr_in6);
nat64pre.sin6_family = AF_INET6;
nat64pre.sin6_port = mpte->__mpte_dst_v6.sin6_port;
dst = (struct sockaddr *)&mpte->mpte_dst_v4_nat64;
}
- if (dst->sa_family == AF_INET && !info->has_v4_conn)
+ if (dst->sa_family == AF_INET && !info->has_v4_conn) {
continue;
- if (dst->sa_family == AF_INET6 && !info->has_v6_conn)
+ }
+ if (dst->sa_family == AF_INET6 && !info->has_v6_conn) {
continue;
+ }
mptcp_subflow_add(mpte, NULL, dst, ifindex, NULL);
}
}
+
+ if (!cellular_viable && want_cellular) {
+ /* Trigger Cell Bringup */
+ mptcp_trigger_cell_bringup(mpte);
+ }
}
/*
{
struct mptsub *mpts, *tmpts;
int found_working_subflow = 0, removed_some = 0;
- int wifi_unusable = mptcp_is_wifi_unusable();
+ int wifi_unusable = mptcp_is_wifi_unusable(mpte);
- if (mpte->mpte_svctype != MPTCP_SVCTYPE_HANDOVER)
+ if (mpte->mpte_svctype != MPTCP_SVCTYPE_HANDOVER) {
return;
+ }
/*
* Look for a subflow that is on a non-cellular interface
struct socket *so;
struct tcpcb *tp;
- if (ifp == NULL || IFNET_IS_CELLULAR(ifp))
+ if (ifp == NULL || IFNET_IS_CELLULAR(ifp)) {
continue;
+ }
so = mpts->mpts_socket;
tp = sototcpcb(so);
if (!(mpts->mpts_flags & MPTSF_CONNECTED) ||
- tp->t_state != TCPS_ESTABLISHED)
+ tp->t_state != TCPS_ESTABLISHED) {
continue;
+ }
- /* Either this subflow is in good condition while we try to send */
- if (tp->t_rxtshift == 0 && mptetoso(mpte)->so_snd.sb_cc)
+ /* Is this subflow in good condition? */
+ if (tp->t_rxtshift == 0) {
found_working_subflow = 1;
+ }
/* Or WiFi is fine */
- if (!wifi_unusable)
+ if (!wifi_unusable) {
found_working_subflow = 1;
+ }
}
/*
* Couldn't find a working subflow, let's not remove those on a cellular
* interface.
*/
- if (!found_working_subflow)
+ if (!found_working_subflow) {
return;
+ }
TAILQ_FOREACH_SAFE(mpts, &mpte->mpte_subflows, mpts_entry, tmpts) {
const struct ifnet *ifp = sotoinpcb(mpts->mpts_socket)->inp_last_outifp;
/* Only remove cellular subflows */
- if (ifp == NULL || !IFNET_IS_CELLULAR(ifp))
+ if (ifp == NULL || !IFNET_IS_CELLULAR(ifp)) {
continue;
+ }
soevent(mpts->mpts_socket, SO_FILT_HINT_LOCKED | SO_FILT_HINT_MUSTRST);
removed_some = 1;
}
- if (removed_some)
+ if (removed_some) {
mptcp_unset_cellicon();
+ }
}
static void
mpts->mpts_flags &= ~MPTSF_CLOSE_REQD;
soevent(mpts->mpts_socket,
- SO_FILT_HINT_LOCKED | SO_FILT_HINT_NOSRCADDR);
+ SO_FILT_HINT_LOCKED | SO_FILT_HINT_NOSRCADDR);
}
}
}
* Start with clearing, because we might be processing connections
* while a new event comes in.
*/
- if (OSTestAndClear(0x01, &mptcp_create_subflows_scheduled))
+ if (OSTestAndClear(0x01, &mptcp_create_subflows_scheduled)) {
mptcplog((LOG_ERR, "%s: bit was already cleared!\n", __func__),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
+ MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
+ }
/* Iterate over all MPTCP connections */
struct mptses *mpte;
struct socket *mp_so;
- if (!(mpp->mpp_flags & MPP_CREATE_SUBFLOWS))
+ if (!(mpp->mpp_flags & MPP_CREATE_SUBFLOWS)) {
continue;
+ }
mpp_lock(mpp);
if (!mptcp_ok_to_create_subflows(mp_tp)) {
mptcplog((LOG_DEBUG, "%s: not a good time for subflows, state %u flags %#x",
- __func__, mp_tp->mpt_state, mp_tp->mpt_flags),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
+ __func__, mp_tp->mpt_state, mp_tp->mpt_flags),
+ MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
return;
}
mpp->mpp_flags |= MPP_CREATE_SUBFLOWS;
}
- if (OSTestAndSet(0x01, &mptcp_create_subflows_scheduled))
+ if (OSTestAndSet(0x01, &mptcp_create_subflows_scheduled)) {
return;
+ }
/* Do the call in 100ms to allow NECP to schedule it on all sockets */
- timeout(mptcp_create_subflows, NULL, hz/10);
+ timeout(mptcp_create_subflows, NULL, hz / 10);
}
/*
bzero(mpo, mptopt_zone_size);
}
- return (mpo);
+ return mpo;
}
/*
void
mptcp_sopt_insert(struct mptses *mpte, struct mptopt *mpo)
{
- mpte_lock_assert_held(mpte); /* same as MP socket lock */
- VERIFY(!(mpo->mpo_flags & MPOF_ATTACHED));
+ mpte_lock_assert_held(mpte); /* same as MP socket lock */
mpo->mpo_flags |= MPOF_ATTACHED;
TAILQ_INSERT_TAIL(&mpte->mpte_sopts, mpo, mpo_entry);
}
void
mptcp_sopt_remove(struct mptses *mpte, struct mptopt *mpo)
{
- mpte_lock_assert_held(mpte); /* same as MP socket lock */
+ mpte_lock_assert_held(mpte); /* same as MP socket lock */
VERIFY(mpo->mpo_flags & MPOF_ATTACHED);
mpo->mpo_flags &= ~MPOF_ATTACHED;
TAILQ_REMOVE(&mpte->mpte_sopts, mpo, mpo_entry);
{
struct mptopt *mpo;
- mpte_lock_assert_held(mpte); /* same as MP socket lock */
+ mpte_lock_assert_held(mpte); /* same as MP socket lock */
TAILQ_FOREACH(mpo, &mpte->mpte_sopts, mpo_entry) {
if (mpo->mpo_level == sopt->sopt_level &&
- mpo->mpo_name == sopt->sopt_name)
+ mpo->mpo_name == sopt->sopt_name) {
break;
+ }
}
- VERIFY(mpo == NULL || sopt->sopt_valsize == sizeof (int));
-
- return (mpo);
+ return mpo;
}
/*
{
struct mptsub *mpts = zalloc(mptsub_zone);
- if (mpts == NULL)
- return (NULL);
+ if (mpts == NULL) {
+ return NULL;
+ }
bzero(mpts, mptsub_zone_size);
- return (mpts);
+ return mpts;
}
/*
static void
mptcp_subflow_addref(struct mptsub *mpts)
{
- if (++mpts->mpts_refcnt == 0)
+ if (++mpts->mpts_refcnt == 0) {
panic("%s: mpts %p wraparound refcnt\n", __func__, mpts);
- /* NOTREACHED */
+ }
+ /* NOTREACHED */
}
static void
panic("%s: mpts %p negative refcnt\n", __func__, mpts);
/* NOTREACHED */
}
- if (--mpts->mpts_refcnt > 0)
+ if (--mpts->mpts_refcnt > 0) {
return;
+ }
/* callee will unlock and destroy lock */
mptcp_subflow_free(mpts);
mpts->mpts_mpte = mpte;
mpts->mpts_socket = so;
tp->t_mpsub = mpts;
- mptcp_subflow_addref(mpts); /* for being in MPTCP subflow list */
- mptcp_subflow_addref(mpts); /* for subflow socket */
+ mptcp_subflow_addref(mpts); /* for being in MPTCP subflow list */
+ mptcp_subflow_addref(mpts); /* for subflow socket */
}
static void
mptcp_subflow_necp_cb(void *handle, __unused int action,
- __unused struct necp_client_flow *flow)
+ __unused uint32_t interface_index,
+ uint32_t necp_flags, bool *viable)
{
+ boolean_t low_power = !!(necp_flags & NECP_CLIENT_RESULT_FLAG_INTERFACE_LOW_POWER);
struct inpcb *inp = (struct inpcb *)handle;
struct socket *so = inp->inp_socket;
struct mptsub *mpts;
struct mptses *mpte;
- if (action != NECP_CLIENT_CBACTION_NONVIABLE)
+ if (low_power) {
+ action = NECP_CLIENT_CBACTION_NONVIABLE;
+ }
+
+ if (action != NECP_CLIENT_CBACTION_NONVIABLE) {
return;
+ }
/*
* The socket is being garbage-collected. There is nothing to be done
* here.
*/
- if (so->so_usecount == 0)
+ if (so->so_usecount == 0) {
return;
+ }
socket_lock(so, 1);
/* Check again after we acquired the lock. */
- if (so->so_usecount == 0)
+ if (so->so_usecount == 0) {
goto out;
+ }
mpte = tptomptp(sototcpcb(so))->mpt_mpte;
mpts = sototcpcb(so)->t_mpsub;
- mptcplog((LOG_DEBUG, "%s: Subflow became non-viable", __func__),
- MPTCP_EVENTS_DBG, MPTCP_LOGLVL_VERBOSE);
+ os_log_debug(mptcp_log_handle, "%s Subflow on itf %u became non-viable, power %u",
+ __func__, mpts->mpts_ifscope, low_power);
mpts->mpts_flags |= MPTSF_CLOSE_REQD;
mptcp_sched_create_subflows(mpte);
- if (mpte->mpte_svctype == MPTCP_SVCTYPE_HANDOVER)
- flow->viable = 1;
+ if (mpte->mpte_svctype == MPTCP_SVCTYPE_HANDOVER && viable != NULL) {
+ *viable = 1;
+ }
out:
socket_unlock(so, 1);
int error;
*so = NULL;
- mpte_lock_assert_held(mpte); /* same as MP socket lock */
+ mpte_lock_assert_held(mpte); /* same as MP socket lock */
mp_so = mptetoso(mpte);
p = proc_find(mp_so->last_pid);
if (p == PROC_NULL) {
mptcplog((LOG_ERR, "%s: Couldn't find proc for pid %u\n", __func__, mp_so->last_pid),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
+ MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
- return (ESRCH);
+ return ESRCH;
}
/*
*/
mpte_unlock(mpte);
error = socreate_internal(dom, so, SOCK_STREAM, IPPROTO_TCP, p,
- SOCF_ASYNC, PROC_NULL);
+ SOCF_ASYNC, PROC_NULL);
mpte_lock(mpte);
if (error) {
mptcplog((LOG_ERR, "%s: subflow socreate mp_so 0x%llx unable to create subflow socket error %d\n",
- __func__, (u_int64_t)VM_KERNEL_ADDRPERM(mp_so), error),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
+ __func__, (u_int64_t)VM_KERNEL_ADDRPERM(mp_so), error),
+ MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
proc_rele(p);
mptcp_subflow_free(mpts);
- return (error);
+ return error;
}
/*
(*so)->so_snd.sb_flags |= SB_NOCOMPRESS;
/* Inherit preconnect and TFO data flags */
- if (mp_so->so_flags1 & SOF1_PRECONNECT_DATA)
+ if (mp_so->so_flags1 & SOF1_PRECONNECT_DATA) {
(*so)->so_flags1 |= SOF1_PRECONNECT_DATA;
- if (mp_so->so_flags1 & SOF1_DATA_IDEMPOTENT)
+ }
+ if (mp_so->so_flags1 & SOF1_DATA_IDEMPOTENT) {
(*so)->so_flags1 |= SOF1_DATA_IDEMPOTENT;
+ }
/* Inherit uuid and create the related flow. */
if (!uuid_is_null(mpsotomppcb(mp_so)->necp_client_uuid)) {
mpsotomppcb(mp_so)->necp_client_uuid, sotoinpcb(*so));
mpte_lock(mpte);
- if (error)
+ if (error) {
goto out_err;
+ }
/* Possible state-change during the unlock above */
if (mp_tp->mpt_state >= MPTCPS_TIME_WAIT ||
- (mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP))
+ (mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP)) {
goto out_err;
+ }
uuid_copy(sotoinpcb(*so)->necp_client_uuid, mpsotomppcb(mp_so)->necp_client_uuid);
} else {
}
/* inherit the other socket options */
- bzero(&smpo, sizeof (smpo));
+ bzero(&smpo, sizeof(smpo));
smpo.mpo_flags |= MPOF_SUBFLOW_OK;
smpo.mpo_level = SOL_SOCKET;
smpo.mpo_intval = 1;
/* disable SIGPIPE */
smpo.mpo_name = SO_NOSIGPIPE;
- if ((error = mptcp_subflow_sosetopt(mpte, mpts, &smpo)) != 0)
+ if ((error = mptcp_subflow_sosetopt(mpte, mpts, &smpo)) != 0) {
goto out_err;
+ }
/* find out if the subflow's source address goes away */
smpo.mpo_name = SO_NOADDRERR;
- if ((error = mptcp_subflow_sosetopt(mpte, mpts, &smpo)) != 0)
+ if ((error = mptcp_subflow_sosetopt(mpte, mpts, &smpo)) != 0) {
goto out_err;
+ }
/* enable keepalive */
smpo.mpo_name = SO_KEEPALIVE;
- if ((error = mptcp_subflow_sosetopt(mpte, mpts, &smpo)) != 0)
+ if ((error = mptcp_subflow_sosetopt(mpte, mpts, &smpo)) != 0) {
goto out_err;
+ }
smpo.mpo_level = IPPROTO_TCP;
smpo.mpo_intval = mptcp_subflow_keeptime;
smpo.mpo_name = TCP_KEEPALIVE;
- if ((error = mptcp_subflow_sosetopt(mpte, mpts, &smpo)) != 0)
+ if ((error = mptcp_subflow_sosetopt(mpte, mpts, &smpo)) != 0) {
goto out_err;
+ }
if (mpte->mpte_mptcb->mpt_state >= MPTCPS_ESTABLISHED) {
/*
smpo.mpo_level = SOL_SOCKET;
smpo.mpo_name = SO_MARK_CELLFALLBACK;
smpo.mpo_intval = 1;
- if ((error = mptcp_subflow_sosetopt(mpte, mpts, &smpo)) != 0)
+ if ((error = mptcp_subflow_sosetopt(mpte, mpts, &smpo)) != 0) {
goto out_err;
+ }
}
/* replay setsockopt(2) on the subflow sockets for eligible options */
TAILQ_FOREACH_SAFE(mpo, &mpte->mpte_sopts, mpo_entry, tmpo) {
int interim;
- if (!(mpo->mpo_flags & MPOF_SUBFLOW_OK))
+ if (!(mpo->mpo_flags & MPOF_SUBFLOW_OK)) {
continue;
+ }
/*
* Skip those that are handled internally; these options
if (mpo->mpo_level == SOL_SOCKET &&
(mpo->mpo_name == SO_NOSIGPIPE ||
mpo->mpo_name == SO_NOADDRERR ||
- mpo->mpo_name == SO_KEEPALIVE))
+ mpo->mpo_name == SO_KEEPALIVE)) {
continue;
+ }
interim = (mpo->mpo_flags & MPOF_INTERIM);
if (mptcp_subflow_sosetopt(mpte, mpts, mpo) != 0 && interim) {
DTRACE_MPTCP3(subflow__create, struct mptses *, mpte,
int, dom, int, error);
- return (0);
+ return 0;
out_err:
mptcp_subflow_abort(mpts, error);
proc_rele(p);
mptcplog((LOG_ERR, "%s: subflow socreate failed with error %d\n",
- __func__, error), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
+ __func__, error), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
- return (error);
+ return error;
}
/*
{
struct socket *so = mpts->mpts_socket;
- if (mpts->mpts_flags & MPTSF_CLOSED)
+ if (mpts->mpts_flags & MPTSF_CLOSED) {
return;
+ }
VERIFY(so != NULL);
VERIFY(so->so_flags & SOF_MP_SUBFLOW);
- VERIFY((so->so_state & (SS_NBIO|SS_NOFDREF)) == (SS_NBIO|SS_NOFDREF));
+ VERIFY((so->so_state & (SS_NBIO | SS_NOFDREF)) == (SS_NBIO | SS_NOFDREF));
DTRACE_MPTCP5(subflow__close, struct mptsub *, mpts,
struct socket *, so,
af = mpts->mpts_dst.sa_family;
dst = &mpts->mpts_dst;
- VERIFY((mpts->mpts_flags & (MPTSF_CONNECTING|MPTSF_CONNECTED)) == MPTSF_CONNECTING);
+ VERIFY((mpts->mpts_flags & (MPTSF_CONNECTING | MPTSF_CONNECTED)) == MPTSF_CONNECTING);
VERIFY(mpts->mpts_socket != NULL);
VERIFY(af == AF_INET || af == AF_INET6);
if (af == AF_INET) {
- inet_ntop(af, &SIN(dst)->sin_addr.s_addr, dbuf, sizeof (dbuf));
+ inet_ntop(af, &SIN(dst)->sin_addr.s_addr, dbuf, sizeof(dbuf));
dport = ntohs(SIN(dst)->sin_port);
} else {
- inet_ntop(af, &SIN6(dst)->sin6_addr, dbuf, sizeof (dbuf));
+ inet_ntop(af, &SIN6(dst)->sin6_addr, dbuf, sizeof(dbuf));
dport = ntohs(SIN6(dst)->sin6_port);
}
os_log_info(mptcp_log_handle,
- "%s: ifindex %u dst %s:%d pended %u\n", __func__, mpts->mpts_ifscope,
- dbuf, dport, !!(mpts->mpts_flags & MPTSF_CONNECT_PENDING));
+ "%s: ifindex %u dst %s:%d pended %u\n", __func__, mpts->mpts_ifscope,
+ dbuf, dport, !!(mpts->mpts_flags & MPTSF_CONNECT_PENDING));
p = proc_find(mp_so->last_pid);
if (p == PROC_NULL) {
mptcplog((LOG_ERR, "%s: Couldn't find proc for pid %u\n", __func__, mp_so->last_pid),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
+ MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
- return (ESRCH);
+ return ESRCH;
}
mpts->mpts_flags &= ~MPTSF_CONNECT_PENDING;
/* Allocate a unique address id per subflow */
mpte->mpte_addrid_last++;
- if (mpte->mpte_addrid_last == 0)
+ if (mpte->mpte_addrid_last == 0) {
mpte->mpte_addrid_last++;
+ }
proc_rele(p);
DTRACE_MPTCP3(subflow__connect, struct mptses *, mpte,
struct mptsub *, mpts, int, error);
- if (error)
+ if (error) {
mptcplog((LOG_ERR, "%s: connectx failed with error %d ifscope %u\n",
- __func__, error, mpts->mpts_ifscope),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
+ __func__, error, mpts->mpts_ifscope),
+ MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
+ }
- return (error);
+ return error;
}
/*
* to the MPTCP layer, so we require that the caller passes in the
* expected parameters.
*/
- if (mp == NULL || controlp != NULL)
- return (EINVAL);
+ if (mp == NULL || controlp != NULL) {
+ return EINVAL;
+ }
*mp = NULL;
- if (psa != NULL)
+ if (psa != NULL) {
*psa = NULL;
- if (flagsp != NULL)
- flags = *flagsp &~ MSG_EOR;
- else
+ }
+ if (flagsp != NULL) {
+ flags = *flagsp & ~MSG_EOR;
+ } else {
flags = 0;
+ }
- if (flags & (MSG_PEEK|MSG_OOB|MSG_NEEDSA|MSG_WAITALL|MSG_WAITSTREAM))
- return (EOPNOTSUPP);
+ if (flags & (MSG_PEEK | MSG_OOB | MSG_NEEDSA | MSG_WAITALL | MSG_WAITSTREAM)) {
+ return EOPNOTSUPP;
+ }
- flags |= (MSG_DONTWAIT|MSG_NBIO);
+ flags |= (MSG_DONTWAIT | MSG_NBIO);
/*
* If a recv attempt is made on a previously-accepted socket
* prior to being returned from sodefunct(); there should
* be no data on its receive list, so panic otherwise.
*/
- if (so->so_state & SS_DEFUNCT)
+ if (so->so_state & SS_DEFUNCT) {
sb_empty_assert(sb, __func__);
- return (error);
+ }
+ return error;
}
/*
* socket is closed for real, SOF_MP_SUBFLOW would be cleared.
*/
if ((so->so_state & (SS_NOFDREF | SS_CANTRCVMORE)) ==
- (SS_NOFDREF | SS_CANTRCVMORE) && !(so->so_flags & SOF_MP_SUBFLOW))
- return (0);
+ (SS_NOFDREF | SS_CANTRCVMORE) && !(so->so_flags & SOF_MP_SUBFLOW)) {
+ return 0;
+ }
/*
* For consistency with soreceive() semantics, we need to obey
* SB_LOCK in case some other code path has locked the buffer.
*/
error = sblock(&so->so_rcv, 0);
- if (error != 0)
- return (error);
+ if (error != 0) {
+ return error;
+ }
m = so->so_rcv.sb_mb;
if (m == NULL) {
goto release;
}
- if (!(so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING))) {
+ if (!(so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING))) {
error = ENOTCONN;
goto release;
}
mp = &m->m_next;
so->so_rcv.sb_mb = m = m->m_next;
*mp = NULL;
-
}
if (m != NULL) {
continue;
}
- if (m->m_pkthdr.pkt_flags & PKTF_MPTCP_DFIN)
+ if (m->m_pkthdr.pkt_flags & PKTF_MPTCP_DFIN) {
dfin = 1;
+ }
/*
* Check if the full mapping is now present
*/
if ((int)so->so_rcv.sb_cc < dlen - dfin) {
- mptcplog((LOG_INFO, "%s not enough data (%u) need %u\n",
- __func__, so->so_rcv.sb_cc, dlen),
- MPTCP_RECEIVER_DBG, MPTCP_LOGLVL_LOG);
+ mptcplog((LOG_INFO, "%s not enough data (%u) need %u for dsn %u\n",
+ __func__, so->so_rcv.sb_cc, dlen, (uint32_t)dsn),
+ MPTCP_RECEIVER_DBG, MPTCP_LOGLVL_LOG);
- if (*mp0 == NULL)
+ if (*mp0 == NULL) {
error = EWOULDBLOCK;
+ }
goto release;
}
error_out = 1;
error = EIO;
dlen = 0;
- soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_MUSTRST);
+ *mp0 = NULL;
+ mptcp_subflow_abort(sototcpcb(so)->t_mpsub, ECONNABORTED);
break;
}
*mp = NULL;
}
- if (dlen - dfin == 0)
+ if (dlen - dfin == 0) {
dlen = 0;
+ }
VERIFY(dlen <= 0 || m);
}
SB_EMPTY_FIXUP(&so->so_rcv);
}
- if (error_out)
+ if (error_out) {
goto release;
-
+ }
if (mptcp_validate_csum(sototcpcb(so), start, dsn, sseq, orig_dlen, csum, dfin)) {
error = EIO;
DTRACE_MPTCP3(subflow__receive, struct socket *, so,
struct sockbuf *, &so->so_rcv, struct sockbuf *, &so->so_snd);
- if (flagsp != NULL)
+ if (flagsp != NULL) {
*flagsp |= flags;
+ }
release:
sbunlock(&so->so_rcv, TRUE);
- if (proc_held)
+ if (proc_held) {
proc_rele(p);
+ }
- return (error);
-
+ return error;
}
/*
OSIncrementAtomicLong(&p->p_stats->p_ru.ru_msgsnd);
error = sosendcheck(so, NULL, top->m_pkthdr.len, 0, 1, 0, &sblocked, NULL);
- if (error)
+ if (error) {
goto out;
+ }
error = (*so->so_proto->pr_usrreqs->pru_send)(so, 0, top, NULL, NULL, p);
top = NULL;
out:
- if (top != NULL)
+ if (top != NULL) {
m_freem(top);
+ }
- if (proc_held)
+ if (proc_held) {
proc_rele(p);
+ }
soclearfastopen(so);
(int64_t)en_tracing_val);
}
- return (error);
-
+ return error;
}
/*
struct mptsub *mpts = NULL;
int af, error = 0;
- mpte_lock_assert_held(mpte); /* same as MP socket lock */
+ mpte_lock_assert_held(mpte); /* same as MP socket lock */
mp_so = mptetoso(mpte);
mp_tp = mpte->mpte_mptcb;
if (mp_tp->mpt_state >= MPTCPS_CLOSE_WAIT) {
/* If the remote end sends Data FIN, refuse subflow adds */
mptcplog((LOG_ERR, "%s state %u\n", __func__, mp_tp->mpt_state),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
+ MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
error = ENOTCONN;
goto out_err;
}
mpts = mptcp_subflow_alloc();
if (mpts == NULL) {
mptcplog((LOG_ERR, "%s malloc subflow failed\n", __func__),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
+ MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
error = ENOMEM;
goto out_err;
}
- if (src != NULL) {
- int len = src->sa_len;
+ if (src) {
+ if (src->sa_family != AF_INET && src->sa_family != AF_INET6) {
+ error = EAFNOSUPPORT;
+ goto out_err;
+ }
+
+ if (src->sa_family == AF_INET &&
+ src->sa_len != sizeof(struct sockaddr_in)) {
+ error = EINVAL;
+ goto out_err;
+ }
+
+ if (src->sa_family == AF_INET6 &&
+ src->sa_len != sizeof(struct sockaddr_in6)) {
+ error = EINVAL;
+ goto out_err;
+ }
- MALLOC(mpts->mpts_src, struct sockaddr *, len, M_SONAME,
+ MALLOC(mpts->mpts_src, struct sockaddr *, src->sa_len, M_SONAME,
M_WAITOK | M_ZERO);
if (mpts->mpts_src == NULL) {
- mptcplog((LOG_ERR, "%s malloc mpts_src failed", __func__),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
error = ENOMEM;
goto out_err;
}
- bcopy(src, mpts->mpts_src, len);
+ bcopy(src, mpts->mpts_src, src->sa_len);
+ }
+
+ if (dst->sa_family != AF_INET && dst->sa_family != AF_INET6) {
+ error = EAFNOSUPPORT;
+ goto out_err;
+ }
+
+ if (dst->sa_family == AF_INET &&
+ dst->sa_len != sizeof(mpts->__mpts_dst_v4)) {
+ error = EINVAL;
+ goto out_err;
+ }
+
+ if (dst->sa_family == AF_INET6 &&
+ dst->sa_len != sizeof(mpts->__mpts_dst_v6)) {
+ error = EINVAL;
+ goto out_err;
}
memcpy(&mpts->mpts_dst, dst, dst->sa_len);
af = mpts->mpts_dst.sa_family;
+ ifnet_head_lock_shared();
+ if ((ifscope > (unsigned)if_index)) {
+ ifnet_head_done();
+ error = ENXIO;
+ goto out_err;
+ }
+ ifnet_head_done();
+
mpts->mpts_ifscope = ifscope;
/* create the subflow socket */
- if ((error = mptcp_subflow_socreate(mpte, mpts, af, &so)) != 0)
+ if ((error = mptcp_subflow_socreate(mpte, mpts, af, &so)) != 0) {
/*
* Returning (error) and not cleaning up, because up to here
* all we did is creating mpts.
* And the contract is that the call to mptcp_subflow_socreate,
* moves ownership of mpts to mptcp_subflow_socreate.
*/
- return (error);
+ return error;
+ }
/*
* We may be called from within the kernel. Still need to account this
*/
mpte->mpte_connid_last++;
if (mpte->mpte_connid_last == SAE_CONNID_ALL ||
- mpte->mpte_connid_last == SAE_CONNID_ANY)
+ mpte->mpte_connid_last == SAE_CONNID_ANY) {
mpte->mpte_connid_last++;
+ }
mpts->mpts_connid = mpte->mpte_connid_last;
/* Allocate a unique address id per subflow */
mpte->mpte_addrid_last++;
- if (mpte->mpte_addrid_last == 0)
+ if (mpte->mpte_addrid_last == 0) {
mpte->mpte_addrid_last++;
+ }
/* register for subflow socket read/write events */
sock_setupcalls_locked(so, mptcp_subflow_rupcall, mpts, mptcp_subflow_wupcall, mpts, 1);
/* sanity check */
VERIFY(!(mpts->mpts_flags &
- (MPTSF_CONNECTING|MPTSF_CONNECTED|MPTSF_CONNECT_PENDING)));
+ (MPTSF_CONNECTING | MPTSF_CONNECTED | MPTSF_CONNECT_PENDING)));
/*
* Indicate to the TCP subflow whether or not it should establish
soisconnecting(mp_so);
/* If fastopen is requested, set state in mpts */
- if (so->so_flags1 & SOF1_PRECONNECT_DATA)
+ if (so->so_flags1 & SOF1_PRECONNECT_DATA) {
mpts->mpts_flags |= MPTSF_TFO_REQD;
+ }
} else {
- if (!(mp_tp->mpt_flags & MPTCPF_JOIN_READY))
+ if (!(mp_tp->mpt_flags & MPTCPF_JOIN_READY)) {
mpts->mpts_flags |= MPTSF_CONNECT_PENDING;
+ }
}
mpts->mpts_flags |= MPTSF_CONNECTING;
inet_ntop(af, ((af == AF_INET) ?
(void *)&SIN(&mpts->mpts_dst)->sin_addr.s_addr :
(void *)&SIN6(&mpts->mpts_dst)->sin6_addr),
- dbuf, sizeof (dbuf)), ((af == AF_INET) ?
+ dbuf, sizeof(dbuf)), ((af == AF_INET) ?
ntohs(SIN(&mpts->mpts_dst)->sin_port) :
ntohs(SIN6(&mpts->mpts_dst)->sin6_port)),
mpts->mpts_connid,
}
/* connect right away if first attempt, or if join can be done now */
- if (!(mpts->mpts_flags & MPTSF_CONNECT_PENDING))
+ if (!(mpts->mpts_flags & MPTSF_CONNECT_PENDING)) {
error = mptcp_subflow_soconnectx(mpte, mpts);
+ }
- if (error)
+ if (error) {
goto out_err_close;
+ }
- if (pcid)
+ if (pcid) {
*pcid = mpts->mpts_connid;
+ }
- return (0);
+ return 0;
out_err_close:
mptcp_subflow_abort(mpts, error);
- return (error);
+ return error;
out_err:
- if (mpts)
+ if (mpts) {
mptcp_subflow_free(mpts);
+ }
- return (error);
+ return error;
}
void
struct socket *so = mpts->mpts_socket;
struct tcpcb *tp = sototcpcb(so);
- mpte_lock_assert_held(mpte); /* same as MP socket lock */
+ mpte_lock_assert_held(mpte); /* same as MP socket lock */
VERIFY(mpts->mpts_mpte == mpte);
VERIFY(mpts->mpts_flags & MPTSF_ATTACHED);
VERIFY(mpte->mpte_numflows != 0);
VERIFY(mp_so->so_usecount > 0);
mptcplog((LOG_DEBUG, "%s: mp_so 0x%llx [u=%d,r=%d] cid %d %x error %d\n",
- __func__, (u_int64_t)VM_KERNEL_ADDRPERM(mp_so),
- mp_so->so_usecount, mp_so->so_retaincnt, mpts->mpts_connid,
- mpts->mpts_flags, mp_so->so_error),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
+ __func__, (u_int64_t)VM_KERNEL_ADDRPERM(mp_so),
+ mp_so->so_usecount, mp_so->so_retaincnt, mpts->mpts_connid,
+ mpts->mpts_flags, mp_so->so_error),
+ MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
mptcpstats_update(mpte->mpte_itfstats, mpts);
mpte->mpte_init_rxbytes = sotoinpcb(so)->inp_stat->rxbytes;
atomic_bitclear_32(&mpts->mpts_flags, MPTSF_ATTACHED);
TAILQ_REMOVE(&mpte->mpte_subflows, mpts, mpts_entry);
mpte->mpte_numflows--;
- if (mpte->mpte_active_sub == mpts)
+ if (mpte->mpte_active_sub == mpts) {
mpte->mpte_active_sub = NULL;
+ }
/*
* Drop references held by this subflow socket; there
mptcp_detach_mptcb_from_subf(mpte->mpte_mptcb, so);
- mp_so->so_usecount--; /* for subflow socket */
+ mp_so->so_usecount--; /* for subflow socket */
mpts->mpts_mpte = NULL;
mpts->mpts_socket = NULL;
- mptcp_subflow_remref(mpts); /* for MPTCP subflow list */
- mptcp_subflow_remref(mpts); /* for subflow socket */
+ mptcp_subflow_remref(mpts); /* for MPTCP subflow list */
+ mptcp_subflow_remref(mpts); /* for subflow socket */
so->so_flags &= ~SOF_MP_SUBFLOW;
tp->t_mptcb = NULL;
struct mptcb *mp_tp = mpte->mpte_mptcb;
int send_dfin = 0;
- if (mp_tp->mpt_state > MPTCPS_CLOSE_WAIT)
+ if (mp_tp->mpt_state > MPTCPS_CLOSE_WAIT) {
send_dfin = 1;
+ }
if (!(so->so_state & (SS_ISDISCONNECTING | SS_ISDISCONNECTED)) &&
(so->so_state & SS_ISCONNECTED)) {
__func__, mpts->mpts_connid, send_dfin),
MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
- if (send_dfin)
+ if (send_dfin) {
mptcp_send_dfin(so);
+ }
soshutdownlock(so, SHUT_WR);
}
-
}
static void
struct socket *so = mpts->mpts_socket;
struct tcpcb *tp = sototcpcb(so);
- if (mpts->mpts_flags & MPTSF_DISCONNECTED)
+ if (mpts->mpts_flags & MPTSF_DISCONNECTED) {
return;
+ }
mptcplog((LOG_DEBUG, "%s aborting connection state %u\n", __func__, tp->t_state),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
+ MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
- if (tp->t_state != TCPS_CLOSED)
+ if (tp->t_state != TCPS_CLOSED) {
tcp_drop(tp, error);
+ }
mptcp_subflow_eupcall1(so, mpts, SO_FILT_HINT_DISCONNECTED);
}
struct mptcb *mp_tp;
int send_dfin = 0;
- mpte_lock_assert_held(mpte); /* same as MP socket lock */
+ mpte_lock_assert_held(mpte); /* same as MP socket lock */
VERIFY(mpts->mpts_mpte == mpte);
VERIFY(mpts->mpts_socket != NULL);
- if (mpts->mpts_flags & (MPTSF_DISCONNECTING|MPTSF_DISCONNECTED))
+ if (mpts->mpts_flags & (MPTSF_DISCONNECTING | MPTSF_DISCONNECTED)) {
return;
+ }
mpts->mpts_flags |= MPTSF_DISCONNECTING;
so = mpts->mpts_socket;
mp_tp = mpte->mpte_mptcb;
- if (mp_tp->mpt_state > MPTCPS_CLOSE_WAIT)
+ if (mp_tp->mpt_state > MPTCPS_CLOSE_WAIT) {
send_dfin = 1;
+ }
if (!(so->so_state & (SS_ISDISCONNECTING | SS_ISDISCONNECTED)) &&
(so->so_state & SS_ISCONNECTED)) {
__func__, mpts->mpts_connid, send_dfin),
MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
- if (send_dfin)
+ if (send_dfin) {
mptcp_send_dfin(so);
+ }
(void) soshutdownlock(so, SHUT_RD);
(void) soshutdownlock(so, SHUT_WR);
(void) sodisconnectlocked(so);
VERIFY(mpte != NULL);
if (mptcp_should_defer_upcall(mpte->mpte_mppcb)) {
- if (!(mpte->mpte_mppcb->mpp_flags & MPP_RUPCALL))
+ if (!(mpte->mpte_mppcb->mpp_flags & MPP_RUPCALL)) {
mpte->mpte_mppcb->mpp_flags |= MPP_SHOULD_RWAKEUP;
+ }
return;
}
mptcp_subflow_input(mpte, mpts);
- mptcp_subflow_remref(mpts); /* ours */
+ mptcp_subflow_remref(mpts); /* ours */
VERIFY(mpts->mpts_socket->so_usecount != 0);
mpts->mpts_socket->so_usecount--;
DTRACE_MPTCP2(subflow__input, struct mptses *, mpte,
struct mptsub *, mpts);
- if (!(mpts->mpts_flags & MPTSF_CONNECTED))
+ if (!(mpts->mpts_flags & MPTSF_CONNECTED)) {
goto out;
+ }
so = mpts->mpts_socket;
/* notify protocol that we drained all the data */
if (error == 0 && m != NULL &&
- (so->so_proto->pr_flags & PR_WANTRCVD) && so->so_pcb != NULL)
+ (so->so_proto->pr_flags & PR_WANTRCVD) && so->so_pcb != NULL) {
(*so->so_proto->pr_usrreqs->pru_rcvd)(so, 0);
+ }
out:
- if (wakeup)
+ if (wakeup) {
mpte->mpte_mppcb->mpp_flags |= MPP_SHOULD_RWAKEUP;
+ }
mptcp_handle_deferred_upcalls(mpte->mpte_mppcb, MPP_INSIDE_INPUT);
}
VERIFY(mpte != NULL);
if (mptcp_should_defer_upcall(mpte->mpte_mppcb)) {
- if (!(mpte->mpte_mppcb->mpp_flags & MPP_WUPCALL))
+ if (!(mpte->mpte_mppcb->mpp_flags & MPP_WUPCALL)) {
mpte->mpte_mppcb->mpp_flags |= MPP_SHOULD_WWAKEUP;
+ }
return;
}
/* Part of the segment is covered, don't reinject here */
if (so_m->m_pkthdr.mp_dsn <= dsn &&
- so_m->m_pkthdr.mp_dsn + so_m->m_pkthdr.mp_rlen > dsn)
+ so_m->m_pkthdr.mp_dsn + so_m->m_pkthdr.mp_rlen > dsn) {
return TRUE;
+ }
so_m = so_m->m_next;
}
VERIFY(!INP_WAIT_FOR_IF_FEEDBACK(sotoinpcb(so)));
VERIFY((mpts->mpts_flags & MPTSF_MP_CAPABLE) ||
- (mpts->mpts_flags & MPTSF_MP_DEGRADED) ||
- (mpts->mpts_flags & MPTSF_TFO_REQD));
+ (mpts->mpts_flags & MPTSF_MP_DEGRADED) ||
+ (mpts->mpts_flags & MPTSF_TFO_REQD));
VERIFY(mptcp_subflow_cwnd_space(mpts->mpts_socket) > 0);
mptcplog((LOG_DEBUG, "%s mpts_flags %#x, mpte_flags %#x cwnd_space %u\n",
- __func__, mpts->mpts_flags, mpte->mpte_flags,
- mptcp_subflow_cwnd_space(so)),
- MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
+ __func__, mpts->mpts_flags, mpte->mpte_flags,
+ mptcp_subflow_cwnd_space(so)),
+ MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
DTRACE_MPTCP2(subflow__output, struct mptses *, mpte,
struct mptsub *, mpts);
* pkt_flags marked with the PKTF_MPTCP flag.
*/
- if (mpte->mpte_reinjectq)
+ if (mpte->mpte_reinjectq) {
sb_mb = mpte->mpte_reinjectq;
- else
+ } else {
sb_mb = mp_so->so_snd.sb_mb;
+ }
if (sb_mb == NULL) {
mptcplog((LOG_ERR, "%s: No data in MPTCP-sendbuffer! smax %u snxt %u suna %u state %u flags %#x\n",
- __func__, (uint32_t)mp_tp->mpt_sndmax, (uint32_t)mp_tp->mpt_sndnxt,
- (uint32_t)mp_tp->mpt_snduna, mp_tp->mpt_state, mp_so->so_flags1),
- MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR);
+ __func__, (uint32_t)mp_tp->mpt_sndmax, (uint32_t)mp_tp->mpt_sndnxt,
+ (uint32_t)mp_tp->mpt_snduna, mp_tp->mpt_state, mp_so->so_flags1),
+ MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR);
/* Fix it to prevent looping */
- if (MPTCP_SEQ_LT(mp_tp->mpt_sndnxt, mp_tp->mpt_snduna))
+ if (MPTCP_SEQ_LT(mp_tp->mpt_sndnxt, mp_tp->mpt_snduna)) {
mp_tp->mpt_sndnxt = mp_tp->mpt_snduna;
+ }
goto out;
}
/* First, drop acknowledged data */
if (MPTCP_SEQ_LT(mpt_dsn, mp_tp->mpt_snduna)) {
mptcplog((LOG_ERR, "%s: dropping data, should have been done earlier "
- "dsn %u suna %u reinject? %u\n",
- __func__, (uint32_t)mpt_dsn,
- (uint32_t)mp_tp->mpt_snduna, !!mpte->mpte_reinjectq),
- MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR);
+ "dsn %u suna %u reinject? %u\n",
+ __func__, (uint32_t)mpt_dsn,
+ (uint32_t)mp_tp->mpt_snduna, !!mpte->mpte_reinjectq),
+ MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR);
if (mpte->mpte_reinjectq) {
mptcp_clean_reinjectq(mpte);
} else {
/* Check again because of above sbdrop */
if (mp_so->so_snd.sb_mb == NULL && mpte->mpte_reinjectq == NULL) {
mptcplog((LOG_ERR, "%s send-buffer is empty\n", __func__),
- MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR);
+ MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR);
goto out;
}
wakeup = 1;
mptcplog((LOG_ERR, "%s: dropping data in degraded mode, should have been done earlier dsn %u sndnxt %u suna %u\n",
- __func__, (uint32_t)mpt_dsn, (uint32_t)mp_tp->mpt_sndnxt, (uint32_t)mp_tp->mpt_snduna),
- MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR);
+ __func__, (uint32_t)mpt_dsn, (uint32_t)mp_tp->mpt_sndnxt, (uint32_t)mp_tp->mpt_snduna),
+ MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR);
}
}
* Adjust the top level notion of next byte used for retransmissions
* and sending FINs.
*/
- if (MPTCP_SEQ_LT(mp_tp->mpt_sndnxt, mp_tp->mpt_snduna))
+ if (MPTCP_SEQ_LT(mp_tp->mpt_sndnxt, mp_tp->mpt_snduna)) {
mp_tp->mpt_sndnxt = mp_tp->mpt_snduna;
+ }
/* Now determine the offset from which to start transmitting data */
- if (mpte->mpte_reinjectq)
+ if (mpte->mpte_reinjectq) {
sb_mb = mpte->mpte_reinjectq;
- else
+ } else {
dont_reinject:
sb_mb = mp_so->so_snd.sb_mb;
+ }
if (sb_mb == NULL) {
mptcplog((LOG_ERR, "%s send-buffer is still empty\n", __func__),
- MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR);
+ MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR);
goto out;
}
sb_cc -= off;
} else {
mptcplog((LOG_ERR, "%s this should not happen: sndnxt %u sndmax %u\n",
- __func__, (uint32_t)mp_tp->mpt_sndnxt,
- (uint32_t)mp_tp->mpt_sndmax),
- MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR);
+ __func__, (uint32_t)mp_tp->mpt_sndnxt,
+ (uint32_t)mp_tp->mpt_sndmax),
+ MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR);
goto out;
}
sb_cc = min(sb_cc, mptcp_subflow_cwnd_space(so));
if (sb_cc <= 0) {
mptcplog((LOG_ERR, "%s sb_cc is %d, mp_so->sb_cc %u, sndwnd %u,sndnxt %u sndmax %u cwnd %u\n",
- __func__, sb_cc, mp_so->so_snd.sb_cc, mp_tp->mpt_sndwnd,
- (uint32_t)mp_tp->mpt_sndnxt, (uint32_t)mp_tp->mpt_sndmax,
- mptcp_subflow_cwnd_space(so)),
- MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR);
+ __func__, sb_cc, mp_so->so_snd.sb_cc, mp_tp->mpt_sndwnd,
+ (uint32_t)mp_tp->mpt_sndnxt, (uint32_t)mp_tp->mpt_sndmax,
+ mptcp_subflow_cwnd_space(so)),
+ MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR);
}
sb_cc = min(sb_cc, UINT16_MAX);
* Create a DSN mapping for the data we are about to send. It all
* has the same mapping.
*/
- if (reinjected)
+ if (reinjected) {
mpt_dsn = sb_mb->m_pkthdr.mp_dsn;
- else
+ } else {
mpt_dsn = mp_tp->mpt_snduna + off;
+ }
mpt_mbuf = sb_mb;
while (mpt_mbuf && reinjected == FALSE &&
- (mpt_mbuf->m_pkthdr.mp_rlen == 0 ||
- mpt_mbuf->m_pkthdr.mp_rlen <= (uint32_t)off)) {
+ (mpt_mbuf->m_pkthdr.mp_rlen == 0 ||
+ mpt_mbuf->m_pkthdr.mp_rlen <= (uint32_t)off)) {
off -= mpt_mbuf->m_pkthdr.mp_rlen;
mpt_mbuf = mpt_mbuf->m_next;
}
- if (mpts->mpts_flags & MPTSF_MP_DEGRADED)
+ if (mpts->mpts_flags & MPTSF_MP_DEGRADED) {
mptcplog((LOG_DEBUG, "%s: %u snduna = %u sndnxt = %u probe %d\n",
__func__, mpts->mpts_connid, (uint32_t)mp_tp->mpt_snduna, (uint32_t)mp_tp->mpt_sndnxt,
mpts->mpts_probecnt),
MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
+ }
VERIFY((mpt_mbuf == NULL) || (mpt_mbuf->m_pkthdr.pkt_flags & PKTF_MPTCP));
if (mlen < 0) {
mptcplog((LOG_ERR, "%s mlen %d mp_rlen %u off %u sb_cc %u tot_sent %u\n",
- __func__, (int)mlen, mpt_mbuf->m_pkthdr.mp_rlen,
- (uint32_t)off, sb_cc, tot_sent),
- MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR);
+ __func__, (int)mlen, mpt_mbuf->m_pkthdr.mp_rlen,
+ (uint32_t)off, sb_cc, tot_sent),
+ MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR);
goto out;
}
- if (mlen == 0)
+ if (mlen == 0) {
goto next;
+ }
m = m_copym_mode(mpt_mbuf, (int)off, mlen, M_DONTWAIT,
M_COPYM_MUST_COPY_HDR);
if (m == NULL) {
mptcplog((LOG_ERR, "%s m_copym_mode failed\n", __func__),
- MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR);
+ MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR);
error = ENOBUFS;
break;
}
m->m_pkthdr.len = mlen;
if (head == NULL) {
- head = tail = m;
+ head = tail = m;
} else {
tail->m_next = m;
tail = m;
}
mptcplog((LOG_DEBUG, "%s: Queued dsn %u ssn %u len %u on sub %u\n",
- __func__, (uint32_t)mpt_dsn, mpts->mpts_rel_seq,
- tot_sent, mpts->mpts_connid), MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
+ __func__, (uint32_t)mpt_dsn, mpts->mpts_rel_seq,
+ tot_sent, mpts->mpts_connid), MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
if (head && (mp_tp->mpt_flags & MPTCPF_CHECKSUM)) {
dss_csum = mptcp_output_csum(head, mpt_dsn, mpts->mpts_rel_seq,
- tot_sent);
+ tot_sent);
}
/* Now, let's update rel-seq and the data-level length */
mpts->mpts_rel_seq += tot_sent;
m = head;
while (m) {
- if (mp_tp->mpt_flags & MPTCPF_CHECKSUM)
+ if (mp_tp->mpt_flags & MPTCPF_CHECKSUM) {
m->m_pkthdr.mp_csum = dss_csum;
+ }
m->m_pkthdr.mp_rlen = tot_sent;
m = m->m_next;
}
if (head != NULL) {
if ((mpts->mpts_flags & MPTSF_TFO_REQD) &&
- (tp->t_tfo_stats == 0))
+ (tp->t_tfo_stats == 0)) {
tp->t_mpflags |= TMPF_TFO_REQUEST;
+ }
error = sock_sendmbuf(so, NULL, head, 0, NULL);
if (mpts->mpts_probesoon && mpts->mpts_maxseg && tot_sent) {
tcpstat.tcps_mp_num_probes++;
- if ((uint32_t)tot_sent < mpts->mpts_maxseg)
+ if ((uint32_t)tot_sent < mpts->mpts_maxseg) {
mpts->mpts_probecnt += 1;
- else
+ } else {
mpts->mpts_probecnt +=
- tot_sent/mpts->mpts_maxseg;
+ tot_sent / mpts->mpts_maxseg;
+ }
}
if (!reinjected && !(flags & MPTCP_SUBOUT_PROBING)) {
if (MPTCP_DATASEQ_HIGH32(new_sndnxt) >
- MPTCP_DATASEQ_HIGH32(mp_tp->mpt_sndnxt))
+ MPTCP_DATASEQ_HIGH32(mp_tp->mpt_sndnxt)) {
mp_tp->mpt_flags |= MPTCPF_SND_64BITDSN;
+ }
mp_tp->mpt_sndnxt = new_sndnxt;
}
soclearfastopen(mp_so);
if ((mpts->mpts_flags & MPTSF_MP_DEGRADED) ||
- (mpts->mpts_probesoon != 0))
+ (mpts->mpts_probesoon != 0)) {
mptcplog((LOG_DEBUG, "%s %u degraded %u wrote %d %d probe %d probedelta %d\n",
__func__, mpts->mpts_connid,
!!(mpts->mpts_flags & MPTSF_MP_DEGRADED),
tot_sent, (int) sb_cc, mpts->mpts_probecnt,
(tcp_now - mpts->mpts_probesoon)),
MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
+ }
if (IFNET_IS_CELLULAR(sotoinpcb(so)->inp_last_outifp)) {
mpte->mpte_mppcb->mpp_flags |= MPP_SET_CELLICON;
}
out:
- if (wakeup)
+ if (wakeup) {
mpte->mpte_mppcb->mpp_flags |= MPP_SHOULD_WWAKEUP;
+ }
mptcp_handle_deferred_upcalls(mpte->mpte_mppcb, MPP_INSIDE_OUTPUT);
- return (error);
+ return error;
zero_len_write:
/* Opting to call pru_send as no mbuf at subflow level */
error = (*so->so_proto->pr_usrreqs->pru_send)(so, 0, NULL, NULL,
- NULL, current_proc());
+ NULL, current_proc());
goto done_sending;
}
struct mbuf *n, *prev = NULL;
mptcplog((LOG_DEBUG, "%s reinjecting dsn %u dlen %u rseq %u\n",
- __func__, (uint32_t)m->m_pkthdr.mp_dsn, m->m_pkthdr.mp_rlen,
- m->m_pkthdr.mp_rseq),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
+ __func__, (uint32_t)m->m_pkthdr.mp_dsn, m->m_pkthdr.mp_rlen,
+ m->m_pkthdr.mp_rseq),
+ MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
n = mpte->mpte_reinjectq;
* equal to m's sequence number.
*/
while (n) {
- if (MPTCP_SEQ_GEQ(n->m_pkthdr.mp_dsn, m->m_pkthdr.mp_dsn))
+ if (MPTCP_SEQ_GEQ(n->m_pkthdr.mp_dsn, m->m_pkthdr.mp_dsn)) {
break;
+ }
prev = n;
if (n->m_pkthdr.mp_dsn == m->m_pkthdr.mp_dsn &&
n->m_pkthdr.mp_rlen >= m->m_pkthdr.mp_rlen) {
mptcplog((LOG_DEBUG, "%s fully covered with len %u\n",
- __func__, n->m_pkthdr.mp_rlen),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
+ __func__, n->m_pkthdr.mp_rlen),
+ MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
goto dont_queue;
}
struct mbuf *tmp = n->m_nextpkt;
mptcplog((LOG_DEBUG, "%s m is covering that guy dsn %u len %u dsn %u len %u\n",
- __func__, m->m_pkthdr.mp_dsn, m->m_pkthdr.mp_rlen,
- n->m_pkthdr.mp_dsn, n->m_pkthdr.mp_rlen),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
+ __func__, m->m_pkthdr.mp_dsn, m->m_pkthdr.mp_rlen,
+ n->m_pkthdr.mp_dsn, n->m_pkthdr.mp_rlen),
+ MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
m->m_nextpkt = NULL;
- if (prev == NULL)
+ if (prev == NULL) {
mpte->mpte_reinjectq = tmp;
- else
+ } else {
prev->m_nextpkt = tmp;
+ }
m_freem(n);
n = tmp;
}
-
}
if (prev) {
/* m is already fully covered by the previous mbuf in the queue */
if (prev->m_pkthdr.mp_dsn + prev->m_pkthdr.mp_rlen >= m->m_pkthdr.mp_dsn + m->m_pkthdr.len) {
mptcplog((LOG_DEBUG, "%s prev covers us from %u with len %u\n",
- __func__, prev->m_pkthdr.mp_dsn, prev->m_pkthdr.mp_rlen),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
+ __func__, prev->m_pkthdr.mp_dsn, prev->m_pkthdr.mp_rlen),
+ MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
goto dont_queue;
}
}
- if (prev == NULL)
+ if (prev == NULL) {
mpte->mpte_reinjectq = m;
- else
+ } else {
prev->m_nextpkt = m;
+ }
m->m_nextpkt = n;
while (m) {
/* If this segment covers what we are looking for, return it. */
if (MPTCP_SEQ_LEQ(m->m_pkthdr.mp_dsn, dsn) &&
- MPTCP_SEQ_GT(m->m_pkthdr.mp_dsn + m->m_pkthdr.mp_rlen, dsn))
+ MPTCP_SEQ_GT(m->m_pkthdr.mp_dsn + m->m_pkthdr.mp_rlen, dsn)) {
break;
+ }
/* Segment is no more in the queue */
- if (MPTCP_SEQ_GT(m->m_pkthdr.mp_dsn, dsn))
+ if (MPTCP_SEQ_GT(m->m_pkthdr.mp_dsn, dsn)) {
return NULL;
+ }
m = m->m_next;
}
n = m_copym_mode(m, 0, m->m_len, M_DONTWAIT, M_COPYM_MUST_COPY_HDR);
if (n == NULL) {
mptcplog((LOG_ERR, "%s m_copym_mode returned NULL\n", __func__),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
+ MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
goto err;
}
n->m_pkthdr.pkt_flags |= (PKTF_MPSO | PKTF_MPTCP);
- if (top == NULL)
+ if (top == NULL) {
top = n;
+ }
- if (tail != NULL)
+ if (tail != NULL) {
tail->m_next = n;
+ }
tail = n;
return top;
err:
- if (top)
+ if (top) {
m_freem(top);
+ }
return NULL;
}
struct mbuf *n = m->m_next, *orig = m;
mptcplog((LOG_DEBUG, "%s working on suna %u relseq %u iss %u len %u pktflags %#x\n",
- __func__, tp->snd_una, m->m_pkthdr.mp_rseq, mpts->mpts_iss,
- m->m_pkthdr.mp_rlen, m->m_pkthdr.pkt_flags),
- MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
+ __func__, tp->snd_una, m->m_pkthdr.mp_rseq, mpts->mpts_iss,
+ m->m_pkthdr.mp_rlen, m->m_pkthdr.pkt_flags),
+ MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
VERIFY((m->m_flags & M_PKTHDR) && (m->m_pkthdr.pkt_flags & PKTF_MPTCP));
- if (m->m_pkthdr.pkt_flags & PKTF_MPTCP_REINJ)
+ if (m->m_pkthdr.pkt_flags & PKTF_MPTCP_REINJ) {
goto next;
+ }
/* Has it all already been acknowledged at the data-level? */
- if (MPTCP_SEQ_GEQ(mp_tp->mpt_snduna, m->m_pkthdr.mp_dsn + m->m_pkthdr.mp_rlen))
+ if (MPTCP_SEQ_GEQ(mp_tp->mpt_snduna, m->m_pkthdr.mp_dsn + m->m_pkthdr.mp_rlen)) {
goto next;
+ }
/* Part of this has already been acknowledged - lookup in the
* MPTCP-socket for the segment.
*/
if (SEQ_GT(tp->snd_una - mpts->mpts_iss, m->m_pkthdr.mp_rseq)) {
m = mptcp_lookup_dsn(mpte, m->m_pkthdr.mp_dsn);
- if (m == NULL)
+ if (m == NULL) {
goto next;
+ }
}
/* Copy the mbuf with headers (aka, DSN-numbers) */
m = mptcp_copy_mbuf_list(m, m->m_pkthdr.mp_rlen);
- if (m == NULL)
+ if (m == NULL) {
break;
+ }
VERIFY(m->m_nextpkt == NULL);
while (n) {
VERIFY((n->m_flags & M_PKTHDR) && (n->m_pkthdr.pkt_flags & PKTF_MPTCP));
- if (n->m_pkthdr.mp_dsn != orig->m_pkthdr.mp_dsn)
+ if (n->m_pkthdr.mp_dsn != orig->m_pkthdr.mp_dsn) {
break;
+ }
n->m_pkthdr.pkt_flags |= PKTF_MPTCP_REINJ;
n = n->m_next;
struct mbuf *m = mpte->mpte_reinjectq;
if (MPTCP_SEQ_GEQ(m->m_pkthdr.mp_dsn, mp_tp->mpt_snduna) ||
- MPTCP_SEQ_GT(m->m_pkthdr.mp_dsn + m->m_pkthdr.mp_rlen, mp_tp->mpt_snduna))
+ MPTCP_SEQ_GT(m->m_pkthdr.mp_dsn + m->m_pkthdr.mp_rlen, mp_tp->mpt_snduna)) {
break;
+ }
mpte->mpte_reinjectq = m->m_nextpkt;
m->m_nextpkt = NULL;
VERIFY(mpte != NULL);
mpte_lock_assert_held(mpte);
- if ((mpts->mpts_evctl & events) == events)
+ if ((mpts->mpts_evctl & events) == events) {
return;
+ }
mpts->mpts_evctl |= events;
*/
static ev_ret_t
mptcp_subflow_events(struct mptses *mpte, struct mptsub *mpts,
- uint64_t *p_mpsofilt_hint)
+ uint64_t *p_mpsofilt_hint)
{
ev_ret_t ret = MPTS_EVRET_OK;
int i, mpsub_ev_entry_count = sizeof(mpsub_ev_entry_tbl) /
- sizeof(mpsub_ev_entry_tbl[0]);
+ sizeof(mpsub_ev_entry_tbl[0]);
- mpte_lock_assert_held(mpte); /* same as MP socket lock */
+ mpte_lock_assert_held(mpte); /* same as MP socket lock */
/* bail if there's nothing to process */
- if (!mpts->mpts_evctl)
- return (ret);
+ if (!mpts->mpts_evctl) {
+ return ret;
+ }
- if (mpts->mpts_evctl & (SO_FILT_HINT_CONNRESET|SO_FILT_HINT_MUSTRST|
- SO_FILT_HINT_CANTSENDMORE|SO_FILT_HINT_TIMEOUT|
- SO_FILT_HINT_NOSRCADDR|SO_FILT_HINT_IFDENIED|
+ if (mpts->mpts_evctl & (SO_FILT_HINT_CONNRESET | SO_FILT_HINT_MUSTRST |
+ SO_FILT_HINT_CANTSENDMORE | SO_FILT_HINT_TIMEOUT |
+ SO_FILT_HINT_NOSRCADDR | SO_FILT_HINT_IFDENIED |
SO_FILT_HINT_DISCONNECTED)) {
mpts->mpts_evctl |= SO_FILT_HINT_MPFAILOVER;
}
struct mptsub *, mpts, uint32_t, mpts->mpts_evctl);
mptcplog((LOG_DEBUG, "%s cid %d events=%b\n", __func__,
- mpts->mpts_connid, mpts->mpts_evctl, SO_FILT_HINT_BITS),
- MPTCP_EVENTS_DBG, MPTCP_LOGLVL_VERBOSE);
+ mpts->mpts_connid, mpts->mpts_evctl, SO_FILT_HINT_BITS),
+ MPTCP_EVENTS_DBG, MPTCP_LOGLVL_VERBOSE);
/*
* Process all the socket filter hints and reset the hint
*/
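+ /*
+ * Handlers keep running only while ret stays at or above
+ * MPTS_EVRET_OK; SO_FILT_HINT_DISCONNECTED is the one hint that is
+ * processed regardless.
+ */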
if ((mpts->mpts_evctl & mpsub_ev_entry_tbl[i].sofilt_hint_mask) &&
(ret >= MPTS_EVRET_OK ||
- mpsub_ev_entry_tbl[i].sofilt_hint_mask == SO_FILT_HINT_DISCONNECTED)) {
+ mpsub_ev_entry_tbl[i].sofilt_hint_mask == SO_FILT_HINT_DISCONNECTED)) {
mpts->mpts_evctl &= ~mpsub_ev_entry_tbl[i].sofilt_hint_mask;
ev_ret_t error =
- mpsub_ev_entry_tbl[i].sofilt_hint_ev_hdlr(mpte, mpts, p_mpsofilt_hint, mpsub_ev_entry_tbl[i].sofilt_hint_mask);
+ mpsub_ev_entry_tbl[i].sofilt_hint_ev_hdlr(mpte, mpts, p_mpsofilt_hint, mpsub_ev_entry_tbl[i].sofilt_hint_mask);
ret = ((error >= MPTS_EVRET_OK) ? MAX(error, ret) : error);
}
}
* We should be getting only events specified via sock_catchevents(),
* so loudly complain if we have any unprocessed one(s).
*/
- if (mpts->mpts_evctl || ret < MPTS_EVRET_OK)
+ if (mpts->mpts_evctl || ret < MPTS_EVRET_OK) {
mptcplog((LOG_WARNING, "%s%s: cid %d evret %s (%d) unhandled events=%b\n", __func__,
(mpts->mpts_evctl && ret == MPTS_EVRET_OK) ? "MPTCP_ERROR " : "",
mpts->mpts_connid,
mptcp_evret2str(ret), ret, mpts->mpts_evctl, SO_FILT_HINT_BITS),
MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG);
- else
+ } else {
mptcplog((LOG_DEBUG, "%s: Done, events %b\n", __func__,
- mpts->mpts_evctl, SO_FILT_HINT_BITS),
- MPTCP_EVENTS_DBG, MPTCP_LOGLVL_VERBOSE);
+ mpts->mpts_evctl, SO_FILT_HINT_BITS),
+ MPTCP_EVENTS_DBG, MPTCP_LOGLVL_VERBOSE);
+ }
- return (ret);
+ return ret;
}
static ev_ret_t
mptcp_subflow_propagate_ev(struct mptses *mpte, struct mptsub *mpts,
- uint64_t *p_mpsofilt_hint, uint64_t event)
+ uint64_t *p_mpsofilt_hint, uint64_t event)
{
struct socket *mp_so, *so;
struct mptcb *mp_tp;
- mpte_lock_assert_held(mpte); /* same as MP socket lock */
+ mpte_lock_assert_held(mpte); /* same as MP socket lock */
VERIFY(mpte->mpte_mppcb != NULL);
mp_so = mptetoso(mpte);
mp_tp = mpte->mpte_mptcb;
*p_mpsofilt_hint |= event;
}
- return (MPTS_EVRET_OK);
+ return MPTS_EVRET_OK;
}
/*
*/
static ev_ret_t
mptcp_subflow_nosrcaddr_ev(struct mptses *mpte, struct mptsub *mpts,
- uint64_t *p_mpsofilt_hint, uint64_t event)
+ uint64_t *p_mpsofilt_hint, uint64_t event)
{
#pragma unused(p_mpsofilt_hint, event)
struct socket *mp_so;
struct tcpcb *tp;
- mpte_lock_assert_held(mpte); /* same as MP socket lock */
+ mpte_lock_assert_held(mpte); /* same as MP socket lock */
VERIFY(mpte->mpte_mppcb != NULL);
mp_so = mptetoso(mpte);
mpte->mpte_lost_aid = tp->t_local_aid;
mptcplog((LOG_DEBUG, "%s cid %d\n", __func__, mpts->mpts_connid),
- MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG);
+ MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG);
/*
* The subflow connection has lost its source address.
*/
mptcp_subflow_abort(mpts, EADDRNOTAVAIL);
- if (mp_so->so_flags & SOF_NOADDRAVAIL)
+ if (mp_so->so_flags & SOF_NOADDRAVAIL) {
mptcp_subflow_propagate_ev(mpte, mpts, p_mpsofilt_hint, event);
+ }
- return (MPTS_EVRET_DELETE);
+ return MPTS_EVRET_DELETE;
}
/*
*/
static ev_ret_t
mptcp_subflow_mpcantrcvmore_ev(struct mptses *mpte, struct mptsub *mpts,
- uint64_t *p_mpsofilt_hint, uint64_t event)
+ uint64_t *p_mpsofilt_hint, uint64_t event)
{
#pragma unused(event)
struct mptcb *mp_tp;
MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG);
/*
- * We got a Data FIN for the MPTCP connection.
- * The FIN may arrive with data. The data is handed up to the
- * mptcp socket and the user is notified so that it may close
- * the socket if needed.
- */
- if (mp_tp->mpt_state == MPTCPS_CLOSE_WAIT)
+ * We got a Data FIN for the MPTCP connection.
+ * The FIN may arrive with data. The data is handed up to the
+ * mptcp socket and the user is notified so that it may close
+ * the socket if needed.
+ */
+ if (mp_tp->mpt_state == MPTCPS_CLOSE_WAIT) {
*p_mpsofilt_hint |= SO_FILT_HINT_CANTRCVMORE;
+ }
- return (MPTS_EVRET_OK); /* keep the subflow socket around */
+ return MPTS_EVRET_OK; /* keep the subflow socket around */
}
/*
*/
static ev_ret_t
mptcp_subflow_failover_ev(struct mptses *mpte, struct mptsub *mpts,
- uint64_t *p_mpsofilt_hint, uint64_t event)
+ uint64_t *p_mpsofilt_hint, uint64_t event)
{
#pragma unused(event, p_mpsofilt_hint)
struct mptsub *mpts_alt = NULL;
mpte_lock_assert_held(mpte);
mp_so = mptetoso(mpte);
mptcplog((LOG_NOTICE, "%s: mp_so 0x%llx\n", __func__,
- (u_int64_t)VM_KERNEL_ADDRPERM(mp_so)),
- MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG);
+ (u_int64_t)VM_KERNEL_ADDRPERM(mp_so)),
+ MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG);
mptcp_reinject_mbufs(mpts->mpts_socket);
*/
if (mpts_alt == NULL) {
mptcplog((LOG_WARNING, "%s: no alternate path\n", __func__),
- MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG);
+ MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG);
goto done;
}
mpts->mpts_flags &= ~MPTSF_ACTIVE;
mptcplog((LOG_NOTICE, "%s: switched from %d to %d\n",
- __func__, mpts->mpts_connid, mpts_alt->mpts_connid),
- MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG);
+ __func__, mpts->mpts_connid, mpts_alt->mpts_connid),
+ MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG);
mptcpstats_inc_switch(mpte, mpts);
sowwakeup(alt_so);
} else {
mptcplog((LOG_DEBUG, "%s: no alt cid = %d\n", __func__,
- mpts->mpts_connid),
- MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG);
+ mpts->mpts_connid),
+ MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG);
done:
mpts->mpts_socket->so_flags &= ~SOF_MP_TRYFAILOVER;
}
- return (MPTS_EVRET_OK);
+ return MPTS_EVRET_OK;
}
/*
*/
static ev_ret_t
mptcp_subflow_ifdenied_ev(struct mptses *mpte, struct mptsub *mpts,
- uint64_t *p_mpsofilt_hint, uint64_t event)
+ uint64_t *p_mpsofilt_hint, uint64_t event)
{
- mpte_lock_assert_held(mpte); /* same as MP socket lock */
+ mpte_lock_assert_held(mpte); /* same as MP socket lock */
VERIFY(mpte->mpte_mppcb != NULL);
mptcplog((LOG_DEBUG, "%s: cid %d\n", __func__,
mptcp_subflow_propagate_ev(mpte, mpts, p_mpsofilt_hint, event);
- return (MPTS_EVRET_DELETE);
+ return MPTS_EVRET_DELETE;
}
/*
*/
static boolean_t
mptcp_desynthesize_ipv6_addr(const struct in6_addr *addr,
- const struct ipv6_prefix *prefix,
- struct in_addr *addrv4)
+ const struct ipv6_prefix *prefix,
+ struct in_addr *addrv4)
{
char buf[MAX_IPv4_STR_LEN];
char *ptrv4 = (char *)addrv4;
const char *ptr = (const char *)addr;
- if (memcmp(addr, &prefix->ipv6_prefix, prefix->prefix_len) != 0)
+ if (memcmp(addr, &prefix->ipv6_prefix, prefix->prefix_len) != 0) {
return false;
+ }
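+ /*
+ * Per RFC 6052, the IPv4 address is embedded right after the NAT64
+ * prefix but skips octet 8 (bits 64-71 are reserved and must be
+ * zero), which is why several cases below split the copy around
+ * ptr + 8.
+ */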
switch (prefix->prefix_len) {
- case NAT64_PREFIX_LEN_96:
- memcpy(ptrv4, ptr + 12, 4);
- break;
- case NAT64_PREFIX_LEN_64:
- memcpy(ptrv4, ptr + 9, 4);
- break;
- case NAT64_PREFIX_LEN_56:
- memcpy(ptrv4, ptr + 7, 1);
- memcpy(ptrv4 + 1, ptr + 9, 3);
- break;
- case NAT64_PREFIX_LEN_48:
- memcpy(ptrv4, ptr + 6, 2);
- memcpy(ptrv4 + 2, ptr + 9, 2);
- break;
- case NAT64_PREFIX_LEN_40:
- memcpy(ptrv4, ptr + 5, 3);
- memcpy(ptrv4 + 3, ptr + 9, 1);
- break;
- case NAT64_PREFIX_LEN_32:
- memcpy(ptrv4, ptr + 4, 4);
- break;
- default:
- panic("NAT64-prefix len is wrong: %u\n",
- prefix->prefix_len);
+ case NAT64_PREFIX_LEN_96:
+ memcpy(ptrv4, ptr + 12, 4);
+ break;
+ case NAT64_PREFIX_LEN_64:
+ memcpy(ptrv4, ptr + 9, 4);
+ break;
+ case NAT64_PREFIX_LEN_56:
+ memcpy(ptrv4, ptr + 7, 1);
+ memcpy(ptrv4 + 1, ptr + 9, 3);
+ break;
+ case NAT64_PREFIX_LEN_48:
+ memcpy(ptrv4, ptr + 6, 2);
+ memcpy(ptrv4 + 2, ptr + 9, 2);
+ break;
+ case NAT64_PREFIX_LEN_40:
+ memcpy(ptrv4, ptr + 5, 3);
+ memcpy(ptrv4 + 3, ptr + 9, 1);
+ break;
+ case NAT64_PREFIX_LEN_32:
+ memcpy(ptrv4, ptr + 4, 4);
+ break;
+ default:
+ panic("NAT64-prefix len is wrong: %u\n",
+ prefix->prefix_len);
}
os_log_info(mptcp_log_handle, "%s desynthesized to %s\n", __func__,
- inet_ntop(AF_INET, (void *)addrv4, buf, sizeof(buf)));
+ inet_ntop(AF_INET, (void *)addrv4, buf, sizeof(buf)));
return true;
}
for (j = 0; j < NAT64_MAX_NUM_PREFIXES; j++) {
int success;
- if (nat64prefixes[j].prefix_len == 0)
+ if (nat64prefixes[j].prefix_len == 0) {
continue;
+ }
success = mptcp_desynthesize_ipv6_addr(&mpte->__mpte_dst_v6.sin6_addr,
- &nat64prefixes[j],
- &mpte->mpte_dst_v4_nat64.sin_addr);
+ &nat64prefixes[j],
+ &mpte->mpte_dst_v4_nat64.sin_addr);
if (success) {
mpte->mpte_dst_v4_nat64.sin_len = sizeof(mpte->mpte_dst_v4_nat64);
mpte->mpte_dst_v4_nat64.sin_family = AF_INET;
*/
static ev_ret_t
mptcp_subflow_connected_ev(struct mptses *mpte, struct mptsub *mpts,
- uint64_t *p_mpsofilt_hint, uint64_t event)
+ uint64_t *p_mpsofilt_hint, uint64_t event)
{
#pragma unused(event, p_mpsofilt_hint)
struct socket *mp_so, *so;
int af;
boolean_t mpok = FALSE;
- mpte_lock_assert_held(mpte); /* same as MP socket lock */
+ mpte_lock_assert_held(mpte); /* same as MP socket lock */
VERIFY(mpte->mpte_mppcb != NULL);
mp_so = mptetoso(mpte);
tp = sototcpcb(so);
af = mpts->mpts_dst.sa_family;
- if (mpts->mpts_flags & MPTSF_CONNECTED)
- return (MPTS_EVRET_OK);
+ if (mpts->mpts_flags & MPTSF_CONNECTED) {
+ return MPTS_EVRET_OK;
+ }
if ((mpts->mpts_flags & MPTSF_DISCONNECTED) ||
(mpts->mpts_flags & MPTSF_DISCONNECTING)) {
if (!(so->so_state & (SS_ISDISCONNECTING | SS_ISDISCONNECTED)) &&
(so->so_state & SS_ISCONNECTED)) {
- mptcplog((LOG_DEBUG, "%s: cid %d disconnect before tcp connect\n",
- __func__, mpts->mpts_connid),
- MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG);
+ mptcplog((LOG_DEBUG, "%s: cid %d disconnect before tcp connect\n",
+ __func__, mpts->mpts_connid),
+ MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG);
(void) soshutdownlock(so, SHUT_RD);
(void) soshutdownlock(so, SHUT_WR);
(void) sodisconnectlocked(so);
}
- return (MPTS_EVRET_OK);
+ return MPTS_EVRET_OK;
}
/*
* where the subflow could get disconnected before the
* connected event is processed.
*/
- return (MPTS_EVRET_OK);
+ return MPTS_EVRET_OK;
}
- if (mpts->mpts_flags & MPTSF_TFO_REQD)
+ if (mpts->mpts_flags & MPTSF_TFO_REQD) {
mptcp_drop_tfo_data(mpte, mpts);
+ }
mpts->mpts_flags &= ~(MPTSF_CONNECTING | MPTSF_TFO_REQD);
mpts->mpts_flags |= MPTSF_CONNECTED;
- if (tp->t_mpflags & TMPF_MPTCP_TRUE)
+ if (tp->t_mpflags & TMPF_MPTCP_TRUE) {
mpts->mpts_flags |= MPTSF_MP_CAPABLE;
+ }
tp->t_mpflags &= ~TMPF_TFO_REQUEST;
mpts->mpts_flags |= MPTSF_MPCAP_CTRSET;
mpte->mpte_nummpcapflows++;
- if (SOCK_DOM(so) == AF_INET6)
+ if (SOCK_DOM(so) == AF_INET6) {
mptcp_handle_ipv6_connection(mpte, mpts);
+ }
mptcp_check_subflows_and_add(mpte);
- if (IFNET_IS_CELLULAR(inp->inp_last_outifp))
+ if (IFNET_IS_CELLULAR(inp->inp_last_outifp)) {
mpte->mpte_initial_cell = 1;
+ }
mpte->mpte_handshake_success = 1;
}
dst_in->sin_port = mpte->mpte_alternate_port;
mptcp_subflow_add(mpte, NULL, (struct sockaddr *)&dst,
- mpts->mpts_ifscope , NULL);
+ mpts->mpts_ifscope, NULL);
} else { /* Else, we tried all we could, mark this interface as non-MPTCP */
for (i = 0; i < mpte->mpte_itfinfo_size; i++) {
struct mpt_itf_info *info = &mpte->mpte_itfinfo[i];
}
tcpstat.tcps_join_fallback++;
- if (IFNET_IS_CELLULAR(inp->inp_last_outifp))
+ if (IFNET_IS_CELLULAR(inp->inp_last_outifp)) {
tcpstat.tcps_mptcp_cell_proxy++;
- else
+ } else {
tcpstat.tcps_mptcp_wifi_proxy++;
+ }
soevent(mpts->mpts_socket, SO_FILT_HINT_LOCKED | SO_FILT_HINT_MUSTRST);
- return (MPTS_EVRET_OK);
+ return MPTS_EVRET_OK;
}
/* This call, just to "book" an entry in the stats-table for this ifindex */
mptcp_output(mpte);
- return (MPTS_EVRET_OK); /* keep the subflow socket around */
+ return MPTS_EVRET_OK; /* keep the subflow socket around */
}
/*
*/
static ev_ret_t
mptcp_subflow_disconnected_ev(struct mptses *mpte, struct mptsub *mpts,
- uint64_t *p_mpsofilt_hint, uint64_t event)
+ uint64_t *p_mpsofilt_hint, uint64_t event)
{
#pragma unused(event, p_mpsofilt_hint)
struct socket *mp_so, *so;
struct mptcb *mp_tp;
- mpte_lock_assert_held(mpte); /* same as MP socket lock */
+ mpte_lock_assert_held(mpte); /* same as MP socket lock */
VERIFY(mpte->mpte_mppcb != NULL);
mp_so = mptetoso(mpte);
mp_tp = mpte->mpte_mptcb;
!!(mpts->mpts_flags & MPTSF_ACTIVE), sototcpcb(so)->t_mpflags),
MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG);
- if (mpts->mpts_flags & MPTSF_DISCONNECTED)
- return (MPTS_EVRET_DELETE);
+ if (mpts->mpts_flags & MPTSF_DISCONNECTED) {
+ return MPTS_EVRET_DELETE;
+ }
mpts->mpts_flags |= MPTSF_DISCONNECTED;
}
if (mp_tp->mpt_state < MPTCPS_ESTABLISHED ||
- ((mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP) && (mpts->mpts_flags & MPTSF_ACTIVE)) ||
- (sototcpcb(so)->t_mpflags & TMPF_FASTCLOSERCV)) {
+ ((mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP) && (mpts->mpts_flags & MPTSF_ACTIVE))) {
mptcp_drop(mpte, mp_tp, so->so_error);
}
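+ /*
+ * MP_FASTCLOSE brings down the whole MPTCP connection; use the
+ * error already set on the MP socket.
+ */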
+ if (sototcpcb(so)->t_mpflags & TMPF_FASTCLOSERCV) {
+ mptcp_drop(mpte, mp_tp, mp_so->so_error);
+ }
+
/*
* Clear flags that are used by getconninfo to return state.
* Retain like MPTSF_DELETEOK for internal purposes.
*/
- mpts->mpts_flags &= ~(MPTSF_CONNECTING|MPTSF_CONNECT_PENDING|
- MPTSF_CONNECTED|MPTSF_DISCONNECTING|MPTSF_PREFERRED|
- MPTSF_MP_CAPABLE|MPTSF_MP_READY|MPTSF_MP_DEGRADED|MPTSF_ACTIVE);
+ mpts->mpts_flags &= ~(MPTSF_CONNECTING | MPTSF_CONNECT_PENDING |
+ MPTSF_CONNECTED | MPTSF_DISCONNECTING | MPTSF_PREFERRED |
+ MPTSF_MP_CAPABLE | MPTSF_MP_READY | MPTSF_MP_DEGRADED | MPTSF_ACTIVE);
- return (MPTS_EVRET_DELETE);
+ return MPTS_EVRET_DELETE;
}
/*
*/
static ev_ret_t
mptcp_subflow_mpstatus_ev(struct mptses *mpte, struct mptsub *mpts,
- uint64_t *p_mpsofilt_hint, uint64_t event)
+ uint64_t *p_mpsofilt_hint, uint64_t event)
{
#pragma unused(event, p_mpsofilt_hint)
struct socket *mp_so, *so;
struct mptcb *mp_tp;
ev_ret_t ret = MPTS_EVRET_OK;
- mpte_lock_assert_held(mpte); /* same as MP socket lock */
+ mpte_lock_assert_held(mpte); /* same as MP socket lock */
VERIFY(mpte->mpte_mppcb != NULL);
mp_so = mptetoso(mpte);
mp_tp = mpte->mpte_mptcb;
so = mpts->mpts_socket;
- if (sototcpcb(so)->t_mpflags & TMPF_MPTCP_TRUE)
+ if (sototcpcb(so)->t_mpflags & TMPF_MPTCP_TRUE) {
mpts->mpts_flags |= MPTSF_MP_CAPABLE;
- else
+ } else {
mpts->mpts_flags &= ~MPTSF_MP_CAPABLE;
+ }
if (sototcpcb(so)->t_mpflags & TMPF_TCP_FALLBACK) {
- if (mpts->mpts_flags & MPTSF_MP_DEGRADED)
+ if (mpts->mpts_flags & MPTSF_MP_DEGRADED) {
goto done;
+ }
mpts->mpts_flags |= MPTSF_MP_DEGRADED;
- }
- else
+ } else {
mpts->mpts_flags &= ~MPTSF_MP_DEGRADED;
+ }
- if (sototcpcb(so)->t_mpflags & TMPF_MPTCP_READY)
+ if (sototcpcb(so)->t_mpflags & TMPF_MPTCP_READY) {
mpts->mpts_flags |= MPTSF_MP_READY;
- else
+ } else {
mpts->mpts_flags &= ~MPTSF_MP_READY;
+ }
if (mpts->mpts_flags & MPTSF_MP_DEGRADED) {
mp_tp->mpt_flags |= MPTCPF_FALLBACK_TO_TCP;
if (mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP) {
VERIFY(!(mp_tp->mpt_flags & MPTCPF_JOIN_READY));
ret = MPTS_EVRET_DISCONNECT_FALLBACK;
+
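+ /*
+ * Once we fall back to regular TCP there is nothing left to
+ * reinject at the MPTCP level, so drop any queued segments.
+ */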
+ m_freem_list(mpte->mpte_reinjectq);
+ mpte->mpte_reinjectq = NULL;
} else if (mpts->mpts_flags & MPTSF_MP_READY) {
mp_tp->mpt_flags |= MPTCPF_JOIN_READY;
ret = MPTS_EVRET_CONNECT_PENDING;
}
mptcplog((LOG_DEBUG, "%s: mp_so 0x%llx mpt_flags=%b cid %d mptsf=%b\n",
- __func__, (u_int64_t)VM_KERNEL_ADDRPERM(mp_so),
- mp_tp->mpt_flags, MPTCPF_BITS, mpts->mpts_connid,
- mpts->mpts_flags, MPTSF_BITS),
- MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG);
+ __func__, (u_int64_t)VM_KERNEL_ADDRPERM(mp_so),
+ mp_tp->mpt_flags, MPTCPF_BITS, mpts->mpts_connid,
+ mpts->mpts_flags, MPTSF_BITS),
+ MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG);
done:
- return (ret);
+ return ret;
}
/*
*/
static ev_ret_t
mptcp_subflow_mustrst_ev(struct mptses *mpte, struct mptsub *mpts,
- uint64_t *p_mpsofilt_hint, uint64_t event)
+ uint64_t *p_mpsofilt_hint, uint64_t event)
{
#pragma unused(event)
struct socket *mp_so, *so;
struct tcp_respond_args tra;
bzero(&tra, sizeof(tra));
- if (inp->inp_flags & INP_BOUND_IF)
+ if (inp->inp_flags & INP_BOUND_IF) {
tra.ifscope = inp->inp_boundifp->if_index;
- else
+ } else {
tra.ifscope = IFSCOPE_NONE;
+ }
tra.awdl_unrestricted = 1;
tcp_respond(tp, t_template->tt_ipgen,
if (!(mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP) && is_fastclose) {
*p_mpsofilt_hint |= SO_FILT_HINT_CONNRESET;
- if (mp_tp->mpt_state < MPTCPS_ESTABLISHED)
+ if (mp_tp->mpt_state < MPTCPS_ESTABLISHED) {
mp_so->so_error = ECONNABORTED;
- else
+ } else {
mp_so->so_error = ECONNRESET;
+ }
/*
* mptcp_drop is being called after processing the events, to fully
*/
}
- if (mp_tp->mpt_gc_ticks == MPT_GC_TICKS)
+ if (mp_tp->mpt_gc_ticks == MPT_GC_TICKS) {
mp_tp->mpt_gc_ticks = MPT_GC_TICKS_FAST;
+ }
- return (MPTS_EVRET_DELETE);
+ return MPTS_EVRET_DELETE;
}
static ev_ret_t
mptcp_subflow_adaptive_rtimo_ev(struct mptses *mpte, struct mptsub *mpts,
- uint64_t *p_mpsofilt_hint, uint64_t event)
+ uint64_t *p_mpsofilt_hint, uint64_t event)
{
#pragma unused(event)
bool found_active = false;
struct tcpcb *tp = sototcpcb(mpts->mpts_socket);
if (!TCPS_HAVEESTABLISHED(tp->t_state) ||
- TCPS_HAVERCVDFIN2(tp->t_state))
+ TCPS_HAVERCVDFIN2(tp->t_state)) {
continue;
+ }
if (!(mpts->mpts_flags & MPTSF_READ_STALL)) {
found_active = true;
}
}
- if (!found_active)
+ if (!found_active) {
*p_mpsofilt_hint |= SO_FILT_HINT_ADAPTIVE_RTIMO;
+ }
- return (MPTS_EVRET_OK);
+ return MPTS_EVRET_OK;
}
static ev_ret_t
mptcp_subflow_adaptive_wtimo_ev(struct mptses *mpte, struct mptsub *mpts,
- uint64_t *p_mpsofilt_hint, uint64_t event)
+ uint64_t *p_mpsofilt_hint, uint64_t event)
{
#pragma unused(event)
bool found_active = false;
struct tcpcb *tp = sototcpcb(mpts->mpts_socket);
if (!TCPS_HAVEESTABLISHED(tp->t_state) ||
- tp->t_state > TCPS_CLOSE_WAIT)
+ tp->t_state > TCPS_CLOSE_WAIT) {
continue;
+ }
if (!(mpts->mpts_flags & MPTSF_WRITE_STALL)) {
found_active = true;
}
}
- if (!found_active)
+ if (!found_active) {
*p_mpsofilt_hint |= SO_FILT_HINT_ADAPTIVE_WTIMO;
+ }
- return (MPTS_EVRET_OK);
+ return MPTS_EVRET_OK;
}
static const char *
default:
break;
}
- return (c);
+ return c;
}
/*
if (mpte->mpte_mptcb->mpt_state >= MPTCPS_ESTABLISHED &&
mpo->mpo_level == SOL_SOCKET &&
mpo->mpo_name == SO_MARK_CELLFALLBACK) {
- mptcplog((LOG_DEBUG, "%s Setting CELL_FALLBACK, mpte_flags %#x, svctype %u wifi unusable %u lastcell? %d boundcell? %d\n",
- __func__, mpte->mpte_flags, mpte->mpte_svctype, mptcp_is_wifi_unusable(),
- sotoinpcb(so)->inp_last_outifp ? IFNET_IS_CELLULAR(sotoinpcb(so)->inp_last_outifp) : -1,
- mpts->mpts_ifscope != IFSCOPE_NONE ? IFNET_IS_CELLULAR(ifindex2ifnet[mpts->mpts_ifscope]) : -1),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
+ struct ifnet *ifp = ifindex2ifnet[mpts->mpts_ifscope];
+
+ mptcplog((LOG_DEBUG, "%s Setting CELL_FALLBACK, mpte_flags %#x, svctype %u wifi unusable %d lastcell? %d boundcell? %d\n",
+ __func__, mpte->mpte_flags, mpte->mpte_svctype, mptcp_is_wifi_unusable(mpte),
+ sotoinpcb(so)->inp_last_outifp ? IFNET_IS_CELLULAR(sotoinpcb(so)->inp_last_outifp) : -1,
+ mpts->mpts_ifscope != IFSCOPE_NONE && ifp ? IFNET_IS_CELLULAR(ifp) : -1),
+ MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
/*
* When we open a new subflow, mark it as cell fallback, if
* (except for first-party apps)
*/
- if (mpte->mpte_flags & MPTE_FIRSTPARTY)
- return (0);
+ if (mpte->mpte_flags & MPTE_FIRSTPARTY) {
+ return 0;
+ }
if (sotoinpcb(so)->inp_last_outifp &&
- !IFNET_IS_CELLULAR(sotoinpcb(so)->inp_last_outifp))
- return (0);
+ !IFNET_IS_CELLULAR(sotoinpcb(so)->inp_last_outifp)) {
+ return 0;
+ }
/*
* This here is an OR, because if the app is not binding to the
* interface, then it definitely is not a cell-fallback
* connection.
*/
- if (mpts->mpts_ifscope == IFSCOPE_NONE ||
- !IFNET_IS_CELLULAR(ifindex2ifnet[mpts->mpts_ifscope]))
- return (0);
+ if (mpts->mpts_ifscope == IFSCOPE_NONE || ifp == NULL ||
+ !IFNET_IS_CELLULAR(ifp)) {
+ return 0;
+ }
}
mpo->mpo_flags &= ~MPOF_INTERIM;
- bzero(&sopt, sizeof (sopt));
+ bzero(&sopt, sizeof(sopt));
sopt.sopt_dir = SOPT_SET;
sopt.sopt_level = mpo->mpo_level;
sopt.sopt_name = mpo->mpo_name;
sopt.sopt_val = CAST_USER_ADDR_T(&mpo->mpo_intval);
- sopt.sopt_valsize = sizeof (int);
+ sopt.sopt_valsize = sizeof(int);
sopt.sopt_p = kernproc;
error = sosetoptlock(so, &sopt, 0);
mpo->mpo_intval, error),
MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
}
- return (error);
+ return error;
}
/*
int error;
VERIFY(mpo->mpo_flags & MPOF_SUBFLOW_OK);
- mpte_lock_assert_held(mpte); /* same as MP socket lock */
+ mpte_lock_assert_held(mpte); /* same as MP socket lock */
mp_so = mptetoso(mpte);
- bzero(&sopt, sizeof (sopt));
+ bzero(&sopt, sizeof(sopt));
sopt.sopt_dir = SOPT_GET;
sopt.sopt_level = mpo->mpo_level;
sopt.sopt_name = mpo->mpo_name;
sopt.sopt_val = CAST_USER_ADDR_T(&mpo->mpo_intval);
- sopt.sopt_valsize = sizeof (int);
+ sopt.sopt_valsize = sizeof(int);
sopt.sopt_p = kernproc;
- error = sogetoptlock(so, &sopt, 0); /* already locked */
+ error = sogetoptlock(so, &sopt, 0); /* already locked */
if (error == 0) {
mptcplog((LOG_DEBUG, "MPTCP Socket: "
"%s: mp_so 0x%llx sopt %s "
mptcp_sopt2str(mpo->mpo_level, mpo->mpo_name), error),
MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
}
- return (error);
+ return error;
}
MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
if (mp_tp->mpt_state >= MPTCPS_FIN_WAIT_1) {
- if (mp_tp->mpt_gc_ticks > 0)
+ if (mp_tp->mpt_gc_ticks > 0) {
mp_tp->mpt_gc_ticks--;
+ }
if (mp_tp->mpt_gc_ticks == 0) {
wakeup = TRUE;
}
if (mpp->mpp_state != MPPCB_STATE_DEAD) {
panic("MPTCP Socket: %s: mp_so 0x%llx skipped state "
- "[u=%d,r=%d,s=%d]\n", __func__,
- (u_int64_t)VM_KERNEL_ADDRPERM(mp_so),
- mp_so->so_usecount, mp_so->so_retaincnt,
- mpp->mpp_state);
+ "[u=%d,r=%d,s=%d]\n", __func__,
+ (u_int64_t)VM_KERNEL_ADDRPERM(mp_so),
+ mp_so->so_usecount, mp_so->so_retaincnt,
+ mpp->mpp_state);
}
- if (mp_tp->mpt_state == MPTCPS_TIME_WAIT)
+ if (mp_tp->mpt_state == MPTCPS_TIME_WAIT) {
mptcp_close(mpte, mp_tp);
+ }
mptcp_session_destroy(mpte);
sodealloc(mp_so);
}
- return (active);
+ return active;
}
/*
{
struct socket *mp_so;
- mpte_lock_assert_held(mpte); /* same as MP socket lock */
+ mpte_lock_assert_held(mpte); /* same as MP socket lock */
VERIFY(mpte->mpte_mptcb == mp_tp);
mp_so = mptetoso(mpte);
DTRACE_MPTCP2(state__change, struct mptcb *, mp_tp,
uint32_t, 0 /* event */);
- if (errno == ETIMEDOUT && mp_tp->mpt_softerror != 0)
+ if (errno == ETIMEDOUT && mp_tp->mpt_softerror != 0) {
errno = mp_tp->mpt_softerror;
+ }
mp_so->so_error = errno;
- return (mptcp_close(mpte, mp_tp));
+ return mptcp_close(mpte, mp_tp);
}
/*
struct socket *mp_so = NULL;
struct mptsub *mpts = NULL, *tmpts = NULL;
- mpte_lock_assert_held(mpte); /* same as MP socket lock */
+ mpte_lock_assert_held(mpte); /* same as MP socket lock */
VERIFY(mpte->mpte_mptcb == mp_tp);
mp_so = mptetoso(mpte);
mptcp_subflow_disconnect(mpte, mpts);
}
- return (NULL);
+ return NULL;
}
void
void
mptcp_subflow_workloop(struct mptses *mpte)
{
- struct socket *mp_so;
- struct mptsub *mpts, *tmpts;
boolean_t connect_pending = FALSE, disconnect_fallback = FALSE;
- uint64_t mpsofilt_hint_mask = SO_FILT_HINT_LOCKED;
+ uint64_t mpsofilt_hint_mask;
+ struct mptsub *mpts, *tmpts;
+ struct socket *mp_so;
mpte_lock_assert_held(mpte);
- VERIFY(mpte->mpte_mppcb != NULL);
+
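+ /*
+ * Guard against re-entering the workloop: if it is already running,
+ * just mark that it has to run once more and let the active instance
+ * relaunch itself (see the relaunch label below).
+ */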
+ if (mpte->mpte_flags & MPTE_IN_WORKLOOP) {
+ mpte->mpte_flags |= MPTE_WORKLOOP_RELAUNCH;
+ return;
+ }
+ mpte->mpte_flags |= MPTE_IN_WORKLOOP;
+
mp_so = mptetoso(mpte);
- VERIFY(mp_so != NULL);
+
+relaunch:
+ mpsofilt_hint_mask = SO_FILT_HINT_LOCKED;
+ mpte->mpte_flags &= ~MPTE_WORKLOOP_RELAUNCH;
TAILQ_FOREACH_SAFE(mpts, &mpte->mpte_subflows, mpts_entry, tmpts) {
ev_ret_t ret;
* be handled during the next iteration, causing a
* non-zero error to be returned above.
*/
- if (mp_so->so_flags & SOF_PCBCLEARING)
+ if (mp_so->so_flags & SOF_PCBCLEARING) {
mptcp_subflow_disconnect(mpte, mpts);
+ }
switch (ret) {
case MPTS_EVRET_OK:
default:
mptcplog((LOG_DEBUG,
"MPTCP Socket: %s: mptcp_subflow_events "
- "returned invalid value: %d\n", __func__,
+ "returned invalid value: %d\n", __func__,
ret),
MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
break;
}
- mptcp_subflow_remref(mpts); /* ours */
+ mptcp_subflow_remref(mpts); /* ours */
VERIFY(mpts->mpts_socket->so_usecount != 0);
mpts->mpts_socket->so_usecount--;
soevent(mp_so, mpsofilt_hint_mask);
}
- if (!connect_pending && !disconnect_fallback)
- return;
+ if (!connect_pending && !disconnect_fallback) {
+ goto exit;
+ }
TAILQ_FOREACH_SAFE(mpts, &mpte->mpte_subflows, mpts_entry, tmpts) {
if (disconnect_fallback) {
struct inpcb *inp = NULL;
struct tcpcb *tp = NULL;
- if (mpts->mpts_flags & MPTSF_MP_DEGRADED)
+ if (mpts->mpts_flags & MPTSF_MP_DEGRADED) {
continue;
+ }
mpts->mpts_flags |= MPTSF_MP_DEGRADED;
- if (mpts->mpts_flags & (MPTSF_DISCONNECTING|
- MPTSF_DISCONNECTED|MPTSF_CONNECT_PENDING))
+ if (mpts->mpts_flags & (MPTSF_DISCONNECTING |
+ MPTSF_DISCONNECTED | MPTSF_CONNECT_PENDING)) {
continue;
+ }
so = mpts->mpts_socket;
inp = sotoinpcb(so);
tp = intotcpcb(inp);
tp->t_mpflags &=
- ~(TMPF_MPTCP_READY|TMPF_MPTCP_TRUE);
+ ~(TMPF_MPTCP_READY | TMPF_MPTCP_TRUE);
tp->t_mpflags |= TMPF_TCP_FALLBACK;
if (mpts->mpts_flags & MPTSF_ACTIVE) {
if (mpts->mpts_flags & MPTSF_CONNECT_PENDING) {
int error = mptcp_subflow_soconnectx(mpte, mpts);
- if (error)
+ if (error) {
mptcp_subflow_abort(mpts, error);
+ }
}
}
}
+
+exit:
+ if (mpte->mpte_flags & MPTE_WORKLOOP_RELAUNCH) {
+ goto relaunch;
+ }
+
+ mpte->mpte_flags &= ~MPTE_IN_WORKLOOP;
}
/*
struct mppcb *mpp = mpsotomppcb(mp_so);
void *lr_saved;
- if (lr == NULL)
+ if (lr == NULL) {
lr_saved = __builtin_return_address(0);
- else
+ } else {
lr_saved = lr;
+ }
if (mpp == NULL) {
panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__,
solockhistory_nr(mp_so));
/* NOTREACHED */
}
- if (refcount != 0)
+ if (refcount != 0) {
mp_so->so_usecount++;
+ }
mp_so->lock_lr[mp_so->next_lock_lr] = lr_saved;
mp_so->next_lock_lr = (mp_so->next_lock_lr + 1) % SO_LCKDBG_MAX;
- return (0);
+ return 0;
}
/*
struct mppcb *mpp = mpsotomppcb(mp_so);
void *lr_saved;
- if (lr == NULL)
+ if (lr == NULL) {
lr_saved = __builtin_return_address(0);
- else
+ } else {
lr_saved = lr;
+ }
if (mpp == NULL) {
panic("%s: so=%p NO PCB usecount=%x lr=%p lrh= %s\n", __func__,
}
mpp_lock_assert_held(mpp);
- if (refcount != 0)
+ if (refcount != 0) {
mp_so->so_usecount--;
+ }
if (mp_so->so_usecount < 0) {
panic("%s: so=%p usecount=%x lrh= %s\n", __func__,
mp_so->next_unlock_lr = (mp_so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
mpp_unlock(mpp);
- return (0);
+ return 0;
}
/*
mp_so, mp_so->so_usecount, solockhistory_nr(mp_so));
/* NOTREACHED */
}
- return (mpp_getlock(mpp, flags));
+ return mpp_getlock(mpp, flags);
}
/*
sauth_entry->msae_raddr_rand = 0;
try_again:
sauth_entry->msae_laddr_rand = RandomULong();
- if (sauth_entry->msae_laddr_rand == 0)
+ if (sauth_entry->msae_laddr_rand == 0) {
goto try_again;
+ }
LIST_INSERT_HEAD(&mp_tp->mpt_subauth_list, sauth_entry, msae_next);
}
int found = 0;
tp = sototcpcb(so);
- if (tp == NULL)
+ if (tp == NULL) {
return;
+ }
LIST_FOREACH(sauth_entry, &mp_tp->mpt_subauth_list, msae_next) {
if (sauth_entry->msae_laddr_id == tp->t_local_aid) {
LIST_REMOVE(sauth_entry, msae_next);
}
- if (found)
+ if (found) {
zfree(mpt_subauth_zone, sauth_entry);
+ }
}
void
LIST_FOREACH(sauth_entry, &mp_tp->mpt_subauth_list, msae_next) {
if (sauth_entry->msae_laddr_id == addr_id) {
- if (lrand)
+ if (lrand) {
*lrand = sauth_entry->msae_laddr_rand;
- if (rrand)
+ }
+ if (rrand) {
*rrand = sauth_entry->msae_raddr_rand;
+ }
break;
}
}
int sha1_size;
sha1_base = (const unsigned char *) key;
- sha1_size = sizeof (mptcp_key_t);
+ sha1_size = sizeof(mptcp_key_t);
SHA1Init(&sha1ctxt);
SHA1Update(&sha1ctxt, sha1_base, sha1_size);
SHA1Final(sha_digest, &sha1ctxt);
void
mptcp_hmac_sha1(mptcp_key_t key1, mptcp_key_t key2,
- u_int32_t rand1, u_int32_t rand2, u_char *digest)
+ u_int32_t rand1, u_int32_t rand2, u_char *digest)
{
SHA1_CTX sha1ctxt;
mptcp_key_t key_ipad[8] = {0}; /* key XOR'd with inner pad */
/* Perform inner SHA1 */
SHA1Init(&sha1ctxt);
- SHA1Update(&sha1ctxt, (unsigned char *)key_ipad, sizeof (key_ipad));
- SHA1Update(&sha1ctxt, (unsigned char *)data, sizeof (data));
+ SHA1Update(&sha1ctxt, (unsigned char *)key_ipad, sizeof(key_ipad));
+ SHA1Update(&sha1ctxt, (unsigned char *)data, sizeof(data));
SHA1Final(digest, &sha1ctxt);
/* Perform outer SHA1 */
SHA1Init(&sha1ctxt);
- SHA1Update(&sha1ctxt, (unsigned char *)key_opad, sizeof (key_opad));
+ SHA1Update(&sha1ctxt, (unsigned char *)key_opad, sizeof(key_opad));
SHA1Update(&sha1ctxt, (unsigned char *)digest, SHA1_RESULTLEN);
SHA1Final(digest, &sha1ctxt);
}
mptcp_generate_token(char *sha_digest, int sha_digest_len, caddr_t token,
int token_len)
{
- VERIFY(token_len == sizeof (u_int32_t));
+ VERIFY(token_len == sizeof(u_int32_t));
VERIFY(sha_digest_len == SHA1_RESULTLEN);
/* Most significant 32 bits of the SHA1 hash */
- bcopy(sha_digest, token, sizeof (u_int32_t));
+ bcopy(sha_digest, token, sizeof(u_int32_t));
return;
}
mptcp_generate_idsn(char *sha_digest, int sha_digest_len, caddr_t idsn,
int idsn_len)
{
- VERIFY(idsn_len == sizeof (u_int64_t));
+ VERIFY(idsn_len == sizeof(u_int64_t));
VERIFY(sha_digest_len == SHA1_RESULTLEN);
/*
mp_tp->mpt_version = MPTCP_STD_VERSION_0;
/* Set DSS checksum flag */
- if (mptcp_dss_csum)
+ if (mptcp_dss_csum) {
mp_tp->mpt_flags |= MPTCPF_CHECKSUM;
+ }
/* Set up receive window */
mp_tp->mpt_rcvwnd = mptcp_sbspace(mp_tp);
mptcp_do_sha1(&mp_tp->mpt_localkey, key_digest);
mptcp_generate_token(key_digest, SHA1_RESULTLEN,
- (caddr_t)&mp_tp->mpt_localtoken, sizeof (mp_tp->mpt_localtoken));
+ (caddr_t)&mp_tp->mpt_localtoken, sizeof(mp_tp->mpt_localtoken));
mptcp_generate_idsn(key_digest, SHA1_RESULTLEN,
- (caddr_t)&mp_tp->mpt_local_idsn, sizeof (u_int64_t));
+ (caddr_t)&mp_tp->mpt_local_idsn, sizeof(u_int64_t));
/* The subflow SYN is also first MPTCP byte */
mp_tp->mpt_snduna = mp_tp->mpt_sndmax = mp_tp->mpt_local_idsn + 1;
mpte_lock_assert_held(mp_tp->mpt_mpte);
/* Only Version 0 is supported for auth purposes */
- if (mp_tp->mpt_version != MPTCP_STD_VERSION_0)
- return (-1);
+ if (mp_tp->mpt_version != MPTCP_STD_VERSION_0) {
+ return -1;
+ }
/* Setup local and remote tokens and Initial DSNs */
mptcp_do_sha1(&mp_tp->mpt_remotekey, remote_digest);
mptcp_generate_token(remote_digest, SHA1_RESULTLEN,
- (caddr_t)&mp_tp->mpt_remotetoken, sizeof (mp_tp->mpt_remotetoken));
+ (caddr_t)&mp_tp->mpt_remotetoken, sizeof(mp_tp->mpt_remotetoken));
mptcp_generate_idsn(remote_digest, SHA1_RESULTLEN,
- (caddr_t)&mp_tp->mpt_remote_idsn, sizeof (u_int64_t));
+ (caddr_t)&mp_tp->mpt_remote_idsn, sizeof(u_int64_t));
mp_tp->mpt_rcvnxt = mp_tp->mpt_remote_idsn + 1;
- return (0);
+ return 0;
}
static void
struct inpcb *inp = NULL;
inp = sotoinpcb(so);
- if (!inp)
+ if (!inp) {
return;
+ }
tp = intotcpcb(inp);
- if (!tp)
+ if (!tp) {
return;
+ }
- if (!(tp->t_mpflags & TMPF_RESET))
+ if (!(tp->t_mpflags & TMPF_RESET)) {
tp->t_mpflags |= TMPF_SEND_DFIN;
+ }
}
/*
{
struct mptcb *mp_tp;
- if (m == NULL)
+ if (m == NULL) {
return;
+ }
__IGNORE_WCASTALIGN(mp_tp = &((struct mpp_mtp *)mpp)->mtcb);
mpte_lock_assert_held(mp_tp->mpt_mpte);
uint64_t data_ack;
uint64_t dsn;
- if (!m || len == 0)
+ if (!m || len == 0) {
return;
+ }
while (m && len > 0) {
VERIFY(m->m_flags & M_PKTHDR);
}
mptcplog((LOG_DEBUG, "%s inferred ack up to %u\n", __func__, (uint32_t)data_ack),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
+ MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
mptcp_data_ack_rcvd(mp_tp, sototcpcb(so), data_ack);
}
len -= sub_len;
} else {
/* sub_len >= len */
- if (rewinding == 0)
+ if (rewinding == 0) {
m->m_pkthdr.mp_dsn += len;
+ }
if (!(m->m_pkthdr.pkt_flags & PKTF_MPSO)) {
- if (rewinding == 0)
+ if (rewinding == 0) {
m->m_pkthdr.mp_rseq += len;
+ }
}
mptcplog((LOG_DEBUG, "%s: dsn %u ssn %u len %d %d\n",
__func__, (u_int32_t)m->m_pkthdr.mp_dsn,
void
mptcp_output_getm_dsnmap64(struct socket *so, int off, uint64_t *dsn,
- uint32_t *relseq, uint16_t *data_len,
- uint16_t *dss_csum)
+ uint32_t *relseq, uint16_t *data_len,
+ uint16_t *dss_csum)
{
struct mbuf *m = so->so_snd.sb_mb;
int off_orig = off;
*dss_csum = m->m_pkthdr.mp_csum;
mptcplog((LOG_DEBUG, "%s: dsn %u ssn %u data_len %d off %d off_orig %d\n",
- __func__, (u_int32_t)(*dsn), *relseq, *data_len, off, off_orig),
- MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
+ __func__, (u_int32_t)(*dsn), *relseq, *data_len, off, off_orig),
+ MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
}
/*
m->m_pkthdr.mp_rseq = tp->t_rcv_map.mpt_sseq;
m->m_pkthdr.mp_rlen = tp->t_rcv_map.mpt_len;
m->m_pkthdr.mp_csum = tp->t_rcv_map.mpt_csum;
- if (tp->t_rcv_map.mpt_dfin)
+ if (tp->t_rcv_map.mpt_dfin) {
m->m_pkthdr.pkt_flags |= PKTF_MPTCP_DFIN;
+ }
m->m_pkthdr.pkt_flags |= PKTF_MPTCP;
tp->t_mpflags &= ~TMPF_EMBED_DSN;
tp->t_mpflags |= TMPF_MPTCP_ACKNOW;
} else if (tp->t_mpflags & TMPF_TCP_FALLBACK) {
- if (th->th_flags & TH_FIN)
+ if (th->th_flags & TH_FIN) {
m->m_pkthdr.pkt_flags |= PKTF_MPTCP_DFIN;
+ }
}
}
int
mptcp_adj_rmap(struct socket *so, struct mbuf *m, int off, uint64_t dsn,
- uint32_t rseq, uint16_t dlen)
+ uint32_t rseq, uint16_t dlen)
{
struct mptsub *mpts = sototcpcb(so)->t_mpsub;
- if (m_pktlen(m) == 0)
- return (0);
+ if (m_pktlen(m) == 0) {
+ return 0;
+ }
if ((m->m_flags & M_PKTHDR) && (m->m_pkthdr.pkt_flags & PKTF_MPTCP)) {
if (off && (dsn != m->m_pkthdr.mp_dsn ||
- rseq != m->m_pkthdr.mp_rseq ||
- dlen != m->m_pkthdr.mp_rlen)) {
+ rseq != m->m_pkthdr.mp_rseq ||
+ dlen != m->m_pkthdr.mp_rlen)) {
mptcplog((LOG_ERR, "%s: Received incorrect second mapping: %llu - %llu , %u - %u, %u - %u\n",
- __func__, dsn, m->m_pkthdr.mp_dsn,
- rseq, m->m_pkthdr.mp_rseq,
- dlen, m->m_pkthdr.mp_rlen),
- MPTCP_RECEIVER_DBG, MPTCP_LOGLVL_ERR);
- return (-1);
+ __func__, dsn, m->m_pkthdr.mp_dsn,
+ rseq, m->m_pkthdr.mp_rseq,
+ dlen, m->m_pkthdr.mp_rlen),
+ MPTCP_RECEIVER_DBG, MPTCP_LOGLVL_ERR);
+ return -1;
}
m->m_pkthdr.mp_dsn += off;
m->m_pkthdr.mp_rseq += off;
mpts->mpts_flags |= MPTSF_CONFIRMED;
- return (0);
+ return 0;
}
/*
struct tcpcb *tp = NULL;
struct inpcb *inp = sotoinpcb(so);
- if (inp == NULL)
+ if (inp == NULL) {
return;
+ }
tp = intotcpcb(inp);
- if (tp == NULL)
+ if (tp == NULL) {
return;
+ }
- if (so->so_flags & SOF_MP_TRYFAILOVER)
+ if (so->so_flags & SOF_MP_TRYFAILOVER) {
return;
+ }
so->so_flags |= SOF_MP_TRYFAILOVER;
soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MPFAILOVER));
int off = 0;
u_int32_t datalen;
- if (m == NULL)
- return (-1);
+ if (m == NULL) {
+ return -1;
+ }
while (m != NULL) {
VERIFY(m->m_pkthdr.pkt_flags & PKTF_MPTCP);
off = dsn_fail - dsn;
*tcp_seq = m->m_pkthdr.mp_rseq + off;
mptcplog((LOG_DEBUG, "%s: %llu %llu \n", __func__, dsn,
- dsn_fail), MPTCP_SENDER_DBG, MPTCP_LOGLVL_LOG);
- return (0);
+ dsn_fail), MPTCP_SENDER_DBG, MPTCP_LOGLVL_LOG);
+ return 0;
}
m = m->m_next;
mptcplog((LOG_ERR, "MPTCP Sender: "
"%s: %llu not found \n", __func__, dsn_fail),
MPTCP_SENDER_DBG, MPTCP_LOGLVL_LOG);
- return (-1);
+ return -1;
}
/*
uint16_t dss_csum;
mptcp_output_getm_dsnmap64(so, off, &mdss_dsn, &mdss_subflow_seq,
- &mdss_data_len, &dss_csum);
+ &mdss_data_len, &dss_csum);
/*
* We need to compute how much of the mapping still remains.
* When TFO is used, we are sending the mpts->mpts_iss although the relative
* seq has been set to 1 (while it should be 0).
*/
- if (tp->t_mpflags & TMPF_TFO_REQUEST)
+ if (tp->t_mpflags & TMPF_TFO_REQUEST) {
mdss_subflow_off--;
+ }
- if (off < mdss_subflow_off)
+ if (off < mdss_subflow_off) {
printf("%s off %d mdss_subflow_off %d mdss_subflow_seq %u iss %u suna %u\n", __func__,
- off, mdss_subflow_off, mdss_subflow_seq, mpts->mpts_iss, tp->snd_una);
+ off, mdss_subflow_off, mdss_subflow_seq, mpts->mpts_iss, tp->snd_una);
+ }
VERIFY(off >= mdss_subflow_off);
mptcplog((LOG_DEBUG, "%s dlen %u off %d sub_off %d sub_seq %u iss %u suna %u\n",
- __func__, mdss_data_len, off, mdss_subflow_off, mdss_subflow_seq,
- mpts->mpts_iss, tp->snd_una), MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
- return (mdss_data_len - (off - mdss_subflow_off));
+ __func__, mdss_data_len, off, mdss_subflow_off, mdss_subflow_seq,
+ mpts->mpts_iss, tp->snd_una), MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
+ return mdss_data_len - (off - mdss_subflow_off);
}
static uint32_t
struct tcpcb *tp = sototcpcb(mpts->mpts_socket);
if (!TCPS_HAVEESTABLISHED(tp->t_state) ||
- TCPS_HAVERCVDFIN2(tp->t_state))
+ TCPS_HAVERCVDFIN2(tp->t_state)) {
continue;
+ }
- if (tp->t_maxseg > maxseg)
+ if (tp->t_maxseg > maxseg) {
maxseg = tp->t_maxseg;
+ }
}
- return (maxseg);
+ return maxseg;
}
static uint8_t
struct tcpcb *tp = sototcpcb(mpts->mpts_socket);
if (!TCPS_HAVEESTABLISHED(tp->t_state) ||
- TCPS_HAVERCVDFIN2(tp->t_state))
+ TCPS_HAVERCVDFIN2(tp->t_state)) {
continue;
+ }
- if (tp->rcv_scale < rcvscale)
+ if (tp->rcv_scale < rcvscale) {
rcvscale = tp->rcv_scale;
+ }
}
- return (rcvscale);
+ return rcvscale;
}
/* Similar to tcp_sbrcv_reserve */
static void
mptcp_sbrcv_reserve(struct mptcb *mp_tp, struct sockbuf *sbrcv,
- u_int32_t newsize, u_int32_t idealsize)
+ u_int32_t newsize, u_int32_t idealsize)
{
uint8_t rcvscale = mptcp_get_rcvscale(mp_tp->mpt_mpte);
/* Set new socket buffer size */
if (newsize > sbrcv->sb_hiwat &&
- (sbreserve(sbrcv, newsize) == 1)) {
+ (sbreserve(sbrcv, newsize) == 1)) {
sbrcv->sb_idealsize = min(max(sbrcv->sb_idealsize,
(idealsize != 0) ? idealsize : newsize), tcp_autorcvbuf_max);
* window scale
*/
sbrcv->sb_idealsize = min(sbrcv->sb_idealsize,
- TCP_MAXWIN << rcvscale);
+ TCP_MAXWIN << rcvscale);
}
}
u_int32_t rcvbufinc = mptcp_get_maxseg(mp_tp->mpt_mpte) << 4;
u_int32_t rcvbuf = sb->sb_hiwat;
- if (tcp_recv_bg == 1 || IS_TCP_RECV_BG(mp_so))
+ if (tcp_recv_bg == 1 || IS_TCP_RECV_BG(mp_so)) {
return;
+ }
if (tcp_do_autorcvbuf == 1 &&
tcp_cansbgrow(sb) &&
rcvbuf = sb->sb_hiwat;
space = ((int32_t) imin((rcvbuf - sb->sb_cc),
- (sb->sb_mbmax - sb->sb_mbcnt)));
- if (space < 0)
+ (sb->sb_mbmax - sb->sb_mbcnt)));
+ if (space < 0) {
space = 0;
+ }
#if CONTENT_FILTER
/* Compensate for data being processed by content filters */
pending = cfil_sock_data_space(sb);
#endif /* CONTENT_FILTER */
- if (pending > space)
+ if (pending > space) {
space = 0;
- else
+ } else {
space -= pending;
+ }
- return (space);
+ return space;
}
/*
{
struct tcpcb *tp = NULL;
- if (so == NULL)
+ if (so == NULL) {
return;
+ }
tp = intotcpcb(sotoinpcb(so));
- if (tp == NULL)
+ if (tp == NULL) {
return;
+ }
DTRACE_MPTCP4(multipath__ready, struct socket *, so,
struct sockbuf *, &so->so_rcv, struct sockbuf *, &so->so_snd,
struct tcpcb *, tp);
- if (!(tp->t_mpflags & TMPF_MPTCP_TRUE))
+ if (!(tp->t_mpflags & TMPF_MPTCP_TRUE)) {
return;
+ }
- if (tp->t_mpflags & TMPF_MPTCP_READY)
+ if (tp->t_mpflags & TMPF_MPTCP_READY) {
return;
+ }
tp->t_mpflags &= ~TMPF_TCP_FALLBACK;
tp->t_mpflags |= TMPF_MPTCP_READY;
{
struct tcpcb *tp = NULL;
- if (so == NULL)
+ if (so == NULL) {
return;
+ }
tp = intotcpcb(sotoinpcb(so));
- if (tp == NULL)
+ if (tp == NULL) {
return;
+ }
DTRACE_MPTCP4(multipath__failed, struct socket *, so,
struct sockbuf *, &so->so_rcv, struct sockbuf *, &so->so_snd,
struct tcpcb *, tp);
- if (tp->t_mpflags & TMPF_TCP_FALLBACK)
+ if (tp->t_mpflags & TMPF_TCP_FALLBACK) {
return;
+ }
- tp->t_mpflags &= ~(TMPF_MPTCP_READY|TMPF_MPTCP_TRUE);
+ tp->t_mpflags &= ~(TMPF_MPTCP_READY | TMPF_MPTCP_TRUE);
tp->t_mpflags |= TMPF_TCP_FALLBACK;
soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MPSTATUS));
if (mp_tp->mpt_state >= MPTCPS_CLOSE_WAIT) {
ret = 0;
}
- return (ret);
+ return ret;
}
/*
int mss_lower = 0;
struct mptcb *mp_tp = tptomptp(tp);
-#define MPTCP_COMPUTE_LEN { \
- mss_lower = sizeof (struct mptcp_dss_ack_opt); \
- if (mp_tp->mpt_flags & MPTCPF_CHECKSUM) \
- mss_lower += 2; \
- else \
- /* adjust to 32-bit boundary + EOL */ \
- mss_lower += 2; \
+#define MPTCP_COMPUTE_LEN { \
+ mss_lower = sizeof (struct mptcp_dss_ack_opt); \
+ if (mp_tp->mpt_flags & MPTCPF_CHECKSUM) \
+ mss_lower += 2; \
+ else \
+ /* adjust to 32-bit boundary + EOL */ \
+ mss_lower += 2; \
}
- if (mp_tp == NULL)
- return (0);
+ if (mp_tp == NULL) {
+ return 0;
+ }
mpte_lock_assert_held(mp_tp->mpt_mpte);
}
}
- return (mss_lower);
+ return mss_lower;
}
/*
conninfo_mptcp_t mptcpci;
mptcp_flow_t *flows = NULL;
- if (req->newptr != USER_ADDR_NULL)
- return (EPERM);
+ if (req->newptr != USER_ADDR_NULL) {
+ return EPERM;
+ }
lck_mtx_lock(&mtcbinfo.mppi_lock);
if (req->oldptr == USER_ADDR_NULL) {
size_t n = mtcbinfo.mppi_count;
lck_mtx_unlock(&mtcbinfo.mppi_lock);
- req->oldidx = (n + n/8) * sizeof(conninfo_mptcp_t) +
- 4 * (n + n/8) * sizeof(mptcp_flow_t);
- return (0);
+ req->oldidx = (n + n / 8) * sizeof(conninfo_mptcp_t) +
+ 4 * (n + n / 8) * sizeof(mptcp_flow_t);
+ return 0;
}
TAILQ_FOREACH(mpp, &mtcbinfo.mppi_pcbs, mpp_entry) {
flows = NULL;
if (flows) {
error = SYSCTL_OUT(req, flows, len);
FREE(flows, M_TEMP);
- if (error)
+ if (error) {
break;
+ }
}
}
lck_mtx_unlock(&mtcbinfo.mppi_lock);
- return (error);
+ return error;
}
SYSCTL_PROC(_net_inet_mptcp, OID_AUTO, pcblist, CTLFLAG_RD | CTLFLAG_LOCKED,
struct mptcb *mp_tp = NULL;
int error = 0;
- if (mpte->mpte_mppcb->mpp_flags & MPP_ATTACHED)
+ if (mpte->mpte_mppcb->mpp_flags & MPP_ATTACHED) {
mp_tp = mpte->mpte_mptcb;
+ }
- if (mp_tp)
+ if (mp_tp) {
mp_tp->mpt_notsent_lowat = optval;
- else
+ } else {
error = EINVAL;
+ }
- return (error);
+ return error;
}
u_int32_t
{
struct mptcb *mp_tp = NULL;
- if (mpte->mpte_mppcb->mpp_flags & MPP_ATTACHED)
+ if (mpte->mpte_mppcb->mpp_flags & MPP_ATTACHED) {
mp_tp = mpte->mpte_mptcb;
+ }
- if (mp_tp)
- return (mp_tp->mpt_notsent_lowat);
- else
- return (0);
+ if (mp_tp) {
+ return mp_tp->mpt_notsent_lowat;
+ } else {
+ return 0;
+ }
}
int
mpp = mpsotomppcb(so);
if (mpp == NULL || mpp->mpp_state == MPPCB_STATE_DEAD) {
- return (0);
+ return 0;
}
mpte = mptompte(mpp);
"lowat %d notsent %d actual %d \n",
mp_tp->mpt_notsent_lowat, notsent,
notsent - (mp_tp->mpt_sndnxt - mp_tp->mpt_snduna)),
- MPTCP_SENDER_DBG , MPTCP_LOGLVL_VERBOSE);
- return (1);
+ MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
+ return 1;
}
/* When Nagle's algorithm is not disabled, it is better
struct tcpcb *tp = intotcpcb(sotoinpcb(subf_so));
notsent = so->so_snd.sb_cc -
- (tp->snd_nxt - tp->snd_una);
+ (tp->snd_nxt - tp->snd_una);
if ((tp->t_flags & TF_NODELAY) == 0 &&
notsent > 0 && (notsent <= (int)tp->t_maxseg)) {
mptcplog((LOG_DEBUG, "MPTCP Sender: lowat %d notsent %d"
" nodelay false \n",
mp_tp->mpt_notsent_lowat, notsent),
- MPTCP_SENDER_DBG , MPTCP_LOGLVL_VERBOSE);
- return (retval);
+ MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
+ return retval;
}
}
- return (0);
+ return 0;
}
/* Using Symptoms Advisory to detect poor WiFi or poor Cell */
static errno_t
mptcp_symptoms_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac,
- void **unitinfo)
+ void **unitinfo)
{
#pragma unused(kctlref, sac, unitinfo)
- if (OSIncrementAtomic(&mptcp_kern_skt_inuse) > 0)
- mptcplog((LOG_ERR, "%s MPTCP kernel-control socket already open!", __func__),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
+ if (OSIncrementAtomic(&mptcp_kern_skt_inuse) > 0) {
+ os_log_error(mptcp_log_handle, "%s MPTCP kernel-control socket for Symptoms already open!", __func__);
+ }
mptcp_kern_skt_unit = sac->sc_unit;
- return (0);
+ return 0;
}
static void
mp_so = mpp->mpp_socket;
if (mp_so->so_flags & SOF_DELEGATED &&
- uuid_compare(uuid, mp_so->e_uuid))
+ uuid_compare(uuid, mp_so->e_uuid)) {
goto next;
- else if (!(mp_so->so_flags & SOF_DELEGATED) &&
- uuid_compare(uuid, mp_so->last_uuid))
+ } else if (!(mp_so->so_flags & SOF_DELEGATED) &&
+ uuid_compare(uuid, mp_so->last_uuid)) {
goto next;
+ }
+
+ os_log(mptcp_log_handle, "%s - %lx: Got allowance for useApp\n",
+ __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte));
mpte->mpte_flags |= MPTE_ACCESS_GRANTED;
mp_so = mpp->mpp_socket;
/* Only handover-mode is purely driven by Symptom's Wi-Fi status */
- if (mpte->mpte_svctype != MPTCP_SVCTYPE_HANDOVER)
+ if (mpte->mpte_svctype != MPTCP_SVCTYPE_HANDOVER) {
goto next;
+ }
mptcp_check_subflows_and_add(mpte);
mptcp_check_subflows_and_remove(mpte);
int pid, prio, err;
if (mptcp_kern_skt_unit == 0) {
- mptcplog((LOG_ERR, "%s skt_unit is still 0\n", __func__),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
+ os_log_error(mptcp_log_handle, "%s skt_unit is still 0\n", __func__);
return;
}
mp_so = mptetoso(mpte);
- if (mp_so->so_flags & SOF_DELEGATED)
+ if (mp_so->so_flags & SOF_DELEGATED) {
pid = mp_so->e_pid;
- else
+ } else {
pid = mp_so->last_pid;
+ }
p = proc_find(pid);
if (p == PROC_NULL) {
- mptcplog((LOG_ERR, "%s Couldn't find proc for pid %u\n", __func__,
- pid), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
+ os_log_error(mptcp_log_handle, "%s Couldn't find proc for pid %u\n", __func__, pid);
return;
}
ask.cmd = MPTCP_SYMPTOMS_ASK_UUID;
- if (mp_so->so_flags & SOF_DELEGATED)
+ if (mp_so->so_flags & SOF_DELEGATED) {
uuid_copy(ask.uuid, mp_so->e_uuid);
- else
+ } else {
uuid_copy(ask.uuid, mp_so->last_uuid);
+ }
prio = proc_get_effective_task_policy(proc_task(p), TASK_POLICY_ROLE);
- if (prio == TASK_BACKGROUND_APPLICATION)
+ if (prio == TASK_BACKGROUND_APPLICATION) {
ask.priority = MPTCP_SYMPTOMS_BACKGROUND;
- else if (prio == TASK_FOREGROUND_APPLICATION)
+ } else if (prio == TASK_FOREGROUND_APPLICATION) {
ask.priority = MPTCP_SYMPTOMS_FOREGROUND;
- else
+ } else {
ask.priority = MPTCP_SYMPTOMS_UNKNOWN;
-
- mptcplog((LOG_DEBUG, "%s ask symptoms about pid %u, prio %u\n", __func__,
- pid, ask.priority), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
+ }
err = ctl_enqueuedata(mptcp_kern_ctrl_ref, mptcp_kern_skt_unit,
- &ask, sizeof(ask), CTL_DATA_EOR);
- if (err)
- mptcplog((LOG_ERR, "%s ctl_enqueuedata failed %d\n", __func__, err),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
+ &ask, sizeof(ask), CTL_DATA_EOR);
+
+ os_log_debug(mptcp_log_handle, "%s asked symptoms about pid %u, prio %u, err %d\n",
+ __func__, pid, ask.priority, err);
+
proc_rele(p);
}
static errno_t
mptcp_symptoms_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t kcunit,
- void *unitinfo)
+ void *unitinfo)
{
#pragma unused(kctlref, kcunit, unitinfo)
OSDecrementAtomic(&mptcp_kern_skt_inuse);
- return (0);
+ return 0;
}
static errno_t
mptcp_symptoms_ctl_send(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo,
- mbuf_t m, int flags)
+ mbuf_t m, int flags)
{
#pragma unused(kctlref, unitinfo, flags)
- symptoms_advisory_t *sa = NULL;
+ symptoms_advisory_t *sa = NULL;
- if (kcunit != mptcp_kern_skt_unit)
- mptcplog((LOG_ERR, "%s kcunit %u is different from expected one %u\n",
- __func__, kcunit, mptcp_kern_skt_unit),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
+ if (kcunit != mptcp_kern_skt_unit) {
+ os_log_error(mptcp_log_handle, "%s kcunit %u is different from expected one %u\n",
+ __func__, kcunit, mptcp_kern_skt_unit);
+ }
if (mbuf_pkthdr_len(m) < sizeof(*sa)) {
mbuf_freem(m);
- return (EINVAL);
+ return EINVAL;
+ }
+
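+ /*
+ * mbuf_pkthdr_len() covers the whole chain; also require the
+ * advisory to be contiguous in the first mbuf before pointing
+ * sa at its data.
+ */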
+ if (mbuf_len(m) < sizeof(*sa)) {
+ os_log_error(mptcp_log_handle, "%s: mbuf is %lu but need %lu\n",
+ __func__, mbuf_len(m), sizeof(*sa));
+ mbuf_freem(m);
+ return EINVAL;
}
- if (mbuf_len(m) >= sizeof(*sa))
- sa = mbuf_data(m);
- else
- return (EINVAL);
+ sa = mbuf_data(m);
if (sa->sa_nwk_status != SYMPTOMS_ADVISORY_NOCOMMENT &&
sa->sa_nwk_status != SYMPTOMS_ADVISORY_USEAPP) {
if ((sa->sa_wifi_status &
(SYMPTOMS_ADVISORY_WIFI_BAD | SYMPTOMS_ADVISORY_WIFI_OK)) !=
- (SYMPTOMS_ADVISORY_WIFI_BAD | SYMPTOMS_ADVISORY_WIFI_OK))
+ (SYMPTOMS_ADVISORY_WIFI_BAD | SYMPTOMS_ADVISORY_WIFI_OK)) {
mptcp_advisory.sa_wifi_status = sa->sa_wifi_status;
+ }
- if (old_wifi_status != mptcp_advisory.sa_wifi_status)
+ if (old_wifi_status != mptcp_advisory.sa_wifi_status) {
mptcp_wifi_status_changed();
+ }
} else if (sa->sa_nwk_status == SYMPTOMS_ADVISORY_NOCOMMENT) {
mptcplog((LOG_DEBUG, "%s: NOCOMMENT wifi %d\n", __func__,
mptcp_advisory.sa_wifi_status),
MPTCP_EVENTS_DBG, MPTCP_LOGLVL_VERBOSE);
} else if (sa->sa_nwk_status == SYMPTOMS_ADVISORY_USEAPP) {
uuid_t uuid;
+ errno_t err;
- mptcplog((LOG_DEBUG, "%s Got response about useApp\n", __func__),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
+ if (mbuf_len(m) < sizeof(uuid_t) + sizeof(*sa)) {
+ os_log_error(mptcp_log_handle, "%s: mbuf is %lu but need %lu\n",
+ __func__, mbuf_len(m), sizeof(uuid_t) + sizeof(*sa));
+ mbuf_freem(m);
+ return EINVAL;
+ }
- uuid_copy(uuid, (unsigned char *)(sa + 1));
+ err = mbuf_copydata(m, sizeof(*sa), sizeof(uuid_t), uuid);
+ if (err) {
+ os_log_error(mptcp_log_handle, "%s: mbuf_copydata returned %d\n", __func__, err);
+ mbuf_freem(m);
+ return err;
+ }
mptcp_allow_uuid(uuid);
}
- return (0);
+ mbuf_freem(m);
+ return 0;
}
void
(void)ctl_register(&mptcp_kern_ctl, &mptcp_kern_ctrl_ref);
}
+/*
+ * Three return-values:
+ * 1 : WiFi is bad
+ * 0 : WiFi is good
+ * -1 : WiFi-state is unknown, use subflow-only heuristics
+ */
int
-mptcp_is_wifi_unusable(void)
+mptcp_is_wifi_unusable(struct mptses *mpte)
{
- /* a false return val indicates there is no info or wifi is ok */
- return (mptcp_advisory.sa_wifi_status & SYMPTOMS_ADVISORY_WIFI_BAD);
+ if (mpte->mpte_flags & MPTE_FIRSTPARTY) {
+ if (mptcp_advisory.sa_wifi_status) {
+ return (mptcp_advisory.sa_wifi_status & SYMPTOMS_ADVISORY_WIFI_BAD) ? 1 : 0;
+ }
+
+ /*
+ * If it's a first-party app and we don't have any info
+ * about the Wi-Fi state, let's be pessimistic.
+ */
+ return -1;
+ }
+
+ return (mptcp_advisory.sa_wifi_status & SYMPTOMS_ADVISORY_WIFI_BAD) ? 1 : 0;
+}
+
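+/*
+ * Subflow-only heuristic referenced above: a subflow is considered bad
+ * once its retransmission shift crosses the threshold (doubled in
+ * handover mode) while data is still pending at the MPTCP level.
+ */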
+boolean_t
+mptcp_subflow_is_bad(struct mptses *mpte, struct mptsub *mpts)
+{
+ struct tcpcb *tp = sototcpcb(mpts->mpts_socket);
+ int fail_thresh = mptcp_fail_thresh;
+
+ if (mpte->mpte_svctype == MPTCP_SVCTYPE_HANDOVER) {
+ fail_thresh *= 2;
+ }
+
+ return tp->t_rxtshift >= fail_thresh &&
+ (mptetoso(mpte)->so_snd.sb_cc || mpte->mpte_reinjectq);
}
/* If TFO data is successfully acked, it must be dropped from the mptcp so */
sbdrop(&mp_so->so_snd, (int)mp_droplen);
}
mptcplog((LOG_DEBUG, "%s: mp_so 0x%llx cid %d TFO tcp len %d mptcp len %d\n",
- __func__, (u_int64_t)VM_KERNEL_ADDRPERM(mp_so),
- mpts->mpts_connid, tcp_droplen, mp_droplen),
- MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
+ __func__, (u_int64_t)VM_KERNEL_ADDRPERM(mp_so),
+ mpts->mpts_connid, tcp_droplen, mp_droplen),
+ MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE);
}
}
rv = 1;
}
mp_tp->mpt_reassqlen = 0;
- return (rv);
+ return rv;
}
static int
memset(&ev_msg, 0, sizeof(ev_msg));
- ev_msg.vendor_code = KEV_VENDOR_APPLE;
- ev_msg.kev_class = KEV_NETWORK_CLASS;
- ev_msg.kev_subclass = KEV_MPTCP_SUBCLASS;
- ev_msg.event_code = event_code;
+ ev_msg.vendor_code = KEV_VENDOR_APPLE;
+ ev_msg.kev_class = KEV_NETWORK_CLASS;
+ ev_msg.kev_subclass = KEV_MPTCP_SUBCLASS;
+ ev_msg.event_code = event_code;
event_data.value = value;
- ev_msg.dv[0].data_ptr = &event_data;
+ ev_msg.dv[0].data_ptr = &event_data;
ev_msg.dv[0].data_length = sizeof(event_data);
return kev_post_msg(&ev_msg);
int error;
/* First-party apps (Siri) don't flip the cellicon */
- if (mpte->mpte_flags & MPTE_FIRSTPARTY)
+ if (mpte->mpte_flags & MPTE_FIRSTPARTY) {
return;
+ }
/* Remember the last time we set the cellicon (see mptcp_unset_cellicon) */
mptcp_last_cellicon_set = tcp_now;
/* If cellicon is already set, get out of here! */
- if (OSTestAndSet(7, &mptcp_cellicon_is_set))
+ if (OSTestAndSet(7, &mptcp_cellicon_is_set)) {
return;
+ }
error = mptcp_post_event(KEV_MPTCP_CELLUSE, 1);
- if (error)
+ if (error) {
mptcplog((LOG_ERR, "%s: Setting cellicon failed with %d\n",
- __func__, error), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
- else
+ __func__, error), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
+ } else {
mptcplog((LOG_DEBUG, "%s successfully set the cellicon\n", __func__),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
+ MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
+ }
}
void
int error;
/* If cellicon is already unset, get out of here! */
- if (OSTestAndClear(7, &mptcp_cellicon_is_set))
+ if (OSTestAndClear(7, &mptcp_cellicon_is_set)) {
return;
+ }
/*
* If during the past MPTCP_CELLICON_TOGGLE_RATE seconds we didn't
* it again.
*/
if (TSTMP_GT(mptcp_last_cellicon_set + MPTCP_CELLICON_TOGGLE_RATE,
- tcp_now)) {
+ tcp_now)) {
OSTestAndSet(7, &mptcp_cellicon_is_set);
return;
}
error = mptcp_post_event(KEV_MPTCP_CELLUSE, 0);
- if (error)
+ if (error) {
mptcplog((LOG_ERR, "%s: Unsetting cellicon failed with %d\n",
- __func__, error), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
- else
+ __func__, error), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR);
+ } else {
mptcplog((LOG_DEBUG, "%s successfully unset the cellicon\n", __func__),
- MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
+ MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE);
+ }
}
void
struct socket *so;
inp = tp->t_inpcb;
- if (inp == NULL)
+ if (inp == NULL) {
return;
+ }
so = inp->inp_socket;
- if (so == NULL)
+ if (so == NULL) {
return;
+ }
- if (!(so->so_flags & SOF_MP_SUBFLOW))
+ if (!(so->so_flags & SOF_MP_SUBFLOW)) {
return;
+ }
mpts = tp->t_mpsub;
mpts->mpts_flags &= ~MPTSF_READ_STALL;
}
-