+ return TRUE;
+ }
+}
+
+void
+mptcp_handle_deferred_upcalls(struct mppcb *mpp, uint32_t flag)
+{
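+ /*
+ * Clear the caller's deferral flag and, once no deferral remains,
+ * replay the work that was recorded while upcalls were held off.
+ */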
+ VERIFY(mpp->mpp_flags & flag);
+ mpp->mpp_flags &= ~flag;
+
+ if (mptcp_should_defer_upcall(mpp)) {
+ return;
+ }
+
+ if (mpp->mpp_flags & MPP_SHOULD_WORKLOOP) {
+ mpp->mpp_flags &= ~MPP_SHOULD_WORKLOOP;
+
+ mptcp_subflow_workloop(mpp->mpp_pcbe);
+ }
+
+ if (mpp->mpp_flags & MPP_SHOULD_RWAKEUP) {
+ mpp->mpp_flags &= ~MPP_SHOULD_RWAKEUP;
+
+ sorwakeup(mpp->mpp_socket);
+ }
+
+ if (mpp->mpp_flags & MPP_SHOULD_WWAKEUP) {
+ mpp->mpp_flags &= ~MPP_SHOULD_WWAKEUP;
+
+ sowwakeup(mpp->mpp_socket);
+ }
+}
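+
+/*
+ * Illustrative sketch, not part of this change: how a wakeup path would
+ * pair with mptcp_handle_deferred_upcalls() above. While upcalls must be
+ * deferred, the intent is recorded in mpp_flags and replayed later by the
+ * handler. The helper name below is hypothetical.
+ */
+static inline void
+mptcp_example_rwakeup(struct mppcb *mpp)
+{
+ if (mptcp_should_defer_upcall(mpp)) {
+ /* Defer: remember that a read wakeup is owed. */
+ mpp->mpp_flags |= MPP_SHOULD_RWAKEUP;
+ return;
+ }
+
+ /* No deferral in effect: deliver the read wakeup now. */
+ sorwakeup(mpp->mpp_socket);
+}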
+
+static void
+mptcp_reset_itfinfo(struct mpt_itf_info *info)
+{
+ memset(info, 0, sizeof(*info));
+}
+
+void
+mptcp_session_necp_cb(void *handle, int action, uint32_t interface_index,
+ uint32_t necp_flags, __unused bool *viable)
+{
+ boolean_t has_v4 = !!(necp_flags & NECP_CLIENT_RESULT_FLAG_HAS_IPV4);
+ boolean_t has_v6 = !!(necp_flags & NECP_CLIENT_RESULT_FLAG_HAS_IPV6);
+ boolean_t has_nat64 = !!(necp_flags & NECP_CLIENT_RESULT_FLAG_HAS_NAT64);
+ boolean_t low_power = !!(necp_flags & NECP_CLIENT_RESULT_FLAG_INTERFACE_LOW_POWER);
+ struct mppcb *mp = (struct mppcb *)handle;
+ struct mptses *mpte = mptompte(mp);
+ struct socket *mp_so;
+ struct mptcb *mp_tp;
+ uint32_t i, ifindex;
+ struct ifnet *ifp;
+ int locked = 0;
+
+ ifindex = interface_index;
+ VERIFY(ifindex != IFSCOPE_NONE);
+
+ /* About to be garbage-collected (see note about MPTCP/NECP interactions) */
+ if (mp->mpp_socket->so_usecount == 0) {
+ return;
+ }
+
+ mp_so = mptetoso(mpte);
+
+ if (action != NECP_CLIENT_CBACTION_INITIAL) {
+ socket_lock(mp_so, 1);
+ locked = 1;
+
+ /* Check again, because it might have changed while waiting */
+ if (mp->mpp_socket->so_usecount == 0) {
+ goto out;
+ }
+ }
+
+ socket_lock_assert_owned(mp_so);
+
+ mp_tp = mpte->mpte_mptcb;
+
+ ifnet_head_lock_shared();
+ ifp = ifindex2ifnet[ifindex];
+ ifnet_head_done();
+
+ os_log(mptcp_log_handle, "%s - %lx: action: %u ifindex %u delegated to %u usecount %u mpt_flags %#x state %u v4 %u v6 %u nat64 %u power %u\n",
+ __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte), action, ifindex,
+ ifp && ifp->if_delegated.ifp ? ifp->if_delegated.ifp->if_index : IFSCOPE_NONE,
+ mp->mpp_socket->so_usecount, mp_tp->mpt_flags, mp_tp->mpt_state,
+ has_v4, has_v6, has_nat64, low_power);
+
+ /* Nothing to do on sockets that have fallen back to TCP */
+ if (mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP) {
+ goto out;
+ }
+
+ /*
+ * When the interface goes into low-power mode we don't want to
+ * establish new subflows on it. Thus, mark it internally as non-viable.
+ */
+ if (low_power) {
+ action = NECP_CLIENT_CBACTION_NONVIABLE;
+ }
+
+ if (action == NECP_CLIENT_CBACTION_NONVIABLE) {
+ for (i = 0; i < mpte->mpte_itfinfo_size; i++) {
+ if (mpte->mpte_itfinfo[i].ifindex == IFSCOPE_NONE) {
+ continue;
+ }
+
+ if (mpte->mpte_itfinfo[i].ifindex == ifindex) {
+ mptcp_reset_itfinfo(&mpte->mpte_itfinfo[i]);
+ }
+ }
+
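+ /* Re-evaluate subflow creation across the remaining interfaces. */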
+ mptcp_sched_create_subflows(mpte);
+ } else if (action == NECP_CLIENT_CBACTION_VIABLE ||
+ action == NECP_CLIENT_CBACTION_INITIAL) {
+ int found_slot = 0, slot_index = -1;
+ struct sockaddr *dst;
+
+ if (ifp == NULL) {
+ goto out;
+ }
+
+ if (IFNET_IS_COMPANION_LINK(ifp)) {
+ goto out;
+ }
+
+ if (IFNET_IS_EXPENSIVE(ifp) &&
+ (mp_so->so_restrictions & SO_RESTRICT_DENY_EXPENSIVE)) {
+ goto out;
+ }
+
+ if (IFNET_IS_CONSTRAINED(ifp) &&
+ (mp_so->so_restrictions & SO_RESTRICT_DENY_CONSTRAINED)) {
+ goto out;
+ }
+
+ if (IFNET_IS_CELLULAR(ifp) &&
+ (mp_so->so_restrictions & SO_RESTRICT_DENY_CELLULAR)) {
+ goto out;
+ }
+
+ if (IS_INTF_CLAT46(ifp)) {
+ has_v4 = FALSE;
+ }
+
+ /* Look for the slot in which to store or update the interface info. */
+ for (i = 0; i < mpte->mpte_itfinfo_size; i++) {
+ /* Remember a free slot in case this is a new interface */
+ if (mpte->mpte_itfinfo[i].ifindex == 0) {
+ found_slot = 1;
+ slot_index = i;
+ }
+
+ /*
+ * The interface is already in our array. Check if we
+ * need to update it.
+ */
+ if (mpte->mpte_itfinfo[i].ifindex == ifindex &&
+ (mpte->mpte_itfinfo[i].has_v4_conn != has_v4 ||
+ mpte->mpte_itfinfo[i].has_v6_conn != has_v6 ||
+ mpte->mpte_itfinfo[i].has_nat64_conn != has_nat64)) {
+ found_slot = 1;
+ slot_index = i;
+ break;
+ }
+
+ if (mpte->mpte_itfinfo[i].ifindex == ifindex) {
+ /*
+ * Already present and up to date, nothing
+ * to do.
+ */
+ goto out;
+ }
+ }
+
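+ /*
+ * A v6-only interface without NAT64 cannot reach an IPv4
+ * destination: record the address flags if a slot exists, but
+ * don't schedule new subflows on it.
+ */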
+ dst = mptcp_get_session_dst(mpte, has_v6, has_v4);
+ if (dst && dst->sa_family == AF_INET &&
+ has_v6 && !has_nat64 && !has_v4) {
+ if (found_slot) {
+ mpte->mpte_itfinfo[slot_index].ifindex = ifindex;
+ mpte->mpte_itfinfo[slot_index].has_v4_conn = has_v4;
+ mpte->mpte_itfinfo[slot_index].has_v6_conn = has_v6;
+ mpte->mpte_itfinfo[slot_index].has_nat64_conn = has_nat64;
+ }
+ goto out;
+ }
+
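+ /*
+ * No free or matching slot: grow the interface-info array by
+ * doubling and keep the existing entries.
+ */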
+ if (found_slot == 0) {
+ int new_size = mpte->mpte_itfinfo_size * 2;
+ struct mpt_itf_info *info = _MALLOC(sizeof(*info) * new_size, M_TEMP, M_ZERO);
+
+ if (info == NULL) {
+ os_log_error(mptcp_log_handle, "%s - %lx: malloc failed for %u\n",
+ __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte), new_size);
+ goto out;
+ }
+
+ memcpy(info, mpte->mpte_itfinfo, mpte->mpte_itfinfo_size * sizeof(*info));
+
+ if (mpte->mpte_itfinfo_size > MPTE_ITFINFO_SIZE) {
+ _FREE(mpte->mpte_itfinfo, M_TEMP);
+ }
+
+ /* The array grew; the first slot past the old size is free */
+ slot_index = mpte->mpte_itfinfo_size;
+
+ mpte->mpte_itfinfo = info;
+ mpte->mpte_itfinfo_size = new_size;
+ }
+
+ VERIFY(slot_index >= 0 && slot_index < (int)mpte->mpte_itfinfo_size);
+ mpte->mpte_itfinfo[slot_index].ifindex = ifindex;
+ mpte->mpte_itfinfo[slot_index].has_v4_conn = has_v4;
+ mpte->mpte_itfinfo[slot_index].has_v6_conn = has_v6;
+ mpte->mpte_itfinfo[slot_index].has_nat64_conn = has_nat64;
+
+ mptcp_sched_create_subflows(mpte);
+ }
+
+out:
+ if (locked) {
+ socket_unlock(mp_so, 1);
+ }
+}
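+
+/*
+ * Illustrative sketch, not part of this change: the slot-search and
+ * grow-by-doubling pattern from mptcp_session_necp_cb() above, factored
+ * into a hypothetical helper. Returns a usable slot index, or -1 if the
+ * allocation fails.
+ */
+static int
+mptcp_example_itfinfo_slot(struct mptses *mpte, uint32_t ifindex)
+{
+ uint32_t i;
+
+ for (i = 0; i < mpte->mpte_itfinfo_size; i++) {
+ /* Reuse a free slot or the entry already holding this ifindex. */
+ if (mpte->mpte_itfinfo[i].ifindex == 0 ||
+ mpte->mpte_itfinfo[i].ifindex == ifindex) {
+ return (int)i;
+ }
+ }
+
+ /* Array is full: double it, copy the old entries, free the old one. */
+ int new_size = mpte->mpte_itfinfo_size * 2;
+ struct mpt_itf_info *info = _MALLOC(sizeof(*info) * new_size, M_TEMP, M_ZERO);
+ if (info == NULL) {
+ return -1;
+ }
+ memcpy(info, mpte->mpte_itfinfo, mpte->mpte_itfinfo_size * sizeof(*info));
+ if (mpte->mpte_itfinfo_size > MPTE_ITFINFO_SIZE) {
+ _FREE(mpte->mpte_itfinfo, M_TEMP);
+ }
+ i = mpte->mpte_itfinfo_size;
+ mpte->mpte_itfinfo = info;
+ mpte->mpte_itfinfo_size = new_size;
+ return (int)i;
+}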
+
+void
+mptcp_set_restrictions(struct socket *mp_so)
+{
+ struct mptses *mpte = mpsotompte(mp_so);
+ uint32_t i;
+
+ socket_lock_assert_owned(mp_so);
+
+ ifnet_head_lock_shared();
+
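+ /* The shared head lock keeps ifindex2ifnet stable while we walk it. */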
+ for (i = 0; i < mpte->mpte_itfinfo_size; i++) {
+ struct mpt_itf_info *info = &mpte->mpte_itfinfo[i];
+ uint32_t ifindex = info->ifindex;
+ struct ifnet *ifp;
+
+ if (ifindex == IFSCOPE_NONE) {
+ continue;
+ }
+
+ ifp = ifindex2ifnet[ifindex];
+ if (ifp == NULL) {
+ continue;
+ }
+
+ if (IFNET_IS_EXPENSIVE(ifp) &&
+ (mp_so->so_restrictions & SO_RESTRICT_DENY_EXPENSIVE)) {
+ info->ifindex = IFSCOPE_NONE;
+ }
+
+ if (IFNET_IS_CONSTRAINED(ifp) &&
+ (mp_so->so_restrictions & SO_RESTRICT_DENY_CONSTRAINED)) {
+ info->ifindex = IFSCOPE_NONE;
+ }
+
+ if (IFNET_IS_CELLULAR(ifp) &&
+ (mp_so->so_restrictions & SO_RESTRICT_DENY_CELLULAR)) {
+ info->ifindex = IFSCOPE_NONE;
+ }