+ ROUTE_RELEASE(&inp->inp_route);
+}
+
+/*
+ * Handler for clearing IP_NO_IFT_CELLULAR/IPV6_NO_IFT_CELLULAR socket option,
+ * as well as for clearing PROC_UUID_NO_CELLULAR policy.
+ */
+void
+inp_clear_nocellular(struct inpcb *inp)
+{
+ struct socket *so = inp->inp_socket;
+
+ /*
+ * SO_RESTRICT_DENY_CELLULAR socket restriction issued on the socket
+ * has a higher precedence than INP_NO_IFT_CELLULAR. Clear the flag
+ * if and only if the socket is unrestricted.
+ */
+ if (so != NULL && !(so->so_restrictions & SO_RESTRICT_DENY_CELLULAR)) {
+ inp->inp_flags &= ~INP_NO_IFT_CELLULAR;
+
+ /* Blow away any cached route in the PCB */
+ ROUTE_RELEASE(&inp->inp_route);
+ }
+}
+
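+/*
+ * Mark this PCB as restricted from using expensive interfaces.
+ */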
+void
+inp_set_noexpensive(struct inpcb *inp)
+{
+ inp->inp_flags2 |= INP2_NO_IFF_EXPENSIVE;
+
+ /* Blow away any cached route in the PCB */
+ ROUTE_RELEASE(&inp->inp_route);
+}
+
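+/*
+ * Mark this PCB as restricted from using constrained interfaces.
+ */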
+void
+inp_set_noconstrained(struct inpcb *inp)
+{
+ inp->inp_flags2 |= INP2_NO_IFF_CONSTRAINED;
+
+ /* Blow away any cached route in the PCB */
+ ROUTE_RELEASE(&inp->inp_route);
+}
+
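+/*
+ * Handlers for the AWDL restriction: setting INP2_AWDL_UNRESTRICTED
+ * allows this PCB to use restricted AWDL interfaces.
+ */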
+void
+inp_set_awdl_unrestricted(struct inpcb *inp)
+{
+ inp->inp_flags2 |= INP2_AWDL_UNRESTRICTED;
+
+ /* Blow away any cached route in the PCB */
+ ROUTE_RELEASE(&inp->inp_route);
+}
+
+boolean_t
+inp_get_awdl_unrestricted(struct inpcb *inp)
+{
+ return (inp->inp_flags2 & INP2_AWDL_UNRESTRICTED) ? TRUE : FALSE;
+}
+
+void
+inp_clear_awdl_unrestricted(struct inpcb *inp)
+{
+ inp->inp_flags2 &= ~INP2_AWDL_UNRESTRICTED;
+
+ /* Blow away any cached route in the PCB */
+ ROUTE_RELEASE(&inp->inp_route);
+}
+
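+/*
+ * Handlers for the co-processor interface restriction: setting
+ * INP2_INTCOPROC_ALLOWED permits this PCB to use co-processor
+ * (INTCOPROC) interfaces.
+ */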
+void
+inp_set_intcoproc_allowed(struct inpcb *inp)
+{
+ inp->inp_flags2 |= INP2_INTCOPROC_ALLOWED;
+
+ /* Blow away any cached route in the PCB */
+ ROUTE_RELEASE(&inp->inp_route);
+}
+
+boolean_t
+inp_get_intcoproc_allowed(struct inpcb *inp)
+{
+ return (inp->inp_flags2 & INP2_INTCOPROC_ALLOWED) ? TRUE : FALSE;
+}
+
+void
+inp_clear_intcoproc_allowed(struct inpcb *inp)
+{
+ inp->inp_flags2 &= ~INP2_INTCOPROC_ALLOWED;
+
+ /* Blow away any cached route in the PCB */
+ ROUTE_RELEASE(&inp->inp_route);
+}
+
+#if NECP
+/*
+ * Called when PROC_UUID_NECP_APP_POLICY is set.
+ */
+void
+inp_set_want_app_policy(struct inpcb *inp)
+{
+ inp->inp_flags2 |= INP2_WANT_APP_POLICY;
+}
+
+/*
+ * Called when PROC_UUID_NECP_APP_POLICY is cleared.
+ */
+void
+inp_clear_want_app_policy(struct inpcb *inp)
+{
+ inp->inp_flags2 &= ~INP2_WANT_APP_POLICY;
+}
+#endif /* NECP */
+
+/*
+ * Calculate the flow hash for an inp.  Interfaces use this hash to
+ * identify a flow; when an interface issues a flow control advisory,
+ * the hash identifies the affected flow.
+ */
+u_int32_t
+inp_calc_flowhash(struct inpcb *inp)
+{
+ struct inp_flowhash_key fh __attribute__((aligned(8)));
+ u_int32_t flowhash = 0;
+ struct inpcb *tmp_inp = NULL;
+
+ if (inp_hash_seed == 0) {
+ inp_hash_seed = RandomULong();
+ }
+
+ bzero(&fh, sizeof(fh));
+
+ bcopy(&inp->inp_dependladdr, &fh.infh_laddr, sizeof(fh.infh_laddr));
+ bcopy(&inp->inp_dependfaddr, &fh.infh_faddr, sizeof(fh.infh_faddr));
+
+ fh.infh_lport = inp->inp_lport;
+ fh.infh_fport = inp->inp_fport;
+ fh.infh_af = (inp->inp_vflag & INP_IPV6) ? AF_INET6 : AF_INET;
+ fh.infh_proto = inp->inp_ip_p;
+ fh.infh_rand1 = RandomULong();
+ fh.infh_rand2 = RandomULong();
+
+try_again:
+ flowhash = net_flowhash(&fh, sizeof(fh), inp_hash_seed);
+ if (flowhash == 0) {
+ /* try to get a non-zero flowhash */
+ inp_hash_seed = RandomULong();
+ goto try_again;
+ }
+
+ inp->inp_flowhash = flowhash;
+
+ /* Insert the inp into inp_fc_tree */
+ lck_mtx_lock_spin(&inp_fc_lck);
+ tmp_inp = RB_FIND(inp_fc_tree, &inp_fc_tree, inp);
+ if (tmp_inp != NULL) {
+ /*
+ * A different inp already uses this flowhash.  Hash
+ * collisions are possible but unlikely; recompute
+ * with a new seed to keep the hash unique within
+ * the tree.
+ */
+ lck_mtx_unlock(&inp_fc_lck);
+ /* recompute hash seed */
+ inp_hash_seed = RandomULong();
+ goto try_again;
+ }
+
+ RB_INSERT(inp_fc_tree, &inp_fc_tree, inp);
+ inp->inp_flags2 |= INP2_IN_FCTREE;
+ lck_mtx_unlock(&inp_fc_lck);
+
+ return flowhash;
+}
+
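+/*
+ * Entry point for a flow advisory: find the PCB for the given flow
+ * hash and, if one exists, feed the advisory back so the flow can
+ * resume.
+ */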
+void
+inp_flowadv(uint32_t flowhash)
+{
+ struct inpcb *inp;
+
+ inp = inp_fc_getinp(flowhash, 0);
+
+ if (inp == NULL) {
+ return;
+ }
+ inp_fc_feedback(inp);
+}
+
+/*
+ * Compare two PCB entries in the inp flow control tree by flow hash.
+ */
+static inline int
+infc_cmp(const struct inpcb *inp1, const struct inpcb *inp2)
+{
+ return memcmp(&(inp1->inp_flowhash), &(inp2->inp_flowhash),
+ sizeof(inp1->inp_flowhash));
+}
+
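+/*
+ * Look up a PCB in the flow control tree by flow hash.  With
+ * INPFC_REMOVE, the entry is taken out of the tree and NULL is
+ * returned; otherwise a use count is acquired on the PCB before it
+ * is returned.
+ */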
+static struct inpcb *
+inp_fc_getinp(u_int32_t flowhash, u_int32_t flags)
+{
+ struct inpcb *inp = NULL;
+ int locked = (flags & INPFC_SOLOCKED) ? 1 : 0;
+
+ lck_mtx_lock_spin(&inp_fc_lck);
+ key_inp.inp_flowhash = flowhash;
+ inp = RB_FIND(inp_fc_tree, &inp_fc_tree, &key_inp);
+ if (inp == NULL) {
+ /* inp is not present, return */
+ lck_mtx_unlock(&inp_fc_lck);
+ return NULL;
+ }
+
+ if (flags & INPFC_REMOVE) {
+ RB_REMOVE(inp_fc_tree, &inp_fc_tree, inp);
+ lck_mtx_unlock(&inp_fc_lck);
+
+ bzero(&(inp->infc_link), sizeof(inp->infc_link));
+ inp->inp_flags2 &= ~INP2_IN_FCTREE;
+ return NULL;
+ }
+
+ if (in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING) {
+ inp = NULL;
+ }
+ lck_mtx_unlock(&inp_fc_lck);
+
+ return inp;
+}
+
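+/*
+ * Resume a flow-controlled PCB: drop the flow-control state and,
+ * for stream sockets, unthrottle the TCP connection.
+ */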
+static void
+inp_fc_feedback(struct inpcb *inp)
+{
+ struct socket *so = inp->inp_socket;
+
+ /* We already hold a want_cnt on this inp; the socket can't be NULL */
+ VERIFY(so != NULL);
+ socket_lock(so, 1);
+
+ if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
+ socket_unlock(so, 1);
+ return;
+ }
+
+ if (inp->inp_sndinprog_cnt > 0) {
+ inp->inp_flags |= INP_FC_FEEDBACK;
+ }
+
+ /*
+ * Return if the connection is not in a flow-controlled state.
+ * This can happen if the connection experienced loss while it
+ * was flow controlled.
+ */
+ if (!INP_WAIT_FOR_IF_FEEDBACK(inp)) {
+ socket_unlock(so, 1);
+ return;
+ }
+ inp_reset_fc_state(inp);
+
+ if (SOCK_TYPE(so) == SOCK_STREAM) {
+ inp_fc_unthrottle_tcp(inp);
+ }
+
+ socket_unlock(so, 1);
+}
+
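+/*
+ * Clear the flow-controlled/suspended state on the PCB, post a
+ * resume event if the socket was suspended, and wake up any
+ * blocked writers.
+ */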
+void
+inp_reset_fc_state(struct inpcb *inp)
+{
+ struct socket *so = inp->inp_socket;
+ int suspended = (INP_IS_FLOW_SUSPENDED(inp)) ? 1 : 0;
+ int needwakeup = (INP_WAIT_FOR_IF_FEEDBACK(inp)) ? 1 : 0;
+
+ inp->inp_flags &= ~(INP_FLOW_CONTROLLED | INP_FLOW_SUSPENDED);
+
+ if (suspended) {
+ so->so_flags &= ~(SOF_SUSPENDED);
+ soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_RESUME));
+ }
+
+ /* Give a write wakeup to unblock the socket */
+ if (needwakeup) {
+ sowwakeup(so);
+ }
+}
+
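+/*
+ * Transition the PCB into flow-controlled or suspended state based
+ * on the advisory code from the interface.  Returns 1 if the state
+ * was updated, 0 if the advisory was ignored.
+ */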
+int
+inp_set_fc_state(struct inpcb *inp, int advcode)
+{
+ struct inpcb *tmp_inp = NULL;
+ /*
+ * If there was feedback from the interface while a send
+ * operation was in progress, ignore this flow advisory to
+ * avoid a race between setting the flow-controlled state
+ * and receiving feedback from the interface.
+ */
+ if (inp->inp_flags & INP_FC_FEEDBACK) {
+ return 0;
+ }
+
+ inp->inp_flags &= ~(INP_FLOW_CONTROLLED | INP_FLOW_SUSPENDED);
+ if ((tmp_inp = inp_fc_getinp(inp->inp_flowhash,
+ INPFC_SOLOCKED)) != NULL) {
+ if (in_pcb_checkstate(tmp_inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
+ return 0;
+ }
+ VERIFY(tmp_inp == inp);
+ switch (advcode) {
+ case FADV_FLOW_CONTROLLED:
+ inp->inp_flags |= INP_FLOW_CONTROLLED;
+ break;
+ case FADV_SUSPENDED:
+ inp->inp_flags |= INP_FLOW_SUSPENDED;
+ soevent(inp->inp_socket,
+ (SO_FILT_HINT_LOCKED | SO_FILT_HINT_SUSPEND));
+
+ /* Record the fact that suspend event was sent */
+ inp->inp_socket->so_flags |= SOF_SUSPENDED;
+ break;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Handler for SO_FLUSH socket option.
+ */
+int
+inp_flush(struct inpcb *inp, int optval)
+{
+ u_int32_t flowhash = inp->inp_flowhash;
+ struct ifnet *rtifp, *oifp;
+
+ /* Either all classes or one of the valid ones */
+ if (optval != SO_TC_ALL && !SO_VALID_TC(optval)) {
+ return EINVAL;
+ }
+
+ /* We need a flow hash for identification */
+ if (flowhash == 0) {
+ return 0;
+ }
+
+ /* Grab the interfaces from the route and pcb */
+ rtifp = ((inp->inp_route.ro_rt != NULL) ?
+ inp->inp_route.ro_rt->rt_ifp : NULL);
+ oifp = inp->inp_last_outifp;
+
+ if (rtifp != NULL) {
+ if_qflush_sc(rtifp, so_tc2msc(optval), flowhash, NULL, NULL, 0);
+ }
+ if (oifp != NULL && oifp != rtifp) {
+ if_qflush_sc(oifp, so_tc2msc(optval), flowhash, NULL, NULL, 0);
+ }
+
+ return 0;
+}
+
+/*
+ * Clear the INP_INADDR_ANY flag (special case for PPP only)
+ */
+void
+inp_clear_INP_INADDR_ANY(struct socket *so)
+{
+ struct inpcb *inp = NULL;
+
+ socket_lock(so, 1);
+ inp = sotoinpcb(so);
+ if (inp) {
+ inp->inp_flags &= ~INP_INADDR_ANY;
+ }
+ socket_unlock(so, 1);
+}
+
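+/*
+ * Report the owning process (pid, name, UUID) of the socket, along
+ * with the effective process when the socket is delegated.
+ */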
+void
+inp_get_soprocinfo(struct inpcb *inp, struct so_procinfo *soprocinfo)
+{
+ struct socket *so = inp->inp_socket;
+
+ soprocinfo->spi_pid = so->last_pid;
+ strlcpy(&soprocinfo->spi_proc_name[0], &inp->inp_last_proc_name[0],
+ sizeof(soprocinfo->spi_proc_name));
+ if (so->last_pid != 0) {
+ uuid_copy(soprocinfo->spi_uuid, so->last_uuid);
+ }
+ /*
+ * When not delegated, the effective pid is the same as the real pid
+ */
+ if (so->so_flags & SOF_DELEGATED) {
+ soprocinfo->spi_delegated = 1;
+ soprocinfo->spi_epid = so->e_pid;
+ uuid_copy(soprocinfo->spi_euuid, so->e_uuid);
+ } else {
+ soprocinfo->spi_delegated = 0;
+ soprocinfo->spi_epid = so->last_pid;
+ }
+ strlcpy(&soprocinfo->spi_e_proc_name[0], &inp->inp_e_proc_name[0],
+ sizeof(soprocinfo->spi_e_proc_name));
+}
+
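+/*
+ * Find the PCB with the given flow hash and report its owning
+ * process.  Returns 1 if found, 0 otherwise, and -1 if the flow
+ * hash is zero.
+ */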
+int
+inp_findinpcb_procinfo(struct inpcbinfo *pcbinfo, uint32_t flowhash,
+ struct so_procinfo *soprocinfo)
+{
+ struct inpcb *inp = NULL;
+ int found = 0;
+
+ bzero(soprocinfo, sizeof(struct so_procinfo));
+
+ if (!flowhash) {
+ return -1;
+ }
+
+ lck_rw_lock_shared(pcbinfo->ipi_lock);
+ LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
+ if (inp->inp_state != INPCB_STATE_DEAD &&
+ inp->inp_socket != NULL &&
+ inp->inp_flowhash == flowhash) {
+ found = 1;
+ inp_get_soprocinfo(inp, soprocinfo);
+ break;
+ }
+ }
+ lck_rw_done(pcbinfo->ipi_lock);
+
+ return found;
+}
+
+#if CONFIG_PROC_UUID_POLICY
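+/*
+ * Apply or clear the cellular restriction on this PCB per the
+ * process UUID policy, and log the transition when network I/O
+ * policy logging is enabled.
+ */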
+static void
+inp_update_cellular_policy(struct inpcb *inp, boolean_t set)
+{
+ struct socket *so = inp->inp_socket;
+ int before, after;
+
+ VERIFY(so != NULL);
+ VERIFY(inp->inp_state != INPCB_STATE_DEAD);
+
+ before = INP_NO_CELLULAR(inp);
+ if (set) {
+ inp_set_nocellular(inp);
+ } else {
+ inp_clear_nocellular(inp);
+ }
+ after = INP_NO_CELLULAR(inp);
+ if (net_io_policy_log && (before != after)) {
+ static const char *ok = "OK";
+ static const char *nok = "NOACCESS";
+ uuid_string_t euuid_buf;
+ pid_t epid;
+
+ if (so->so_flags & SOF_DELEGATED) {
+ uuid_unparse(so->e_uuid, euuid_buf);
+ epid = so->e_pid;
+ } else {
+ uuid_unparse(so->last_uuid, euuid_buf);
+ epid = so->last_pid;
+ }
+
+ /* allow this socket to generate another notification event */
+ so->so_ifdenied_notifies = 0;
+
+ log(LOG_DEBUG, "%s: so 0x%llx [%d,%d] epid %d "
+ "euuid %s%s %s->%s\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(so), SOCK_DOM(so),
+ SOCK_TYPE(so), epid, euuid_buf,
+ (so->so_flags & SOF_DELEGATED) ?
+ " [delegated]" : "",
+ ((before < after) ? ok : nok),
+ ((before < after) ? nok : ok));
+ }
+}
+
+#if NECP
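+/*
+ * Apply or clear INP2_WANT_APP_POLICY on this PCB and log the
+ * transition when network I/O policy logging is enabled.
+ */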
+static void
+inp_update_necp_want_app_policy(struct inpcb *inp, boolean_t set)
+{
+ struct socket *so = inp->inp_socket;
+ int before, after;
+
+ VERIFY(so != NULL);
+ VERIFY(inp->inp_state != INPCB_STATE_DEAD);
+
+ before = (inp->inp_flags2 & INP2_WANT_APP_POLICY);
+ if (set) {
+ inp_set_want_app_policy(inp);
+ } else {
+ inp_clear_want_app_policy(inp);
+ }
+ after = (inp->inp_flags2 & INP2_WANT_APP_POLICY);
+ if (net_io_policy_log && (before != after)) {
+ static const char *wanted = "WANTED";
+ static const char *unwanted = "UNWANTED";
+ uuid_string_t euuid_buf;
+ pid_t epid;
+
+ if (so->so_flags & SOF_DELEGATED) {
+ uuid_unparse(so->e_uuid, euuid_buf);
+ epid = so->e_pid;
+ } else {
+ uuid_unparse(so->last_uuid, euuid_buf);
+ epid = so->last_pid;
+ }
+
+ log(LOG_DEBUG, "%s: so 0x%llx [%d,%d] epid %d "
+ "euuid %s%s %s->%s\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(so), SOCK_DOM(so),
+ SOCK_TYPE(so), epid, euuid_buf,
+ (so->so_flags & SOF_DELEGATED) ?
+ " [delegated]" : "",
+ ((before < after) ? unwanted : wanted),
+ ((before < after) ? wanted : unwanted));
+ }
+}
+#endif /* NECP */
+#endif /* !CONFIG_PROC_UUID_POLICY */
+
+#if NECP
+void
+inp_update_necp_policy(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int override_bound_interface)
+{
+ necp_socket_find_policy_match(inp, override_local_addr, override_remote_addr, override_bound_interface);
+ if (necp_socket_should_rescope(inp) &&
+ inp->inp_lport == 0 &&
+ inp->inp_laddr.s_addr == INADDR_ANY &&
+ IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) {
+ // If we should rescope, and the socket is not yet bound
+ inp_bindif(inp, necp_socket_get_rescope_if_index(inp), NULL);
+ }
+}
+#endif /* NECP */
+
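+/*
+ * Re-evaluate the process UUID policy for the socket owning this
+ * PCB and update the PCB flags when the policy generation count
+ * has changed.
+ */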
+int
+inp_update_policy(struct inpcb *inp)
+{
+#if CONFIG_PROC_UUID_POLICY
+ struct socket *so = inp->inp_socket;
+ uint32_t pflags = 0;
+ int32_t ogencnt;
+ int err = 0;
+
+ if (!net_io_policy_uuid ||
+ so == NULL || inp->inp_state == INPCB_STATE_DEAD) {
+ return 0;
+ }
+
+ /*
+ * Kernel-created sockets that aren't delegating other sockets
+ * are currently exempted from UUID policy checks.
+ */
+ if (so->last_pid == 0 && !(so->so_flags & SOF_DELEGATED)) {
+ return 0;
+ }
+
+ ogencnt = so->so_policy_gencnt;
+ err = proc_uuid_policy_lookup(((so->so_flags & SOF_DELEGATED) ?
+ so->e_uuid : so->last_uuid), &pflags, &so->so_policy_gencnt);
+
+ /*
+ * Discard the cached generation count if the entry is gone
+ * (ENOENT), so that we go through the checks below.
+ */
+ if (err == ENOENT && ogencnt != 0) {
+ so->so_policy_gencnt = 0;
+ }
+
+ /*
+ * If the generation count has changed, inspect the policy flags
+ * and act accordingly. If a policy flag was previously set and
+ * the UUID is no longer present in the table (ENOENT), treat it
+ * as if the flag has been cleared.
+ */
+ if ((err == 0 || err == ENOENT) && ogencnt != so->so_policy_gencnt) {
+ /* update cellular policy for this socket */
+ if (err == 0 && (pflags & PROC_UUID_NO_CELLULAR)) {
+ inp_update_cellular_policy(inp, TRUE);
+ } else if (!(pflags & PROC_UUID_NO_CELLULAR)) {
+ inp_update_cellular_policy(inp, FALSE);
+ }
+#if NECP
+ /* update necp want app policy for this socket */
+ if (err == 0 && (pflags & PROC_UUID_NECP_APP_POLICY)) {
+ inp_update_necp_want_app_policy(inp, TRUE);
+ } else if (!(pflags & PROC_UUID_NECP_APP_POLICY)) {
+ inp_update_necp_want_app_policy(inp, FALSE);
+ }
+#endif /* NECP */
+ }
+
+ return (err == ENOENT) ? 0 : err;
+#else /* !CONFIG_PROC_UUID_POLICY */
+#pragma unused(inp)
+ return 0;
+#endif /* !CONFIG_PROC_UUID_POLICY */
+}
+
+static unsigned int log_restricted;
+SYSCTL_DECL(_net_inet);
+SYSCTL_INT(_net_inet, OID_AUTO, log_restricted,
+ CTLFLAG_RW | CTLFLAG_LOCKED, &log_restricted, 0,
+ "Log network restrictions");
+/*
+ * Called when we need to enforce policy restrictions in the input path.
+ *
+ * Returns TRUE if we're not allowed to receive data, otherwise FALSE.
+ */
+static boolean_t
+_inp_restricted_recv(struct inpcb *inp, struct ifnet *ifp)
+{
+ VERIFY(inp != NULL);
+
+ /*
+ * Inbound restrictions.
+ */
+ if (!sorestrictrecv) {
+ return FALSE;
+ }
+
+ if (ifp == NULL) {
+ return FALSE;
+ }
+
+ if (IFNET_IS_CELLULAR(ifp) && INP_NO_CELLULAR(inp)) {
+ return TRUE;
+ }
+
+ if (IFNET_IS_EXPENSIVE(ifp) && INP_NO_EXPENSIVE(inp)) {
+ return TRUE;
+ }
+
+ if (IFNET_IS_CONSTRAINED(ifp) && INP_NO_CONSTRAINED(inp)) {
+ return TRUE;
+ }
+
+ if (IFNET_IS_AWDL_RESTRICTED(ifp) && !INP_AWDL_UNRESTRICTED(inp)) {
+ return TRUE;
+ }
+
+ if (IFNET_IS_INTCOPROC(ifp) && !INP_INTCOPROC_ALLOWED(inp)) {
+ return TRUE;
+ }
+
+ if (!(ifp->if_eflags & IFEF_RESTRICTED_RECV)) {
+ return FALSE;
+ }
+
+ if (inp->inp_flags & INP_RECV_ANYIF) {
+ return FALSE;
+ }
+
+ if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp == ifp) {
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+boolean_t
+inp_restricted_recv(struct inpcb *inp, struct ifnet *ifp)
+{
+ boolean_t ret;
+
+ ret = _inp_restricted_recv(inp, ifp);
+ if (ret == TRUE && log_restricted) {
+ printf("pid %d (%s) is unable to receive packets on %s\n",
+ current_proc()->p_pid, proc_best_name(current_proc()),
+ ifp->if_xname);
+ }
+ return ret;
+}
+
+/*
+ * Called when we need to enforce policy restrictions in the output path.
+ *
+ * Returns TRUE if we're not allowed to send data out, otherwise FALSE.
+ */
+static boolean_t
+_inp_restricted_send(struct inpcb *inp, struct ifnet *ifp)
+{
+ VERIFY(inp != NULL);
+
+ /*
+ * Outbound restrictions.
+ */
+ if (!sorestrictsend) {
+ return FALSE;
+ }
+
+ if (ifp == NULL) {
+ return FALSE;
+ }
+
+ if (IFNET_IS_CELLULAR(ifp) && INP_NO_CELLULAR(inp)) {
+ return TRUE;
+ }
+
+ if (IFNET_IS_EXPENSIVE(ifp) && INP_NO_EXPENSIVE(inp)) {
+ return TRUE;
+ }
+
+ if (IFNET_IS_CONSTRAINED(ifp) && INP_NO_CONSTRAINED(inp)) {
+ return TRUE;
+ }
+
+ if (IFNET_IS_AWDL_RESTRICTED(ifp) && !INP_AWDL_UNRESTRICTED(inp)) {
+ return TRUE;
+ }
+
+ if (IFNET_IS_INTCOPROC(ifp) && !INP_INTCOPROC_ALLOWED(inp)) {
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+boolean_t
+inp_restricted_send(struct inpcb *inp, struct ifnet *ifp)
+{
+ boolean_t ret;
+
+ ret = _inp_restricted_send(inp, ifp);
+ if (ret == TRUE && log_restricted) {
+ printf("pid %d (%s) is unable to transmit packets on %s\n",
+ current_proc()->p_pid, proc_best_name(current_proc()),
+ ifp->if_xname);
+ }
+ return ret;
+}
+
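+/*
+ * Enable send-byte accounting on the socket when the outgoing
+ * interface is cellular or Wi-Fi, accounting for any data queued
+ * before the connection became established.
+ */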
+inline void
+inp_count_sndbytes(struct inpcb *inp, u_int32_t th_ack)
+{
+ struct ifnet *ifp = inp->inp_last_outifp;
+ struct socket *so = inp->inp_socket;
+
+ if (ifp != NULL && !(so->so_flags & SOF_MP_SUBFLOW) &&
+ (ifp->if_type == IFT_CELLULAR || IFNET_IS_WIFI(ifp))) {
+ int32_t unsent;
+
+ so->so_snd.sb_flags |= SB_SNDBYTE_CNT;
+
+ /*
+ * There can be data outstanding before the connection
+ * becomes established -- the TCP Fast Open (TFO) case.
+ */
+ if (so->so_snd.sb_cc > 0) {
+ inp_incr_sndbytes_total(so, so->so_snd.sb_cc);
+ }
+
+ unsent = inp_get_sndbytes_allunsent(so, th_ack);
+ if (unsent > 0) {
+ inp_incr_sndbytes_unsent(so, unsent);
+ }
+ }
+}
+
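+/*
+ * Send-byte accounting helpers: atomically adjust the total and
+ * unsent byte counters on the last outgoing interface.
+ */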
+inline void
+inp_incr_sndbytes_total(struct socket *so, int32_t len)
+{
+ struct inpcb *inp = (struct inpcb *)so->so_pcb;
+ struct ifnet *ifp = inp->inp_last_outifp;
+
+ if (ifp != NULL) {
+ VERIFY(ifp->if_sndbyte_total >= 0);
+ OSAddAtomic64(len, &ifp->if_sndbyte_total);
+ }
+}
+
+inline void
+inp_decr_sndbytes_total(struct socket *so, int32_t len)
+{
+ struct inpcb *inp = (struct inpcb *)so->so_pcb;
+ struct ifnet *ifp = inp->inp_last_outifp;
+
+ if (ifp != NULL) {
+ VERIFY(ifp->if_sndbyte_total >= len);
+ OSAddAtomic64(-len, &ifp->if_sndbyte_total);
+ }
+}
+
+inline void
+inp_incr_sndbytes_unsent(struct socket *so, int32_t len)
+{
+ struct inpcb *inp = (struct inpcb *)so->so_pcb;
+ struct ifnet *ifp = inp->inp_last_outifp;
+
+ if (ifp != NULL) {
+ VERIFY(ifp->if_sndbyte_unsent >= 0);
+ OSAddAtomic64(len, &ifp->if_sndbyte_unsent);
+ }
+}
+
+inline void
+inp_decr_sndbytes_unsent(struct socket *so, int32_t len)
+{
+ if (so == NULL || !(so->so_snd.sb_flags & SB_SNDBYTE_CNT)) {
+ return;
+ }
+
+ struct inpcb *inp = (struct inpcb *)so->so_pcb;
+ struct ifnet *ifp = inp->inp_last_outifp;
+
+ if (ifp != NULL) {
+ if (ifp->if_sndbyte_unsent >= len) {
+ OSAddAtomic64(-len, &ifp->if_sndbyte_unsent);
+ } else {
+ ifp->if_sndbyte_unsent = 0;
+ }
+ }
+}
+
+inline void
+inp_decr_sndbytes_allunsent(struct socket *so, u_int32_t th_ack)
+{
+ int32_t len;
+
+ if (so == NULL || !(so->so_snd.sb_flags & SB_SNDBYTE_CNT)) {
+ return;
+ }
+
+ len = inp_get_sndbytes_allunsent(so, th_ack);
+ inp_decr_sndbytes_unsent(so, len);
+}
+
+inline void
+inp_set_activity_bitmap(struct inpcb *inp)
+{
+ in_stat_set_activity_bitmap(&inp->inp_nw_activity, net_uptime());
+}
+
+inline void
+inp_get_activity_bitmap(struct inpcb *inp, activity_bitmap_t *ab)
+{
+ bcopy(&inp->inp_nw_activity, ab, sizeof(*ab));
+}
+
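+/*
+ * Record the name of the process (and, for delegated sockets, the
+ * effective process) that last used this socket.
+ */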
+void
+inp_update_last_owner(struct socket *so, struct proc *p, struct proc *ep)
+{
+ struct inpcb *inp = (struct inpcb *)so->so_pcb;
+
+ if (inp == NULL) {
+ return;
+ }
+
+ if (p != NULL) {
+ strlcpy(&inp->inp_last_proc_name[0], proc_name_address(p), sizeof(inp->inp_last_proc_name));
+ }
+ if (so->so_flags & SOF_DELEGATED) {
+ if (ep != NULL) {
+ strlcpy(&inp->inp_e_proc_name[0], proc_name_address(ep), sizeof(inp->inp_e_proc_name));
+ } else {
+ inp->inp_e_proc_name[0] = 0;
+ }
+ } else {
+ inp->inp_e_proc_name[0] = 0;
+ }
+}
+
+void
+inp_copy_last_owner(struct socket *so, struct socket *head)
+{
+ struct inpcb *inp = (struct inpcb *)so->so_pcb;
+ struct inpcb *head_inp = (struct inpcb *)head->so_pcb;
+
+ if (inp == NULL || head_inp == NULL) {
+ return;