+ lck_mtx_unlock(&inp_fc_lck);
+ /* recompute hash seed */
+ inp_hash_seed = RandomULong();
+ goto try_again;
+ }
+
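+ /* insert the inp into the flow-control tree and mark it as tracked */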
+ RB_INSERT(inp_fc_tree, &inp_fc_tree, inp);
+ inp->inp_flags2 |= INP2_IN_FCTREE;
+ lck_mtx_unlock(&inp_fc_lck);
+
+ return (flowhash);
+}
+
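+/*
+ * Deliver a flow advisory for the given flowhash: look up the inpcb
+ * in the flow-control tree (taking a want count on it) and feed the
+ * interface feedback back to the socket.
+ */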
+void
+inp_flowadv(uint32_t flowhash)
+{
+ struct inpcb *inp;
+
+ inp = inp_fc_getinp(flowhash, 0);
+
+ if (inp == NULL)
+ return;
+ inp_fc_feedback(inp);
+}
+
+/*
+ * Function to compare inp_fc_entries in inp flow control tree
+ */
+static inline int
+infc_cmp(const struct inpcb *inp1, const struct inpcb *inp2)
+{
+ return (memcmp(&(inp1->inp_flowhash), &(inp2->inp_flowhash),
+ sizeof(inp1->inp_flowhash)));
+}
+
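+/*
+ * Look up an inpcb by flowhash in the flow-control tree. With
+ * INPFC_REMOVE, the entry is taken out of the tree and NULL is
+ * returned; otherwise a want count is acquired on the inp before it
+ * is returned. INPFC_SOLOCKED indicates that the caller holds the
+ * socket lock, which is passed through to in_pcb_checkstate().
+ */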
+static struct inpcb *
+inp_fc_getinp(u_int32_t flowhash, u_int32_t flags)
+{
+ struct inpcb *inp = NULL;
+ int locked = (flags & INPFC_SOLOCKED) ? 1 : 0;
+
+ lck_mtx_lock_spin(&inp_fc_lck);
+ key_inp.inp_flowhash = flowhash;
+ inp = RB_FIND(inp_fc_tree, &inp_fc_tree, &key_inp);
+ if (inp == NULL) {
+ /* inp is not present, return */
+ lck_mtx_unlock(&inp_fc_lck);
+ return (NULL);
+ }
+
+ if (flags & INPFC_REMOVE) {
+ RB_REMOVE(inp_fc_tree, &inp_fc_tree, inp);
+ lck_mtx_unlock(&inp_fc_lck);
+
+ bzero(&(inp->infc_link), sizeof (inp->infc_link));
+ inp->inp_flags2 &= ~INP2_IN_FCTREE;
+ return (NULL);
+ }
+
+ if (in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING)
+ inp = NULL;
+ lck_mtx_unlock(&inp_fc_lck);
+
+ return (inp);
+}
+
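+/*
+ * Process feedback from the interface for an inp that is being
+ * flow-controlled: drop the want count taken by the lookup, clear
+ * the flow-controlled/suspended state and, for SOCK_STREAM sockets,
+ * undo the TCP throttling that was applied while the flow was
+ * advised.
+ */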
+static void
+inp_fc_feedback(struct inpcb *inp)
+{
+ struct socket *so = inp->inp_socket;
+
+ /* we already hold a want_cnt on this inp; the socket can't be NULL */
+ VERIFY(so != NULL);
+ socket_lock(so, 1);
+
+ if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
+ socket_unlock(so, 1);
+ return;
+ }
+
+ if (inp->inp_sndinprog_cnt > 0)
+ inp->inp_flags |= INP_FC_FEEDBACK;
+
+ /*
+ * Return if the connection is not in flow-controlled state.
+ * This can happen if the connection experienced loss while
+ * it was in the flow-controlled state.
+ */
+ if (!INP_WAIT_FOR_IF_FEEDBACK(inp)) {
+ socket_unlock(so, 1);
+ return;
+ }
+ inp_reset_fc_state(inp);
+
+ if (SOCK_TYPE(so) == SOCK_STREAM)
+ inp_fc_unthrottle_tcp(inp);
+
+ socket_unlock(so, 1);
+}
+
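+/*
+ * Clear the flow-controlled and suspended bits on the inp; if the
+ * flow was suspended, post a resume event, and if anything was
+ * waiting for interface feedback, issue a write wakeup so the
+ * socket can make progress again.
+ */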
+void
+inp_reset_fc_state(struct inpcb *inp)
+{
+ struct socket *so = inp->inp_socket;
+ int suspended = (INP_IS_FLOW_SUSPENDED(inp)) ? 1 : 0;
+ int needwakeup = (INP_WAIT_FOR_IF_FEEDBACK(inp)) ? 1 : 0;
+
+ inp->inp_flags &= ~(INP_FLOW_CONTROLLED | INP_FLOW_SUSPENDED);
+
+ if (suspended) {
+ so->so_flags &= ~(SOF_SUSPENDED);
+ soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_RESUME));
+ }
+
+ /* Give a write wakeup to unblock the socket */
+ if (needwakeup)
+ sowwakeup(so);
+}
+
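+/*
+ * Transition the inp into flow-controlled or suspended state based
+ * on the advisory code reported by the interface, after verifying
+ * that the flow is still tracked in the flow-control tree. Returns
+ * 1 if the state was set, 0 if the advisory was ignored (e.g.
+ * because feedback from the interface raced with a send in
+ * progress).
+ */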
+int
+inp_set_fc_state(struct inpcb *inp, int advcode)
+{
+ struct inpcb *tmp_inp = NULL;
+ /*
+ * If there was feedback from the interface while a send
+ * operation was in progress, ignore this flow advisory to
+ * avoid a race between setting the flow-controlled state
+ * and receiving feedback from the interface.
+ */
+ if (inp->inp_flags & INP_FC_FEEDBACK)
+ return (0);
+
+ inp->inp_flags &= ~(INP_FLOW_CONTROLLED | INP_FLOW_SUSPENDED);
+ if ((tmp_inp = inp_fc_getinp(inp->inp_flowhash,
+ INPFC_SOLOCKED)) != NULL) {
+ if (in_pcb_checkstate(tmp_inp, WNT_RELEASE, 1) == WNT_STOPUSING)
+ return (0);
+ VERIFY(tmp_inp == inp);
+ switch (advcode) {
+ case FADV_FLOW_CONTROLLED:
+ inp->inp_flags |= INP_FLOW_CONTROLLED;
+ break;
+ case FADV_SUSPENDED:
+ inp->inp_flags |= INP_FLOW_SUSPENDED;
+ soevent(inp->inp_socket,
+ (SO_FILT_HINT_LOCKED | SO_FILT_HINT_SUSPEND));
+
+ /* Record the fact that suspend event was sent */
+ inp->inp_socket->so_flags |= SOF_SUSPENDED;
+ break;
+ }
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * Handler for SO_FLUSH socket option.
+ */
+int
+inp_flush(struct inpcb *inp, int optval)
+{
+ u_int32_t flowhash = inp->inp_flowhash;
+ struct ifnet *rtifp, *oifp;
+
+ /* Either all classes or one of the valid ones */
+ if (optval != SO_TC_ALL && !SO_VALID_TC(optval))
+ return (EINVAL);
+
+ /* We need a flow hash for identification */
+ if (flowhash == 0)
+ return (0);
+
+ /* Grab the interfaces from the route and pcb */
+ rtifp = ((inp->inp_route.ro_rt != NULL) ?
+ inp->inp_route.ro_rt->rt_ifp : NULL);
+ oifp = inp->inp_last_outifp;
+
+ if (rtifp != NULL)
+ if_qflush_sc(rtifp, so_tc2msc(optval), flowhash, NULL, NULL, 0);
+ if (oifp != NULL && oifp != rtifp)
+ if_qflush_sc(oifp, so_tc2msc(optval), flowhash, NULL, NULL, 0);
+
+ return (0);
+}
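+
+/*
+ * Usage sketch (assumption, not part of this change): userspace
+ * would reach inp_flush() through the SO_FLUSH socket option, e.g.
+ * setsockopt(s, SOL_SOCKET, SO_FLUSH, &tc, sizeof (tc)) with tc set
+ * to SO_TC_ALL or one of the SO_TC_* traffic classes; the option
+ * plumbing itself is outside this diff.
+ */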
+
+/*
+ * Clear the INP_INADDR_ANY flag (special case for PPP only)
+ */
+void
+inp_clear_INP_INADDR_ANY(struct socket *so)
+{
+ struct inpcb *inp = NULL;
+
+ socket_lock(so, 1);
+ inp = sotoinpcb(so);
+ if (inp) {
+ inp->inp_flags &= ~INP_INADDR_ANY;
+ }
+ socket_unlock(so, 1);
+}
+
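+/*
+ * Fill in a so_procinfo with the pid/UUID of the process that last
+ * used the socket, plus the effective pid/UUID when the socket is
+ * delegated.
+ */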
+void
+inp_get_soprocinfo(struct inpcb *inp, struct so_procinfo *soprocinfo)
+{
+ struct socket *so = inp->inp_socket;
+
+ soprocinfo->spi_pid = so->last_pid;
+ if (so->last_pid != 0)
+ uuid_copy(soprocinfo->spi_uuid, so->last_uuid);
+ /*
+ * If the socket is delegated, report the delegate's pid and
+ * UUID as the effective ones; when not delegated, the
+ * effective pid is the same as the real pid.
+ */
+ if (so->so_flags & SOF_DELEGATED) {
+ soprocinfo->spi_epid = so->e_pid;
+ if (so->e_pid != 0)
+ uuid_copy(soprocinfo->spi_euuid, so->e_uuid);
+ } else {
+ soprocinfo->spi_epid = so->last_pid;
+ }
+}
+
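+/*
+ * Walk the pcbinfo list for a live inpcb matching the given
+ * flowhash and report its process info. Returns 1 if found, 0 if
+ * not, and -1 if no flowhash was supplied.
+ */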
+int
+inp_findinpcb_procinfo(struct inpcbinfo *pcbinfo, uint32_t flowhash,
+ struct so_procinfo *soprocinfo)
+{
+ struct inpcb *inp = NULL;
+ int found = 0;
+
+ bzero(soprocinfo, sizeof (struct so_procinfo));
+
+ if (!flowhash)
+ return (-1);
+
+ lck_rw_lock_shared(pcbinfo->ipi_lock);
+ LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
+ if (inp->inp_state != INPCB_STATE_DEAD &&
+ inp->inp_socket != NULL &&
+ inp->inp_flowhash == flowhash) {
+ found = 1;
+ inp_get_soprocinfo(inp, soprocinfo);
+ break;
+ }
+ }
+ lck_rw_done(pcbinfo->ipi_lock);
+
+ return (found);
+}
+
+#if CONFIG_PROC_UUID_POLICY
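+/*
+ * Apply or clear the "no cellular" restriction on an inp according
+ * to the per-process UUID policy, logging the transition (with the
+ * effective pid/UUID) when the policy state actually changes.
+ */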
+static void
+inp_update_cellular_policy(struct inpcb *inp, boolean_t set)
+{
+ struct socket *so = inp->inp_socket;
+ int before, after;
+
+ VERIFY(so != NULL);
+ VERIFY(inp->inp_state != INPCB_STATE_DEAD);
+
+ before = INP_NO_CELLULAR(inp);
+ if (set) {
+ inp_set_nocellular(inp);
+ } else {
+ inp_clear_nocellular(inp);
+ }
+ after = INP_NO_CELLULAR(inp);
+ if (net_io_policy_log && (before != after)) {
+ static const char *ok = "OK";
+ static const char *nok = "NOACCESS";
+ uuid_string_t euuid_buf;
+ pid_t epid;
+
+ if (so->so_flags & SOF_DELEGATED) {
+ uuid_unparse(so->e_uuid, euuid_buf);
+ epid = so->e_pid;
+ } else {
+ uuid_unparse(so->last_uuid, euuid_buf);
+ epid = so->last_pid;
+ }
+
+ /* allow this socket to generate another notification event */
+ so->so_ifdenied_notifies = 0;
+
+ log(LOG_DEBUG, "%s: so 0x%llx [%d,%d] epid %d "
+ "euuid %s%s %s->%s\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(so), SOCK_DOM(so),
+ SOCK_TYPE(so), epid, euuid_buf,
+ (so->so_flags & SOF_DELEGATED) ?
+ " [delegated]" : "",
+ ((before < after) ? ok : nok),
+ ((before < after) ? nok : ok));
+ }
+}
+
+#if NECP
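+/*
+ * Set or clear INP2_WANT_APP_POLICY on the inp for NECP, logging
+ * the transition when the flag actually changes.
+ */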
+static void
+inp_update_necp_want_app_policy(struct inpcb *inp, boolean_t set)
+{
+ struct socket *so = inp->inp_socket;
+ int before, after;
+
+ VERIFY(so != NULL);
+ VERIFY(inp->inp_state != INPCB_STATE_DEAD);
+
+ before = (inp->inp_flags2 & INP2_WANT_APP_POLICY);
+ if (set) {
+ inp_set_want_app_policy(inp);
+ } else {
+ inp_clear_want_app_policy(inp);
+ }
+ after = (inp->inp_flags2 & INP2_WANT_APP_POLICY);
+ if (net_io_policy_log && (before != after)) {
+ static const char *wanted = "WANTED";
+ static const char *unwanted = "UNWANTED";
+ uuid_string_t euuid_buf;
+ pid_t epid;
+
+ if (so->so_flags & SOF_DELEGATED) {
+ uuid_unparse(so->e_uuid, euuid_buf);
+ epid = so->e_pid;