+#pragma unused(so, nam)
+ return (EOPNOTSUPP);
+}
+
+int
+pru_sopoll_notsupp(struct socket *so, int events, kauth_cred_t cred, void *wql)
+{
+#pragma unused(so, events, cred, wql)
+ return (EOPNOTSUPP);
+}
+
+int
+pru_socheckopt_null(struct socket *so, struct sockopt *sopt)
+{
+#pragma unused(so, sopt)
+ /*
+ * Allow all options for set/get by default.
+ */
+ return (0);
+}
+
+void
+pru_sanitize(struct pr_usrreqs *pru)
+{
+#define DEFAULT(foo, bar) if ((foo) == NULL) (foo) = (bar)
+ DEFAULT(pru->pru_abort, pru_abort_notsupp);
+ DEFAULT(pru->pru_accept, pru_accept_notsupp);
+ DEFAULT(pru->pru_attach, pru_attach_notsupp);
+ DEFAULT(pru->pru_bind, pru_bind_notsupp);
+ DEFAULT(pru->pru_connect, pru_connect_notsupp);
+ DEFAULT(pru->pru_connect2, pru_connect2_notsupp);
+ DEFAULT(pru->pru_connectx, pru_connectx_notsupp);
+ DEFAULT(pru->pru_control, pru_control_notsupp);
+ DEFAULT(pru->pru_detach, pru_detach_notsupp);
+ DEFAULT(pru->pru_disconnect, pru_disconnect_notsupp);
+ DEFAULT(pru->pru_disconnectx, pru_disconnectx_notsupp);
+ DEFAULT(pru->pru_listen, pru_listen_notsupp);
+ DEFAULT(pru->pru_peeloff, pru_peeloff_notsupp);
+ DEFAULT(pru->pru_peeraddr, pru_peeraddr_notsupp);
+ DEFAULT(pru->pru_rcvd, pru_rcvd_notsupp);
+ DEFAULT(pru->pru_rcvoob, pru_rcvoob_notsupp);
+ DEFAULT(pru->pru_send, pru_send_notsupp);
+ DEFAULT(pru->pru_send_list, pru_send_list_notsupp);
+ DEFAULT(pru->pru_sense, pru_sense_null);
+ DEFAULT(pru->pru_shutdown, pru_shutdown_notsupp);
+ DEFAULT(pru->pru_sockaddr, pru_sockaddr_notsupp);
+ DEFAULT(pru->pru_sopoll, pru_sopoll_notsupp);
+ DEFAULT(pru->pru_soreceive, pru_soreceive_notsupp);
+ DEFAULT(pru->pru_soreceive_list, pru_soreceive_list_notsupp);
+ DEFAULT(pru->pru_sosend, pru_sosend_notsupp);
+ DEFAULT(pru->pru_sosend_list, pru_sosend_list_notsupp);
+ DEFAULT(pru->pru_socheckopt, pru_socheckopt_null);
+#undef DEFAULT
+}
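+
+/*
+ * Illustrative sketch (hypothetical protocol, not part of this change):
+ * a protocol that implements only a few handlers can leave the rest
+ * NULL and let pru_sanitize() substitute the *_notsupp/_null stubs:
+ *
+ *	static struct pr_usrreqs foo_usrreqs = {
+ *		.pru_attach = foo_attach,
+ *		.pru_detach = foo_detach,
+ *		.pru_send = foo_send,
+ *	};
+ *
+ *	pru_sanitize(&foo_usrreqs);
+ *
+ * Afterwards, e.g. foo_usrreqs.pru_listen points at pru_listen_notsupp
+ * and fails with EOPNOTSUPP, while foo_usrreqs.pru_sense points at
+ * pru_sense_null and succeeds with no side effects.
+ */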
+
+/*
+ * The following are implemented as macros on BSD and as functions on Darwin.
+ */
+
+/*
+ * Do we need to notify the other side when I/O is possible?
+ */
+
+int
+sb_notify(struct sockbuf *sb)
+{
+ return (sb->sb_waiters > 0 ||
+ (sb->sb_flags & (SB_SEL|SB_ASYNC|SB_UPCALL|SB_KNOTE)));
+}
+
+/*
+ * How much space is there in a socket buffer (so->so_snd or so->so_rcv)?
+ * This is problematic if the fields are unsigned, as the space might
+ * still come out negative (cc > hiwat or mbcnt > mbmax); detect that
+ * case and return 0.
+ */
+int
+sbspace(struct sockbuf *sb)
+{
+ int pending = 0;
+ int space = imin((int)(sb->sb_hiwat - sb->sb_cc),
+ (int)(sb->sb_mbmax - sb->sb_mbcnt));
+ if (space < 0)
+ space = 0;
+
+ /* Compensate for data being processed by content filters */
+#if CONTENT_FILTER
+ pending = cfil_sock_data_space(sb);
+#endif /* CONTENT_FILTER */
+ if (pending > space)
+ space = 0;
+ else
+ space -= pending;
+
+ return (space);
+}
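+
+/*
+ * Worked example (hypothetical values): with sb_hiwat = 8192, sb_cc = 3000,
+ * sb_mbmax = 16384 and sb_mbcnt = 10000, sbspace() returns
+ * imin(8192 - 3000, 16384 - 10000) = imin(5192, 6384) = 5192 bytes; if a
+ * content filter still holds, say, 6000 pending bytes, the result is
+ * clamped to 0 rather than going negative.
+ */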
+
+/*
+ * If this socket has priority queues, check if there is enough
+ * space in the priority queue for this msg.
+ */
+int
+msgq_sbspace(struct socket *so, struct mbuf *control)
+{
+ int space = 0, error;
+ u_int32_t msgpri;
+ VERIFY(so->so_type == SOCK_STREAM &&
+ SOCK_PROTO(so) == IPPROTO_TCP);
+ if (control != NULL) {
+ error = tcp_get_msg_priority(control, &msgpri);
+ if (error)
+ return (0);
+ } else {
+ msgpri = MSG_PRI_0;
+ }
+ space = (so->so_snd.sb_idealsize / MSG_PRI_COUNT) -
+ so->so_msg_state->msg_priq[msgpri].msgq_bytes;
+ if (space < 0)
+ space = 0;
+ return (space);
+}
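+
+/*
+ * Worked example (hypothetical values): with the MSG_PRI_COUNT priority
+ * queues sharing sb_idealsize = 128 KB equally, each queue gets a budget
+ * of 128 KB / MSG_PRI_COUNT; if the queue for this message's priority
+ * already holds msgq_bytes at or above that budget, msgq_sbspace()
+ * returns 0 and the send must wait.
+ */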
+
+/* do we have to send all at once on a socket? */
+int
+sosendallatonce(struct socket *so)
+{
+ return (so->so_proto->pr_flags & PR_ATOMIC);
+}
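+
+/*
+ * For example, datagram protocols such as UDP set PR_ATOMIC, so each
+ * send must fit in the socket buffer as a single record, whereas stream
+ * protocols such as TCP leave it clear and may transmit in pieces.
+ */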
+
+/* can we read something from so? */
+int
+soreadable(struct socket *so)
+{
+ return (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat ||
+ ((so->so_state & SS_CANTRCVMORE)
+#if CONTENT_FILTER
+ && cfil_sock_data_pending(&so->so_rcv) == 0
+#endif /* CONTENT_FILTER */
+ ) ||
+ so->so_comp.tqh_first || so->so_error);
+}
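+
+/*
+ * Example: with the default receive low-water mark of 1, a socket becomes
+ * readable as soon as a single byte is buffered; raising sb_lowat (via
+ * SO_RCVLOWAT) defers the wakeup until that many bytes accumulate, or
+ * until EOF, a completed connection, or an error makes it readable anyway.
+ */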
+
+/* can we write something to so? */
+
+int
+sowriteable(struct socket *so)
+{
+ if ((so->so_state & SS_CANTSENDMORE) ||
+ so->so_error > 0)
+ return (1);
+
+ if (!so_wait_for_if_feedback(so) &&
+ sbspace(&so->so_snd) >= so->so_snd.sb_lowat &&
+ ((so->so_state & SS_ISCONNECTED) ||
+ !(so->so_proto->pr_flags & PR_CONNREQUIRED))) {
+ if (so->so_flags & SOF_NOTSENT_LOWAT) {
+ if ((SOCK_DOM(so) == PF_INET6 ||
+ SOCK_DOM(so) == PF_INET) &&
+ so->so_type == SOCK_STREAM) {
+ return (tcp_notsent_lowat_check(so));
+ }
+#if MPTCP
+ else if ((SOCK_DOM(so) == PF_MULTIPATH) &&
+ (SOCK_PROTO(so) == IPPROTO_TCP)) {
+ return (mptcp_notsent_lowat_check(so));
+ }
+#endif
+ else {
+ return (1);
+ }
+ } else {
+ return (1);
+ }
+ }
+ return (0);
+}
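+
+/*
+ * Usage sketch (userland, value is illustrative): the SOF_NOTSENT_LOWAT
+ * path above is enabled with the SO_NOTSENT_LOWAT socket option, which
+ * makes writability depend on how much unsent data TCP (or MPTCP) is
+ * still holding rather than on raw send-buffer space:
+ *
+ *	int lowat = 16 * 1024;
+ *	setsockopt(fd, SOL_SOCKET, SO_NOTSENT_LOWAT, &lowat, sizeof (lowat));
+ */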
+
+/* adjust counters in sb reflecting allocation of m */
+
+void
+sballoc(struct sockbuf *sb, struct mbuf *m)
+{
+ u_int32_t cnt = 1;
+ sb->sb_cc += m->m_len;
+ if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
+ m->m_type != MT_OOBDATA)
+ sb->sb_ctl += m->m_len;
+ sb->sb_mbcnt += MSIZE;
+
+ if (m->m_flags & M_EXT) {
+ sb->sb_mbcnt += m->m_ext.ext_size;
+ cnt += (m->m_ext.ext_size >> MSIZESHIFT);
+ }
+ OSAddAtomic(cnt, &total_sbmb_cnt);
+ VERIFY(total_sbmb_cnt > 0);
+ if (total_sbmb_cnt > total_sbmb_cnt_peak)
+ total_sbmb_cnt_peak = total_sbmb_cnt;
+}
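+
+/*
+ * Worked example (hypothetical mbuf, assuming MSIZE = 256 so that
+ * MSIZESHIFT = 8): a plain mbuf charges MSIZE against sb_mbcnt and
+ * counts as 1 toward total_sbmb_cnt; an mbuf with a 2 KB external
+ * cluster (M_EXT) additionally charges ext_size bytes, so it counts as
+ * 1 + (2048 >> 8) = 9 buffers.
+ */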
+
+/* adjust counters in sb reflecting freeing of m */
+void
+sbfree(struct sockbuf *sb, struct mbuf *m)
+{
+ int cnt = -1;
+
+ sb->sb_cc -= m->m_len;
+ if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
+ m->m_type != MT_OOBDATA)
+ sb->sb_ctl -= m->m_len;
+ sb->sb_mbcnt -= MSIZE;
+ if (m->m_flags & M_EXT) {
+ sb->sb_mbcnt -= m->m_ext.ext_size;
+ cnt -= (m->m_ext.ext_size >> MSIZESHIFT);
+ }
+ OSAddAtomic(cnt, &total_sbmb_cnt);
+ VERIFY(total_sbmb_cnt >= 0);
+}
+
+/*
+ * Set lock on sockbuf sb; sleep if the lock is already held.
+ * Unless SB_NOINTR is set on the sockbuf, the sleep is interruptible.
+ * Returns with an error, without the lock held, if the sleep is interrupted.
+ */
+int
+sblock(struct sockbuf *sb, uint32_t flags)
+{
+ boolean_t nointr = ((sb->sb_flags & SB_NOINTR) || (flags & SBL_NOINTR));
+ void *lr_saved = __builtin_return_address(0);
+ struct socket *so = sb->sb_so;
+ void *wchan;
+ int error = 0;
+ thread_t tp = current_thread();
+
+ VERIFY((flags & SBL_VALID) == flags);
+
+ /* so_usecount may be 0 if we get here from sofreelastref() */
+ if (so == NULL) {
+ panic("%s: null so, sb=%p sb_flags=0x%x lr=%p\n",
+ __func__, sb, sb->sb_flags, lr_saved);
+ /* NOTREACHED */
+ } else if (so->so_usecount < 0) {
+ panic("%s: sb=%p sb_flags=0x%x sb_so=%p usecount=%d lr=%p "
+ "lrh= %s\n", __func__, sb, sb->sb_flags, so,
+ so->so_usecount, lr_saved, solockhistory_nr(so));
+ /* NOTREACHED */
+ }
+
+ /*
+ * The content filter thread must hold the sockbuf lock
+ */
+ if ((so->so_flags & SOF_CONTENT_FILTER) && sb->sb_cfil_thread == tp) {
+ /*
+ * Don't panic if we are defunct because SB_LOCK has
+ * been cleared by sodefunct()
+ */
+ if (!(so->so_flags & SOF_DEFUNCT) && !(sb->sb_flags & SB_LOCK))
+ panic("%s: SB_LOCK not held for %p\n",
+ __func__, sb);
+
+ /* Keep the sockbuf locked */
+ return (0);
+ }
+
+ if ((sb->sb_flags & SB_LOCK) && !(flags & SBL_WAIT))
+ return (EWOULDBLOCK);
+ /*
+ * We may get here from sorflush(), in which case "sb" may not
+ * point to the real socket buffer. Use the actual socket buffer
+ * address from the socket instead.
+ */
+ wchan = (sb->sb_flags & SB_RECV) ?
+ &so->so_rcv.sb_flags : &so->so_snd.sb_flags;
+
+ /*
+ * A content filter thread has exclusive access to the sockbuf
+ * until it clears sb_cfil_thread.
+ */
+ while ((sb->sb_flags & SB_LOCK) ||
+ ((so->so_flags & SOF_CONTENT_FILTER) &&
+ sb->sb_cfil_thread != NULL)) {
+ lck_mtx_t *mutex_held;
+
+ /*
+ * XXX: This code should be moved up above, outside of this loop;
+ * however, we may get here as part of sofreelastref(), and at that
+ * time pr_getlock() may no longer be able to return us the lock.
+ * This will be fixed in the future.
+ */
+ if (so->so_proto->pr_getlock != NULL)
+ mutex_held = (*so->so_proto->pr_getlock)(so, 0);
+ else
+ mutex_held = so->so_proto->pr_domain->dom_mtx;
+
+ lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
+
+ sb->sb_wantlock++;
+ VERIFY(sb->sb_wantlock != 0);
+
+ error = msleep(wchan, mutex_held,
+ nointr ? PSOCK : PSOCK | PCATCH,
+ nointr ? "sb_lock_nointr" : "sb_lock", NULL);
+
+ VERIFY(sb->sb_wantlock != 0);
+ sb->sb_wantlock--;
+
+ if (error == 0 && (so->so_flags & SOF_DEFUNCT) &&
+ !(flags & SBL_IGNDEFUNCT)) {
+ error = EBADF;
+ SODEFUNCTLOG(("%s[%d]: defunct so 0x%llx [%d,%d] "
+ "(%d)\n", __func__, proc_selfpid(),
+ (uint64_t)VM_KERNEL_ADDRPERM(so),
+ SOCK_DOM(so), SOCK_TYPE(so), error));
+ }
+
+ if (error != 0)
+ return (error);
+ }
+ sb->sb_flags |= SB_LOCK;
+ return (0);
+}
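+
+/*
+ * Usage sketch (illustrative): callers pair sblock() with sbunlock()
+ * while holding the socket lock, e.g.:
+ *
+ *	error = sblock(&so->so_rcv, SBL_WAIT);
+ *	if (error == 0) {
+ *		... manipulate so->so_rcv ...
+ *		sbunlock(&so->so_rcv, TRUE);	/- keep the socket lock held
+ *	}
+ *
+ * Passing FALSE as the second argument additionally drops a use count
+ * and releases the socket mutex on the way out.
+ */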
+
+/*
+ * Release lock on sockbuf sb
+ */
+void
+sbunlock(struct sockbuf *sb, boolean_t keeplocked)
+{
+ void *lr_saved = __builtin_return_address(0);
+ struct socket *so = sb->sb_so;
+ thread_t tp = current_thread();
+
+ /* so_usecount may be 0 if we get here from sofreelastref() */
+ if (so == NULL) {
+ panic("%s: null so, sb=%p sb_flags=0x%x lr=%p\n",
+ __func__, sb, sb->sb_flags, lr_saved);
+ /* NOTREACHED */
+ } else if (so->so_usecount < 0) {
+ panic("%s: sb=%p sb_flags=0x%x sb_so=%p usecount=%d lr=%p "
+ "lrh= %s\n", __func__, sb, sb->sb_flags, so,
+ so->so_usecount, lr_saved, solockhistory_nr(so));
+ /* NOTREACHED */
+ }
+
+ /*
+ * The content filter thread must hold the sockbuf lock
+ */
+ if ((so->so_flags & SOF_CONTENT_FILTER) && sb->sb_cfil_thread == tp) {
+ /*
+ * Don't panic if we are defunct because SB_LOCK has
+ * been cleared by sodefunct()
+ */
+ if (!(so->so_flags & SOF_DEFUNCT) &&
+ !(sb->sb_flags & SB_LOCK) &&
+ !(so->so_state & SS_DEFUNCT) &&
+ !(so->so_flags1 & SOF1_DEFUNCTINPROG)) {
+ panic("%s: SB_LOCK not held for %p\n",
+ __func__, sb);
+ }
+ /* Keep the sockbuf locked and proceed */
+ } else {
+ VERIFY((sb->sb_flags & SB_LOCK) ||
+ (so->so_state & SS_DEFUNCT) ||
+ (so->so_flags1 & SOF1_DEFUNCTINPROG));
+
+ sb->sb_flags &= ~SB_LOCK;
+
+ if (sb->sb_wantlock > 0) {
+ /*
+ * We may get here from sorflush(), in which case "sb" may not
+ * point to the real socket buffer. Use the actual socket
+ * buffer address from the socket instead.
+ */
+ wakeup((sb->sb_flags & SB_RECV) ? &so->so_rcv.sb_flags :
+ &so->so_snd.sb_flags);
+ }
+ }
+
+ if (!keeplocked) { /* unlock on exit */
+ lck_mtx_t *mutex_held;
+
+ if (so->so_proto->pr_getlock != NULL)
+ mutex_held = (*so->so_proto->pr_getlock)(so, 0);
+ else
+ mutex_held = so->so_proto->pr_domain->dom_mtx;
+
+ lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
+
+ VERIFY(so->so_usecount != 0);
+ so->so_usecount--;
+ so->unlock_lr[so->next_unlock_lr] = lr_saved;
+ so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
+ lck_mtx_unlock(mutex_held);
+ }