+#if CONFIG_MACF_SOCKET_SUBSET
+ /*
+ * Call the MAC framework for policy checking if we're in
+ * the user process context and the socket isn't connected.
+ */
+ if (p != kernproc && !(so->so_state & SS_ISCONNECTED)) {
+ struct mbuf *m0 = m;
+ /*
+ * Dequeue this record (temporarily) from the receive
+ * list since we're about to drop the socket's lock
+ * where a new record may arrive and be appended to
+ * the list. Upon MAC policy failure, the record
+ * will be freed. Otherwise, we'll add it back to
+ * the head of the list. We cannot rely on SB_LOCK
+ * because append operation uses the socket's lock.
+ */
+ /*
+ * Walk the record's mbuf chain, uncharging each mbuf from
+ * the receive buffer accounting (sbfree adjusts sb_cc and
+ * sb_mbcnt); this is undone by the sballoc() loop below on
+ * the policy-success path.
+ */
+ do {
+ m->m_nextpkt = NULL;
+ sbfree(&so->so_rcv, m);
+ m = m->m_next;
+ } while (m != NULL);
+ m = m0;
+ so->so_rcv.sb_mb = nextrecord;
+ SB_EMPTY_FIXUP(&so->so_rcv);
+ SBLASTRECORDCHK(&so->so_rcv, "soreceive 1a");
+ SBLASTMBUFCHK(&so->so_rcv, "soreceive 1a");
+ /*
+ * NOTE(review): the socket is unlocked from here until the
+ * socket_lock() calls below; other threads may append new
+ * records to so_rcv in this window — the re-enqueue code
+ * below accounts for that by re-linking at the head.
+ */
+ socket_unlock(so, 0);
+ /*
+ * NOTE(review): mtod() assumes the record's leading mbuf
+ * holds the peer's sockaddr (an MT_SONAME mbuf) — confirm
+ * this invariant holds for every protocol that reaches
+ * this path.
+ */
+ if (mac_socket_check_received(proc_ucred(p), so,
+ mtod(m, struct sockaddr *)) != 0) {
+ /*
+ * MAC policy failure; free this record and
+ * process the next record (or block until
+ * one is available). We have adjusted sb_cc
+ * and sb_mbcnt above so there is no need to
+ * call sbfree() again.
+ */
+ do {
+ m = m_free(m);
+ } while (m != NULL);
+ /*
+ * Clear SB_LOCK but don't unlock the socket.
+ * Process the next record or wait for one.
+ */
+ socket_lock(so, 0);
+ sbunlock(&so->so_rcv, 1);
+ goto restart;
+ }
+ socket_lock(so, 0);
+ /*
+ * Re-adjust the socket receive list and re-enqueue
+ * the record in front of any packets which may have
+ * been appended while we dropped the lock.
+ */
+ /*
+ * Re-charge every mbuf of the record back to the receive
+ * buffer accounting; the loop handles all but the last
+ * mbuf, and the trailing sballoc() charges the last one
+ * (m points at it afterwards).
+ */
+ for (m = m0; m->m_next != NULL; m = m->m_next)
+ sballoc(&so->so_rcv, m);
+ sballoc(&so->so_rcv, m);
+ /*
+ * Buffer went (or stayed) empty while unlocked: this
+ * record becomes both the last record and the tail mbuf.
+ */
+ if (so->so_rcv.sb_mb == NULL) {
+ so->so_rcv.sb_lastrecord = m0;
+ so->so_rcv.sb_mbtail = m;
+ }
+ m = m0;
+ /*
+ * Splice the record back in at the head: anything appended
+ * during the unlocked window becomes our nextrecord.
+ */
+ nextrecord = m->m_nextpkt = so->so_rcv.sb_mb;
+ so->so_rcv.sb_mb = m;
+ SBLASTRECORDCHK(&so->so_rcv, "soreceive 1b");
+ SBLASTMBUFCHK(&so->so_rcv, "soreceive 1b");
+ }
+#endif /* CONFIG_MACF_SOCKET_SUBSET */