+ socket_unlock(so, 0);
+ error = (*pr->pr_domain->dom_externalize)(cm);
+ socket_lock(so, 0);
+ } else {
+ error = 0;
+ }
+
+ if (controlp != NULL && error == 0) {
+ *controlp = cm;
+ controlp = &(*controlp)->m_next;
+ } else {
+ (void) m_free(cm);
+ }
+ cm = cmn;
+ }
+ /*
+ * Update the value of nextrecord in case we received new
+ * records when the socket was unlocked above for
+ * externalizing SCM_RIGHTS.
+ */
+ if (m != NULL) {
+ nextrecord = sb_rcv->sb_mb->m_nextpkt;
+ } else {
+ nextrecord = sb_rcv->sb_mb;
+ }
+
+done:
+ *mp = m;
+ *nextrecordp = nextrecord;
+
+ return error;
+}
+
+/*
+ * If we have less data than requested, block awaiting more
+ * (subject to any timeout) if:
+ * 1. the current count is less than the low water mark, or
+ * 2. MSG_WAITALL is set, and it is possible to do the entire
+ * receive operation at once if we block (resid <= hiwat), and
+ * 3. MSG_DONTWAIT is not set.
+ * If MSG_WAITALL is set but resid is larger than the receive buffer,
+ * we have to do the receive in sections, and thus risk returning
+ * a short count if a timeout or signal occurs after we start.
+ */
+static boolean_t
+so_should_wait(struct socket *so, struct uio *uio, struct mbuf *m, int flags)
+{
+ struct protosw *pr = so->so_proto;
+
+ /* No mbufs in the receive-queue? Wait! */
+ if (m == NULL) {
+ return true;
+ }
+
+ /* Not enough data in the receive socket buffer; we may have to wait */
+ if ((flags & MSG_DONTWAIT) == 0 && so->so_rcv.sb_cc < uio_resid(uio) &&
+ m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0) {
+ /*
+ * The application set the receive low-water mark, so we should
+ * wait for that much data to be present.
+ */
+ if (so->so_rcv.sb_cc < so->so_rcv.sb_lowat) {
+ return true;
+ }
+
+ /*
+ * The application wants all the data, so let's try to do the
+ * receive operation at once by waiting for everything to
+ * be there.
+ */
+ if ((flags & MSG_WAITALL) && uio_resid(uio) <= so->so_rcv.sb_hiwat) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/*
+ * Implement receive operations on a socket.
+ * We depend on the way that records are added to the sockbuf
+ * by sbappend*. In particular, each record (mbufs linked through m_next)
+ * must begin with an address if the protocol so specifies,
+ * followed by an optional mbuf or mbufs containing ancillary data,
+ * and then zero or more mbufs of data.
+ * In order to avoid blocking network input for the entire time here,
+ * we drop the socket lock while doing the actual copy to user space.
+ * Although the sockbuf is locked, new data may still be appended,
+ * and thus we must maintain consistency of the sockbuf during that time.
+ *
+ * The caller may receive the data as a single mbuf chain by supplying
+ * an mbuf **mp0 for use in returning the chain. The uio is then used
+ * only for the count in uio_resid.
+ *
+ * Returns: 0 Success
+ * ENOBUFS
+ * ENOTCONN
+ * EWOULDBLOCK
+ * uiomove:EFAULT
+ * sblock:EWOULDBLOCK
+ * sblock:EINTR
+ * sbwait:EBADF
+ * sbwait:EINTR
+ * sodelayed_copy:EFAULT
+ * <pru_rcvoob>:EINVAL[TCP]
+ * <pru_rcvoob>:EWOULDBLOCK[TCP]
+ * <pru_rcvoob>:???
+ * <pr_domain->dom_externalize>:EMSGSIZE[AF_UNIX]
+ * <pr_domain->dom_externalize>:ENOBUFS[AF_UNIX]
+ * <pr_domain->dom_externalize>:???
+ *
+ * Notes: Additional return values from calls through <pru_rcvoob> and
+ * <pr_domain->dom_externalize> depend on protocols other than
+ * TCP or AF_UNIX; the TCP and AF_UNIX cases are documented above.
+ */
+int
+soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
+ struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
+{
+ struct mbuf *m, **mp, *ml = NULL;
+ struct mbuf *nextrecord, *free_list;
+ int flags, error, offset;
+ user_ssize_t len;
+ struct protosw *pr = so->so_proto;
+ int moff, type = 0;
+ user_ssize_t orig_resid = uio_resid(uio);
+ user_ssize_t delayed_copy_len;
+ int can_delay;
+ struct proc *p = current_proc();
+ boolean_t en_tracing = FALSE;
+
+ /*
+ * Sanity check on the length passed by caller as we are making 'int'
+ * comparisons
+ */
+ if (orig_resid < 0 || orig_resid > INT_MAX) {
+ return EINVAL;
+ }
+
+ KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_START, so,
+ uio_resid(uio), so->so_rcv.sb_cc, so->so_rcv.sb_lowat,
+ so->so_rcv.sb_hiwat);
+
+ socket_lock(so, 1);
+ so_update_last_owner_locked(so, p);
+ so_update_policy(so);
+
+#ifdef MORE_LOCKING_DEBUG
+ if (so->so_usecount == 1) {
+ panic("%s: so=%x no other reference on socket\n", __func__, so);
+ /* NOTREACHED */
+ }
+#endif
+ mp = mp0;
+ if (psa != NULL) {
+ *psa = NULL;
+ }
+ if (controlp != NULL) {
+ *controlp = NULL;
+ }
+ if (flagsp != NULL) {
+ flags = *flagsp & ~MSG_EOR;
+ } else {
+ flags = 0;
+ }
+
+ /*
+ * If a recv attempt is made on a previously-accepted socket
+ * that has been marked as inactive (disconnected), reject
+ * the request.
+ */
+ if (so->so_flags & SOF_DEFUNCT) {
+ struct sockbuf *sb = &so->so_rcv;
+
+ error = ENOTCONN;
+ SODEFUNCTLOG("%s[%d, %s]: defunct so 0x%llx [%d,%d] (%d)\n",
+ __func__, proc_pid(p), proc_best_name(p),
+ (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
+ SOCK_DOM(so), SOCK_TYPE(so), error);
+ /*
+ * This socket should have been disconnected and flushed
+ * prior to being returned from sodefunct(); there should
+ * be no data on its receive list, so panic otherwise.
+ */
+ if (so->so_state & SS_DEFUNCT) {
+ sb_empty_assert(sb, __func__);
+ }
+ socket_unlock(so, 1);
+ return error;
+ }
+
+ if ((so->so_flags1 & SOF1_PRECONNECT_DATA) &&
+ pr->pr_usrreqs->pru_preconnect) {
+ /*
+ * A user may set the CONNECT_RESUME_ON_READ_WRITE-flag but not
+ * call write() right after this. *If* the app then calls a read
+ * we do not want to block this read indefinitely. Thus,
+ * we trigger a connect so that the session gets initiated.
+ */
+ error = (*pr->pr_usrreqs->pru_preconnect)(so);
+
+ if (error) {
+ socket_unlock(so, 1);
+ return error;
+ }
+ }
+
+ if (ENTR_SHOULDTRACE &&
+ (SOCK_CHECK_DOM(so, AF_INET) || SOCK_CHECK_DOM(so, AF_INET6))) {
+ /*
+ * enable energy tracing for inet sockets that go over
+ * non-loopback interfaces only.
+ */
+ struct inpcb *inp = sotoinpcb(so);
+ if (inp->inp_last_outifp != NULL &&
+ !(inp->inp_last_outifp->if_flags & IFF_LOOPBACK)) {
+ en_tracing = TRUE;
+ KERNEL_ENERGYTRACE(kEnTrActKernSockRead, DBG_FUNC_START,
+ VM_KERNEL_ADDRPERM(so),
+ ((so->so_state & SS_NBIO) ?
+ kEnTrFlagNonBlocking : 0),
+ (int64_t)orig_resid);
+ }
+ }
+
+ /*
+ * When SO_WANTOOBFLAG is set we try to get out-of-band data
+ * regardless of the flags argument. Here is the case where
+ * out-of-band data is not inline.
+ */
+ if ((flags & MSG_OOB) ||
+ ((so->so_options & SO_WANTOOBFLAG) != 0 &&
+ (so->so_options & SO_OOBINLINE) == 0 &&
+ (so->so_oobmark || (so->so_state & SS_RCVATMARK)))) {
+ m = m_get(M_WAIT, MT_DATA);
+ if (m == NULL) {
+ socket_unlock(so, 1);
+ KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END,
+ ENOBUFS, 0, 0, 0, 0);
+ return ENOBUFS;
+ }
+ error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
+ if (error) {
+ goto bad;
+ }
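+ /*
+ * Copy the out-of-band data to user space with the socket
+ * unlocked, since uiomove() may fault and sleep.
+ */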
+ socket_unlock(so, 0);
+ do {
+ error = uiomove(mtod(m, caddr_t),
+ imin(uio_resid(uio), m->m_len), uio);
+ m = m_free(m);
+ } while (uio_resid(uio) && error == 0 && m != NULL);
+ socket_lock(so, 0);
+bad:
+ if (m != NULL) {
+ m_freem(m);
+ }
+
+ if ((so->so_options & SO_WANTOOBFLAG) != 0) {
+ if (error == EWOULDBLOCK || error == EINVAL) {
+ /*
+ * Let's try to get normal data:
+ * EWOULDBLOCK: out-of-band data not
+ * received yet. EINVAL: out-of-band data
+ * already read.
+ */
+ error = 0;
+ goto nooob;
+ } else if (error == 0 && flagsp != NULL) {
+ *flagsp |= MSG_OOB;
+ }
+ }
+ socket_unlock(so, 1);
+ if (en_tracing) {
+ KERNEL_ENERGYTRACE(kEnTrActKernSockRead, DBG_FUNC_END,
+ VM_KERNEL_ADDRPERM(so), 0,
+ (int64_t)(orig_resid - uio_resid(uio)));
+ }
+ KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error,
+ 0, 0, 0, 0);
+
+ return error;
+ }
+nooob:
+ if (mp != NULL) {
+ *mp = NULL;
+ }
+
+ if (so->so_state & SS_ISCONFIRMING && uio_resid(uio)) {
+ (*pr->pr_usrreqs->pru_rcvd)(so, 0);
+ }
+
+ free_list = NULL;
+ delayed_copy_len = 0;
+restart:
+#ifdef MORE_LOCKING_DEBUG
+ if (so->so_usecount <= 1) {
+ printf("soreceive: sblock so=0x%llx ref=%d on socket\n",
+ (uint64_t)DEBUG_KERNEL_ADDRPERM(so), so->so_usecount);
+ }
+#endif
+ /*
+ * See if the socket has been closed (SS_NOFDREF|SS_CANTRCVMORE)
+ * and if so just return to the caller. This could happen when
+ * soreceive() is called by a socket upcall function during the
+ * time the socket is freed. The socket buffer would have been
+ * locked across the upcall, therefore we cannot put this thread
+ * to sleep (else we will deadlock) or return EWOULDBLOCK (else
+ * we may livelock), because the lock on the socket buffer will
+ * only be released when the upcall routine returns to its caller.
+ * Because the socket has been officially closed, there can be
+ * no further read on it.
+ *
+ * A multipath subflow socket would have its SS_NOFDREF set by
+ * default, so check for SOF_MP_SUBFLOW socket flag; when the
+ * socket is closed for real, SOF_MP_SUBFLOW would be cleared.
+ */
+ if ((so->so_state & (SS_NOFDREF | SS_CANTRCVMORE)) ==
+ (SS_NOFDREF | SS_CANTRCVMORE) && !(so->so_flags & SOF_MP_SUBFLOW)) {
+ socket_unlock(so, 1);
+ return 0;
+ }
+
+ error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
+ if (error) {
+ socket_unlock(so, 1);
+ KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error,
+ 0, 0, 0, 0);
+ if (en_tracing) {
+ KERNEL_ENERGYTRACE(kEnTrActKernSockRead, DBG_FUNC_END,
+ VM_KERNEL_ADDRPERM(so), 0,
+ (int64_t)(orig_resid - uio_resid(uio)));
+ }
+ return error;
+ }
+
+ m = so->so_rcv.sb_mb;
+ if (so_should_wait(so, uio, m, flags)) {
+ /*
+ * Panic if we notice inconsistencies in the socket's
+ * receive list; both sb_mb and sb_cc should correctly
+ * reflect the contents of the list, otherwise we may
+ * end up with false positives during select() or poll()
+ * which could put the application in a bad state.
+ */
+ SB_MB_CHECK(&so->so_rcv);
+
+ if (so->so_error) {
+ if (m != NULL) {
+ goto dontblock;
+ }
+ error = so->so_error;
+ if ((flags & MSG_PEEK) == 0) {
+ so->so_error = 0;
+ }
+ goto release;
+ }
+ if (so->so_state & SS_CANTRCVMORE) {
+#if CONTENT_FILTER
+ /*
+ * Deal with half closed connections
+ */
+ if ((so->so_state & SS_ISDISCONNECTED) == 0 &&
+ cfil_sock_data_pending(&so->so_rcv) != 0) {
+ CFIL_LOG(LOG_INFO,
+ "so %llx ignore SS_CANTRCVMORE",
+ (uint64_t)DEBUG_KERNEL_ADDRPERM(so));
+ } else
+#endif /* CONTENT_FILTER */
+ if (m != NULL) {
+ goto dontblock;
+ } else {
+ goto release;
+ }
+ }
+ for (; m != NULL; m = m->m_next) {
+ if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
+ m = so->so_rcv.sb_mb;
+ goto dontblock;
+ }
+ }
+ if ((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING)) == 0 &&
+ (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
+ error = ENOTCONN;
+ goto release;
+ }
+ if (uio_resid(uio) == 0) {
+ goto release;
+ }
+
+ if ((so->so_state & SS_NBIO) ||
+ (flags & (MSG_DONTWAIT | MSG_NBIO))) {
+ error = EWOULDBLOCK;
+ goto release;
+ }
+ SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1");
+ SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1");
+ sbunlock(&so->so_rcv, TRUE); /* keep socket locked */
+#if EVEN_MORE_LOCKING_DEBUG
+ if (socket_debug) {
+ printf("Waiting for socket data\n");
+ }
+#endif
+
+ /*
+ * Depending on the protocol (e.g. TCP), the following
+ * might cause the socket lock to be dropped and later
+ * be reacquired, and more data could have arrived and
+ * have been appended to the receive socket buffer by
+ * the time it returns. Therefore, we sleep in
+ * sbwait() below only if the wait condition is still
+ * true.
+ */
+ if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb != NULL) {
+ (*pr->pr_usrreqs->pru_rcvd)(so, flags);
+ }
+
+ error = 0;
+ if (so_should_wait(so, uio, so->so_rcv.sb_mb, flags)) {
+ error = sbwait(&so->so_rcv);
+ }
+
+#if EVEN_MORE_LOCKING_DEBUG
+ if (socket_debug) {
+ printf("SORECEIVE - sbwait returned %d\n", error);
+ }
+#endif
+ if (so->so_usecount < 1) {
+ panic("%s: after 2nd sblock so=%p ref=%d on socket\n",
+ __func__, so, so->so_usecount);
+ /* NOTREACHED */
+ }
+ if (error) {
+ socket_unlock(so, 1);
+ KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error,
+ 0, 0, 0, 0);
+ if (en_tracing) {
+ KERNEL_ENERGYTRACE(kEnTrActKernSockRead, DBG_FUNC_END,
+ VM_KERNEL_ADDRPERM(so), 0,
+ (int64_t)(orig_resid - uio_resid(uio)));
+ }
+ return error;
+ }
+ goto restart;
+ }
+dontblock:
+ OSIncrementAtomicLong(&p->p_stats->p_ru.ru_msgrcv);
+ SBLASTRECORDCHK(&so->so_rcv, "soreceive 1");
+ SBLASTMBUFCHK(&so->so_rcv, "soreceive 1");
+ nextrecord = m->m_nextpkt;
+
+ if ((pr->pr_flags & PR_ADDR) && m->m_type == MT_SONAME) {
+ error = soreceive_addr(p, so, psa, flags, &m, &nextrecord,
+ mp0 == NULL);
+ if (error == ERESTART) {
+ goto restart;
+ } else if (error != 0) {
+ goto release;
+ }
+ orig_resid = 0;
+ }
+
+ /*
+ * Process one or more MT_CONTROL mbufs present before any data mbufs
+ * in the first mbuf chain on the socket buffer. If MSG_PEEK, we
+ * just copy the data; if !MSG_PEEK, we call into the protocol to
+ * perform externalization.
+ */
+ if (m != NULL && m->m_type == MT_CONTROL) {
+ error = soreceive_ctl(so, controlp, flags, &m, &nextrecord);
+ if (error != 0) {
+ goto release;
+ }
+ orig_resid = 0;
+ }
+
+ if (m != NULL) {
+ if (!(flags & MSG_PEEK)) {
+ /*
+ * We get here because m points to an mbuf following
+ * any MT_SONAME or MT_CONTROL mbufs which have been
+ * processed above. In any case, m should be pointing
+ * to the head of the mbuf chain, and the nextrecord
+ * should be either NULL or equal to m->m_nextpkt.
+ * See comments above about SB_LOCK.
+ */
+ if (m != so->so_rcv.sb_mb ||
+ m->m_nextpkt != nextrecord) {
+ panic("%s: post-control !sync so=%p m=%p "
+ "nextrecord=%p\n", __func__, so, m,
+ nextrecord);
+ /* NOTREACHED */
+ }
+ if (nextrecord == NULL) {
+ so->so_rcv.sb_lastrecord = m;
+ }
+ }
+ type = m->m_type;
+ if (type == MT_OOBDATA) {
+ flags |= MSG_OOB;
+ }
+ } else {
+ if (!(flags & MSG_PEEK)) {
+ SB_EMPTY_FIXUP(&so->so_rcv);
+ }
+ }
+ SBLASTRECORDCHK(&so->so_rcv, "soreceive 2");
+ SBLASTMBUFCHK(&so->so_rcv, "soreceive 2");
+
+ moff = 0;
+ offset = 0;
+
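+ /*
+ * Copies may be deferred (fully-consumed mbufs are batched onto
+ * free_list and copied out later by sodelayed_copy()) only when we
+ * are consuming data rather than peeking, and the request is large
+ * enough to make the deferral worthwhile.
+ */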
+ if (!(flags & MSG_PEEK) && uio_resid(uio) > sorecvmincopy) {
+ can_delay = 1;
+ } else {
+ can_delay = 0;
+ }
+
+ while (m != NULL &&
+ (uio_resid(uio) - delayed_copy_len) > 0 && error == 0) {
+ if (m->m_type == MT_OOBDATA) {
+ if (type != MT_OOBDATA) {
+ break;
+ }
+ } else if (type == MT_OOBDATA) {
+ break;
+ }
+ /*
+ * Make sure to always set MSG_OOB when getting
+ * out-of-band data inline.
+ */
+ if ((so->so_options & SO_WANTOOBFLAG) != 0 &&
+ (so->so_options & SO_OOBINLINE) != 0 &&
+ (so->so_state & SS_RCVATMARK) != 0) {
+ flags |= MSG_OOB;
+ }
+ so->so_state &= ~SS_RCVATMARK;
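+ /*
+ * Bound this copy by the remaining request, the distance to the
+ * OOB mark, and the data left in the current mbuf.
+ */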
+ len = uio_resid(uio) - delayed_copy_len;
+ if (so->so_oobmark && len > so->so_oobmark - offset) {
+ len = so->so_oobmark - offset;
+ }
+ if (len > m->m_len - moff) {
+ len = m->m_len - moff;
+ }
+ /*
+ * If mp is set, just pass back the mbufs.
+ * Otherwise copy them out via the uio, then free.
+ * Sockbuf must be consistent here (sb_mb points to the current
+ * mbuf, nextrecord points to the next record) when we drop priority;
+ * we must note any additions to the sockbuf when we
+ * block interrupts again.
+ */
+ if (mp == NULL) {
+ SBLASTRECORDCHK(&so->so_rcv, "soreceive uiomove");
+ SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove");
+ if (can_delay && len == m->m_len) {
+ /*
+ * Only delay the copy if we're consuming the
+ * mbuf, we're NOT in MSG_PEEK mode, and we
+ * have enough data to make it worthwhile to
+ * drop and retake the lock... can_delay
+ * reflects the two latter constraints;
+ * moff should always be zero in these cases.
+ */
+ delayed_copy_len += len;
+ } else {
+ if (delayed_copy_len) {
+ error = sodelayed_copy(so, uio,
+ &free_list, &delayed_copy_len);
+
+ if (error) {
+ goto release;
+ }
+ /*
+ * We can only get here if MSG_PEEK is not
+ * set; therefore, m should point at the
+ * head of the rcv queue. If it doesn't,
+ * something changed drastically while we
+ * were out from behind the lock in
+ * sodelayed_copy, perhaps a RST on the
+ * stream. In any event, the stream has
+ * been interrupted; it's probably best
+ * just to return whatever data we've
+ * moved and let the caller sort it out.
+ */
+ if (m != so->so_rcv.sb_mb) {
+ break;
+ }
+ }
+ socket_unlock(so, 0);
+ error = uiomove(mtod(m, caddr_t) + moff,
+ (int)len, uio);
+ socket_lock(so, 0);
+
+ if (error) {
+ goto release;
+ }
+ }
+ } else {
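+ /*
+ * The caller takes the mbufs via *mp0; just charge the
+ * length against the uio here.
+ */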
+ uio_setresid(uio, (uio_resid(uio) - len));
+ }
+ if (len == m->m_len - moff) {
+ if (m->m_flags & M_EOR) {
+ flags |= MSG_EOR;
+ }
+ if (flags & MSG_PEEK) {
+ m = m->m_next;
+ moff = 0;
+ } else {
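+ /*
+ * The entire mbuf was consumed: unlink it from the
+ * receive buffer and either hand it to the caller via
+ * *mp or chain it onto free_list for deferred freeing.
+ */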
+ nextrecord = m->m_nextpkt;
+ sbfree(&so->so_rcv, m);
+ m->m_nextpkt = NULL;
+
+ if (mp != NULL) {
+ *mp = m;
+ mp = &m->m_next;
+ so->so_rcv.sb_mb = m = m->m_next;
+ *mp = NULL;
+ } else {
+ if (free_list == NULL) {
+ free_list = m;
+ } else {
+ ml->m_next = m;
+ }
+ ml = m;
+ so->so_rcv.sb_mb = m = m->m_next;
+ ml->m_next = NULL;
+ }
+ if (m != NULL) {
+ m->m_nextpkt = nextrecord;
+ if (nextrecord == NULL) {
+ so->so_rcv.sb_lastrecord = m;
+ }
+ } else {
+ so->so_rcv.sb_mb = nextrecord;
+ SB_EMPTY_FIXUP(&so->so_rcv);
+ }
+ SBLASTRECORDCHK(&so->so_rcv, "soreceive 3");
+ SBLASTMBUFCHK(&so->so_rcv, "soreceive 3");
+ }
+ } else {
+ if (flags & MSG_PEEK) {
+ moff += len;
+ } else {
+ if (mp != NULL) {
+ int copy_flag;
+
+ if (flags & MSG_DONTWAIT) {
+ copy_flag = M_DONTWAIT;
+ } else {
+ copy_flag = M_WAIT;
+ }
+ *mp = m_copym(m, 0, len, copy_flag);
+ /*
+ * Failed to allocate an mbuf?
+ * Adjust uio_resid back, it was
+ * adjusted down by len bytes which
+ * we didn't copy over.
+ */
+ if (*mp == NULL) {
+ uio_setresid(uio,
+ (uio_resid(uio) + len));
+ break;
+ }
+ }
+ m->m_data += len;
+ m->m_len -= len;
+ so->so_rcv.sb_cc -= len;
+ }
+ }
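+ /*
+ * Track progress toward the out-of-band mark: when consuming,
+ * count so_oobmark down and set SS_RCVATMARK at the mark; when
+ * peeking, advance offset and stop at the mark.
+ */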
+ if (so->so_oobmark) {
+ if ((flags & MSG_PEEK) == 0) {
+ so->so_oobmark -= len;
+ if (so->so_oobmark == 0) {
+ so->so_state |= SS_RCVATMARK;
+ break;
+ }
+ } else {
+ offset += len;
+ if (offset == so->so_oobmark) {
+ break;
+ }
+ }
+ }
+ if (flags & MSG_EOR) {
+ break;
+ }
+ /*
+ * If the MSG_WAITALL or MSG_WAITSTREAM flag is set
+ * (for non-atomic socket), we must not quit until
+ * "uio->uio_resid == 0" or an error termination.
+ * If a signal/timeout occurs, return with a short
+ * count but without error. Keep sockbuf locked
+ * against other readers.
+ */
+ while (flags & (MSG_WAITALL | MSG_WAITSTREAM) && m == NULL &&
+ (uio_resid(uio) - delayed_copy_len) > 0 &&
+ !sosendallatonce(so) && !nextrecord) {
+ if (so->so_error || ((so->so_state & SS_CANTRCVMORE)
+#if CONTENT_FILTER
+ && cfil_sock_data_pending(&so->so_rcv) == 0
+#endif /* CONTENT_FILTER */
+ )) {
+ goto release;
+ }
+
+ /*
+ * Depending on the protocol (e.g. TCP), the following
+ * might cause the socket lock to be dropped and later
+ * be reacquired, and more data could have arrived and
+ * have been appended to the receive socket buffer by
+ * the time it returns. Therefore, we sleep in
+ * sbwait() below only if the socket buffer is
+ * empty, in order to avoid a false sleep.
+ */
+ if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb != NULL) {
+ (*pr->pr_usrreqs->pru_rcvd)(so, flags);
+ }
+
+ SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2");
+ SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2");
+
+ if (so->so_rcv.sb_mb == NULL && sbwait(&so->so_rcv)) {
+ error = 0;
+ goto release;
+ }
+ /*
+ * We have to wait until after we get back from the sbwait
+ * to do the copy, because we will drop the lock if we
+ * have enough data that has been delayed. By dropping
+ * the lock we open up a window allowing the netisr
+ * thread to process incoming packets and to change
+ * the state of this socket. We're issuing the sbwait
+ * because the socket is empty and we're expecting the
+ * netisr thread to wake us up when more packets arrive;
+ * if we allowed that processing to happen and then slept
+ * in sbwait, we could stall forever with packets sitting
+ * in the socket if no further packets arrive from the
+ * remote side.
+ *
+ * We want to copy before we've collected all the data
+ * to satisfy this request, to allow the copy to overlap
+ * the incoming packet processing on an MP system.
+ */
+ if (delayed_copy_len > sorecvmincopy &&
+ (delayed_copy_len > (so->so_rcv.sb_hiwat / 2))) {
+ error = sodelayed_copy(so, uio,
+ &free_list, &delayed_copy_len);
+
+ if (error) {
+ goto release;
+ }
+ }
+ m = so->so_rcv.sb_mb;
+ if (m != NULL) {
+ nextrecord = m->m_nextpkt;
+ }
+ SB_MB_CHECK(&so->so_rcv);
+ }
+ }
+#ifdef MORE_LOCKING_DEBUG
+ if (so->so_usecount <= 1) {
+ panic("%s: after big while so=%p ref=%d on socket\n",
+ __func__, so, so->so_usecount);
+ /* NOTREACHED */
+ }
+#endif
+
+ if (m != NULL && pr->pr_flags & PR_ATOMIC) {
+ if (so->so_options & SO_DONTTRUNC) {
+ flags |= MSG_RCVMORE;
+ } else {
+ flags |= MSG_TRUNC;
+ if ((flags & MSG_PEEK) == 0) {
+ (void) sbdroprecord(&so->so_rcv);
+ }
+ }
+ }
+
+ /*
+ * pru_rcvd below (for TCP) may cause more data to be received
+ * if the socket lock is dropped prior to sending the ACK; some
+ * legacy OpenTransport applications don't handle this well
+ * (if it receives less data than requested while MSG_HAVEMORE
+ * is set), and so we set the flag now based on what we know
+ * prior to calling pru_rcvd.
+ */
+ if ((so->so_options & SO_WANTMORE) && so->so_rcv.sb_cc > 0) {
+ flags |= MSG_HAVEMORE;
+ }
+
+ if ((flags & MSG_PEEK) == 0) {
+ if (m == NULL) {
+ so->so_rcv.sb_mb = nextrecord;
+ /*
+ * First part is an inline SB_EMPTY_FIXUP(). Second
+ * part makes sure sb_lastrecord is up-to-date if
+ * there is still data in the socket buffer.
+ */
+ if (so->so_rcv.sb_mb == NULL) {
+ so->so_rcv.sb_mbtail = NULL;
+ so->so_rcv.sb_lastrecord = NULL;
+ } else if (nextrecord->m_nextpkt == NULL) {
+ so->so_rcv.sb_lastrecord = nextrecord;
+ }
+ SB_MB_CHECK(&so->so_rcv);
+ }
+ SBLASTRECORDCHK(&so->so_rcv, "soreceive 4");
+ SBLASTMBUFCHK(&so->so_rcv, "soreceive 4");
+ if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) {
+ (*pr->pr_usrreqs->pru_rcvd)(so, flags);
+ }
+ }
+
+ if (delayed_copy_len) {
+ error = sodelayed_copy(so, uio, &free_list, &delayed_copy_len);
+ if (error) {
+ goto release;
+ }
+ }
+ if (free_list != NULL) {
+ m_freem_list(free_list);
+ free_list = NULL;
+ }
+
+ if (orig_resid == uio_resid(uio) && orig_resid &&
+ (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
+ sbunlock(&so->so_rcv, TRUE); /* keep socket locked */
+ goto restart;
+ }
+
+ if (flagsp != NULL) {
+ *flagsp |= flags;
+ }
+release:
+#ifdef MORE_LOCKING_DEBUG
+ if (so->so_usecount <= 1) {
+ panic("%s: release so=%p ref=%d on socket\n", __func__,
+ so, so->so_usecount);
+ /* NOTREACHED */
+ }
+#endif
+ if (delayed_copy_len) {
+ error = sodelayed_copy(so, uio, &free_list, &delayed_copy_len);
+ }
+
+ if (free_list != NULL) {
+ m_freem_list(free_list);
+ }
+
+ sbunlock(&so->so_rcv, FALSE); /* will unlock socket */
+
+ if (en_tracing) {
+ KERNEL_ENERGYTRACE(kEnTrActKernSockRead, DBG_FUNC_END,
+ VM_KERNEL_ADDRPERM(so),
+ ((error == EWOULDBLOCK) ? kEnTrFlagNoWork : 0),
+ (int64_t)(orig_resid - uio_resid(uio)));
+ }
+ KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, so, uio_resid(uio),
+ so->so_rcv.sb_cc, 0, error);
+
+ return error;
+}
+
+/*
+ * Returns: 0 Success
+ * uiomove:EFAULT
+ */
+static int
+sodelayed_copy(struct socket *so, struct uio *uio, struct mbuf **free_list,
+ user_ssize_t *resid)
+{
+ int error = 0;
+ struct mbuf *m;
+
+ m = *free_list;
+
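+ /*
+ * The mbufs on free_list were already unlinked from the receive
+ * buffer, so it is safe to drop the socket lock while copying
+ * them out to user space.
+ */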
+ socket_unlock(so, 0);
+
+ while (m != NULL && error == 0) {
+ error = uiomove(mtod(m, caddr_t), (int)m->m_len, uio);
+ m = m->m_next;
+ }
+ m_freem_list(*free_list);
+
+ *free_list = NULL;
+ *resid = 0;
+
+ socket_lock(so, 0);
+
+ return error;
+}
+
+static int
+sodelayed_copy_list(struct socket *so, struct recv_msg_elem *msgarray,
+ u_int uiocnt, struct mbuf **free_list, user_ssize_t *resid)
+{
+#pragma unused(so)
+ int error = 0;
+ struct mbuf *ml, *m;
+ int i = 0;
+ struct uio *auio;
+
+ for (ml = *free_list, i = 0; ml != NULL && i < uiocnt;
+ ml = ml->m_nextpkt, i++) {
+ auio = msgarray[i].uio;
+ for (m = ml; m != NULL; m = m->m_next) {
+ error = uiomove(mtod(m, caddr_t), m->m_len, auio);
+ if (error != 0) {
+ goto out;
+ }
+ }
+ }
+out:
+ m_freem_list(*free_list);
+
+ *free_list = NULL;
+ *resid = 0;
+
+ return error;
+}
+
+int
+soreceive_list(struct socket *so, struct recv_msg_elem *msgarray, u_int uiocnt,
+ int *flagsp)
+{
+ struct mbuf *m;
+ struct mbuf *nextrecord;
+ struct mbuf *ml = NULL, *free_list = NULL, *free_tail = NULL;
+ int error;
+ user_ssize_t len, pktlen, delayed_copy_len = 0;
+ struct protosw *pr = so->so_proto;
+ user_ssize_t resid;
+ struct proc *p = current_proc();
+ struct uio *auio = NULL;
+ int npkts = 0;
+ int sblocked = 0;
+ struct sockaddr **psa = NULL;
+ struct mbuf **controlp = NULL;
+ int can_delay;
+ int flags;
+ struct mbuf *free_others = NULL;
+
+ KERNEL_DEBUG(DBG_FNC_SORECEIVE_LIST | DBG_FUNC_START,
+ so, uiocnt,
+ so->so_rcv.sb_cc, so->so_rcv.sb_lowat, so->so_rcv.sb_hiwat);
+
+ /*
+ * Sanity checks:
+ * - Only don't-wait style flags are supported
+ * - Only datagram sockets are supported (could be extended to raw)
+ * - Must be atomic
+ * - Protocol must support packet chains
+ * - The uio array must not be NULL (should we panic?)
+ */
+ if (flagsp != NULL) {
+ flags = *flagsp;
+ } else {
+ flags = 0;
+ }
+ if (flags & ~(MSG_PEEK | MSG_WAITALL | MSG_DONTWAIT | MSG_NEEDSA |
+ MSG_NBIO)) {
+ printf("%s invalid flags 0x%x\n", __func__, flags);
+ error = EINVAL;
+ goto out;
+ }
+ if (so->so_type != SOCK_DGRAM) {
+ error = EINVAL;
+ goto out;
+ }
+ if (sosendallatonce(so) == 0) {
+ error = EINVAL;
+ goto out;
+ }
+ if (so->so_proto->pr_usrreqs->pru_send_list == NULL) {
+ error = EPROTONOSUPPORT;
+ goto out;
+ }
+ if (msgarray == NULL) {
+ printf("%s uioarray is NULL\n", __func__);
+ error = EINVAL;
+ goto out;
+ }
+ if (uiocnt == 0) {
+ printf("%s uiocnt is 0\n", __func__);
+ error = EINVAL;
+ goto out;
+ }
+ /*
+ * Sanity check on the length passed by caller as we are making 'int'
+ * comparisons
+ */
+ resid = recv_msg_array_resid(msgarray, uiocnt);
+ if (resid < 0 || resid > INT_MAX) {
+ error = EINVAL;
+ goto out;
+ }
+
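+ /*
+ * As in soreceive(), fully-consumed mbufs may be batched and copied
+ * out later by sodelayed_copy_list() when we are not peeking;
+ * sorecvmincopy > 0 merely enables the optimization here.
+ */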
+ if (!(flags & MSG_PEEK) && sorecvmincopy > 0) {
+ can_delay = 1;
+ } else {
+ can_delay = 0;
+ }
+
+ socket_lock(so, 1);
+ so_update_last_owner_locked(so, p);
+ so_update_policy(so);
+
+#if NECP
+ so_update_necp_policy(so, NULL, NULL);
+#endif /* NECP */
+
+ /*
+ * If a recv attempt is made on a previously-accepted socket
+ * that has been marked as inactive (disconnected), reject
+ * the request.
+ */
+ if (so->so_flags & SOF_DEFUNCT) {
+ struct sockbuf *sb = &so->so_rcv;
+
+ error = ENOTCONN;
+ SODEFUNCTLOG("%s[%d, %s]: defunct so 0x%llx [%d,%d] (%d)\n",
+ __func__, proc_pid(p), proc_best_name(p),
+ (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
+ SOCK_DOM(so), SOCK_TYPE(so), error);
+ /*
+ * This socket should have been disconnected and flushed
+ * prior to being returned from sodefunct(); there should
+ * be no data on its receive list, so panic otherwise.
+ */
+ if (so->so_state & SS_DEFUNCT) {
+ sb_empty_assert(sb, __func__);
+ }
+ goto release;
+ }
+
+next:
+ /*
+ * The uio may be empty
+ */
+ if (npkts >= uiocnt) {
+ error = 0;
+ goto release;
+ }
+restart:
+ /*
+ * See if the socket has been closed (SS_NOFDREF|SS_CANTRCVMORE)
+ * and if so just return to the caller. This could happen when
+ * soreceive() is called by a socket upcall function during the
+ * time the socket is freed. The socket buffer would have been
+ * locked across the upcall, therefore we cannot put this thread
+ * to sleep (else we will deadlock) or return EWOULDBLOCK (else
+ * we may livelock), because the lock on the socket buffer will
+ * only be released when the upcall routine returns to its caller.
+ * Because the socket has been officially closed, there can be
+ * no further read on it.
+ */
+ if ((so->so_state & (SS_NOFDREF | SS_CANTRCVMORE)) ==
+ (SS_NOFDREF | SS_CANTRCVMORE)) {
+ error = 0;
+ goto release;
+ }
+
+ error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
+ if (error) {
+ goto release;
+ }
+ sblocked = 1;
+
+ m = so->so_rcv.sb_mb;
+ /*
+ * Block awaiting more datagrams if needed
+ */
+ if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
+ (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
+ ((flags & MSG_WAITALL) && npkts < uiocnt))))) {
+ /*
+ * Panic if we notice inconsistencies in the socket's
+ * receive list; both sb_mb and sb_cc should correctly
+ * reflect the contents of the list, otherwise we may
+ * end up with false positives during select() or poll()
+ * which could put the application in a bad state.
+ */
+ SB_MB_CHECK(&so->so_rcv);
+
+ if (so->so_error) {
+ error = so->so_error;
+ if ((flags & MSG_PEEK) == 0) {
+ so->so_error = 0;
+ }
+ goto release;
+ }
+ if (so->so_state & SS_CANTRCVMORE) {
+ goto release;
+ }
+ if ((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING)) == 0 &&
+ (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
+ error = ENOTCONN;
+ goto release;
+ }
+ if ((so->so_state & SS_NBIO) ||
+ (flags & (MSG_DONTWAIT | MSG_NBIO))) {
+ error = EWOULDBLOCK;
+ goto release;
+ }
+ /*
+ * Do not block if we got some data
+ */
+ if (free_list != NULL) {
+ error = 0;
+ goto release;
+ }
+
+ SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1");
+ SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1");
+
+ sbunlock(&so->so_rcv, TRUE); /* keep socket locked */
+ sblocked = 0;
+
+ error = sbwait(&so->so_rcv);
+ if (error) {
+ goto release;
+ }
+ goto restart;
+ }
+
+ OSIncrementAtomicLong(&p->p_stats->p_ru.ru_msgrcv);
+ SBLASTRECORDCHK(&so->so_rcv, "soreceive 1");
+ SBLASTMBUFCHK(&so->so_rcv, "soreceive 1");
+
+ /*
+ * Consume the current uio index as we have a datagram
+ */
+ auio = msgarray[npkts].uio;
+ resid = uio_resid(auio);
+ msgarray[npkts].which |= SOCK_MSG_DATA;
+ psa = (msgarray[npkts].which & SOCK_MSG_SA) ?
+ &msgarray[npkts].psa : NULL;
+ controlp = (msgarray[npkts].which & SOCK_MSG_CONTROL) ?
+ &msgarray[npkts].controlp : NULL;
+ npkts += 1;
+ nextrecord = m->m_nextpkt;
+
+ if ((pr->pr_flags & PR_ADDR) && m->m_type == MT_SONAME) {
+ error = soreceive_addr(p, so, psa, flags, &m, &nextrecord, 1);
+ if (error == ERESTART) {
+ goto restart;
+ } else if (error != 0) {
+ goto release;
+ }
+ }
+
+ if (m != NULL && m->m_type == MT_CONTROL) {
+ error = soreceive_ctl(so, controlp, flags, &m, &nextrecord);
+ if (error != 0) {
+ goto release;
+ }
+ }
+
+ if (m->m_pkthdr.len == 0) {
+ printf("%s:%d so %llx pkt %llx type %u pktlen null\n",
+ __func__, __LINE__,
+ (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
+ (uint64_t)DEBUG_KERNEL_ADDRPERM(m),
+ m->m_type);
+ }
+
+ /*
+ * Loop to copy the mbufs of the current record
+ * Support zero length packets
+ */
+ ml = NULL;
+ pktlen = 0;
+ while (m != NULL && (len = resid - pktlen) >= 0 && error == 0) {
+ if (m->m_len == 0) {
+ panic("%p m_len zero", m);
+ }
+ if (m->m_type == 0) {
+ panic("%p m_type zero", m);
+ }
+ /*
+ * Clip to the residual length
+ */
+ if (len > m->m_len) {
+ len = m->m_len;
+ }
+ pktlen += len;
+ /*
+ * Copy the mbufs via the uio or delay the copy.
+ * Sockbuf must be consistent here (sb_mb points to the current
+ * mbuf, nextrecord points to the next record) when we drop priority;
+ * we must note any additions to the sockbuf when we
+ * block interrupts again.
+ */
+ if (len > 0 && can_delay == 0) {
+ socket_unlock(so, 0);
+ error = uiomove(mtod(m, caddr_t), (int)len, auio);
+ socket_lock(so, 0);
+ if (error) {
+ goto release;
+ }
+ } else {
+ delayed_copy_len += len;
+ }
+
+ if (len == m->m_len) {
+ /*
+ * m was entirely copied
+ */
+ sbfree(&so->so_rcv, m);
+ nextrecord = m->m_nextpkt;
+ m->m_nextpkt = NULL;
+
+ /*
+ * Set the first packet to the head of the free list
+ */
+ if (free_list == NULL) {
+ free_list = m;
+ }
+ /*
+ * Link current packet to tail of free list
+ */
+ if (ml == NULL) {
+ if (free_tail != NULL) {
+ free_tail->m_nextpkt = m;
+ }
+ free_tail = m;
+ }
+ /*
+ * Link current mbuf to last mbuf of current packet
+ */
+ if (ml != NULL) {
+ ml->m_next = m;
+ }
+ ml = m;
+
+ /*
+ * Move next buf to head of socket buffer
+ */
+ so->so_rcv.sb_mb = m = ml->m_next;
+ ml->m_next = NULL;
+
+ if (m != NULL) {
+ m->m_nextpkt = nextrecord;
+ if (nextrecord == NULL) {
+ so->so_rcv.sb_lastrecord = m;
+ }
+ } else {
+ so->so_rcv.sb_mb = nextrecord;
+ SB_EMPTY_FIXUP(&so->so_rcv);
+ }
+ SBLASTRECORDCHK(&so->so_rcv, "soreceive 3");
+ SBLASTMBUFCHK(&so->so_rcv, "soreceive 3");
+ } else {
+ /*
+ * Stop the loop on partial copy
+ */
+ break;
+ }
+ }
+#ifdef MORE_LOCKING_DEBUG
+ if (so->so_usecount <= 1) {
+ panic("%s: after big while so=%llx ref=%d on socket\n",
+ __func__,
+ (uint64_t)DEBUG_KERNEL_ADDRPERM(so), so->so_usecount);
+ /* NOTREACHED */
+ }
+#endif
+ /*
+ * Tell the caller we made a partial copy
+ */
+ if (m != NULL) {
+ if (so->so_options & SO_DONTTRUNC) {
+ /*
+ * Copyout first the freelist then the partial mbuf
+ */
+ socket_unlock(so, 0);
+ if (delayed_copy_len) {
+ error = sodelayed_copy_list(so, msgarray,
+ uiocnt, &free_list, &delayed_copy_len);
+ }
+
+ if (error == 0) {
+ error = uiomove(mtod(m, caddr_t), (int)len,
+ auio);
+ }
+ socket_lock(so, 0);
+ if (error) {
+ goto release;
+ }
+
+ m->m_data += len;
+ m->m_len -= len;
+ so->so_rcv.sb_cc -= len;
+ flags |= MSG_RCVMORE;
+ } else {
+ (void) sbdroprecord(&so->so_rcv);
+ nextrecord = so->so_rcv.sb_mb;
+ m = NULL;
+ flags |= MSG_TRUNC;
+ }
+ }
+
+ if (m == NULL) {
+ so->so_rcv.sb_mb = nextrecord;
+ /*
+ * First part is an inline SB_EMPTY_FIXUP(). Second
+ * part makes sure sb_lastrecord is up-to-date if
+ * there is still data in the socket buffer.
+ */
+ if (so->so_rcv.sb_mb == NULL) {
+ so->so_rcv.sb_mbtail = NULL;
+ so->so_rcv.sb_lastrecord = NULL;
+ } else if (nextrecord->m_nextpkt == NULL) {
+ so->so_rcv.sb_lastrecord = nextrecord;
+ }
+ SB_MB_CHECK(&so->so_rcv);
+ }
+ SBLASTRECORDCHK(&so->so_rcv, "soreceive 4");
+ SBLASTMBUFCHK(&so->so_rcv, "soreceive 4");
+
+ /*
+ * We can continue to the next packet as long as:
+ * - We haven't exhausted the uio array
+ * - There was no error
+ * - A packet was not truncated
+ * - We can still receive more data
+ */
+ if (npkts < uiocnt && error == 0 &&
+ (flags & (MSG_RCVMORE | MSG_TRUNC)) == 0 &&
+ (so->so_state & SS_CANTRCVMORE) == 0) {
+ sbunlock(&so->so_rcv, TRUE); /* keep socket locked */
+ sblocked = 0;
+
+ goto next;
+ }
+ if (flagsp != NULL) {
+ *flagsp |= flags;
+ }
+
+release:
+ /*
+ * pru_rcvd may cause more data to be received if the socket lock
+ * is dropped so we set MSG_HAVEMORE now based on what we know.
+ * That way the caller won't be surprised if it receives less data
+ * than requested.
+ */
+ if ((so->so_options & SO_WANTMORE) && so->so_rcv.sb_cc > 0) {
+ flags |= MSG_HAVEMORE;
+ }
+
+ if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) {
+ (*pr->pr_usrreqs->pru_rcvd)(so, flags);
+ }
+
+ if (sblocked) {
+ sbunlock(&so->so_rcv, FALSE); /* will unlock socket */
+ } else {
+ socket_unlock(so, 1);
+ }
+
+ if (delayed_copy_len) {
+ error = sodelayed_copy_list(so, msgarray, uiocnt,
+ &free_list, &delayed_copy_len);
+ }
+out:
+ /*
+ * Amortize the cost of freeing the mbufs
+ */
+ if (free_list != NULL) {
+ m_freem_list(free_list);
+ }
+ if (free_others != NULL) {
+ m_freem_list(free_others);
+ }
+
+ KERNEL_DEBUG(DBG_FNC_SORECEIVE_LIST | DBG_FUNC_END, error,
+ 0, 0, 0, 0);
+ return error;
+}
+
+static int
+so_statistics_event_to_nstat_event(int64_t *input_options,
+ uint64_t *nstat_event)
+{
+ int error = 0;
+ switch (*input_options) {
+ case SO_STATISTICS_EVENT_ENTER_CELLFALLBACK:
+ *nstat_event = NSTAT_EVENT_SRC_ENTER_CELLFALLBACK;
+ break;
+ case SO_STATISTICS_EVENT_EXIT_CELLFALLBACK:
+ *nstat_event = NSTAT_EVENT_SRC_EXIT_CELLFALLBACK;
+ break;
+#if (DEBUG || DEVELOPMENT)
+ case SO_STATISTICS_EVENT_RESERVED_1:
+ *nstat_event = NSTAT_EVENT_SRC_RESERVED_1;
+ break;
+ case SO_STATISTICS_EVENT_RESERVED_2:
+ *nstat_event = NSTAT_EVENT_SRC_RESERVED_2;
+ break;
+#endif /* (DEBUG || DEVELOPMENT) */
+ default:
+ error = EINVAL;
+ break;
+ }
+ return error;
+}
+
+/*
+ * Returns: 0 Success
+ * EINVAL
+ * ENOTCONN
+ * <pru_shutdown>:EINVAL
+ * <pru_shutdown>:EADDRNOTAVAIL[TCP]
+ * <pru_shutdown>:ENOBUFS[TCP]
+ * <pru_shutdown>:EMSGSIZE[TCP]
+ * <pru_shutdown>:EHOSTUNREACH[TCP]
+ * <pru_shutdown>:ENETUNREACH[TCP]
+ * <pru_shutdown>:ENETDOWN[TCP]
+ * <pru_shutdown>:ENOMEM[TCP]
+ * <pru_shutdown>:EACCES[TCP]
+ * <pru_shutdown>:EMSGSIZE[TCP]
+ * <pru_shutdown>:ENOBUFS[TCP]
+ * <pru_shutdown>:???[TCP] [ignorable: mostly IPSEC/firewall/DLIL]
+ * <pru_shutdown>:??? [other protocol families]
+ */
+int
+soshutdown(struct socket *so, int how)
+{
+ int error;
+
+ KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_START, how, 0, 0, 0, 0);
+
+ switch (how) {
+ case SHUT_RD:
+ case SHUT_WR:
+ case SHUT_RDWR:
+ socket_lock(so, 1);
+ if ((so->so_state &
+ (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) {
+ error = ENOTCONN;
+ } else {
+ error = soshutdownlock(so, how);
+ }
+ socket_unlock(so, 1);
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_END, how, error, 0, 0, 0);
+
+ return error;
+}
+
+int
+soshutdownlock_final(struct socket *so, int how)
+{
+ struct protosw *pr = so->so_proto;
+ int error = 0;
+
+ sflt_notify(so, sock_evt_shutdown, &how);
+
+ if (how != SHUT_WR) {
+ if ((so->so_state & SS_CANTRCVMORE) != 0) {
+ /* read already shut down */
+ error = ENOTCONN;
+ goto done;
+ }
+ sorflush(so);
+ }
+ if (how != SHUT_RD) {
+ if ((so->so_state & SS_CANTSENDMORE) != 0) {
+ /* write already shut down */
+ error = ENOTCONN;
+ goto done;
+ }
+ error = (*pr->pr_usrreqs->pru_shutdown)(so);
+ }
+done:
+ KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN, how, 1, 0, 0, 0);
+ return error;
+}
+
+int
+soshutdownlock(struct socket *so, int how)
+{
+ int error = 0;
+
+#if CONTENT_FILTER
+ /*
+ * A content filter may delay the actual shutdown until it
+ * has processed the pending data
+ */
+ if (so->so_flags & SOF_CONTENT_FILTER) {
+ error = cfil_sock_shutdown(so, &how);
+ if (error == EJUSTRETURN) {
+ error = 0;
+ goto done;
+ } else if (error != 0) {
+ goto done;
+ }
+ }
+#endif /* CONTENT_FILTER */
+
+ error = soshutdownlock_final(so, how);
+
+done:
+ return error;
+}
+
+void
+sowflush(struct socket *so)
+{
+ struct sockbuf *sb = &so->so_snd;
+
+ /*
+ * Obtain lock on the socket buffer (SB_LOCK). This is required
+ * to prevent the socket buffer from being unexpectedly altered
+ * while it is used by another thread in socket send/receive.
+ *
+ * sblock() must not fail here, hence the assertion.
+ */
+ (void) sblock(sb, SBL_WAIT | SBL_NOINTR | SBL_IGNDEFUNCT);
+ VERIFY(sb->sb_flags & SB_LOCK);
+
+ sb->sb_flags &= ~(SB_SEL | SB_UPCALL);
+ sb->sb_flags |= SB_DROP;
+ sb->sb_upcall = NULL;
+ sb->sb_upcallarg = NULL;
+
+ sbunlock(sb, TRUE); /* keep socket locked */
+
+ selthreadclear(&sb->sb_sel);
+ sbrelease(sb);
+}
+
+void
+sorflush(struct socket *so)
+{
+ struct sockbuf *sb = &so->so_rcv;
+ struct protosw *pr = so->so_proto;
+ struct sockbuf asb;
+#ifdef notyet
+ lck_mtx_t *mutex_held;
+ /*
+ * XXX: This code is currently commented out, because we may get here
+ * as part of sofreelastref(), and at that time, pr_getlock() may no
+ * longer be able to return us the lock; this will be fixed in future.
+ */
+ if (so->so_proto->pr_getlock != NULL) {
+ mutex_held = (*so->so_proto->pr_getlock)(so, 0);
+ } else {
+ mutex_held = so->so_proto->pr_domain->dom_mtx;
+ }
+
+ LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
+#endif /* notyet */
+
+ sflt_notify(so, sock_evt_flush_read, NULL);
+
+ socantrcvmore(so);
+
+ /*
+ * Obtain lock on the socket buffer (SB_LOCK). This is required
+ * to prevent the socket buffer from being unexpectedly altered
+ * while it is used by another thread in socket send/receive.
+ *
+ * sblock() must not fail here, hence the assertion.
+ */
+ (void) sblock(sb, SBL_WAIT | SBL_NOINTR | SBL_IGNDEFUNCT);
+ VERIFY(sb->sb_flags & SB_LOCK);
+
+ /*
+ * Copy only the relevant fields from "sb" to "asb" which we
+ * need for sbrelease() to function. In particular, skip
+ * sb_sel as it contains the wait queue linkage, which would
+ * wreak havoc if we were to issue selthreadclear() on "asb".
+ * Make sure to not carry over SB_LOCK in "asb", as we need
+ * to acquire it later as part of sbrelease().
+ */
+ bzero(&asb, sizeof(asb));
+ asb.sb_cc = sb->sb_cc;
+ asb.sb_hiwat = sb->sb_hiwat;
+ asb.sb_mbcnt = sb->sb_mbcnt;
+ asb.sb_mbmax = sb->sb_mbmax;
+ asb.sb_ctl = sb->sb_ctl;
+ asb.sb_lowat = sb->sb_lowat;
+ asb.sb_mb = sb->sb_mb;
+ asb.sb_mbtail = sb->sb_mbtail;
+ asb.sb_lastrecord = sb->sb_lastrecord;
+ asb.sb_so = sb->sb_so;
+ asb.sb_flags = sb->sb_flags;
+ asb.sb_flags &= ~(SB_LOCK | SB_SEL | SB_KNOTE | SB_UPCALL);
+ asb.sb_flags |= SB_DROP;
+
+ /*
+ * Ideally we'd bzero() these and preserve the ones we need;
+ * but to do that we'd need to shuffle things around in the
+ * sockbuf, and we can't do it now because there are KEXTS
+ * that are directly referring to the socket structure.
+ *
+ * Setting SB_DROP acts as a barrier to prevent further appends.
+ * Clearing SB_SEL is done for selthreadclear() below.
+ */
+ sb->sb_cc = 0;
+ sb->sb_hiwat = 0;
+ sb->sb_mbcnt = 0;
+ sb->sb_mbmax = 0;
+ sb->sb_ctl = 0;
+ sb->sb_lowat = 0;
+ sb->sb_mb = NULL;
+ sb->sb_mbtail = NULL;
+ sb->sb_lastrecord = NULL;
+ sb->sb_timeo.tv_sec = 0;
+ sb->sb_timeo.tv_usec = 0;
+ sb->sb_upcall = NULL;
+ sb->sb_upcallarg = NULL;
+ sb->sb_flags &= ~(SB_SEL | SB_UPCALL);
+ sb->sb_flags |= SB_DROP;
+
+ sbunlock(sb, TRUE); /* keep socket locked */
+
+ /*
+ * Note that selthreadclear() is called on the original "sb" and
+ * not the local "asb" because of the way wait queue linkage is
+ * implemented. Given that selwakeup() may be triggered, SB_SEL
+ * should no longer be set (cleared above.)
+ */
+ selthreadclear(&sb->sb_sel);
+
+ if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose) {
+ (*pr->pr_domain->dom_dispose)(asb.sb_mb);
+ }
+
+ sbrelease(&asb);
+}
+
+/*
+ * Perhaps this routine, and sooptcopyout(), below, ought to come in
+ * an additional variant to handle the case where the option value needs
+ * to be some kind of integer, but not a specific size.
+ * In addition to their use here, these functions are also called by the
+ * protocol-level pr_ctloutput() routines.
+ *
+ * Returns: 0 Success
+ * EINVAL
+ * copyin:EFAULT
+ */
+int
+sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
+{
+ size_t valsize;
+
+ /*
+ * If the user gives us more than we wanted, we ignore it,
+ * but if we don't get the minimum length the caller
+ * wants, we return EINVAL. On success, sopt->sopt_valsize
+ * is set to however much we actually retrieved.
+ */
+ if ((valsize = sopt->sopt_valsize) < minlen) {
+ return EINVAL;
+ }
+ if (valsize > len) {
+ sopt->sopt_valsize = valsize = len;
+ }
+
+ if (sopt->sopt_p != kernproc) {
+ return copyin(sopt->sopt_val, buf, valsize);
+ }
+
+ bcopy(CAST_DOWN(caddr_t, sopt->sopt_val), buf, valsize);
+ return 0;
+}
+
+/*
+ * sooptcopyin_timeval
+ * Copy in a timeval value into tv_p, and take into account whether
+ * the calling process is 64-bit or 32-bit. Moved the sanity checking
+ * code here so that we can verify the 64-bit tv_sec value before we lose
+ * the top 32-bits assigning tv64.tv_sec to tv_p->tv_sec.
+ */
+static int
+sooptcopyin_timeval(struct sockopt *sopt, struct timeval *tv_p)
+{
+ int error;
+
+ if (proc_is64bit(sopt->sopt_p)) {
+ struct user64_timeval tv64;
+
+ if (sopt->sopt_valsize < sizeof(tv64)) {
+ return EINVAL;
+ }
+
+ sopt->sopt_valsize = sizeof(tv64);
+ if (sopt->sopt_p != kernproc) {
+ error = copyin(sopt->sopt_val, &tv64, sizeof(tv64));
+ if (error != 0) {
+ return error;
+ }
+ } else {
+ bcopy(CAST_DOWN(caddr_t, sopt->sopt_val), &tv64,
+ sizeof(tv64));
+ }
+ if (tv64.tv_sec < 0 || tv64.tv_sec > LONG_MAX ||
+ tv64.tv_usec < 0 || tv64.tv_usec >= 1000000) {
+ return EDOM;
+ }
+
+ tv_p->tv_sec = tv64.tv_sec;
+ tv_p->tv_usec = tv64.tv_usec;
+ } else {
+ struct user32_timeval tv32;
+
+ if (sopt->sopt_valsize < sizeof(tv32)) {
+ return EINVAL;
+ }
+
+ sopt->sopt_valsize = sizeof(tv32);
+ if (sopt->sopt_p != kernproc) {
+ error = copyin(sopt->sopt_val, &tv32, sizeof(tv32));
+ if (error != 0) {
+ return error;
+ }
+ } else {
+ bcopy(CAST_DOWN(caddr_t, sopt->sopt_val), &tv32,
+ sizeof(tv32));
+ }
+#ifndef __LP64__
+ /*
+ * K64todo "comparison is always false due to
+ * limited range of data type"
+ */
+ if (tv32.tv_sec < 0 || tv32.tv_sec > LONG_MAX ||
+ tv32.tv_usec < 0 || tv32.tv_usec >= 1000000) {
+ return EDOM;
+ }
+#endif
+ tv_p->tv_sec = tv32.tv_sec;
+ tv_p->tv_usec = tv32.tv_usec;
+ }
+ return 0;
+}
+
+int
+soopt_cred_check(struct socket *so, int priv, boolean_t allow_root,
+ boolean_t ignore_delegate)
+{
+ kauth_cred_t cred = NULL;
+ proc_t ep = PROC_NULL;
+ uid_t uid;
+ int error = 0;
+
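+ /*
+ * For delegated sockets (unless ignore_delegate is set), evaluate
+ * the privilege against the credential of the effective (delegate)
+ * process rather than the socket owner's credential.
+ */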
+ if (ignore_delegate == false && so->so_flags & SOF_DELEGATED) {
+ ep = proc_find(so->e_pid);
+ if (ep) {
+ cred = kauth_cred_proc_ref(ep);
+ }
+ }
+
+ uid = kauth_cred_getuid(cred ? cred : so->so_cred);
+
+ /* uid is 0 for root */
+ if (uid != 0 || !allow_root) {
+ error = priv_check_cred(cred ? cred : so->so_cred, priv, 0);
+ }
+ if (cred) {
+ kauth_cred_unref(&cred);
+ }
+ if (ep != PROC_NULL) {
+ proc_rele(ep);
+ }
+
+ return error;
+}
+
+/*
+ * Returns: 0 Success
+ * EINVAL
+ * ENOPROTOOPT
+ * ENOBUFS
+ * EDOM
+ * sooptcopyin:EINVAL
+ * sooptcopyin:EFAULT
+ * sooptcopyin_timeval:EINVAL
+ * sooptcopyin_timeval:EFAULT
+ * sooptcopyin_timeval:EDOM
+ * <pr_ctloutput>:EOPNOTSUPP[AF_UNIX]
+ * <pr_ctloutput>:???
+ * sflt_attach_private:??? [whatever a filter author chooses]
+ * <sf_setoption>:??? [whatever a filter author chooses]
+ *
+ * Notes: Other <pr_ctloutput> returns depend on the protocol family; all
+ * <sf_setoption> returns depend on what the filter author causes
+ * their filter to return.
+ */
+int
+sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock)
+{
+ int error, optval;
+ int64_t long_optval;
+ struct linger l;
+ struct timeval tv;
+
+ if (sopt->sopt_dir != SOPT_SET) {
+ sopt->sopt_dir = SOPT_SET;
+ }
+
+ if (dolock) {
+ socket_lock(so, 1);
+ }
+
+ if ((so->so_state & (SS_CANTRCVMORE | SS_CANTSENDMORE)) ==
+ (SS_CANTRCVMORE | SS_CANTSENDMORE) &&
+ (so->so_flags & SOF_NPX_SETOPTSHUT) == 0) {
+ /* the socket has been shutdown, no more sockopt's */
+ error = EINVAL;
+ goto out;
+ }
+
+ error = sflt_setsockopt(so, sopt);
+ if (error != 0) {
+ if (error == EJUSTRETURN) {
+ error = 0;
+ }
+ goto out;
+ }
+
+ if (sopt->sopt_level != SOL_SOCKET) {
+ if (so->so_proto != NULL &&
+ so->so_proto->pr_ctloutput != NULL) {
+ error = (*so->so_proto->pr_ctloutput)(so, sopt);
+ goto out;
+ }
+ error = ENOPROTOOPT;
+ } else {
+ /*
+ * Allow socket-level (SOL_SOCKET) options to be filtered by
+ * the protocol layer, if needed. A zero value returned from
+ * the handler means use default socket-level processing as
+ * done by the rest of this routine. Otherwise, any other
+ * return value indicates that the option is unsupported.
+ */
+ if (so->so_proto != NULL && (error = so->so_proto->pr_usrreqs->
+ pru_socheckopt(so, sopt)) != 0) {
+ goto out;
+ }
+
+ error = 0;
+ switch (sopt->sopt_name) {
+ case SO_LINGER:
+ case SO_LINGER_SEC:
+ error = sooptcopyin(sopt, &l, sizeof(l), sizeof(l));
+ if (error != 0) {
+ goto out;
+ }
+
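+ /*
+ * SO_LINGER stores l_linger as given; SO_LINGER_SEC converts it
+ * from seconds to clock ticks (l_linger * hz).
+ */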
+ so->so_linger = (sopt->sopt_name == SO_LINGER) ?
+ l.l_linger : l.l_linger * hz;
+ if (l.l_onoff != 0) {
+ so->so_options |= SO_LINGER;
+ } else {
+ so->so_options &= ~SO_LINGER;
+ }
+ break;
+
+ case SO_DEBUG:
+ case SO_KEEPALIVE:
+ case SO_DONTROUTE:
+ case SO_USELOOPBACK:
+ case SO_BROADCAST:
+ case SO_REUSEADDR:
+ case SO_REUSEPORT:
+ case SO_OOBINLINE:
+ case SO_TIMESTAMP:
+ case SO_TIMESTAMP_MONOTONIC:
+ case SO_TIMESTAMP_CONTINUOUS:
+ case SO_DONTTRUNC:
+ case SO_WANTMORE:
+ case SO_WANTOOBFLAG:
+ case SO_NOWAKEFROMSLEEP:
+ case SO_NOAPNFALLBK:
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error != 0) {
+ goto out;
+ }
+ if (optval) {
+ so->so_options |= sopt->sopt_name;
+ } else {
+ so->so_options &= ~sopt->sopt_name;
+ }
+ break;
+
+ case SO_SNDBUF:
+ case SO_RCVBUF:
+ case SO_SNDLOWAT:
+ case SO_RCVLOWAT:
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error != 0) {
+ goto out;
+ }
+
+ /*
+ * Values < 1 make no sense for any of these
+ * options, so disallow them.
+ */
+ if (optval < 1) {
+ error = EINVAL;
+ goto out;
+ }
+
+ switch (sopt->sopt_name) {
+ case SO_SNDBUF:
+ case SO_RCVBUF: {
+ struct sockbuf *sb =
+ (sopt->sopt_name == SO_SNDBUF) ?
+ &so->so_snd : &so->so_rcv;
+ if (sbreserve(sb, (u_int32_t)optval) == 0) {
+ error = ENOBUFS;
+ goto out;
+ }
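+ /*
+ * Mark the size as user-mandated and disable automatic
+ * buffer sizing (SB_AUTOSIZE) so it is not overridden.
+ */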
+ sb->sb_flags |= SB_USRSIZE;
+ sb->sb_flags &= ~SB_AUTOSIZE;
+ sb->sb_idealsize = (u_int32_t)optval;
+ break;
+ }
+ /*
+ * Make sure the low-water is never greater than
+ * the high-water.
+ */
+ case SO_SNDLOWAT: {
+ int space = sbspace(&so->so_snd);
+ u_int32_t hiwat = so->so_snd.sb_hiwat;
+
+ if (so->so_snd.sb_flags & SB_UNIX) {
+ struct unpcb *unp =
+ (struct unpcb *)(so->so_pcb);
+ if (unp != NULL &&
+ unp->unp_conn != NULL) {
+ hiwat += unp->unp_conn->unp_cc;
+ }
+ }
+
+ so->so_snd.sb_lowat =
+ (optval > hiwat) ?
+ hiwat : optval;
+
+ if (space >= so->so_snd.sb_lowat) {
+ sowwakeup(so);
+ }
+ break;
+ }
+ case SO_RCVLOWAT: {
+ int64_t data_len;
+ so->so_rcv.sb_lowat =
+ (optval > so->so_rcv.sb_hiwat) ?
+ so->so_rcv.sb_hiwat : optval;
+ data_len = so->so_rcv.sb_cc
+ - so->so_rcv.sb_ctl;
+ if (data_len >= so->so_rcv.sb_lowat) {
+ sorwakeup(so);
+ }
+ break;
+ }
+ }
+ break;
+
+ case SO_SNDTIMEO:
+ case SO_RCVTIMEO:
+ error = sooptcopyin_timeval(sopt, &tv);
+ if (error != 0) {
+ goto out;
+ }
+
+ switch (sopt->sopt_name) {
+ case SO_SNDTIMEO:
+ so->so_snd.sb_timeo = tv;
+ break;
+ case SO_RCVTIMEO:
+ so->so_rcv.sb_timeo = tv;
+ break;
+ }
+ break;
+
+ case SO_NKE: {
+ struct so_nke nke;
+
+ error = sooptcopyin(sopt, &nke, sizeof(nke),
+ sizeof(nke));
+ if (error != 0) {
+ goto out;
+ }
+
+ error = sflt_attach_internal(so, nke.nke_handle);
+ break;
+ }
+
+ case SO_NOSIGPIPE:
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error != 0) {
+ goto out;
+ }
+ if (optval != 0) {
+ so->so_flags |= SOF_NOSIGPIPE;
+ } else {
+ so->so_flags &= ~SOF_NOSIGPIPE;
+ }
+ break;
+
+ case SO_NOADDRERR:
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error != 0) {
+ goto out;
+ }
+ if (optval != 0) {
+ so->so_flags |= SOF_NOADDRAVAIL;
+ } else {
+ so->so_flags &= ~SOF_NOADDRAVAIL;
+ }
+ break;
+
+ case SO_REUSESHAREUID:
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error != 0) {
+ goto out;
+ }
+ if (optval != 0) {
+ so->so_flags |= SOF_REUSESHAREUID;
+ } else {
+ so->so_flags &= ~SOF_REUSESHAREUID;
+ }
+ break;
+
+ case SO_NOTIFYCONFLICT:
+ if (kauth_cred_issuser(kauth_cred_get()) == 0) {
+ error = EPERM;
+ goto out;
+ }
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error != 0) {
+ goto out;
+ }
+ if (optval != 0) {
+ so->so_flags |= SOF_NOTIFYCONFLICT;
+ } else {
+ so->so_flags &= ~SOF_NOTIFYCONFLICT;
+ }
+ break;
+
+ case SO_RESTRICTIONS:
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error != 0) {
+ goto out;
+ }
+
+ error = so_set_restrictions(so, optval);
+ break;
+
+ case SO_AWDL_UNRESTRICTED:
+ if (SOCK_DOM(so) != PF_INET &&
+ SOCK_DOM(so) != PF_INET6) {
+ error = EOPNOTSUPP;
+ goto out;
+ }
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error != 0) {
+ goto out;
+ }
+ if (optval != 0) {
+ error = soopt_cred_check(so,
+ PRIV_NET_RESTRICTED_AWDL, false, false);
+ if (error == 0) {
+ inp_set_awdl_unrestricted(
+ sotoinpcb(so));
+ }
+ } else {
+ inp_clear_awdl_unrestricted(sotoinpcb(so));
+ }
+ break;
+ case SO_INTCOPROC_ALLOW:
+ if (SOCK_DOM(so) != PF_INET6) {
+ error = EOPNOTSUPP;
+ goto out;
+ }
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error != 0) {
+ goto out;
+ }
+ if (optval != 0 &&
+ inp_get_intcoproc_allowed(sotoinpcb(so)) == FALSE) {
+ error = soopt_cred_check(so,
+ PRIV_NET_RESTRICTED_INTCOPROC, false, false);
+ if (error == 0) {
+ inp_set_intcoproc_allowed(
+ sotoinpcb(so));
+ }
+ } else if (optval == 0) {
+ inp_clear_intcoproc_allowed(sotoinpcb(so));
+ }
+ break;
+
+ case SO_LABEL:
+ error = EOPNOTSUPP;
+ break;
+
+ case SO_UPCALLCLOSEWAIT:
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error != 0) {
+ goto out;
+ }
+ if (optval != 0) {
+ so->so_flags |= SOF_UPCALLCLOSEWAIT;
+ } else {
+ so->so_flags &= ~SOF_UPCALLCLOSEWAIT;
+ }
+ break;
+
+ case SO_RANDOMPORT:
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error != 0) {
+ goto out;
+ }
+ if (optval != 0) {
+ so->so_flags |= SOF_BINDRANDOMPORT;
+ } else {
+ so->so_flags &= ~SOF_BINDRANDOMPORT;
+ }
+ break;
+
+ case SO_NP_EXTENSIONS: {
+ struct so_np_extensions sonpx;
+
+ error = sooptcopyin(sopt, &sonpx, sizeof(sonpx),
+ sizeof(sonpx));
+ if (error != 0) {
+ goto out;
+ }
+ if (sonpx.npx_mask & ~SONPX_MASK_VALID) {
+ error = EINVAL;
+ goto out;
+ }
+ /*
+ * Only one bit defined for now
+ */
+ if ((sonpx.npx_mask & SONPX_SETOPTSHUT)) {
+ if ((sonpx.npx_flags & SONPX_SETOPTSHUT)) {
+ so->so_flags |= SOF_NPX_SETOPTSHUT;
+ } else {
+ so->so_flags &= ~SOF_NPX_SETOPTSHUT;
+ }
+ }
+ break;
+ }
+
+ case SO_TRAFFIC_CLASS: {
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error != 0) {
+ goto out;
+ }
+ if (optval >= SO_TC_NET_SERVICE_OFFSET) {
+ int netsvc = optval - SO_TC_NET_SERVICE_OFFSET;
+ error = so_set_net_service_type(so, netsvc);
+ goto out;
+ }
+ error = so_set_traffic_class(so, optval);
+ if (error != 0) {
+ goto out;
+ }
+ so->so_flags1 &= ~SOF1_TC_NET_SERV_TYPE;
+ so->so_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
+ break;
+ }
+
+ case SO_RECV_TRAFFIC_CLASS: {
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error != 0) {
+ goto out;
+ }
+ if (optval == 0) {
+ so->so_flags &= ~SOF_RECV_TRAFFIC_CLASS;
+ } else {
+ so->so_flags |= SOF_RECV_TRAFFIC_CLASS;
+ }
+ break;
+ }
+
+#if (DEVELOPMENT || DEBUG)
+ case SO_TRAFFIC_CLASS_DBG: {
+ struct so_tcdbg so_tcdbg;
+
+ error = sooptcopyin(sopt, &so_tcdbg,
+ sizeof(struct so_tcdbg), sizeof(struct so_tcdbg));
+ if (error != 0) {
+ goto out;
+ }
+ error = so_set_tcdbg(so, &so_tcdbg);
+ if (error != 0) {
+ goto out;
+ }
+ break;
+ }
+#endif /* (DEVELOPMENT || DEBUG) */
+
+ case SO_PRIVILEGED_TRAFFIC_CLASS:
+ error = priv_check_cred(kauth_cred_get(),
+ PRIV_NET_PRIVILEGED_TRAFFIC_CLASS, 0);
+ if (error != 0) {
+ goto out;
+ }
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error != 0) {
+ goto out;
+ }
+ if (optval == 0) {
+ so->so_flags &= ~SOF_PRIVILEGED_TRAFFIC_CLASS;
+ } else {
+ so->so_flags |= SOF_PRIVILEGED_TRAFFIC_CLASS;
+ }
+ break;
+
+#if (DEVELOPMENT || DEBUG)
+ case SO_DEFUNCTIT:
+ error = sosetdefunct(current_proc(), so, 0, FALSE);
+ if (error == 0) {
+ error = sodefunct(current_proc(), so, 0);
+ }
+
+ break;
+#endif /* (DEVELOPMENT || DEBUG) */
+
+ case SO_DEFUNCTOK:
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error != 0 || (so->so_flags & SOF_DEFUNCT)) {
+ if (error == 0) {
+ error = EBADF;
+ }
+ goto out;
+ }
+ /*
+ * Any process can set SO_DEFUNCTOK (clear
+ * SOF_NODEFUNCT), but only root can clear
+ * SO_DEFUNCTOK (set SOF_NODEFUNCT).
+ */
+ if (optval == 0 &&
+ kauth_cred_issuser(kauth_cred_get()) == 0) {
+ error = EPERM;
+ goto out;
+ }
+ if (optval) {
+ so->so_flags &= ~SOF_NODEFUNCT;
+ } else {
+ so->so_flags |= SOF_NODEFUNCT;
+ }
+
+ if (SOCK_DOM(so) == PF_INET ||
+ SOCK_DOM(so) == PF_INET6) {
+ char s[MAX_IPv6_STR_LEN];
+ char d[MAX_IPv6_STR_LEN];
+ struct inpcb *inp = sotoinpcb(so);
+
+ SODEFUNCTLOG("%s[%d, %s]: so 0x%llx "
+ "[%s %s:%d -> %s:%d] is now marked "
+ "as %seligible for "
+ "defunct\n", __func__, proc_selfpid(),
+ proc_best_name(current_proc()),
+ (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
+ (SOCK_TYPE(so) == SOCK_STREAM) ?
+ "TCP" : "UDP", inet_ntop(SOCK_DOM(so),
+ ((SOCK_DOM(so) == PF_INET) ?
+ (void *)&inp->inp_laddr.s_addr :
+ (void *)&inp->in6p_laddr), s, sizeof(s)),
+ ntohs(inp->in6p_lport),
+ inet_ntop(SOCK_DOM(so),
+ (SOCK_DOM(so) == PF_INET) ?
+ (void *)&inp->inp_faddr.s_addr :
+ (void *)&inp->in6p_faddr, d, sizeof(d)),
+ ntohs(inp->in6p_fport),
+ (so->so_flags & SOF_NODEFUNCT) ?
+ "not " : "");
+ } else {
+ SODEFUNCTLOG("%s[%d, %s]: so 0x%llx [%d,%d] "
+ "is now marked as %seligible for "
+ "defunct\n",
+ __func__, proc_selfpid(),
+ proc_best_name(current_proc()),
+ (uint64_t)DEBUG_KERNEL_ADDRPERM(so),
+ SOCK_DOM(so), SOCK_TYPE(so),
+ (so->so_flags & SOF_NODEFUNCT) ?
+ "not " : "");
+ }
+ break;
+
+ case SO_ISDEFUNCT:
+ /* This option is not settable */
+ error = EINVAL;
+ break;
+
+ case SO_OPPORTUNISTIC:
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error == 0) {
+ error = so_set_opportunistic(so, optval);
+ }
+ break;
+
+ case SO_FLUSH:
+ /* This option is handled by lower layer(s) */
+ error = 0;
+ break;
+
+ case SO_RECV_ANYIF:
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error == 0) {
+ error = so_set_recv_anyif(so, optval);
+ }
+ break;
+
+ case SO_TRAFFIC_MGT_BACKGROUND: {
+ /* This option is handled by lower layer(s) */
+ error = 0;
+ break;
+ }
+
+#if FLOW_DIVERT
+ case SO_FLOW_DIVERT_TOKEN:
+ error = flow_divert_token_set(so, sopt);
+ break;
+#endif /* FLOW_DIVERT */
+
+ case SO_DELEGATED:
+ if ((error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval))) != 0) {
+ break;
+ }
+
+ error = so_set_effective_pid(so, optval, sopt->sopt_p, true);
+ break;
+
+ case SO_DELEGATED_UUID: {
+ uuid_t euuid;
+
+ if ((error = sooptcopyin(sopt, &euuid, sizeof(euuid),
+ sizeof(euuid))) != 0) {
+ break;
+ }
+
+ error = so_set_effective_uuid(so, euuid, sopt->sopt_p, true);
+ break;
+ }
+
+#if NECP
+ case SO_NECP_ATTRIBUTES:
+ error = necp_set_socket_attributes(so, sopt);
+ break;
+
+ case SO_NECP_CLIENTUUID: {
+ if (SOCK_DOM(so) == PF_MULTIPATH) {
+ /* Handled by MPTCP itself */
+ break;
+ }
+
+ if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) {
+ error = EINVAL;
+ goto out;
+ }
+
+ struct inpcb *inp = sotoinpcb(so);
+ if (!uuid_is_null(inp->necp_client_uuid)) {
+ // Clear out the old client UUID if present
+ necp_inpcb_remove_cb(inp);
+ }
+
+ error = sooptcopyin(sopt, &inp->necp_client_uuid,
+ sizeof(uuid_t), sizeof(uuid_t));
+ if (error != 0) {
+ goto out;
+ }
+
+ if (uuid_is_null(inp->necp_client_uuid)) {
+ error = EINVAL;
+ goto out;
+ }
+
+ pid_t current_pid = proc_pid(current_proc());
+ error = necp_client_register_socket_flow(current_pid,
+ inp->necp_client_uuid, inp);
+ if (error != 0) {
+ uuid_clear(inp->necp_client_uuid);
+ goto out;
+ }
+
+ if (inp->inp_lport != 0) {
+ // There is a bound local port, so this is not
+ // a fresh socket. Assign to the client.
+ necp_client_assign_from_socket(current_pid, inp->necp_client_uuid, inp);
+ }
+
+ break;
+ }
+ case SO_NECP_LISTENUUID: {
+ if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) {
+ error = EINVAL;
+ goto out;
+ }
+
+ struct inpcb *inp = sotoinpcb(so);
+ if (!uuid_is_null(inp->necp_client_uuid)) {
+ error = EINVAL;
+ goto out;
+ }
+
+ error = sooptcopyin(sopt, &inp->necp_client_uuid,
+ sizeof(uuid_t), sizeof(uuid_t));
+ if (error != 0) {
+ goto out;
+ }
+
+ if (uuid_is_null(inp->necp_client_uuid)) {
+ error = EINVAL;
+ goto out;
+ }
+
+ error = necp_client_register_socket_listener(proc_pid(current_proc()),
+ inp->necp_client_uuid, inp);
+ if (error != 0) {
+ uuid_clear(inp->necp_client_uuid);
+ goto out;
+ }
+
+ // Mark that the port registration is held by NECP
+ inp->inp_flags2 |= INP2_EXTERNAL_PORT;
+
+ break;
+ }
+#endif /* NECP */
+
+ case SO_EXTENDED_BK_IDLE:
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error == 0) {
+ error = so_set_extended_bk_idle(so, optval);
+ }
+ break;
+
+ case SO_MARK_CELLFALLBACK:
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error != 0) {
+ goto out;
+ }
+ if (optval < 0) {
+ error = EINVAL;
+ goto out;
+ }
+ if (optval == 0) {
+ so->so_flags1 &= ~SOF1_CELLFALLBACK;
+ } else {
+ so->so_flags1 |= SOF1_CELLFALLBACK;
+ }
+ break;
+
+ case SO_STATISTICS_EVENT:
+ error = sooptcopyin(sopt, &long_optval,
+ sizeof(long_optval), sizeof(long_optval));
+ if (error != 0) {
+ goto out;
+ }
+ u_int64_t nstat_event = 0;
+ error = so_statistics_event_to_nstat_event(
+ &long_optval, &nstat_event);
+ if (error != 0) {
+ goto out;
+ }
+ nstat_pcb_event(sotoinpcb(so), nstat_event);
+ break;
+
+ case SO_NET_SERVICE_TYPE: {
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error != 0) {
+ goto out;
+ }
+ error = so_set_net_service_type(so, optval);
+ break;
+ }
+
+ case SO_QOSMARKING_POLICY_OVERRIDE:
+ error = priv_check_cred(kauth_cred_get(),
+ PRIV_NET_QOSMARKING_POLICY_OVERRIDE, 0);
+ if (error != 0) {
+ goto out;
+ }
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error != 0) {
+ goto out;
+ }
+ if (optval == 0) {
+ so->so_flags1 &= ~SOF1_QOSMARKING_POLICY_OVERRIDE;
+ } else {
+ so->so_flags1 |= SOF1_QOSMARKING_POLICY_OVERRIDE;
+ }
+ break;
+
+ case SO_MPKL_SEND_INFO: {
+ struct so_mpkl_send_info so_mpkl_send_info;
+
+ error = sooptcopyin(sopt, &so_mpkl_send_info,
+ sizeof(struct so_mpkl_send_info), sizeof(struct so_mpkl_send_info));
+ if (error != 0) {
+ goto out;
+ }
+ uuid_copy(so->so_mpkl_send_uuid, so_mpkl_send_info.mpkl_uuid);
+ so->so_mpkl_send_proto = so_mpkl_send_info.mpkl_proto;
+
+ if (uuid_is_null(so->so_mpkl_send_uuid) && so->so_mpkl_send_proto == 0) {
+ so->so_flags1 &= ~SOF1_MPKL_SEND_INFO;
+ } else {
+ so->so_flags1 |= SOF1_MPKL_SEND_INFO;
+ }
+ break;
+ }
+ case SO_WANT_KEV_SOCKET_CLOSED: {
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error != 0) {
+ goto out;
+ }
+ if (optval == 0) {
+ so->so_flags1 &= ~SOF1_WANT_KEV_SOCK_CLOSED;
+ } else {
+ so->so_flags1 |= SOF1_WANT_KEV_SOCK_CLOSED;
+ }
+ break;
+ }
+ default:
+ error = ENOPROTOOPT;
+ break;
+ }
+ if (error == 0 && so->so_proto != NULL &&
+ so->so_proto->pr_ctloutput != NULL) {
+ (void) so->so_proto->pr_ctloutput(so, sopt);
+ }
+ }
+out:
+ if (dolock) {
+ socket_unlock(so, 1);
+ }
+ return error;
+}
+
+/* Helper routines for getsockopt */
+int
+sooptcopyout(struct sockopt *sopt, void *buf, size_t len)
+{
+ int error;
+ size_t valsize;
+
+ error = 0;
+
+ /*
+ * Documented get behavior is that we always return a value,
+ * possibly truncated to fit in the user's buffer.
+ * Traditional behavior is that we always tell the user
+ * precisely how much we copied, rather than something more
+ * useful such as the total amount we had available.
+ * Note that this interface is not idempotent; the entire answer
+ * must be generated ahead of time.
+ */
+ valsize = min(len, sopt->sopt_valsize);
+ sopt->sopt_valsize = valsize;
+ if (sopt->sopt_val != USER_ADDR_NULL) {
+ if (sopt->sopt_p != kernproc) {
+ error = copyout(buf, sopt->sopt_val, valsize);
+ } else {
+ bcopy(buf, CAST_DOWN(caddr_t, sopt->sopt_val), valsize);
+ }
+ }
+ return error;
+}
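+
+/*
+ * Example of the contract above (illustrative note, not part of this
+ * change): fixed-size integer options in sogetoptlock() below all funnel
+ * into a single call of the form
+ *
+ *	error = sooptcopyout(sopt, &optval, sizeof(optval));
+ *
+ * If the caller supplied a buffer shorter than sizeof(optval), the value
+ * is truncated to fit and sopt->sopt_valsize reports how much was copied.
+ */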
+
+static int
+sooptcopyout_timeval(struct sockopt *sopt, const struct timeval *tv_p)
+{
+ int error;
+ size_t len;
+ struct user64_timeval tv64 = {};
+ struct user32_timeval tv32 = {};
+ const void * val;
+ size_t valsize;
+
+ error = 0;
+ if (proc_is64bit(sopt->sopt_p)) {
+ len = sizeof(tv64);
+ tv64.tv_sec = tv_p->tv_sec;
+ tv64.tv_usec = tv_p->tv_usec;
+ val = &tv64;
+ } else {
+ len = sizeof(tv32);
+ tv32.tv_sec = tv_p->tv_sec;
+ tv32.tv_usec = tv_p->tv_usec;
+ val = &tv32;
+ }
+ valsize = min(len, sopt->sopt_valsize);
+ sopt->sopt_valsize = valsize;
+ if (sopt->sopt_val != USER_ADDR_NULL) {
+ if (sopt->sopt_p != kernproc) {
+ error = copyout(val, sopt->sopt_val, valsize);
+ } else {
+ bcopy(val, CAST_DOWN(caddr_t, sopt->sopt_val), valsize);
+ }
+ }
+ return error;
+}
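+
+/*
+ * Userland view of the helper above (illustrative sketch, not part of
+ * this change): the timeval returned for SO_RCVTIMEO/SO_SNDTIMEO is
+ * sized for the caller's ABI, so the usual
+ *
+ *	struct timeval tv;
+ *	socklen_t len = sizeof(tv);
+ *	getsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, &len);
+ *
+ * works unchanged for both 32-bit and 64-bit processes.
+ */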
+
+/*
+ * Return: 0 Success
+ * ENOPROTOOPT
+ * <pr_ctloutput>:EOPNOTSUPP[AF_UNIX]
+ * <pr_ctloutput>:???
+ * <sf_getoption>:???
+ */
+int
+sogetoptlock(struct socket *so, struct sockopt *sopt, int dolock)
+{
+ int error, optval;
+ struct linger l;
+ struct timeval tv;
+
+ if (sopt->sopt_dir != SOPT_GET) {
+ sopt->sopt_dir = SOPT_GET;
+ }
+
+ if (dolock) {
+ socket_lock(so, 1);
+ }
+
+ error = sflt_getsockopt(so, sopt);
+ if (error != 0) {
+ if (error == EJUSTRETURN) {
+ error = 0;
+ }
+ goto out;
+ }
+
+ if (sopt->sopt_level != SOL_SOCKET) {
+ if (so->so_proto != NULL &&
+ so->so_proto->pr_ctloutput != NULL) {
+ error = (*so->so_proto->pr_ctloutput)(so, sopt);
+ goto out;
+ }
+ error = ENOPROTOOPT;
+ } else {
+ /*
+ * Allow socket-level (SOL_SOCKET) options to be filtered by
+ * the protocol layer, if needed. A zero value returned from
+ * the handler means use default socket-level processing as
+ * done by the rest of this routine. Any other return value
+ * indicates that the option is unsupported.
+ */
+ if (so->so_proto != NULL && (error = so->so_proto->pr_usrreqs->
+ pru_socheckopt(so, sopt)) != 0) {
+ goto out;
+ }
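+
+ /*
+ * Hypothetical sketch of such a handler (not part of this change):
+ *
+ *	static int
+ *	foo_socheckopt(struct socket *so, struct sockopt *sopt)
+ *	{
+ *		if (sopt->sopt_name == SO_WANTOOBFLAG)
+ *			return EOPNOTSUPP;
+ *		return 0;
+ *	}
+ *
+ * Returning 0 lets the switch below run; any non-zero value makes
+ * the option fail as unsupported.
+ */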
+
+ error = 0;
+ switch (sopt->sopt_name) {
+ case SO_LINGER:
+ case SO_LINGER_SEC:
+ l.l_onoff = ((so->so_options & SO_LINGER) ? 1 : 0);
+ l.l_linger = (sopt->sopt_name == SO_LINGER) ?
+ so->so_linger : so->so_linger / hz;
+ error = sooptcopyout(sopt, &l, sizeof(l));
+ break;
+
+ case SO_USELOOPBACK:
+ case SO_DONTROUTE:
+ case SO_DEBUG:
+ case SO_KEEPALIVE:
+ case SO_REUSEADDR:
+ case SO_REUSEPORT:
+ case SO_BROADCAST:
+ case SO_OOBINLINE:
+ case SO_TIMESTAMP:
+ case SO_TIMESTAMP_MONOTONIC:
+ case SO_TIMESTAMP_CONTINUOUS:
+ case SO_DONTTRUNC:
+ case SO_WANTMORE:
+ case SO_WANTOOBFLAG:
+ case SO_NOWAKEFROMSLEEP:
+ case SO_NOAPNFALLBK:
+ optval = so->so_options & sopt->sopt_name;
+integer:
+ error = sooptcopyout(sopt, &optval, sizeof(optval));
+ break;
+
+ case SO_TYPE:
+ optval = so->so_type;
+ goto integer;
+
+ case SO_NREAD:
+ if (so->so_proto->pr_flags & PR_ATOMIC) {
+ int pkt_total;
+ struct mbuf *m1;
+
+ pkt_total = 0;
+ m1 = so->so_rcv.sb_mb;
+ while (m1 != NULL) {
+ if (m1->m_type == MT_DATA ||
+ m1->m_type == MT_HEADER ||
+ m1->m_type == MT_OOBDATA) {
+ pkt_total += m1->m_len;
+ }
+ m1 = m1->m_next;
+ }
+ optval = pkt_total;
+ } else {
+ optval = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
+ }
+ goto integer;
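+
+ /*
+ * Illustrative userland sketch (not part of this change): SO_NREAD
+ * reports the bytes in the first record for datagram-style
+ * (PR_ATOMIC) protocols, and the total readable bytes excluding
+ * control data for stream protocols:
+ *
+ *	int nread = 0;
+ *	socklen_t len = sizeof(nread);
+ *	getsockopt(s, SOL_SOCKET, SO_NREAD, &nread, &len);
+ */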
+
+ case SO_NUMRCVPKT:
+ if (so->so_proto->pr_flags & PR_ATOMIC) {
+ int cnt = 0;
+ struct mbuf *m1;
+
+ m1 = so->so_rcv.sb_mb;
+ while (m1 != NULL) {
+ cnt += 1;
+ m1 = m1->m_nextpkt;
+ }
+ optval = cnt;
+ goto integer;
+ } else {
+ error = ENOPROTOOPT;
+ break;
+ }
+
+ case SO_NWRITE:
+ optval = so->so_snd.sb_cc;
+ goto integer;
+
+ case SO_ERROR:
+ optval = so->so_error;
+ so->so_error = 0;
+ goto integer;
+
+ case SO_SNDBUF: {
+ u_int32_t hiwat = so->so_snd.sb_hiwat;
+
+ if (so->so_snd.sb_flags & SB_UNIX) {
+ struct unpcb *unp =
+ (struct unpcb *)(so->so_pcb);
+ if (unp != NULL && unp->unp_conn != NULL) {
+ hiwat += unp->unp_conn->unp_cc;
+ }
+ }
+
+ optval = hiwat;
+ goto integer;
+ }
+ case SO_RCVBUF:
+ optval = so->so_rcv.sb_hiwat;
+ goto integer;
+
+ case SO_SNDLOWAT:
+ optval = so->so_snd.sb_lowat;
+ goto integer;
+
+ case SO_RCVLOWAT:
+ optval = so->so_rcv.sb_lowat;
+ goto integer;
+
+ case SO_SNDTIMEO:
+ case SO_RCVTIMEO:
+ tv = (sopt->sopt_name == SO_SNDTIMEO ?
+ so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
+
+ error = sooptcopyout_timeval(sopt, &tv);
+ break;
+
+ case SO_NOSIGPIPE:
+ optval = (so->so_flags & SOF_NOSIGPIPE);
+ goto integer;
+
+ case SO_NOADDRERR:
+ optval = (so->so_flags & SOF_NOADDRAVAIL);
+ goto integer;
+
+ case SO_REUSESHAREUID:
+ optval = (so->so_flags & SOF_REUSESHAREUID);
+ goto integer;
+
+ case SO_NOTIFYCONFLICT:
+ optval = (so->so_flags & SOF_NOTIFYCONFLICT);
+ goto integer;
+
+ case SO_RESTRICTIONS:
+ optval = so_get_restrictions(so);
+ goto integer;
+
+ case SO_AWDL_UNRESTRICTED:
+ if (SOCK_DOM(so) == PF_INET ||
+ SOCK_DOM(so) == PF_INET6) {
+ optval = inp_get_awdl_unrestricted(
+ sotoinpcb(so));
+ goto integer;
+ } else {
+ error = EOPNOTSUPP;
+ }
+ break;
+
+ case SO_INTCOPROC_ALLOW:
+ if (SOCK_DOM(so) == PF_INET6) {
+ optval = inp_get_intcoproc_allowed(
+ sotoinpcb(so));
+ goto integer;
+ } else {
+ error = EOPNOTSUPP;
+ }
+ break;
+
+ case SO_LABEL:
+ error = EOPNOTSUPP;
+ break;
+
+ case SO_PEERLABEL:
+ error = EOPNOTSUPP;
+ break;
+
+#ifdef __APPLE_API_PRIVATE
+ case SO_UPCALLCLOSEWAIT:
+ optval = (so->so_flags & SOF_UPCALLCLOSEWAIT);
+ goto integer;
+#endif
+ case SO_RANDOMPORT:
+ optval = (so->so_flags & SOF_BINDRANDOMPORT);
+ goto integer;
+
+ case SO_NP_EXTENSIONS: {
+ struct so_np_extensions sonpx = {};
+
+ sonpx.npx_flags = (so->so_flags & SOF_NPX_SETOPTSHUT) ?
+ SONPX_SETOPTSHUT : 0;
+ sonpx.npx_mask = SONPX_MASK_VALID;
+
+ error = sooptcopyout(sopt, &sonpx,
+ sizeof(struct so_np_extensions));
+ break;
+ }
+
+ case SO_TRAFFIC_CLASS:
+ optval = so->so_traffic_class;
+ goto integer;
+
+ case SO_RECV_TRAFFIC_CLASS:
+ optval = (so->so_flags & SOF_RECV_TRAFFIC_CLASS);
+ goto integer;
+
+#if (DEVELOPMENT || DEBUG)
+ case SO_TRAFFIC_CLASS_DBG:
+ error = sogetopt_tcdbg(so, sopt);
+ break;
+#endif /* (DEVELOPMENT || DEBUG) */
+
+ case SO_PRIVILEGED_TRAFFIC_CLASS:
+ optval = (so->so_flags & SOF_PRIVILEGED_TRAFFIC_CLASS);
+ goto integer;
+
+ case SO_DEFUNCTOK:
+ optval = !(so->so_flags & SOF_NODEFUNCT);
+ goto integer;
+
+ case SO_ISDEFUNCT:
+ optval = (so->so_flags & SOF_DEFUNCT);
+ goto integer;
+
+ case SO_OPPORTUNISTIC:
+ optval = so_get_opportunistic(so);
+ goto integer;
+
+ case SO_FLUSH:
+ /* This option is not gettable */
+ error = EINVAL;
+ break;
+
+ case SO_RECV_ANYIF:
+ optval = so_get_recv_anyif(so);
+ goto integer;
+
+ case SO_TRAFFIC_MGT_BACKGROUND:
+ /* This option is handled by lower layer(s) */
+ if (so->so_proto != NULL &&
+ so->so_proto->pr_ctloutput != NULL) {
+ (void) so->so_proto->pr_ctloutput(so, sopt);
+ }
+ break;
+
+#if FLOW_DIVERT
+ case SO_FLOW_DIVERT_TOKEN:
+ error = flow_divert_token_get(so, sopt);
+ break;
+#endif /* FLOW_DIVERT */
+
+#if NECP
+ case SO_NECP_ATTRIBUTES:
+ error = necp_get_socket_attributes(so, sopt);
+ break;
+
+ case SO_NECP_CLIENTUUID: {
+ uuid_t *ncu;
+
+ if (SOCK_DOM(so) == PF_MULTIPATH) {
+ ncu = &mpsotomppcb(so)->necp_client_uuid;
+ } else if (SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) {
+ ncu = &sotoinpcb(so)->necp_client_uuid;
+ } else {
+ error = EINVAL;
+ goto out;
+ }
+
+ error = sooptcopyout(sopt, ncu, sizeof(uuid_t));
+ break;
+ }
+
+ case SO_NECP_LISTENUUID: {
+ uuid_t *nlu;
+
+ if (SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) {
+ if (sotoinpcb(so)->inp_flags2 & INP2_EXTERNAL_PORT) {
+ nlu = &sotoinpcb(so)->necp_client_uuid;
+ } else {
+ error = ENOENT;
+ goto out;
+ }
+ } else {
+ error = EINVAL;
+ goto out;
+ }
+
+ error = sooptcopyout(sopt, nlu, sizeof(uuid_t));
+ break;
+ }
+#endif /* NECP */
+
+#if CONTENT_FILTER
+ case SO_CFIL_SOCK_ID: {
+ cfil_sock_id_t sock_id;
+
+ sock_id = cfil_sock_id_from_socket(so);
+
+ error = sooptcopyout(sopt, &sock_id,
+ sizeof(cfil_sock_id_t));
+ break;
+ }
+#endif /* CONTENT_FILTER */
+
+ case SO_EXTENDED_BK_IDLE:
+ optval = (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED);
+ goto integer;
+ case SO_MARK_CELLFALLBACK:
+ optval = ((so->so_flags1 & SOF1_CELLFALLBACK) > 0)
+ ? 1 : 0;
+ goto integer;
+ case SO_NET_SERVICE_TYPE: {
+ if ((so->so_flags1 & SOF1_TC_NET_SERV_TYPE)) {
+ optval = so->so_netsvctype;
+ } else {
+ optval = NET_SERVICE_TYPE_BE;
+ }
+ goto integer;
+ }
+ case SO_NETSVC_MARKING_LEVEL:
+ optval = so_get_netsvc_marking_level(so);
+ goto integer;
+
+ case SO_MPKL_SEND_INFO: {
+ struct so_mpkl_send_info so_mpkl_send_info;
+
+ uuid_copy(so_mpkl_send_info.mpkl_uuid, so->so_mpkl_send_uuid);
+ so_mpkl_send_info.mpkl_proto = so->so_mpkl_send_proto;
+ error = sooptcopyout(sopt, &so_mpkl_send_info,
+ sizeof(struct so_mpkl_send_info));
+ break;
+ }
+ default:
+ error = ENOPROTOOPT;
+ break;
+ }
+ }
+out:
+ if (dolock) {
+ socket_unlock(so, 1);
+ }
+ return error;
+}
+
+/*
+ * The size limit on our soopt_getm is different from that on FreeBSD.
+ * We limit the size of options to MCLBYTES. This will have to change
+ * if we need to define options that need more space than MCLBYTES.
+ */
+int
+soopt_getm(struct sockopt *sopt, struct mbuf **mp)
+{
+ struct mbuf *m, *m_prev;
+ int sopt_size = sopt->sopt_valsize;
+ int how;
+
+ if (sopt_size <= 0 || sopt_size > MCLBYTES) {
+ return EMSGSIZE;
+ }
+
+ how = sopt->sopt_p != kernproc ? M_WAIT : M_DONTWAIT;
+ MGET(m, how, MT_DATA);
+ if (m == NULL) {
+ return ENOBUFS;
+ }
+ if (sopt_size > MLEN) {
+ MCLGET(m, how);
+ if ((m->m_flags & M_EXT) == 0) {
+ m_free(m);
+ return ENOBUFS;
+ }
+ m->m_len = min(MCLBYTES, sopt_size);
+ } else {
+ m->m_len = min(MLEN, sopt_size);
+ }
+ sopt_size -= m->m_len;
+ *mp = m;
+ m_prev = m;
+
+ while (sopt_size > 0) {
+ MGET(m, how, MT_DATA);
+ if (m == NULL) {
+ m_freem(*mp);
+ return ENOBUFS;
+ }
+ if (sopt_size > MLEN) {
+ MCLGET(m, how);
+ if ((m->m_flags & M_EXT) == 0) {
+ m_freem(*mp);
+ m_freem(m);
+ return ENOBUFS;
+ }
+ m->m_len = min(MCLBYTES, sopt_size);
+ } else {
+ m->m_len = min(MLEN, sopt_size);
+ }
+ sopt_size -= m->m_len;
+ m_prev->m_next = m;
+ m_prev = m;
+ }
+ return 0;
+}
+
+/* copyin sopt data into mbuf chain */
+int
+soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
+{
+ struct mbuf *m0 = m;
+
+ if (sopt->sopt_val == USER_ADDR_NULL) {
+ return 0;
+ }
+ while (m != NULL && sopt->sopt_valsize >= m->m_len) {
+ if (sopt->sopt_p != kernproc) {
+ int error;
+
+ error = copyin(sopt->sopt_val, mtod(m, char *),
+ m->m_len);
+ if (error != 0) {
+ m_freem(m0);
+ return error;
+ }
+ } else {
+ bcopy(CAST_DOWN(caddr_t, sopt->sopt_val),
+ mtod(m, char *), m->m_len);
+ }
+ sopt->sopt_valsize -= m->m_len;
+ sopt->sopt_val += m->m_len;
+ m = m->m_next;
+ }
+ /* enough space should have been allocated at ip6_sooptmcopyin() */
+ if (m != NULL) {
+ panic("soopt_mcopyin");
+ /* NOTREACHED */
+ }
+ return 0;
+}
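+
+/*
+ * Typical use of the two helpers above (illustrative sketch, not part of
+ * this change): a protocol that wants the option value as an mbuf chain
+ * on the set path does roughly
+ *
+ *	struct mbuf *m = NULL;
+ *	error = soopt_getm(sopt, &m);
+ *	if (error == 0)
+ *		error = soopt_mcopyin(sopt, m);
+ *
+ * after which the chain holds the caller's option data (soopt_mcopyin
+ * frees the chain itself if the copyin fails).
+ */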
+
+/* copyout mbuf chain data into soopt */
+int
+soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
+{
+ struct mbuf *m0 = m;
+ size_t valsize = 0;
+
+ if (sopt->sopt_val == USER_ADDR_NULL) {
+ return 0;
+ }
+ while (m != NULL && sopt->sopt_valsize >= m->m_len) {
+ if (sopt->sopt_p != kernproc) {
+ int error;
+
+ error = copyout(mtod(m, char *), sopt->sopt_val,
+ m->m_len);
+ if (error != 0) {
+ m_freem(m0);
+ return error;
+ }
+ } else {
+ bcopy(mtod(m, char *),
+ CAST_DOWN(caddr_t, sopt->sopt_val), m->m_len);
+ }
+ sopt->sopt_valsize -= m->m_len;
+ sopt->sopt_val += m->m_len;
+ valsize += m->m_len;
+ m = m->m_next;
+ }
+ if (m != NULL) {
+ /* a large enough soopt buffer should have been supplied by user-land */
+ m_freem(m0);
+ return EINVAL;
+ }
+ sopt->sopt_valsize = valsize;
+ return 0;
+}
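+
+/*
+ * Illustrative counterpart for the get path (not part of this change):
+ * once a protocol has filled an mbuf chain with the option value,
+ *
+ *	error = soopt_mcopyout(sopt, m);
+ *
+ * copies the chain back to the caller and leaves sopt->sopt_valsize set
+ * to the number of bytes returned; if the user buffer is too small, the
+ * chain is freed and EINVAL is returned.
+ */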
+
+void
+sohasoutofband(struct socket *so)
+{
+ if (so->so_pgid < 0) {
+ gsignal(-so->so_pgid, SIGURG);
+ } else if (so->so_pgid > 0) {
+ proc_signal(so->so_pgid, SIGURG);
+ }
+ selwakeup(&so->so_rcv.sb_sel);
+ if (so->so_rcv.sb_flags & SB_KNOTE) {
+ KNOTE(&so->so_rcv.sb_sel.si_note,
+ (NOTE_OOB | SO_FILT_HINT_LOCKED));
+ }
+}
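+
+/*
+ * Userland sketch for the notification above (illustrative, not part of
+ * this change): a process that wants SIGURG when out-of-band data
+ * arrives registers itself as the socket owner and installs a handler:
+ *
+ *	signal(SIGURG, handle_urg);
+ *	fcntl(s, F_SETOWN, getpid());
+ *
+ * (handle_urg being a user-supplied handler) and can then fetch the
+ * byte with recv(s, &c, 1, MSG_OOB) unless SO_OOBINLINE is set.
+ * Waiters in select/kevent are woken via the selwakeup()/KNOTE()
+ * calls above.
+ */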