+ case SO_SNDLOWAT:
+ optval = so->so_snd.sb_lowat;
+ goto integer;
+
+ case SO_RCVLOWAT:
+ optval = so->so_rcv.sb_lowat;
+ goto integer;
+
+ case SO_SNDTIMEO:
+ case SO_RCVTIMEO:
+ tv = (sopt->sopt_name == SO_SNDTIMEO ?
+ so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
+
+ error = sooptcopyout_timeval(sopt, &tv);
+ break;
+
+ case SO_NOSIGPIPE:
+ optval = (so->so_flags & SOF_NOSIGPIPE);
+ goto integer;
+
+ case SO_NOADDRERR:
+ optval = (so->so_flags & SOF_NOADDRAVAIL);
+ goto integer;
+
+ case SO_REUSESHAREUID:
+ optval = (so->so_flags & SOF_REUSESHAREUID);
+ goto integer;
+
+
+ case SO_NOTIFYCONFLICT:
+ optval = (so->so_flags & SOF_NOTIFYCONFLICT);
+ goto integer;
+
+ case SO_RESTRICTIONS:
+ optval = so_get_restrictions(so);
+ goto integer;
+
+ case SO_LABEL:
+#if CONFIG_MACF_SOCKET
+ if ((error = sooptcopyin(sopt, &extmac, sizeof (extmac),
+ sizeof (extmac))) != 0 ||
+ (error = mac_socket_label_get(proc_ucred(
+ sopt->sopt_p), so, &extmac)) != 0)
+ break;
+
+ error = sooptcopyout(sopt, &extmac, sizeof (extmac));
+#else
+ error = EOPNOTSUPP;
+#endif /* MAC_SOCKET */
+ break;
+
+ case SO_PEERLABEL:
+#if CONFIG_MACF_SOCKET
+ if ((error = sooptcopyin(sopt, &extmac, sizeof (extmac),
+ sizeof (extmac))) != 0 ||
+ (error = mac_socketpeer_label_get(proc_ucred(
+ sopt->sopt_p), so, &extmac)) != 0)
+ break;
+
+ error = sooptcopyout(sopt, &extmac, sizeof (extmac));
+#else
+ error = EOPNOTSUPP;
+#endif /* MAC_SOCKET */
+ break;
+
+#ifdef __APPLE_API_PRIVATE
+ case SO_UPCALLCLOSEWAIT:
+ optval = (so->so_flags & SOF_UPCALLCLOSEWAIT);
+ goto integer;
+#endif
+ case SO_RANDOMPORT:
+ optval = (so->so_flags & SOF_BINDRANDOMPORT);
+ goto integer;
+
+ case SO_NP_EXTENSIONS: {
+ struct so_np_extensions sonpx;
+
+ sonpx.npx_flags = (so->so_flags & SOF_NPX_SETOPTSHUT) ?
+ SONPX_SETOPTSHUT : 0;
+ sonpx.npx_mask = SONPX_MASK_VALID;
+
+ error = sooptcopyout(sopt, &sonpx,
+ sizeof (struct so_np_extensions));
+ break;
+ }
+
+ case SO_TRAFFIC_CLASS:
+ optval = so->so_traffic_class;
+ goto integer;
+
+ case SO_RECV_TRAFFIC_CLASS:
+ optval = (so->so_flags & SOF_RECV_TRAFFIC_CLASS);
+ goto integer;
+
+ case SO_TRAFFIC_CLASS_STATS:
+ error = sooptcopyout(sopt, &so->so_tc_stats,
+ sizeof (so->so_tc_stats));
+ break;
+
+ case SO_TRAFFIC_CLASS_DBG:
+ error = sogetopt_tcdbg(so, sopt);
+ break;
+
+ case SO_PRIVILEGED_TRAFFIC_CLASS:
+ optval = (so->so_flags & SOF_PRIVILEGED_TRAFFIC_CLASS);
+ goto integer;
+
+ case SO_DEFUNCTOK:
+ optval = !(so->so_flags & SOF_NODEFUNCT);
+ goto integer;
+
+ case SO_ISDEFUNCT:
+ optval = (so->so_flags & SOF_DEFUNCT);
+ goto integer;
+
+ case SO_OPPORTUNISTIC:
+ optval = so_get_opportunistic(so);
+ goto integer;
+
+ case SO_FLUSH:
+ /* This option is not gettable */
+ error = EINVAL;
+ break;
+
+ case SO_RECV_ANYIF:
+ optval = so_get_recv_anyif(so);
+ goto integer;
+
+ case SO_TRAFFIC_MGT_BACKGROUND:
+ /* This option is handled by lower layer(s) */
+ if (so->so_proto != NULL &&
+ so->so_proto->pr_ctloutput != NULL) {
+ (void) so->so_proto->pr_ctloutput(so, sopt);
+ }
+ break;
+
+#if FLOW_DIVERT
+ case SO_FLOW_DIVERT_TOKEN:
+ error = flow_divert_token_get(so, sopt);
+ break;
+#endif /* FLOW_DIVERT */
+
+ default:
+ error = ENOPROTOOPT;
+ break;
+ }
+ }
+out:
+ if (dolock)
+ socket_unlock(so, 1);
+ return (error);
+}
+
+/*
+ * The size limits on our soopt_getm is different from that on FreeBSD.
+ * We limit the size of options to MCLBYTES. This will have to change
+ * if we need to define options that need more space than MCLBYTES.
+ */
+/*
+ * Allocate an mbuf chain with room for sopt->sopt_valsize bytes of
+ * option data.  On success, *mp points at the head of the chain and 0
+ * is returned; otherwise EMSGSIZE (size out of range) or ENOBUFS.
+ * The caller owns the chain and is responsible for m_freem()ing it.
+ */
+int
+soopt_getm(struct sockopt *sopt, struct mbuf **mp)
+{
+	struct mbuf *m, *m_prev;
+	int sopt_size = sopt->sopt_valsize;
+	int how;
+
+	/* Enforce the MCLBYTES cap described in the comment above. */
+	if (sopt_size <= 0 || sopt_size > MCLBYTES)
+		return (EMSGSIZE);
+
+	/* User requests may sleep for mbufs; kernel callers must not. */
+	how = sopt->sopt_p != kernproc ? M_WAIT : M_DONTWAIT;
+	MGET(m, how, MT_DATA);
+	if (m == NULL)
+		return (ENOBUFS);
+	/* Attach a cluster when the data won't fit in a plain mbuf. */
+	if (sopt_size > MLEN) {
+		MCLGET(m, how);
+		if ((m->m_flags & M_EXT) == 0) {
+			m_free(m);
+			return (ENOBUFS);
+		}
+		m->m_len = min(MCLBYTES, sopt_size);
+	} else {
+		m->m_len = min(MLEN, sopt_size);
+	}
+	sopt_size -= m->m_len;
+	*mp = m;
+	m_prev = m;
+
+	/* Keep appending mbufs until the whole value fits in the chain. */
+	while (sopt_size > 0) {
+		MGET(m, how, MT_DATA);
+		if (m == NULL) {
+			/* Release the partial chain before bailing out. */
+			m_freem(*mp);
+			return (ENOBUFS);
+		}
+		if (sopt_size > MLEN) {
+			MCLGET(m, how);
+			if ((m->m_flags & M_EXT) == 0) {
+				/* Free both the chain and the unlinked mbuf. */
+				m_freem(*mp);
+				m_freem(m);
+				return (ENOBUFS);
+			}
+			m->m_len = min(MCLBYTES, sopt_size);
+		} else {
+			m->m_len = min(MLEN, sopt_size);
+		}
+		sopt_size -= m->m_len;
+		m_prev->m_next = m;
+		m_prev = m;
+	}
+	return (0);
+}
+
+/* copyin sopt data into mbuf chain */
+int
+soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
+{
+ struct mbuf *m0 = m;
+
+ if (sopt->sopt_val == USER_ADDR_NULL)
+ return (0);
+ while (m != NULL && sopt->sopt_valsize >= m->m_len) {
+ if (sopt->sopt_p != kernproc) {
+ int error;
+
+ error = copyin(sopt->sopt_val, mtod(m, char *),
+ m->m_len);
+ if (error != 0) {
+ m_freem(m0);
+ return (error);
+ }
+ } else {
+ bcopy(CAST_DOWN(caddr_t, sopt->sopt_val),
+ mtod(m, char *), m->m_len);
+ }
+ sopt->sopt_valsize -= m->m_len;
+ sopt->sopt_val += m->m_len;
+ m = m->m_next;
+ }
+ /* should be allocated enoughly at ip6_sooptmcopyin() */
+ if (m != NULL) {
+ panic("soopt_mcopyin");
+ /* NOTREACHED */
+ }
+ return (0);
+}
+
+/* copyout mbuf chain data into soopt */
+int
+soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
+{
+ struct mbuf *m0 = m;
+ size_t valsize = 0;
+
+ if (sopt->sopt_val == USER_ADDR_NULL)
+ return (0);
+ while (m != NULL && sopt->sopt_valsize >= m->m_len) {
+ if (sopt->sopt_p != kernproc) {
+ int error;
+
+ error = copyout(mtod(m, char *), sopt->sopt_val,
+ m->m_len);
+ if (error != 0) {
+ m_freem(m0);
+ return (error);
+ }
+ } else {
+ bcopy(mtod(m, char *),
+ CAST_DOWN(caddr_t, sopt->sopt_val), m->m_len);
+ }
+ sopt->sopt_valsize -= m->m_len;
+ sopt->sopt_val += m->m_len;
+ valsize += m->m_len;
+ m = m->m_next;
+ }
+ if (m != NULL) {
+ /* enough soopt buffer should be given from user-land */
+ m_freem(m0);
+ return (EINVAL);
+ }
+ sopt->sopt_valsize = valsize;
+ return (0);
+}
+
+/*
+ * Notify interested parties that out-of-band data has arrived:
+ * deliver SIGURG to the owning process (so_pgid > 0) or process
+ * group (so_pgid < 0), then wake any select()/poll() waiters on
+ * the receive buffer.
+ */
+void
+sohasoutofband(struct socket *so)
+{
+	if (so->so_pgid > 0)
+		proc_signal(so->so_pgid, SIGURG);
+	else if (so->so_pgid < 0)
+		gsignal(-so->so_pgid, SIGURG);
+	selwakeup(&so->so_rcv.sb_sel);
+}
+
+/*
+ * poll(2)/select(2) back-end for sockets.  Computes the subset of the
+ * requested events that are currently ready; if none are ready,
+ * records the caller on the relevant selinfo so it can be woken later.
+ * Returns the ready-event mask (possibly 0).
+ */
+int
+sopoll(struct socket *so, int events, kauth_cred_t cred, void * wql)
+{
+#pragma unused(cred)
+	struct proc *p = current_proc();
+	int revents = 0;
+
+	socket_lock(so, 1);
+	so_update_last_owner_locked(so, PROC_NULL);
+	so_update_policy(so);
+
+	/* Readable data (or pending connections) satisfy read events. */
+	if (events & (POLLIN | POLLRDNORM))
+		if (soreadable(so))
+			revents |= events & (POLLIN | POLLRDNORM);
+
+	/* Send-buffer space satisfies write events. */
+	if (events & (POLLOUT | POLLWRNORM))
+		if (sowriteable(so))
+			revents |= events & (POLLOUT | POLLWRNORM);
+
+	/* Out-of-band data satisfies priority-band events. */
+	if (events & (POLLPRI | POLLRDBAND))
+		if (so->so_oobmark || (so->so_state & SS_RCVATMARK))
+			revents |= events & (POLLPRI | POLLRDBAND);
+
+	if (revents == 0) {
+		if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
+			/*
+			 * Darwin sets the flag first,
+			 * BSD calls selrecord first
+			 */
+			so->so_rcv.sb_flags |= SB_SEL;
+			selrecord(p, &so->so_rcv.sb_sel, wql);
+		}
+
+		if (events & (POLLOUT | POLLWRNORM)) {
+			/*
+			 * Darwin sets the flag first,
+			 * BSD calls selrecord first
+			 */
+			so->so_snd.sb_flags |= SB_SEL;
+			selrecord(p, &so->so_snd.sb_sel, wql);
+		}
+	}
+
+	socket_unlock(so, 1);
+	return (revents);
+}
+
+/*
+ * kqueue attach entry point for socket file descriptors.  Selects the
+ * filter ops for the requested filter type (read/write/sock), attaches
+ * the knote to the matching klist, and marks the socket/sockbuf so
+ * event delivery paths know a knote is present.  Returns 0 on success,
+ * 1 on MAC denial or unsupported filter type.
+ */
+int
+soo_kqfilter(struct fileproc *fp, struct knote *kn, vfs_context_t ctx)
+{
+#pragma unused(fp)
+#if !CONFIG_MACF_SOCKET
+#pragma unused(ctx)
+#endif /* MAC_SOCKET */
+	struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
+	struct klist *skl;
+
+	socket_lock(so, 1);
+	so_update_last_owner_locked(so, PROC_NULL);
+	so_update_policy(so);
+
+#if CONFIG_MACF_SOCKET
+	/* MAC policy may veto attaching a kqueue filter to this socket. */
+	if (mac_socket_check_kqfilter(proc_ucred(vfs_context_proc(ctx)),
+	    kn, so) != 0) {
+		socket_unlock(so, 1);
+		return (1);
+	}
+#endif /* MAC_SOCKET */
+
+	/* Pick the filter ops and target klist for this filter type. */
+	switch (kn->kn_filter) {
+	case EVFILT_READ:
+		kn->kn_fop = &soread_filtops;
+		skl = &so->so_rcv.sb_sel.si_note;
+		break;
+	case EVFILT_WRITE:
+		kn->kn_fop = &sowrite_filtops;
+		skl = &so->so_snd.sb_sel.si_note;
+		break;
+	case EVFILT_SOCK:
+		kn->kn_fop = &sock_filtops;
+		skl = &so->so_klist;
+		break;
+	default:
+		socket_unlock(so, 1);
+		return (1);
+	}
+
+	/* Flag the socket so wakeup paths know to run KNOTE(). */
+	if (KNOTE_ATTACH(skl, kn)) {
+		switch (kn->kn_filter) {
+		case EVFILT_READ:
+			so->so_rcv.sb_flags |= SB_KNOTE;
+			break;
+		case EVFILT_WRITE:
+			so->so_snd.sb_flags |= SB_KNOTE;
+			break;
+		case EVFILT_SOCK:
+			so->so_flags |= SOF_KNOTE;
+			break;
+		default:
+			socket_unlock(so, 1);
+			return (1);
+		}
+	}
+	socket_unlock(so, 1);
+	return (0);
+}
+
+/*
+ * Detach a read knote from the receive buffer's klist, clearing
+ * SB_KNOTE once the last knote is gone.
+ */
+static void
+filt_sordetach(struct knote *kn)
+{
+	struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
+
+	socket_lock(so, 1);
+	if ((so->so_rcv.sb_flags & SB_KNOTE) != 0 &&
+	    KNOTE_DETACH(&so->so_rcv.sb_sel.si_note, kn))
+		so->so_rcv.sb_flags &= ~SB_KNOTE;
+	socket_unlock(so, 1);
+}
+
+/*ARGSUSED*/
+/*
+ * EVFILT_READ event predicate.  Returns non-zero when the socket is
+ * readable: a listener with queued connections, out-of-band data
+ * pending, EOF/error, or at least low-water-mark bytes buffered.
+ * Takes the socket lock unless the caller indicates (via
+ * SO_FILT_HINT_LOCKED) that it is already held.
+ */
+static int
+filt_soread(struct knote *kn, long hint)
+{
+	struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
+
+	if ((hint & SO_FILT_HINT_LOCKED) == 0)
+		socket_lock(so, 1);
+
+	if (so->so_options & SO_ACCEPTCONN) {
+		int isempty;
+
+		/*
+		 * Radar 6615193 handle the listen case dynamically
+		 * for kqueue read filter. This allows to call listen()
+		 * after registering the kqueue EVFILT_READ.
+		 */
+
+		kn->kn_data = so->so_qlen;
+		isempty = ! TAILQ_EMPTY(&so->so_comp);
+
+		if ((hint & SO_FILT_HINT_LOCKED) == 0)
+			socket_unlock(so, 1);
+
+		return (isempty);
+	}
+
+	/* socket isn't a listener */
+
+	/* Bytes available to read, excluding control data. */
+	kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
+
+	if (so->so_oobmark) {
+		if (kn->kn_flags & EV_OOBAND) {
+			/* Already in OOB mode: report bytes past the mark. */
+			kn->kn_data -= so->so_oobmark;
+			if ((hint & SO_FILT_HINT_LOCKED) == 0)
+				socket_unlock(so, 1);
+			return (1);
+		}
+		/* Enter OOB mode and report bytes up to the mark. */
+		kn->kn_data = so->so_oobmark;
+		kn->kn_flags |= EV_OOBAND;
+	} else {
+		if (so->so_state & SS_CANTRCVMORE) {
+			/* Peer closed: always readable (EOF). */
+			kn->kn_flags |= EV_EOF;
+			kn->kn_fflags = so->so_error;
+			if ((hint & SO_FILT_HINT_LOCKED) == 0)
+				socket_unlock(so, 1);
+			return (1);
+		}
+	}
+
+	if (so->so_state & SS_RCVATMARK) {
+		if (kn->kn_flags & EV_OOBAND) {
+			if ((hint & SO_FILT_HINT_LOCKED) == 0)
+				socket_unlock(so, 1);
+			return (1);
+		}
+		kn->kn_flags |= EV_OOBAND;
+	} else if (kn->kn_flags & EV_OOBAND) {
+		/* OOB data consumed; not readable until more arrives. */
+		kn->kn_data = 0;
+		if ((hint & SO_FILT_HINT_LOCKED) == 0)
+			socket_unlock(so, 1);
+		return (0);
+	}
+
+	if (so->so_error) {	/* temporary udp error */
+		if ((hint & SO_FILT_HINT_LOCKED) == 0)
+			socket_unlock(so, 1);
+		return (1);
+	}
+
+	/* NOTE_LOWAT lets the knote raise the low-water mark, capped at hiwat. */
+	int64_t lowwat = so->so_rcv.sb_lowat;
+	if (kn->kn_sfflags & NOTE_LOWAT) {
+		if (kn->kn_sdata > so->so_rcv.sb_hiwat)
+			lowwat = so->so_rcv.sb_hiwat;
+		else if (kn->kn_sdata > lowwat)
+			lowwat = kn->kn_sdata;
+	}
+
+	if ((hint & SO_FILT_HINT_LOCKED) == 0)
+		socket_unlock(so, 1);
+
+	return ((kn->kn_flags & EV_OOBAND) || kn->kn_data >= lowwat);
+}
+
+/*
+ * Detach a write knote from the send buffer's klist, clearing
+ * SB_KNOTE once the last knote is gone.
+ */
+static void
+filt_sowdetach(struct knote *kn)
+{
+	struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
+
+	socket_lock(so, 1);
+	if ((so->so_snd.sb_flags & SB_KNOTE) != 0 &&
+	    KNOTE_DETACH(&so->so_snd.sb_sel.si_note, kn))
+		so->so_snd.sb_flags &= ~SB_KNOTE;
+	socket_unlock(so, 1);
+}
+
+/*
+ * Return 1 if this is a connected IPv4/IPv6 socket whose inpcb is
+ * waiting for interface feedback (sends should be held off), else 0.
+ */
+int
+so_wait_for_if_feedback(struct socket *so)
+{
+	if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6)
+		return (0);
+	if ((so->so_state & SS_ISCONNECTED) == 0)
+		return (0);
+	if (INP_WAIT_FOR_IF_FEEDBACK(sotoinpcb(so)))
+		return (1);
+	return (0);
+}
+
+/*ARGSUSED*/
+/*
+ * EVFILT_WRITE event predicate.  Returns non-zero when the socket is
+ * writable: send-buffer space is at or above the low-water mark (or
+ * the TCP not-sent low-water check passes), or the connection hit
+ * EOF/error.  Connection-required sockets that aren't connected, and
+ * sockets waiting for interface feedback, are not writable.  Takes
+ * the socket lock unless SO_FILT_HINT_LOCKED says it is already held.
+ */
+static int
+filt_sowrite(struct knote *kn, long hint)
+{
+	struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
+	int ret = 0;
+
+	if ((hint & SO_FILT_HINT_LOCKED) == 0)
+		socket_lock(so, 1);
+
+	kn->kn_data = sbspace(&so->so_snd);
+	if (so->so_state & SS_CANTSENDMORE) {
+		/* Write side shut down: report EOF and any pending error. */
+		kn->kn_flags |= EV_EOF;
+		kn->kn_fflags = so->so_error;
+		ret = 1;
+		goto out;
+	}
+	if (so->so_error) {	/* temporary udp error */
+		ret = 1;
+		goto out;
+	}
+	if (((so->so_state & SS_ISCONNECTED) == 0) &&
+	    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
+		/* Not yet connected on a connection-oriented protocol. */
+		ret = 0;
+		goto out;
+	}
+	/* NOTE_LOWAT lets the knote raise the low-water mark, capped at hiwat. */
+	int64_t lowwat = so->so_snd.sb_lowat;
+	if (kn->kn_sfflags & NOTE_LOWAT) {
+		if (kn->kn_sdata > so->so_snd.sb_hiwat)
+			lowwat = so->so_snd.sb_hiwat;
+		else if (kn->kn_sdata > lowwat)
+			lowwat = kn->kn_sdata;
+	}
+	if (kn->kn_data >= lowwat) {
+		if ((so->so_flags & SOF_NOTSENT_LOWAT) != 0) {
+			/* Defer to TCP's unsent-data low-water check. */
+			ret = tcp_notsent_lowat_check(so);
+		} else {
+			ret = 1;
+		}
+	}
+	/* Suppress writability while waiting for interface feedback. */
+	if (so_wait_for_if_feedback(so))
+		ret = 0;
+out:
+	if ((hint & SO_FILT_HINT_LOCKED) == 0)
+		socket_unlock(so, 1);
+	return (ret);
+}
+
+/*
+ * Detach an EVFILT_SOCK knote from the socket's klist, clearing
+ * SOF_KNOTE once the last knote is gone.
+ */
+static void
+filt_sockdetach(struct knote *kn)
+{
+	struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
+
+	socket_lock(so, 1);
+	if ((so->so_flags & SOF_KNOTE) != 0 &&
+	    KNOTE_DETACH(&so->so_klist, kn))
+		so->so_flags &= ~SOF_KNOTE;
+	socket_unlock(so, 1);
+}
+
+/*
+ * EVFILT_SOCK event predicate.  Translates event hints (and current
+ * socket state) into the NOTE_* flags the knote subscribed to via
+ * kn_sfflags, accumulating them in kn_fflags.  Returns non-zero when
+ * any subscribed event has fired or the socket has an error pending.
+ * Takes the socket lock unless SO_FILT_HINT_LOCKED says it is held.
+ */
+static int
+filt_sockev(struct knote *kn, long hint)
+{
+	int ret = 0, locked = 0;
+	struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
+	long ev_hint = (hint & SO_FILT_HINT_EV);
+
+	if ((hint & SO_FILT_HINT_LOCKED) == 0) {
+		socket_lock(so, 1);
+		locked = 1;
+	}
+
+	/* Map each delivered hint onto its NOTE_* flag, if subscribed. */
+	if (ev_hint & SO_FILT_HINT_CONNRESET) {
+		if (kn->kn_sfflags & NOTE_CONNRESET)
+			kn->kn_fflags |= NOTE_CONNRESET;
+	}
+	if (ev_hint & SO_FILT_HINT_TIMEOUT) {
+		if (kn->kn_sfflags & NOTE_TIMEOUT)
+			kn->kn_fflags |= NOTE_TIMEOUT;
+	}
+	if (ev_hint & SO_FILT_HINT_NOSRCADDR) {
+		if (kn->kn_sfflags & NOTE_NOSRCADDR)
+			kn->kn_fflags |= NOTE_NOSRCADDR;
+	}
+	if (ev_hint & SO_FILT_HINT_IFDENIED) {
+		if ((kn->kn_sfflags & NOTE_IFDENIED))
+			kn->kn_fflags |= NOTE_IFDENIED;
+	}
+	if (ev_hint & SO_FILT_HINT_KEEPALIVE) {
+		if (kn->kn_sfflags & NOTE_KEEPALIVE)
+			kn->kn_fflags |= NOTE_KEEPALIVE;
+	}
+	if (ev_hint & SO_FILT_HINT_ADAPTIVE_WTIMO) {
+		if (kn->kn_sfflags & NOTE_ADAPTIVE_WTIMO)
+			kn->kn_fflags |= NOTE_ADAPTIVE_WTIMO;
+	}
+	if (ev_hint & SO_FILT_HINT_ADAPTIVE_RTIMO) {
+		if (kn->kn_sfflags & NOTE_ADAPTIVE_RTIMO)
+			kn->kn_fflags |= NOTE_ADAPTIVE_RTIMO;
+	}
+	if (ev_hint & SO_FILT_HINT_CONNECTED) {
+		if (kn->kn_sfflags & NOTE_CONNECTED)
+			kn->kn_fflags |= NOTE_CONNECTED;
+	}
+	if (ev_hint & SO_FILT_HINT_DISCONNECTED) {
+		if (kn->kn_sfflags & NOTE_DISCONNECTED)
+			kn->kn_fflags |= NOTE_DISCONNECTED;
+	}
+	if (ev_hint & SO_FILT_HINT_CONNINFO_UPDATED) {
+		/* Only protocols that opt in (PR_EVCONNINFO) deliver this. */
+		if (so->so_proto != NULL &&
+		    (so->so_proto->pr_flags & PR_EVCONNINFO) &&
+		    (kn->kn_sfflags & NOTE_CONNINFO_UPDATED))
+			kn->kn_fflags |= NOTE_CONNINFO_UPDATED;
+	}
+
+	/* These are derived from current state rather than a hint. */
+	if ((kn->kn_sfflags & NOTE_READCLOSED) &&
+	    (so->so_state & SS_CANTRCVMORE))
+		kn->kn_fflags |= NOTE_READCLOSED;
+
+	if ((kn->kn_sfflags & NOTE_WRITECLOSED) &&
+	    (so->so_state & SS_CANTSENDMORE))
+		kn->kn_fflags |= NOTE_WRITECLOSED;
+
+	/* SUSPEND/RESUME are mutually exclusive; latest state wins. */
+	if ((kn->kn_sfflags & NOTE_SUSPEND) &&
+	    ((ev_hint & SO_FILT_HINT_SUSPEND) ||
+	    (so->so_flags & SOF_SUSPENDED))) {
+		kn->kn_fflags &= ~(NOTE_SUSPEND | NOTE_RESUME);
+		kn->kn_fflags |= NOTE_SUSPEND;
+	}
+
+	if ((kn->kn_sfflags & NOTE_RESUME) &&
+	    ((ev_hint & SO_FILT_HINT_RESUME) ||
+	    (so->so_flags & SOF_SUSPENDED) == 0)) {
+		kn->kn_fflags &= ~(NOTE_SUSPEND | NOTE_RESUME);
+		kn->kn_fflags |= NOTE_RESUME;
+	}
+
+	if (so->so_error != 0) {
+		/* Pending error always fires; report it in kn_data. */
+		ret = 1;
+		kn->kn_data = so->so_error;
+		kn->kn_flags |= EV_EOF;
+	} else {
+		get_sockev_state(so, (u_int32_t *)&(kn->kn_data));
+	}
+
+	if (kn->kn_fflags != 0)
+		ret = 1;
+
+	if (locked)
+		socket_unlock(so, 1);
+
+	return (ret);
+}
+
+/*
+ * Fold the socket's connection state into *statep as SOCKEV_* bits.
+ * SOCKEV_CONNECTED tracks SS_ISCONNECTED exactly (set or cleared);
+ * SOCKEV_DISCONNECTED is set when SS_ISDISCONNECTED is set.  Other
+ * bits already present in *statep are preserved.
+ */
+void
+get_sockev_state(struct socket *so, u_int32_t *statep)
+{
+	u_int32_t state = *statep;
+
+	if (so->so_state & SS_ISCONNECTED)
+		state |= SOCKEV_CONNECTED;
+	else
+		state &= ~SOCKEV_CONNECTED;
+	if (so->so_state & SS_ISDISCONNECTED)
+		state |= SOCKEV_DISCONNECTED;
+	*statep = state;
+}
+
+/*
+ * Worst-case length of the lock-history string: 2*SO_LCKDBG_MAX
+ * "%p:%p " pairs ("0x" + hex digits + separators) plus the NUL.
+ */
+#define	SO_LOCK_HISTORY_STR_LEN \
+	(2 * SO_LCKDBG_MAX * (2 + (2 * sizeof (void *)) + 1) + 1)
+
+/*
+ * Format the socket's recent lock/unlock return addresses as a
+ * "lock:unlock " pair per slot, most recent first, for debug output.
+ * Returns a pointer to a static buffer: NOT re-entrant and only
+ * valid until the next call.
+ */
+__private_extern__ const char *
+solockhistory_nr(struct socket *so)
+{
+	size_t n = 0;
+	int i;
+	static char lock_history_str[SO_LOCK_HISTORY_STR_LEN];
+
+	bzero(lock_history_str, sizeof (lock_history_str));
+	/* Walk the circular history buffers from newest to oldest entry. */
+	for (i = SO_LCKDBG_MAX - 1; i >= 0; i--) {
+		n += snprintf(lock_history_str + n,
+		    SO_LOCK_HISTORY_STR_LEN - n, "%p:%p ",
+		    so->lock_lr[(so->next_lock_lr + i) % SO_LCKDBG_MAX],
+		    so->unlock_lr[(so->next_unlock_lr + i) % SO_LCKDBG_MAX]);
+	}
+	return (lock_history_str);
+}
+
+/*
+ * Lock a socket, optionally taking a use-count reference.  Protocols
+ * that supply their own pr_lock (e.g. per-socket locks) are deferred
+ * to; otherwise the domain mutex is taken, the use count bumped when
+ * refcount is set, and the caller's return address recorded in the
+ * lock-history ring for debugging (see solockhistory_nr()).
+ */
+int
+socket_lock(struct socket *so, int refcount)
+{
+	int error = 0;
+	void *lr_saved;
+
+	/* Record who is taking the lock for post-mortem debugging. */
+	lr_saved = __builtin_return_address(0);
+
+	if (so->so_proto->pr_lock) {
+		error = (*so->so_proto->pr_lock)(so, refcount, lr_saved);
+	} else {
+#ifdef MORE_LOCKING_DEBUG
+		lck_mtx_assert(so->so_proto->pr_domain->dom_mtx,
+		    LCK_MTX_ASSERT_NOTOWNED);
+#endif
+		lck_mtx_lock(so->so_proto->pr_domain->dom_mtx);
+		if (refcount)
+			so->so_usecount++;
+		so->lock_lr[so->next_lock_lr] = lr_saved;
+		so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
+	}
+
+	return (error);
+}
+
+int
+socket_unlock(struct socket *so, int refcount)
+{
+ int error = 0;
+ void *lr_saved;
+ lck_mtx_t *mutex_held;