+ return (0);
+}
+
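+/*
+ * Notify the owner of a socket that out-of-band data has arrived: deliver
+ * SIGURG to the owning process or process group recorded in so_pgid, and
+ * wake up any select()/poll() waiters on the receive buffer.
+ */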
+void
+sohasoutofband(struct socket *so)
+{
+
+ if (so->so_pgid < 0)
+ gsignal(-so->so_pgid, SIGURG);
+ else if (so->so_pgid > 0)
+ proc_signal(so->so_pgid, SIGURG);
+ selwakeup(&so->so_rcv.sb_sel);
+}
+
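+/*
+ * poll(2)/select(2) back-end for sockets.  With the socket locked, report
+ * which of the requested events are already satisfied; if none are, record
+ * this thread against the receive and/or send selinfo so it is woken when
+ * the socket becomes ready.
+ */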
+int
+sopoll(struct socket *so, int events, __unused kauth_cred_t cred, void *wql)
+{
+ struct proc *p = current_proc();
+ int revents = 0;
+
+ socket_lock(so, 1);
+
+ if (events & (POLLIN | POLLRDNORM))
+ if (soreadable(so))
+ revents |= events & (POLLIN | POLLRDNORM);
+
+ if (events & (POLLOUT | POLLWRNORM))
+ if (sowriteable(so))
+ revents |= events & (POLLOUT | POLLWRNORM);
+
+ if (events & (POLLPRI | POLLRDBAND))
+ if (so->so_oobmark || (so->so_state & SS_RCVATMARK))
+ revents |= events & (POLLPRI | POLLRDBAND);
+
+ if (revents == 0) {
+ if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
+ /*
+ * Darwin sets the flag first,
+ * BSD calls selrecord first
+ */
+ so->so_rcv.sb_flags |= SB_SEL;
+ selrecord(p, &so->so_rcv.sb_sel, wql);
+ }
+
+ if (events & (POLLOUT | POLLWRNORM)) {
+ /*
+ * Darwin sets the flag first,
+ * BSD calls selrecord first
+ */
+ so->so_snd.sb_flags |= SB_SEL;
+ selrecord(p, &so->so_snd.sb_sel, wql);
+ }
+ }
+
+ socket_unlock(so, 1);
+ return (revents);
+}
+
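+/*
+ * kqueue attach routine for socket file descriptors.  Pick the read or
+ * write filter operations based on kn_filter, attach the knote to the
+ * matching socket buffer's klist, and mark that buffer SB_KNOTE when the
+ * first knote is attached.
+ */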
+int
+soo_kqfilter(__unused struct fileproc *fp, struct knote *kn,
+ __unused struct proc *p)
+{
+ struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
+ struct sockbuf *sb;
+
+ socket_lock(so, 1);
+
+#if CONFIG_MACF_SOCKET
+ if (mac_socket_check_kqfilter(proc_ucred(p), kn, so) != 0) {
+ socket_unlock(so, 1);
+ return (1);
+ }
+#endif /* CONFIG_MACF_SOCKET */
+
+ switch (kn->kn_filter) {
+ case EVFILT_READ:
+ kn->kn_fop = &soread_filtops;
+ sb = &so->so_rcv;
+ break;
+ case EVFILT_WRITE:
+ kn->kn_fop = &sowrite_filtops;
+ sb = &so->so_snd;
+ break;
+ default:
+ socket_unlock(so, 1);
+ return (1);
+ }
+
+ if (KNOTE_ATTACH(&sb->sb_sel.si_note, kn))
+ sb->sb_flags |= SB_KNOTE;
+ socket_unlock(so, 1);
+ return (0);
+}
+
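+/*
+ * Detach an EVFILT_READ knote from the receive buffer's klist, clearing
+ * SB_KNOTE once the last knote is removed.
+ */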
+static void
+filt_sordetach(struct knote *kn)
+{
+ struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
+
+ socket_lock(so, 1);
+ if (so->so_rcv.sb_flags & SB_KNOTE)
+ if (KNOTE_DETACH(&so->so_rcv.sb_sel.si_note, kn))
+ so->so_rcv.sb_flags &= ~SB_KNOTE;
+ socket_unlock(so, 1);
+}
+
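+/*
+ * EVFILT_READ event routine.  For listening sockets, report the length of
+ * the completed-connection queue.  Otherwise report the bytes available in
+ * the receive buffer, flagging EV_OOBAND around the out-of-band mark and
+ * EV_EOF once no more data can be received, and fire when the data reaches
+ * the low-water mark (or the NOTE_LOWAT threshold), when out-of-band data
+ * is pending, or when a socket error is pending.
+ */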
+/*ARGSUSED*/
+static int
+filt_soread(struct knote *kn, long hint)
+{
+ struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
+
+ if ((hint & SO_FILT_HINT_LOCKED) == 0)
+ socket_lock(so, 1);
+
+ if (so->so_options & SO_ACCEPTCONN) {
+ int is_not_empty;
+
+ /*
+ * Radar 6615193: handle the listen case dynamically for the kqueue
+ * read filter, so that listen() may be called after the EVFILT_READ
+ * knote has been registered.
+ */
+
+ kn->kn_data = so->so_qlen;
+ is_not_empty = !TAILQ_EMPTY(&so->so_comp);
+
+ if ((hint & SO_FILT_HINT_LOCKED) == 0)
+ socket_unlock(so, 1);
+
+ return (is_not_empty);
+ }
+
+ /* socket isn't a listener */
+
+ kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
+
+ if (so->so_oobmark) {
+ if (kn->kn_flags & EV_OOBAND) {
+ kn->kn_data -= so->so_oobmark;
+ if ((hint & SO_FILT_HINT_LOCKED) == 0)
+ socket_unlock(so, 1);
+ return (1);
+ }
+ kn->kn_data = so->so_oobmark;
+ kn->kn_flags |= EV_OOBAND;
+ } else {
+ if (so->so_state & SS_CANTRCVMORE) {
+ kn->kn_flags |= EV_EOF;
+ kn->kn_fflags = so->so_error;
+ if ((hint & SO_FILT_HINT_LOCKED) == 0)
+ socket_unlock(so, 1);
+ return (1);
+ }
+ }
+
+ if (so->so_state & SS_RCVATMARK) {
+ if (kn->kn_flags & EV_OOBAND) {
+ if ((hint & SO_FILT_HINT_LOCKED) == 0)
+ socket_unlock(so, 1);
+ return (1);
+ }
+ kn->kn_flags |= EV_OOBAND;
+ } else if (kn->kn_flags & EV_OOBAND) {
+ kn->kn_data = 0;
+ if ((hint & SO_FILT_HINT_LOCKED) == 0)
+ socket_unlock(so, 1);
+ return (0);
+ }
+
+ if (so->so_error) { /* temporary UDP error */
+ if ((hint & SO_FILT_HINT_LOCKED) == 0)
+ socket_unlock(so, 1);
+ return (1);
+ }
+
+ if ((hint & SO_FILT_HINT_LOCKED) == 0)
+ socket_unlock(so, 1);
+
+ return ((kn->kn_flags & EV_OOBAND) ||
+ kn->kn_data >= ((kn->kn_sfflags & NOTE_LOWAT) ?
+ kn->kn_sdata : so->so_rcv.sb_lowat));
+}
+
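+/*
+ * Detach an EVFILT_WRITE knote from the send buffer's klist, clearing
+ * SB_KNOTE once the last knote is removed.
+ */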
+static void
+filt_sowdetach(struct knote *kn)
+{
+ struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
+ socket_lock(so, 1);
+
+ if (so->so_snd.sb_flags & SB_KNOTE)
+ if (KNOTE_DETACH(&so->so_snd.sb_sel.si_note, kn))
+ so->so_snd.sb_flags &= ~SB_KNOTE;
+ socket_unlock(so, 1);
+}
+
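+/*
+ * EVFILT_WRITE event routine.  Report the space remaining in the send
+ * buffer; fire when it reaches the low-water mark (or the NOTE_LOWAT
+ * threshold), when the socket can send no more (EV_EOF), or when a socket
+ * error is pending, but not while a connection-oriented socket is still
+ * unconnected.
+ */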
+/*ARGSUSED*/
+static int
+filt_sowrite(struct knote *kn, long hint)
+{
+ struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data;
+
+ if ((hint & SO_FILT_HINT_LOCKED) == 0)
+ socket_lock(so, 1);
+
+ kn->kn_data = sbspace(&so->so_snd);
+ if (so->so_state & SS_CANTSENDMORE) {
+ kn->kn_flags |= EV_EOF;
+ kn->kn_fflags = so->so_error;
+ if ((hint & SO_FILT_HINT_LOCKED) == 0)
+ socket_unlock(so, 1);
+ return (1);
+ }
+ if (so->so_error) { /* temporary UDP error */
+ if ((hint & SO_FILT_HINT_LOCKED) == 0)
+ socket_unlock(so, 1);
+ return (1);
+ }
+ if (((so->so_state & SS_ISCONNECTED) == 0) &&
+ (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
+ if ((hint & SO_FILT_HINT_LOCKED) == 0)
+ socket_unlock(so, 1);
+ return (0);
+ }
+ if ((hint & SO_FILT_HINT_LOCKED) == 0)
+ socket_unlock(so, 1);
+ if (kn->kn_sfflags & NOTE_LOWAT)
+ return (kn->kn_data >= kn->kn_sdata);
+ return (kn->kn_data >= so->so_snd.sb_lowat);
+}
+
+#define SO_LOCK_HISTORY_STR_LEN (2 * SO_LCKDBG_MAX * (2 + sizeof(void *) + 1) + 1)
+
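+/*
+ * Format the socket's recorded lock/unlock caller addresses (most recent
+ * first) into a static string, for inclusion in panic messages.
+ */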
+__private_extern__ const char *
+solockhistory_nr(struct socket *so)
+{
+ size_t n = 0;
+ int i;
+ static char lock_history_str[SO_LOCK_HISTORY_STR_LEN];
+
+ for (i = SO_LCKDBG_MAX - 1; i >= 0; i--) {
+ n += snprintf(lock_history_str + n, SO_LOCK_HISTORY_STR_LEN - n, "%lx:%lx ",
+ (uintptr_t) so->lock_lr[(so->next_lock_lr + i) % SO_LCKDBG_MAX],
+ (uintptr_t) so->unlock_lr[(so->next_unlock_lr + i) % SO_LCKDBG_MAX]);
+ }
+ return lock_history_str;
+}
+
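+/*
+ * Lock a socket, going through the protocol's pr_lock entry point when one
+ * is provided and otherwise taking the domain mutex directly.  A non-zero
+ * refcount also bumps the socket's use count, and the caller's return
+ * address is recorded in the socket's lock history for debugging.
+ */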
+int
+socket_lock(struct socket *so, int refcount)
+{
+ int error = 0;
+ void *lr_saved;
+
+ lr_saved = __builtin_return_address(0);
+
+ if (so->so_proto->pr_lock) {
+ error = (*so->so_proto->pr_lock)(so, refcount, lr_saved);
+ } else {
+#ifdef MORE_LOCKING_DEBUG
+ lck_mtx_assert(so->so_proto->pr_domain->dom_mtx,
+ LCK_MTX_ASSERT_NOTOWNED);
+#endif
+ lck_mtx_lock(so->so_proto->pr_domain->dom_mtx);
+ if (refcount)
+ so->so_usecount++;
+ so->lock_lr[so->next_lock_lr] = lr_saved;
+ so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
+ }
+
+ return (error);
+}
+
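+/*
+ * Unlock a socket, the converse of socket_lock().  A non-zero refcount
+ * drops the socket's use count, and releasing the last reference frees the
+ * socket via sofreelastref() before the domain mutex is dropped.
+ */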
+int
+socket_unlock(struct socket *so, int refcount)
+{
+ int error = 0;
+ void *lr_saved;
+ lck_mtx_t *mutex_held;
+
+ lr_saved = __builtin_return_address(0);
+
+ if (so->so_proto == NULL)
+ panic("socket_unlock: null so_proto so=%p\n", so);
+
+ if (so->so_proto->pr_unlock) {
+ error = (*so->so_proto->pr_unlock)(so, refcount, lr_saved);
+ } else {
+ mutex_held = so->so_proto->pr_domain->dom_mtx;
+#ifdef MORE_LOCKING_DEBUG
+ lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
+#endif
+ so->unlock_lr[so->next_unlock_lr] = lr_saved;
+ so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
+
+ if (refcount) {
+ if (so->so_usecount <= 0)
+ panic("socket_unlock: bad refcount=%d so=%p (%d, %d, %d) lrh=%s",
+ so->so_usecount, so, so->so_proto->pr_domain->dom_family,
+ so->so_type, so->so_proto->pr_protocol,
+ solockhistory_nr(so));
+
+ so->so_usecount--;
+ if (so->so_usecount == 0) {
+ sofreelastref(so, 1);
+ }
+ }
+ lck_mtx_unlock(mutex_held);
+ }
+
+ return (error);