+/* Forward declarations for the sysctl handlers defined later in this file. */
+__private_extern__ int kevt_getstat SYSCTL_HANDLER_ARGS;
+__private_extern__ int kevt_pcblist SYSCTL_HANDLER_ARGS;
+
+/* sysctl subtree for the kernel event socket family (net.systm.kevt). */
+SYSCTL_NODE(_net_systm, OID_AUTO, kevt,
+ CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Kernel event family");
+
+/*
+ * Global kernel-event statistics (PCB count, generation count, etc.),
+ * exported read-only as net.systm.kevt.stats via kevt_getstat.
+ */
+struct kevtstat kevtstat;
+SYSCTL_PROC(_net_systm_kevt, OID_AUTO, stats,
+ CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
+ kevt_getstat, "S,kevtstat", "");
+
+/*
+ * net.systm.kevt.pcblist: read-only dump of the kernel-event PCB list,
+ * one "S,xkevtpcb" record per PCB, produced by kevt_pcblist.
+ */
+SYSCTL_PROC(_net_systm_kevt, OID_AUTO, pcblist,
+ CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
+ kevt_pcblist, "S,xkevtpcb", "");
+
+/*
+ * Return the mutex that protects this kernel-event socket: the per-PCB
+ * mutex embedded in the attached kern_event_pcb (evp_mtx).
+ *
+ * Panics (rather than returning) if the socket has no PCB, or if the
+ * socket's use count has gone negative — both indicate a refcounting
+ * bug; solockhistory_nr() is included in the panic string to aid
+ * debugging.  The locktype argument is unused here.
+ *
+ * NOTE(review): the first NOTREACHED annotation sits after the inner
+ * if and applies only to the usecount panic branch; when usecount is
+ * non-negative, control falls through to the return — intentional.
+ */
+static lck_mtx_t *
+event_getlock(struct socket *so, int locktype)
+{
+#pragma unused(locktype)
+ struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;
+
+ if (so->so_pcb != NULL) {
+ if (so->so_usecount < 0)
+ panic("%s: so=%p usecount=%d lrh= %s\n", __func__,
+ so, so->so_usecount, solockhistory_nr(so));
+ /* NOTREACHED */
+ } else {
+ panic("%s: so=%p NULL NO so_pcb %s\n", __func__,
+ so, solockhistory_nr(so));
+ /* NOTREACHED */
+ }
+ return (&ev_pcb->evp_mtx);
+}
+
+/*
+ * Lock a kernel-event socket by taking its per-PCB mutex (evp_mtx).
+ *
+ * refcount  non-zero to also bump so_usecount (caller is taking a
+ *           reference in addition to the lock).
+ * lr        caller-supplied lock return address for the debug lock
+ *           history; when NULL, the immediate caller's return address
+ *           is recorded instead.
+ *
+ * Panics if the socket has no PCB or its use count is negative.
+ * Always returns 0.
+ */
+static int
+event_lock(struct socket *so, int refcount, void *lr)
+{
+ void *lr_saved;
+
+ if (lr == NULL)
+ lr_saved = __builtin_return_address(0);
+ else
+ lr_saved = lr;
+
+ if (so->so_pcb != NULL) {
+ lck_mtx_lock(&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
+ } else {
+ panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__,
+ so, lr_saved, solockhistory_nr(so));
+ /* NOTREACHED */
+ }
+
+ /* Sanity-check the reference count after acquiring the lock. */
+ if (so->so_usecount < 0) {
+ panic("%s: so=%p so_pcb=%p lr=%p ref=%d lrh= %s\n", __func__,
+ so, so->so_pcb, lr_saved, so->so_usecount,
+ solockhistory_nr(so));
+ /* NOTREACHED */
+ }
+
+ if (refcount)
+ so->so_usecount++;
+
+ /* Record the caller in the socket's circular lock-history buffer. */
+ so->lock_lr[so->next_lock_lr] = lr_saved;
+ so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
+ return (0);
+}
+
+/*
+ * Unlock a kernel-event socket, dropping a reference first when
+ * requested.  Mirror image of event_lock().
+ *
+ * refcount  non-zero to decrement so_usecount before unlocking.
+ * lr        caller-supplied return address for the unlock history;
+ *           NULL means record the immediate caller instead.
+ *
+ * If the use count reaches zero the socket must already be marked
+ * SOF_PCBCLEARING, and event_sofreelastref() both releases evp_mtx
+ * and frees the PCB/socket; otherwise the mutex is simply dropped.
+ * Panics on a negative use count or a missing PCB.  Always returns 0.
+ */
+static int
+event_unlock(struct socket *so, int refcount, void *lr)
+{
+ void *lr_saved;
+ lck_mtx_t *mutex_held;
+
+ if (lr == NULL)
+ lr_saved = __builtin_return_address(0);
+ else
+ lr_saved = lr;
+
+ if (refcount)
+ so->so_usecount--;
+
+ if (so->so_usecount < 0) {
+ panic("%s: so=%p usecount=%d lrh= %s\n", __func__,
+ so, so->so_usecount, solockhistory_nr(so));
+ /* NOTREACHED */
+ }
+ if (so->so_pcb == NULL) {
+ panic("%s: so=%p NO PCB usecount=%d lr=%p lrh= %s\n", __func__,
+ so, so->so_usecount, (void *)lr_saved,
+ solockhistory_nr(so));
+ /* NOTREACHED */
+ }
+ mutex_held = (&((struct kern_event_pcb *)so->so_pcb)->evp_mtx);
+
+ /* Caller must already hold the per-PCB mutex taken in event_lock(). */
+ lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
+ so->unlock_lr[so->next_unlock_lr] = lr_saved;
+ so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
+
+ if (so->so_usecount == 0) {
+ /* Last reference: tear down (this also releases mutex_held). */
+ VERIFY(so->so_flags & SOF_PCBCLEARING);
+ event_sofreelastref(so);
+ } else {
+ lck_mtx_unlock(mutex_held);
+ }
+
+ return (0);
+}
+
+/*
+ * Release the last reference on a kernel-event socket.
+ *
+ * Called from event_unlock() with the PCB's evp_mtx held; detaches the
+ * PCB from the socket, releases evp_mtx, then (with the mutex dropped,
+ * as asserted) unlinks the PCB from the global list under the
+ * exclusive kev_rwlock, updates the statistics counters, destroys the
+ * PCB via kev_delete(), and finally frees the socket itself with
+ * sofreelastref().  Always returns 0.
+ */
+static int
+event_sofreelastref(struct socket *so)
+{
+ struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb;
+
+ lck_mtx_assert(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_OWNED);
+
+ so->so_pcb = NULL;
+
+ /*
+ * Disable upcall in the event another thread is in kev_post_msg()
+ * appending record to the receive socket buffer, since sbwakeup()
+ * may release the socket lock otherwise.
+ */
+ so->so_rcv.sb_flags &= ~SB_UPCALL;
+ so->so_snd.sb_flags &= ~SB_UPCALL;
+ so->so_event = sonullevent;
+ lck_mtx_unlock(&(ev_pcb->evp_mtx));
+
+ /* evp_mtx must be dropped before taking kev_rwlock (lock ordering). */
+ lck_mtx_assert(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_NOTOWNED);
+ lck_rw_lock_exclusive(kev_rwlock);
+ LIST_REMOVE(ev_pcb, evp_link);
+ kevtstat.kes_pcbcount--;
+ kevtstat.kes_gencnt++;
+ lck_rw_done(kev_rwlock);
+ kev_delete(ev_pcb);
+
+ sofreelastref(so, 1);
+ return (0);
+}
+
+/* Number of entries in the eventsw protocol switch table (defined elsewhere). */
+static int event_proto_count = (sizeof (eventsw) / sizeof (struct protosw));
+