+#define SO_LOCK_HISTORY_STR_LEN (2 * SO_LCKDBG_MAX * (2 + (2 * sizeof(void *)) + 1) + 1)
+
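+/*
+ * Format the socket's lock/unlock return-address history, most recent
+ * entry first, for inclusion in panic messages.  Uses a static buffer,
+ * so the result is not reentrant-safe.
+ */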
+__private_extern__ const char *
+solockhistory_nr(struct socket *so)
+{
+ size_t n = 0;
+ int i;
+ static char lock_history_str[SO_LOCK_HISTORY_STR_LEN];
+
+ bzero(lock_history_str, sizeof(lock_history_str));
+ for (i = SO_LCKDBG_MAX - 1; i >= 0; i--) {
+ n += snprintf(lock_history_str + n, SO_LOCK_HISTORY_STR_LEN - n, "%lx:%lx ",
+ (uintptr_t) so->lock_lr[(so->next_lock_lr + i) % SO_LCKDBG_MAX],
+ (uintptr_t) so->unlock_lr[(so->next_unlock_lr + i) % SO_LCKDBG_MAX]);
+ }
+ return lock_history_str;
+}
+
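+/*
+ * Lock a socket: defer to the protocol's pr_lock callback if one is
+ * provided, otherwise take the domain mutex.  When `refcount' is
+ * nonzero, also take a reference on the socket.  The caller's return
+ * address is recorded in lock_lr[] for debugging.
+ */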
+int
+socket_lock(struct socket *so, int refcount)
+{
+ int error = 0;
+ void *lr_saved;
+
+ lr_saved = __builtin_return_address(0);
+
+ if (so->so_proto->pr_lock) {
+ error = (*so->so_proto->pr_lock)(so, refcount, lr_saved);
+ } else {
+#ifdef MORE_LOCKING_DEBUG
+ lck_mtx_assert(so->so_proto->pr_domain->dom_mtx,
+ LCK_MTX_ASSERT_NOTOWNED);
+#endif
+ lck_mtx_lock(so->so_proto->pr_domain->dom_mtx);
+ if (refcount)
+ so->so_usecount++;
+ so->lock_lr[so->next_lock_lr] = lr_saved;
+ so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
+ }
+
+ return (error);
+}
+
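+/*
+ * Unlock a socket: defer to the protocol's pr_unlock callback if one
+ * is provided.  Otherwise record the unlock site, drop a reference
+ * when `refcount' is nonzero (freeing the socket when the last
+ * reference goes away), and release the domain mutex.
+ */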
+int
+socket_unlock(struct socket *so, int refcount)
+{
+ int error = 0;
+ void *lr_saved;
+ lck_mtx_t *mutex_held;
+
+ lr_saved = __builtin_return_address(0);
+
+ if (so->so_proto == NULL)
+ panic("socket_unlock null so_proto so=%p\n", so);
+
+ if (so->so_proto->pr_unlock) {
+ error = (*so->so_proto->pr_unlock)(so, refcount, lr_saved);
+ } else {
+ mutex_held = so->so_proto->pr_domain->dom_mtx;
+#ifdef MORE_LOCKING_DEBUG
+ lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
+#endif
+ so->unlock_lr[so->next_unlock_lr] = lr_saved;
+ so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
+
+ if (refcount) {
+ if (so->so_usecount <= 0)
+ panic("socket_unlock: bad refcount=%d so=%p (%d, %d, %d) lrh=%s",
+ so->so_usecount, so, so->so_proto->pr_domain->dom_family,
+ so->so_type, so->so_proto->pr_protocol,
+ solockhistory_nr(so));
+
+ so->so_usecount--;
+ if (so->so_usecount == 0) {
+ sofreelastref(so, 1);
+ }
+ }
+ lck_mtx_unlock(mutex_held);
+ }
+
+ return (error);
+}
+
+/*
+ * Called with the socket lock held; asserts lock ownership and drops
+ * the last reference via sofreelastref().
+ */
+void
+sofree(struct socket *so)
+{
+ lck_mtx_t *mutex_held;
+
+ if (so->so_proto->pr_getlock != NULL)
+ mutex_held = (*so->so_proto->pr_getlock)(so, 0);
+ else
+ mutex_held = so->so_proto->pr_domain->dom_mtx;
+ lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
+
+ sofreelastref(so, 0);
+}
+
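+/*
+ * Take a reference on the socket; returns with the socket unlocked.
+ */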
+void
+soreference(struct socket *so)
+{
+ socket_lock(so, 1); /* lock socket and take one reference */
+ socket_unlock(so, 0); /* unlock only; keep the reference */
+}
+
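+/*
+ * Drop a reference previously taken with soreference().
+ */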
+void
+sodereference(struct socket *so)
+{
+ socket_lock(so, 0);
+ socket_unlock(so, 1);
+}
+
+/*
+ * Set or clear SOF_MULTIPAGES on the socket to allow or disallow the
+ * use of jumbo clusters.  The caller must hold the socket lock.
+ */
+void
+somultipages(struct socket *so, boolean_t set)
+{
+ if (set)
+ so->so_flags |= SOF_MULTIPAGES;
+ else
+ so->so_flags &= ~SOF_MULTIPAGES;
+}
+
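+/*
+ * Return nonzero if the socket's foreign (destination) address is
+ * local to this host.  Handles AF_INET and AF_INET6 sockets.
+ */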
+int
+so_isdstlocal(struct socket *so)
+{
+ struct inpcb *inp = (struct inpcb *)so->so_pcb;
+
+ if (so->so_proto->pr_domain->dom_family == AF_INET) {
+ return inaddr_local(inp->inp_faddr);
+ } else if (so->so_proto->pr_domain->dom_family == AF_INET6) {
+ return in6addr_local(&inp->in6p_faddr);
+ }
+ return 0;
+}
+
+int
+sosetdefunct(struct proc *p, struct socket *so, int level, boolean_t noforce)