+/*
+ * We don't want another read/write lock for every vnode in the system
+ * so we keep a hash of them here. There should never be very many of
+ * these around at any point in time.
+ */
+/*
+ * cluster_lock_direct_read: look up (or lazily create) the rw lock
+ * associated with vp and take it in the requested mode.  Returns the
+ * lock entry, which the caller later hands to
+ * cluster_unlock_direct_read() to drop the lock and its reference.
+ */
+cl_direct_read_lock_t *cluster_lock_direct_read(vnode_t vp, lck_rw_type_t type)
+{
+ // Hash the vnode address into one of the fixed set of buckets.
+ struct cl_direct_read_locks *head
+ = &cl_direct_read_locks[(uintptr_t)vp / sizeof(*vp)
+ % CL_DIRECT_READ_LOCK_BUCKETS];
+
+ struct cl_direct_read_lock *lck, *new_lck = NULL;
+
+ // Retry loop: we may have to drop the spin lock to allocate, then
+ // re-scan the bucket in case another thread inserted an entry first.
+ for (;;) {
+ lck_spin_lock(&cl_direct_read_spin_lock);
+
+ // Look for an existing lock entry for this vnode.
+ LIST_FOREACH(lck, head, chain) {
+ if (lck->vp == vp) {
+ // Found one: take a reference while still holding the
+ // spin lock, so the entry cannot be freed out from
+ // under us before we acquire the rw lock below.
+ ++lck->ref_count;
+ lck_spin_unlock(&cl_direct_read_spin_lock);
+ if (new_lck) {
+ // Someone beat us to it, ditch the allocation
+ lck_rw_destroy(&new_lck->rw_lock, cl_mtx_grp);
+ FREE(new_lck, M_TEMP);
+ }
+ // May block; safe because our ref_count keeps the
+ // entry alive.
+ lck_rw_lock(&lck->rw_lock, type);
+ return lck;
+ }
+ }
+
+ if (new_lck) {
+ // Use the lock we allocated
+ LIST_INSERT_HEAD(head, new_lck, chain);
+ lck_spin_unlock(&cl_direct_read_spin_lock);
+ lck_rw_lock(&new_lck->rw_lock, type);
+ return new_lck;
+ }
+
+ lck_spin_unlock(&cl_direct_read_spin_lock);
+
+ // Allocate a new lock
+ // (done with the spin lock dropped, since M_WAITOK allocation
+ // can block; the loop re-scans the bucket afterwards)
+ MALLOC(new_lck, cl_direct_read_lock_t *, sizeof(*new_lck),
+ M_TEMP, M_WAITOK);
+ lck_rw_init(&new_lck->rw_lock, cl_mtx_grp, cl_mtx_attr);
+ new_lck->vp = vp;
+ new_lck->ref_count = 1;
+
+ // Got to go round again
+ }
+}
+
+/*
+ * cluster_unlock_direct_read: release the rw lock taken by
+ * cluster_lock_direct_read() and drop the caller's reference.  The
+ * last reference removes the entry from its hash bucket and frees it.
+ */
+void cluster_unlock_direct_read(cl_direct_read_lock_t *lck)
+{
+ lck_rw_done(&lck->rw_lock);
+
+ // ref_count is protected by the global spin lock, consistent with
+ // cluster_lock_direct_read().
+ lck_spin_lock(&cl_direct_read_spin_lock);
+ if (lck->ref_count == 1) {
+ // We are the last holder: unhash the entry under the spin
+ // lock, then tear it down and free it with the lock dropped.
+ LIST_REMOVE(lck, chain);
+ lck_spin_unlock(&cl_direct_read_spin_lock);
+ lck_rw_destroy(&lck->rw_lock, cl_mtx_grp);
+ FREE(lck, M_TEMP);
+ } else {
+ --lck->ref_count;
+ lck_spin_unlock(&cl_direct_read_spin_lock);
+ }
+}