+ dqhashtbl = hashinit(desiredvnodes, M_DQUOT, &dqhash);
+out:
+ dq_list_unlock();
+}
+
+
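+/*
+ * generation count for the quota list lock; bumped on each acquisition
+ * so that a caller who drops and retakes the lock can use
+ * dq_list_lock_changed() to detect that another thread got in while it
+ * slept and that any cached list state may be stale
+ */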
+static volatile int dq_list_lock_cnt = 0;
+
+static int
+dq_list_lock(void)
+{
+ lck_mtx_lock(quota_list_mtx_lock);
+ return ++dq_list_lock_cnt;
+}
+
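+/* return non-zero if the list lock has been taken since 'oldval' was sampled */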
+static int
+dq_list_lock_changed(int oldval)
+{
+ return (dq_list_lock_cnt != oldval);
+}
+
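+/* sample the current value of the list lock generation count */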
+static int
+dq_list_lock_val(void)
+{
+ return dq_list_lock_cnt;
+}
+
+void
+dq_list_unlock(void)
+{
+ lck_mtx_unlock(quota_list_mtx_lock);
+}
+
+
+/*
+ * must be called with the quota_list_lock held
+ */
+void
+dq_lock_internal(struct dquot *dq)
+{
+ while (dq->dq_lflags & DQ_LLOCK) {
+ dq->dq_lflags |= DQ_LWANT;
+ msleep(&dq->dq_lflags, quota_list_mtx_lock, PVFS, "dq_lock_internal", NULL);
+ }
+ dq->dq_lflags |= DQ_LLOCK;
+}
+
+/*
+ * must be called with the quota_list_lock held
+ */
+void
+dq_unlock_internal(struct dquot *dq)
+{
+ int wanted = dq->dq_lflags & DQ_LWANT;
+
+ dq->dq_lflags &= ~(DQ_LLOCK | DQ_LWANT);
+
+ if (wanted)
+ wakeup(&dq->dq_lflags);
+}
+
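+/*
+ * external interface to the per-dquot lock; takes the quota list
+ * mutex only long enough to run the internal lock/unlock helper
+ */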
+void
+dqlock(struct dquot *dq)
+{
+ lck_mtx_lock(quota_list_mtx_lock);
+
+ dq_lock_internal(dq);
+
+ lck_mtx_unlock(quota_list_mtx_lock);
+}
+
+void
+dqunlock(struct dquot *dq)
+{
+ lck_mtx_lock(quota_list_mtx_lock);
+
+ dq_unlock_internal(dq);
+
+ lck_mtx_unlock(quota_list_mtx_lock);
+}
+
+
+
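+/*
+ * mark a quota file as being opened (QTF_OPENING) or closed (QTF_CLOSING),
+ * sleeping until a conflicting transition (or, for a close, any outstanding
+ * references) has drained; returns EBUSY if the request cannot proceed
+ */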
+int
+qf_get(struct quotafile *qfp, int type)
+{
+ int error = 0;
+
+ dq_list_lock();
+
+ switch (type) {
+
+ case QTF_OPENING:
+ while ( (qfp->qf_qflags & (QTF_OPENING | QTF_CLOSING)) ) {
+ if ( (qfp->qf_qflags & QTF_OPENING) ) {
+ error = EBUSY;
+ break;
+ }
+ if ( (qfp->qf_qflags & QTF_CLOSING) ) {
+ qfp->qf_qflags |= QTF_WANTED;
+ msleep(&qfp->qf_qflags, quota_list_mtx_lock, PVFS, "qf_get", NULL);
+ }
+ }
+ if (qfp->qf_vp != NULLVP)
+ error = EBUSY;
+ if (error == 0)
+ qfp->qf_qflags |= QTF_OPENING;
+ break;
+
+ case QTF_CLOSING:
+ if ( (qfp->qf_qflags & QTF_CLOSING) ) {
+ error = EBUSY;
+ break;
+ }
+ qfp->qf_qflags |= QTF_CLOSING;
+
+ while ( (qfp->qf_qflags & QTF_OPENING) || qfp->qf_refcnt ) {
+ qfp->qf_qflags |= QTF_WANTED;
+ msleep(&qfp->qf_qflags, quota_list_mtx_lock, PVFS, "qf_get", NULL);
+ }
+ if (qfp->qf_vp == NULLVP) {
+ qfp->qf_qflags &= ~QTF_CLOSING;
+ error = EBUSY;
+ }
+ break;
+ }
+ dq_list_unlock();
+
+ return (error);
+}
+
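+/*
+ * clear the QTF_OPENING or QTF_CLOSING state set up via qf_get
+ * and wake up anyone waiting on the transition
+ */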
+void
+qf_put(struct quotafile *qfp, int type)
+{
+
+ dq_list_lock();
+
+ switch (type) {
+
+ case QTF_OPENING:
+ case QTF_CLOSING:
+ qfp->qf_qflags &= ~type;
+ break;
+ }
+ if ( (qfp->qf_qflags & QTF_WANTED) ) {
+ qfp->qf_qflags &= ~QTF_WANTED;
+ wakeup(&qfp->qf_qflags);
+ }
+ dq_list_unlock();
+}
+
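+/*
+ * Illustrative sketch only (assumed caller, not part of this file): a
+ * quota-on style path would bracket its setup of the quota file with
+ * qf_get/qf_put so that concurrent opens and closes are serialized:
+ *
+ *	if ((error = qf_get(qfp, QTF_OPENING)))
+ *		return (error);
+ *	... open the quota file and initialize *qfp ...
+ *	qf_put(qfp, QTF_OPENING);
+ */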
+
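+/* wrappers around the per-quotafile mutex */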
+static void
+qf_lock(struct quotafile *qfp)
+{
+ lck_mtx_lock(&qfp->qf_lock);
+}
+
+static void
+qf_unlock(struct quotafile *qfp)
+{
+ lck_mtx_unlock(&qfp->qf_lock);
+}
+
+
+/*
+ * take a reference on the quota file while we're
+ * in dqget... this will prevent a quota_off from
+ * occurring while we're potentially playing with
+ * the quota file... the quota_off will stall until
+ * all the current references 'die'... once we start
+ * into quota_off, all new references will be rejected...
+ * we also don't want any dqgets being processed while
+ * we're in the middle of the quota_on... once we've
+ * actually got the quota file open and the associated
+ * struct quotafile inited, we can let them come through
+ *
+ * quota list lock must be held on entry
+ */
+static int
+qf_ref(struct quotafile *qfp)
+{
+ int error = 0;
+
+ if ( (qfp->qf_qflags & (QTF_OPENING | QTF_CLOSING)) || (qfp->qf_vp == NULLVP) )
+ error = EINVAL;
+ else
+ qfp->qf_refcnt++;
+
+ return (error);
+}
+
+/*
+ * drop our reference and wakeup any waiters if
+ * we were the last one holding a ref
+ *
+ * quota list lock must be held on entry
+ */
+static void
+qf_rele(struct quotafile *qfp)
+{
+ qfp->qf_refcnt--;
+
+ if ( (qfp->qf_qflags & QTF_WANTED) && qfp->qf_refcnt == 0) {
+ qfp->qf_qflags &= ~QTF_WANTED;
+ wakeup(&qfp->qf_qflags);
+ }
+}
+
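+/*
+ * Illustrative sketch only (assumed caller, not part of this file): a
+ * dqget-style path would hold a reference across its use of the quota
+ * file so that quota_off stalls until the reference is dropped:
+ *
+ *	dq_list_lock();
+ *	if ( (error = qf_ref(qfp)) ) {
+ *		dq_list_unlock();
+ *		return (error);
+ *	}
+ *	... look up or allocate the dquot ...
+ *	qf_rele(qfp);
+ *	dq_list_unlock();
+ */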
+
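+/* initialize a struct quotafile before its first use */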
+void
+dqfileinit(struct quotafile *qfp)
+{
+ qfp->qf_vp = NULLVP;
+ qfp->qf_qflags = 0;
+
+ lck_mtx_init(&qfp->qf_lock, qf_lck_grp, qf_lck_attr);