/*
 * Copyright (c) 2002-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1990, 1993, 1995
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Robert Elz at The University of Melbourne.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      derived from @(#)ufs_quota.c    8.5 (Berkeley) 5/20/95
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <kern/zalloc.h>
#include <sys/file_internal.h>
#include <sys/proc_internal.h>
#include <sys/vnode_internal.h>
#include <sys/mount_internal.h>
#include <sys/quota.h>
#include <sys/uio_internal.h>

#include <libkern/OSByteOrder.h>
/* vars for quota file lock */
lck_grp_t       *qf_lck_grp;
lck_grp_attr_t  *qf_lck_grp_attr;
lck_attr_t      *qf_lck_attr;

/* vars for quota list lock */
lck_grp_t       *quota_list_lck_grp;
lck_grp_attr_t  *quota_list_lck_grp_attr;
lck_attr_t      *quota_list_lck_attr;
lck_mtx_t       *quota_list_mtx_lock;
/* Routines to lock and unlock the quota global data */
static int  dq_list_lock(void);
static void dq_list_unlock(void);

static void dq_lock_internal(struct dquot *dq);
static void dq_unlock_internal(struct dquot *dq);

static u_int32_t quotamagic[MAXQUOTAS] = INITQMAGICS;
/*
 * Code pertaining to management of the in-core dquot data structures.
 */
#define DQHASH(dqvp, id) \
    (&dqhashtbl[((((intptr_t)(dqvp)) >> 8) + id) & dqhash])
LIST_HEAD(dqhash, dquot) * dqhashtbl;
u_long dqhash;

#define DQUOTINC        5       /* minimum free dquots desired */
long numdquot, desireddquot = DQUOTINC;

/*
 * Quota free list.
 */
TAILQ_HEAD(dqfreelist, dquot) dqfreelist;
/*
 * Dquot dirty orphans list
 */
TAILQ_HEAD(dqdirtylist, dquot) dqdirtylist;

ZONE_VIEW_DEFINE(ZV_DQUOT, "FS quota entries", KHEAP_ID_DEFAULT,
    sizeof(struct dquot));
static int  dqlookup(struct quotafile *, u_int32_t, struct dqblk *, u_int32_t *);
static int  dqsync_locked(struct dquot *dq);

static void qf_lock(struct quotafile *);
static void qf_unlock(struct quotafile *);
static int  qf_ref(struct quotafile *);
static void qf_rele(struct quotafile *);
/*
 * Initialize locks for the quota system.
 */
void
dqinit(void)
{
    /*
     * Allocate quota list lock group attribute and group
     */
    quota_list_lck_grp_attr = lck_grp_attr_alloc_init();
    quota_list_lck_grp = lck_grp_alloc_init("quota list", quota_list_lck_grp_attr);

    /*
     * Allocate quota list lock attribute
     */
    quota_list_lck_attr = lck_attr_alloc_init();

    /*
     * Allocate quota list lock
     */
    quota_list_mtx_lock = lck_mtx_alloc_init(quota_list_lck_grp, quota_list_lck_attr);

    /*
     * Allocate quota file lock group attribute and group
     */
    qf_lck_grp_attr = lck_grp_attr_alloc_init();
    qf_lck_grp = lck_grp_alloc_init("quota file", qf_lck_grp_attr);

    /*
     * Allocate quota file lock attribute
     */
    qf_lck_attr = lck_attr_alloc_init();
}
/*
 * Report whether dqhashinit has been run.
 */
int
dqisinitialized(void)
{
    return dqhashtbl != NULL;
}

/*
 * Initialize hash table for dquot structures.
 */
void
dqhashinit(void)
{
    dq_list_lock();
    if (dqisinitialized()) {
        goto out;
    }

    TAILQ_INIT(&dqfreelist);
    TAILQ_INIT(&dqdirtylist);
    dqhashtbl = hashinit(desiredvnodes, M_DQUOT, &dqhash);
out:
    dq_list_unlock();
}
static volatile int dq_list_lock_cnt = 0;

static int
dq_list_lock(void)
{
    lck_mtx_lock(quota_list_mtx_lock);
    return ++dq_list_lock_cnt;
}

static int
dq_list_lock_changed(int oldval)
{
    return dq_list_lock_cnt != oldval;
}

static int
dq_list_lock_val(void)
{
    return dq_list_lock_cnt;
}

static void
dq_list_unlock(void)
{
    lck_mtx_unlock(quota_list_mtx_lock);
}
/*
 * must be called with the quota_list_lock held
 */
static void
dq_lock_internal(struct dquot *dq)
{
    while (dq->dq_lflags & DQ_LLOCK) {
        dq->dq_lflags |= DQ_LWANT;
        msleep(&dq->dq_lflags, quota_list_mtx_lock, PVFS, "dq_lock_internal", NULL);
    }
    dq->dq_lflags |= DQ_LLOCK;
}

/*
 * must be called with the quota_list_lock held
 */
static void
dq_unlock_internal(struct dquot *dq)
{
    int wanted = dq->dq_lflags & DQ_LWANT;

    dq->dq_lflags &= ~(DQ_LLOCK | DQ_LWANT);

    if (wanted) {
        wakeup(&dq->dq_lflags);
    }
}
void
dqlock(struct dquot *dq)
{
    lck_mtx_lock(quota_list_mtx_lock);

    dq_lock_internal(dq);

    lck_mtx_unlock(quota_list_mtx_lock);
}

void
dqunlock(struct dquot *dq)
{
    lck_mtx_lock(quota_list_mtx_lock);

    dq_unlock_internal(dq);

    lck_mtx_unlock(quota_list_mtx_lock);
}
int
qf_get(struct quotafile *qfp, int type)
{
    int error = 0;

    dq_list_lock();

    switch (type) {
    case QTF_OPENING:
        while ((qfp->qf_qflags & (QTF_OPENING | QTF_CLOSING))) {
            if ((qfp->qf_qflags & QTF_OPENING)) {
                error = EBUSY;
                break;
            }
            if ((qfp->qf_qflags & QTF_CLOSING)) {
                qfp->qf_qflags |= QTF_WANTED;
                msleep(&qfp->qf_qflags, quota_list_mtx_lock, PVFS, "qf_get", NULL);
            }
        }
        if (qfp->qf_vp != NULLVP) {
            error = EBUSY;
        }
        if (error == 0) {
            qfp->qf_qflags |= QTF_OPENING;
        }
        break;

    case QTF_CLOSING:
        if ((qfp->qf_qflags & QTF_CLOSING)) {
            error = EBUSY;
            break;
        }
        qfp->qf_qflags |= QTF_CLOSING;

        while ((qfp->qf_qflags & QTF_OPENING) || qfp->qf_refcnt) {
            qfp->qf_qflags |= QTF_WANTED;
            msleep(&qfp->qf_qflags, quota_list_mtx_lock, PVFS, "qf_get", NULL);
        }
        if (qfp->qf_vp == NULLVP) {
            qfp->qf_qflags &= ~QTF_CLOSING;
            error = EBUSY;
        }
        break;
    }
    dq_list_unlock();

    return error;
}
void
qf_put(struct quotafile *qfp, int type)
{
    dq_list_lock();

    switch (type) {
    case QTF_OPENING:
    case QTF_CLOSING:
        qfp->qf_qflags &= ~type;
        break;
    }
    if ((qfp->qf_qflags & QTF_WANTED)) {
        qfp->qf_qflags &= ~QTF_WANTED;
        wakeup(&qfp->qf_qflags);
    }
    dq_list_unlock();
}
static void
qf_lock(struct quotafile *qfp)
{
    lck_mtx_lock(&qfp->qf_lock);
}

static void
qf_unlock(struct quotafile *qfp)
{
    lck_mtx_unlock(&qfp->qf_lock);
}
/*
 * take a reference on the quota file while we're
 * in dqget... this will prevent a quota_off from
 * occurring while we're potentially playing with
 * the quota file... the quota_off will stall until
 * all the current references 'die'... once we start
 * into quota_off, all new references will be rejected
 * we also don't want any dqgets being processed while
 * we're in the middle of the quota_on... once we've
 * actually got the quota file open and the associated
 * struct quotafile inited, we can let them come through
 *
 * quota list lock must be held on entry
 */
static int
qf_ref(struct quotafile *qfp)
{
    int error = 0;

    if ((qfp->qf_qflags & (QTF_OPENING | QTF_CLOSING)) || (qfp->qf_vp == NULLVP)) {
        error = EINVAL;
    } else {
        qfp->qf_refcnt++;
    }

    return error;
}
/*
 * drop our reference and wakeup any waiters if
 * we were the last one holding a ref
 *
 * quota list lock must be held on entry
 */
static void
qf_rele(struct quotafile *qfp)
{
    qfp->qf_refcnt--;

    if ((qfp->qf_qflags & QTF_WANTED) && qfp->qf_refcnt == 0) {
        qfp->qf_qflags &= ~QTF_WANTED;
        wakeup(&qfp->qf_qflags);
    }
}
void
dqfileinit(struct quotafile *qfp)
{
    qfp->qf_vp = NULLVP;
    qfp->qf_qflags = 0;

    lck_mtx_init(&qfp->qf_lock, qf_lck_grp, qf_lck_attr);
}
/*
 * Initialize a quota file
 *
 * must be called with the quota file lock held
 */
int
dqfileopen(struct quotafile *qfp, int type)
{
    struct dqfilehdr header;
    struct vfs_context context;
    off_t file_size;
    uio_t auio;
    int error = 0;
    char uio_buf[UIO_SIZEOF(1)];

    context.vc_thread = current_thread();
    context.vc_ucred = qfp->qf_cred;

    /* Obtain the file size */
    if ((error = vnode_size(qfp->qf_vp, &file_size, &context)) != 0) {
        goto out;
    }

    /* Read the file header */
    auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
        &uio_buf[0], sizeof(uio_buf));
    uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof(header));
    error = VNOP_READ(qfp->qf_vp, auio, 0, &context);
    if (error) {
        goto out;
    } else if (uio_resid(auio)) {
        error = EINVAL;
        goto out;
    }
    /* Sanity check the quota file header. */
    if ((OSSwapBigToHostInt32(header.dqh_magic) != quotamagic[type]) ||
        (OSSwapBigToHostInt32(header.dqh_version) > QF_VERSION) ||
        (!powerof2(OSSwapBigToHostInt32(header.dqh_maxentries))) ||
        (OSSwapBigToHostInt32(header.dqh_maxentries) > (file_size / sizeof(struct dqblk)))) {
        error = EINVAL;
        goto out;
    }
    /* Set up the time limits for this quota. */
    if (header.dqh_btime != 0) {
        qfp->qf_btime = OSSwapBigToHostInt32(header.dqh_btime);
    } else {
        qfp->qf_btime = MAX_DQ_TIME;
    }
    if (header.dqh_itime != 0) {
        qfp->qf_itime = OSSwapBigToHostInt32(header.dqh_itime);
    } else {
        qfp->qf_itime = MAX_IQ_TIME;
    }

    /* Calculate the hash table constants. */
    qfp->qf_maxentries = OSSwapBigToHostInt32(header.dqh_maxentries);
    qfp->qf_entrycnt = OSSwapBigToHostInt32(header.dqh_entrycnt);
    qfp->qf_shift = dqhashshift(qfp->qf_maxentries);
out:
    return error;
}
/*
 * Close down a quota file
 */
void
dqfileclose(struct quotafile *qfp, __unused int type)
{
    struct dqfilehdr header;
    struct vfs_context context;
    uio_t auio;
    char uio_buf[UIO_SIZEOF(1)];

    auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
        &uio_buf[0], sizeof(uio_buf));
    uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof(header));

    context.vc_thread = current_thread();
    context.vc_ucred = qfp->qf_cred;

    if (VNOP_READ(qfp->qf_vp, auio, 0, &context) == 0) {
        header.dqh_entrycnt = OSSwapHostToBigInt32(qfp->qf_entrycnt);
        uio_reset(auio, 0, UIO_SYSSPACE, UIO_WRITE);
        uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof(header));
        (void) VNOP_WRITE(qfp->qf_vp, auio, 0, &context);
    }
}
/*
 * Obtain a dquot structure for the specified identifier and quota file
 * reading the information from the file if necessary.
 */
int
dqget(u_int32_t id, struct quotafile *qfp, int type, struct dquot **dqp)
{
    struct dquot *dq;
    struct dquot *ndq = NULL;
    struct dquot *fdq = NULL;
    struct dqhash *dqh;
    struct vnode *dqvp;
    int error = 0;
    int listlockval = 0;

    if (!dqisinitialized()) {
        *dqp = NODQUOT;
        return EINVAL;
    }

    if (id == 0 || qfp->qf_vp == NULLVP) {
        *dqp = NODQUOT;
        return EINVAL;
    }
    dq_list_lock();

    if ((qf_ref(qfp))) {
        dq_list_unlock();

        *dqp = NODQUOT;
        return EINVAL;
    }
    if ((dqvp = qfp->qf_vp) == NULLVP) {
        qf_rele(qfp);
        dq_list_unlock();

        *dqp = NODQUOT;
        return EINVAL;
    }
    dqh = DQHASH(dqvp, id);

relookup:
    listlockval = dq_list_lock_val();

    /*
     * Check the cache first.
     */
    for (dq = dqh->lh_first; dq; dq = dq->dq_hash.le_next) {
        if (dq->dq_id != id ||
            dq->dq_qfile->qf_vp != dqvp) {
            continue;
        }

        dq_lock_internal(dq);
        if (dq_list_lock_changed(listlockval)) {
            dq_unlock_internal(dq);
            goto relookup;
        }

        /*
         * dq_lock_internal may drop the quota_list_lock to msleep, so
         * we need to re-evaluate the identity of this dq
         */
        if (dq->dq_id != id || dq->dq_qfile == NULL ||
            dq->dq_qfile->qf_vp != dqvp) {
            dq_unlock_internal(dq);
            goto relookup;
        }
        /*
         * Cache hit with no references.  Take
         * the structure off the free list.
         */
        if (dq->dq_cnt++ == 0) {
            if (dq->dq_flags & DQ_MOD) {
                TAILQ_REMOVE(&dqdirtylist, dq, dq_freelist);
            } else {
                TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
            }
        }
        dq_unlock_internal(dq);

        if (fdq != NULL) {
            /*
             * we grabbed this from the free list in the first pass
             * but we found the dq we were looking for in
             * the cache the 2nd time through
             * so stick it back on the free list and return the cached entry
             */
            TAILQ_INSERT_HEAD(&dqfreelist, fdq, dq_freelist);
        }
        qf_rele(qfp);
        dq_list_unlock();

        if (ndq != NULL) {
            /*
             * we allocated this in the first pass
             * but we found the dq we were looking for in
             * the cache the 2nd time through so free it
             */
            zfree(ZV_DQUOT, ndq);
        }
        *dqp = dq;

        return 0;
    }
    /*
     * Not in cache, allocate a new one.
     */
    if (TAILQ_EMPTY(&dqfreelist) &&
        numdquot < MAXQUOTAS * desiredvnodes) {
        desireddquot += DQUOTINC;
    }

    if (fdq != NULL) {
        /*
         * we captured this from the free list
         * in the first pass through, so go
         * ahead and use it
         */
        dq = fdq;
        fdq = NULL;
    } else if (numdquot < desireddquot) {
        if (ndq == NULL) {
            /*
             * drop the quota list lock since zalloc may block
             */
            dq_list_unlock();

            ndq = (struct dquot *)zalloc_flags(ZV_DQUOT,
                Z_WAITOK | Z_ZERO);

            listlockval = dq_list_lock();
            /*
             * need to look for the entry again in the cache
             * since we dropped the quota list lock and
             * someone else may have beaten us to creating it
             */
            goto relookup;
        } else {
            /*
             * we allocated this in the first pass through
             * and we're still under our target, so go
             * ahead and use it
             */
            dq = ndq;
            ndq = NULL;
            numdquot++;
        }
    } else {
        if (TAILQ_EMPTY(&dqfreelist)) {
            qf_rele(qfp);
            dq_list_unlock();

            if (ndq) {
                /*
                 * we allocated this in the first pass through
                 * but we're now at the limit of our cache size
                 * so free it
                 */
                zfree(ZV_DQUOT, ndq);
            }
            *dqp = NODQUOT;
            return EUSERS;
        }
        dq = TAILQ_FIRST(&dqfreelist);

        dq_lock_internal(dq);

        if (dq_list_lock_changed(listlockval) || dq->dq_cnt || (dq->dq_flags & DQ_MOD)) {
            /*
             * we lost the race while we weren't holding
             * the quota list lock... dq_lock_internal
             * will drop it to msleep... this dq has been
             * reclaimed... go find another
             */
            dq_unlock_internal(dq);

            /*
             * need to look for the entry again in the cache
             * since we dropped the quota list lock and
             * someone else may have beaten us to creating it
             */
            goto relookup;
        }
        TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);

        if (dq->dq_qfile != NULL) {
            LIST_REMOVE(dq, dq_hash);
            dq->dq_qfile = NULL;
            dq->dq_id = 0;
        }
        dq_unlock_internal(dq);

        /*
         * because we may have dropped the quota list lock
         * in the call to dq_lock_internal, we need to
         * relookup in the hash in case someone else
         * caused a dq with this identity to be created...
         * if we don't find it, we'll use this one
         */
        fdq = dq;
        goto relookup;
    }
    /*
     * we've either freshly allocated a dq
     * or we've atomically pulled it out of
     * the hash and freelists... no one else
     * can have a reference, which means no
     * one else can be trying to use this dq
     */
    dq_lock_internal(dq);
    if (dq_list_lock_changed(listlockval)) {
        dq_unlock_internal(dq);
        goto relookup;
    }

    /*
     * Initialize the contents of the dquot structure.
     */
    dq->dq_cnt = 1;
    dq->dq_flags = 0;
    dq->dq_id = id;
    dq->dq_qfile = qfp;
    dq->dq_type = type;
    /*
     * once we insert it in the hash and
     * drop the quota_list_lock, it can be
     * 'found'... however, we're still holding
     * the dq_lock which will keep us from doing
     * anything with it until we've finished
     * initializing it
     */
    LIST_INSERT_HEAD(dqh, dq, dq_hash);
    dq_list_unlock();

    if (ndq) {
        /*
         * we allocated this in the first pass through
         * but we didn't need it, so free it after
         * we've dropped the quota list lock
         */
        zfree(ZV_DQUOT, ndq);
    }

    error = dqlookup(qfp, id, &dq->dq_dqb, &dq->dq_index);

    /*
     * I/O error in reading quota file, release
     * quota structure and reflect problem to caller.
     */
    if (error) {
        dq_list_lock();

        dq->dq_id = 0;
        dq->dq_qfile = NULL;
        LIST_REMOVE(dq, dq_hash);

        dq_unlock_internal(dq);
        qf_rele(qfp);
        dq_list_unlock();

        dqrele(dq);

        *dqp = NODQUOT;
        return error;
    }
    dq_list_lock();
    /*
     * Check for no limit to enforce.
     * Initialize time values if necessary.
     */
    if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
        dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0) {
        dq->dq_flags |= DQ_FAKE;
    }
    if (dq->dq_id != 0) {
        struct timeval tv;

        microtime(&tv);
        if (dq->dq_btime == 0) {
            dq->dq_btime = tv.tv_sec + qfp->qf_btime;
        }
        if (dq->dq_itime == 0) {
            dq->dq_itime = tv.tv_sec + qfp->qf_itime;
        }
    }
    qf_rele(qfp);
    dq_unlock_internal(dq);
    dq_list_unlock();

    *dqp = dq;
    return 0;
}
/*
 * Lookup a dqblk structure for the specified identifier and
 * quota file.  If there is no entry for this identifier then
 * one is inserted.  The actual hash table index is returned.
 */
static int
dqlookup(struct quotafile *qfp, u_int32_t id, struct dqblk *dqb, uint32_t *index)
{
    struct vnode *dqvp = qfp->qf_vp;
    struct vfs_context context;
    uio_t auio;
    int i, skip, last;
    u_int32_t mask;
    int error = 0;
    char uio_buf[UIO_SIZEOF(1)];

    qf_lock(qfp);

    context.vc_thread = current_thread();
    context.vc_ucred = qfp->qf_cred;

    mask = qfp->qf_maxentries - 1;
    i = dqhash1(id, qfp->qf_shift, mask);
    skip = dqhash2(id, mask);

    for (last = (i + (qfp->qf_maxentries - 1) * skip) & mask;
        i != last;
        i = (i + skip) & mask) {
        auio = uio_createwithbuffer(1, dqoffset(i), UIO_SYSSPACE, UIO_READ,
            &uio_buf[0], sizeof(uio_buf));
        uio_addiov(auio, CAST_USER_ADDR_T(dqb), sizeof(struct dqblk));
        error = VNOP_READ(dqvp, auio, 0, &context);
        if (error) {
            printf("dqlookup: error %d looking up id %u at index %d\n", error, id, i);
            break;
        } else if (uio_resid(auio)) {
            error = EIO;
            printf("dqlookup: error looking up id %u at index %d\n", id, i);
            break;
        }
        /*
         * An empty entry means there is no entry
         * with that id.  In this case a new dqb
         * record will be inserted.
         */
        if (dqb->dqb_id == 0) {
            bzero(dqb, sizeof(struct dqblk));
            dqb->dqb_id = OSSwapHostToBigInt32(id);
            /*
             * Write back to reserve entry for this id
             */
            uio_reset(auio, dqoffset(i), UIO_SYSSPACE, UIO_WRITE);
            uio_addiov(auio, CAST_USER_ADDR_T(dqb), sizeof(struct dqblk));
            error = VNOP_WRITE(dqvp, auio, 0, &context);
            if (uio_resid(auio) && error == 0) {
                error = EIO;
            }
            if (error == 0) {
                ++qfp->qf_entrycnt;
            }
            dqb->dqb_id = id;
            break;
        }
        /* An id match means an entry was found. */
        if (OSSwapBigToHostInt32(dqb->dqb_id) == id) {
            dqb->dqb_bhardlimit = OSSwapBigToHostInt64(dqb->dqb_bhardlimit);
            dqb->dqb_bsoftlimit = OSSwapBigToHostInt64(dqb->dqb_bsoftlimit);
            dqb->dqb_curbytes = OSSwapBigToHostInt64(dqb->dqb_curbytes);
            dqb->dqb_ihardlimit = OSSwapBigToHostInt32(dqb->dqb_ihardlimit);
            dqb->dqb_isoftlimit = OSSwapBigToHostInt32(dqb->dqb_isoftlimit);
            dqb->dqb_curinodes = OSSwapBigToHostInt32(dqb->dqb_curinodes);
            dqb->dqb_btime = OSSwapBigToHostInt32(dqb->dqb_btime);
            dqb->dqb_itime = OSSwapBigToHostInt32(dqb->dqb_itime);
            dqb->dqb_id = OSSwapBigToHostInt32(dqb->dqb_id);
            break;
        }
    }
    qf_unlock(qfp);

    *index = i;  /* remember index so we don't have to recompute it later */

    return error;
}
/*
 * Release a reference to a dquot.
 */
void
dqrele(struct dquot *dq)
{
    if (dq == NODQUOT) {
        return;
    }
    dqlock(dq);

    if (dq->dq_cnt > 1) {
        dq->dq_cnt--;

        dqunlock(dq);
        return;
    }
    if (dq->dq_flags & DQ_MOD) {
        (void) dqsync_locked(dq);
    }
    dq->dq_cnt--;

    dq_list_lock();
    TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
    dq_unlock_internal(dq);
    dq_list_unlock();
}
/*
 * Release a reference to a dquot but don't do any I/O.
 */
void
dqreclaim(struct dquot *dq)
{
    if (dq == NODQUOT) {
        return;
    }

    dq_list_lock();
    dq_lock_internal(dq);

    if (--dq->dq_cnt > 0) {
        dq_unlock_internal(dq);
        dq_list_unlock();
        return;
    }
    if (dq->dq_flags & DQ_MOD) {
        TAILQ_INSERT_TAIL(&dqdirtylist, dq, dq_freelist);
    } else {
        TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
    }

    dq_unlock_internal(dq);
    dq_list_unlock();
}
/*
 * Update a quota file's orphaned disk quotas.
 */
void
dqsync_orphans(struct quotafile *qfp)
{
    struct dquot *dq;

    dq_list_lock();
again:
    TAILQ_FOREACH(dq, &dqdirtylist, dq_freelist) {
        if (dq->dq_qfile != qfp) {
            continue;
        }

        dq_lock_internal(dq);

        if (dq->dq_qfile != qfp) {
            /*
             * the identity of this dq changed while
             * the quota_list_lock was dropped
             * dq_lock_internal can drop it to msleep
             */
            dq_unlock_internal(dq);
            goto again;
        }
        if ((dq->dq_flags & DQ_MOD) == 0) {
            /*
             * someone cleaned this dq and removed it from
             * the dirty list while the quota_list_lock
             * was dropped
             */
            dq_unlock_internal(dq);
            goto again;
        }
        if (dq->dq_cnt != 0) {
            panic("dqsync_orphans: dquot in use");
        }

        TAILQ_REMOVE(&dqdirtylist, dq, dq_freelist);

        dq_list_unlock();
        /*
         * we're still holding the dqlock at this point
         * with the reference count == 0, so no one else
         * should be able to pick this dq up while we
         * write it out
         */
        (void) dqsync_locked(dq);

        dq_list_lock();

        TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);

        dq_unlock_internal(dq);
        goto again;
    }
    dq_list_unlock();
}
int
dqsync(struct dquot *dq)
{
    int error = 0;

    if (dq != NODQUOT) {
        dqlock(dq);

        if ((dq->dq_flags & DQ_MOD)) {
            error = dqsync_locked(dq);
        }

        dqunlock(dq);
    }
    return error;
}
/*
 * Update the disk quota in the quota file.
 */
static int
dqsync_locked(struct dquot *dq)
{
    struct vfs_context context;
    struct vnode *dqvp;
    struct dqblk dqb, *dqblkp;
    uio_t auio;
    int error;
    char uio_buf[UIO_SIZEOF(1)];

    if (dq->dq_id == 0) {
        dq->dq_flags &= ~DQ_MOD;
        return 0;
    }
    if (dq->dq_qfile == NULL) {
        panic("dqsync: NULL dq_qfile");
    }
    if ((dqvp = dq->dq_qfile->qf_vp) == NULLVP) {
        panic("dqsync: NULL qf_vp");
    }

    auio = uio_createwithbuffer(1, dqoffset(dq->dq_index), UIO_SYSSPACE,
        UIO_WRITE, &uio_buf[0], sizeof(uio_buf));
    uio_addiov(auio, CAST_USER_ADDR_T(&dqb), sizeof(struct dqblk));

    context.vc_thread = current_thread();   /* XXX */
    context.vc_ucred = dq->dq_qfile->qf_cred;

    dqblkp = &dq->dq_dqb;
    dqb.dqb_bhardlimit = OSSwapHostToBigInt64(dqblkp->dqb_bhardlimit);
    dqb.dqb_bsoftlimit = OSSwapHostToBigInt64(dqblkp->dqb_bsoftlimit);
    dqb.dqb_curbytes = OSSwapHostToBigInt64(dqblkp->dqb_curbytes);
    dqb.dqb_ihardlimit = OSSwapHostToBigInt32(dqblkp->dqb_ihardlimit);
    dqb.dqb_isoftlimit = OSSwapHostToBigInt32(dqblkp->dqb_isoftlimit);
    dqb.dqb_curinodes = OSSwapHostToBigInt32(dqblkp->dqb_curinodes);
    dqb.dqb_btime = OSSwapHostToBigInt32(dqblkp->dqb_btime);
    dqb.dqb_itime = OSSwapHostToBigInt32(dqblkp->dqb_itime);
    dqb.dqb_id = OSSwapHostToBigInt32(dqblkp->dqb_id);
    dqb.dqb_spare[0] = 0;
    dqb.dqb_spare[1] = 0;
    dqb.dqb_spare[2] = 0;
    dqb.dqb_spare[3] = 0;

    error = VNOP_WRITE(dqvp, auio, 0, &context);
    if (uio_resid(auio) && error == 0) {
        error = EIO;
    }
    dq->dq_flags &= ~DQ_MOD;

    return error;
}
/*
 * Flush all entries from the cache for a particular vnode.
 */
void
dqflush(struct vnode *vp)
{
    struct dquot *dq, *nextdq;
    struct dqhash *dqh;

    if (!dqisinitialized()) {
        return;
    }

    /*
     * Move all dquot's that used to refer to this quota
     * file off their hash chains (they will eventually
     * fall off the head of the free list and be re-used).
     */
    dq_list_lock();

    for (dqh = &dqhashtbl[dqhash]; dqh >= dqhashtbl; dqh--) {
        for (dq = dqh->lh_first; dq; dq = nextdq) {
            nextdq = dq->dq_hash.le_next;
            if (dq->dq_qfile->qf_vp != vp) {
                continue;
            }
            if (dq->dq_cnt) {
                panic("dqflush: stray dquot");
            }
            LIST_REMOVE(dq, dq_hash);
            dq->dq_qfile = NULL;
        }
    }
    dq_list_unlock();
}
/*
 * LP64 support for munging dqblk structure.
 * XXX conversion of user_time_t to time_t loses precision; not an issue for
 * XXX us now, since we are only ever setting 32 bits worth of time into it.
 */
__private_extern__ void
munge_dqblk(struct dqblk *dqblkp, struct user_dqblk *user_dqblkp, boolean_t to64)
{
    if (to64) {
        /* munge kernel (32 bit) dqblk into user (64 bit) dqblk */
        bcopy((caddr_t)dqblkp, (caddr_t)user_dqblkp, offsetof(struct dqblk, dqb_btime));
        user_dqblkp->dqb_id = dqblkp->dqb_id;
        user_dqblkp->dqb_itime = dqblkp->dqb_itime;
        user_dqblkp->dqb_btime = dqblkp->dqb_btime;
    } else {
        /* munge user (64 bit) dqblk into kernel (32 bit) dqblk */
        bcopy((caddr_t)user_dqblkp, (caddr_t)dqblkp, offsetof(struct dqblk, dqb_btime));
        dqblkp->dqb_id = user_dqblkp->dqb_id;
        dqblkp->dqb_itime = user_dqblkp->dqb_itime;     /* XXX - lose precision */
        dqblkp->dqb_btime = user_dqblkp->dqb_btime;     /* XXX - lose precision */
    }
}