/* bsd/vfs/vfs_quota.c */
/*
 * Copyright (c) 2002-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Robert Elz at The University of Melbourne.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	derived from @(#)ufs_quota.c	8.5 (Berkeley) 5/20/95
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/file_internal.h>
#include <sys/proc_internal.h>
#include <sys/vnode_internal.h>
#include <sys/mount_internal.h>
#include <sys/quota.h>
#include <sys/uio_internal.h>


/* vars for quota file lock */
lck_grp_t	* qf_lck_grp;
lck_grp_attr_t	* qf_lck_grp_attr;
lck_attr_t	* qf_lck_attr;

/* vars for quota list lock */
lck_grp_t	* quota_list_lck_grp;
lck_grp_attr_t	* quota_list_lck_grp_attr;
lck_attr_t	* quota_list_lck_attr;
lck_mtx_t	* quota_list_mtx_lock;

/* Routines to lock and unlock the quota global data */
static void dq_list_lock(void);
static void dq_list_unlock(void);

static void dq_lock_internal(struct dquot *dq);
static void dq_unlock_internal(struct dquot *dq);

static u_int32_t quotamagic[MAXQUOTAS] = INITQMAGICS;

/*
 * Code pertaining to management of the in-core dquot data structures.
 */
#define DQHASH(dqvp, id) \
	(&dqhashtbl[((((int)(dqvp)) >> 8) + id) & dqhash])
LIST_HEAD(dqhash, dquot) *dqhashtbl;
u_long dqhash;
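
/*
 * Illustrative note (not from the original source): DQHASH picks the hash
 * chain for a (quota-file vnode, id) pair by folding the vnode pointer and
 * the id together and masking with dqhash, which hashinit() sets to the
 * table size minus one.  A hypothetical standalone equivalent of the bucket
 * computation would be:
 *
 *	struct dqhash *
 *	dq_bucket(struct vnode *dqvp, u_long id)
 *	{
 *		return (&dqhashtbl[((((int)(dqvp)) >> 8) + id) & dqhash]);
 *	}
 */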
#define	DQUOTINC	5	/* minimum free dquots desired */
long numdquot, desireddquot = DQUOTINC;

TAILQ_HEAD(dqfreelist, dquot) dqfreelist;

/*
 * Dquot dirty orphans list
 */
TAILQ_HEAD(dqdirtylist, dquot) dqdirtylist;


static int  dqlookup(struct quotafile *, u_long, struct dqblk *, u_int32_t *);
static int  dqsync_locked(struct dquot *dq);

static void qf_lock(struct quotafile *);
static void qf_unlock(struct quotafile *);
static int  qf_ref(struct quotafile *);
static void qf_rele(struct quotafile *);

/*
 * Initialize the quota system.
 */
void
dqinit(void)
{
	dqhashtbl = hashinit(desiredvnodes, M_DQUOT, &dqhash);
	TAILQ_INIT(&dqfreelist);
	TAILQ_INIT(&dqdirtylist);

	/*
	 * Allocate quota list lock group attribute and group
	 */
	quota_list_lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_attr_setstat(quota_list_lck_grp_attr);
	quota_list_lck_grp = lck_grp_alloc_init("quota list", quota_list_lck_grp_attr);

	/*
	 * Allocate quota list lock attribute
	 */
	quota_list_lck_attr = lck_attr_alloc_init();
	//lck_attr_setdebug(quota_list_lck_attr);

	/*
	 * Allocate quota list lock
	 */
	quota_list_mtx_lock = lck_mtx_alloc_init(quota_list_lck_grp, quota_list_lck_attr);

	/*
	 * allocate quota file lock group attribute and group
	 */
	qf_lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_attr_setstat(qf_lck_grp_attr);
	qf_lck_grp = lck_grp_alloc_init("quota file", qf_lck_grp_attr);

	/*
	 * Allocate quota file lock attribute
	 */
	qf_lck_attr = lck_attr_alloc_init();
	//lck_attr_setdebug(qf_lck_attr);
}

static void
dq_list_lock(void)
{
	lck_mtx_lock(quota_list_mtx_lock);
}

static void
dq_list_unlock(void)
{
	lck_mtx_unlock(quota_list_mtx_lock);
}

/*
 * must be called with the quota_list_lock held
 */
static void
dq_lock_internal(struct dquot *dq)
{
	while (dq->dq_lflags & DQ_LLOCK) {
		dq->dq_lflags |= DQ_LWANT;
		msleep(&dq->dq_lflags, quota_list_mtx_lock, PVFS, "dq_lock_internal", 0);
	}
	dq->dq_lflags |= DQ_LLOCK;
}

/*
 * must be called with the quota_list_lock held
 */
static void
dq_unlock_internal(struct dquot *dq)
{
	int wanted = dq->dq_lflags & DQ_LWANT;

	dq->dq_lflags &= ~(DQ_LLOCK | DQ_LWANT);

	if (wanted)
		wakeup(&dq->dq_lflags);
}

void
dqlock(struct dquot *dq) {

	lck_mtx_lock(quota_list_mtx_lock);

	dq_lock_internal(dq);

	lck_mtx_unlock(quota_list_mtx_lock);
}

void
dqunlock(struct dquot *dq) {

	lck_mtx_lock(quota_list_mtx_lock);

	dq_unlock_internal(dq);

	lck_mtx_unlock(quota_list_mtx_lock);
}

int
qf_get(struct quotafile *qfp, int type)
{
	/* ... */
	switch (type) {

	case QTF_OPENING:
		while ( (qfp->qf_qflags & (QTF_OPENING | QTF_CLOSING)) ) {
			if ( (qfp->qf_qflags & QTF_OPENING) ) {
				/* ... */
			}
			if ( (qfp->qf_qflags & QTF_CLOSING) ) {
				qfp->qf_qflags |= QTF_WANTED;
				msleep(&qfp->qf_qflags, quota_list_mtx_lock, PVFS, "qf_get", 0);
			}
		}
		if (qfp->qf_vp != NULLVP) {
			/* ... */
		}
		qfp->qf_qflags |= QTF_OPENING;
		break;

	case QTF_CLOSING:
		if ( (qfp->qf_qflags & QTF_CLOSING) ) {
			/* ... */
		}
		qfp->qf_qflags |= QTF_CLOSING;

		while ( (qfp->qf_qflags & QTF_OPENING) || qfp->qf_refcnt ) {
			qfp->qf_qflags |= QTF_WANTED;
			msleep(&qfp->qf_qflags, quota_list_mtx_lock, PVFS, "qf_get", 0);
		}
		if (qfp->qf_vp == NULLVP) {
			qfp->qf_qflags &= ~QTF_CLOSING;
			/* ... */
		}
		break;
	}
	/* ... */
}

void
qf_put(struct quotafile *qfp, int type)
{
	/* ... */
	qfp->qf_qflags &= ~type;
	/* ... */
	if ( (qfp->qf_qflags & QTF_WANTED) ) {
		qfp->qf_qflags &= ~QTF_WANTED;
		wakeup(&qfp->qf_qflags);
	}
	/* ... */
}
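
/*
 * Illustrative note (not from the original source): qf_get/qf_put bracket
 * quota_on/quota_off style operations on a struct quotafile.  The QTF_OPENING
 * and QTF_CLOSING bits serialize opens and closes against each other, and
 * QTF_WANTED records that a waiter is blocked in msleep() on qf_qflags and
 * should be woken.  Assuming qf_get() returns 0 on success, a hypothetical
 * caller would look roughly like:
 *
 *	if (qf_get(qfp, QTF_OPENING) == 0) {
 *		... open and validate the on-disk quota file ...
 *		qf_put(qfp, QTF_OPENING);
 *	}
 */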

static void
qf_lock(struct quotafile *qfp)
{
	lck_mtx_lock(&qfp->qf_lock);
}

static void
qf_unlock(struct quotafile *qfp)
{
	lck_mtx_unlock(&qfp->qf_lock);
}

/*
 * take a reference on the quota file while we're
 * in dqget... this will prevent a quota_off from
 * occurring while we're potentially playing with
 * the quota file... the quota_off will stall until
 * all the current references 'die'... once we start
 * into quota_off, all new references will be rejected
 * we also don't want any dqgets being processed while
 * we're in the middle of the quota_on... once we've
 * actually got the quota file open and the associated
 * struct quotafile inited, we can let them come through
 *
 * quota list lock must be held on entry
 */
static int
qf_ref(struct quotafile *qfp)
{
	/* ... */
	if ( (qfp->qf_qflags & (QTF_OPENING | QTF_CLOSING)) || (qfp->qf_vp == NULLVP) ) {
		/* reject the reference: an open/close is in progress or there is no quota file vnode */
	} else {
		qfp->qf_refcnt++;
	}
	/* ... */
}

/*
 * drop our reference and wakeup any waiters if
 * we were the last one holding a ref
 *
 * quota list lock must be held on entry
 */
static void
qf_rele(struct quotafile *qfp)
{
	qfp->qf_refcnt--;

	if ( (qfp->qf_qflags & QTF_WANTED) && qfp->qf_refcnt == 0) {
		qfp->qf_qflags &= ~QTF_WANTED;
		wakeup(&qfp->qf_qflags);
	}
}
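
/*
 * Illustrative note (not from the original source): dqget() is expected to
 * bracket its use of the quota file with qf_ref()/qf_rele() while holding the
 * quota list lock, so that quota_off stalls until every outstanding reference
 * is dropped.  A hypothetical caller, with EINVAL standing in for whatever
 * error the real code returns:
 *
 *	dq_list_lock();
 *	if ( (qf_ref(qfp)) ) {
 *		dq_list_unlock();
 *		return (EINVAL);
 *	}
 *	... use qfp->qf_vp ...
 *	qf_rele(qfp);
 *	dq_list_unlock();
 */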

void
dqfileinit(struct quotafile *qfp)
{
	/* ... */
	lck_mtx_init(&qfp->qf_lock, qf_lck_grp, qf_lck_attr);
}

/*
 * Initialize a quota file
 *
 * must be called with the quota file lock held
 */
int
dqfileopen(qfp, type)
	struct quotafile *qfp;
	int type;
{
	struct dqfilehdr header;
	struct vfs_context context;
	off_t file_size;
	uio_t auio;
	int error = 0;
	char uio_buf[ UIO_SIZEOF(1) ];

	context.vc_proc = current_proc();
	context.vc_ucred = qfp->qf_cred;

	/* Obtain the file size */
	if ((error = vnode_size(qfp->qf_vp, &file_size, &context)) != 0)
		/* ... */;

	/* Read the file header */
	auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
	    &uio_buf[0], sizeof(uio_buf));
	uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof (header));
	error = VNOP_READ(qfp->qf_vp, auio, 0, &context);
	if (error) {
		/* ... */
	} else if (uio_resid(auio)) {
		/* ... */
	}
	/* Sanity check the quota file header. */
	if ((header.dqh_magic != quotamagic[type]) ||
	    (header.dqh_version > QF_VERSION) ||
	    (!powerof2(header.dqh_maxentries)) ||
	    (header.dqh_maxentries > (file_size / sizeof(struct dqblk)))) {
		/* ... */
	}
	/* Set up the time limits for this quota. */
	if (header.dqh_btime > 0)
		qfp->qf_btime = header.dqh_btime;
	else
		qfp->qf_btime = MAX_DQ_TIME;
	if (header.dqh_itime > 0)
		qfp->qf_itime = header.dqh_itime;
	else
		qfp->qf_itime = MAX_IQ_TIME;

	/* Calculate the hash table constants. */
	qfp->qf_maxentries = header.dqh_maxentries;
	qfp->qf_entrycnt = header.dqh_entrycnt;
	qfp->qf_shift = dqhashshift(header.dqh_maxentries);
	/* ... */
}
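
/*
 * Illustrative note (not from the original source): the dqfilehdr checks in
 * dqfileopen() define what a well-formed quota file looks like: the magic
 * number must match quotamagic[] for the quota type, the version must not
 * exceed QF_VERSION, dqh_maxentries must be a power of two, and the file
 * must be large enough to hold that many struct dqblk records.  A
 * hypothetical standalone validator under those same assumptions:
 *
 *	static int
 *	quota_header_ok(const struct dqfilehdr *h, int type, off_t file_size)
 *	{
 *		return ((h->dqh_magic == quotamagic[type]) &&
 *		    (h->dqh_version <= QF_VERSION) &&
 *		    powerof2(h->dqh_maxentries) &&
 *		    (h->dqh_maxentries <= (file_size / sizeof(struct dqblk))));
 *	}
 */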

/*
 * Close down a quota file
 */
void
dqfileclose(struct quotafile *qfp, __unused int type)
{
	struct dqfilehdr header;
	struct vfs_context context;
	uio_t auio;
	char uio_buf[ UIO_SIZEOF(1) ];

	auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
	    &uio_buf[0], sizeof(uio_buf));
	uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof (header));

	context.vc_proc = current_proc();
	context.vc_ucred = qfp->qf_cred;

	if (VNOP_READ(qfp->qf_vp, auio, 0, &context) == 0) {
		header.dqh_entrycnt = qfp->qf_entrycnt;
		uio_reset(auio, 0, UIO_SYSSPACE, UIO_WRITE);
		uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof (header));
		(void) VNOP_WRITE(qfp->qf_vp, auio, 0, &context);
	}
}

/*
 * Obtain a dquot structure for the specified identifier and quota file
 * reading the information from the file if necessary.
 */
int
dqget(id, qfp, type, dqp)
	u_long id;
	struct quotafile *qfp;
	int type;
	struct dquot **dqp;
{
	struct dquot *dq;
	struct dquot *ndq = NULL;
	struct dquot *fdq = NULL;
	struct dqhash *dqh;
	struct vnode *dqvp;
	struct timeval tv;
	int error = 0;

	if ( id == 0 || qfp->qf_vp == NULLVP ) {
		/* ... */
	}
	/* ... */

	if ( (qf_ref(qfp)) ) {
		/* ... */
	}
	if ( (dqvp = qfp->qf_vp) == NULLVP ) {
		/* ... */
	}
	dqh = DQHASH(dqvp, id);
	/* ... */

	/*
	 * Check the cache first.
	 */
	for (dq = dqh->lh_first; dq; dq = dq->dq_hash.le_next) {
		if (dq->dq_id != id ||
		    dq->dq_qfile->qf_vp != dqvp)
			continue;

		dq_lock_internal(dq);
		/*
		 * dq_lock_internal may drop the quota_list_lock to msleep, so
		 * we need to re-evaluate the identity of this dq
		 */
		if (dq->dq_id != id || dq->dq_qfile == NULL ||
		    dq->dq_qfile->qf_vp != dqvp) {
			dq_unlock_internal(dq);
			/* ... */
		}
		/*
		 * Cache hit with no references.  Take
		 * the structure off the free list.
		 */
		if (dq->dq_cnt++ == 0) {
			if (dq->dq_flags & DQ_MOD)
				TAILQ_REMOVE(&dqdirtylist, dq, dq_freelist);
			else
				TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		}
		dq_unlock_internal(dq);

		if (fdq != NULL) {
			/*
			 * we grabbed this from the free list in the first pass
			 * but we found the dq we were looking for in
			 * the cache the 2nd time through
			 * so stick it back on the free list and return the cached entry
			 */
			TAILQ_INSERT_HEAD(&dqfreelist, fdq, dq_freelist);
		}
		if (ndq != NULL) {
			/*
			 * we allocated this in the first pass
			 * but we found the dq we were looking for in
			 * the cache the 2nd time through so free it
			 */
			/* ... */
		}
		/* ... */
	}
	/*
	 * Not in cache, allocate a new one.
	 */
	if (TAILQ_EMPTY(&dqfreelist) &&
	    numdquot < MAXQUOTAS * desiredvnodes)
		desireddquot += DQUOTINC;

	if (fdq != NULL) {
		/*
		 * we captured this from the free list
		 * in the first pass through, so go ...
		 */
		/* ... */
	} else if (numdquot < desireddquot) {
		/*
		 * drop the quota list lock since MALLOC may block
		 */
		/* ... */
		ndq = (struct dquot *)_MALLOC(sizeof *dq, M_DQUOT, M_WAITOK);
		bzero((char *)ndq, sizeof *dq);
		/*
		 * need to look for the entry again in the cache
		 * since we dropped the quota list lock and
		 * someone else may have beaten us to creating it
		 */
		/* ... */

		/*
		 * we allocated this in the first pass through
		 * and we're still under our target, so go ...
		 */
		/* ... */
	} else {
		if (TAILQ_EMPTY(&dqfreelist)) {
			/* ... */
			/*
			 * we allocated this in the first pass through
			 * but we're now at the limit of our cache size
			 */
			/* ... */
		}
		dq = TAILQ_FIRST(&dqfreelist);

		dq_lock_internal(dq);

		if (dq->dq_cnt || (dq->dq_flags & DQ_MOD)) {
			/*
			 * we lost the race while we weren't holding
			 * the quota list lock... dq_lock_internal
			 * will drop it to msleep... this dq has been
			 * reclaimed... go find another
			 */
			dq_unlock_internal(dq);
			/*
			 * need to look for the entry again in the cache
			 * since we dropped the quota list lock and
			 * someone else may have beaten us to creating it
			 */
			/* ... */
		}
		TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);

		if (dq->dq_qfile != NULL) {
			LIST_REMOVE(dq, dq_hash);
			/* ... */
		}
		dq_unlock_internal(dq);

		/*
		 * because we may have dropped the quota list lock
		 * in the call to dq_lock_internal, we need to
		 * relookup in the hash in case someone else
		 * caused a dq with this identity to be created...
		 * if we don't find it, we'll use this one
		 */
		/* ... */
	}
	/*
	 * we've either freshly allocated a dq
	 * or we've atomically pulled it out of
	 * the hash and freelists... no one else
	 * can have a reference, which means no
	 * one else can be trying to use this dq
	 */
	dq_lock_internal(dq);
	/* ... */

	/*
	 * Initialize the contents of the dquot structure.
	 */
	/* ... */

	/*
	 * once we insert it in the hash and
	 * drop the quota_list_lock, it can be
	 * 'found'... however, we're still holding
	 * the dq_lock which will keep us from doing
	 * anything with it until we've finished
	 */
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	/* ... */

	if (ndq != NULL) {
		/*
		 * we allocated this in the first pass through
		 * but we didn't need it, so free it after
		 * we've dropped the quota list lock
		 */
		/* ... */
	}
	error = dqlookup(qfp, id, &dq->dq_dqb, &dq->dq_index);

	if (error) {
		/*
		 * I/O error in reading quota file, release
		 * quota structure and reflect problem to caller.
		 */
		/* ... */
		LIST_REMOVE(dq, dq_hash);
		/* ... */
		dq_unlock_internal(dq);
		/* ... */
	}
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	if (dq->dq_id != 0) {
		/* ... */
		if (dq->dq_btime == 0)
			dq->dq_btime = tv.tv_sec + qfp->qf_btime;
		if (dq->dq_itime == 0)
			dq->dq_itime = tv.tv_sec + qfp->qf_itime;
		/* ... */
	}
	/* ... */
	dq_unlock_internal(dq);
	/* ... */
}

/*
 * Lookup a dqblk structure for the specified identifier and
 * quota file.  If there is no entry for this identifier then
 * one is inserted.  The actual hash table index is returned.
 */
static int
dqlookup(qfp, id, dqb, index)
	struct quotafile *qfp;
	u_long id;
	struct dqblk *dqb;
	u_int32_t *index;
{
	struct vnode *dqvp = qfp->qf_vp;
	struct vfs_context context;
	uio_t auio;
	int i, skip, last;
	u_long mask;
	int error = 0;
	char uio_buf[ UIO_SIZEOF(1) ];

	/* ... */

	context.vc_proc = current_proc();
	context.vc_ucred = qfp->qf_cred;

	mask = qfp->qf_maxentries - 1;
	i = dqhash1(id, qfp->qf_shift, mask);
	skip = dqhash2(id, mask);

	for (last = (i + (qfp->qf_maxentries-1) * skip) & mask;
	     /* ... */;
	     i = (i + skip) & mask) {
		auio = uio_createwithbuffer(1, dqoffset(i), UIO_SYSSPACE, UIO_READ,
		    &uio_buf[0], sizeof(uio_buf));
		uio_addiov(auio, CAST_USER_ADDR_T(dqb), sizeof (struct dqblk));
		error = VNOP_READ(dqvp, auio, 0, &context);
		if (error) {
			printf("dqlookup: error %d looking up id %d at index %d\n", error, id, i);
			/* ... */
		} else if (uio_resid(auio)) {
			/* ... */
			printf("dqlookup: error looking up id %d at index %d\n", id, i);
			/* ... */
		}
		/*
		 * An empty entry means there is no entry
		 * with that id.  In this case a new dqb
		 * record will be inserted.
		 */
		if (dqb->dqb_id == 0) {
			bzero(dqb, sizeof(struct dqblk));
			/* ... */
			/*
			 * Write back to reserve entry for this id
			 */
			uio_reset(auio, dqoffset(i), UIO_SYSSPACE, UIO_WRITE);
			uio_addiov(auio, CAST_USER_ADDR_T(dqb), sizeof (struct dqblk));
			error = VNOP_WRITE(dqvp, auio, 0, &context);
			if (uio_resid(auio) && error == 0)
				/* ... */;
			/* ... */
		}
		/* An id match means an entry was found. */
		if (dqb->dqb_id == id)
			break;
	}
	/* ... */
	*index = i;	/* remember index so we don't have to recompute it later */

	return (error);
}
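
/*
 * Illustrative note (not from the original source): dqlookup() probes the
 * on-disk hash file with double hashing.  dqhash1() picks the starting slot,
 * dqhash2() picks a second step ("skip", arranged so the probe sequence can
 * visit every slot of the power-of-two table), and each miss advances the
 * index by that step modulo the table size.  A hypothetical in-memory walk
 * of the same probe sequence, assuming maxentries, shift, and id are given:
 *
 *	u_long mask = maxentries - 1;
 *	int i = dqhash1(id, shift, mask);
 *	int skip = dqhash2(id, mask);
 *	int probes;
 *
 *	for (probes = 0; probes < (int)maxentries; probes++) {
 *		... examine the record stored at dqoffset(i) ...
 *		i = (i + skip) & mask;
 *	}
 */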

/*
 * Release a reference to a dquot.
 */
void
dqrele(struct dquot *dq)
{
	/* ... */
	if (dq->dq_cnt > 1) {
		/* ... */
	}
	if (dq->dq_flags & DQ_MOD)
		(void) dqsync_locked(dq);
	/* ... */
	TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
	dq_unlock_internal(dq);
	/* ... */
}

/*
 * Release a reference to a dquot but don't do any I/O.
 */
void
dqreclaim(register struct dquot *dq)
{
	/* ... */
	dq_lock_internal(dq);

	if (--dq->dq_cnt > 0) {
		dq_unlock_internal(dq);
		/* ... */
	}
	if (dq->dq_flags & DQ_MOD)
		TAILQ_INSERT_TAIL(&dqdirtylist, dq, dq_freelist);
	else
		TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);

	dq_unlock_internal(dq);
	/* ... */
}

/*
 * Update a quota file's orphaned disk quotas.
 */
void
dqsync_orphans(qfp)
	struct quotafile *qfp;
{
	struct dquot *dq;
	/* ... */

	TAILQ_FOREACH(dq, &dqdirtylist, dq_freelist) {
		if (dq->dq_qfile != qfp)
			continue;

		dq_lock_internal(dq);

		if (dq->dq_qfile != qfp) {
			/*
			 * the identity of this dq changed while
			 * the quota_list_lock was dropped
			 * dq_lock_internal can drop it to msleep
			 */
			dq_unlock_internal(dq);
			/* ... */
		}
		if ((dq->dq_flags & DQ_MOD) == 0) {
			/*
			 * someone cleaned this dq and removed it
			 * from the dirty list while the
			 * quota_list_lock was dropped
			 */
			dq_unlock_internal(dq);
			/* ... */
		}
		if (dq->dq_cnt)
			panic("dqsync_orphans: dquot in use");

		TAILQ_REMOVE(&dqdirtylist, dq, dq_freelist);
		/* ... */

		/*
		 * we're still holding the dqlock at this point
		 * with the reference count == 0
		 * we shouldn't be able
		 * to pick up another one since we hold dqlock
		 */
		(void) dqsync_locked(dq);
		/* ... */

		TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);

		dq_unlock_internal(dq);
		/* ... */
	}
	/* ... */
}

int
dqsync(struct dquot *dq)
{
	int error = 0;
	/* ... */
	if ( (dq->dq_flags & DQ_MOD) )
		error = dqsync_locked(dq);
	/* ... */
	return (error);
}

/*
 * Update the disk quota in the quota file.
 */
static int
dqsync_locked(struct dquot *dq)
{
	struct proc *p = current_proc();	/* XXX */
	struct vfs_context context;
	struct vnode *dqvp;
	uio_t auio;
	int error;
	char uio_buf[ UIO_SIZEOF(1) ];

	if (dq->dq_id == 0) {
		dq->dq_flags &= ~DQ_MOD;
		return (0);
	}
	if (dq->dq_qfile == NULL)
		panic("dqsync: NULL dq_qfile");
	if ((dqvp = dq->dq_qfile->qf_vp) == NULLVP)
		panic("dqsync: NULL qf_vp");

	auio = uio_createwithbuffer(1, dqoffset(dq->dq_index), UIO_SYSSPACE,
	    UIO_WRITE, &uio_buf[0], sizeof(uio_buf));
	uio_addiov(auio, CAST_USER_ADDR_T(&dq->dq_dqb), sizeof (struct dqblk));

	context.vc_proc = p;
	context.vc_ucred = dq->dq_qfile->qf_cred;

	error = VNOP_WRITE(dqvp, auio, 0, &context);
	if (uio_resid(auio) && error == 0)
		/* ... */;
	dq->dq_flags &= ~DQ_MOD;
	/* ... */
	return (error);
}

/*
 * Flush all entries from the cache for a particular vnode.
 */
void
dqflush(vp)
	register struct vnode *vp;
{
	register struct dquot *dq, *nextdq;
	struct dqhash *dqh;

	/*
	 * Move all dquot's that used to refer to this quota
	 * file off their hash chains (they will eventually
	 * fall off the head of the free list and be re-used).
	 */
	/* ... */
	for (dqh = &dqhashtbl[dqhash]; dqh >= dqhashtbl; dqh--) {
		for (dq = dqh->lh_first; dq; dq = nextdq) {
			nextdq = dq->dq_hash.le_next;
			if (dq->dq_qfile->qf_vp != vp)
				continue;
			if (dq->dq_cnt)
				panic("dqflush: stray dquot");
			LIST_REMOVE(dq, dq_hash);
			dq->dq_qfile = NULL;
		}
	}
	/* ... */
}

/*
 * LP64 support for munging dqblk structure.
 * XXX conversion of user_time_t to time_t loses precision; not an issue for
 * XXX us now, since we are only ever setting 32 bits worth of time into it.
 */
__private_extern__ void
munge_dqblk(struct dqblk *dqblkp, struct user_dqblk *user_dqblkp, boolean_t to64)
{
	if (to64) {
		/* munge kernel (32 bit) dqblk into user (64 bit) dqblk */
		bcopy((caddr_t)dqblkp, (caddr_t)user_dqblkp, offsetof(struct dqblk, dqb_btime));
		user_dqblkp->dqb_id = dqblkp->dqb_id;
		user_dqblkp->dqb_itime = dqblkp->dqb_itime;
		user_dqblkp->dqb_btime = dqblkp->dqb_btime;
	}
	else {
		/* munge user (64 bit) dqblk into kernel (32 bit) dqblk */
		bcopy((caddr_t)user_dqblkp, (caddr_t)dqblkp, offsetof(struct dqblk, dqb_btime));
		dqblkp->dqb_id = user_dqblkp->dqb_id;
		dqblkp->dqb_itime = user_dqblkp->dqb_itime;	/* XXX - lose precision */
		dqblkp->dqb_btime = user_dqblkp->dqb_btime;	/* XXX - lose precision */
	}
}
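
/*
 * Illustrative note (not from the original source): munge_dqblk() bcopy's the
 * fields that precede dqb_btime and then explicitly widens (to64 == TRUE) or
 * narrows (to64 == FALSE) the id and time fields between the 32-bit struct
 * dqblk and the 64-bit struct user_dqblk layouts.  A hypothetical caller
 * returning quota data to a 64-bit process might do:
 *
 *	struct dqblk dqb;
 *	struct user_dqblk user_dqb;
 *
 *	... fill dqb from the in-core dquot ...
 *	munge_dqblk(&dqb, &user_dqb, TRUE);
 *	... copy user_dqb out to the caller's address space ...
 */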