1/*
2 * Copyright (c) 2002-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * Copyright (c) 1982, 1986, 1990, 1993, 1995
30 * The Regents of the University of California. All rights reserved.
31 *
32 * This code is derived from software contributed to Berkeley by
33 * Robert Elz at The University of Melbourne.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 * 3. All advertising materials mentioning features or use of this software
44 * must display the following acknowledgement:
45 * This product includes software developed by the University of
46 * California, Berkeley and its contributors.
47 * 4. Neither the name of the University nor the names of its contributors
48 * may be used to endorse or promote products derived from this software
49 * without specific prior written permission.
50 *
51 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
52 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
53 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
54 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
55 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
57 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
59 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61 * SUCH DAMAGE.
62 *
63 * @(#)vfs_quota.c
64 * derived from @(#)ufs_quota.c 8.5 (Berkeley) 5/20/95
65 */
66
67#include <sys/param.h>
68#include <sys/kernel.h>
69#include <sys/systm.h>
70#include <kern/zalloc.h>
71#include <sys/file_internal.h>
72#include <sys/proc_internal.h>
73#include <sys/vnode_internal.h>
74#include <sys/mount_internal.h>
75#include <sys/quota.h>
76#include <sys/uio_internal.h>
77
78#include <libkern/OSByteOrder.h>
79
80
81/* vars for quota file lock */
82static LCK_GRP_DECLARE(qf_lck_grp, "quota file");
83
84/* vars for quota list lock */
 85static LCK_GRP_DECLARE(quota_list_lck_grp, "quota list");
86static LCK_MTX_DECLARE(quota_list_mtx_lock, &quota_list_lck_grp);
87
88/* Routines to lock and unlock the quota global data */
89static int dq_list_lock(void);
90static void dq_list_unlock(void);
91
92static void dq_lock_internal(struct dquot *dq);
93static void dq_unlock_internal(struct dquot *dq);
94
95static u_int32_t quotamagic[MAXQUOTAS] = INITQMAGICS;
96
97
98/*
99 * Code pertaining to management of the in-core dquot data structures.
100 */
101#define DQHASH(dqvp, id) \
102 (&dqhashtbl[((((intptr_t)(dqvp)) >> 8) + id) & dqhash])
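/*
 * dqhashtbl chains the in-core dquots; it is allocated by dqhashinit()
 * and dqhash is the hash mask returned by hashinit(), both consumed by
 * the DQHASH macro above (which mixes the quota-file vnode pointer with
 * the user/group id).
 */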
103LIST_HEAD(dqhash, dquot) * dqhashtbl;
104u_long dqhash;
105
106#define DQUOTINC 5 /* minimum free dquots desired */
107long numdquot, desireddquot = DQUOTINC;
108
109/*
110 * Dquot free list.
111 */
112TAILQ_HEAD(dqfreelist, dquot) dqfreelist;
113/*
114 * Dquot dirty orphans list
115 */
116TAILQ_HEAD(dqdirtylist, dquot) dqdirtylist;
117
118ZONE_VIEW_DEFINE(ZV_DQUOT, "FS quota entries", KHEAP_ID_DEFAULT,
119 sizeof(struct dquot));
120
121static int dqlookup(struct quotafile *, u_int32_t, struct dqblk *, u_int32_t *);
122static int dqsync_locked(struct dquot *dq);
123
124static void qf_lock(struct quotafile *);
125static void qf_unlock(struct quotafile *);
126static int qf_ref(struct quotafile *);
127static void qf_rele(struct quotafile *);
128
129
130/*
131 * Report whether dqhashinit has been run.
132 */
133int
134dqisinitialized(void)
135{
136 return dqhashtbl != NULL;
137}
138
139/*
140 * Initialize hash table for dquot structures.
141 */
142void
143dqhashinit(void)
144{
145 dq_list_lock();
146 if (dqisinitialized()) {
147 goto out;
148 }
149
150 TAILQ_INIT(&dqfreelist);
151 TAILQ_INIT(&dqdirtylist);
152 dqhashtbl = hashinit(desiredvnodes, M_DQUOT, &dqhash);
153out:
154 dq_list_unlock();
155}
156
157
158static volatile int dq_list_lock_cnt = 0;
159
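/*
 * The quota list lock protects the dquot hash table, the free and dirty
 * lists, and the dq_lflags words.  dq_list_lock() bumps a generation
 * count each time the lock is taken; callers that may have slept (and
 * so dropped the lock) compare against dq_list_lock_changed() /
 * dq_list_lock_val() to decide whether cached lookups must be redone.
 */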
160static int
161dq_list_lock(void)
162{
163 lck_mtx_lock(&quota_list_mtx_lock);
164 return ++dq_list_lock_cnt;
165}
166
167static int
168dq_list_lock_changed(int oldval)
169{
170 return dq_list_lock_cnt != oldval;
171}
172
173static int
174dq_list_lock_val(void)
175{
176 return dq_list_lock_cnt;
177}
178
179void
180dq_list_unlock(void)
181{
182 lck_mtx_unlock(&quota_list_mtx_lock);
183}
184
185
186/*
187 * must be called with the quota_list_lock held
188 */
189void
190dq_lock_internal(struct dquot *dq)
191{
192 while (dq->dq_lflags & DQ_LLOCK) {
193 dq->dq_lflags |= DQ_LWANT;
194 msleep(&dq->dq_lflags, &quota_list_mtx_lock, PVFS, "dq_lock_internal", NULL);
195 }
196 dq->dq_lflags |= DQ_LLOCK;
197}
198
199/*
200 * must be called with the quota_list_lock held
201 */
202void
203dq_unlock_internal(struct dquot *dq)
204{
205 int wanted = dq->dq_lflags & DQ_LWANT;
206
207 dq->dq_lflags &= ~(DQ_LLOCK | DQ_LWANT);
208
209 if (wanted) {
210 wakeup(&dq->dq_lflags);
211 }
212}
213
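/*
 * dqlock()/dqunlock() are the standalone forms of dq_lock_internal()
 * and dq_unlock_internal(): they take and drop the quota list mutex
 * around the per-dquot flag manipulation for callers that do not
 * already hold it.
 */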
214void
215dqlock(struct dquot *dq)
216{
217 lck_mtx_lock(&quota_list_mtx_lock);
218
219 dq_lock_internal(dq);
220
221 lck_mtx_unlock(&quota_list_mtx_lock);
222}
223
224void
225dqunlock(struct dquot *dq)
226{
227 lck_mtx_lock(&quota_list_mtx_lock);
228
229 dq_unlock_internal(dq);
230
231 lck_mtx_unlock(&quota_list_mtx_lock);
232}
233
234
235
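/*
 * Serialize opening and closing of a quota file.  QTF_OPENING and
 * QTF_CLOSING act as state bits: an opener waits out a concurrent
 * close, a closer waits for the opener and for all qf_ref() holders
 * to drain, and either side returns EBUSY if the transition makes no
 * sense (file already open, close already in progress, or no file).
 *
 * Illustrative sketch of how a filesystem's quota-on path is assumed
 * to use this pairing (the surrounding steps are for illustration
 * only, not code from this file):
 *
 *	if ((error = qf_get(qfp, QTF_OPENING)))
 *		return (error);
 *	... open the quota file vnode, call dqfileopen(qfp, type) ...
 *	qf_put(qfp, QTF_OPENING);
 *
 * The quota-off path does the same dance with QTF_CLOSING.
 */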
236int
237qf_get(struct quotafile *qfp, int type)
238{
239 int error = 0;
240
241 dq_list_lock();
242
243 switch (type) {
244 case QTF_OPENING:
245 while ((qfp->qf_qflags & (QTF_OPENING | QTF_CLOSING))) {
246 if ((qfp->qf_qflags & QTF_OPENING)) {
247 error = EBUSY;
248 break;
249 }
250 if ((qfp->qf_qflags & QTF_CLOSING)) {
251 qfp->qf_qflags |= QTF_WANTED;
252 msleep(&qfp->qf_qflags, &quota_list_mtx_lock, PVFS, "qf_get", NULL);
253 }
254 }
255 if (qfp->qf_vp != NULLVP) {
256 error = EBUSY;
257 }
258 if (error == 0) {
259 qfp->qf_qflags |= QTF_OPENING;
260 }
261 break;
262
263 case QTF_CLOSING:
264 if ((qfp->qf_qflags & QTF_CLOSING)) {
265 error = EBUSY;
266 break;
267 }
268 qfp->qf_qflags |= QTF_CLOSING;
269
270 while ((qfp->qf_qflags & QTF_OPENING) || qfp->qf_refcnt) {
271 qfp->qf_qflags |= QTF_WANTED;
272 msleep(&qfp->qf_qflags, &quota_list_mtx_lock, PVFS, "qf_get", NULL);
273 }
274 if (qfp->qf_vp == NULLVP) {
275 qfp->qf_qflags &= ~QTF_CLOSING;
276 error = EBUSY;
277 }
278 break;
279 }
280 dq_list_unlock();
281
282 return error;
283}
284
285void
286qf_put(struct quotafile *qfp, int type)
287{
288 dq_list_lock();
289
290 switch (type) {
291 case QTF_OPENING:
292 case QTF_CLOSING:
293 qfp->qf_qflags &= ~type;
294 break;
295 }
296 if ((qfp->qf_qflags & QTF_WANTED)) {
297 qfp->qf_qflags &= ~QTF_WANTED;
298 wakeup(&qfp->qf_qflags);
299 }
300 dq_list_unlock();
301}
302
303
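/*
 * qf_lock()/qf_unlock() guard the on-disk layout fields of a single
 * struct quotafile (hash constants, entry count, time limits) while
 * dqlookup() probes or extends the file.
 */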
304static void
305qf_lock(struct quotafile *qfp)
306{
307 lck_mtx_lock(&qfp->qf_lock);
308}
309
310static void
311qf_unlock(struct quotafile *qfp)
312{
313 lck_mtx_unlock(&qfp->qf_lock);
314}
315
316
317/*
318 * take a reference on the quota file while we're
319 * in dqget... this will prevent a quota_off from
320 * occurring while we're potentially playing with
321 * the quota file... the quota_off will stall until
322 * all the current references 'die'... once we start
 323 * into quota_off, all new references will be rejected...
324 * we also don't want any dqgets being processed while
325 * we're in the middle of the quota_on... once we've
326 * actually got the quota file open and the associated
327 * struct quotafile inited, we can let them come through
328 *
329 * quota list lock must be held on entry
330 */
331static int
332qf_ref(struct quotafile *qfp)
333{
334 int error = 0;
335
336 if ((qfp->qf_qflags & (QTF_OPENING | QTF_CLOSING)) || (qfp->qf_vp == NULLVP)) {
337 error = EINVAL;
338 } else {
339 qfp->qf_refcnt++;
340 }
341
342 return error;
343}
344
345/*
346 * drop our reference and wakeup any waiters if
347 * we were the last one holding a ref
348 *
349 * quota list lock must be held on entry
350 */
351static void
352qf_rele(struct quotafile *qfp)
353{
354 qfp->qf_refcnt--;
355
356 if ((qfp->qf_qflags & QTF_WANTED) && qfp->qf_refcnt == 0) {
357 qfp->qf_qflags &= ~QTF_WANTED;
358 wakeup(&qfp->qf_qflags);
359 }
360}
361
362
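/*
 * One-time setup of a struct quotafile: no backing vnode yet, flags
 * cleared, and the per-file mutex initialized.
 */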
363void
364dqfileinit(struct quotafile *qfp)
365{
366 qfp->qf_vp = NULLVP;
367 qfp->qf_qflags = 0;
368
369 lck_mtx_init(&qfp->qf_lock, &qf_lck_grp, LCK_ATTR_NULL);
370}
371
372
373/*
374 * Initialize a quota file
375 *
376 * must be called with the quota file lock held
377 */
378int
379dqfileopen(struct quotafile *qfp, int type)
380{
381 struct dqfilehdr header;
382 struct vfs_context context;
383 off_t file_size;
384 uio_t auio;
385 int error = 0;
386 char uio_buf[UIO_SIZEOF(1)];
387
388 context.vc_thread = current_thread();
389 context.vc_ucred = qfp->qf_cred;
390
391 /* Obtain the file size */
392 if ((error = vnode_size(qfp->qf_vp, &file_size, &context)) != 0) {
393 goto out;
394 }
395
396 /* Read the file header */
397 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
398 &uio_buf[0], sizeof(uio_buf));
399 uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof(header));
400 error = VNOP_READ(qfp->qf_vp, auio, 0, &context);
401 if (error) {
402 goto out;
403 } else if (uio_resid(auio)) {
404 error = EINVAL;
405 goto out;
406 }
407 /* Sanity check the quota file header. */
408 if ((OSSwapBigToHostInt32(header.dqh_magic) != quotamagic[type]) ||
409 (OSSwapBigToHostInt32(header.dqh_version) > QF_VERSION) ||
410 (!powerof2(OSSwapBigToHostInt32(header.dqh_maxentries))) ||
411 (OSSwapBigToHostInt32(header.dqh_maxentries) > (file_size / sizeof(struct dqblk)))) {
412 error = EINVAL;
413 goto out;
414 }
415 /* Set up the time limits for this quota. */
416 if (header.dqh_btime != 0) {
417 qfp->qf_btime = OSSwapBigToHostInt32(header.dqh_btime);
418 } else {
419 qfp->qf_btime = MAX_DQ_TIME;
420 }
421 if (header.dqh_itime != 0) {
422 qfp->qf_itime = OSSwapBigToHostInt32(header.dqh_itime);
423 } else {
424 qfp->qf_itime = MAX_IQ_TIME;
425 }
426
427 /* Calculate the hash table constants. */
428 qfp->qf_maxentries = OSSwapBigToHostInt32(header.dqh_maxentries);
429 qfp->qf_entrycnt = OSSwapBigToHostInt32(header.dqh_entrycnt);
430 qfp->qf_shift = dqhashshift(qfp->qf_maxentries);
431out:
432 return error;
433}
434
435/*
436 * Close down a quota file
437 */
438void
439dqfileclose(struct quotafile *qfp, __unused int type)
440{
441 struct dqfilehdr header;
442 struct vfs_context context;
443 uio_t auio;
444 char uio_buf[UIO_SIZEOF(1)];
445
446 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
447 &uio_buf[0], sizeof(uio_buf));
448 uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof(header));
449
450 context.vc_thread = current_thread();
451 context.vc_ucred = qfp->qf_cred;
452
453 if (VNOP_READ(qfp->qf_vp, auio, 0, &context) == 0) {
454 header.dqh_entrycnt = OSSwapHostToBigInt32(qfp->qf_entrycnt);
455 uio_reset(auio, 0, UIO_SYSSPACE, UIO_WRITE);
456 uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof(header));
457 (void) VNOP_WRITE(qfp->qf_vp, auio, 0, &context);
458 }
459}
460
461
462/*
463 * Obtain a dquot structure for the specified identifier and quota file
464 * reading the information from the file if necessary.
465 */
466int
467dqget(u_int32_t id, struct quotafile *qfp, int type, struct dquot **dqp)
468{
469 struct dquot *dq;
470 struct dquot *ndq = NULL;
471 struct dquot *fdq = NULL;
472 struct dqhash *dqh;
473 struct vnode *dqvp;
474 int error = 0;
475 int listlockval = 0;
476
477 if (!dqisinitialized()) {
478 *dqp = NODQUOT;
479 return EINVAL;
480 }
481
482 if (id == 0 || qfp->qf_vp == NULLVP) {
483 *dqp = NODQUOT;
484 return EINVAL;
485 }
486 dq_list_lock();
487
488 if ((qf_ref(qfp))) {
489 dq_list_unlock();
490
491 *dqp = NODQUOT;
492 return EINVAL;
493 }
494 if ((dqvp = qfp->qf_vp) == NULLVP) {
495 qf_rele(qfp);
496 dq_list_unlock();
497
498 *dqp = NODQUOT;
499 return EINVAL;
500 }
501 dqh = DQHASH(dqvp, id);
502
503relookup:
504 listlockval = dq_list_lock_val();
505
506 /*
507 * Check the cache first.
508 */
509 for (dq = dqh->lh_first; dq; dq = dq->dq_hash.le_next) {
510 if (dq->dq_id != id ||
511 dq->dq_qfile->qf_vp != dqvp) {
512 continue;
513 }
514
515 dq_lock_internal(dq);
516 if (dq_list_lock_changed(listlockval)) {
517 dq_unlock_internal(dq);
518 goto relookup;
519 }
520
521 /*
522 * dq_lock_internal may drop the quota_list_lock to msleep, so
523 * we need to re-evaluate the identity of this dq
524 */
525 if (dq->dq_id != id || dq->dq_qfile == NULL ||
526 dq->dq_qfile->qf_vp != dqvp) {
527 dq_unlock_internal(dq);
528 goto relookup;
529 }
530 /*
531 * Cache hit with no references. Take
532 * the structure off the free list.
533 */
534 if (dq->dq_cnt++ == 0) {
535 if (dq->dq_flags & DQ_MOD) {
536 TAILQ_REMOVE(&dqdirtylist, dq, dq_freelist);
537 } else {
538 TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
539 }
540 }
541 dq_unlock_internal(dq);
542
543 if (fdq != NULL) {
544 /*
545 * we grabbed this from the free list in the first pass
546 * but we found the dq we were looking for in
547 * the cache the 2nd time through
548 * so stick it back on the free list and return the cached entry
549 */
550 TAILQ_INSERT_HEAD(&dqfreelist, fdq, dq_freelist);
551 }
552 qf_rele(qfp);
553 dq_list_unlock();
554
555 if (ndq != NULL) {
556 /*
557 * we allocated this in the first pass
558 * but we found the dq we were looking for in
559 * the cache the 2nd time through so free it
560 */
561 zfree(ZV_DQUOT, ndq);
562 }
563 *dqp = dq;
564
565 return 0;
566 }
567 /*
568 * Not in cache, allocate a new one.
569 */
570 if (TAILQ_EMPTY(&dqfreelist) &&
571 numdquot < MAXQUOTAS * desiredvnodes) {
572 desireddquot += DQUOTINC;
573 }
574
575 if (fdq != NULL) {
576 /*
577 * we captured this from the free list
578 * in the first pass through, so go
579 * ahead and use it
580 */
581 dq = fdq;
582 fdq = NULL;
583 } else if (numdquot < desireddquot) {
584 if (ndq == NULL) {
585 /*
586 * drop the quota list lock since zalloc may block
587 */
588 dq_list_unlock();
589
590 ndq = (struct dquot *)zalloc_flags(ZV_DQUOT,
591 Z_WAITOK | Z_ZERO);
592
593 listlockval = dq_list_lock();
594 /*
595 * need to look for the entry again in the cache
596 * since we dropped the quota list lock and
597 * someone else may have beaten us to creating it
598 */
599 goto relookup;
600 } else {
601 /*
602 * we allocated this in the first pass through
 603 * and we're still under our target, so go
604 * ahead and use it
605 */
606 dq = ndq;
607 ndq = NULL;
608 numdquot++;
609 }
610 } else {
611 if (TAILQ_EMPTY(&dqfreelist)) {
612 qf_rele(qfp);
613 dq_list_unlock();
614
615 if (ndq) {
616 /*
617 * we allocated this in the first pass through
618 * but we're now at the limit of our cache size
619 * so free it
620 */
621 zfree(ZV_DQUOT, ndq);
622 }
623 tablefull("dquot");
624 *dqp = NODQUOT;
625 return EUSERS;
626 }
627 dq = TAILQ_FIRST(&dqfreelist);
628
629 dq_lock_internal(dq);
630
631 if (dq_list_lock_changed(listlockval) || dq->dq_cnt || (dq->dq_flags & DQ_MOD)) {
632 /*
633 * we lost the race while we weren't holding
634 * the quota list lock... dq_lock_internal
635 * will drop it to msleep... this dq has been
636 * reclaimed... go find another
637 */
638 dq_unlock_internal(dq);
639
640 /*
641 * need to look for the entry again in the cache
642 * since we dropped the quota list lock and
643 * someone else may have beaten us to creating it
644 */
645 goto relookup;
646 }
647 TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
648
649 if (dq->dq_qfile != NULL) {
650 LIST_REMOVE(dq, dq_hash);
651 dq->dq_qfile = NULL;
652 dq->dq_id = 0;
653 }
654 dq_unlock_internal(dq);
655
656 /*
657 * because we may have dropped the quota list lock
658 * in the call to dq_lock_internal, we need to
659 * relookup in the hash in case someone else
660 * caused a dq with this identity to be created...
661 * if we don't find it, we'll use this one
662 */
663 fdq = dq;
664 goto relookup;
665 }
666 /*
667 * we've either freshly allocated a dq
668 * or we've atomically pulled it out of
669 * the hash and freelists... no one else
670 * can have a reference, which means no
671 * one else can be trying to use this dq
672 */
673 dq_lock_internal(dq);
674 if (dq_list_lock_changed(listlockval)) {
675 dq_unlock_internal(dq);
676 goto relookup;
677 }
678
679 /*
680 * Initialize the contents of the dquot structure.
681 */
682 dq->dq_cnt = 1;
683 dq->dq_flags = 0;
684 dq->dq_id = id;
685 dq->dq_qfile = qfp;
686 dq->dq_type = type;
687 /*
688 * once we insert it in the hash and
689 * drop the quota_list_lock, it can be
690 * 'found'... however, we're still holding
691 * the dq_lock which will keep us from doing
692 * anything with it until we've finished
693 * initializing it...
694 */
695 LIST_INSERT_HEAD(dqh, dq, dq_hash);
696 dq_list_unlock();
697
698 if (ndq) {
699 /*
700 * we allocated this in the first pass through
701 * but we didn't need it, so free it after
 702 * we've dropped the quota list lock
703 */
704 zfree(ZV_DQUOT, ndq);
705 }
706
707 error = dqlookup(qfp, id, &dq->dq_dqb, &dq->dq_index);
708
709 /*
710 * I/O error in reading quota file, release
711 * quota structure and reflect problem to caller.
712 */
713 if (error) {
714 dq_list_lock();
715
716 dq->dq_id = 0;
717 dq->dq_qfile = NULL;
718 LIST_REMOVE(dq, dq_hash);
719
720 dq_unlock_internal(dq);
721 qf_rele(qfp);
722 dq_list_unlock();
723
724 dqrele(dq);
725
726 *dqp = NODQUOT;
727 return error;
728 }
729 /*
730 * Check for no limit to enforce.
731 * Initialize time values if necessary.
732 */
733 if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
734 dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0) {
735 dq->dq_flags |= DQ_FAKE;
736 }
737 if (dq->dq_id != 0) {
738 struct timeval tv;
739
740 microtime(&tv);
741 if (dq->dq_btime == 0) {
742 dq->dq_btime = tv.tv_sec + qfp->qf_btime;
743 }
744 if (dq->dq_itime == 0) {
745 dq->dq_itime = tv.tv_sec + qfp->qf_itime;
746 }
747 }
748 dq_list_lock();
749 dq_unlock_internal(dq);
750 qf_rele(qfp);
751 dq_list_unlock();
752
753 *dqp = dq;
754 return 0;
755}
756
757/*
758 * Lookup a dqblk structure for the specified identifier and
759 * quota file. If there is no entry for this identifier then
760 * one is inserted. The actual hash table index is returned.
761 */
762static int
763dqlookup(struct quotafile *qfp, u_int32_t id, struct dqblk *dqb, uint32_t *index)
764{
765 struct vnode *dqvp;
766 struct vfs_context context;
767 uio_t auio;
768 int i, skip, last;
769 u_int32_t mask;
770 int error = 0;
771 char uio_buf[UIO_SIZEOF(1)];
772
773
774 qf_lock(qfp);
775
776 dqvp = qfp->qf_vp;
777
778 context.vc_thread = current_thread();
779 context.vc_ucred = qfp->qf_cred;
780
781 mask = qfp->qf_maxentries - 1;
782 i = dqhash1(id, qfp->qf_shift, mask);
783 skip = dqhash2(id, mask);
784
785 for (last = (i + (qfp->qf_maxentries - 1) * skip) & mask;
786 i != last;
787 i = (i + skip) & mask) {
788 auio = uio_createwithbuffer(1, dqoffset(i), UIO_SYSSPACE, UIO_READ,
789 &uio_buf[0], sizeof(uio_buf));
790 uio_addiov(auio, CAST_USER_ADDR_T(dqb), sizeof(struct dqblk));
791 error = VNOP_READ(dqvp, auio, 0, &context);
792 if (error) {
793 printf("dqlookup: error %d looking up id %u at index %d\n", error, id, i);
794 break;
795 } else if (uio_resid(auio)) {
796 error = EIO;
797 printf("dqlookup: error looking up id %u at index %d\n", id, i);
798 break;
799 }
800 /*
801 * An empty entry means there is no entry
802 * with that id. In this case a new dqb
803 * record will be inserted.
804 */
805 if (dqb->dqb_id == 0) {
806 bzero(dqb, sizeof(struct dqblk));
807 dqb->dqb_id = OSSwapHostToBigInt32(id);
808 /*
809 * Write back to reserve entry for this id
810 */
811 uio_reset(auio, dqoffset(i), UIO_SYSSPACE, UIO_WRITE);
812 uio_addiov(auio, CAST_USER_ADDR_T(dqb), sizeof(struct dqblk));
813 error = VNOP_WRITE(dqvp, auio, 0, &context);
814 if (uio_resid(auio) && error == 0) {
815 error = EIO;
816 }
817 if (error == 0) {
818 ++qfp->qf_entrycnt;
819 }
820 dqb->dqb_id = id;
821 break;
822 }
823 /* An id match means an entry was found. */
824 if (OSSwapBigToHostInt32(dqb->dqb_id) == id) {
825 dqb->dqb_bhardlimit = OSSwapBigToHostInt64(dqb->dqb_bhardlimit);
826 dqb->dqb_bsoftlimit = OSSwapBigToHostInt64(dqb->dqb_bsoftlimit);
827 dqb->dqb_curbytes = OSSwapBigToHostInt64(dqb->dqb_curbytes);
828 dqb->dqb_ihardlimit = OSSwapBigToHostInt32(dqb->dqb_ihardlimit);
829 dqb->dqb_isoftlimit = OSSwapBigToHostInt32(dqb->dqb_isoftlimit);
830 dqb->dqb_curinodes = OSSwapBigToHostInt32(dqb->dqb_curinodes);
831 dqb->dqb_btime = OSSwapBigToHostInt32(dqb->dqb_btime);
832 dqb->dqb_itime = OSSwapBigToHostInt32(dqb->dqb_itime);
833 dqb->dqb_id = OSSwapBigToHostInt32(dqb->dqb_id);
834 break;
835 }
836 }
837 qf_unlock(qfp);
838
839 *index = i; /* remember index so we don't have to recompute it later */
840
841 return error;
842}
843
844
845/*
846 * Release a reference to a dquot.
847 */
848void
849dqrele(struct dquot *dq)
850{
851 if (dq == NODQUOT) {
852 return;
853 }
854 dqlock(dq);
855
856 if (dq->dq_cnt > 1) {
857 dq->dq_cnt--;
858
859 dqunlock(dq);
860 return;
861 }
862 if (dq->dq_flags & DQ_MOD) {
863 (void) dqsync_locked(dq);
864 }
865 dq->dq_cnt--;
866
867 dq_list_lock();
868 TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
869 dq_unlock_internal(dq);
870 dq_list_unlock();
871}
872
873/*
874 * Release a reference to a dquot but don't do any I/O.
875 */
876void
877dqreclaim(struct dquot *dq)
878{
879 if (dq == NODQUOT) {
880 return;
881 }
882
883 dq_list_lock();
884 dq_lock_internal(dq);
885
886 if (--dq->dq_cnt > 0) {
887 dq_unlock_internal(dq);
888 dq_list_unlock();
889 return;
890 }
891 if (dq->dq_flags & DQ_MOD) {
892 TAILQ_INSERT_TAIL(&dqdirtylist, dq, dq_freelist);
893 } else {
894 TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
895 }
896
897 dq_unlock_internal(dq);
898 dq_list_unlock();
899}
900
901/*
902 * Update a quota file's orphaned disk quotas.
903 */
904void
905dqsync_orphans(struct quotafile *qfp)
906{
907 struct dquot *dq;
908
909 dq_list_lock();
910loop:
911 TAILQ_FOREACH(dq, &dqdirtylist, dq_freelist) {
912 if (dq->dq_qfile != qfp) {
913 continue;
914 }
915
916 dq_lock_internal(dq);
917
918 if (dq->dq_qfile != qfp) {
919 /*
920 * the identity of this dq changed while
921 * the quota_list_lock was dropped
922 * dq_lock_internal can drop it to msleep
923 */
924 dq_unlock_internal(dq);
925 goto loop;
926 }
927 if ((dq->dq_flags & DQ_MOD) == 0) {
928 /*
 929 * someone cleaned this dq and removed it
 930 * from the dirty list while the
931 * quota_list_lock was dropped
932 */
933 dq_unlock_internal(dq);
934 goto loop;
935 }
936 if (dq->dq_cnt != 0) {
937 panic("dqsync_orphans: dquot in use");
938 }
939
940 TAILQ_REMOVE(&dqdirtylist, dq, dq_freelist);
941
942 dq_list_unlock();
943 /*
944 * we're still holding the dqlock at this point
 945 * with the reference count == 0...
 946 * no one should be able to pick up
 947 * another reference since we hold the dqlock
948 */
949 (void) dqsync_locked(dq);
950
951 dq_list_lock();
952
953 TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
954
955 dq_unlock_internal(dq);
956 goto loop;
957 }
958 dq_list_unlock();
959}
960
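/*
 * Write a modified dquot back to its quota file.  Takes the dq lock and
 * only performs I/O if DQ_MOD is set; the actual write is done by
 * dqsync_locked() below.
 */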
961int
962dqsync(struct dquot *dq)
963{
964 int error = 0;
965
966 if (dq != NODQUOT) {
967 dqlock(dq);
968
969 if ((dq->dq_flags & DQ_MOD)) {
970 error = dqsync_locked(dq);
971 }
972
973 dqunlock(dq);
974 }
975 return error;
976}
977
978
979/*
980 * Update the disk quota in the quota file.
981 */
982int
983dqsync_locked(struct dquot *dq)
984{
985 struct vfs_context context;
986 struct vnode *dqvp;
987 struct dqblk dqb, *dqblkp;
988 uio_t auio;
989 int error;
990 char uio_buf[UIO_SIZEOF(1)];
991
992 if (dq->dq_id == 0) {
993 dq->dq_flags &= ~DQ_MOD;
994 return 0;
995 }
996 if (dq->dq_qfile == NULL) {
997 panic("dqsync: NULL dq_qfile");
998 }
999 if ((dqvp = dq->dq_qfile->qf_vp) == NULLVP) {
1000 panic("dqsync: NULL qf_vp");
1001 }
1002
1003 auio = uio_createwithbuffer(1, dqoffset(dq->dq_index), UIO_SYSSPACE,
1004 UIO_WRITE, &uio_buf[0], sizeof(uio_buf));
1005 uio_addiov(auio, CAST_USER_ADDR_T(&dqb), sizeof(struct dqblk));
1006
1007 context.vc_thread = current_thread(); /* XXX */
1008 context.vc_ucred = dq->dq_qfile->qf_cred;
1009
1010 dqblkp = &dq->dq_dqb;
1011 dqb.dqb_bhardlimit = OSSwapHostToBigInt64(dqblkp->dqb_bhardlimit);
1012 dqb.dqb_bsoftlimit = OSSwapHostToBigInt64(dqblkp->dqb_bsoftlimit);
1013 dqb.dqb_curbytes = OSSwapHostToBigInt64(dqblkp->dqb_curbytes);
1014 dqb.dqb_ihardlimit = OSSwapHostToBigInt32(dqblkp->dqb_ihardlimit);
1015 dqb.dqb_isoftlimit = OSSwapHostToBigInt32(dqblkp->dqb_isoftlimit);
1016 dqb.dqb_curinodes = OSSwapHostToBigInt32(dqblkp->dqb_curinodes);
1017 dqb.dqb_btime = OSSwapHostToBigInt32(dqblkp->dqb_btime);
1018 dqb.dqb_itime = OSSwapHostToBigInt32(dqblkp->dqb_itime);
1019 dqb.dqb_id = OSSwapHostToBigInt32(dqblkp->dqb_id);
1020 dqb.dqb_spare[0] = 0;
1021 dqb.dqb_spare[1] = 0;
1022 dqb.dqb_spare[2] = 0;
1023 dqb.dqb_spare[3] = 0;
1024
1025 error = VNOP_WRITE(dqvp, auio, 0, &context);
1026 if (uio_resid(auio) && error == 0) {
1027 error = EIO;
1028 }
1029 dq->dq_flags &= ~DQ_MOD;
1030
1031 return error;
1032}
1033
1034/*
1035 * Flush all entries from the cache for a particular vnode.
1036 */
1037void
1038dqflush(struct vnode *vp)
1039{
1040 struct dquot *dq, *nextdq;
1041 struct dqhash *dqh;
1042
1043 if (!dqisinitialized()) {
1044 return;
1045 }
1046
1047 /*
1048 * Move all dquot's that used to refer to this quota
1049 * file off their hash chains (they will eventually
1050 * fall off the head of the free list and be re-used).
1051 */
1052 dq_list_lock();
1053
1054 for (dqh = &dqhashtbl[dqhash]; dqh >= dqhashtbl; dqh--) {
1055 for (dq = dqh->lh_first; dq; dq = nextdq) {
1056 nextdq = dq->dq_hash.le_next;
1057 if (dq->dq_qfile->qf_vp != vp) {
1058 continue;
1059 }
1060 if (dq->dq_cnt) {
1061 panic("dqflush: stray dquot");
1062 }
1063 LIST_REMOVE(dq, dq_hash);
1064 dq->dq_qfile = NULL;
1065 }
1066 }
1067 dq_list_unlock();
1068}
1069
1070/*
1071 * LP64 support for munging dqblk structure.
1072 * XXX conversion of user_time_t to time_t loses precision; not an issue for
1073 * XXX us now, since we are only ever setting 32 bits worth of time into it.
1074 */
1075__private_extern__ void
1076munge_dqblk(struct dqblk *dqblkp, struct user_dqblk *user_dqblkp, boolean_t to64)
1077{
1078 if (to64) {
1079 /* munge kernel (32 bit) dqblk into user (64 bit) dqblk */
1080 bcopy((caddr_t)dqblkp, (caddr_t)user_dqblkp, offsetof(struct dqblk, dqb_btime));
1081 user_dqblkp->dqb_id = dqblkp->dqb_id;
1082 user_dqblkp->dqb_itime = dqblkp->dqb_itime;
1083 user_dqblkp->dqb_btime = dqblkp->dqb_btime;
1084 } else {
1085 /* munge user (64 bit) dqblk into kernel (32 bit) dqblk */
1086 bcopy((caddr_t)user_dqblkp, (caddr_t)dqblkp, offsetof(struct dqblk, dqb_btime));
1087 dqblkp->dqb_id = user_dqblkp->dqb_id;
1088 dqblkp->dqb_itime = user_dqblkp->dqb_itime; /* XXX - lose precision */
1089 dqblkp->dqb_btime = user_dqblkp->dqb_btime; /* XXX - lose precision */
1090 }
1091}