/*
 * Copyright (c) 2002-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1990, 1993, 1995
 *      The Regents of the University of California. All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Robert Elz at The University of Melbourne.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vfs_quota.c
 *      derived from @(#)ufs_quota.c    8.5 (Berkeley) 5/20/95
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <kern/zalloc.h>
#include <sys/file_internal.h>
#include <sys/proc_internal.h>
#include <sys/vnode_internal.h>
#include <sys/mount_internal.h>
#include <sys/quota.h>
#include <sys/uio_internal.h>

#include <libkern/OSByteOrder.h>


/* vars for quota file lock */
lck_grp_t * qf_lck_grp;
lck_grp_attr_t * qf_lck_grp_attr;
lck_attr_t * qf_lck_attr;

/* vars for quota list lock */
lck_grp_t * quota_list_lck_grp;
lck_grp_attr_t * quota_list_lck_grp_attr;
lck_attr_t * quota_list_lck_attr;
lck_mtx_t * quota_list_mtx_lock;

/* Routines to lock and unlock the quota global data */
static int dq_list_lock(void);
static void dq_list_unlock(void);

static void dq_lock_internal(struct dquot *dq);
static void dq_unlock_internal(struct dquot *dq);

static u_int32_t quotamagic[MAXQUOTAS] = INITQMAGICS;


/*
 * Code pertaining to management of the in-core dquot data structures.
 */
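/*
 * A dquot is hashed on the quota file's vnode pointer combined with the
 * user/group id (see DQHASH below), so entries belonging to different
 * quota files that share the same id land on different chains.
 */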
#define DQHASH(dqvp, id) \
    (&dqhashtbl[((((intptr_t)(dqvp)) >> 8) + id) & dqhash])
LIST_HEAD(dqhash, dquot) * dqhashtbl;
u_long dqhash;

#define DQUOTINC        5       /* minimum free dquots desired */
long numdquot, desireddquot = DQUOTINC;

/*
 * Dquot free list.
 */
TAILQ_HEAD(dqfreelist, dquot) dqfreelist;
/*
 * Dquot dirty orphans list
 */
TAILQ_HEAD(dqdirtylist, dquot) dqdirtylist;
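/*
 * Entries on dqdirtylist are dquots that were modified (DQ_MOD) but had
 * their last reference dropped via dqreclaim() without being written back;
 * dqsync_orphans() later flushes them to the quota file and moves them to
 * the free list.
 */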

ZONE_VIEW_DEFINE(ZV_DQUOT, "FS quota entries", KHEAP_ID_DEFAULT,
    sizeof(struct dquot));

static int dqlookup(struct quotafile *, u_int32_t, struct dqblk *, u_int32_t *);
static int dqsync_locked(struct dquot *dq);

static void qf_lock(struct quotafile *);
static void qf_unlock(struct quotafile *);
static int qf_ref(struct quotafile *);
static void qf_rele(struct quotafile *);


/*
 * Initialize locks for the quota system.
 */
void
dqinit(void)
{
    /*
     * Allocate quota list lock group attribute and group
     */
    quota_list_lck_grp_attr = lck_grp_attr_alloc_init();
    quota_list_lck_grp = lck_grp_alloc_init("quota list", quota_list_lck_grp_attr);

    /*
     * Allocate quota list lock attribute
     */
    quota_list_lck_attr = lck_attr_alloc_init();

    /*
     * Allocate quota list lock
     */
    quota_list_mtx_lock = lck_mtx_alloc_init(quota_list_lck_grp, quota_list_lck_attr);


    /*
     * allocate quota file lock group attribute and group
     */
    qf_lck_grp_attr = lck_grp_attr_alloc_init();
    qf_lck_grp = lck_grp_alloc_init("quota file", qf_lck_grp_attr);

    /*
     * Allocate quota file lock attribute
     */
    qf_lck_attr = lck_attr_alloc_init();
}

/*
 * Report whether dqhashinit has been run.
 */
int
dqisinitialized(void)
{
    return dqhashtbl != NULL;
}

/*
 * Initialize hash table for dquot structures.
 */
void
dqhashinit(void)
{
    dq_list_lock();
    if (dqisinitialized()) {
        goto out;
    }

    TAILQ_INIT(&dqfreelist);
    TAILQ_INIT(&dqdirtylist);
    dqhashtbl = hashinit(desiredvnodes, M_DQUOT, &dqhash);
out:
    dq_list_unlock();
}

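/*
 * dq_list_lock_cnt is a generation count for the quota list lock: it is
 * bumped every time the lock is taken.  Paths that may block (and thereby
 * drop and re-take the lock, e.g. via dq_lock_internal/msleep) snapshot the
 * value and use dq_list_lock_changed() to detect that the list may have
 * been altered while they slept, forcing a re-lookup.
 */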
static volatile int dq_list_lock_cnt = 0;

static int
dq_list_lock(void)
{
    lck_mtx_lock(quota_list_mtx_lock);
    return ++dq_list_lock_cnt;
}

static int
dq_list_lock_changed(int oldval)
{
    return dq_list_lock_cnt != oldval;
}

static int
dq_list_lock_val(void)
{
    return dq_list_lock_cnt;
}

void
dq_list_unlock(void)
{
    lck_mtx_unlock(quota_list_mtx_lock);
}


/*
 * must be called with the quota_list_lock held
 */
void
dq_lock_internal(struct dquot *dq)
{
    while (dq->dq_lflags & DQ_LLOCK) {
        dq->dq_lflags |= DQ_LWANT;
        msleep(&dq->dq_lflags, quota_list_mtx_lock, PVFS, "dq_lock_internal", NULL);
    }
    dq->dq_lflags |= DQ_LLOCK;
}

/*
 * must be called with the quota_list_lock held
 */
void
dq_unlock_internal(struct dquot *dq)
{
    int wanted = dq->dq_lflags & DQ_LWANT;

    dq->dq_lflags &= ~(DQ_LLOCK | DQ_LWANT);

    if (wanted) {
        wakeup(&dq->dq_lflags);
    }
}

void
dqlock(struct dquot *dq)
{
    lck_mtx_lock(quota_list_mtx_lock);

    dq_lock_internal(dq);

    lck_mtx_unlock(quota_list_mtx_lock);
}

void
dqunlock(struct dquot *dq)
{
    lck_mtx_lock(quota_list_mtx_lock);

    dq_unlock_internal(dq);

    lck_mtx_unlock(quota_list_mtx_lock);
}



int
qf_get(struct quotafile *qfp, int type)
{
    int error = 0;

    dq_list_lock();

    switch (type) {
    case QTF_OPENING:
        while ((qfp->qf_qflags & (QTF_OPENING | QTF_CLOSING))) {
            if ((qfp->qf_qflags & QTF_OPENING)) {
                error = EBUSY;
                break;
            }
            if ((qfp->qf_qflags & QTF_CLOSING)) {
                qfp->qf_qflags |= QTF_WANTED;
                msleep(&qfp->qf_qflags, quota_list_mtx_lock, PVFS, "qf_get", NULL);
            }
        }
        if (qfp->qf_vp != NULLVP) {
            error = EBUSY;
        }
        if (error == 0) {
            qfp->qf_qflags |= QTF_OPENING;
        }
        break;

    case QTF_CLOSING:
        if ((qfp->qf_qflags & QTF_CLOSING)) {
            error = EBUSY;
            break;
        }
        qfp->qf_qflags |= QTF_CLOSING;

        while ((qfp->qf_qflags & QTF_OPENING) || qfp->qf_refcnt) {
            qfp->qf_qflags |= QTF_WANTED;
            msleep(&qfp->qf_qflags, quota_list_mtx_lock, PVFS, "qf_get", NULL);
        }
        if (qfp->qf_vp == NULLVP) {
            qfp->qf_qflags &= ~QTF_CLOSING;
            error = EBUSY;
        }
        break;
    }
    dq_list_unlock();

    return error;
}

void
qf_put(struct quotafile *qfp, int type)
{
    dq_list_lock();

    switch (type) {
    case QTF_OPENING:
    case QTF_CLOSING:
        qfp->qf_qflags &= ~type;
        break;
    }
    if ((qfp->qf_qflags & QTF_WANTED)) {
        qfp->qf_qflags &= ~QTF_WANTED;
        wakeup(&qfp->qf_qflags);
    }
    dq_list_unlock();
}


static void
qf_lock(struct quotafile *qfp)
{
    lck_mtx_lock(&qfp->qf_lock);
}

static void
qf_unlock(struct quotafile *qfp)
{
    lck_mtx_unlock(&qfp->qf_lock);
}


/*
 * take a reference on the quota file while we're
 * in dqget... this will prevent a quota_off from
 * occurring while we're potentially playing with
 * the quota file... the quota_off will stall until
 * all the current references 'die'... once we start
 * into quota_off, all new references will be rejected.
 * we also don't want any dqgets being processed while
 * we're in the middle of the quota_on... once we've
 * actually got the quota file open and the associated
 * struct quotafile inited, we can let them come through
 *
 * quota list lock must be held on entry
 */
static int
qf_ref(struct quotafile *qfp)
{
    int error = 0;

    if ((qfp->qf_qflags & (QTF_OPENING | QTF_CLOSING)) || (qfp->qf_vp == NULLVP)) {
        error = EINVAL;
    } else {
        qfp->qf_refcnt++;
    }

    return error;
}

/*
 * drop our reference and wakeup any waiters if
 * we were the last one holding a ref
 *
 * quota list lock must be held on entry
 */
static void
qf_rele(struct quotafile *qfp)
{
    qfp->qf_refcnt--;

    if ((qfp->qf_qflags & QTF_WANTED) && qfp->qf_refcnt == 0) {
        qfp->qf_qflags &= ~QTF_WANTED;
        wakeup(&qfp->qf_qflags);
    }
}
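/*
 * Typical use of the reference scheme above, as a sketch of the pattern
 * dqget() follows below: take the quota list lock, let qf_ref() either pin
 * the quota file or fail because a quota_on/quota_off is in progress, and
 * issue qf_rele() under the same lock once done touching the file:
 *
 *      dq_list_lock();
 *      if (qf_ref(qfp)) {
 *              dq_list_unlock();
 *              return EINVAL;
 *      }
 *      ... work against qfp->qf_vp ...
 *      qf_rele(qfp);
 *      dq_list_unlock();
 */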


void
dqfileinit(struct quotafile *qfp)
{
    qfp->qf_vp = NULLVP;
    qfp->qf_qflags = 0;

    lck_mtx_init(&qfp->qf_lock, qf_lck_grp, qf_lck_attr);
}


/*
 * Initialize a quota file
 *
 * must be called with the quota file lock held
 */
int
dqfileopen(struct quotafile *qfp, int type)
{
    struct dqfilehdr header;
    struct vfs_context context;
    off_t file_size;
    uio_t auio;
    int error = 0;
    char uio_buf[UIO_SIZEOF(1)];

    context.vc_thread = current_thread();
    context.vc_ucred = qfp->qf_cred;

    /* Obtain the file size */
    if ((error = vnode_size(qfp->qf_vp, &file_size, &context)) != 0) {
        goto out;
    }

    /* Read the file header */
    auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
        &uio_buf[0], sizeof(uio_buf));
    uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof(header));
    error = VNOP_READ(qfp->qf_vp, auio, 0, &context);
    if (error) {
        goto out;
    } else if (uio_resid(auio)) {
        error = EINVAL;
        goto out;
    }
    /* Sanity check the quota file header. */
    if ((OSSwapBigToHostInt32(header.dqh_magic) != quotamagic[type]) ||
        (OSSwapBigToHostInt32(header.dqh_version) > QF_VERSION) ||
        (!powerof2(OSSwapBigToHostInt32(header.dqh_maxentries))) ||
        (OSSwapBigToHostInt32(header.dqh_maxentries) > (file_size / sizeof(struct dqblk)))) {
        error = EINVAL;
        goto out;
    }
    /* Set up the time limits for this quota. */
    if (header.dqh_btime != 0) {
        qfp->qf_btime = OSSwapBigToHostInt32(header.dqh_btime);
    } else {
        qfp->qf_btime = MAX_DQ_TIME;
    }
    if (header.dqh_itime != 0) {
        qfp->qf_itime = OSSwapBigToHostInt32(header.dqh_itime);
    } else {
        qfp->qf_itime = MAX_IQ_TIME;
    }

    /* Calculate the hash table constants. */
    qfp->qf_maxentries = OSSwapBigToHostInt32(header.dqh_maxentries);
    qfp->qf_entrycnt = OSSwapBigToHostInt32(header.dqh_entrycnt);
    qfp->qf_shift = dqhashshift(qfp->qf_maxentries);
out:
    return error;
}
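/*
 * On-disk layout implied by the checks above and by dqlookup() below:
 * a struct dqfilehdr (stored big-endian) followed by a power-of-two
 * array of struct dqblk records, with dqoffset() mapping a slot index
 * to its file offset.  dqh_maxentries is the array size, dqh_entrycnt
 * the number of slots currently in use, and dqh_btime/dqh_itime the
 * default block/inode grace periods.
 */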

/*
 * Close down a quota file
 */
void
dqfileclose(struct quotafile *qfp, __unused int type)
{
    struct dqfilehdr header;
    struct vfs_context context;
    uio_t auio;
    char uio_buf[UIO_SIZEOF(1)];

    auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
        &uio_buf[0], sizeof(uio_buf));
    uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof(header));

    context.vc_thread = current_thread();
    context.vc_ucred = qfp->qf_cred;

    if (VNOP_READ(qfp->qf_vp, auio, 0, &context) == 0) {
        header.dqh_entrycnt = OSSwapHostToBigInt32(qfp->qf_entrycnt);
        uio_reset(auio, 0, UIO_SYSSPACE, UIO_WRITE);
        uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof(header));
        (void) VNOP_WRITE(qfp->qf_vp, auio, 0, &context);
    }
}


/*
 * Obtain a dquot structure for the specified identifier and quota file
 * reading the information from the file if necessary.
 */
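/*
 * Calling convention, as a sketch (the names shown are only illustrative;
 * USRQUOTA comes from sys/quota.h, and mp_qfiles below stands in for a
 * hypothetical per-mount array of struct quotafile kept by the caller --
 * the real callers live in the individual filesystems):
 *
 *      struct dquot *dq;
 *
 *      if (dqget(uid, &mp_qfiles[USRQUOTA], USRQUOTA, &dq) == 0) {
 *              ... inspect or update dq->dq_dqb under dqlock()/dqunlock() ...
 *              dqrele(dq);     // drops the ref, syncing the record if DQ_MOD
 *      }
 */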
int
dqget(u_int32_t id, struct quotafile *qfp, int type, struct dquot **dqp)
{
    struct dquot *dq;
    struct dquot *ndq = NULL;
    struct dquot *fdq = NULL;
    struct dqhash *dqh;
    struct vnode *dqvp;
    int error = 0;
    int listlockval = 0;

    if (!dqisinitialized()) {
        *dqp = NODQUOT;
        return EINVAL;
    }

    if (id == 0 || qfp->qf_vp == NULLVP) {
        *dqp = NODQUOT;
        return EINVAL;
    }
    dq_list_lock();

    if ((qf_ref(qfp))) {
        dq_list_unlock();

        *dqp = NODQUOT;
        return EINVAL;
    }
    if ((dqvp = qfp->qf_vp) == NULLVP) {
        qf_rele(qfp);
        dq_list_unlock();

        *dqp = NODQUOT;
        return EINVAL;
    }
    dqh = DQHASH(dqvp, id);

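    /*
     * The lookup below may be retried several times: any time the quota
     * list lock is dropped (dq_lock_internal can msleep, zalloc can block)
     * we jump back to 'relookup' and re-check the hash chain.  'fdq' holds
     * a dquot recycled from the free list and 'ndq' a freshly allocated
     * one; whichever turns out not to be needed is put back or freed.
     */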
relookup:
    listlockval = dq_list_lock_val();

    /*
     * Check the cache first.
     */
    for (dq = dqh->lh_first; dq; dq = dq->dq_hash.le_next) {
        if (dq->dq_id != id ||
            dq->dq_qfile->qf_vp != dqvp) {
            continue;
        }

        dq_lock_internal(dq);
        if (dq_list_lock_changed(listlockval)) {
            dq_unlock_internal(dq);
            goto relookup;
        }

        /*
         * dq_lock_internal may drop the quota_list_lock to msleep, so
         * we need to re-evaluate the identity of this dq
         */
        if (dq->dq_id != id || dq->dq_qfile == NULL ||
            dq->dq_qfile->qf_vp != dqvp) {
            dq_unlock_internal(dq);
            goto relookup;
        }
        /*
         * Cache hit with no references.  Take
         * the structure off the free list.
         */
        if (dq->dq_cnt++ == 0) {
            if (dq->dq_flags & DQ_MOD) {
                TAILQ_REMOVE(&dqdirtylist, dq, dq_freelist);
            } else {
                TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
            }
        }
        dq_unlock_internal(dq);

        if (fdq != NULL) {
            /*
             * we grabbed this from the free list in the first pass
             * but we found the dq we were looking for in
             * the cache the 2nd time through
             * so stick it back on the free list and return the cached entry
             */
            TAILQ_INSERT_HEAD(&dqfreelist, fdq, dq_freelist);
        }
        qf_rele(qfp);
        dq_list_unlock();

        if (ndq != NULL) {
            /*
             * we allocated this in the first pass
             * but we found the dq we were looking for in
             * the cache the 2nd time through so free it
             */
            zfree(ZV_DQUOT, ndq);
        }
        *dqp = dq;

        return 0;
    }
    /*
     * Not in cache, allocate a new one.
     */
    if (TAILQ_EMPTY(&dqfreelist) &&
        numdquot < MAXQUOTAS * desiredvnodes) {
        desireddquot += DQUOTINC;
    }

    if (fdq != NULL) {
        /*
         * we captured this from the free list
         * in the first pass through, so go
         * ahead and use it
         */
        dq = fdq;
        fdq = NULL;
    } else if (numdquot < desireddquot) {
        if (ndq == NULL) {
            /*
             * drop the quota list lock since zalloc may block
             */
            dq_list_unlock();

            ndq = (struct dquot *)zalloc_flags(ZV_DQUOT,
                Z_WAITOK | Z_ZERO);

            listlockval = dq_list_lock();
            /*
             * need to look for the entry again in the cache
             * since we dropped the quota list lock and
             * someone else may have beaten us to creating it
             */
            goto relookup;
        } else {
            /*
             * we allocated this in the first pass through
             * and we're still under our target, so go
             * ahead and use it
             */
            dq = ndq;
            ndq = NULL;
            numdquot++;
        }
    } else {
        if (TAILQ_EMPTY(&dqfreelist)) {
            qf_rele(qfp);
            dq_list_unlock();

            if (ndq) {
                /*
                 * we allocated this in the first pass through
                 * but we're now at the limit of our cache size
                 * so free it
                 */
                zfree(ZV_DQUOT, ndq);
            }
            tablefull("dquot");
            *dqp = NODQUOT;
            return EUSERS;
        }
        dq = TAILQ_FIRST(&dqfreelist);

        dq_lock_internal(dq);

        if (dq_list_lock_changed(listlockval) || dq->dq_cnt || (dq->dq_flags & DQ_MOD)) {
            /*
             * we lost the race while we weren't holding
             * the quota list lock... dq_lock_internal
             * will drop it to msleep... this dq has been
             * reclaimed... go find another
             */
            dq_unlock_internal(dq);

            /*
             * need to look for the entry again in the cache
             * since we dropped the quota list lock and
             * someone else may have beaten us to creating it
             */
            goto relookup;
        }
        TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);

        if (dq->dq_qfile != NULL) {
            LIST_REMOVE(dq, dq_hash);
            dq->dq_qfile = NULL;
            dq->dq_id = 0;
        }
        dq_unlock_internal(dq);

        /*
         * because we may have dropped the quota list lock
         * in the call to dq_lock_internal, we need to
         * relookup in the hash in case someone else
         * caused a dq with this identity to be created...
         * if we don't find it, we'll use this one
         */
        fdq = dq;
        goto relookup;
    }
    /*
     * we've either freshly allocated a dq
     * or we've atomically pulled it out of
     * the hash and freelists... no one else
     * can have a reference, which means no
     * one else can be trying to use this dq
     */
    dq_lock_internal(dq);
    if (dq_list_lock_changed(listlockval)) {
        dq_unlock_internal(dq);
        goto relookup;
    }

    /*
     * Initialize the contents of the dquot structure.
     */
    dq->dq_cnt = 1;
    dq->dq_flags = 0;
    dq->dq_id = id;
    dq->dq_qfile = qfp;
    dq->dq_type = type;
    /*
     * once we insert it in the hash and
     * drop the quota_list_lock, it can be
     * 'found'... however, we're still holding
     * the dq_lock which will keep us from doing
     * anything with it until we've finished
     * initializing it...
     */
    LIST_INSERT_HEAD(dqh, dq, dq_hash);
    dq_list_unlock();

    if (ndq) {
        /*
         * we allocated this in the first pass through
         * but we didn't need it, so free it after
         * we've dropped the quota list lock
         */
        zfree(ZV_DQUOT, ndq);
    }

    error = dqlookup(qfp, id, &dq->dq_dqb, &dq->dq_index);

    /*
     * I/O error in reading quota file, release
     * quota structure and reflect problem to caller.
     */
    if (error) {
        dq_list_lock();

        dq->dq_id = 0;
        dq->dq_qfile = NULL;
        LIST_REMOVE(dq, dq_hash);

        dq_unlock_internal(dq);
        qf_rele(qfp);
        dq_list_unlock();

        dqrele(dq);

        *dqp = NODQUOT;
        return error;
    }
    /*
     * Check for no limit to enforce.
     * Initialize time values if necessary.
     */
    if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
        dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0) {
        dq->dq_flags |= DQ_FAKE;
    }
    if (dq->dq_id != 0) {
        struct timeval tv;

        microtime(&tv);
        if (dq->dq_btime == 0) {
            dq->dq_btime = tv.tv_sec + qfp->qf_btime;
        }
        if (dq->dq_itime == 0) {
            dq->dq_itime = tv.tv_sec + qfp->qf_itime;
        }
    }
    dq_list_lock();
    dq_unlock_internal(dq);
    qf_rele(qfp);
    dq_list_unlock();

    *dqp = dq;
    return 0;
}

/*
 * Lookup a dqblk structure for the specified identifier and
 * quota file.  If there is no entry for this identifier then
 * one is inserted.  The actual hash table index is returned.
 */
static int
dqlookup(struct quotafile *qfp, u_int32_t id, struct dqblk *dqb, uint32_t *index)
{
    struct vnode *dqvp;
    struct vfs_context context;
    uio_t auio;
    int i, skip, last;
    u_int32_t mask;
    int error = 0;
    char uio_buf[UIO_SIZEOF(1)];


    qf_lock(qfp);

    dqvp = qfp->qf_vp;

    context.vc_thread = current_thread();
    context.vc_ucred = qfp->qf_cred;

    mask = qfp->qf_maxentries - 1;
    i = dqhash1(id, qfp->qf_shift, mask);
    skip = dqhash2(id, mask);

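    /*
     * Open-addressed lookup in the on-disk table: dqhash1() picks the
     * starting slot for this id and dqhash2() the probe step.  The loop
     * below reads one struct dqblk per probe and stops when it finds the
     * id, claims an empty slot for it, or has walked the whole table.
     */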
    for (last = (i + (qfp->qf_maxentries - 1) * skip) & mask;
        i != last;
        i = (i + skip) & mask) {
        auio = uio_createwithbuffer(1, dqoffset(i), UIO_SYSSPACE, UIO_READ,
            &uio_buf[0], sizeof(uio_buf));
        uio_addiov(auio, CAST_USER_ADDR_T(dqb), sizeof(struct dqblk));
        error = VNOP_READ(dqvp, auio, 0, &context);
        if (error) {
            printf("dqlookup: error %d looking up id %u at index %d\n", error, id, i);
            break;
        } else if (uio_resid(auio)) {
            error = EIO;
            printf("dqlookup: error looking up id %u at index %d\n", id, i);
            break;
        }
        /*
         * An empty entry means there is no entry
         * with that id.  In this case a new dqb
         * record will be inserted.
         */
        if (dqb->dqb_id == 0) {
            bzero(dqb, sizeof(struct dqblk));
            dqb->dqb_id = OSSwapHostToBigInt32(id);
            /*
             * Write back to reserve entry for this id
             */
            uio_reset(auio, dqoffset(i), UIO_SYSSPACE, UIO_WRITE);
            uio_addiov(auio, CAST_USER_ADDR_T(dqb), sizeof(struct dqblk));
            error = VNOP_WRITE(dqvp, auio, 0, &context);
            if (uio_resid(auio) && error == 0) {
                error = EIO;
            }
            if (error == 0) {
                ++qfp->qf_entrycnt;
            }
            dqb->dqb_id = id;
            break;
        }
        /* An id match means an entry was found. */
        if (OSSwapBigToHostInt32(dqb->dqb_id) == id) {
            dqb->dqb_bhardlimit = OSSwapBigToHostInt64(dqb->dqb_bhardlimit);
            dqb->dqb_bsoftlimit = OSSwapBigToHostInt64(dqb->dqb_bsoftlimit);
            dqb->dqb_curbytes = OSSwapBigToHostInt64(dqb->dqb_curbytes);
            dqb->dqb_ihardlimit = OSSwapBigToHostInt32(dqb->dqb_ihardlimit);
            dqb->dqb_isoftlimit = OSSwapBigToHostInt32(dqb->dqb_isoftlimit);
            dqb->dqb_curinodes = OSSwapBigToHostInt32(dqb->dqb_curinodes);
            dqb->dqb_btime = OSSwapBigToHostInt32(dqb->dqb_btime);
            dqb->dqb_itime = OSSwapBigToHostInt32(dqb->dqb_itime);
            dqb->dqb_id = OSSwapBigToHostInt32(dqb->dqb_id);
            break;
        }
    }
    qf_unlock(qfp);

    *index = i;  /* remember index so we don't have to recompute it later */

    return error;
}


/*
 * Release a reference to a dquot.
 */
void
dqrele(struct dquot *dq)
{
    if (dq == NODQUOT) {
        return;
    }
    dqlock(dq);

    if (dq->dq_cnt > 1) {
        dq->dq_cnt--;

        dqunlock(dq);
        return;
    }
    if (dq->dq_flags & DQ_MOD) {
        (void) dqsync_locked(dq);
    }
    dq->dq_cnt--;

    dq_list_lock();
    TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
    dq_unlock_internal(dq);
    dq_list_unlock();
}

/*
 * Release a reference to a dquot but don't do any I/O.
 */
void
dqreclaim(struct dquot *dq)
{
    if (dq == NODQUOT) {
        return;
    }

    dq_list_lock();
    dq_lock_internal(dq);

    if (--dq->dq_cnt > 0) {
        dq_unlock_internal(dq);
        dq_list_unlock();
        return;
    }
    if (dq->dq_flags & DQ_MOD) {
        TAILQ_INSERT_TAIL(&dqdirtylist, dq, dq_freelist);
    } else {
        TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
    }

    dq_unlock_internal(dq);
    dq_list_unlock();
}

/*
 * Update a quota file's orphaned disk quotas.
 */
void
dqsync_orphans(struct quotafile *qfp)
{
    struct dquot *dq;

    dq_list_lock();
loop:
    TAILQ_FOREACH(dq, &dqdirtylist, dq_freelist) {
        if (dq->dq_qfile != qfp) {
            continue;
        }

        dq_lock_internal(dq);

        if (dq->dq_qfile != qfp) {
            /*
             * the identity of this dq changed while
             * the quota_list_lock was dropped...
             * dq_lock_internal can drop it to msleep
             */
            dq_unlock_internal(dq);
            goto loop;
        }
        if ((dq->dq_flags & DQ_MOD) == 0) {
            /*
             * someone cleaned this dq and removed it
             * from the dirty list while the
             * quota_list_lock was dropped
             */
            dq_unlock_internal(dq);
            goto loop;
        }
        if (dq->dq_cnt != 0) {
            panic("dqsync_orphans: dquot in use");
        }

        TAILQ_REMOVE(&dqdirtylist, dq, dq_freelist);

        dq_list_unlock();
        /*
         * we're still holding the dqlock at this point
         * with the reference count == 0... it shouldn't
         * be able to pick up another reference since we
         * hold the dqlock
         */
        (void) dqsync_locked(dq);

        dq_list_lock();

        TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);

        dq_unlock_internal(dq);
        goto loop;
    }
    dq_list_unlock();
}

int
dqsync(struct dquot *dq)
{
    int error = 0;

    if (dq != NODQUOT) {
        dqlock(dq);

        if ((dq->dq_flags & DQ_MOD)) {
            error = dqsync_locked(dq);
        }

        dqunlock(dq);
    }
    return error;
}


/*
 * Update the disk quota in the quota file.
 */
int
dqsync_locked(struct dquot *dq)
{
    struct vfs_context context;
    struct vnode *dqvp;
    struct dqblk dqb, *dqblkp;
    uio_t auio;
    int error;
    char uio_buf[UIO_SIZEOF(1)];

    if (dq->dq_id == 0) {
        dq->dq_flags &= ~DQ_MOD;
        return 0;
    }
    if (dq->dq_qfile == NULL) {
        panic("dqsync: NULL dq_qfile");
    }
    if ((dqvp = dq->dq_qfile->qf_vp) == NULLVP) {
        panic("dqsync: NULL qf_vp");
    }

    auio = uio_createwithbuffer(1, dqoffset(dq->dq_index), UIO_SYSSPACE,
        UIO_WRITE, &uio_buf[0], sizeof(uio_buf));
    uio_addiov(auio, CAST_USER_ADDR_T(&dqb), sizeof(struct dqblk));

    context.vc_thread = current_thread();       /* XXX */
    context.vc_ucred = dq->dq_qfile->qf_cred;

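    /*
     * Convert the in-core (host-endian) dqblk to the big-endian on-disk
     * form in a local copy, then write it back over the record at
     * dq_index (remembered by dqlookup()).
     */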
    dqblkp = &dq->dq_dqb;
    dqb.dqb_bhardlimit = OSSwapHostToBigInt64(dqblkp->dqb_bhardlimit);
    dqb.dqb_bsoftlimit = OSSwapHostToBigInt64(dqblkp->dqb_bsoftlimit);
    dqb.dqb_curbytes = OSSwapHostToBigInt64(dqblkp->dqb_curbytes);
    dqb.dqb_ihardlimit = OSSwapHostToBigInt32(dqblkp->dqb_ihardlimit);
    dqb.dqb_isoftlimit = OSSwapHostToBigInt32(dqblkp->dqb_isoftlimit);
    dqb.dqb_curinodes = OSSwapHostToBigInt32(dqblkp->dqb_curinodes);
    dqb.dqb_btime = OSSwapHostToBigInt32(dqblkp->dqb_btime);
    dqb.dqb_itime = OSSwapHostToBigInt32(dqblkp->dqb_itime);
    dqb.dqb_id = OSSwapHostToBigInt32(dqblkp->dqb_id);
    dqb.dqb_spare[0] = 0;
    dqb.dqb_spare[1] = 0;
    dqb.dqb_spare[2] = 0;
    dqb.dqb_spare[3] = 0;

    error = VNOP_WRITE(dqvp, auio, 0, &context);
    if (uio_resid(auio) && error == 0) {
        error = EIO;
    }
    dq->dq_flags &= ~DQ_MOD;

    return error;
}

/*
 * Flush all entries from the cache for a particular vnode.
 */
void
dqflush(struct vnode *vp)
{
    struct dquot *dq, *nextdq;
    struct dqhash *dqh;

    if (!dqisinitialized()) {
        return;
    }

    /*
     * Move all dquot's that used to refer to this quota
     * file off their hash chains (they will eventually
     * fall off the head of the free list and be re-used).
     */
    dq_list_lock();

    for (dqh = &dqhashtbl[dqhash]; dqh >= dqhashtbl; dqh--) {
        for (dq = dqh->lh_first; dq; dq = nextdq) {
            nextdq = dq->dq_hash.le_next;
            if (dq->dq_qfile->qf_vp != vp) {
                continue;
            }
            if (dq->dq_cnt) {
                panic("dqflush: stray dquot");
            }
            LIST_REMOVE(dq, dq_hash);
            dq->dq_qfile = NULL;
        }
    }
    dq_list_unlock();
}

/*
 * LP64 support for munging dqblk structure.
 * XXX conversion of user_time_t to time_t loses precision; not an issue for
 * XXX us now, since we are only ever setting 32 bits worth of time into it.
 */
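/*
 * The 32-bit and 64-bit dqblk layouts match up to dqb_btime, which is why
 * a single bcopy() covers the limits and usage fields and only the id and
 * the (wider) time fields are copied member by member below.  A sketch of
 * the intended use, assuming a 64-bit dqblk already copied in from user
 * space:
 *
 *      struct user_dqblk udqb;
 *      struct dqblk kdqb;
 *
 *      munge_dqblk(&kdqb, &udqb, FALSE);  // user (64 bit) -> kernel (32 bit)
 *      munge_dqblk(&kdqb, &udqb, TRUE);   // kernel (32 bit) -> user (64 bit)
 */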
__private_extern__ void
munge_dqblk(struct dqblk *dqblkp, struct user_dqblk *user_dqblkp, boolean_t to64)
{
    if (to64) {
        /* munge kernel (32 bit) dqblk into user (64 bit) dqblk */
        bcopy((caddr_t)dqblkp, (caddr_t)user_dqblkp, offsetof(struct dqblk, dqb_btime));
        user_dqblkp->dqb_id = dqblkp->dqb_id;
        user_dqblkp->dqb_itime = dqblkp->dqb_itime;
        user_dqblkp->dqb_btime = dqblkp->dqb_btime;
    } else {
        /* munge user (64 bit) dqblk into kernel (32 bit) dqblk */
        bcopy((caddr_t)user_dqblkp, (caddr_t)dqblkp, offsetof(struct dqblk, dqb_btime));
        dqblkp->dqb_id = user_dqblkp->dqb_id;
        dqblkp->dqb_itime = user_dqblkp->dqb_itime;     /* XXX - lose precision */
        dqblkp->dqb_btime = user_dqblkp->dqb_btime;     /* XXX - lose precision */
    }
}