]> git.saurik.com Git - apple/xnu.git/blob - bsd/vfs/vfs_quota.c
xnu-6153.11.26.tar.gz
[apple/xnu.git] / bsd / vfs / vfs_quota.c
1 /*
2 * Copyright (c) 2002-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1990, 1993, 1995
30 * The Regents of the University of California. All rights reserved.
31 *
32 * This code is derived from software contributed to Berkeley by
33 * Robert Elz at The University of Melbourne.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 * 3. All advertising materials mentioning features or use of this software
44 * must display the following acknowledgement:
45 * This product includes software developed by the University of
46 * California, Berkeley and its contributors.
47 * 4. Neither the name of the University nor the names of its contributors
48 * may be used to endorse or promote products derived from this software
49 * without specific prior written permission.
50 *
51 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
52 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
53 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
54 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
55 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
57 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
59 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61 * SUCH DAMAGE.
62 *
63 * @(#)vfs_quota.c
64 * derived from @(#)ufs_quota.c 8.5 (Berkeley) 5/20/95
65 */
66
67 #include <sys/param.h>
68 #include <sys/kernel.h>
69 #include <sys/systm.h>
70 #include <sys/malloc.h>
71 #include <sys/file_internal.h>
72 #include <sys/proc_internal.h>
73 #include <sys/vnode_internal.h>
74 #include <sys/mount_internal.h>
75 #include <sys/quota.h>
76 #include <sys/uio_internal.h>
77
78 #include <libkern/OSByteOrder.h>
79
80
/* vars for quota file lock */
lck_grp_t * qf_lck_grp;
lck_grp_attr_t * qf_lck_grp_attr;
lck_attr_t * qf_lck_attr;

/* vars for quota list lock */
lck_grp_t * quota_list_lck_grp;
lck_grp_attr_t * quota_list_lck_grp_attr;
lck_attr_t * quota_list_lck_attr;
lck_mtx_t * quota_list_mtx_lock;        /* global mutex guarding the dquot hash, free and dirty lists */

/* Routines to lock and unlock the quota global data */
static int dq_list_lock(void);
static void dq_list_unlock(void);

static void dq_lock_internal(struct dquot *dq);
static void dq_unlock_internal(struct dquot *dq);

/* expected on-disk magic per quota type, from INITQMAGICS */
static u_int32_t quotamagic[MAXQUOTAS] = INITQMAGICS;


/*
 * Code pertaining to management of the in-core dquot data structures.
 */
/* hash chain head for a (quota-file vnode, id) pair */
#define DQHASH(dqvp, id) \
	(&dqhashtbl[((((intptr_t)(dqvp)) >> 8) + id) & dqhash])
LIST_HEAD(dqhash, dquot) * dqhashtbl;   /* in-core dquot hash table */
u_long dqhash;                          /* mask applied in DQHASH (set by hashinit) */

#define DQUOTINC        5       /* minimum free dquots desired */
long numdquot, desireddquot = DQUOTINC;

/*
 * Dquot free list.
 */
TAILQ_HEAD(dqfreelist, dquot) dqfreelist;
/*
 * Dquot dirty orphans list: unreferenced dquots that still carry
 * DQ_MOD and await a flush by dqsync_orphans().
 */
TAILQ_HEAD(dqdirtylist, dquot) dqdirtylist;


static int dqlookup(struct quotafile *, u_int32_t, struct dqblk *, u_int32_t *);
static int dqsync_locked(struct dquot *dq);

static void qf_lock(struct quotafile *);
static void qf_unlock(struct quotafile *);
static int qf_ref(struct quotafile *);
static void qf_rele(struct quotafile *);
131
132 /*
133 * Initialize locks for the quota system.
134 */
135 void
136 dqinit(void)
137 {
138 /*
139 * Allocate quota list lock group attribute and group
140 */
141 quota_list_lck_grp_attr = lck_grp_attr_alloc_init();
142 quota_list_lck_grp = lck_grp_alloc_init("quota list", quota_list_lck_grp_attr);
143
144 /*
145 * Allocate qouta list lock attribute
146 */
147 quota_list_lck_attr = lck_attr_alloc_init();
148
149 /*
150 * Allocate quota list lock
151 */
152 quota_list_mtx_lock = lck_mtx_alloc_init(quota_list_lck_grp, quota_list_lck_attr);
153
154
155 /*
156 * allocate quota file lock group attribute and group
157 */
158 qf_lck_grp_attr = lck_grp_attr_alloc_init();
159 qf_lck_grp = lck_grp_alloc_init("quota file", qf_lck_grp_attr);
160
161 /*
162 * Allocate quota file lock attribute
163 */
164 qf_lck_attr = lck_attr_alloc_init();
165 }
166
167 /*
168 * Report whether dqhashinit has been run.
169 */
170 int
171 dqisinitialized(void)
172 {
173 return dqhashtbl != NULL;
174 }
175
176 /*
177 * Initialize hash table for dquot structures.
178 */
179 void
180 dqhashinit(void)
181 {
182 dq_list_lock();
183 if (dqisinitialized()) {
184 goto out;
185 }
186
187 TAILQ_INIT(&dqfreelist);
188 TAILQ_INIT(&dqdirtylist);
189 dqhashtbl = hashinit(desiredvnodes, M_DQUOT, &dqhash);
190 out:
191 dq_list_unlock();
192 }
193
194
/*
 * Generation count for the quota list lock.  Every acquisition bumps
 * it; code that may drop the lock (e.g. msleep inside
 * dq_lock_internal) snapshots the value and later calls
 * dq_list_lock_changed() to detect that the lock was released and
 * re-acquired in the interim.
 */
static volatile int dq_list_lock_cnt = 0;

/* Acquire the quota list mutex; returns the new lock generation. */
static int
dq_list_lock(void)
{
	lck_mtx_lock(quota_list_mtx_lock);
	return ++dq_list_lock_cnt;
}

/* TRUE if the list lock has been re-acquired since 'oldval' was sampled. */
static int
dq_list_lock_changed(int oldval)
{
	return dq_list_lock_cnt != oldval;
}

/* Sample the current lock generation; caller holds the list lock. */
static int
dq_list_lock_val(void)
{
	return dq_list_lock_cnt;
}

/* Release the quota list mutex. */
void
dq_list_unlock(void)
{
	lck_mtx_unlock(quota_list_mtx_lock);
}
221
222
/*
 * Acquire the per-dquot sleep lock (DQ_LLOCK).
 *
 * must be called with the quota_list_lock held; note that msleep
 * drops and re-acquires that mutex while blocked, so callers must
 * re-validate any state sampled beforehand (see dq_list_lock_changed).
 */
void
dq_lock_internal(struct dquot *dq)
{
	while (dq->dq_lflags & DQ_LLOCK) {
		/* lock is held by someone else; record interest and sleep */
		dq->dq_lflags |= DQ_LWANT;
		msleep(&dq->dq_lflags, quota_list_mtx_lock, PVFS, "dq_lock_internal", NULL);
	}
	dq->dq_lflags |= DQ_LLOCK;
}
235
236 /*
237 * must be called with the quota_list_lock held
238 */
239 void
240 dq_unlock_internal(struct dquot *dq)
241 {
242 int wanted = dq->dq_lflags & DQ_LWANT;
243
244 dq->dq_lflags &= ~(DQ_LLOCK | DQ_LWANT);
245
246 if (wanted) {
247 wakeup(&dq->dq_lflags);
248 }
249 }
250
/*
 * Acquire a dquot's sleep lock, taking the global quota list mutex
 * around the internal lock operation.
 */
void
dqlock(struct dquot *dq)
{
	lck_mtx_lock(quota_list_mtx_lock);

	dq_lock_internal(dq);

	lck_mtx_unlock(quota_list_mtx_lock);
}

/*
 * Release a dquot's sleep lock, taking the global quota list mutex
 * around the internal unlock operation.
 */
void
dqunlock(struct dquot *dq)
{
	lck_mtx_lock(quota_list_mtx_lock);

	dq_unlock_internal(dq);

	lck_mtx_unlock(quota_list_mtx_lock);
}
270
271
272
/*
 * Serialize quota-file open (QTF_OPENING) and close (QTF_CLOSING)
 * operations against each other and against outstanding references
 * (qf_refcnt).
 *
 * On success returns 0 with the requested state flag set in
 * qf_qflags; returns EBUSY when a conflicting operation is already
 * in progress or the file is in the wrong state.  Pair with
 * qf_put() to drop the state flag.
 */
int
qf_get(struct quotafile *qfp, int type)
{
	int error = 0;

	dq_list_lock();

	switch (type) {
	case QTF_OPENING:
		while ((qfp->qf_qflags & (QTF_OPENING | QTF_CLOSING))) {
			if ((qfp->qf_qflags & QTF_OPENING)) {
				/* another open is already in progress */
				error = EBUSY;
				break;
			}
			if ((qfp->qf_qflags & QTF_CLOSING)) {
				/* a close is in progress; wait for it to finish */
				qfp->qf_qflags |= QTF_WANTED;
				msleep(&qfp->qf_qflags, quota_list_mtx_lock, PVFS, "qf_get", NULL);
			}
		}
		if (qfp->qf_vp != NULLVP) {
			/* file is already open */
			error = EBUSY;
		}
		if (error == 0) {
			qfp->qf_qflags |= QTF_OPENING;
		}
		break;

	case QTF_CLOSING:
		if ((qfp->qf_qflags & QTF_CLOSING)) {
			/* already being closed */
			error = EBUSY;
			break;
		}
		qfp->qf_qflags |= QTF_CLOSING;

		/* stall until any in-flight open completes and all refs drain */
		while ((qfp->qf_qflags & QTF_OPENING) || qfp->qf_refcnt) {
			qfp->qf_qflags |= QTF_WANTED;
			msleep(&qfp->qf_qflags, quota_list_mtx_lock, PVFS, "qf_get", NULL);
		}
		if (qfp->qf_vp == NULLVP) {
			/* nothing is open, so there is nothing to close */
			qfp->qf_qflags &= ~QTF_CLOSING;
			error = EBUSY;
		}
		break;
	}
	dq_list_unlock();

	return error;
}
321
322 void
323 qf_put(struct quotafile *qfp, int type)
324 {
325 dq_list_lock();
326
327 switch (type) {
328 case QTF_OPENING:
329 case QTF_CLOSING:
330 qfp->qf_qflags &= ~type;
331 break;
332 }
333 if ((qfp->qf_qflags & QTF_WANTED)) {
334 qfp->qf_qflags &= ~QTF_WANTED;
335 wakeup(&qfp->qf_qflags);
336 }
337 dq_list_unlock();
338 }
339
340
/* Acquire the per-quotafile mutex (guards on-disk table access). */
static void
qf_lock(struct quotafile *qfp)
{
	lck_mtx_lock(&qfp->qf_lock);
}

/* Release the per-quotafile mutex. */
static void
qf_unlock(struct quotafile *qfp)
{
	lck_mtx_unlock(&qfp->qf_lock);
}
352
353
354 /*
355 * take a reference on the quota file while we're
356 * in dqget... this will prevent a quota_off from
357 * occurring while we're potentially playing with
358 * the quota file... the quota_off will stall until
359 * all the current references 'die'... once we start
360 * into quoto_off, all new references will be rejected
361 * we also don't want any dqgets being processed while
362 * we're in the middle of the quota_on... once we've
363 * actually got the quota file open and the associated
364 * struct quotafile inited, we can let them come through
365 *
366 * quota list lock must be held on entry
367 */
368 static int
369 qf_ref(struct quotafile *qfp)
370 {
371 int error = 0;
372
373 if ((qfp->qf_qflags & (QTF_OPENING | QTF_CLOSING)) || (qfp->qf_vp == NULLVP)) {
374 error = EINVAL;
375 } else {
376 qfp->qf_refcnt++;
377 }
378
379 return error;
380 }
381
382 /*
383 * drop our reference and wakeup any waiters if
384 * we were the last one holding a ref
385 *
386 * quota list lock must be held on entry
387 */
388 static void
389 qf_rele(struct quotafile *qfp)
390 {
391 qfp->qf_refcnt--;
392
393 if ((qfp->qf_qflags & QTF_WANTED) && qfp->qf_refcnt == 0) {
394 qfp->qf_qflags &= ~QTF_WANTED;
395 wakeup(&qfp->qf_qflags);
396 }
397 }
398
399
/*
 * One-time initialization of a struct quotafile: no vnode attached,
 * no state flags set, and a freshly initialized qf_lock mutex.
 */
void
dqfileinit(struct quotafile *qfp)
{
	qfp->qf_vp = NULLVP;
	qfp->qf_qflags = 0;

	lck_mtx_init(&qfp->qf_lock, qf_lck_grp, qf_lck_attr);
}
408
409
410 /*
411 * Initialize a quota file
412 *
413 * must be called with the quota file lock held
414 */
415 int
416 dqfileopen(struct quotafile *qfp, int type)
417 {
418 struct dqfilehdr header;
419 struct vfs_context context;
420 off_t file_size;
421 uio_t auio;
422 int error = 0;
423 char uio_buf[UIO_SIZEOF(1)];
424
425 context.vc_thread = current_thread();
426 context.vc_ucred = qfp->qf_cred;
427
428 /* Obtain the file size */
429 if ((error = vnode_size(qfp->qf_vp, &file_size, &context)) != 0) {
430 goto out;
431 }
432
433 /* Read the file header */
434 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
435 &uio_buf[0], sizeof(uio_buf));
436 uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof(header));
437 error = VNOP_READ(qfp->qf_vp, auio, 0, &context);
438 if (error) {
439 goto out;
440 } else if (uio_resid(auio)) {
441 error = EINVAL;
442 goto out;
443 }
444 /* Sanity check the quota file header. */
445 if ((OSSwapBigToHostInt32(header.dqh_magic) != quotamagic[type]) ||
446 (OSSwapBigToHostInt32(header.dqh_version) > QF_VERSION) ||
447 (!powerof2(OSSwapBigToHostInt32(header.dqh_maxentries))) ||
448 (OSSwapBigToHostInt32(header.dqh_maxentries) > (file_size / sizeof(struct dqblk)))) {
449 error = EINVAL;
450 goto out;
451 }
452 /* Set up the time limits for this quota. */
453 if (header.dqh_btime != 0) {
454 qfp->qf_btime = OSSwapBigToHostInt32(header.dqh_btime);
455 } else {
456 qfp->qf_btime = MAX_DQ_TIME;
457 }
458 if (header.dqh_itime != 0) {
459 qfp->qf_itime = OSSwapBigToHostInt32(header.dqh_itime);
460 } else {
461 qfp->qf_itime = MAX_IQ_TIME;
462 }
463
464 /* Calculate the hash table constants. */
465 qfp->qf_maxentries = OSSwapBigToHostInt32(header.dqh_maxentries);
466 qfp->qf_entrycnt = OSSwapBigToHostInt32(header.dqh_entrycnt);
467 qfp->qf_shift = dqhashshift(qfp->qf_maxentries);
468 out:
469 return error;
470 }
471
/*
 * Close down a quota file
 *
 * Re-reads the on-disk header, stamps it with the current in-core
 * entry count (big-endian), and writes it back.  Errors on the
 * read or write are deliberately ignored — this is best effort.
 */
void
dqfileclose(struct quotafile *qfp, __unused int type)
{
	struct dqfilehdr header;
	struct vfs_context context;
	uio_t auio;
	char uio_buf[UIO_SIZEOF(1)];

	auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
	    &uio_buf[0], sizeof(uio_buf));
	uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof(header));

	context.vc_thread = current_thread();
	context.vc_ucred = qfp->qf_cred;

	if (VNOP_READ(qfp->qf_vp, auio, 0, &context) == 0) {
		header.dqh_entrycnt = OSSwapHostToBigInt32(qfp->qf_entrycnt);
		/* reuse the same uio buffer for the write-back */
		uio_reset(auio, 0, UIO_SYSSPACE, UIO_WRITE);
		uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof(header));
		(void) VNOP_WRITE(qfp->qf_vp, auio, 0, &context);
	}
}
497
498
/*
 * Obtain a dquot structure for the specified identifier and quota file
 * reading the information from the file if necessary.
 *
 * On success returns 0 with *dqp pointing at a dquot whose dq_cnt has
 * been incremented; on failure returns EINVAL, EUSERS, or an I/O
 * error with *dqp set to NODQUOT.
 *
 * The search may drop the quota list lock (allocation, or msleep
 * inside dq_lock_internal); whenever the lock generation changes it
 * restarts from 'relookup' to re-validate against racing threads.
 */
int
dqget(u_int32_t id, struct quotafile *qfp, int type, struct dquot **dqp)
{
	struct dquot *dq;
	struct dquot *ndq = NULL;	/* freshly allocated dquot, not yet in use */
	struct dquot *fdq = NULL;	/* dquot captured off the free list */
	struct dqhash *dqh;
	struct vnode *dqvp;
	int error = 0;
	int listlockval = 0;

	if (!dqisinitialized()) {
		*dqp = NODQUOT;
		return EINVAL;
	}

	/* id 0 is never tracked; a file with no vnode is not open */
	if (id == 0 || qfp->qf_vp == NULLVP) {
		*dqp = NODQUOT;
		return EINVAL;
	}
	dq_list_lock();

	/* hold a reference so quota_off can't tear the file down under us */
	if ((qf_ref(qfp))) {
		dq_list_unlock();

		*dqp = NODQUOT;
		return EINVAL;
	}
	if ((dqvp = qfp->qf_vp) == NULLVP) {
		qf_rele(qfp);
		dq_list_unlock();

		*dqp = NODQUOT;
		return EINVAL;
	}
	dqh = DQHASH(dqvp, id);

relookup:
	listlockval = dq_list_lock_val();

	/*
	 * Check the cache first.
	 */
	for (dq = dqh->lh_first; dq; dq = dq->dq_hash.le_next) {
		if (dq->dq_id != id ||
		    dq->dq_qfile->qf_vp != dqvp) {
			continue;
		}

		dq_lock_internal(dq);
		if (dq_list_lock_changed(listlockval)) {
			dq_unlock_internal(dq);
			goto relookup;
		}

		/*
		 * dq_lock_internal may drop the quota_list_lock to msleep, so
		 * we need to re-evaluate the identity of this dq
		 */
		if (dq->dq_id != id || dq->dq_qfile == NULL ||
		    dq->dq_qfile->qf_vp != dqvp) {
			dq_unlock_internal(dq);
			goto relookup;
		}
		/*
		 * Cache hit with no references. Take
		 * the structure off the free list.
		 */
		if (dq->dq_cnt++ == 0) {
			if (dq->dq_flags & DQ_MOD) {
				TAILQ_REMOVE(&dqdirtylist, dq, dq_freelist);
			} else {
				TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
			}
		}
		dq_unlock_internal(dq);

		if (fdq != NULL) {
			/*
			 * we grabbed this from the free list in the first pass
			 * but we found the dq we were looking for in
			 * the cache the 2nd time through
			 * so stick it back on the free list and return the cached entry
			 */
			TAILQ_INSERT_HEAD(&dqfreelist, fdq, dq_freelist);
		}
		qf_rele(qfp);
		dq_list_unlock();

		if (ndq != NULL) {
			/*
			 * we allocated this in the first pass
			 * but we found the dq we were looking for in
			 * the cache the 2nd time through so free it
			 */
			_FREE(ndq, M_DQUOT);
		}
		*dqp = dq;

		return 0;
	}
	/*
	 * Not in cache, allocate a new one.
	 */
	if (TAILQ_EMPTY(&dqfreelist) &&
	    numdquot < MAXQUOTAS * desiredvnodes) {
		desireddquot += DQUOTINC;
	}

	if (fdq != NULL) {
		/*
		 * we captured this from the free list
		 * in the first pass through, so go
		 * ahead and use it
		 */
		dq = fdq;
		fdq = NULL;
	} else if (numdquot < desireddquot) {
		if (ndq == NULL) {
			/*
			 * drop the quota list lock since MALLOC may block
			 */
			dq_list_unlock();

			ndq = (struct dquot *)_MALLOC(sizeof *dq, M_DQUOT, M_WAITOK);
			bzero((char *)ndq, sizeof *dq);

			listlockval = dq_list_lock();
			/*
			 * need to look for the entry again in the cache
			 * since we dropped the quota list lock and
			 * someone else may have beaten us to creating it
			 */
			goto relookup;
		} else {
			/*
			 * we allocated this in the first pass through
			 * and we're still under our target, so go
			 * ahead and use it
			 */
			dq = ndq;
			ndq = NULL;
			numdquot++;
		}
	} else {
		if (TAILQ_EMPTY(&dqfreelist)) {
			/* at capacity and nothing reclaimable: give up */
			qf_rele(qfp);
			dq_list_unlock();

			if (ndq) {
				/*
				 * we allocated this in the first pass through
				 * but we're now at the limit of our cache size
				 * so free it
				 */
				_FREE(ndq, M_DQUOT);
			}
			tablefull("dquot");
			*dqp = NODQUOT;
			return EUSERS;
		}
		dq = TAILQ_FIRST(&dqfreelist);

		dq_lock_internal(dq);

		if (dq_list_lock_changed(listlockval) || dq->dq_cnt || (dq->dq_flags & DQ_MOD)) {
			/*
			 * we lost the race while we weren't holding
			 * the quota list lock... dq_lock_internal
			 * will drop it to msleep... this dq has been
			 * reclaimed... go find another
			 */
			dq_unlock_internal(dq);

			/*
			 * need to look for the entry again in the cache
			 * since we dropped the quota list lock and
			 * someone else may have beaten us to creating it
			 */
			goto relookup;
		}
		TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);

		if (dq->dq_qfile != NULL) {
			/* detach the recycled dq from its previous identity */
			LIST_REMOVE(dq, dq_hash);
			dq->dq_qfile = NULL;
			dq->dq_id = 0;
		}
		dq_unlock_internal(dq);

		/*
		 * because we may have dropped the quota list lock
		 * in the call to dq_lock_internal, we need to
		 * relookup in the hash in case someone else
		 * caused a dq with this identity to be created...
		 * if we don't find it, we'll use this one
		 */
		fdq = dq;
		goto relookup;
	}
	/*
	 * we've either freshly allocated a dq
	 * or we've atomically pulled it out of
	 * the hash and freelists... no one else
	 * can have a reference, which means no
	 * one else can be trying to use this dq
	 */
	dq_lock_internal(dq);
	if (dq_list_lock_changed(listlockval)) {
		dq_unlock_internal(dq);
		goto relookup;
	}

	/*
	 * Initialize the contents of the dquot structure.
	 */
	dq->dq_cnt = 1;
	dq->dq_flags = 0;
	dq->dq_id = id;
	dq->dq_qfile = qfp;
	dq->dq_type = type;
	/*
	 * once we insert it in the hash and
	 * drop the quota_list_lock, it can be
	 * 'found'... however, we're still holding
	 * the dq_lock which will keep us from doing
	 * anything with it until we've finished
	 * initializing it...
	 */
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	dq_list_unlock();

	if (ndq) {
		/*
		 * we allocated this in the first pass through
		 * but we didn't need it, so free it after
		 * we've dropped the quota list lock
		 */
		_FREE(ndq, M_DQUOT);
	}

	/* read (or reserve) this id's record from the quota file */
	error = dqlookup(qfp, id, &dq->dq_dqb, &dq->dq_index);

	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		dq_list_lock();

		dq->dq_id = 0;
		dq->dq_qfile = NULL;
		LIST_REMOVE(dq, dq_hash);

		dq_unlock_internal(dq);
		qf_rele(qfp);
		dq_list_unlock();

		dqrele(dq);

		*dqp = NODQUOT;
		return error;
	}
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0) {
		dq->dq_flags |= DQ_FAKE;
	}
	if (dq->dq_id != 0) {
		struct timeval tv;

		microtime(&tv);
		if (dq->dq_btime == 0) {
			dq->dq_btime = tv.tv_sec + qfp->qf_btime;
		}
		if (dq->dq_itime == 0) {
			dq->dq_itime = tv.tv_sec + qfp->qf_itime;
		}
	}
	dq_list_lock();
	dq_unlock_internal(dq);
	qf_rele(qfp);
	dq_list_unlock();

	*dqp = dq;
	return 0;
}
793
/*
 * Lookup a dqblk structure for the specified identifier and
 * quota file. If there is no entry for this identifier then
 * one is inserted. The actual hash table index is returned.
 *
 * The on-disk table is probed open-addressed style: dqhash1 yields
 * the initial slot, dqhash2 the probe stride; the table size is a
 * power of two (validated in dqfileopen), so masking wraps the
 * index.  On-disk records are big-endian; a matching record is
 * byte-swapped into host order in *dqb.
 *
 * Returns 0 on success (record found or reserved), EIO on a short
 * read/write, or a VNOP_READ/VNOP_WRITE error.
 */
static int
dqlookup(struct quotafile *qfp, u_int32_t id, struct dqblk *dqb, uint32_t *index)
{
	struct vnode *dqvp;
	struct vfs_context context;
	uio_t auio;
	int i, skip, last;
	u_int32_t mask;
	int error = 0;
	char uio_buf[UIO_SIZEOF(1)];


	qf_lock(qfp);

	dqvp = qfp->qf_vp;

	context.vc_thread = current_thread();
	context.vc_ucred = qfp->qf_cred;

	mask = qfp->qf_maxentries - 1;
	i = dqhash1(id, qfp->qf_shift, mask);
	skip = dqhash2(id, mask);

	/*
	 * Probe up to qf_maxentries - 1 slots.
	 * NOTE(review): if the probe sequence is exhausted without a match
	 * or an empty slot, the loop exits with error == 0 and *dqb holding
	 * the last record read — presumably the table never fills completely;
	 * verify against the quota file creation/maintenance code.
	 */
	for (last = (i + (qfp->qf_maxentries - 1) * skip) & mask;
	    i != last;
	    i = (i + skip) & mask) {
		auio = uio_createwithbuffer(1, dqoffset(i), UIO_SYSSPACE, UIO_READ,
		    &uio_buf[0], sizeof(uio_buf));
		uio_addiov(auio, CAST_USER_ADDR_T(dqb), sizeof(struct dqblk));
		error = VNOP_READ(dqvp, auio, 0, &context);
		if (error) {
			printf("dqlookup: error %d looking up id %u at index %d\n", error, id, i);
			break;
		} else if (uio_resid(auio)) {
			error = EIO;
			printf("dqlookup: error looking up id %u at index %d\n", id, i);
			break;
		}
		/*
		 * An empty entry means there is no entry
		 * with that id.  In this case a new dqb
		 * record will be inserted.
		 */
		if (dqb->dqb_id == 0) {
			bzero(dqb, sizeof(struct dqblk));
			dqb->dqb_id = OSSwapHostToBigInt32(id);
			/*
			 * Write back to reserve entry for this id
			 */
			uio_reset(auio, dqoffset(i), UIO_SYSSPACE, UIO_WRITE);
			uio_addiov(auio, CAST_USER_ADDR_T(dqb), sizeof(struct dqblk));
			error = VNOP_WRITE(dqvp, auio, 0, &context);
			if (uio_resid(auio) && error == 0) {
				error = EIO;
			}
			if (error == 0) {
				++qfp->qf_entrycnt;
			}
			/* leave the in-core copy in host byte order */
			dqb->dqb_id = id;
			break;
		}
		/* An id match means an entry was found; swap to host order. */
		if (OSSwapBigToHostInt32(dqb->dqb_id) == id) {
			dqb->dqb_bhardlimit = OSSwapBigToHostInt64(dqb->dqb_bhardlimit);
			dqb->dqb_bsoftlimit = OSSwapBigToHostInt64(dqb->dqb_bsoftlimit);
			dqb->dqb_curbytes = OSSwapBigToHostInt64(dqb->dqb_curbytes);
			dqb->dqb_ihardlimit = OSSwapBigToHostInt32(dqb->dqb_ihardlimit);
			dqb->dqb_isoftlimit = OSSwapBigToHostInt32(dqb->dqb_isoftlimit);
			dqb->dqb_curinodes = OSSwapBigToHostInt32(dqb->dqb_curinodes);
			dqb->dqb_btime = OSSwapBigToHostInt32(dqb->dqb_btime);
			dqb->dqb_itime = OSSwapBigToHostInt32(dqb->dqb_itime);
			dqb->dqb_id = OSSwapBigToHostInt32(dqb->dqb_id);
			break;
		}
	}
	qf_unlock(qfp);

	*index = i;     /* remember index so we don't have to recompute it later */

	return error;
}
880
881
/*
 * Release a reference to a dquot.
 *
 * Drops one reference; if it is the last one, a pending modification
 * (DQ_MOD) is pushed to disk via dqsync_locked and the dquot is
 * placed at the tail of the free list for eventual reuse.
 */
void
dqrele(struct dquot *dq)
{
	if (dq == NODQUOT) {
		return;
	}
	dqlock(dq);

	if (dq->dq_cnt > 1) {
		/* not the last reference; just drop it */
		dq->dq_cnt--;

		dqunlock(dq);
		return;
	}
	if (dq->dq_flags & DQ_MOD) {
		/* last reference and dirty: write the record back */
		(void) dqsync_locked(dq);
	}
	dq->dq_cnt--;

	dq_list_lock();
	/* now unreferenced: make it reclaimable via the free list */
	TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
	dq_unlock_internal(dq);
	dq_list_unlock();
}
909
/*
 * Release a reference to a dquot but don't do any I/O.
 *
 * If this drops the last reference, a still-dirty dquot (DQ_MOD) is
 * queued on the dirty-orphans list for dqsync_orphans to flush later;
 * otherwise it goes on the free list.
 */
void
dqreclaim(struct dquot *dq)
{
	if (dq == NODQUOT) {
		return;
	}

	dq_list_lock();
	dq_lock_internal(dq);

	if (--dq->dq_cnt > 0) {
		/* other references remain; nothing more to do */
		dq_unlock_internal(dq);
		dq_list_unlock();
		return;
	}
	if (dq->dq_flags & DQ_MOD) {
		/* dirty: defer the write to dqsync_orphans */
		TAILQ_INSERT_TAIL(&dqdirtylist, dq, dq_freelist);
	} else {
		TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
	}

	dq_unlock_internal(dq);
	dq_list_unlock();
}
937
/*
 * Update a quota file's orphaned disk quotas.
 *
 * Walks the dirty-orphans list, flushing every unreferenced dquot
 * that belongs to qfp and moving it to the free list.  Because
 * dq_lock_internal and the sync itself drop the quota list lock,
 * the scan restarts from the top after every potentially racy step.
 */
void
dqsync_orphans(struct quotafile *qfp)
{
	struct dquot *dq;

	dq_list_lock();
loop:
	TAILQ_FOREACH(dq, &dqdirtylist, dq_freelist) {
		if (dq->dq_qfile != qfp) {
			continue;
		}

		dq_lock_internal(dq);

		if (dq->dq_qfile != qfp) {
			/*
			 * the identity of this dq changed while
			 * the quota_list_lock was dropped
			 * dq_lock_internal can drop it to msleep
			 */
			dq_unlock_internal(dq);
			goto loop;
		}
		if ((dq->dq_flags & DQ_MOD) == 0) {
			/*
			 * someone cleaned this dq and removed it from
			 * the dirty list while the quota_list_lock
			 * was dropped
			 */
			dq_unlock_internal(dq);
			goto loop;
		}
		/* orphans are by definition unreferenced */
		if (dq->dq_cnt != 0) {
			panic("dqsync_orphans: dquot in use");
		}

		TAILQ_REMOVE(&dqdirtylist, dq, dq_freelist);

		dq_list_unlock();
		/*
		 * we're still holding the dqlock at this point
		 * with the reference count == 0
		 * we shouldn't be able
		 * to pick up another one since we hold dqlock
		 */
		(void) dqsync_locked(dq);

		dq_list_lock();

		/* clean now: retire to the free list */
		TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);

		dq_unlock_internal(dq);
		goto loop;
	}
	dq_list_unlock();
}
997
998 int
999 dqsync(struct dquot *dq)
1000 {
1001 int error = 0;
1002
1003 if (dq != NODQUOT) {
1004 dqlock(dq);
1005
1006 if ((dq->dq_flags & DQ_MOD)) {
1007 error = dqsync_locked(dq);
1008 }
1009
1010 dqunlock(dq);
1011 }
1012 return error;
1013 }
1014
1015
/*
 * Update the disk quota in the quota file.
 *
 * Byte-swaps the in-core dqblk to big-endian and writes it to its
 * slot (dq_index, recorded by dqlookup) in the quota file, then
 * clears DQ_MOD.  A dquot with id 0 is never written; its DQ_MOD
 * flag is simply cleared.  Caller holds the dquot lock.
 *
 * Returns 0 on success, EIO on a short write, or a VNOP_WRITE error.
 * NOTE(review): DQ_MOD is cleared even when the write fails, so a
 * failed sync is not retried later.
 */
int
dqsync_locked(struct dquot *dq)
{
	struct vfs_context context;
	struct vnode *dqvp;
	struct dqblk dqb, *dqblkp;
	uio_t auio;
	int error;
	char uio_buf[UIO_SIZEOF(1)];

	if (dq->dq_id == 0) {
		dq->dq_flags &= ~DQ_MOD;
		return 0;
	}
	if (dq->dq_qfile == NULL) {
		panic("dqsync: NULL dq_qfile");
	}
	if ((dqvp = dq->dq_qfile->qf_vp) == NULLVP) {
		panic("dqsync: NULL qf_vp");
	}

	auio = uio_createwithbuffer(1, dqoffset(dq->dq_index), UIO_SYSSPACE,
	    UIO_WRITE, &uio_buf[0], sizeof(uio_buf));
	uio_addiov(auio, CAST_USER_ADDR_T(&dqb), sizeof(struct dqblk));

	context.vc_thread = current_thread();   /* XXX */
	context.vc_ucred = dq->dq_qfile->qf_cred;

	/* build a big-endian copy of the in-core record */
	dqblkp = &dq->dq_dqb;
	dqb.dqb_bhardlimit = OSSwapHostToBigInt64(dqblkp->dqb_bhardlimit);
	dqb.dqb_bsoftlimit = OSSwapHostToBigInt64(dqblkp->dqb_bsoftlimit);
	dqb.dqb_curbytes = OSSwapHostToBigInt64(dqblkp->dqb_curbytes);
	dqb.dqb_ihardlimit = OSSwapHostToBigInt32(dqblkp->dqb_ihardlimit);
	dqb.dqb_isoftlimit = OSSwapHostToBigInt32(dqblkp->dqb_isoftlimit);
	dqb.dqb_curinodes = OSSwapHostToBigInt32(dqblkp->dqb_curinodes);
	dqb.dqb_btime = OSSwapHostToBigInt32(dqblkp->dqb_btime);
	dqb.dqb_itime = OSSwapHostToBigInt32(dqblkp->dqb_itime);
	dqb.dqb_id = OSSwapHostToBigInt32(dqblkp->dqb_id);
	dqb.dqb_spare[0] = 0;
	dqb.dqb_spare[1] = 0;
	dqb.dqb_spare[2] = 0;
	dqb.dqb_spare[3] = 0;

	error = VNOP_WRITE(dqvp, auio, 0, &context);
	if (uio_resid(auio) && error == 0) {
		error = EIO;
	}
	dq->dq_flags &= ~DQ_MOD;

	return error;
}
1070
1071 /*
1072 * Flush all entries from the cache for a particular vnode.
1073 */
1074 void
1075 dqflush(struct vnode *vp)
1076 {
1077 struct dquot *dq, *nextdq;
1078 struct dqhash *dqh;
1079
1080 if (!dqisinitialized()) {
1081 return;
1082 }
1083
1084 /*
1085 * Move all dquot's that used to refer to this quota
1086 * file off their hash chains (they will eventually
1087 * fall off the head of the free list and be re-used).
1088 */
1089 dq_list_lock();
1090
1091 for (dqh = &dqhashtbl[dqhash]; dqh >= dqhashtbl; dqh--) {
1092 for (dq = dqh->lh_first; dq; dq = nextdq) {
1093 nextdq = dq->dq_hash.le_next;
1094 if (dq->dq_qfile->qf_vp != vp) {
1095 continue;
1096 }
1097 if (dq->dq_cnt) {
1098 panic("dqflush: stray dquot");
1099 }
1100 LIST_REMOVE(dq, dq_hash);
1101 dq->dq_qfile = NULL;
1102 }
1103 }
1104 dq_list_unlock();
1105 }
1106
/*
 * LP64 support for munging dqblk structure.
 * XXX conversion of user_time_t to time_t loses precision; not an issue for
 * XXX us now, since we are only ever setting 32 bits worth of time into it.
 *
 * to64 == TRUE copies the kernel dqblk into the user (64-bit) layout;
 * otherwise the reverse.  The bcopy assumes the two structs share an
 * identical layout for all fields preceding dqb_btime — TODO confirm
 * against the struct definitions in <sys/quota.h>.
 */
__private_extern__ void
munge_dqblk(struct dqblk *dqblkp, struct user_dqblk *user_dqblkp, boolean_t to64)
{
	if (to64) {
		/* munge kernel (32 bit) dqblk into user (64 bit) dqblk */
		bcopy((caddr_t)dqblkp, (caddr_t)user_dqblkp, offsetof(struct dqblk, dqb_btime));
		user_dqblkp->dqb_id = dqblkp->dqb_id;
		user_dqblkp->dqb_itime = dqblkp->dqb_itime;
		user_dqblkp->dqb_btime = dqblkp->dqb_btime;
	} else {
		/* munge user (64 bit) dqblk into kernel (32 bit) dqblk */
		bcopy((caddr_t)user_dqblkp, (caddr_t)dqblkp, offsetof(struct dqblk, dqb_btime));
		dqblkp->dqb_id = user_dqblkp->dqb_id;
		dqblkp->dqb_itime = user_dqblkp->dqb_itime;     /* XXX - lose precision */
		dqblkp->dqb_btime = user_dqblkp->dqb_btime;     /* XXX - lose precision */
	}
}