/*
 * Source: bsd/vfs/vfs_quota.c from apple/xnu (xnu-792.21.3),
 * as mirrored at git.saurik.com.
 */
1 /*
2 * Copyright (c) 2002-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1990, 1993, 1995
30 * The Regents of the University of California. All rights reserved.
31 *
32 * This code is derived from software contributed to Berkeley by
33 * Robert Elz at The University of Melbourne.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 * 3. All advertising materials mentioning features or use of this software
44 * must display the following acknowledgement:
45 * This product includes software developed by the University of
46 * California, Berkeley and its contributors.
47 * 4. Neither the name of the University nor the names of its contributors
48 * may be used to endorse or promote products derived from this software
49 * without specific prior written permission.
50 *
51 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
52 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
53 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
54 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
55 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
57 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
59 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61 * SUCH DAMAGE.
62 *
63 * @(#)vfs_quota.c
64 * derived from @(#)ufs_quota.c 8.5 (Berkeley) 5/20/95
65 */
66
67 #include <sys/param.h>
68 #include <sys/kernel.h>
69 #include <sys/systm.h>
70 #include <sys/malloc.h>
71 #include <sys/file_internal.h>
72 #include <sys/proc_internal.h>
73 #include <sys/vnode_internal.h>
74 #include <sys/mount_internal.h>
75 #include <sys/quota.h>
76 #include <sys/uio_internal.h>
77
78
/* vars for quota file lock (per-quotafile qf_lock group/attr) */
lck_grp_t * qf_lck_grp;
lck_grp_attr_t * qf_lck_grp_attr;
lck_attr_t * qf_lck_attr;

/* vars for quota list lock (single global mutex guarding all dquot lists) */
lck_grp_t * quota_list_lck_grp;
lck_grp_attr_t * quota_list_lck_grp_attr;
lck_attr_t * quota_list_lck_attr;
lck_mtx_t * quota_list_mtx_lock;

/* Routines to lock and unlock the quota global data */
static void dq_list_lock(void);
static void dq_list_unlock(void);

/* Per-dquot sleep lock, implemented over the quota list mutex */
static void dq_lock_internal(struct dquot *dq);
static void dq_unlock_internal(struct dquot *dq);

/* Expected on-disk magic number per quota type (user/group) */
static u_int32_t quotamagic[MAXQUOTAS] = INITQMAGICS;


/*
 * Code pertaining to management of the in-core dquot data structures.
 */
/* Hash on the quota-file vnode pointer and the id; dqhash is the table mask. */
#define DQHASH(dqvp, id) \
	(&dqhashtbl[((((int)(dqvp)) >> 8) + id) & dqhash])
LIST_HEAD(dqhash, dquot) *dqhashtbl;
u_long dqhash;

#define	DQUOTINC	5	/* minimum free dquots desired */
long numdquot, desireddquot = DQUOTINC;

/*
 * Dquot free list.
 */
TAILQ_HEAD(dqfreelist, dquot) dqfreelist;
/*
 * Dquot dirty orphans list (unreferenced dquots with unflushed changes)
 */
TAILQ_HEAD(dqdirtylist, dquot) dqdirtylist;


static int dqlookup(struct quotafile *, u_long, struct dqblk *, u_int32_t *);
static int dqsync_locked(struct dquot *dq);

static void qf_lock(struct quotafile *);
static void qf_unlock(struct quotafile *);
static int qf_ref(struct quotafile *);
static void qf_rele(struct quotafile *);
128
129
130 /*
131 * Initialize the quota system.
132 */
/*
 * Initialize the quota system: the dquot hash table, the free and
 * dirty lists, and the lock groups/attributes for both the global
 * quota list mutex and the per-quotafile mutexes.
 * Called once at boot.
 */
void
dqinit()
{
	/* one hash bucket per desired vnode; dqhash receives the mask */
	dqhashtbl = hashinit(desiredvnodes, M_DQUOT, &dqhash);
	TAILQ_INIT(&dqfreelist);
	TAILQ_INIT(&dqdirtylist);

	/*
	 * Allocate quota list lock group attribute and group
	 */
	quota_list_lck_grp_attr= lck_grp_attr_alloc_init();
	lck_grp_attr_setstat(quota_list_lck_grp_attr);
	quota_list_lck_grp = lck_grp_alloc_init("quota list", quota_list_lck_grp_attr);

	/*
	 * Allocate quota list lock attribute
	 */
	quota_list_lck_attr = lck_attr_alloc_init();
	//lck_attr_setdebug(quota_list_lck_attr);

	/*
	 * Allocate quota list lock
	 */
	quota_list_mtx_lock = lck_mtx_alloc_init(quota_list_lck_grp, quota_list_lck_attr);


	/*
	 * allocate quota file lock group attribute and group
	 */
	qf_lck_grp_attr= lck_grp_attr_alloc_init();
	lck_grp_attr_setstat(qf_lck_grp_attr);
	qf_lck_grp = lck_grp_alloc_init("quota file", qf_lck_grp_attr);

	/*
	 * Allocate quota file lock attribute
	 */
	qf_lck_attr = lck_attr_alloc_init();
	//lck_attr_setdebug(qf_lck_attr);
}
173
174
175
/*
 * Take the global quota list mutex, which protects the dquot hash
 * table, the free/dirty lists, and the dq_lflags sleep-lock bits.
 */
void
dq_list_lock(void)
{
	lck_mtx_lock(quota_list_mtx_lock);
}
181
/*
 * Release the global quota list mutex.
 */
void
dq_list_unlock(void)
{
	lck_mtx_unlock(quota_list_mtx_lock);
}
187
188
189 /*
190 * must be called with the quota_list_lock held
191 */
192 void
193 dq_lock_internal(struct dquot *dq)
194 {
195 while (dq->dq_lflags & DQ_LLOCK) {
196 dq->dq_lflags |= DQ_LWANT;
197 msleep(&dq->dq_lflags, quota_list_mtx_lock, PVFS, "dq_lock_internal", 0);
198 }
199 dq->dq_lflags |= DQ_LLOCK;
200 }
201
202 /*
203 * must be called with the quota_list_lock held
204 */
205 void
206 dq_unlock_internal(struct dquot *dq)
207 {
208 int wanted = dq->dq_lflags & DQ_LWANT;
209
210 dq->dq_lflags &= ~(DQ_LLOCK | DQ_LWANT);
211
212 if (wanted)
213 wakeup(&dq->dq_lflags);
214 }
215
/*
 * Acquire the per-dquot sleep lock from outside the quota list lock:
 * briefly takes the list mutex to manipulate dq_lflags safely.
 */
void
dqlock(struct dquot *dq) {

	lck_mtx_lock(quota_list_mtx_lock);

	dq_lock_internal(dq);

	lck_mtx_unlock(quota_list_mtx_lock);
}
225
/*
 * Release the per-dquot sleep lock from outside the quota list lock.
 * Counterpart to dqlock().
 */
void
dqunlock(struct dquot *dq) {

	lck_mtx_lock(quota_list_mtx_lock);

	dq_unlock_internal(dq);

	lck_mtx_unlock(quota_list_mtx_lock);
}
235
236
237
/*
 * Serialize quota file state transitions.
 *
 * QTF_OPENING: claim the right to open this quota file (quota_on).
 *   Fails with EBUSY if another open is in progress or the file is
 *   already open (qf_vp set); waits out a concurrent close first.
 * QTF_CLOSING: claim the right to close it (quota_off).  Fails with
 *   EBUSY if a close is already in progress or (after draining) the
 *   file turns out not to be open; waits for any opener and for all
 *   outstanding qf_ref references to drain before returning.
 *
 * Returns 0 once the requested flag is held, else EBUSY.
 */
int
qf_get(struct quotafile *qfp, int type)
{
	int error = 0;

	dq_list_lock();

	switch (type) {

	case QTF_OPENING:
		while ( (qfp->qf_qflags & (QTF_OPENING | QTF_CLOSING)) ) {
			if ( (qfp->qf_qflags & QTF_OPENING) ) {
				/* another quota_on is already in progress */
				error = EBUSY;
				break;
			}
			if ( (qfp->qf_qflags & QTF_CLOSING) ) {
				/* wait for the in-flight quota_off to finish */
				qfp->qf_qflags |= QTF_WANTED;
				msleep(&qfp->qf_qflags, quota_list_mtx_lock, PVFS, "qf_get", 0);
			}
		}
		if (qfp->qf_vp != NULLVP)
			error = EBUSY;
		if (error == 0)
			qfp->qf_qflags |= QTF_OPENING;
		break;

	case QTF_CLOSING:
		if ( (qfp->qf_qflags & QTF_CLOSING) ) {
			error = EBUSY;
			break;
		}
		qfp->qf_qflags |= QTF_CLOSING;

		/* drain any opener and all outstanding references */
		while ( (qfp->qf_qflags & QTF_OPENING) || qfp->qf_refcnt ) {
			qfp->qf_qflags |= QTF_WANTED;
			msleep(&qfp->qf_qflags, quota_list_mtx_lock, PVFS, "qf_get", 0);
		}
		if (qfp->qf_vp == NULLVP) {
			/* nothing is open after all -- give back the flag */
			qfp->qf_qflags &= ~QTF_CLOSING;
			error = EBUSY;
		}
		break;
	}
	dq_list_unlock();

	return (error);
}
285
286 void
287 qf_put(struct quotafile *qfp, int type)
288 {
289
290 dq_list_lock();
291
292 switch (type) {
293
294 case QTF_OPENING:
295 case QTF_CLOSING:
296 qfp->qf_qflags &= ~type;
297 break;
298 }
299 if ( (qfp->qf_qflags & QTF_WANTED) ) {
300 qfp->qf_qflags &= ~QTF_WANTED;
301 wakeup(&qfp->qf_qflags);
302 }
303 dq_list_unlock();
304 }
305
306
/* Take the per-quotafile mutex serializing on-disk table access. */
static void
qf_lock(struct quotafile *qfp)
{
	lck_mtx_lock(&qfp->qf_lock);
}
312
/* Release the per-quotafile mutex. */
static void
qf_unlock(struct quotafile *qfp)
{
	lck_mtx_unlock(&qfp->qf_lock);
}
318
319
320 /*
321 * take a reference on the quota file while we're
322 * in dqget... this will prevent a quota_off from
323 * occurring while we're potentially playing with
324 * the quota file... the quota_off will stall until
325 * all the current references 'die'... once we start
326 * into quoto_off, all new references will be rejected
327 * we also don't want any dqgets being processed while
328 * we're in the middle of the quota_on... once we've
329 * actually got the quota file open and the associated
330 * struct quotafile inited, we can let them come through
331 *
332 * quota list lock must be held on entry
333 */
334 static int
335 qf_ref(struct quotafile *qfp)
336 {
337 int error = 0;
338
339 if ( (qfp->qf_qflags & (QTF_OPENING | QTF_CLOSING)) || (qfp->qf_vp == NULLVP) )
340 error = EINVAL;
341 else
342 qfp->qf_refcnt++;
343
344 return (error);
345 }
346
347 /*
348 * drop our reference and wakeup any waiters if
349 * we were the last one holding a ref
350 *
351 * quota list lock must be held on entry
352 */
353 static void
354 qf_rele(struct quotafile *qfp)
355 {
356 qfp->qf_refcnt--;
357
358 if ( (qfp->qf_qflags & QTF_WANTED) && qfp->qf_refcnt == 0) {
359 qfp->qf_qflags &= ~QTF_WANTED;
360 wakeup(&qfp->qf_qflags);
361 }
362 }
363
364
365 void
366 dqfileinit(struct quotafile *qfp)
367 {
368 qfp->qf_vp = NULLVP;
369 qfp->qf_qflags = 0;
370
371 lck_mtx_init(&qfp->qf_lock, qf_lck_grp, qf_lck_attr);
372 }
373
374
375 /*
376 * Initialize a quota file
377 *
378 * must be called with the quota file lock held
379 */
380 int
381 dqfileopen(qfp, type)
382 struct quotafile *qfp;
383 int type;
384 {
385 struct dqfilehdr header;
386 struct vfs_context context;
387 off_t file_size;
388 uio_t auio;
389 int error = 0;
390 char uio_buf[ UIO_SIZEOF(1) ];
391
392 context.vc_proc = current_proc();
393 context.vc_ucred = qfp->qf_cred;
394
395 /* Obtain the file size */
396 if ((error = vnode_size(qfp->qf_vp, &file_size, &context)) != 0)
397 goto out;
398
399 /* Read the file header */
400 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
401 &uio_buf[0], sizeof(uio_buf));
402 uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof (header));
403 error = VNOP_READ(qfp->qf_vp, auio, 0, &context);
404 if (error)
405 goto out;
406 else if (uio_resid(auio)) {
407 error = EINVAL;
408 goto out;
409 }
410 /* Sanity check the quota file header. */
411 if ((header.dqh_magic != quotamagic[type]) ||
412 (header.dqh_version > QF_VERSION) ||
413 (!powerof2(header.dqh_maxentries)) ||
414 (header.dqh_maxentries > (file_size / sizeof(struct dqblk)))) {
415 error = EINVAL;
416 goto out;
417 }
418 /* Set up the time limits for this quota. */
419 if (header.dqh_btime > 0)
420 qfp->qf_btime = header.dqh_btime;
421 else
422 qfp->qf_btime = MAX_DQ_TIME;
423 if (header.dqh_itime > 0)
424 qfp->qf_itime = header.dqh_itime;
425 else
426 qfp->qf_itime = MAX_IQ_TIME;
427
428 /* Calculate the hash table constants. */
429 qfp->qf_maxentries = header.dqh_maxentries;
430 qfp->qf_entrycnt = header.dqh_entrycnt;
431 qfp->qf_shift = dqhashshift(header.dqh_maxentries);
432 out:
433 return (error);
434 }
435
/*
 * Close down a quota file: re-read the on-disk header and, if the
 * read succeeds, write it back with the current in-core entry count
 * so the count persists.  Best effort -- read/write errors are
 * deliberately ignored.
 */
void
dqfileclose(struct quotafile *qfp, __unused int type)
{
	struct dqfilehdr header;
	struct vfs_context context;
	uio_t auio;
	char uio_buf[ UIO_SIZEOF(1) ];

	auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
	    &uio_buf[0], sizeof(uio_buf));
	uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof (header));

	context.vc_proc = current_proc();
	context.vc_ucred = qfp->qf_cred;

	if (VNOP_READ(qfp->qf_vp, auio, 0, &context) == 0) {
		header.dqh_entrycnt = qfp->qf_entrycnt;
		/* rewind the same uio and rewrite the updated header */
		uio_reset(auio, 0, UIO_SYSSPACE, UIO_WRITE);
		uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof (header));
		(void) VNOP_WRITE(qfp->qf_vp, auio, 0, &context);
	}
}
461
462
/*
 * Obtain a dquot structure for the specified identifier and quota file
 * reading the information from the file if necessary.
 *
 * On success returns 0 with *dqp referencing the dquot (dq_cnt bumped);
 * the caller must eventually release it with dqrele()/dqreclaim().
 * On failure returns EINVAL (id 0, quota file not open, or a
 * quota_on/quota_off in progress), EUSERS (dquot table full), or an
 * I/O error from dqlookup(), with *dqp set to NODQUOT.
 *
 * The relookup: logic exists because dq_lock_internal() and _MALLOC()
 * can drop the quota list lock; any time that happens the cache must
 * be re-scanned since another thread may have created the same entry.
 */
int
dqget(id, qfp, type, dqp)
	u_long id;
	struct quotafile *qfp;
	register int type;
	struct dquot **dqp;
{
	struct dquot *dq;
	struct dquot *ndq = NULL;	/* freshly allocated candidate */
	struct dquot *fdq = NULL;	/* candidate captured from the free list */
	struct dqhash *dqh;
	struct vnode *dqvp;
	int error = 0;

	if ( id == 0 || qfp->qf_vp == NULLVP ) {
		*dqp = NODQUOT;
		return (EINVAL);
	}
	dq_list_lock();

	/* hold off quota_off while we work with the quota file */
	if ( (qf_ref(qfp)) ) {
		dq_list_unlock();

		*dqp = NODQUOT;
		return (EINVAL);
	}
	if ( (dqvp = qfp->qf_vp) == NULLVP ) {
		qf_rele(qfp);
		dq_list_unlock();

		*dqp = NODQUOT;
		return (EINVAL);
	}
	dqh = DQHASH(dqvp, id);

relookup:
	/*
	 * Check the cache first.
	 */
	for (dq = dqh->lh_first; dq; dq = dq->dq_hash.le_next) {
		if (dq->dq_id != id ||
		    dq->dq_qfile->qf_vp != dqvp)
			continue;

		dq_lock_internal(dq);
		/*
		 * dq_lock_internal may drop the quota_list_lock to msleep, so
		 * we need to re-evaluate the identity of this dq
		 */
		if (dq->dq_id != id || dq->dq_qfile == NULL ||
		    dq->dq_qfile->qf_vp != dqvp) {
			dq_unlock_internal(dq);
			goto relookup;
		}
		/*
		 * Cache hit with no references.  Take
		 * the structure off the free list.
		 */
		if (dq->dq_cnt++ == 0) {
			if (dq->dq_flags & DQ_MOD)
				TAILQ_REMOVE(&dqdirtylist, dq, dq_freelist);
			else
				TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		}
		dq_unlock_internal(dq);

		if (fdq != NULL) {
			/*
			 * we grabbed this from the free list in the first pass
			 * but we found the dq we were looking for in
			 * the cache the 2nd time through
			 * so stick it back on the free list and return the cached entry
			 */
			TAILQ_INSERT_HEAD(&dqfreelist, fdq, dq_freelist);
		}
		qf_rele(qfp);
		dq_list_unlock();

		if (ndq != NULL) {
			/*
			 * we allocated this in the first pass
			 * but we found the dq we were looking for in
			 * the cache the 2nd time through so free it
			 */
			_FREE(ndq, M_DQUOT);
		}
		*dqp = dq;

		return (0);
	}
	/*
	 * Not in cache, allocate a new one.
	 */
	if (TAILQ_EMPTY(&dqfreelist) &&
	    numdquot < MAXQUOTAS * desiredvnodes)
		desireddquot += DQUOTINC;

	if (fdq != NULL) {
		/*
		 * we captured this from the free list
		 * in the first pass through, so go
		 * ahead and use it
		 */
		dq = fdq;
		fdq = NULL;
	} else if (numdquot < desireddquot) {
		if (ndq == NULL) {
			/*
			 * drop the quota list lock since MALLOC may block
			 */
			dq_list_unlock();

			ndq = (struct dquot *)_MALLOC(sizeof *dq, M_DQUOT, M_WAITOK);
			bzero((char *)ndq, sizeof *dq);

			dq_list_lock();
			/*
			 * need to look for the entry again in the cache
			 * since we dropped the quota list lock and
			 * someone else may have beaten us to creating it
			 */
			goto relookup;
		} else {
			/*
			 * we allocated this in the first pass through
			 * and we're still under out target, so go
			 * ahead and use it
			 */
			dq = ndq;
			ndq = NULL;
			numdquot++;
		}
	} else {
		if (TAILQ_EMPTY(&dqfreelist)) {
			qf_rele(qfp);
			dq_list_unlock();

			if (ndq) {
				/*
				 * we allocated this in the first pass through
				 * but we're now at the limit of our cache size
				 * so free it
				 */
				_FREE(ndq, M_DQUOT);
			}
			tablefull("dquot");
			*dqp = NODQUOT;
			return (EUSERS);
		}
		dq = TAILQ_FIRST(&dqfreelist);

		dq_lock_internal(dq);

		if (dq->dq_cnt || (dq->dq_flags & DQ_MOD)) {
			/*
			 * we lost the race while we weren't holding
			 * the quota list lock... dq_lock_internal
			 * will drop it to msleep... this dq has been
			 * reclaimed... go find another
			 */
			dq_unlock_internal(dq);

			/*
			 * need to look for the entry again in the cache
			 * since we dropped the quota list lock and
			 * someone else may have beaten us to creating it
			 */
			goto relookup;
		}
		TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);

		if (dq->dq_qfile != NULL) {
			/* evict the recycled dquot's old identity from the hash */
			LIST_REMOVE(dq, dq_hash);
			dq->dq_qfile = NULL;
			dq->dq_id = 0;
		}
		dq_unlock_internal(dq);

		/*
		 * because we may have dropped the quota list lock
		 * in the call to dq_lock_internal, we need to
		 * relookup in the hash in case someone else
		 * caused a dq with this identity to be created...
		 * if we don't find it, we'll use this one
		 */
		fdq = dq;
		goto relookup;
	}
	/*
	 * we've either freshly allocated a dq
	 * or we've atomically pulled it out of
	 * the hash and freelists... no one else
	 * can have a reference, which means no
	 * one else can be trying to use this dq
	 */
	dq_lock_internal(dq);

	/*
	 * Initialize the contents of the dquot structure.
	 */
	dq->dq_cnt = 1;
	dq->dq_flags = 0;
	dq->dq_id = id;
	dq->dq_qfile = qfp;
	dq->dq_type = type;
	/*
	 * once we insert it in the hash and
	 * drop the quota_list_lock, it can be
	 * 'found'... however, we're still holding
	 * the dq_lock which will keep us from doing
	 * anything with it until we've finished
	 * initializing it...
	 */
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	dq_list_unlock();

	if (ndq) {
		/*
		 * we allocated this in the first pass through
		 * but we didn't need it, so free it after
		 * we've droped the quota list lock
		 */
		_FREE(ndq, M_DQUOT);
	}

	/* read (or reserve) the on-disk record for this id */
	error = dqlookup(qfp, id, &dq->dq_dqb, &dq->dq_index);

	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		dq_list_lock();

		dq->dq_id = 0;
		dq->dq_qfile = NULL;
		LIST_REMOVE(dq, dq_hash);

		dq_unlock_internal(dq);
		qf_rele(qfp);
		dq_list_unlock();

		dqrele(dq);

		*dqp = NODQUOT;
		return (error);
	}
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	if (dq->dq_id != 0) {
		struct timeval tv;

		microtime(&tv);
		if (dq->dq_btime == 0)
			dq->dq_btime = tv.tv_sec + qfp->qf_btime;
		if (dq->dq_itime == 0)
			dq->dq_itime = tv.tv_sec + qfp->qf_itime;
	}
	dq_list_lock();
	dq_unlock_internal(dq);
	qf_rele(qfp);
	dq_list_unlock();

	*dqp = dq;
	return (0);
}
738
739 /*
740 * Lookup a dqblk structure for the specified identifier and
741 * quota file. If there is no entry for this identifier then
742 * one is inserted. The actual hash table index is returned.
743 */
744 static int
745 dqlookup(qfp, id, dqb, index)
746 struct quotafile *qfp;
747 u_long id;
748 struct dqblk *dqb;
749 u_int32_t *index;
750 {
751 struct vnode *dqvp;
752 struct vfs_context context;
753 uio_t auio;
754 int i, skip, last;
755 u_long mask;
756 int error = 0;
757 char uio_buf[ UIO_SIZEOF(1) ];
758
759
760 qf_lock(qfp);
761
762 dqvp = qfp->qf_vp;
763
764 context.vc_proc = current_proc();
765 context.vc_ucred = qfp->qf_cred;
766
767 mask = qfp->qf_maxentries - 1;
768 i = dqhash1(id, qfp->qf_shift, mask);
769 skip = dqhash2(id, mask);
770
771 for (last = (i + (qfp->qf_maxentries-1) * skip) & mask;
772 i != last;
773 i = (i + skip) & mask) {
774 auio = uio_createwithbuffer(1, dqoffset(i), UIO_SYSSPACE, UIO_READ,
775 &uio_buf[0], sizeof(uio_buf));
776 uio_addiov(auio, CAST_USER_ADDR_T(dqb), sizeof (struct dqblk));
777 error = VNOP_READ(dqvp, auio, 0, &context);
778 if (error) {
779 printf("dqlookup: error %d looking up id %d at index %d\n", error, id, i);
780 break;
781 } else if (uio_resid(auio)) {
782 error = EIO;
783 printf("dqlookup: error looking up id %d at index %d\n", id, i);
784 break;
785 }
786 /*
787 * An empty entry means there is no entry
788 * with that id. In this case a new dqb
789 * record will be inserted.
790 */
791 if (dqb->dqb_id == 0) {
792 bzero(dqb, sizeof(struct dqblk));
793 dqb->dqb_id = id;
794 /*
795 * Write back to reserve entry for this id
796 */
797 uio_reset(auio, dqoffset(i), UIO_SYSSPACE, UIO_WRITE);
798 uio_addiov(auio, CAST_USER_ADDR_T(dqb), sizeof (struct dqblk));
799 error = VNOP_WRITE(dqvp, auio, 0, &context);
800 if (uio_resid(auio) && error == 0)
801 error = EIO;
802 if (error == 0)
803 ++qfp->qf_entrycnt;
804 break;
805 }
806 /* An id match means an entry was found. */
807 if (dqb->dqb_id == id)
808 break;
809 }
810 qf_unlock(qfp);
811
812 *index = i; /* remember index so we don't have to recompute it later */
813
814 return (error);
815 }
816
817
/*
 * Release a reference to a dquot.
 * On the last release, a modified dquot is flushed to the quota
 * file (dqsync_locked) before being placed on the free list.
 */
void
dqrele(struct dquot *dq)
{

	if (dq == NODQUOT)
		return;
	dqlock(dq);

	if (dq->dq_cnt > 1) {
		/* not the last reference; just drop the count */
		dq->dq_cnt--;

		dqunlock(dq);
		return;
	}
	/* last reference: push any pending changes out to disk */
	if (dq->dq_flags & DQ_MOD)
		(void) dqsync_locked(dq);
	dq->dq_cnt--;

	dq_list_lock();
	TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
	dq_unlock_internal(dq);
	dq_list_unlock();
}
844
/*
 * Release a reference to a dquot but don't do any I/O.
 * On the last release a still-modified (DQ_MOD) dquot is queued on
 * the dirty orphans list for dqsync_orphans() to flush later;
 * otherwise it goes straight to the free list.
 */
void
dqreclaim(register struct dquot *dq)
{

	if (dq == NODQUOT)
		return;

	dq_list_lock();
	dq_lock_internal(dq);

	if (--dq->dq_cnt > 0) {
		/* other holders remain; nothing more to do */
		dq_unlock_internal(dq);
		dq_list_unlock();
		return;
	}
	if (dq->dq_flags & DQ_MOD)
		TAILQ_INSERT_TAIL(&dqdirtylist, dq, dq_freelist);
	else
		TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);

	dq_unlock_internal(dq);
	dq_list_unlock();
}
871
/*
 * Update a quota file's orphaned disk quotas.
 *
 * Walks the global dirty list and, for each zero-reference dquot
 * belonging to qfp, pulls it off the dirty list, flushes it with
 * dqsync_locked() (with the quota list lock dropped around the I/O),
 * then moves it to the free list.  The scan restarts from the head
 * whenever the quota list lock may have been dropped, since the
 * list can change underneath us.
 */
void
dqsync_orphans(qfp)
	struct quotafile *qfp;
{
	struct dquot *dq;

	dq_list_lock();
  loop:
	TAILQ_FOREACH(dq, &dqdirtylist, dq_freelist) {
		if (dq->dq_qfile != qfp)
			continue;

		dq_lock_internal(dq);

		if (dq->dq_qfile != qfp) {
			/*
			 * the identity of this dq changed while
			 * the quota_list_lock was dropped
			 * dq_lock_internal can drop it to msleep
			 */
			dq_unlock_internal(dq);
			goto loop;
		}
		if ((dq->dq_flags & DQ_MOD) == 0) {
			/*
			 * someone cleaned and removed this from
			 * the dq from the dirty list while the
			 * quota_list_lock was dropped
			 */
			dq_unlock_internal(dq);
			goto loop;
		}
		if (dq->dq_cnt != 0)
			panic("dqsync_orphans: dquot in use");

		TAILQ_REMOVE(&dqdirtylist, dq, dq_freelist);

		dq_list_unlock();
		/*
		 * we're still holding the dqlock at this point
		 * with the reference count == 0
		 * we shouldn't be able
		 * to pick up another one since we hold dqlock
		 */
		(void) dqsync_locked(dq);

		dq_list_lock();

		TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);

		dq_unlock_internal(dq);
		goto loop;
	}
	dq_list_unlock();
}
930
931 int
932 dqsync(struct dquot *dq)
933 {
934 int error = 0;
935
936 if (dq != NODQUOT) {
937 dqlock(dq);
938
939 if ( (dq->dq_flags & DQ_MOD) )
940 error = dqsync_locked(dq);
941
942 dqunlock(dq);
943 }
944 return (error);
945 }
946
947
/*
 * Update the disk quota in the quota file.
 *
 * Writes dq->dq_dqb back to its on-disk record (at the index cached
 * by dqlookup) and clears DQ_MOD.  An id of 0 means nothing to
 * write.  Caller must hold the dq lock.
 *
 * Returns 0 on success, EIO on a short write, or an error from
 * VNOP_WRITE.
 */
int
dqsync_locked(struct dquot *dq)
{
	struct proc *p = current_proc();		/* XXX */
	struct vfs_context context;
	struct vnode *dqvp;
	uio_t auio;
	int error;
	char uio_buf[ UIO_SIZEOF(1) ];

	if (dq->dq_id == 0) {
		dq->dq_flags &= ~DQ_MOD;
		return (0);
	}
	if (dq->dq_qfile == NULL)
		panic("dqsync: NULL dq_qfile");
	if ((dqvp = dq->dq_qfile->qf_vp) == NULLVP)
		panic("dqsync: NULL qf_vp");

	auio = uio_createwithbuffer(1, dqoffset(dq->dq_index), UIO_SYSSPACE,
	    UIO_WRITE, &uio_buf[0], sizeof(uio_buf));
	uio_addiov(auio, CAST_USER_ADDR_T(&dq->dq_dqb), sizeof (struct dqblk));

	context.vc_proc = p;
	context.vc_ucred = dq->dq_qfile->qf_cred;

	error = VNOP_WRITE(dqvp, auio, 0, &context);
	if (uio_resid(auio) && error == 0)
		error = EIO;
	dq->dq_flags &= ~DQ_MOD;

	return (error);
}
984
/*
 * Flush all entries from the cache for a particular vnode.
 *
 * Move all dquot's that used to refer to this quota
 * file off their hash chains (they will eventually
 * fall off the head of the free list and be re-used).
 * Panics if any such dquot is still referenced.
 */
void
dqflush(vp)
	register struct vnode *vp;
{
	register struct dquot *dq, *nextdq;
	struct dqhash *dqh;

	dq_list_lock();

	/* walk every hash bucket, highest to lowest */
	for (dqh = &dqhashtbl[dqhash]; dqh >= dqhashtbl; dqh--) {
		for (dq = dqh->lh_first; dq; dq = nextdq) {
			/* save the successor before LIST_REMOVE invalidates dq's links */
			nextdq = dq->dq_hash.le_next;
			if (dq->dq_qfile->qf_vp != vp)
				continue;
			if (dq->dq_cnt)
				panic("dqflush: stray dquot");
			LIST_REMOVE(dq, dq_hash);
			dq->dq_qfile = NULL;
		}
	}
	dq_list_unlock();
}
1015
1016 /*
1017 * LP64 support for munging dqblk structure.
1018 * XXX conversion of user_time_t to time_t loses precision; not an issue for
1019 * XXX us now, since we are only ever setting 32 bits worth of time into it.
1020 */
1021 __private_extern__ void
1022 munge_dqblk(struct dqblk *dqblkp, struct user_dqblk *user_dqblkp, boolean_t to64)
1023 {
1024 if (to64) {
1025 /* munge kernel (32 bit) dqblk into user (64 bit) dqblk */
1026 bcopy((caddr_t)dqblkp, (caddr_t)user_dqblkp, offsetof(struct dqblk, dqb_btime));
1027 user_dqblkp->dqb_id = dqblkp->dqb_id;
1028 user_dqblkp->dqb_itime = dqblkp->dqb_itime;
1029 user_dqblkp->dqb_btime = dqblkp->dqb_btime;
1030 }
1031 else {
1032 /* munge user (64 bit) dqblk into kernel (32 bit) dqblk */
1033 bcopy((caddr_t)user_dqblkp, (caddr_t)dqblkp, offsetof(struct dqblk, dqb_btime));
1034 dqblkp->dqb_id = user_dqblkp->dqb_id;
1035 dqblkp->dqb_itime = user_dqblkp->dqb_itime; /* XXX - lose precision */
1036 dqblkp->dqb_btime = user_dqblkp->dqb_btime; /* XXX - lose precision */
1037 }
1038 }