/*
 * Copyright (c) 2002-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1990, 1993, 1995
 *    The Regents of the University of California. All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Robert Elz at The University of Melbourne.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *    @(#)vfs_quota.c
 *    derived from @(#)ufs_quota.c    8.5 (Berkeley) 5/20/95
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/file_internal.h>
#include <sys/proc_internal.h>
#include <sys/vnode_internal.h>
#include <sys/mount_internal.h>
#include <sys/quota.h>
#include <sys/uio_internal.h>


/* vars for quota file lock */
lck_grp_t * qf_lck_grp;
lck_grp_attr_t * qf_lck_grp_attr;
lck_attr_t * qf_lck_attr;

/* vars for quota list lock */
lck_grp_t * quota_list_lck_grp;
lck_grp_attr_t * quota_list_lck_grp_attr;
lck_attr_t * quota_list_lck_attr;
lck_mtx_t * quota_list_mtx_lock;

/* Routines to lock and unlock the quota global data */
static void dq_list_lock(void);
static void dq_list_unlock(void);

static void dq_lock_internal(struct dquot *dq);
static void dq_unlock_internal(struct dquot *dq);

static u_int32_t quotamagic[MAXQUOTAS] = INITQMAGICS;


/*
 * Code pertaining to management of the in-core dquot data structures.
 */
#define DQHASH(dqvp, id) \
        (&dqhashtbl[((((int)(dqvp)) >> 8) + id) & dqhash])
LIST_HEAD(dqhash, dquot) *dqhashtbl;
u_long dqhash;

#define DQUOTINC 5        /* minimum free dquots desired */
long numdquot, desireddquot = DQUOTINC;

/*
 * Dquot free list.
 */
TAILQ_HEAD(dqfreelist, dquot) dqfreelist;
/*
 * Dquot dirty orphans list
 */
TAILQ_HEAD(dqdirtylist, dquot) dqdirtylist;


static int  dqlookup(struct quotafile *, u_long, struct dqblk *, u_int32_t *);
static int  dqsync_locked(struct dquot *dq);

static void qf_lock(struct quotafile *);
static void qf_unlock(struct quotafile *);
static int  qf_ref(struct quotafile *);
static void qf_rele(struct quotafile *);

/*
 * Initialize the quota system.
 */
void
dqinit()
{

        dqhashtbl = hashinit(desiredvnodes, M_DQUOT, &dqhash);
        TAILQ_INIT(&dqfreelist);
        TAILQ_INIT(&dqdirtylist);

        /*
         * Allocate quota list lock group attribute and group
         */
        quota_list_lck_grp_attr = lck_grp_attr_alloc_init();
        lck_grp_attr_setstat(quota_list_lck_grp_attr);
        quota_list_lck_grp = lck_grp_alloc_init("quota list", quota_list_lck_grp_attr);

        /*
         * Allocate quota list lock attribute
         */
        quota_list_lck_attr = lck_attr_alloc_init();
        //lck_attr_setdebug(quota_list_lck_attr);

        /*
         * Allocate quota list lock
         */
        quota_list_mtx_lock = lck_mtx_alloc_init(quota_list_lck_grp, quota_list_lck_attr);


        /*
         * Allocate quota file lock group attribute and group
         */
        qf_lck_grp_attr = lck_grp_attr_alloc_init();
        lck_grp_attr_setstat(qf_lck_grp_attr);
        qf_lck_grp = lck_grp_alloc_init("quota file", qf_lck_grp_attr);

        /*
         * Allocate quota file lock attribute
         */
        qf_lck_attr = lck_attr_alloc_init();
        //lck_attr_setdebug(qf_lck_attr);
}


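/*
 * Lock/unlock the global quota list data: the dquot hash table,
 * the free and dirty lists, and the per-dquot dq_lflags state.
 */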
void
dq_list_lock(void)
{
        lck_mtx_lock(quota_list_mtx_lock);
}

void
dq_list_unlock(void)
{
        lck_mtx_unlock(quota_list_mtx_lock);
}


/*
 * must be called with the quota_list_lock held
 */
void
dq_lock_internal(struct dquot *dq)
{
        while (dq->dq_lflags & DQ_LLOCK) {
                dq->dq_lflags |= DQ_LWANT;
                msleep(&dq->dq_lflags, quota_list_mtx_lock, PVFS, "dq_lock_internal", 0);
        }
        dq->dq_lflags |= DQ_LLOCK;
}

/*
 * must be called with the quota_list_lock held
 */
void
dq_unlock_internal(struct dquot *dq)
{
        int wanted = dq->dq_lflags & DQ_LWANT;

        dq->dq_lflags &= ~(DQ_LLOCK | DQ_LWANT);

        if (wanted)
                wakeup(&dq->dq_lflags);
}

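/*
 * Lock/unlock an individual dquot; the per-dquot lock state lives
 * in dq_lflags and is manipulated under the quota list mutex.
 */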
void
dqlock(struct dquot *dq)
{
        lck_mtx_lock(quota_list_mtx_lock);

        dq_lock_internal(dq);

        lck_mtx_unlock(quota_list_mtx_lock);
}

void
dqunlock(struct dquot *dq)
{
        lck_mtx_lock(quota_list_mtx_lock);

        dq_unlock_internal(dq);

        lck_mtx_unlock(quota_list_mtx_lock);
}


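/*
 * Serialize quota file opens (quota_on) and closes (quota_off):
 * wait for a conflicting transition to finish and for outstanding
 * references to drain, or return EBUSY if the requested transition
 * is already in progress or the file is already in that state.
 */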
int
qf_get(struct quotafile *qfp, int type)
{
        int error = 0;

        dq_list_lock();

        switch (type) {

        case QTF_OPENING:
                while ( (qfp->qf_qflags & (QTF_OPENING | QTF_CLOSING)) ) {
                        if ( (qfp->qf_qflags & QTF_OPENING) ) {
                                error = EBUSY;
                                break;
                        }
                        if ( (qfp->qf_qflags & QTF_CLOSING) ) {
                                qfp->qf_qflags |= QTF_WANTED;
                                msleep(&qfp->qf_qflags, quota_list_mtx_lock, PVFS, "qf_get", 0);
                        }
                }
                if (qfp->qf_vp != NULLVP)
                        error = EBUSY;
                if (error == 0)
                        qfp->qf_qflags |= QTF_OPENING;
                break;

        case QTF_CLOSING:
                if ( (qfp->qf_qflags & QTF_CLOSING) ) {
                        error = EBUSY;
                        break;
                }
                qfp->qf_qflags |= QTF_CLOSING;

                while ( (qfp->qf_qflags & QTF_OPENING) || qfp->qf_refcnt ) {
                        qfp->qf_qflags |= QTF_WANTED;
                        msleep(&qfp->qf_qflags, quota_list_mtx_lock, PVFS, "qf_get", 0);
                }
                if (qfp->qf_vp == NULLVP) {
                        qfp->qf_qflags &= ~QTF_CLOSING;
                        error = EBUSY;
                }
                break;
        }
        dq_list_unlock();

        return (error);
}

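/*
 * Mark an open or close transition as complete and wake up
 * anyone waiting on the quota file state.
 */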
void
qf_put(struct quotafile *qfp, int type)
{

        dq_list_lock();

        switch (type) {

        case QTF_OPENING:
        case QTF_CLOSING:
                qfp->qf_qflags &= ~type;
                break;
        }
        if ( (qfp->qf_qflags & QTF_WANTED) ) {
                qfp->qf_qflags &= ~QTF_WANTED;
                wakeup(&qfp->qf_qflags);
        }
        dq_list_unlock();
}


static void
qf_lock(struct quotafile *qfp)
{
        lck_mtx_lock(&qfp->qf_lock);
}

static void
qf_unlock(struct quotafile *qfp)
{
        lck_mtx_unlock(&qfp->qf_lock);
}


/*
 * take a reference on the quota file while we're
 * in dqget... this will prevent a quota_off from
 * occurring while we're potentially playing with
 * the quota file... the quota_off will stall until
 * all the current references 'die'... once we start
 * into quota_off, all new references will be rejected.
 * we also don't want any dqgets being processed while
 * we're in the middle of the quota_on... once we've
 * actually got the quota file open and the associated
 * struct quotafile inited, we can let them come through
 *
 * quota list lock must be held on entry
 */
static int
qf_ref(struct quotafile *qfp)
{
        int error = 0;

        if ( (qfp->qf_qflags & (QTF_OPENING | QTF_CLOSING)) || (qfp->qf_vp == NULLVP) )
                error = EINVAL;
        else
                qfp->qf_refcnt++;

        return (error);
}

/*
 * drop our reference and wakeup any waiters if
 * we were the last one holding a ref
 *
 * quota list lock must be held on entry
 */
static void
qf_rele(struct quotafile *qfp)
{
        qfp->qf_refcnt--;

        if ( (qfp->qf_qflags & QTF_WANTED) && qfp->qf_refcnt == 0) {
                qfp->qf_qflags &= ~QTF_WANTED;
                wakeup(&qfp->qf_qflags);
        }
}


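/*
 * Initialize a quotafile structure; no quota file is open yet.
 */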
void
dqfileinit(struct quotafile *qfp)
{
        qfp->qf_vp = NULLVP;
        qfp->qf_qflags = 0;

        lck_mtx_init(&qfp->qf_lock, qf_lck_grp, qf_lck_attr);
}


/*
 * Initialize a quota file
 *
 * must be called with the quota file lock held
 */
int
dqfileopen(qfp, type)
        struct quotafile *qfp;
        int type;
{
        struct dqfilehdr header;
        struct vfs_context context;
        off_t file_size;
        uio_t auio;
        int error = 0;
        char uio_buf[ UIO_SIZEOF(1) ];

        context.vc_proc = current_proc();
        context.vc_ucred = qfp->qf_cred;

        /* Obtain the file size */
        if ((error = vnode_size(qfp->qf_vp, &file_size, &context)) != 0)
                goto out;

        /* Read the file header */
        auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
            &uio_buf[0], sizeof(uio_buf));
        uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof (header));
        error = VNOP_READ(qfp->qf_vp, auio, 0, &context);
        if (error)
                goto out;
        else if (uio_resid(auio)) {
                error = EINVAL;
                goto out;
        }
        /* Sanity check the quota file header. */
        if ((header.dqh_magic != quotamagic[type]) ||
            (header.dqh_version > QF_VERSION) ||
            (!powerof2(header.dqh_maxentries)) ||
            (header.dqh_maxentries > (file_size / sizeof(struct dqblk)))) {
                error = EINVAL;
                goto out;
        }
        /* Set up the time limits for this quota. */
        if (header.dqh_btime > 0)
                qfp->qf_btime = header.dqh_btime;
        else
                qfp->qf_btime = MAX_DQ_TIME;
        if (header.dqh_itime > 0)
                qfp->qf_itime = header.dqh_itime;
        else
                qfp->qf_itime = MAX_IQ_TIME;

        /* Calculate the hash table constants. */
        qfp->qf_maxentries = header.dqh_maxentries;
        qfp->qf_entrycnt = header.dqh_entrycnt;
        qfp->qf_shift = dqhashshift(header.dqh_maxentries);
out:
        return (error);
}

/*
 * Close down a quota file
 */
void
dqfileclose(struct quotafile *qfp, __unused int type)
{
        struct dqfilehdr header;
        struct vfs_context context;
        uio_t auio;
        char uio_buf[ UIO_SIZEOF(1) ];

        auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
            &uio_buf[0], sizeof(uio_buf));
        uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof (header));

        context.vc_proc = current_proc();
        context.vc_ucred = qfp->qf_cred;

        if (VNOP_READ(qfp->qf_vp, auio, 0, &context) == 0) {
                header.dqh_entrycnt = qfp->qf_entrycnt;
                uio_reset(auio, 0, UIO_SYSSPACE, UIO_WRITE);
                uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof (header));
                (void) VNOP_WRITE(qfp->qf_vp, auio, 0, &context);
        }
}


/*
 * Obtain a dquot structure for the specified identifier and quota file,
 * reading the information from the file if necessary.
 */
int
dqget(id, qfp, type, dqp)
        u_long id;
        struct quotafile *qfp;
        register int type;
        struct dquot **dqp;
{
        struct dquot *dq;
        struct dquot *ndq = NULL;
        struct dquot *fdq = NULL;
        struct dqhash *dqh;
        struct vnode *dqvp;
        int error = 0;

        if ( id == 0 || qfp->qf_vp == NULLVP ) {
                *dqp = NODQUOT;
                return (EINVAL);
        }
        dq_list_lock();

        if ( (qf_ref(qfp)) ) {
                dq_list_unlock();

                *dqp = NODQUOT;
                return (EINVAL);
        }
        if ( (dqvp = qfp->qf_vp) == NULLVP ) {
                qf_rele(qfp);
                dq_list_unlock();

                *dqp = NODQUOT;
                return (EINVAL);
        }
        dqh = DQHASH(dqvp, id);

relookup:
        /*
         * Check the cache first.
         */
        for (dq = dqh->lh_first; dq; dq = dq->dq_hash.le_next) {
                if (dq->dq_id != id ||
                    dq->dq_qfile->qf_vp != dqvp)
                        continue;

                dq_lock_internal(dq);
                /*
                 * dq_lock_internal may drop the quota_list_lock to msleep, so
                 * we need to re-evaluate the identity of this dq
                 */
                if (dq->dq_id != id || dq->dq_qfile == NULL ||
                    dq->dq_qfile->qf_vp != dqvp) {
                        dq_unlock_internal(dq);
                        goto relookup;
                }
                /*
                 * Cache hit with no references. Take
                 * the structure off the free list.
                 */
                if (dq->dq_cnt++ == 0) {
                        if (dq->dq_flags & DQ_MOD)
                                TAILQ_REMOVE(&dqdirtylist, dq, dq_freelist);
                        else
                                TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
                }
                dq_unlock_internal(dq);

                if (fdq != NULL) {
                        /*
                         * we grabbed this from the free list in the first pass
                         * but we found the dq we were looking for in
                         * the cache the 2nd time through
                         * so stick it back on the free list and return the cached entry
                         */
                        TAILQ_INSERT_HEAD(&dqfreelist, fdq, dq_freelist);
                }
                qf_rele(qfp);
                dq_list_unlock();

                if (ndq != NULL) {
                        /*
                         * we allocated this in the first pass
                         * but we found the dq we were looking for in
                         * the cache the 2nd time through so free it
                         */
                        _FREE(ndq, M_DQUOT);
                }
                *dqp = dq;

                return (0);
        }
        /*
         * Not in cache, allocate a new one.
         */
        if (TAILQ_EMPTY(&dqfreelist) &&
            numdquot < MAXQUOTAS * desiredvnodes)
                desireddquot += DQUOTINC;

        if (fdq != NULL) {
                /*
                 * we captured this from the free list
                 * in the first pass through, so go
                 * ahead and use it
                 */
                dq = fdq;
                fdq = NULL;
        } else if (numdquot < desireddquot) {
                if (ndq == NULL) {
                        /*
                         * drop the quota list lock since MALLOC may block
                         */
                        dq_list_unlock();

                        ndq = (struct dquot *)_MALLOC(sizeof *dq, M_DQUOT, M_WAITOK);
                        bzero((char *)ndq, sizeof *dq);

                        dq_list_lock();
                        /*
                         * need to look for the entry again in the cache
                         * since we dropped the quota list lock and
                         * someone else may have beaten us to creating it
                         */
                        goto relookup;
                } else {
                        /*
                         * we allocated this in the first pass through
                         * and we're still under our target, so go
                         * ahead and use it
                         */
                        dq = ndq;
                        ndq = NULL;
                        numdquot++;
                }
        } else {
                if (TAILQ_EMPTY(&dqfreelist)) {
                        qf_rele(qfp);
                        dq_list_unlock();

                        if (ndq) {
                                /*
                                 * we allocated this in the first pass through
                                 * but we're now at the limit of our cache size
                                 * so free it
                                 */
                                _FREE(ndq, M_DQUOT);
                        }
                        tablefull("dquot");
                        *dqp = NODQUOT;
                        return (EUSERS);
                }
                dq = TAILQ_FIRST(&dqfreelist);

                dq_lock_internal(dq);

                if (dq->dq_cnt || (dq->dq_flags & DQ_MOD)) {
                        /*
                         * we lost the race while we weren't holding
                         * the quota list lock... dq_lock_internal
                         * will drop it to msleep... this dq has been
                         * reclaimed... go find another
                         */
                        dq_unlock_internal(dq);

                        /*
                         * need to look for the entry again in the cache
                         * since we dropped the quota list lock and
                         * someone else may have beaten us to creating it
                         */
                        goto relookup;
                }
                TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);

                if (dq->dq_qfile != NULL) {
                        LIST_REMOVE(dq, dq_hash);
                        dq->dq_qfile = NULL;
                        dq->dq_id = 0;
                }
                dq_unlock_internal(dq);

                /*
                 * because we may have dropped the quota list lock
                 * in the call to dq_lock_internal, we need to
                 * relookup in the hash in case someone else
                 * caused a dq with this identity to be created...
                 * if we don't find it, we'll use this one
                 */
                fdq = dq;
                goto relookup;
        }
        /*
         * we've either freshly allocated a dq
         * or we've atomically pulled it out of
         * the hash and freelists... no one else
         * can have a reference, which means no
         * one else can be trying to use this dq
         */
        dq_lock_internal(dq);

        /*
         * Initialize the contents of the dquot structure.
         */
        dq->dq_cnt = 1;
        dq->dq_flags = 0;
        dq->dq_id = id;
        dq->dq_qfile = qfp;
        dq->dq_type = type;
        /*
         * once we insert it in the hash and
         * drop the quota_list_lock, it can be
         * 'found'... however, we're still holding
         * the dq_lock which will keep us from doing
         * anything with it until we've finished
         * initializing it...
         */
        LIST_INSERT_HEAD(dqh, dq, dq_hash);
        dq_list_unlock();

        if (ndq) {
                /*
                 * we allocated this in the first pass through
                 * but we didn't need it, so free it after
                 * we've dropped the quota list lock
                 */
                _FREE(ndq, M_DQUOT);
        }

        error = dqlookup(qfp, id, &dq->dq_dqb, &dq->dq_index);

        /*
         * I/O error in reading quota file, release
         * quota structure and reflect problem to caller.
         */
        if (error) {
                dq_list_lock();

                dq->dq_id = 0;
                dq->dq_qfile = NULL;
                LIST_REMOVE(dq, dq_hash);

                dq_unlock_internal(dq);
                qf_rele(qfp);
                dq_list_unlock();

                dqrele(dq);

                *dqp = NODQUOT;
                return (error);
        }
        /*
         * Check for no limit to enforce.
         * Initialize time values if necessary.
         */
        if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
            dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
                dq->dq_flags |= DQ_FAKE;
        if (dq->dq_id != 0) {
                struct timeval tv;

                microtime(&tv);
                if (dq->dq_btime == 0)
                        dq->dq_btime = tv.tv_sec + qfp->qf_btime;
                if (dq->dq_itime == 0)
                        dq->dq_itime = tv.tv_sec + qfp->qf_itime;
        }
        dq_list_lock();
        dq_unlock_internal(dq);
        qf_rele(qfp);
        dq_list_unlock();

        *dqp = dq;
        return (0);
}

/*
 * Lookup a dqblk structure for the specified identifier and
 * quota file. If there is no entry for this identifier then
 * one is inserted. The actual hash table index is returned.
 */
static int
dqlookup(qfp, id, dqb, index)
        struct quotafile *qfp;
        u_long id;
        struct dqblk *dqb;
        u_int32_t *index;
{
        struct vnode *dqvp;
        struct vfs_context context;
        uio_t auio;
        int i, skip, last;
        u_long mask;
        int error = 0;
        char uio_buf[ UIO_SIZEOF(1) ];


        qf_lock(qfp);

        dqvp = qfp->qf_vp;

        context.vc_proc = current_proc();
        context.vc_ucred = qfp->qf_cred;

        mask = qfp->qf_maxentries - 1;
        i = dqhash1(id, qfp->qf_shift, mask);
        skip = dqhash2(id, mask);

        for (last = (i + (qfp->qf_maxentries-1) * skip) & mask;
             i != last;
             i = (i + skip) & mask) {
                auio = uio_createwithbuffer(1, dqoffset(i), UIO_SYSSPACE, UIO_READ,
                    &uio_buf[0], sizeof(uio_buf));
                uio_addiov(auio, CAST_USER_ADDR_T(dqb), sizeof (struct dqblk));
                error = VNOP_READ(dqvp, auio, 0, &context);
                if (error) {
                        printf("dqlookup: error %d looking up id %lu at index %d\n", error, id, i);
                        break;
                } else if (uio_resid(auio)) {
                        error = EIO;
                        printf("dqlookup: error looking up id %lu at index %d\n", id, i);
                        break;
                }
                /*
                 * An empty entry means there is no entry
                 * with that id. In this case a new dqb
                 * record will be inserted.
                 */
                if (dqb->dqb_id == 0) {
                        bzero(dqb, sizeof(struct dqblk));
                        dqb->dqb_id = id;
                        /*
                         * Write back to reserve entry for this id
                         */
                        uio_reset(auio, dqoffset(i), UIO_SYSSPACE, UIO_WRITE);
                        uio_addiov(auio, CAST_USER_ADDR_T(dqb), sizeof (struct dqblk));
                        error = VNOP_WRITE(dqvp, auio, 0, &context);
                        if (uio_resid(auio) && error == 0)
                                error = EIO;
                        if (error == 0)
                                ++qfp->qf_entrycnt;
                        break;
                }
                /* An id match means an entry was found. */
                if (dqb->dqb_id == id)
                        break;
        }
        qf_unlock(qfp);

        *index = i;        /* remember index so we don't have to recompute it later */

        return (error);
}


/*
 * Release a reference to a dquot.
 */
void
dqrele(struct dquot *dq)
{

        if (dq == NODQUOT)
                return;
        dqlock(dq);

        if (dq->dq_cnt > 1) {
                dq->dq_cnt--;

                dqunlock(dq);
                return;
        }
        if (dq->dq_flags & DQ_MOD)
                (void) dqsync_locked(dq);
        dq->dq_cnt--;

        dq_list_lock();
        TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
        dq_unlock_internal(dq);
        dq_list_unlock();
}

/*
 * Release a reference to a dquot but don't do any I/O.
 */
void
dqreclaim(register struct dquot *dq)
{

        if (dq == NODQUOT)
                return;

        dq_list_lock();
        dq_lock_internal(dq);

        if (--dq->dq_cnt > 0) {
                dq_unlock_internal(dq);
                dq_list_unlock();
                return;
        }
        if (dq->dq_flags & DQ_MOD)
                TAILQ_INSERT_TAIL(&dqdirtylist, dq, dq_freelist);
        else
                TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);

        dq_unlock_internal(dq);
        dq_list_unlock();
}

/*
 * Update a quota file's orphaned disk quotas.
 */
void
dqsync_orphans(qfp)
        struct quotafile *qfp;
{
        struct dquot *dq;

        dq_list_lock();
loop:
        TAILQ_FOREACH(dq, &dqdirtylist, dq_freelist) {
                if (dq->dq_qfile != qfp)
                        continue;

                dq_lock_internal(dq);

                if (dq->dq_qfile != qfp) {
                        /*
                         * the identity of this dq changed while
                         * the quota_list_lock was dropped...
                         * dq_lock_internal can drop it to msleep
                         */
                        dq_unlock_internal(dq);
                        goto loop;
                }
                if ((dq->dq_flags & DQ_MOD) == 0) {
                        /*
                         * someone cleaned this dq and removed it
                         * from the dirty list while the
                         * quota_list_lock was dropped
                         */
                        dq_unlock_internal(dq);
                        goto loop;
                }
                if (dq->dq_cnt != 0)
                        panic("dqsync_orphans: dquot in use");

                TAILQ_REMOVE(&dqdirtylist, dq, dq_freelist);

                dq_list_unlock();
                /*
                 * we're still holding the dq lock at this point
                 * with the reference count == 0...
                 * no one else should be able to pick up a
                 * reference to this dq while we hold its lock
                 */
                (void) dqsync_locked(dq);

                dq_list_lock();

                TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);

                dq_unlock_internal(dq);
                goto loop;
        }
        dq_list_unlock();
}

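/*
 * Write the dquot back to its quota file if it has been modified.
 */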
int
dqsync(struct dquot *dq)
{
        int error = 0;

        if (dq != NODQUOT) {
                dqlock(dq);

                if ( (dq->dq_flags & DQ_MOD) )
                        error = dqsync_locked(dq);

                dqunlock(dq);
        }
        return (error);
}


/*
 * Update the disk quota in the quota file.
 */
int
dqsync_locked(struct dquot *dq)
{
        struct proc *p = current_proc();        /* XXX */
        struct vfs_context context;
        struct vnode *dqvp;
        uio_t auio;
        int error;
        char uio_buf[ UIO_SIZEOF(1) ];

        if (dq->dq_id == 0) {
                dq->dq_flags &= ~DQ_MOD;
                return (0);
        }
        if (dq->dq_qfile == NULL)
                panic("dqsync: NULL dq_qfile");
        if ((dqvp = dq->dq_qfile->qf_vp) == NULLVP)
                panic("dqsync: NULL qf_vp");

        auio = uio_createwithbuffer(1, dqoffset(dq->dq_index), UIO_SYSSPACE,
            UIO_WRITE, &uio_buf[0], sizeof(uio_buf));
        uio_addiov(auio, CAST_USER_ADDR_T(&dq->dq_dqb), sizeof (struct dqblk));

        context.vc_proc = p;
        context.vc_ucred = dq->dq_qfile->qf_cred;

        error = VNOP_WRITE(dqvp, auio, 0, &context);
        if (uio_resid(auio) && error == 0)
                error = EIO;
        dq->dq_flags &= ~DQ_MOD;

        return (error);
}

/*
 * Flush all entries from the cache for a particular vnode.
 */
void
dqflush(vp)
        register struct vnode *vp;
{
        register struct dquot *dq, *nextdq;
        struct dqhash *dqh;

        /*
         * Move all dquot's that used to refer to this quota
         * file off their hash chains (they will eventually
         * fall off the head of the free list and be re-used).
         */
        dq_list_lock();

        for (dqh = &dqhashtbl[dqhash]; dqh >= dqhashtbl; dqh--) {
                for (dq = dqh->lh_first; dq; dq = nextdq) {
                        nextdq = dq->dq_hash.le_next;
                        if (dq->dq_qfile->qf_vp != vp)
                                continue;
                        if (dq->dq_cnt)
                                panic("dqflush: stray dquot");
                        LIST_REMOVE(dq, dq_hash);
                        dq->dq_qfile = NULL;
                }
        }
        dq_list_unlock();
}

/*
 * LP64 support for munging dqblk structure.
 * XXX conversion of user_time_t to time_t loses precision; not an issue for
 * XXX us now, since we are only ever setting 32 bits worth of time into it.
 */
__private_extern__ void
munge_dqblk(struct dqblk *dqblkp, struct user_dqblk *user_dqblkp, boolean_t to64)
{
        if (to64) {
                /* munge kernel (32 bit) dqblk into user (64 bit) dqblk */
                bcopy((caddr_t)dqblkp, (caddr_t)user_dqblkp, offsetof(struct dqblk, dqb_btime));
                user_dqblkp->dqb_id = dqblkp->dqb_id;
                user_dqblkp->dqb_itime = dqblkp->dqb_itime;
                user_dqblkp->dqb_btime = dqblkp->dqb_btime;
        }
        else {
                /* munge user (64 bit) dqblk into kernel (32 bit) dqblk */
                bcopy((caddr_t)user_dqblkp, (caddr_t)dqblkp, offsetof(struct dqblk, dqb_btime));
                dqblkp->dqb_id = user_dqblkp->dqb_id;
                dqblkp->dqb_itime = user_dqblkp->dqb_itime;        /* XXX - lose precision */
                dqblkp->dqb_btime = user_dqblkp->dqb_btime;        /* XXX - lose precision */
        }
}