]> git.saurik.com Git - apple/xnu.git/blob - bsd/vfs/vfs_quota.c
4e95edaa0a464cf30113d1de35cdc4195d083729
[apple/xnu.git] / bsd / vfs / vfs_quota.c
1 /*
2 * Copyright (c) 2002-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1990, 1993, 1995
30 * The Regents of the University of California. All rights reserved.
31 *
32 * This code is derived from software contributed to Berkeley by
33 * Robert Elz at The University of Melbourne.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 * 3. All advertising materials mentioning features or use of this software
44 * must display the following acknowledgement:
45 * This product includes software developed by the University of
46 * California, Berkeley and its contributors.
47 * 4. Neither the name of the University nor the names of its contributors
48 * may be used to endorse or promote products derived from this software
49 * without specific prior written permission.
50 *
51 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
52 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
53 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
54 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
55 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
57 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
59 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61 * SUCH DAMAGE.
62 *
63 * @(#)vfs_quota.c
64 * derived from @(#)ufs_quota.c 8.5 (Berkeley) 5/20/95
65 */
66
67 #include <sys/param.h>
68 #include <sys/kernel.h>
69 #include <sys/systm.h>
70 #include <sys/malloc.h>
71 #include <sys/file_internal.h>
72 #include <sys/proc_internal.h>
73 #include <sys/vnode_internal.h>
74 #include <sys/mount_internal.h>
75 #include <sys/quota.h>
76 #include <sys/uio_internal.h>
77
78 #include <libkern/OSByteOrder.h>
79
80
/* vars for quota file lock (per-quotafile mutex, see qf_lock/qf_unlock) */
lck_grp_t * qf_lck_grp;
lck_grp_attr_t * qf_lck_grp_attr;
lck_attr_t * qf_lck_attr;

/* vars for quota list lock (global mutex guarding the dquot hash and free/dirty lists) */
lck_grp_t * quota_list_lck_grp;
lck_grp_attr_t * quota_list_lck_grp_attr;
lck_attr_t * quota_list_lck_attr;
lck_mtx_t * quota_list_mtx_lock;

/* Routines to lock and unlock the quota global data */
static int dq_list_lock(void);
static void dq_list_unlock(void);

static void dq_lock_internal(struct dquot *dq);
static void dq_unlock_internal(struct dquot *dq);

/* per-quota-type magic numbers expected in the quota file header */
static u_int32_t quotamagic[MAXQUOTAS] = INITQMAGICS;


/*
 * Code pertaining to management of the in-core dquot data structures.
 *
 * The hash key mixes the quota-file vnode pointer with the id so that
 * dquots from different quota files spread across the table.
 */
#define DQHASH(dqvp, id) \
	(&dqhashtbl[((((int)(dqvp)) >> 8) + id) & dqhash])
LIST_HEAD(dqhash, dquot) *dqhashtbl;
u_long dqhash;		/* size of dqhashtbl - 1 (mask) */

#define	DQUOTINC	5	/* minimum free dquots desired */
long numdquot, desireddquot = DQUOTINC;

/*
 * Dquot free list.
 */
TAILQ_HEAD(dqfreelist, dquot) dqfreelist;
/*
 * Dquot dirty orphans list (modified dquots with no references,
 * flushed by dqsync_orphans).
 */
TAILQ_HEAD(dqdirtylist, dquot) dqdirtylist;


static int  dqlookup(struct quotafile *, u_long, struct dqblk *, u_int32_t *);
static int  dqsync_locked(struct dquot *dq);

static void qf_lock(struct quotafile *);
static void qf_unlock(struct quotafile *);
static int  qf_ref(struct quotafile *);
static void qf_rele(struct quotafile *);
130
131
132 /*
133 * Initialize the quota system.
134 */
135 void
136 dqinit()
137 {
138
139 dqhashtbl = hashinit(desiredvnodes, M_DQUOT, &dqhash);
140 TAILQ_INIT(&dqfreelist);
141 TAILQ_INIT(&dqdirtylist);
142
143 /*
144 * Allocate quota list lock group attribute and group
145 */
146 quota_list_lck_grp_attr= lck_grp_attr_alloc_init();
147 quota_list_lck_grp = lck_grp_alloc_init("quota list", quota_list_lck_grp_attr);
148
149 /*
150 * Allocate qouta list lock attribute
151 */
152 quota_list_lck_attr = lck_attr_alloc_init();
153
154 /*
155 * Allocate quota list lock
156 */
157 quota_list_mtx_lock = lck_mtx_alloc_init(quota_list_lck_grp, quota_list_lck_attr);
158
159
160 /*
161 * allocate quota file lock group attribute and group
162 */
163 qf_lck_grp_attr= lck_grp_attr_alloc_init();
164 qf_lck_grp = lck_grp_alloc_init("quota file", qf_lck_grp_attr);
165
166 /*
167 * Allocate quota file lock attribute
168 */
169 qf_lck_attr = lck_attr_alloc_init();
170 }
171
172
/*
 * Generation count for the quota list lock: bumped on every acquisition
 * so that code which may drop the lock (e.g. msleep inside
 * dq_lock_internal) can later detect, via dq_list_lock_changed(), that
 * the lock was released and re-taken and cached list state may be stale.
 */
static volatile int dq_list_lock_cnt = 0;

static int
dq_list_lock(void)
{
	lck_mtx_lock(quota_list_mtx_lock);
	return ++dq_list_lock_cnt;	/* caller's generation snapshot */
}
181
182 static int
183 dq_list_lock_changed(int oldval) {
184 return (dq_list_lock_cnt != oldval);
185 }
186
187 static int
188 dq_list_lock_val(void) {
189 return dq_list_lock_cnt;
190 }
191
/*
 * Drop the global quota list mutex.  (The generation count is bumped
 * only on acquisition, not on release.)
 */
void
dq_list_unlock(void)
{
	lck_mtx_unlock(quota_list_mtx_lock);
}
197
198
/*
 * Acquire the per-dquot lock (DQ_LLOCK in dq_lflags).
 *
 * must be called with the quota_list_lock held; note that msleep drops
 * and re-acquires that mutex, so callers must re-validate any cached
 * list state after this returns (see dq_list_lock_changed).
 */
void
dq_lock_internal(struct dquot *dq)
{
	while (dq->dq_lflags & DQ_LLOCK) {
		dq->dq_lflags |= DQ_LWANT;	/* ask the holder to wake us on unlock */
		msleep(&dq->dq_lflags, quota_list_mtx_lock, PVFS, "dq_lock_internal", 0);
	}
	dq->dq_lflags |= DQ_LLOCK;
}
211
212 /*
213 * must be called with the quota_list_lock held
214 */
215 void
216 dq_unlock_internal(struct dquot *dq)
217 {
218 int wanted = dq->dq_lflags & DQ_LWANT;
219
220 dq->dq_lflags &= ~(DQ_LLOCK | DQ_LWANT);
221
222 if (wanted)
223 wakeup(&dq->dq_lflags);
224 }
225
/*
 * Acquire the per-dquot lock when the quota list lock is NOT already
 * held; briefly takes the list mutex around dq_lock_internal.
 */
void
dqlock(struct dquot *dq) {

	lck_mtx_lock(quota_list_mtx_lock);

	dq_lock_internal(dq);

	lck_mtx_unlock(quota_list_mtx_lock);
}
235
/*
 * Release the per-dquot lock when the quota list lock is NOT already
 * held; briefly takes the list mutex around dq_unlock_internal.
 */
void
dqunlock(struct dquot *dq) {

	lck_mtx_lock(quota_list_mtx_lock);

	dq_unlock_internal(dq);

	lck_mtx_unlock(quota_list_mtx_lock);
}
245
246
247
/*
 * Serialize quota_on (QTF_OPENING) and quota_off (QTF_CLOSING) against
 * each other on a quota file.  An opener waits out an in-flight close
 * and fails with EBUSY if another open is in progress or the file is
 * already open; a closer waits out an in-flight open and all
 * outstanding qf_refs, and fails with EBUSY if another close is in
 * progress or the file was never open.  Pair with qf_put().
 */
int
qf_get(struct quotafile *qfp, int type)
{
	int error = 0;

	dq_list_lock();

	switch (type) {

	case QTF_OPENING:
		while ( (qfp->qf_qflags & (QTF_OPENING | QTF_CLOSING)) ) {
			if ( (qfp->qf_qflags & QTF_OPENING) ) {
				/* another quota_on already in progress */
				error = EBUSY;
				break;
			}
			if ( (qfp->qf_qflags & QTF_CLOSING) ) {
				/* wait for the in-flight quota_off to finish */
				qfp->qf_qflags |= QTF_WANTED;
				msleep(&qfp->qf_qflags, quota_list_mtx_lock, PVFS, "qf_get", 0);
			}
		}
		if (qfp->qf_vp != NULLVP)
			error = EBUSY;		/* quota file already open */
		if (error == 0)
			qfp->qf_qflags |= QTF_OPENING;
		break;

	case QTF_CLOSING:
		if ( (qfp->qf_qflags & QTF_CLOSING) ) {
			/* another quota_off already in progress */
			error = EBUSY;
			break;
		}
		qfp->qf_qflags |= QTF_CLOSING;

		/* wait for any in-flight open and for all references to drain */
		while ( (qfp->qf_qflags & QTF_OPENING) || qfp->qf_refcnt ) {
			qfp->qf_qflags |= QTF_WANTED;
			msleep(&qfp->qf_qflags, quota_list_mtx_lock, PVFS, "qf_get", 0);
		}
		if (qfp->qf_vp == NULLVP) {
			/* nothing to close after all */
			qfp->qf_qflags &= ~QTF_CLOSING;
			error = EBUSY;
		}
		break;
	}
	dq_list_unlock();

	return (error);
}
295
296 void
297 qf_put(struct quotafile *qfp, int type)
298 {
299
300 dq_list_lock();
301
302 switch (type) {
303
304 case QTF_OPENING:
305 case QTF_CLOSING:
306 qfp->qf_qflags &= ~type;
307 break;
308 }
309 if ( (qfp->qf_qflags & QTF_WANTED) ) {
310 qfp->qf_qflags &= ~QTF_WANTED;
311 wakeup(&qfp->qf_qflags);
312 }
313 dq_list_unlock();
314 }
315
316
/* Acquire the per-quotafile mutex (serializes quota file I/O, see dqlookup). */
static void
qf_lock(struct quotafile *qfp)
{
	lck_mtx_lock(&qfp->qf_lock);
}
322
/* Release the per-quotafile mutex. */
static void
qf_unlock(struct quotafile *qfp)
{
	lck_mtx_unlock(&qfp->qf_lock);
}
328
329
330 /*
331 * take a reference on the quota file while we're
332 * in dqget... this will prevent a quota_off from
333 * occurring while we're potentially playing with
334 * the quota file... the quota_off will stall until
335 * all the current references 'die'... once we start
336 * into quoto_off, all new references will be rejected
337 * we also don't want any dqgets being processed while
338 * we're in the middle of the quota_on... once we've
339 * actually got the quota file open and the associated
340 * struct quotafile inited, we can let them come through
341 *
342 * quota list lock must be held on entry
343 */
344 static int
345 qf_ref(struct quotafile *qfp)
346 {
347 int error = 0;
348
349 if ( (qfp->qf_qflags & (QTF_OPENING | QTF_CLOSING)) || (qfp->qf_vp == NULLVP) )
350 error = EINVAL;
351 else
352 qfp->qf_refcnt++;
353
354 return (error);
355 }
356
357 /*
358 * drop our reference and wakeup any waiters if
359 * we were the last one holding a ref
360 *
361 * quota list lock must be held on entry
362 */
363 static void
364 qf_rele(struct quotafile *qfp)
365 {
366 qfp->qf_refcnt--;
367
368 if ( (qfp->qf_qflags & QTF_WANTED) && qfp->qf_refcnt == 0) {
369 qfp->qf_qflags &= ~QTF_WANTED;
370 wakeup(&qfp->qf_qflags);
371 }
372 }
373
374
375 void
376 dqfileinit(struct quotafile *qfp)
377 {
378 qfp->qf_vp = NULLVP;
379 qfp->qf_qflags = 0;
380
381 lck_mtx_init(&qfp->qf_lock, qf_lck_grp, qf_lck_attr);
382 }
383
384
385 /*
386 * Initialize a quota file
387 *
388 * must be called with the quota file lock held
389 */
390 int
391 dqfileopen(qfp, type)
392 struct quotafile *qfp;
393 int type;
394 {
395 struct dqfilehdr header;
396 struct vfs_context context;
397 off_t file_size;
398 uio_t auio;
399 int error = 0;
400 char uio_buf[ UIO_SIZEOF(1) ];
401
402 context.vc_proc = current_proc();
403 context.vc_ucred = qfp->qf_cred;
404
405 /* Obtain the file size */
406 if ((error = vnode_size(qfp->qf_vp, &file_size, &context)) != 0)
407 goto out;
408
409 /* Read the file header */
410 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
411 &uio_buf[0], sizeof(uio_buf));
412 uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof (header));
413 error = VNOP_READ(qfp->qf_vp, auio, 0, &context);
414 if (error)
415 goto out;
416 else if (uio_resid(auio)) {
417 error = EINVAL;
418 goto out;
419 }
420 /* Sanity check the quota file header. */
421 if ((OSSwapBigToHostInt32(header.dqh_magic) != quotamagic[type]) ||
422 (OSSwapBigToHostInt32(header.dqh_version) > QF_VERSION) ||
423 (!powerof2(OSSwapBigToHostInt32(header.dqh_maxentries))) ||
424 (OSSwapBigToHostInt32(header.dqh_maxentries) > (file_size / sizeof(struct dqblk)))) {
425 error = EINVAL;
426 goto out;
427 }
428 /* Set up the time limits for this quota. */
429 if (header.dqh_btime != 0)
430 qfp->qf_btime = OSSwapBigToHostInt32(header.dqh_btime);
431 else
432 qfp->qf_btime = MAX_DQ_TIME;
433 if (header.dqh_itime != 0)
434 qfp->qf_itime = OSSwapBigToHostInt32(header.dqh_itime);
435 else
436 qfp->qf_itime = MAX_IQ_TIME;
437
438 /* Calculate the hash table constants. */
439 qfp->qf_maxentries = OSSwapBigToHostInt32(header.dqh_maxentries);
440 qfp->qf_entrycnt = OSSwapBigToHostInt32(header.dqh_entrycnt);
441 qfp->qf_shift = dqhashshift(qfp->qf_maxentries);
442 out:
443 return (error);
444 }
445
/*
 * Close down a quota file: re-read the on-disk header and write back
 * the current in-core entry count so it persists across mounts.
 * Errors are ignored (best effort); all other header fields are
 * preserved as read.
 */
void
dqfileclose(struct quotafile *qfp, __unused int type)
{
	struct dqfilehdr header;
	struct vfs_context context;
	uio_t auio;
	char uio_buf[ UIO_SIZEOF(1) ];

	auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
	    &uio_buf[0], sizeof(uio_buf));
	uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof (header));

	context.vc_proc = current_proc();
	context.vc_ucred = qfp->qf_cred;

	if (VNOP_READ(qfp->qf_vp, auio, 0, &context) == 0) {
		/* only the entry count changes; write the header back in place */
		header.dqh_entrycnt = OSSwapHostToBigInt32(qfp->qf_entrycnt);
		uio_reset(auio, 0, UIO_SYSSPACE, UIO_WRITE);
		uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof (header));
		(void) VNOP_WRITE(qfp->qf_vp, auio, 0, &context);
	}
}
471
472
/*
 * Obtain a dquot structure for the specified identifier and quota file
 * reading the information from the file if necessary.
 *
 * On success returns 0 with *dqp pointing at a referenced (dq_cnt
 * bumped) in-core dquot; release with dqrele() or dqreclaim().  On
 * failure *dqp is NODQUOT.  The 'relookup' label restarts the cache
 * scan whenever the quota list lock generation changed, since both
 * dq_lock_internal (msleep) and _MALLOC drop the list lock.
 */
int
dqget(id, qfp, type, dqp)
	u_long id;
	struct quotafile *qfp;
	register int type;
	struct dquot **dqp;
{
	struct dquot *dq;
	struct dquot *ndq = NULL;	/* freshly malloc'd candidate */
	struct dquot *fdq = NULL;	/* candidate pulled off the free list */
	struct dqhash *dqh;
	struct vnode *dqvp;
	int error = 0;
	int listlockval = 0;

	if ( id == 0 || qfp->qf_vp == NULLVP ) {
		*dqp = NODQUOT;
		return (EINVAL);
	}
	dq_list_lock();

	if ( (qf_ref(qfp)) ) {
		/* quota file is opening/closing; reject new lookups */
		dq_list_unlock();

		*dqp = NODQUOT;
		return (EINVAL);
	}
	if ( (dqvp = qfp->qf_vp) == NULLVP ) {
		qf_rele(qfp);
		dq_list_unlock();

		*dqp = NODQUOT;
		return (EINVAL);
	}
	dqh = DQHASH(dqvp, id);

relookup:
	listlockval = dq_list_lock_val();

	/*
	 * Check the cache first.
	 */
	for (dq = dqh->lh_first; dq; dq = dq->dq_hash.le_next) {
		if (dq->dq_id != id ||
		    dq->dq_qfile->qf_vp != dqvp)
			continue;

		dq_lock_internal(dq);
		if (dq_list_lock_changed(listlockval)) {
			dq_unlock_internal(dq);
			goto relookup;
		}

		/*
		 * dq_lock_internal may drop the quota_list_lock to msleep, so
		 * we need to re-evaluate the identity of this dq
		 */
		if (dq->dq_id != id || dq->dq_qfile == NULL ||
		    dq->dq_qfile->qf_vp != dqvp) {
			dq_unlock_internal(dq);
			goto relookup;
		}
		/*
		 * Cache hit with no references.  Take
		 * the structure off the free list.
		 */
		if (dq->dq_cnt++ == 0) {
			if (dq->dq_flags & DQ_MOD)
				TAILQ_REMOVE(&dqdirtylist, dq, dq_freelist);
			else
				TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		} else if (dq->dq_cnt == 0) {
			/* We've overflowed the reference counter */
			--dq->dq_cnt;
			dq_unlock_internal(dq);
			dq_list_unlock();
			*dqp = NODQUOT;
			return (EINVAL);
		}
		dq_unlock_internal(dq);

		if (fdq != NULL) {
			/*
			 * we grabbed this from the free list in the first pass
			 * but we found the dq we were looking for in
			 * the cache the 2nd time through
			 * so stick it back on the free list and return the cached entry
			 */
			TAILQ_INSERT_HEAD(&dqfreelist, fdq, dq_freelist);
		}
		qf_rele(qfp);
		dq_list_unlock();

		if (ndq != NULL) {
			/*
			 * we allocated this in the first pass
			 * but we found the dq we were looking for in
			 * the cache the 2nd time through so free it
			 */
			_FREE(ndq, M_DQUOT);
		}
		*dqp = dq;

		return (0);
	}
	/*
	 * Not in cache, allocate a new one.
	 */
	if (TAILQ_EMPTY(&dqfreelist) &&
	    numdquot < MAXQUOTAS * desiredvnodes)
		desireddquot += DQUOTINC;

	if (fdq != NULL) {
		/*
		 * we captured this from the free list
		 * in the first pass through, so go
		 * ahead and use it
		 */
		dq = fdq;
		fdq = NULL;
	} else if (numdquot < desireddquot) {
		if (ndq == NULL) {
			/*
			 * drop the quota list lock since MALLOC may block
			 */
			dq_list_unlock();

			ndq = (struct dquot *)_MALLOC(sizeof *dq, M_DQUOT, M_WAITOK);
			bzero((char *)ndq, sizeof *dq);

			listlockval = dq_list_lock();
			/*
			 * need to look for the entry again in the cache
			 * since we dropped the quota list lock and
			 * someone else may have beaten us to creating it
			 */
			goto relookup;
		} else {
			/*
			 * we allocated this in the first pass through
			 * and we're still under our target, so go
			 * ahead and use it
			 */
			dq = ndq;
			ndq = NULL;
			numdquot++;
		}
	} else {
		if (TAILQ_EMPTY(&dqfreelist)) {
			qf_rele(qfp);
			dq_list_unlock();

			if (ndq) {
				/*
				 * we allocated this in the first pass through
				 * but we're now at the limit of our cache size
				 * so free it
				 */
				_FREE(ndq, M_DQUOT);
			}
			tablefull("dquot");
			*dqp = NODQUOT;
			return (EUSERS);
		}
		dq = TAILQ_FIRST(&dqfreelist);

		dq_lock_internal(dq);

		if (dq_list_lock_changed(listlockval) || dq->dq_cnt || (dq->dq_flags & DQ_MOD)) {
			/*
			 * we lost the race while we weren't holding
			 * the quota list lock... dq_lock_internal
			 * will drop it to msleep... this dq has been
			 * reclaimed... go find another
			 */
			dq_unlock_internal(dq);

			/*
			 * need to look for the entry again in the cache
			 * since we dropped the quota list lock and
			 * someone else may have beaten us to creating it
			 */
			goto relookup;
		}
		TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);

		if (dq->dq_qfile != NULL) {
			/* evict the recycled dquot's old identity from the hash */
			LIST_REMOVE(dq, dq_hash);
			dq->dq_qfile = NULL;
			dq->dq_id = 0;
		}
		dq_unlock_internal(dq);

		/*
		 * because we may have dropped the quota list lock
		 * in the call to dq_lock_internal, we need to
		 * relookup in the hash in case someone else
		 * caused a dq with this identity to be created...
		 * if we don't find it, we'll use this one
		 */
		fdq = dq;
		goto relookup;
	}
	/*
	 * we've either freshly allocated a dq
	 * or we've atomically pulled it out of
	 * the hash and freelists... no one else
	 * can have a reference, which means no
	 * one else can be trying to use this dq
	 */
	dq_lock_internal(dq);
	if (dq_list_lock_changed(listlockval)) {
		dq_unlock_internal(dq);
		goto relookup;
	}

	/*
	 * Initialize the contents of the dquot structure.
	 */
	dq->dq_cnt = 1;
	dq->dq_flags = 0;
	dq->dq_id = id;
	dq->dq_qfile = qfp;
	dq->dq_type = type;
	/*
	 * once we insert it in the hash and
	 * drop the quota_list_lock, it can be
	 * 'found'... however, we're still holding
	 * the dq_lock which will keep us from doing
	 * anything with it until we've finished
	 * initializing it...
	 */
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	dq_list_unlock();

	if (ndq) {
		/*
		 * we allocated this in the first pass through
		 * but we didn't need it, so free it after
		 * we've dropped the quota list lock
		 */
		_FREE(ndq, M_DQUOT);
	}

	error = dqlookup(qfp, id, &dq->dq_dqb, &dq->dq_index);

	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		dq_list_lock();

		dq->dq_id = 0;
		dq->dq_qfile = NULL;
		LIST_REMOVE(dq, dq_hash);

		dq_unlock_internal(dq);
		qf_rele(qfp);
		dq_list_unlock();

		dqrele(dq);

		*dqp = NODQUOT;
		return (error);
	}
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	if (dq->dq_id != 0) {
		struct timeval tv;

		microtime(&tv);
		if (dq->dq_btime == 0)
			dq->dq_btime = tv.tv_sec + qfp->qf_btime;
		if (dq->dq_itime == 0)
			dq->dq_itime = tv.tv_sec + qfp->qf_itime;
	}
	dq_list_lock();
	dq_unlock_internal(dq);
	qf_rele(qfp);
	dq_list_unlock();

	*dqp = dq;
	return (0);
}
767
768 /*
769 * Lookup a dqblk structure for the specified identifier and
770 * quota file. If there is no entry for this identifier then
771 * one is inserted. The actual hash table index is returned.
772 */
773 static int
774 dqlookup(qfp, id, dqb, index)
775 struct quotafile *qfp;
776 u_long id;
777 struct dqblk *dqb;
778 u_int32_t *index;
779 {
780 struct vnode *dqvp;
781 struct vfs_context context;
782 uio_t auio;
783 int i, skip, last;
784 u_long mask;
785 int error = 0;
786 char uio_buf[ UIO_SIZEOF(1) ];
787
788
789 qf_lock(qfp);
790
791 dqvp = qfp->qf_vp;
792
793 context.vc_proc = current_proc();
794 context.vc_ucred = qfp->qf_cred;
795
796 mask = qfp->qf_maxentries - 1;
797 i = dqhash1(id, qfp->qf_shift, mask);
798 skip = dqhash2(id, mask);
799
800 for (last = (i + (qfp->qf_maxentries-1) * skip) & mask;
801 i != last;
802 i = (i + skip) & mask) {
803 auio = uio_createwithbuffer(1, dqoffset(i), UIO_SYSSPACE, UIO_READ,
804 &uio_buf[0], sizeof(uio_buf));
805 uio_addiov(auio, CAST_USER_ADDR_T(dqb), sizeof (struct dqblk));
806 error = VNOP_READ(dqvp, auio, 0, &context);
807 if (error) {
808 printf("dqlookup: error %d looking up id %d at index %d\n", error, id, i);
809 break;
810 } else if (uio_resid(auio)) {
811 error = EIO;
812 printf("dqlookup: error looking up id %d at index %d\n", id, i);
813 break;
814 }
815 /*
816 * An empty entry means there is no entry
817 * with that id. In this case a new dqb
818 * record will be inserted.
819 */
820 if (dqb->dqb_id == 0) {
821 bzero(dqb, sizeof(struct dqblk));
822 dqb->dqb_id = OSSwapHostToBigInt32(id);
823 /*
824 * Write back to reserve entry for this id
825 */
826 uio_reset(auio, dqoffset(i), UIO_SYSSPACE, UIO_WRITE);
827 uio_addiov(auio, CAST_USER_ADDR_T(dqb), sizeof (struct dqblk));
828 error = VNOP_WRITE(dqvp, auio, 0, &context);
829 if (uio_resid(auio) && error == 0)
830 error = EIO;
831 if (error == 0)
832 ++qfp->qf_entrycnt;
833 dqb->dqb_id = id;
834 break;
835 }
836 /* An id match means an entry was found. */
837 if (OSSwapBigToHostInt32(dqb->dqb_id) == id) {
838 dqb->dqb_bhardlimit = OSSwapBigToHostInt64(dqb->dqb_bhardlimit);
839 dqb->dqb_bsoftlimit = OSSwapBigToHostInt64(dqb->dqb_bsoftlimit);
840 dqb->dqb_curbytes = OSSwapBigToHostInt64(dqb->dqb_curbytes);
841 dqb->dqb_ihardlimit = OSSwapBigToHostInt32(dqb->dqb_ihardlimit);
842 dqb->dqb_isoftlimit = OSSwapBigToHostInt32(dqb->dqb_isoftlimit);
843 dqb->dqb_curinodes = OSSwapBigToHostInt32(dqb->dqb_curinodes);
844 dqb->dqb_btime = OSSwapBigToHostInt32(dqb->dqb_btime);
845 dqb->dqb_itime = OSSwapBigToHostInt32(dqb->dqb_itime);
846 dqb->dqb_id = OSSwapBigToHostInt32(dqb->dqb_id);
847 break;
848 }
849 }
850 qf_unlock(qfp);
851
852 *index = i; /* remember index so we don't have to recompute it later */
853
854 return (error);
855 }
856
857
/*
 * Release a reference to a dquot.
 *
 * On the last release, modified state is pushed to the quota file
 * (dqsync_locked) and the dquot is appended to the free list for
 * reuse; it remains in the hash so future dqgets can still find it.
 */
void
dqrele(struct dquot *dq)
{

	if (dq == NODQUOT)
		return;
	dqlock(dq);

	if (dq->dq_cnt > 1) {
		dq->dq_cnt--;

		dqunlock(dq);
		return;
	}
	if (dq->dq_flags & DQ_MOD)
		(void) dqsync_locked(dq);
	dq->dq_cnt--;

	/* last reference: put it on the free list under the list lock */
	dq_list_lock();
	TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
	dq_unlock_internal(dq);
	dq_list_unlock();
}
884
/*
 * Release a reference to a dquot but don't do any I/O.
 *
 * A still-modified dquot is parked on the dirty list so
 * dqsync_orphans can flush it later; otherwise it goes straight onto
 * the free list.
 */
void
dqreclaim(register struct dquot *dq)
{

	if (dq == NODQUOT)
		return;

	dq_list_lock();
	dq_lock_internal(dq);

	if (--dq->dq_cnt > 0) {
		/* not the last reference; nothing more to do */
		dq_unlock_internal(dq);
		dq_list_unlock();
		return;
	}
	if (dq->dq_flags & DQ_MOD)
		TAILQ_INSERT_TAIL(&dqdirtylist, dq, dq_freelist);
	else
		TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);

	dq_unlock_internal(dq);
	dq_list_unlock();
}
911
/*
 * Update a quota file's orphaned disk quotas.
 *
 * Walks the dirty list and, for each unreferenced modified dquot
 * belonging to 'qfp', writes it to the quota file and moves it to the
 * free list.  The scan restarts from the top whenever the quota list
 * lock may have been dropped (msleep in dq_lock_internal, or around
 * the dqsync_locked I/O), since the list can change underneath us.
 */
void
dqsync_orphans(qfp)
	struct quotafile *qfp;
{
	struct dquot *dq;
	int listlockval = 0;

	dq_list_lock();
  loop:
	listlockval = dq_list_lock_val();

	TAILQ_FOREACH(dq, &dqdirtylist, dq_freelist) {
	        if (dq->dq_qfile != qfp)
		        continue;

		dq_lock_internal(dq);
		if (dq_list_lock_changed(listlockval)) {
			dq_unlock_internal(dq);
			goto loop;
		}

	        if (dq->dq_qfile != qfp) {
			/*
			 * the identity of this dq changed while
			 * the quota_list_lock was dropped
			 * dq_lock_internal can drop it to msleep
			 */
			dq_unlock_internal(dq);
			goto loop;
		}
	        if ((dq->dq_flags & DQ_MOD) == 0) {
			/*
			 * someone cleaned this dq and removed it
			 * from the dirty list while the
			 * quota_list_lock was dropped
			 */
			dq_unlock_internal(dq);
			goto loop;
		}
		if (dq->dq_cnt != 0)
			panic("dqsync_orphans: dquot in use");

		TAILQ_REMOVE(&dqdirtylist, dq, dq_freelist);

		dq_list_unlock();
		/*
		 * we're still holding the dqlock at this point
		 * with the reference count == 0
		 * we shouldn't be able
		 * to pick up another one since we hold dqlock
		 */
		(void) dqsync_locked(dq);

		dq_list_lock();

		TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);

		dq_unlock_internal(dq);
		goto loop;
	}
	dq_list_unlock();
}
977
978 int
979 dqsync(struct dquot *dq)
980 {
981 int error = 0;
982
983 if (dq != NODQUOT) {
984 dqlock(dq);
985
986 if ( (dq->dq_flags & DQ_MOD) )
987 error = dqsync_locked(dq);
988
989 dqunlock(dq);
990 }
991 return (error);
992 }
993
994
/*
 * Update the disk quota in the quota file.
 *
 * Byte-swaps the in-core dqblk into big-endian (on-disk) order and
 * writes it at the slot remembered in dq->dq_index by dqlookup.
 * Clears DQ_MOD even on error.  Caller must hold the dquot lock.
 */
int
dqsync_locked(struct dquot *dq)
{
	struct proc *p = current_proc();		/* XXX */
	struct vfs_context context;
	struct vnode *dqvp;
	struct dqblk dqb, *dqblkp;
	uio_t auio;
	int error;
	char uio_buf[ UIO_SIZEOF(1) ];

	if (dq->dq_id == 0) {
		/* id 0 is never stored on disk; just mark it clean */
		dq->dq_flags &= ~DQ_MOD;
		return (0);
	}
	if (dq->dq_qfile == NULL)
		panic("dqsync: NULL dq_qfile");
	if ((dqvp = dq->dq_qfile->qf_vp) == NULLVP)
		panic("dqsync: NULL qf_vp");

	/* uio records the address of dqb; dqb is filled in below before the write */
	auio = uio_createwithbuffer(1, dqoffset(dq->dq_index), UIO_SYSSPACE,
	    UIO_WRITE, &uio_buf[0], sizeof(uio_buf));
	uio_addiov(auio, CAST_USER_ADDR_T(&dqb), sizeof (struct dqblk));

	context.vc_proc = p;
	context.vc_ucred = dq->dq_qfile->qf_cred;

	/* build a big-endian on-disk copy of the in-core record */
	dqblkp = &dq->dq_dqb;
	dqb.dqb_bhardlimit = OSSwapHostToBigInt64(dqblkp->dqb_bhardlimit);
	dqb.dqb_bsoftlimit = OSSwapHostToBigInt64(dqblkp->dqb_bsoftlimit);
	dqb.dqb_curbytes = OSSwapHostToBigInt64(dqblkp->dqb_curbytes);
	dqb.dqb_ihardlimit = OSSwapHostToBigInt32(dqblkp->dqb_ihardlimit);
	dqb.dqb_isoftlimit = OSSwapHostToBigInt32(dqblkp->dqb_isoftlimit);
	dqb.dqb_curinodes = OSSwapHostToBigInt32(dqblkp->dqb_curinodes);
	dqb.dqb_btime = OSSwapHostToBigInt32(dqblkp->dqb_btime);
	dqb.dqb_itime = OSSwapHostToBigInt32(dqblkp->dqb_itime);
	dqb.dqb_id = OSSwapHostToBigInt32(dqblkp->dqb_id);
	dqb.dqb_spare[0] = 0;
	dqb.dqb_spare[1] = 0;
	dqb.dqb_spare[2] = 0;
	dqb.dqb_spare[3] = 0;

	error = VNOP_WRITE(dqvp, auio, 0, &context);
	if (uio_resid(auio) && error == 0)
		error = EIO;	/* short write */
	dq->dq_flags &= ~DQ_MOD;

	return (error);
}
1047
1048 /*
1049 * Flush all entries from the cache for a particular vnode.
1050 */
1051 void
1052 dqflush(vp)
1053 register struct vnode *vp;
1054 {
1055 register struct dquot *dq, *nextdq;
1056 struct dqhash *dqh;
1057
1058 /*
1059 * Move all dquot's that used to refer to this quota
1060 * file off their hash chains (they will eventually
1061 * fall off the head of the free list and be re-used).
1062 */
1063 dq_list_lock();
1064
1065 for (dqh = &dqhashtbl[dqhash]; dqh >= dqhashtbl; dqh--) {
1066 for (dq = dqh->lh_first; dq; dq = nextdq) {
1067 nextdq = dq->dq_hash.le_next;
1068 if (dq->dq_qfile->qf_vp != vp)
1069 continue;
1070 if (dq->dq_cnt)
1071 panic("dqflush: stray dquot");
1072 LIST_REMOVE(dq, dq_hash);
1073 dq->dq_qfile = NULL;
1074 }
1075 }
1076 dq_list_unlock();
1077 }
1078
1079 /*
1080 * LP64 support for munging dqblk structure.
1081 * XXX conversion of user_time_t to time_t loses precision; not an issue for
1082 * XXX us now, since we are only ever setting 32 bits worth of time into it.
1083 */
1084 __private_extern__ void
1085 munge_dqblk(struct dqblk *dqblkp, struct user_dqblk *user_dqblkp, boolean_t to64)
1086 {
1087 if (to64) {
1088 /* munge kernel (32 bit) dqblk into user (64 bit) dqblk */
1089 bcopy((caddr_t)dqblkp, (caddr_t)user_dqblkp, offsetof(struct dqblk, dqb_btime));
1090 user_dqblkp->dqb_id = dqblkp->dqb_id;
1091 user_dqblkp->dqb_itime = dqblkp->dqb_itime;
1092 user_dqblkp->dqb_btime = dqblkp->dqb_btime;
1093 }
1094 else {
1095 /* munge user (64 bit) dqblk into kernel (32 bit) dqblk */
1096 bcopy((caddr_t)user_dqblkp, (caddr_t)dqblkp, offsetof(struct dqblk, dqb_btime));
1097 dqblkp->dqb_id = user_dqblkp->dqb_id;
1098 dqblkp->dqb_itime = user_dqblkp->dqb_itime; /* XXX - lose precision */
1099 dqblkp->dqb_btime = user_dqblkp->dqb_btime; /* XXX - lose precision */
1100 }
1101 }