1c79356b 1/*
5d5c5d0d
A
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
8f6c56a5 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
8f6c56a5
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
8ad349bb 24 * limitations under the License.
8f6c56a5
A
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29/*-
30 * Copyright (c) 1994 Christopher G. Demetriou
31 * Copyright (c) 1982, 1986, 1989, 1993
32 * The Regents of the University of California. All rights reserved.
33 * (c) UNIX System Laboratories, Inc.
34 * All or some portions of this file are derived from material licensed
35 * to the University of California by American Telephone and Telegraph
36 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
37 * the permission of UNIX System Laboratories, Inc.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by the University of
50 * California, Berkeley and its contributors.
51 * 4. Neither the name of the University nor the names of its contributors
52 * may be used to endorse or promote products derived from this software
53 * without specific prior written permission.
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
56 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
57 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
58 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
59 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
60 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
61 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
62 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
63 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
64 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65 * SUCH DAMAGE.
66 *
1c79356b
A
67 * @(#)vfs_bio.c 8.6 (Berkeley) 1/11/94
68 */
69
70/*
71 * Some references:
72 * Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
73 * Leffler, et al.: The Design and Implementation of the 4.3BSD
74 * UNIX Operating System (Addison Wesley, 1989)
75 */
1c79356b
A
76
77#include <sys/param.h>
78#include <sys/systm.h>
91447636
A
79#include <sys/proc_internal.h>
80#include <sys/buf_internal.h>
81#include <sys/vnode_internal.h>
82#include <sys/mount_internal.h>
1c79356b
A
83#include <sys/trace.h>
84#include <sys/malloc.h>
85#include <sys/resourcevar.h>
86#include <miscfs/specfs/specdev.h>
87#include <sys/ubc.h>
91447636 88#include <sys/kauth.h>
1c79356b
A
89#if DIAGNOSTIC
90#include <kern/assert.h>
91#endif /* DIAGNOSTIC */
92#include <kern/task.h>
93#include <kern/zalloc.h>
91447636
A
94#include <kern/lock.h>
95
96#include <vm/vm_kern.h>
1c79356b
A
97
98#include <sys/kdebug.h>
9bccf70c 99#include <machine/spl.h>
1c79356b 100
91447636 101#if BALANCE_QUEUES
9bccf70c
A
102static __inline__ void bufqinc(int q);
103static __inline__ void bufqdec(int q);
91447636 104#endif
1c79356b 105
91447636
A
106static int bcleanbuf(buf_t bp);
107static int brecover_data(buf_t bp);
108static boolean_t incore(vnode_t vp, daddr64_t blkno);
109static buf_t incore_locked(vnode_t vp, daddr64_t blkno);
110/* timeout is in msecs */
111static buf_t getnewbuf(int slpflag, int slptimeo, int *queue);
112static void bremfree_locked(buf_t bp);
113static void buf_reassign(buf_t bp, vnode_t newvp);
114static errno_t buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo);
115static int buf_iterprepare(vnode_t vp, struct buflists *, int flags);
116static void buf_itercomplete(vnode_t vp, struct buflists *, int flags);
1c79356b 117
91447636 118__private_extern__ int bdwrite_internal(buf_t, int);
1c79356b 119
d52fe63f 120/* zone allocated buffer headers */
91447636
A
121static void bufzoneinit(void);
122static void bcleanbuf_thread_init(void);
123static void bcleanbuf_thread(void);
124
125static zone_t buf_hdr_zone;
126static int buf_hdr_count;
d52fe63f 127
1c79356b
A
128
129/*
130 * Definitions for the buffer hash lists.
131 */
132#define BUFHASH(dvp, lbn) \
133 (&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
134LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
135u_long bufhash;
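/*
 * BUFHASH() folds the vnode pointer and the logical block number into
 * an index into bufhashtbl[]; "bufhash" is the mask handed back by
 * hashinit() in bufinit(), so the result always lands inside the table.
 */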
136
137/* Definitions for the buffer stats. */
138struct bufstats bufstats;
139
d52fe63f
A
140/* Number of delayed write buffers */
141int nbdwrite = 0;
91447636 142int blaundrycnt = 0;
d52fe63f 143
1c79356b 144
91447636
A
145static TAILQ_HEAD(ioqueue, buf) iobufqueue;
146static TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
d52fe63f
A
147static int needbuffer;
148static int need_iobuffer;
1c79356b 149
91447636
A
150static lck_grp_t *buf_mtx_grp;
151static lck_attr_t *buf_mtx_attr;
152static lck_grp_attr_t *buf_mtx_grp_attr;
153static lck_mtx_t *iobuffer_mtxp;
154static lck_mtx_t *buf_mtxp;
155
156static __inline__ int
157buf_timestamp(void)
158{
159 struct timeval t;
160 microuptime(&t);
161 return (t.tv_sec);
162}
163
1c79356b
A
164/*
165 * Insq/Remq for the buffer free lists.
166 */
91447636 167#if BALANCE_QUEUES
1c79356b
A
168#define binsheadfree(bp, dp, whichq) do { \
169 TAILQ_INSERT_HEAD(dp, bp, b_freelist); \
170 bufqinc((whichq)); \
171 (bp)->b_whichq = whichq; \
91447636 172 (bp)->b_timestamp = buf_timestamp(); \
173 } while (0)
174
175#define binstailfree(bp, dp, whichq) do { \
176 TAILQ_INSERT_TAIL(dp, bp, b_freelist); \
177 bufqinc((whichq)); \
178 (bp)->b_whichq = whichq; \
179 (bp)->b_timestamp = buf_timestamp(); \
180 } while (0)
181#else
182#define binsheadfree(bp, dp, whichq) do { \
183 TAILQ_INSERT_HEAD(dp, bp, b_freelist); \
184 (bp)->b_whichq = whichq; \
185 (bp)->b_timestamp = buf_timestamp(); \
186 } while (0)
187
91447636
A
188#define binstailfree(bp, dp, whichq) do { \
189 TAILQ_INSERT_TAIL(dp, bp, b_freelist); \
190 (bp)->b_whichq = whichq; \
191 (bp)->b_timestamp = buf_timestamp(); \
192 } while (0)
193#endif
194
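/*
 * Both the BALANCE_QUEUES and the plain variants of these macros record
 * which free queue the buffer was placed on (b_whichq) and when
 * (b_timestamp); bremfree_locked() undoes that bookkeeping when the
 * buffer is pulled back off a free list.
 */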
195
1c79356b
A
196#define BHASHENTCHECK(bp) \
197 if ((bp)->b_hash.le_prev != (struct buf **)0xdeadbeef) \
765c9de3 198 panic("%x: b_hash.le_prev is not deadbeef", (bp));
1c79356b
A
199
200#define BLISTNONE(bp) \
201 (bp)->b_hash.le_next = (struct buf *)0; \
202 (bp)->b_hash.le_prev = (struct buf **)0xdeadbeef;
203
9bccf70c
A
204/*
205 * Insq/Remq for the vnode usage lists.
206 */
207#define bufinsvn(bp, dp) LIST_INSERT_HEAD(dp, bp, b_vnbufs)
208#define bufremvn(bp) { \
209 LIST_REMOVE(bp, b_vnbufs); \
210 (bp)->b_vnbufs.le_next = NOLIST; \
211}
212
1c79356b
A
213/*
214 * Time in seconds before a buffer on a list is
215 * considered as a stale buffer
216 */
217#define LRU_IS_STALE 120 /* default value for the LRU */
218#define AGE_IS_STALE 60 /* default value for the AGE */
219#define META_IS_STALE 180 /* default value for the BQ_META */
220
221int lru_is_stale = LRU_IS_STALE;
222int age_is_stale = AGE_IS_STALE;
223int meta_is_stale = META_IS_STALE;
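/*
 * These thresholds are used elsewhere in this file, together with the
 * b_timestamp stamped by binsheadfree()/binstailfree(), to decide
 * whether a buffer sitting on a free list has gone stale and can be
 * recycled.
 */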
21362eb3 224
91447636
A
225
226
9bccf70c
A
227/* LIST_INSERT_HEAD() with assertions */
228static __inline__ void
91447636 229blistenterhead(struct bufhashhdr * head, buf_t bp)
1c79356b
A
230{
231 if ((bp->b_hash.le_next = (head)->lh_first) != NULL)
232 (head)->lh_first->b_hash.le_prev = &(bp)->b_hash.le_next;
233 (head)->lh_first = bp;
234 bp->b_hash.le_prev = &(head)->lh_first;
235 if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef)
236 panic("blistenterhead: le_prev is deadbeef");
1c79356b 237}
1c79356b 238
9bccf70c 239static __inline__ void
91447636 240binshash(buf_t bp, struct bufhashhdr *dp)
1c79356b 241{
91447636 242 buf_t nbp;
9bccf70c 243
1c79356b 244 BHASHENTCHECK(bp);
9bccf70c 245
1c79356b
A
246 nbp = dp->lh_first;
247 for(; nbp != NULL; nbp = nbp->b_hash.le_next) {
248 if(nbp == bp)
249 panic("buf already in hashlist");
250 }
251
1c79356b 252 blistenterhead(dp, bp);
1c79356b
A
253}
254
9bccf70c 255static __inline__ void
91447636 256bremhash(buf_t bp)
1c79356b 257{
1c79356b
A
258 if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef)
259 panic("bremhash le_prev is deadbeef");
260 if (bp->b_hash.le_next == bp)
261 panic("bremhash: next points to self");
262
263 if (bp->b_hash.le_next != NULL)
264 bp->b_hash.le_next->b_hash.le_prev = bp->b_hash.le_prev;
265 *bp->b_hash.le_prev = (bp)->b_hash.le_next;
1c79356b
A
266}
267
1c79356b 268
1c79356b 269
9bccf70c 270
91447636
A
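/*
 * Thin accessors for the otherwise-opaque buf_t: outside callers
 * (filesystems, the cluster layer) are generally expected to use these
 * rather than reaching into struct buf directly.
 */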
271int
272buf_valid(buf_t bp) {
273
274 if ( (bp->b_flags & (B_DONE | B_DELWRI)) )
275 return 1;
276 return 0;
9bccf70c
A
277}
278
91447636
A
279int
280buf_fromcache(buf_t bp) {
9bccf70c 281
91447636
A
282 if ( (bp->b_flags & B_CACHE) )
283 return 1;
284 return 0;
9bccf70c
A
285}
286
9bccf70c 287void
91447636
A
288buf_markinvalid(buf_t bp) {
289
290 SET(bp->b_flags, B_INVAL);
291}
9bccf70c 292
91447636
A
293void
294buf_markdelayed(buf_t bp) {
295
296 SET(bp->b_flags, B_DELWRI);
297 buf_reassign(bp, bp->b_vp);
9bccf70c
A
298}
299
91447636
A
300void
301buf_markeintr(buf_t bp) {
302
303 SET(bp->b_flags, B_EINTR);
304}
765c9de3 305
91447636
A
306void
307buf_markaged(buf_t bp) {
308
309 SET(bp->b_flags, B_AGE);
765c9de3
A
310}
311
91447636
A
312errno_t
313buf_error(buf_t bp) {
314
315 return (bp->b_error);
316}
1c79356b 317
91447636
A
318void
319buf_seterror(buf_t bp, errno_t error) {
1c79356b 320
91447636
A
321 if ((bp->b_error = error))
322 SET(bp->b_flags, B_ERROR);
323 else
324 CLR(bp->b_flags, B_ERROR);
325}
1c79356b 326
91447636
A
327void
328buf_setflags(buf_t bp, int32_t flags) {
1c79356b 329
91447636
A
330 SET(bp->b_flags, (flags & BUF_X_WRFLAGS));
331}
765c9de3 332
91447636
A
333void
334buf_clearflags(buf_t bp, int32_t flags) {
1c79356b 335
91447636
A
336 CLR(bp->b_flags, (flags & BUF_X_WRFLAGS));
337}
1c79356b 338
91447636
A
339int32_t
340buf_flags(buf_t bp) {
341
342 return ((bp->b_flags & BUF_X_RDFLAGS));
343}
1c79356b 344
91447636
A
345void
346buf_reset(buf_t bp, int32_t io_flags) {
347
348 CLR(bp->b_flags, (B_READ | B_WRITE | B_ERROR | B_DONE | B_INVAL | B_ASYNC | B_NOCACHE));
349 SET(bp->b_flags, (io_flags & (B_ASYNC | B_READ | B_WRITE | B_NOCACHE)));
1c79356b 350
91447636
A
351 bp->b_error = 0;
352}
1c79356b 353
91447636
A
354uint32_t
355buf_count(buf_t bp) {
356
357 return (bp->b_bcount);
358}
765c9de3 359
91447636
A
360void
361buf_setcount(buf_t bp, uint32_t bcount) {
362
363 bp->b_bcount = bcount;
1c79356b
A
364}
365
91447636
A
366uint32_t
367buf_size(buf_t bp) {
368
369 return (bp->b_bufsize);
370}
1c79356b 371
91447636
A
372void
373buf_setsize(buf_t bp, uint32_t bufsize) {
374
375 bp->b_bufsize = bufsize;
376}
1c79356b 377
91447636
A
378uint32_t
379buf_resid(buf_t bp) {
380
381 return (bp->b_resid);
382}
b4c24cb9 383
91447636
A
384void
385buf_setresid(buf_t bp, uint32_t resid) {
386
387 bp->b_resid = resid;
388}
1c79356b 389
91447636
A
390uint32_t
391buf_dirtyoff(buf_t bp) {
1c79356b 392
91447636
A
393 return (bp->b_dirtyoff);
394}
1c79356b 395
91447636
A
396uint32_t
397buf_dirtyend(buf_t bp) {
1c79356b 398
91447636 399 return (bp->b_dirtyend);
1c79356b 400}
1c79356b 401
91447636
A
402void
403buf_setdirtyoff(buf_t bp, uint32_t dirtyoff) {
404
405 bp->b_dirtyoff = dirtyoff;
406}
1c79356b 407
91447636
A
408void
409buf_setdirtyend(buf_t bp, uint32_t dirtyend) {
410
411 bp->b_dirtyend = dirtyend;
1c79356b
A
412}
413
91447636
A
414uintptr_t
415buf_dataptr(buf_t bp) {
416
417 return (bp->b_datap);
418}
1c79356b 419
91447636
A
420void
421buf_setdataptr(buf_t bp, uintptr_t data) {
422
423 bp->b_datap = data;
424}
425
426vnode_t
427buf_vnode(buf_t bp) {
428
429 return (bp->b_vp);
430}
431
432void
433buf_setvnode(buf_t bp, vnode_t vp) {
434
435 bp->b_vp = vp;
436}
437
438
439void *
440buf_callback(buf_t bp)
441{
442 if ( !(bp->b_lflags & BL_IOBUF) )
443 return ((void *) NULL);
444 if ( !(bp->b_flags & B_CALL) )
445 return ((void *) NULL);
446
447 return ((void *)bp->b_iodone);
448}
449
450
451errno_t
452buf_setcallback(buf_t bp, void (*callback)(buf_t, void *), void *transaction)
453{
454
455 if ( !(bp->b_lflags & BL_IOBUF) )
456 return (EINVAL);
457
458 if (callback)
459 bp->b_flags |= (B_CALL | B_ASYNC);
460 else
461 bp->b_flags &= ~B_CALL;
462 bp->b_transaction = transaction;
463 bp->b_iodone = callback;
464
465 return (0);
466}
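/*
 * Sketch of a typical asynchronous setup using buf_setcallback() above;
 * "my_iodone" and "my_ctx" are placeholders and error handling is
 * omitted:
 *
 *	buf_t bp = buf_alloc(vp);                 // BL_IOBUF header
 *	buf_setcallback(bp, my_iodone, my_ctx);   // sets B_CALL | B_ASYNC
 *	buf_setdataptr(bp, (uintptr_t)data);
 *	buf_setcount(bp, size);
 *	buf_setblkno(bp, blkno);
 *	VNOP_STRATEGY(bp);   // my_iodone(bp, my_ctx) runs at I/O completion
 */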
467
468errno_t
469buf_setupl(buf_t bp, upl_t upl, uint32_t offset)
470{
471
472 if ( !(bp->b_lflags & BL_IOBUF) )
473 return (EINVAL);
474
475 if (upl)
476 bp->b_flags |= B_CLUSTER;
477 else
478 bp->b_flags &= ~B_CLUSTER;
479 bp->b_upl = upl;
480 bp->b_uploffset = offset;
481
482 return (0);
483}
484
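/*
 * buf_clone() below manufactures a fresh iobuf describing a sub-range
 * [io_offset, io_offset + io_size) of an existing buffer: the clone
 * shares the parent's UPL (B_CLUSTER case) or data pointer, inherits
 * the I/O-related flags, and may carry its own iodone callback.  For
 * B_CLUSTER parents the sub-range must respect page alignment, and the
 * caller owns the clone's issue/completion/release lifecycle.
 */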
485buf_t
486buf_clone(buf_t bp, int io_offset, int io_size, void (*iodone)(buf_t, void *), void *arg)
487{
488 buf_t io_bp;
489
490 if (io_offset < 0 || io_size < 0)
491 return (NULL);
492
493 if ((unsigned)(io_offset + io_size) > (unsigned)bp->b_bcount)
494 return (NULL);
495
496 if (bp->b_flags & B_CLUSTER) {
497 if (io_offset && ((bp->b_uploffset + io_offset) & PAGE_MASK))
498 return (NULL);
499
500 if (((bp->b_uploffset + io_offset + io_size) & PAGE_MASK) && ((io_offset + io_size) < bp->b_bcount))
501 return (NULL);
502 }
503 io_bp = alloc_io_buf(bp->b_vp, 0);
504
505 io_bp->b_flags = bp->b_flags & (B_COMMIT_UPL | B_META | B_PAGEIO | B_CLUSTER | B_PHYS | B_ASYNC | B_READ);
506
507 if (iodone) {
508 io_bp->b_transaction = arg;
509 io_bp->b_iodone = iodone;
510 io_bp->b_flags |= B_CALL;
511 }
512 if (bp->b_flags & B_CLUSTER) {
513 io_bp->b_upl = bp->b_upl;
514 io_bp->b_uploffset = bp->b_uploffset + io_offset;
515 } else {
516 io_bp->b_datap = (uintptr_t)(((char *)bp->b_datap) + io_offset);
517 }
518 io_bp->b_bcount = io_size;
519
520 return (io_bp);
521}
522
523
524
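/*
 * buf_setfilter() below interposes a completion filter on a buffer:
 * the new function/transaction pair replaces b_iodone/b_transaction
 * (the old values are handed back so the filter can chain to them) and
 * B_FILTER is set.  buf_brelse() relies on this bit to run the filter
 * when an invalidated B_META buffer is torn down -- the HFS journal
 * depends on that behavior (see buf_brelse()).
 */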
525void
526buf_setfilter(buf_t bp, void (*filter)(buf_t, void *), void *transaction,
527 void **old_iodone, void **old_transaction)
528{
529 if (old_iodone)
530 *old_iodone = (void *)(bp->b_iodone);
531 if (old_transaction)
532 *old_transaction = (void *)(bp->b_transaction);
533
534 bp->b_transaction = transaction;
535 bp->b_iodone = filter;
536 bp->b_flags |= B_FILTER;
537}
538
539
540daddr64_t
541buf_blkno(buf_t bp) {
542
543 return (bp->b_blkno);
544}
545
546daddr64_t
547buf_lblkno(buf_t bp) {
548
549 return (bp->b_lblkno);
550}
551
552void
553buf_setblkno(buf_t bp, daddr64_t blkno) {
554
555 bp->b_blkno = blkno;
556}
557
558void
559buf_setlblkno(buf_t bp, daddr64_t lblkno) {
560
561 bp->b_lblkno = lblkno;
562}
563
564dev_t
565buf_device(buf_t bp) {
566
567 return (bp->b_dev);
568}
569
570errno_t
571buf_setdevice(buf_t bp, vnode_t vp) {
572
573 if ((vp->v_type != VBLK) && (vp->v_type != VCHR))
574 return EINVAL;
575 bp->b_dev = vp->v_rdev;
576
577 return 0;
578}
579
580
581void *
582buf_drvdata(buf_t bp) {
583
584 return (bp->b_drvdata);
585}
586
587void
588buf_setdrvdata(buf_t bp, void *drvdata) {
589
590 bp->b_drvdata = drvdata;
591}
592
593void *
594buf_fsprivate(buf_t bp) {
595
596 return (bp->b_fsprivate);
597}
598
599void
600buf_setfsprivate(buf_t bp, void *fsprivate) {
601
602 bp->b_fsprivate = fsprivate;
603}
604
605ucred_t
606buf_rcred(buf_t bp) {
607
608 return (bp->b_rcred);
609}
610
611ucred_t
612buf_wcred(buf_t bp) {
613
614 return (bp->b_wcred);
615}
616
617void *
618buf_upl(buf_t bp) {
619
620 return (bp->b_upl);
621}
622
623uint32_t
624buf_uploffset(buf_t bp) {
625
626 return ((uint32_t)(bp->b_uploffset));
627}
628
629proc_t
630buf_proc(buf_t bp) {
631
632 return (bp->b_proc);
633}
634
635
636errno_t
637buf_map(buf_t bp, caddr_t *io_addr)
638{
639 buf_t real_bp;
640 vm_offset_t vaddr;
641 kern_return_t kret;
642
643 if ( !(bp->b_flags & B_CLUSTER)) {
644 *io_addr = (caddr_t)bp->b_datap;
645 return (0);
646 }
647 real_bp = (buf_t)(bp->b_real_bp);
648
649 if (real_bp && real_bp->b_datap) {
650 /*
651 * b_real_bp is only valid if B_CLUSTER is SET
652 * if it's non-zero, then someone did a cluster_bp call;
653 * if the backing physical pages were already mapped
654 * in before the call to cluster_bp (non-zero b_datap),
655 * then we just use that mapping
656 */
657 *io_addr = (caddr_t)real_bp->b_datap;
658 return (0);
659 }
660 kret = ubc_upl_map(bp->b_upl, &vaddr); /* Map it in */
661
662 if (kret != KERN_SUCCESS) {
663 *io_addr = 0;
664
665 return(ENOMEM);
666 }
667 vaddr += bp->b_uploffset;
668
669 *io_addr = (caddr_t)vaddr;
670
671 return (0);
672}
673
674errno_t
675buf_unmap(buf_t bp)
676{
677 buf_t real_bp;
678 kern_return_t kret;
679
680 if ( !(bp->b_flags & B_CLUSTER))
681 return (0);
682 /*
683 * see buf_map for the explanation
684 */
685 real_bp = (buf_t)(bp->b_real_bp);
686
687 if (real_bp && real_bp->b_datap)
688 return (0);
689
690 if (bp->b_lflags & BL_IOBUF) {
691 /*
692 * when we commit these pages, we'll hit
693 * it with UPL_COMMIT_INACTIVE which
694 * will clear the reference bit that got
695 * turned on when we touched the mapping
696 */
697 bp->b_flags |= B_AGE;
698 }
699 kret = ubc_upl_unmap(bp->b_upl);
700
701 if (kret != KERN_SUCCESS)
702 return (EINVAL);
703 return (0);
704}
705
706
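/*
 * Expected pairing for mapping a cluster (B_CLUSTER) buffer's UPL into
 * the kernel map -- a minimal sketch with error handling trimmed;
 * buf_clear() below is an in-tree user of the same pattern:
 *
 *	caddr_t va;
 *	if (buf_map(bp, &va) == 0) {
 *		... touch bp->b_bcount bytes at va ...
 *		buf_unmap(bp);
 *	}
 */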
707void
708buf_clear(buf_t bp) {
709 caddr_t baddr;
710
711 if (buf_map(bp, &baddr) == 0) {
712 bzero(baddr, bp->b_bcount);
713 buf_unmap(bp);
714 }
715 bp->b_resid = 0;
716}
717
718
719
720/*
721 * Read or write a buffer that is not contiguous on disk.
722 * buffer is marked done/error at the conclusion
723 */
724static int
725buf_strategy_fragmented(vnode_t devvp, buf_t bp, off_t f_offset, size_t contig_bytes)
726{
727 vnode_t vp = buf_vnode(bp);
728 buf_t io_bp; /* For reading or writing a single block */
729 int io_direction;
730 int io_resid;
731 size_t io_contig_bytes;
732 daddr64_t io_blkno;
733 int error = 0;
734 int bmap_flags;
735
736 /*
737 * save our starting point... the bp was already mapped
738 * in buf_strategy before we got called
739 * no sense doing it again.
740 */
741 io_blkno = bp->b_blkno;
742 /*
743 * Make sure we redo this mapping for the next I/O
744 * i.e. this can never be a 'permanent' mapping
745 */
746 bp->b_blkno = bp->b_lblkno;
747
748 /*
749 * Get an io buffer to do the deblocking
750 */
751 io_bp = alloc_io_buf(devvp, 0);
752
753 io_bp->b_lblkno = bp->b_lblkno;
754 io_bp->b_datap = bp->b_datap;
755 io_resid = bp->b_bcount;
756 io_direction = bp->b_flags & B_READ;
757 io_contig_bytes = contig_bytes;
758
759 if (bp->b_flags & B_READ)
760 bmap_flags = VNODE_READ;
761 else
762 bmap_flags = VNODE_WRITE;
763
764 for (;;) {
765 if (io_blkno == -1)
766 /*
767 * this is unexpected, but we'll allow for it
768 */
769 bzero((caddr_t)io_bp->b_datap, (int)io_contig_bytes);
770 else {
771 io_bp->b_bcount = io_contig_bytes;
772 io_bp->b_bufsize = io_contig_bytes;
773 io_bp->b_resid = io_contig_bytes;
774 io_bp->b_blkno = io_blkno;
775
776 buf_reset(io_bp, io_direction);
777 /*
778 * Call the device to do the I/O and wait for it
779 */
780 if ((error = VNOP_STRATEGY(io_bp)))
781 break;
782 if ((error = (int)buf_biowait(io_bp)))
783 break;
784 if (io_bp->b_resid) {
785 io_resid -= (io_contig_bytes - io_bp->b_resid);
786 break;
787 }
788 }
789 if ((io_resid -= io_contig_bytes) == 0)
790 break;
791 f_offset += io_contig_bytes;
792 io_bp->b_datap += io_contig_bytes;
793
794 /*
795 * Map the current position to a physical block number
796 */
797 if ((error = VNOP_BLOCKMAP(vp, f_offset, io_resid, &io_blkno, &io_contig_bytes, NULL, bmap_flags, NULL)))
798 break;
799 }
800 buf_free(io_bp);
801
802 if (error)
803 buf_seterror(bp, error);
804 bp->b_resid = io_resid;
805 /*
806 * This I/O is now complete
807 */
808 buf_biodone(bp);
809
810 return error;
811}
812
813
814/*
815 * struct vnop_strategy_args {
816 * struct buf *a_bp;
817 * } *ap;
818 */
819errno_t
820buf_strategy(vnode_t devvp, void *ap)
821{
822 buf_t bp = ((struct vnop_strategy_args *)ap)->a_bp;
823 vnode_t vp = bp->b_vp;
824 int bmap_flags;
825 errno_t error;
826
827 if (vp == NULL || vp->v_type == VCHR || vp->v_type == VBLK)
828 panic("buf_strategy: b_vp == NULL || vtype == VCHR | VBLK\n");
829 /*
830 * associate the physical device with
831 * this buf_t even if we don't
832 * end up issuing the I/O...
833 */
834 bp->b_dev = devvp->v_rdev;
835
836 if (bp->b_flags & B_READ)
837 bmap_flags = VNODE_READ;
838 else
839 bmap_flags = VNODE_WRITE;
840
841 if ( !(bp->b_flags & B_CLUSTER)) {
842
843 if ( (bp->b_upl) ) {
844 /*
845 * we have a UPL associated with this bp
846 * go through cluster_bp which knows how
847 * to deal with filesystem block sizes
848 * that aren't equal to the page size
849 */
850 return (cluster_bp(bp));
851 }
852 if (bp->b_blkno == bp->b_lblkno) {
853 off_t f_offset;
854 size_t contig_bytes;
855
856 if ((error = VNOP_BLKTOOFF(vp, bp->b_lblkno, &f_offset))) {
857 buf_seterror(bp, error);
858 buf_biodone(bp);
859
860 return (error);
861 }
862 if ((error = VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))) {
863 buf_seterror(bp, error);
864 buf_biodone(bp);
865
866 return (error);
867 }
868 if (bp->b_blkno == -1)
869 buf_clear(bp);
870 else if ((long)contig_bytes < bp->b_bcount)
871 return (buf_strategy_fragmented(devvp, bp, f_offset, contig_bytes));
872 }
873 if (bp->b_blkno == -1) {
874 buf_biodone(bp);
875 return (0);
876 }
877 }
878 /*
879 * we can issue the I/O because...
880 * either B_CLUSTER is set which
881 * means that the I/O is properly set
882 * up to be a multiple of the page size, or
883 * we were able to successfully set up the
884 * physical block mapping
885 */
886 return (VOCALL(devvp->v_op, VOFFSET(vnop_strategy), ap));
887}
888
889
890
891buf_t
892buf_alloc(vnode_t vp)
893{
894 return(alloc_io_buf(vp, 0));
895}
896
897void
898buf_free(buf_t bp) {
899
900 free_io_buf(bp);
901}
902
903
904
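/*
 * buf_iterate() walks the vnode's dirty buffer list, handing each
 * buffer it can acquire (BAC_NOWAIT, honoring the BUF_SKIP_* flags) to
 * "callout".  The callout's return value drives the loop:
 *	BUF_RETURNED      - buffer is released here via buf_brelse()
 *	BUF_CLAIMED       - callout kept (or already released) the buffer
 *	BUF_RETURNED_DONE - release the buffer, then stop iterating
 *	BUF_CLAIMED_DONE  - stop iterating
 */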
905void
906buf_iterate(vnode_t vp, int (*callout)(buf_t, void *), int flags, void *arg) {
907 buf_t bp;
908 int retval;
909 struct buflists local_iterblkhd;
910 int lock_flags = BAC_NOWAIT | BAC_REMOVE;
911
912 if (flags & BUF_SKIP_LOCKED)
913 lock_flags |= BAC_SKIP_LOCKED;
914 if (flags & BUF_SKIP_NONLOCKED)
915 lock_flags |= BAC_SKIP_NONLOCKED;
916
917 lck_mtx_lock(buf_mtxp);
918
919 if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY)) {
920 lck_mtx_unlock(buf_mtxp);
921 return;
922 }
923 while (!LIST_EMPTY(&local_iterblkhd)) {
924 bp = LIST_FIRST(&local_iterblkhd);
925 LIST_REMOVE(bp, b_vnbufs);
926 LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs);
927
928 if (buf_acquire_locked(bp, lock_flags, 0, 0))
929 continue;
930
931 lck_mtx_unlock(buf_mtxp);
932
933 retval = callout(bp, arg);
934
935 switch (retval) {
936 case BUF_RETURNED:
937 buf_brelse(bp);
938 break;
939 case BUF_CLAIMED:
940 break;
941 case BUF_RETURNED_DONE:
942 buf_brelse(bp);
943 lck_mtx_lock(buf_mtxp);
944 goto out;
945 case BUF_CLAIMED_DONE:
946 lck_mtx_lock(buf_mtxp);
947 goto out;
948 }
949 lck_mtx_lock(buf_mtxp);
950 }
951out:
952 buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);
953
954 lck_mtx_unlock(buf_mtxp);
955}
956
957
958/*
959 * Flush out and invalidate all buffers associated with a vnode.
960 */
961int
962buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo)
963{
964 buf_t bp;
965 int error = 0;
966 int must_rescan = 1;
967 struct buflists local_iterblkhd;
968
969 lck_mtx_lock(buf_mtxp);
970
971 for (;;) {
972 if (must_rescan == 0)
973 /*
974 * the lists may not be empty, but all that's left at this
975 * point are metadata or B_LOCKED buffers which are being
976 * skipped... we know this because we made it through both
977 * the clean and dirty lists without dropping buf_mtxp...
978 * each time we drop buf_mtxp we bump "must_rescan"
979 */
980 break;
981 if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd))
982 break;
983 must_rescan = 0;
984 /*
985 * iterate the clean list
986 */
987 if (buf_iterprepare(vp, &local_iterblkhd, VBI_CLEAN)) {
988 goto try_dirty_list;
989 }
990 while (!LIST_EMPTY(&local_iterblkhd)) {
991 bp = LIST_FIRST(&local_iterblkhd);
992
993 LIST_REMOVE(bp, b_vnbufs);
994 LIST_INSERT_HEAD(&vp->v_cleanblkhd, bp, b_vnbufs);
995
996 /*
997 * some filesystems distinguish meta data blocks with a negative logical block #
998 */
999 if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META)))
1000 continue;
1001
1002 if ( (error = (int)buf_acquire_locked(bp, BAC_REMOVE | BAC_SKIP_LOCKED, slpflag, slptimeo)) ) {
1003 if (error == EDEADLK)
1004 /*
1005 * this buffer was marked B_LOCKED...
1006 * we didn't drop buf_mtxp, so we
1007 * don't need to rescan
1008 */
1009 continue;
1010 if (error == EAGAIN) {
1011 /*
1012 * found a busy buffer... we blocked and
1013 * dropped buf_mtxp, so we're going to
1014 * need to rescan after this pass is completed
1015 */
1016 must_rescan++;
1017 continue;
1018 }
1019 /*
1020 * got some kind of 'real' error out of the msleep
1021 * in buf_acquire_locked, terminate the scan and return the error
1022 */
1023 buf_itercomplete(vp, &local_iterblkhd, VBI_CLEAN);
1024
1025 lck_mtx_unlock(buf_mtxp);
1026 return (error);
1027 }
1028 lck_mtx_unlock(buf_mtxp);
1029
1030 SET(bp->b_flags, B_INVAL);
1031 buf_brelse(bp);
1032
1033 lck_mtx_lock(buf_mtxp);
1034
1035 /*
1036 * by dropping buf_mtxp, we allow new
1037 * buffers to be added to the vnode list(s)
1038 * we'll have to rescan at least once more
1039 * if the queues aren't empty
1040 */
1041 must_rescan++;
1042 }
1043 buf_itercomplete(vp, &local_iterblkhd, VBI_CLEAN);
1044
1045try_dirty_list:
1046 /*
1047 * Now iterate on dirty blks
1048 */
1049 if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY)) {
1050 continue;
1051 }
1052 while (!LIST_EMPTY(&local_iterblkhd)) {
1053 bp = LIST_FIRST(&local_iterblkhd);
1054
1055 LIST_REMOVE(bp, b_vnbufs);
1056 LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs);
1057
1058 /*
1059 * some filesystems distinguish meta data blocks with a negative logical block #
1060 */
1061 if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META)))
1062 continue;
1063
1064 if ( (error = (int)buf_acquire_locked(bp, BAC_REMOVE | BAC_SKIP_LOCKED, slpflag, slptimeo)) ) {
1065 if (error == EDEADLK)
1066 /*
1067 * this buffer was marked B_LOCKED...
1068 * we didn't drop buf_mtxp, so we
1069 * don't need to rescan
1070 */
1071 continue;
1072 if (error == EAGAIN) {
1073 /*
1074 * found a busy buffer... we blocked and
1075 * dropped buf_mtxp, so we're going to
1076 * need to rescan after this pass is completed
1077 */
1078 must_rescan++;
1079 continue;
1080 }
1081 /*
1082 * got some kind of 'real' error out of the msleep
1083 * in buf_acquire_locked, terminate the scan and return the error
1084 */
1085 buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);
1086
1087 lck_mtx_unlock(buf_mtxp);
1088 return (error);
1089 }
1090 lck_mtx_unlock(buf_mtxp);
1091
1092 SET(bp->b_flags, B_INVAL);
1093
1094 if (ISSET(bp->b_flags, B_DELWRI) && (flags & BUF_WRITE_DATA))
1095 (void) VNOP_BWRITE(bp);
1096 else
1097 buf_brelse(bp);
1098
1099 lck_mtx_lock(buf_mtxp);
1100 /*
1101 * by dropping buf_mtxp, we allow new
1102 * buffers to be added to the vnode list(s)
1103 * we'll have to rescan at least once more
1104 * if the queues aren't empty
1105 */
1106 must_rescan++;
1107 }
1108 buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);
1109 }
1110 lck_mtx_unlock(buf_mtxp);
1111
1112 return (0);
1113}
1114
1115void
1116buf_flushdirtyblks(vnode_t vp, int wait, int flags, char *msg) {
1117 buf_t bp;
1118 int writes_issued = 0;
1119 errno_t error;
1120 int busy = 0;
1121 struct buflists local_iterblkhd;
1122 int lock_flags = BAC_NOWAIT | BAC_REMOVE;
1123
1124 if (flags & BUF_SKIP_LOCKED)
1125 lock_flags |= BAC_SKIP_LOCKED;
1126 if (flags & BUF_SKIP_NONLOCKED)
1127 lock_flags |= BAC_SKIP_NONLOCKED;
1128loop:
1129 lck_mtx_lock(buf_mtxp);
1130
1131 if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY) == 0) {
1132 while (!LIST_EMPTY(&local_iterblkhd)) {
1133 bp = LIST_FIRST(&local_iterblkhd);
1134 LIST_REMOVE(bp, b_vnbufs);
1135 LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs);
1136
1137 if ((error = buf_acquire_locked(bp, lock_flags, 0, 0)) == EBUSY)
1138 busy++;
1139 if (error)
1140 continue;
1141 lck_mtx_unlock(buf_mtxp);
1142
1143 bp->b_flags &= ~B_LOCKED;
1144
1145 /*
1146 * Wait for I/O associated with indirect blocks to complete,
1147 * since there is no way to quickly wait for them below.
1148 */
1149 if ((bp->b_vp == vp) || (wait == 0))
1150 (void) buf_bawrite(bp);
1151 else
1152 (void) VNOP_BWRITE(bp);
1153 writes_issued++;
1154
1155 lck_mtx_lock(buf_mtxp);
1156 }
1157 buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);
1158 }
1159 lck_mtx_unlock(buf_mtxp);
1160
1161 if (wait) {
1162 (void)vnode_waitforwrites(vp, 0, 0, 0, msg);
1163
1164 if (vp->v_dirtyblkhd.lh_first && busy) {
1165 /*
1166 * we had one or more BUSY buffers on
1167 * the dirtyblock list... most likely
1168 * these are due to delayed writes that
1169 * were moved to the bclean queue but
1170 * have not yet been 'written'.
1171 * if we issued some writes on the
1172 * previous pass, we try again immediately;
1173 * if we didn't, we'll sleep for some time
1174 * to allow the state to change...
1175 */
1176 if (writes_issued == 0) {
1177 (void)tsleep((caddr_t)&vp->v_numoutput,
1178 PRIBIO + 1, "vnode_flushdirtyblks", hz/20);
1179 }
1180 writes_issued = 0;
1181 busy = 0;
1182
1183 goto loop;
1184 }
1185 }
1186}
1187
1188
1189/*
1190 * called with buf_mtxp held...
1191 * this lock protects the queue manipulation
1192 */
1193static int
1194buf_iterprepare(vnode_t vp, struct buflists *iterheadp, int flags)
1195{
1196 struct buflists * listheadp;
1197
1198 if (flags & VBI_DIRTY)
1199 listheadp = &vp->v_dirtyblkhd;
1200 else
1201 listheadp = &vp->v_cleanblkhd;
1202
1203 while (vp->v_iterblkflags & VBI_ITER) {
1204 vp->v_iterblkflags |= VBI_ITERWANT;
1205 msleep(&vp->v_iterblkflags, buf_mtxp, 0, "buf_iterprepare", 0);
1206 }
1207 if (LIST_EMPTY(listheadp)) {
1208 LIST_INIT(iterheadp);
1209 return(EINVAL);
1210 }
1211 vp->v_iterblkflags |= VBI_ITER;
1212
1213 iterheadp->lh_first = listheadp->lh_first;
1214 listheadp->lh_first->b_vnbufs.le_prev = &iterheadp->lh_first;
1215 LIST_INIT(listheadp);
1216
1217 return(0);
1218}
1219
1220/*
1221 * called with buf_mtxp held...
1222 * this lock protects the queue manipulation
1223 */
1224static void
1225buf_itercomplete(vnode_t vp, struct buflists *iterheadp, int flags)
1226{
1227 struct buflists * listheadp;
1228 buf_t bp;
1229
1230 if (flags & VBI_DIRTY)
1231 listheadp = &vp->v_dirtyblkhd;
1232 else
1233 listheadp = &vp->v_cleanblkhd;
1234
1235 while (!LIST_EMPTY(iterheadp)) {
1236 bp = LIST_FIRST(iterheadp);
1237 LIST_REMOVE(bp, b_vnbufs);
1238 LIST_INSERT_HEAD(listheadp, bp, b_vnbufs);
1239 }
1240 vp->v_iterblkflags &= ~VBI_ITER;
1241
1242 if (vp->v_iterblkflags & VBI_ITERWANT) {
1243 vp->v_iterblkflags &= ~VBI_ITERWANT;
1244 wakeup(&vp->v_iterblkflags);
1245 }
1246}
1247
1248
1249static void
1250bremfree_locked(buf_t bp)
1251{
1252 struct bqueues *dp = NULL;
1253 int whichq = -1;
1254
1255 /*
1256 * We only calculate the head of the freelist when removing
1257 * the last element of the list as that is the only time that
1258 * it is needed (e.g. to reset the tail pointer).
1259 *
1260 * NB: This makes an assumption about how tailq's are implemented.
1261 */
1262 if (bp->b_freelist.tqe_next == NULL) {
1263 for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
1264 if (dp->tqh_last == &bp->b_freelist.tqe_next)
1265 break;
1266 if (dp == &bufqueues[BQUEUES])
1267 panic("bremfree: lost tail");
1268 }
1269 TAILQ_REMOVE(dp, bp, b_freelist);
1270 whichq = bp->b_whichq;
1271#if BALANCE_QUEUES
1272 bufqdec(whichq);
1273#endif
1274 bp->b_whichq = -1;
1275 bp->b_timestamp = 0;
1276}
1277
1278/*
1279 * Associate a buffer with a vnode.
1280 */
1281static void
1282bgetvp(vnode_t vp, buf_t bp)
1283{
1284
1285 if (bp->b_vp != vp)
1286 panic("bgetvp: not free");
1287
1288 if (vp->v_type == VBLK || vp->v_type == VCHR)
1289 bp->b_dev = vp->v_rdev;
1290 else
1291 bp->b_dev = NODEV;
1292 /*
1293 * Insert onto list for new vnode.
1294 */
1295 lck_mtx_lock(buf_mtxp);
1296 bufinsvn(bp, &vp->v_cleanblkhd);
1297 lck_mtx_unlock(buf_mtxp);
1298}
1299
1300/*
1301 * Disassociate a buffer from a vnode.
1302 */
1303static void
1304brelvp(buf_t bp)
1305{
1306 vnode_t vp;
1307
1308 if ((vp = bp->b_vp) == (vnode_t)NULL)
1309 panic("brelvp: NULL vp");
1310 /*
1311 * Delete from old vnode list, if on one.
1312 */
1313 lck_mtx_lock(buf_mtxp);
1314 if (bp->b_vnbufs.le_next != NOLIST)
1315 bufremvn(bp);
1316 lck_mtx_unlock(buf_mtxp);
1317
1318 bp->b_vp = (vnode_t)NULL;
1319}
1320
1321/*
1322 * Reassign a buffer from one vnode to another.
1323 * Used to assign file specific control information
1324 * (indirect blocks) to the vnode to which they belong.
1325 */
1326static void
1327buf_reassign(buf_t bp, vnode_t newvp)
1328{
1329 register struct buflists *listheadp;
1c79356b 1330
91447636
A
1331 if (newvp == NULL) {
1332 printf("buf_reassign: NULL");
1333 return;
1334 }
1335 lck_mtx_lock(buf_mtxp);
1336
1337 /*
1338 * Delete from old vnode list, if on one.
1339 */
1340 if (bp->b_vnbufs.le_next != NOLIST)
1341 bufremvn(bp);
1342 /*
1343 * If dirty, put on list of dirty buffers;
1344 * otherwise insert onto list of clean buffers.
1345 */
1346 if (ISSET(bp->b_flags, B_DELWRI))
1347 listheadp = &newvp->v_dirtyblkhd;
1348 else
1349 listheadp = &newvp->v_cleanblkhd;
1350 bufinsvn(bp, listheadp);
1351
1352 lck_mtx_unlock(buf_mtxp);
1c79356b
A
1353}
1354
91447636
A
1355static __inline__ void
1356bufhdrinit(buf_t bp)
55e303ae 1357{
91447636
A
1358 bzero((char *)bp, sizeof *bp);
1359 bp->b_dev = NODEV;
1360 bp->b_rcred = NOCRED;
1361 bp->b_wcred = NOCRED;
1362 bp->b_vnbufs.le_next = NOLIST;
1363 bp->b_flags = B_INVAL;
1364
1365 return;
55e303ae
A
1366}
1367
1368/*
91447636 1369 * Initialize buffers and hash links for buffers.
55e303ae 1370 */
91447636
A
1371__private_extern__ void
1372bufinit()
55e303ae 1373{
91447636
A
1374 buf_t bp;
1375 struct bqueues *dp;
1376 int i;
1377 int metabuf;
1378 long whichq;
1379
1380 /* Initialize the buffer queues ('freelists') and the hash table */
1381 for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
1382 TAILQ_INIT(dp);
21362eb3 1383 bufhashtbl = hashinit(nbuf, M_CACHE, &bufhash);
91447636 1384
21362eb3 1385 metabuf = nbuf/8; /* reserved for meta buf */
91447636
A
1386
1387 /* Initialize the buffer headers */
21362eb3 1388 for (i = 0; i < nbuf; i++) {
91447636
A
1389 bp = &buf[i];
1390 bufhdrinit(bp);
1391
1392 /*
1393 * metabuf buffer headers on the meta-data list and
1394 * rest of the buffer headers on the empty list
1395 */
1396 if (--metabuf)
1397 whichq = BQ_META;
1398 else
1399 whichq = BQ_EMPTY;
1400
1401 BLISTNONE(bp);
1402 dp = &bufqueues[whichq];
1403 binsheadfree(bp, dp, whichq);
1404 binshash(bp, &invalhash);
1405 }
1406
1407 for (; i < nbuf + niobuf; i++) {
1408 bp = &buf[i];
1409 bufhdrinit(bp);
1410 binsheadfree(bp, &iobufqueue, -1);
1411 }
1412
21362eb3 1413 /*
91447636
A
1414 * allocate lock group attribute and group
1415 */
21362eb3
A
1416 buf_mtx_grp_attr = lck_grp_attr_alloc_init();
1417 //lck_grp_attr_setstat(buf_mtx_grp_attr);
91447636
A
1418 buf_mtx_grp = lck_grp_alloc_init("buffer cache", buf_mtx_grp_attr);
1419
1420 /*
1421 * allocate the lock attribute
1422 */
1423 buf_mtx_attr = lck_attr_alloc_init();
21362eb3 1424 //lck_attr_setdebug(buf_mtx_attr);
91447636
A
1425
1426 /*
1427 * allocate and initialize mutex's for the buffer and iobuffer pools
1428 */
1429 buf_mtxp = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr);
1430 iobuffer_mtxp = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr);
1431
1432 if (iobuffer_mtxp == NULL)
1433 panic("couldn't create iobuffer mutex");
1434
1435 if (buf_mtxp == NULL)
1436 panic("couldn't create buf mutex");
1437
1438 /*
1439 * allocate and initialize cluster specific global locks...
1440 */
1441 cluster_init();
1442
1443 printf("using %d buffer headers and %d cluster IO buffer headers\n",
1444 nbuf, niobuf);
1445
1446 /* Set up zones used by the buffer cache */
1447 bufzoneinit();
1448
1449 /* start the bcleanbuf() thread */
1450 bcleanbuf_thread_init();
1451
1452#if BALANCE_QUEUES
1453 {
1454 static void bufq_balance_thread_init();
1455 /* create a thread to do dynamic buffer queue balancing */
1456 bufq_balance_thread_init();
1457 }
1458#endif /* BALANCE_QUEUES */
1459}
1460
1461static struct buf *
1462bio_doread(vnode_t vp, daddr64_t blkno, int size, ucred_t cred, int async, int queuetype)
1463{
1464 buf_t bp;
1465
1466 bp = buf_getblk(vp, blkno, size, 0, 0, queuetype);
1467
1468 /*
1469 * If buffer does not have data valid, start a read.
1470 * Note that if buffer is B_INVAL, buf_getblk() won't return it.
1471 * Therefore, it's valid if its I/O has completed or been delayed.
1472 */
1473 if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
1474 struct proc *p;
1475
1476 p = current_proc();
1477
1478 /* Start I/O for the buffer (keeping credentials). */
1479 SET(bp->b_flags, B_READ | async);
21362eb3 1480 if (cred != NOCRED && bp->b_rcred == NOCRED) {
91447636
A
1481 kauth_cred_ref(cred);
1482 bp->b_rcred = cred;
1483 }
1484
1485 VNOP_STRATEGY(bp);
1486
1487 trace(TR_BREADMISS, pack(vp, size), blkno);
1488
1489 /* Pay for the read. */
1490 if (p && p->p_stats)
1491 p->p_stats->p_ru.ru_inblock++; /* XXX */
1492
1493 if (async) {
1494 /*
1495 * since we asked for an ASYNC I/O
1496 * the biodone will do the brelse
1497 * we don't want to pass back a bp
1498 * that we don't 'own'
1499 */
1500 bp = NULL;
1501 }
1502 } else if (async) {
1503 buf_brelse(bp);
1504 bp = NULL;
1505 }
1506
1507 trace(TR_BREADHIT, pack(vp, size), blkno);
1508
1509 return (bp);
55e303ae
A
1510}
1511
1512/*
91447636 1513 * Perform the reads for buf_breadn() and buf_meta_breadn().
55e303ae
A
1514 * Trivial modification to the breada algorithm presented in Bach (p.55).
1515 */
91447636
A
1516static errno_t
1517do_breadn_for_type(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes,
1518 int nrablks, ucred_t cred, buf_t *bpp, int queuetype)
1c79356b 1519{
91447636
A
1520 buf_t bp;
1521 int i;
1c79356b 1522
55e303ae 1523 bp = *bpp = bio_doread(vp, blkno, size, cred, 0, queuetype);
1c79356b
A
1524
1525 /*
1526 * For each of the read-ahead blocks, start a read, if necessary.
1527 */
1528 for (i = 0; i < nrablks; i++) {
1529 /* If it's in the cache, just go on to next one. */
1530 if (incore(vp, rablks[i]))
1531 continue;
1532
1533 /* Get a buffer for the read-ahead block */
55e303ae 1534 (void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC, queuetype);
1c79356b
A
1535 }
1536
1537 /* Otherwise, we had to start a read for it; wait until it's valid. */
91447636 1538 return (buf_biowait(bp));
1c79356b
A
1539}
1540
91447636 1541
1c79356b 1542/*
91447636
A
1543 * Read a disk block.
1544 * This algorithm is described in Bach (p.54).
1c79356b 1545 */
91447636
A
1546errno_t
1547buf_bread(vnode_t vp, daddr64_t blkno, int size, ucred_t cred, buf_t *bpp)
1548{
1549 buf_t bp;
1550
1551 /* Get buffer for block. */
1552 bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_READ);
1553
1554 /* Wait for the read to complete, and return result. */
1555 return (buf_biowait(bp));
1556}
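/*
 * Typical use of buf_bread() -- a sketch only; the buffer handed back
 * through bpp must be released with buf_brelse() whether or not the
 * read succeeded:
 *
 *	buf_t bp;
 *	errno_t err = buf_bread(vp, blkno, blksize, cred, &bp);
 *	if (err == 0) {
 *		... consume buf_count(bp) bytes at buf_dataptr(bp) ...
 *	}
 *	buf_brelse(bp);
 */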
1557
1558/*
1559 * Read a disk block. [bread() for meta-data]
1560 * This algorithm is described in Bach (p.54).
1561 */
1562errno_t
1563buf_meta_bread(vnode_t vp, daddr64_t blkno, int size, ucred_t cred, buf_t *bpp)
1564{
1565 buf_t bp;
1566
1567 /* Get buffer for block. */
1568 bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_META);
1569
1570 /* Wait for the read to complete, and return result. */
1571 return (buf_biowait(bp));
1572}
1573
1574/*
1575 * Read-ahead multiple disk blocks. The first is sync, the rest async.
1576 */
1577errno_t
1578buf_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, ucred_t cred, buf_t *bpp)
1c79356b 1579{
91447636
A
1580 return (do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_READ));
1581}
1c79356b 1582
91447636
A
1583/*
1584 * Read-ahead multiple disk blocks. The first is sync, the rest async.
1585 * [buf_breadn() for meta-data]
1586 */
1587errno_t
1588buf_meta_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, ucred_t cred, buf_t *bpp)
1589{
1590 return (do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_META));
1c79356b
A
1591}
1592
1593/*
1594 * Block write. Described in Bach (p.56)
1595 */
91447636
A
1596errno_t
1597buf_bwrite(buf_t bp)
1c79356b 1598{
91447636
A
1599 int sync, wasdelayed;
1600 errno_t rv;
1601 proc_t p = current_proc();
1602 vnode_t vp = bp->b_vp;
1c79356b 1603
91447636 1604 if (bp->b_datap == 0) {
55e303ae
A
1605 if (brecover_data(bp) == 0)
1606 return (0);
1607 }
1c79356b
A
1608 /* Remember buffer type, to switch on it later. */
1609 sync = !ISSET(bp->b_flags, B_ASYNC);
1610 wasdelayed = ISSET(bp->b_flags, B_DELWRI);
1611 CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));
91447636
A
1612
1613 if (wasdelayed)
1614 OSAddAtomic(-1, &nbdwrite);
1c79356b
A
1615
1616 if (!sync) {
1617 /*
1618 * If not synchronous, pay for the I/O operation and make
1619 * sure the buf is on the correct vnode queue. We have
1620 * to do this now, because if we don't, the vnode may not
1621 * be properly notified that its I/O has completed.
1622 */
1623 if (wasdelayed)
91447636 1624 buf_reassign(bp, vp);
1c79356b
A
1625 else
1626 if (p && p->p_stats)
1627 p->p_stats->p_ru.ru_oublock++; /* XXX */
1628 }
d52fe63f 1629 trace(TR_BUFWRITE, pack(vp, bp->b_bcount), bp->b_lblkno);
1c79356b
A
1630
1631 /* Initiate disk write. Make sure the appropriate party is charged. */
91447636
A
1632
1633 OSAddAtomic(1, &vp->v_numoutput);
1c79356b 1634
91447636 1635 VNOP_STRATEGY(bp);
1c79356b
A
1636
1637 if (sync) {
1638 /*
1639 * If I/O was synchronous, wait for it to complete.
1640 */
91447636 1641 rv = buf_biowait(bp);
1c79356b
A
1642
1643 /*
1644 * Pay for the I/O operation, if it's not been paid for, and
1645 * make sure it's on the correct vnode queue. (async operations
1646 * were paid for above.)
1647 */
1648 if (wasdelayed)
91447636 1649 buf_reassign(bp, vp);
1c79356b
A
1650 else
1651 if (p && p->p_stats)
1652 p->p_stats->p_ru.ru_oublock++; /* XXX */
1653
1654 /* Release the buffer. */
b4c24cb9
A
1655 // XXXdbg - only if the unused bit is set
1656 if (!ISSET(bp->b_flags, B_NORELSE)) {
91447636 1657 buf_brelse(bp);
b4c24cb9
A
1658 } else {
1659 CLR(bp->b_flags, B_NORELSE);
1660 }
1c79356b
A
1661
1662 return (rv);
1663 } else {
1664 return (0);
1665 }
1666}
1667
1668int
1669vn_bwrite(ap)
91447636 1670 struct vnop_bwrite_args *ap;
1c79356b 1671{
91447636 1672 return (buf_bwrite(ap->a_bp));
1c79356b
A
1673}
1674
1675/*
1676 * Delayed write.
1677 *
1678 * The buffer is marked dirty, but is not queued for I/O.
1679 * This routine should be used when the buffer is expected
1680 * to be modified again soon, typically a small write that
1681 * partially fills a buffer.
1682 *
1683 * NB: magnetic tapes cannot be delayed; they must be
1684 * written in the order that the writes are requested.
1685 *
1686 * Described in Leffler, et al. (pp. 208-213).
d52fe63f
A
1687 *
1688 * Note: With the ability to allocate additional buffer
1689 * headers, we can get into the situation where "too" many
91447636
A
1690 * buf_bdwrite()s can create a situation where the kernel can create
1691 * buffers faster than the disks can service. Doing a buf_bawrite() in
1692 * cases where we have "too many" outstanding buf_bdwrite()s avoids that.
1c79356b 1693 */
9bccf70c 1694__private_extern__ int
91447636 1695bdwrite_internal(buf_t bp, int return_error)
1c79356b 1696{
91447636
A
1697 proc_t p = current_proc();
1698 vnode_t vp = bp->b_vp;
1c79356b
A
1699
1700 /*
1701 * If the block hasn't been seen before:
1702 * (1) Mark it as having been seen,
1703 * (2) Charge for the write.
1704 * (3) Make sure it's on its vnode's correct block list,
1705 */
1706 if (!ISSET(bp->b_flags, B_DELWRI)) {
1707 SET(bp->b_flags, B_DELWRI);
1708 if (p && p->p_stats)
1709 p->p_stats->p_ru.ru_oublock++; /* XXX */
91447636
A
1710 OSAddAtomic(1, &nbdwrite);
1711 buf_reassign(bp, vp);
1c79356b
A
1712 }
1713
1c79356b
A
1714 /* If this is a tape block, write the block now. */
1715 if (ISSET(bp->b_flags, B_TAPE)) {
91447636 1716 VNOP_BWRITE(bp);
9bccf70c 1717 return (0);
1c79356b
A
1718 }
1719
d52fe63f 1720 /*
91447636
A
1721 * if we're not LOCKED, but the total number of delayed writes
1722 * has climbed above 75% of the total buffers in the system
1723 * return an error if the caller has indicated that it can
1724 * handle one in this case, otherwise schedule the I/O now
1725 * this is done to prevent us from allocating tons of extra
1726 * buffers when dealing with virtual disks (i.e. DiskImages),
1727 * because additional buffers are dynamically allocated to prevent
1728 * deadlocks from occurring
1729 *
1730 * however, can't do a buf_bawrite() if the LOCKED bit is set because the
1731 * buffer is part of a transaction and can't go to disk until
1732 * the LOCKED bit is cleared.
d52fe63f 1733 */
b4c24cb9 1734 if (!ISSET(bp->b_flags, B_LOCKED) && nbdwrite > ((nbuf/4)*3)) {
9bccf70c
A
1735 if (return_error)
1736 return (EAGAIN);
91447636
A
1737 /*
1738 * If the vnode has "too many" write operations in progress
1739 * wait for them to finish the IO
1740 */
1741 (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, (char *)"buf_bdwrite");
1742
1743 return (buf_bawrite(bp));
d52fe63f
A
1744 }
1745
1c79356b
A
1746 /* Otherwise, the "write" is done, so mark and release the buffer. */
1747 SET(bp->b_flags, B_DONE);
91447636 1748 buf_brelse(bp);
9bccf70c 1749 return (0);
1c79356b
A
1750}
1751
91447636
A
1752errno_t
1753buf_bdwrite(buf_t bp)
9bccf70c 1754{
91447636 1755 return (bdwrite_internal(bp, 0));
9bccf70c
A
1756}
1757
1758
1c79356b 1759/*
91447636 1760 * Asynchronous block write; just an asynchronous buf_bwrite().
d52fe63f
A
1761 *
1762 * Note: With the ability to allocate additional buffer
1763 * headers, we can get into the situation where "too" many
1764 * buf_bawrite()s can create a situation where the kernel can create
d52fe63f
A
1765 * buffers faster than the disks can service.
1766 * We limit the number of "in flight" writes a vnode can have to
1767 * avoid this.
1c79356b 1768 */
9bccf70c 1769static int
91447636 1770bawrite_internal(buf_t bp, int throttle)
1c79356b 1771{
91447636 1772 vnode_t vp = bp->b_vp;
d52fe63f
A
1773
1774 if (vp) {
91447636
A
1775 if (throttle)
1776 /*
1777 * If the vnode has "too many" write operations in progress
1778 * wait for them to finish the IO
1779 */
1780 (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, (const char *)"buf_bawrite");
1781 else if (vp->v_numoutput >= VNODE_ASYNC_THROTTLE)
1782 /*
1783 * return to the caller and
1784 * let him decide what to do
1785 */
1786 return (EWOULDBLOCK);
d52fe63f 1787 }
1c79356b 1788 SET(bp->b_flags, B_ASYNC);
9bccf70c 1789
91447636 1790 return (VNOP_BWRITE(bp));
9bccf70c
A
1791}
1792
91447636
A
1793errno_t
1794buf_bawrite(buf_t bp)
9bccf70c 1795{
91447636 1796 return (bawrite_internal(bp, 1));
1c79356b
A
1797}
1798
91447636 1799
1c79356b
A
1800/*
1801 * Release a buffer on to the free lists.
1802 * Described in Bach (p. 46).
1803 */
1804void
91447636 1805buf_brelse(buf_t bp)
1c79356b
A
1806{
1807 struct bqueues *bufq;
91447636
A
1808 long whichq;
1809 upl_t upl;
1810 int need_wakeup = 0;
1811 int need_bp_wakeup = 0;
1812
1813
1814 if (bp->b_whichq != -1 || !(bp->b_lflags & BL_BUSY))
1815 panic("buf_brelse: bad buffer = %x\n", bp);
1816
1817#ifdef JOE_DEBUG
1818 bp->b_stackbrelse[0] = __builtin_return_address(0);
1819 bp->b_stackbrelse[1] = __builtin_return_address(1);
1820 bp->b_stackbrelse[2] = __builtin_return_address(2);
1821 bp->b_stackbrelse[3] = __builtin_return_address(3);
1822 bp->b_stackbrelse[4] = __builtin_return_address(4);
1823 bp->b_stackbrelse[5] = __builtin_return_address(5);
1824
1825 bp->b_lastbrelse = current_thread();
1826 bp->b_tag = 0;
1827#endif
1828 if (bp->b_lflags & BL_IOBUF) {
1829 free_io_buf(bp);
1830 return;
1831 }
1c79356b
A
1832
1833 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_START,
91447636 1834 bp->b_lblkno * PAGE_SIZE, (int)bp, (int)bp->b_datap,
fa4905b1 1835 bp->b_flags, 0);
1c79356b
A
1836
1837 trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
1838
91447636
A
1839 /*
1840 * if we're invalidating a buffer that has the B_FILTER bit
1841 * set then call the b_iodone function so it gets cleaned
1842 * up properly.
1843 *
1844 * the HFS journal code depends on this
1845 */
b4c24cb9 1846 if (ISSET(bp->b_flags, B_META) && ISSET(bp->b_flags, B_INVAL)) {
91447636
A
1847 if (ISSET(bp->b_flags, B_FILTER)) { /* if necessary, call out */
1848 void (*iodone_func)(struct buf *, void *) = bp->b_iodone;
1849 void *arg = (void *)bp->b_transaction;
b4c24cb9 1850
91447636 1851 CLR(bp->b_flags, B_FILTER); /* but note callout done */
b4c24cb9 1852 bp->b_iodone = NULL;
91447636 1853 bp->b_transaction = NULL;
b4c24cb9
A
1854
1855 if (iodone_func == NULL) {
1856 panic("brelse: bp @ 0x%x has NULL b_iodone!\n", bp);
1857 }
91447636 1858 (*iodone_func)(bp, arg);
b4c24cb9
A
1859 }
1860 }
91447636
A
1861 /*
1862 * I/O is done. Cleanup the UPL state
1863 */
1864 upl = bp->b_upl;
1865
1866 if ( !ISSET(bp->b_flags, B_META) && UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) {
1c79356b 1867 kern_return_t kret;
1c79356b
A
1868 int upl_flags;
1869
91447636 1870 if ( (upl == NULL) ) {
1c79356b 1871 if ( !ISSET(bp->b_flags, B_INVAL)) {
0b4e3aa0 1872 kret = ubc_create_upl(bp->b_vp,
91447636
A
1873 ubc_blktooff(bp->b_vp, bp->b_lblkno),
1874 bp->b_bufsize,
1875 &upl,
1876 NULL,
1877 UPL_PRECIOUS);
1878
1c79356b 1879 if (kret != KERN_SUCCESS)
91447636
A
1880 panic("brelse: Failed to create UPL");
1881#ifdef UPL_DEBUG
1c79356b 1882 upl_ubc_alias_set(upl, bp, 5);
91447636
A
1883#endif /* UPL_DEBUG */
1884 }
1c79356b 1885 } else {
91447636 1886 if (bp->b_datap) {
55e303ae
A
1887 kret = ubc_upl_unmap(upl);
1888
1889 if (kret != KERN_SUCCESS)
91447636
A
1890 panic("ubc_upl_unmap failed");
1891 bp->b_datap = (uintptr_t)NULL;
55e303ae 1892 }
1c79356b
A
1893 }
1894 if (upl) {
1c79356b 1895 if (bp->b_flags & (B_ERROR | B_INVAL)) {
91447636 1896 if (bp->b_flags & (B_READ | B_INVAL))
1c79356b
A
1897 upl_flags = UPL_ABORT_DUMP_PAGES;
1898 else
1899 upl_flags = 0;
91447636 1900
0b4e3aa0 1901 ubc_upl_abort(upl, upl_flags);
1c79356b 1902 } else {
91447636
A
1903 if (ISSET(bp->b_flags, B_DELWRI | B_WASDIRTY))
1904 upl_flags = UPL_COMMIT_SET_DIRTY ;
1905 else
1906 upl_flags = UPL_COMMIT_CLEAR_DIRTY ;
1907
0b4e3aa0 1908 ubc_upl_commit_range(upl, 0, bp->b_bufsize, upl_flags |
91447636 1909 UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
1c79356b 1910 }
91447636 1911 bp->b_upl = NULL;
1c79356b
A
1912 }
1913 } else {
91447636
A
1914 if ( (upl) )
1915 panic("brelse: UPL set for non VREG; vp=%x", bp->b_vp);
1c79356b
A
1916 }
1917
1c79356b 1918 /*
91447636 1919 * If it's locked, don't report an error; try again later.
1c79356b 1920 */
1c79356b
A
1921 if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
1922 CLR(bp->b_flags, B_ERROR);
91447636
A
1923 /*
1924 * If it's not cacheable, or an error, mark it invalid.
1925 */
1c79356b
A
1926 if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
1927 SET(bp->b_flags, B_INVAL);
91447636 1928
1c79356b
A
1929 if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
1930 /*
1931 * If it's invalid or empty, dissociate it from its vnode
1932 * and put on the head of the appropriate queue.
1933 */
91447636
A
1934 if (bp->b_vp)
1935 brelvp(bp);
1936
1937 if (ISSET(bp->b_flags, B_DELWRI))
1938 OSAddAtomic(-1, &nbdwrite);
1939
1940 CLR(bp->b_flags, (B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE));
1941 /*
1942 * Determine which queue the buffer should be on, then put it there.
1943 */
1c79356b
A
1944 if (bp->b_bufsize <= 0)
1945 whichq = BQ_EMPTY; /* no data */
9bccf70c
A
1946 else if (ISSET(bp->b_flags, B_META))
1947 whichq = BQ_META; /* meta-data */
1c79356b
A
1948 else
1949 whichq = BQ_AGE; /* invalid data */
1c79356b 1950 bufq = &bufqueues[whichq];
91447636
A
1951
1952 lck_mtx_lock(buf_mtxp);
1953
1c79356b
A
1954 binsheadfree(bp, bufq, whichq);
1955 } else {
1956 /*
1957 * It has valid data. Put it on the end of the appropriate
1958 * queue, so that it'll stick around for as long as possible.
1959 */
1960 if (ISSET(bp->b_flags, B_LOCKED))
1961 whichq = BQ_LOCKED; /* locked in core */
1962 else if (ISSET(bp->b_flags, B_META))
1963 whichq = BQ_META; /* meta-data */
1964 else if (ISSET(bp->b_flags, B_AGE))
1965 whichq = BQ_AGE; /* stale but valid data */
1966 else
1967 whichq = BQ_LRU; /* valid data */
1c79356b 1968 bufq = &bufqueues[whichq];
91447636
A
1969
1970 CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE));
1971
1972 lck_mtx_lock(buf_mtxp);
1973
1c79356b
A
1974 binstailfree(bp, bufq, whichq);
1975 }
91447636
A
1976 if (needbuffer) {
1977 /*
1978 * needbuffer is a global
1979 * we're currently using buf_mtxp to protect it
1980 * delay doing the actual wakeup until after
1981 * we drop buf_mtxp
1982 */
1983 needbuffer = 0;
1984 need_wakeup = 1;
1985 }
1986 if (ISSET(bp->b_lflags, BL_WANTED)) {
1987 /*
1988 * delay the actual wakeup until after we
1989 * clear BL_BUSY and we've dropped buf_mtxp
1990 */
1991 need_bp_wakeup = 1;
1992 }
1993 /*
1994 * Unlock the buffer.
1995 */
1996 CLR(bp->b_lflags, (BL_BUSY | BL_WANTED));
1c79356b 1997
91447636 1998 lck_mtx_unlock(buf_mtxp);
1c79356b 1999
91447636
A
2000 if (need_wakeup) {
2001 /*
2002 * Wake up any processes waiting for any buffer to become free.
2003 */
2004 wakeup(&needbuffer);
2005 }
2006 if (need_bp_wakeup) {
2007 /*
2008 * Wake up any processes waiting for _this_ buffer to become free.
2009 */
2010 wakeup(bp);
2011 }
1c79356b 2012 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_END,
91447636 2013 (int)bp, (int)bp->b_datap, bp->b_flags, 0, 0);
1c79356b
A
2014}
2015
2016/*
2017 * Determine if a block is in the cache.
2018 * Just look on what would be its hash chain. If it's there, return
2019 * a pointer to it, unless it's marked invalid. If it's marked invalid,
2020 * we normally don't return the buffer, unless the caller explicitly
2021 * wants us to.
2022 */
91447636
A
2023static boolean_t
2024incore(vnode_t vp, daddr64_t blkno)
2025{
2026 boolean_t retval;
2027
2028 lck_mtx_lock(buf_mtxp);
2029
2030 if (incore_locked(vp, blkno))
2031 retval = TRUE;
2032 else
2033 retval = FALSE;
2034 lck_mtx_unlock(buf_mtxp);
2035
2036 return (retval);
2037}
2038
2039
2040static buf_t
2041incore_locked(vnode_t vp, daddr64_t blkno)
1c79356b
A
2042{
2043 struct buf *bp;
1c79356b
A
2044
2045 bp = BUFHASH(vp, blkno)->lh_first;
2046
2047 /* Search hash chain */
9bccf70c 2048 for (; bp != NULL; bp = bp->b_hash.le_next) {
1c79356b 2049 if (bp->b_lblkno == blkno && bp->b_vp == vp &&
91447636 2050 !ISSET(bp->b_flags, B_INVAL)) {
1c79356b 2051 return (bp);
91447636 2052 }
1c79356b 2053 }
1c79356b
A
2054 return (0);
2055}
2056
fa4905b1
A
2057
2058/* XXX FIXME -- Update the comment to reflect the UBC changes (please) -- */
1c79356b
A
2059/*
2060 * Get a block of requested size that is associated with
2061 * a given vnode and block offset. If it is found in the
2062 * block cache, mark it as having been found, make it busy
2063 * and return it. Otherwise, return an empty block of the
2064 * correct size. It is up to the caller to ensure that the
2065 * cached blocks are of the correct size.
2066 */
91447636
A
2067buf_t
2068buf_getblk(vnode_t vp, daddr64_t blkno, int size, int slpflag, int slptimeo, int operation)
1c79356b 2069{
91447636
A
2070 buf_t bp;
2071 int err;
1c79356b
A
2072 upl_t upl;
2073 upl_page_info_t *pl;
1c79356b 2074 kern_return_t kret;
91447636
A
2075 int ret_only_valid;
2076 struct timespec ts;
2077 int upl_flags;
1c79356b 2078
1c79356b 2079 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_START,
91447636 2080 (int)(blkno * PAGE_SIZE), size, operation, 0, 0);
1c79356b 2081
91447636
A
2082 ret_only_valid = operation & BLK_ONLYVALID;
2083 operation &= ~BLK_ONLYVALID;
2084start:
2085 lck_mtx_lock(buf_mtxp);
2086start_locked:
2087 if ((bp = incore_locked(vp, blkno))) {
2088 /*
2089 * Found in the Buffer Cache
2090 */
2091 if (ISSET(bp->b_lflags, BL_BUSY)) {
2092 /*
2093 * but is busy
2094 */
1c79356b
A
2095 switch (operation) {
2096 case BLK_READ:
2097 case BLK_WRITE:
2098 case BLK_META:
91447636 2099 SET(bp->b_lflags, BL_WANTED);
1c79356b 2100 bufstats.bufs_busyincore++;
91447636
A
2101
2102 /*
2103 * don't retake the mutex after being awakened...
2104 * the time out is in msecs
2105 */
2106 ts.tv_sec = (slptimeo/1000);
2107 ts.tv_nsec = (slptimeo % 1000) * 10 * NSEC_PER_USEC * 1000;
2108
2109 err = msleep(bp, buf_mtxp, slpflag | PDROP | (PRIBIO + 1), "buf_getblk", &ts);
2110
1c79356b
A
2111 /*
2112 * Callers who call with PCATCH or timeout are
2113 * willing to deal with the NULL pointer
2114 */
91447636 2115 if (err && ((slpflag & PCATCH) || ((err == EWOULDBLOCK) && slptimeo)))
1c79356b
A
2116 return (NULL);
2117 goto start;
2118 /*NOTREACHED*/
2119 break;
2120
1c79356b 2121 default:
91447636
A
2122 /*
2123 * unknown operation requested
2124 */
2125 panic("getblk: paging or unknown operation for incore busy buffer - %x\n", operation);
1c79356b
A
2126 /*NOTREACHED*/
2127 break;
2128 }
2129 } else {
91447636
A
2130 /*
2131 * buffer in core and not busy
2132 */
2133 if ( (bp->b_upl) )
2134 panic("buffer has UPL, but not marked BUSY: %x", bp);
2135 SET(bp->b_lflags, BL_BUSY);
2136 SET(bp->b_flags, B_CACHE);
2137#ifdef JOE_DEBUG
2138 bp->b_owner = current_thread();
2139 bp->b_tag = 1;
2140#endif
2141 bremfree_locked(bp);
1c79356b 2142 bufstats.bufs_incore++;
91447636
A
2143
2144 lck_mtx_unlock(buf_mtxp);
1c79356b 2145
91447636
A
2146 if ( !ret_only_valid)
2147 allocbuf(bp, size);
1c79356b 2148
91447636 2149 upl_flags = 0;
1c79356b 2150 switch (operation) {
1c79356b 2151 case BLK_WRITE:
91447636
A
2152 /*
2153 * "write" operation: let the UPL subsystem
2154 * know that we intend to modify the buffer
2155 * cache pages we're gathering.
2156 */
2157 upl_flags |= UPL_WILL_MODIFY;
2158 case BLK_READ:
2159 upl_flags |= UPL_PRECIOUS;
2160 if (UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) {
0b4e3aa0 2161 kret = ubc_create_upl(vp,
91447636
A
2162 ubc_blktooff(vp, bp->b_lblkno),
2163 bp->b_bufsize,
2164 &upl,
2165 &pl,
2166 upl_flags);
1c79356b 2167 if (kret != KERN_SUCCESS)
91447636 2168 panic("Failed to create UPL");
1c79356b 2169
91447636 2170 bp->b_upl = upl;
1c79356b 2171
91447636
A
2172 if (upl_valid_page(pl, 0)) {
2173 if (upl_dirty_page(pl, 0))
2174 SET(bp->b_flags, B_WASDIRTY);
2175 else
2176 CLR(bp->b_flags, B_WASDIRTY);
2177 } else
2178 CLR(bp->b_flags, (B_DONE | B_CACHE | B_WASDIRTY | B_DELWRI));
1c79356b 2179
91447636 2180 kret = ubc_upl_map(upl, (vm_address_t *)&(bp->b_datap));
1c79356b 2181
9bccf70c 2182 if (kret != KERN_SUCCESS)
91447636 2183 panic("getblk: ubc_upl_map() failed with (%d)", kret);
1c79356b
A
2184 }
2185 break;
2186
2187 case BLK_META:
2188 /*
2189 * VM is not involved in IO for the meta data
2190 * buffer already has valid data
2191 */
1c79356b
A
2192 break;
2193
2194 default:
91447636 2195 panic("getblk: paging or unknown operation for incore buffer- %d\n", operation);
1c79356b
A
2196 /*NOTREACHED*/
2197 break;
2198 }
2199 }
2200 } else { /* not incore() */
2201 int queue = BQ_EMPTY; /* Start with no preference */
1c79356b 2202
91447636
A
2203 if (ret_only_valid) {
2204 lck_mtx_unlock(buf_mtxp);
2205 return (NULL);
1c79356b 2206 }
91447636
A
2207
2208 if ((UBCINVALID(vp)) || !(UBCINFOEXISTS(vp)))
2209 operation = BLK_META;
2210
1c79356b 2211 if ((bp = getnewbuf(slpflag, slptimeo, &queue)) == NULL)
91447636
A
2212 goto start_locked;
2213
2214 /*
2215 * getnewbuf may block for a number of different reasons...
2216 * if it does, it's then possible for someone else to
2217 * create a buffer for the same block and insert it into
2218 * the hash... if we see it incore at this point we dump
2219 * the buffer we were working on and start over
2220 */
2221 if (incore_locked(vp, blkno)) {
0b4e3aa0
A
2222 SET(bp->b_flags, B_INVAL);
2223 binshash(bp, &invalhash);
91447636
A
2224
2225 lck_mtx_unlock(buf_mtxp);
2226
2227 buf_brelse(bp);
0b4e3aa0
A
2228 goto start;
2229 }
b4c24cb9
A
2230 /*
2231 * NOTE: YOU CAN NOT BLOCK UNTIL binshash() HAS BEEN
2232 * CALLED! BE CAREFUL.
2233 */
0b4e3aa0 2234
1c79356b 2235 /*
91447636 2236 * mark the buffer as B_META if indicated
1c79356b 2237 * so that when the buffer is released it will go to the META queue
1c79356b 2238 */
91447636
A
2239 if (operation == BLK_META)
2240 SET(bp->b_flags, B_META);
9bccf70c
A
2241
2242 bp->b_blkno = bp->b_lblkno = blkno;
2243 bp->b_vp = vp;
2244
0b4e3aa0
A
2245 /*
2246 * Insert in the hash so that incore() can find it
2247 */
2248 binshash(bp, BUFHASH(vp, blkno));
2249
91447636
A
2250 lck_mtx_unlock(buf_mtxp);
2251
9bccf70c 2252 bgetvp(vp, bp);
9bccf70c 2253
1c79356b
A
2254 allocbuf(bp, size);
2255
91447636 2256 upl_flags = 0;
1c79356b
A
2257 switch (operation) {
2258 case BLK_META:
91447636
A
2259 /*
2260 * buffer data is invalid...
2261 *
2262 * I don't want to have to retake buf_mtxp,
2263 * so the miss and vmhits counters are done
2264 * with Atomic updates... all other counters
2265 * in bufstats are protected with either
2266 * buf_mtxp or iobuffer_mtxp
2267 */
2268 OSAddAtomic(1, &bufstats.bufs_miss);
1c79356b
A
2269 break;
2270
1c79356b 2271 case BLK_WRITE:
91447636
A
2272 /*
2273 * "write" operation: let the UPL subsystem know
2274 * that we intend to modify the buffer cache pages
2275 * we're gathering.
2276 */
2277 upl_flags |= UPL_WILL_MODIFY;
2278 case BLK_READ:
2279 { off_t f_offset;
2280 size_t contig_bytes;
2281 int bmap_flags;
1c79356b 2282
91447636
A
2283 if ( (bp->b_upl) )
2284 panic("bp already has UPL: %x",bp);
1c79356b 2285
91447636
A
2286 f_offset = ubc_blktooff(vp, blkno);
2287
2288 upl_flags |= UPL_PRECIOUS;
0b4e3aa0 2289 kret = ubc_create_upl(vp,
91447636
A
2290 f_offset,
2291 bp->b_bufsize,
2292 &upl,
2293 &pl,
2294 upl_flags);
1c79356b 2295
91447636
A
2296 if (kret != KERN_SUCCESS)
2297 panic("Failed to create UPL");
2298#ifdef UPL_DEBUG
1c79356b 2299 upl_ubc_alias_set(upl, bp, 4);
91447636
A
2300#endif /* UPL_DEBUG */
2301 bp->b_upl = upl;
1c79356b
A
2302
2303 if (upl_valid_page(pl, 0)) {
1c79356b 2304
91447636
A
2305 if (operation == BLK_READ)
2306 bmap_flags = VNODE_READ;
2307 else
2308 bmap_flags = VNODE_WRITE;
1c79356b 2309
91447636 2310 SET(bp->b_flags, B_CACHE | B_DONE);
1c79356b 2311
91447636 2312 OSAddAtomic(1, &bufstats.bufs_vmhits);
1c79356b 2313
91447636
A
2314 bp->b_validoff = 0;
2315 bp->b_dirtyoff = 0;
1c79356b 2316
91447636
A
2317 if (upl_dirty_page(pl, 0)) {
2318 /* page is dirty */
2319 SET(bp->b_flags, B_WASDIRTY);
1c79356b 2320
91447636
A
2321 bp->b_validend = bp->b_bcount;
2322 bp->b_dirtyend = bp->b_bcount;
1c79356b 2323 } else {
91447636
A
2324 /* page is clean */
2325 bp->b_validend = bp->b_bcount;
2326 bp->b_dirtyend = 0;
1c79356b 2327 }
91447636
A
2328 /*
2329 * try to recreate the physical block number associated with
2330 * this buffer...
2331 */
2332 if (VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))
2333 panic("getblk: VNOP_BLOCKMAP failed");
2334 /*
2335 * if the extent represented by this buffer
2336 * is not completely physically contiguous on
2337 * disk, then we can't cache the physical mapping
2338 * in the buffer header
2339 */
2340 if ((long)contig_bytes < bp->b_bcount)
2341 bp->b_blkno = bp->b_lblkno;
1c79356b 2342 } else {
91447636 2343 OSAddAtomic(1, &bufstats.bufs_miss);
1c79356b 2344 }
91447636 2345 kret = ubc_upl_map(upl, (vm_address_t *)&(bp->b_datap));
1c79356b 2346
91447636
A
2347 if (kret != KERN_SUCCESS)
2348 panic("getblk: ubc_upl_map() failed with (%d)", kret);
1c79356b 2349 break;
91447636 2350 }
1c79356b 2351 default:
91447636 2352 panic("getblk: paging or unknown operation - %x", operation);
1c79356b
A
2353 /*NOTREACHED*/
2354 break;
2355 }
2356 }
1c79356b 2357 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_END,
91447636
A
2358 (int)bp, (int)bp->b_datap, bp->b_flags, 3, 0);
2359
2360#ifdef JOE_DEBUG
2361 bp->b_stackgetblk[0] = __builtin_return_address(0);
2362 bp->b_stackgetblk[1] = __builtin_return_address(1);
2363 bp->b_stackgetblk[2] = __builtin_return_address(2);
2364 bp->b_stackgetblk[3] = __builtin_return_address(3);
2365 bp->b_stackgetblk[4] = __builtin_return_address(4);
2366 bp->b_stackgetblk[5] = __builtin_return_address(5);
2367#endif
1c79356b
A
2368 return (bp);
2369}
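
/*
 * A hedged usage sketch of buf_getblk(): how a caller might obtain a
 * meta-data block and hand it back to the cache.  The helper name below is
 * hypothetical, the miss-handling step is elided because filling a buffer
 * is filesystem specific, and this block is never compiled.
 */
#if 0	/* illustrative sketch only -- not compiled */
static buf_t
my_get_meta_block(vnode_t vp, daddr64_t blkno, int size)
{
	buf_t	bp;

	/* find the block in the cache, or get an empty buffer of 'size' */
	bp = buf_getblk(vp, blkno, size, 0, 0, BLK_META);

	if ( !(buf_flags(bp) & B_CACHE)) {
		/*
		 * cache miss: a real caller would fill bp here (issue the
		 * read and wait with buf_biowait()) before using the data
		 */
	}
	return (bp);	/* caller releases with buf_brelse(bp) when done */
}
#endif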
2370
2371/*
2372 * Get an empty, disassociated buffer of given size.
2373 */
91447636
A
2374buf_t
2375 buf_geteblk(int size)
2377{
91447636
A
2378 buf_t bp;
2379 int queue = BQ_EMPTY;
2380
2381 lck_mtx_lock(buf_mtxp);
1c79356b
A
2382
2383 while ((bp = getnewbuf(0, 0, &queue)) == 0)
2384 ;
1c79356b 2385 SET(bp->b_flags, (B_META|B_INVAL));
1c79356b
A
2386
2387#if DIAGNOSTIC
2388 assert(queue == BQ_EMPTY);
2389#endif /* DIAGNOSTIC */
2390 /* XXX need to implement logic to deal with other queues */
2391
1c79356b 2392 binshash(bp, &invalhash);
1c79356b
A
2393 bufstats.bufs_eblk++;
2394
91447636
A
2395 lck_mtx_unlock(buf_mtxp);
2396
2397 allocbuf(bp, size);
2398
1c79356b
A
2399 return (bp);
2400}
2401
1c79356b
A
2402/*
2403 * Zones for the meta data buffers
2404 */
2405
2406#define MINMETA 512
2407#define MAXMETA 4096
2408
2409struct meta_zone_entry {
2410 zone_t mz_zone;
2411 vm_size_t mz_size;
2412 vm_size_t mz_max;
2413 char *mz_name;
2414};
2415
2416struct meta_zone_entry meta_zones[] = {
2417 {NULL, (MINMETA * 1), 128 * (MINMETA * 1), "buf.512" },
2418 {NULL, (MINMETA * 2), 64 * (MINMETA * 2), "buf.1024" },
1c79356b 2419 {NULL, (MINMETA * 4), 16 * (MINMETA * 4), "buf.2048" },
1c79356b
A
2420 {NULL, (MINMETA * 8), 512 * (MINMETA * 8), "buf.4096" },
2421 {NULL, 0, 0, "" } /* End */
2422};
765c9de3 2423
1c79356b
A
2424/*
2425 * Initialize the meta data zones
2426 */
2427static void
2428bufzoneinit(void)
2429{
2430 int i;
2431
2432 for (i = 0; meta_zones[i].mz_size != 0; i++) {
2433 meta_zones[i].mz_zone =
2434 zinit(meta_zones[i].mz_size,
2435 meta_zones[i].mz_max,
2436 PAGE_SIZE,
2437 meta_zones[i].mz_name);
2438 }
765c9de3 2439 buf_hdr_zone = zinit(sizeof(struct buf), 32, PAGE_SIZE, "buf headers");
1c79356b
A
2440}
2441
9bccf70c 2442static __inline__ zone_t
1c79356b
A
2443getbufzone(size_t size)
2444{
2445 int i;
2446
9bccf70c 2447 if ((size % 512) || (size < MINMETA) || (size > MAXMETA))
1c79356b
A
2448 panic("getbufzone: incorrect size = %d", size);
2449
91447636 2450 for (i = 0; meta_zones[i].mz_size != 0; i++) {
9bccf70c
A
2451 if (meta_zones[i].mz_size >= size)
2452 break;
2453 }
2454
1c79356b
A
2455 return (meta_zones[i].mz_zone);
2456}
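
/*
 * An illustrative sketch of the zone-selection rule getbufzone() implements
 * above: a meta-data request is rounded up to a multiple of MINMETA by
 * allocbuf(), then served from the first zone whose element size covers it
 * (e.g. a 1536-byte request lands in "buf.2048").  The helper below is
 * hypothetical and never compiled.
 */
#if 0	/* illustrative sketch only -- not compiled */
static vm_size_t
meta_zone_elem_size(size_t size)
{
	int i;

	for (i = 0; meta_zones[i].mz_size != 0; i++) {
		if (meta_zones[i].mz_size >= size)
			return (meta_zones[i].mz_size);
	}
	/* larger than MAXMETA: allocbuf() falls back to kmem_alloc() */
	return (0);
}
#endif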
1c79356b
A
2457
2458/*
2459 * With UBC, there is no need to expand / shrink the file data
2460 * buffer. The VM uses the same pages, hence no waste.
2461 * All the file data buffers can have one size.
2462 * In fact expand / shrink would be an expensive operation.
2463 *
2464 * The only exception to this is meta-data buffers. Most
2465 * meta-data operations are smaller than PAGE_SIZE. Having the
2466 * meta-data buffers grow and shrink as needed optimizes the use
2467 * of kernel wired memory.
2468 */
2469
2470int
91447636 2471allocbuf(buf_t bp, int size)
1c79356b
A
2472{
2473 vm_size_t desired_size;
2474
2475 desired_size = roundup(size, CLBYTES);
2476
91447636 2477 if (desired_size < PAGE_SIZE)
1c79356b
A
2478 desired_size = PAGE_SIZE;
2479 if (desired_size > MAXBSIZE)
2480 panic("allocbuf: buffer larger than MAXBSIZE requested");
2481
1c79356b 2482 if (ISSET(bp->b_flags, B_META)) {
1c79356b 2483 zone_t zprev, z;
91447636
A
2484 int nsize = roundup(size, MINMETA);
2485
2486 if (bp->b_datap) {
2487 vm_offset_t elem = (vm_offset_t)bp->b_datap;
2488
2489 if (ISSET(bp->b_flags, B_ZALLOC)) {
2490 if (bp->b_bufsize < nsize) {
2491 /* reallocate to a bigger size */
2492
2493 zprev = getbufzone(bp->b_bufsize);
2494 if (nsize <= MAXMETA) {
2495 desired_size = nsize;
2496 z = getbufzone(nsize);
2497 bp->b_datap = (uintptr_t)zalloc(z);
1c79356b 2498 } else {
91447636 2499 bp->b_datap = (uintptr_t)NULL;
21362eb3 2500 kmem_alloc(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size);
91447636 2501 CLR(bp->b_flags, B_ZALLOC);
1c79356b 2502 }
91447636
A
2503 bcopy((void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize);
2504 zfree(zprev, (void *)elem);
2505 } else {
2506 desired_size = bp->b_bufsize;
2507 }
2508
2509 } else {
2510 if ((vm_size_t)bp->b_bufsize < desired_size) {
1c79356b 2511 /* reallocate to a bigger size */
91447636 2512 bp->b_datap = (uintptr_t)NULL;
21362eb3 2513 kmem_alloc(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size);
91447636 2514 bcopy((const void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize);
1c79356b
A
2515 kmem_free(kernel_map, elem, bp->b_bufsize);
2516 } else {
2517 desired_size = bp->b_bufsize;
2518 }
91447636 2519 }
1c79356b
A
2520 } else {
2521 /* new allocation */
2522 if (nsize <= MAXMETA) {
2523 desired_size = nsize;
2524 z = getbufzone(nsize);
91447636 2525 bp->b_datap = (uintptr_t)zalloc(z);
1c79356b 2526 SET(bp->b_flags, B_ZALLOC);
91447636 2527 } else
21362eb3 2528 kmem_alloc(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size);
1c79356b
A
2529 }
2530 }
9bccf70c
A
2531 bp->b_bufsize = desired_size;
2532 bp->b_bcount = size;
91447636 2533
9bccf70c 2534 return (0);
1c79356b
A
2535}
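
/*
 * A minimal sketch of the grow-only reallocation pattern allocbuf() uses
 * for meta-data buffers: allocate the larger zone element first, copy the
 * old contents, then free the old element; shrinking is never done.  This
 * assumes both the old and new sizes fit a meta zone (<= MAXMETA).  The
 * helper name is hypothetical and the block is never compiled.
 */
#if 0	/* illustrative sketch only -- not compiled */
static void
grow_meta_buffer(buf_t bp, int nsize)
{
	vm_offset_t	old_elem = (vm_offset_t)bp->b_datap;
	int		old_size = bp->b_bufsize;
	zone_t		zold, znew;

	zold = getbufzone(old_size);
	znew = getbufzone(nsize);

	bp->b_datap = (uintptr_t)zalloc(znew);		/* new, larger element */
	bcopy((void *)old_elem, (caddr_t)bp->b_datap, old_size);
	zfree(zold, (void *)old_elem);			/* release the old one */

	bp->b_bufsize = nsize;
}
#endif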
2536
2537/*
2538 * Get a new buffer from one of the free lists.
2539 *
2540 * A request for a queue is passed in. The queue from which the buffer was
2541 * taken is returned. Out-of-range queue requests get BQ_EMPTY. A request for
2542 * BQUEUES means no preference; use heuristics in that case.
2543 * The heuristic is as follows:
2544 * Try BQ_AGE, BQ_LRU, BQ_EMPTY, BQ_META in that order.
2545 * If none are available, block until one is made available.
2546 * If buffers are available on both BQ_AGE and BQ_LRU, check the timestamps and
2547 * pick the most stale buffer.
2548 * If the found buffer was marked delayed write, start the async write
2549 * and restart the search.
2550 * Initialize the fields and disassociate the buffer from the vnode.
2551 * Remove the buffer from the hash. Return the buffer and the queue
2552 * on which it was found.
91447636
A
2553 *
2554 * buf_mtxp is held upon entry
2555 * returns with buf_mtxp locked
1c79356b
A
2556 */
2557
91447636
A
2558static buf_t
2559getnewbuf(int slpflag, int slptimeo, int * queue)
1c79356b 2560{
91447636
A
2561 buf_t bp;
2562 buf_t lru_bp;
2563 buf_t age_bp;
2564 buf_t meta_bp;
2565 int age_time, lru_time, bp_time, meta_time;
2566 int req = *queue; /* save it for restarts */
2567 struct timespec ts;
1c79356b
A
2568
2569start:
91447636
A
2570 /*
2571 * invalid request gets empty queue
2572 */
765c9de3
A
2573 if ((*queue > BQUEUES) || (*queue < 0)
2574 || (*queue == BQ_LAUNDRY) || (*queue == BQ_LOCKED))
1c79356b
A
2575 *queue = BQ_EMPTY;
2576
91447636
A
2577 /*
2578 * (*queue == BQUEUES) means no preference
2579 */
1c79356b
A
2580 if (*queue != BQUEUES) {
2581 /* Try for the requested queue first */
2582 bp = bufqueues[*queue].tqh_first;
2583 if (bp)
2584 goto found;
2585 }
2586
2587 /* Unable to use requested queue */
2588 age_bp = bufqueues[BQ_AGE].tqh_first;
2589 lru_bp = bufqueues[BQ_LRU].tqh_first;
2590 meta_bp = bufqueues[BQ_META].tqh_first;
2591
9bccf70c
A
2592 if (!age_bp && !lru_bp && !meta_bp) {
2593 /*
2594 * Unavailable on AGE, LRU, or META queues
2595 * Try the empty list first
2596 */
1c79356b
A
2597 bp = bufqueues[BQ_EMPTY].tqh_first;
2598 if (bp) {
2599 *queue = BQ_EMPTY;
2600 goto found;
2601 }
91447636 2602 lck_mtx_unlock(buf_mtxp);
765c9de3 2603
91447636 2604 /* Create a new temporary buffer header */
765c9de3
A
2605 bp = (struct buf *)zalloc(buf_hdr_zone);
2606
91447636
A
2607 lck_mtx_lock(buf_mtxp);
2608
765c9de3
A
2609 if (bp) {
2610 bufhdrinit(bp);
2611 BLISTNONE(bp);
2612 binshash(bp, &invalhash);
2613 SET(bp->b_flags, B_HDRALLOC);
2614 *queue = BQ_EMPTY;
2615 binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
2616 buf_hdr_count++;
2617 goto found;
2618 }
91447636 2619 bufstats.bufs_sleeps++;
765c9de3 2620
1c79356b
A
2621 /* wait for a free buffer of any kind */
2622 needbuffer = 1;
91447636
A
2623 /* hz value is 100 */
2624 ts.tv_sec = (slptimeo/1000);
2625 /* the hz value is 100; which leads to 10ms */
2626 ts.tv_nsec = (slptimeo % 1000) * NSEC_PER_USEC * 1000 * 10;
2627 msleep(&needbuffer, buf_mtxp, slpflag|(PRIBIO+1), (char *)"getnewbuf", &ts);
21362eb3 2628
1c79356b
A
2629 return (0);
2630 }
2631
2632 /* Buffer available either on AGE or LRU or META */
2633 bp = NULL;
2634 *queue = -1;
2635
2636 /* Buffer available either on AGE or LRU */
2637 if (!age_bp) {
2638 bp = lru_bp;
2639 *queue = BQ_LRU;
2640 } else if (!lru_bp) {
2641 bp = age_bp;
2642 *queue = BQ_AGE;
2643 } else { /* buffer available on both AGE and LRU */
91447636
A
2644 int t = buf_timestamp();
2645
2646 age_time = t - age_bp->b_timestamp;
2647 lru_time = t - lru_bp->b_timestamp;
1c79356b
A
2648 if ((age_time < 0) || (lru_time < 0)) { /* time set backwards */
2649 bp = age_bp;
2650 *queue = BQ_AGE;
2651 /*
2652 * we should probably re-timestamp everything in the
2653 * queues at this point with the current time
2654 */
2655 } else {
2656 if ((lru_time >= lru_is_stale) && (age_time < age_is_stale)) {
2657 bp = lru_bp;
2658 *queue = BQ_LRU;
2659 } else {
2660 bp = age_bp;
2661 *queue = BQ_AGE;
2662 }
2663 }
2664 }
2665
2666 if (!bp) { /* Neither on AGE nor on LRU */
2667 bp = meta_bp;
2668 *queue = BQ_META;
2669 } else if (meta_bp) {
91447636
A
2670 int t = buf_timestamp();
2671
2672 bp_time = t - bp->b_timestamp;
2673 meta_time = t - meta_bp->b_timestamp;
1c79356b
A
2674
2675 if (!(bp_time < 0) && !(meta_time < 0)) {
2676 /* time not set backwards */
2677 int bp_is_stale;
2678 bp_is_stale = (*queue == BQ_LRU) ?
2679 lru_is_stale : age_is_stale;
2680
2681 if ((meta_time >= meta_is_stale) &&
2682 (bp_time < bp_is_stale)) {
2683 bp = meta_bp;
2684 *queue = BQ_META;
2685 }
2686 }
2687 }
1c79356b 2688found:
91447636
A
2689 if (ISSET(bp->b_flags, B_LOCKED) || ISSET(bp->b_lflags, BL_BUSY))
2690 panic("getnewbuf: bp @ 0x%x is LOCKED or BUSY! (flags 0x%x)\n", bp, bp->b_flags);
1c79356b
A
2691
2692 /* Clean it */
2693 if (bcleanbuf(bp)) {
91447636
A
2694 /*
2695 * moved to the laundry thread, buffer not ready
2696 */
1c79356b
A
2697 *queue = req;
2698 goto start;
2699 }
1c79356b
A
2700 return (bp);
2701}
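
/*
 * An illustrative sketch of the age-vs-LRU selection rule in getnewbuf()
 * above: the LRU buffer is preferred only when it is already stale while
 * the AGE buffer is not; otherwise the AGE buffer (the cheaper one to give
 * up) is recycled.  The helper name is hypothetical and never compiled.
 */
#if 0	/* illustrative sketch only -- not compiled */
static int
prefer_lru(int now, int lru_stamp, int age_stamp)
{
	int lru_time = now - lru_stamp;
	int age_time = now - age_stamp;

	if (lru_time < 0 || age_time < 0)
		return (0);	/* clock went backwards: just take the AGE buffer */

	return ((lru_time >= lru_is_stale) && (age_time < age_is_stale));
}
#endif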
9bccf70c 2702
1c79356b
A
2703
2704/*
2705 * Clean a buffer.
2706 * Returns 0 if the buffer is ready to use,
91447636 2707 * Returns 1 if issued a buf_bawrite() to indicate
1c79356b 2708 * that the buffer is not ready.
91447636
A
2709 *
2710 * buf_mtxp is held upon entry
2711 * returns with buf_mtxp locked
1c79356b 2712 */
9bccf70c 2713static int
91447636 2714bcleanbuf(buf_t bp)
1c79356b 2715{
21362eb3
A
2716 ucred_t cred;
2717
2718
1c79356b 2719 /* Remove from the queue */
91447636 2720 bremfree_locked(bp);
1c79356b
A
2721
2722 /* Buffer is no longer on free lists. */
91447636
A
2723 SET(bp->b_lflags, BL_BUSY);
2724#ifdef JOE_DEBUG
2725 bp->b_owner = current_thread();
2726 bp->b_tag = 2;
2727#endif
765c9de3
A
2728 /*
2729 * If buffer was a delayed write, start the IO by queuing
2730 * it on the LAUNDRY queue, and return 1
2731 */
1c79356b 2732 if (ISSET(bp->b_flags, B_DELWRI)) {
765c9de3
A
2733 binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY);
2734 blaundrycnt++;
91447636
A
2735
2736 lck_mtx_unlock(buf_mtxp);
2737
765c9de3 2738 wakeup(&blaundrycnt);
9bccf70c
A
2739 /* and give it a chance to run */
2740 (void)thread_block(THREAD_CONTINUE_NULL);
91447636
A
2741
2742 lck_mtx_lock(buf_mtxp);
1c79356b
A
2743 return (1);
2744 }
91447636
A
2745 bremhash(bp);
2746
2747 lck_mtx_unlock(buf_mtxp);
2748
2749 BLISTNONE(bp);
2750 /*
2751 * disassociate us from our vnode, if we had one...
2752 */
2753 if (bp->b_vp)
2754 brelvp(bp);
2755
2756 if (ISSET(bp->b_flags, B_META)) {
2757 vm_offset_t elem;
2758
2759 elem = (vm_offset_t)bp->b_datap;
2760 bp->b_datap = (uintptr_t)0xdeadbeef;
2761
2762 if (ISSET(bp->b_flags, B_ZALLOC)) {
2763 zone_t z;
2764
2765 z = getbufzone(bp->b_bufsize);
2766 zfree(z, (void *)elem);
2767 } else
2768 kmem_free(kernel_map, elem, bp->b_bufsize);
2769 }
2770
2771 trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
2772
2773 /* clear out various other fields */
2774 bp->b_bufsize = 0;
2775 bp->b_datap = (uintptr_t)NULL;
2776 bp->b_upl = (void *)NULL;
2777 /*
2778 * preserve the state of whether this buffer
2779 * was allocated on the fly or not...
2780 * the only other flag that should be set at
2781 * this point is BL_BUSY...
2782 */
2783#ifdef JOE_DEBUG
2784 bp->b_owner = current_thread();
2785 bp->b_tag = 3;
2786#endif
2787 bp->b_lflags = BL_BUSY;
2788 bp->b_flags = (bp->b_flags & B_HDRALLOC);
2789 bp->b_dev = NODEV;
2790 bp->b_blkno = bp->b_lblkno = 0;
2791 bp->b_iodone = NULL;
2792 bp->b_error = 0;
2793 bp->b_resid = 0;
2794 bp->b_bcount = 0;
2795 bp->b_dirtyoff = bp->b_dirtyend = 0;
2796 bp->b_validoff = bp->b_validend = 0;
2797
2798 /* nuke any credentials we were holding */
21362eb3
A
2799 cred = bp->b_rcred;
2800 if (cred != NOCRED) {
2801 bp->b_rcred = NOCRED;
2802 kauth_cred_rele(cred);
91447636 2803 }
21362eb3
A
2804 cred = bp->b_wcred;
2805 if (cred != NOCRED) {
2806 bp->b_wcred = NOCRED;
2807 kauth_cred_rele(cred);
91447636
A
2808 }
2809 lck_mtx_lock(buf_mtxp);
2810
2811 return (0);
2812}
2813
2814
2815
2816errno_t
2817buf_invalblkno(vnode_t vp, daddr64_t lblkno, int flags)
2818{
2819 buf_t bp;
2820 errno_t error;
2821
2822 lck_mtx_lock(buf_mtxp);
2823relook:
2824 if ((bp = incore_locked(vp, lblkno)) == (struct buf *)0) {
2825 lck_mtx_unlock(buf_mtxp);
2826 return (0);
2827 }
2828 if (ISSET(bp->b_lflags, BL_BUSY)) {
2829 if ( !ISSET(flags, BUF_WAIT)) {
2830 lck_mtx_unlock(buf_mtxp);
2831 return (EBUSY);
2832 }
2833 SET(bp->b_lflags, BL_WANTED);
2834
2835 error = msleep((caddr_t)bp, buf_mtxp, (PRIBIO + 1), (char *)"buf_invalblkno", 0);
2836
2837 if (error)
2838 return (error);
2839 goto relook;
2840 }
2841 bremfree_locked(bp);
2842 SET(bp->b_lflags, BL_BUSY);
2843 SET(bp->b_flags, B_INVAL);
2844#ifdef JOE_DEBUG
2845 bp->b_owner = current_thread();
2846 bp->b_tag = 4;
2847#endif
2848 lck_mtx_unlock(buf_mtxp);
2849 buf_brelse(bp);
2850
2851 return (0);
2852}
2853
2854
2855void
2856buf_drop(buf_t bp)
2857{
2858 int need_wakeup = 0;
2859
2860 lck_mtx_lock(buf_mtxp);
2861
2862 if (ISSET(bp->b_lflags, BL_WANTED)) {
2863 /*
2864 * delay the actual wakeup until after we
2865 * clear BL_BUSY and we've dropped buf_mtxp
2866 */
2867 need_wakeup = 1;
2868 }
2869 /*
2870 * Unlock the buffer.
2871 */
2872 CLR(bp->b_lflags, (BL_BUSY | BL_WANTED));
1c79356b 2873
91447636 2874 lck_mtx_unlock(buf_mtxp);
1c79356b 2875
91447636
A
2876 if (need_wakeup) {
2877 /*
2878 * Wake up any processes waiting for _this_ buffer to become free.
2879 */
2880 wakeup(bp);
2881 }
2882}
1c79356b 2883
1c79356b 2884
91447636
A
2885errno_t
2886buf_acquire(buf_t bp, int flags, int slpflag, int slptimeo) {
2887 errno_t error;
1c79356b 2888
91447636 2889 lck_mtx_lock(buf_mtxp);
1c79356b 2890
91447636 2891 error = buf_acquire_locked(bp, flags, slpflag, slptimeo);
1c79356b 2892
91447636 2893 lck_mtx_unlock(buf_mtxp);
1c79356b 2894
91447636
A
2895 return (error);
2896}
1c79356b 2897
91447636
A
2898
2899static errno_t
2900buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo)
2901{
2902 errno_t error;
2903 struct timespec ts;
2904
2905 if (ISSET(bp->b_flags, B_LOCKED)) {
2906 if ((flags & BAC_SKIP_LOCKED))
2907 return (EDEADLK);
2908 } else {
2909 if ((flags & BAC_SKIP_NONLOCKED))
2910 return (EDEADLK);
1c79356b 2911 }
91447636
A
2912 if (ISSET(bp->b_lflags, BL_BUSY)) {
2913 /*
2914 * since the mutex_lock may block, the buffer
2915 * may become BUSY, so we need to
2916 * recheck for a NOWAIT request
2917 */
2918 if (flags & BAC_NOWAIT)
2919 return (EBUSY);
2920 SET(bp->b_lflags, BL_WANTED);
2921
2922 /* the hz value is 100; which leads to 10ms */
2923 ts.tv_sec = (slptimeo/100);
2924 ts.tv_nsec = (slptimeo % 100) * 10 * NSEC_PER_USEC * 1000;
2925 error = msleep((caddr_t)bp, buf_mtxp, slpflag | (PRIBIO + 1), (char *)"buf_acquire", &ts);
2926
2927 if (error)
2928 return (error);
2929 return (EAGAIN);
1c79356b 2930 }
91447636
A
2931 if (flags & BAC_REMOVE)
2932 bremfree_locked(bp);
2933 SET(bp->b_lflags, BL_BUSY);
2934#ifdef JOE_DEBUG
2935 bp->b_owner = current_thread();
2936 bp->b_tag = 5;
2937#endif
1c79356b
A
2938 return (0);
2939}
2940
2941
2942/*
2943 * Wait for operations on the buffer to complete.
2944 * When they do, extract and return the I/O's error value.
2945 */
91447636
A
2946errno_t
2947buf_biowait(buf_t bp)
1c79356b 2948{
91447636 2949 lck_mtx_lock(buf_mtxp);
1c79356b 2950
1c79356b 2951 while (!ISSET(bp->b_flags, B_DONE))
91447636
A
2952 (void) msleep(bp, buf_mtxp, (PRIBIO+1), (char *)"buf_biowait", 0);
2953
2954 lck_mtx_unlock(buf_mtxp);
1c79356b
A
2955
2956 /* check for interruption of I/O (e.g. via NFS), then errors. */
2957 if (ISSET(bp->b_flags, B_EINTR)) {
2958 CLR(bp->b_flags, B_EINTR);
2959 return (EINTR);
2960 } else if (ISSET(bp->b_flags, B_ERROR))
2961 return (bp->b_error ? bp->b_error : EIO);
2962 else
2963 return (0);
2964}
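
/*
 * A hedged sketch of the synchronous completion pattern buf_biowait()
 * supports: submit the I/O, block until buf_biodone() marks the buffer
 * B_DONE, then check the error.  my_submit_io() is a hypothetical stand-in
 * for a driver/filesystem strategy routine; the block is never compiled.
 */
#if 0	/* illustrative sketch only -- not compiled */
extern void my_submit_io(buf_t bp);	/* hypothetical submission routine */

static errno_t
my_sync_io(buf_t bp)
{
	errno_t	error;

	my_submit_io(bp);		/* hand bp to the strategy routine */

	error = buf_biowait(bp);	/* sleeps until buf_biodone() sets B_DONE */
	if (error)
		buf_brelse(bp);		/* drop the buffer on failure */

	return (error);
}
#endif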
2965
2966/*
2967 * Mark I/O complete on a buffer.
2968 *
2969 * If a callback has been requested, e.g. the pageout
2970 * daemon, do so. Otherwise, awaken waiting processes.
2971 *
2972 * [ Leffler, et al., says on p.247:
2973 * "This routine wakes up the blocked process, frees the buffer
2974 * for an asynchronous write, or, for a request by the pagedaemon
2975 * process, invokes a procedure specified in the buffer structure" ]
2976 *
2977 * In real life, the pagedaemon (or other system processes) wants
91447636 2978 * to do async stuff too, and doesn't want the buffer buf_brelse()'d.
1c79356b
A
2979 * (for swap pager, that puts swap buffers on the free lists (!!!),
2980 * for the vn device, that puts malloc'd buffers on the free lists!)
2981 */
91447636
A
2982extern struct timeval priority_IO_timestamp_for_root;
2983extern int hard_throttle_on_root;
2984
1c79356b 2985void
91447636 2986buf_biodone(buf_t bp)
1c79356b 2987{
1c79356b 2988 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_START,
91447636 2989 (int)bp, (int)bp->b_datap, bp->b_flags, 0, 0);
1c79356b
A
2990
2991 if (ISSET(bp->b_flags, B_DONE))
2992 panic("biodone already");
1c79356b 2993
9bccf70c 2994 if (kdebug_enable) {
91447636 2995 int code = DKIO_DONE;
9bccf70c 2996
91447636
A
2997 if (bp->b_flags & B_READ)
2998 code |= DKIO_READ;
2999 if (bp->b_flags & B_ASYNC)
3000 code |= DKIO_ASYNC;
9bccf70c 3001
91447636
A
3002 if (bp->b_flags & B_META)
3003 code |= DKIO_META;
3004 else if (bp->b_flags & B_PAGEIO)
3005 code |= DKIO_PAGING;
9bccf70c 3006
91447636
A
3007 KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE,
3008 (unsigned int)bp, (unsigned int)bp->b_vp,
3009 bp->b_resid, bp->b_error, 0);
9bccf70c 3010 }
91447636
A
3011 if ((bp->b_vp != NULLVP) &&
3012 ((bp->b_flags & (B_PAGEIO | B_READ)) == (B_PAGEIO | B_READ)) &&
3013 (bp->b_vp->v_mount->mnt_kern_flag & MNTK_ROOTDEV)) {
3014 microuptime(&priority_IO_timestamp_for_root);
55e303ae
A
3015 hard_throttle_on_root = 0;
3016 }
91447636
A
3017 /*
3018 * I/O was done, so don't believe
3019 * the DIRTY state from VM anymore
3020 */
3021 CLR(bp->b_flags, B_WASDIRTY);
b4c24cb9 3022
91447636
A
3023 if (!ISSET(bp->b_flags, B_READ) && !ISSET(bp->b_flags, B_RAW))
3024 /*
3025 * wake up any writer's blocked
3026 * on throttle or waiting for I/O
3027 * to drain
3028 */
3029 vnode_writedone(bp->b_vp);
3030
3031 if (ISSET(bp->b_flags, (B_CALL | B_FILTER))) { /* if necessary, call out */
3032 void (*iodone_func)(struct buf *, void *) = bp->b_iodone;
3033 void *arg = (void *)bp->b_transaction;
3034 int callout = ISSET(bp->b_flags, B_CALL);
3035
3036 CLR(bp->b_flags, (B_CALL | B_FILTER)); /* filters and callouts are one-shot */
b4c24cb9 3037 bp->b_iodone = NULL;
91447636 3038 bp->b_transaction = NULL;
b4c24cb9
A
3039
3040 if (iodone_func == NULL) {
3041 panic("biodone: bp @ 0x%x has NULL b_iodone!\n", bp);
3042 } else {
91447636
A
3043 if (callout)
3044 SET(bp->b_flags, B_DONE); /* note that it's done */
3045 (*iodone_func)(bp, arg);
b4c24cb9 3046 }
91447636
A
3047 if (callout)
3048 /*
3049 * assumes that the call back function takes
3050 * ownership of the bp and deals with releasing it if necessary
3051 */
3052 goto biodone_done;
3053 /*
3054 * in this case the call back function is acting
3055 * strictly as a filter... it does not take
3056 * ownership of the bp and is expecting us
3057 * to finish cleaning up... this is currently used
3058 * by the HFS journaling code
3059 */
1c79356b 3060 }
91447636
A
3061 if (ISSET(bp->b_flags, B_ASYNC)) { /* if async, release it */
3062 SET(bp->b_flags, B_DONE); /* note that it's done */
1c79356b 3063
91447636
A
3064 buf_brelse(bp);
3065 } else { /* or just wakeup the buffer */
3066 /*
3067 * by taking the mutex, we serialize
3068 * the buf owner calling buf_biowait so that we'll
3069 * only see him in one of 2 states...
3070 * state 1: B_DONE wasn't set and he's
3071 * blocked in msleep
3072 * state 2: he's blocked trying to take the
3073 * mutex before looking at B_DONE
3074 * BL_WANTED is cleared in case anyone else
3075 * is blocked waiting for the buffer... note
3076 * that we haven't cleared BL_BUSY yet, so if
3077 * they do get to run, they're going to re-set
3078 * BL_WANTED and go back to sleep
3079 */
3080 lck_mtx_lock(buf_mtxp);
1c79356b 3081
91447636
A
3082 CLR(bp->b_lflags, BL_WANTED);
3083 SET(bp->b_flags, B_DONE); /* note that it's done */
3084
3085 lck_mtx_unlock(buf_mtxp);
3086
3087 wakeup(bp);
3088 }
3089biodone_done:
3090 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_END,
3091 (int)bp, (int)bp->b_datap, bp->b_flags, 0, 0);
1c79356b
A
3092}
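
/*
 * An illustrative sketch of the two completion styles buf_biodone()
 * distinguishes above: a B_CALL callout takes ownership of the buffer and
 * must release it itself, while a B_FILTER callback only observes the
 * completion and buf_biodone() finishes the cleanup.  The callback below is
 * a hypothetical example of the former and is never compiled.
 */
#if 0	/* illustrative sketch only -- not compiled */
static void
my_io_callout(buf_t bp, void *arg)
{
	/* 'arg' is whatever the submitter stashed in b_transaction */
	(void)arg;

	if (bp->b_error)
		printf("my_io_callout: I/O failed with %d\n", bp->b_error);

	/* a B_CALL callout owns bp and is responsible for releasing it */
	buf_brelse(bp);
}
#endif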
3093
3094/*
3095 * Return a count of buffers on the "locked" queue.
3096 */
3097int
91447636 3098count_lock_queue(void)
1c79356b 3099{
91447636
A
3100 buf_t bp;
3101 int n = 0;
3102
3103 lck_mtx_lock(buf_mtxp);
1c79356b
A
3104
3105 for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
3106 bp = bp->b_freelist.tqe_next)
3107 n++;
91447636
A
3108 lck_mtx_unlock(buf_mtxp);
3109
1c79356b
A
3110 return (n);
3111}
3112
3113/*
3114 * Return a count of 'busy' buffers. Used at the time of shutdown.
3115 */
3116int
91447636 3117count_busy_buffers(void)
1c79356b 3118{
91447636
A
3119 buf_t bp;
3120 int nbusy = 0;
1c79356b 3121
21362eb3 3122 for (bp = &buf[nbuf]; --bp >= buf; )
91447636 3123 if (!ISSET(bp->b_flags, B_INVAL) && ISSET(bp->b_lflags, BL_BUSY))
1c79356b
A
3124 nbusy++;
3125 return (nbusy);
3126}
3127
9bccf70c 3128#if DIAGNOSTIC
1c79356b
A
3129/*
3130 * Print out statistics on the current allocation of the buffer pool.
3131 * Can be enabled to print out on every ``sync'' by setting "syncprt"
3132 * in vfs_syscalls.c using sysctl.
3133 */
3134void
3135vfs_bufstats()
3136{
91447636
A
3137 int i, j, count;
3138 register struct buf *bp;
3139 register struct bqueues *dp;
3140 int counts[MAXBSIZE/CLBYTES+1];
3141 static char *bname[BQUEUES] =
3142 { "LOCKED", "LRU", "AGE", "EMPTY", "META", "LAUNDRY" };
3143
3144 for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
3145 count = 0;
3146 for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
3147 counts[j] = 0;
3148
3149 lck_mtx_lock(buf_mtxp);
3150
3151 for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
3152 counts[bp->b_bufsize/CLBYTES]++;
3153 count++;
3154 }
3155 lck_mtx_unlock(buf_mtxp);
3156
3157 printf("%s: total-%d", bname[i], count);
3158 for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
3159 if (counts[j] != 0)
3160 printf(", %d-%d", j * CLBYTES, counts[j]);
3161 printf("\n");
3162 }
3163}
3164#endif /* DIAGNOSTIC */
3165
3166#define NRESERVEDIOBUFS 64
3167
3168
3169buf_t
3170alloc_io_buf(vnode_t vp, int priv)
3171{
3172 buf_t bp;
3173
3174 lck_mtx_lock(iobuffer_mtxp);
3175
3176 while (((niobuf - NRESERVEDIOBUFS < bufstats.bufs_iobufinuse) && !priv) ||
3177 (bp = iobufqueue.tqh_first) == NULL) {
3178 bufstats.bufs_iobufsleeps++;
3179
3180 need_iobuffer = 1;
3181 (void) msleep(&need_iobuffer, iobuffer_mtxp, (PRIBIO+1), (const char *)"alloc_io_buf", 0);
3182 }
3183 TAILQ_REMOVE(&iobufqueue, bp, b_freelist);
3184
3185 bufstats.bufs_iobufinuse++;
3186 if (bufstats.bufs_iobufinuse > bufstats.bufs_iobufmax)
3187 bufstats.bufs_iobufmax = bufstats.bufs_iobufinuse;
3188
3189 lck_mtx_unlock(iobuffer_mtxp);
3190
3191 /*
3192 * initialize various fields
3193 * we don't need to hold the mutex since the buffer
3194 * is now private... the vp should have a reference
3195 * on it and is not protected by this mutex in any event
3196 */
3197 bp->b_timestamp = 0;
3198 bp->b_proc = NULL;
3199
3200 bp->b_datap = 0;
3201 bp->b_flags = 0;
3202 bp->b_lflags = BL_BUSY | BL_IOBUF;
3203 bp->b_blkno = bp->b_lblkno = 0;
3204#ifdef JOE_DEBUG
3205 bp->b_owner = current_thread();
3206 bp->b_tag = 6;
3207#endif
3208 bp->b_iodone = NULL;
3209 bp->b_error = 0;
3210 bp->b_resid = 0;
3211 bp->b_bcount = 0;
3212 bp->b_bufsize = 0;
3213 bp->b_upl = NULL;
3214 bp->b_vp = vp;
3215
3216 if (vp && (vp->v_type == VBLK || vp->v_type == VCHR))
3217 bp->b_dev = vp->v_rdev;
3218 else
3219 bp->b_dev = NODEV;
3220
3221 return (bp);
3222}
3223
3224
3225void
3226free_io_buf(buf_t bp)
3227{
3228 int need_wakeup = 0;
3229
3230 /*
3231 * put buffer back on the head of the iobufqueue
3232 */
3233 bp->b_vp = NULL;
3234 bp->b_flags = B_INVAL;
3235
3236 lck_mtx_lock(iobuffer_mtxp);
3237
3238 binsheadfree(bp, &iobufqueue, -1);
3239
3240 if (need_iobuffer) {
3241 /*
3242 * Wake up any processes waiting because they need an io buffer
3243 *
3244 * do the wakeup after we drop the mutex... it's possible that the
3245 * wakeup will be superfluous if need_iobuffer gets set again and
3246 * another thread runs this path, but it's highly unlikely, doesn't
3247 * hurt, and it means we don't hold up I/O progress if the wakeup blocks
3248 * trying to grab a task related lock...
3249 */
3250 need_iobuffer = 0;
3251 need_wakeup = 1;
3252 }
3253 bufstats.bufs_iobufinuse--;
3254
3255 lck_mtx_unlock(iobuffer_mtxp);
3256
3257 if (need_wakeup)
3258 wakeup(&need_iobuffer);
3259}
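
/*
 * A hedged usage sketch of the private I/O buffer pool managed by
 * alloc_io_buf()/free_io_buf(): these headers are used for transient I/O
 * and never live on the buffer-cache hash or free lists; the caller fills
 * in the fields it needs and returns the header when the transfer is done.
 * my_submit_io() is a hypothetical name and the block is never compiled.
 */
#if 0	/* illustrative sketch only -- not compiled */
extern void my_submit_io(buf_t bp);	/* hypothetical submission routine */

static void
my_transient_io(vnode_t vp, daddr64_t blkno, uintptr_t data, int nbytes)
{
	buf_t	bp;

	bp = alloc_io_buf(vp, 0);	/* may sleep until a header is free */

	bp->b_blkno  = blkno;
	bp->b_datap  = data;
	bp->b_bcount = nbytes;

	my_submit_io(bp);
	(void) buf_biowait(bp);

	free_io_buf(bp);		/* back to the head of iobufqueue */
}
#endif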
3260
3261
3262
3263/*
3264 * If getnewbuf() calls bcleanbuf() on the same thread
3265 * there is a potential for stack overrun and deadlocks.
3266 * So we always handoff the work to a worker thread for completion
3267 */
3268#include <mach/mach_types.h>
3269#include <mach/memory_object_types.h>
3270#include <kern/sched_prim.h>
3271
3272
3273static void
3274bcleanbuf_thread_init(void)
3275{
3276 /* create worker thread */
3277 kernel_thread(kernel_task, bcleanbuf_thread);
3278}
3279
3280static void
3281bcleanbuf_thread(void)
3282{
3283 struct buf *bp;
3284 int error = 0;
3285 int loopcnt = 0;
3286
3287 for (;;) {
3288 lck_mtx_lock(buf_mtxp);
3289
3290 while (blaundrycnt == 0)
3291 (void)msleep((void *)&blaundrycnt, buf_mtxp, PRIBIO, "blaundry", 0);
3292
3293 bp = TAILQ_FIRST(&bufqueues[BQ_LAUNDRY]);
3294 /*
3295 * Remove from the queue
3296 */
3297 bremfree_locked(bp);
3298 blaundrycnt--;
3299
3300 lck_mtx_unlock(buf_mtxp);
3301 /*
3302 * do the IO
3303 */
3304 error = bawrite_internal(bp, 0);
3305
3306 if (error) {
3307 lck_mtx_lock(buf_mtxp);
3308
3309 binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY);
3310 blaundrycnt++;
3311
3312 lck_mtx_unlock(buf_mtxp);
3313
3314 if (loopcnt > 10) {
3315 (void)tsleep((void *)&blaundrycnt, PRIBIO, "blaundry", 1);
3316 loopcnt = 0;
3317 } else {
3318 (void)thread_block(THREAD_CONTINUE_NULL);
3319 loopcnt++;
3320 }
3321 }
3322 }
3323}
3324
3325
3326static int
3327brecover_data(buf_t bp)
3328{
3329 int upl_offset;
3330 upl_t upl;
3331 upl_page_info_t *pl;
3332 kern_return_t kret;
3333 vnode_t vp = bp->b_vp;
3334 int upl_flags;
3335
3336
3337 if ( !UBCINFOEXISTS(vp) || bp->b_bufsize == 0)
3338 goto dump_buffer;
3339
3340 upl_flags = UPL_PRECIOUS;
3341 if (! (buf_flags(bp) & B_READ)) {
3342 /*
3343 * "write" operation: let the UPL subsystem know
3344 * that we intend to modify the buffer cache pages we're
3345 * gathering.
3346 */
3347 upl_flags |= UPL_WILL_MODIFY;
3348 }
3349
3350 kret = ubc_create_upl(vp,
3351 ubc_blktooff(vp, bp->b_lblkno),
3352 bp->b_bufsize,
3353 &upl,
3354 &pl,
3355 upl_flags);
3356 if (kret != KERN_SUCCESS)
3357 panic("Failed to create UPL");
3358
3359 for (upl_offset = 0; upl_offset < bp->b_bufsize; upl_offset += PAGE_SIZE) {
3360
3361 if (!upl_valid_page(pl, upl_offset / PAGE_SIZE) || !upl_dirty_page(pl, upl_offset / PAGE_SIZE)) {
3362 ubc_upl_abort(upl, 0);
3363 goto dump_buffer;
3364 }
3365 }
3366 bp->b_upl = upl;
3367
3368 kret = ubc_upl_map(upl, (vm_address_t *)&(bp->b_datap));
3369
3370 if (kret != KERN_SUCCESS)
3371 panic("getblk: ubc_upl_map() failed with (%d)", kret);
3372 return (1);
3373
3374dump_buffer:
3375 bp->b_bufsize = 0;
3376 SET(bp->b_flags, B_INVAL);
3377 buf_brelse(bp);
3378
3379 return(0);
3380}
3381
3382
3383
3384/*
3385 * disabled for now
3386 */
3387
3388#if FLUSH_QUEUES
3389
3390#define NFLUSH 32
3391
3392static int
3393bp_cmp(void *a, void *b)
3394{
3395 buf_t *bp_a = *(buf_t **)a,
3396 *bp_b = *(buf_t **)b;
3397 daddr64_t res;
1c79356b 3398
91447636
A
3399 // don't have to worry about negative block
3400 // numbers so this is ok to do.
3401 //
3402 res = (bp_a->b_blkno - bp_b->b_blkno);
3403
3404 return (int)res;
1c79356b 3405}
1c79356b
A
3406
3407
91447636
A
3408int
3409bflushq(int whichq, mount_t mp)
1c79356b 3410{
91447636
A
3411 buf_t bp, next;
3412 int i, buf_count;
3413 int total_writes = 0;
3414 static buf_t flush_table[NFLUSH];
1c79356b 3415
91447636
A
3416 if (whichq < 0 || whichq >= BQUEUES) {
3417 return (0);
0b4e3aa0
A
3418 }
3419
91447636
A
3420 restart:
3421 lck_mtx_lock(buf_mtxp);
0b4e3aa0 3422
91447636 3423 bp = TAILQ_FIRST(&bufqueues[whichq]);
1c79356b 3424
91447636
A
3425 for (buf_count = 0; bp; bp = next) {
3426 next = bp->b_freelist.tqe_next;
3427
3428 if (bp->b_vp == NULL || bp->b_vp->v_mount != mp) {
3429 continue;
3430 }
b4c24cb9 3431
91447636 3432 if (ISSET(bp->b_flags, B_DELWRI) && !ISSET(bp->b_lflags, BL_BUSY)) {
1c79356b 3433
91447636
A
3434 bremfree_locked(bp);
3435#ifdef JOE_DEBUG
3436 bp->b_owner = current_thread();
3437 bp->b_tag = 7;
3438#endif
3439 SET(bp->b_lflags, BL_BUSY);
3440 flush_table[buf_count] = bp;
3441 buf_count++;
3442 total_writes++;
1c79356b 3443
91447636
A
3444 if (buf_count >= NFLUSH) {
3445 lck_mtx_unlock(buf_mtxp);
1c79356b 3446
91447636 3447 qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp);
1c79356b 3448
91447636
A
3449 for (i = 0; i < buf_count; i++) {
3450 buf_bawrite(flush_table[i]);
3451 }
3452 goto restart;
3453 }
3454 }
3455 }
3456 lck_mtx_unlock(buf_mtxp);
1c79356b 3457
91447636
A
3458 if (buf_count > 0) {
3459 qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp);
1c79356b 3460
91447636
A
3461 for (i = 0; i < buf_count; i++) {
3462 buf_bawrite(flush_table[i]);
3463 }
1c79356b 3464 }
91447636
A
3465
3466 return (total_writes);
1c79356b 3467}
91447636 3468#endif
1c79356b 3469
91447636
A
3470
3471#if BALANCE_QUEUES
1c79356b
A
3472
3473/* XXX move this to a separate file */
91447636
A
3474
3475/*
3476 * NOTE: THIS CODE HAS NOT BEEN UPDATED
3477 * WITH RESPECT TO THE NEW LOCKING MODEL
3478 */
3479
3480
1c79356b
A
3481/*
3482 * Dynamic Scaling of the Buffer Queues
3483 */
3484
3485typedef long long blsize_t;
3486
55e303ae 3487blsize_t MAXNBUF; /* initialize to (sane_size / PAGE_SIZE) */
1c79356b
A
3488/* Global tunable limits */
3489blsize_t nbufh; /* number of buffer headers */
3490blsize_t nbuflow; /* minimum number of buffer headers required */
3491blsize_t nbufhigh; /* maximum number of buffer headers allowed */
3492blsize_t nbuftarget; /* preferred number of buffer headers */
3493
3494/*
3495 * assertions:
3496 *
3497 * 1. 0 < nbuflow <= nbufh <= nbufhigh
3498 * 2. nbufhigh <= MAXNBUF
3499 * 3. 0 < nbuflow <= nbuftarget <= nbufhigh
3500 * 4. nbufh can not be set by sysctl().
3501 */
3502
3503/* Per queue tunable limits */
3504
3505struct bufqlim {
3506 blsize_t bl_nlow; /* minimum number of buffer headers required */
3507 blsize_t bl_num; /* number of buffer headers on the queue */
3508 blsize_t bl_nlhigh; /* maximum number of buffer headers allowed */
3509 blsize_t bl_target; /* preferred number of buffer headers */
3510 long bl_stale; /* Seconds after which a buffer is considered stale */
3511} bufqlim[BQUEUES];
3512
3513/*
3514 * assertions:
3515 *
3516 * 1. 0 <= bl_nlow <= bl_num <= bl_nlhigh
3517 * 2. bl_nlhigh <= MAXNBUF
3518 * 3. bufqlim[BQ_META].bl_nlow != 0
3519 * 4. bufqlim[BQ_META].bl_nlow > (number of possible concurrent
3520 * file system IO operations)
3521 * 5. bl_num can not be set by sysctl().
3522 * 6. bl_nlhigh <= nbufhigh
3523 */
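
/*
 * An illustrative restatement of the per-queue limit invariants listed
 * above as runtime checks; this may make the intended relationships easier
 * to read.  my_check_bufqlim() is a hypothetical helper that nothing in the
 * balancing code actually calls, and the block is never compiled.
 */
#if 0	/* illustrative sketch only -- not compiled */
static void
my_check_bufqlim(int q)
{
	struct bufqlim *bl = &bufqlim[q];

	/* 0 <= bl_nlow <= bl_num <= bl_nlhigh, and bl_nlhigh never exceeds MAXNBUF */
	assert(bl->bl_nlow >= 0);
	assert(bl->bl_nlow <= bl->bl_num);
	assert(bl->bl_num <= bl->bl_nlhigh);
	assert(bl->bl_nlhigh <= MAXNBUF);
}
#endif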
3524
3525/*
3526 * Rationale:
3527 * ----------
3528 * Defining blsize_t as long would permit 2^31 buffer headers per queue,
3529 * which can describe (2^31 * PAGE_SIZE) of memory per queue.
3530 *
3531 * These limits are exported by means of sysctl().
3532 * It was decided to define blsize_t as a 64 bit quantity.
3533 * This will make sure that we will not be required to change it
3534 * as long as we do not exceed 64 bit address space for the kernel.
3535 *
3536 * low and high numbers parameters initialized at compile time
3537 * and boot arguments can be used to override them. sysctl()
3538 * would not change the value. sysctl() can get all the values
3539 * but can set only target. num is the current level.
3540 *
3541 * Advantages of having a "bufqscan" thread doing the balancing are,
3542 * Keep enough bufs on BQ_EMPTY.
3543 * getnewbuf() by default will always select a buffer from the BQ_EMPTY.
3544 * getnewbuf() performs best if a buffer was found there.
3545 * Also this minimizes the possibility of starting IO
3546 * from getnewbuf(). That's a performance win, too.
3547 *
3548 * Localize complex logic [balancing as well as time aging]
3549 * to balancebufq().
3550 *
3551 * Simplify getnewbuf() logic by elimination of time aging code.
3552 */
3553
3554/*
3555 * Algorithm:
3556 * -----------
3557 * The goal of the dynamic scaling of the buffer queues is to keep
3558 * the size of the LRU close to bl_target. Buffers on a queue would
3559 * be time aged.
3560 *
3561 * There would be a thread which will be responsible for "balancing"
3562 * the buffer cache queues.
3563 *
3564 * The scan order would be: AGE, LRU, META, EMPTY.
3565 */
3566
3567long bufqscanwait = 0;
3568
9bccf70c
A
3569static void bufqscan_thread();
3570static int balancebufq(int q);
3571static int btrimempty(int n);
3572static __inline__ int initbufqscan(void);
3573static __inline__ int nextbufq(int q);
3574static void buqlimprt(int all);
1c79356b 3575
91447636
A
3576
3577static __inline__ void
3578bufqinc(int q)
3579{
3580 if ((q < 0) || (q >= BQUEUES))
3581 return;
3582
3583 bufqlim[q].bl_num++;
3584 return;
3585}
3586
3587static __inline__ void
3588bufqdec(int q)
3589{
3590 if ((q < 0) || (q >= BQUEUES))
3591 return;
3592
3593 bufqlim[q].bl_num--;
3594 return;
3595}
3596
9bccf70c 3597static void
1c79356b
A
3598bufq_balance_thread_init()
3599{
3600
3601 if (bufqscanwait++ == 0) {
1c79356b
A
3602
3603 /* Initialize globals */
55e303ae 3604 MAXNBUF = (sane_size / PAGE_SIZE);
1c79356b
A
3605 nbufh = nbuf;
3606 nbuflow = min(nbufh, 100);
3607 nbufhigh = min(MAXNBUF, max(nbufh, 2048));
55e303ae 3608 nbuftarget = (sane_size >> 5) / PAGE_SIZE;
1c79356b
A
3609 nbuftarget = max(nbuflow, nbuftarget);
3610 nbuftarget = min(nbufhigh, nbuftarget);
3611
3612 /*
3613 * Initialize the bufqlim
3614 */
3615
3616 /* LOCKED queue */
3617 bufqlim[BQ_LOCKED].bl_nlow = 0;
3618 bufqlim[BQ_LOCKED].bl_nlhigh = 32;
3619 bufqlim[BQ_LOCKED].bl_target = 0;
3620 bufqlim[BQ_LOCKED].bl_stale = 30;
3621
3622 /* LRU queue */
3623 bufqlim[BQ_LRU].bl_nlow = 0;
3624 bufqlim[BQ_LRU].bl_nlhigh = nbufhigh/4;
3625 bufqlim[BQ_LRU].bl_target = nbuftarget/4;
3626 bufqlim[BQ_LRU].bl_stale = LRU_IS_STALE;
3627
3628 /* AGE queue */
3629 bufqlim[BQ_AGE].bl_nlow = 0;
3630 bufqlim[BQ_AGE].bl_nlhigh = nbufhigh/4;
3631 bufqlim[BQ_AGE].bl_target = nbuftarget/4;
3632 bufqlim[BQ_AGE].bl_stale = AGE_IS_STALE;
3633
3634 /* EMPTY queue */
3635 bufqlim[BQ_EMPTY].bl_nlow = 0;
3636 bufqlim[BQ_EMPTY].bl_nlhigh = nbufhigh/4;
3637 bufqlim[BQ_EMPTY].bl_target = nbuftarget/4;
3638 bufqlim[BQ_EMPTY].bl_stale = 600000;
3639
3640 /* META queue */
3641 bufqlim[BQ_META].bl_nlow = 0;
3642 bufqlim[BQ_META].bl_nlhigh = nbufhigh/4;
3643 bufqlim[BQ_META].bl_target = nbuftarget/4;
3644 bufqlim[BQ_META].bl_stale = META_IS_STALE;
3645
765c9de3
A
3646 /* LAUNDRY queue */
3647 bufqlim[BQ_LAUNDRY].bl_nlow = 0;
3648 bufqlim[BQ_LAUNDRY].bl_nlhigh = 32;
3649 bufqlim[BQ_LAUNDRY].bl_target = 0;
3650 bufqlim[BQ_LAUNDRY].bl_stale = 30;
3651
1c79356b
A
3652 buqlimprt(1);
3653 }
3654
3655 /* create worker thread */
3656 kernel_thread(kernel_task, bufqscan_thread);
3657}
3658
3659/* The workloop for the buffer balancing thread */
9bccf70c 3660static void
1c79356b
A
3661bufqscan_thread()
3662{
1c79356b
A
3663 int moretodo = 0;
3664
1c79356b
A
3665 for(;;) {
3666 do {
3667 int q; /* buffer queue to process */
3668
9bccf70c
A
3669 q = initbufqscan();
3670 for (; q; ) {
1c79356b
A
3671 moretodo |= balancebufq(q);
3672 q = nextbufq(q);
3673 }
3674 } while (moretodo);
3675
9bccf70c 3676#if DIAGNOSTIC
1c79356b
A
3677 vfs_bufstats();
3678 buqlimprt(0);
3679#endif
3680 (void)tsleep((void *)&bufqscanwait, PRIBIO, "bufqscanwait", 60 * hz);
3681 moretodo = 0;
3682 }
1c79356b
A
3683}
3684
3685/* Seed for the buffer queue balancing */
9bccf70c 3686static __inline__ int
1c79356b
A
3687initbufqscan()
3688{
3689 /* Start with AGE queue */
3690 return (BQ_AGE);
3691}
3692
3693/* Pick next buffer queue to balance */
9bccf70c 3694static __inline__ int
1c79356b
A
3695nextbufq(int q)
3696{
3697 int order[] = { BQ_AGE, BQ_LRU, BQ_META, BQ_EMPTY, 0 };
3698
3699 q++;
3700 q %= (int)(sizeof(order) / sizeof(order[0]));
3701 return (order[q]);
3702}
3703
3704/* function to balance the buffer queues */
9bccf70c 3705static int
1c79356b
A
3706balancebufq(int q)
3707{
3708 int moretodo = 0;
3709 int s = splbio();
91447636 3710 int n, t;
1c79356b
A
3711
3712 /* reject invalid q */
3713 if ((q < 0) || (q >= BQUEUES))
3714 goto out;
3715
765c9de3
A
3716 /* LOCKED or LAUNDRY queue MUST not be balanced */
3717 if ((q == BQ_LOCKED) || (q == BQ_LAUNDRY))
1c79356b
A
3718 goto out;
3719
3720 n = (bufqlim[q].bl_num - bufqlim[q].bl_target);
3721
3722 /* If queue has less than target nothing more to do */
3723 if (n < 0)
3724 goto out;
3725
3726 if ( n > 8 ) {
3727 /* Balance only a small amount (12.5%) at a time */
3728 n >>= 3;
3729 }
3730
3731 /* EMPTY queue needs special handling */
3732 if (q == BQ_EMPTY) {
3733 moretodo |= btrimempty(n);
3734 goto out;
3735 }
91447636
A
3736
3737 t = buf_timestamp();
1c79356b
A
3738
3739 for (; n > 0; n--) {
3740 struct buf *bp = bufqueues[q].tqh_first;
3741 if (!bp)
3742 break;
3743
3744 /* check if it's stale */
91447636 3745 if ((t - bp->b_timestamp) > bufqlim[q].bl_stale) {
1c79356b 3746 if (bcleanbuf(bp)) {
91447636 3747 /* buf_bawrite() issued, bp not ready */
1c79356b
A
3748 moretodo = 1;
3749 } else {
3750 /* release the cleaned buffer to BQ_EMPTY */
3751 SET(bp->b_flags, B_INVAL);
91447636 3752 buf_brelse(bp);
1c79356b
A
3753 }
3754 } else
3755 break;
3756 }
3757
3758out:
3759 splx(s);
3760 return (moretodo);
3761}
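
/*
 * A short illustrative sketch of the pacing arithmetic in balancebufq()
 * above: the surplus over bl_target is trimmed by at most 12.5% (n >> 3)
 * per pass once it exceeds 8 buffers.  For example, a queue holding 100
 * buffers with a target of 36 has a surplus of 64 and gives up only 8 of
 * them on a given scan.  The helper is hypothetical and never compiled.
 */
#if 0	/* illustrative sketch only -- not compiled */
static int
my_trim_quota(int bl_num, int bl_target)
{
	int n = bl_num - bl_target;

	if (n < 0)
		return (0);		/* at or under target: nothing to trim */
	if (n > 8)
		n >>= 3;		/* trim only 12.5% of the surplus per pass */
	return (n);
}
#endif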
3762
9bccf70c 3763static int
1c79356b
A
3764btrimempty(int n)
3765{
3766 /*
3767 * When struct buf are allocated dynamically, this would
3768 * reclaim up to 'n' struct buf from the empty queue.
3769 */
3770
3771 return (0);
3772}
3773
9bccf70c 3774static void
1c79356b
A
3775buqlimprt(int all)
3776{
3777 int i;
765c9de3
A
3778 static char *bname[BQUEUES] =
3779 { "LOCKED", "LRU", "AGE", "EMPTY", "META", "LAUNDRY" };
1c79356b
A
3780
3781 if (all)
3782 for (i = 0; i < BQUEUES; i++) {
3783 printf("%s : ", bname[i]);
9bccf70c
A
3784 printf("min = %ld, ", (long)bufqlim[i].bl_nlow);
3785 printf("cur = %ld, ", (long)bufqlim[i].bl_num);
3786 printf("max = %ld, ", (long)bufqlim[i].bl_nlhigh);
3787 printf("target = %ld, ", (long)bufqlim[i].bl_target);
3788 printf("stale after %ld seconds\n", bufqlim[i].bl_stale);
1c79356b
A
3789 }
3790 else
3791 for (i = 0; i < BQUEUES; i++) {
3792 printf("%s : ", bname[i]);
9bccf70c 3793 printf("cur = %ld, ", (long)bufqlim[i].bl_num);
1c79356b
A
3794 }
3795}
765c9de3 3796
91447636 3797#endif
b4c24cb9 3798
b4c24cb9 3799