/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 * Copyright (c) 1982, 1986, 1989, 1993
 *      The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vfs_bio.c   8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *      Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *      Leffler, et al.: The Design and Implementation of the 4.3BSD
 *              UNIX Operating System (Addison-Wesley, 1989)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/buf_internal.h>
#include <sys/vnode_internal.h>
#include <sys/mount_internal.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <miscfs/specfs/specdev.h>
#include <sys/ubc.h>
#include <sys/kauth.h>
#if DIAGNOSTIC
#include <kern/assert.h>
#endif /* DIAGNOSTIC */
#include <kern/task.h>
#include <kern/zalloc.h>
#include <kern/lock.h>

#include <sys/fslog.h>          /* fslog_io_error() */

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <kern/sched_prim.h>    /* thread_block() */

#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>

#include <sys/kdebug.h>

#include <libkern/OSAtomic.h>
#include <libkern/OSDebug.h>
#include <sys/ubc_internal.h>

#include <sys/sdt.h>


#if BALANCE_QUEUES
static __inline__ void bufqinc(int q);
static __inline__ void bufqdec(int q);
#endif

int bcleanbuf(buf_t bp, boolean_t discard);
static int brecover_data(buf_t bp);
static boolean_t incore(vnode_t vp, daddr64_t blkno);
/* timeout is in msecs */
static buf_t getnewbuf(int slpflag, int slptimeo, int *queue);
static void bremfree_locked(buf_t bp);
static void buf_reassign(buf_t bp, vnode_t newvp);
static errno_t buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo);
static int buf_iterprepare(vnode_t vp, struct buflists *, int flags);
static void buf_itercomplete(vnode_t vp, struct buflists *, int flags);

static boolean_t buffer_cache_gc(int);
static buf_t buf_brelse_shadow(buf_t bp);
static void buf_free_meta_store(buf_t bp);

static buf_t buf_create_shadow_internal(buf_t bp, boolean_t force_copy,
        uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv);


__private_extern__ int bdwrite_internal(buf_t, int);

/* zone allocated buffer headers */
static void bufzoneinit(void) __attribute__((section("__TEXT, initcode")));
static void bcleanbuf_thread_init(void) __attribute__((section("__TEXT, initcode")));
static void bcleanbuf_thread(void);

static zone_t buf_hdr_zone;
static int buf_hdr_count;


/*
 * Definitions for the buffer hash lists.
 */
#define BUFHASH(dvp, lbn)       \
        (&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long  bufhash;
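
/*
 * Illustrative sketch (not part of the original source): BUFHASH folds the
 * vnode pointer and logical block number into a bucket index, masked with
 * 'bufhash' (one less than the power-of-two table size that hashinit()
 * hands back in bufinit()).  A lookup then walks the chosen chain, e.g.:
 *
 *      struct bufhashhdr *dp = BUFHASH(vp, blkno);
 *      buf_t nbp;
 *
 *      for (nbp = dp->lh_first; nbp; nbp = nbp->b_hash.le_next)
 *              if (nbp->b_lblkno == blkno && nbp->b_vp == vp)
 *                      break;          <- found the cached block
 *
 * which is essentially what incore_locked() (declared below) does.
 */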

static buf_t incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp);

/* Definitions for the buffer stats. */
struct bufstats bufstats;

/* Number of delayed write buffers */
long nbdwrite = 0;
int blaundrycnt = 0;
static int boot_nbuf_headers = 0;

static TAILQ_HEAD(delayqueue, buf) delaybufqueue;

static TAILQ_HEAD(ioqueue, buf) iobufqueue;
static TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];

static int needbuffer;
static int need_iobuffer;

static lck_grp_t        *buf_mtx_grp;
static lck_attr_t       *buf_mtx_attr;
static lck_grp_attr_t   *buf_mtx_grp_attr;
static lck_mtx_t        *iobuffer_mtxp;
static lck_mtx_t        *buf_mtxp;

static int buf_busycount;

static __inline__ int
buf_timestamp(void)
{
        struct  timeval         t;
        microuptime(&t);
        return (t.tv_sec);
}

/*
 * Insq/Remq for the buffer free lists.
 */
#if BALANCE_QUEUES
#define binsheadfree(bp, dp, whichq)    do { \
                        TAILQ_INSERT_HEAD(dp, bp, b_freelist); \
                        bufqinc((whichq)); \
                } while (0)

#define binstailfree(bp, dp, whichq)    do { \
                        TAILQ_INSERT_TAIL(dp, bp, b_freelist); \
                        bufqinc((whichq)); \
                } while (0)
#else
#define binsheadfree(bp, dp, whichq)    do { \
                        TAILQ_INSERT_HEAD(dp, bp, b_freelist); \
                } while (0)

#define binstailfree(bp, dp, whichq)    do { \
                        TAILQ_INSERT_TAIL(dp, bp, b_freelist); \
                } while (0)
#endif


#define BHASHENTCHECK(bp)       \
        if ((bp)->b_hash.le_prev != (struct buf **)0xdeadbeef)  \
                panic("%p: b_hash.le_prev is not deadbeef", (bp));

#define BLISTNONE(bp)   \
        (bp)->b_hash.le_next = (struct buf *)0; \
        (bp)->b_hash.le_prev = (struct buf **)0xdeadbeef;

/*
 * Insq/Remq for the vnode usage lists.
 */
#define bufinsvn(bp, dp)        LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define bufremvn(bp) {                                          \
        LIST_REMOVE(bp, b_vnbufs);                              \
        (bp)->b_vnbufs.le_next = NOLIST;                        \
}

/*
 * Time in seconds before a buffer on a list is
 * considered as a stale buffer
 */
#define LRU_IS_STALE 120 /* default value for the LRU */
#define AGE_IS_STALE 60  /* default value for the AGE */
#define META_IS_STALE 180 /* default value for the BQ_META */

int lru_is_stale = LRU_IS_STALE;
int age_is_stale = AGE_IS_STALE;
int meta_is_stale = META_IS_STALE;

#define MAXLAUNDRY      10

/* LIST_INSERT_HEAD() with assertions */
static __inline__ void
blistenterhead(struct bufhashhdr * head, buf_t bp)
{
        if ((bp->b_hash.le_next = (head)->lh_first) != NULL)
                (head)->lh_first->b_hash.le_prev = &(bp)->b_hash.le_next;
        (head)->lh_first = bp;
        bp->b_hash.le_prev = &(head)->lh_first;
        if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef)
                panic("blistenterhead: le_prev is deadbeef");
}

static __inline__ void
binshash(buf_t bp, struct bufhashhdr *dp)
{
#if DIAGNOSTIC
        buf_t   nbp;
#endif /* DIAGNOSTIC */

        BHASHENTCHECK(bp);

#if DIAGNOSTIC
        nbp = dp->lh_first;
        for(; nbp != NULL; nbp = nbp->b_hash.le_next) {
                if(nbp == bp)
                        panic("buf already in hashlist");
        }
#endif /* DIAGNOSTIC */

        blistenterhead(dp, bp);
}

static __inline__ void
bremhash(buf_t bp)
{
        if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef)
                panic("bremhash le_prev is deadbeef");
        if (bp->b_hash.le_next == bp)
                panic("bremhash: next points to self");

        if (bp->b_hash.le_next != NULL)
                bp->b_hash.le_next->b_hash.le_prev = bp->b_hash.le_prev;
        *bp->b_hash.le_prev = (bp)->b_hash.le_next;
}

/*
 * buf_mtxp held.
 */
static __inline__ void
bmovelaundry(buf_t bp)
{
        bp->b_whichq = BQ_LAUNDRY;
        bp->b_timestamp = buf_timestamp();
        binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY);
        blaundrycnt++;
}

static __inline__ void
buf_release_credentials(buf_t bp)
{
        if (IS_VALID_CRED(bp->b_rcred)) {
                kauth_cred_unref(&bp->b_rcred);
        }
        if (IS_VALID_CRED(bp->b_wcred)) {
                kauth_cred_unref(&bp->b_wcred);
        }
}


int
buf_valid(buf_t bp) {

        if ( (bp->b_flags & (B_DONE | B_DELWRI)) )
                return 1;
        return 0;
}

int
buf_fromcache(buf_t bp) {

        if ( (bp->b_flags & B_CACHE) )
                return 1;
        return 0;
}

void
buf_markinvalid(buf_t bp) {

        SET(bp->b_flags, B_INVAL);
}

void
buf_markdelayed(buf_t bp) {

        if (!ISSET(bp->b_flags, B_DELWRI)) {
                SET(bp->b_flags, B_DELWRI);

                OSAddAtomicLong(1, &nbdwrite);
                buf_reassign(bp, bp->b_vp);
        }
        SET(bp->b_flags, B_DONE);
}

void
buf_markclean(buf_t bp) {

        if (ISSET(bp->b_flags, B_DELWRI)) {
                CLR(bp->b_flags, B_DELWRI);

                OSAddAtomicLong(-1, &nbdwrite);
                buf_reassign(bp, bp->b_vp);
        }
}

void
buf_markeintr(buf_t bp) {

        SET(bp->b_flags, B_EINTR);
}


void
buf_markaged(buf_t bp) {

        SET(bp->b_flags, B_AGE);
}

int
buf_fua(buf_t bp) {

        if ((bp->b_flags & B_FUA) == B_FUA)
                return 1;
        return 0;
}

void
buf_markfua(buf_t bp) {

        SET(bp->b_flags, B_FUA);
}

#ifdef CONFIG_PROTECT
void *
buf_getcpaddr(buf_t bp) {
        return bp->b_cpentry;
}

void
buf_setcpaddr(buf_t bp, void *cp_entry_addr) {
        bp->b_cpentry = (struct cprotect *) cp_entry_addr;
}

#else
void *
buf_getcpaddr(buf_t bp __unused) {
        return NULL;
}

void
buf_setcpaddr(buf_t bp __unused, void *cp_entry_addr __unused) {
        return;
}
#endif /* CONFIG_PROTECT */

errno_t
buf_error(buf_t bp) {

        return (bp->b_error);
}

void
buf_seterror(buf_t bp, errno_t error) {

        if ((bp->b_error = error))
                SET(bp->b_flags, B_ERROR);
        else
                CLR(bp->b_flags, B_ERROR);
}

void
buf_setflags(buf_t bp, int32_t flags) {

        SET(bp->b_flags, (flags & BUF_X_WRFLAGS));
}

void
buf_clearflags(buf_t bp, int32_t flags) {

        CLR(bp->b_flags, (flags & BUF_X_WRFLAGS));
}

int32_t
buf_flags(buf_t bp) {

        return ((bp->b_flags & BUF_X_RDFLAGS));
}

void
buf_reset(buf_t bp, int32_t io_flags) {

        CLR(bp->b_flags, (B_READ | B_WRITE | B_ERROR | B_DONE | B_INVAL | B_ASYNC | B_NOCACHE | B_FUA));
        SET(bp->b_flags, (io_flags & (B_ASYNC | B_READ | B_WRITE | B_NOCACHE)));

        bp->b_error = 0;
}

uint32_t
buf_count(buf_t bp) {

        return (bp->b_bcount);
}

void
buf_setcount(buf_t bp, uint32_t bcount) {

        bp->b_bcount = bcount;
}

uint32_t
buf_size(buf_t bp) {

        return (bp->b_bufsize);
}

void
buf_setsize(buf_t bp, uint32_t bufsize) {

        bp->b_bufsize = bufsize;
}

uint32_t
buf_resid(buf_t bp) {

        return (bp->b_resid);
}

void
buf_setresid(buf_t bp, uint32_t resid) {

        bp->b_resid = resid;
}

uint32_t
buf_dirtyoff(buf_t bp) {

        return (bp->b_dirtyoff);
}

uint32_t
buf_dirtyend(buf_t bp) {

        return (bp->b_dirtyend);
}

void
buf_setdirtyoff(buf_t bp, uint32_t dirtyoff) {

        bp->b_dirtyoff = dirtyoff;
}

void
buf_setdirtyend(buf_t bp, uint32_t dirtyend) {

        bp->b_dirtyend = dirtyend;
}

uintptr_t
buf_dataptr(buf_t bp) {

        return (bp->b_datap);
}

void
buf_setdataptr(buf_t bp, uintptr_t data) {

        bp->b_datap = data;
}

vnode_t
buf_vnode(buf_t bp) {

        return (bp->b_vp);
}

void
buf_setvnode(buf_t bp, vnode_t vp) {

        bp->b_vp = vp;
}


void *
buf_callback(buf_t bp)
{
        if ( !(bp->b_flags & B_CALL) )
                return ((void *) NULL);

        return ((void *)bp->b_iodone);
}


errno_t
buf_setcallback(buf_t bp, void (*callback)(buf_t, void *), void *transaction)
{
        if (callback)
                bp->b_flags |= (B_CALL | B_ASYNC);
        else
                bp->b_flags &= ~B_CALL;
        bp->b_transaction = transaction;
        bp->b_iodone = callback;

        return (0);
}

errno_t
buf_setupl(buf_t bp, upl_t upl, uint32_t offset)
{

        if ( !(bp->b_lflags & BL_IOBUF) )
                return (EINVAL);

        if (upl)
                bp->b_flags |= B_CLUSTER;
        else
                bp->b_flags &= ~B_CLUSTER;
        bp->b_upl = upl;
        bp->b_uploffset = offset;

        return (0);
}

buf_t
buf_clone(buf_t bp, int io_offset, int io_size, void (*iodone)(buf_t, void *), void *arg)
{
        buf_t   io_bp;

        if (io_offset < 0 || io_size < 0)
                return (NULL);

        if ((unsigned)(io_offset + io_size) > (unsigned)bp->b_bcount)
                return (NULL);

        if (bp->b_flags & B_CLUSTER) {
                if (io_offset && ((bp->b_uploffset + io_offset) & PAGE_MASK))
                        return (NULL);

                if (((bp->b_uploffset + io_offset + io_size) & PAGE_MASK) && ((io_offset + io_size) < bp->b_bcount))
                        return (NULL);
        }
        io_bp = alloc_io_buf(bp->b_vp, 0);

        io_bp->b_flags = bp->b_flags & (B_COMMIT_UPL | B_META | B_PAGEIO | B_CLUSTER | B_PHYS | B_RAW | B_ASYNC | B_READ | B_FUA);

        if (iodone) {
                io_bp->b_transaction = arg;
                io_bp->b_iodone = iodone;
                io_bp->b_flags |= B_CALL;
        }
        if (bp->b_flags & B_CLUSTER) {
                io_bp->b_upl = bp->b_upl;
                io_bp->b_uploffset = bp->b_uploffset + io_offset;
        } else {
                io_bp->b_datap = (uintptr_t)(((char *)bp->b_datap) + io_offset);
        }
        io_bp->b_bcount = io_size;

        return (io_bp);
}

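/*
 * Illustrative sketch (not part of the original source): buf_clone() carves
 * a sub-range out of an existing buffer so a caller can issue a partial I/O.
 * The completion routine and its cleanup policy here are hypothetical,
 * shown only to illustrate the calling convention:
 *
 *      static void
 *      my_piece_done(buf_t piece, void *arg)
 *      {
 *              buf_free(piece);        <- clones come from alloc_io_buf()
 *      }
 *
 *      buf_t piece = buf_clone(bp, 0, PAGE_SIZE, my_piece_done, NULL);
 *      if (piece != NULL)
 *              VNOP_STRATEGY(piece);   <- issue just the first page
 *
 * buf_clone() returns NULL for a malformed range or, on B_CLUSTER buffers,
 * for a sub-range that isn't page aligned within the UPL.
 */
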
int
buf_shadow(buf_t bp)
{
        if (bp->b_lflags & BL_SHADOW)
                return 1;
        return 0;
}


buf_t
buf_create_shadow_priv(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg)
{
        return (buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 1));
}

buf_t
buf_create_shadow(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg)
{
        return (buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 0));
}


static buf_t
buf_create_shadow_internal(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv)
{
        buf_t   io_bp;

        KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_START, bp, 0, 0, 0, 0);

        if ( !(bp->b_flags & B_META) || (bp->b_lflags & BL_IOBUF)) {

                KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, 0, 0, 0, 0);
                return (NULL);
        }
#ifdef BUF_MAKE_PRIVATE
        if (bp->b_shadow_ref && bp->b_data_ref == 0 && external_storage == 0)
                panic("buf_create_shadow: %p is in the private state (%d, %d)", bp, bp->b_shadow_ref, bp->b_data_ref);
#endif
        io_bp = alloc_io_buf(bp->b_vp, priv);

        io_bp->b_flags = bp->b_flags & (B_META | B_ZALLOC | B_ASYNC | B_READ | B_FUA);
        io_bp->b_blkno = bp->b_blkno;
        io_bp->b_lblkno = bp->b_lblkno;

        if (iodone) {
                io_bp->b_transaction = arg;
                io_bp->b_iodone = iodone;
                io_bp->b_flags |= B_CALL;
        }
        if (force_copy == FALSE) {
                io_bp->b_bcount = bp->b_bcount;
                io_bp->b_bufsize = bp->b_bufsize;

                if (external_storage) {
                        io_bp->b_datap = external_storage;
#ifdef BUF_MAKE_PRIVATE
                        io_bp->b_data_store = NULL;
#endif
                } else {
                        io_bp->b_datap = bp->b_datap;
#ifdef BUF_MAKE_PRIVATE
                        io_bp->b_data_store = bp;
#endif
                }
                *(buf_t *)(&io_bp->b_orig) = bp;

                lck_mtx_lock_spin(buf_mtxp);

                io_bp->b_lflags |= BL_SHADOW;
                io_bp->b_shadow = bp->b_shadow;
                bp->b_shadow = io_bp;
                bp->b_shadow_ref++;

#ifdef BUF_MAKE_PRIVATE
                if (external_storage)
                        io_bp->b_lflags |= BL_EXTERNAL;
                else
                        bp->b_data_ref++;
#endif
                lck_mtx_unlock(buf_mtxp);
        } else {
                if (external_storage) {
#ifdef BUF_MAKE_PRIVATE
                        io_bp->b_lflags |= BL_EXTERNAL;
#endif
                        io_bp->b_bcount = bp->b_bcount;
                        io_bp->b_bufsize = bp->b_bufsize;
                        io_bp->b_datap = external_storage;
                } else {
                        allocbuf(io_bp, bp->b_bcount);

                        io_bp->b_lflags |= BL_IOBUF_ALLOC;
                }
                bcopy((caddr_t)bp->b_datap, (caddr_t)io_bp->b_datap, bp->b_bcount);

#ifdef BUF_MAKE_PRIVATE
                io_bp->b_data_store = NULL;
#endif
        }
        KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, io_bp, 0);

        return (io_bp);
}

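/*
 * Illustrative sketch (not part of the original source): a shadow created
 * by buf_create_shadow() shares the original's storage unless force_copy
 * or external_storage dictates otherwise, which lets a snapshot of a
 * meta-data buffer be written while the original remains usable:
 *
 *      buf_t shadow = buf_create_shadow(bp, FALSE, 0, my_done, my_arg);
 *      if (shadow != NULL)
 *              VNOP_STRATEGY(shadow);  <- 'bp' can still be dirtied
 *
 * 'my_done' and 'my_arg' are hypothetical.  The shadow is linked onto
 * bp->b_shadow and counted in bp->b_shadow_ref; buf_brelse_shadow()
 * unwinds that linkage when the shadow is released.
 */
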
#ifdef BUF_MAKE_PRIVATE
errno_t
buf_make_private(buf_t bp)
{
        buf_t   ds_bp;
        buf_t   t_bp;
        struct buf my_buf;

        KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_START, bp, bp->b_shadow_ref, 0, 0, 0);

        if (bp->b_shadow_ref == 0 || bp->b_data_ref == 0 || ISSET(bp->b_lflags, BL_SHADOW)) {

                KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0);
                return (EINVAL);
        }
        my_buf.b_flags = B_META;
        my_buf.b_datap = (uintptr_t)NULL;
        allocbuf(&my_buf, bp->b_bcount);

        bcopy((caddr_t)bp->b_datap, (caddr_t)my_buf.b_datap, bp->b_bcount);

        lck_mtx_lock_spin(buf_mtxp);

        for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) {
                if ( !ISSET(t_bp->b_lflags, BL_EXTERNAL))
                        break;
        }
        ds_bp = t_bp;

        if (ds_bp == NULL && bp->b_data_ref)
                panic("buf_make_private: b_data_ref != 0 && ds_bp == NULL");

        if (ds_bp && (bp->b_data_ref == 0 || bp->b_shadow_ref == 0))
                panic("buf_make_private: ref_count == 0 && ds_bp != NULL");

        if (ds_bp == NULL) {
                lck_mtx_unlock(buf_mtxp);

                buf_free_meta_store(&my_buf);

                KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0);
                return (EINVAL);
        }
        for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) {
                if ( !ISSET(t_bp->b_lflags, BL_EXTERNAL))
                        t_bp->b_data_store = ds_bp;
        }
        ds_bp->b_data_ref = bp->b_data_ref;

        bp->b_data_ref = 0;
        bp->b_datap = my_buf.b_datap;

        lck_mtx_unlock(buf_mtxp);

        KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, 0, 0);
        return (0);
}
#endif


void
buf_setfilter(buf_t bp, void (*filter)(buf_t, void *), void *transaction,
              void (**old_iodone)(buf_t, void *), void **old_transaction)
{
        if (old_iodone)
                *old_iodone = bp->b_iodone;
        if (old_transaction)
                *old_transaction = bp->b_transaction;

        bp->b_transaction = transaction;
        bp->b_iodone = filter;
        if (filter)
                bp->b_flags |= B_FILTER;
        else
                bp->b_flags &= ~B_FILTER;
}

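/*
 * Illustrative sketch (not part of the original source): because
 * buf_setfilter() hands back the previous iodone/transaction pair, a
 * caller can interpose on a buffer and later chain to the original
 * filter.  'my_filter' and 'my_arg' are hypothetical:
 *
 *      void    (*old_iodone)(buf_t, void *);
 *      void    *old_arg;
 *
 *      buf_setfilter(bp, my_filter, my_arg, &old_iodone, &old_arg);
 *
 *      ... then inside my_filter(), hand control back:
 *
 *      if (old_iodone)
 *              (*old_iodone)(bp, old_arg);
 */
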
daddr64_t
buf_blkno(buf_t bp) {

        return (bp->b_blkno);
}

daddr64_t
buf_lblkno(buf_t bp) {

        return (bp->b_lblkno);
}

void
buf_setblkno(buf_t bp, daddr64_t blkno) {

        bp->b_blkno = blkno;
}

void
buf_setlblkno(buf_t bp, daddr64_t lblkno) {

        bp->b_lblkno = lblkno;
}

dev_t
buf_device(buf_t bp) {

        return (bp->b_dev);
}

errno_t
buf_setdevice(buf_t bp, vnode_t vp) {

        if ((vp->v_type != VBLK) && (vp->v_type != VCHR))
                return EINVAL;
        bp->b_dev = vp->v_rdev;

        return 0;
}


void *
buf_drvdata(buf_t bp) {

        return (bp->b_drvdata);
}

void
buf_setdrvdata(buf_t bp, void *drvdata) {

        bp->b_drvdata = drvdata;
}

void *
buf_fsprivate(buf_t bp) {

        return (bp->b_fsprivate);
}

void
buf_setfsprivate(buf_t bp, void *fsprivate) {

        bp->b_fsprivate = fsprivate;
}

kauth_cred_t
buf_rcred(buf_t bp) {

        return (bp->b_rcred);
}

kauth_cred_t
buf_wcred(buf_t bp) {

        return (bp->b_wcred);
}

void *
buf_upl(buf_t bp) {

        return (bp->b_upl);
}

uint32_t
buf_uploffset(buf_t bp) {

        return ((uint32_t)(bp->b_uploffset));
}

proc_t
buf_proc(buf_t bp) {

        return (bp->b_proc);
}


errno_t
buf_map(buf_t bp, caddr_t *io_addr)
{
        buf_t           real_bp;
        vm_offset_t     vaddr;
        kern_return_t   kret;

        if ( !(bp->b_flags & B_CLUSTER)) {
                *io_addr = (caddr_t)bp->b_datap;
                return (0);
        }
        real_bp = (buf_t)(bp->b_real_bp);

        if (real_bp && real_bp->b_datap) {
                /*
                 * b_real_bp is only valid if B_CLUSTER is SET
                 * if it's non-zero, then someone did a cluster_bp call
                 * and if the backing physical pages were already mapped
                 * in before the call to cluster_bp (non-zero b_datap),
                 * then we just use that mapping
                 */
                *io_addr = (caddr_t)real_bp->b_datap;
                return (0);
        }
        kret = ubc_upl_map(bp->b_upl, &vaddr);    /* Map it in */

        if (kret != KERN_SUCCESS) {
                *io_addr = NULL;

                return(ENOMEM);
        }
        vaddr += bp->b_uploffset;

        *io_addr = (caddr_t)vaddr;

        return (0);
}

errno_t
buf_unmap(buf_t bp)
{
        buf_t           real_bp;
        kern_return_t   kret;

        if ( !(bp->b_flags & B_CLUSTER))
                return (0);
        /*
         * see buf_map for the explanation
         */
        real_bp = (buf_t)(bp->b_real_bp);

        if (real_bp && real_bp->b_datap)
                return (0);

        if ((bp->b_lflags & BL_IOBUF) &&
            ((bp->b_flags & (B_PAGEIO | B_READ)) != (B_PAGEIO | B_READ))) {
                /*
                 * ignore pageins... the 'right' thing will
                 * happen due to the way we handle speculative
                 * clusters...
                 *
                 * when we commit these pages, we'll hit
                 * it with UPL_COMMIT_INACTIVE which
                 * will clear the reference bit that got
                 * turned on when we touched the mapping
                 */
                bp->b_flags |= B_AGE;
        }
        kret = ubc_upl_unmap(bp->b_upl);

        if (kret != KERN_SUCCESS)
                return (EINVAL);
        return (0);
}


void
buf_clear(buf_t bp) {
        caddr_t baddr;

        if (buf_map(bp, &baddr) == 0) {
                bzero(baddr, bp->b_bcount);
                buf_unmap(bp);
        }
        bp->b_resid = 0;
}

/*
 * Read or write a buffer that is not contiguous on disk.
 * buffer is marked done/error at the conclusion
 */
static int
buf_strategy_fragmented(vnode_t devvp, buf_t bp, off_t f_offset, size_t contig_bytes)
{
        vnode_t vp = buf_vnode(bp);
        buf_t   io_bp;                   /* For reading or writing a single block */
        int     io_direction;
        int     io_resid;
        size_t  io_contig_bytes;
        daddr64_t io_blkno;
        int     error = 0;
        int     bmap_flags;

        /*
         * save our starting point... the bp was already mapped
         * in buf_strategy before we got called
         * no sense doing it again.
         */
        io_blkno = bp->b_blkno;
        /*
         * Make sure we redo this mapping for the next I/O
         * i.e. this can never be a 'permanent' mapping
         */
        bp->b_blkno = bp->b_lblkno;

        /*
         * Get an io buffer to do the deblocking
         */
        io_bp = alloc_io_buf(devvp, 0);

        io_bp->b_lblkno = bp->b_lblkno;
        io_bp->b_datap  = bp->b_datap;
        io_resid        = bp->b_bcount;
        io_direction    = bp->b_flags & B_READ;
        io_contig_bytes = contig_bytes;

        if (bp->b_flags & B_READ)
                bmap_flags = VNODE_READ;
        else
                bmap_flags = VNODE_WRITE;

        for (;;) {
                if (io_blkno == -1)
                        /*
                         * this is unexpected, but we'll allow for it
                         */
                        bzero((caddr_t)io_bp->b_datap, (int)io_contig_bytes);
                else {
                        io_bp->b_bcount  = io_contig_bytes;
                        io_bp->b_bufsize = io_contig_bytes;
                        io_bp->b_resid   = io_contig_bytes;
                        io_bp->b_blkno   = io_blkno;

                        buf_reset(io_bp, io_direction);

                        /*
                         * Call the device to do the I/O and wait for it.  Make sure the appropriate party is charged for write
                         */

                        if (!ISSET(bp->b_flags, B_READ))
                                OSAddAtomic(1, &devvp->v_numoutput);

                        if ((error = VNOP_STRATEGY(io_bp)))
                                break;
                        if ((error = (int)buf_biowait(io_bp)))
                                break;
                        if (io_bp->b_resid) {
                                io_resid -= (io_contig_bytes - io_bp->b_resid);
                                break;
                        }
                }
                if ((io_resid -= io_contig_bytes) == 0)
                        break;
                f_offset       += io_contig_bytes;
                io_bp->b_datap += io_contig_bytes;

                /*
                 * Map the current position to a physical block number
                 */
                if ((error = VNOP_BLOCKMAP(vp, f_offset, io_resid, &io_blkno, &io_contig_bytes, NULL, bmap_flags, NULL)))
                        break;
        }
        buf_free(io_bp);

        if (error)
                buf_seterror(bp, error);
        bp->b_resid = io_resid;
        /*
         * This I/O is now complete
         */
        buf_biodone(bp);

        return error;
}


/*
 * struct vnop_strategy_args {
 *      struct buf *a_bp;
 * } *ap;
 */
errno_t
buf_strategy(vnode_t devvp, void *ap)
{
        buf_t   bp = ((struct vnop_strategy_args *)ap)->a_bp;
        vnode_t vp = bp->b_vp;
        int     bmap_flags;
        errno_t error;
#if CONFIG_DTRACE
        int dtrace_io_start_flag = 0;    /* We only want to trip the io:::start
                                          * probe once, with the true physical
                                          * block in place (b_blkno)
                                          */

#endif

        if (vp == NULL || vp->v_type == VCHR || vp->v_type == VBLK)
                panic("buf_strategy: b_vp == NULL || vtype == VCHR | VBLK\n");
        /*
         * associate the physical device with
         * this buf_t even if we don't
         * end up issuing the I/O...
         */
        bp->b_dev = devvp->v_rdev;

        if (bp->b_flags & B_READ)
                bmap_flags = VNODE_READ;
        else
                bmap_flags = VNODE_WRITE;

        if ( !(bp->b_flags & B_CLUSTER)) {

                if ( (bp->b_upl) ) {
                        /*
                         * we have a UPL associated with this bp
                         * go through cluster_bp which knows how
                         * to deal with filesystem block sizes
                         * that aren't equal to the page size
                         */
                        DTRACE_IO1(start, buf_t, bp);
                        return (cluster_bp(bp));
                }
                if (bp->b_blkno == bp->b_lblkno) {
                        off_t   f_offset;
                        size_t  contig_bytes;

                        if ((error = VNOP_BLKTOOFF(vp, bp->b_lblkno, &f_offset))) {
                                DTRACE_IO1(start, buf_t, bp);
                                buf_seterror(bp, error);
                                buf_biodone(bp);

                                return (error);
                        }
                        if ((error = VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))) {
                                DTRACE_IO1(start, buf_t, bp);
                                buf_seterror(bp, error);
                                buf_biodone(bp);

                                return (error);
                        }

                        DTRACE_IO1(start, buf_t, bp);
#if CONFIG_DTRACE
                        dtrace_io_start_flag = 1;
#endif /* CONFIG_DTRACE */

                        if ((bp->b_blkno == -1) || (contig_bytes == 0)) {
                                /* Set block number to force biodone later */
                                bp->b_blkno = -1;
                                buf_clear(bp);
                        }
                        else if ((long)contig_bytes < bp->b_bcount) {
                                return (buf_strategy_fragmented(devvp, bp, f_offset, contig_bytes));
                        }
                }

#if CONFIG_DTRACE
                if (dtrace_io_start_flag == 0) {
                        DTRACE_IO1(start, buf_t, bp);
                        dtrace_io_start_flag = 1;
                }
#endif /* CONFIG_DTRACE */

                if (bp->b_blkno == -1) {
                        buf_biodone(bp);
                        return (0);
                }
        }

#if CONFIG_DTRACE
        if (dtrace_io_start_flag == 0)
                DTRACE_IO1(start, buf_t, bp);
#endif /* CONFIG_DTRACE */

        /*
         * we can issue the I/O because...
         * either B_CLUSTER is set which
         * means that the I/O is properly set
         * up to be a multiple of the page size, or
         * we were able to successfully set up the
         * physical block mapping
         */
        return (VOCALL(devvp->v_op, VOFFSET(vnop_strategy), ap));
}


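/*
 * Illustrative sketch (not part of the original source): a leaf
 * filesystem's strategy entry point typically just forwards to
 * buf_strategy() with its device vnode, letting the code above resolve
 * the logical-to-physical block mapping.  'myfs' and its mount accessors
 * are hypothetical:
 *
 *      int
 *      myfs_vnop_strategy(struct vnop_strategy_args *ap)
 *      {
 *              struct myfsmount *myfsmp = MYFS_VTOM(buf_vnode(ap->a_bp));
 *
 *              return (buf_strategy(myfsmp->myfs_devvp, ap));
 *      }
 */
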
buf_t
buf_alloc(vnode_t vp)
{
        return(alloc_io_buf(vp, 0));
}

void
buf_free(buf_t bp) {

        free_io_buf(bp);
}


/*
 * iterate buffers for the specified vp.
 *   if BUF_SCAN_DIRTY is set, do the dirty list
 *   if BUF_SCAN_CLEAN is set, do the clean list
 *   if neither flag is set, default to BUF_SCAN_DIRTY
 *   if BUF_NOTIFY_BUSY is set, call the callout function using a NULL bp for busy buffers
 */

struct buf_iterate_info_t {
        int flag;
        struct buflists *listhead;
};

void
buf_iterate(vnode_t vp, int (*callout)(buf_t, void *), int flags, void *arg)
{
        buf_t   bp;
        int     retval;
        struct  buflists local_iterblkhd;
        int     lock_flags = BAC_NOWAIT | BAC_REMOVE;
        int     notify_busy = flags & BUF_NOTIFY_BUSY;
        struct  buf_iterate_info_t list[2];
        int     num_lists, i;

        if (flags & BUF_SKIP_LOCKED)
                lock_flags |= BAC_SKIP_LOCKED;
        if (flags & BUF_SKIP_NONLOCKED)
                lock_flags |= BAC_SKIP_NONLOCKED;

        if ( !(flags & (BUF_SCAN_DIRTY | BUF_SCAN_CLEAN)))
                flags |= BUF_SCAN_DIRTY;

        num_lists = 0;

        if (flags & BUF_SCAN_DIRTY) {
                list[num_lists].flag = VBI_DIRTY;
                list[num_lists].listhead = &vp->v_dirtyblkhd;
                num_lists++;
        }
        if (flags & BUF_SCAN_CLEAN) {
                list[num_lists].flag = VBI_CLEAN;
                list[num_lists].listhead = &vp->v_cleanblkhd;
                num_lists++;
        }

        for (i = 0; i < num_lists; i++) {
                lck_mtx_lock(buf_mtxp);

                if (buf_iterprepare(vp, &local_iterblkhd, list[i].flag)) {
                        lck_mtx_unlock(buf_mtxp);
                        continue;
                }
                while (!LIST_EMPTY(&local_iterblkhd)) {
                        bp = LIST_FIRST(&local_iterblkhd);
                        LIST_REMOVE(bp, b_vnbufs);
                        LIST_INSERT_HEAD(list[i].listhead, bp, b_vnbufs);

                        if (buf_acquire_locked(bp, lock_flags, 0, 0)) {
                                if (notify_busy) {
                                        bp = NULL;
                                } else {
                                        continue;
                                }
                        }

                        lck_mtx_unlock(buf_mtxp);

                        retval = callout(bp, arg);

                        switch (retval) {
                        case BUF_RETURNED:
                                if (bp)
                                        buf_brelse(bp);
                                break;
                        case BUF_CLAIMED:
                                break;
                        case BUF_RETURNED_DONE:
                                if (bp)
                                        buf_brelse(bp);
                                lck_mtx_lock(buf_mtxp);
                                goto out;
                        case BUF_CLAIMED_DONE:
                                lck_mtx_lock(buf_mtxp);
                                goto out;
                        }
                        lck_mtx_lock(buf_mtxp);
                } /* while list has more nodes */
          out:
                buf_itercomplete(vp, &local_iterblkhd, list[i].flag);
                lck_mtx_unlock(buf_mtxp);
        } /* for each list */
} /* buf_iterate */
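
/*
 * Illustrative sketch (not part of the original source): a typical
 * buf_iterate() callout pushes out dirty buffers and tells the iterator
 * what became of each one.  'my_flush_callout' is hypothetical:
 *
 *      static int
 *      my_flush_callout(buf_t bp, void *arg)
 *      {
 *              if (bp == NULL)                 <- only with BUF_NOTIFY_BUSY
 *                      return (BUF_CLAIMED);
 *              if (buf_flags(bp) & B_DELWRI) {
 *                      (void) buf_bawrite(bp); <- consumes the buffer
 *                      return (BUF_CLAIMED);
 *              }
 *              return (BUF_RETURNED);          <- iterator will buf_brelse()
 *      }
 *
 *      buf_iterate(vp, my_flush_callout, BUF_SCAN_DIRTY, NULL);
 */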


/*
 * Flush out and invalidate all buffers associated with a vnode.
 */
int
buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo)
{
        buf_t   bp;
        int     aflags;
        int     error = 0;
        int     must_rescan = 1;
        struct  buflists local_iterblkhd;


        if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd))
                return (0);

        lck_mtx_lock(buf_mtxp);

        for (;;) {
                if (must_rescan == 0)
                        /*
                         * the lists may not be empty, but all that's left at this
                         * point are metadata or B_LOCKED buffers which are being
                         * skipped... we know this because we made it through both
                         * the clean and dirty lists without dropping buf_mtxp...
                         * each time we drop buf_mtxp we bump "must_rescan"
                         */
                        break;
                if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd))
                        break;
                must_rescan = 0;
                /*
                 * iterate the clean list
                 */
                if (buf_iterprepare(vp, &local_iterblkhd, VBI_CLEAN)) {
                        goto try_dirty_list;
                }
                while (!LIST_EMPTY(&local_iterblkhd)) {

                        bp = LIST_FIRST(&local_iterblkhd);

                        LIST_REMOVE(bp, b_vnbufs);
                        LIST_INSERT_HEAD(&vp->v_cleanblkhd, bp, b_vnbufs);

                        /*
                         * some filesystems distinguish meta data blocks with a negative logical block #
                         */
                        if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META)))
                                continue;

                        aflags = BAC_REMOVE;

                        if ( !(flags & BUF_INVALIDATE_LOCKED) )
                                aflags |= BAC_SKIP_LOCKED;

                        if ( (error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo)) ) {
                                if (error == EDEADLK)
                                        /*
                                         * this buffer was marked B_LOCKED...
                                         * we didn't drop buf_mtxp, so we
                                         * don't need to rescan
                                         */
                                        continue;
                                if (error == EAGAIN) {
                                        /*
                                         * found a busy buffer... we blocked and
                                         * dropped buf_mtxp, so we're going to
                                         * need to rescan after this pass is completed
                                         */
                                        must_rescan++;
                                        continue;
                                }
                                /*
                                 * got some kind of 'real' error out of the msleep
                                 * in buf_acquire_locked, terminate the scan and return the error
                                 */
                                buf_itercomplete(vp, &local_iterblkhd, VBI_CLEAN);

                                lck_mtx_unlock(buf_mtxp);
                                return (error);
                        }
                        lck_mtx_unlock(buf_mtxp);

                        if (bp->b_flags & B_LOCKED)
                                KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 0, 0);

                        CLR(bp->b_flags, B_LOCKED);
                        SET(bp->b_flags, B_INVAL);
                        buf_brelse(bp);

                        lck_mtx_lock(buf_mtxp);

                        /*
                         * by dropping buf_mtxp, we allow new
                         * buffers to be added to the vnode list(s)
                         * we'll have to rescan at least once more
                         * if the queues aren't empty
                         */
                        must_rescan++;
                }
                buf_itercomplete(vp, &local_iterblkhd, VBI_CLEAN);

try_dirty_list:
                /*
                 * Now iterate on dirty blks
                 */
                if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY)) {
                        continue;
                }
                while (!LIST_EMPTY(&local_iterblkhd)) {
                        bp = LIST_FIRST(&local_iterblkhd);

                        LIST_REMOVE(bp, b_vnbufs);
                        LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs);

                        /*
                         * some filesystems distinguish meta data blocks with a negative logical block #
                         */
                        if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META)))
                                continue;

                        aflags = BAC_REMOVE;

                        if ( !(flags & BUF_INVALIDATE_LOCKED) )
                                aflags |= BAC_SKIP_LOCKED;

                        if ( (error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo)) ) {
                                if (error == EDEADLK)
                                        /*
                                         * this buffer was marked B_LOCKED...
                                         * we didn't drop buf_mtxp, so we
                                         * don't need to rescan
                                         */
                                        continue;
                                if (error == EAGAIN) {
                                        /*
                                         * found a busy buffer... we blocked and
                                         * dropped buf_mtxp, so we're going to
                                         * need to rescan after this pass is completed
                                         */
                                        must_rescan++;
                                        continue;
                                }
                                /*
                                 * got some kind of 'real' error out of the msleep
                                 * in buf_acquire_locked, terminate the scan and return the error
                                 */
                                buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);

                                lck_mtx_unlock(buf_mtxp);
                                return (error);
                        }
                        lck_mtx_unlock(buf_mtxp);

                        if (bp->b_flags & B_LOCKED)
                                KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 1, 0);

                        CLR(bp->b_flags, B_LOCKED);
                        SET(bp->b_flags, B_INVAL);

                        if (ISSET(bp->b_flags, B_DELWRI) && (flags & BUF_WRITE_DATA))
                                (void) VNOP_BWRITE(bp);
                        else
                                buf_brelse(bp);

                        lck_mtx_lock(buf_mtxp);
                        /*
                         * by dropping buf_mtxp, we allow new
                         * buffers to be added to the vnode list(s)
                         * we'll have to rescan at least once more
                         * if the queues aren't empty
                         */
                        must_rescan++;
                }
                buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);
        }
        lck_mtx_unlock(buf_mtxp);

        return (0);
}

void
buf_flushdirtyblks(vnode_t vp, int wait, int flags, const char *msg) {

        buf_t   bp;
        int     writes_issued = 0;
        errno_t error;
        int     busy = 0;
        struct  buflists local_iterblkhd;
        int     lock_flags = BAC_NOWAIT | BAC_REMOVE;

        if (flags & BUF_SKIP_LOCKED)
                lock_flags |= BAC_SKIP_LOCKED;
        if (flags & BUF_SKIP_NONLOCKED)
                lock_flags |= BAC_SKIP_NONLOCKED;
loop:
        lck_mtx_lock(buf_mtxp);

        if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY) == 0) {
                while (!LIST_EMPTY(&local_iterblkhd)) {
                        bp = LIST_FIRST(&local_iterblkhd);
                        LIST_REMOVE(bp, b_vnbufs);
                        LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs);

                        if ((error = buf_acquire_locked(bp, lock_flags, 0, 0)) == EBUSY)
                                busy++;
                        if (error)
                                continue;
                        lck_mtx_unlock(buf_mtxp);

                        bp->b_flags &= ~B_LOCKED;

                        /*
                         * Wait for I/O associated with indirect blocks to complete,
                         * since there is no way to quickly wait for them below.
                         */
                        if ((bp->b_vp == vp) || (wait == 0))
                                (void) buf_bawrite(bp);
                        else
                                (void) VNOP_BWRITE(bp);
                        writes_issued++;

                        lck_mtx_lock(buf_mtxp);
                }
                buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);
        }
        lck_mtx_unlock(buf_mtxp);

        if (wait) {
                (void)vnode_waitforwrites(vp, 0, 0, 0, msg);

                if (vp->v_dirtyblkhd.lh_first && busy) {
                        /*
                         * we had one or more BUSY buffers on
                         * the dirtyblock list... most likely
                         * these are due to delayed writes that
                         * were moved to the bclean queue but
                         * have not yet been 'written'.
                         * if we issued some writes on the
                         * previous pass, we try again immediately;
                         * if we didn't, we'll sleep for some time
                         * to allow the state to change...
                         */
                        if (writes_issued == 0) {
                                (void)tsleep((caddr_t)&vp->v_numoutput,
                                        PRIBIO + 1, "vnode_flushdirtyblks", hz/20);
                        }
                        writes_issued = 0;
                        busy = 0;

                        goto loop;
                }
        }
}


/*
 * called with buf_mtxp held...
 * this lock protects the queue manipulation
 */
static int
buf_iterprepare(vnode_t vp, struct buflists *iterheadp, int flags)
{
        struct buflists * listheadp;

        if (flags & VBI_DIRTY)
                listheadp = &vp->v_dirtyblkhd;
        else
                listheadp = &vp->v_cleanblkhd;

        while (vp->v_iterblkflags & VBI_ITER) {
                vp->v_iterblkflags |= VBI_ITERWANT;
                msleep(&vp->v_iterblkflags, buf_mtxp, 0, "buf_iterprepare", NULL);
        }
        if (LIST_EMPTY(listheadp)) {
                LIST_INIT(iterheadp);
                return(EINVAL);
        }
        vp->v_iterblkflags |= VBI_ITER;

        iterheadp->lh_first = listheadp->lh_first;
        listheadp->lh_first->b_vnbufs.le_prev = &iterheadp->lh_first;
        LIST_INIT(listheadp);

        return(0);
}

/*
 * called with buf_mtxp held...
 * this lock protects the queue manipulation
 */
static void
buf_itercomplete(vnode_t vp, struct buflists *iterheadp, int flags)
{
        struct buflists * listheadp;
        buf_t bp;

        if (flags & VBI_DIRTY)
                listheadp = &vp->v_dirtyblkhd;
        else
                listheadp = &vp->v_cleanblkhd;

        while (!LIST_EMPTY(iterheadp)) {
                bp = LIST_FIRST(iterheadp);
                LIST_REMOVE(bp, b_vnbufs);
                LIST_INSERT_HEAD(listheadp, bp, b_vnbufs);
        }
        vp->v_iterblkflags &= ~VBI_ITER;

        if (vp->v_iterblkflags & VBI_ITERWANT) {
                vp->v_iterblkflags &= ~VBI_ITERWANT;
                wakeup(&vp->v_iterblkflags);
        }
}


static void
bremfree_locked(buf_t bp)
{
        struct bqueues *dp = NULL;
        int whichq;

        whichq = bp->b_whichq;

        if (whichq == -1) {
                if (bp->b_shadow_ref == 0)
                        panic("bremfree_locked: %p not on freelist", bp);
                /*
                 * there are clones pointing to 'bp'...
                 * therefore, it was not put on a freelist
                 * when buf_brelse was last called on 'bp'
                 */
                return;
        }
        /*
         * We only calculate the head of the freelist when removing
         * the last element of the list as that is the only time that
         * it is needed (e.g. to reset the tail pointer).
         *
         * NB: This makes an assumption about how tailq's are implemented.
         */
        if (bp->b_freelist.tqe_next == NULL) {
                dp = &bufqueues[whichq];

                if (dp->tqh_last != &bp->b_freelist.tqe_next)
                        panic("bremfree: lost tail");
        }
        TAILQ_REMOVE(dp, bp, b_freelist);

#if BALANCE_QUEUES
        bufqdec(whichq);
#endif
        if (whichq == BQ_LAUNDRY)
                blaundrycnt--;

        bp->b_whichq = -1;
        bp->b_timestamp = 0;
        bp->b_shadow = 0;
}

/*
 * Associate a buffer with a vnode.
 * buf_mtxp must be locked on entry
 */
static void
bgetvp_locked(vnode_t vp, buf_t bp)
{

        if (bp->b_vp != vp)
                panic("bgetvp_locked: not free");

        if (vp->v_type == VBLK || vp->v_type == VCHR)
                bp->b_dev = vp->v_rdev;
        else
                bp->b_dev = NODEV;
        /*
         * Insert onto list for new vnode.
         */
        bufinsvn(bp, &vp->v_cleanblkhd);
}

/*
 * Disassociate a buffer from a vnode.
 * buf_mtxp must be locked on entry
 */
static void
brelvp_locked(buf_t bp)
{
        /*
         * Delete from old vnode list, if on one.
         */
        if (bp->b_vnbufs.le_next != NOLIST)
                bufremvn(bp);

        bp->b_vp = (vnode_t)NULL;
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
static void
buf_reassign(buf_t bp, vnode_t newvp)
{
        struct buflists *listheadp;

        if (newvp == NULL) {
                printf("buf_reassign: NULL");
                return;
        }
        lck_mtx_lock_spin(buf_mtxp);

        /*
         * Delete from old vnode list, if on one.
         */
        if (bp->b_vnbufs.le_next != NOLIST)
                bufremvn(bp);
        /*
         * If dirty, put on list of dirty buffers;
         * otherwise insert onto list of clean buffers.
         */
        if (ISSET(bp->b_flags, B_DELWRI))
                listheadp = &newvp->v_dirtyblkhd;
        else
                listheadp = &newvp->v_cleanblkhd;
        bufinsvn(bp, listheadp);

        lck_mtx_unlock(buf_mtxp);
}

static __inline__ void
bufhdrinit(buf_t bp)
{
        bzero((char *)bp, sizeof *bp);
        bp->b_dev = NODEV;
        bp->b_rcred = NOCRED;
        bp->b_wcred = NOCRED;
        bp->b_vnbufs.le_next = NOLIST;
        bp->b_flags = B_INVAL;

        return;
}

/*
 * Initialize buffers and hash links for buffers.
 */
__private_extern__ void
bufinit(void)
{
        buf_t   bp;
        struct bqueues *dp;
        int     i;

        nbuf_headers = 0;
        /* Initialize the buffer queues ('freelists') and the hash table */
        for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
                TAILQ_INIT(dp);
        bufhashtbl = hashinit(nbuf_hashelements, M_CACHE, &bufhash);

        buf_busycount = 0;

        /* Initialize the buffer headers */
        for (i = 0; i < max_nbuf_headers; i++) {
                nbuf_headers++;
                bp = &buf_headers[i];
                bufhdrinit(bp);

                BLISTNONE(bp);
                dp = &bufqueues[BQ_EMPTY];
                bp->b_whichq = BQ_EMPTY;
                bp->b_timestamp = buf_timestamp();
                binsheadfree(bp, dp, BQ_EMPTY);
                binshash(bp, &invalhash);
        }
        boot_nbuf_headers = nbuf_headers;

        TAILQ_INIT(&iobufqueue);
        TAILQ_INIT(&delaybufqueue);

        for (; i < nbuf_headers + niobuf_headers; i++) {
                bp = &buf_headers[i];
                bufhdrinit(bp);
                bp->b_whichq = -1;
                binsheadfree(bp, &iobufqueue, -1);
        }

        /*
         * allocate lock group attribute and group
         */
        buf_mtx_grp_attr = lck_grp_attr_alloc_init();
        buf_mtx_grp = lck_grp_alloc_init("buffer cache", buf_mtx_grp_attr);

        /*
         * allocate the lock attribute
         */
        buf_mtx_attr = lck_attr_alloc_init();

        /*
         * allocate and initialize mutexes for the buffer and iobuffer pools
         */
        buf_mtxp        = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr);
        iobuffer_mtxp   = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr);

        if (iobuffer_mtxp == NULL)
                panic("couldn't create iobuffer mutex");

        if (buf_mtxp == NULL)
                panic("couldn't create buf mutex");

        /*
         * allocate and initialize cluster specific global locks...
         */
        cluster_init();

        printf("using %d buffer headers and %d cluster IO buffer headers\n",
                nbuf_headers, niobuf_headers);

        /* Set up zones used by the buffer cache */
        bufzoneinit();

        /* start the bcleanbuf() thread */
        bcleanbuf_thread_init();

        /* Register a callout for relieving vm pressure */
        if (vm_set_buffer_cleanup_callout(buffer_cache_gc) != KERN_SUCCESS) {
                panic("Couldn't register buffer cache callout for vm pressure!\n");
        }

#if BALANCE_QUEUES
        {
        static void bufq_balance_thread_init(void) __attribute__((section("__TEXT, initcode")));
        /* create a thread to do dynamic buffer queue balancing */
        bufq_balance_thread_init();
        }
#endif /* BALANCE_QUEUES */
}



/*
 * Zones for the meta data buffers
 */

#define MINMETA 512
#define MAXMETA 8192

struct meta_zone_entry {
        zone_t mz_zone;
        vm_size_t mz_size;
        vm_size_t mz_max;
        const char *mz_name;
};

struct meta_zone_entry meta_zones[] = {
        {NULL, (MINMETA * 1), 128 * (MINMETA * 1), "buf.512" },
        {NULL, (MINMETA * 2), 64 * (MINMETA * 2), "buf.1024" },
        {NULL, (MINMETA * 4), 16 * (MINMETA * 4), "buf.2048" },
        {NULL, (MINMETA * 8), 512 * (MINMETA * 8), "buf.4096" },
        {NULL, (MINMETA * 16), 512 * (MINMETA * 16), "buf.8192" },
        {NULL, 0, 0, "" } /* End */
};

/*
 * Initialize the meta data zones
 */
static void
bufzoneinit(void)
{
        int i;

        for (i = 0; meta_zones[i].mz_size != 0; i++) {
                meta_zones[i].mz_zone =
                                zinit(meta_zones[i].mz_size,
                                        meta_zones[i].mz_max,
                                        PAGE_SIZE,
                                        meta_zones[i].mz_name);
                zone_change(meta_zones[i].mz_zone, Z_CALLERACCT, FALSE);
        }
        buf_hdr_zone = zinit(sizeof(struct buf), 32, PAGE_SIZE, "buf headers");
        zone_change(buf_hdr_zone, Z_CALLERACCT, FALSE);
}

static __inline__ zone_t
getbufzone(size_t size)
{
        int i;

        if ((size % 512) || (size < MINMETA) || (size > MAXMETA))
                panic("getbufzone: incorrect size = %lu", size);

        for (i = 0; meta_zones[i].mz_size != 0; i++) {
                if (meta_zones[i].mz_size >= size)
                        break;
        }

        return (meta_zones[i].mz_zone);
}


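/*
 * Illustrative note (not part of the original source): getbufzone() rounds
 * up to the smallest zone that fits the request; getbufzone(2048) returns
 * the "buf.2048" zone and getbufzone(2560) returns "buf.4096", while any
 * size that isn't a multiple of 512 or lies outside [MINMETA, MAXMETA]
 * panics.
 */
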
static struct buf *
bio_doread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, int async, int queuetype)
{
        buf_t   bp;

        bp = buf_getblk(vp, blkno, size, 0, 0, queuetype);

        /*
         * If buffer does not have data valid, start a read.
         * Note that if buffer is B_INVAL, buf_getblk() won't return it.
         * Therefore, it's valid if its I/O has completed or been delayed.
         */
        if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
                struct proc *p;

                p = current_proc();

                /* Start I/O for the buffer (keeping credentials). */
                SET(bp->b_flags, B_READ | async);
                if (IS_VALID_CRED(cred) && !IS_VALID_CRED(bp->b_rcred)) {
                        kauth_cred_ref(cred);
                        bp->b_rcred = cred;
                }

                VNOP_STRATEGY(bp);

                trace(TR_BREADMISS, pack(vp, size), blkno);

                /* Pay for the read. */
                if (p && p->p_stats)
                        OSIncrementAtomicLong(&p->p_stats->p_ru.ru_inblock);    /* XXX */

                if (async) {
                        /*
                         * since we asked for an ASYNC I/O
                         * the biodone will do the brelse
                         * we don't want to pass back a bp
                         * that we don't 'own'
                         */
                        bp = NULL;
                }
        } else if (async) {
                buf_brelse(bp);
                bp = NULL;
        }

        trace(TR_BREADHIT, pack(vp, size), blkno);

        return (bp);
}

/*
 * Perform the reads for buf_breadn() and buf_meta_breadn().
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
static errno_t
do_breadn_for_type(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes,
                   int nrablks, kauth_cred_t cred, buf_t *bpp, int queuetype)
{
        buf_t   bp;
        int     i;

        bp = *bpp = bio_doread(vp, blkno, size, cred, 0, queuetype);

        /*
         * For each of the read-ahead blocks, start a read, if necessary.
         */
        for (i = 0; i < nrablks; i++) {
                /* If it's in the cache, just go on to next one. */
                if (incore(vp, rablks[i]))
                        continue;

                /* Get a buffer for the read-ahead block */
                (void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC, queuetype);
        }

        /* Otherwise, we had to start a read for it; wait until it's valid. */
        return (buf_biowait(bp));
}


/*
 * Read a disk block.
 * This algorithm described in Bach (p.54).
 */
errno_t
buf_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp)
{
        buf_t   bp;

        /* Get buffer for block. */
        bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_READ);

        /* Wait for the read to complete, and return result. */
        return (buf_biowait(bp));
}

/*
 * Read a disk block. [bread() for meta-data]
 * This algorithm described in Bach (p.54).
 */
errno_t
buf_meta_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp)
{
        buf_t   bp;

        /* Get buffer for block. */
        bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_META);

        /* Wait for the read to complete, and return result. */
        return (buf_biowait(bp));
}

/*
 * Read-ahead multiple disk blocks. The first is sync, the rest async.
 */
errno_t
buf_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp)
{
        return (do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_READ));
}

/*
 * Read-ahead multiple disk blocks. The first is sync, the rest async.
 * [buf_breadn() for meta-data]
 */
errno_t
buf_meta_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp)
{
        return (do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_META));
}

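/*
 * Illustrative sketch (not part of the original source): the canonical
 * consumer pattern for buf_bread().  The error-handling details are
 * assumptions for the example:
 *
 *      buf_t   bp;
 *      errno_t error;
 *
 *      if ((error = buf_bread(vp, blkno, blksize, NOCRED, &bp))) {
 *              buf_brelse(bp);         <- a buffer is returned even on error
 *              return (error);
 *      }
 *      caddr_t data = (caddr_t)buf_dataptr(bp);
 *      ... examine or modify the block ...
 *      buf_brelse(bp);                 <- or buf_bdwrite(bp) if modified
 */
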
2029/*
2030 * Block write. Described in Bach (p.56)
2031 */
91447636
A
2032errno_t
2033buf_bwrite(buf_t bp)
1c79356b 2034{
91447636
A
2035 int sync, wasdelayed;
2036 errno_t rv;
2037 proc_t p = current_proc();
2038 vnode_t vp = bp->b_vp;
1c79356b 2039
91447636 2040 if (bp->b_datap == 0) {
55e303ae
A
2041 if (brecover_data(bp) == 0)
2042 return (0);
2043 }
1c79356b
A
2044 /* Remember buffer type, to switch on it later. */
2045 sync = !ISSET(bp->b_flags, B_ASYNC);
2046 wasdelayed = ISSET(bp->b_flags, B_DELWRI);
2047 CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));
91447636
A
2048
2049 if (wasdelayed)
b0d623f7 2050 OSAddAtomicLong(-1, &nbdwrite);
1c79356b
A
2051
2052 if (!sync) {
2053 /*
2054 * If not synchronous, pay for the I/O operation and make
2055 * sure the buf is on the correct vnode queue. We have
2056 * to do this now, because if we don't, the vnode may not
2057 * be properly notified that its I/O has completed.
2058 */
2059 if (wasdelayed)
91447636 2060 buf_reassign(bp, vp);
1c79356b
A
2061 else
2062 if (p && p->p_stats)
b0d623f7 2063 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */
1c79356b 2064 }
d52fe63f 2065 trace(TR_BUFWRITE, pack(vp, bp->b_bcount), bp->b_lblkno);
1c79356b
A
2066
2067 /* Initiate disk write. Make sure the appropriate party is charged. */
91447636
A
2068
2069 OSAddAtomic(1, &vp->v_numoutput);
1c79356b 2070
91447636 2071 VNOP_STRATEGY(bp);
1c79356b
A
2072
2073 if (sync) {
2074 /*
2075 * If I/O was synchronous, wait for it to complete.
2076 */
91447636 2077 rv = buf_biowait(bp);
1c79356b
A
2078
2079 /*
2080 * Pay for the I/O operation, if it hasn't been paid for, and
2081 * make sure it's on the correct vnode queue. (async operations
2082 * were paid for above.)
2083 */
2084 if (wasdelayed)
91447636 2085 buf_reassign(bp, vp);
1c79356b
A
2086 else
2087 if (p && p->p_stats)
b0d623f7 2088 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */
1c79356b
A
2089
2090 /* Release the buffer. */
b4c24cb9
A
2091 // XXXdbg - only if the unused bit is set
2092 if (!ISSET(bp->b_flags, B_NORELSE)) {
91447636 2093 buf_brelse(bp);
b4c24cb9
A
2094 } else {
2095 CLR(bp->b_flags, B_NORELSE);
2096 }
1c79356b
A
2097
2098 return (rv);
2099 } else {
2100 return (0);
2101 }
2102}
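
/*
 * Editor's note: illustrative write-through sketch, not original source.
 * buf_bwrite() consumes the buffer: unless B_NORELSE is set, it performs
 * the buf_brelse() itself once the synchronous I/O completes.
 */
#if 0
	if ((error = buf_bread(vp, blkno, 4096, cred, &bp)) == 0) {
		/* ...modify the data at buf_dataptr(bp)... */
		error = buf_bwrite(bp);	/* waits for the I/O, then releases bp */
	} else
		buf_brelse(bp);
#endif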
2103
2104int
2d21ac55 2105vn_bwrite(struct vnop_bwrite_args *ap)
1c79356b 2106{
91447636 2107 return (buf_bwrite(ap->a_bp));
1c79356b
A
2108}
2109
2110/*
2111 * Delayed write.
2112 *
2113 * The buffer is marked dirty, but is not queued for I/O.
2114 * This routine should be used when the buffer is expected
2115 * to be modified again soon, typically a small write that
2116 * partially fills a buffer.
2117 *
2118 * NB: magnetic tapes cannot be delayed; they must be
2119 * written in the order that the writes are requested.
2120 *
2121 * Described in Leffler, et al. (pp. 208-213).
d52fe63f 2122 *
b0d623f7 2123 * Note: With the ability to allocate additional buffer
d52fe63f 2124 * headers, we can get into a situation where "too" many
91447636
A
2125 * buf_bdwrite()s let the kernel create
2126 * buffers faster than the disks can service them. Doing a buf_bawrite() in
6d2010ae 2127 * cases where we have "too many" outstanding buf_bdwrite()s avoids that.
1c79356b 2128 */
9bccf70c 2129__private_extern__ int
91447636 2130bdwrite_internal(buf_t bp, int return_error)
1c79356b 2131{
91447636
A
2132 proc_t p = current_proc();
2133 vnode_t vp = bp->b_vp;
1c79356b
A
2134
2135 /*
2136 * If the block hasn't been seen before:
2137 * (1) Mark it as having been seen,
2138 * (2) Charge for the write.
2139 * (3) Make sure it's on its vnode's correct block list,
2140 */
2141 if (!ISSET(bp->b_flags, B_DELWRI)) {
2142 SET(bp->b_flags, B_DELWRI);
2143 if (p && p->p_stats)
b0d623f7
A
2144 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */
2145 OSAddAtomicLong(1, &nbdwrite);
91447636 2146 buf_reassign(bp, vp);
1c79356b
A
2147 }
2148
d52fe63f 2149 /*
91447636
A
2150 * If we're not LOCKED, but the total number of delayed writes
2151 * has climbed above 75% of the total buffers in the system,
2152 * return an error if the caller has indicated that it can
2153 * handle one in this case; otherwise schedule the I/O now.
2154 * This is done to prevent us from allocating tons of extra
2155 * buffers when dealing with virtual disks (i.e. DiskImages),
2156 * because additional buffers are dynamically allocated to prevent
2157 * deadlocks from occurring.
2158 *
2159 * However, we can't do a buf_bawrite() if the LOCKED bit is set because the
2160 * buffer is part of a transaction and can't go to disk until
2161 * the LOCKED bit is cleared.
d52fe63f 2162 */
2d21ac55 2163 if (!ISSET(bp->b_flags, B_LOCKED) && nbdwrite > ((nbuf_headers/4)*3)) {
9bccf70c
A
2164 if (return_error)
2165 return (EAGAIN);
91447636
A
2166 /*
2167 * If the vnode has "too many" write operations in progress
2168 * wait for them to finish the IO
2169 */
2d21ac55 2170 (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, "buf_bdwrite");
91447636
A
2171
2172 return (buf_bawrite(bp));
d52fe63f
A
2173 }
2174
1c79356b
A
2175 /* Otherwise, the "write" is done, so mark and release the buffer. */
2176 SET(bp->b_flags, B_DONE);
91447636 2177 buf_brelse(bp);
9bccf70c 2178 return (0);
1c79356b
A
2179}
2180
91447636
A
2181errno_t
2182buf_bdwrite(buf_t bp)
9bccf70c 2183{
91447636 2184 return (bdwrite_internal(bp, 0));
9bccf70c
A
2185}
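
/*
 * Editor's note: illustrative delayed-write sketch, not original source.
 * Appropriate when the block is likely to be modified again soon: the
 * buffer is marked B_DELWRI and released, and the actual I/O happens later
 * (or sooner, if the 75% threshold above kicks in).
 */
#if 0
	if (buf_bread(vp, blkno, 4096, cred, &bp) == 0) {
		/* ...update a few bytes at buf_dataptr(bp)... */
		(void) buf_bdwrite(bp);	/* marks dirty and releases bp */
	} else
		buf_brelse(bp);
#endif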
2186
2187
1c79356b 2188/*
91447636 2189 * Asynchronous block write; just an asynchronous buf_bwrite().
d52fe63f
A
2190 *
2191 * Note: With the ability to allocate additional buffer
2192 * headers, we can get into a situation where "too" many
91447636 2193 * buf_bawrite()s let the kernel create
d52fe63f
A
2194 * buffers faster than the disks can service them.
2195 * We limit the number of "in flight" writes a vnode can have to
2196 * avoid this.
1c79356b 2197 */
9bccf70c 2198static int
91447636 2199bawrite_internal(buf_t bp, int throttle)
1c79356b 2200{
91447636 2201 vnode_t vp = bp->b_vp;
d52fe63f
A
2202
2203 if (vp) {
91447636
A
2204 if (throttle)
2205 /*
2206 * If the vnode has "too many" write operations in progress
2207 * wait for them to finish the IO
2208 */
2209 (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, (const char *)"buf_bawrite");
2210 else if (vp->v_numoutput >= VNODE_ASYNC_THROTTLE)
2211 /*
2212 * return to the caller and
2213 * let him decide what to do
2214 */
2215 return (EWOULDBLOCK);
d52fe63f 2216 }
1c79356b 2217 SET(bp->b_flags, B_ASYNC);
9bccf70c 2218
91447636 2219 return (VNOP_BWRITE(bp));
9bccf70c
A
2220}
2221
91447636
A
2222errno_t
2223buf_bawrite(buf_t bp)
9bccf70c 2224{
91447636 2225 return (bawrite_internal(bp, 1));
1c79356b
A
2226}
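
/*
 * Editor's note: illustrative asynchronous-write sketch, not original
 * source.  buf_bawrite() may block in vnode_waitforwrites() if the vnode
 * already has VNODE_ASYNC_THROTTLE writes in flight; completion is
 * signalled later through buf_biodone() when the driver finishes.
 */
#if 0
	error = buf_bawrite(bp);	/* starts the write, does not wait */
#endif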
2227
91447636 2228
6d2010ae
A
2229
2230static void
2231buf_free_meta_store(buf_t bp)
2232{
2233 if (bp->b_bufsize) {
2234 if (ISSET(bp->b_flags, B_ZALLOC)) {
2235 zone_t z;
2236
2237 z = getbufzone(bp->b_bufsize);
2238 zfree(z, (void *)bp->b_datap);
2239 } else
2240 kmem_free(kernel_map, bp->b_datap, bp->b_bufsize);
2241
2242 bp->b_datap = (uintptr_t)NULL;
2243 bp->b_bufsize = 0;
2244 }
2245}
2246
2247
2248static buf_t
2249buf_brelse_shadow(buf_t bp)
2250{
2251 buf_t bp_head;
2252 buf_t bp_temp;
2253 buf_t bp_return = NULL;
2254#ifdef BUF_MAKE_PRIVATE
2255 buf_t bp_data;
2256 int data_ref = 0;
2257#endif
2258 lck_mtx_lock_spin(buf_mtxp);
2259
2260 bp_head = (buf_t)bp->b_orig;
2261
2262 if (bp_head->b_whichq != -1)
2263 panic("buf_brelse_shadow: bp_head on freelist %d\n", bp_head->b_whichq);
2264
2265#ifdef BUF_MAKE_PRIVATE
2266 if ((bp_data = bp->b_data_store)) {
2267 bp_data->b_data_ref--;
2268 /*
2269 * snapshot the ref count so that we can check it
2270 * outside of the lock... we only want the guy going
2271 * from 1 -> 0 to try and release the storage
2272 */
2273 data_ref = bp_data->b_data_ref;
2274 }
2275#endif
2276 KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_START, bp, bp_head, bp_head->b_shadow_ref, 0, 0);
2277
2278 bp_head->b_shadow_ref--;
2279
2280 for (bp_temp = bp_head; bp_temp && bp != bp_temp->b_shadow; bp_temp = bp_temp->b_shadow);
2281
2282 if (bp_temp == NULL)
2283 panic("buf_brelse_shadow: bp not on list %p", bp_head);
2284
2285 bp_temp->b_shadow = bp_temp->b_shadow->b_shadow;
2286
2287#ifdef BUF_MAKE_PRIVATE
2288 /*
2289 * we're about to free the current 'owner' of the data buffer and
2290 * there is at least one other shadow buf_t still pointing at it
2291 * so transfer it to the first shadow buf left in the chain
2292 */
2293 if (bp == bp_data && data_ref) {
2294 if ((bp_data = bp_head->b_shadow) == NULL)
2295 panic("buf_brelse_shadow: data_ref mismatch bp(%p)", bp);
2296
2297 for (bp_temp = bp_data; bp_temp; bp_temp = bp_temp->b_shadow)
2298 bp_temp->b_data_store = bp_data;
2299 bp_data->b_data_ref = data_ref;
2300 }
2301#endif
2302 if (bp_head->b_shadow_ref == 0 && bp_head->b_shadow)
2303 panic("buf_relse_shadow: b_shadow != NULL && b_shadow_ref == 0 bp(%p)", bp);
2304 if (bp_head->b_shadow_ref && bp_head->b_shadow == 0)
2305 panic("buf_relse_shadow: b_shadow == NULL && b_shadow_ref != 0 bp(%p)", bp);
2306
2307 if (bp_head->b_shadow_ref == 0) {
2308 if (!ISSET(bp_head->b_lflags, BL_BUSY)) {
2309
2310 CLR(bp_head->b_flags, B_AGE);
2311 bp_head->b_timestamp = buf_timestamp();
2312
2313 if (ISSET(bp_head->b_flags, B_LOCKED)) {
2314 bp_head->b_whichq = BQ_LOCKED;
2315 binstailfree(bp_head, &bufqueues[BQ_LOCKED], BQ_LOCKED);
2316 } else {
2317 bp_head->b_whichq = BQ_META;
2318 binstailfree(bp_head, &bufqueues[BQ_META], BQ_META);
2319 }
2320 } else if (ISSET(bp_head->b_lflags, BL_WAITSHADOW)) {
2321 CLR(bp_head->b_lflags, BL_WAITSHADOW);
2322
2323 bp_return = bp_head;
2324 }
2325 }
2326 lck_mtx_unlock(buf_mtxp);
2327#ifdef BUF_MAKE_PRIVATE
2328 if (bp == bp_data && data_ref == 0)
2329 buf_free_meta_store(bp);
2330
2331 bp->b_data_store = NULL;
2332#endif
2333 KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_END, bp, 0, 0, 0, 0);
2334
2335 return (bp_return);
2336}
2337
2338
1c79356b
A
2339/*
2340 * Release a buffer on to the free lists.
2341 * Described in Bach (p. 46).
2342 */
2343void
91447636 2344buf_brelse(buf_t bp)
1c79356b
A
2345{
2346 struct bqueues *bufq;
91447636
A
2347 long whichq;
2348 upl_t upl;
2349 int need_wakeup = 0;
2350 int need_bp_wakeup = 0;
2351
2352
2353 if (bp->b_whichq != -1 || !(bp->b_lflags & BL_BUSY))
2d21ac55 2354 panic("buf_brelse: bad buffer = %p\n", bp);
91447636
A
2355
2356#ifdef JOE_DEBUG
b0d623f7 2357 (void) OSBacktrace(&bp->b_stackbrelse[0], 6);
91447636
A
2358
2359 bp->b_lastbrelse = current_thread();
2360 bp->b_tag = 0;
2361#endif
2362 if (bp->b_lflags & BL_IOBUF) {
6d2010ae
A
2363 buf_t shadow_master_bp = NULL;
2364
2365 if (ISSET(bp->b_lflags, BL_SHADOW))
2366 shadow_master_bp = buf_brelse_shadow(bp);
2367 else if (ISSET(bp->b_lflags, BL_IOBUF_ALLOC))
2368 buf_free_meta_store(bp);
91447636 2369 free_io_buf(bp);
6d2010ae
A
2370
2371 if (shadow_master_bp) {
2372 bp = shadow_master_bp;
2373 goto finish_shadow_master;
2374 }
91447636
A
2375 return;
2376 }
1c79356b
A
2377
2378 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_START,
b0d623f7 2379 bp->b_lblkno * PAGE_SIZE, bp, bp->b_datap,
fa4905b1 2380 bp->b_flags, 0);
1c79356b
A
2381
2382 trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
2383
91447636
A
2384 /*
2385 * if we're invalidating a buffer that has the B_FILTER bit
2386 * set then call the b_iodone function so it gets cleaned
2387 * up properly.
2388 *
2389 * the HFS journal code depends on this
2390 */
b4c24cb9 2391 if (ISSET(bp->b_flags, B_META) && ISSET(bp->b_flags, B_INVAL)) {
91447636
A
2392 if (ISSET(bp->b_flags, B_FILTER)) { /* if necessary, call out */
2393 void (*iodone_func)(struct buf *, void *) = bp->b_iodone;
6d2010ae 2394 void *arg = bp->b_transaction;
b4c24cb9 2395
91447636 2396 CLR(bp->b_flags, B_FILTER); /* but note callout done */
b4c24cb9 2397 bp->b_iodone = NULL;
91447636 2398 bp->b_transaction = NULL;
b4c24cb9
A
2399
2400 if (iodone_func == NULL) {
2d21ac55 2401 panic("brelse: bp @ %p has NULL b_iodone!\n", bp);
b4c24cb9 2402 }
91447636 2403 (*iodone_func)(bp, arg);
b4c24cb9
A
2404 }
2405 }
91447636
A
2406 /*
2407 * I/O is done. Cleanup the UPL state
2408 */
2409 upl = bp->b_upl;
2410
2411 if ( !ISSET(bp->b_flags, B_META) && UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) {
1c79356b 2412 kern_return_t kret;
1c79356b
A
2413 int upl_flags;
2414
6d2010ae 2415 if (upl == NULL) {
1c79356b 2416 if ( !ISSET(bp->b_flags, B_INVAL)) {
0b4e3aa0 2417 kret = ubc_create_upl(bp->b_vp,
91447636
A
2418 ubc_blktooff(bp->b_vp, bp->b_lblkno),
2419 bp->b_bufsize,
2420 &upl,
2421 NULL,
2422 UPL_PRECIOUS);
2423
1c79356b 2424 if (kret != KERN_SUCCESS)
91447636 2425 panic("brelse: Failed to create UPL");
b0d623f7
A
2426#if UPL_DEBUG
2427 upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 5);
91447636
A
2428#endif /* UPL_DEBUG */
2429 }
1c79356b 2430 } else {
91447636 2431 if (bp->b_datap) {
55e303ae
A
2432 kret = ubc_upl_unmap(upl);
2433
2434 if (kret != KERN_SUCCESS)
91447636
A
2435 panic("ubc_upl_unmap failed");
2436 bp->b_datap = (uintptr_t)NULL;
55e303ae 2437 }
1c79356b
A
2438 }
2439 if (upl) {
1c79356b 2440 if (bp->b_flags & (B_ERROR | B_INVAL)) {
91447636 2441 if (bp->b_flags & (B_READ | B_INVAL))
1c79356b
A
2442 upl_flags = UPL_ABORT_DUMP_PAGES;
2443 else
2444 upl_flags = 0;
91447636 2445
0b4e3aa0 2446 ubc_upl_abort(upl, upl_flags);
1c79356b 2447 } else {
91447636
A
2448 if (ISSET(bp->b_flags, B_DELWRI | B_WASDIRTY))
2449 upl_flags = UPL_COMMIT_SET_DIRTY ;
2450 else
2451 upl_flags = UPL_COMMIT_CLEAR_DIRTY ;
2452
0b4e3aa0 2453 ubc_upl_commit_range(upl, 0, bp->b_bufsize, upl_flags |
91447636 2454 UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
1c79356b 2455 }
91447636 2456 bp->b_upl = NULL;
1c79356b
A
2457 }
2458 } else {
91447636 2459 if ( (upl) )
2d21ac55 2460 panic("brelse: UPL set for non VREG; vp=%p", bp->b_vp);
1c79356b
A
2461 }
2462
1c79356b 2463 /*
91447636 2464 * If it's locked, don't report an error; try again later.
1c79356b 2465 */
1c79356b
A
2466 if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
2467 CLR(bp->b_flags, B_ERROR);
91447636
A
2468 /*
2469 * If it's not cacheable, or an error, mark it invalid.
2470 */
1c79356b
A
2471 if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
2472 SET(bp->b_flags, B_INVAL);
91447636 2473
b0d623f7
A
2474 if ((bp->b_bufsize <= 0) ||
2475 ISSET(bp->b_flags, B_INVAL) ||
2476 (ISSET(bp->b_lflags, BL_WANTDEALLOC) && !ISSET(bp->b_flags, B_DELWRI))) {
6d2010ae
A
2477
2478 boolean_t delayed_buf_free_meta_store = FALSE;
2479
1c79356b 2480 /*
2d21ac55
A
2481 * If it's invalid or empty, dissociate it from its vnode,
2482 * release its storage if B_META, and
2483 * clean it up a bit and put it on the EMPTY queue
1c79356b 2484 */
91447636 2485 if (ISSET(bp->b_flags, B_DELWRI))
b0d623f7 2486 OSAddAtomicLong(-1, &nbdwrite);
91447636 2487
2d21ac55 2488 if (ISSET(bp->b_flags, B_META)) {
6d2010ae
A
2489 if (bp->b_shadow_ref)
2490 delayed_buf_free_meta_store = TRUE;
2491 else
2492 buf_free_meta_store(bp);
2d21ac55 2493 }
91447636 2494 /*
2d21ac55 2495 * nuke any credentials we were holding
91447636 2496 */
6d2010ae
A
2497 buf_release_credentials(bp);
2498
2499 lck_mtx_lock_spin(buf_mtxp);
2500
2501 if (bp->b_shadow_ref) {
2502 SET(bp->b_lflags, BL_WAITSHADOW);
2503
2504 lck_mtx_unlock(buf_mtxp);
2505
2506 return;
2d21ac55 2507 }
6d2010ae 2508 if (delayed_buf_free_meta_store == TRUE) {
91447636 2509
6d2010ae
A
2510 lck_mtx_unlock(buf_mtxp);
2511finish_shadow_master:
2512 buf_free_meta_store(bp);
91447636 2513
6d2010ae
A
2514 lck_mtx_lock_spin(buf_mtxp);
2515 }
2516 CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
2d21ac55
A
2517
2518 if (bp->b_vp)
2519 brelvp_locked(bp);
2520
2521 bremhash(bp);
2522 BLISTNONE(bp);
2523 binshash(bp, &invalhash);
2524
6d2010ae
A
2525 bp->b_whichq = BQ_EMPTY;
2526 binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
1c79356b 2527 } else {
6d2010ae 2528
1c79356b
A
2529 /*
2530 * It has valid data. Put it on the end of the appropriate
2531 * queue, so that it'll stick around for as long as possible.
2532 */
2533 if (ISSET(bp->b_flags, B_LOCKED))
2534 whichq = BQ_LOCKED; /* locked in core */
2535 else if (ISSET(bp->b_flags, B_META))
2536 whichq = BQ_META; /* meta-data */
2537 else if (ISSET(bp->b_flags, B_AGE))
2538 whichq = BQ_AGE; /* stale but valid data */
2539 else
2540 whichq = BQ_LRU; /* valid data */
1c79356b 2541 bufq = &bufqueues[whichq];
91447636 2542
2d21ac55 2543 bp->b_timestamp = buf_timestamp();
91447636 2544
6d2010ae
A
2545 lck_mtx_lock_spin(buf_mtxp);
2546
2547 /*
2548 * the buf_brelse_shadow routine doesn't take 'ownership'
2549 * of the parent buf_t... it updates state that is protected by
2550 * the buf_mtxp, and checks for BL_BUSY to determine whether to
2551 * put the buf_t back on a free list. b_shadow_ref is protected
2552 * by the lock, and since we have not yet cleared B_BUSY, we need
2553 * to check it while holding the lock to ensure that one of us
2554 * puts this buf_t back on a free list when it is safe to do so
2555 */
2556 if (bp->b_shadow_ref == 0) {
2557 CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE));
2558 bp->b_whichq = whichq;
2559 binstailfree(bp, bufq, whichq);
2560 } else {
2561 /*
2562 * there are still cloned buf_t's pointing
2563 * at this guy... need to keep it off the
2564 * freelists until a buf_brelse is done on
2565 * the last clone
2566 */
2567 CLR(bp->b_flags, (B_ASYNC | B_NOCACHE));
2568 }
1c79356b 2569 }
91447636
A
2570 if (needbuffer) {
2571 /*
2572 * needbuffer is a global
2573 * we're currently using buf_mtxp to protect it
2574 * delay doing the actual wakeup until after
2575 * we drop buf_mtxp
2576 */
2577 needbuffer = 0;
2578 need_wakeup = 1;
2579 }
2580 if (ISSET(bp->b_lflags, BL_WANTED)) {
2581 /*
2582 * delay the actual wakeup until after we
2583 * clear BL_BUSY and we've dropped buf_mtxp
2584 */
2585 need_bp_wakeup = 1;
2586 }
2587 /*
2588 * Unlock the buffer.
2589 */
2590 CLR(bp->b_lflags, (BL_BUSY | BL_WANTED));
b0d623f7 2591 buf_busycount--;
1c79356b 2592
91447636 2593 lck_mtx_unlock(buf_mtxp);
1c79356b 2594
91447636
A
2595 if (need_wakeup) {
2596 /*
2597 * Wake up any processes waiting for any buffer to become free.
2598 */
2599 wakeup(&needbuffer);
2600 }
2601 if (need_bp_wakeup) {
2602 /*
2603 * Wake up any processes waiting for _this_ buffer to become free.
2604 */
2605 wakeup(bp);
2606 }
1c79356b 2607 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_END,
b0d623f7 2608 bp, bp->b_datap, bp->b_flags, 0, 0);
1c79356b
A
2609}
2610
2611/*
2612 * Determine if a block is in the cache.
2613 * Just look on what would be its hash chain. If it's there, return
2614 * a pointer to it, unless it's marked invalid. If it's marked invalid,
2615 * we normally don't return the buffer, unless the caller explicitly
2616 * wants us to.
2617 */
91447636
A
2618static boolean_t
2619incore(vnode_t vp, daddr64_t blkno)
2620{
2621 boolean_t retval;
2d21ac55 2622 struct bufhashhdr *dp;
91447636 2623
2d21ac55 2624 dp = BUFHASH(vp, blkno);
91447636 2625
2d21ac55
A
2626 lck_mtx_lock_spin(buf_mtxp);
2627
2628 if (incore_locked(vp, blkno, dp))
91447636
A
2629 retval = TRUE;
2630 else
2631 retval = FALSE;
2632 lck_mtx_unlock(buf_mtxp);
2633
2634 return (retval);
2635}
2636
2637
2638static buf_t
2d21ac55 2639incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp)
1c79356b
A
2640{
2641 struct buf *bp;
1c79356b 2642
1c79356b 2643 /* Search hash chain */
2d21ac55 2644 for (bp = dp->lh_first; bp != NULL; bp = bp->b_hash.le_next) {
1c79356b 2645 if (bp->b_lblkno == blkno && bp->b_vp == vp &&
91447636 2646 !ISSET(bp->b_flags, B_INVAL)) {
1c79356b 2647 return (bp);
91447636 2648 }
1c79356b 2649 }
2d21ac55 2650 return (NULL);
1c79356b
A
2651}
2652
fa4905b1
A
2653
2654/* XXX FIXME -- Update the comment to reflect the UBC changes (please) -- */
1c79356b
A
2655/*
2656 * Get a block of requested size that is associated with
2657 * a given vnode and block offset. If it is found in the
2658 * block cache, mark it as having been found, make it busy
2659 * and return it. Otherwise, return an empty block of the
2660 * correct size. It is up to the caller to ensure that the
2661 * cached blocks are of the correct size.
2662 */
91447636
A
2663buf_t
2664buf_getblk(vnode_t vp, daddr64_t blkno, int size, int slpflag, int slptimeo, int operation)
1c79356b 2665{
91447636
A
2666 buf_t bp;
2667 int err;
1c79356b
A
2668 upl_t upl;
2669 upl_page_info_t *pl;
1c79356b 2670 kern_return_t kret;
91447636
A
2671 int ret_only_valid;
2672 struct timespec ts;
2673 int upl_flags;
2d21ac55 2674 struct bufhashhdr *dp;
1c79356b 2675
1c79356b 2676 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_START,
b0d623f7 2677 (uintptr_t)(blkno * PAGE_SIZE), size, operation, 0, 0);
1c79356b 2678
91447636
A
2679 ret_only_valid = operation & BLK_ONLYVALID;
2680 operation &= ~BLK_ONLYVALID;
2d21ac55 2681 dp = BUFHASH(vp, blkno);
91447636 2682start:
2d21ac55 2683 lck_mtx_lock_spin(buf_mtxp);
b0d623f7 2684
2d21ac55 2685 if ((bp = incore_locked(vp, blkno, dp))) {
91447636
A
2686 /*
2687 * Found in the Buffer Cache
2688 */
2689 if (ISSET(bp->b_lflags, BL_BUSY)) {
2690 /*
2691 * but is busy
2692 */
1c79356b
A
2693 switch (operation) {
2694 case BLK_READ:
2695 case BLK_WRITE:
2696 case BLK_META:
91447636 2697 SET(bp->b_lflags, BL_WANTED);
1c79356b 2698 bufstats.bufs_busyincore++;
91447636
A
2699
2700 /*
2701 * don't retake the mutex after being awakened...
2702 * the time out is in msecs
2703 */
2704 ts.tv_sec = (slptimeo/1000);
2705 ts.tv_nsec = (slptimeo % 1000) * 10 * NSEC_PER_USEC * 1000;
2706
b0d623f7
A
2707 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 396)) | DBG_FUNC_NONE,
2708 (uintptr_t)blkno, size, operation, 0, 0);
2709
91447636
A
2710 err = msleep(bp, buf_mtxp, slpflag | PDROP | (PRIBIO + 1), "buf_getblk", &ts);
2711
1c79356b
A
2712 /*
2713 * Callers who call with PCATCH or timeout are
2714 * willing to deal with the NULL pointer
2715 */
91447636 2716 if (err && ((slpflag & PCATCH) || ((err == EWOULDBLOCK) && slptimeo)))
1c79356b
A
2717 return (NULL);
2718 goto start;
2719 /*NOTREACHED*/
2720 break;
2721
1c79356b 2722 default:
91447636
A
2723 /*
2724 * unknown operation requested
2725 */
2726 panic("getblk: paging or unknown operation for incore busy buffer - %x\n", operation);
1c79356b
A
2727 /*NOTREACHED*/
2728 break;
2729 }
2730 } else {
91447636
A
2731 /*
2732 * buffer in core and not busy
2733 */
91447636
A
2734 SET(bp->b_lflags, BL_BUSY);
2735 SET(bp->b_flags, B_CACHE);
b0d623f7 2736 buf_busycount++;
2d21ac55 2737
91447636 2738 bremfree_locked(bp);
1c79356b 2739 bufstats.bufs_incore++;
91447636
A
2740
2741 lck_mtx_unlock(buf_mtxp);
2d21ac55
A
2742#ifdef JOE_DEBUG
2743 bp->b_owner = current_thread();
2744 bp->b_tag = 1;
2745#endif
2746 if ( (bp->b_upl) )
2747 panic("buffer has UPL, but not marked BUSY: %p", bp);
1c79356b 2748
2d21ac55 2749 if ( !ret_only_valid && bp->b_bufsize != size)
91447636 2750 allocbuf(bp, size);
1c79356b 2751
91447636 2752 upl_flags = 0;
1c79356b 2753 switch (operation) {
1c79356b 2754 case BLK_WRITE:
91447636
A
2755 /*
2756 * "write" operation: let the UPL subsystem
2757 * know that we intend to modify the buffer
2758 * cache pages we're gathering.
2759 */
2760 upl_flags |= UPL_WILL_MODIFY;
2761 case BLK_READ:
2762 upl_flags |= UPL_PRECIOUS;
2763 if (UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) {
0b4e3aa0 2764 kret = ubc_create_upl(vp,
91447636
A
2765 ubc_blktooff(vp, bp->b_lblkno),
2766 bp->b_bufsize,
2767 &upl,
2768 &pl,
2769 upl_flags);
1c79356b 2770 if (kret != KERN_SUCCESS)
91447636 2771 panic("Failed to create UPL");
1c79356b 2772
91447636 2773 bp->b_upl = upl;
1c79356b 2774
91447636
A
2775 if (upl_valid_page(pl, 0)) {
2776 if (upl_dirty_page(pl, 0))
2777 SET(bp->b_flags, B_WASDIRTY);
2778 else
2779 CLR(bp->b_flags, B_WASDIRTY);
2780 } else
2781 CLR(bp->b_flags, (B_DONE | B_CACHE | B_WASDIRTY | B_DELWRI));
1c79356b 2782
b0d623f7 2783 kret = ubc_upl_map(upl, (vm_offset_t*)&(bp->b_datap));
1c79356b 2784
9bccf70c 2785 if (kret != KERN_SUCCESS)
91447636 2786 panic("getblk: ubc_upl_map() failed with (%d)", kret);
1c79356b
A
2787 }
2788 break;
2789
2790 case BLK_META:
2791 /*
2792 * VM is not involved in IO for the meta data
2793 * buffer already has valid data
2794 */
1c79356b
A
2795 break;
2796
2797 default:
91447636 2798 panic("getblk: paging or unknown operation for incore buffer- %d\n", operation);
1c79356b
A
2799 /*NOTREACHED*/
2800 break;
2801 }
2802 }
2803 } else { /* not incore() */
2804 int queue = BQ_EMPTY; /* Start with no preference */
1c79356b 2805
91447636
A
2806 if (ret_only_valid) {
2807 lck_mtx_unlock(buf_mtxp);
2808 return (NULL);
1c79356b 2809 }
2d21ac55 2810 if ((vnode_isreg(vp) == 0) || (UBCINFOEXISTS(vp) == 0) /*|| (vnode_issystem(vp) == 1)*/)
91447636
A
2811 operation = BLK_META;
2812
1c79356b 2813 if ((bp = getnewbuf(slpflag, slptimeo, &queue)) == NULL)
b0d623f7 2814 goto start;
91447636
A
2815
2816 /*
2817 * getnewbuf may block for a number of different reasons...
2818 * if it does, it's then possible for someone else to
2819 * create a buffer for the same block and insert it into
2820 * the hash... if we see it incore at this point we dump
2821 * the buffer we were working on and start over
2822 */
2d21ac55 2823 if (incore_locked(vp, blkno, dp)) {
0b4e3aa0
A
2824 SET(bp->b_flags, B_INVAL);
2825 binshash(bp, &invalhash);
91447636
A
2826
2827 lck_mtx_unlock(buf_mtxp);
2828
2829 buf_brelse(bp);
0b4e3aa0
A
2830 goto start;
2831 }
b4c24cb9
A
2832 /*
2833 * NOTE: YOU CAN NOT BLOCK UNTIL binshash() HAS BEEN
2834 * CALLED! BE CAREFUL.
2835 */
0b4e3aa0 2836
1c79356b 2837 /*
91447636 2838 * mark the buffer as B_META if indicated
1c79356b 2839 * so that when buffer is released it will goto META queue
1c79356b 2840 */
91447636
A
2841 if (operation == BLK_META)
2842 SET(bp->b_flags, B_META);
9bccf70c
A
2843
2844 bp->b_blkno = bp->b_lblkno = blkno;
2845 bp->b_vp = vp;
2846
0b4e3aa0
A
2847 /*
2848 * Insert in the hash so that incore() can find it
2849 */
2850 binshash(bp, BUFHASH(vp, blkno));
2851
2d21ac55 2852 bgetvp_locked(vp, bp);
91447636 2853
2d21ac55 2854 lck_mtx_unlock(buf_mtxp);
9bccf70c 2855
1c79356b
A
2856 allocbuf(bp, size);
2857
91447636 2858 upl_flags = 0;
1c79356b
A
2859 switch (operation) {
2860 case BLK_META:
91447636
A
2861 /*
2862 * buffer data is invalid...
2863 *
2864 * I don't want to have to retake buf_mtxp,
2865 * so the miss and vmhits counters are done
2866 * with Atomic updates... all other counters
2867 * in bufstats are protected with either
2868 * buf_mtxp or iobuffer_mtxp
2869 */
b0d623f7 2870 OSAddAtomicLong(1, &bufstats.bufs_miss);
1c79356b
A
2871 break;
2872
1c79356b 2873 case BLK_WRITE:
91447636
A
2874 /*
2875 * "write" operation: let the UPL subsystem know
2876 * that we intend to modify the buffer cache pages
2877 * we're gathering.
2878 */
2879 upl_flags |= UPL_WILL_MODIFY;
2880 case BLK_READ:
2881 { off_t f_offset;
2882 size_t contig_bytes;
2883 int bmap_flags;
1c79356b 2884
91447636 2885 if ( (bp->b_upl) )
2d21ac55 2886 panic("bp already has UPL: %p",bp);
1c79356b 2887
91447636
A
2888 f_offset = ubc_blktooff(vp, blkno);
2889
2890 upl_flags |= UPL_PRECIOUS;
0b4e3aa0 2891 kret = ubc_create_upl(vp,
91447636
A
2892 f_offset,
2893 bp->b_bufsize,
2894 &upl,
2895 &pl,
2896 upl_flags);
1c79356b 2897
91447636
A
2898 if (kret != KERN_SUCCESS)
2899 panic("Failed to create UPL");
b0d623f7
A
2900#if UPL_DEBUG
2901 upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 4);
91447636
A
2902#endif /* UPL_DEBUG */
2903 bp->b_upl = upl;
1c79356b
A
2904
2905 if (upl_valid_page(pl, 0)) {
1c79356b 2906
91447636
A
2907 if (operation == BLK_READ)
2908 bmap_flags = VNODE_READ;
2909 else
2910 bmap_flags = VNODE_WRITE;
1c79356b 2911
91447636 2912 SET(bp->b_flags, B_CACHE | B_DONE);
1c79356b 2913
b0d623f7 2914 OSAddAtomicLong(1, &bufstats.bufs_vmhits);
1c79356b 2915
91447636
A
2916 bp->b_validoff = 0;
2917 bp->b_dirtyoff = 0;
1c79356b 2918
91447636
A
2919 if (upl_dirty_page(pl, 0)) {
2920 /* page is dirty */
2921 SET(bp->b_flags, B_WASDIRTY);
1c79356b 2922
91447636
A
2923 bp->b_validend = bp->b_bcount;
2924 bp->b_dirtyend = bp->b_bcount;
1c79356b 2925 } else {
91447636
A
2926 /* page is clean */
2927 bp->b_validend = bp->b_bcount;
2928 bp->b_dirtyend = 0;
1c79356b 2929 }
91447636
A
2930 /*
2931 * try to recreate the physical block number associated with
2932 * this buffer...
2933 */
2934 if (VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))
2935 panic("getblk: VNOP_BLOCKMAP failed");
2936 /*
2937 * if the extent represented by this buffer
2938 * is not completely physically contiguous on
2939 * disk, then we can't cache the physical mapping
2940 * in the buffer header
2941 */
2942 if ((long)contig_bytes < bp->b_bcount)
2943 bp->b_blkno = bp->b_lblkno;
1c79356b 2944 } else {
b0d623f7 2945 OSAddAtomicLong(1, &bufstats.bufs_miss);
1c79356b 2946 }
b0d623f7 2947 kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap));
1c79356b 2948
91447636
A
2949 if (kret != KERN_SUCCESS)
2950 panic("getblk: ubc_upl_map() failed with (%d)", kret);
1c79356b 2951 break;
91447636 2952 }
1c79356b 2953 default:
91447636 2954 panic("getblk: paging or unknown operation - %x", operation);
1c79356b
A
2955 /*NOTREACHED*/
2956 break;
2957 }
2958 }
1c79356b 2959 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_END,
b0d623f7 2960 bp, bp->b_datap, bp->b_flags, 3, 0);
91447636
A
2961
2962#ifdef JOE_DEBUG
b0d623f7 2963 (void) OSBacktrace(&bp->b_stackgetblk[0], 6);
91447636 2964#endif
1c79356b
A
2965 return (bp);
2966}
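
/*
 * Editor's note: illustrative cache-peek sketch, not original source.
 * BLK_ONLYVALID makes buf_getblk() return NULL on a miss instead of
 * allocating a fresh buffer; on a hit the buffer comes back busy with
 * B_CACHE set.
 */
#if 0
	bp = buf_getblk(vp, blkno, 4096, 0, 0, BLK_META | BLK_ONLYVALID);
	if (bp) {
		/* cache hit: use the data, then give the buffer back */
		buf_brelse(bp);
	}
#endif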
2967
2968/*
2969 * Get an empty, disassociated buffer of given size.
2970 */
91447636 2971buf_t
2d21ac55 2972buf_geteblk(int size)
1c79356b 2973{
b0d623f7 2974 buf_t bp = NULL;
91447636
A
2975 int queue = BQ_EMPTY;
2976
b0d623f7
A
2977 do {
2978 lck_mtx_lock_spin(buf_mtxp);
2979
2980 bp = getnewbuf(0, 0, &queue);
2981 } while (bp == NULL);
1c79356b 2982
1c79356b 2983 SET(bp->b_flags, (B_META|B_INVAL));
1c79356b
A
2984
2985#if DIAGNOSTIC
2986 assert(queue == BQ_EMPTY);
2987#endif /* DIAGNOSTIC */
2988 /* XXX need to implement logic to deal with other queues */
2989
1c79356b 2990 binshash(bp, &invalhash);
1c79356b
A
2991 bufstats.bufs_eblk++;
2992
91447636
A
2993 lck_mtx_unlock(buf_mtxp);
2994
2995 allocbuf(bp, size);
2996
1c79356b
A
2997 return (bp);
2998}
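
/*
 * Editor's note: illustrative scratch-buffer sketch, not original source.
 * The buffer comes back marked B_META|B_INVAL and attached to no vnode,
 * so buf_brelse() sends it straight back to the empty queue.
 */
#if 0
	bp = buf_geteblk(8192);
	/* ...use buf_dataptr(bp) as temporary wired storage... */
	buf_brelse(bp);
#endif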
2999
6d2010ae
A
3000uint32_t
3001buf_redundancy_flags(buf_t bp)
3002{
3003 return bp->b_redundancy_flags;
3004}
3005
3006void
3007buf_set_redundancy_flags(buf_t bp, uint32_t flags)
3008{
3009 SET(bp->b_redundancy_flags, flags);
3010}
3011
3012void
3013buf_clear_redundancy_flags(buf_t bp, uint32_t flags)
3014{
3015 CLR(bp->b_redundancy_flags, flags);
3016}
1c79356b
A
3017
3018/*
3019 * With UBC, there is no need to expand / shrink the file data
3020 * buffer. The VM uses the same pages, hence no waste.
3021 * All the file data buffers can have one size.
3022 * In fact expand / shrink would be an expensive operation.
3023 *
3024 * Only exception to this is meta-data buffers. Most of the
3025 * meta data operations are smaller than PAGE_SIZE. Having the
3026 * meta-data buffers grow and shrink as needed, optimizes use
3027 * of the kernel wired memory.
3028 */
3029
3030int
91447636 3031allocbuf(buf_t bp, int size)
1c79356b
A
3032{
3033 vm_size_t desired_size;
3034
3035 desired_size = roundup(size, CLBYTES);
3036
91447636 3037 if (desired_size < PAGE_SIZE)
1c79356b
A
3038 desired_size = PAGE_SIZE;
3039 if (desired_size > MAXBSIZE)
3040 panic("allocbuf: buffer larger than MAXBSIZE requested");
3041
1c79356b 3042 if (ISSET(bp->b_flags, B_META)) {
1c79356b 3043 zone_t zprev, z;
91447636
A
3044 int nsize = roundup(size, MINMETA);
3045
3046 if (bp->b_datap) {
3047 vm_offset_t elem = (vm_offset_t)bp->b_datap;
3048
3049 if (ISSET(bp->b_flags, B_ZALLOC)) {
3050 if (bp->b_bufsize < nsize) {
3051 /* reallocate to a bigger size */
3052
3053 zprev = getbufzone(bp->b_bufsize);
3054 if (nsize <= MAXMETA) {
3055 desired_size = nsize;
3056 z = getbufzone(nsize);
2d21ac55
A
3057 /* b_datap not really a ptr */
3058 *(void **)(&bp->b_datap) = zalloc(z);
1c79356b 3059 } else {
91447636 3060 bp->b_datap = (uintptr_t)NULL;
b0d623f7 3061 kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size);
91447636 3062 CLR(bp->b_flags, B_ZALLOC);
1c79356b 3063 }
91447636
A
3064 bcopy((void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize);
3065 zfree(zprev, (void *)elem);
3066 } else {
3067 desired_size = bp->b_bufsize;
3068 }
3069
3070 } else {
3071 if ((vm_size_t)bp->b_bufsize < desired_size) {
1c79356b 3072 /* reallocate to a bigger size */
91447636 3073 bp->b_datap = (uintptr_t)NULL;
b0d623f7 3074 kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size);
91447636 3075 bcopy((const void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize);
1c79356b
A
3076 kmem_free(kernel_map, elem, bp->b_bufsize);
3077 } else {
3078 desired_size = bp->b_bufsize;
3079 }
91447636 3080 }
1c79356b
A
3081 } else {
3082 /* new allocation */
3083 if (nsize <= MAXMETA) {
3084 desired_size = nsize;
3085 z = getbufzone(nsize);
2d21ac55
A
3086 /* b_datap not really a ptr */
3087 *(void **)(&bp->b_datap) = zalloc(z);
1c79356b 3088 SET(bp->b_flags, B_ZALLOC);
91447636 3089 } else
b0d623f7 3090 kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size);
1c79356b 3091 }
2d21ac55
A
3092
3093 if (bp->b_datap == 0)
3094 panic("allocbuf: NULL b_datap");
1c79356b 3095 }
9bccf70c
A
3096 bp->b_bufsize = desired_size;
3097 bp->b_bcount = size;
91447636 3098
9bccf70c 3099 return (0);
1c79356b
A
3100}
3101
3102/*
3103 * Get a new buffer from one of the free lists.
3104 *
3105 * A request for a queue is passed in. The queue from which the buffer was
3106 * taken is returned. Out-of-range queue requests get BQ_EMPTY. A request for
3107 * BQUEUE means no preference; use heuristics in that case.
3108 * The heuristic is as follows:
3109 * Try BQ_AGE, BQ_LRU, BQ_EMPTY, BQ_META in that order.
3110 * If none available block till one is made available.
3111 * If buffers available on both BQ_AGE and BQ_LRU, check the timestamps.
3112 * Pick the most stale buffer.
3113 * If found buffer was marked delayed write, start the async. write
3114 * and restart the search.
3115 * Initialize the fields and disassociate the buffer from the vnode.
3116 * Remove the buffer from the hash. Return the buffer and the queue
3117 * on which it was found.
91447636
A
3118 *
3119 * buf_mtxp is held upon entry
b0d623f7
A
3120 * returns with buf_mtxp locked if new buf available
3121 * returns with buf_mtxp UNlocked if new buf NOT available
1c79356b
A
3122 */
3123
91447636
A
3124static buf_t
3125getnewbuf(int slpflag, int slptimeo, int * queue)
1c79356b 3126{
91447636
A
3127 buf_t bp;
3128 buf_t lru_bp;
3129 buf_t age_bp;
3130 buf_t meta_bp;
3131 int age_time, lru_time, bp_time, meta_time;
3132 int req = *queue; /* save it for restarts */
3133 struct timespec ts;
1c79356b
A
3134
3135start:
91447636
A
3136 /*
3137 * invalid request gets empty queue
3138 */
2d21ac55 3139 if ((*queue >= BQUEUES) || (*queue < 0)
765c9de3 3140 || (*queue == BQ_LAUNDRY) || (*queue == BQ_LOCKED))
1c79356b 3141 *queue = BQ_EMPTY;
2d21ac55
A
3142
3143
3144 if (*queue == BQ_EMPTY && (bp = bufqueues[*queue].tqh_first))
3145 goto found;
3146
3147 /*
3148 * need to grow number of bufs, add another one rather than recycling
3149 */
3150 if (nbuf_headers < max_nbuf_headers) {
0c530ab8
A
3151 /*
3152 * Increment count now as lock
3153 * is dropped for allocation.
3154 * That avoids over commits
3155 */
2d21ac55 3156 nbuf_headers++;
0c530ab8
A
3157 goto add_newbufs;
3158 }
2d21ac55
A
3159 /* Try for the requested queue first */
3160 bp = bufqueues[*queue].tqh_first;
3161 if (bp)
3162 goto found;
1c79356b
A
3163
3164 /* Unable to use requested queue */
3165 age_bp = bufqueues[BQ_AGE].tqh_first;
3166 lru_bp = bufqueues[BQ_LRU].tqh_first;
3167 meta_bp = bufqueues[BQ_META].tqh_first;
3168
9bccf70c
A
3169 if (!age_bp && !lru_bp && !meta_bp) {
3170 /*
3171 * Unavailable on AGE, LRU and META queues;
3172 * try the empty list first
3173 */
1c79356b
A
3174 bp = bufqueues[BQ_EMPTY].tqh_first;
3175 if (bp) {
3176 *queue = BQ_EMPTY;
3177 goto found;
3178 }
0c530ab8
A
3179 /*
3180 * We have seen that this is hard to trigger.
3181 * This is an overcommit of nbufs but needed
3182 * in some scenarios with disk images
3183 */
3184
3185add_newbufs:
91447636 3186 lck_mtx_unlock(buf_mtxp);
765c9de3 3187
91447636 3188 /* Create a new temporary buffer header */
765c9de3 3189 bp = (struct buf *)zalloc(buf_hdr_zone);
2d21ac55 3190
765c9de3
A
3191 if (bp) {
3192 bufhdrinit(bp);
2d21ac55
A
3193 bp->b_whichq = BQ_EMPTY;
3194 bp->b_timestamp = buf_timestamp();
765c9de3 3195 BLISTNONE(bp);
765c9de3
A
3196 SET(bp->b_flags, B_HDRALLOC);
3197 *queue = BQ_EMPTY;
2d21ac55 3198 }
b0d623f7 3199 lck_mtx_lock_spin(buf_mtxp);
2d21ac55
A
3200
3201 if (bp) {
3202 binshash(bp, &invalhash);
765c9de3
A
3203 binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
3204 buf_hdr_count++;
3205 goto found;
3206 }
0c530ab8 3207 /* subtract already accounted bufcount */
2d21ac55 3208 nbuf_headers--;
0c530ab8 3209
91447636 3210 bufstats.bufs_sleeps++;
765c9de3 3211
1c79356b
A
3212 /* wait for a free buffer of any kind */
3213 needbuffer = 1;
91447636
A
3214 /* slptimeo is in msecs */
3215 ts.tv_sec = (slptimeo/1000);
3216 /* the hz value is 100, which leads to 10ms ticks */
3217 ts.tv_nsec = (slptimeo % 1000) * NSEC_PER_USEC * 1000 * 10;
b0d623f7
A
3218
3219 msleep(&needbuffer, buf_mtxp, slpflag | PDROP | (PRIBIO+1), "getnewbuf", &ts);
2d21ac55 3220 return (NULL);
1c79356b
A
3221 }
3222
3223 /* Buffer available either on AGE or LRU or META */
3224 bp = NULL;
3225 *queue = -1;
3226
3227 /* Buffer available either on AGE or LRU */
3228 if (!age_bp) {
3229 bp = lru_bp;
3230 *queue = BQ_LRU;
3231 } else if (!lru_bp) {
3232 bp = age_bp;
3233 *queue = BQ_AGE;
3234 } else { /* buffer available on both AGE and LRU */
91447636
A
3235 int t = buf_timestamp();
3236
3237 age_time = t - age_bp->b_timestamp;
3238 lru_time = t - lru_bp->b_timestamp;
1c79356b
A
3239 if ((age_time < 0) || (lru_time < 0)) { /* time set backwards */
3240 bp = age_bp;
3241 *queue = BQ_AGE;
3242 /*
3243 * we should probably re-timestamp everything in the
3244 * queues at this point with the current time
3245 */
3246 } else {
3247 if ((lru_time >= lru_is_stale) && (age_time < age_is_stale)) {
3248 bp = lru_bp;
3249 *queue = BQ_LRU;
3250 } else {
3251 bp = age_bp;
3252 *queue = BQ_AGE;
3253 }
3254 }
3255 }
3256
3257 if (!bp) { /* Neither on AGE nor on LRU */
3258 bp = meta_bp;
3259 *queue = BQ_META;
3260 } else if (meta_bp) {
91447636
A
3261 int t = buf_timestamp();
3262
3263 bp_time = t - bp->b_timestamp;
3264 meta_time = t - meta_bp->b_timestamp;
1c79356b
A
3265
3266 if (!(bp_time < 0) && !(meta_time < 0)) {
3267 /* time not set backwards */
3268 int bp_is_stale;
3269 bp_is_stale = (*queue == BQ_LRU) ?
3270 lru_is_stale : age_is_stale;
3271
3272 if ((meta_time >= meta_is_stale) &&
3273 (bp_time < bp_is_stale)) {
3274 bp = meta_bp;
3275 *queue = BQ_META;
3276 }
3277 }
3278 }
1c79356b 3279found:
91447636 3280 if (ISSET(bp->b_flags, B_LOCKED) || ISSET(bp->b_lflags, BL_BUSY))
b0d623f7 3281 panic("getnewbuf: bp @ %p is LOCKED or BUSY! (flags 0x%x)\n", bp, bp->b_flags);
1c79356b
A
3282
3283 /* Clean it */
b0d623f7 3284 if (bcleanbuf(bp, FALSE)) {
91447636
A
3285 /*
3286 * moved to the laundry thread, buffer not ready
3287 */
1c79356b
A
3288 *queue = req;
3289 goto start;
3290 }
1c79356b
A
3291 return (bp);
3292}
9bccf70c 3293
1c79356b
A
3294
3295/*
3296 * Clean a buffer.
6d2010ae 3297 * Returns 0 if the buffer is ready to use;
91447636 3298 * returns 1 if it issued a buf_bawrite() to indicate
1c79356b 3299 * that the buffer is not ready.
91447636
A
3300 *
3301 * buf_mtxp is held upon entry
3302 * returns with buf_mtxp locked
1c79356b 3303 */
6d2010ae 3304int
b0d623f7 3305bcleanbuf(buf_t bp, boolean_t discard)
1c79356b 3306{
1c79356b 3307 /* Remove from the queue */
91447636 3308 bremfree_locked(bp);
1c79356b 3309
91447636
A
3310#ifdef JOE_DEBUG
3311 bp->b_owner = current_thread();
3312 bp->b_tag = 2;
3313#endif
765c9de3
A
3314 /*
3315 * If buffer was a delayed write, start the IO by queuing
3316 * it on the LAUNDRY queue, and return 1
3317 */
1c79356b 3318 if (ISSET(bp->b_flags, B_DELWRI)) {
b0d623f7
A
3319 if (discard) {
3320 SET(bp->b_lflags, BL_WANTDEALLOC);
3321 }
3322
6d2010ae 3323 bmovelaundry(bp);
91447636
A
3324
3325 lck_mtx_unlock(buf_mtxp);
3326
2d21ac55
A
3327 wakeup(&bufqueues[BQ_LAUNDRY]);
3328 /*
3329 * and give it a chance to run
3330 */
9bccf70c 3331 (void)thread_block(THREAD_CONTINUE_NULL);
91447636 3332
b0d623f7 3333 lck_mtx_lock_spin(buf_mtxp);
2d21ac55 3334
1c79356b
A
3335 return (1);
3336 }
2d21ac55
A
3337#ifdef JOE_DEBUG
3338 bp->b_owner = current_thread();
3339 bp->b_tag = 8;
3340#endif
3341 /*
3342 * Buffer is no longer on any free list... we own it
3343 */
3344 SET(bp->b_lflags, BL_BUSY);
b0d623f7
A
3345 buf_busycount++;
3346
2d21ac55 3347 bremhash(bp);
91447636 3348
91447636
A
3349 /*
3350 * disassociate us from our vnode, if we had one...
3351 */
3352 if (bp->b_vp)
2d21ac55
A
3353 brelvp_locked(bp);
3354
3355 lck_mtx_unlock(buf_mtxp);
3356
3357 BLISTNONE(bp);
91447636 3358
6d2010ae
A
3359 if (ISSET(bp->b_flags, B_META))
3360 buf_free_meta_store(bp);
91447636
A
3361
3362 trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
3363
6d2010ae 3364 buf_release_credentials(bp);
91447636 3365
b0d623f7
A
3366 /* If discarding, just move to the empty queue */
3367 if (discard) {
3368 lck_mtx_lock_spin(buf_mtxp);
3369 CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
3370 bp->b_whichq = BQ_EMPTY;
3371 binshash(bp, &invalhash);
3372 binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
3373 CLR(bp->b_lflags, BL_BUSY);
3374 buf_busycount--;
3375 } else {
3376 /* Not discarding: clean up and prepare for reuse */
3377 bp->b_bufsize = 0;
3378 bp->b_datap = (uintptr_t)NULL;
3379 bp->b_upl = (void *)NULL;
3380 /*
3381 * preserve the state of whether this buffer
3382 * was allocated on the fly or not...
3383 * the only other flag that should be set at
3384 * this point is BL_BUSY...
3385 */
3386#ifdef JOE_DEBUG
3387 bp->b_owner = current_thread();
3388 bp->b_tag = 3;
3389#endif
3390 bp->b_lflags = BL_BUSY;
3391 bp->b_flags = (bp->b_flags & B_HDRALLOC);
3392 bp->b_dev = NODEV;
3393 bp->b_blkno = bp->b_lblkno = 0;
3394 bp->b_iodone = NULL;
3395 bp->b_error = 0;
3396 bp->b_resid = 0;
3397 bp->b_bcount = 0;
3398 bp->b_dirtyoff = bp->b_dirtyend = 0;
3399 bp->b_validoff = bp->b_validend = 0;
d1ecb069
A
3400#ifdef CONFIG_PROTECT
3401 bp->b_cpentry = 0;
3402#endif
b0d623f7
A
3403
3404 lck_mtx_lock_spin(buf_mtxp);
3405 }
91447636
A
3406 return (0);
3407}
3408
3409
3410
3411errno_t
3412buf_invalblkno(vnode_t vp, daddr64_t lblkno, int flags)
3413{
3414 buf_t bp;
3415 errno_t error;
2d21ac55
A
3416 struct bufhashhdr *dp;
3417
3418 dp = BUFHASH(vp, lblkno);
91447636 3419
91447636 3420relook:
b0d623f7
A
3421 lck_mtx_lock_spin(buf_mtxp);
3422
2d21ac55 3423 if ((bp = incore_locked(vp, lblkno, dp)) == (struct buf *)0) {
91447636
A
3424 lck_mtx_unlock(buf_mtxp);
3425 return (0);
3426 }
3427 if (ISSET(bp->b_lflags, BL_BUSY)) {
3428 if ( !ISSET(flags, BUF_WAIT)) {
3429 lck_mtx_unlock(buf_mtxp);
3430 return (EBUSY);
3431 }
3432 SET(bp->b_lflags, BL_WANTED);
3433
b0d623f7 3434 error = msleep((caddr_t)bp, buf_mtxp, PDROP | (PRIBIO + 1), "buf_invalblkno", NULL);
91447636 3435
2d21ac55 3436 if (error) {
91447636 3437 return (error);
2d21ac55 3438 }
91447636
A
3439 goto relook;
3440 }
3441 bremfree_locked(bp);
3442 SET(bp->b_lflags, BL_BUSY);
3443 SET(bp->b_flags, B_INVAL);
b0d623f7 3444 buf_busycount++;
91447636
A
3445#ifdef JOE_DEBUG
3446 bp->b_owner = current_thread();
3447 bp->b_tag = 4;
3448#endif
3449 lck_mtx_unlock(buf_mtxp);
3450 buf_brelse(bp);
3451
3452 return (0);
3453}
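
/*
 * Editor's note: illustrative sketch, not original source.  Discard any
 * cached copy of a logical block, sleeping for it if it is currently busy.
 */
#if 0
	error = buf_invalblkno(vp, lblkno, BUF_WAIT);	/* 0, EBUSY, or msleep error */
#endif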
3454
3455
3456void
3457buf_drop(buf_t bp)
3458{
3459 int need_wakeup = 0;
3460
2d21ac55 3461 lck_mtx_lock_spin(buf_mtxp);
91447636
A
3462
3463 if (ISSET(bp->b_lflags, BL_WANTED)) {
3464 /*
3465 * delay the actual wakeup until after we
3466 * clear BL_BUSY and we've dropped buf_mtxp
3467 */
3468 need_wakeup = 1;
3469 }
2d21ac55
A
3470#ifdef JOE_DEBUG
3471 bp->b_owner = current_thread();
3472 bp->b_tag = 9;
3473#endif
91447636
A
3474 /*
3475 * Unlock the buffer.
3476 */
3477 CLR(bp->b_lflags, (BL_BUSY | BL_WANTED));
b0d623f7 3478 buf_busycount--;
1c79356b 3479
91447636 3480 lck_mtx_unlock(buf_mtxp);
1c79356b 3481
91447636
A
3482 if (need_wakeup) {
3483 /*
3484 * Wake up any proceeses waiting for _this_ buffer to become free.
3485 */
3486 wakeup(bp);
3487 }
3488}
1c79356b 3489
1c79356b 3490
91447636
A
3491errno_t
3492buf_acquire(buf_t bp, int flags, int slpflag, int slptimeo) {
3493 errno_t error;
1c79356b 3494
b0d623f7 3495 lck_mtx_lock_spin(buf_mtxp);
1c79356b 3496
91447636 3497 error = buf_acquire_locked(bp, flags, slpflag, slptimeo);
1c79356b 3498
91447636 3499 lck_mtx_unlock(buf_mtxp);
1c79356b 3500
91447636
A
3501 return (error);
3502}
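
/*
 * Editor's note: illustrative sketch, not original source.  Take
 * exclusive ownership of a buffer that is already attached to a vnode
 * (BAC_REMOVE pulls it off its free list) and hand it back with
 * buf_drop() when finished.
 */
#if 0
	if (buf_acquire(bp, BAC_REMOVE, 0, 0) == 0) {
		/* bp is now BL_BUSY and off the free lists */
		buf_drop(bp);
	}
#endif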
1c79356b 3503
91447636
A
3504
3505static errno_t
3506buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo)
3507{
3508 errno_t error;
3509 struct timespec ts;
3510
3511 if (ISSET(bp->b_flags, B_LOCKED)) {
3512 if ((flags & BAC_SKIP_LOCKED))
3513 return (EDEADLK);
3514 } else {
3515 if ((flags & BAC_SKIP_NONLOCKED))
3516 return (EDEADLK);
1c79356b 3517 }
91447636
A
3518 if (ISSET(bp->b_lflags, BL_BUSY)) {
3519 /*
b0d623f7 3520 * since the lck_mtx_lock may block, the buffer
91447636
A
3521 * may become BUSY, so we need to
3522 * recheck for a NOWAIT request
3523 */
3524 if (flags & BAC_NOWAIT)
3525 return (EBUSY);
3526 SET(bp->b_lflags, BL_WANTED);
3527
3528 /* the hz value is 100; which leads to 10ms */
3529 ts.tv_sec = (slptimeo/100);
3530 ts.tv_nsec = (slptimeo % 100) * 10 * NSEC_PER_USEC * 1000;
2d21ac55 3531 error = msleep((caddr_t)bp, buf_mtxp, slpflag | (PRIBIO + 1), "buf_acquire", &ts);
91447636
A
3532
3533 if (error)
3534 return (error);
3535 return (EAGAIN);
1c79356b 3536 }
91447636
A
3537 if (flags & BAC_REMOVE)
3538 bremfree_locked(bp);
3539 SET(bp->b_lflags, BL_BUSY);
b0d623f7
A
3540 buf_busycount++;
3541
91447636
A
3542#ifdef JOE_DEBUG
3543 bp->b_owner = current_thread();
3544 bp->b_tag = 5;
3545#endif
1c79356b
A
3546 return (0);
3547}
3548
3549
3550/*
3551 * Wait for operations on the buffer to complete.
3552 * When they do, extract and return the I/O's error value.
3553 */
91447636
A
3554errno_t
3555buf_biowait(buf_t bp)
1c79356b 3556{
b0d623f7 3557 while (!ISSET(bp->b_flags, B_DONE)) {
1c79356b 3558
b0d623f7 3559 lck_mtx_lock_spin(buf_mtxp);
91447636 3560
b0d623f7
A
3561 if (!ISSET(bp->b_flags, B_DONE)) {
3562 DTRACE_IO1(wait__start, buf_t, bp);
3563 (void) msleep(bp, buf_mtxp, PDROP | (PRIBIO+1), "buf_biowait", NULL);
3564 DTRACE_IO1(wait__done, buf_t, bp);
3565 } else
3566 lck_mtx_unlock(buf_mtxp);
3567 }
1c79356b
A
3568 /* check for interruption of I/O (e.g. via NFS), then errors. */
3569 if (ISSET(bp->b_flags, B_EINTR)) {
3570 CLR(bp->b_flags, B_EINTR);
3571 return (EINTR);
3572 } else if (ISSET(bp->b_flags, B_ERROR))
3573 return (bp->b_error ? bp->b_error : EIO);
3574 else
3575 return (0);
3576}
3577
2d21ac55 3578
1c79356b
A
3579/*
3580 * Mark I/O complete on a buffer.
3581 *
3582 * If a callback has been requested, e.g. the pageout
3583 * daemon, do so. Otherwise, awaken waiting processes.
3584 *
3585 * [ Leffler, et al., says on p.247:
3586 * "This routine wakes up the blocked process, frees the buffer
3587 * for an asynchronous write, or, for a request by the pagedaemon
3588 * process, invokes a procedure specified in the buffer structure" ]
3589 *
3590 * In real life, the pagedaemon (or other system processes) wants
91447636 3591 * to do async stuff too, and doesn't want the buffer buf_brelse()'d.
1c79356b
A
3592 * (for swap pager, that puts swap buffers on the free lists (!!!),
3593 * for the vn device, that puts malloc'd buffers on the free lists!)
3594 */
91447636
A
3595extern struct timeval priority_IO_timestamp_for_root;
3596extern int hard_throttle_on_root;
3597
1c79356b 3598void
91447636 3599buf_biodone(buf_t bp)
1c79356b 3600{
b0d623f7
A
3601 mount_t mp;
3602
1c79356b 3603 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_START,
b0d623f7 3604 bp, bp->b_datap, bp->b_flags, 0, 0);
1c79356b
A
3605
3606 if (ISSET(bp->b_flags, B_DONE))
3607 panic("biodone already");
1c79356b 3608
2d21ac55
A
3609 if (ISSET(bp->b_flags, B_ERROR)) {
3610 fslog_io_error(bp);
3611 }
3612
b0d623f7
A
3613 if (bp->b_vp && bp->b_vp->v_mount) {
3614 mp = bp->b_vp->v_mount;
3615 } else {
3616 mp = NULL;
3617 }
3618
3619 if (mp && (bp->b_flags & B_READ) == 0) {
3620 update_last_io_time(mp);
3621 INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_write_size);
3622 } else if (mp) {
3623 INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_read_size);
e2fac8b1
A
3624 }
3625
9bccf70c 3626 if (kdebug_enable) {
91447636 3627 int code = DKIO_DONE;
9bccf70c 3628
91447636
A
3629 if (bp->b_flags & B_READ)
3630 code |= DKIO_READ;
3631 if (bp->b_flags & B_ASYNC)
3632 code |= DKIO_ASYNC;
9bccf70c 3633
91447636
A
3634 if (bp->b_flags & B_META)
3635 code |= DKIO_META;
3636 else if (bp->b_flags & B_PAGEIO)
3637 code |= DKIO_PAGING;
9bccf70c 3638
6d2010ae
A
3639 if (bp->b_flags & B_THROTTLED_IO)
3640 code |= DKIO_THROTTLE;
3641 else if (bp->b_flags & B_PASSIVE)
3642 code |= DKIO_PASSIVE;
3643
91447636 3644 KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE,
b0d623f7 3645 bp, (uintptr_t)bp->b_vp,
91447636 3646 bp->b_resid, bp->b_error, 0);
9bccf70c 3647 }
91447636 3648 if ((bp->b_vp != NULLVP) &&
b0d623f7 3649 ((bp->b_flags & (B_IOSTREAMING | B_PAGEIO | B_READ)) == (B_PAGEIO | B_READ)) &&
91447636
A
3650 (bp->b_vp->v_mount->mnt_kern_flag & MNTK_ROOTDEV)) {
3651 microuptime(&priority_IO_timestamp_for_root);
55e303ae
A
3652 hard_throttle_on_root = 0;
3653 }
6d2010ae 3654
91447636
A
3655 /*
3656 * I/O was done, so don't believe
6d2010ae
A
3657 * the DIRTY state from VM anymore...
3658 * and we need to reset the THROTTLED/PASSIVE
3659 * indicators
91447636 3660 */
6d2010ae 3661 CLR(bp->b_flags, (B_WASDIRTY | B_THROTTLED_IO | B_PASSIVE));
2d21ac55 3662 DTRACE_IO1(done, buf_t, bp);
b4c24cb9 3663
91447636
A
3664 if (!ISSET(bp->b_flags, B_READ) && !ISSET(bp->b_flags, B_RAW))
3665 /*
3666 * wake up any writer's blocked
3667 * on throttle or waiting for I/O
3668 * to drain
3669 */
3670 vnode_writedone(bp->b_vp);
3671
3672 if (ISSET(bp->b_flags, (B_CALL | B_FILTER))) { /* if necessary, call out */
3673 void (*iodone_func)(struct buf *, void *) = bp->b_iodone;
6d2010ae 3674 void *arg = bp->b_transaction;
91447636
A
3675 int callout = ISSET(bp->b_flags, B_CALL);
3676
6d2010ae
A
3677 if (iodone_func == NULL)
3678 panic("biodone: bp @ %p has NULL b_iodone!\n", bp);
3679
91447636 3680 CLR(bp->b_flags, (B_CALL | B_FILTER)); /* filters and callouts are one-shot */
b4c24cb9 3681 bp->b_iodone = NULL;
91447636 3682 bp->b_transaction = NULL;
b4c24cb9 3683
6d2010ae
A
3684 if (callout)
3685 SET(bp->b_flags, B_DONE); /* note that it's done */
2d21ac55 3686
6d2010ae
A
3687 (*iodone_func)(bp, arg);
3688
3689 if (callout) {
3690 /*
2d21ac55 3691 * assumes that the callback function takes
91447636
A
3692 * ownership of the bp and deals with releasing it if necessary
3693 */
2d21ac55
A
3694 goto biodone_done;
3695 }
91447636
A
3696 /*
3697 * in this case the call back function is acting
3698 * strictly as a filter... it does not take
3699 * ownership of the bp and is expecting us
3700 * to finish cleaning up... this is currently used
3701 * by the HFS journaling code
3702 */
1c79356b 3703 }
91447636
A
3704 if (ISSET(bp->b_flags, B_ASYNC)) { /* if async, release it */
3705 SET(bp->b_flags, B_DONE); /* note that it's done */
1c79356b 3706
91447636
A
3707 buf_brelse(bp);
3708 } else { /* or just wakeup the buffer */
3709 /*
3710 * by taking the mutex, we serialize
3711 * the buf owner calling buf_biowait so that we'll
3712 * only see him in one of 2 states...
3713 * state 1: B_DONE wasn't set and he's
3714 * blocked in msleep
3715 * state 2: he's blocked trying to take the
3716 * mutex before looking at B_DONE
3717 * BL_WANTED is cleared in case anyone else
3718 * is blocked waiting for the buffer... note
3719 * that we haven't cleared B_BUSY yet, so if
3720 * they do get to run, they're going to re-set
3721 * BL_WANTED and go back to sleep
3722 */
2d21ac55 3723 lck_mtx_lock_spin(buf_mtxp);
1c79356b 3724
91447636
A
3725 CLR(bp->b_lflags, BL_WANTED);
3726 SET(bp->b_flags, B_DONE); /* note that it's done */
3727
3728 lck_mtx_unlock(buf_mtxp);
3729
3730 wakeup(bp);
3731 }
3732biodone_done:
3733 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_END,
b0d623f7 3734 (uintptr_t)bp, (uintptr_t)bp->b_datap, bp->b_flags, 0, 0);
1c79356b
A
3735}
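
/*
 * Editor's note: illustrative async-completion sketch, not original
 * source; "example_done" and "example_start" are hypothetical names.
 * With a B_CALL callout installed via buf_setcallback(), buf_biodone()
 * invokes the callback instead of waking a waiter, and the callback
 * assumes ownership of the buffer (it must release it).
 */
#if 0
static void
example_done(buf_t bp, void *arg)
{
	/* inspect buf_error(bp) / buf_resid(bp), then release */
	buf_brelse(bp);
}

static void
example_start(buf_t bp)
{
	buf_setcallback(bp, example_done, NULL);	/* sets B_CALL */
	buf_setflags(bp, B_ASYNC);
	(void) VNOP_STRATEGY(bp);
}
#endif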
3736
3737/*
3738 * Return a count of buffers on the "locked" queue.
3739 */
3740int
91447636 3741count_lock_queue(void)
1c79356b 3742{
91447636
A
3743 buf_t bp;
3744 int n = 0;
3745
b0d623f7 3746 lck_mtx_lock_spin(buf_mtxp);
1c79356b
A
3747
3748 for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
3749 bp = bp->b_freelist.tqe_next)
3750 n++;
91447636
A
3751 lck_mtx_unlock(buf_mtxp);
3752
1c79356b
A
3753 return (n);
3754}
3755
3756/*
3757 * Return a count of 'busy' buffers. Used at the time of shutdown.
3758 */
3759int
91447636 3760count_busy_buffers(void)
1c79356b 3761{
b0d623f7 3762 return buf_busycount + bufstats.bufs_iobufinuse;
1c79356b
A
3763}
3764
9bccf70c 3765#if DIAGNOSTIC
1c79356b
A
3766/*
3767 * Print out statistics on the current allocation of the buffer pool.
3768 * Can be enabled to print out on every ``sync'' by setting "syncprt"
3769 * in vfs_syscalls.c using sysctl.
3770 */
3771void
3772vfs_bufstats()
3773{
91447636 3774 int i, j, count;
6d2010ae
A
3775 struct buf *bp;
3776 struct bqueues *dp;
91447636
A
3777 int counts[MAXBSIZE/CLBYTES+1];
3778 static char *bname[BQUEUES] =
3779 { "LOCKED", "LRU", "AGE", "EMPTY", "META", "LAUNDRY" };
3780
3781 for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
3782 count = 0;
3783 for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
3784 counts[j] = 0;
3785
3786 lck_mtx_lock(buf_mtxp);
3787
3788 for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
3789 counts[bp->b_bufsize/CLBYTES]++;
3790 count++;
3791 }
3792 lck_mtx_unlock(buf_mtxp);
3793
3794 printf("%s: total-%d", bname[i], count);
3795 for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
3796 if (counts[j] != 0)
3797 printf(", %d-%d", j * CLBYTES, counts[j]);
3798 printf("\n");
3799 }
3800}
3801#endif /* DIAGNOSTIC */
3802
6d2010ae 3803#define NRESERVEDIOBUFS 128
91447636
A
3804
3805
3806buf_t
3807alloc_io_buf(vnode_t vp, int priv)
3808{
3809 buf_t bp;
3810
b0d623f7 3811 lck_mtx_lock_spin(iobuffer_mtxp);
91447636 3812
2d21ac55 3813 while (((niobuf_headers - NRESERVEDIOBUFS < bufstats.bufs_iobufinuse) && !priv) ||
91447636
A
3814 (bp = iobufqueue.tqh_first) == NULL) {
3815 bufstats.bufs_iobufsleeps++;
3816
3817 need_iobuffer = 1;
6d2010ae 3818 (void) msleep(&need_iobuffer, iobuffer_mtxp, PSPIN | (PRIBIO+1), (const char *)"alloc_io_buf", NULL);
91447636
A
3819 }
3820 TAILQ_REMOVE(&iobufqueue, bp, b_freelist);
3821
3822 bufstats.bufs_iobufinuse++;
3823 if (bufstats.bufs_iobufinuse > bufstats.bufs_iobufmax)
3824 bufstats.bufs_iobufmax = bufstats.bufs_iobufinuse;
3825
3826 lck_mtx_unlock(iobuffer_mtxp);
3827
3828 /*
3829 * initialize various fields
3830 * we don't need to hold the mutex since the buffer
3831 * is now private... the vp should have a reference
3832 * on it and is not protected by this mutex in any event
3833 */
3834 bp->b_timestamp = 0;
3835 bp->b_proc = NULL;
3836
3837 bp->b_datap = 0;
3838 bp->b_flags = 0;
3839 bp->b_lflags = BL_BUSY | BL_IOBUF;
6d2010ae 3840 bp->b_redundancy_flags = 0;
91447636
A
3841 bp->b_blkno = bp->b_lblkno = 0;
3842#ifdef JOE_DEBUG
3843 bp->b_owner = current_thread();
3844 bp->b_tag = 6;
3845#endif
3846 bp->b_iodone = NULL;
3847 bp->b_error = 0;
3848 bp->b_resid = 0;
3849 bp->b_bcount = 0;
3850 bp->b_bufsize = 0;
3851 bp->b_upl = NULL;
3852 bp->b_vp = vp;
d1ecb069
A
3853#ifdef CONFIG_PROTECT
3854 bp->b_cpentry = 0;
3855#endif
91447636
A
3856
3857 if (vp && (vp->v_type == VBLK || vp->v_type == VCHR))
3858 bp->b_dev = vp->v_rdev;
3859 else
3860 bp->b_dev = NODEV;
3861
3862 return (bp);
3863}
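
/*
 * Editor's note: illustrative sketch, not original source, of the private
 * I/O buffer pattern used by callers such as the cluster layer; the
 * "addr" and "xfer_size" names are hypothetical.  A buf_t from
 * alloc_io_buf() bypasses the buffer hash entirely.
 */
#if 0
	bp = alloc_io_buf(vp, 0);

	buf_setflags(bp, B_READ);
	buf_setcount(bp, xfer_size);
	buf_setdataptr(bp, (uintptr_t)addr);
	buf_setblkno(bp, blkno);
	buf_setlblkno(bp, blkno);

	(void) VNOP_STRATEGY(bp);
	error = buf_biowait(bp);

	free_io_buf(bp);
#endif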
3864
3865
3866void
3867free_io_buf(buf_t bp)
3868{
3869 int need_wakeup = 0;
3870
3871 /*
3872 * put buffer back on the head of the iobufqueue
3873 */
3874 bp->b_vp = NULL;
3875 bp->b_flags = B_INVAL;
3876
2d21ac55 3877 lck_mtx_lock_spin(iobuffer_mtxp);
91447636
A
3878
3879 binsheadfree(bp, &iobufqueue, -1);
3880
3881 if (need_iobuffer) {
3882 /*
3883 * Wake up any processes waiting because they need an io buffer
3884 *
3885 * do the wakeup after we drop the mutex... it's possible that the
3886 * wakeup will be superfluous if need_iobuffer gets set again and
3887 * another thread runs this path, but it's highly unlikely, doesn't
3888 * hurt, and it means we don't hold up I/O progress if the wakeup blocks
3889 * trying to grab a task related lock...
3890 */
3891 need_iobuffer = 0;
3892 need_wakeup = 1;
3893 }
b0d623f7
A
3894 if (bufstats.bufs_iobufinuse <= 0)
3895 panic("free_io_buf: bp(%p) - bufstats.bufs_iobufinuse < 0", bp);
3896
91447636
A
3897 bufstats.bufs_iobufinuse--;
3898
3899 lck_mtx_unlock(iobuffer_mtxp);
3900
3901 if (need_wakeup)
3902 wakeup(&need_iobuffer);
3903}
3904
3905
2d21ac55
A
3906void
3907buf_list_lock(void)
3908{
b0d623f7 3909 lck_mtx_lock_spin(buf_mtxp);
2d21ac55
A
3910}
3911
3912void
3913buf_list_unlock(void)
3914{
3915 lck_mtx_unlock(buf_mtxp);
3916}
91447636
A
3917
3918/*
3919 * If getnewbuf() calls bcleanbuf() on the same thread
3920 * there is a potential for stack overrun and deadlocks.
3921 * So we always hand off the work to a worker thread for completion
3922 */
91447636
A
3923
3924
3925static void
3926bcleanbuf_thread_init(void)
3927{
b0d623f7
A
3928 thread_t thread = THREAD_NULL;
3929
91447636 3930 /* create worker thread */
b0d623f7
A
3931 kernel_thread_start((thread_continue_t)bcleanbuf_thread, NULL, &thread);
3932 thread_deallocate(thread);
91447636
A
3933}
3934
6d2010ae
A
3935typedef int (*bcleanbufcontinuation)(int);
3936
91447636
A
3937static void
3938bcleanbuf_thread(void)
3939{
3940 struct buf *bp;
3941 int error = 0;
3942 int loopcnt = 0;
3943
3944 for (;;) {
b0d623f7 3945 lck_mtx_lock_spin(buf_mtxp);
91447636 3946
b0d623f7 3947 while ( (bp = TAILQ_FIRST(&bufqueues[BQ_LAUNDRY])) == NULL) {
6d2010ae 3948 (void)msleep0(&bufqueues[BQ_LAUNDRY], buf_mtxp, PRIBIO|PDROP, "blaundry", 0, (bcleanbufcontinuation)bcleanbuf_thread);
b0d623f7 3949 }
6d2010ae 3950
91447636
A
3951 /*
3952 * Remove from the queue
3953 */
3954 bremfree_locked(bp);
2d21ac55
A
3955
3956 /*
3957 * Buffer is no longer on any free list
3958 */
3959 SET(bp->b_lflags, BL_BUSY);
b0d623f7 3960 buf_busycount++;
2d21ac55
A
3961
3962#ifdef JOE_DEBUG
3963 bp->b_owner = current_thread();
3964 bp->b_tag = 10;
3965#endif
91447636
A
3966
3967 lck_mtx_unlock(buf_mtxp);
3968 /*
3969 * do the IO
3970 */
3971 error = bawrite_internal(bp, 0);
3972
3973 if (error) {
2d21ac55
A
3974 bp->b_whichq = BQ_LAUNDRY;
3975 bp->b_timestamp = buf_timestamp();
3976
3977 lck_mtx_lock_spin(buf_mtxp);
91447636
A
3978
3979 binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY);
3980 blaundrycnt++;
3981
6d2010ae 3982			/* we never leave a busy buffer on the laundry queue */
2d21ac55 3983 CLR(bp->b_lflags, BL_BUSY);
b0d623f7 3984 buf_busycount--;
2d21ac55
A
3985#ifdef JOE_DEBUG
3986 bp->b_owner = current_thread();
3987 bp->b_tag = 11;
3988#endif
3989
91447636 3990 lck_mtx_unlock(buf_mtxp);
6d2010ae
A
3991
3992 if (loopcnt > MAXLAUNDRY) {
3993 /*
3994 * bawrite_internal() can return errors if we're throttled. If we've
3995 * done several I/Os and failed, give the system some time to unthrottle
3996 * the vnode
3997 */
3998 (void)tsleep((void *)&bufqueues[BQ_LAUNDRY], PRIBIO, "blaundry", 1);
91447636
A
3999 loopcnt = 0;
4000 } else {
6d2010ae
A
4001 /* give other threads a chance to run */
4002 (void)thread_block(THREAD_CONTINUE_NULL);
91447636
A
4003 loopcnt++;
4004 }
4005 }
4006 }
4007}
4008
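
The error path in bcleanbuf_thread() above encodes a simple retry policy: on a (most likely throttle-related) write failure the buffer goes back on the laundry queue, and the thread merely yields for the first MAXLAUNDRY consecutive failures, then sleeps briefly to let the throttle lift. A sketch of that policy in isolation, under user-space assumptions; try_write(), MAX_FAST_RETRIES, and write_with_backoff() are hypothetical, with sched_yield()/nanosleep() standing in for thread_block()/tsleep().

#include <sched.h>
#include <time.h>
#include <stdbool.h>

#define MAX_FAST_RETRIES 32

extern bool try_write(void *item);	/* hypothetical transiently-failing op */

void
write_with_backoff(void *item)
{
	int fast_retries = 0;

	while (!try_write(item)) {
		if (fast_retries > MAX_FAST_RETRIES) {
			/* persistent failure: give the throttle time to lift */
			struct timespec ts = { 0, 10 * 1000 * 1000 };	/* 10ms */
			(void)nanosleep(&ts, NULL);
			fast_retries = 0;
		} else {
			/* transient failure: just let other threads run */
			(void)sched_yield();
			fast_retries++;
		}
	}
}
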
4009
4010static int
4011brecover_data(buf_t bp)
4012{
4013 int upl_offset;
4014 upl_t upl;
4015 upl_page_info_t *pl;
4016 kern_return_t kret;
4017 vnode_t vp = bp->b_vp;
4018 int upl_flags;
4019
4020
4021 if ( !UBCINFOEXISTS(vp) || bp->b_bufsize == 0)
4022 goto dump_buffer;
4023
4024 upl_flags = UPL_PRECIOUS;
4025 if (! (buf_flags(bp) & B_READ)) {
4026 /*
4027 * "write" operation: let the UPL subsystem know
4028 * that we intend to modify the buffer cache pages we're
4029 * gathering.
4030 */
4031 upl_flags |= UPL_WILL_MODIFY;
4032 }
4033
4034 kret = ubc_create_upl(vp,
4035 ubc_blktooff(vp, bp->b_lblkno),
4036 bp->b_bufsize,
4037 &upl,
4038 &pl,
4039 upl_flags);
4040 if (kret != KERN_SUCCESS)
4041 panic("Failed to create UPL");
4042
4043 for (upl_offset = 0; upl_offset < bp->b_bufsize; upl_offset += PAGE_SIZE) {
4044
4045 if (!upl_valid_page(pl, upl_offset / PAGE_SIZE) || !upl_dirty_page(pl, upl_offset / PAGE_SIZE)) {
4046 ubc_upl_abort(upl, 0);
4047 goto dump_buffer;
4048 }
4049 }
4050 bp->b_upl = upl;
4051
b0d623f7 4052 kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap));
91447636
A
4053
4054 if (kret != KERN_SUCCESS)
4055 panic("getblk: ubc_upl_map() failed with (%d)", kret);
4056 return (1);
4057
4058dump_buffer:
4059 bp->b_bufsize = 0;
4060 SET(bp->b_flags, B_INVAL);
4061 buf_brelse(bp);
4062
4063 return(0);
4064}
4065
b7266188 4066boolean_t
0b4c1975 4067buffer_cache_gc(int all)
b0d623f7
A
4068{
4069 buf_t bp;
4070 boolean_t did_large_zfree = FALSE;
6d2010ae 4071 boolean_t need_wakeup = FALSE;
b0d623f7 4072 int now = buf_timestamp();
6d2010ae
A
4073 uint32_t found = 0, total_found = 0;
4074 struct bqueues privq;
0b4c1975
A
4075 int thresh_hold = BUF_STALE_THRESHHOLD;
4076
4077 if (all)
4078 thresh_hold = 0;
6d2010ae
A
4079 /*
4080 * We only care about metadata (incore storage comes from zalloc()).
4081 * No more than 1024 buffers total, and only those not accessed within the
4082 * last 30s. We will also only examine 128 buffers during a single grab
4083 * of the lock in order to limit lock hold time.
4084 */
4085 lck_mtx_lock(buf_mtxp);
4086 do {
4087 found = 0;
4088 TAILQ_INIT(&privq);
4089 need_wakeup = FALSE;
b0d623f7 4090
6d2010ae
A
4091 while (((bp = TAILQ_FIRST(&bufqueues[BQ_META]))) &&
4092 (now > bp->b_timestamp) &&
4093 (now - bp->b_timestamp > thresh_hold) &&
4094 (found < BUF_MAX_GC_BATCH_SIZE)) {
4095
4096 /* Remove from free list */
4097 bremfree_locked(bp);
4098 found++;
4099
4100#ifdef JOE_DEBUG
4101 bp->b_owner = current_thread();
4102 bp->b_tag = 12;
4103#endif
4104
4105 /* If dirty, move to laundry queue and remember to do wakeup */
4106 if (ISSET(bp->b_flags, B_DELWRI)) {
4107 SET(bp->b_lflags, BL_WANTDEALLOC);
4108
4109 bmovelaundry(bp);
4110 need_wakeup = TRUE;
4111
4112 continue;
4113 }
4114
4115 /*
4116 * Mark busy and put on private list. We could technically get
4117 * away without setting BL_BUSY here.
4118 */
4119 SET(bp->b_lflags, BL_BUSY);
4120 buf_busycount++;
b0d623f7 4121
6d2010ae
A
4122 /*
4123 * Remove from hash and dissociate from vp.
4124 */
4125 bremhash(bp);
4126 if (bp->b_vp) {
4127 brelvp_locked(bp);
4128 }
b0d623f7 4129
6d2010ae
A
4130 TAILQ_INSERT_TAIL(&privq, bp, b_freelist);
4131 }
b0d623f7 4132
6d2010ae
A
4133 if (found == 0) {
4134 break;
4135 }
b0d623f7 4136
6d2010ae
A
4137 /* Drop lock for batch processing */
4138 lck_mtx_unlock(buf_mtxp);
4139
4140 /* Wakeup and yield for laundry if need be */
4141 if (need_wakeup) {
4142 wakeup(&bufqueues[BQ_LAUNDRY]);
4143 (void)thread_block(THREAD_CONTINUE_NULL);
b0d623f7 4144 }
6d2010ae
A
4145
4146 /* Clean up every buffer on private list */
4147 TAILQ_FOREACH(bp, &privq, b_freelist) {
4148 /* Take note if we've definitely freed at least a page to a zone */
4149 if ((ISSET(bp->b_flags, B_ZALLOC)) && (buf_size(bp) >= PAGE_SIZE)) {
4150 did_large_zfree = TRUE;
4151 }
4152
4153 trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
4154
4155 /* Free Storage */
4156 buf_free_meta_store(bp);
4157
4158 /* Release credentials */
4159 buf_release_credentials(bp);
4160
4161 /* Prepare for moving to empty queue */
4162 CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED
4163 | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
4164 bp->b_whichq = BQ_EMPTY;
4165 BLISTNONE(bp);
4166 }
4167
4168 lck_mtx_lock(buf_mtxp);
4169
4170 /* Back under lock, move them all to invalid hash and clear busy */
4171 TAILQ_FOREACH(bp, &privq, b_freelist) {
4172 binshash(bp, &invalhash);
4173 CLR(bp->b_lflags, BL_BUSY);
4174 buf_busycount--;
4175
4176#ifdef JOE_DEBUG
4177 if (bp->b_owner != current_thread()) {
4178 panic("Buffer stolen from buffer_cache_gc()");
4179 }
4180 bp->b_owner = current_thread();
4181 bp->b_tag = 13;
4182#endif
4183 }
4184
4185 /* And do a big bulk move to the empty queue */
4186 TAILQ_CONCAT(&bufqueues[BQ_EMPTY], &privq, b_freelist);
4187 total_found += found;
4188
4189 } while ((all || (total_found < BUF_MAX_GC_COUNT)) && (found == BUF_MAX_GC_BATCH_SIZE));
b0d623f7
A
4190
4191 lck_mtx_unlock(buf_mtxp);
4192
4193 return did_large_zfree;
4194}
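
buffer_cache_gc() is an instance of a general batching discipline: detach a bounded batch from the shared queue while holding the lock, do the expensive per-item work unlocked on a private list, then re-acquire the lock once and splice the whole batch to its destination in O(1). A minimal sketch of just that discipline, assuming BSD <sys/queue.h>; node_t, shared_q, done_q, gc_pass(), BATCH, and expensive_cleanup() are hypothetical.

#include <pthread.h>
#include <sys/queue.h>

#define BATCH 128

typedef struct node { TAILQ_ENTRY(node) link; } node_t;
TAILQ_HEAD(nodeq, node);

static struct nodeq shared_q = TAILQ_HEAD_INITIALIZER(shared_q);
static struct nodeq done_q   = TAILQ_HEAD_INITIALIZER(done_q);
static pthread_mutex_t q_mtx = PTHREAD_MUTEX_INITIALIZER;

extern void expensive_cleanup(node_t *n);	/* hypothetical */

void
gc_pass(void)
{
	struct nodeq privq;
	node_t *n;
	int found;

	do {
		TAILQ_INIT(&privq);
		found = 0;

		/* bound lock hold time: grab at most BATCH items */
		pthread_mutex_lock(&q_mtx);
		while (found < BATCH && (n = TAILQ_FIRST(&shared_q)) != NULL) {
			TAILQ_REMOVE(&shared_q, n, link);
			TAILQ_INSERT_TAIL(&privq, n, link);
			found++;
		}
		pthread_mutex_unlock(&q_mtx);

		/* the expensive work happens without the lock held */
		TAILQ_FOREACH(n, &privq, link)
			expensive_cleanup(n);

		/* splice the whole batch back in one O(1) operation */
		pthread_mutex_lock(&q_mtx);
		TAILQ_CONCAT(&done_q, &privq, link);
		pthread_mutex_unlock(&q_mtx);
	} while (found == BATCH);
}
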
91447636
A
4195
4196
4197/*
4198 * disabled for now
4199 */
4200
4201#if FLUSH_QUEUES
4202
4203#define NFLUSH 32
4204
4205static int
4206bp_cmp(void *a, void *b)
4207{
4208 buf_t *bp_a = *(buf_t **)a,
4209 *bp_b = *(buf_t **)b;
4210 daddr64_t res;
1c79356b 4211
91447636
A
 4212 // block numbers are never negative, but the 64-bit
 4213 // difference can still overflow an int, so compare
 4214 // explicitly rather than truncating.
 4215 res = (bp_a->b_blkno - bp_b->b_blkno);
 4216
 4217 return ((res < 0) ? -1 : (res > 0) ? 1 : 0);
1c79356b 4218}
1c79356b
A
4219
4220
91447636
A
4221int
4222bflushq(int whichq, mount_t mp)
1c79356b 4223{
91447636
A
4224 buf_t bp, next;
4225 int i, buf_count;
4226 int total_writes = 0;
4227 static buf_t flush_table[NFLUSH];
1c79356b 4228
91447636
A
4229 if (whichq < 0 || whichq >= BQUEUES) {
4230 return (0);
0b4e3aa0
A
4231 }
4232
91447636
A
4233 restart:
4234 lck_mtx_lock(buf_mtxp);
0b4e3aa0 4235
91447636 4236 bp = TAILQ_FIRST(&bufqueues[whichq]);
1c79356b 4237
91447636
A
4238 for (buf_count = 0; bp; bp = next) {
4239 next = bp->b_freelist.tqe_next;
4240
4241 if (bp->b_vp == NULL || bp->b_vp->v_mount != mp) {
4242 continue;
4243 }
b4c24cb9 4244
91447636 4245 if (ISSET(bp->b_flags, B_DELWRI) && !ISSET(bp->b_lflags, BL_BUSY)) {
1c79356b 4246
91447636
A
4247 bremfree_locked(bp);
4248#ifdef JOE_DEBUG
4249 bp->b_owner = current_thread();
4250 bp->b_tag = 7;
4251#endif
4252 SET(bp->b_lflags, BL_BUSY);
b0d623f7
A
4253 buf_busycount++;
4254
91447636
A
4255 flush_table[buf_count] = bp;
4256 buf_count++;
4257 total_writes++;
1c79356b 4258
91447636
A
4259 if (buf_count >= NFLUSH) {
4260 lck_mtx_unlock(buf_mtxp);
1c79356b 4261
91447636 4262 qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp);
1c79356b 4263
91447636
A
4264 for (i = 0; i < buf_count; i++) {
4265 buf_bawrite(flush_table[i]);
4266 }
4267 goto restart;
4268 }
4269 }
4270 }
4271 lck_mtx_unlock(buf_mtxp);
1c79356b 4272
91447636
A
4273 if (buf_count > 0) {
4274 qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp);
1c79356b 4275
91447636
A
4276 for (i = 0; i < buf_count; i++) {
4277 buf_bawrite(flush_table[i]);
4278 }
1c79356b 4279 }
91447636
A
4280
4281 return (total_writes);
1c79356b 4282}
91447636 4283#endif
1c79356b 4284
91447636
A
4285
4286#if BALANCE_QUEUES
1c79356b
A
4287
4288/* XXX move this to a separate file */
91447636
A
4289
4290/*
4291 * NOTE: THIS CODE HAS NOT BEEN UPDATED
4292 * WITH RESPECT TO THE NEW LOCKING MODEL
4293 */
4294
4295
1c79356b
A
4296/*
4297 * Dynamic Scaling of the Buffer Queues
4298 */
4299
4300typedef long long blsize_t;
4301
55e303ae 4302blsize_t MAXNBUF; /* initialize to (sane_size / PAGE_SIZE) */
1c79356b
A
4303/* Global tunable limits */
4304blsize_t nbufh; /* number of buffer headers */
4305blsize_t nbuflow; /* minimum number of buffer headers required */
4306blsize_t nbufhigh; /* maximum number of buffer headers allowed */
4307blsize_t nbuftarget; /* preferred number of buffer headers */
4308
4309/*
4310 * assertions:
4311 *
4312 * 1. 0 < nbuflow <= nbufh <= nbufhigh
4313 * 2. nbufhigh <= MAXNBUF
4314 * 3. 0 < nbuflow <= nbuftarget <= nbufhigh
4315 * 4. nbufh can not be set by sysctl().
4316 */
4317
4318/* Per queue tunable limits */
4319
4320struct bufqlim {
4321 blsize_t bl_nlow; /* minimum number of buffer headers required */
4322 blsize_t bl_num; /* number of buffer headers on the queue */
4323 blsize_t bl_nlhigh; /* maximum number of buffer headers allowed */
4324 blsize_t bl_target; /* preferred number of buffer headers */
4325 long bl_stale; /* Seconds after which a buffer is considered stale */
4326} bufqlim[BQUEUES];
4327
4328/*
4329 * assertions:
4330 *
4331 * 1. 0 <= bl_nlow <= bl_num <= bl_nlhigh
4332 * 2. bl_nlhigh <= MAXNBUF
4333 * 3. bufqlim[BQ_META].bl_nlow != 0
4334 * 4. bufqlim[BQ_META].bl_nlow > (number of possible concurrent
4335 * file system IO operations)
4336 * 5. bl_num can not be set by sysctl().
 4337 * 6. bl_nlhigh <= nbufhigh
4338 */
4339
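
The two assertion blocks above are mechanically checkable against the globals and bufqlim[] declared here. A hedged, debug-only sketch of such a validator follows; bufq_check_limits() is hypothetical and not part of xnu, libc assert() is used purely for illustration, and assertion 4 on BQ_META's bl_nlow is omitted since "number of possible concurrent file system IO operations" is not a compile-time quantity.

#include <assert.h>

static void
bufq_check_limits(void)
{
	int q;

	/* global invariants (assertions 1-3 above) */
	assert(0 < nbuflow && nbuflow <= nbufh && nbufh <= nbufhigh);
	assert(nbufhigh <= MAXNBUF);
	assert(nbuflow <= nbuftarget && nbuftarget <= nbufhigh);

	/* per-queue invariants (assertions 1, 2 and 6 above) */
	for (q = 0; q < BQUEUES; q++) {
		assert(0 <= bufqlim[q].bl_nlow);
		assert(bufqlim[q].bl_nlow <= bufqlim[q].bl_num);
		assert(bufqlim[q].bl_num <= bufqlim[q].bl_nlhigh);
		assert(bufqlim[q].bl_nlhigh <= MAXNBUF);
		assert(bufqlim[q].bl_nlhigh <= nbufhigh);
	}
	assert(bufqlim[BQ_META].bl_nlow != 0);
}
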
4340/*
4341 * Rationale:
4342 * ----------
 4343 * Defining blsize_t as a (32-bit) long would permit 2^31 buffer headers
 4344 * per queue, which can describe (2^31 * PAGE_SIZE) bytes of memory per queue.
 4345 *
 4346 * These limits are exported by means of sysctl().
 4347 * It was decided to define blsize_t as a 64 bit quantity instead.
 4348 * This makes sure that we will not be required to change it
 4349 * as long as we do not exceed a 64 bit address space for the kernel.
 4350 *
 4351 * The low and high water parameters are initialized at compile time,
 4352 * and boot arguments can be used to override them. sysctl() cannot
 4353 * change them; it can read all of the values but set only target.
 4354 * num is the current level.
 4355 *
 4356 * Advantages of having a "bufqscan" thread doing the balancing are:
 4357 * it keeps enough bufs on BQ_EMPTY.
 4358 * getnewbuf() by default will always select a buffer from BQ_EMPTY,
 4359 * and getnewbuf() performs best if a buffer was found there.
 4360 * Also this minimizes the possibility of starting IO
 4361 * from getnewbuf(). That's a performance win, too.
4362 *
4363 * Localize complex logic [balancing as well as time aging]
4364 * to balancebufq().
4365 *
4366 * Simplify getnewbuf() logic by elimination of time aging code.
4367 */
4368
4369/*
4370 * Algorithm:
4371 * -----------
 4372 * The goal of the dynamic scaling of the buffer queues is to keep
4373 * the size of the LRU close to bl_target. Buffers on a queue would
4374 * be time aged.
4375 *
4376 * There would be a thread which will be responsible for "balancing"
4377 * the buffer cache queues.
4378 *
4379 * The scan order would be: AGE, LRU, META, EMPTY.
4380 */
4381
4382long bufqscanwait = 0;
4383
9bccf70c
A
4384static void bufqscan_thread();
4385static int balancebufq(int q);
4386static int btrimempty(int n);
4387static __inline__ int initbufqscan(void);
4388static __inline__ int nextbufq(int q);
4389static void buqlimprt(int all);
1c79356b 4390
91447636
A
4391
4392static __inline__ void
4393bufqinc(int q)
4394{
4395 if ((q < 0) || (q >= BQUEUES))
4396 return;
4397
4398 bufqlim[q].bl_num++;
4399 return;
4400}
4401
4402static __inline__ void
4403bufqdec(int q)
4404{
4405 if ((q < 0) || (q >= BQUEUES))
4406 return;
4407
4408 bufqlim[q].bl_num--;
4409 return;
4410}
4411
9bccf70c 4412static void
2d21ac55 4413bufq_balance_thread_init(void)
1c79356b 4414{
b0d623f7 4415 thread_t thread = THREAD_NULL;
1c79356b
A
4416
4417 if (bufqscanwait++ == 0) {
1c79356b
A
4418
 4419		/* Initialize globals */
55e303ae 4420 MAXNBUF = (sane_size / PAGE_SIZE);
2d21ac55 4421 nbufh = nbuf_headers;
1c79356b
A
4422 nbuflow = min(nbufh, 100);
4423 nbufhigh = min(MAXNBUF, max(nbufh, 2048));
55e303ae 4424 nbuftarget = (sane_size >> 5) / PAGE_SIZE;
1c79356b
A
4425 nbuftarget = max(nbuflow, nbuftarget);
4426 nbuftarget = min(nbufhigh, nbuftarget);
4427
4428 /*
4429 * Initialize the bufqlim
4430 */
4431
4432 /* LOCKED queue */
4433 bufqlim[BQ_LOCKED].bl_nlow = 0;
4434 bufqlim[BQ_LOCKED].bl_nlhigh = 32;
4435 bufqlim[BQ_LOCKED].bl_target = 0;
4436 bufqlim[BQ_LOCKED].bl_stale = 30;
4437
4438 /* LRU queue */
4439 bufqlim[BQ_LRU].bl_nlow = 0;
4440 bufqlim[BQ_LRU].bl_nlhigh = nbufhigh/4;
4441 bufqlim[BQ_LRU].bl_target = nbuftarget/4;
4442 bufqlim[BQ_LRU].bl_stale = LRU_IS_STALE;
4443
4444 /* AGE queue */
4445 bufqlim[BQ_AGE].bl_nlow = 0;
4446 bufqlim[BQ_AGE].bl_nlhigh = nbufhigh/4;
4447 bufqlim[BQ_AGE].bl_target = nbuftarget/4;
4448 bufqlim[BQ_AGE].bl_stale = AGE_IS_STALE;
4449
4450 /* EMPTY queue */
4451 bufqlim[BQ_EMPTY].bl_nlow = 0;
4452 bufqlim[BQ_EMPTY].bl_nlhigh = nbufhigh/4;
4453 bufqlim[BQ_EMPTY].bl_target = nbuftarget/4;
4454 bufqlim[BQ_EMPTY].bl_stale = 600000;
4455
4456 /* META queue */
4457 bufqlim[BQ_META].bl_nlow = 0;
4458 bufqlim[BQ_META].bl_nlhigh = nbufhigh/4;
4459 bufqlim[BQ_META].bl_target = nbuftarget/4;
4460 bufqlim[BQ_META].bl_stale = META_IS_STALE;
4461
765c9de3
A
4462 /* LAUNDRY queue */
 4463		bufqlim[BQ_LAUNDRY].bl_nlow = 0;
 4464		bufqlim[BQ_LAUNDRY].bl_nlhigh = 32;
 4465		bufqlim[BQ_LAUNDRY].bl_target = 0;
 4466		bufqlim[BQ_LAUNDRY].bl_stale = 30;
4467
1c79356b
A
4468 buqlimprt(1);
4469 }
4470
4471 /* create worker thread */
b0d623f7
A
4472 kernel_thread_start((thread_continue_t)bufqscan_thread, NULL, &thread);
4473 thread_deallocate(thread);
1c79356b
A
4474}
4475
4476/* The workloop for the buffer balancing thread */
9bccf70c 4477static void
1c79356b
A
4478bufqscan_thread()
4479{
1c79356b
A
4480 int moretodo = 0;
4481
1c79356b
A
4482 for(;;) {
4483 do {
4484 int q; /* buffer queue to process */
4485
9bccf70c
A
4486 q = initbufqscan();
4487 for (; q; ) {
1c79356b
A
4488 moretodo |= balancebufq(q);
4489 q = nextbufq(q);
4490 }
4491 } while (moretodo);
4492
9bccf70c 4493#if DIAGNOSTIC
1c79356b
A
4494 vfs_bufstats();
4495 buqlimprt(0);
4496#endif
4497 (void)tsleep((void *)&bufqscanwait, PRIBIO, "bufqscanwait", 60 * hz);
4498 moretodo = 0;
4499 }
1c79356b
A
4500}
4501
4502/* Seed for the buffer queue balancing */
9bccf70c 4503static __inline__ int
1c79356b
A
4504initbufqscan()
4505{
4506 /* Start with AGE queue */
4507 return (BQ_AGE);
4508}
4509
4510/* Pick next buffer queue to balance */
9bccf70c 4511static __inline__ int
1c79356b
A
4512nextbufq(int q)
4513{
4514 int order[] = { BQ_AGE, BQ_LRU, BQ_META, BQ_EMPTY, 0 };
4515
4516 q++;
 4517	q %= (sizeof(order) / sizeof(order[0]));
4518 return (order[q]);
4519}
4520
4521/* function to balance the buffer queues */
9bccf70c 4522static int
1c79356b
A
4523balancebufq(int q)
4524{
4525 int moretodo = 0;
91447636 4526 int n, t;
1c79356b
A
4527
4528 /* reject invalid q */
4529 if ((q < 0) || (q >= BQUEUES))
4530 goto out;
4531
765c9de3
A
4532 /* LOCKED or LAUNDRY queue MUST not be balanced */
4533 if ((q == BQ_LOCKED) || (q == BQ_LAUNDRY))
1c79356b
A
4534 goto out;
4535
4536 n = (bufqlim[q].bl_num - bufqlim[q].bl_target);
4537
4538 /* If queue has less than target nothing more to do */
4539 if (n < 0)
4540 goto out;
4541
4542 if ( n > 8 ) {
4543 /* Balance only a small amount (12.5%) at a time */
4544 n >>= 3;
4545 }
4546
4547 /* EMPTY queue needs special handling */
4548 if (q == BQ_EMPTY) {
4549 moretodo |= btrimempty(n);
4550 goto out;
4551 }
91447636
A
4552
 4553	t = buf_timestamp();
1c79356b
A
4554
4555 for (; n > 0; n--) {
4556 struct buf *bp = bufqueues[q].tqh_first;
4557 if (!bp)
4558 break;
4559
4560 /* check if it's stale */
91447636 4561 if ((t - bp->b_timestamp) > bufqlim[q].bl_stale) {
b0d623f7 4562 if (bcleanbuf(bp, FALSE)) {
91447636 4563 /* buf_bawrite() issued, bp not ready */
1c79356b
A
4564 moretodo = 1;
4565 } else {
4566 /* release the cleaned buffer to BQ_EMPTY */
4567 SET(bp->b_flags, B_INVAL);
91447636 4568 buf_brelse(bp);
1c79356b
A
4569 }
4570 } else
4571 break;
4572 }
4573
4574out:
1c79356b
A
4575 return (moretodo);
4576}
4577
9bccf70c 4578static int
1c79356b
A
4579btrimempty(int n)
4580{
4581 /*
 4582	 * When struct bufs are allocated dynamically, this would
 4583	 * reclaim up to 'n' struct bufs from the empty queue.
4584 */
4585
4586 return (0);
4587}
4588
9bccf70c 4589static void
1c79356b
A
4590buqlimprt(int all)
4591{
4592 int i;
765c9de3
A
4593 static char *bname[BQUEUES] =
4594 { "LOCKED", "LRU", "AGE", "EMPTY", "META", "LAUNDRY" };
1c79356b
A
4595
4596 if (all)
4597 for (i = 0; i < BQUEUES; i++) {
4598 printf("%s : ", bname[i]);
9bccf70c
A
4599 printf("min = %ld, ", (long)bufqlim[i].bl_nlow);
4600 printf("cur = %ld, ", (long)bufqlim[i].bl_num);
4601 printf("max = %ld, ", (long)bufqlim[i].bl_nlhigh);
4602 printf("target = %ld, ", (long)bufqlim[i].bl_target);
4603 printf("stale after %ld seconds\n", bufqlim[i].bl_stale);
1c79356b
A
4604 }
4605 else
4606 for (i = 0; i < BQUEUES; i++) {
4607 printf("%s : ", bname[i]);
9bccf70c 4608 printf("cur = %ld, ", (long)bufqlim[i].bl_num);
1c79356b
A
4609 }
4610}
765c9de3 4611
91447636 4612#endif
b4c24cb9 4613
b4c24cb9 4614