/*
 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison Wesley, 1989)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/buf_internal.h>
#include <sys/vnode_internal.h>
#include <sys/mount_internal.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <miscfs/specfs/specdev.h>
#include <sys/ubc.h>
#include <sys/kauth.h>
#if DIAGNOSTIC
#include <kern/assert.h>
#endif /* DIAGNOSTIC */
#include <kern/task.h>
#include <kern/zalloc.h>
#include <kern/locks.h>
#include <kern/thread.h>

#include <sys/fslog.h>		/* fslog_io_error() */

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <kern/sched_prim.h>	/* thread_block() */

#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>

#include <sys/kdebug.h>

#include <libkern/OSAtomic.h>
#include <libkern/OSDebug.h>
#include <sys/ubc_internal.h>

#include <sys/sdt.h>
#include <sys/cprotect.h>

int	bcleanbuf(buf_t bp, boolean_t discard);
static int	brecover_data(buf_t bp);
static boolean_t incore(vnode_t vp, daddr64_t blkno);
/* timeout is in msecs */
static buf_t	getnewbuf(int slpflag, int slptimeo, int *queue);
static void	bremfree_locked(buf_t bp);
static void	buf_reassign(buf_t bp, vnode_t newvp);
static errno_t	buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo);
static int	buf_iterprepare(vnode_t vp, struct buflists *, int flags);
static void	buf_itercomplete(vnode_t vp, struct buflists *, int flags);
static boolean_t buffer_cache_gc(int);
static buf_t	buf_brelse_shadow(buf_t bp);
static void	buf_free_meta_store(buf_t bp);

static buf_t	buf_create_shadow_internal(buf_t bp, boolean_t force_copy,
				uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv);


__private_extern__ int  bdwrite_internal(buf_t, int);

/* zone allocated buffer headers */
static void	bufzoneinit(void);
static void	bcleanbuf_thread_init(void);
static void	bcleanbuf_thread(void);

static zone_t	buf_hdr_zone;
static int	buf_hdr_count;


/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;

static buf_t	incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp);

/* Definitions for the buffer stats. */
struct bufstats bufstats;

/* Number of delayed write buffers */
long nbdwrite = 0;
int blaundrycnt = 0;
static int boot_nbuf_headers = 0;

static TAILQ_HEAD(delayqueue, buf) delaybufqueue;

static TAILQ_HEAD(ioqueue, buf) iobufqueue;
static TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
static int needbuffer;
static int need_iobuffer;

static lck_grp_t	*buf_mtx_grp;
static lck_attr_t	*buf_mtx_attr;
static lck_grp_attr_t	*buf_mtx_grp_attr;
static lck_mtx_t	*iobuffer_mtxp;
static lck_mtx_t	*buf_mtxp;

static int buf_busycount;

static __inline__ int
buf_timestamp(void)
{
	struct	timeval		t;
	microuptime(&t);
	return (t.tv_sec);
}

/*
 * Insq/Remq for the buffer free lists.
 */
#define	binsheadfree(bp, dp, whichq)	do {			\
					TAILQ_INSERT_HEAD(dp, bp, b_freelist);	\
				} while (0)

#define	binstailfree(bp, dp, whichq)	do {			\
					TAILQ_INSERT_TAIL(dp, bp, b_freelist);	\
				} while (0)

#define BHASHENTCHECK(bp)	\
	if ((bp)->b_hash.le_prev != (struct buf **)0xdeadbeef)	\
		panic("%p: b_hash.le_prev is not deadbeef", (bp));

#define BLISTNONE(bp)	\
	(bp)->b_hash.le_next = (struct buf *)0;	\
	(bp)->b_hash.le_prev = (struct buf **)0xdeadbeef;

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {							\
	LIST_REMOVE(bp, b_vnbufs);					\
	(bp)->b_vnbufs.le_next = NOLIST;				\
}

/*
 * Time in seconds before a buffer on a list is
 * considered as a stale buffer
 */
#define LRU_IS_STALE 120 /* default value for the LRU */
#define AGE_IS_STALE 60  /* default value for the AGE */
#define META_IS_STALE 180 /* default value for the BQ_META */

int lru_is_stale = LRU_IS_STALE;
int age_is_stale = AGE_IS_STALE;
int meta_is_stale = META_IS_STALE;

#define MAXLAUNDRY	10

/* LIST_INSERT_HEAD() with assertions */
static __inline__ void
blistenterhead(struct bufhashhdr * head, buf_t bp)
{
	if ((bp->b_hash.le_next = (head)->lh_first) != NULL)
		(head)->lh_first->b_hash.le_prev = &(bp)->b_hash.le_next;
	(head)->lh_first = bp;
	bp->b_hash.le_prev = &(head)->lh_first;
	if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef)
		panic("blistenterhead: le_prev is deadbeef");
}

static __inline__ void
binshash(buf_t bp, struct bufhashhdr *dp)
{
#if DIAGNOSTIC
	buf_t	nbp;
#endif /* DIAGNOSTIC */

	BHASHENTCHECK(bp);

#if DIAGNOSTIC
	nbp = dp->lh_first;
	for(; nbp != NULL; nbp = nbp->b_hash.le_next) {
		if(nbp == bp)
			panic("buf already in hashlist");
	}
#endif /* DIAGNOSTIC */

	blistenterhead(dp, bp);
}

static __inline__ void
bremhash(buf_t	bp)
{
	if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef)
		panic("bremhash le_prev is deadbeef");
	if (bp->b_hash.le_next == bp)
		panic("bremhash: next points to self");

	if (bp->b_hash.le_next != NULL)
		bp->b_hash.le_next->b_hash.le_prev = bp->b_hash.le_prev;
	*bp->b_hash.le_prev = (bp)->b_hash.le_next;
}

/*
 * buf_mtxp held.
 */
static __inline__ void
bmovelaundry(buf_t bp)
{
	bp->b_whichq = BQ_LAUNDRY;
	bp->b_timestamp = buf_timestamp();
	binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY);
	blaundrycnt++;
}

static __inline__ void
buf_release_credentials(buf_t bp)
{
	if (IS_VALID_CRED(bp->b_rcred)) {
		kauth_cred_unref(&bp->b_rcred);
	}
	if (IS_VALID_CRED(bp->b_wcred)) {
		kauth_cred_unref(&bp->b_wcred);
	}
}


int
buf_valid(buf_t bp) {

	if ( (bp->b_flags & (B_DONE | B_DELWRI)) )
		return 1;
	return 0;
}

int
buf_fromcache(buf_t bp) {

	if ( (bp->b_flags & B_CACHE) )
		return 1;
	return 0;
}

void
buf_markinvalid(buf_t bp) {

	SET(bp->b_flags, B_INVAL);
}

void
buf_markdelayed(buf_t bp) {

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);

		OSAddAtomicLong(1, &nbdwrite);
		buf_reassign(bp, bp->b_vp);
	}
	SET(bp->b_flags, B_DONE);
}

void
buf_markclean(buf_t bp) {

	if (ISSET(bp->b_flags, B_DELWRI)) {
		CLR(bp->b_flags, B_DELWRI);

		OSAddAtomicLong(-1, &nbdwrite);
		buf_reassign(bp, bp->b_vp);
	}
}

void
buf_markeintr(buf_t bp) {

	SET(bp->b_flags, B_EINTR);
}


void
buf_markaged(buf_t bp) {

	SET(bp->b_flags, B_AGE);
}

int
buf_fua(buf_t bp) {

	if ((bp->b_flags & B_FUA) == B_FUA)
		return 1;
	return 0;
}

void
buf_markfua(buf_t bp) {

	SET(bp->b_flags, B_FUA);
}

#if CONFIG_PROTECT
void
buf_setcpaddr(buf_t bp, struct cprotect *entry) {
	bp->b_attr.ba_cpentry = entry;
}

void
buf_setcpoff (buf_t bp, uint64_t foffset) {
	bp->b_attr.ba_cp_file_off = foffset;
}

void *
bufattr_cpaddr(bufattr_t bap) {
	return (bap->ba_cpentry);
}

uint64_t
bufattr_cpoff(bufattr_t bap) {
	return (bap->ba_cp_file_off);
}

void
bufattr_setcpaddr(bufattr_t bap, void *cp_entry_addr) {
	bap->ba_cpentry = cp_entry_addr;
}

void
bufattr_setcpoff(bufattr_t bap, uint64_t foffset) {
	bap->ba_cp_file_off = foffset;
}

#else
void *
bufattr_cpaddr(bufattr_t bap __unused) {
	return NULL;
}

uint64_t
bufattr_cpoff(bufattr_t bap __unused) {
	return 0;
}

void
bufattr_setcpaddr(bufattr_t bap __unused, void *cp_entry_addr __unused) {
}

void
bufattr_setcpoff(__unused bufattr_t bap, __unused uint64_t foffset) {
	return;
}
#endif /* CONFIG_PROTECT */

bufattr_t
bufattr_alloc() {
	bufattr_t bap;
	MALLOC(bap, bufattr_t, sizeof(struct bufattr), M_TEMP, M_WAITOK);
	if (bap == NULL)
		return NULL;

	bzero(bap, sizeof(struct bufattr));
	return bap;
}

void
bufattr_free(bufattr_t bap) {
	if (bap)
		FREE(bap, M_TEMP);
}

bufattr_t
bufattr_dup(bufattr_t bap) {
	bufattr_t new_bufattr;
	MALLOC(new_bufattr, bufattr_t, sizeof(struct bufattr), M_TEMP, M_WAITOK);
	if (new_bufattr == NULL)
		return NULL;

	/* Copy the provided one into the new copy */
	memcpy (new_bufattr, bap, sizeof(struct bufattr));
	return new_bufattr;
}

int
bufattr_rawencrypted(bufattr_t bap) {
	if ( (bap->ba_flags & BA_RAW_ENCRYPTED_IO) )
		return 1;
	return 0;
}

int
bufattr_throttled(bufattr_t bap) {
	return (GET_BUFATTR_IO_TIER(bap));
}

int
bufattr_passive(bufattr_t bap) {
	if ( (bap->ba_flags & BA_PASSIVE) )
		return 1;
	return 0;
}

int
bufattr_nocache(bufattr_t bap) {
	if ( (bap->ba_flags & BA_NOCACHE) )
		return 1;
	return 0;
}

int
bufattr_meta(bufattr_t bap) {
	if ( (bap->ba_flags & BA_META) )
		return 1;
	return 0;
}

void
bufattr_markmeta(bufattr_t bap) {
	SET(bap->ba_flags, BA_META);
}

int
bufattr_delayidlesleep(bufattr_t bap)
{
	if ( (bap->ba_flags & BA_DELAYIDLESLEEP) )
		return 1;
	return 0;
}

bufattr_t
buf_attr(buf_t bp) {
	return &bp->b_attr;
}

void
buf_markstatic(buf_t bp __unused) {
	SET(bp->b_flags, B_STATICCONTENT);
}

int
buf_static(buf_t bp) {
	if ( (bp->b_flags & B_STATICCONTENT) )
		return 1;
	return 0;
}

void
bufattr_markgreedymode(bufattr_t bap) {
	SET(bap->ba_flags, BA_GREEDY_MODE);
}

int
bufattr_greedymode(bufattr_t bap) {
	if ( (bap->ba_flags & BA_GREEDY_MODE) )
		return 1;
	return 0;
}

void
bufattr_markisochronous(bufattr_t bap) {
	SET(bap->ba_flags, BA_ISOCHRONOUS);
}

int
bufattr_isochronous(bufattr_t bap) {
	if ( (bap->ba_flags & BA_ISOCHRONOUS) )
		return 1;
	return 0;
}

void
bufattr_markquickcomplete(bufattr_t bap) {
	SET(bap->ba_flags, BA_QUICK_COMPLETE);
}

int
bufattr_quickcomplete(bufattr_t bap) {
	if ( (bap->ba_flags & BA_QUICK_COMPLETE) )
		return 1;
	return 0;
}

errno_t
buf_error(buf_t bp) {

	return (bp->b_error);
}

void
buf_seterror(buf_t bp, errno_t error) {

	if ((bp->b_error = error))
		SET(bp->b_flags, B_ERROR);
	else
		CLR(bp->b_flags, B_ERROR);
}

void
buf_setflags(buf_t bp, int32_t flags) {

	SET(bp->b_flags, (flags & BUF_X_WRFLAGS));
}

void
buf_clearflags(buf_t bp, int32_t flags) {

	CLR(bp->b_flags, (flags & BUF_X_WRFLAGS));
}

int32_t
buf_flags(buf_t bp) {

	return ((bp->b_flags & BUF_X_RDFLAGS));
}

void
buf_reset(buf_t bp, int32_t io_flags) {

	CLR(bp->b_flags, (B_READ | B_WRITE | B_ERROR | B_DONE | B_INVAL | B_ASYNC | B_NOCACHE | B_FUA));
	SET(bp->b_flags, (io_flags & (B_ASYNC | B_READ | B_WRITE | B_NOCACHE)));

	bp->b_error = 0;
}

uint32_t
buf_count(buf_t bp) {

	return (bp->b_bcount);
}

void
buf_setcount(buf_t bp, uint32_t bcount) {

	bp->b_bcount = bcount;
}

uint32_t
buf_size(buf_t bp) {

	return (bp->b_bufsize);
}

void
buf_setsize(buf_t bp, uint32_t bufsize) {

	bp->b_bufsize = bufsize;
}

uint32_t
buf_resid(buf_t bp) {

	return (bp->b_resid);
}

void
buf_setresid(buf_t bp, uint32_t resid) {

	bp->b_resid = resid;
}

uint32_t
buf_dirtyoff(buf_t bp) {

	return (bp->b_dirtyoff);
}

uint32_t
buf_dirtyend(buf_t bp) {

	return (bp->b_dirtyend);
}

void
buf_setdirtyoff(buf_t bp, uint32_t dirtyoff) {

	bp->b_dirtyoff = dirtyoff;
}

void
buf_setdirtyend(buf_t bp, uint32_t dirtyend) {

	bp->b_dirtyend = dirtyend;
}

uintptr_t
buf_dataptr(buf_t bp) {

	return (bp->b_datap);
}

void
buf_setdataptr(buf_t bp, uintptr_t data) {

	bp->b_datap = data;
}

vnode_t
buf_vnode(buf_t bp) {

	return (bp->b_vp);
}

void
buf_setvnode(buf_t bp, vnode_t vp) {

	bp->b_vp = vp;
}


void *
buf_callback(buf_t bp)
{
	if ( !(bp->b_flags & B_CALL) )
		return ((void *) NULL);

	return ((void *)bp->b_iodone);
}


errno_t
buf_setcallback(buf_t bp, void (*callback)(buf_t, void *), void *transaction)
{
	if (callback)
		bp->b_flags |= (B_CALL | B_ASYNC);
	else
		bp->b_flags &= ~B_CALL;
	bp->b_transaction = transaction;
	bp->b_iodone = callback;

	return (0);
}
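
/*
 * Illustrative sketch (not part of vfs_bio.c): attaching a completion
 * callback before issuing an asynchronous write.  buf_setcallback() marks
 * the buffer B_CALL | B_ASYNC, so buf_biodone() invokes the callback instead
 * of waking a synchronous waiter, and the callback takes ownership of the
 * buffer.  my_fs_iodone, my_txn_t and my_fs_write_async are hypothetical.
 */
#if 0	/* example only */
typedef struct my_txn { int pending; } my_txn_t;

static void
my_fs_iodone(buf_t bp, void *arg)
{
	my_txn_t *txn = (my_txn_t *)arg;

	if (buf_error(bp))
		printf("my_fs: async write failed (%d)\n", buf_error(bp));
	txn->pending--;
	buf_brelse(bp);			/* callback owns the buffer for B_CALL I/O */
}

static void
my_fs_write_async(buf_t bp, my_txn_t *txn)
{
	txn->pending++;
	buf_setcallback(bp, my_fs_iodone, txn);
	(void) VNOP_BWRITE(bp);		/* or buf_bawrite(bp) */
}
#endif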

errno_t
buf_setupl(buf_t bp, upl_t upl, uint32_t offset)
{
	if ( !(bp->b_lflags & BL_IOBUF) )
		return (EINVAL);

	if (upl)
		bp->b_flags |= B_CLUSTER;
	else
		bp->b_flags &= ~B_CLUSTER;
	bp->b_upl = upl;
	bp->b_uploffset = offset;

	return (0);
}

buf_t
buf_clone(buf_t bp, int io_offset, int io_size, void (*iodone)(buf_t, void *), void *arg)
{
	buf_t	io_bp;

	if (io_offset < 0 || io_size < 0)
		return (NULL);

	if ((unsigned)(io_offset + io_size) > (unsigned)bp->b_bcount)
		return (NULL);

	if (bp->b_flags & B_CLUSTER) {
		if (io_offset && ((bp->b_uploffset + io_offset) & PAGE_MASK))
			return (NULL);

		if (((bp->b_uploffset + io_offset + io_size) & PAGE_MASK) && ((io_offset + io_size) < bp->b_bcount))
			return (NULL);
	}
	io_bp = alloc_io_buf(bp->b_vp, 0);

	io_bp->b_flags = bp->b_flags & (B_COMMIT_UPL | B_META | B_PAGEIO | B_CLUSTER | B_PHYS | B_RAW | B_ASYNC | B_READ | B_FUA);

	if (iodone) {
		io_bp->b_transaction = arg;
		io_bp->b_iodone = iodone;
		io_bp->b_flags |= B_CALL;
	}
	if (bp->b_flags & B_CLUSTER) {
		io_bp->b_upl = bp->b_upl;
		io_bp->b_uploffset = bp->b_uploffset + io_offset;
	} else {
		io_bp->b_datap  = (uintptr_t)(((char *)bp->b_datap) + io_offset);
	}
	io_bp->b_bcount = io_size;

	return (io_bp);
}
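
/*
 * Illustrative sketch (not part of vfs_bio.c): splitting one buffer into a
 * page-aligned sub-range with buf_clone(), e.g. to issue part of a larger
 * transfer separately.  The clone shares the original UPL/data pointer, so
 * the caller must keep the parent buffer alive until the clone completes.
 * my_clone_done and my_issue_first_page are hypothetical.
 */
#if 0	/* example only */
static void
my_clone_done(buf_t io_bp, void *arg)
{
	buf_t parent = (buf_t)arg;

	if (buf_error(io_bp))
		buf_seterror(parent, buf_error(io_bp));
	buf_free(io_bp);		/* clones come from the iobuf pool */
}

static errno_t
my_issue_first_page(buf_t bp)
{
	buf_t io_bp;

	/* clone the first PAGE_SIZE bytes; sub-range offsets must stay page aligned */
	io_bp = buf_clone(bp, 0, PAGE_SIZE, my_clone_done, bp);
	if (io_bp == NULL)
		return (EINVAL);

	/* hand io_bp down the I/O stack as usual */
	return (VNOP_STRATEGY(io_bp));
}
#endif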


int
buf_shadow(buf_t bp)
{
	if (bp->b_lflags & BL_SHADOW)
		return 1;
	return 0;
}


buf_t
buf_create_shadow_priv(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg)
{
	return (buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 1));
}

buf_t
buf_create_shadow(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg)
{
	return (buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 0));
}


static buf_t
buf_create_shadow_internal(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv)
{
	buf_t	io_bp;

	KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_START, bp, 0, 0, 0, 0);

	if ( !(bp->b_flags & B_META) || (bp->b_lflags & BL_IOBUF)) {

		KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, 0, 0, 0, 0);
		return (NULL);
	}
#ifdef BUF_MAKE_PRIVATE
	if (bp->b_shadow_ref && bp->b_data_ref == 0 && external_storage == 0)
		panic("buf_create_shadow: %p is in the private state (%d, %d)", bp, bp->b_shadow_ref, bp->b_data_ref);
#endif
	io_bp = alloc_io_buf(bp->b_vp, priv);

	io_bp->b_flags = bp->b_flags & (B_META | B_ZALLOC | B_ASYNC | B_READ | B_FUA);
	io_bp->b_blkno = bp->b_blkno;
	io_bp->b_lblkno = bp->b_lblkno;

	if (iodone) {
		io_bp->b_transaction = arg;
		io_bp->b_iodone = iodone;
		io_bp->b_flags |= B_CALL;
	}
	if (force_copy == FALSE) {
		io_bp->b_bcount = bp->b_bcount;
		io_bp->b_bufsize = bp->b_bufsize;

		if (external_storage) {
			io_bp->b_datap = external_storage;
#ifdef BUF_MAKE_PRIVATE
			io_bp->b_data_store = NULL;
#endif
		} else {
			io_bp->b_datap = bp->b_datap;
#ifdef BUF_MAKE_PRIVATE
			io_bp->b_data_store = bp;
#endif
		}
		*(buf_t *)(&io_bp->b_orig) = bp;

		lck_mtx_lock_spin(buf_mtxp);

		io_bp->b_lflags |= BL_SHADOW;
		io_bp->b_shadow = bp->b_shadow;
		bp->b_shadow = io_bp;
		bp->b_shadow_ref++;

#ifdef BUF_MAKE_PRIVATE
		if (external_storage)
			io_bp->b_lflags |= BL_EXTERNAL;
		else
			bp->b_data_ref++;
#endif
		lck_mtx_unlock(buf_mtxp);
	} else {
		if (external_storage) {
#ifdef BUF_MAKE_PRIVATE
			io_bp->b_lflags |= BL_EXTERNAL;
#endif
			io_bp->b_bcount = bp->b_bcount;
			io_bp->b_bufsize = bp->b_bufsize;
			io_bp->b_datap = external_storage;
		} else {
			allocbuf(io_bp, bp->b_bcount);

			io_bp->b_lflags |= BL_IOBUF_ALLOC;
		}
		bcopy((caddr_t)bp->b_datap, (caddr_t)io_bp->b_datap, bp->b_bcount);

#ifdef BUF_MAKE_PRIVATE
		io_bp->b_data_store = NULL;
#endif
	}
	KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, io_bp, 0);

	return (io_bp);
}


#ifdef BUF_MAKE_PRIVATE
errno_t
buf_make_private(buf_t bp)
{
	buf_t	ds_bp;
	buf_t	t_bp;
	struct buf my_buf;

	KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_START, bp, bp->b_shadow_ref, 0, 0, 0);

	if (bp->b_shadow_ref == 0 || bp->b_data_ref == 0 || ISSET(bp->b_lflags, BL_SHADOW)) {

		KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0);
		return (EINVAL);
	}
	my_buf.b_flags = B_META;
	my_buf.b_datap = (uintptr_t)NULL;
	allocbuf(&my_buf, bp->b_bcount);

	bcopy((caddr_t)bp->b_datap, (caddr_t)my_buf.b_datap, bp->b_bcount);

	lck_mtx_lock_spin(buf_mtxp);

	for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) {
		if ( !ISSET(bp->b_lflags, BL_EXTERNAL))
			break;
	}
	ds_bp = t_bp;

	if (ds_bp == NULL && bp->b_data_ref)
		panic("buf_make_private: b_data_ref != 0 && ds_bp == NULL");

	if (ds_bp && (bp->b_data_ref == 0 || bp->b_shadow_ref == 0))
		panic("buf_make_private: ref_count == 0 && ds_bp != NULL");

	if (ds_bp == NULL) {
		lck_mtx_unlock(buf_mtxp);

		buf_free_meta_store(&my_buf);

		KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0);
		return (EINVAL);
	}
	for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) {
		if ( !ISSET(t_bp->b_lflags, BL_EXTERNAL))
			t_bp->b_data_store = ds_bp;
	}
	ds_bp->b_data_ref = bp->b_data_ref;

	bp->b_data_ref = 0;
	bp->b_datap = my_buf.b_datap;

	lck_mtx_unlock(buf_mtxp);

	KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, 0, 0);
	return (0);
}
#endif


void
buf_setfilter(buf_t bp, void (*filter)(buf_t, void *), void *transaction,
	      void (**old_iodone)(buf_t, void *), void **old_transaction)
{
	if (old_iodone)
		*old_iodone = bp->b_iodone;
	if (old_transaction)
		*old_transaction = bp->b_transaction;

	bp->b_transaction = transaction;
	bp->b_iodone = filter;
	if (filter)
		bp->b_flags |= B_FILTER;
	else
		bp->b_flags &= ~B_FILTER;
}
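
/*
 * Illustrative sketch (not part of vfs_bio.c): chaining a completion filter
 * onto a buffer with buf_setfilter(), saving whatever iodone routine was
 * already installed.  The filter runs from buf_biodone(); if it displaced an
 * earlier completion routine it should pass completion along so the original
 * handling still happens.  The my_* names are hypothetical.
 */
#if 0	/* example only */
struct my_filter_ctx {
	void	(*saved_iodone)(buf_t, void *);
	void	*saved_transaction;
	int	errors;
};

static void
my_io_filter(buf_t bp, void *arg)
{
	struct my_filter_ctx *ctx = (struct my_filter_ctx *)arg;

	/* observe the completed I/O, e.g. tally errors */
	if (buf_error(bp))
		ctx->errors++;

	/* hand completion back to the routine we displaced, if any */
	if (ctx->saved_iodone)
		ctx->saved_iodone(bp, ctx->saved_transaction);
}

static void
my_attach_filter(buf_t bp, struct my_filter_ctx *ctx)
{
	buf_setfilter(bp, my_io_filter, ctx,
	    &ctx->saved_iodone, &ctx->saved_transaction);
}
#endif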


daddr64_t
buf_blkno(buf_t bp) {

	return (bp->b_blkno);
}

daddr64_t
buf_lblkno(buf_t bp) {

	return (bp->b_lblkno);
}

void
buf_setblkno(buf_t bp, daddr64_t blkno) {

	bp->b_blkno = blkno;
}

void
buf_setlblkno(buf_t bp, daddr64_t lblkno) {

	bp->b_lblkno = lblkno;
}

dev_t
buf_device(buf_t bp) {

	return (bp->b_dev);
}

errno_t
buf_setdevice(buf_t bp, vnode_t vp) {

	if ((vp->v_type != VBLK) && (vp->v_type != VCHR))
		return EINVAL;
	bp->b_dev = vp->v_rdev;

	return 0;
}


void *
buf_drvdata(buf_t bp) {

	return (bp->b_drvdata);
}

void
buf_setdrvdata(buf_t bp, void *drvdata) {

	bp->b_drvdata = drvdata;
}

void *
buf_fsprivate(buf_t bp) {

	return (bp->b_fsprivate);
}

void
buf_setfsprivate(buf_t bp, void *fsprivate) {

	bp->b_fsprivate = fsprivate;
}

kauth_cred_t
buf_rcred(buf_t bp) {

	return (bp->b_rcred);
}

kauth_cred_t
buf_wcred(buf_t bp) {

	return (bp->b_wcred);
}

void *
buf_upl(buf_t bp) {

	return (bp->b_upl);
}

uint32_t
buf_uploffset(buf_t bp) {

	return ((uint32_t)(bp->b_uploffset));
}

proc_t
buf_proc(buf_t bp) {

	return (bp->b_proc);
}


errno_t
buf_map(buf_t bp, caddr_t *io_addr)
{
	buf_t		real_bp;
	vm_offset_t	vaddr;
	kern_return_t	kret;

	if ( !(bp->b_flags & B_CLUSTER)) {
		*io_addr = (caddr_t)bp->b_datap;
		return (0);
	}
	real_bp = (buf_t)(bp->b_real_bp);

	if (real_bp && real_bp->b_datap) {
		/*
		 * b_real_bp is only valid if B_CLUSTER is SET
		 * if it's non-zero, then someone did a cluster_bp call
		 * if the backing physical pages were already mapped
		 * in before the call to cluster_bp (non-zero b_datap),
		 * then we just use that mapping
		 */
		*io_addr = (caddr_t)real_bp->b_datap;
		return (0);
	}
	kret = ubc_upl_map(bp->b_upl, &vaddr);    /* Map it in */

	if (kret != KERN_SUCCESS) {
		*io_addr = NULL;

		return(ENOMEM);
	}
	vaddr += bp->b_uploffset;

	*io_addr = (caddr_t)vaddr;

	return (0);
}

errno_t
buf_unmap(buf_t bp)
{
	buf_t		real_bp;
	kern_return_t	kret;

	if ( !(bp->b_flags & B_CLUSTER))
		return (0);
	/*
	 * see buf_map for the explanation
	 */
	real_bp = (buf_t)(bp->b_real_bp);

	if (real_bp && real_bp->b_datap)
		return (0);

	if ((bp->b_lflags & BL_IOBUF) &&
	    ((bp->b_flags & (B_PAGEIO | B_READ)) != (B_PAGEIO | B_READ))) {
		/*
		 * ignore pageins... the 'right' thing will
		 * happen due to the way we handle speculative
		 * clusters...
		 *
		 * when we commit these pages, we'll hit
		 * it with UPL_COMMIT_INACTIVE which
		 * will clear the reference bit that got
		 * turned on when we touched the mapping
		 */
		bp->b_flags |= B_AGE;
	}
	kret = ubc_upl_unmap(bp->b_upl);

	if (kret != KERN_SUCCESS)
		return (EINVAL);
	return (0);
}


void
buf_clear(buf_t bp) {
	caddr_t baddr;

	if (buf_map(bp, &baddr) == 0) {
		bzero(baddr, bp->b_bcount);
		buf_unmap(bp);
	}
	bp->b_resid = 0;
}

/*
 * Read or write a buffer that is not contiguous on disk.
 * buffer is marked done/error at the conclusion
 */
static int
buf_strategy_fragmented(vnode_t devvp, buf_t bp, off_t f_offset, size_t contig_bytes)
{
	vnode_t	vp = buf_vnode(bp);
	buf_t	io_bp;			 /* For reading or writing a single block */
	int	io_direction;
	int	io_resid;
	size_t	io_contig_bytes;
	daddr64_t io_blkno;
	int	error = 0;
	int	bmap_flags;

	/*
	 * save our starting point... the bp was already mapped
	 * in buf_strategy before we got called
	 * no sense doing it again.
	 */
	io_blkno = bp->b_blkno;
	/*
	 * Make sure we redo this mapping for the next I/O
	 * i.e. this can never be a 'permanent' mapping
	 */
	bp->b_blkno = bp->b_lblkno;

	/*
	 * Get an io buffer to do the deblocking
	 */
	io_bp = alloc_io_buf(devvp, 0);

	io_bp->b_lblkno = bp->b_lblkno;
	io_bp->b_datap  = bp->b_datap;
	io_resid	= bp->b_bcount;
	io_direction	= bp->b_flags & B_READ;
	io_contig_bytes = contig_bytes;

	if (bp->b_flags & B_READ)
		bmap_flags = VNODE_READ;
	else
		bmap_flags = VNODE_WRITE;

	for (;;) {
		if (io_blkno == -1)
			/*
			 * this is unexpected, but we'll allow for it
			 */
			bzero((caddr_t)io_bp->b_datap, (int)io_contig_bytes);
		else {
			io_bp->b_bcount	 = io_contig_bytes;
			io_bp->b_bufsize = io_contig_bytes;
			io_bp->b_resid   = io_contig_bytes;
			io_bp->b_blkno   = io_blkno;

			buf_reset(io_bp, io_direction);

			/*
			 * Call the device to do the I/O and wait for it.  Make sure the appropriate party is charged for write
			 */

			if (!ISSET(bp->b_flags, B_READ))
				OSAddAtomic(1, &devvp->v_numoutput);

			if ((error = VNOP_STRATEGY(io_bp)))
				break;
			if ((error = (int)buf_biowait(io_bp)))
				break;
			if (io_bp->b_resid) {
				io_resid -= (io_contig_bytes - io_bp->b_resid);
				break;
			}
		}
		if ((io_resid -= io_contig_bytes) == 0)
			break;
		f_offset       += io_contig_bytes;
		io_bp->b_datap += io_contig_bytes;

		/*
		 * Map the current position to a physical block number
		 */
		if ((error = VNOP_BLOCKMAP(vp, f_offset, io_resid, &io_blkno, &io_contig_bytes, NULL, bmap_flags, NULL)))
			break;
	}
	buf_free(io_bp);

	if (error)
		buf_seterror(bp, error);
	bp->b_resid = io_resid;
	/*
	 * This I/O is now complete
	 */
	buf_biodone(bp);

	return error;
}


/*
 * struct vnop_strategy_args {
 *      struct buf *a_bp;
 * } *ap;
 */
errno_t
buf_strategy(vnode_t devvp, void *ap)
{
	buf_t	bp = ((struct vnop_strategy_args *)ap)->a_bp;
	vnode_t	vp = bp->b_vp;
	int	bmap_flags;
	errno_t error;
#if CONFIG_DTRACE
	int dtrace_io_start_flag = 0;	 /* We only want to trip the io:::start
					  * probe once, with the true physical
					  * block in place (b_blkno)
					  */

#endif

	if (vp == NULL || vp->v_type == VCHR || vp->v_type == VBLK)
		panic("buf_strategy: b_vp == NULL || vtype == VCHR | VBLK\n");
	/*
	 * associate the physical device with
	 * this buf_t even if we don't
	 * end up issuing the I/O...
	 */
	bp->b_dev = devvp->v_rdev;

	if (bp->b_flags & B_READ)
		bmap_flags = VNODE_READ;
	else
		bmap_flags = VNODE_WRITE;

	if ( !(bp->b_flags & B_CLUSTER)) {

		if ( (bp->b_upl) ) {
			/*
			 * we have a UPL associated with this bp
			 * go through cluster_bp which knows how
			 * to deal with filesystem block sizes
			 * that aren't equal to the page size
			 */
			DTRACE_IO1(start, buf_t, bp);
			return (cluster_bp(bp));
		}
		if (bp->b_blkno == bp->b_lblkno) {
			off_t	f_offset;
			size_t	contig_bytes;

			if ((error = VNOP_BLKTOOFF(vp, bp->b_lblkno, &f_offset))) {
				DTRACE_IO1(start, buf_t, bp);
				buf_seterror(bp, error);
				buf_biodone(bp);

				return (error);
			}

			if ((error = VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))) {
				DTRACE_IO1(start, buf_t, bp);
				buf_seterror(bp, error);
				buf_biodone(bp);

				return (error);
			}

			DTRACE_IO1(start, buf_t, bp);
#if CONFIG_DTRACE
			dtrace_io_start_flag = 1;
#endif /* CONFIG_DTRACE */

			if ((bp->b_blkno == -1) || (contig_bytes == 0)) {
				/* Set block number to force biodone later */
				bp->b_blkno = -1;
				buf_clear(bp);
			}
			else if ((long)contig_bytes < bp->b_bcount) {
				return (buf_strategy_fragmented(devvp, bp, f_offset, contig_bytes));
			}
		}

#if CONFIG_DTRACE
		if (dtrace_io_start_flag == 0) {
			DTRACE_IO1(start, buf_t, bp);
			dtrace_io_start_flag = 1;
		}
#endif /* CONFIG_DTRACE */

		if (bp->b_blkno == -1) {
			buf_biodone(bp);
			return (0);
		}
	}

#if CONFIG_DTRACE
	if (dtrace_io_start_flag == 0)
		DTRACE_IO1(start, buf_t, bp);
#endif /* CONFIG_DTRACE */

#if CONFIG_PROTECT
	/* Capture f_offset in the bufattr*/
	if (bp->b_attr.ba_cpentry != 0) {
		/* No need to go here for older EAs */
		if(bp->b_attr.ba_cpentry->cp_flags & CP_OFF_IV_ENABLED) {
			off_t f_offset;
			if ((error = VNOP_BLKTOOFF(bp->b_vp, bp->b_lblkno, &f_offset)))
				return error;

			/*
			 * Attach the file offset to this buffer. The
			 * bufattr attributes will be passed down the stack
			 * until they reach IOFlashStorage.  IOFlashStorage
			 * will retain the offset in a local variable when it
			 * issues its I/Os to the NAND controller.
			 *
			 * Note that LwVM may end up splitting this I/O
			 * into sub-I/Os if it crosses a chunk boundary.  In this
			 * case, LwVM will update this field when it dispatches
			 * each I/O to IOFlashStorage.  But from our perspective
			 * we have only issued a single I/O.
			 */
			bufattr_setcpoff (&(bp->b_attr), (u_int64_t)f_offset);
			CP_DEBUG((CPDBG_OFFSET_IO | DBG_FUNC_NONE), (uint32_t) f_offset, (uint32_t) bp->b_lblkno, (uint32_t) bp->b_blkno, (uint32_t) bp->b_bcount, 0);
		}
	}
#endif

	/*
	 * we can issue the I/O because...
	 * either B_CLUSTER is set which
	 * means that the I/O is properly set
	 * up to be a multiple of the page size, or
	 * we were able to successfully set up the
	 * physical block mapping
	 */
	error = VOCALL(devvp->v_op, VOFFSET(vnop_strategy), ap);
	DTRACE_FSINFO(strategy, vnode_t, vp);
	return (error);
}
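
/*
 * Illustrative sketch (not part of vfs_bio.c): the usual shape of a
 * filesystem's vnop_strategy entry point, which simply forwards to
 * buf_strategy() with the device vnode so the block mapping and device
 * call happen here.  my_fs_vnop_strategy and my_mount_devvp() are
 * hypothetical names.
 */
#if 0	/* example only */
static int
my_fs_vnop_strategy(struct vnop_strategy_args *ap)
{
	buf_t	bp = ap->a_bp;
	vnode_t	devvp = my_mount_devvp(buf_vnode(bp));	/* device the fs is mounted on */

	return (buf_strategy(devvp, ap));
}
#endif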



buf_t
buf_alloc(vnode_t vp)
{
	return(alloc_io_buf(vp, 0));
}

void
buf_free(buf_t bp) {

	free_io_buf(bp);
}


/*
 * iterate buffers for the specified vp.
 *   if BUF_SCAN_DIRTY is set, do the dirty list
 *   if BUF_SCAN_CLEAN is set, do the clean list
 *   if neither flag is set, default to BUF_SCAN_DIRTY
 *   if BUF_NOTIFY_BUSY is set, call the callout function using a NULL bp for busy pages
 */

struct buf_iterate_info_t {
	int flag;
	struct buflists *listhead;
};

void
buf_iterate(vnode_t vp, int (*callout)(buf_t, void *), int flags, void *arg)
{
	buf_t	bp;
	int	retval;
	struct	buflists local_iterblkhd;
	int	lock_flags = BAC_NOWAIT | BAC_REMOVE;
	int	notify_busy = flags & BUF_NOTIFY_BUSY;
	struct buf_iterate_info_t list[2];
	int	num_lists, i;

	if (flags & BUF_SKIP_LOCKED)
		lock_flags |= BAC_SKIP_LOCKED;
	if (flags & BUF_SKIP_NONLOCKED)
		lock_flags |= BAC_SKIP_NONLOCKED;

	if ( !(flags & (BUF_SCAN_DIRTY | BUF_SCAN_CLEAN)))
		flags |= BUF_SCAN_DIRTY;

	num_lists = 0;

	if (flags & BUF_SCAN_DIRTY) {
		list[num_lists].flag = VBI_DIRTY;
		list[num_lists].listhead = &vp->v_dirtyblkhd;
		num_lists++;
	}
	if (flags & BUF_SCAN_CLEAN) {
		list[num_lists].flag = VBI_CLEAN;
		list[num_lists].listhead = &vp->v_cleanblkhd;
		num_lists++;
	}

	for (i = 0; i < num_lists; i++) {
		lck_mtx_lock(buf_mtxp);

		if (buf_iterprepare(vp, &local_iterblkhd, list[i].flag))  {
			lck_mtx_unlock(buf_mtxp);
			continue;
		}
		while (!LIST_EMPTY(&local_iterblkhd)) {
			bp = LIST_FIRST(&local_iterblkhd);
			LIST_REMOVE(bp, b_vnbufs);
			LIST_INSERT_HEAD(list[i].listhead, bp, b_vnbufs);

			if (buf_acquire_locked(bp, lock_flags, 0, 0)) {
				if (notify_busy) {
					bp = NULL;
				} else {
					continue;
				}
			}

			lck_mtx_unlock(buf_mtxp);

			retval = callout(bp, arg);

			switch (retval) {
			case BUF_RETURNED:
				if (bp)
					buf_brelse(bp);
				break;
			case BUF_CLAIMED:
				break;
			case BUF_RETURNED_DONE:
				if (bp)
					buf_brelse(bp);
				lck_mtx_lock(buf_mtxp);
				goto out;
			case BUF_CLAIMED_DONE:
				lck_mtx_lock(buf_mtxp);
				goto out;
			}
			lck_mtx_lock(buf_mtxp);
		} /* while list has more nodes */
	  out:
		buf_itercomplete(vp, &local_iterblkhd, list[i].flag);
		lck_mtx_unlock(buf_mtxp);
	} /* for each list */
} /* buf_iterate */
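
/*
 * Illustrative sketch (not part of vfs_bio.c): a buf_iterate() callout that
 * pushes every dirty buffer of a vnode to disk.  Returning BUF_RETURNED
 * tells buf_iterate() it still owns the buffer and should buf_brelse() it;
 * returning BUF_CLAIMED means the callout disposed of it (as buf_bawrite()
 * does here).  my_flush_callout and my_fs_flush_blocks are hypothetical.
 */
#if 0	/* example only */
static int
my_flush_callout(buf_t bp, void *arg)
{
	int *count = (int *)arg;

	if (bp == NULL)			/* busy buffer reported via BUF_NOTIFY_BUSY */
		return (BUF_RETURNED);

	if (buf_flags(bp) & B_DELWRI) {
		(void) buf_bawrite(bp);	/* starts an async write and releases bp */
		(*count)++;
		return (BUF_CLAIMED);
	}
	return (BUF_RETURNED);
}

static void
my_fs_flush_blocks(vnode_t vp)
{
	int flushed = 0;

	buf_iterate(vp, my_flush_callout, BUF_SCAN_DIRTY, &flushed);
}
#endif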


/*
 * Flush out and invalidate all buffers associated with a vnode.
 */
int
buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo)
{
	buf_t	bp;
	int	aflags;
	int	error = 0;
	int	must_rescan = 1;
	struct	buflists local_iterblkhd;


	if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd))
		return (0);

	lck_mtx_lock(buf_mtxp);

	for (;;) {
		if (must_rescan == 0)
			/*
			 * the lists may not be empty, but all that's left at this
			 * point are metadata or B_LOCKED buffers which are being
			 * skipped... we know this because we made it through both
			 * the clean and dirty lists without dropping buf_mtxp...
			 * each time we drop buf_mtxp we bump "must_rescan"
			 */
			break;
		if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd))
			break;
		must_rescan = 0;
		/*
		 * iterate the clean list
		 */
		if (buf_iterprepare(vp, &local_iterblkhd, VBI_CLEAN)) {
			goto try_dirty_list;
		}
		while (!LIST_EMPTY(&local_iterblkhd)) {

			bp = LIST_FIRST(&local_iterblkhd);

			LIST_REMOVE(bp, b_vnbufs);
			LIST_INSERT_HEAD(&vp->v_cleanblkhd, bp, b_vnbufs);

			/*
			 * some filesystems distinguish meta data blocks with a negative logical block #
			 */
			if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META)))
				continue;

			aflags = BAC_REMOVE;

			if ( !(flags & BUF_INVALIDATE_LOCKED) )
				aflags |= BAC_SKIP_LOCKED;

			if ( (error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo)) ) {
				if (error == EDEADLK)
					/*
					 * this buffer was marked B_LOCKED...
					 * we didn't drop buf_mtxp, so we
					 * don't need to rescan
					 */
					continue;
				if (error == EAGAIN) {
					/*
					 * found a busy buffer... we blocked and
					 * dropped buf_mtxp, so we're going to
					 * need to rescan after this pass is completed
					 */
					must_rescan++;
					continue;
				}
				/*
				 * got some kind of 'real' error out of the msleep
				 * in buf_acquire_locked, terminate the scan and return the error
				 */
				buf_itercomplete(vp, &local_iterblkhd, VBI_CLEAN);

				lck_mtx_unlock(buf_mtxp);
				return (error);
			}
			lck_mtx_unlock(buf_mtxp);

			if (bp->b_flags & B_LOCKED)
				KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 0, 0);

			CLR(bp->b_flags, B_LOCKED);
			SET(bp->b_flags, B_INVAL);
			buf_brelse(bp);

			lck_mtx_lock(buf_mtxp);

			/*
			 * by dropping buf_mtxp, we allow new
			 * buffers to be added to the vnode list(s)
			 * we'll have to rescan at least once more
			 * if the queues aren't empty
			 */
			must_rescan++;
		}
		buf_itercomplete(vp, &local_iterblkhd, VBI_CLEAN);

try_dirty_list:
		/*
		 * Now iterate on dirty blks
		 */
		if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY)) {
			continue;
		}
		while (!LIST_EMPTY(&local_iterblkhd)) {
			bp = LIST_FIRST(&local_iterblkhd);

			LIST_REMOVE(bp, b_vnbufs);
			LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs);

			/*
			 * some filesystems distinguish meta data blocks with a negative logical block #
			 */
			if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META)))
				continue;

			aflags = BAC_REMOVE;

			if ( !(flags & BUF_INVALIDATE_LOCKED) )
				aflags |= BAC_SKIP_LOCKED;

			if ( (error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo)) ) {
				if (error == EDEADLK)
					/*
					 * this buffer was marked B_LOCKED...
					 * we didn't drop buf_mtxp, so we
					 * don't need to rescan
					 */
					continue;
				if (error == EAGAIN) {
					/*
					 * found a busy buffer... we blocked and
					 * dropped buf_mtxp, so we're going to
					 * need to rescan after this pass is completed
					 */
					must_rescan++;
					continue;
				}
				/*
				 * got some kind of 'real' error out of the msleep
				 * in buf_acquire_locked, terminate the scan and return the error
				 */
				buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);

				lck_mtx_unlock(buf_mtxp);
				return (error);
			}
			lck_mtx_unlock(buf_mtxp);

			if (bp->b_flags & B_LOCKED)
				KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 1, 0);

			CLR(bp->b_flags, B_LOCKED);
			SET(bp->b_flags, B_INVAL);

			if (ISSET(bp->b_flags, B_DELWRI) && (flags & BUF_WRITE_DATA))
				(void) VNOP_BWRITE(bp);
			else
				buf_brelse(bp);

			lck_mtx_lock(buf_mtxp);
			/*
			 * by dropping buf_mtxp, we allow new
			 * buffers to be added to the vnode list(s)
			 * we'll have to rescan at least once more
			 * if the queues aren't empty
			 */
			must_rescan++;
		}
		buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);
	}
	lck_mtx_unlock(buf_mtxp);

	return (0);
}
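
/*
 * Illustrative sketch (not part of vfs_bio.c): a filesystem typically calls
 * buf_invalidateblks() when a vnode's cached blocks must be discarded, e.g.
 * on truncation or during reclaim.  Passing BUF_WRITE_DATA flushes dirty
 * buffers to disk first instead of dropping them.  my_fs_discard_blocks is
 * hypothetical.
 */
#if 0	/* example only */
static int
my_fs_discard_blocks(vnode_t vp, int preserve_dirty_data)
{
	int flags = preserve_dirty_data ? BUF_WRITE_DATA : 0;

	/* slpflag 0, slptimeo 0: sleep without timeout while waiting on busy bufs */
	return (buf_invalidateblks(vp, flags, 0, 0));
}
#endif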

void
buf_flushdirtyblks(vnode_t vp, int wait, int flags, const char *msg) {

	(void) buf_flushdirtyblks_skipinfo(vp, wait, flags, msg);
	return;
}

int
buf_flushdirtyblks_skipinfo(vnode_t vp, int wait, int flags, const char *msg) {
	buf_t	bp;
	int	writes_issued = 0;
	errno_t	error;
	int	busy = 0;
	struct	buflists local_iterblkhd;
	int	lock_flags = BAC_NOWAIT | BAC_REMOVE;
	int any_locked = 0;

	if (flags & BUF_SKIP_LOCKED)
		lock_flags |= BAC_SKIP_LOCKED;
	if (flags & BUF_SKIP_NONLOCKED)
		lock_flags |= BAC_SKIP_NONLOCKED;
loop:
	lck_mtx_lock(buf_mtxp);

	if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY) == 0)  {
		while (!LIST_EMPTY(&local_iterblkhd)) {
			bp = LIST_FIRST(&local_iterblkhd);
			LIST_REMOVE(bp, b_vnbufs);
			LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs);

			if ((error = buf_acquire_locked(bp, lock_flags, 0, 0)) == EBUSY)  {
				busy++;
			}
			if (error) {
				/*
				 * If we passed in BUF_SKIP_LOCKED or BUF_SKIP_NONLOCKED,
				 * we may want to do something differently if a locked or unlocked
				 * buffer was encountered (depending on the arg specified).
				 * In this case, we know that one of those two was set, and the
				 * buf acquisition failed above.
				 *
				 * If it failed with EDEADLK, then save state which can be emitted
				 * later on to the caller.  Most callers should not care.
				 */
				if (error == EDEADLK) {
					any_locked++;
				}
				continue;
			}
			lck_mtx_unlock(buf_mtxp);

			bp->b_flags &= ~B_LOCKED;

			/*
			 * Wait for I/O associated with indirect blocks to complete,
			 * since there is no way to quickly wait for them below.
			 */
			if ((bp->b_vp == vp) || (wait == 0))
				(void) buf_bawrite(bp);
			else
				(void) VNOP_BWRITE(bp);
			writes_issued++;

			lck_mtx_lock(buf_mtxp);
		}
		buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);
	}
	lck_mtx_unlock(buf_mtxp);

	if (wait) {
		(void)vnode_waitforwrites(vp, 0, 0, 0, msg);

		if (vp->v_dirtyblkhd.lh_first && busy) {
			/*
			 * we had one or more BUSY buffers on
			 * the dirtyblock list... most likely
			 * these are due to delayed writes that
			 * were moved to the bclean queue but
			 * have not yet been 'written'.
			 * if we issued some writes on the
			 * previous pass, we try again immediately
			 * if we didn't, we'll sleep for some time
			 * to allow the state to change...
			 */
			if (writes_issued == 0) {
				(void)tsleep((caddr_t)&vp->v_numoutput,
					     PRIBIO + 1, "vnode_flushdirtyblks", hz/20);
			}
			writes_issued = 0;
			busy = 0;

			goto loop;
		}
	}

	return any_locked;
}


/*
 * called with buf_mtxp held...
 * this lock protects the queue manipulation
 */
static int
buf_iterprepare(vnode_t vp, struct buflists *iterheadp, int flags)
{
	struct buflists * listheadp;

	if (flags & VBI_DIRTY)
		listheadp = &vp->v_dirtyblkhd;
	else
		listheadp = &vp->v_cleanblkhd;

	while (vp->v_iterblkflags & VBI_ITER) {
		vp->v_iterblkflags |= VBI_ITERWANT;
		msleep(&vp->v_iterblkflags, buf_mtxp, 0, "buf_iterprepare", NULL);
	}
	if (LIST_EMPTY(listheadp)) {
		LIST_INIT(iterheadp);
		return(EINVAL);
	}
	vp->v_iterblkflags |= VBI_ITER;

	iterheadp->lh_first = listheadp->lh_first;
	listheadp->lh_first->b_vnbufs.le_prev = &iterheadp->lh_first;
	LIST_INIT(listheadp);

	return(0);
}

/*
 * called with buf_mtxp held...
 * this lock protects the queue manipulation
 */
static void
buf_itercomplete(vnode_t vp, struct buflists *iterheadp, int flags)
{
	struct buflists * listheadp;
	buf_t bp;

	if (flags & VBI_DIRTY)
		listheadp = &vp->v_dirtyblkhd;
	else
		listheadp = &vp->v_cleanblkhd;

	while (!LIST_EMPTY(iterheadp)) {
		bp = LIST_FIRST(iterheadp);
		LIST_REMOVE(bp, b_vnbufs);
		LIST_INSERT_HEAD(listheadp, bp, b_vnbufs);
	}
	vp->v_iterblkflags &= ~VBI_ITER;

	if  (vp->v_iterblkflags & VBI_ITERWANT) {
		vp->v_iterblkflags &= ~VBI_ITERWANT;
		wakeup(&vp->v_iterblkflags);
	}
}


static void
bremfree_locked(buf_t bp)
{
	struct bqueues *dp = NULL;
	int whichq;

	whichq = bp->b_whichq;

	if (whichq == -1) {
		if (bp->b_shadow_ref == 0)
			panic("bremfree_locked: %p not on freelist", bp);
		/*
		 * there are clones pointing to 'bp'...
		 * therefore, it was not put on a freelist
		 * when buf_brelse was last called on 'bp'
		 */
		return;
	}
	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 */
	if (bp->b_freelist.tqe_next == NULL) {
		dp = &bufqueues[whichq];

		if (dp->tqh_last != &bp->b_freelist.tqe_next)
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);

	if (whichq == BQ_LAUNDRY)
		blaundrycnt--;

	bp->b_whichq = -1;
	bp->b_timestamp = 0;
	bp->b_shadow = 0;
}

/*
 * Associate a buffer with a vnode.
 * buf_mtxp must be locked on entry
 */
static void
bgetvp_locked(vnode_t vp, buf_t bp)
{

	if (bp->b_vp != vp)
		panic("bgetvp_locked: not free");

	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
}

/*
 * Disassociate a buffer from a vnode.
 * buf_mtxp must be locked on entry
 */
static void
brelvp_locked(buf_t bp)
{
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);

	bp->b_vp = (vnode_t)NULL;
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
static void
buf_reassign(buf_t bp, vnode_t newvp)
{
	struct buflists *listheadp;

	if (newvp == NULL) {
		printf("buf_reassign: NULL");
		return;
	}
	lck_mtx_lock_spin(buf_mtxp);

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if (ISSET(bp->b_flags, B_DELWRI))
		listheadp = &newvp->v_dirtyblkhd;
	else
		listheadp = &newvp->v_cleanblkhd;
	bufinsvn(bp, listheadp);

	lck_mtx_unlock(buf_mtxp);
}

static __inline__ void
bufhdrinit(buf_t bp)
{
	bzero((char *)bp, sizeof *bp);
	bp->b_dev = NODEV;
	bp->b_rcred = NOCRED;
	bp->b_wcred = NOCRED;
	bp->b_vnbufs.le_next = NOLIST;
	bp->b_flags = B_INVAL;

	return;
}

/*
 * Initialize buffers and hash links for buffers.
 */
__private_extern__ void
bufinit(void)
{
	buf_t	bp;
	struct bqueues *dp;
	int	i;

	nbuf_headers = 0;
	/* Initialize the buffer queues ('freelists') and the hash table */
	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf_hashelements, M_CACHE, &bufhash);

	buf_busycount = 0;

	/* Initialize the buffer headers */
	for (i = 0; i < max_nbuf_headers; i++) {
		nbuf_headers++;
		bp = &buf_headers[i];
		bufhdrinit(bp);

		BLISTNONE(bp);
		dp = &bufqueues[BQ_EMPTY];
		bp->b_whichq = BQ_EMPTY;
		bp->b_timestamp = buf_timestamp();
		binsheadfree(bp, dp, BQ_EMPTY);
		binshash(bp, &invalhash);
	}
	boot_nbuf_headers = nbuf_headers;

	TAILQ_INIT(&iobufqueue);
	TAILQ_INIT(&delaybufqueue);

	for (; i < nbuf_headers + niobuf_headers; i++) {
		bp = &buf_headers[i];
		bufhdrinit(bp);
		bp->b_whichq = -1;
		binsheadfree(bp, &iobufqueue, -1);
	}

	/*
	 * allocate lock group attribute and group
	 */
	buf_mtx_grp_attr = lck_grp_attr_alloc_init();
	buf_mtx_grp = lck_grp_alloc_init("buffer cache", buf_mtx_grp_attr);

	/*
	 * allocate the lock attribute
	 */
	buf_mtx_attr = lck_attr_alloc_init();

	/*
	 * allocate and initialize mutexes for the buffer and iobuffer pools
	 */
	buf_mtxp	= lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr);
	iobuffer_mtxp	= lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr);

	if (iobuffer_mtxp == NULL)
		panic("couldn't create iobuffer mutex");

	if (buf_mtxp == NULL)
		panic("couldn't create buf mutex");

	/*
	 * allocate and initialize cluster specific global locks...
	 */
	cluster_init();

	printf("using %d buffer headers and %d cluster IO buffer headers\n",
		nbuf_headers, niobuf_headers);

	/* Set up zones used by the buffer cache */
	bufzoneinit();

	/* start the bcleanbuf() thread */
	bcleanbuf_thread_init();

	/* Register a callout for relieving vm pressure */
	if (vm_set_buffer_cleanup_callout(buffer_cache_gc) != KERN_SUCCESS) {
		panic("Couldn't register buffer cache callout for vm pressure!\n");
	}

}
2019
2d21ac55
A
2020/*
2021 * Zones for the meta data buffers
2022 */
2023
2024#define MINMETA 512
2025#define MAXMETA 8192
2026
2027struct meta_zone_entry {
2028 zone_t mz_zone;
2029 vm_size_t mz_size;
2030 vm_size_t mz_max;
2031 const char *mz_name;
2032};
2033
2034struct meta_zone_entry meta_zones[] = {
2035 {NULL, (MINMETA * 1), 128 * (MINMETA * 1), "buf.512" },
2036 {NULL, (MINMETA * 2), 64 * (MINMETA * 2), "buf.1024" },
2037 {NULL, (MINMETA * 4), 16 * (MINMETA * 4), "buf.2048" },
2038 {NULL, (MINMETA * 8), 512 * (MINMETA * 8), "buf.4096" },
2039 {NULL, (MINMETA * 16), 512 * (MINMETA * 16), "buf.8192" },
2040 {NULL, 0, 0, "" } /* End */
2041};
2042
2043/*
2044 * Initialize the meta data zones
2045 */
2046static void
2047bufzoneinit(void)
2048{
2049 int i;
2050
2051 for (i = 0; meta_zones[i].mz_size != 0; i++) {
2052 meta_zones[i].mz_zone =
2053 zinit(meta_zones[i].mz_size,
2054 meta_zones[i].mz_max,
2055 PAGE_SIZE,
2056 meta_zones[i].mz_name);
6d2010ae 2057 zone_change(meta_zones[i].mz_zone, Z_CALLERACCT, FALSE);
2d21ac55
A
2058 }
2059 buf_hdr_zone = zinit(sizeof(struct buf), 32, PAGE_SIZE, "buf headers");
6d2010ae 2060 zone_change(buf_hdr_zone, Z_CALLERACCT, FALSE);
2d21ac55
A
2061}
2062
2063static __inline__ zone_t
2064getbufzone(size_t size)
2065{
2066 int i;
2067
2068 if ((size % 512) || (size < MINMETA) || (size > MAXMETA))
2069 panic("getbufzone: incorect size = %lu", size);
2070
2071 for (i = 0; meta_zones[i].mz_size != 0; i++) {
2072 if (meta_zones[i].mz_size >= size)
2073 break;
2074 }
2075
2076 return (meta_zones[i].mz_zone);
2077}
2078
2079
2080
91447636 2081static struct buf *
b0d623f7 2082bio_doread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, int async, int queuetype)
91447636
A
2083{
2084 buf_t bp;
2085
2086 bp = buf_getblk(vp, blkno, size, 0, 0, queuetype);
2087
2088 /*
2089 * If buffer does not have data valid, start a read.
2090 * Note that if buffer is B_INVAL, buf_getblk() won't return it.
2091 * Therefore, it's valid if it's I/O has completed or been delayed.
2092 */
2093 if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
2094 struct proc *p;
2095
2096 p = current_proc();
2097
2098 /* Start I/O for the buffer (keeping credentials). */
2099 SET(bp->b_flags, B_READ | async);
0c530ab8 2100 if (IS_VALID_CRED(cred) && !IS_VALID_CRED(bp->b_rcred)) {
91447636
A
2101 kauth_cred_ref(cred);
2102 bp->b_rcred = cred;
2103 }
2104
2105 VNOP_STRATEGY(bp);
2106
2107 trace(TR_BREADMISS, pack(vp, size), blkno);
2108
2109 /* Pay for the read. */
39236c6e 2110 if (p && p->p_stats) {
b0d623f7 2111 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_inblock); /* XXX */
39236c6e 2112 }
91447636
A
2113
2114 if (async) {
2115 /*
2116 * since we asked for an ASYNC I/O
2117 * the biodone will do the brelse
2118 * we don't want to pass back a bp
2119 * that we don't 'own'
2120 */
2121 bp = NULL;
2122 }
2123 } else if (async) {
2124 buf_brelse(bp);
2125 bp = NULL;
2126 }
2127
2128 trace(TR_BREADHIT, pack(vp, size), blkno);
2129
2130 return (bp);
55e303ae
A
2131}
2132
2133/*
91447636 2134 * Perform the reads for buf_breadn() and buf_meta_breadn().
55e303ae
A
2135 * Trivial modification to the breada algorithm presented in Bach (p.55).
2136 */
91447636
A
2137static errno_t
2138do_breadn_for_type(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes,
b0d623f7 2139 int nrablks, kauth_cred_t cred, buf_t *bpp, int queuetype)
1c79356b 2140{
91447636
A
2141 buf_t bp;
2142 int i;
1c79356b 2143
55e303ae 2144 bp = *bpp = bio_doread(vp, blkno, size, cred, 0, queuetype);
1c79356b
A
2145
2146 /*
2147 * For each of the read-ahead blocks, start a read, if necessary.
2148 */
2149 for (i = 0; i < nrablks; i++) {
2150 /* If it's in the cache, just go on to next one. */
2151 if (incore(vp, rablks[i]))
2152 continue;
2153
2154 /* Get a buffer for the read-ahead block */
55e303ae 2155 (void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC, queuetype);
1c79356b
A
2156 }
2157
2158 /* Otherwise, we had to start a read for it; wait until it's valid. */
91447636 2159 return (buf_biowait(bp));
1c79356b
A
2160}
2161
91447636 2162
1c79356b 2163/*
91447636
A
2164 * Read a disk block.
 2165 * This algorithm is described in Bach (p.54).
1c79356b 2166 */
91447636 2167errno_t
b0d623f7 2168buf_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp)
91447636
A
2169{
2170 buf_t bp;
2171
2172 /* Get buffer for block. */
2173 bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_READ);
2174
2175 /* Wait for the read to complete, and return result. */
2176 return (buf_biowait(bp));
2177}
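/*
 * Illustrative sketch of a typical caller (hypothetical helper name;
 * assumes the buf_bread()/buf_dataptr()/buf_count()/buf_brelse() KPIs
 * and NOCRED): read one logical block synchronously, inspect it, then
 * release it back to the cache.
 *
 *	static errno_t
 *	fs_read_block(vnode_t vp, daddr64_t blkno, int blksize)
 *	{
 *		buf_t	bp;
 *		errno_t	error;
 *
 *		error = buf_bread(vp, blkno, blksize, NOCRED, &bp);
 *		if (error) {
 *			buf_brelse(bp);		-- a buf_t is returned even on error
 *			return (error);
 *		}
 *		-- data is at (caddr_t)buf_dataptr(bp), buf_count(bp) bytes long
 *		buf_brelse(bp);
 *		return (0);
 *	}
 */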
2178
2179/*
2180 * Read a disk block. [bread() for meta-data]
 2181 * This algorithm is described in Bach (p.54).
2182 */
2183errno_t
b0d623f7 2184buf_meta_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp)
91447636
A
2185{
2186 buf_t bp;
2187
2188 /* Get buffer for block. */
2189 bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_META);
2190
2191 /* Wait for the read to complete, and return result. */
2192 return (buf_biowait(bp));
2193}
2194
2195/*
2196 * Read-ahead multiple disk blocks. The first is sync, the rest async.
2197 */
2198errno_t
b0d623f7 2199buf_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp)
1c79356b 2200{
91447636
A
2201 return (do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_READ));
2202}
1c79356b 2203
91447636
A
2204/*
2205 * Read-ahead multiple disk blocks. The first is sync, the rest async.
2206 * [buf_breadn() for meta-data]
2207 */
2208errno_t
b0d623f7 2209buf_meta_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp)
91447636
A
2210{
2211 return (do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_META));
1c79356b
A
2212}
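/*
 * Illustrative sketch (hypothetical block numbers; assumes NOCRED):
 * read one block synchronously and prime the cache with the two that
 * follow it.  Only the first read is waited on; the read-aheads are
 * issued B_ASYNC and released by buf_biodone() when they complete.
 *
 *	daddr64_t rablks[]  = { blkno + 1, blkno + 2 };
 *	int       rasizes[] = { blksize, blksize };
 *	buf_t     bp;
 *	errno_t   error;
 *
 *	error = buf_breadn(vp, blkno, blksize, rablks, rasizes, 2, NOCRED, &bp);
 *	... use the data on success ...
 *	buf_brelse(bp);
 */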
2213
2214/*
2215 * Block write. Described in Bach (p.56)
2216 */
91447636
A
2217errno_t
2218buf_bwrite(buf_t bp)
1c79356b 2219{
91447636
A
2220 int sync, wasdelayed;
2221 errno_t rv;
2222 proc_t p = current_proc();
2223 vnode_t vp = bp->b_vp;
1c79356b 2224
91447636 2225 if (bp->b_datap == 0) {
55e303ae
A
2226 if (brecover_data(bp) == 0)
2227 return (0);
2228 }
1c79356b
A
2229 /* Remember buffer type, to switch on it later. */
2230 sync = !ISSET(bp->b_flags, B_ASYNC);
2231 wasdelayed = ISSET(bp->b_flags, B_DELWRI);
2232 CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));
91447636
A
2233
2234 if (wasdelayed)
b0d623f7 2235 OSAddAtomicLong(-1, &nbdwrite);
1c79356b
A
2236
2237 if (!sync) {
2238 /*
2239 * If not synchronous, pay for the I/O operation and make
2240 * sure the buf is on the correct vnode queue. We have
2241 * to do this now, because if we don't, the vnode may not
2242 * be properly notified that its I/O has completed.
2243 */
2244 if (wasdelayed)
91447636 2245 buf_reassign(bp, vp);
39236c6e
A
2246 else
2247 if (p && p->p_stats) {
2248 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */
39236c6e 2249 }
1c79356b 2250 }
d52fe63f 2251 trace(TR_BUFWRITE, pack(vp, bp->b_bcount), bp->b_lblkno);
1c79356b
A
2252
2253 /* Initiate disk write. Make sure the appropriate party is charged. */
91447636
A
2254
2255 OSAddAtomic(1, &vp->v_numoutput);
1c79356b 2256
91447636 2257 VNOP_STRATEGY(bp);
1c79356b
A
2258
2259 if (sync) {
2260 /*
2261 * If I/O was synchronous, wait for it to complete.
2262 */
91447636 2263 rv = buf_biowait(bp);
1c79356b
A
2264
2265 /*
2266 * Pay for the I/O operation, if it's not been paid for, and
 2267 * make sure it's on the correct vnode queue. (async operations
 2268 * were paid for above.)
2269 */
2270 if (wasdelayed)
91447636 2271 buf_reassign(bp, vp);
1c79356b 2272 else
39236c6e
A
2273 if (p && p->p_stats) {
2274 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */
39236c6e 2275 }
1c79356b
A
2276
2277 /* Release the buffer. */
b4c24cb9
A
2278 // XXXdbg - only if the unused bit is set
2279 if (!ISSET(bp->b_flags, B_NORELSE)) {
91447636 2280 buf_brelse(bp);
b4c24cb9
A
2281 } else {
2282 CLR(bp->b_flags, B_NORELSE);
2283 }
1c79356b
A
2284
2285 return (rv);
2286 } else {
2287 return (0);
2288 }
2289}
2290
2291int
2d21ac55 2292vn_bwrite(struct vnop_bwrite_args *ap)
1c79356b 2293{
91447636 2294 return (buf_bwrite(ap->a_bp));
1c79356b
A
2295}
2296
2297/*
2298 * Delayed write.
2299 *
2300 * The buffer is marked dirty, but is not queued for I/O.
2301 * This routine should be used when the buffer is expected
2302 * to be modified again soon, typically a small write that
2303 * partially fills a buffer.
2304 *
2305 * NB: magnetic tapes cannot be delayed; they must be
2306 * written in the order that the writes are requested.
2307 *
2308 * Described in Leffler, et al. (pp. 208-213).
d52fe63f 2309 *
b0d623f7 2310 * Note: With the ability to allocate additional buffer
d52fe63f 2311 * headers, we can get in to the situation where "too" many
91447636
A
 2312 * buf_bdwrite()s can create a situation where the kernel can create
2313 * buffers faster than the disks can service. Doing a buf_bawrite() in
6d2010ae 2314 * cases where we have "too many" outstanding buf_bdwrite()s avoids that.
1c79356b 2315 */
9bccf70c 2316__private_extern__ int
91447636 2317bdwrite_internal(buf_t bp, int return_error)
1c79356b 2318{
91447636
A
2319 proc_t p = current_proc();
2320 vnode_t vp = bp->b_vp;
1c79356b
A
2321
2322 /*
2323 * If the block hasn't been seen before:
2324 * (1) Mark it as having been seen,
2325 * (2) Charge for the write.
2326 * (3) Make sure it's on its vnode's correct block list,
2327 */
2328 if (!ISSET(bp->b_flags, B_DELWRI)) {
2329 SET(bp->b_flags, B_DELWRI);
39236c6e 2330 if (p && p->p_stats) {
b0d623f7 2331 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */
39236c6e 2332 }
b0d623f7 2333 OSAddAtomicLong(1, &nbdwrite);
91447636 2334 buf_reassign(bp, vp);
1c79356b
A
2335 }
2336
d52fe63f 2337 /*
91447636
A
2338 * if we're not LOCKED, but the total number of delayed writes
 2339 * has climbed above 75% of the total buffers in the system,
 2340 * return an error if the caller has indicated that it can
 2341 * handle one in this case; otherwise schedule the I/O now.
2342 * this is done to prevent us from allocating tons of extra
2343 * buffers when dealing with virtual disks (i.e. DiskImages),
2344 * because additional buffers are dynamically allocated to prevent
2345 * deadlocks from occurring
2346 *
2347 * however, can't do a buf_bawrite() if the LOCKED bit is set because the
2348 * buffer is part of a transaction and can't go to disk until
2349 * the LOCKED bit is cleared.
d52fe63f 2350 */
2d21ac55 2351 if (!ISSET(bp->b_flags, B_LOCKED) && nbdwrite > ((nbuf_headers/4)*3)) {
9bccf70c
A
2352 if (return_error)
2353 return (EAGAIN);
91447636
A
2354 /*
2355 * If the vnode has "too many" write operations in progress
2356 * wait for them to finish the IO
2357 */
2d21ac55 2358 (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, "buf_bdwrite");
91447636
A
2359
2360 return (buf_bawrite(bp));
d52fe63f
A
2361 }
2362
1c79356b
A
2363 /* Otherwise, the "write" is done, so mark and release the buffer. */
2364 SET(bp->b_flags, B_DONE);
91447636 2365 buf_brelse(bp);
9bccf70c 2366 return (0);
1c79356b
A
2367}
2368
91447636
A
2369errno_t
2370buf_bdwrite(buf_t bp)
9bccf70c 2371{
91447636 2372 return (bdwrite_internal(bp, 0));
9bccf70c
A
2373}
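/*
 * Illustrative sketch (assumes the buf_meta_bread()/buf_dataptr()/
 * buf_bdwrite() KPIs and NOCRED): a small update to a metadata block
 * that is expected to be modified again soon is marked dirty and left
 * in the cache rather than written immediately.  Per bdwrite_internal()
 * above, once nbdwrite climbs past 75% of nbuf_headers an unlocked
 * buffer is pushed out with buf_bawrite() instead of being delayed.
 *
 *	error = buf_meta_bread(vp, blkno, blksize, NOCRED, &bp);
 *	if (error == 0) {
 *		... modify a few bytes at (caddr_t)buf_dataptr(bp) ...
 *		(void) buf_bdwrite(bp);
 *	} else
 *		buf_brelse(bp);
 */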
2374
2375
1c79356b 2376/*
91447636 2377 * Asynchronous block write; just an asynchronous buf_bwrite().
d52fe63f
A
2378 *
 2379 * Note: With the ability to allocate additional buffer
 2380 * headers, we can get into the situation where "too" many
91447636 2381 * buf_bawrite()s can create a situation where the kernel can create
d52fe63f
A
2382 * buffers faster than the disks can service.
2383 * We limit the number of "in flight" writes a vnode can have to
2384 * avoid this.
1c79356b 2385 */
9bccf70c 2386static int
91447636 2387bawrite_internal(buf_t bp, int throttle)
1c79356b 2388{
91447636 2389 vnode_t vp = bp->b_vp;
d52fe63f
A
2390
2391 if (vp) {
91447636
A
2392 if (throttle)
2393 /*
2394 * If the vnode has "too many" write operations in progress
2395 * wait for them to finish the IO
2396 */
2397 (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, (const char *)"buf_bawrite");
2398 else if (vp->v_numoutput >= VNODE_ASYNC_THROTTLE)
2399 /*
2400 * return to the caller and
2401 * let him decide what to do
2402 */
2403 return (EWOULDBLOCK);
d52fe63f 2404 }
1c79356b 2405 SET(bp->b_flags, B_ASYNC);
9bccf70c 2406
91447636 2407 return (VNOP_BWRITE(bp));
9bccf70c
A
2408}
2409
91447636
A
2410errno_t
2411buf_bawrite(buf_t bp)
9bccf70c 2412{
91447636 2413 return (bawrite_internal(bp, 1));
1c79356b
A
2414}
2415
91447636 2416
6d2010ae
A
2417
2418static void
2419buf_free_meta_store(buf_t bp)
2420{
2421 if (bp->b_bufsize) {
2422 if (ISSET(bp->b_flags, B_ZALLOC)) {
2423 zone_t z;
2424
2425 z = getbufzone(bp->b_bufsize);
2426 zfree(z, (void *)bp->b_datap);
2427 } else
2428 kmem_free(kernel_map, bp->b_datap, bp->b_bufsize);
2429
2430 bp->b_datap = (uintptr_t)NULL;
2431 bp->b_bufsize = 0;
2432 }
2433}
2434
2435
2436static buf_t
2437buf_brelse_shadow(buf_t bp)
2438{
2439 buf_t bp_head;
2440 buf_t bp_temp;
2441 buf_t bp_return = NULL;
2442#ifdef BUF_MAKE_PRIVATE
2443 buf_t bp_data;
2444 int data_ref = 0;
2445#endif
316670eb
A
2446 int need_wakeup = 0;
2447
6d2010ae
A
2448 lck_mtx_lock_spin(buf_mtxp);
2449
2450 bp_head = (buf_t)bp->b_orig;
2451
2452 if (bp_head->b_whichq != -1)
2453 panic("buf_brelse_shadow: bp_head on freelist %d\n", bp_head->b_whichq);
2454
2455#ifdef BUF_MAKE_PRIVATE
2456 if (bp_data = bp->b_data_store) {
2457 bp_data->b_data_ref--;
2458 /*
2459 * snapshot the ref count so that we can check it
2460 * outside of the lock... we only want the guy going
2461 * from 1 -> 0 to try and release the storage
2462 */
2463 data_ref = bp_data->b_data_ref;
2464 }
2465#endif
2466 KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_START, bp, bp_head, bp_head->b_shadow_ref, 0, 0);
2467
2468 bp_head->b_shadow_ref--;
2469
2470 for (bp_temp = bp_head; bp_temp && bp != bp_temp->b_shadow; bp_temp = bp_temp->b_shadow);
2471
2472 if (bp_temp == NULL)
2473 panic("buf_brelse_shadow: bp not on list %p", bp_head);
2474
2475 bp_temp->b_shadow = bp_temp->b_shadow->b_shadow;
2476
2477#ifdef BUF_MAKE_PRIVATE
2478 /*
2479 * we're about to free the current 'owner' of the data buffer and
2480 * there is at least one other shadow buf_t still pointing at it
2481 * so transfer it to the first shadow buf left in the chain
2482 */
2483 if (bp == bp_data && data_ref) {
2484 if ((bp_data = bp_head->b_shadow) == NULL)
2485 panic("buf_brelse_shadow: data_ref mismatch bp(%p)", bp);
2486
2487 for (bp_temp = bp_data; bp_temp; bp_temp = bp_temp->b_shadow)
2488 bp_temp->b_data_store = bp_data;
2489 bp_data->b_data_ref = data_ref;
2490 }
2491#endif
2492 if (bp_head->b_shadow_ref == 0 && bp_head->b_shadow)
2493 panic("buf_relse_shadow: b_shadow != NULL && b_shadow_ref == 0 bp(%p)", bp);
2494 if (bp_head->b_shadow_ref && bp_head->b_shadow == 0)
2495 panic("buf_relse_shadow: b_shadow == NULL && b_shadow_ref != 0 bp(%p)", bp);
2496
2497 if (bp_head->b_shadow_ref == 0) {
2498 if (!ISSET(bp_head->b_lflags, BL_BUSY)) {
2499
2500 CLR(bp_head->b_flags, B_AGE);
2501 bp_head->b_timestamp = buf_timestamp();
2502
2503 if (ISSET(bp_head->b_flags, B_LOCKED)) {
2504 bp_head->b_whichq = BQ_LOCKED;
2505 binstailfree(bp_head, &bufqueues[BQ_LOCKED], BQ_LOCKED);
2506 } else {
2507 bp_head->b_whichq = BQ_META;
2508 binstailfree(bp_head, &bufqueues[BQ_META], BQ_META);
2509 }
2510 } else if (ISSET(bp_head->b_lflags, BL_WAITSHADOW)) {
2511 CLR(bp_head->b_lflags, BL_WAITSHADOW);
2512
2513 bp_return = bp_head;
2514 }
316670eb
A
2515 if (ISSET(bp_head->b_lflags, BL_WANTED_REF)) {
2516 CLR(bp_head->b_lflags, BL_WANTED_REF);
2517 need_wakeup = 1;
2518 }
6d2010ae
A
2519 }
2520 lck_mtx_unlock(buf_mtxp);
39236c6e
A
2521
2522 if (need_wakeup)
316670eb 2523 wakeup(bp_head);
316670eb 2524
6d2010ae
A
2525#ifdef BUF_MAKE_PRIVATE
2526 if (bp == bp_data && data_ref == 0)
2527 buf_free_meta_store(bp);
2528
2529 bp->b_data_store = NULL;
2530#endif
2531 KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_END, bp, 0, 0, 0, 0);
2532
2533 return (bp_return);
2534}
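/*
 * Summary of the shadow accounting enforced above: each shadow buf_t
 * holds a reference on its master via b_shadow_ref and is linked into
 * the master's b_shadow chain.  Releasing a shadow unlinks it and drops
 * the reference; when the count reaches zero the master is requeued on
 * BQ_META (or BQ_LOCKED if B_LOCKED) unless it is still BL_BUSY, in
 * which case a pending BL_WAITSHADOW release is completed by handing
 * the master back to buf_brelse().  Threads parked in
 * buf_wait_for_shadow_io() (BL_WANTED_REF) are woken once the last
 * shadow reference is dropped.
 */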
2535
2536
1c79356b
A
2537/*
2538 * Release a buffer on to the free lists.
2539 * Described in Bach (p. 46).
2540 */
2541void
91447636 2542buf_brelse(buf_t bp)
1c79356b
A
2543{
2544 struct bqueues *bufq;
91447636
A
2545 long whichq;
2546 upl_t upl;
2547 int need_wakeup = 0;
2548 int need_bp_wakeup = 0;
2549
2550
2551 if (bp->b_whichq != -1 || !(bp->b_lflags & BL_BUSY))
2d21ac55 2552 panic("buf_brelse: bad buffer = %p\n", bp);
91447636
A
2553
2554#ifdef JOE_DEBUG
b0d623f7 2555 (void) OSBacktrace(&bp->b_stackbrelse[0], 6);
91447636
A
2556
2557 bp->b_lastbrelse = current_thread();
2558 bp->b_tag = 0;
2559#endif
2560 if (bp->b_lflags & BL_IOBUF) {
6d2010ae
A
2561 buf_t shadow_master_bp = NULL;
2562
2563 if (ISSET(bp->b_lflags, BL_SHADOW))
2564 shadow_master_bp = buf_brelse_shadow(bp);
2565 else if (ISSET(bp->b_lflags, BL_IOBUF_ALLOC))
2566 buf_free_meta_store(bp);
91447636 2567 free_io_buf(bp);
6d2010ae
A
2568
2569 if (shadow_master_bp) {
2570 bp = shadow_master_bp;
2571 goto finish_shadow_master;
2572 }
91447636
A
2573 return;
2574 }
1c79356b
A
2575
2576 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_START,
b0d623f7 2577 bp->b_lblkno * PAGE_SIZE, bp, bp->b_datap,
fa4905b1 2578 bp->b_flags, 0);
1c79356b
A
2579
2580 trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
2581
91447636
A
2582 /*
2583 * if we're invalidating a buffer that has the B_FILTER bit
2584 * set then call the b_iodone function so it gets cleaned
2585 * up properly.
2586 *
2587 * the HFS journal code depends on this
2588 */
b4c24cb9 2589 if (ISSET(bp->b_flags, B_META) && ISSET(bp->b_flags, B_INVAL)) {
91447636
A
2590 if (ISSET(bp->b_flags, B_FILTER)) { /* if necessary, call out */
2591 void (*iodone_func)(struct buf *, void *) = bp->b_iodone;
6d2010ae 2592 void *arg = bp->b_transaction;
b4c24cb9 2593
91447636 2594 CLR(bp->b_flags, B_FILTER); /* but note callout done */
b4c24cb9 2595 bp->b_iodone = NULL;
91447636 2596 bp->b_transaction = NULL;
b4c24cb9
A
2597
2598 if (iodone_func == NULL) {
2d21ac55 2599 panic("brelse: bp @ %p has NULL b_iodone!\n", bp);
b4c24cb9 2600 }
91447636 2601 (*iodone_func)(bp, arg);
b4c24cb9
A
2602 }
2603 }
91447636
A
2604 /*
2605 * I/O is done. Cleanup the UPL state
2606 */
2607 upl = bp->b_upl;
2608
2609 if ( !ISSET(bp->b_flags, B_META) && UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) {
1c79356b 2610 kern_return_t kret;
1c79356b
A
2611 int upl_flags;
2612
6d2010ae 2613 if (upl == NULL) {
1c79356b 2614 if ( !ISSET(bp->b_flags, B_INVAL)) {
0b4e3aa0 2615 kret = ubc_create_upl(bp->b_vp,
91447636
A
2616 ubc_blktooff(bp->b_vp, bp->b_lblkno),
2617 bp->b_bufsize,
2618 &upl,
2619 NULL,
2620 UPL_PRECIOUS);
2621
1c79356b 2622 if (kret != KERN_SUCCESS)
91447636 2623 panic("brelse: Failed to create UPL");
b0d623f7
A
2624#if UPL_DEBUG
2625 upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 5);
91447636
A
2626#endif /* UPL_DEBUG */
2627 }
1c79356b 2628 } else {
91447636 2629 if (bp->b_datap) {
55e303ae
A
2630 kret = ubc_upl_unmap(upl);
2631
2632 if (kret != KERN_SUCCESS)
91447636
A
2633 panic("ubc_upl_unmap failed");
2634 bp->b_datap = (uintptr_t)NULL;
55e303ae 2635 }
1c79356b
A
2636 }
2637 if (upl) {
1c79356b 2638 if (bp->b_flags & (B_ERROR | B_INVAL)) {
91447636 2639 if (bp->b_flags & (B_READ | B_INVAL))
1c79356b
A
2640 upl_flags = UPL_ABORT_DUMP_PAGES;
2641 else
2642 upl_flags = 0;
91447636 2643
0b4e3aa0 2644 ubc_upl_abort(upl, upl_flags);
1c79356b 2645 } else {
91447636
A
2646 if (ISSET(bp->b_flags, B_DELWRI | B_WASDIRTY))
2647 upl_flags = UPL_COMMIT_SET_DIRTY ;
2648 else
2649 upl_flags = UPL_COMMIT_CLEAR_DIRTY ;
2650
0b4e3aa0 2651 ubc_upl_commit_range(upl, 0, bp->b_bufsize, upl_flags |
91447636 2652 UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
1c79356b 2653 }
91447636 2654 bp->b_upl = NULL;
1c79356b
A
2655 }
2656 } else {
91447636 2657 if ( (upl) )
2d21ac55 2658 panic("brelse: UPL set for non VREG; vp=%p", bp->b_vp);
1c79356b
A
2659 }
2660
1c79356b 2661 /*
91447636 2662 * If it's locked, don't report an error; try again later.
1c79356b 2663 */
1c79356b
A
2664 if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
2665 CLR(bp->b_flags, B_ERROR);
91447636
A
2666 /*
2667 * If it's not cacheable, or an error, mark it invalid.
2668 */
1c79356b
A
2669 if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
2670 SET(bp->b_flags, B_INVAL);
91447636 2671
b0d623f7
A
2672 if ((bp->b_bufsize <= 0) ||
2673 ISSET(bp->b_flags, B_INVAL) ||
2674 (ISSET(bp->b_lflags, BL_WANTDEALLOC) && !ISSET(bp->b_flags, B_DELWRI))) {
6d2010ae
A
2675
2676 boolean_t delayed_buf_free_meta_store = FALSE;
2677
1c79356b 2678 /*
2d21ac55
A
2679 * If it's invalid or empty, dissociate it from its vnode,
2680 * release its storage if B_META, and
2681 * clean it up a bit and put it on the EMPTY queue
1c79356b 2682 */
91447636 2683 if (ISSET(bp->b_flags, B_DELWRI))
b0d623f7 2684 OSAddAtomicLong(-1, &nbdwrite);
91447636 2685
2d21ac55 2686 if (ISSET(bp->b_flags, B_META)) {
6d2010ae
A
2687 if (bp->b_shadow_ref)
2688 delayed_buf_free_meta_store = TRUE;
2689 else
2690 buf_free_meta_store(bp);
2d21ac55 2691 }
91447636 2692 /*
2d21ac55 2693 * nuke any credentials we were holding
91447636 2694 */
6d2010ae
A
2695 buf_release_credentials(bp);
2696
2697 lck_mtx_lock_spin(buf_mtxp);
2698
2699 if (bp->b_shadow_ref) {
2700 SET(bp->b_lflags, BL_WAITSHADOW);
2701
2702 lck_mtx_unlock(buf_mtxp);
2703
2704 return;
2d21ac55 2705 }
6d2010ae 2706 if (delayed_buf_free_meta_store == TRUE) {
91447636 2707
6d2010ae
A
2708 lck_mtx_unlock(buf_mtxp);
2709finish_shadow_master:
2710 buf_free_meta_store(bp);
91447636 2711
6d2010ae
A
2712 lck_mtx_lock_spin(buf_mtxp);
2713 }
2714 CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
2d21ac55
A
2715
2716 if (bp->b_vp)
2717 brelvp_locked(bp);
2718
2719 bremhash(bp);
2720 BLISTNONE(bp);
2721 binshash(bp, &invalhash);
2722
6d2010ae
A
2723 bp->b_whichq = BQ_EMPTY;
2724 binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
1c79356b 2725 } else {
6d2010ae 2726
1c79356b
A
2727 /*
2728 * It has valid data. Put it on the end of the appropriate
2729 * queue, so that it'll stick around for as long as possible.
2730 */
2731 if (ISSET(bp->b_flags, B_LOCKED))
2732 whichq = BQ_LOCKED; /* locked in core */
2733 else if (ISSET(bp->b_flags, B_META))
2734 whichq = BQ_META; /* meta-data */
2735 else if (ISSET(bp->b_flags, B_AGE))
2736 whichq = BQ_AGE; /* stale but valid data */
2737 else
2738 whichq = BQ_LRU; /* valid data */
1c79356b 2739 bufq = &bufqueues[whichq];
91447636 2740
2d21ac55 2741 bp->b_timestamp = buf_timestamp();
91447636 2742
6d2010ae
A
2743 lck_mtx_lock_spin(buf_mtxp);
2744
2745 /*
2746 * the buf_brelse_shadow routine doesn't take 'ownership'
2747 * of the parent buf_t... it updates state that is protected by
2748 * the buf_mtxp, and checks for BL_BUSY to determine whether to
2749 * put the buf_t back on a free list. b_shadow_ref is protected
2750 * by the lock, and since we have not yet cleared B_BUSY, we need
 2751 * to check it while holding the lock to ensure that one of us
2752 * puts this buf_t back on a free list when it is safe to do so
2753 */
2754 if (bp->b_shadow_ref == 0) {
2755 CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE));
2756 bp->b_whichq = whichq;
2757 binstailfree(bp, bufq, whichq);
2758 } else {
2759 /*
2760 * there are still cloned buf_t's pointing
2761 * at this guy... need to keep it off the
2762 * freelists until a buf_brelse is done on
2763 * the last clone
2764 */
2765 CLR(bp->b_flags, (B_ASYNC | B_NOCACHE));
2766 }
1c79356b 2767 }
91447636
A
2768 if (needbuffer) {
2769 /*
2770 * needbuffer is a global
2771 * we're currently using buf_mtxp to protect it
2772 * delay doing the actual wakeup until after
2773 * we drop buf_mtxp
2774 */
2775 needbuffer = 0;
2776 need_wakeup = 1;
2777 }
2778 if (ISSET(bp->b_lflags, BL_WANTED)) {
2779 /*
2780 * delay the actual wakeup until after we
2781 * clear BL_BUSY and we've dropped buf_mtxp
2782 */
2783 need_bp_wakeup = 1;
2784 }
2785 /*
2786 * Unlock the buffer.
2787 */
2788 CLR(bp->b_lflags, (BL_BUSY | BL_WANTED));
b0d623f7 2789 buf_busycount--;
1c79356b 2790
91447636 2791 lck_mtx_unlock(buf_mtxp);
1c79356b 2792
91447636
A
2793 if (need_wakeup) {
2794 /*
2795 * Wake up any processes waiting for any buffer to become free.
2796 */
2797 wakeup(&needbuffer);
2798 }
2799 if (need_bp_wakeup) {
2800 /*
 2801 * Wake up any processes waiting for _this_ buffer to become free.
2802 */
2803 wakeup(bp);
2804 }
1c79356b 2805 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_END,
b0d623f7 2806 bp, bp->b_datap, bp->b_flags, 0, 0);
1c79356b
A
2807}
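/*
 * Requeueing rules applied above: a buffer that is invalid, empty, or
 * marked BL_WANTDEALLOC (and not delayed-write) is stripped of its
 * vnode, hash entry and backing store and parked on BQ_EMPTY;
 * otherwise it goes to the tail of BQ_LOCKED (B_LOCKED), BQ_META
 * (B_META), BQ_AGE (B_AGE) or BQ_LRU, provided no shadow references
 * remain.  Waiters on needbuffer or on the buffer itself are woken
 * only after BL_BUSY is cleared and buf_mtxp is dropped.
 */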
2808
2809/*
2810 * Determine if a block is in the cache.
2811 * Just look on what would be its hash chain. If it's there, return
2812 * a pointer to it, unless it's marked invalid. If it's marked invalid,
2813 * we normally don't return the buffer, unless the caller explicitly
2814 * wants us to.
2815 */
91447636
A
2816static boolean_t
2817incore(vnode_t vp, daddr64_t blkno)
2818{
2819 boolean_t retval;
2d21ac55 2820 struct bufhashhdr *dp;
91447636 2821
2d21ac55 2822 dp = BUFHASH(vp, blkno);
91447636 2823
2d21ac55
A
2824 lck_mtx_lock_spin(buf_mtxp);
2825
2826 if (incore_locked(vp, blkno, dp))
91447636
A
2827 retval = TRUE;
2828 else
2829 retval = FALSE;
2830 lck_mtx_unlock(buf_mtxp);
2831
2832 return (retval);
2833}
2834
2835
2836static buf_t
2d21ac55 2837incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp)
1c79356b
A
2838{
2839 struct buf *bp;
1c79356b 2840
1c79356b 2841 /* Search hash chain */
2d21ac55 2842 for (bp = dp->lh_first; bp != NULL; bp = bp->b_hash.le_next) {
1c79356b 2843 if (bp->b_lblkno == blkno && bp->b_vp == vp &&
91447636 2844 !ISSET(bp->b_flags, B_INVAL)) {
1c79356b 2845 return (bp);
91447636 2846 }
1c79356b 2847 }
2d21ac55 2848 return (NULL);
1c79356b
A
2849}
2850
39236c6e 2851
316670eb
A
2852void
2853buf_wait_for_shadow_io(vnode_t vp, daddr64_t blkno)
2854{
2855 buf_t bp;
2856 struct bufhashhdr *dp;
2857
2858 dp = BUFHASH(vp, blkno);
2859
2860 lck_mtx_lock_spin(buf_mtxp);
2861
2862 for (;;) {
2863 if ((bp = incore_locked(vp, blkno, dp)) == NULL)
2864 break;
2865
2866 if (bp->b_shadow_ref == 0)
2867 break;
2868
2869 SET(bp->b_lflags, BL_WANTED_REF);
fa4905b1 2870
316670eb
A
2871 (void) msleep(bp, buf_mtxp, PSPIN | (PRIBIO+1), "buf_wait_for_shadow", NULL);
2872 }
2873 lck_mtx_unlock(buf_mtxp);
2874}
2875
fa4905b1 2876/* XXX FIXME -- Update the comment to reflect the UBC changes (please) -- */
1c79356b
A
2877/*
2878 * Get a block of requested size that is associated with
2879 * a given vnode and block offset. If it is found in the
2880 * block cache, mark it as having been found, make it busy
2881 * and return it. Otherwise, return an empty block of the
 2882 * correct size. It is up to the caller to ensure that the
 2883 * cached blocks are of the correct size.
2884 */
91447636
A
2885buf_t
2886buf_getblk(vnode_t vp, daddr64_t blkno, int size, int slpflag, int slptimeo, int operation)
1c79356b 2887{
91447636
A
2888 buf_t bp;
2889 int err;
1c79356b
A
2890 upl_t upl;
2891 upl_page_info_t *pl;
1c79356b 2892 kern_return_t kret;
91447636
A
2893 int ret_only_valid;
2894 struct timespec ts;
2895 int upl_flags;
2d21ac55 2896 struct bufhashhdr *dp;
1c79356b 2897
1c79356b 2898 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_START,
b0d623f7 2899 (uintptr_t)(blkno * PAGE_SIZE), size, operation, 0, 0);
1c79356b 2900
91447636
A
2901 ret_only_valid = operation & BLK_ONLYVALID;
2902 operation &= ~BLK_ONLYVALID;
2d21ac55 2903 dp = BUFHASH(vp, blkno);
91447636 2904start:
2d21ac55 2905 lck_mtx_lock_spin(buf_mtxp);
b0d623f7 2906
2d21ac55 2907 if ((bp = incore_locked(vp, blkno, dp))) {
91447636
A
2908 /*
2909 * Found in the Buffer Cache
2910 */
2911 if (ISSET(bp->b_lflags, BL_BUSY)) {
2912 /*
2913 * but is busy
2914 */
1c79356b
A
2915 switch (operation) {
2916 case BLK_READ:
2917 case BLK_WRITE:
2918 case BLK_META:
91447636 2919 SET(bp->b_lflags, BL_WANTED);
1c79356b 2920 bufstats.bufs_busyincore++;
91447636
A
2921
2922 /*
2923 * don't retake the mutex after being awakened...
2924 * the time out is in msecs
2925 */
2926 ts.tv_sec = (slptimeo/1000);
2927 ts.tv_nsec = (slptimeo % 1000) * 10 * NSEC_PER_USEC * 1000;
2928
b0d623f7
A
2929 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 396)) | DBG_FUNC_NONE,
2930 (uintptr_t)blkno, size, operation, 0, 0);
2931
91447636
A
2932 err = msleep(bp, buf_mtxp, slpflag | PDROP | (PRIBIO + 1), "buf_getblk", &ts);
2933
1c79356b
A
2934 /*
2935 * Callers who call with PCATCH or timeout are
2936 * willing to deal with the NULL pointer
2937 */
91447636 2938 if (err && ((slpflag & PCATCH) || ((err == EWOULDBLOCK) && slptimeo)))
1c79356b
A
2939 return (NULL);
2940 goto start;
2941 /*NOTREACHED*/
2942 break;
2943
1c79356b 2944 default:
91447636
A
2945 /*
2946 * unknown operation requested
2947 */
2948 panic("getblk: paging or unknown operation for incore busy buffer - %x\n", operation);
1c79356b
A
2949 /*NOTREACHED*/
2950 break;
2951 }
2952 } else {
91447636
A
2953 /*
2954 * buffer in core and not busy
2955 */
91447636
A
2956 SET(bp->b_lflags, BL_BUSY);
2957 SET(bp->b_flags, B_CACHE);
b0d623f7 2958 buf_busycount++;
2d21ac55 2959
91447636 2960 bremfree_locked(bp);
1c79356b 2961 bufstats.bufs_incore++;
91447636
A
2962
2963 lck_mtx_unlock(buf_mtxp);
2d21ac55
A
2964#ifdef JOE_DEBUG
2965 bp->b_owner = current_thread();
2966 bp->b_tag = 1;
2967#endif
2968 if ( (bp->b_upl) )
2969 panic("buffer has UPL, but not marked BUSY: %p", bp);
1c79356b 2970
2d21ac55 2971 if ( !ret_only_valid && bp->b_bufsize != size)
91447636 2972 allocbuf(bp, size);
1c79356b 2973
91447636 2974 upl_flags = 0;
1c79356b 2975 switch (operation) {
1c79356b 2976 case BLK_WRITE:
91447636
A
2977 /*
2978 * "write" operation: let the UPL subsystem
2979 * know that we intend to modify the buffer
2980 * cache pages we're gathering.
2981 */
2982 upl_flags |= UPL_WILL_MODIFY;
2983 case BLK_READ:
2984 upl_flags |= UPL_PRECIOUS;
2985 if (UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) {
0b4e3aa0 2986 kret = ubc_create_upl(vp,
91447636
A
2987 ubc_blktooff(vp, bp->b_lblkno),
2988 bp->b_bufsize,
2989 &upl,
2990 &pl,
2991 upl_flags);
1c79356b 2992 if (kret != KERN_SUCCESS)
91447636 2993 panic("Failed to create UPL");
1c79356b 2994
91447636 2995 bp->b_upl = upl;
1c79356b 2996
91447636
A
2997 if (upl_valid_page(pl, 0)) {
2998 if (upl_dirty_page(pl, 0))
2999 SET(bp->b_flags, B_WASDIRTY);
3000 else
3001 CLR(bp->b_flags, B_WASDIRTY);
3002 } else
3003 CLR(bp->b_flags, (B_DONE | B_CACHE | B_WASDIRTY | B_DELWRI));
1c79356b 3004
b0d623f7 3005 kret = ubc_upl_map(upl, (vm_offset_t*)&(bp->b_datap));
1c79356b 3006
9bccf70c 3007 if (kret != KERN_SUCCESS)
91447636 3008 panic("getblk: ubc_upl_map() failed with (%d)", kret);
1c79356b
A
3009 }
3010 break;
3011
3012 case BLK_META:
3013 /*
3014 * VM is not involved in IO for the meta data
3015 * buffer already has valid data
3016 */
1c79356b
A
3017 break;
3018
3019 default:
91447636 3020 panic("getblk: paging or unknown operation for incore buffer- %d\n", operation);
1c79356b
A
3021 /*NOTREACHED*/
3022 break;
3023 }
3024 }
3025 } else { /* not incore() */
3026 int queue = BQ_EMPTY; /* Start with no preference */
1c79356b 3027
91447636
A
3028 if (ret_only_valid) {
3029 lck_mtx_unlock(buf_mtxp);
3030 return (NULL);
1c79356b 3031 }
2d21ac55 3032 if ((vnode_isreg(vp) == 0) || (UBCINFOEXISTS(vp) == 0) /*|| (vnode_issystem(vp) == 1)*/)
91447636
A
3033 operation = BLK_META;
3034
1c79356b 3035 if ((bp = getnewbuf(slpflag, slptimeo, &queue)) == NULL)
b0d623f7 3036 goto start;
91447636
A
3037
3038 /*
3039 * getnewbuf may block for a number of different reasons...
3040 * if it does, it's then possible for someone else to
3041 * create a buffer for the same block and insert it into
3042 * the hash... if we see it incore at this point we dump
3043 * the buffer we were working on and start over
3044 */
2d21ac55 3045 if (incore_locked(vp, blkno, dp)) {
0b4e3aa0
A
3046 SET(bp->b_flags, B_INVAL);
3047 binshash(bp, &invalhash);
91447636
A
3048
3049 lck_mtx_unlock(buf_mtxp);
3050
3051 buf_brelse(bp);
0b4e3aa0
A
3052 goto start;
3053 }
b4c24cb9
A
3054 /*
3055 * NOTE: YOU CAN NOT BLOCK UNTIL binshash() HAS BEEN
3056 * CALLED! BE CAREFUL.
3057 */
0b4e3aa0 3058
1c79356b 3059 /*
91447636 3060 * mark the buffer as B_META if indicated
1c79356b 3061 * so that when buffer is released it will goto META queue
1c79356b 3062 */
91447636
A
3063 if (operation == BLK_META)
3064 SET(bp->b_flags, B_META);
9bccf70c
A
3065
3066 bp->b_blkno = bp->b_lblkno = blkno;
3067 bp->b_vp = vp;
3068
0b4e3aa0
A
3069 /*
3070 * Insert in the hash so that incore() can find it
3071 */
3072 binshash(bp, BUFHASH(vp, blkno));
3073
2d21ac55 3074 bgetvp_locked(vp, bp);
91447636 3075
2d21ac55 3076 lck_mtx_unlock(buf_mtxp);
9bccf70c 3077
1c79356b
A
3078 allocbuf(bp, size);
3079
91447636 3080 upl_flags = 0;
1c79356b
A
3081 switch (operation) {
3082 case BLK_META:
91447636
A
3083 /*
3084 * buffer data is invalid...
3085 *
3086 * I don't want to have to retake buf_mtxp,
3087 * so the miss and vmhits counters are done
3088 * with Atomic updates... all other counters
3089 * in bufstats are protected with either
3090 * buf_mtxp or iobuffer_mtxp
3091 */
b0d623f7 3092 OSAddAtomicLong(1, &bufstats.bufs_miss);
1c79356b
A
3093 break;
3094
1c79356b 3095 case BLK_WRITE:
91447636
A
3096 /*
3097 * "write" operation: let the UPL subsystem know
3098 * that we intend to modify the buffer cache pages
3099 * we're gathering.
3100 */
3101 upl_flags |= UPL_WILL_MODIFY;
3102 case BLK_READ:
3103 { off_t f_offset;
3104 size_t contig_bytes;
3105 int bmap_flags;
1c79356b 3106
91447636 3107 if ( (bp->b_upl) )
2d21ac55 3108 panic("bp already has UPL: %p",bp);
1c79356b 3109
91447636
A
3110 f_offset = ubc_blktooff(vp, blkno);
3111
3112 upl_flags |= UPL_PRECIOUS;
0b4e3aa0 3113 kret = ubc_create_upl(vp,
91447636
A
3114 f_offset,
3115 bp->b_bufsize,
3116 &upl,
3117 &pl,
3118 upl_flags);
1c79356b 3119
91447636
A
3120 if (kret != KERN_SUCCESS)
3121 panic("Failed to create UPL");
b0d623f7
A
3122#if UPL_DEBUG
3123 upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 4);
91447636
A
3124#endif /* UPL_DEBUG */
3125 bp->b_upl = upl;
1c79356b
A
3126
3127 if (upl_valid_page(pl, 0)) {
1c79356b 3128
91447636
A
3129 if (operation == BLK_READ)
3130 bmap_flags = VNODE_READ;
3131 else
3132 bmap_flags = VNODE_WRITE;
1c79356b 3133
91447636 3134 SET(bp->b_flags, B_CACHE | B_DONE);
1c79356b 3135
b0d623f7 3136 OSAddAtomicLong(1, &bufstats.bufs_vmhits);
1c79356b 3137
91447636
A
3138 bp->b_validoff = 0;
3139 bp->b_dirtyoff = 0;
1c79356b 3140
91447636
A
3141 if (upl_dirty_page(pl, 0)) {
3142 /* page is dirty */
3143 SET(bp->b_flags, B_WASDIRTY);
1c79356b 3144
91447636
A
3145 bp->b_validend = bp->b_bcount;
3146 bp->b_dirtyend = bp->b_bcount;
1c79356b 3147 } else {
91447636
A
3148 /* page is clean */
3149 bp->b_validend = bp->b_bcount;
3150 bp->b_dirtyend = 0;
1c79356b 3151 }
91447636
A
3152 /*
3153 * try to recreate the physical block number associated with
3154 * this buffer...
3155 */
3156 if (VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))
3157 panic("getblk: VNOP_BLOCKMAP failed");
3158 /*
3159 * if the extent represented by this buffer
3160 * is not completely physically contiguous on
 3161 * disk, then we can't cache the physical mapping
3162 * in the buffer header
3163 */
3164 if ((long)contig_bytes < bp->b_bcount)
3165 bp->b_blkno = bp->b_lblkno;
1c79356b 3166 } else {
b0d623f7 3167 OSAddAtomicLong(1, &bufstats.bufs_miss);
1c79356b 3168 }
b0d623f7 3169 kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap));
1c79356b 3170
91447636
A
3171 if (kret != KERN_SUCCESS)
3172 panic("getblk: ubc_upl_map() failed with (%d)", kret);
1c79356b 3173 break;
91447636 3174 }
1c79356b 3175 default:
91447636 3176 panic("getblk: paging or unknown operation - %x", operation);
1c79356b
A
3177 /*NOTREACHED*/
3178 break;
3179 }
3180 }
1c79356b 3181 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_END,
b0d623f7 3182 bp, bp->b_datap, bp->b_flags, 3, 0);
91447636
A
3183
3184#ifdef JOE_DEBUG
b0d623f7 3185 (void) OSBacktrace(&bp->b_stackgetblk[0], 6);
91447636 3186#endif
1c79356b
A
3187 return (bp);
3188}
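/*
 * Illustrative sketch (assumes the buf_getblk()/buf_valid()/
 * buf_setflags() KPIs): grab a metadata block and only schedule a read
 * when the cached copy is not already valid -- essentially what
 * bio_doread() does on behalf of buf_meta_bread().
 *
 *	bp = buf_getblk(vp, blkno, blksize, 0, 0, BLK_META);
 *	if (!buf_valid(bp)) {
 *		buf_setflags(bp, B_READ);
 *		VNOP_STRATEGY(bp);
 *		error = buf_biowait(bp);
 *	}
 *	... use or modify the buffer ...
 *	buf_brelse(bp);
 */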
3189
3190/*
3191 * Get an empty, disassociated buffer of given size.
3192 */
91447636 3193buf_t
2d21ac55 3194buf_geteblk(int size)
1c79356b 3195{
b0d623f7 3196 buf_t bp = NULL;
91447636
A
3197 int queue = BQ_EMPTY;
3198
b0d623f7
A
3199 do {
3200 lck_mtx_lock_spin(buf_mtxp);
3201
3202 bp = getnewbuf(0, 0, &queue);
3203 } while (bp == NULL);
1c79356b 3204
1c79356b 3205 SET(bp->b_flags, (B_META|B_INVAL));
1c79356b
A
3206
3207#if DIAGNOSTIC
3208 assert(queue == BQ_EMPTY);
3209#endif /* DIAGNOSTIC */
3210 /* XXX need to implement logic to deal with other queues */
3211
1c79356b 3212 binshash(bp, &invalhash);
1c79356b
A
3213 bufstats.bufs_eblk++;
3214
91447636
A
3215 lck_mtx_unlock(buf_mtxp);
3216
3217 allocbuf(bp, size);
3218
1c79356b
A
3219 return (bp);
3220}
3221
6d2010ae
A
3222uint32_t
3223buf_redundancy_flags(buf_t bp)
3224{
3225 return bp->b_redundancy_flags;
3226}
3227
3228void
3229buf_set_redundancy_flags(buf_t bp, uint32_t flags)
3230{
3231 SET(bp->b_redundancy_flags, flags);
3232}
3233
3234void
3235buf_clear_redundancy_flags(buf_t bp, uint32_t flags)
3236{
3237 CLR(bp->b_redundancy_flags, flags);
3238}
1c79356b 3239
fe8ab488
A
3240
3241
3242static void *
3243recycle_buf_from_pool(int nsize)
3244{
3245 buf_t bp;
3246 void *ptr = NULL;
3247
3248 lck_mtx_lock_spin(buf_mtxp);
3249
3250 TAILQ_FOREACH(bp, &bufqueues[BQ_META], b_freelist) {
3251 if (ISSET(bp->b_flags, B_DELWRI) || bp->b_bufsize != nsize)
3252 continue;
3253 ptr = (void *)bp->b_datap;
3254 bp->b_bufsize = 0;
3255
3256 bcleanbuf(bp, TRUE);
3257 break;
3258 }
3259 lck_mtx_unlock(buf_mtxp);
3260
3261 return (ptr);
3262}
3263
3264
3265
3266int zalloc_nopagewait_failed = 0;
3267int recycle_buf_failed = 0;
3268
3269static void *
3270grab_memory_for_meta_buf(int nsize)
3271{
3272 zone_t z;
3273 void *ptr;
3274 boolean_t was_vmpriv;
3275
3276 z = getbufzone(nsize);
3277
3278 /*
 3279 * make sure we're NOT privileged so that
3280 * if a vm_page_grab is needed, it won't
3281 * block if we're out of free pages... if
3282 * it blocks, then we can't honor the
3283 * nopagewait request
3284 */
3285 was_vmpriv = set_vm_privilege(FALSE);
3286
3287 ptr = zalloc_nopagewait(z);
3288
3289 if (was_vmpriv == TRUE)
3290 set_vm_privilege(TRUE);
3291
3292 if (ptr == NULL) {
3293
3294 zalloc_nopagewait_failed++;
3295
3296 ptr = recycle_buf_from_pool(nsize);
3297
3298 if (ptr == NULL) {
3299
3300 recycle_buf_failed++;
3301
3302 if (was_vmpriv == FALSE)
3303 set_vm_privilege(TRUE);
3304
3305 ptr = zalloc(z);
3306
3307 if (was_vmpriv == FALSE)
3308 set_vm_privilege(FALSE);
3309 }
3310 }
3311 return (ptr);
3312}
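/*
 * Allocation fallback order implemented above: (1) zalloc_nopagewait()
 * with VM privilege dropped, so the attempt fails rather than blocks
 * when free pages are scarce; (2) recycle the data of a clean,
 * same-sized buffer from BQ_META via recycle_buf_from_pool(); (3) as a
 * last resort, a blocking zalloc() with VM privilege temporarily
 * granted and then restored.
 */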
3313
1c79356b
A
3314/*
3315 * With UBC, there is no need to expand / shrink the file data
3316 * buffer. The VM uses the same pages, hence no waste.
3317 * All the file data buffers can have one size.
3318 * In fact expand / shrink would be an expensive operation.
3319 *
3320 * Only exception to this is meta-data buffers. Most of the
3321 * meta data operations are smaller than PAGE_SIZE. Having the
3322 * meta-data buffers grow and shrink as needed, optimizes use
3323 * of the kernel wired memory.
3324 */
3325
3326int
91447636 3327allocbuf(buf_t bp, int size)
1c79356b
A
3328{
3329 vm_size_t desired_size;
3330
3331 desired_size = roundup(size, CLBYTES);
3332
91447636 3333 if (desired_size < PAGE_SIZE)
1c79356b
A
3334 desired_size = PAGE_SIZE;
3335 if (desired_size > MAXBSIZE)
3336 panic("allocbuf: buffer larger than MAXBSIZE requested");
3337
1c79356b 3338 if (ISSET(bp->b_flags, B_META)) {
91447636
A
3339 int nsize = roundup(size, MINMETA);
3340
3341 if (bp->b_datap) {
3342 vm_offset_t elem = (vm_offset_t)bp->b_datap;
3343
3344 if (ISSET(bp->b_flags, B_ZALLOC)) {
3345 if (bp->b_bufsize < nsize) {
fe8ab488
A
3346 zone_t zprev;
3347
91447636
A
3348 /* reallocate to a bigger size */
3349
3350 zprev = getbufzone(bp->b_bufsize);
3351 if (nsize <= MAXMETA) {
3352 desired_size = nsize;
fe8ab488 3353
2d21ac55 3354 /* b_datap not really a ptr */
fe8ab488 3355 *(void **)(&bp->b_datap) = grab_memory_for_meta_buf(nsize);
1c79356b 3356 } else {
91447636 3357 bp->b_datap = (uintptr_t)NULL;
b0d623f7 3358 kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size);
91447636 3359 CLR(bp->b_flags, B_ZALLOC);
1c79356b 3360 }
91447636
A
3361 bcopy((void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize);
3362 zfree(zprev, (void *)elem);
3363 } else {
3364 desired_size = bp->b_bufsize;
3365 }
3366
3367 } else {
3368 if ((vm_size_t)bp->b_bufsize < desired_size) {
1c79356b 3369 /* reallocate to a bigger size */
91447636 3370 bp->b_datap = (uintptr_t)NULL;
b0d623f7 3371 kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size);
91447636 3372 bcopy((const void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize);
1c79356b
A
3373 kmem_free(kernel_map, elem, bp->b_bufsize);
3374 } else {
3375 desired_size = bp->b_bufsize;
3376 }
91447636 3377 }
1c79356b
A
3378 } else {
3379 /* new allocation */
3380 if (nsize <= MAXMETA) {
3381 desired_size = nsize;
fe8ab488 3382
2d21ac55 3383 /* b_datap not really a ptr */
fe8ab488 3384 *(void **)(&bp->b_datap) = grab_memory_for_meta_buf(nsize);
1c79356b 3385 SET(bp->b_flags, B_ZALLOC);
91447636 3386 } else
b0d623f7 3387 kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size);
1c79356b 3388 }
2d21ac55
A
3389
3390 if (bp->b_datap == 0)
3391 panic("allocbuf: NULL b_datap");
1c79356b 3392 }
9bccf70c
A
3393 bp->b_bufsize = desired_size;
3394 bp->b_bcount = size;
91447636 3395
9bccf70c 3396 return (0);
1c79356b
A
3397}
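/*
 * Worked example for the B_META path above (hypothetical request): a
 * 1536-byte metadata buffer rounds to nsize = roundup(1536, MINMETA) =
 * 1536, which is <= MAXMETA, so its storage is carved from the
 * "buf.2048" zone and b_bufsize is recorded as 1536.  A request larger
 * than MAXMETA (8192) is satisfied with kmem_alloc_kobject() instead
 * and the buffer is not marked B_ZALLOC.
 */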
3398
3399/*
3400 * Get a new buffer from one of the free lists.
3401 *
 3402 * A request for a queue is passed in. The queue from which the buffer was
 3403 * taken is returned. Out of range queue requests get BQ_EMPTY. A request for
 3404 * BQUEUE means no preference. Use heuristics in that case.
 3405 * The heuristic is as follows:
3406 * Try BQ_AGE, BQ_LRU, BQ_EMPTY, BQ_META in that order.
3407 * If none available block till one is made available.
3408 * If buffers available on both BQ_AGE and BQ_LRU, check the timestamps.
3409 * Pick the most stale buffer.
3410 * If found buffer was marked delayed write, start the async. write
3411 * and restart the search.
3412 * Initialize the fields and disassociate the buffer from the vnode.
3413 * Remove the buffer from the hash. Return the buffer and the queue
3414 * on which it was found.
91447636
A
3415 *
3416 * buf_mtxp is held upon entry
b0d623f7
A
3417 * returns with buf_mtxp locked if new buf available
3418 * returns with buf_mtxp UNlocked if new buf NOT available
1c79356b
A
3419 */
3420
91447636
A
3421static buf_t
3422getnewbuf(int slpflag, int slptimeo, int * queue)
1c79356b 3423{
91447636
A
3424 buf_t bp;
3425 buf_t lru_bp;
3426 buf_t age_bp;
3427 buf_t meta_bp;
3428 int age_time, lru_time, bp_time, meta_time;
3429 int req = *queue; /* save it for restarts */
3430 struct timespec ts;
1c79356b
A
3431
3432start:
91447636
A
3433 /*
3434 * invalid request gets empty queue
3435 */
2d21ac55 3436 if ((*queue >= BQUEUES) || (*queue < 0)
765c9de3 3437 || (*queue == BQ_LAUNDRY) || (*queue == BQ_LOCKED))
1c79356b 3438 *queue = BQ_EMPTY;
2d21ac55
A
3439
3440
3441 if (*queue == BQ_EMPTY && (bp = bufqueues[*queue].tqh_first))
3442 goto found;
3443
3444 /*
3445 * need to grow number of bufs, add another one rather than recycling
3446 */
3447 if (nbuf_headers < max_nbuf_headers) {
0c530ab8
A
3448 /*
3449 * Increment count now as lock
3450 * is dropped for allocation.
3451 * That avoids over commits
3452 */
2d21ac55 3453 nbuf_headers++;
0c530ab8
A
3454 goto add_newbufs;
3455 }
2d21ac55
A
3456 /* Try for the requested queue first */
3457 bp = bufqueues[*queue].tqh_first;
3458 if (bp)
3459 goto found;
1c79356b
A
3460
3461 /* Unable to use requested queue */
3462 age_bp = bufqueues[BQ_AGE].tqh_first;
3463 lru_bp = bufqueues[BQ_LRU].tqh_first;
3464 meta_bp = bufqueues[BQ_META].tqh_first;
3465
9bccf70c
A
3466 if (!age_bp && !lru_bp && !meta_bp) {
3467 /*
 3468 * Unavailable on AGE or LRU or META queues
3469 * Try the empty list first
3470 */
1c79356b
A
3471 bp = bufqueues[BQ_EMPTY].tqh_first;
3472 if (bp) {
3473 *queue = BQ_EMPTY;
3474 goto found;
3475 }
0c530ab8
A
3476 /*
 3477 * We have seen that this is hard to trigger.
 3478 * This is an overcommit of nbufs but needed
 3479 * in some scenarios with disk images
3480 */
3481
3482add_newbufs:
91447636 3483 lck_mtx_unlock(buf_mtxp);
765c9de3 3484
91447636 3485 /* Create a new temporary buffer header */
765c9de3 3486 bp = (struct buf *)zalloc(buf_hdr_zone);
2d21ac55 3487
765c9de3
A
3488 if (bp) {
3489 bufhdrinit(bp);
2d21ac55
A
3490 bp->b_whichq = BQ_EMPTY;
3491 bp->b_timestamp = buf_timestamp();
765c9de3 3492 BLISTNONE(bp);
765c9de3
A
3493 SET(bp->b_flags, B_HDRALLOC);
3494 *queue = BQ_EMPTY;
2d21ac55 3495 }
b0d623f7 3496 lck_mtx_lock_spin(buf_mtxp);
2d21ac55
A
3497
3498 if (bp) {
3499 binshash(bp, &invalhash);
765c9de3
A
3500 binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
3501 buf_hdr_count++;
3502 goto found;
3503 }
0c530ab8 3504 /* subtract already accounted bufcount */
2d21ac55 3505 nbuf_headers--;
0c530ab8 3506
91447636 3507 bufstats.bufs_sleeps++;
765c9de3 3508
1c79356b
A
3509 /* wait for a free buffer of any kind */
3510 needbuffer = 1;
91447636
A
3511 /* hz value is 100 */
3512 ts.tv_sec = (slptimeo/1000);
3513 /* the hz value is 100; which leads to 10ms */
3514 ts.tv_nsec = (slptimeo % 1000) * NSEC_PER_USEC * 1000 * 10;
b0d623f7
A
3515
3516 msleep(&needbuffer, buf_mtxp, slpflag | PDROP | (PRIBIO+1), "getnewbuf", &ts);
2d21ac55 3517 return (NULL);
1c79356b
A
3518 }
3519
3520 /* Buffer available either on AGE or LRU or META */
3521 bp = NULL;
3522 *queue = -1;
3523
3524 /* Buffer available either on AGE or LRU */
3525 if (!age_bp) {
3526 bp = lru_bp;
3527 *queue = BQ_LRU;
3528 } else if (!lru_bp) {
3529 bp = age_bp;
3530 *queue = BQ_AGE;
3531 } else { /* buffer available on both AGE and LRU */
91447636
A
3532 int t = buf_timestamp();
3533
3534 age_time = t - age_bp->b_timestamp;
3535 lru_time = t - lru_bp->b_timestamp;
1c79356b
A
3536 if ((age_time < 0) || (lru_time < 0)) { /* time set backwards */
3537 bp = age_bp;
3538 *queue = BQ_AGE;
3539 /*
 3541 * we should probably re-timestamp everything in the
3541 * queues at this point with the current time
3542 */
3543 } else {
3544 if ((lru_time >= lru_is_stale) && (age_time < age_is_stale)) {
3545 bp = lru_bp;
3546 *queue = BQ_LRU;
3547 } else {
3548 bp = age_bp;
3549 *queue = BQ_AGE;
3550 }
3551 }
3552 }
3553
3554 if (!bp) { /* Neither on AGE nor on LRU */
3555 bp = meta_bp;
3556 *queue = BQ_META;
3557 } else if (meta_bp) {
91447636
A
3558 int t = buf_timestamp();
3559
3560 bp_time = t - bp->b_timestamp;
3561 meta_time = t - meta_bp->b_timestamp;
1c79356b
A
3562
3563 if (!(bp_time < 0) && !(meta_time < 0)) {
3564 /* time not set backwards */
3565 int bp_is_stale;
3566 bp_is_stale = (*queue == BQ_LRU) ?
3567 lru_is_stale : age_is_stale;
3568
3569 if ((meta_time >= meta_is_stale) &&
3570 (bp_time < bp_is_stale)) {
3571 bp = meta_bp;
3572 *queue = BQ_META;
3573 }
3574 }
3575 }
1c79356b 3576found:
91447636 3577 if (ISSET(bp->b_flags, B_LOCKED) || ISSET(bp->b_lflags, BL_BUSY))
b0d623f7 3578 panic("getnewbuf: bp @ %p is LOCKED or BUSY! (flags 0x%x)\n", bp, bp->b_flags);
1c79356b
A
3579
3580 /* Clean it */
b0d623f7 3581 if (bcleanbuf(bp, FALSE)) {
91447636
A
3582 /*
3583 * moved to the laundry thread, buffer not ready
3584 */
1c79356b
A
3585 *queue = req;
3586 goto start;
3587 }
1c79356b
A
3588 return (bp);
3589}
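/*
 * Worked example of the staleness comparison above (hypothetical
 * values of lru_is_stale = 120s and age_is_stale = 60s): an LRU head
 * idle for 130s is taken ahead of an AGE head idle for 20s, because
 * the LRU buffer is already stale while the AGE buffer is not; in
 * every other combination the AGE buffer is recycled first.
 */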
9bccf70c 3590
1c79356b
A
3591
3592/*
3593 * Clean a buffer.
6d2010ae 3594 * Returns 0 if buffer is ready to use,
91447636 3595 * Returns 1 if issued a buf_bawrite() to indicate
1c79356b 3596 * that the buffer is not ready.
91447636
A
3597 *
3598 * buf_mtxp is held upon entry
3599 * returns with buf_mtxp locked
1c79356b 3600 */
6d2010ae 3601int
b0d623f7 3602bcleanbuf(buf_t bp, boolean_t discard)
1c79356b 3603{
1c79356b 3604 /* Remove from the queue */
91447636 3605 bremfree_locked(bp);
1c79356b 3606
91447636
A
3607#ifdef JOE_DEBUG
3608 bp->b_owner = current_thread();
3609 bp->b_tag = 2;
3610#endif
765c9de3
A
3611 /*
3612 * If buffer was a delayed write, start the IO by queuing
3613 * it on the LAUNDRY queue, and return 1
3614 */
1c79356b 3615 if (ISSET(bp->b_flags, B_DELWRI)) {
b0d623f7
A
3616 if (discard) {
3617 SET(bp->b_lflags, BL_WANTDEALLOC);
3618 }
3619
6d2010ae 3620 bmovelaundry(bp);
91447636
A
3621
3622 lck_mtx_unlock(buf_mtxp);
3623
2d21ac55
A
3624 wakeup(&bufqueues[BQ_LAUNDRY]);
3625 /*
3626 * and give it a chance to run
3627 */
9bccf70c 3628 (void)thread_block(THREAD_CONTINUE_NULL);
91447636 3629
b0d623f7 3630 lck_mtx_lock_spin(buf_mtxp);
2d21ac55 3631
1c79356b
A
3632 return (1);
3633 }
2d21ac55
A
3634#ifdef JOE_DEBUG
3635 bp->b_owner = current_thread();
3636 bp->b_tag = 8;
3637#endif
3638 /*
3639 * Buffer is no longer on any free list... we own it
3640 */
3641 SET(bp->b_lflags, BL_BUSY);
b0d623f7
A
3642 buf_busycount++;
3643
2d21ac55 3644 bremhash(bp);
91447636 3645
91447636
A
3646 /*
3647 * disassociate us from our vnode, if we had one...
3648 */
3649 if (bp->b_vp)
2d21ac55
A
3650 brelvp_locked(bp);
3651
3652 lck_mtx_unlock(buf_mtxp);
3653
3654 BLISTNONE(bp);
91447636 3655
6d2010ae
A
3656 if (ISSET(bp->b_flags, B_META))
3657 buf_free_meta_store(bp);
91447636
A
3658
3659 trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
3660
6d2010ae 3661 buf_release_credentials(bp);
fe8ab488
A
3662
3663 bp->b_redundancy_flags = 0;
91447636 3664
b0d623f7
A
3665 /* If discarding, just move to the empty queue */
3666 if (discard) {
3667 lck_mtx_lock_spin(buf_mtxp);
3668 CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
3669 bp->b_whichq = BQ_EMPTY;
3670 binshash(bp, &invalhash);
3671 binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
3672 CLR(bp->b_lflags, BL_BUSY);
3673 buf_busycount--;
3674 } else {
3675 /* Not discarding: clean up and prepare for reuse */
3676 bp->b_bufsize = 0;
3677 bp->b_datap = (uintptr_t)NULL;
3678 bp->b_upl = (void *)NULL;
3679 /*
3680 * preserve the state of whether this buffer
3681 * was allocated on the fly or not...
3682 * the only other flag that should be set at
3683 * this point is BL_BUSY...
3684 */
3685#ifdef JOE_DEBUG
3686 bp->b_owner = current_thread();
3687 bp->b_tag = 3;
3688#endif
3689 bp->b_lflags = BL_BUSY;
3690 bp->b_flags = (bp->b_flags & B_HDRALLOC);
3691 bp->b_dev = NODEV;
3692 bp->b_blkno = bp->b_lblkno = 0;
3693 bp->b_iodone = NULL;
3694 bp->b_error = 0;
3695 bp->b_resid = 0;
3696 bp->b_bcount = 0;
3697 bp->b_dirtyoff = bp->b_dirtyend = 0;
3698 bp->b_validoff = bp->b_validend = 0;
7ddcb079 3699 bzero(&bp->b_attr, sizeof(struct bufattr));
b0d623f7
A
3700
3701 lck_mtx_lock_spin(buf_mtxp);
3702 }
91447636
A
3703 return (0);
3704}
3705
3706
3707
3708errno_t
3709buf_invalblkno(vnode_t vp, daddr64_t lblkno, int flags)
3710{
3711 buf_t bp;
3712 errno_t error;
2d21ac55
A
3713 struct bufhashhdr *dp;
3714
3715 dp = BUFHASH(vp, lblkno);
91447636 3716
91447636 3717relook:
b0d623f7
A
3718 lck_mtx_lock_spin(buf_mtxp);
3719
2d21ac55 3720 if ((bp = incore_locked(vp, lblkno, dp)) == (struct buf *)0) {
91447636
A
3721 lck_mtx_unlock(buf_mtxp);
3722 return (0);
3723 }
3724 if (ISSET(bp->b_lflags, BL_BUSY)) {
3725 if ( !ISSET(flags, BUF_WAIT)) {
3726 lck_mtx_unlock(buf_mtxp);
3727 return (EBUSY);
3728 }
3729 SET(bp->b_lflags, BL_WANTED);
3730
b0d623f7 3731 error = msleep((caddr_t)bp, buf_mtxp, PDROP | (PRIBIO + 1), "buf_invalblkno", NULL);
91447636 3732
2d21ac55 3733 if (error) {
91447636 3734 return (error);
2d21ac55 3735 }
91447636
A
3736 goto relook;
3737 }
3738 bremfree_locked(bp);
3739 SET(bp->b_lflags, BL_BUSY);
3740 SET(bp->b_flags, B_INVAL);
b0d623f7 3741 buf_busycount++;
91447636
A
3742#ifdef JOE_DEBUG
3743 bp->b_owner = current_thread();
3744 bp->b_tag = 4;
3745#endif
3746 lck_mtx_unlock(buf_mtxp);
3747 buf_brelse(bp);
3748
3749 return (0);
3750}
3751
3752
3753void
3754buf_drop(buf_t bp)
3755{
3756 int need_wakeup = 0;
3757
2d21ac55 3758 lck_mtx_lock_spin(buf_mtxp);
91447636
A
3759
3760 if (ISSET(bp->b_lflags, BL_WANTED)) {
3761 /*
3762 * delay the actual wakeup until after we
3763 * clear BL_BUSY and we've dropped buf_mtxp
3764 */
3765 need_wakeup = 1;
3766 }
2d21ac55
A
3767#ifdef JOE_DEBUG
3768 bp->b_owner = current_thread();
3769 bp->b_tag = 9;
3770#endif
91447636
A
3771 /*
3772 * Unlock the buffer.
3773 */
3774 CLR(bp->b_lflags, (BL_BUSY | BL_WANTED));
b0d623f7 3775 buf_busycount--;
1c79356b 3776
91447636 3777 lck_mtx_unlock(buf_mtxp);
1c79356b 3778
91447636
A
3779 if (need_wakeup) {
3780 /*
 3781 * Wake up any processes waiting for _this_ buffer to become free.
3782 */
3783 wakeup(bp);
3784 }
3785}
1c79356b 3786
1c79356b 3787
91447636
A
3788errno_t
3789buf_acquire(buf_t bp, int flags, int slpflag, int slptimeo) {
3790 errno_t error;
1c79356b 3791
b0d623f7 3792 lck_mtx_lock_spin(buf_mtxp);
1c79356b 3793
91447636 3794 error = buf_acquire_locked(bp, flags, slpflag, slptimeo);
1c79356b 3795
91447636 3796 lck_mtx_unlock(buf_mtxp);
1c79356b 3797
91447636
A
3798 return (error);
3799}
1c79356b 3800
91447636
A
3801
3802static errno_t
3803buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo)
3804{
3805 errno_t error;
3806 struct timespec ts;
3807
3808 if (ISSET(bp->b_flags, B_LOCKED)) {
3809 if ((flags & BAC_SKIP_LOCKED))
3810 return (EDEADLK);
3811 } else {
3812 if ((flags & BAC_SKIP_NONLOCKED))
3813 return (EDEADLK);
1c79356b 3814 }
91447636
A
3815 if (ISSET(bp->b_lflags, BL_BUSY)) {
3816 /*
b0d623f7 3817 * since the lck_mtx_lock may block, the buffer
91447636
A
3818 * may become BUSY, so we need to
3819 * recheck for a NOWAIT request
3820 */
3821 if (flags & BAC_NOWAIT)
3822 return (EBUSY);
3823 SET(bp->b_lflags, BL_WANTED);
3824
3825 /* the hz value is 100; which leads to 10ms */
3826 ts.tv_sec = (slptimeo/100);
3827 ts.tv_nsec = (slptimeo % 100) * 10 * NSEC_PER_USEC * 1000;
2d21ac55 3828 error = msleep((caddr_t)bp, buf_mtxp, slpflag | (PRIBIO + 1), "buf_acquire", &ts);
91447636
A
3829
3830 if (error)
3831 return (error);
3832 return (EAGAIN);
1c79356b 3833 }
91447636
A
3834 if (flags & BAC_REMOVE)
3835 bremfree_locked(bp);
3836 SET(bp->b_lflags, BL_BUSY);
b0d623f7
A
3837 buf_busycount++;
3838
91447636
A
3839#ifdef JOE_DEBUG
3840 bp->b_owner = current_thread();
3841 bp->b_tag = 5;
3842#endif
1c79356b
A
3843 return (0);
3844}
3845
3846
3847/*
3848 * Wait for operations on the buffer to complete.
3849 * When they do, extract and return the I/O's error value.
3850 */
91447636
A
3851errno_t
3852buf_biowait(buf_t bp)
1c79356b 3853{
b0d623f7 3854 while (!ISSET(bp->b_flags, B_DONE)) {
1c79356b 3855
b0d623f7 3856 lck_mtx_lock_spin(buf_mtxp);
91447636 3857
b0d623f7
A
3858 if (!ISSET(bp->b_flags, B_DONE)) {
3859 DTRACE_IO1(wait__start, buf_t, bp);
3860 (void) msleep(bp, buf_mtxp, PDROP | (PRIBIO+1), "buf_biowait", NULL);
3861 DTRACE_IO1(wait__done, buf_t, bp);
3862 } else
3863 lck_mtx_unlock(buf_mtxp);
3864 }
1c79356b
A
3865 /* check for interruption of I/O (e.g. via NFS), then errors. */
3866 if (ISSET(bp->b_flags, B_EINTR)) {
3867 CLR(bp->b_flags, B_EINTR);
3868 return (EINTR);
3869 } else if (ISSET(bp->b_flags, B_ERROR))
3870 return (bp->b_error ? bp->b_error : EIO);
3871 else
3872 return (0);
3873}
3874
2d21ac55 3875
1c79356b
A
3876/*
3877 * Mark I/O complete on a buffer.
3878 *
3879 * If a callback has been requested, e.g. the pageout
3880 * daemon, do so. Otherwise, awaken waiting processes.
3881 *
3882 * [ Leffler, et al., says on p.247:
3883 * "This routine wakes up the blocked process, frees the buffer
3884 * for an asynchronous write, or, for a request by the pagedaemon
3885 * process, invokes a procedure specified in the buffer structure" ]
3886 *
3887 * In real life, the pagedaemon (or other system processes) wants
91447636 3888 * to do async stuff too, and doesn't want the buffer buf_brelse()'d.
1c79356b
A
3889 * (for swap pager, that puts swap buffers on the free lists (!!!),
3890 * for the vn device, that puts malloc'd buffers on the free lists!)
3891 */
91447636 3892
1c79356b 3893void
91447636 3894buf_biodone(buf_t bp)
1c79356b 3895{
b0d623f7 3896 mount_t mp;
39236c6e 3897 struct bufattr *bap;
b0d623f7 3898
1c79356b 3899 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_START,
b0d623f7 3900 bp, bp->b_datap, bp->b_flags, 0, 0);
1c79356b
A
3901
3902 if (ISSET(bp->b_flags, B_DONE))
3903 panic("biodone already");
1c79356b 3904
39236c6e
A
3905 bap = &bp->b_attr;
3906
b0d623f7
A
3907 if (bp->b_vp && bp->b_vp->v_mount) {
3908 mp = bp->b_vp->v_mount;
3909 } else {
3910 mp = NULL;
3911 }
3912
3913 if (mp && (bp->b_flags & B_READ) == 0) {
3914 update_last_io_time(mp);
3915 INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_write_size);
3916 } else if (mp) {
3917 INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_read_size);
e2fac8b1
A
3918 }
3919
39236c6e
A
3920 if (kdebug_enable) {
3921 int code = DKIO_DONE;
3922 int io_tier = GET_BUFATTR_IO_TIER(bap);
9bccf70c 3923
91447636
A
3924 if (bp->b_flags & B_READ)
3925 code |= DKIO_READ;
3926 if (bp->b_flags & B_ASYNC)
3927 code |= DKIO_ASYNC;
9bccf70c 3928
91447636
A
3929 if (bp->b_flags & B_META)
3930 code |= DKIO_META;
3931 else if (bp->b_flags & B_PAGEIO)
3932 code |= DKIO_PAGING;
9bccf70c 3933
39236c6e 3934 if (io_tier != 0)
6d2010ae 3935 code |= DKIO_THROTTLE;
39236c6e
A
3936
3937 code |= ((io_tier << DKIO_TIER_SHIFT) & DKIO_TIER_MASK);
3938
3939 if (bp->b_flags & B_PASSIVE)
6d2010ae
A
3940 code |= DKIO_PASSIVE;
3941
39236c6e 3942 if (bap->ba_flags & BA_NOCACHE)
316670eb
A
3943 code |= DKIO_NOCACHE;
3944
3945 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE,
39236c6e 3946 buf_kernel_addrperm_addr(bp), (uintptr_t)VM_KERNEL_ADDRPERM(bp->b_vp), bp->b_resid, bp->b_error, 0);
9bccf70c 3947 }
6d2010ae 3948
91447636
A
3949 /*
3950 * I/O was done, so don't believe
6d2010ae
A
3951 * the DIRTY state from VM anymore...
3952 * and we need to reset the THROTTLED/PASSIVE
3953 * indicators
91447636 3954 */
39236c6e
A
3955 CLR(bp->b_flags, (B_WASDIRTY | B_PASSIVE));
3956 CLR(bap->ba_flags, (BA_META | BA_NOCACHE | BA_DELAYIDLESLEEP));
3957
3958 SET_BUFATTR_IO_TIER(bap, 0);
3959
2d21ac55 3960 DTRACE_IO1(done, buf_t, bp);
b4c24cb9 3961
91447636
A
3962 if (!ISSET(bp->b_flags, B_READ) && !ISSET(bp->b_flags, B_RAW))
3963 /*
3964 * wake up any writer's blocked
3965 * on throttle or waiting for I/O
3966 * to drain
3967 */
3968 vnode_writedone(bp->b_vp);
3969
3970 if (ISSET(bp->b_flags, (B_CALL | B_FILTER))) { /* if necessary, call out */
3971 void (*iodone_func)(struct buf *, void *) = bp->b_iodone;
6d2010ae 3972 void *arg = bp->b_transaction;
91447636
A
3973 int callout = ISSET(bp->b_flags, B_CALL);
3974
6d2010ae
A
3975 if (iodone_func == NULL)
3976 panic("biodone: bp @ %p has NULL b_iodone!\n", bp);
3977
91447636 3978 CLR(bp->b_flags, (B_CALL | B_FILTER)); /* filters and callouts are one-shot */
b4c24cb9 3979 bp->b_iodone = NULL;
91447636 3980 bp->b_transaction = NULL;
b4c24cb9 3981
6d2010ae
A
3982 if (callout)
3983 SET(bp->b_flags, B_DONE); /* note that it's done */
2d21ac55 3984
3985 (*iodone_func)(bp, arg);
3986
3987 if (callout) {
3988 /*
2d21ac55 3989 * assumes that the callback function takes
3990 * ownership of the bp and deals with releasing it if necessary
3991 */
3992 goto biodone_done;
3993 }
3994 /*
 3995	 * in this case the callback function is acting
3996 * strictly as a filter... it does not take
3997 * ownership of the bp and is expecting us
3998 * to finish cleaning up... this is currently used
3999 * by the HFS journaling code
4000 */
1c79356b 4001 }
4002 if (ISSET(bp->b_flags, B_ASYNC)) { /* if async, release it */
4003 SET(bp->b_flags, B_DONE); /* note that it's done */
1c79356b 4004
4005 buf_brelse(bp);
4006 } else { /* or just wakeup the buffer */
4007 /*
4008 * by taking the mutex, we serialize
4009 * the buf owner calling buf_biowait so that we'll
4010 * only see him in one of 2 states...
4011 * state 1: B_DONE wasn't set and he's
4012 * blocked in msleep
4013 * state 2: he's blocked trying to take the
4014 * mutex before looking at B_DONE
4015 * BL_WANTED is cleared in case anyone else
4016 * is blocked waiting for the buffer... note
 4017		 * that we haven't cleared BL_BUSY yet, so if
 4018		 * they do get to run, they're going to re-set
 4019		 * BL_WANTED and go back to sleep (a sketch of this handshake follows the function)
4020 */
2d21ac55 4021 lck_mtx_lock_spin(buf_mtxp);
1c79356b 4022
4023 CLR(bp->b_lflags, BL_WANTED);
4024 SET(bp->b_flags, B_DONE); /* note that it's done */
4025
4026 lck_mtx_unlock(buf_mtxp);
4027
4028 wakeup(bp);
4029 }
4030biodone_done:
4031 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_END,
b0d623f7 4032 (uintptr_t)bp, (uintptr_t)bp->b_datap, bp->b_flags, 0, 0);
4033}
4034
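/*
 * [Editorial sketch -- not part of the original xnu source.  The synchronous
 * completion path above relies on a small handshake: buf_biodone() sets
 * B_DONE and clears BL_WANTED while holding buf_mtxp, then issues the wakeup,
 * so a thread in buf_biowait() either observes B_DONE already set or is
 * guaranteed to receive the wakeup.  The user-space analogue below uses a
 * pthread mutex and condition variable in place of buf_mtxp and
 * wakeup()/msleep(); all names here are invented for illustration. ]
 */
#include <pthread.h>
#include <stdbool.h>

struct fake_buf {
	pthread_mutex_t	lock;	/* stands in for buf_mtxp */
	pthread_cond_t	cv;	/* stands in for wakeup(bp) / msleep(bp, ...) */
	bool		done;	/* stands in for B_DONE */
};

/* completion side: the shape of the synchronous tail of buf_biodone() */
static void
fake_biodone(struct fake_buf *bp)
{
	pthread_mutex_lock(&bp->lock);	/* serialize against the waiter */
	bp->done = true;		/* note that it's done */
	pthread_mutex_unlock(&bp->lock);
	pthread_cond_broadcast(&bp->cv); /* wake anyone blocked in fake_biowait() */
}

/* waiting side: the shape of buf_biowait() */
static void
fake_biowait(struct fake_buf *bp)
{
	pthread_mutex_lock(&bp->lock);
	while (!bp->done)		/* not done yet: sleep until signalled */
		pthread_cond_wait(&bp->cv, &bp->lock);
	pthread_mutex_unlock(&bp->lock);
}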
4035/*
4036 * Obfuscate buf pointers.
4037 */
4038vm_offset_t
4039buf_kernel_addrperm_addr(void * addr)
4040{
4041 if ((vm_offset_t)addr == 0)
4042 return 0;
4043 else
4044 return ((vm_offset_t)addr + buf_kernel_addrperm);
4045}
4046
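/*
 * [Editorial sketch -- not part of the original xnu source.  The obfuscation
 * above is simply "pointer plus a secret boot-time constant", which keeps
 * trace records correlatable (the same buffer always maps to the same token)
 * without leaking raw kernel addresses.  The names below are invented. ]
 */
#include <stdint.h>

static uintptr_t example_addrperm;	/* would be seeded from a RNG once, early in boot */

static uintptr_t
example_obfuscate(const void *addr)
{
	if (addr == NULL)
		return 0;		/* keep NULL recognizable in traces */
	return (uintptr_t)addr + example_addrperm;
}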
4047/*
4048 * Return a count of buffers on the "locked" queue.
4049 */
4050int
91447636 4051count_lock_queue(void)
1c79356b 4052{
4053 buf_t bp;
4054 int n = 0;
4055
b0d623f7 4056 lck_mtx_lock_spin(buf_mtxp);
4057
4058 for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
4059 bp = bp->b_freelist.tqe_next)
4060 n++;
4061 lck_mtx_unlock(buf_mtxp);
4062
4063 return (n);
4064}
4065
4066/*
4067 * Return a count of 'busy' buffers. Used at the time of shutdown.
316670eb 4068 * note: This is also called from the mach side in debug context in kdp.c
4069 */
4070int
91447636 4071count_busy_buffers(void)
1c79356b 4072{
b0d623f7 4073 return buf_busycount + bufstats.bufs_iobufinuse;
4074}
4075
9bccf70c 4076#if DIAGNOSTIC
4077/*
4078 * Print out statistics on the current allocation of the buffer pool.
4079 * Can be enabled to print out on every ``sync'' by setting "syncprt"
4080 * in vfs_syscalls.c using sysctl.
4081 */
4082void
4083vfs_bufstats()
4084{
91447636 4085 int i, j, count;
4086 struct buf *bp;
4087 struct bqueues *dp;
4088 int counts[MAXBSIZE/CLBYTES+1];
4089 static char *bname[BQUEUES] =
4090 { "LOCKED", "LRU", "AGE", "EMPTY", "META", "LAUNDRY" };
4091
4092 for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
4093 count = 0;
4094 for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
4095 counts[j] = 0;
4096
4097 lck_mtx_lock(buf_mtxp);
4098
4099 for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
4100 counts[bp->b_bufsize/CLBYTES]++;
4101 count++;
4102 }
4103 lck_mtx_unlock(buf_mtxp);
4104
4105 printf("%s: total-%d", bname[i], count);
4106 for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
4107 if (counts[j] != 0)
4108 printf(", %d-%d", j * CLBYTES, counts[j]);
4109 printf("\n");
4110 }
4111}
4112#endif /* DIAGNOSTIC */
4113
6d2010ae 4114#define NRESERVEDIOBUFS 128
4115
4116
4117buf_t
4118alloc_io_buf(vnode_t vp, int priv)
4119{
4120 buf_t bp;
4121
b0d623f7 4122 lck_mtx_lock_spin(iobuffer_mtxp);
91447636 4123
2d21ac55 4124 while (((niobuf_headers - NRESERVEDIOBUFS < bufstats.bufs_iobufinuse) && !priv) ||
4125 (bp = iobufqueue.tqh_first) == NULL) {
4126 bufstats.bufs_iobufsleeps++;
4127
4128 need_iobuffer = 1;
6d2010ae 4129 (void) msleep(&need_iobuffer, iobuffer_mtxp, PSPIN | (PRIBIO+1), (const char *)"alloc_io_buf", NULL);
4130 }
4131 TAILQ_REMOVE(&iobufqueue, bp, b_freelist);
4132
4133 bufstats.bufs_iobufinuse++;
4134 if (bufstats.bufs_iobufinuse > bufstats.bufs_iobufmax)
4135 bufstats.bufs_iobufmax = bufstats.bufs_iobufinuse;
4136
4137 lck_mtx_unlock(iobuffer_mtxp);
4138
4139 /*
4140 * initialize various fields
4141 * we don't need to hold the mutex since the buffer
4142 * is now private... the vp should have a reference
4143 * on it and is not protected by this mutex in any event
4144 */
4145 bp->b_timestamp = 0;
4146 bp->b_proc = NULL;
4147
4148 bp->b_datap = 0;
4149 bp->b_flags = 0;
4150 bp->b_lflags = BL_BUSY | BL_IOBUF;
6d2010ae 4151 bp->b_redundancy_flags = 0;
4152 bp->b_blkno = bp->b_lblkno = 0;
4153#ifdef JOE_DEBUG
4154 bp->b_owner = current_thread();
4155 bp->b_tag = 6;
4156#endif
4157 bp->b_iodone = NULL;
4158 bp->b_error = 0;
4159 bp->b_resid = 0;
4160 bp->b_bcount = 0;
4161 bp->b_bufsize = 0;
4162 bp->b_upl = NULL;
4163 bp->b_vp = vp;
7ddcb079 4164 bzero(&bp->b_attr, sizeof(struct bufattr));
4165
4166 if (vp && (vp->v_type == VBLK || vp->v_type == VCHR))
4167 bp->b_dev = vp->v_rdev;
4168 else
4169 bp->b_dev = NODEV;
4170
4171 return (bp);
4172}
4173
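/*
 * [Editorial sketch -- not part of the original xnu source.  alloc_io_buf()
 * keeps the last NRESERVEDIOBUFS headers in reserve: an unprivileged caller
 * sleeps once the pool is drained down to the reserve, while a privileged
 * caller only sleeps when the pool is completely empty.  A minimal
 * user-space analogue of that admission check, with invented names: ]
 */
#include <pthread.h>

#define EXAMPLE_POOL_SIZE	256
#define EXAMPLE_RESERVED	16

static pthread_mutex_t	example_pool_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	example_pool_cv = PTHREAD_COND_INITIALIZER;
static int		example_pool_inuse;

static void
example_pool_alloc(int priv)
{
	pthread_mutex_lock(&example_pool_lock);
	/*
	 * Unprivileged callers may not dip into the reserved tail of the
	 * pool; privileged callers block only when nothing at all is left.
	 */
	while ((!priv &&
	    example_pool_inuse >= EXAMPLE_POOL_SIZE - EXAMPLE_RESERVED) ||
	    example_pool_inuse >= EXAMPLE_POOL_SIZE)
		pthread_cond_wait(&example_pool_cv, &example_pool_lock);
	example_pool_inuse++;
	pthread_mutex_unlock(&example_pool_lock);
}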
4174
4175void
4176free_io_buf(buf_t bp)
4177{
4178 int need_wakeup = 0;
4179
4180 /*
4181 * put buffer back on the head of the iobufqueue
4182 */
4183 bp->b_vp = NULL;
4184 bp->b_flags = B_INVAL;
4185
4186 /* Zero out the bufattr and its flags before relinquishing this iobuf */
4187 bzero (&bp->b_attr, sizeof(struct bufattr));
4188
2d21ac55 4189 lck_mtx_lock_spin(iobuffer_mtxp);
4190
4191 binsheadfree(bp, &iobufqueue, -1);
4192
4193 if (need_iobuffer) {
4194 /*
4195 * Wake up any processes waiting because they need an io buffer
4196 *
4197 * do the wakeup after we drop the mutex... it's possible that the
4198 * wakeup will be superfluous if need_iobuffer gets set again and
4199 * another thread runs this path, but it's highly unlikely, doesn't
4200 * hurt, and it means we don't hold up I/O progress if the wakeup blocks
4201 * trying to grab a task related lock...
4202 */
4203 need_iobuffer = 0;
4204 need_wakeup = 1;
4205 }
4206 if (bufstats.bufs_iobufinuse <= 0)
4207 panic("free_io_buf: bp(%p) - bufstats.bufs_iobufinuse < 0", bp);
4208
4209 bufstats.bufs_iobufinuse--;
4210
4211 lck_mtx_unlock(iobuffer_mtxp);
4212
4213 if (need_wakeup)
4214 wakeup(&need_iobuffer);
4215}
4216
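/*
 * [Editorial sketch -- not part of the original xnu source.  free_io_buf()
 * only records, while it still holds iobuffer_mtxp, that a wakeup is needed,
 * and issues the wakeup after dropping the lock so a woken waiter never
 * immediately collides with the mutex we are about to release.  Continuing
 * the invented pool example shown after alloc_io_buf(): ]
 */
static void
example_pool_free(void)
{
	int need_wakeup = 0;

	pthread_mutex_lock(&example_pool_lock);
	example_pool_inuse--;		/* the real code panics if this would go negative */
	need_wakeup = 1;		/* decide under the lock... */
	pthread_mutex_unlock(&example_pool_lock);

	if (need_wakeup)
		pthread_cond_broadcast(&example_pool_cv); /* ...signal after dropping it */
}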
4217
4218void
4219buf_list_lock(void)
4220{
b0d623f7 4221 lck_mtx_lock_spin(buf_mtxp);
4222}
4223
4224void
4225buf_list_unlock(void)
4226{
4227 lck_mtx_unlock(buf_mtxp);
4228}
4229
4230/*
4231 * If getnewbuf() calls bcleanbuf() on the same thread
4232 * there is a potential for stack overrun and deadlocks.
 4233 * So we always hand off the work to a worker thread for completion
4234 */
4235
4236
4237static void
4238bcleanbuf_thread_init(void)
4239{
4240 thread_t thread = THREAD_NULL;
4241
91447636 4242 /* create worker thread */
4243 kernel_thread_start((thread_continue_t)bcleanbuf_thread, NULL, &thread);
4244 thread_deallocate(thread);
4245}
4246
4247typedef int (*bcleanbufcontinuation)(int);
4248
4249static void
4250bcleanbuf_thread(void)
4251{
4252 struct buf *bp;
4253 int error = 0;
4254 int loopcnt = 0;
4255
4256 for (;;) {
b0d623f7 4257 lck_mtx_lock_spin(buf_mtxp);
91447636 4258
b0d623f7 4259 while ( (bp = TAILQ_FIRST(&bufqueues[BQ_LAUNDRY])) == NULL) {
6d2010ae 4260 (void)msleep0(&bufqueues[BQ_LAUNDRY], buf_mtxp, PRIBIO|PDROP, "blaundry", 0, (bcleanbufcontinuation)bcleanbuf_thread);
b0d623f7 4261 }
6d2010ae 4262
4263 /*
4264 * Remove from the queue
4265 */
4266 bremfree_locked(bp);
4267
4268 /*
4269 * Buffer is no longer on any free list
4270 */
4271 SET(bp->b_lflags, BL_BUSY);
b0d623f7 4272 buf_busycount++;
4273
4274#ifdef JOE_DEBUG
4275 bp->b_owner = current_thread();
4276 bp->b_tag = 10;
4277#endif
4278
4279 lck_mtx_unlock(buf_mtxp);
4280 /*
4281 * do the IO
4282 */
4283 error = bawrite_internal(bp, 0);
4284
4285 if (error) {
4286 bp->b_whichq = BQ_LAUNDRY;
4287 bp->b_timestamp = buf_timestamp();
4288
4289 lck_mtx_lock_spin(buf_mtxp);
4290
4291 binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY);
4292 blaundrycnt++;
4293
6d2010ae 4294			/* we never leave a busy buffer on the laundry queue */
2d21ac55 4295 CLR(bp->b_lflags, BL_BUSY);
b0d623f7 4296 buf_busycount--;
4297#ifdef JOE_DEBUG
4298 bp->b_owner = current_thread();
4299 bp->b_tag = 11;
4300#endif
4301
91447636 4302 lck_mtx_unlock(buf_mtxp);
4303
4304 if (loopcnt > MAXLAUNDRY) {
4305 /*
4306 * bawrite_internal() can return errors if we're throttled. If we've
4307 * done several I/Os and failed, give the system some time to unthrottle
4308 * the vnode
4309 */
4310 (void)tsleep((void *)&bufqueues[BQ_LAUNDRY], PRIBIO, "blaundry", 1);
4311 loopcnt = 0;
4312 } else {
4313 /* give other threads a chance to run */
4314 (void)thread_block(THREAD_CONTINUE_NULL);
4315 loopcnt++;
4316 }
4317 }
4318 }
4319}
4320
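/*
 * [Editorial sketch -- not part of the original xnu source.  The laundry
 * thread above requeues a buffer whose delayed write fails (typically
 * because the I/O is being throttled) and, after MAXLAUNDRY consecutive
 * failures, sleeps briefly so the throttle can relax instead of spinning.
 * The same retry-with-backoff shape in a generic user-space worker; the
 * queue callbacks and names are invented: ]
 */
#include <unistd.h>

#define EXAMPLE_MAX_RETRIES	8

static void
example_worker_loop(void *(*dequeue)(void), int (*process)(void *),
    void (*requeue)(void *))
{
	void *item;
	int failures = 0;

	for (;;) {
		item = dequeue();		/* the real thread blocks in msleep0() here */
		if (item == NULL)
			continue;

		if (process(item) != 0) {
			requeue(item);		/* put it back for another try */
			if (++failures > EXAMPLE_MAX_RETRIES) {
				usleep(10000);	/* back off; let the throttle unwind */
				failures = 0;
			}
		} else {
			failures = 0;		/* success resets the backoff counter */
		}
	}
}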
4321
4322static int
4323brecover_data(buf_t bp)
4324{
4325 int upl_offset;
4326 upl_t upl;
4327 upl_page_info_t *pl;
4328 kern_return_t kret;
4329 vnode_t vp = bp->b_vp;
4330 int upl_flags;
4331
4332
4333 if ( !UBCINFOEXISTS(vp) || bp->b_bufsize == 0)
4334 goto dump_buffer;
4335
4336 upl_flags = UPL_PRECIOUS;
4337 if (! (buf_flags(bp) & B_READ)) {
4338 /*
4339 * "write" operation: let the UPL subsystem know
4340 * that we intend to modify the buffer cache pages we're
4341 * gathering.
4342 */
4343 upl_flags |= UPL_WILL_MODIFY;
4344 }
4345
4346 kret = ubc_create_upl(vp,
4347 ubc_blktooff(vp, bp->b_lblkno),
4348 bp->b_bufsize,
4349 &upl,
4350 &pl,
4351 upl_flags);
4352 if (kret != KERN_SUCCESS)
4353 panic("Failed to create UPL");
4354
4355 for (upl_offset = 0; upl_offset < bp->b_bufsize; upl_offset += PAGE_SIZE) {
4356
4357 if (!upl_valid_page(pl, upl_offset / PAGE_SIZE) || !upl_dirty_page(pl, upl_offset / PAGE_SIZE)) {
4358 ubc_upl_abort(upl, 0);
4359 goto dump_buffer;
4360 }
4361 }
4362 bp->b_upl = upl;
4363
b0d623f7 4364 kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap));
4365
4366 if (kret != KERN_SUCCESS)
4367 panic("getblk: ubc_upl_map() failed with (%d)", kret);
4368 return (1);
4369
4370dump_buffer:
4371 bp->b_bufsize = 0;
4372 SET(bp->b_flags, B_INVAL);
4373 buf_brelse(bp);
4374
4375 return(0);
4376}
4377
b7266188 4378boolean_t
0b4c1975 4379buffer_cache_gc(int all)
4380{
4381 buf_t bp;
4382 boolean_t did_large_zfree = FALSE;
6d2010ae 4383 boolean_t need_wakeup = FALSE;
b0d623f7 4384 int now = buf_timestamp();
316670eb 4385 uint32_t found = 0;
6d2010ae 4386 struct bqueues privq;
4387 int thresh_hold = BUF_STALE_THRESHHOLD;
4388
4389 if (all)
4390 thresh_hold = 0;
4391 /*
4392 * We only care about metadata (incore storage comes from zalloc()).
 4393	 * Unless "all" is set (used to evict metadata buffers in preparation
 4394	 * for deep sleep), we only evict up to BUF_MAX_GC_BATCH_SIZE buffers
 4395	 * that have not been accessed in the last BUF_STALE_THRESHHOLD seconds.
 4396	 * BUF_MAX_GC_BATCH_SIZE controls both the hold time of the global lock
 4397	 * "buf_mtxp" and the length of time we spend compute bound in the GC
 4398	 * thread which calls this function (a simplified sketch follows it)
 4399	 */
4400 lck_mtx_lock(buf_mtxp);
316670eb 4401
4402 do {
4403 found = 0;
4404 TAILQ_INIT(&privq);
4405 need_wakeup = FALSE;
b0d623f7 4406
4407 while (((bp = TAILQ_FIRST(&bufqueues[BQ_META]))) &&
4408 (now > bp->b_timestamp) &&
4409 (now - bp->b_timestamp > thresh_hold) &&
4410 (found < BUF_MAX_GC_BATCH_SIZE)) {
4411
4412 /* Remove from free list */
4413 bremfree_locked(bp);
4414 found++;
4415
4416#ifdef JOE_DEBUG
4417 bp->b_owner = current_thread();
4418 bp->b_tag = 12;
4419#endif
4420
4421 /* If dirty, move to laundry queue and remember to do wakeup */
4422 if (ISSET(bp->b_flags, B_DELWRI)) {
4423 SET(bp->b_lflags, BL_WANTDEALLOC);
4424
4425 bmovelaundry(bp);
4426 need_wakeup = TRUE;
4427
4428 continue;
4429 }
4430
4431 /*
4432 * Mark busy and put on private list. We could technically get
4433 * away without setting BL_BUSY here.
4434 */
4435 SET(bp->b_lflags, BL_BUSY);
4436 buf_busycount++;
b0d623f7 4437
4438 /*
4439 * Remove from hash and dissociate from vp.
4440 */
4441 bremhash(bp);
4442 if (bp->b_vp) {
4443 brelvp_locked(bp);
4444 }
b0d623f7 4445
4446 TAILQ_INSERT_TAIL(&privq, bp, b_freelist);
4447 }
b0d623f7 4448
4449 if (found == 0) {
4450 break;
4451 }
b0d623f7 4452
4453 /* Drop lock for batch processing */
4454 lck_mtx_unlock(buf_mtxp);
4455
4456 /* Wakeup and yield for laundry if need be */
4457 if (need_wakeup) {
4458 wakeup(&bufqueues[BQ_LAUNDRY]);
4459 (void)thread_block(THREAD_CONTINUE_NULL);
b0d623f7 4460 }
4461
4462 /* Clean up every buffer on private list */
4463 TAILQ_FOREACH(bp, &privq, b_freelist) {
4464 /* Take note if we've definitely freed at least a page to a zone */
4465 if ((ISSET(bp->b_flags, B_ZALLOC)) && (buf_size(bp) >= PAGE_SIZE)) {
4466 did_large_zfree = TRUE;
4467 }
4468
4469 trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
4470
4471 /* Free Storage */
4472 buf_free_meta_store(bp);
4473
4474 /* Release credentials */
4475 buf_release_credentials(bp);
4476
4477 /* Prepare for moving to empty queue */
4478 CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED
4479 | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
4480 bp->b_whichq = BQ_EMPTY;
4481 BLISTNONE(bp);
4482 }
4483 lck_mtx_lock(buf_mtxp);
4484
4485 /* Back under lock, move them all to invalid hash and clear busy */
4486 TAILQ_FOREACH(bp, &privq, b_freelist) {
4487 binshash(bp, &invalhash);
4488 CLR(bp->b_lflags, BL_BUSY);
4489 buf_busycount--;
4490
4491#ifdef JOE_DEBUG
4492 if (bp->b_owner != current_thread()) {
4493 panic("Buffer stolen from buffer_cache_gc()");
4494 }
4495 bp->b_owner = current_thread();
4496 bp->b_tag = 13;
4497#endif
4498 }
4499
4500 /* And do a big bulk move to the empty queue */
4501 TAILQ_CONCAT(&bufqueues[BQ_EMPTY], &privq, b_freelist);
6d2010ae 4502
316670eb 4503 } while (all && (found == BUF_MAX_GC_BATCH_SIZE));
4504
4505 lck_mtx_unlock(buf_mtxp);
4506
4507 return did_large_zfree;
4508}
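/*
 * [Editorial sketch -- not part of the original xnu source.  buffer_cache_gc()
 * bounds how long it holds buf_mtxp by unlinking at most
 * BUF_MAX_GC_BATCH_SIZE stale entries onto a private list, dropping the lock
 * for the expensive teardown, and looping until a short batch indicates the
 * queue is drained.  (The real function also reacquires the lock afterwards
 * to move the stripped headers onto the empty queue; this sketch simply
 * frees them.)  List type and names below are invented: ]
 */
#include <pthread.h>
#include <stdlib.h>

#define EXAMPLE_GC_BATCH	64

struct example_node {
	struct example_node	*next;
	long			timestamp;
};

static pthread_mutex_t		example_cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct example_node	*example_cache_head;

static void
example_cache_gc(long now, long threshold)
{
	struct example_node *batch, *n;
	int found;

	do {
		batch = NULL;
		found = 0;

		pthread_mutex_lock(&example_cache_lock);
		while (example_cache_head != NULL &&
		    now - example_cache_head->timestamp > threshold &&
		    found < EXAMPLE_GC_BATCH) {
			n = example_cache_head;		/* unlink from the shared list */
			example_cache_head = n->next;
			n->next = batch;		/* collect on the private list */
			batch = n;
			found++;
		}
		pthread_mutex_unlock(&example_cache_lock);

		while (batch != NULL) {			/* expensive part, lock dropped */
			n = batch;
			batch = n->next;
			free(n);
		}
	} while (found == EXAMPLE_GC_BATCH);
}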
4509
4510
4511/*
4512 * disabled for now
4513 */
4514
4515#if FLUSH_QUEUES
4516
4517#define NFLUSH 32
4518
4519static int
4520bp_cmp(void *a, void *b)
4521{
4522 buf_t *bp_a = *(buf_t **)a,
4523 *bp_b = *(buf_t **)b;
4524 daddr64_t res;
1c79356b 4525
4526 // don't have to worry about negative block
4527 // numbers so this is ok to do.
4528 //
4529 res = (bp_a->b_blkno - bp_b->b_blkno);
4530
4531 return (int)res;
1c79356b 4532}
4533
4534
4535int
4536bflushq(int whichq, mount_t mp)
1c79356b 4537{
4538 buf_t bp, next;
4539 int i, buf_count;
4540 int total_writes = 0;
4541 static buf_t flush_table[NFLUSH];
1c79356b 4542
4543 if (whichq < 0 || whichq >= BQUEUES) {
4544 return (0);
4545 }
4546
4547 restart:
4548 lck_mtx_lock(buf_mtxp);
0b4e3aa0 4549
91447636 4550 bp = TAILQ_FIRST(&bufqueues[whichq]);
1c79356b 4551
4552 for (buf_count = 0; bp; bp = next) {
4553 next = bp->b_freelist.tqe_next;
4554
4555 if (bp->b_vp == NULL || bp->b_vp->v_mount != mp) {
4556 continue;
4557 }
b4c24cb9 4558
91447636 4559 if (ISSET(bp->b_flags, B_DELWRI) && !ISSET(bp->b_lflags, BL_BUSY)) {
1c79356b 4560
4561 bremfree_locked(bp);
4562#ifdef JOE_DEBUG
4563 bp->b_owner = current_thread();
4564 bp->b_tag = 7;
4565#endif
4566 SET(bp->b_lflags, BL_BUSY);
4567 buf_busycount++;
4568
4569 flush_table[buf_count] = bp;
4570 buf_count++;
4571 total_writes++;
1c79356b 4572
4573 if (buf_count >= NFLUSH) {
4574 lck_mtx_unlock(buf_mtxp);
1c79356b 4575
91447636 4576 qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp);
1c79356b 4577
4578 for (i = 0; i < buf_count; i++) {
4579 buf_bawrite(flush_table[i]);
4580 }
4581 goto restart;
4582 }
4583 }
4584 }
4585 lck_mtx_unlock(buf_mtxp);
1c79356b 4586
4587 if (buf_count > 0) {
4588 qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp);
1c79356b 4589
4590 for (i = 0; i < buf_count; i++) {
4591 buf_bawrite(flush_table[i]);
4592 }
1c79356b 4593 }
4594
4595 return (total_writes);
1c79356b 4596}
91447636 4597#endif