X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/4452a7af2eac33dbad800bcc91f2399d62c18f53..eb6b6ca394357805f2bdba989abae309f718b4d8:/bsd/vfs/vfs_bio.c

diff --git a/bsd/vfs/vfs_bio.c b/bsd/vfs/vfs_bio.c
index b8dfb6fc8..5ce788691 100644
--- a/bsd/vfs/vfs_bio.c
+++ b/bsd/vfs/vfs_bio.c
@@ -1,8 +1,8 @@
 /*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*	Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
@@ -91,128 +91,139 @@
 #endif /* DIAGNOSTIC */
 #include <kern/task.h>
 #include <kern/zalloc.h>
-#include <kern/lock.h>
+#include <kern/locks.h>
+#include <kern/thread.h>
+
+#include <sys/fslog.h>		/* fslog_io_error() */
+#include <sys/disk.h>		/* dk_error_description_t */
+
+#include <mach/mach_types.h>
+#include <mach/memory_object_types.h>
+#include <kern/sched_prim.h>	/* thread_block() */
 
 #include <vm/vm_kern.h>
+#include <vm/vm_pageout.h>
 
 #include <sys/kdebug.h>
-#include <machine/spl.h>
 
-#if BALANCE_QUEUES
-static __inline__ void bufqinc(int q);
-static __inline__ void bufqdec(int q);
-#endif
+#include <libkern/OSAtomic.h>
+#include <libkern/OSDebug.h>
+#include <sys/ubc_internal.h>
+
+#include <sys/sdt.h>
 
-static int	bcleanbuf(buf_t bp);
-static int	brecover_data(buf_t bp);
+int	bcleanbuf(buf_t bp, boolean_t discard);
+static int	brecover_data(buf_t bp);
 static boolean_t incore(vnode_t vp, daddr64_t blkno);
-static buf_t	incore_locked(vnode_t vp, daddr64_t blkno);
 /* timeout is in msecs */
-static buf_t	getnewbuf(int slpflag, int slptimeo, int *queue);
-static void	bremfree_locked(buf_t bp);
-static void	buf_reassign(buf_t bp, vnode_t newvp);
-static errno_t	buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo);
-static int	buf_iterprepare(vnode_t vp, struct buflists *, int flags);
-static void	buf_itercomplete(vnode_t vp, struct buflists *, int flags);
+static buf_t	getnewbuf(int slpflag, int slptimeo, int *queue);
+static void	bremfree_locked(buf_t bp);
+static void	buf_reassign(buf_t bp, vnode_t newvp);
+static errno_t	buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo);
+static int	buf_iterprepare(vnode_t vp, struct buflists *, int flags);
+static void	buf_itercomplete(vnode_t vp, struct buflists *, int flags);
+static boolean_t buffer_cache_gc(int);
+static buf_t	buf_brelse_shadow(buf_t bp);
+static void	buf_free_meta_store(buf_t bp);
+
+static buf_t	buf_create_shadow_internal(buf_t bp, boolean_t force_copy,
+    uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv);
+
-__private_extern__ int  bdwrite_internal(buf_t, int);
+int  bdwrite_internal(buf_t, int);
+
+extern void disk_conditioner_delay(buf_t, int, int, uint64_t);
 
 /* zone allocated buffer headers */
-static void
bufzoneinit(void); -static void bcleanbuf_thread_init(void); -static void bcleanbuf_thread(void); +static void bufzoneinit(void); +static void bcleanbuf_thread_init(void); +static void bcleanbuf_thread(void); -static zone_t buf_hdr_zone; -static int buf_hdr_count; +static zone_t buf_hdr_zone; +static int buf_hdr_count; /* * Definitions for the buffer hash lists. */ -#define BUFHASH(dvp, lbn) \ +#define BUFHASH(dvp, lbn) \ (&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash]) -LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash; -u_long bufhash; +LIST_HEAD(bufhashhdr, buf) * bufhashtbl, invalhash; +u_long bufhash; + +static buf_t incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp); /* Definitions for the buffer stats. */ struct bufstats bufstats; /* Number of delayed write buffers */ -int nbdwrite = 0; +long nbdwrite = 0; int blaundrycnt = 0; +static int boot_nbuf_headers = 0; +static TAILQ_HEAD(delayqueue, buf) delaybufqueue; static TAILQ_HEAD(ioqueue, buf) iobufqueue; static TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES]; static int needbuffer; static int need_iobuffer; -static lck_grp_t *buf_mtx_grp; -static lck_attr_t *buf_mtx_attr; +static lck_grp_t *buf_mtx_grp; +static lck_attr_t *buf_mtx_attr; static lck_grp_attr_t *buf_mtx_grp_attr; -static lck_mtx_t *iobuffer_mtxp; -static lck_mtx_t *buf_mtxp; +static lck_mtx_t *iobuffer_mtxp; +static lck_mtx_t *buf_mtxp; +static lck_mtx_t *buf_gc_callout; + +static int buf_busycount; + +#define FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE 16 +typedef struct { + void (* callout)(int, void *); + void *context; +} fs_buffer_cache_gc_callout_t; + +fs_buffer_cache_gc_callout_t fs_callouts[FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE] = { {NULL, NULL} }; static __inline__ int buf_timestamp(void) { - struct timeval t; + struct timeval t; microuptime(&t); - return (t.tv_sec); + return t.tv_sec; } /* * Insq/Remq for the buffer free lists. */ -#if BALANCE_QUEUES -#define binsheadfree(bp, dp, whichq) do { \ - TAILQ_INSERT_HEAD(dp, bp, b_freelist); \ - bufqinc((whichq)); \ - (bp)->b_whichq = whichq; \ - (bp)->b_timestamp = buf_timestamp(); \ - } while (0) - -#define binstailfree(bp, dp, whichq) do { \ - TAILQ_INSERT_TAIL(dp, bp, b_freelist); \ - bufqinc((whichq)); \ - (bp)->b_whichq = whichq; \ - (bp)->b_timestamp = buf_timestamp(); \ - } while (0) -#else -#define binsheadfree(bp, dp, whichq) do { \ - TAILQ_INSERT_HEAD(dp, bp, b_freelist); \ - (bp)->b_whichq = whichq; \ - (bp)->b_timestamp = buf_timestamp(); \ - } while (0) - -#define binstailfree(bp, dp, whichq) do { \ - TAILQ_INSERT_TAIL(dp, bp, b_freelist); \ - (bp)->b_whichq = whichq; \ - (bp)->b_timestamp = buf_timestamp(); \ - } while (0) -#endif +#define binsheadfree(bp, dp, whichq) do { \ + TAILQ_INSERT_HEAD(dp, bp, b_freelist); \ + } while (0) +#define binstailfree(bp, dp, whichq) do { \ + TAILQ_INSERT_TAIL(dp, bp, b_freelist); \ + } while (0) -#define BHASHENTCHECK(bp) \ - if ((bp)->b_hash.le_prev != (struct buf **)0xdeadbeef) \ - panic("%x: b_hash.le_prev is not deadbeef", (bp)); +#define BHASHENTCHECK(bp) \ + if ((bp)->b_hash.le_prev != (struct buf **)0xdeadbeef) \ + panic("%p: b_hash.le_prev is not deadbeef", (bp)); -#define BLISTNONE(bp) \ - (bp)->b_hash.le_next = (struct buf *)0; \ +#define BLISTNONE(bp) \ + (bp)->b_hash.le_next = (struct buf *)0; \ (bp)->b_hash.le_prev = (struct buf **)0xdeadbeef; /* * Insq/Remq for the vnode usage lists. 
*/ -#define bufinsvn(bp, dp) LIST_INSERT_HEAD(dp, bp, b_vnbufs) -#define bufremvn(bp) { \ - LIST_REMOVE(bp, b_vnbufs); \ - (bp)->b_vnbufs.le_next = NOLIST; \ +#define bufinsvn(bp, dp) LIST_INSERT_HEAD(dp, bp, b_vnbufs) +#define bufremvn(bp) { \ + LIST_REMOVE(bp, b_vnbufs); \ + (bp)->b_vnbufs.le_next = NOLIST; \ } /* - * Time in seconds before a buffer on a list is - * considered as a stale buffer + * Time in seconds before a buffer on a list is + * considered as a stale buffer */ #define LRU_IS_STALE 120 /* default value for the LRU */ #define AGE_IS_STALE 60 /* default value for the AGE */ @@ -221,361 +232,842 @@ buf_timestamp(void) int lru_is_stale = LRU_IS_STALE; int age_is_stale = AGE_IS_STALE; int meta_is_stale = META_IS_STALE; -static int boot_nbuf = 0; +#define MAXLAUNDRY 10 /* LIST_INSERT_HEAD() with assertions */ static __inline__ void blistenterhead(struct bufhashhdr * head, buf_t bp) { - if ((bp->b_hash.le_next = (head)->lh_first) != NULL) + if ((bp->b_hash.le_next = (head)->lh_first) != NULL) { (head)->lh_first->b_hash.le_prev = &(bp)->b_hash.le_next; + } (head)->lh_first = bp; bp->b_hash.le_prev = &(head)->lh_first; - if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef) + if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef) { panic("blistenterhead: le_prev is deadbeef"); + } } -static __inline__ void +static __inline__ void binshash(buf_t bp, struct bufhashhdr *dp) { #if DIAGNOSTIC - buf_t nbp; + buf_t nbp; #endif /* DIAGNOSTIC */ BHASHENTCHECK(bp); #if DIAGNOSTIC nbp = dp->lh_first; - for(; nbp != NULL; nbp = nbp->b_hash.le_next) { - if(nbp == bp) + for (; nbp != NULL; nbp = nbp->b_hash.le_next) { + if (nbp == bp) { panic("buf already in hashlist"); + } } #endif /* DIAGNOSTIC */ blistenterhead(dp, bp); } -static __inline__ void -bremhash(buf_t bp) +static __inline__ void +bremhash(buf_t bp) { - if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef) + if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef) { panic("bremhash le_prev is deadbeef"); - if (bp->b_hash.le_next == bp) + } + if (bp->b_hash.le_next == bp) { panic("bremhash: next points to self"); + } - if (bp->b_hash.le_next != NULL) + if (bp->b_hash.le_next != NULL) { bp->b_hash.le_next->b_hash.le_prev = bp->b_hash.le_prev; + } *bp->b_hash.le_prev = (bp)->b_hash.le_next; } +/* + * buf_mtxp held. 
+ */
+static __inline__ void
+bmovelaundry(buf_t bp)
+{
+	bp->b_whichq = BQ_LAUNDRY;
+	bp->b_timestamp = buf_timestamp();
+	binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY);
+	blaundrycnt++;
+}
+
+static __inline__ void
+buf_release_credentials(buf_t bp)
+{
+	if (IS_VALID_CRED(bp->b_rcred)) {
+		kauth_cred_unref(&bp->b_rcred);
+	}
+	if (IS_VALID_CRED(bp->b_wcred)) {
+		kauth_cred_unref(&bp->b_wcred);
+	}
+}
+
+
+int
+buf_valid(buf_t bp)
+{
+	if ((bp->b_flags & (B_DONE | B_DELWRI))) {
+		return 1;
+	}
+	return 0;
+}
+
+int
+buf_fromcache(buf_t bp)
+{
+	if ((bp->b_flags & B_CACHE)) {
+		return 1;
+	}
+	return 0;
+}
+
+void
+buf_markinvalid(buf_t bp)
+{
+	SET(bp->b_flags, B_INVAL);
+}
+
+void
+buf_markdelayed(buf_t bp)
+{
+	if (!ISSET(bp->b_flags, B_DELWRI)) {
+		SET(bp->b_flags, B_DELWRI);
+
+		OSAddAtomicLong(1, &nbdwrite);
+		buf_reassign(bp, bp->b_vp);
+	}
+	SET(bp->b_flags, B_DONE);
+}
+
+void
+buf_markclean(buf_t bp)
+{
+	if (ISSET(bp->b_flags, B_DELWRI)) {
+		CLR(bp->b_flags, B_DELWRI);
+
+		OSAddAtomicLong(-1, &nbdwrite);
+		buf_reassign(bp, bp->b_vp);
+	}
+}
+
+void
+buf_markeintr(buf_t bp)
+{
+	SET(bp->b_flags, B_EINTR);
+}
+
+
+void
+buf_markaged(buf_t bp)
+{
+	SET(bp->b_flags, B_AGE);
+}
+
+int
+buf_fua(buf_t bp)
+{
+	if ((bp->b_flags & B_FUA) == B_FUA) {
+		return 1;
+	}
+	return 0;
+}
+
+void
+buf_markfua(buf_t bp)
+{
+	SET(bp->b_flags, B_FUA);
+}
+
+#if CONFIG_PROTECT
+cpx_t
+bufattr_cpx(bufattr_t bap)
+{
+	return bap->ba_cpx;
+}
+
+void
+bufattr_setcpx(bufattr_t bap, cpx_t cpx)
+{
+	bap->ba_cpx = cpx;
+}
+
+void
+buf_setcpoff(buf_t bp, uint64_t foffset)
+{
+	bp->b_attr.ba_cp_file_off = foffset;
+}
+
+uint64_t
+bufattr_cpoff(bufattr_t bap)
+{
+	return bap->ba_cp_file_off;
+}
+
+void
+bufattr_setcpoff(bufattr_t bap, uint64_t foffset)
+{
+	bap->ba_cp_file_off = foffset;
+}
+
+#else // !CONFIG_PROTECT
+
+uint64_t
+bufattr_cpoff(bufattr_t bap __unused)
+{
+	return 0;
+}
+
+void
+bufattr_setcpoff(__unused bufattr_t bap, __unused uint64_t foffset)
+{
+	return;
+}
+
+struct cpx *
+bufattr_cpx(__unused bufattr_t bap)
+{
+	return NULL;
+}
+
+void
+bufattr_setcpx(__unused bufattr_t bap, __unused struct cpx *cpx)
+{
+}
+
+#endif /* !CONFIG_PROTECT */
+
+bufattr_t
+bufattr_alloc()
+{
+	bufattr_t bap;
+	MALLOC(bap, bufattr_t, sizeof(struct bufattr), M_TEMP, M_WAITOK);
+	if (bap == NULL) {
+		return NULL;
+	}
+
+	bzero(bap, sizeof(struct bufattr));
+	return bap;
+}
+
+void
+bufattr_free(bufattr_t bap)
+{
+	if (bap) {
+		FREE(bap, M_TEMP);
+	}
+}
+bufattr_t
+bufattr_dup(bufattr_t bap)
+{
+	bufattr_t new_bufattr;
+	MALLOC(new_bufattr, bufattr_t, sizeof(struct bufattr), M_TEMP, M_WAITOK);
+	if (new_bufattr == NULL) {
+		return NULL;
+	}
+
+	/* Copy the provided one into the new copy */
+	memcpy(new_bufattr, bap, sizeof(struct bufattr));
+	return new_bufattr;
+}
+int
+bufattr_rawencrypted(bufattr_t bap)
+{
+	if ((bap->ba_flags & BA_RAW_ENCRYPTED_IO)) {
+		return 1;
+	}
+	return 0;
+}
 
 int
-buf_valid(buf_t bp) {
+bufattr_throttled(bufattr_t bap)
+{
+	return GET_BUFATTR_IO_TIER(bap);
+}
 
-	if ( (bp->b_flags & (B_DONE | B_DELWRI)) )
-	        return 1;
+int
+bufattr_passive(bufattr_t bap)
+{
+	if ((bap->ba_flags & BA_PASSIVE)) {
+		return 1;
+	}
 	return 0;
 }
 
 int
-buf_fromcache(buf_t bp) {
+bufattr_nocache(bufattr_t bap)
+{
+	if ((bap->ba_flags & BA_NOCACHE)) {
+		return 1;
+	}
+	return 0;
+}
 
-	if ( (bp->b_flags & B_CACHE) )
-	        return 1;
+int
+bufattr_meta(bufattr_t bap)
+{
+	if ((bap->ba_flags & BA_META)) {
+		return 1;
+	}
 	return 0;
 }
 
 void
-buf_markinvalid(buf_t bp) {
-
-        SET(bp->b_flags, B_INVAL);
+bufattr_markmeta(bufattr_t bap) +{ + SET(bap->ba_flags, BA_META); +} + +int +#if !CONFIG_EMBEDDED +bufattr_delayidlesleep(bufattr_t bap) +#else /* !CONFIG_EMBEDDED */ +bufattr_delayidlesleep(__unused bufattr_t bap) +#endif /* !CONFIG_EMBEDDED */ +{ +#if !CONFIG_EMBEDDED + if ((bap->ba_flags & BA_DELAYIDLESLEEP)) { + return 1; + } +#endif /* !CONFIG_EMBEDDED */ + return 0; +} + +bufattr_t +buf_attr(buf_t bp) +{ + return &bp->b_attr; } void -buf_markdelayed(buf_t bp) { - - SET(bp->b_flags, B_DELWRI); - buf_reassign(bp, bp->b_vp); +buf_markstatic(buf_t bp __unused) +{ + SET(bp->b_flags, B_STATICCONTENT); +} + +int +buf_static(buf_t bp) +{ + if ((bp->b_flags & B_STATICCONTENT)) { + return 1; + } + return 0; } void -buf_markeintr(buf_t bp) { - - SET(bp->b_flags, B_EINTR); +bufattr_markgreedymode(bufattr_t bap) +{ + SET(bap->ba_flags, BA_GREEDY_MODE); +} + +int +bufattr_greedymode(bufattr_t bap) +{ + if ((bap->ba_flags & BA_GREEDY_MODE)) { + return 1; + } + return 0; } void -buf_markaged(buf_t bp) { - - SET(bp->b_flags, B_AGE); +bufattr_markisochronous(bufattr_t bap) +{ + SET(bap->ba_flags, BA_ISOCHRONOUS); } -errno_t -buf_error(buf_t bp) { - - return (bp->b_error); +int +bufattr_isochronous(bufattr_t bap) +{ + if ((bap->ba_flags & BA_ISOCHRONOUS)) { + return 1; + } + return 0; } void -buf_seterror(buf_t bp, errno_t error) { +bufattr_markquickcomplete(bufattr_t bap) +{ + SET(bap->ba_flags, BA_QUICK_COMPLETE); +} - if ((bp->b_error = error)) - SET(bp->b_flags, B_ERROR); - else - CLR(bp->b_flags, B_ERROR); +int +bufattr_quickcomplete(bufattr_t bap) +{ + if ((bap->ba_flags & BA_QUICK_COMPLETE)) { + return 1; + } + return 0; } void -buf_setflags(buf_t bp, int32_t flags) { +bufattr_markioscheduled(bufattr_t bap) +{ + SET(bap->ba_flags, BA_IO_SCHEDULED); +} + + +int +bufattr_ioscheduled(bufattr_t bap) +{ + if ((bap->ba_flags & BA_IO_SCHEDULED)) { + return 1; + } + return 0; +} + +errno_t +buf_error(buf_t bp) +{ + return bp->b_error; +} - SET(bp->b_flags, (flags & BUF_X_WRFLAGS)); +void +buf_seterror(buf_t bp, errno_t error) +{ + if ((bp->b_error = error)) { + SET(bp->b_flags, B_ERROR); + } else { + CLR(bp->b_flags, B_ERROR); + } } void -buf_clearflags(buf_t bp, int32_t flags) { +buf_setflags(buf_t bp, int32_t flags) +{ + SET(bp->b_flags, (flags & BUF_X_WRFLAGS)); +} - CLR(bp->b_flags, (flags & BUF_X_WRFLAGS)); +void +buf_clearflags(buf_t bp, int32_t flags) +{ + CLR(bp->b_flags, (flags & BUF_X_WRFLAGS)); } int32_t -buf_flags(buf_t bp) { - - return ((bp->b_flags & BUF_X_RDFLAGS)); +buf_flags(buf_t bp) +{ + return bp->b_flags & BUF_X_RDFLAGS; } void -buf_reset(buf_t bp, int32_t io_flags) { - - CLR(bp->b_flags, (B_READ | B_WRITE | B_ERROR | B_DONE | B_INVAL | B_ASYNC | B_NOCACHE)); +buf_reset(buf_t bp, int32_t io_flags) +{ + CLR(bp->b_flags, (B_READ | B_WRITE | B_ERROR | B_DONE | B_INVAL | B_ASYNC | B_NOCACHE | B_FUA)); SET(bp->b_flags, (io_flags & (B_ASYNC | B_READ | B_WRITE | B_NOCACHE))); bp->b_error = 0; } uint32_t -buf_count(buf_t bp) { - - return (bp->b_bcount); +buf_count(buf_t bp) +{ + return bp->b_bcount; } void -buf_setcount(buf_t bp, uint32_t bcount) { - - bp->b_bcount = bcount; +buf_setcount(buf_t bp, uint32_t bcount) +{ + bp->b_bcount = bcount; } uint32_t -buf_size(buf_t bp) { - - return (bp->b_bufsize); +buf_size(buf_t bp) +{ + return bp->b_bufsize; } void -buf_setsize(buf_t bp, uint32_t bufsize) { - - bp->b_bufsize = bufsize; +buf_setsize(buf_t bp, uint32_t bufsize) +{ + bp->b_bufsize = bufsize; } uint32_t -buf_resid(buf_t bp) { - - return (bp->b_resid); +buf_resid(buf_t bp) +{ + return 
bp->b_resid; } void -buf_setresid(buf_t bp, uint32_t resid) { - - bp->b_resid = resid; +buf_setresid(buf_t bp, uint32_t resid) +{ + bp->b_resid = resid; } uint32_t -buf_dirtyoff(buf_t bp) { - - return (bp->b_dirtyoff); +buf_dirtyoff(buf_t bp) +{ + return bp->b_dirtyoff; } uint32_t -buf_dirtyend(buf_t bp) { - - return (bp->b_dirtyend); +buf_dirtyend(buf_t bp) +{ + return bp->b_dirtyend; } void -buf_setdirtyoff(buf_t bp, uint32_t dirtyoff) { - - bp->b_dirtyoff = dirtyoff; +buf_setdirtyoff(buf_t bp, uint32_t dirtyoff) +{ + bp->b_dirtyoff = dirtyoff; } void -buf_setdirtyend(buf_t bp, uint32_t dirtyend) { - - bp->b_dirtyend = dirtyend; +buf_setdirtyend(buf_t bp, uint32_t dirtyend) +{ + bp->b_dirtyend = dirtyend; } uintptr_t -buf_dataptr(buf_t bp) { - - return (bp->b_datap); +buf_dataptr(buf_t bp) +{ + return bp->b_datap; } void -buf_setdataptr(buf_t bp, uintptr_t data) { - - bp->b_datap = data; +buf_setdataptr(buf_t bp, uintptr_t data) +{ + bp->b_datap = data; } vnode_t -buf_vnode(buf_t bp) { - - return (bp->b_vp); +buf_vnode(buf_t bp) +{ + return bp->b_vp; } void -buf_setvnode(buf_t bp, vnode_t vp) { - - bp->b_vp = vp; +buf_setvnode(buf_t bp, vnode_t vp) +{ + bp->b_vp = vp; } void * buf_callback(buf_t bp) { - if ( !(bp->b_lflags & BL_IOBUF) ) - return ((void *) NULL); - if ( !(bp->b_flags & B_CALL) ) - return ((void *) NULL); + if (!(bp->b_flags & B_CALL)) { + return (void *) NULL; + } - return ((void *)bp->b_iodone); + return (void *)bp->b_iodone; } errno_t buf_setcallback(buf_t bp, void (*callback)(buf_t, void *), void *transaction) { + assert(!ISSET(bp->b_flags, B_FILTER) && ISSET(bp->b_lflags, BL_BUSY)); - if ( !(bp->b_lflags & BL_IOBUF) ) - return (EINVAL); - - if (callback) - bp->b_flags |= (B_CALL | B_ASYNC); - else - bp->b_flags &= ~B_CALL; + if (callback) { + bp->b_flags |= (B_CALL | B_ASYNC); + } else { + bp->b_flags &= ~B_CALL; + } bp->b_transaction = transaction; bp->b_iodone = callback; - return (0); + return 0; } errno_t buf_setupl(buf_t bp, upl_t upl, uint32_t offset) { + if (!(bp->b_lflags & BL_IOBUF)) { + return EINVAL; + } - if ( !(bp->b_lflags & BL_IOBUF) ) - return (EINVAL); - - if (upl) - bp->b_flags |= B_CLUSTER; - else - bp->b_flags &= ~B_CLUSTER; + if (upl) { + bp->b_flags |= B_CLUSTER; + } else { + bp->b_flags &= ~B_CLUSTER; + } bp->b_upl = upl; bp->b_uploffset = offset; - return (0); + return 0; } buf_t buf_clone(buf_t bp, int io_offset, int io_size, void (*iodone)(buf_t, void *), void *arg) { - buf_t io_bp; + buf_t io_bp; - if (io_offset < 0 || io_size < 0) - return (NULL); + if (io_offset < 0 || io_size < 0) { + return NULL; + } - if ((unsigned)(io_offset + io_size) > (unsigned)bp->b_bcount) - return (NULL); + if ((unsigned)(io_offset + io_size) > (unsigned)bp->b_bcount) { + return NULL; + } if (bp->b_flags & B_CLUSTER) { - if (io_offset && ((bp->b_uploffset + io_offset) & PAGE_MASK)) - return (NULL); + if (io_offset && ((bp->b_uploffset + io_offset) & PAGE_MASK)) { + return NULL; + } - if (((bp->b_uploffset + io_offset + io_size) & PAGE_MASK) && ((io_offset + io_size) < bp->b_bcount)) - return (NULL); + if (((bp->b_uploffset + io_offset + io_size) & PAGE_MASK) && ((io_offset + io_size) < bp->b_bcount)) { + return NULL; + } } io_bp = alloc_io_buf(bp->b_vp, 0); - io_bp->b_flags = bp->b_flags & (B_COMMIT_UPL | B_META | B_PAGEIO | B_CLUSTER | B_PHYS | B_ASYNC | B_READ); + io_bp->b_flags = bp->b_flags & (B_COMMIT_UPL | B_META | B_PAGEIO | B_CLUSTER | B_PHYS | B_RAW | B_ASYNC | B_READ | B_FUA); if (iodone) { - io_bp->b_transaction = arg; + io_bp->b_transaction = arg; 
io_bp->b_iodone = iodone; io_bp->b_flags |= B_CALL; } if (bp->b_flags & B_CLUSTER) { - io_bp->b_upl = bp->b_upl; + io_bp->b_upl = bp->b_upl; io_bp->b_uploffset = bp->b_uploffset + io_offset; } else { - io_bp->b_datap = (uintptr_t)(((char *)bp->b_datap) + io_offset); + io_bp->b_datap = (uintptr_t)(((char *)bp->b_datap) + io_offset); } io_bp->b_bcount = io_size; - return (io_bp); + return io_bp; } - -void -buf_setfilter(buf_t bp, void (*filter)(buf_t, void *), void *transaction, - void **old_iodone, void **old_transaction) +int +buf_shadow(buf_t bp) { - if (old_iodone) - *old_iodone = (void *)(bp->b_iodone); - if (old_transaction) - *old_transaction = (void *)(bp->b_transaction); - - bp->b_transaction = transaction; - bp->b_iodone = filter; - bp->b_flags |= B_FILTER; + if (bp->b_lflags & BL_SHADOW) { + return 1; + } + return 0; } -daddr64_t -buf_blkno(buf_t bp) { +buf_t +buf_create_shadow_priv(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg) +{ + return buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 1); +} - return (bp->b_blkno); +buf_t +buf_create_shadow(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg) +{ + return buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 0); } -daddr64_t -buf_lblkno(buf_t bp) { - return (bp->b_lblkno); +static buf_t +buf_create_shadow_internal(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv) +{ + buf_t io_bp; + + KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_START, bp, 0, 0, 0, 0); + + if (!(bp->b_flags & B_META) || (bp->b_lflags & BL_IOBUF)) { + KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, 0, 0, 0, 0); + return NULL; + } +#ifdef BUF_MAKE_PRIVATE + if (bp->b_shadow_ref && bp->b_data_ref == 0 && external_storage == 0) { + panic("buf_create_shadow: %p is in the private state (%d, %d)", bp, bp->b_shadow_ref, bp->b_data_ref); + } +#endif + io_bp = alloc_io_buf(bp->b_vp, priv); + + io_bp->b_flags = bp->b_flags & (B_META | B_ZALLOC | B_ASYNC | B_READ | B_FUA); + io_bp->b_blkno = bp->b_blkno; + io_bp->b_lblkno = bp->b_lblkno; + + if (iodone) { + io_bp->b_transaction = arg; + io_bp->b_iodone = iodone; + io_bp->b_flags |= B_CALL; + } + if (force_copy == FALSE) { + io_bp->b_bcount = bp->b_bcount; + io_bp->b_bufsize = bp->b_bufsize; + + if (external_storage) { + io_bp->b_datap = external_storage; +#ifdef BUF_MAKE_PRIVATE + io_bp->b_data_store = NULL; +#endif + } else { + io_bp->b_datap = bp->b_datap; +#ifdef BUF_MAKE_PRIVATE + io_bp->b_data_store = bp; +#endif + } + *(buf_t *)(&io_bp->b_orig) = bp; + + lck_mtx_lock_spin(buf_mtxp); + + io_bp->b_lflags |= BL_SHADOW; + io_bp->b_shadow = bp->b_shadow; + bp->b_shadow = io_bp; + bp->b_shadow_ref++; + +#ifdef BUF_MAKE_PRIVATE + if (external_storage) { + io_bp->b_lflags |= BL_EXTERNAL; + } else { + bp->b_data_ref++; + } +#endif + lck_mtx_unlock(buf_mtxp); + } else { + if (external_storage) { +#ifdef BUF_MAKE_PRIVATE + io_bp->b_lflags |= BL_EXTERNAL; +#endif + io_bp->b_bcount = bp->b_bcount; + io_bp->b_bufsize = bp->b_bufsize; + io_bp->b_datap = external_storage; + } else { + allocbuf(io_bp, bp->b_bcount); + + io_bp->b_lflags |= BL_IOBUF_ALLOC; + } + bcopy((caddr_t)bp->b_datap, (caddr_t)io_bp->b_datap, bp->b_bcount); + +#ifdef BUF_MAKE_PRIVATE + io_bp->b_data_store = NULL; +#endif + } + KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, io_bp, 0); + + return io_bp; +} + + +#ifdef BUF_MAKE_PRIVATE +errno_t 
+buf_make_private(buf_t bp) +{ + buf_t ds_bp; + buf_t t_bp; + struct buf my_buf; + + KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_START, bp, bp->b_shadow_ref, 0, 0, 0); + + if (bp->b_shadow_ref == 0 || bp->b_data_ref == 0 || ISSET(bp->b_lflags, BL_SHADOW)) { + KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0); + return EINVAL; + } + my_buf.b_flags = B_META; + my_buf.b_datap = (uintptr_t)NULL; + allocbuf(&my_buf, bp->b_bcount); + + bcopy((caddr_t)bp->b_datap, (caddr_t)my_buf.b_datap, bp->b_bcount); + + lck_mtx_lock_spin(buf_mtxp); + + for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) { + if (!ISSET(bp->b_lflags, BL_EXTERNAL)) { + break; + } + } + ds_bp = t_bp; + + if (ds_bp == NULL && bp->b_data_ref) { + panic("buf_make_private: b_data_ref != 0 && ds_bp == NULL"); + } + + if (ds_bp && (bp->b_data_ref == 0 || bp->b_shadow_ref == 0)) { + panic("buf_make_private: ref_count == 0 && ds_bp != NULL"); + } + + if (ds_bp == NULL) { + lck_mtx_unlock(buf_mtxp); + + buf_free_meta_store(&my_buf); + + KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0); + return EINVAL; + } + for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) { + if (!ISSET(t_bp->b_lflags, BL_EXTERNAL)) { + t_bp->b_data_store = ds_bp; + } + } + ds_bp->b_data_ref = bp->b_data_ref; + + bp->b_data_ref = 0; + bp->b_datap = my_buf.b_datap; + + lck_mtx_unlock(buf_mtxp); + + KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, 0, 0); + return 0; } +#endif + void -buf_setblkno(buf_t bp, daddr64_t blkno) { +buf_setfilter(buf_t bp, void (*filter)(buf_t, void *), void *transaction, + void(**old_iodone)(buf_t, void *), void **old_transaction) +{ + assert(ISSET(bp->b_lflags, BL_BUSY)); + + if (old_iodone) { + *old_iodone = bp->b_iodone; + } + if (old_transaction) { + *old_transaction = bp->b_transaction; + } + + bp->b_transaction = transaction; + bp->b_iodone = filter; + if (filter) { + bp->b_flags |= B_FILTER; + } else { + bp->b_flags &= ~B_FILTER; + } +} - bp->b_blkno = blkno; + +daddr64_t +buf_blkno(buf_t bp) +{ + return bp->b_blkno; +} + +daddr64_t +buf_lblkno(buf_t bp) +{ + return bp->b_lblkno; } void -buf_setlblkno(buf_t bp, daddr64_t lblkno) { +buf_setblkno(buf_t bp, daddr64_t blkno) +{ + bp->b_blkno = blkno; +} - bp->b_lblkno = lblkno; +void +buf_setlblkno(buf_t bp, daddr64_t lblkno) +{ + bp->b_lblkno = lblkno; } dev_t -buf_device(buf_t bp) { - - return (bp->b_dev); +buf_device(buf_t bp) +{ + return bp->b_dev; } errno_t -buf_setdevice(buf_t bp, vnode_t vp) { - - if ((vp->v_type != VBLK) && (vp->v_type != VCHR)) - return EINVAL; +buf_setdevice(buf_t bp, vnode_t vp) +{ + if ((vp->v_type != VBLK) && (vp->v_type != VCHR)) { + return EINVAL; + } bp->b_dev = vp->v_rdev; return 0; @@ -583,144 +1075,151 @@ buf_setdevice(buf_t bp, vnode_t vp) { void * -buf_drvdata(buf_t bp) { - - return (bp->b_drvdata); +buf_drvdata(buf_t bp) +{ + return bp->b_drvdata; } void -buf_setdrvdata(buf_t bp, void *drvdata) { - - bp->b_drvdata = drvdata; +buf_setdrvdata(buf_t bp, void *drvdata) +{ + bp->b_drvdata = drvdata; } void * -buf_fsprivate(buf_t bp) { - - return (bp->b_fsprivate); +buf_fsprivate(buf_t bp) +{ + return bp->b_fsprivate; } void -buf_setfsprivate(buf_t bp, void *fsprivate) { - - bp->b_fsprivate = fsprivate; +buf_setfsprivate(buf_t bp, void *fsprivate) +{ + bp->b_fsprivate = fsprivate; } -ucred_t -buf_rcred(buf_t bp) { - - return (bp->b_rcred); +kauth_cred_t +buf_rcred(buf_t bp) +{ + return bp->b_rcred; } -ucred_t -buf_wcred(buf_t bp) { - - return (bp->b_wcred); +kauth_cred_t +buf_wcred(buf_t bp) 
+{ + return bp->b_wcred; } void * -buf_upl(buf_t bp) { - - return (bp->b_upl); +buf_upl(buf_t bp) +{ + return bp->b_upl; } uint32_t -buf_uploffset(buf_t bp) { - - return ((uint32_t)(bp->b_uploffset)); +buf_uploffset(buf_t bp) +{ + return (uint32_t)(bp->b_uploffset); } proc_t -buf_proc(buf_t bp) { - - return (bp->b_proc); +buf_proc(buf_t bp) +{ + return bp->b_proc; } errno_t buf_map(buf_t bp, caddr_t *io_addr) { - buf_t real_bp; - vm_offset_t vaddr; - kern_return_t kret; + buf_t real_bp; + vm_offset_t vaddr; + kern_return_t kret; - if ( !(bp->b_flags & B_CLUSTER)) { - *io_addr = (caddr_t)bp->b_datap; - return (0); + if (!(bp->b_flags & B_CLUSTER)) { + *io_addr = (caddr_t)bp->b_datap; + return 0; } real_bp = (buf_t)(bp->b_real_bp); if (real_bp && real_bp->b_datap) { - /* + /* * b_real_bp is only valid if B_CLUSTER is SET * if it's non-zero, than someone did a cluster_bp call * if the backing physical pages were already mapped * in before the call to cluster_bp (non-zero b_datap), * than we just use that mapping */ - *io_addr = (caddr_t)real_bp->b_datap; - return (0); + *io_addr = (caddr_t)real_bp->b_datap; + return 0; } kret = ubc_upl_map(bp->b_upl, &vaddr); /* Map it in */ if (kret != KERN_SUCCESS) { - *io_addr = 0; + *io_addr = NULL; - return(ENOMEM); + return ENOMEM; } - vaddr += bp->b_uploffset; + vaddr += bp->b_uploffset; *io_addr = (caddr_t)vaddr; - return (0); + return 0; } errno_t buf_unmap(buf_t bp) { - buf_t real_bp; - kern_return_t kret; + buf_t real_bp; + kern_return_t kret; - if ( !(bp->b_flags & B_CLUSTER)) - return (0); + if (!(bp->b_flags & B_CLUSTER)) { + return 0; + } /* * see buf_map for the explanation */ real_bp = (buf_t)(bp->b_real_bp); - if (real_bp && real_bp->b_datap) - return (0); + if (real_bp && real_bp->b_datap) { + return 0; + } - if (bp->b_lflags & BL_IOBUF) { - /* + if ((bp->b_lflags & BL_IOBUF) && + ((bp->b_flags & (B_PAGEIO | B_READ)) != (B_PAGEIO | B_READ))) { + /* + * ignore pageins... the 'right' thing will + * happen due to the way we handle speculative + * clusters... + * * when we commit these pages, we'll hit * it with UPL_COMMIT_INACTIVE which * will clear the reference bit that got * turned on when we touched the mapping */ - bp->b_flags |= B_AGE; + bp->b_flags |= B_AGE; } kret = ubc_upl_unmap(bp->b_upl); - if (kret != KERN_SUCCESS) - return (EINVAL); - return (0); + if (kret != KERN_SUCCESS) { + return EINVAL; + } + return 0; } void -buf_clear(buf_t bp) { - caddr_t baddr; - - if (buf_map(bp, &baddr) == 0) { - bzero(baddr, bp->b_bcount); +buf_clear(buf_t bp) +{ + caddr_t baddr; + + if (buf_map(bp, &baddr) == 0) { + bzero(baddr, bp->b_bcount); buf_unmap(bp); } bp->b_resid = 0; } - - /* * Read or write a buffer that is not contiguous on disk. * buffer is marked done/error at the conclusion @@ -728,14 +1227,14 @@ buf_clear(buf_t bp) { static int buf_strategy_fragmented(vnode_t devvp, buf_t bp, off_t f_offset, size_t contig_bytes) { - vnode_t vp = buf_vnode(bp); - buf_t io_bp; /* For reading or writing a single block */ - int io_direction; - int io_resid; - size_t io_contig_bytes; - daddr64_t io_blkno; - int error = 0; - int bmap_flags; + vnode_t vp = buf_vnode(bp); + buf_t io_bp; /* For reading or writing a single block */ + int io_direction; + int io_resid; + size_t io_contig_bytes; + daddr64_t io_blkno; + int error = 0; + int bmap_flags; /* * save our starting point... the bp was already mapped @@ -748,7 +1247,7 @@ buf_strategy_fragmented(vnode_t devvp, buf_t bp, off_t f_offset, size_t contig_b * i.e. 
this can never be a 'permanent' mapping */ bp->b_blkno = bp->b_lblkno; - + /* * Get an io buffer to do the deblocking */ @@ -756,55 +1255,67 @@ buf_strategy_fragmented(vnode_t devvp, buf_t bp, off_t f_offset, size_t contig_b io_bp->b_lblkno = bp->b_lblkno; io_bp->b_datap = bp->b_datap; - io_resid = bp->b_bcount; - io_direction = bp->b_flags & B_READ; + io_resid = bp->b_bcount; + io_direction = bp->b_flags & B_READ; io_contig_bytes = contig_bytes; - - if (bp->b_flags & B_READ) - bmap_flags = VNODE_READ; - else - bmap_flags = VNODE_WRITE; + + if (bp->b_flags & B_READ) { + bmap_flags = VNODE_READ; + } else { + bmap_flags = VNODE_WRITE; + } for (;;) { - if (io_blkno == -1) - /* + if (io_blkno == -1) { + /* * this is unexepected, but we'll allow for it */ - bzero((caddr_t)io_bp->b_datap, (int)io_contig_bytes); - else { - io_bp->b_bcount = io_contig_bytes; + bzero((caddr_t)io_bp->b_datap, (int)io_contig_bytes); + } else { + io_bp->b_bcount = io_contig_bytes; io_bp->b_bufsize = io_contig_bytes; io_bp->b_resid = io_contig_bytes; io_bp->b_blkno = io_blkno; buf_reset(io_bp, io_direction); + /* - * Call the device to do the I/O and wait for it + * Call the device to do the I/O and wait for it. Make sure the appropriate party is charged for write */ - if ((error = VNOP_STRATEGY(io_bp))) - break; - if ((error = (int)buf_biowait(io_bp))) - break; + + if (!ISSET(bp->b_flags, B_READ)) { + OSAddAtomic(1, &devvp->v_numoutput); + } + + if ((error = VNOP_STRATEGY(io_bp))) { + break; + } + if ((error = (int)buf_biowait(io_bp))) { + break; + } if (io_bp->b_resid) { - io_resid -= (io_contig_bytes - io_bp->b_resid); + io_resid -= (io_contig_bytes - io_bp->b_resid); break; } } - if ((io_resid -= io_contig_bytes) == 0) - break; + if ((io_resid -= io_contig_bytes) == 0) { + break; + } f_offset += io_contig_bytes; io_bp->b_datap += io_contig_bytes; /* * Map the current position to a physical block number */ - if ((error = VNOP_BLOCKMAP(vp, f_offset, io_resid, &io_blkno, &io_contig_bytes, NULL, bmap_flags, NULL))) - break; + if ((error = VNOP_BLOCKMAP(vp, f_offset, io_resid, &io_blkno, &io_contig_bytes, NULL, bmap_flags, NULL))) { + break; + } } buf_free(io_bp); - - if (error) - buf_seterror(bp, error); + + if (error) { + buf_seterror(bp, error); + } bp->b_resid = io_resid; /* * This I/O is now complete @@ -823,13 +1334,21 @@ buf_strategy_fragmented(vnode_t devvp, buf_t bp, off_t f_offset, size_t contig_b errno_t buf_strategy(vnode_t devvp, void *ap) { - buf_t bp = ((struct vnop_strategy_args *)ap)->a_bp; - vnode_t vp = bp->b_vp; - int bmap_flags; - errno_t error; + buf_t bp = ((struct vnop_strategy_args *)ap)->a_bp; + vnode_t vp = bp->b_vp; + int bmap_flags; + errno_t error; +#if CONFIG_DTRACE + int dtrace_io_start_flag = 0; /* We only want to trip the io:::start + * probe once, with the true physical + * block in place (b_blkno) + */ + +#endif - if (vp == NULL || vp->v_type == VCHR || vp->v_type == VBLK) - panic("buf_strategy: b_vp == NULL || vtype == VCHR | VBLK\n"); + if (vp == NULL || vp->v_type == VCHR || vp->v_type == VBLK) { + panic("buf_strategy: b_vp == NULL || vtype == VCHR | VBLK\n"); + } /* * associate the physical device with * with this buf_t even if we don't @@ -837,57 +1356,123 @@ buf_strategy(vnode_t devvp, void *ap) */ bp->b_dev = devvp->v_rdev; - if (bp->b_flags & B_READ) - bmap_flags = VNODE_READ; - else - bmap_flags = VNODE_WRITE; - - if ( !(bp->b_flags & B_CLUSTER)) { + if (bp->b_flags & B_READ) { + bmap_flags = VNODE_READ; + } else { + bmap_flags = VNODE_WRITE; + } - if ( (bp->b_upl) ) { - /* + 
if (!(bp->b_flags & B_CLUSTER)) { + if ((bp->b_upl)) { + /* * we have a UPL associated with this bp * go through cluster_bp which knows how * to deal with filesystem block sizes * that aren't equal to the page size */ - return (cluster_bp(bp)); + DTRACE_IO1(start, buf_t, bp); + return cluster_bp(bp); } if (bp->b_blkno == bp->b_lblkno) { - off_t f_offset; - size_t contig_bytes; - + off_t f_offset; + size_t contig_bytes; + if ((error = VNOP_BLKTOOFF(vp, bp->b_lblkno, &f_offset))) { - buf_seterror(bp, error); + DTRACE_IO1(start, buf_t, bp); + buf_seterror(bp, error); buf_biodone(bp); - return (error); + return error; } + if ((error = VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))) { - buf_seterror(bp, error); + DTRACE_IO1(start, buf_t, bp); + buf_seterror(bp, error); buf_biodone(bp); - return (error); + return error; + } + + DTRACE_IO1(start, buf_t, bp); +#if CONFIG_DTRACE + dtrace_io_start_flag = 1; +#endif /* CONFIG_DTRACE */ + + if ((bp->b_blkno == -1) || (contig_bytes == 0)) { + /* Set block number to force biodone later */ + bp->b_blkno = -1; + buf_clear(bp); + } else if ((long)contig_bytes < bp->b_bcount) { + return buf_strategy_fragmented(devvp, bp, f_offset, contig_bytes); } - if (bp->b_blkno == -1) - buf_clear(bp); - else if ((long)contig_bytes < bp->b_bcount) - return (buf_strategy_fragmented(devvp, bp, f_offset, contig_bytes)); } + +#if CONFIG_DTRACE + if (dtrace_io_start_flag == 0) { + DTRACE_IO1(start, buf_t, bp); + dtrace_io_start_flag = 1; + } +#endif /* CONFIG_DTRACE */ + if (bp->b_blkno == -1) { - buf_biodone(bp); - return (0); + buf_biodone(bp); + return 0; + } + } + +#if CONFIG_DTRACE + if (dtrace_io_start_flag == 0) { + DTRACE_IO1(start, buf_t, bp); + } +#endif /* CONFIG_DTRACE */ + +#if CONFIG_PROTECT + /* Capture f_offset in the bufattr*/ + cpx_t cpx = bufattr_cpx(buf_attr(bp)); + if (cpx) { + /* No need to go here for older EAs */ + if (cpx_use_offset_for_iv(cpx) && !cpx_synthetic_offset_for_iv(cpx)) { + off_t f_offset; + if ((error = VNOP_BLKTOOFF(bp->b_vp, bp->b_lblkno, &f_offset))) { + return error; + } + + /* + * Attach the file offset to this buffer. The + * bufattr attributes will be passed down the stack + * until they reach the storage driver (whether + * IOFlashStorage, ASP, or IONVMe). The driver + * will retain the offset in a local variable when it + * issues its I/Os to the NAND controller. + * + * Note that LwVM may end up splitting this I/O + * into sub-I/Os if it crosses a chunk boundary. In this + * case, LwVM will update this field when it dispatches + * each I/O to IOFlashStorage. But from our perspective + * we have only issued a single I/O. + * + * In the case of APFS we do not bounce through another + * intermediate layer (such as CoreStorage). APFS will + * issue the I/Os directly to the block device / IOMedia + * via buf_strategy on the specfs node. + */ + buf_setcpoff(bp, f_offset); + CP_DEBUG((CPDBG_OFFSET_IO | DBG_FUNC_NONE), (uint32_t) f_offset, (uint32_t) bp->b_lblkno, (uint32_t) bp->b_blkno, (uint32_t) bp->b_bcount, 0); } } +#endif + /* * we can issue the I/O because... 
* either B_CLUSTER is set which * means that the I/O is properly set * up to be a multiple of the page size, or * we were able to successfully set up the - * phsyical block mapping + * physical block mapping */ - return (VOCALL(devvp->v_op, VOFFSET(vnop_strategy), ap)); + error = VOCALL(devvp->v_op, VOFFSET(vnop_strategy), ap); + DTRACE_FSINFO(strategy, vnode_t, vp); + return error; } @@ -895,68 +1480,113 @@ buf_strategy(vnode_t devvp, void *ap) buf_t buf_alloc(vnode_t vp) { - return(alloc_io_buf(vp, 0)); + return alloc_io_buf(vp, is_vm_privileged()); } void -buf_free(buf_t bp) { - - free_io_buf(bp); +buf_free(buf_t bp) +{ + free_io_buf(bp); } +/* + * iterate buffers for the specified vp. + * if BUF_SCAN_DIRTY is set, do the dirty list + * if BUF_SCAN_CLEAN is set, do the clean list + * if neither flag is set, default to BUF_SCAN_DIRTY + * if BUF_NOTIFY_BUSY is set, call the callout function using a NULL bp for busy pages + */ + +struct buf_iterate_info_t { + int flag; + struct buflists *listhead; +}; void -buf_iterate(vnode_t vp, int (*callout)(buf_t, void *), int flags, void *arg) { - buf_t bp; - int retval; - struct buflists local_iterblkhd; - int lock_flags = BAC_NOWAIT | BAC_REMOVE; +buf_iterate(vnode_t vp, int (*callout)(buf_t, void *), int flags, void *arg) +{ + buf_t bp; + int retval; + struct buflists local_iterblkhd; + int lock_flags = BAC_NOWAIT | BAC_REMOVE; + int notify_busy = flags & BUF_NOTIFY_BUSY; + struct buf_iterate_info_t list[2]; + int num_lists, i; + + if (flags & BUF_SKIP_LOCKED) { + lock_flags |= BAC_SKIP_LOCKED; + } + if (flags & BUF_SKIP_NONLOCKED) { + lock_flags |= BAC_SKIP_NONLOCKED; + } - if (flags & BUF_SKIP_LOCKED) - lock_flags |= BAC_SKIP_LOCKED; - if (flags & BUF_SKIP_NONLOCKED) - lock_flags |= BAC_SKIP_NONLOCKED; + if (!(flags & (BUF_SCAN_DIRTY | BUF_SCAN_CLEAN))) { + flags |= BUF_SCAN_DIRTY; + } - lck_mtx_lock(buf_mtxp); - - if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY)) { - lck_mtx_unlock(buf_mtxp); - return; + num_lists = 0; + + if (flags & BUF_SCAN_DIRTY) { + list[num_lists].flag = VBI_DIRTY; + list[num_lists].listhead = &vp->v_dirtyblkhd; + num_lists++; } - while (!LIST_EMPTY(&local_iterblkhd)) { - bp = LIST_FIRST(&local_iterblkhd); - LIST_REMOVE(bp, b_vnbufs); - LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs); + if (flags & BUF_SCAN_CLEAN) { + list[num_lists].flag = VBI_CLEAN; + list[num_lists].listhead = &vp->v_cleanblkhd; + num_lists++; + } + + for (i = 0; i < num_lists; i++) { + lck_mtx_lock(buf_mtxp); - if (buf_acquire_locked(bp, lock_flags, 0, 0)) - continue; + if (buf_iterprepare(vp, &local_iterblkhd, list[i].flag)) { + lck_mtx_unlock(buf_mtxp); + continue; + } + while (!LIST_EMPTY(&local_iterblkhd)) { + bp = LIST_FIRST(&local_iterblkhd); + LIST_REMOVE(bp, b_vnbufs); + LIST_INSERT_HEAD(list[i].listhead, bp, b_vnbufs); - lck_mtx_unlock(buf_mtxp); + if (buf_acquire_locked(bp, lock_flags, 0, 0)) { + if (notify_busy) { + bp = NULL; + } else { + continue; + } + } + + lck_mtx_unlock(buf_mtxp); - retval = callout(bp, arg); + retval = callout(bp, arg); - switch (retval) { - case BUF_RETURNED: - buf_brelse(bp); - break; - case BUF_CLAIMED: - break; - case BUF_RETURNED_DONE: - buf_brelse(bp); + switch (retval) { + case BUF_RETURNED: + if (bp) { + buf_brelse(bp); + } + break; + case BUF_CLAIMED: + break; + case BUF_RETURNED_DONE: + if (bp) { + buf_brelse(bp); + } + lck_mtx_lock(buf_mtxp); + goto out; + case BUF_CLAIMED_DONE: + lck_mtx_lock(buf_mtxp); + goto out; + } lck_mtx_lock(buf_mtxp); - goto out; - case BUF_CLAIMED_DONE: - 
lck_mtx_lock(buf_mtxp); - goto out; - } - lck_mtx_lock(buf_mtxp); - } + } /* while list has more nodes */ out: - buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY); - - lck_mtx_unlock(buf_mtxp); -} + buf_itercomplete(vp, &local_iterblkhd, list[i].flag); + lck_mtx_unlock(buf_mtxp); + } /* for each list */ +} /* buf_iterate */ /* @@ -965,31 +1595,39 @@ out: int buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo) { - buf_t bp; - int error = 0; - int must_rescan = 1; - struct buflists local_iterblkhd; + buf_t bp; + int aflags; + int error = 0; + int must_rescan = 1; + struct buflists local_iterblkhd; + + + if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd)) { + return 0; + } lck_mtx_lock(buf_mtxp); for (;;) { - if (must_rescan == 0) - /* + if (must_rescan == 0) { + /* * the lists may not be empty, but all that's left at this * point are metadata or B_LOCKED buffers which are being * skipped... we know this because we made it through both * the clean and dirty lists without dropping buf_mtxp... * each time we drop buf_mtxp we bump "must_rescan" */ - break; - if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd)) - break; + break; + } + if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd)) { + break; + } must_rescan = 0; /* * iterate the clean list */ if (buf_iterprepare(vp, &local_iterblkhd, VBI_CLEAN)) { - goto try_dirty_list; + goto try_dirty_list; } while (!LIST_EMPTY(&local_iterblkhd)) { bp = LIST_FIRST(&local_iterblkhd); @@ -1000,25 +1638,33 @@ buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo) /* * some filesystems distinguish meta data blocks with a negative logical block # */ - if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META))) + if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META))) { continue; + } + + aflags = BAC_REMOVE; + + if (!(flags & BUF_INVALIDATE_LOCKED)) { + aflags |= BAC_SKIP_LOCKED; + } - if ( (error = (int)buf_acquire_locked(bp, BAC_REMOVE | BAC_SKIP_LOCKED, slpflag, slptimeo)) ) { - if (error == EDEADLK) - /* - * this buffer was marked B_LOCKED... + if ((error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo))) { + if (error == EDEADLK) { + /* + * this buffer was marked B_LOCKED... * we didn't drop buf_mtxp, so we * we don't need to rescan */ - continue; - if (error == EAGAIN) { - /* + continue; + } + if (error == EAGAIN) { + /* * found a busy buffer... we blocked and * dropped buf_mtxp, so we're going to * need to rescan after this pass is completed */ - must_rescan++; - continue; + must_rescan++; + continue; } /* * got some kind of 'real' error out of the msleep @@ -1027,10 +1673,15 @@ buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo) buf_itercomplete(vp, &local_iterblkhd, VBI_CLEAN); lck_mtx_unlock(buf_mtxp); - return (error); + return error; } lck_mtx_unlock(buf_mtxp); + if (bp->b_flags & B_LOCKED) { + KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 0, 0); + } + + CLR(bp->b_flags, B_LOCKED); SET(bp->b_flags, B_INVAL); buf_brelse(bp); @@ -1062,25 +1713,33 @@ try_dirty_list: /* * some filesystems distinguish meta data blocks with a negative logical block # */ - if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META))) + if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META))) { continue; + } - if ( (error = (int)buf_acquire_locked(bp, BAC_REMOVE | BAC_SKIP_LOCKED, slpflag, slptimeo)) ) { - if (error == EDEADLK) - /* - * this buffer was marked B_LOCKED... 
+ aflags = BAC_REMOVE; + + if (!(flags & BUF_INVALIDATE_LOCKED)) { + aflags |= BAC_SKIP_LOCKED; + } + + if ((error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo))) { + if (error == EDEADLK) { + /* + * this buffer was marked B_LOCKED... * we didn't drop buf_mtxp, so we * we don't need to rescan */ - continue; - if (error == EAGAIN) { - /* + continue; + } + if (error == EAGAIN) { + /* * found a busy buffer... we blocked and * dropped buf_mtxp, so we're going to * need to rescan after this pass is completed */ - must_rescan++; - continue; + must_rescan++; + continue; } /* * got some kind of 'real' error out of the msleep @@ -1089,16 +1748,22 @@ try_dirty_list: buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY); lck_mtx_unlock(buf_mtxp); - return (error); + return error; } lck_mtx_unlock(buf_mtxp); + if (bp->b_flags & B_LOCKED) { + KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 1, 0); + } + + CLR(bp->b_flags, B_LOCKED); SET(bp->b_flags, B_INVAL); - if (ISSET(bp->b_flags, B_DELWRI) && (flags & BUF_WRITE_DATA)) + if (ISSET(bp->b_flags, B_DELWRI) && (flags & BUF_WRITE_DATA)) { (void) VNOP_BWRITE(bp); - else + } else { buf_brelse(bp); + } lck_mtx_lock(buf_mtxp); /* @@ -1113,35 +1778,61 @@ try_dirty_list: } lck_mtx_unlock(buf_mtxp); - return (0); + return 0; } void -buf_flushdirtyblks(vnode_t vp, int wait, int flags, char *msg) { - buf_t bp; - int writes_issued = 0; - errno_t error; - int busy = 0; - struct buflists local_iterblkhd; - int lock_flags = BAC_NOWAIT | BAC_REMOVE; - - if (flags & BUF_SKIP_LOCKED) - lock_flags |= BAC_SKIP_LOCKED; - if (flags & BUF_SKIP_NONLOCKED) - lock_flags |= BAC_SKIP_NONLOCKED; -loop: - lck_mtx_lock(buf_mtxp); - - if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY) == 0) { - while (!LIST_EMPTY(&local_iterblkhd)) { +buf_flushdirtyblks(vnode_t vp, int wait, int flags, const char *msg) +{ + (void) buf_flushdirtyblks_skipinfo(vp, wait, flags, msg); + return; +} + +int +buf_flushdirtyblks_skipinfo(vnode_t vp, int wait, int flags, const char *msg) +{ + buf_t bp; + int writes_issued = 0; + errno_t error; + int busy = 0; + struct buflists local_iterblkhd; + int lock_flags = BAC_NOWAIT | BAC_REMOVE; + int any_locked = 0; + + if (flags & BUF_SKIP_LOCKED) { + lock_flags |= BAC_SKIP_LOCKED; + } + if (flags & BUF_SKIP_NONLOCKED) { + lock_flags |= BAC_SKIP_NONLOCKED; + } +loop: + lck_mtx_lock(buf_mtxp); + + if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY) == 0) { + while (!LIST_EMPTY(&local_iterblkhd)) { bp = LIST_FIRST(&local_iterblkhd); LIST_REMOVE(bp, b_vnbufs); LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs); - - if ((error = buf_acquire_locked(bp, lock_flags, 0, 0)) == EBUSY) - busy++; - if (error) - continue; + + if ((error = buf_acquire_locked(bp, lock_flags, 0, 0)) == EBUSY) { + busy++; + } + if (error) { + /* + * If we passed in BUF_SKIP_LOCKED or BUF_SKIP_NONLOCKED, + * we may want to do somethign differently if a locked or unlocked + * buffer was encountered (depending on the arg specified). + * In this case, we know that one of those two was set, and the + * buf acquisition failed above. + * + * If it failed with EDEADLK, then save state which can be emitted + * later on to the caller. Most callers should not care. + */ + if (error == EDEADLK) { + any_locked++; + } + continue; + } lck_mtx_unlock(buf_mtxp); bp->b_flags &= ~B_LOCKED; @@ -1150,10 +1841,11 @@ loop: * Wait for I/O associated with indirect blocks to complete, * since there is no way to quickly wait for them below. 
*/ - if ((bp->b_vp == vp) || (wait == 0)) - (void) buf_bawrite(bp); - else - (void) VNOP_BWRITE(bp); + if ((bp->b_vp == vp) || (wait == 0)) { + (void) buf_bawrite(bp); + } else { + (void) VNOP_BWRITE(bp); + } writes_issued++; lck_mtx_lock(buf_mtxp); @@ -1161,25 +1853,25 @@ loop: buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY); } lck_mtx_unlock(buf_mtxp); - + if (wait) { - (void)vnode_waitforwrites(vp, 0, 0, 0, msg); + (void)vnode_waitforwrites(vp, 0, 0, 0, msg); if (vp->v_dirtyblkhd.lh_first && busy) { - /* + /* * we had one or more BUSY buffers on * the dirtyblock list... most likely * these are due to delayed writes that * were moved to the bclean queue but * have not yet been 'written'. - * if we issued some writes on the + * if we issued some writes on the * previous pass, we try again immediately * if we didn't, we'll sleep for some time * to allow the state to change... */ - if (writes_issued == 0) { - (void)tsleep((caddr_t)&vp->v_numoutput, - PRIBIO + 1, "vnode_flushdirtyblks", hz/20); + if (writes_issued == 0) { + (void)tsleep((caddr_t)&vp->v_numoutput, + PRIBIO + 1, "vnode_flushdirtyblks", hz / 20); } writes_issued = 0; busy = 0; @@ -1187,6 +1879,8 @@ loop: goto loop; } } + + return any_locked; } @@ -1199,26 +1893,27 @@ buf_iterprepare(vnode_t vp, struct buflists *iterheadp, int flags) { struct buflists * listheadp; - if (flags & VBI_DIRTY) + if (flags & VBI_DIRTY) { listheadp = &vp->v_dirtyblkhd; - else + } else { listheadp = &vp->v_cleanblkhd; - - while (vp->v_iterblkflags & VBI_ITER) { - vp->v_iterblkflags |= VBI_ITERWANT; - msleep(&vp->v_iterblkflags, buf_mtxp, 0, "buf_iterprepare", 0); + } + + while (vp->v_iterblkflags & VBI_ITER) { + vp->v_iterblkflags |= VBI_ITERWANT; + msleep(&vp->v_iterblkflags, buf_mtxp, 0, "buf_iterprepare", NULL); } if (LIST_EMPTY(listheadp)) { - LIST_INIT(iterheadp); - return(EINVAL); + LIST_INIT(iterheadp); + return EINVAL; } vp->v_iterblkflags |= VBI_ITER; iterheadp->lh_first = listheadp->lh_first; - listheadp->lh_first->b_vnbufs.le_prev = &iterheadp->lh_first; + listheadp->lh_first->b_vnbufs.le_prev = &iterheadp->lh_first; LIST_INIT(listheadp); - return(0); + return 0; } /* @@ -1231,10 +1926,11 @@ buf_itercomplete(vnode_t vp, struct buflists *iterheadp, int flags) struct buflists * listheadp; buf_t bp; - if (flags & VBI_DIRTY) + if (flags & VBI_DIRTY) { listheadp = &vp->v_dirtyblkhd; - else + } else { listheadp = &vp->v_cleanblkhd; + } while (!LIST_EMPTY(iterheadp)) { bp = LIST_FIRST(iterheadp); @@ -1243,7 +1939,7 @@ buf_itercomplete(vnode_t vp, struct buflists *iterheadp, int flags) } vp->v_iterblkflags &= ~VBI_ITER; - if (vp->v_iterblkflags & VBI_ITERWANT) { + if (vp->v_iterblkflags & VBI_ITERWANT) { vp->v_iterblkflags &= ~VBI_ITERWANT; wakeup(&vp->v_iterblkflags); } @@ -1254,8 +1950,21 @@ static void bremfree_locked(buf_t bp) { struct bqueues *dp = NULL; - int whichq = -1; + int whichq; + + whichq = bp->b_whichq; + if (whichq == -1) { + if (bp->b_shadow_ref == 0) { + panic("bremfree_locked: %p not on freelist", bp); + } + /* + * there are clones pointing to 'bp'... + * therefore, it was not put on a freelist + * when buf_brelse was last called on 'bp' + */ + return; + } /* * We only calculate the head of the freelist when removing * the last element of the list as that is the only time that @@ -1264,60 +1973,58 @@ bremfree_locked(buf_t bp) * NB: This makes an assumption about how tailq's are implemented. 
*/ if (bp->b_freelist.tqe_next == NULL) { - for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) - if (dp->tqh_last == &bp->b_freelist.tqe_next) - break; - if (dp == &bufqueues[BQUEUES]) + dp = &bufqueues[whichq]; + + if (dp->tqh_last != &bp->b_freelist.tqe_next) { panic("bremfree: lost tail"); + } } TAILQ_REMOVE(dp, bp, b_freelist); - whichq = bp->b_whichq; -#if BALANCE_QUEUES - bufqdec(whichq); -#endif + + if (whichq == BQ_LAUNDRY) { + blaundrycnt--; + } + bp->b_whichq = -1; - bp->b_timestamp = 0; + bp->b_timestamp = 0; + bp->b_shadow = 0; } /* * Associate a buffer with a vnode. + * buf_mtxp must be locked on entry */ static void -bgetvp(vnode_t vp, buf_t bp) +bgetvp_locked(vnode_t vp, buf_t bp) { + if (bp->b_vp != vp) { + panic("bgetvp_locked: not free"); + } - if (bp->b_vp != vp) - panic("bgetvp: not free"); - - if (vp->v_type == VBLK || vp->v_type == VCHR) + if (vp->v_type == VBLK || vp->v_type == VCHR) { bp->b_dev = vp->v_rdev; - else + } else { bp->b_dev = NODEV; + } /* * Insert onto list for new vnode. */ - lck_mtx_lock(buf_mtxp); bufinsvn(bp, &vp->v_cleanblkhd); - lck_mtx_unlock(buf_mtxp); } /* * Disassociate a buffer from a vnode. + * buf_mtxp must be locked on entry */ static void -brelvp(buf_t bp) +brelvp_locked(buf_t bp) { - vnode_t vp; - - if ((vp = bp->b_vp) == (vnode_t)NULL) - panic("brelvp: NULL vp"); /* * Delete from old vnode list, if on one. */ - lck_mtx_lock(buf_mtxp); - if (bp->b_vnbufs.le_next != NOLIST) + if (bp->b_vnbufs.le_next != NOLIST) { bufremvn(bp); - lck_mtx_unlock(buf_mtxp); + } bp->b_vp = (vnode_t)NULL; } @@ -1330,27 +2037,29 @@ brelvp(buf_t bp) static void buf_reassign(buf_t bp, vnode_t newvp) { - register struct buflists *listheadp; + struct buflists *listheadp; if (newvp == NULL) { printf("buf_reassign: NULL"); return; } - lck_mtx_lock(buf_mtxp); + lck_mtx_lock_spin(buf_mtxp); /* * Delete from old vnode list, if on one. */ - if (bp->b_vnbufs.le_next != NOLIST) + if (bp->b_vnbufs.le_next != NOLIST) { bufremvn(bp); + } /* * If dirty, put on list of dirty buffers; * otherwise insert onto list of clean buffers. */ - if (ISSET(bp->b_flags, B_DELWRI)) + if (ISSET(bp->b_flags, B_DELWRI)) { listheadp = &newvp->v_dirtyblkhd; - else + } else { listheadp = &newvp->v_cleanblkhd; + } bufinsvn(bp, listheadp); lck_mtx_unlock(buf_mtxp); @@ -1373,57 +2082,52 @@ bufhdrinit(buf_t bp) * Initialize buffers and hash links for buffers. 
*/ __private_extern__ void -bufinit() +bufinit(void) { - buf_t bp; + buf_t bp; struct bqueues *dp; - int i; - int metabuf; - long whichq; + int i; - nbuf = 0; + nbuf_headers = 0; /* Initialize the buffer queues ('freelists') and the hash table */ - for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) + for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) { TAILQ_INIT(dp); + } bufhashtbl = hashinit(nbuf_hashelements, M_CACHE, &bufhash); - metabuf = max_nbuf_headers/8; /* reserved for meta buf */ + buf_busycount = 0; /* Initialize the buffer headers */ for (i = 0; i < max_nbuf_headers; i++) { - nbuf++; - bp = &buf[i]; + nbuf_headers++; + bp = &buf_headers[i]; bufhdrinit(bp); - /* - * metabuf buffer headers on the meta-data list and - * rest of the buffer headers on the empty list - */ - if (--metabuf) - whichq = BQ_META; - else - whichq = BQ_EMPTY; - BLISTNONE(bp); - dp = &bufqueues[whichq]; - binsheadfree(bp, dp, whichq); + dp = &bufqueues[BQ_EMPTY]; + bp->b_whichq = BQ_EMPTY; + bp->b_timestamp = buf_timestamp(); + binsheadfree(bp, dp, BQ_EMPTY); binshash(bp, &invalhash); } + boot_nbuf_headers = nbuf_headers; - boot_nbuf = nbuf; + TAILQ_INIT(&iobufqueue); + TAILQ_INIT(&delaybufqueue); - for (; i < nbuf + niobuf; i++) { - bp = &buf[i]; + for (; i < nbuf_headers + niobuf_headers; i++) { + bp = &buf_headers[i]; bufhdrinit(bp); + bp->b_whichq = -1; binsheadfree(bp, &iobufqueue, -1); } - /* + /* * allocate lock group attribute and group */ - buf_mtx_grp_attr = lck_grp_attr_alloc_init(); + buf_mtx_grp_attr = lck_grp_attr_alloc_init(); buf_mtx_grp = lck_grp_alloc_init("buffer cache", buf_mtx_grp_attr); - + /* * allocate the lock attribute */ @@ -1432,14 +2136,21 @@ bufinit() /* * allocate and initialize mutex's for the buffer and iobuffer pools */ - buf_mtxp = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr); - iobuffer_mtxp = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr); + buf_mtxp = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr); + iobuffer_mtxp = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr); + buf_gc_callout = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr); - if (iobuffer_mtxp == NULL) - panic("couldn't create iobuffer mutex"); + if (iobuffer_mtxp == NULL) { + panic("couldn't create iobuffer mutex"); + } + + if (buf_mtxp == NULL) { + panic("couldn't create buf mutex"); + } - if (buf_mtxp == NULL) - panic("couldn't create buf mutex"); + if (buf_gc_callout == NULL) { + panic("couldn't create buf_gc_callout mutex"); + } /* * allocate and initialize cluster specific global locks... 
@@ -1447,7 +2158,7 @@ bufinit()
 	cluster_init();
 
 	printf("using %d buffer headers and %d cluster IO buffer headers\n",
-		nbuf, niobuf);
+	    nbuf_headers, niobuf_headers);
 
 	/* Set up zones used by the buffer cache */
 	bufzoneinit();
@@ -1455,19 +2166,80 @@
 	/* start the bcleanbuf() thread */
 	bcleanbuf_thread_init();
 
-#if BALANCE_QUEUES
-	{
-	static void bufq_balance_thread_init();
-	/* create a thread to do dynamic buffer queue balancing */
-	bufq_balance_thread_init();
+	/* Register a callout for relieving vm pressure */
+	if (vm_set_buffer_cleanup_callout(buffer_cache_gc) != KERN_SUCCESS) {
+		panic("Couldn't register buffer cache callout for vm pressure!\n");
+	}
+}
+
+/*
+ * Zones for the meta data buffers
+ */
+
+#define MINMETA 512
+#define MAXMETA 16384
+
+struct meta_zone_entry {
+	zone_t mz_zone;
+	vm_size_t mz_size;
+	vm_size_t mz_max;
+	const char *mz_name;
+};
+
+struct meta_zone_entry meta_zones[] = {
+	{.mz_zone = NULL, .mz_size = (MINMETA * 1), .mz_max = 128 * (MINMETA * 1), .mz_name = "buf.512" },
+	{.mz_zone = NULL, .mz_size = (MINMETA * 2), .mz_max = 64 * (MINMETA * 2), .mz_name = "buf.1024" },
+	{.mz_zone = NULL, .mz_size = (MINMETA * 4), .mz_max = 16 * (MINMETA * 4), .mz_name = "buf.2048" },
+	{.mz_zone = NULL, .mz_size = (MINMETA * 8), .mz_max = 512 * (MINMETA * 8), .mz_name = "buf.4096" },
+	{.mz_zone = NULL, .mz_size = (MINMETA * 16), .mz_max = 512 * (MINMETA * 16), .mz_name = "buf.8192" },
+	{.mz_zone = NULL, .mz_size = (MINMETA * 32), .mz_max = 512 * (MINMETA * 32), .mz_name = "buf.16384" },
+	{.mz_zone = NULL, .mz_size = 0, .mz_max = 0, .mz_name = "" } /* End */
+};
+
+/*
+ * Initialize the meta data zones
+ */
+static void
+bufzoneinit(void)
+{
+	int i;
+
+	for (i = 0; meta_zones[i].mz_size != 0; i++) {
+		meta_zones[i].mz_zone =
+		    zinit(meta_zones[i].mz_size,
+		    meta_zones[i].mz_max,
+		    PAGE_SIZE,
+		    meta_zones[i].mz_name);
+		zone_change(meta_zones[i].mz_zone, Z_CALLERACCT, FALSE);
+	}
+	buf_hdr_zone = zinit(sizeof(struct buf), 32, PAGE_SIZE, "buf headers");
+	zone_change(buf_hdr_zone, Z_CALLERACCT, FALSE);
+}
+
+static __inline__ zone_t
+getbufzone(size_t size)
+{
+	int i;
+
+	if ((size % 512) || (size < MINMETA) || (size > MAXMETA)) {
+		panic("getbufzone: incorrect size = %lu", size);
 	}
-#endif /* notyet */
+
+	for (i = 0; meta_zones[i].mz_size != 0; i++) {
+		if (meta_zones[i].mz_size >= size) {
+			break;
+		}
+	}
+
+	return meta_zones[i].mz_zone;
 }
+
+
 static struct buf *
-bio_doread(vnode_t vp, daddr64_t blkno, int size, ucred_t cred, int async, int queuetype)
+bio_doread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, int async, int queuetype)
 {
-	buf_t bp;
+	buf_t bp;
 
 	bp = buf_getblk(vp, blkno, size, 0, 0, queuetype);
@@ -1493,17 +2265,18 @@ bio_doread(vnode_t vp, daddr64_t blkno, int size, ucred_t cred, int async, int q
 	trace(TR_BREADMISS, pack(vp, size), blkno);
 
 	/* Pay for the read. */
-	if (p && p->p_stats)
-		p->p_stats->p_ru.ru_inblock++;		/* XXX */
+	if (p && p->p_stats) {
+		OSIncrementAtomicLong(&p->p_stats->p_ru.ru_inblock);    /* XXX */
+	}
 
 	if (async) {
-	        /*
+		/*
 		 * since we asked for an ASYNC I/O
 		 * the biodone will do the brelse
 		 * we don't want to pass back a bp
 		 * that we don't 'own'
 		 */
-	        bp = NULL;
+		bp = NULL;
 	}
 	} else if (async) {
 		buf_brelse(bp);
@@ -1512,19 +2285,19 @@ bio_doread(vnode_t vp, daddr64_t blkno, int size, ucred_t cred, int async, int q
 	trace(TR_BREADHIT, pack(vp, size), blkno);
 
-	return (bp);
+	return bp;
 }
 
 /*
- * Perform the reads for buf_breadn() and buf_meta_breadn(). 
- * Trivial modification to the breada algorithm presented in Bach (p.55). + * Perform the reads for buf_breadn() and buf_meta_breadn(). + * Trivial modification to the breada algorithm presented in Bach (p.55). */ static errno_t -do_breadn_for_type(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, - int nrablks, ucred_t cred, buf_t *bpp, int queuetype) +do_breadn_for_type(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, + int nrablks, kauth_cred_t cred, buf_t *bpp, int queuetype) { - buf_t bp; - int i; + buf_t bp; + int i; bp = *bpp = bio_doread(vp, blkno, size, cred, 0, queuetype); @@ -1533,15 +2306,16 @@ do_breadn_for_type(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int */ for (i = 0; i < nrablks; i++) { /* If it's in the cache, just go on to next one. */ - if (incore(vp, rablks[i])) + if (incore(vp, rablks[i])) { continue; + } /* Get a buffer for the read-ahead block */ (void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC, queuetype); } /* Otherwise, we had to start a read for it; wait until it's valid. */ - return (buf_biowait(bp)); + return buf_biowait(bp); } @@ -1550,15 +2324,15 @@ do_breadn_for_type(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int * This algorithm described in Bach (p.54). */ errno_t -buf_bread(vnode_t vp, daddr64_t blkno, int size, ucred_t cred, buf_t *bpp) +buf_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp) { - buf_t bp; + buf_t bp; /* Get buffer for block. */ bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_READ); /* Wait for the read to complete, and return result. */ - return (buf_biowait(bp)); + return buf_biowait(bp); } /* @@ -1566,24 +2340,24 @@ buf_bread(vnode_t vp, daddr64_t blkno, int size, ucred_t cred, buf_t *bpp) * This algorithm described in Bach (p.54). */ errno_t -buf_meta_bread(vnode_t vp, daddr64_t blkno, int size, ucred_t cred, buf_t *bpp) +buf_meta_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp) { - buf_t bp; + buf_t bp; /* Get buffer for block. */ bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_META); /* Wait for the read to complete, and return result. */ - return (buf_biowait(bp)); + return buf_biowait(bp); } /* * Read-ahead multiple disk blocks. The first is sync, the rest async. 
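 * For example, a hypothetical caller that wants the next two blocks
 * pulled in behind a synchronous read could do:
 *
 *	daddr64_t rablks[2] = { blkno + 1, blkno + 2 };
 *	int rasizes[2] = { size, size };
 *
 *	error = buf_breadn(vp, blkno, size, rablks, rasizes, 2, cred, &bp);
 *
 * Only the first block is waited on; the read-aheads are issued
 * B_ASYNC, so buf_biodone() releases them without caller involvement.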
*/ errno_t -buf_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, ucred_t cred, buf_t *bpp) +buf_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp) { - return (do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_READ)); + return do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_READ); } /* @@ -1591,9 +2365,9 @@ buf_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasize * [buf_breadn() for meta-data] */ errno_t -buf_meta_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, ucred_t cred, buf_t *bpp) +buf_meta_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp) { - return (do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_META)); + return do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_META); } /* @@ -1602,22 +2376,24 @@ buf_meta_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *r errno_t buf_bwrite(buf_t bp) { - int sync, wasdelayed; - errno_t rv; - proc_t p = current_proc(); - vnode_t vp = bp->b_vp; + int sync, wasdelayed; + errno_t rv; + proc_t p = current_proc(); + vnode_t vp = bp->b_vp; if (bp->b_datap == 0) { - if (brecover_data(bp) == 0) - return (0); + if (brecover_data(bp) == 0) { + return 0; + } } /* Remember buffer type, to switch on it later. */ sync = !ISSET(bp->b_flags, B_ASYNC); wasdelayed = ISSET(bp->b_flags, B_DELWRI); CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI)); - if (wasdelayed) - OSAddAtomic(-1, &nbdwrite); + if (wasdelayed) { + OSAddAtomicLong(-1, &nbdwrite); + } if (!sync) { /* @@ -1626,18 +2402,18 @@ buf_bwrite(buf_t bp) * to do this now, because if we don't, the vnode may not * be properly notified that its I/O has completed. */ - if (wasdelayed) + if (wasdelayed) { buf_reassign(bp, vp); - else - if (p && p->p_stats) - p->p_stats->p_ru.ru_oublock++; /* XXX */ + } else if (p && p->p_stats) { + OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */ + } } trace(TR_BUFWRITE, pack(vp, bp->b_bcount), bp->b_lblkno); /* Initiate disk write. Make sure the appropriate party is charged. */ - OSAddAtomic(1, &vp->v_numoutput); - + OSAddAtomic(1, &vp->v_numoutput); + VNOP_STRATEGY(bp); if (sync) { @@ -1651,31 +2427,25 @@ buf_bwrite(buf_t bp) * make sure it's on the correct vnode queue. (async operatings * were payed for above.) */ - if (wasdelayed) + if (wasdelayed) { buf_reassign(bp, vp); - else - if (p && p->p_stats) - p->p_stats->p_ru.ru_oublock++; /* XXX */ + } else if (p && p->p_stats) { + OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */ + } /* Release the buffer. */ - // XXXdbg - only if the unused bit is set - if (!ISSET(bp->b_flags, B_NORELSE)) { - buf_brelse(bp); - } else { - CLR(bp->b_flags, B_NORELSE); - } + buf_brelse(bp); - return (rv); + return rv; } else { - return (0); + return 0; } } int -vn_bwrite(ap) - struct vnop_bwrite_args *ap; +vn_bwrite(struct vnop_bwrite_args *ap) { - return (buf_bwrite(ap->a_bp)); + return buf_bwrite(ap->a_bp); } /* @@ -1691,17 +2461,17 @@ vn_bwrite(ap) * * Described in Leffler, et al. (pp. 208-213). 
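 * A hedged sketch of the usual calling pattern (the caller shown is
 * hypothetical, not from this file):
 *
 *	if ((error = buf_meta_bread(vp, blkno, size, cred, &bp)) == 0) {
 *		... modify the data at bp->b_datap ...
 *		error = buf_bdwrite(bp);
 *	}
 *
 * The write is only scheduled: B_DELWRI is set, nbdwrite is bumped and
 * the buffer is reassigned to its vnode until it is pushed out later.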
* - * Note: With the abilitty to allocate additional buffer - * headers, we can get in to the situation where "too" many + * Note: With the ability to allocate additional buffer + * headers, we can get in to the situation where "too" many * buf_bdwrite()s can create situation where the kernel can create * buffers faster than the disks can service. Doing a buf_bawrite() in - * cases were we have "too many" outstanding buf_bdwrite()s avoids that. + * cases where we have "too many" outstanding buf_bdwrite()s avoids that. */ -__private_extern__ int +int bdwrite_internal(buf_t bp, int return_error) { - proc_t p = current_proc(); - vnode_t vp = bp->b_vp; + proc_t p = current_proc(); + vnode_t vp = bp->b_vp; /* * If the block hasn't been seen before: @@ -1711,22 +2481,17 @@ bdwrite_internal(buf_t bp, int return_error) */ if (!ISSET(bp->b_flags, B_DELWRI)) { SET(bp->b_flags, B_DELWRI); - if (p && p->p_stats) - p->p_stats->p_ru.ru_oublock++; /* XXX */ - OSAddAtomic(1, &nbdwrite); + if (p && p->p_stats) { + OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */ + } + OSAddAtomicLong(1, &nbdwrite); buf_reassign(bp, vp); } - /* If this is a tape block, write it the block now. */ - if (ISSET(bp->b_flags, B_TAPE)) { - VNOP_BWRITE(bp); - return (0); - } - /* * if we're not LOCKED, but the total number of delayed writes * has climbed above 75% of the total buffers in the system - * return an error if the caller has indicated that it can + * return an error if the caller has indicated that it can * handle one in this case, otherwise schedule the I/O now * this is done to prevent us from allocating tons of extra * buffers when dealing with virtual disks (i.e. DiskImages), @@ -1737,36 +2502,37 @@ bdwrite_internal(buf_t bp, int return_error) * buffer is part of a transaction and can't go to disk until * the LOCKED bit is cleared. */ - if (!ISSET(bp->b_flags, B_LOCKED) && nbdwrite > ((nbuf/4)*3)) { - if (return_error) - return (EAGAIN); + if (!ISSET(bp->b_flags, B_LOCKED) && nbdwrite > ((nbuf_headers / 4) * 3)) { + if (return_error) { + return EAGAIN; + } /* * If the vnode has "too many" write operations in progress * wait for them to finish the IO */ - (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, (char *)"buf_bdwrite"); + (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, "buf_bdwrite"); - return (buf_bawrite(bp)); + return buf_bawrite(bp); } - + /* Otherwise, the "write" is done, so mark and release the buffer. */ SET(bp->b_flags, B_DONE); buf_brelse(bp); - return (0); + return 0; } errno_t buf_bdwrite(buf_t bp) { - return (bdwrite_internal(bp, 0)); + return bdwrite_internal(bp, 0); } - + /* * Asynchronous block write; just an asynchronous buf_bwrite(). * * Note: With the abilitty to allocate additional buffer - * headers, we can get in to the situation where "too" many + * headers, we can get in to the situation where "too" many * buf_bawrite()s can create situation where the kernel can create * buffers faster than the disks can service. 
* We limit the number of "in flight" writes a vnode can have to @@ -1775,31 +2541,162 @@ buf_bdwrite(buf_t bp) static int bawrite_internal(buf_t bp, int throttle) { - vnode_t vp = bp->b_vp; + vnode_t vp = bp->b_vp; if (vp) { - if (throttle) - /* + if (throttle) { + /* * If the vnode has "too many" write operations in progress * wait for them to finish the IO */ - (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, (const char *)"buf_bawrite"); - else if (vp->v_numoutput >= VNODE_ASYNC_THROTTLE) - /* - * return to the caller and + (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, (const char *)"buf_bawrite"); + } else if (vp->v_numoutput >= VNODE_ASYNC_THROTTLE) { + /* + * return to the caller and * let him decide what to do */ - return (EWOULDBLOCK); + return EWOULDBLOCK; + } } SET(bp->b_flags, B_ASYNC); - return (VNOP_BWRITE(bp)); + return VNOP_BWRITE(bp); } errno_t buf_bawrite(buf_t bp) { - return (bawrite_internal(bp, 1)); + return bawrite_internal(bp, 1); +} + + + +static void +buf_free_meta_store(buf_t bp) +{ + if (bp->b_bufsize) { + if (ISSET(bp->b_flags, B_ZALLOC)) { + zone_t z; + + z = getbufzone(bp->b_bufsize); + zfree(z, bp->b_datap); + } else { + kmem_free(kernel_map, bp->b_datap, bp->b_bufsize); + } + + bp->b_datap = (uintptr_t)NULL; + bp->b_bufsize = 0; + } +} + + +static buf_t +buf_brelse_shadow(buf_t bp) +{ + buf_t bp_head; + buf_t bp_temp; + buf_t bp_return = NULL; +#ifdef BUF_MAKE_PRIVATE + buf_t bp_data; + int data_ref = 0; +#endif + int need_wakeup = 0; + + lck_mtx_lock_spin(buf_mtxp); + + __IGNORE_WCASTALIGN(bp_head = (buf_t)bp->b_orig); + + if (bp_head->b_whichq != -1) { + panic("buf_brelse_shadow: bp_head on freelist %d\n", bp_head->b_whichq); + } + +#ifdef BUF_MAKE_PRIVATE + if (bp_data = bp->b_data_store) { + bp_data->b_data_ref--; + /* + * snapshot the ref count so that we can check it + * outside of the lock... 
we only want the guy going + * from 1 -> 0 to try and release the storage + */ + data_ref = bp_data->b_data_ref; + } +#endif + KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_START, bp, bp_head, bp_head->b_shadow_ref, 0, 0); + + bp_head->b_shadow_ref--; + + for (bp_temp = bp_head; bp_temp && bp != bp_temp->b_shadow; bp_temp = bp_temp->b_shadow) { + ; + } + + if (bp_temp == NULL) { + panic("buf_brelse_shadow: bp not on list %p", bp_head); + } + + bp_temp->b_shadow = bp_temp->b_shadow->b_shadow; + +#ifdef BUF_MAKE_PRIVATE + /* + * we're about to free the current 'owner' of the data buffer and + * there is at least one other shadow buf_t still pointing at it + * so transfer it to the first shadow buf left in the chain + */ + if (bp == bp_data && data_ref) { + if ((bp_data = bp_head->b_shadow) == NULL) { + panic("buf_brelse_shadow: data_ref mismatch bp(%p)", bp); + } + + for (bp_temp = bp_data; bp_temp; bp_temp = bp_temp->b_shadow) { + bp_temp->b_data_store = bp_data; + } + bp_data->b_data_ref = data_ref; + } +#endif + if (bp_head->b_shadow_ref == 0 && bp_head->b_shadow) { + panic("buf_relse_shadow: b_shadow != NULL && b_shadow_ref == 0 bp(%p)", bp); + } + if (bp_head->b_shadow_ref && bp_head->b_shadow == 0) { + panic("buf_relse_shadow: b_shadow == NULL && b_shadow_ref != 0 bp(%p)", bp); + } + + if (bp_head->b_shadow_ref == 0) { + if (!ISSET(bp_head->b_lflags, BL_BUSY)) { + CLR(bp_head->b_flags, B_AGE); + bp_head->b_timestamp = buf_timestamp(); + + if (ISSET(bp_head->b_flags, B_LOCKED)) { + bp_head->b_whichq = BQ_LOCKED; + binstailfree(bp_head, &bufqueues[BQ_LOCKED], BQ_LOCKED); + } else { + bp_head->b_whichq = BQ_META; + binstailfree(bp_head, &bufqueues[BQ_META], BQ_META); + } + } else if (ISSET(bp_head->b_lflags, BL_WAITSHADOW)) { + CLR(bp_head->b_lflags, BL_WAITSHADOW); + + bp_return = bp_head; + } + if (ISSET(bp_head->b_lflags, BL_WANTED_REF)) { + CLR(bp_head->b_lflags, BL_WANTED_REF); + need_wakeup = 1; + } + } + lck_mtx_unlock(buf_mtxp); + + if (need_wakeup) { + wakeup(bp_head); + } + +#ifdef BUF_MAKE_PRIVATE + if (bp == bp_data && data_ref == 0) { + buf_free_meta_store(bp); + } + + bp->b_data_store = NULL; +#endif + KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_END, bp, 0, 0, 0, 0); + + return bp_return; } @@ -1811,34 +2708,42 @@ void buf_brelse(buf_t bp) { struct bqueues *bufq; - long whichq; - upl_t upl; + long whichq; + upl_t upl; int need_wakeup = 0; int need_bp_wakeup = 0; - if (bp->b_whichq != -1 || !(bp->b_lflags & BL_BUSY)) - panic("buf_brelse: bad buffer = %x\n", bp); + if (bp->b_whichq != -1 || !(bp->b_lflags & BL_BUSY)) { + panic("buf_brelse: bad buffer = %p\n", bp); + } #ifdef JOE_DEBUG - bp->b_stackbrelse[0] = __builtin_return_address(0); - bp->b_stackbrelse[1] = __builtin_return_address(1); - bp->b_stackbrelse[2] = __builtin_return_address(2); - bp->b_stackbrelse[3] = __builtin_return_address(3); - bp->b_stackbrelse[4] = __builtin_return_address(4); - bp->b_stackbrelse[5] = __builtin_return_address(5); + (void) OSBacktrace(&bp->b_stackbrelse[0], 6); bp->b_lastbrelse = current_thread(); bp->b_tag = 0; #endif if (bp->b_lflags & BL_IOBUF) { - free_io_buf(bp); + buf_t shadow_master_bp = NULL; + + if (ISSET(bp->b_lflags, BL_SHADOW)) { + shadow_master_bp = buf_brelse_shadow(bp); + } else if (ISSET(bp->b_lflags, BL_IOBUF_ALLOC)) { + buf_free_meta_store(bp); + } + free_io_buf(bp); + + if (shadow_master_bp) { + bp = shadow_master_bp; + goto finish_shadow_master; + } return; } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_START, - bp->b_lblkno * PAGE_SIZE, (int)bp, (int)bp->b_datap, - bp->b_flags, 
0); + bp->b_lblkno * PAGE_SIZE, bp, bp->b_datap, + bp->b_flags, 0); trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno); @@ -1850,16 +2755,16 @@ buf_brelse(buf_t bp) * the HFS journal code depends on this */ if (ISSET(bp->b_flags, B_META) && ISSET(bp->b_flags, B_INVAL)) { - if (ISSET(bp->b_flags, B_FILTER)) { /* if necessary, call out */ - void (*iodone_func)(struct buf *, void *) = bp->b_iodone; - void *arg = (void *)bp->b_transaction; + if (ISSET(bp->b_flags, B_FILTER)) { /* if necessary, call out */ + void (*iodone_func)(struct buf *, void *) = bp->b_iodone; + void *arg = bp->b_transaction; - CLR(bp->b_flags, B_FILTER); /* but note callout done */ + CLR(bp->b_flags, B_FILTER); /* but note callout done */ bp->b_iodone = NULL; bp->b_transaction = NULL; if (iodone_func == NULL) { - panic("brelse: bp @ 0x%x has NULL b_iodone!\n", bp); + panic("brelse: bp @ %p has NULL b_iodone!\n", bp); } (*iodone_func)(bp, arg); } @@ -1869,118 +2774,176 @@ buf_brelse(buf_t bp) */ upl = bp->b_upl; - if ( !ISSET(bp->b_flags, B_META) && UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) { + if (!ISSET(bp->b_flags, B_META) && UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) { kern_return_t kret; int upl_flags; - if ( (upl == NULL) ) { - if ( !ISSET(bp->b_flags, B_INVAL)) { - kret = ubc_create_upl(bp->b_vp, - ubc_blktooff(bp->b_vp, bp->b_lblkno), - bp->b_bufsize, - &upl, - NULL, - UPL_PRECIOUS); - - if (kret != KERN_SUCCESS) - panic("brelse: Failed to create UPL"); -#ifdef UPL_DEBUG - upl_ubc_alias_set(upl, bp, 5); + if (upl == NULL) { + if (!ISSET(bp->b_flags, B_INVAL)) { + kret = ubc_create_upl_kernel(bp->b_vp, + ubc_blktooff(bp->b_vp, bp->b_lblkno), + bp->b_bufsize, + &upl, + NULL, + UPL_PRECIOUS, + VM_KERN_MEMORY_FILE); + + if (kret != KERN_SUCCESS) { + panic("brelse: Failed to create UPL"); + } +#if UPL_DEBUG + upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 5); #endif /* UPL_DEBUG */ } } else { if (bp->b_datap) { - kret = ubc_upl_unmap(upl); + kret = ubc_upl_unmap(upl); - if (kret != KERN_SUCCESS) - panic("ubc_upl_unmap failed"); + if (kret != KERN_SUCCESS) { + panic("ubc_upl_unmap failed"); + } bp->b_datap = (uintptr_t)NULL; } } if (upl) { if (bp->b_flags & (B_ERROR | B_INVAL)) { - if (bp->b_flags & (B_READ | B_INVAL)) - upl_flags = UPL_ABORT_DUMP_PAGES; - else - upl_flags = 0; + if (bp->b_flags & (B_READ | B_INVAL)) { + upl_flags = UPL_ABORT_DUMP_PAGES; + } else { + upl_flags = 0; + } ubc_upl_abort(upl, upl_flags); } else { - if (ISSET(bp->b_flags, B_DELWRI | B_WASDIRTY)) - upl_flags = UPL_COMMIT_SET_DIRTY ; - else - upl_flags = UPL_COMMIT_CLEAR_DIRTY ; + if (ISSET(bp->b_flags, B_DELWRI | B_WASDIRTY)) { + upl_flags = UPL_COMMIT_SET_DIRTY; + } else { + upl_flags = UPL_COMMIT_CLEAR_DIRTY; + } ubc_upl_commit_range(upl, 0, bp->b_bufsize, upl_flags | - UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY); + UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY); } bp->b_upl = NULL; } } else { - if ( (upl) ) - panic("brelse: UPL set for non VREG; vp=%x", bp->b_vp); - } + if ((upl)) { + panic("brelse: UPL set for non VREG; vp=%p", bp->b_vp); + } + } /* * If it's locked, don't report an error; try again later. */ - if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR)) + if (ISSET(bp->b_flags, (B_LOCKED | B_ERROR)) == (B_LOCKED | B_ERROR)) { CLR(bp->b_flags, B_ERROR); + } /* * If it's not cacheable, or an error, mark it invalid. 
*/ - if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR))) + if (ISSET(bp->b_flags, (B_NOCACHE | B_ERROR))) { SET(bp->b_flags, B_INVAL); - - if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) { + } + + if ((bp->b_bufsize <= 0) || + ISSET(bp->b_flags, B_INVAL) || + (ISSET(bp->b_lflags, BL_WANTDEALLOC) && !ISSET(bp->b_flags, B_DELWRI))) { + boolean_t delayed_buf_free_meta_store = FALSE; + /* - * If it's invalid or empty, dissociate it from its vnode - * and put on the head of the appropriate queue. + * If it's invalid or empty, dissociate it from its vnode, + * release its storage if B_META, and + * clean it up a bit and put it on the EMPTY queue */ - if (bp->b_vp) - brelvp(bp); - - if (ISSET(bp->b_flags, B_DELWRI)) - OSAddAtomic(-1, &nbdwrite); + if (ISSET(bp->b_flags, B_DELWRI)) { + OSAddAtomicLong(-1, &nbdwrite); + } - CLR(bp->b_flags, (B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE)); + if (ISSET(bp->b_flags, B_META)) { + if (bp->b_shadow_ref) { + delayed_buf_free_meta_store = TRUE; + } else { + buf_free_meta_store(bp); + } + } /* - * Determine which queue the buffer should be on, then put it there. + * nuke any credentials we were holding */ - if (bp->b_bufsize <= 0) - whichq = BQ_EMPTY; /* no data */ - else if (ISSET(bp->b_flags, B_META)) - whichq = BQ_META; /* meta-data */ - else - whichq = BQ_AGE; /* invalid data */ - bufq = &bufqueues[whichq]; + buf_release_credentials(bp); - lck_mtx_lock(buf_mtxp); + lck_mtx_lock_spin(buf_mtxp); + + if (bp->b_shadow_ref) { + SET(bp->b_lflags, BL_WAITSHADOW); + + lck_mtx_unlock(buf_mtxp); + + return; + } + if (delayed_buf_free_meta_store == TRUE) { + lck_mtx_unlock(buf_mtxp); +finish_shadow_master: + buf_free_meta_store(bp); + + lck_mtx_lock_spin(buf_mtxp); + } + CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA)); + + if (bp->b_vp) { + brelvp_locked(bp); + } - binsheadfree(bp, bufq, whichq); + bremhash(bp); + BLISTNONE(bp); + binshash(bp, &invalhash); + + bp->b_whichq = BQ_EMPTY; + binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY); } else { /* * It has valid data. Put it on the end of the appropriate * queue, so that it'll stick around for as long as possible. */ - if (ISSET(bp->b_flags, B_LOCKED)) - whichq = BQ_LOCKED; /* locked in core */ - else if (ISSET(bp->b_flags, B_META)) - whichq = BQ_META; /* meta-data */ - else if (ISSET(bp->b_flags, B_AGE)) - whichq = BQ_AGE; /* stale but valid data */ - else - whichq = BQ_LRU; /* valid data */ + if (ISSET(bp->b_flags, B_LOCKED)) { + whichq = BQ_LOCKED; /* locked in core */ + } else if (ISSET(bp->b_flags, B_META)) { + whichq = BQ_META; /* meta-data */ + } else if (ISSET(bp->b_flags, B_AGE)) { + whichq = BQ_AGE; /* stale but valid data */ + } else { + whichq = BQ_LRU; /* valid data */ + } bufq = &bufqueues[whichq]; - CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE)); + bp->b_timestamp = buf_timestamp(); - lck_mtx_lock(buf_mtxp); + lck_mtx_lock_spin(buf_mtxp); - binstailfree(bp, bufq, whichq); + /* + * the buf_brelse_shadow routine doesn't take 'ownership' + * of the parent buf_t... it updates state that is protected by + * the buf_mtxp, and checks for BL_BUSY to determine whether to + * put the buf_t back on a free list. 
b_shadow_ref is protected + * by the lock, and since we have not yet cleared B_BUSY, we need + * to check it while holding the lock to insure that one of us + * puts this buf_t back on a free list when it is safe to do so + */ + if (bp->b_shadow_ref == 0) { + CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE)); + bp->b_whichq = whichq; + binstailfree(bp, bufq, whichq); + } else { + /* + * there are still cloned buf_t's pointing + * at this guy... need to keep it off the + * freelists until a buf_brelse is done on + * the last clone + */ + CLR(bp->b_flags, (B_ASYNC | B_NOCACHE)); + } } if (needbuffer) { - /* + /* * needbuffer is a global * we're currently using buf_mtxp to protect it * delay doing the actual wakeup until after @@ -1990,7 +2953,7 @@ buf_brelse(buf_t bp) need_wakeup = 1; } if (ISSET(bp->b_lflags, BL_WANTED)) { - /* + /* * delay the actual wakeup until after we * clear BL_BUSY and we've dropped buf_mtxp */ @@ -2000,23 +2963,24 @@ buf_brelse(buf_t bp) * Unlock the buffer. */ CLR(bp->b_lflags, (BL_BUSY | BL_WANTED)); + buf_busycount--; lck_mtx_unlock(buf_mtxp); if (need_wakeup) { - /* + /* * Wake up any processes waiting for any buffer to become free. */ - wakeup(&needbuffer); + wakeup(&needbuffer); } if (need_bp_wakeup) { - /* + /* * Wake up any proceeses waiting for _this_ buffer to become free. */ - wakeup(bp); + wakeup(bp); } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_END, - (int)bp, (int)bp->b_datap, bp->b_flags, 0, 0); + bp, bp->b_datap, bp->b_flags, 0, 0); } /* @@ -2029,38 +2993,66 @@ buf_brelse(buf_t bp) static boolean_t incore(vnode_t vp, daddr64_t blkno) { - boolean_t retval; + boolean_t retval; + struct bufhashhdr *dp; - lck_mtx_lock(buf_mtxp); + dp = BUFHASH(vp, blkno); + + lck_mtx_lock_spin(buf_mtxp); - if (incore_locked(vp, blkno)) - retval = TRUE; - else - retval = FALSE; + if (incore_locked(vp, blkno, dp)) { + retval = TRUE; + } else { + retval = FALSE; + } lck_mtx_unlock(buf_mtxp); - return (retval); + return retval; } static buf_t -incore_locked(vnode_t vp, daddr64_t blkno) +incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp) { struct buf *bp; - bp = BUFHASH(vp, blkno)->lh_first; - /* Search hash chain */ - for (; bp != NULL; bp = bp->b_hash.le_next) { + for (bp = dp->lh_first; bp != NULL; bp = bp->b_hash.le_next) { if (bp->b_lblkno == blkno && bp->b_vp == vp && !ISSET(bp->b_flags, B_INVAL)) { - return (bp); + return bp; } } - return (0); + return NULL; } +void +buf_wait_for_shadow_io(vnode_t vp, daddr64_t blkno) +{ + buf_t bp; + struct bufhashhdr *dp; + + dp = BUFHASH(vp, blkno); + + lck_mtx_lock_spin(buf_mtxp); + + for (;;) { + if ((bp = incore_locked(vp, blkno, dp)) == NULL) { + break; + } + + if (bp->b_shadow_ref == 0) { + break; + } + + SET(bp->b_lflags, BL_WANTED_REF); + + (void) msleep(bp, buf_mtxp, PSPIN | (PRIBIO + 1), "buf_wait_for_shadow", NULL); + } + lck_mtx_unlock(buf_mtxp); +} + /* XXX FIXME -- Update the comment to reflect the UBC changes (please) -- */ /* * Get a block of requested size that is associated with @@ -2081,16 +3073,18 @@ buf_getblk(vnode_t vp, daddr64_t blkno, int size, int slpflag, int slptimeo, int int ret_only_valid; struct timespec ts; int upl_flags; + struct bufhashhdr *dp; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_START, - (int)(blkno * PAGE_SIZE), size, operation, 0, 0); + (uintptr_t)(blkno * PAGE_SIZE), size, operation, 0, 0); ret_only_valid = operation & BLK_ONLYVALID; operation &= ~BLK_ONLYVALID; + dp = BUFHASH(vp, blkno); start: - lck_mtx_lock(buf_mtxp); -start_locked: - if ((bp = 
incore_locked(vp, blkno))) { + lck_mtx_lock_spin(buf_mtxp); + + if ((bp = incore_locked(vp, blkno, dp))) { /* * Found in the Buffer Cache */ @@ -2107,50 +3101,92 @@ start_locked: /* * don't retake the mutex after being awakened... - * the time out is in msecs */ - ts.tv_sec = (slptimeo/1000); + ts.tv_sec = (slptimeo / 1000); ts.tv_nsec = (slptimeo % 1000) * 10 * NSEC_PER_USEC * 1000; + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 396)) | DBG_FUNC_NONE, + (uintptr_t)blkno, size, operation, 0, 0); + err = msleep(bp, buf_mtxp, slpflag | PDROP | (PRIBIO + 1), "buf_getblk", &ts); /* * Callers who call with PCATCH or timeout are * willing to deal with the NULL pointer */ - if (err && ((slpflag & PCATCH) || ((err == EWOULDBLOCK) && slptimeo))) - return (NULL); + if (err && ((slpflag & PCATCH) || ((err == EWOULDBLOCK) && slptimeo))) { + return NULL; + } goto start; - /*NOTREACHED*/ - break; + /*NOTREACHED*/ default: - /* + /* * unknown operation requested */ panic("getblk: paging or unknown operation for incore busy buffer - %x\n", operation); /*NOTREACHED*/ break; - } + } } else { + int clear_bdone; + /* * buffer in core and not busy */ - if ( (bp->b_upl) ) - panic("buffer has UPL, but not marked BUSY: %x", bp); SET(bp->b_lflags, BL_BUSY); SET(bp->b_flags, B_CACHE); + buf_busycount++; + + bremfree_locked(bp); + bufstats.bufs_incore++; + + lck_mtx_unlock(buf_mtxp); #ifdef JOE_DEBUG bp->b_owner = current_thread(); bp->b_tag = 1; #endif - bremfree_locked(bp); - bufstats.bufs_incore++; - - lck_mtx_unlock(buf_mtxp); + if ((bp->b_upl)) { + panic("buffer has UPL, but not marked BUSY: %p", bp); + } + + clear_bdone = FALSE; + if (!ret_only_valid) { + /* + * If the number of bytes that are valid is going + * to increase (even if we end up not doing a + * reallocation through allocbuf) we have to read + * the new size first. + * + * This is required in cases where we are doing a read + * modify write of already valid data on disk, but + * in cases where the data on disk beyond (blkno + b_bcount) + * is invalid, we may end up doing extra I/O. + */ + if (operation == BLK_META && bp->b_bcount < size) { + /* + * Since we are going to read in the whole size first, + * we have to ensure that any pending delayed write + * is flushed to disk first. + */ + if (ISSET(bp->b_flags, B_DELWRI)) { + CLR(bp->b_flags, B_CACHE); + buf_bwrite(bp); + goto start; + } + /* + * clear B_DONE before returning from + * this function so that the caller + * can issue a read for the new size. 
+ */ + clear_bdone = TRUE; + } - if ( !ret_only_valid) - allocbuf(bp, size); + if (bp->b_bufsize != size) { + allocbuf(bp, size); + } + } upl_flags = 0; switch (operation) { @@ -2163,37 +3199,42 @@ start_locked: upl_flags |= UPL_WILL_MODIFY; case BLK_READ: upl_flags |= UPL_PRECIOUS; - if (UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) { - kret = ubc_create_upl(vp, - ubc_blktooff(vp, bp->b_lblkno), - bp->b_bufsize, - &upl, - &pl, - upl_flags); - if (kret != KERN_SUCCESS) - panic("Failed to create UPL"); + if (UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) { + kret = ubc_create_upl_kernel(vp, + ubc_blktooff(vp, bp->b_lblkno), + bp->b_bufsize, + &upl, + &pl, + upl_flags, + VM_KERN_MEMORY_FILE); + if (kret != KERN_SUCCESS) { + panic("Failed to create UPL"); + } bp->b_upl = upl; if (upl_valid_page(pl, 0)) { - if (upl_dirty_page(pl, 0)) - SET(bp->b_flags, B_WASDIRTY); - else - CLR(bp->b_flags, B_WASDIRTY); - } else - CLR(bp->b_flags, (B_DONE | B_CACHE | B_WASDIRTY | B_DELWRI)); + if (upl_dirty_page(pl, 0)) { + SET(bp->b_flags, B_WASDIRTY); + } else { + CLR(bp->b_flags, B_WASDIRTY); + } + } else { + CLR(bp->b_flags, (B_DONE | B_CACHE | B_WASDIRTY | B_DELWRI)); + } - kret = ubc_upl_map(upl, (vm_address_t *)&(bp->b_datap)); + kret = ubc_upl_map(upl, (vm_offset_t*)&(bp->b_datap)); - if (kret != KERN_SUCCESS) - panic("getblk: ubc_upl_map() failed with (%d)", kret); + if (kret != KERN_SUCCESS) { + panic("getblk: ubc_upl_map() failed with (%d)", kret); + } } break; case BLK_META: /* * VM is not involved in IO for the meta data - * buffer already has valid data + * buffer already has valid data */ break; @@ -2202,20 +3243,25 @@ start_locked: /*NOTREACHED*/ break; } + + if (clear_bdone) { + CLR(bp->b_flags, B_DONE); + } } } else { /* not incore() */ int queue = BQ_EMPTY; /* Start with no preference */ - + if (ret_only_valid) { lck_mtx_unlock(buf_mtxp); - return (NULL); + return NULL; } - - if ((UBCINVALID(vp)) || !(UBCINFOEXISTS(vp))) + if ((vnode_isreg(vp) == 0) || (UBCINFOEXISTS(vp) == 0) /*|| (vnode_issystem(vp) == 1)*/) { operation = BLK_META; + } - if ((bp = getnewbuf(slpflag, slptimeo, &queue)) == NULL) - goto start_locked; + if ((bp = getnewbuf(slpflag, slptimeo, &queue)) == NULL) { + goto start; + } /* * getnewbuf may block for a number of different reasons... @@ -2224,7 +3270,7 @@ start_locked: * the hash... 
if we see it incore at this point we dump * the buffer we were working on and start over */ - if (incore_locked(vp, blkno)) { + if (incore_locked(vp, blkno, dp)) { SET(bp->b_flags, B_INVAL); binshash(bp, &invalhash); @@ -2242,20 +3288,21 @@ start_locked: * mark the buffer as B_META if indicated * so that when buffer is released it will goto META queue */ - if (operation == BLK_META) - SET(bp->b_flags, B_META); + if (operation == BLK_META) { + SET(bp->b_flags, B_META); + } bp->b_blkno = bp->b_lblkno = blkno; bp->b_vp = vp; /* - * Insert in the hash so that incore() can find it + * Insert in the hash so that incore() can find it */ - binshash(bp, BUFHASH(vp, blkno)); + binshash(bp, BUFHASH(vp, blkno)); - lck_mtx_unlock(buf_mtxp); + bgetvp_locked(vp, bp); - bgetvp(vp, bp); + lck_mtx_unlock(buf_mtxp); allocbuf(bp, size); @@ -2271,7 +3318,7 @@ start_locked: * in bufstats are protected with either * buf_mtxp or iobuffer_mtxp */ - OSAddAtomic(1, &bufstats.bufs_miss); + OSAddAtomicLong(1, &bufstats.bufs_miss); break; case BLK_WRITE: @@ -2282,113 +3329,134 @@ start_locked: */ upl_flags |= UPL_WILL_MODIFY; case BLK_READ: - { off_t f_offset; - size_t contig_bytes; - int bmap_flags; - - if ( (bp->b_upl) ) - panic("bp already has UPL: %x",bp); - - f_offset = ubc_blktooff(vp, blkno); - - upl_flags |= UPL_PRECIOUS; - kret = ubc_create_upl(vp, - f_offset, - bp->b_bufsize, - &upl, - &pl, - upl_flags); - - if (kret != KERN_SUCCESS) - panic("Failed to create UPL"); -#ifdef UPL_DEBUG - upl_ubc_alias_set(upl, bp, 4); -#endif /* UPL_DEBUG */ - bp->b_upl = upl; - - if (upl_valid_page(pl, 0)) { - - if (operation == BLK_READ) - bmap_flags = VNODE_READ; - else - bmap_flags = VNODE_WRITE; - - SET(bp->b_flags, B_CACHE | B_DONE); - - OSAddAtomic(1, &bufstats.bufs_vmhits); - - bp->b_validoff = 0; - bp->b_dirtyoff = 0; - - if (upl_dirty_page(pl, 0)) { - /* page is dirty */ - SET(bp->b_flags, B_WASDIRTY); + { off_t f_offset; + size_t contig_bytes; + int bmap_flags; - bp->b_validend = bp->b_bcount; - bp->b_dirtyend = bp->b_bcount; - } else { - /* page is clean */ - bp->b_validend = bp->b_bcount; - bp->b_dirtyend = 0; - } - /* - * try to recreate the physical block number associated with - * this buffer... - */ - if (VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL)) - panic("getblk: VNOP_BLOCKMAP failed"); - /* - * if the extent represented by this buffer - * is not completely physically contiguous on - * disk, than we can't cache the physical mapping - * in the buffer header - */ - if ((long)contig_bytes < bp->b_bcount) - bp->b_blkno = bp->b_lblkno; - } else { - OSAddAtomic(1, &bufstats.bufs_miss); - } - kret = ubc_upl_map(upl, (vm_address_t *)&(bp->b_datap)); +#if DEVELOPMENT || DEBUG + /* + * Apple-implemented file systems use UBC exclusively; they should + * not call in here. + */ + const char* excldfs[] = {"hfs", "afpfs", "smbfs", "acfs", + "exfat", "msdos", "webdav", NULL}; + + for (int i = 0; excldfs[i] != NULL; i++) { + if (vp->v_mount && + !strcmp(vp->v_mount->mnt_vfsstat.f_fstypename, + excldfs[i])) { + panic("%s %s calls buf_getblk", + excldfs[i], + operation == BLK_READ ? 
"BLK_READ" : "BLK_WRITE"); + } + } +#endif - if (kret != KERN_SUCCESS) - panic("getblk: ubc_upl_map() failed with (%d)", kret); - break; - } + if ((bp->b_upl)) { + panic("bp already has UPL: %p", bp); + } + + f_offset = ubc_blktooff(vp, blkno); + + upl_flags |= UPL_PRECIOUS; + kret = ubc_create_upl_kernel(vp, + f_offset, + bp->b_bufsize, + &upl, + &pl, + upl_flags, + VM_KERN_MEMORY_FILE); + + if (kret != KERN_SUCCESS) { + panic("Failed to create UPL"); + } +#if UPL_DEBUG + upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 4); +#endif /* UPL_DEBUG */ + bp->b_upl = upl; + + if (upl_valid_page(pl, 0)) { + if (operation == BLK_READ) { + bmap_flags = VNODE_READ; + } else { + bmap_flags = VNODE_WRITE; + } + + SET(bp->b_flags, B_CACHE | B_DONE); + + OSAddAtomicLong(1, &bufstats.bufs_vmhits); + + bp->b_validoff = 0; + bp->b_dirtyoff = 0; + + if (upl_dirty_page(pl, 0)) { + /* page is dirty */ + SET(bp->b_flags, B_WASDIRTY); + + bp->b_validend = bp->b_bcount; + bp->b_dirtyend = bp->b_bcount; + } else { + /* page is clean */ + bp->b_validend = bp->b_bcount; + bp->b_dirtyend = 0; + } + /* + * try to recreate the physical block number associated with + * this buffer... + */ + if (VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL)) { + panic("getblk: VNOP_BLOCKMAP failed"); + } + /* + * if the extent represented by this buffer + * is not completely physically contiguous on + * disk, than we can't cache the physical mapping + * in the buffer header + */ + if ((long)contig_bytes < bp->b_bcount) { + bp->b_blkno = bp->b_lblkno; + } + } else { + OSAddAtomicLong(1, &bufstats.bufs_miss); + } + kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap)); + + if (kret != KERN_SUCCESS) { + panic("getblk: ubc_upl_map() failed with (%d)", kret); + } + break;} // end BLK_READ default: panic("getblk: paging or unknown operation - %x", operation); /*NOTREACHED*/ break; - } - } + } // end switch + } //end buf_t !incore + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_END, - (int)bp, (int)bp->b_datap, bp->b_flags, 3, 0); + bp, bp->b_datap, bp->b_flags, 3, 0); #ifdef JOE_DEBUG - bp->b_stackgetblk[0] = __builtin_return_address(0); - bp->b_stackgetblk[1] = __builtin_return_address(1); - bp->b_stackgetblk[2] = __builtin_return_address(2); - bp->b_stackgetblk[3] = __builtin_return_address(3); - bp->b_stackgetblk[4] = __builtin_return_address(4); - bp->b_stackgetblk[5] = __builtin_return_address(5); + (void) OSBacktrace(&bp->b_stackgetblk[0], 6); #endif - return (bp); + return bp; } /* * Get an empty, disassociated buffer of given size. 
*/ buf_t -buf_geteblk(size) - int size; +buf_geteblk(int size) { - buf_t bp; + buf_t bp = NULL; int queue = BQ_EMPTY; - lck_mtx_lock(buf_mtxp); + do { + lck_mtx_lock_spin(buf_mtxp); - while ((bp = getnewbuf(0, 0, &queue)) == 0) - ; - SET(bp->b_flags, (B_META|B_INVAL)); + bp = getnewbuf(0, 0, &queue); + } while (bp == NULL); + + SET(bp->b_flags, (B_META | B_INVAL)); #if DIAGNOSTIC assert(queue == BQ_EMPTY); @@ -2402,67 +3470,105 @@ buf_geteblk(size) allocbuf(bp, size); - return (bp); + return bp; } -/* - * Zones for the meta data buffers - */ +uint32_t +buf_redundancy_flags(buf_t bp) +{ + return bp->b_redundancy_flags; +} -#define MINMETA 512 -#define MAXMETA 4096 +void +buf_set_redundancy_flags(buf_t bp, uint32_t flags) +{ + SET(bp->b_redundancy_flags, flags); +} -struct meta_zone_entry { - zone_t mz_zone; - vm_size_t mz_size; - vm_size_t mz_max; - char *mz_name; -}; +void +buf_clear_redundancy_flags(buf_t bp, uint32_t flags) +{ + CLR(bp->b_redundancy_flags, flags); +} -struct meta_zone_entry meta_zones[] = { - {NULL, (MINMETA * 1), 128 * (MINMETA * 1), "buf.512" }, - {NULL, (MINMETA * 2), 64 * (MINMETA * 2), "buf.1024" }, - {NULL, (MINMETA * 4), 16 * (MINMETA * 4), "buf.2048" }, - {NULL, (MINMETA * 8), 512 * (MINMETA * 8), "buf.4096" }, - {NULL, 0, 0, "" } /* End */ -}; -/* - * Initialize the meta data zones - */ -static void -bufzoneinit(void) + +static void * +recycle_buf_from_pool(int nsize) { - int i; + buf_t bp; + void *ptr = NULL; - for (i = 0; meta_zones[i].mz_size != 0; i++) { - meta_zones[i].mz_zone = - zinit(meta_zones[i].mz_size, - meta_zones[i].mz_max, - PAGE_SIZE, - meta_zones[i].mz_name); + lck_mtx_lock_spin(buf_mtxp); + + TAILQ_FOREACH(bp, &bufqueues[BQ_META], b_freelist) { + if (ISSET(bp->b_flags, B_DELWRI) || bp->b_bufsize != nsize) { + continue; + } + ptr = (void *)bp->b_datap; + bp->b_bufsize = 0; + + bcleanbuf(bp, TRUE); + break; } - buf_hdr_zone = zinit(sizeof(struct buf), 32, PAGE_SIZE, "buf headers"); + lck_mtx_unlock(buf_mtxp); + + return ptr; } -static __inline__ zone_t -getbufzone(size_t size) + + +int zalloc_nopagewait_failed = 0; +int recycle_buf_failed = 0; + +static void * +grab_memory_for_meta_buf(int nsize) { - int i; + zone_t z; + void *ptr; + boolean_t was_vmpriv; - if ((size % 512) || (size < MINMETA) || (size > MAXMETA)) - panic("getbufzone: incorect size = %d", size); + z = getbufzone(nsize); - for (i = 0; meta_zones[i].mz_size != 0; i++) { - if (meta_zones[i].mz_size >= size) - break; + /* + * make sure we're NOT priviliged so that + * if a vm_page_grab is needed, it won't + * block if we're out of free pages... if + * it blocks, then we can't honor the + * nopagewait request + */ + was_vmpriv = set_vm_privilege(FALSE); + + ptr = zalloc_nopagewait(z); + + if (was_vmpriv == TRUE) { + set_vm_privilege(TRUE); } - return (meta_zones[i].mz_zone); + if (ptr == NULL) { + zalloc_nopagewait_failed++; + + ptr = recycle_buf_from_pool(nsize); + + if (ptr == NULL) { + recycle_buf_failed++; + + if (was_vmpriv == FALSE) { + set_vm_privilege(TRUE); + } + + ptr = zalloc(z); + + if (was_vmpriv == FALSE) { + set_vm_privilege(FALSE); + } + } + } + return ptr; } /* - * With UBC, there is no need to expand / shrink the file data + * With UBC, there is no need to expand / shrink the file data * buffer. The VM uses the same pages, hence no waste. * All the file data buffers can have one size. * In fact expand / shrink would be an expensive operation. 
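grab_memory_for_meta_buf() above is a three-tier fallback: it first tries a non-blocking zalloc_nopagewait() with vm privilege dropped so the attempt can fail fast, then recycles a clean, same-sized buffer off the BQ_META freelist, and only as a last resort pays for a blocking zalloc(). A user-space sketch of that shape (the helper names are hypothetical stand-ins, not kernel KPIs):

	#include <stdlib.h>

	/* stand-in for zalloc_nopagewait(): may fail rather than sleep */
	static void *try_alloc_nowait(size_t size) { return malloc(size); }

	/* stand-in for recycle_buf_from_pool(): nothing cached in this sketch */
	static void *recycle_from_pool(size_t size) { (void)size; return NULL; }

	/* stand-in for the blocking zalloc() of last resort */
	static void *alloc_blocking(size_t size) { return malloc(size); }

	static void *
	alloc_with_fallback(size_t size)
	{
		void *ptr;

		if ((ptr = try_alloc_nowait(size)) != NULL) {   /* 1: cheap, may fail */
			return ptr;
		}
		if ((ptr = recycle_from_pool(size)) != NULL) {  /* 2: reuse our own */
			return ptr;
		}
		return alloc_blocking(size);                    /* 3: may sleep */
	}

	int
	main(void)
	{
		free(alloc_with_fallback(4096));
		return 0;
	}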
@@ -2480,45 +3586,48 @@ allocbuf(buf_t bp, int size) desired_size = roundup(size, CLBYTES); - if (desired_size < PAGE_SIZE) + if (desired_size < PAGE_SIZE) { desired_size = PAGE_SIZE; - if (desired_size > MAXBSIZE) + } + if (desired_size > MAXBSIZE) { panic("allocbuf: buffer larger than MAXBSIZE requested"); + } if (ISSET(bp->b_flags, B_META)) { - zone_t zprev, z; int nsize = roundup(size, MINMETA); if (bp->b_datap) { vm_offset_t elem = (vm_offset_t)bp->b_datap; if (ISSET(bp->b_flags, B_ZALLOC)) { - if (bp->b_bufsize < nsize) { - /* reallocate to a bigger size */ + if (bp->b_bufsize < nsize) { + zone_t zprev; + + /* reallocate to a bigger size */ - zprev = getbufzone(bp->b_bufsize); + zprev = getbufzone(bp->b_bufsize); if (nsize <= MAXMETA) { - desired_size = nsize; - z = getbufzone(nsize); - bp->b_datap = (uintptr_t)zalloc(z); + desired_size = nsize; + + /* b_datap not really a ptr */ + *(void **)(&bp->b_datap) = grab_memory_for_meta_buf(nsize); } else { - bp->b_datap = (uintptr_t)NULL; - kmem_alloc_wired(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size); + bp->b_datap = (uintptr_t)NULL; + kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size, VM_KERN_MEMORY_FILE); CLR(bp->b_flags, B_ZALLOC); } bcopy((void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize); - zfree(zprev, (void *)elem); + zfree(zprev, elem); } else { - desired_size = bp->b_bufsize; + desired_size = bp->b_bufsize; } - } else { if ((vm_size_t)bp->b_bufsize < desired_size) { /* reallocate to a bigger size */ - bp->b_datap = (uintptr_t)NULL; - kmem_alloc_wired(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size); + bp->b_datap = (uintptr_t)NULL; + kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size, VM_KERN_MEMORY_FILE); bcopy((const void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize); - kmem_free(kernel_map, elem, bp->b_bufsize); + kmem_free(kernel_map, elem, bp->b_bufsize); } else { desired_size = bp->b_bufsize; } @@ -2527,17 +3636,23 @@ allocbuf(buf_t bp, int size) /* new allocation */ if (nsize <= MAXMETA) { desired_size = nsize; - z = getbufzone(nsize); - bp->b_datap = (uintptr_t)zalloc(z); + + /* b_datap not really a ptr */ + *(void **)(&bp->b_datap) = grab_memory_for_meta_buf(nsize); SET(bp->b_flags, B_ZALLOC); - } else - kmem_alloc_wired(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size); + } else { + kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size, VM_KERN_MEMORY_FILE); + } + } + + if (bp->b_datap == 0) { + panic("allocbuf: NULL b_datap"); } } bp->b_bufsize = desired_size; bp->b_bcount = size; - return (0); + return 0; } /* @@ -2558,46 +3673,51 @@ allocbuf(buf_t bp, int size) * on which it was found. 
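 * (A hedged illustration: a caller asking for a metadata buffer passes
 * *queue == BQ_META; if that list cannot be used, the search below
 * falls back across the AGE, LRU and META freelists, using the
 * b_timestamp ages to prefer stale buffers, or grows the header pool
 * when nbuf_headers is still below max_nbuf_headers.)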
* * buf_mtxp is held upon entry - * returns with buf_mtxp locked + * returns with buf_mtxp locked if new buf available + * returns with buf_mtxp UNlocked if new buf NOT available */ static buf_t getnewbuf(int slpflag, int slptimeo, int * queue) { - buf_t bp; - buf_t lru_bp; - buf_t age_bp; - buf_t meta_bp; - int age_time, lru_time, bp_time, meta_time; - int req = *queue; /* save it for restarts */ + buf_t bp; + buf_t lru_bp; + buf_t age_bp; + buf_t meta_bp; + int age_time, lru_time, bp_time, meta_time; + int req = *queue; /* save it for restarts */ struct timespec ts; start: /* * invalid request gets empty queue */ - if ((*queue > BQUEUES) || (*queue < 0) - || (*queue == BQ_LAUNDRY) || (*queue == BQ_LOCKED)) + if ((*queue >= BQUEUES) || (*queue < 0) + || (*queue == BQ_LAUNDRY) || (*queue == BQ_LOCKED)) { *queue = BQ_EMPTY; - /* need to grow number of bufs, add another one rather than recycling */ - if (nbuf < max_nbuf_headers) { + } + + + if (*queue == BQ_EMPTY && (bp = bufqueues[*queue].tqh_first)) { + goto found; + } + + /* + * need to grow number of bufs, add another one rather than recycling + */ + if (nbuf_headers < max_nbuf_headers) { /* - * Increment count now as lock + * Increment count now as lock * is dropped for allocation. * That avoids over commits */ - nbuf++; + nbuf_headers++; goto add_newbufs; } - - /* - * (*queue == BQUEUES) means no preference - */ - if (*queue != BQUEUES) { - /* Try for the requested queue first */ - bp = bufqueues[*queue].tqh_first; - if (bp) - goto found; + /* Try for the requested queue first */ + bp = bufqueues[*queue].tqh_first; + if (bp) { + goto found; } /* Unable to use requested queue */ @@ -2617,7 +3737,7 @@ start: } /* * We have seen this is hard to trigger. - * This is an overcommit of nbufs but needed + * This is an overcommit of nbufs but needed * in some scenarios with diskimages */ add_newbufs: @@ -2626,32 +3746,37 @@ add_newbufs: /* Create a new temporary buffer header */ bp = (struct buf *)zalloc(buf_hdr_zone); - - lck_mtx_lock(buf_mtxp); if (bp) { bufhdrinit(bp); + bp->b_whichq = BQ_EMPTY; + bp->b_timestamp = buf_timestamp(); BLISTNONE(bp); - binshash(bp, &invalhash); SET(bp->b_flags, B_HDRALLOC); *queue = BQ_EMPTY; + } + lck_mtx_lock_spin(buf_mtxp); + + if (bp) { + binshash(bp, &invalhash); binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY); buf_hdr_count++; goto found; } /* subtract already accounted bufcount */ - nbuf--; + nbuf_headers--; bufstats.bufs_sleeps++; /* wait for a free buffer of any kind */ needbuffer = 1; /* hz value is 100 */ - ts.tv_sec = (slptimeo/1000); + ts.tv_sec = (slptimeo / 1000); /* the hz value is 100; which leads to 10ms */ ts.tv_nsec = (slptimeo % 1000) * NSEC_PER_USEC * 1000 * 10; - msleep(&needbuffer, buf_mtxp, slpflag|(PRIBIO+1), (char *)"getnewbuf", &ts); - return (0); + + msleep(&needbuffer, buf_mtxp, slpflag | PDROP | (PRIBIO + 1), "getnewbuf", &ts); + return NULL; } /* Buffer available either on AGE or LRU or META */ @@ -2666,7 +3791,7 @@ add_newbufs: bp = age_bp; *queue = BQ_AGE; } else { /* buffer available on both AGE and LRU */ - int t = buf_timestamp(); + int t = buf_timestamp(); age_time = t - age_bp->b_timestamp; lru_time = t - lru_bp->b_timestamp; @@ -2691,8 +3816,8 @@ add_newbufs: if (!bp) { /* Neither on AGE nor on LRU */ bp = meta_bp; *queue = BQ_META; - } else if (meta_bp) { - int t = buf_timestamp(); + } else if (meta_bp) { + int t = buf_timestamp(); bp_time = t - bp->b_timestamp; meta_time = t - meta_bp->b_timestamp; @@ -2700,49 +3825,48 @@ add_newbufs: if (!(bp_time < 0) && !(meta_time < 0)) { /* 
time not set backwards */ int bp_is_stale; - bp_is_stale = (*queue == BQ_LRU) ? - lru_is_stale : age_is_stale; + bp_is_stale = (*queue == BQ_LRU) ? + lru_is_stale : age_is_stale; - if ((meta_time >= meta_is_stale) && - (bp_time < bp_is_stale)) { + if ((meta_time >= meta_is_stale) && + (bp_time < bp_is_stale)) { bp = meta_bp; *queue = BQ_META; } } } found: - if (ISSET(bp->b_flags, B_LOCKED) || ISSET(bp->b_lflags, BL_BUSY)) - panic("getnewbuf: bp @ 0x%x is LOCKED or BUSY! (flags 0x%x)\n", bp, bp->b_flags); + if (ISSET(bp->b_flags, B_LOCKED) || ISSET(bp->b_lflags, BL_BUSY)) { + panic("getnewbuf: bp @ %p is LOCKED or BUSY! (flags 0x%x)\n", bp, bp->b_flags); + } /* Clean it */ - if (bcleanbuf(bp)) { + if (bcleanbuf(bp, FALSE)) { /* * moved to the laundry thread, buffer not ready */ *queue = req; goto start; } - return (bp); + return bp; } -/* +/* * Clean a buffer. - * Returns 0 is buffer is ready to use, - * Returns 1 if issued a buf_bawrite() to indicate + * Returns 0 if buffer is ready to use, + * Returns 1 if issued a buf_bawrite() to indicate * that the buffer is not ready. - * + * * buf_mtxp is held upon entry * returns with buf_mtxp locked */ -static int -bcleanbuf(buf_t bp) +int +bcleanbuf(buf_t bp, boolean_t discard) { /* Remove from the queue */ bremfree_locked(bp); - /* Buffer is no longer on free lists. */ - SET(bp->b_lflags, BL_BUSY); #ifdef JOE_DEBUG bp->b_owner = current_thread(); bp->b_tag = 2; @@ -2752,81 +3876,96 @@ bcleanbuf(buf_t bp) * it on the LAUNDRY queue, and return 1 */ if (ISSET(bp->b_flags, B_DELWRI)) { - binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY); - blaundrycnt++; + if (discard) { + SET(bp->b_lflags, BL_WANTDEALLOC); + } + + bmovelaundry(bp); lck_mtx_unlock(buf_mtxp); - wakeup(&blaundrycnt); - /* and give it a chance to run */ + wakeup(&bufqueues[BQ_LAUNDRY]); + /* + * and give it a chance to run + */ (void)thread_block(THREAD_CONTINUE_NULL); - lck_mtx_lock(buf_mtxp); - return (1); + lck_mtx_lock_spin(buf_mtxp); + + return 1; } - bremhash(bp); +#ifdef JOE_DEBUG + bp->b_owner = current_thread(); + bp->b_tag = 8; +#endif + /* + * Buffer is no longer on any free list... we own it + */ + SET(bp->b_lflags, BL_BUSY); + buf_busycount++; - lck_mtx_unlock(buf_mtxp); + bremhash(bp); - BLISTNONE(bp); /* * disassociate us from our vnode, if we had one... */ - if (bp->b_vp) - brelvp(bp); - - if (ISSET(bp->b_flags, B_META)) { - vm_offset_t elem; + if (bp->b_vp) { + brelvp_locked(bp); + } - elem = (vm_offset_t)bp->b_datap; - bp->b_datap = (uintptr_t)0xdeadbeef; + lck_mtx_unlock(buf_mtxp); - if (ISSET(bp->b_flags, B_ZALLOC)) { - zone_t z; + BLISTNONE(bp); - z = getbufzone(bp->b_bufsize); - zfree(z, (void *)elem); - } else - kmem_free(kernel_map, elem, bp->b_bufsize); + if (ISSET(bp->b_flags, B_META)) { + buf_free_meta_store(bp); } trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno); - /* clear out various other fields */ - bp->b_bufsize = 0; - bp->b_datap = (uintptr_t)NULL; - bp->b_upl = (void *)NULL; - /* - * preserve the state of whether this buffer - * was allocated on the fly or not... - * the only other flag that should be set at - * this point is BL_BUSY... 
- */ + buf_release_credentials(bp); + + /* If discarding, just move to the empty queue */ + if (discard) { + lck_mtx_lock_spin(buf_mtxp); + CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA)); + bp->b_whichq = BQ_EMPTY; + binshash(bp, &invalhash); + binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY); + CLR(bp->b_lflags, BL_BUSY); + buf_busycount--; + } else { + /* Not discarding: clean up and prepare for reuse */ + bp->b_bufsize = 0; + bp->b_datap = (uintptr_t)NULL; + bp->b_upl = (void *)NULL; + bp->b_fsprivate = (void *)NULL; + /* + * preserve the state of whether this buffer + * was allocated on the fly or not... + * the only other flag that should be set at + * this point is BL_BUSY... + */ #ifdef JOE_DEBUG - bp->b_owner = current_thread(); - bp->b_tag = 3; + bp->b_owner = current_thread(); + bp->b_tag = 3; #endif - bp->b_lflags = BL_BUSY; - bp->b_flags = (bp->b_flags & B_HDRALLOC); - bp->b_dev = NODEV; - bp->b_blkno = bp->b_lblkno = 0; - bp->b_iodone = NULL; - bp->b_error = 0; - bp->b_resid = 0; - bp->b_bcount = 0; - bp->b_dirtyoff = bp->b_dirtyend = 0; - bp->b_validoff = bp->b_validend = 0; - - /* nuke any credentials we were holding */ - if (IS_VALID_CRED(bp->b_rcred)) { - kauth_cred_unref(&bp->b_rcred); - } - if (IS_VALID_CRED(bp->b_wcred)) { - kauth_cred_unref(&bp->b_wcred); + bp->b_lflags = BL_BUSY; + bp->b_flags = (bp->b_flags & B_HDRALLOC); + bp->b_redundancy_flags = 0; + bp->b_dev = NODEV; + bp->b_blkno = bp->b_lblkno = 0; + bp->b_iodone = NULL; + bp->b_error = 0; + bp->b_resid = 0; + bp->b_bcount = 0; + bp->b_dirtyoff = bp->b_dirtyend = 0; + bp->b_validoff = bp->b_validend = 0; + bzero(&bp->b_attr, sizeof(struct bufattr)); + + lck_mtx_lock_spin(buf_mtxp); } - lck_mtx_lock(buf_mtxp); - - return (0); + return 0; } @@ -2834,31 +3973,37 @@ bcleanbuf(buf_t bp) errno_t buf_invalblkno(vnode_t vp, daddr64_t lblkno, int flags) { - buf_t bp; - errno_t error; + buf_t bp; + errno_t error; + struct bufhashhdr *dp; - lck_mtx_lock(buf_mtxp); -relook: - if ((bp = incore_locked(vp, lblkno)) == (struct buf *)0) { - lck_mtx_unlock(buf_mtxp); - return (0); + dp = BUFHASH(vp, lblkno); + +relook: + lck_mtx_lock_spin(buf_mtxp); + + if ((bp = incore_locked(vp, lblkno, dp)) == (struct buf *)0) { + lck_mtx_unlock(buf_mtxp); + return 0; } if (ISSET(bp->b_lflags, BL_BUSY)) { - if ( !ISSET(flags, BUF_WAIT)) { - lck_mtx_unlock(buf_mtxp); - return (EBUSY); + if (!ISSET(flags, BUF_WAIT)) { + lck_mtx_unlock(buf_mtxp); + return EBUSY; } - SET(bp->b_lflags, BL_WANTED); + SET(bp->b_lflags, BL_WANTED); - error = msleep((caddr_t)bp, buf_mtxp, (PRIBIO + 1), (char *)"buf_invalblkno", 0); + error = msleep((caddr_t)bp, buf_mtxp, PDROP | (PRIBIO + 1), "buf_invalblkno", NULL); - if (error) - return (error); + if (error) { + return error; + } goto relook; } bremfree_locked(bp); SET(bp->b_lflags, BL_BUSY); SET(bp->b_flags, B_INVAL); + buf_busycount++; #ifdef JOE_DEBUG bp->b_owner = current_thread(); bp->b_tag = 4; @@ -2866,51 +4011,57 @@ relook: lck_mtx_unlock(buf_mtxp); buf_brelse(bp); - return (0); + return 0; } void buf_drop(buf_t bp) { - int need_wakeup = 0; + int need_wakeup = 0; - lck_mtx_lock(buf_mtxp); + lck_mtx_lock_spin(buf_mtxp); if (ISSET(bp->b_lflags, BL_WANTED)) { - /* + /* * delay the actual wakeup until after we * clear BL_BUSY and we've dropped buf_mtxp */ need_wakeup = 1; } +#ifdef JOE_DEBUG + bp->b_owner = current_thread(); + bp->b_tag = 9; +#endif /* * Unlock the buffer. 
*/ CLR(bp->b_lflags, (BL_BUSY | BL_WANTED)); + buf_busycount--; lck_mtx_unlock(buf_mtxp); if (need_wakeup) { - /* + /* * Wake up any proceeses waiting for _this_ buffer to become free. */ - wakeup(bp); + wakeup(bp); } } errno_t -buf_acquire(buf_t bp, int flags, int slpflag, int slptimeo) { - errno_t error; +buf_acquire(buf_t bp, int flags, int slpflag, int slptimeo) +{ + errno_t error; - lck_mtx_lock(buf_mtxp); + lck_mtx_lock_spin(buf_mtxp); error = buf_acquire_locked(bp, flags, slpflag, slptimeo); - lck_mtx_unlock(buf_mtxp); + lck_mtx_unlock(buf_mtxp); - return (error); + return error; } @@ -2921,39 +4072,46 @@ buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo) struct timespec ts; if (ISSET(bp->b_flags, B_LOCKED)) { - if ((flags & BAC_SKIP_LOCKED)) - return (EDEADLK); + if ((flags & BAC_SKIP_LOCKED)) { + return EDEADLK; + } } else { - if ((flags & BAC_SKIP_NONLOCKED)) - return (EDEADLK); + if ((flags & BAC_SKIP_NONLOCKED)) { + return EDEADLK; + } } - if (ISSET(bp->b_lflags, BL_BUSY)) { - /* - * since the mutex_lock may block, the buffer + if (ISSET(bp->b_lflags, BL_BUSY)) { + /* + * since the lck_mtx_lock may block, the buffer * may become BUSY, so we need to * recheck for a NOWAIT request */ - if (flags & BAC_NOWAIT) - return (EBUSY); - SET(bp->b_lflags, BL_WANTED); + if (flags & BAC_NOWAIT) { + return EBUSY; + } + SET(bp->b_lflags, BL_WANTED); /* the hz value is 100; which leads to 10ms */ - ts.tv_sec = (slptimeo/100); + ts.tv_sec = (slptimeo / 100); ts.tv_nsec = (slptimeo % 100) * 10 * NSEC_PER_USEC * 1000; - error = msleep((caddr_t)bp, buf_mtxp, slpflag | (PRIBIO + 1), (char *)"buf_acquire", &ts); + error = msleep((caddr_t)bp, buf_mtxp, slpflag | (PRIBIO + 1), "buf_acquire", &ts); - if (error) - return (error); - return (EAGAIN); + if (error) { + return error; + } + return EAGAIN; + } + if (flags & BAC_REMOVE) { + bremfree_locked(bp); } - if (flags & BAC_REMOVE) - bremfree_locked(bp); SET(bp->b_lflags, BL_BUSY); + buf_busycount++; + #ifdef JOE_DEBUG bp->b_owner = current_thread(); bp->b_tag = 5; #endif - return (0); + return 0; } @@ -2964,23 +4122,29 @@ buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo) errno_t buf_biowait(buf_t bp) { - lck_mtx_lock(buf_mtxp); - - while (!ISSET(bp->b_flags, B_DONE)) - (void) msleep(bp, buf_mtxp, (PRIBIO+1), (char *)"buf_biowait", 0); + while (!ISSET(bp->b_flags, B_DONE)) { + lck_mtx_lock_spin(buf_mtxp); - lck_mtx_unlock(buf_mtxp); - + if (!ISSET(bp->b_flags, B_DONE)) { + DTRACE_IO1(wait__start, buf_t, bp); + (void) msleep(bp, buf_mtxp, PDROP | (PRIBIO + 1), "buf_biowait", NULL); + DTRACE_IO1(wait__done, buf_t, bp); + } else { + lck_mtx_unlock(buf_mtxp); + } + } /* check for interruption of I/O (e.g. via NFS), then errors. */ if (ISSET(bp->b_flags, B_EINTR)) { CLR(bp->b_flags, B_EINTR); - return (EINTR); - } else if (ISSET(bp->b_flags, B_ERROR)) - return (bp->b_error ? bp->b_error : EIO); - else - return (0); + return EINTR; + } else if (ISSET(bp->b_flags, B_ERROR)) { + return bp->b_error ? bp->b_error : EIO; + } else { + return 0; + } } + /* * Mark I/O complete on a buffer. * @@ -2997,77 +4161,142 @@ buf_biowait(buf_t bp) * (for swap pager, that puts swap buffers on the free lists (!!!), * for the vn device, that puts malloc'd buffers on the free lists!) 
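 * A hedged sketch of the one-shot callout convention handled below
 * (my_done and my_arg are hypothetical):
 *
 *	bp->b_iodone = my_done;
 *	bp->b_transaction = my_arg;
 *	SET(bp->b_flags, B_CALL | B_ASYNC);
 *	VNOP_STRATEGY(bp);
 *
 * On completion buf_biodone() clears B_CALL, marks the buffer B_DONE
 * and calls my_done(bp, my_arg), which then owns bp and must release
 * it. A B_FILTER callout is invoked the same way but acts purely as a
 * filter: biodone keeps ownership and goes on to release or wake the
 * buffer itself.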
*/ -extern struct timeval priority_IO_timestamp_for_root; -extern int hard_throttle_on_root; void buf_biodone(buf_t bp) { + mount_t mp; + struct bufattr *bap; + struct timeval real_elapsed; + uint64_t real_elapsed_usec = 0; + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_START, - (int)bp, (int)bp->b_datap, bp->b_flags, 0, 0); + bp, bp->b_datap, bp->b_flags, 0, 0); - if (ISSET(bp->b_flags, B_DONE)) + if (ISSET(bp->b_flags, B_DONE)) { panic("biodone already"); + } + + bap = &bp->b_attr; + + if (bp->b_vp && bp->b_vp->v_mount) { + mp = bp->b_vp->v_mount; + } else { + mp = NULL; + } + + if (ISSET(bp->b_flags, B_ERROR)) { + if (mp && (MNT_ROOTFS & mp->mnt_flag)) { + dk_error_description_t desc; + bzero(&desc, sizeof(desc)); + desc.description = panic_disk_error_description; + desc.description_size = panic_disk_error_description_size; + VNOP_IOCTL(mp->mnt_devvp, DKIOCGETERRORDESCRIPTION, (caddr_t)&desc, 0, vfs_context_kernel()); + } + } + + if (mp && (bp->b_flags & B_READ) == 0) { + update_last_io_time(mp); + INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_write_size); + } else if (mp) { + INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_read_size); + } + + throttle_info_end_io(bp); + + if (kdebug_enable) { + int code = DKIO_DONE; + int io_tier = GET_BUFATTR_IO_TIER(bap); + + if (bp->b_flags & B_READ) { + code |= DKIO_READ; + } + if (bp->b_flags & B_ASYNC) { + code |= DKIO_ASYNC; + } + + if (bp->b_flags & B_META) { + code |= DKIO_META; + } else if (bp->b_flags & B_PAGEIO) { + code |= DKIO_PAGING; + } + + if (io_tier != 0) { + code |= DKIO_THROTTLE; + } + + code |= ((io_tier << DKIO_TIER_SHIFT) & DKIO_TIER_MASK); + + if (bp->b_flags & B_PASSIVE) { + code |= DKIO_PASSIVE; + } + + if (bap->ba_flags & BA_NOCACHE) { + code |= DKIO_NOCACHE; + } + + if (bap->ba_flags & BA_IO_TIER_UPGRADE) { + code |= DKIO_TIER_UPGRADE; + } - if (kdebug_enable) { - int code = DKIO_DONE; - - if (bp->b_flags & B_READ) - code |= DKIO_READ; - if (bp->b_flags & B_ASYNC) - code |= DKIO_ASYNC; - - if (bp->b_flags & B_META) - code |= DKIO_META; - else if (bp->b_flags & B_PAGEIO) - code |= DKIO_PAGING; - - KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE, - (unsigned int)bp, (unsigned int)bp->b_vp, - bp->b_resid, bp->b_error, 0); - } - if ((bp->b_vp != NULLVP) && - ((bp->b_flags & (B_PAGEIO | B_READ)) == (B_PAGEIO | B_READ)) && - (bp->b_vp->v_mount->mnt_kern_flag & MNTK_ROOTDEV)) { - microuptime(&priority_IO_timestamp_for_root); - hard_throttle_on_root = 0; + KDBG_RELEASE_NOPROCFILT(FSDBG_CODE(DBG_DKRW, code), + buf_kernel_addrperm_addr(bp), + (uintptr_t)VM_KERNEL_ADDRPERM(bp->b_vp), bp->b_resid, + bp->b_error); } + + microuptime(&real_elapsed); + timevalsub(&real_elapsed, &bp->b_timestamp_tv); + real_elapsed_usec = real_elapsed.tv_sec * USEC_PER_SEC + real_elapsed.tv_usec; + disk_conditioner_delay(bp, 1, bp->b_bcount, real_elapsed_usec); + /* * I/O was done, so don't believe - * the DIRTY state from VM anymore + * the DIRTY state from VM anymore... 
+ * and we need to reset the THROTTLED/PASSIVE
+ * indicators
	 */
-	CLR(bp->b_flags, B_WASDIRTY);
+	CLR(bp->b_flags, (B_WASDIRTY | B_PASSIVE));
+	CLR(bap->ba_flags, (BA_META | BA_NOCACHE | BA_DELAYIDLESLEEP | BA_IO_TIER_UPGRADE));

-	if (!ISSET(bp->b_flags, B_READ) && !ISSET(bp->b_flags, B_RAW))
-		/*
+	SET_BUFATTR_IO_TIER(bap, 0);
+
+	DTRACE_IO1(done, buf_t, bp);
+
+	if (!ISSET(bp->b_flags, B_READ) && !ISSET(bp->b_flags, B_RAW)) {
+		/*
		 * wake up any writers blocked
		 * on throttle or waiting for I/O
		 * to drain
		 */
		vnode_writedone(bp->b_vp);
+	}

-	if (ISSET(bp->b_flags, (B_CALL | B_FILTER))) {	/* if necessary, call out */
-		void	(*iodone_func)(struct buf *, void *) = bp->b_iodone;
-		void	*arg = (void *)bp->b_transaction;
+	if (ISSET(bp->b_flags, (B_CALL | B_FILTER))) {	/* if necessary, call out */
+		void    (*iodone_func)(struct buf *, void *) = bp->b_iodone;
+		void    *arg = bp->b_transaction;
		int     callout = ISSET(bp->b_flags, B_CALL);

-		CLR(bp->b_flags, (B_CALL | B_FILTER));	/* filters and callouts are one-shot */
+		if (iodone_func == NULL) {
+			panic("biodone: bp @ %p has NULL b_iodone!\n", bp);
+		}
+
+		CLR(bp->b_flags, (B_CALL | B_FILTER));	/* filters and callouts are one-shot */
		bp->b_iodone = NULL;
		bp->b_transaction = NULL;

-		if (iodone_func == NULL) {
-			panic("biodone: bp @ 0x%x has NULL b_iodone!\n", bp);
-		} else {
-			if (callout)
-				SET(bp->b_flags, B_DONE);	/* note that it's done */
-			(*iodone_func)(bp, arg);
+		if (callout) {
+			SET(bp->b_flags, B_DONE);	/* note that it's done */
		}
-		if (callout)
-			/*
-			 * assumes that the call back function takes
+		(*iodone_func)(bp, arg);
+
+		if (callout) {
+			/*
+			 * assumes that the callback function takes
			 * ownership of the bp and deals with releasing it if necessary
			 */
-			goto biodone_done;
+			goto biodone_done;
+		}
		/*
		 * in this case the callback function is acting
		 * strictly as a filter... it does not take
@@ -3076,12 +4305,12 @@ buf_biodone(buf_t bp)
		 * by the HFS journaling code
		 */
	}
-	if (ISSET(bp->b_flags, B_ASYNC)) {	/* if async, release it */
-		SET(bp->b_flags, B_DONE);	/* note that it's done */
+	if (ISSET(bp->b_flags, B_ASYNC)) {	/* if async, release it */
+		SET(bp->b_flags, B_DONE);	/* note that it's done */

		buf_brelse(bp);
-	} else {	/* or just wakeup the buffer */
-		/*
+	} else {	/* or just wakeup the buffer */
+		/*
		 * by taking the mutex, we serialize
		 * the buf owner calling buf_biowait so that we'll
		 * only see it in one of 2 states...
@@ -3095,18 +4324,31 @@ buf_biodone(buf_t bp)
		 * they do get to run, they're going to re-set
		 * BL_WANTED and go back to sleep
		 */
-		lck_mtx_lock(buf_mtxp);
+		lck_mtx_lock_spin(buf_mtxp);

		CLR(bp->b_lflags, BL_WANTED);
-		SET(bp->b_flags, B_DONE);		/* note that it's done */
+		SET(bp->b_flags, B_DONE);	/* note that it's done */

-		lck_mtx_unlock(buf_mtxp);
+		lck_mtx_unlock(buf_mtxp);

		wakeup(bp);
	}
biodone_done:
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_END,
-		(int)bp, (int)bp->b_datap, bp->b_flags, 0, 0);
+	    (uintptr_t)bp, (uintptr_t)bp->b_datap, bp->b_flags, 0, 0);
+}
+
+/*
+ * Obfuscate buf pointers.
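 * For illustration, callers report the obfuscated value rather than the
 * raw pointer, as the KDBG_RELEASE_NOPROCFILT trace point earlier in
 * buf_biodone() does; a hypothetical log line would look like:
 *
 *	printf("buf %lx done\n", (unsigned long)buf_kernel_addrperm_addr(bp));
 *
 * Adding the random slide buf_kernel_addrperm keeps equal pointers
 * correlatable across trace entries without disclosing real kernel
 * addresses, and 0 maps to 0 so "no buffer" stays recognizable.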
 */
+vm_offset_t
+buf_kernel_addrperm_addr(void * addr)
+{
+	if ((vm_offset_t)addr == 0) {
+		return 0;
+	} else {
+		return (vm_offset_t)addr + buf_kernel_addrperm;
+	}
+}

/*
@@ -3115,35 +4357,28 @@ biodone_done:
int
count_lock_queue(void)
{
-	buf_t	bp;
-	int	n = 0;
+	buf_t   bp;
+	int     n = 0;

-	lck_mtx_lock(buf_mtxp);
+	lck_mtx_lock_spin(buf_mtxp);

	for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
-	    bp = bp->b_freelist.tqe_next)
+	    bp = bp->b_freelist.tqe_next) {
		n++;
+	}
	lck_mtx_unlock(buf_mtxp);

-	return (n);
+	return n;
}

/*
 * Return a count of 'busy' buffers. Used at the time of shutdown.
+ * note: This is also called from the mach side in debug context in kdp.c
 */
int
count_busy_buffers(void)
{
-	buf_t	bp;
-	int	nbusy = 0;
-
-	lck_mtx_lock(buf_mtxp);
-	for (bp = &buf[boot_nbuf]; --bp >= buf; )
-		if (!ISSET(bp->b_flags, B_INVAL) && ISSET(bp->b_lflags, BL_BUSY))
-			nbusy++;
-	lck_mtx_unlock(buf_mtxp);
-
-	return (nbusy);
+	return buf_busycount + bufstats.bufs_iobufinuse;
}

#if DIAGNOSTIC
@@ -3156,71 +4391,112 @@ void
vfs_bufstats()
{
	int i, j, count;
-	register struct buf *bp;
-	register struct bqueues *dp;
-	int counts[MAXBSIZE/CLBYTES+1];
+	struct buf *bp;
+	struct bqueues *dp;
+	int counts[MAXBSIZE / CLBYTES + 1];
	static char *bname[BQUEUES] =
-		{ "LOCKED", "LRU", "AGE", "EMPTY", "META", "LAUNDRY" };
+	{ "LOCKED", "LRU", "AGE", "EMPTY", "META", "LAUNDRY" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
-		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
+		for (j = 0; j <= MAXBSIZE / CLBYTES; j++) {
			counts[j] = 0;
+		}

		lck_mtx_lock(buf_mtxp);

		for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
-			counts[bp->b_bufsize/CLBYTES]++;
+			counts[bp->b_bufsize / CLBYTES]++;
			count++;
		}
		lck_mtx_unlock(buf_mtxp);

		printf("%s: total-%d", bname[i], count);
-		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
-			if (counts[j] != 0)
+		for (j = 0; j <= MAXBSIZE / CLBYTES; j++) {
+			if (counts[j] != 0) {
				printf(", %d-%d", j * CLBYTES, counts[j]);
+			}
+		}
		printf("\n");
	}
}
#endif /* DIAGNOSTIC */

-#define	NRESERVEDIOBUFS	64
+#define NRESERVEDIOBUFS 128
+#define MNT_VIRTUALDEV_MAX_IOBUFS 16
+#define VIRTUALDEV_MAX_IOBUFS   ((40*niobuf_headers)/100)

buf_t
alloc_io_buf(vnode_t vp, int priv)
{
-	buf_t	bp;
+	buf_t   bp;
+	mount_t mp = NULL;
+	int alloc_for_virtualdev = FALSE;
+
+	lck_mtx_lock_spin(iobuffer_mtxp);

-	lck_mtx_lock(iobuffer_mtxp);
+	/*
+	 * We subject iobuf requests for diskimages to additional restrictions.
+	 *
+	 * a) A single diskimage mount cannot use up more than
+	 * MNT_VIRTUALDEV_MAX_IOBUFS. However, vm privileged (pageout) requests
+	 * are not subject to this restriction.
+	 * b) iobuf headers used by all diskimage mounts combined
+	 * cannot exceed VIRTUALDEV_MAX_IOBUFS.
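	 *
	 * For illustration, with the defaults above and a hypothetical
	 * niobuf_headers of 2048, the two ceilings work out as follows
	 * (standalone sketch, not kernel code):
	 *
	 *	#define MNT_VIRTUALDEV_MAX_IOBUFS 16	// per diskimage mount
	 *
	 *	static int virtualdev_max_iobufs(int niobuf_headers) {
	 *		// 40% of all iobuf headers, shared by every diskimage mount
	 *		return (40 * niobuf_headers) / 100;
	 *	}
	 *
	 *	// virtualdev_max_iobufs(2048) == 819; a single diskimage mount
	 *	// is still capped at 16 in-flight iobufs unless the request is
	 *	// vm privileged (pageout).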
+ */ + if (vp && ((mp = vp->v_mount)) && mp != dead_mountp && + mp->mnt_kern_flag & MNTK_VIRTUALDEV) { + alloc_for_virtualdev = TRUE; + while ((!priv && mp->mnt_iobufinuse > MNT_VIRTUALDEV_MAX_IOBUFS) || + bufstats.bufs_iobufinuse_vdev > VIRTUALDEV_MAX_IOBUFS) { + bufstats.bufs_iobufsleeps++; + + need_iobuffer = 1; + (void)msleep(&need_iobuffer, iobuffer_mtxp, + PSPIN | (PRIBIO + 1), (const char *)"alloc_io_buf (1)", + NULL); + } + } - while (((niobuf - NRESERVEDIOBUFS < bufstats.bufs_iobufinuse) && !priv) || - (bp = iobufqueue.tqh_first) == NULL) { + while (((niobuf_headers - NRESERVEDIOBUFS < bufstats.bufs_iobufinuse) && !priv) || + (bp = iobufqueue.tqh_first) == NULL) { bufstats.bufs_iobufsleeps++; need_iobuffer = 1; - (void) msleep(&need_iobuffer, iobuffer_mtxp, (PRIBIO+1), (const char *)"alloc_io_buf", 0); + (void)msleep(&need_iobuffer, iobuffer_mtxp, PSPIN | (PRIBIO + 1), + (const char *)"alloc_io_buf (2)", NULL); } TAILQ_REMOVE(&iobufqueue, bp, b_freelist); bufstats.bufs_iobufinuse++; - if (bufstats.bufs_iobufinuse > bufstats.bufs_iobufmax) + if (bufstats.bufs_iobufinuse > bufstats.bufs_iobufmax) { bufstats.bufs_iobufmax = bufstats.bufs_iobufinuse; + } + + if (alloc_for_virtualdev) { + mp->mnt_iobufinuse++; + bufstats.bufs_iobufinuse_vdev++; + } lck_mtx_unlock(iobuffer_mtxp); /* * initialize various fields * we don't need to hold the mutex since the buffer - * is now private... the vp should have a reference + * is now private... the vp should have a reference * on it and is not protected by this mutex in any event */ - bp->b_timestamp = 0; + bp->b_timestamp = 0; bp->b_proc = NULL; bp->b_datap = 0; bp->b_flags = 0; bp->b_lflags = BL_BUSY | BL_IOBUF; + if (alloc_for_virtualdev) { + bp->b_lflags |= BL_IOBUF_VDEV; + } + bp->b_redundancy_flags = 0; bp->b_blkno = bp->b_lblkno = 0; #ifdef JOE_DEBUG bp->b_owner = current_thread(); @@ -3232,21 +4508,34 @@ alloc_io_buf(vnode_t vp, int priv) bp->b_bcount = 0; bp->b_bufsize = 0; bp->b_upl = NULL; + bp->b_fsprivate = (void *)NULL; bp->b_vp = vp; + bzero(&bp->b_attr, sizeof(struct bufattr)); - if (vp && (vp->v_type == VBLK || vp->v_type == VCHR)) + if (vp && (vp->v_type == VBLK || vp->v_type == VCHR)) { bp->b_dev = vp->v_rdev; - else + } else { bp->b_dev = NODEV; + } - return (bp); + return bp; } void free_io_buf(buf_t bp) { - int need_wakeup = 0; + int need_wakeup = 0; + int free_for_virtualdev = FALSE; + mount_t mp = NULL; + + /* Was this iobuf for a diskimage ? */ + if (bp->b_lflags & BL_IOBUF_VDEV) { + free_for_virtualdev = TRUE; + if (bp->b_vp) { + mp = bp->b_vp->v_mount; + } + } /* * put buffer back on the head of the iobufqueue @@ -3254,12 +4543,15 @@ free_io_buf(buf_t bp) bp->b_vp = NULL; bp->b_flags = B_INVAL; - lck_mtx_lock(iobuffer_mtxp); + /* Zero out the bufattr and its flags before relinquishing this iobuf */ + bzero(&bp->b_attr, sizeof(struct bufattr)); + + lck_mtx_lock_spin(iobuffer_mtxp); binsheadfree(bp, &iobufqueue, -1); if (need_iobuffer) { - /* + /* * Wake up any processes waiting because they need an io buffer * * do the wakeup after we drop the mutex... 
it's possible that the @@ -3271,33 +4563,59 @@ free_io_buf(buf_t bp) need_iobuffer = 0; need_wakeup = 1; } + if (bufstats.bufs_iobufinuse <= 0) { + panic("free_io_buf: bp(%p) - bufstats.bufs_iobufinuse < 0", bp); + } + bufstats.bufs_iobufinuse--; + if (free_for_virtualdev) { + bufstats.bufs_iobufinuse_vdev--; + if (mp && mp != dead_mountp) { + mp->mnt_iobufinuse--; + } + } + lck_mtx_unlock(iobuffer_mtxp); - if (need_wakeup) - wakeup(&need_iobuffer); + if (need_wakeup) { + wakeup(&need_iobuffer); + } } +void +buf_list_lock(void) +{ + lck_mtx_lock_spin(buf_mtxp); +} + +void +buf_list_unlock(void) +{ + lck_mtx_unlock(buf_mtxp); +} /* * If getnewbuf() calls bcleanbuf() on the same thread * there is a potential for stack overrun and deadlocks. * So we always handoff the work to a worker thread for completion */ -#include -#include -#include static void bcleanbuf_thread_init(void) { + thread_t thread = THREAD_NULL; + /* create worker thread */ - kernel_thread(kernel_task, bcleanbuf_thread); + kernel_thread_start((thread_continue_t)bcleanbuf_thread, NULL, &thread); + thread_deallocate(thread); } +typedef int (*bcleanbufcontinuation)(int); + +__attribute__((noreturn)) static void bcleanbuf_thread(void) { @@ -3306,17 +4624,27 @@ bcleanbuf_thread(void) int loopcnt = 0; for (;;) { - lck_mtx_lock(buf_mtxp); + lck_mtx_lock_spin(buf_mtxp); - while (blaundrycnt == 0) - (void)msleep((void *)&blaundrycnt, buf_mtxp, PRIBIO, "blaundry", 0); + while ((bp = TAILQ_FIRST(&bufqueues[BQ_LAUNDRY])) == NULL) { + (void)msleep0(&bufqueues[BQ_LAUNDRY], buf_mtxp, PRIBIO | PDROP, "blaundry", 0, (bcleanbufcontinuation)bcleanbuf_thread); + } - bp = TAILQ_FIRST(&bufqueues[BQ_LAUNDRY]); /* * Remove from the queue */ bremfree_locked(bp); - blaundrycnt--; + + /* + * Buffer is no longer on any free list + */ + SET(bp->b_lflags, BL_BUSY); + buf_busycount++; + +#ifdef JOE_DEBUG + bp->b_owner = current_thread(); + bp->b_tag = 10; +#endif lck_mtx_unlock(buf_mtxp); /* @@ -3325,18 +4653,35 @@ bcleanbuf_thread(void) error = bawrite_internal(bp, 0); if (error) { - lck_mtx_lock(buf_mtxp); + bp->b_whichq = BQ_LAUNDRY; + bp->b_timestamp = buf_timestamp(); + + lck_mtx_lock_spin(buf_mtxp); binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY); blaundrycnt++; + /* we never leave a busy page on the laundry queue */ + CLR(bp->b_lflags, BL_BUSY); + buf_busycount--; +#ifdef JOE_DEBUG + bp->b_owner = current_thread(); + bp->b_tag = 11; +#endif + lck_mtx_unlock(buf_mtxp); - if (loopcnt > 10) { - (void)tsleep((void *)&blaundrycnt, PRIBIO, "blaundry", 1); + if (loopcnt > MAXLAUNDRY) { + /* + * bawrite_internal() can return errors if we're throttled. If we've + * done several I/Os and failed, give the system some time to unthrottle + * the vnode + */ + (void)tsleep((void *)&bufqueues[BQ_LAUNDRY], PRIBIO, "blaundry", 1); loopcnt = 0; } else { - (void)thread_block(THREAD_CONTINUE_NULL); + /* give other threads a chance to run */ + (void)thread_block(THREAD_CONTINUE_NULL); loopcnt++; } } @@ -3347,19 +4692,20 @@ bcleanbuf_thread(void) static int brecover_data(buf_t bp) { - int upl_offset; - upl_t upl; + int upl_offset; + upl_t upl; upl_page_info_t *pl; kern_return_t kret; - vnode_t vp = bp->b_vp; + vnode_t vp = bp->b_vp; int upl_flags; - if ( !UBCINFOEXISTS(vp) || bp->b_bufsize == 0) - goto dump_buffer; + if (!UBCINFOEXISTS(vp) || bp->b_bufsize == 0) { + goto dump_buffer; + } upl_flags = UPL_PRECIOUS; - if (! 
(buf_flags(bp) & B_READ)) { + if (!(buf_flags(bp) & B_READ)) { /* * "write" operation: let the UPL subsystem know * that we intend to modify the buffer cache pages we're @@ -3367,454 +4713,302 @@ brecover_data(buf_t bp) */ upl_flags |= UPL_WILL_MODIFY; } - - kret = ubc_create_upl(vp, - ubc_blktooff(vp, bp->b_lblkno), - bp->b_bufsize, - &upl, - &pl, - upl_flags); - if (kret != KERN_SUCCESS) - panic("Failed to create UPL"); - for (upl_offset = 0; upl_offset < bp->b_bufsize; upl_offset += PAGE_SIZE) { + kret = ubc_create_upl_kernel(vp, + ubc_blktooff(vp, bp->b_lblkno), + bp->b_bufsize, + &upl, + &pl, + upl_flags, + VM_KERN_MEMORY_FILE); + if (kret != KERN_SUCCESS) { + panic("Failed to create UPL"); + } - if (!upl_valid_page(pl, upl_offset / PAGE_SIZE) || !upl_dirty_page(pl, upl_offset / PAGE_SIZE)) { - ubc_upl_abort(upl, 0); + for (upl_offset = 0; upl_offset < bp->b_bufsize; upl_offset += PAGE_SIZE) { + if (!upl_valid_page(pl, upl_offset / PAGE_SIZE) || !upl_dirty_page(pl, upl_offset / PAGE_SIZE)) { + ubc_upl_abort(upl, 0); goto dump_buffer; } } bp->b_upl = upl; - - kret = ubc_upl_map(upl, (vm_address_t *)&(bp->b_datap)); - if (kret != KERN_SUCCESS) - panic("getblk: ubc_upl_map() failed with (%d)", kret); - return (1); + kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap)); + + if (kret != KERN_SUCCESS) { + panic("getblk: ubc_upl_map() failed with (%d)", kret); + } + return 1; dump_buffer: bp->b_bufsize = 0; SET(bp->b_flags, B_INVAL); buf_brelse(bp); - return(0); + return 0; } - - -/* - * disabled for now - */ - -#if FLUSH_QUEUES - -#define NFLUSH 32 - -static int -bp_cmp(void *a, void *b) +int +fs_buffer_cache_gc_register(void (* callout)(int, void *), void *context) { - buf_t *bp_a = *(buf_t **)a, - *bp_b = *(buf_t **)b; - daddr64_t res; - - // don't have to worry about negative block - // numbers so this is ok to do. - // - res = (bp_a->b_blkno - bp_b->b_blkno); + lck_mtx_lock(buf_gc_callout); + for (int i = 0; i < FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE; i++) { + if (fs_callouts[i].callout == NULL) { + fs_callouts[i].callout = callout; + fs_callouts[i].context = context; + lck_mtx_unlock(buf_gc_callout); + return 0; + } + } - return (int)res; + lck_mtx_unlock(buf_gc_callout); + return ENOMEM; } - int -bflushq(int whichq, mount_t mp) +fs_buffer_cache_gc_unregister(void (* callout)(int, void *), void *context) { - buf_t bp, next; - int i, buf_count; - int total_writes = 0; - static buf_t flush_table[NFLUSH]; + lck_mtx_lock(buf_gc_callout); + for (int i = 0; i < FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE; i++) { + if (fs_callouts[i].callout == callout && + fs_callouts[i].context == context) { + fs_callouts[i].callout = NULL; + fs_callouts[i].context = NULL; + } + } + lck_mtx_unlock(buf_gc_callout); + return 0; +} - if (whichq < 0 || whichq >= BQUEUES) { - return (0); +static void +fs_buffer_cache_gc_dispatch_callouts(int all) +{ + lck_mtx_lock(buf_gc_callout); + for (int i = 0; i < FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE; i++) { + if (fs_callouts[i].callout != NULL) { + fs_callouts[i].callout(all, fs_callouts[i].context); + } } + lck_mtx_unlock(buf_gc_callout); +} - restart: +static boolean_t +buffer_cache_gc(int all) +{ + buf_t bp; + boolean_t did_large_zfree = FALSE; + boolean_t need_wakeup = FALSE; + int now = buf_timestamp(); + uint32_t found = 0; + struct bqueues privq; + int thresh_hold = BUF_STALE_THRESHHOLD; + + if (all) { + thresh_hold = 0; + } + /* + * We only care about metadata (incore storage comes from zalloc()). 
+ * Unless "all" is set (used to evict meta data buffers in preparation + * for deep sleep), we only evict up to BUF_MAX_GC_BATCH_SIZE buffers + * that have not been accessed in the last BUF_STALE_THRESHOLD seconds. + * BUF_MAX_GC_BATCH_SIZE controls both the hold time of the global lock + * "buf_mtxp" and the length of time we spend compute bound in the GC + * thread which calls this function + */ lck_mtx_lock(buf_mtxp); - bp = TAILQ_FIRST(&bufqueues[whichq]); - - for (buf_count = 0; bp; bp = next) { - next = bp->b_freelist.tqe_next; - - if (bp->b_vp == NULL || bp->b_vp->v_mount != mp) { - continue; - } + do { + found = 0; + TAILQ_INIT(&privq); + need_wakeup = FALSE; - if (ISSET(bp->b_flags, B_DELWRI) && !ISSET(bp->b_lflags, BL_BUSY)) { + while (((bp = TAILQ_FIRST(&bufqueues[BQ_META]))) && + (now > bp->b_timestamp) && + (now - bp->b_timestamp > thresh_hold) && + (found < BUF_MAX_GC_BATCH_SIZE)) { + /* Remove from free list */ + bremfree_locked(bp); + found++; - bremfree_locked(bp); #ifdef JOE_DEBUG - bp->b_owner = current_thread(); - bp->b_tag = 7; + bp->b_owner = current_thread(); + bp->b_tag = 12; #endif - SET(bp->b_lflags, BL_BUSY); - flush_table[buf_count] = bp; - buf_count++; - total_writes++; - - if (buf_count >= NFLUSH) { - lck_mtx_unlock(buf_mtxp); - - qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp); - - for (i = 0; i < buf_count; i++) { - buf_bawrite(flush_table[i]); - } - goto restart; - } - } - } - lck_mtx_unlock(buf_mtxp); - if (buf_count > 0) { - qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp); + /* If dirty, move to laundry queue and remember to do wakeup */ + if (ISSET(bp->b_flags, B_DELWRI)) { + SET(bp->b_lflags, BL_WANTDEALLOC); - for (i = 0; i < buf_count; i++) { - buf_bawrite(flush_table[i]); - } - } + bmovelaundry(bp); + need_wakeup = TRUE; - return (total_writes); -} -#endif + continue; + } + /* + * Mark busy and put on private list. We could technically get + * away without setting BL_BUSY here. + */ + SET(bp->b_lflags, BL_BUSY); + buf_busycount++; -#if BALANCE_QUEUES + /* + * Remove from hash and dissociate from vp. + */ + bremhash(bp); + if (bp->b_vp) { + brelvp_locked(bp); + } -/* XXX move this to a separate file */ + TAILQ_INSERT_TAIL(&privq, bp, b_freelist); + } -/* - * NOTE: THIS CODE HAS NOT BEEN UPDATED - * WITH RESPECT TO THE NEW LOCKING MODEL - */ - + if (found == 0) { + break; + } -/* - * Dynamic Scaling of the Buffer Queues - */ + /* Drop lock for batch processing */ + lck_mtx_unlock(buf_mtxp); -typedef long long blsize_t; + /* Wakeup and yield for laundry if need be */ + if (need_wakeup) { + wakeup(&bufqueues[BQ_LAUNDRY]); + (void)thread_block(THREAD_CONTINUE_NULL); + } -blsize_t MAXNBUF; /* initialize to (sane_size / PAGE_SIZE) */ -/* Global tunable limits */ -blsize_t nbufh; /* number of buffer headers */ -blsize_t nbuflow; /* minimum number of buffer headers required */ -blsize_t nbufhigh; /* maximum number of buffer headers allowed */ -blsize_t nbuftarget; /* preferred number of buffer headers */ + /* Clean up every buffer on private list */ + TAILQ_FOREACH(bp, &privq, b_freelist) { + /* Take note if we've definitely freed at least a page to a zone */ + if ((ISSET(bp->b_flags, B_ZALLOC)) && (buf_size(bp) >= PAGE_SIZE)) { + did_large_zfree = TRUE; + } -/* - * assertions: - * - * 1. 0 < nbuflow <= nbufh <= nbufhigh - * 2. nbufhigh <= MAXNBUF - * 3. 0 < nbuflow <= nbuftarget <= nbufhigh - * 4. nbufh can not be set by sysctl(). 
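 *
 * (For illustration, invariant 3 is what the old init code enforced
 * with a max()/min() pair when seeding nbuftarget; a standalone sketch
 * of that clamp:
 *
 *	static long long clamp_target(long long low, long long target,
 *	    long long high) {
 *		if (target < low)  target = low;	// respect the floor
 *		if (target > high) target = high;	// respect the ceiling
 *		return target;
 *	}
 * )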
- */ + trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno); -/* Per queue tunable limits */ + /* Free Storage */ + buf_free_meta_store(bp); -struct bufqlim { - blsize_t bl_nlow; /* minimum number of buffer headers required */ - blsize_t bl_num; /* number of buffer headers on the queue */ - blsize_t bl_nlhigh; /* maximum number of buffer headers allowed */ - blsize_t bl_target; /* preferred number of buffer headers */ - long bl_stale; /* Seconds after which a buffer is considered stale */ -} bufqlim[BQUEUES]; + /* Release credentials */ + buf_release_credentials(bp); -/* - * assertions: - * - * 1. 0 <= bl_nlow <= bl_num <= bl_nlhigh - * 2. bl_nlhigh <= MAXNBUF - * 3. bufqlim[BQ_META].bl_nlow != 0 - * 4. bufqlim[BQ_META].bl_nlow > (number of possible concurrent - * file system IO operations) - * 5. bl_num can not be set by sysctl(). - * 6. bl_nhigh <= nbufhigh - */ + /* Prepare for moving to empty queue */ + CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED + | B_AGE | B_ASYNC | B_NOCACHE | B_FUA)); + bp->b_whichq = BQ_EMPTY; + BLISTNONE(bp); + } + lck_mtx_lock(buf_mtxp); -/* - * Rationale: - * ---------- - * Defining it blsize_t as long permits 2^31 buffer headers per queue. - * Which can describe (2^31 * PAGE_SIZE) memory per queue. - * - * These limits are exported to by means of sysctl(). - * It was decided to define blsize_t as a 64 bit quantity. - * This will make sure that we will not be required to change it - * as long as we do not exceed 64 bit address space for the kernel. - * - * low and high numbers parameters initialized at compile time - * and boot arguments can be used to override them. sysctl() - * would not change the value. sysctl() can get all the values - * but can set only target. num is the current level. - * - * Advantages of having a "bufqscan" thread doing the balancing are, - * Keep enough bufs on BQ_EMPTY. - * getnewbuf() by default will always select a buffer from the BQ_EMPTY. - * getnewbuf() perfoms best if a buffer was found there. - * Also this minimizes the possibility of starting IO - * from getnewbuf(). That's a performance win, too. - * - * Localize complex logic [balancing as well as time aging] - * to balancebufq(). - * - * Simplify getnewbuf() logic by elimination of time aging code. - */ + /* Back under lock, move them all to invalid hash and clear busy */ + TAILQ_FOREACH(bp, &privq, b_freelist) { + binshash(bp, &invalhash); + CLR(bp->b_lflags, BL_BUSY); + buf_busycount--; -/* - * Algorithm: - * ----------- - * The goal of the dynamic scaling of the buffer queues to to keep - * the size of the LRU close to bl_target. Buffers on a queue would - * be time aged. - * - * There would be a thread which will be responsible for "balancing" - * the buffer cache queues. - * - * The scan order would be: AGE, LRU, META, EMPTY. 
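 *
 * (For illustration, a correct round-robin over that scan order; note
 * that the modulus must use the element count, not sizeof(order), the
 * byte size, which is the pitfall in the removed nextbufq() further
 * down:
 *
 *	static const int scan_order[] = { BQ_AGE, BQ_LRU, BQ_META, BQ_EMPTY };
 *	#define NSCAN (sizeof(scan_order) / sizeof(scan_order[0]))
 *
 *	// 'pos' is a position in scan_order[], not a queue constant
 *	static int next_queue(unsigned int pos) {
 *		return scan_order[pos % NSCAN];
 *	}
 * )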
- */ +#ifdef JOE_DEBUG + if (bp->b_owner != current_thread()) { + panic("Buffer stolen from buffer_cache_gc()"); + } + bp->b_owner = current_thread(); + bp->b_tag = 13; +#endif + } -long bufqscanwait = 0; + /* And do a big bulk move to the empty queue */ + TAILQ_CONCAT(&bufqueues[BQ_EMPTY], &privq, b_freelist); + } while (all && (found == BUF_MAX_GC_BATCH_SIZE)); -static void bufqscan_thread(); -static int balancebufq(int q); -static int btrimempty(int n); -static __inline__ int initbufqscan(void); -static __inline__ int nextbufq(int q); -static void buqlimprt(int all); + lck_mtx_unlock(buf_mtxp); + fs_buffer_cache_gc_dispatch_callouts(all); -static __inline__ void -bufqinc(int q) -{ - if ((q < 0) || (q >= BQUEUES)) - return; - - bufqlim[q].bl_num++; - return; + return did_large_zfree; } -static __inline__ void -bufqdec(int q) -{ - if ((q < 0) || (q >= BQUEUES)) - return; - bufqlim[q].bl_num--; - return; -} +/* + * disabled for now + */ -static void -bufq_balance_thread_init() -{ +#if FLUSH_QUEUES - if (bufqscanwait++ == 0) { +#define NFLUSH 32 - /* Initalize globals */ - MAXNBUF = (sane_size / PAGE_SIZE); - nbufh = nbuf; - nbuflow = min(nbufh, 100); - nbufhigh = min(MAXNBUF, max(nbufh, 2048)); - nbuftarget = (sane_size >> 5) / PAGE_SIZE; - nbuftarget = max(nbuflow, nbuftarget); - nbuftarget = min(nbufhigh, nbuftarget); +static int +bp_cmp(void *a, void *b) +{ + buf_t *bp_a = *(buf_t **)a, + *bp_b = *(buf_t **)b; + daddr64_t res; - /* - * Initialize the bufqlim - */ - - /* LOCKED queue */ - bufqlim[BQ_LOCKED].bl_nlow = 0; - bufqlim[BQ_LOCKED].bl_nlhigh = 32; - bufqlim[BQ_LOCKED].bl_target = 0; - bufqlim[BQ_LOCKED].bl_stale = 30; - - /* LRU queue */ - bufqlim[BQ_LRU].bl_nlow = 0; - bufqlim[BQ_LRU].bl_nlhigh = nbufhigh/4; - bufqlim[BQ_LRU].bl_target = nbuftarget/4; - bufqlim[BQ_LRU].bl_stale = LRU_IS_STALE; - - /* AGE queue */ - bufqlim[BQ_AGE].bl_nlow = 0; - bufqlim[BQ_AGE].bl_nlhigh = nbufhigh/4; - bufqlim[BQ_AGE].bl_target = nbuftarget/4; - bufqlim[BQ_AGE].bl_stale = AGE_IS_STALE; - - /* EMPTY queue */ - bufqlim[BQ_EMPTY].bl_nlow = 0; - bufqlim[BQ_EMPTY].bl_nlhigh = nbufhigh/4; - bufqlim[BQ_EMPTY].bl_target = nbuftarget/4; - bufqlim[BQ_EMPTY].bl_stale = 600000; - - /* META queue */ - bufqlim[BQ_META].bl_nlow = 0; - bufqlim[BQ_META].bl_nlhigh = nbufhigh/4; - bufqlim[BQ_META].bl_target = nbuftarget/4; - bufqlim[BQ_META].bl_stale = META_IS_STALE; - - /* LAUNDRY queue */ - bufqlim[BQ_LOCKED].bl_nlow = 0; - bufqlim[BQ_LOCKED].bl_nlhigh = 32; - bufqlim[BQ_LOCKED].bl_target = 0; - bufqlim[BQ_LOCKED].bl_stale = 30; - - buqlimprt(1); - } + // don't have to worry about negative block + // numbers so this is ok to do. 
+	//
+	res = (bp_a->b_blkno - bp_b->b_blkno);

-	/* create worker thread */
-	kernel_thread(kernel_task, bufqscan_thread);
+	return (int)res;
}

-/* The workloop for the buffer balancing thread */
-static void
-bufqscan_thread()
-{
-	int moretodo = 0;
-
-	for(;;) {
-		do {
-			int q;	/* buffer queue to process */
-
-			q = initbufqscan();
-			for (; q; ) {
-				moretodo |= balancebufq(q);
-				q = nextbufq(q);
-			}
-		} while (moretodo);
-#if DIAGNOSTIC
-		vfs_bufstats();
-		buqlimprt(0);
-#endif
-		(void)tsleep((void *)&bufqscanwait, PRIBIO, "bufqscanwait", 60 * hz);
-		moretodo = 0;
+int
+bflushq(int whichq, mount_t mp)
+{
+	buf_t	bp, next;
+	int	i, buf_count;
+	int	total_writes = 0;
+	static buf_t flush_table[NFLUSH];
+
+	if (whichq < 0 || whichq >= BQUEUES) {
+		return 0;
	}
-}

-/* Seed for the buffer queue balancing */
-static __inline__ int
-initbufqscan()
-{
-	/* Start with AGE queue */
-	return (BQ_AGE);
-}
+restart:
+	lck_mtx_lock(buf_mtxp);

-/* Pick next buffer queue to balance */
-static __inline__ int
-nextbufq(int q)
-{
-	int order[] = { BQ_AGE, BQ_LRU, BQ_META, BQ_EMPTY, 0 };
-
-	q++;
-	q %= sizeof(order);
-	return (order[q]);
-}
+	bp = TAILQ_FIRST(&bufqueues[whichq]);

-/* function to balance the buffer queues */
-static int
-balancebufq(int q)
-{
-	int moretodo = 0;
-	int s = splbio();
-	int n, t;
-
-	/* reject invalid q */
-	if ((q < 0) || (q >= BQUEUES))
-		goto out;
+	for (buf_count = 0; bp; bp = next) {
+		next = bp->b_freelist.tqe_next;

-	/* LOCKED or LAUNDRY queue MUST not be balanced */
-	if ((q == BQ_LOCKED) || (q == BQ_LAUNDRY))
-		goto out;
+		if (bp->b_vp == NULL || bp->b_vp->v_mount != mp) {
+			continue;
+		}

-	n = (bufqlim[q].bl_num - bufqlim[q].bl_target);
+		if (ISSET(bp->b_flags, B_DELWRI) && !ISSET(bp->b_lflags, BL_BUSY)) {
+			bremfree_locked(bp);
+#ifdef JOE_DEBUG
+			bp->b_owner = current_thread();
+			bp->b_tag = 7;
+#endif
+			SET(bp->b_lflags, BL_BUSY);
+			buf_busycount++;

-	/* If queue has less than target nothing more to do */
-	if (n < 0)
-		goto out;
+			flush_table[buf_count] = bp;
+			buf_count++;
+			total_writes++;

-	if ( n > 8 ) {
-		/* Balance only a small amount (12.5%) at a time */
-		n >>= 3;
-	}
+			if (buf_count >= NFLUSH) {
+				lck_mtx_unlock(buf_mtxp);

-	/* EMPTY queue needs special handling */
-	if (q == BQ_EMPTY) {
-		moretodo |= btrimempty(n);
-		goto out;
-	}
+				qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp);

-	t = buf_timestamp();
-
-	for (; n > 0; n--) {
-		struct buf *bp = bufqueues[q].tqh_first;
-		if (!bp)
-			break;
-
-		/* check if it's stale */
-		if ((t - bp->b_timestamp) > bufqlim[q].bl_stale) {
-			if (bcleanbuf(bp)) {
-				/* buf_bawrite() issued, bp not ready */
-				moretodo = 1;
-			} else {
-				/* release the cleaned buffer to BQ_EMPTY */
-				SET(bp->b_flags, B_INVAL);
-				buf_brelse(bp);
+				for (i = 0; i < buf_count; i++) {
+					buf_bawrite(flush_table[i]);
+				}
+				goto restart;
			}
-		} else
-			break;
+		}
	}
+	lck_mtx_unlock(buf_mtxp);

-out:
-	splx(s);
-	return (moretodo);
-}
-
-static int
-btrimempty(int n)
-{
-	/*
-	 * When struct buf are allocated dynamically, this would
-	 * reclaim upto 'n' struct buf from the empty queue.
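	 *
	 * (A sketch of the idea, for illustration only; free_one_empty_header()
	 * is a hypothetical helper that releases a single header from BQ_EMPTY:
	 *
	 *	static int trim_empty(int n) {
	 *		int freed = 0;
	 *		while (freed < n && free_one_empty_header() == 0)
	 *			freed++;
	 *		return freed;	// nonzero hints the caller at more work
	 *	}
	 * )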
- */ - - return (0); -} - -static void -buqlimprt(int all) -{ - int i; - static char *bname[BQUEUES] = - { "LOCKED", "LRU", "AGE", "EMPTY", "META", "LAUNDRY" }; + if (buf_count > 0) { + qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp); - if (all) - for (i = 0; i < BQUEUES; i++) { - printf("%s : ", bname[i]); - printf("min = %ld, ", (long)bufqlim[i].bl_nlow); - printf("cur = %ld, ", (long)bufqlim[i].bl_num); - printf("max = %ld, ", (long)bufqlim[i].bl_nlhigh); - printf("target = %ld, ", (long)bufqlim[i].bl_target); - printf("stale after %ld seconds\n", bufqlim[i].bl_stale); - } - else - for (i = 0; i < BQUEUES; i++) { - printf("%s : ", bname[i]); - printf("cur = %ld, ", (long)bufqlim[i].bl_num); + for (i = 0; i < buf_count; i++) { + buf_bawrite(flush_table[i]); } -} + } + return total_writes; +} #endif - -
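A closing observation on bp_cmp() above: casting the daddr64_t difference straight to int can truncate once block numbers diverge by more than an int's worth, even though both values are non-negative. A sketch of an overflow-safe comparator for the same qsort() call (standalone and illustrative; the fake_buf type is a stand-in for struct buf):

	#include <stdlib.h>
	#include <stdint.h>

	typedef int64_t daddr64_t;

	struct fake_buf {
		daddr64_t b_blkno;	/* illustrative stand-in for struct buf */
	};

	static int
	blkno_cmp(const void *a, const void *b)
	{
		daddr64_t ba = (*(struct fake_buf * const *)a)->b_blkno;
		daddr64_t bb = (*(struct fake_buf * const *)b)->b_blkno;

		/* compare, don't subtract: the difference may not fit in an int */
		return (ba > bb) - (ba < bb);
	}

Sorting the batch ascending by b_blkno before issuing buf_bawrite(), as bflushq() does, presents the driver with an elevator-friendly sequence of delayed writes.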