X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/91447636331957f3d9b5ca5b508f07c526b0074d..6d2010ae8f7a6078e10b361c6962983bab233e0f:/bsd/vfs/vfs_bio.c

diff --git a/bsd/vfs/vfs_bio.c b/bsd/vfs/vfs_bio.c
index 5371c4b3a..0d474ed28 100644
--- a/bsd/vfs/vfs_bio.c
+++ b/bsd/vfs/vfs_bio.c
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /* Copyright (c) 1995 NeXT Computer, Inc.
All Rights Reserved */ /*- @@ -87,20 +93,32 @@ #include #include +#include /* fslog_io_error() */ + +#include +#include +#include /* thread_block() */ + #include +#include #include -#include + +#include +#include +#include + +#include + #if BALANCE_QUEUES static __inline__ void bufqinc(int q); static __inline__ void bufqdec(int q); #endif -static int bcleanbuf(buf_t bp); +int bcleanbuf(buf_t bp, boolean_t discard); static int brecover_data(buf_t bp); static boolean_t incore(vnode_t vp, daddr64_t blkno); -static buf_t incore_locked(vnode_t vp, daddr64_t blkno); /* timeout is in msecs */ static buf_t getnewbuf(int slpflag, int slptimeo, int *queue); static void bremfree_locked(buf_t bp); @@ -108,12 +126,19 @@ static void buf_reassign(buf_t bp, vnode_t newvp); static errno_t buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo); static int buf_iterprepare(vnode_t vp, struct buflists *, int flags); static void buf_itercomplete(vnode_t vp, struct buflists *, int flags); +static boolean_t buffer_cache_gc(int); +static buf_t buf_brelse_shadow(buf_t bp); +static void buf_free_meta_store(buf_t bp); + +static buf_t buf_create_shadow_internal(buf_t bp, boolean_t force_copy, + uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv); + __private_extern__ int bdwrite_internal(buf_t, int); /* zone allocated buffer headers */ -static void bufzoneinit(void); -static void bcleanbuf_thread_init(void); +static void bufzoneinit(void) __attribute__((section("__TEXT, initcode"))); +static void bcleanbuf_thread_init(void) __attribute__((section("__TEXT, initcode"))); static void bcleanbuf_thread(void); static zone_t buf_hdr_zone; @@ -128,13 +153,17 @@ static int buf_hdr_count; LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash; u_long bufhash; +static buf_t incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp); + /* Definitions for the buffer stats. 
*/ struct bufstats bufstats; /* Number of delayed write buffers */ -int nbdwrite = 0; +long nbdwrite = 0; int blaundrycnt = 0; +static int boot_nbuf_headers = 0; +static TAILQ_HEAD(delayqueue, buf) delaybufqueue; static TAILQ_HEAD(ioqueue, buf) iobufqueue; static TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES]; @@ -147,6 +176,8 @@ static lck_grp_attr_t *buf_mtx_grp_attr; static lck_mtx_t *iobuffer_mtxp; static lck_mtx_t *buf_mtxp; +static int buf_busycount; + static __inline__ int buf_timestamp(void) { @@ -162,34 +193,26 @@ buf_timestamp(void) #define binsheadfree(bp, dp, whichq) do { \ TAILQ_INSERT_HEAD(dp, bp, b_freelist); \ bufqinc((whichq)); \ - (bp)->b_whichq = whichq; \ - (bp)->b_timestamp = buf_timestamp(); \ } while (0) #define binstailfree(bp, dp, whichq) do { \ TAILQ_INSERT_TAIL(dp, bp, b_freelist); \ bufqinc((whichq)); \ - (bp)->b_whichq = whichq; \ - (bp)->b_timestamp = buf_timestamp(); \ } while (0) #else #define binsheadfree(bp, dp, whichq) do { \ TAILQ_INSERT_HEAD(dp, bp, b_freelist); \ - (bp)->b_whichq = whichq; \ - (bp)->b_timestamp = buf_timestamp(); \ } while (0) #define binstailfree(bp, dp, whichq) do { \ TAILQ_INSERT_TAIL(dp, bp, b_freelist); \ - (bp)->b_whichq = whichq; \ - (bp)->b_timestamp = buf_timestamp(); \ } while (0) #endif #define BHASHENTCHECK(bp) \ if ((bp)->b_hash.le_prev != (struct buf **)0xdeadbeef) \ - panic("%x: b_hash.le_prev is not deadbeef", (bp)); + panic("%p: b_hash.le_prev is not deadbeef", (bp)); #define BLISTNONE(bp) \ (bp)->b_hash.le_next = (struct buf *)0; \ @@ -216,7 +239,7 @@ int lru_is_stale = LRU_IS_STALE; int age_is_stale = AGE_IS_STALE; int meta_is_stale = META_IS_STALE; - +#define MAXLAUNDRY 10 /* LIST_INSERT_HEAD() with assertions */ static __inline__ void @@ -233,15 +256,19 @@ blistenterhead(struct bufhashhdr * head, buf_t bp) static __inline__ void binshash(buf_t bp, struct bufhashhdr *dp) { +#if DIAGNOSTIC buf_t nbp; +#endif /* DIAGNOSTIC */ BHASHENTCHECK(bp); +#if DIAGNOSTIC nbp = dp->lh_first; for(; nbp != NULL; nbp = nbp->b_hash.le_next) { if(nbp == bp) panic("buf already in hashlist"); } +#endif /* DIAGNOSTIC */ blistenterhead(dp, bp); } @@ -259,7 +286,28 @@ bremhash(buf_t bp) *bp->b_hash.le_prev = (bp)->b_hash.le_next; } +/* + * buf_mtxp held. 
+ */ +static __inline__ void +bmovelaundry(buf_t bp) +{ + bp->b_whichq = BQ_LAUNDRY; + bp->b_timestamp = buf_timestamp(); + binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY); + blaundrycnt++; +} +static __inline__ void +buf_release_credentials(buf_t bp) +{ + if (IS_VALID_CRED(bp->b_rcred)) { + kauth_cred_unref(&bp->b_rcred); + } + if (IS_VALID_CRED(bp->b_wcred)) { + kauth_cred_unref(&bp->b_wcred); + } +} int @@ -287,8 +335,24 @@ buf_markinvalid(buf_t bp) { void buf_markdelayed(buf_t bp) { - SET(bp->b_flags, B_DELWRI); - buf_reassign(bp, bp->b_vp); + if (!ISSET(bp->b_flags, B_DELWRI)) { + SET(bp->b_flags, B_DELWRI); + + OSAddAtomicLong(1, &nbdwrite); + buf_reassign(bp, bp->b_vp); + } + SET(bp->b_flags, B_DONE); +} + +void +buf_markclean(buf_t bp) { + + if (ISSET(bp->b_flags, B_DELWRI)) { + CLR(bp->b_flags, B_DELWRI); + + OSAddAtomicLong(-1, &nbdwrite); + buf_reassign(bp, bp->b_vp); + } } void @@ -297,12 +361,50 @@ buf_markeintr(buf_t bp) { SET(bp->b_flags, B_EINTR); } + void buf_markaged(buf_t bp) { SET(bp->b_flags, B_AGE); } +int +buf_fua(buf_t bp) { + + if ((bp->b_flags & B_FUA) == B_FUA) + return 1; + return 0; +} + +void +buf_markfua(buf_t bp) { + + SET(bp->b_flags, B_FUA); +} + +#ifdef CONFIG_PROTECT +void * +buf_getcpaddr(buf_t bp) { + return bp->b_cpentry; +} + +void +buf_setcpaddr(buf_t bp, void *cp_entry_addr) { + bp->b_cpentry = (struct cprotect *) cp_entry_addr; +} + +#else +void * +buf_getcpaddr(buf_t bp __unused) { + return NULL; +} + +void +buf_setcpaddr(buf_t bp __unused, void *cp_entry_addr __unused) { + return; +} +#endif /* CONFIG_PROTECT */ + errno_t buf_error(buf_t bp) { @@ -339,7 +441,7 @@ buf_flags(buf_t bp) { void buf_reset(buf_t bp, int32_t io_flags) { - CLR(bp->b_flags, (B_READ | B_WRITE | B_ERROR | B_DONE | B_INVAL | B_ASYNC | B_NOCACHE)); + CLR(bp->b_flags, (B_READ | B_WRITE | B_ERROR | B_DONE | B_INVAL | B_ASYNC | B_NOCACHE | B_FUA)); SET(bp->b_flags, (io_flags & (B_ASYNC | B_READ | B_WRITE | B_NOCACHE))); bp->b_error = 0; @@ -433,8 +535,6 @@ buf_setvnode(buf_t bp, vnode_t vp) { void * buf_callback(buf_t bp) { - if ( !(bp->b_lflags & BL_IOBUF) ) - return ((void *) NULL); if ( !(bp->b_flags & B_CALL) ) return ((void *) NULL); @@ -445,10 +545,6 @@ buf_callback(buf_t bp) errno_t buf_setcallback(buf_t bp, void (*callback)(buf_t, void *), void *transaction) { - - if ( !(bp->b_lflags & BL_IOBUF) ) - return (EINVAL); - if (callback) bp->b_flags |= (B_CALL | B_ASYNC); else @@ -496,7 +592,7 @@ buf_clone(buf_t bp, int io_offset, int io_size, void (*iodone)(buf_t, void *), v } io_bp = alloc_io_buf(bp->b_vp, 0); - io_bp->b_flags = bp->b_flags & (B_COMMIT_UPL | B_META | B_PAGEIO | B_CLUSTER | B_PHYS | B_ASYNC | B_READ); + io_bp->b_flags = bp->b_flags & (B_COMMIT_UPL | B_META | B_PAGEIO | B_CLUSTER | B_PHYS | B_RAW | B_ASYNC | B_READ | B_FUA); if (iodone) { io_bp->b_transaction = arg; @@ -515,19 +611,186 @@ buf_clone(buf_t bp, int io_offset, int io_size, void (*iodone)(buf_t, void *), v } +int +buf_shadow(buf_t bp) +{ + if (bp->b_lflags & BL_SHADOW) + return 1; + return 0; +} + + +buf_t +buf_create_shadow_priv(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg) +{ + return (buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 1)); +} + +buf_t +buf_create_shadow(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg) +{ + return (buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 0)); +} + + +static buf_t 
+buf_create_shadow_internal(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv) +{ + buf_t io_bp; + + KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_START, bp, 0, 0, 0, 0); + + if ( !(bp->b_flags & B_META) || (bp->b_lflags & BL_IOBUF)) { + + KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, 0, 0, 0, 0); + return (NULL); + } +#ifdef BUF_MAKE_PRIVATE + if (bp->b_shadow_ref && bp->b_data_ref == 0 && external_storage == 0) + panic("buf_create_shadow: %p is in the private state (%d, %d)", bp, bp->b_shadow_ref, bp->b_data_ref); +#endif + io_bp = alloc_io_buf(bp->b_vp, priv); + + io_bp->b_flags = bp->b_flags & (B_META | B_ZALLOC | B_ASYNC | B_READ | B_FUA); + io_bp->b_blkno = bp->b_blkno; + io_bp->b_lblkno = bp->b_lblkno; + + if (iodone) { + io_bp->b_transaction = arg; + io_bp->b_iodone = iodone; + io_bp->b_flags |= B_CALL; + } + if (force_copy == FALSE) { + io_bp->b_bcount = bp->b_bcount; + io_bp->b_bufsize = bp->b_bufsize; + + if (external_storage) { + io_bp->b_datap = external_storage; +#ifdef BUF_MAKE_PRIVATE + io_bp->b_data_store = NULL; +#endif + } else { + io_bp->b_datap = bp->b_datap; +#ifdef BUF_MAKE_PRIVATE + io_bp->b_data_store = bp; +#endif + } + *(buf_t *)(&io_bp->b_orig) = bp; + + lck_mtx_lock_spin(buf_mtxp); + + io_bp->b_lflags |= BL_SHADOW; + io_bp->b_shadow = bp->b_shadow; + bp->b_shadow = io_bp; + bp->b_shadow_ref++; + +#ifdef BUF_MAKE_PRIVATE + if (external_storage) + io_bp->b_lflags |= BL_EXTERNAL; + else + bp->b_data_ref++; +#endif + lck_mtx_unlock(buf_mtxp); + } else { + if (external_storage) { +#ifdef BUF_MAKE_PRIVATE + io_bp->b_lflags |= BL_EXTERNAL; +#endif + io_bp->b_bcount = bp->b_bcount; + io_bp->b_bufsize = bp->b_bufsize; + io_bp->b_datap = external_storage; + } else { + allocbuf(io_bp, bp->b_bcount); + + io_bp->b_lflags |= BL_IOBUF_ALLOC; + } + bcopy((caddr_t)bp->b_datap, (caddr_t)io_bp->b_datap, bp->b_bcount); + +#ifdef BUF_MAKE_PRIVATE + io_bp->b_data_store = NULL; +#endif + } + KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, io_bp, 0); + + return (io_bp); +} + + +#ifdef BUF_MAKE_PRIVATE +errno_t +buf_make_private(buf_t bp) +{ + buf_t ds_bp; + buf_t t_bp; + struct buf my_buf; + + KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_START, bp, bp->b_shadow_ref, 0, 0, 0); + + if (bp->b_shadow_ref == 0 || bp->b_data_ref == 0 || ISSET(bp->b_lflags, BL_SHADOW)) { + + KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0); + return (EINVAL); + } + my_buf.b_flags = B_META; + my_buf.b_datap = (uintptr_t)NULL; + allocbuf(&my_buf, bp->b_bcount); + + bcopy((caddr_t)bp->b_datap, (caddr_t)my_buf.b_datap, bp->b_bcount); + + lck_mtx_lock_spin(buf_mtxp); + + for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) { + if ( !ISSET(bp->b_lflags, BL_EXTERNAL)) + break; + } + ds_bp = t_bp; + + if (ds_bp == NULL && bp->b_data_ref) + panic("buf_make_private: b_data_ref != 0 && ds_bp == NULL"); + + if (ds_bp && (bp->b_data_ref == 0 || bp->b_shadow_ref == 0)) + panic("buf_make_private: ref_count == 0 && ds_bp != NULL"); + + if (ds_bp == NULL) { + lck_mtx_unlock(buf_mtxp); + + buf_free_meta_store(&my_buf); + + KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0); + return (EINVAL); + } + for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) { + if ( !ISSET(t_bp->b_lflags, BL_EXTERNAL)) + t_bp->b_data_store = ds_bp; + } + ds_bp->b_data_ref = bp->b_data_ref; + + bp->b_data_ref = 0; + bp->b_datap = my_buf.b_datap; + + lck_mtx_unlock(buf_mtxp); + + KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 
0, 0, 0); + return (0); +} +#endif + void buf_setfilter(buf_t bp, void (*filter)(buf_t, void *), void *transaction, - void **old_iodone, void **old_transaction) + void (**old_iodone)(buf_t, void *), void **old_transaction) { - if (old_iodone) - *old_iodone = (void *)(bp->b_iodone); + if (old_iodone) + *old_iodone = bp->b_iodone; if (old_transaction) - *old_transaction = (void *)(bp->b_transaction); + *old_transaction = bp->b_transaction; bp->b_transaction = transaction; bp->b_iodone = filter; - bp->b_flags |= B_FILTER; + if (filter) + bp->b_flags |= B_FILTER; + else + bp->b_flags &= ~B_FILTER; } @@ -596,13 +859,13 @@ buf_setfsprivate(buf_t bp, void *fsprivate) { bp->b_fsprivate = fsprivate; } -ucred_t +kauth_cred_t buf_rcred(buf_t bp) { return (bp->b_rcred); } -ucred_t +kauth_cred_t buf_wcred(buf_t bp) { return (bp->b_wcred); @@ -631,7 +894,7 @@ errno_t buf_map(buf_t bp, caddr_t *io_addr) { buf_t real_bp; - vm_offset_t vaddr; + vm_offset_t vaddr; kern_return_t kret; if ( !(bp->b_flags & B_CLUSTER)) { @@ -654,7 +917,7 @@ buf_map(buf_t bp, caddr_t *io_addr) kret = ubc_upl_map(bp->b_upl, &vaddr); /* Map it in */ if (kret != KERN_SUCCESS) { - *io_addr = 0; + *io_addr = NULL; return(ENOMEM); } @@ -681,8 +944,13 @@ buf_unmap(buf_t bp) if (real_bp && real_bp->b_datap) return (0); - if (bp->b_lflags & BL_IOBUF) { + if ((bp->b_lflags & BL_IOBUF) && + ((bp->b_flags & (B_PAGEIO | B_READ)) != (B_PAGEIO | B_READ))) { /* + * ignore pageins... the 'right' thing will + * happen due to the way we handle speculative + * clusters... + * * when we commit these pages, we'll hit * it with UPL_COMMIT_INACTIVE which * will clear the reference bit that got @@ -709,8 +977,6 @@ buf_clear(buf_t bp) { bp->b_resid = 0; } - - /* * Read or write a buffer that is not contiguous on disk. * buffer is marked done/error at the conclusion @@ -768,9 +1034,14 @@ buf_strategy_fragmented(vnode_t devvp, buf_t bp, off_t f_offset, size_t contig_b io_bp->b_blkno = io_blkno; buf_reset(io_bp, io_direction); + /* - * Call the device to do the I/O and wait for it + * Call the device to do the I/O and wait for it. 
Make sure the appropriate party is charged for write */ + + if (!ISSET(bp->b_flags, B_READ)) + OSAddAtomic(1, &devvp->v_numoutput); + if ((error = VNOP_STRATEGY(io_bp))) break; if ((error = (int)buf_biowait(io_bp))) @@ -817,6 +1088,13 @@ buf_strategy(vnode_t devvp, void *ap) vnode_t vp = bp->b_vp; int bmap_flags; errno_t error; +#if CONFIG_DTRACE + int dtrace_io_start_flag = 0; /* We only want to trip the io:::start + * probe once, with the true phisical + * block in place (b_blkno) + */ + +#endif if (vp == NULL || vp->v_type == VCHR || vp->v_type == VBLK) panic("buf_strategy: b_vp == NULL || vtype == VCHR | VBLK\n"); @@ -841,6 +1119,7 @@ buf_strategy(vnode_t devvp, void *ap) * to deal with filesystem block sizes * that aren't equal to the page size */ + DTRACE_IO1(start, buf_t, bp); return (cluster_bp(bp)); } if (bp->b_blkno == bp->b_lblkno) { @@ -848,27 +1127,53 @@ buf_strategy(vnode_t devvp, void *ap) size_t contig_bytes; if ((error = VNOP_BLKTOOFF(vp, bp->b_lblkno, &f_offset))) { + DTRACE_IO1(start, buf_t, bp); buf_seterror(bp, error); buf_biodone(bp); return (error); } if ((error = VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))) { + DTRACE_IO1(start, buf_t, bp); buf_seterror(bp, error); buf_biodone(bp); return (error); } - if (bp->b_blkno == -1) + + DTRACE_IO1(start, buf_t, bp); +#if CONFIG_DTRACE + dtrace_io_start_flag = 1; +#endif /* CONFIG_DTRACE */ + + if ((bp->b_blkno == -1) || (contig_bytes == 0)) { + /* Set block number to force biodone later */ + bp->b_blkno = -1; buf_clear(bp); - else if ((long)contig_bytes < bp->b_bcount) + } + else if ((long)contig_bytes < bp->b_bcount) { return (buf_strategy_fragmented(devvp, bp, f_offset, contig_bytes)); + } } + +#if CONFIG_DTRACE + if (dtrace_io_start_flag == 0) { + DTRACE_IO1(start, buf_t, bp); + dtrace_io_start_flag = 1; + } +#endif /* CONFIG_DTRACE */ + if (bp->b_blkno == -1) { buf_biodone(bp); return (0); } } + +#if CONFIG_DTRACE + if (dtrace_io_start_flag == 0) + DTRACE_IO1(start, buf_t, bp); +#endif /* CONFIG_DTRACE */ + /* * we can issue the I/O because... * either B_CLUSTER is set which @@ -895,58 +1200,98 @@ buf_free(buf_t bp) { } +/* + * iterate buffers for the specified vp. 
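 *
 * (Editorial sketch, not part of the original change: the callout contract
 *  described here can be used roughly as follows; the callout name and its
 *  logic are hypothetical, only the KPI and flag names come from this file.)
 *
 *	static int
 *	my_flush_callout(buf_t bp, void *arg)
 *	{
 *		if (bp == NULL)			// BUF_NOTIFY_BUSY hands the callout NULL for busy buffers
 *			return (BUF_CLAIMED);
 *		buf_bawrite(bp);		// the callout now owns bp; start an async write
 *		return (BUF_CLAIMED);		// tell buf_iterate not to buf_brelse() it again
 *	}
 *
 *	buf_iterate(vp, my_flush_callout, BUF_SCAN_DIRTY | BUF_NOTIFY_BUSY, NULL);
 *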
+ * if BUF_SCAN_DIRTY is set, do the dirty list + * if BUF_SCAN_CLEAN is set, do the clean list + * if neither flag is set, default to BUF_SCAN_DIRTY + * if BUF_NOTIFY_BUSY is set, call the callout function using a NULL bp for busy pages + */ + +struct buf_iterate_info_t { + int flag; + struct buflists *listhead; +}; void -buf_iterate(vnode_t vp, int (*callout)(buf_t, void *), int flags, void *arg) { +buf_iterate(vnode_t vp, int (*callout)(buf_t, void *), int flags, void *arg) +{ buf_t bp; int retval; struct buflists local_iterblkhd; int lock_flags = BAC_NOWAIT | BAC_REMOVE; + int notify_busy = flags & BUF_NOTIFY_BUSY; + struct buf_iterate_info_t list[2]; + int num_lists, i; if (flags & BUF_SKIP_LOCKED) lock_flags |= BAC_SKIP_LOCKED; if (flags & BUF_SKIP_NONLOCKED) lock_flags |= BAC_SKIP_NONLOCKED; - lck_mtx_lock(buf_mtxp); - - if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY)) { - lck_mtx_unlock(buf_mtxp); - return; + if ( !(flags & (BUF_SCAN_DIRTY | BUF_SCAN_CLEAN))) + flags |= BUF_SCAN_DIRTY; + + num_lists = 0; + + if (flags & BUF_SCAN_DIRTY) { + list[num_lists].flag = VBI_DIRTY; + list[num_lists].listhead = &vp->v_dirtyblkhd; + num_lists++; + } + if (flags & BUF_SCAN_CLEAN) { + list[num_lists].flag = VBI_CLEAN; + list[num_lists].listhead = &vp->v_cleanblkhd; + num_lists++; } - while (!LIST_EMPTY(&local_iterblkhd)) { - bp = LIST_FIRST(&local_iterblkhd); - LIST_REMOVE(bp, b_vnbufs); - LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs); - if (buf_acquire_locked(bp, lock_flags, 0, 0)) - continue; + for (i = 0; i < num_lists; i++) { + lck_mtx_lock(buf_mtxp); + + if (buf_iterprepare(vp, &local_iterblkhd, list[i].flag)) { + lck_mtx_unlock(buf_mtxp); + continue; + } + while (!LIST_EMPTY(&local_iterblkhd)) { + bp = LIST_FIRST(&local_iterblkhd); + LIST_REMOVE(bp, b_vnbufs); + LIST_INSERT_HEAD(list[i].listhead, bp, b_vnbufs); - lck_mtx_unlock(buf_mtxp); + if (buf_acquire_locked(bp, lock_flags, 0, 0)) { + if (notify_busy) { + bp = NULL; + } else { + continue; + } + } - retval = callout(bp, arg); + lck_mtx_unlock(buf_mtxp); - switch (retval) { - case BUF_RETURNED: - buf_brelse(bp); - break; - case BUF_CLAIMED: - break; - case BUF_RETURNED_DONE: - buf_brelse(bp); - lck_mtx_lock(buf_mtxp); - goto out; - case BUF_CLAIMED_DONE: - lck_mtx_lock(buf_mtxp); - goto out; - } - lck_mtx_lock(buf_mtxp); - } -out: - buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY); + retval = callout(bp, arg); - lck_mtx_unlock(buf_mtxp); -} + switch (retval) { + case BUF_RETURNED: + if (bp) + buf_brelse(bp); + break; + case BUF_CLAIMED: + break; + case BUF_RETURNED_DONE: + if (bp) + buf_brelse(bp); + lck_mtx_lock(buf_mtxp); + goto out; + case BUF_CLAIMED_DONE: + lck_mtx_lock(buf_mtxp); + goto out; + } + lck_mtx_lock(buf_mtxp); + } /* while list has more nodes */ + out: + buf_itercomplete(vp, &local_iterblkhd, list[i].flag); + lck_mtx_unlock(buf_mtxp); + } /* for each list */ +} /* buf_iterate */ /* @@ -956,10 +1301,15 @@ int buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo) { buf_t bp; + int aflags; int error = 0; int must_rescan = 1; struct buflists local_iterblkhd; + + if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd)) + return (0); + lck_mtx_lock(buf_mtxp); for (;;) { @@ -982,6 +1332,7 @@ buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo) goto try_dirty_list; } while (!LIST_EMPTY(&local_iterblkhd)) { + bp = LIST_FIRST(&local_iterblkhd); LIST_REMOVE(bp, b_vnbufs); @@ -993,7 +1344,12 @@ buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo) if ((flags & 
BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META))) continue; - if ( (error = (int)buf_acquire_locked(bp, BAC_REMOVE | BAC_SKIP_LOCKED, slpflag, slptimeo)) ) { + aflags = BAC_REMOVE; + + if ( !(flags & BUF_INVALIDATE_LOCKED) ) + aflags |= BAC_SKIP_LOCKED; + + if ( (error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo)) ) { if (error == EDEADLK) /* * this buffer was marked B_LOCKED... @@ -1021,6 +1377,10 @@ buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo) } lck_mtx_unlock(buf_mtxp); + if (bp->b_flags & B_LOCKED) + KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 0, 0); + + CLR(bp->b_flags, B_LOCKED); SET(bp->b_flags, B_INVAL); buf_brelse(bp); @@ -1055,7 +1415,12 @@ try_dirty_list: if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META))) continue; - if ( (error = (int)buf_acquire_locked(bp, BAC_REMOVE | BAC_SKIP_LOCKED, slpflag, slptimeo)) ) { + aflags = BAC_REMOVE; + + if ( !(flags & BUF_INVALIDATE_LOCKED) ) + aflags |= BAC_SKIP_LOCKED; + + if ( (error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo)) ) { if (error == EDEADLK) /* * this buffer was marked B_LOCKED... @@ -1083,6 +1448,10 @@ try_dirty_list: } lck_mtx_unlock(buf_mtxp); + if (bp->b_flags & B_LOCKED) + KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 1, 0); + + CLR(bp->b_flags, B_LOCKED); SET(bp->b_flags, B_INVAL); if (ISSET(bp->b_flags, B_DELWRI) && (flags & BUF_WRITE_DATA)) @@ -1107,7 +1476,7 @@ try_dirty_list: } void -buf_flushdirtyblks(vnode_t vp, int wait, int flags, char *msg) { +buf_flushdirtyblks(vnode_t vp, int wait, int flags, const char *msg) { buf_t bp; int writes_issued = 0; errno_t error; @@ -1196,7 +1565,7 @@ buf_iterprepare(vnode_t vp, struct buflists *iterheadp, int flags) while (vp->v_iterblkflags & VBI_ITER) { vp->v_iterblkflags |= VBI_ITERWANT; - msleep(&vp->v_iterblkflags, buf_mtxp, 0, "buf_iterprepare", 0); + msleep(&vp->v_iterblkflags, buf_mtxp, 0, "buf_iterprepare", NULL); } if (LIST_EMPTY(listheadp)) { LIST_INIT(iterheadp); @@ -1244,8 +1613,20 @@ static void bremfree_locked(buf_t bp) { struct bqueues *dp = NULL; - int whichq = -1; + int whichq; + whichq = bp->b_whichq; + + if (whichq == -1) { + if (bp->b_shadow_ref == 0) + panic("bremfree_locked: %p not on freelist", bp); + /* + * there are clones pointing to 'bp'... + * therefore, it was not put on a freelist + * when buf_brelse was last called on 'bp' + */ + return; + } /* * We only calculate the head of the freelist when removing * the last element of the list as that is the only time that @@ -1254,30 +1635,34 @@ bremfree_locked(buf_t bp) * NB: This makes an assumption about how tailq's are implemented. */ if (bp->b_freelist.tqe_next == NULL) { - for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) - if (dp->tqh_last == &bp->b_freelist.tqe_next) - break; - if (dp == &bufqueues[BQUEUES]) + dp = &bufqueues[whichq]; + + if (dp->tqh_last != &bp->b_freelist.tqe_next) panic("bremfree: lost tail"); } TAILQ_REMOVE(dp, bp, b_freelist); - whichq = bp->b_whichq; + #if BALANCE_QUEUES bufqdec(whichq); #endif + if (whichq == BQ_LAUNDRY) + blaundrycnt--; + bp->b_whichq = -1; bp->b_timestamp = 0; + bp->b_shadow = 0; } /* * Associate a buffer with a vnode. + * buf_mtxp must be locked on entry */ static void -bgetvp(vnode_t vp, buf_t bp) +bgetvp_locked(vnode_t vp, buf_t bp) { if (bp->b_vp != vp) - panic("bgetvp: not free"); + panic("bgetvp_locked: not free"); if (vp->v_type == VBLK || vp->v_type == VCHR) bp->b_dev = vp->v_rdev; @@ -1286,28 +1671,21 @@ bgetvp(vnode_t vp, buf_t bp) /* * Insert onto list for new vnode. 
*/ - lck_mtx_lock(buf_mtxp); bufinsvn(bp, &vp->v_cleanblkhd); - lck_mtx_unlock(buf_mtxp); } /* * Disassociate a buffer from a vnode. + * buf_mtxp must be locked on entry */ static void -brelvp(buf_t bp) +brelvp_locked(buf_t bp) { - vnode_t vp; - - if ((vp = bp->b_vp) == (vnode_t)NULL) - panic("brelvp: NULL vp"); /* * Delete from old vnode list, if on one. */ - lck_mtx_lock(buf_mtxp); if (bp->b_vnbufs.le_next != NOLIST) bufremvn(bp); - lck_mtx_unlock(buf_mtxp); bp->b_vp = (vnode_t)NULL; } @@ -1320,13 +1698,13 @@ brelvp(buf_t bp) static void buf_reassign(buf_t bp, vnode_t newvp) { - register struct buflists *listheadp; + struct buflists *listheadp; if (newvp == NULL) { printf("buf_reassign: NULL"); return; } - lck_mtx_lock(buf_mtxp); + lck_mtx_lock_spin(buf_mtxp); /* * Delete from old vnode list, if on one. @@ -1363,59 +1741,55 @@ bufhdrinit(buf_t bp) * Initialize buffers and hash links for buffers. */ __private_extern__ void -bufinit() +bufinit(void) { buf_t bp; struct bqueues *dp; int i; - int metabuf; - long whichq; + nbuf_headers = 0; /* Initialize the buffer queues ('freelists') and the hash table */ for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) TAILQ_INIT(dp); - bufhashtbl = hashinit(nbuf, M_CACHE, &bufhash); + bufhashtbl = hashinit(nbuf_hashelements, M_CACHE, &bufhash); - metabuf = nbuf/8; /* reserved for meta buf */ + buf_busycount = 0; /* Initialize the buffer headers */ - for (i = 0; i < nbuf; i++) { - bp = &buf[i]; + for (i = 0; i < max_nbuf_headers; i++) { + nbuf_headers++; + bp = &buf_headers[i]; bufhdrinit(bp); - /* - * metabuf buffer headers on the meta-data list and - * rest of the buffer headers on the empty list - */ - if (--metabuf) - whichq = BQ_META; - else - whichq = BQ_EMPTY; - BLISTNONE(bp); - dp = &bufqueues[whichq]; - binsheadfree(bp, dp, whichq); + dp = &bufqueues[BQ_EMPTY]; + bp->b_whichq = BQ_EMPTY; + bp->b_timestamp = buf_timestamp(); + binsheadfree(bp, dp, BQ_EMPTY); binshash(bp, &invalhash); } + boot_nbuf_headers = nbuf_headers; + + TAILQ_INIT(&iobufqueue); + TAILQ_INIT(&delaybufqueue); - for (; i < nbuf + niobuf; i++) { - bp = &buf[i]; + for (; i < nbuf_headers + niobuf_headers; i++) { + bp = &buf_headers[i]; bufhdrinit(bp); + bp->b_whichq = -1; binsheadfree(bp, &iobufqueue, -1); } - /* + /* * allocate lock group attribute and group */ - buf_mtx_grp_attr = lck_grp_attr_alloc_init(); - //lck_grp_attr_setstat(buf_mtx_grp_attr); + buf_mtx_grp_attr = lck_grp_attr_alloc_init(); buf_mtx_grp = lck_grp_alloc_init("buffer cache", buf_mtx_grp_attr); /* * allocate the lock attribute */ buf_mtx_attr = lck_attr_alloc_init(); - //lck_attr_setdebug(buf_mtx_attr); /* * allocate and initialize mutex's for the buffer and iobuffer pools @@ -1435,7 +1809,7 @@ bufinit() cluster_init(); printf("using %d buffer headers and %d cluster IO buffer headers\n", - nbuf, niobuf); + nbuf_headers, niobuf_headers); /* Set up zones used by the buffer cache */ bufzoneinit(); @@ -1443,35 +1817,103 @@ bufinit() /* start the bcleanbuf() thread */ bcleanbuf_thread_init(); + /* Register a callout for relieving vm pressure */ + if (vm_set_buffer_cleanup_callout(buffer_cache_gc) != KERN_SUCCESS) { + panic("Couldn't register buffer cache callout for vm pressure!\n"); + } + #if BALANCE_QUEUES { - static void bufq_balance_thread_init(); + static void bufq_balance_thread_init(void) __attribute__((section("__TEXT, initcode"))); /* create a thread to do dynamic buffer queue balancing */ bufq_balance_thread_init(); } #endif /* notyet */ } -static struct buf * -bio_doread(vnode_t vp, daddr64_t blkno, 
int size, ucred_t cred, int async, int queuetype) -{ - buf_t bp; - bp = buf_getblk(vp, blkno, size, 0, 0, queuetype); - /* - * If buffer does not have data valid, start a read. - * Note that if buffer is B_INVAL, buf_getblk() won't return it. - * Therefore, it's valid if it's I/O has completed or been delayed. - */ - if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) { - struct proc *p; +/* + * Zones for the meta data buffers + */ - p = current_proc(); +#define MINMETA 512 +#define MAXMETA 8192 + +struct meta_zone_entry { + zone_t mz_zone; + vm_size_t mz_size; + vm_size_t mz_max; + const char *mz_name; +}; + +struct meta_zone_entry meta_zones[] = { + {NULL, (MINMETA * 1), 128 * (MINMETA * 1), "buf.512" }, + {NULL, (MINMETA * 2), 64 * (MINMETA * 2), "buf.1024" }, + {NULL, (MINMETA * 4), 16 * (MINMETA * 4), "buf.2048" }, + {NULL, (MINMETA * 8), 512 * (MINMETA * 8), "buf.4096" }, + {NULL, (MINMETA * 16), 512 * (MINMETA * 16), "buf.8192" }, + {NULL, 0, 0, "" } /* End */ +}; + +/* + * Initialize the meta data zones + */ +static void +bufzoneinit(void) +{ + int i; + + for (i = 0; meta_zones[i].mz_size != 0; i++) { + meta_zones[i].mz_zone = + zinit(meta_zones[i].mz_size, + meta_zones[i].mz_max, + PAGE_SIZE, + meta_zones[i].mz_name); + zone_change(meta_zones[i].mz_zone, Z_CALLERACCT, FALSE); + } + buf_hdr_zone = zinit(sizeof(struct buf), 32, PAGE_SIZE, "buf headers"); + zone_change(buf_hdr_zone, Z_CALLERACCT, FALSE); +} + +static __inline__ zone_t +getbufzone(size_t size) +{ + int i; + + if ((size % 512) || (size < MINMETA) || (size > MAXMETA)) + panic("getbufzone: incorect size = %lu", size); + + for (i = 0; meta_zones[i].mz_size != 0; i++) { + if (meta_zones[i].mz_size >= size) + break; + } + + return (meta_zones[i].mz_zone); +} + + + +static struct buf * +bio_doread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, int async, int queuetype) +{ + buf_t bp; + + bp = buf_getblk(vp, blkno, size, 0, 0, queuetype); + + /* + * If buffer does not have data valid, start a read. + * Note that if buffer is B_INVAL, buf_getblk() won't return it. + * Therefore, it's valid if it's I/O has completed or been delayed. + */ + if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) { + struct proc *p; + + p = current_proc(); /* Start I/O for the buffer (keeping credentials). */ SET(bp->b_flags, B_READ | async); - if (cred != NOCRED && bp->b_rcred == NOCRED) { + if (IS_VALID_CRED(cred) && !IS_VALID_CRED(bp->b_rcred)) { kauth_cred_ref(cred); bp->b_rcred = cred; } @@ -1482,7 +1924,7 @@ bio_doread(vnode_t vp, daddr64_t blkno, int size, ucred_t cred, int async, int q /* Pay for the read. */ if (p && p->p_stats) - p->p_stats->p_ru.ru_inblock++; /* XXX */ + OSIncrementAtomicLong(&p->p_stats->p_ru.ru_inblock); /* XXX */ if (async) { /* @@ -1509,7 +1951,7 @@ bio_doread(vnode_t vp, daddr64_t blkno, int size, ucred_t cred, int async, int q */ static errno_t do_breadn_for_type(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, - int nrablks, ucred_t cred, buf_t *bpp, int queuetype) + int nrablks, kauth_cred_t cred, buf_t *bpp, int queuetype) { buf_t bp; int i; @@ -1538,7 +1980,7 @@ do_breadn_for_type(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int * This algorithm described in Bach (p.54). 
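 *
 * (Editorial sketch, not part of the original change: the usual synchronous
 *  pattern for this KPI; error handling and names are illustrative only.)
 *
 *	buf_t	bp;
 *	errno_t	error;
 *
 *	if ((error = buf_bread(vp, blkno, blksize, kauth_cred_get(), &bp))) {
 *		buf_brelse(bp);		// release the buffer header even on error
 *		return (error);
 *	}
 *	// inspect buf_dataptr(bp) for buf_count(bp) valid bytes
 *	buf_brelse(bp);			// done: give it back to the cache
 *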
*/ errno_t -buf_bread(vnode_t vp, daddr64_t blkno, int size, ucred_t cred, buf_t *bpp) +buf_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp) { buf_t bp; @@ -1554,7 +1996,7 @@ buf_bread(vnode_t vp, daddr64_t blkno, int size, ucred_t cred, buf_t *bpp) * This algorithm described in Bach (p.54). */ errno_t -buf_meta_bread(vnode_t vp, daddr64_t blkno, int size, ucred_t cred, buf_t *bpp) +buf_meta_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp) { buf_t bp; @@ -1569,7 +2011,7 @@ buf_meta_bread(vnode_t vp, daddr64_t blkno, int size, ucred_t cred, buf_t *bpp) * Read-ahead multiple disk blocks. The first is sync, the rest async. */ errno_t -buf_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, ucred_t cred, buf_t *bpp) +buf_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp) { return (do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_READ)); } @@ -1579,7 +2021,7 @@ buf_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasize * [buf_breadn() for meta-data] */ errno_t -buf_meta_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, ucred_t cred, buf_t *bpp) +buf_meta_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp) { return (do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_META)); } @@ -1605,7 +2047,7 @@ buf_bwrite(buf_t bp) CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI)); if (wasdelayed) - OSAddAtomic(-1, &nbdwrite); + OSAddAtomicLong(-1, &nbdwrite); if (!sync) { /* @@ -1618,7 +2060,7 @@ buf_bwrite(buf_t bp) buf_reassign(bp, vp); else if (p && p->p_stats) - p->p_stats->p_ru.ru_oublock++; /* XXX */ + OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */ } trace(TR_BUFWRITE, pack(vp, bp->b_bcount), bp->b_lblkno); @@ -1643,7 +2085,7 @@ buf_bwrite(buf_t bp) buf_reassign(bp, vp); else if (p && p->p_stats) - p->p_stats->p_ru.ru_oublock++; /* XXX */ + OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */ /* Release the buffer. */ // XXXdbg - only if the unused bit is set @@ -1660,8 +2102,7 @@ buf_bwrite(buf_t bp) } int -vn_bwrite(ap) - struct vnop_bwrite_args *ap; +vn_bwrite(struct vnop_bwrite_args *ap) { return (buf_bwrite(ap->a_bp)); } @@ -1679,11 +2120,11 @@ vn_bwrite(ap) * * Described in Leffler, et al. (pp. 208-213). * - * Note: With the abilitty to allocate additional buffer + * Note: With the ability to allocate additional buffer * headers, we can get in to the situation where "too" many * buf_bdwrite()s can create situation where the kernel can create * buffers faster than the disks can service. Doing a buf_bawrite() in - * cases were we have "too many" outstanding buf_bdwrite()s avoids that. + * cases where we have "too many" outstanding buf_bdwrite()s avoids that. */ __private_extern__ int bdwrite_internal(buf_t bp, int return_error) @@ -1700,17 +2141,11 @@ bdwrite_internal(buf_t bp, int return_error) if (!ISSET(bp->b_flags, B_DELWRI)) { SET(bp->b_flags, B_DELWRI); if (p && p->p_stats) - p->p_stats->p_ru.ru_oublock++; /* XXX */ - OSAddAtomic(1, &nbdwrite); + OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */ + OSAddAtomicLong(1, &nbdwrite); buf_reassign(bp, vp); } - /* If this is a tape block, write it the block now. 
*/ - if (ISSET(bp->b_flags, B_TAPE)) { - VNOP_BWRITE(bp); - return (0); - } - /* * if we're not LOCKED, but the total number of delayed writes * has climbed above 75% of the total buffers in the system @@ -1725,14 +2160,14 @@ bdwrite_internal(buf_t bp, int return_error) * buffer is part of a transaction and can't go to disk until * the LOCKED bit is cleared. */ - if (!ISSET(bp->b_flags, B_LOCKED) && nbdwrite > ((nbuf/4)*3)) { + if (!ISSET(bp->b_flags, B_LOCKED) && nbdwrite > ((nbuf_headers/4)*3)) { if (return_error) return (EAGAIN); /* * If the vnode has "too many" write operations in progress * wait for them to finish the IO */ - (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, (char *)"buf_bdwrite"); + (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, "buf_bdwrite"); return (buf_bawrite(bp)); } @@ -1791,6 +2226,116 @@ buf_bawrite(buf_t bp) } + +static void +buf_free_meta_store(buf_t bp) +{ + if (bp->b_bufsize) { + if (ISSET(bp->b_flags, B_ZALLOC)) { + zone_t z; + + z = getbufzone(bp->b_bufsize); + zfree(z, (void *)bp->b_datap); + } else + kmem_free(kernel_map, bp->b_datap, bp->b_bufsize); + + bp->b_datap = (uintptr_t)NULL; + bp->b_bufsize = 0; + } +} + + +static buf_t +buf_brelse_shadow(buf_t bp) +{ + buf_t bp_head; + buf_t bp_temp; + buf_t bp_return = NULL; +#ifdef BUF_MAKE_PRIVATE + buf_t bp_data; + int data_ref = 0; +#endif + lck_mtx_lock_spin(buf_mtxp); + + bp_head = (buf_t)bp->b_orig; + + if (bp_head->b_whichq != -1) + panic("buf_brelse_shadow: bp_head on freelist %d\n", bp_head->b_whichq); + +#ifdef BUF_MAKE_PRIVATE + if (bp_data = bp->b_data_store) { + bp_data->b_data_ref--; + /* + * snapshot the ref count so that we can check it + * outside of the lock... we only want the guy going + * from 1 -> 0 to try and release the storage + */ + data_ref = bp_data->b_data_ref; + } +#endif + KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_START, bp, bp_head, bp_head->b_shadow_ref, 0, 0); + + bp_head->b_shadow_ref--; + + for (bp_temp = bp_head; bp_temp && bp != bp_temp->b_shadow; bp_temp = bp_temp->b_shadow); + + if (bp_temp == NULL) + panic("buf_brelse_shadow: bp not on list %p", bp_head); + + bp_temp->b_shadow = bp_temp->b_shadow->b_shadow; + +#ifdef BUF_MAKE_PRIVATE + /* + * we're about to free the current 'owner' of the data buffer and + * there is at least one other shadow buf_t still pointing at it + * so transfer it to the first shadow buf left in the chain + */ + if (bp == bp_data && data_ref) { + if ((bp_data = bp_head->b_shadow) == NULL) + panic("buf_brelse_shadow: data_ref mismatch bp(%p)", bp); + + for (bp_temp = bp_data; bp_temp; bp_temp = bp_temp->b_shadow) + bp_temp->b_data_store = bp_data; + bp_data->b_data_ref = data_ref; + } +#endif + if (bp_head->b_shadow_ref == 0 && bp_head->b_shadow) + panic("buf_relse_shadow: b_shadow != NULL && b_shadow_ref == 0 bp(%p)", bp); + if (bp_head->b_shadow_ref && bp_head->b_shadow == 0) + panic("buf_relse_shadow: b_shadow == NULL && b_shadow_ref != 0 bp(%p)", bp); + + if (bp_head->b_shadow_ref == 0) { + if (!ISSET(bp_head->b_lflags, BL_BUSY)) { + + CLR(bp_head->b_flags, B_AGE); + bp_head->b_timestamp = buf_timestamp(); + + if (ISSET(bp_head->b_flags, B_LOCKED)) { + bp_head->b_whichq = BQ_LOCKED; + binstailfree(bp_head, &bufqueues[BQ_LOCKED], BQ_LOCKED); + } else { + bp_head->b_whichq = BQ_META; + binstailfree(bp_head, &bufqueues[BQ_META], BQ_META); + } + } else if (ISSET(bp_head->b_lflags, BL_WAITSHADOW)) { + CLR(bp_head->b_lflags, BL_WAITSHADOW); + + bp_return = bp_head; + } + } + lck_mtx_unlock(buf_mtxp); +#ifdef BUF_MAKE_PRIVATE + if 
(bp == bp_data && data_ref == 0) + buf_free_meta_store(bp); + + bp->b_data_store = NULL; +#endif + KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_END, bp, 0, 0, 0, 0); + + return (bp_return); +} + + /* * Release a buffer on to the free lists. * Described in Bach (p. 46). @@ -1806,26 +2351,32 @@ buf_brelse(buf_t bp) if (bp->b_whichq != -1 || !(bp->b_lflags & BL_BUSY)) - panic("buf_brelse: bad buffer = %x\n", bp); + panic("buf_brelse: bad buffer = %p\n", bp); #ifdef JOE_DEBUG - bp->b_stackbrelse[0] = __builtin_return_address(0); - bp->b_stackbrelse[1] = __builtin_return_address(1); - bp->b_stackbrelse[2] = __builtin_return_address(2); - bp->b_stackbrelse[3] = __builtin_return_address(3); - bp->b_stackbrelse[4] = __builtin_return_address(4); - bp->b_stackbrelse[5] = __builtin_return_address(5); + (void) OSBacktrace(&bp->b_stackbrelse[0], 6); bp->b_lastbrelse = current_thread(); bp->b_tag = 0; #endif if (bp->b_lflags & BL_IOBUF) { + buf_t shadow_master_bp = NULL; + + if (ISSET(bp->b_lflags, BL_SHADOW)) + shadow_master_bp = buf_brelse_shadow(bp); + else if (ISSET(bp->b_lflags, BL_IOBUF_ALLOC)) + buf_free_meta_store(bp); free_io_buf(bp); + + if (shadow_master_bp) { + bp = shadow_master_bp; + goto finish_shadow_master; + } return; } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_START, - bp->b_lblkno * PAGE_SIZE, (int)bp, (int)bp->b_datap, + bp->b_lblkno * PAGE_SIZE, bp, bp->b_datap, bp->b_flags, 0); trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno); @@ -1840,14 +2391,14 @@ buf_brelse(buf_t bp) if (ISSET(bp->b_flags, B_META) && ISSET(bp->b_flags, B_INVAL)) { if (ISSET(bp->b_flags, B_FILTER)) { /* if necessary, call out */ void (*iodone_func)(struct buf *, void *) = bp->b_iodone; - void *arg = (void *)bp->b_transaction; + void *arg = bp->b_transaction; CLR(bp->b_flags, B_FILTER); /* but note callout done */ bp->b_iodone = NULL; bp->b_transaction = NULL; if (iodone_func == NULL) { - panic("brelse: bp @ 0x%x has NULL b_iodone!\n", bp); + panic("brelse: bp @ %p has NULL b_iodone!\n", bp); } (*iodone_func)(bp, arg); } @@ -1861,7 +2412,7 @@ buf_brelse(buf_t bp) kern_return_t kret; int upl_flags; - if ( (upl == NULL) ) { + if (upl == NULL) { if ( !ISSET(bp->b_flags, B_INVAL)) { kret = ubc_create_upl(bp->b_vp, ubc_blktooff(bp->b_vp, bp->b_lblkno), @@ -1872,8 +2423,8 @@ buf_brelse(buf_t bp) if (kret != KERN_SUCCESS) panic("brelse: Failed to create UPL"); -#ifdef UPL_DEBUG - upl_ubc_alias_set(upl, bp, 5); +#if UPL_DEBUG + upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 5); #endif /* UPL_DEBUG */ } } else { @@ -1906,7 +2457,7 @@ buf_brelse(buf_t bp) } } else { if ( (upl) ) - panic("brelse: UPL set for non VREG; vp=%x", bp->b_vp); + panic("brelse: UPL set for non VREG; vp=%p", bp->b_vp); } /* @@ -1920,33 +2471,61 @@ buf_brelse(buf_t bp) if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR))) SET(bp->b_flags, B_INVAL); - if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) { + if ((bp->b_bufsize <= 0) || + ISSET(bp->b_flags, B_INVAL) || + (ISSET(bp->b_lflags, BL_WANTDEALLOC) && !ISSET(bp->b_flags, B_DELWRI))) { + + boolean_t delayed_buf_free_meta_store = FALSE; + /* - * If it's invalid or empty, dissociate it from its vnode - * and put on the head of the appropriate queue. 
+ * If it's invalid or empty, dissociate it from its vnode, + * release its storage if B_META, and + * clean it up a bit and put it on the EMPTY queue */ - if (bp->b_vp) - brelvp(bp); - if (ISSET(bp->b_flags, B_DELWRI)) - OSAddAtomic(-1, &nbdwrite); + OSAddAtomicLong(-1, &nbdwrite); - CLR(bp->b_flags, (B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE)); + if (ISSET(bp->b_flags, B_META)) { + if (bp->b_shadow_ref) + delayed_buf_free_meta_store = TRUE; + else + buf_free_meta_store(bp); + } /* - * Determine which queue the buffer should be on, then put it there. + * nuke any credentials we were holding */ - if (bp->b_bufsize <= 0) - whichq = BQ_EMPTY; /* no data */ - else if (ISSET(bp->b_flags, B_META)) - whichq = BQ_META; /* meta-data */ - else - whichq = BQ_AGE; /* invalid data */ - bufq = &bufqueues[whichq]; + buf_release_credentials(bp); - lck_mtx_lock(buf_mtxp); + lck_mtx_lock_spin(buf_mtxp); + + if (bp->b_shadow_ref) { + SET(bp->b_lflags, BL_WAITSHADOW); + + lck_mtx_unlock(buf_mtxp); + + return; + } + if (delayed_buf_free_meta_store == TRUE) { + + lck_mtx_unlock(buf_mtxp); +finish_shadow_master: + buf_free_meta_store(bp); + + lck_mtx_lock_spin(buf_mtxp); + } + CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA)); + + if (bp->b_vp) + brelvp_locked(bp); - binsheadfree(bp, bufq, whichq); + bremhash(bp); + BLISTNONE(bp); + binshash(bp, &invalhash); + + bp->b_whichq = BQ_EMPTY; + binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY); } else { + /* * It has valid data. Put it on the end of the appropriate * queue, so that it'll stick around for as long as possible. @@ -1961,11 +2540,32 @@ buf_brelse(buf_t bp) whichq = BQ_LRU; /* valid data */ bufq = &bufqueues[whichq]; - CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE)); + bp->b_timestamp = buf_timestamp(); - lck_mtx_lock(buf_mtxp); - - binstailfree(bp, bufq, whichq); + lck_mtx_lock_spin(buf_mtxp); + + /* + * the buf_brelse_shadow routine doesn't take 'ownership' + * of the parent buf_t... it updates state that is protected by + * the buf_mtxp, and checks for BL_BUSY to determine whether to + * put the buf_t back on a free list. b_shadow_ref is protected + * by the lock, and since we have not yet cleared B_BUSY, we need + * to check it while holding the lock to insure that one of us + * puts this buf_t back on a free list when it is safe to do so + */ + if (bp->b_shadow_ref == 0) { + CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE)); + bp->b_whichq = whichq; + binstailfree(bp, bufq, whichq); + } else { + /* + * there are still cloned buf_t's pointing + * at this guy... need to keep it off the + * freelists until a buf_brelse is done on + * the last clone + */ + CLR(bp->b_flags, (B_ASYNC | B_NOCACHE)); + } } if (needbuffer) { /* @@ -1988,6 +2588,7 @@ buf_brelse(buf_t bp) * Unlock the buffer. 
*/ CLR(bp->b_lflags, (BL_BUSY | BL_WANTED)); + buf_busycount--; lck_mtx_unlock(buf_mtxp); @@ -2004,7 +2605,7 @@ buf_brelse(buf_t bp) wakeup(bp); } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_END, - (int)bp, (int)bp->b_datap, bp->b_flags, 0, 0); + bp, bp->b_datap, bp->b_flags, 0, 0); } /* @@ -2018,10 +2619,13 @@ static boolean_t incore(vnode_t vp, daddr64_t blkno) { boolean_t retval; + struct bufhashhdr *dp; - lck_mtx_lock(buf_mtxp); + dp = BUFHASH(vp, blkno); + + lck_mtx_lock_spin(buf_mtxp); - if (incore_locked(vp, blkno)) + if (incore_locked(vp, blkno, dp)) retval = TRUE; else retval = FALSE; @@ -2032,20 +2636,18 @@ incore(vnode_t vp, daddr64_t blkno) static buf_t -incore_locked(vnode_t vp, daddr64_t blkno) +incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp) { struct buf *bp; - bp = BUFHASH(vp, blkno)->lh_first; - /* Search hash chain */ - for (; bp != NULL; bp = bp->b_hash.le_next) { + for (bp = dp->lh_first; bp != NULL; bp = bp->b_hash.le_next) { if (bp->b_lblkno == blkno && bp->b_vp == vp && !ISSET(bp->b_flags, B_INVAL)) { return (bp); } } - return (0); + return (NULL); } @@ -2069,16 +2671,18 @@ buf_getblk(vnode_t vp, daddr64_t blkno, int size, int slpflag, int slptimeo, int int ret_only_valid; struct timespec ts; int upl_flags; + struct bufhashhdr *dp; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_START, - (int)(blkno * PAGE_SIZE), size, operation, 0, 0); + (uintptr_t)(blkno * PAGE_SIZE), size, operation, 0, 0); ret_only_valid = operation & BLK_ONLYVALID; operation &= ~BLK_ONLYVALID; + dp = BUFHASH(vp, blkno); start: - lck_mtx_lock(buf_mtxp); -start_locked: - if ((bp = incore_locked(vp, blkno))) { + lck_mtx_lock_spin(buf_mtxp); + + if ((bp = incore_locked(vp, blkno, dp))) { /* * Found in the Buffer Cache */ @@ -2100,6 +2704,9 @@ start_locked: ts.tv_sec = (slptimeo/1000); ts.tv_nsec = (slptimeo % 1000) * 10 * NSEC_PER_USEC * 1000; + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 396)) | DBG_FUNC_NONE, + (uintptr_t)blkno, size, operation, 0, 0); + err = msleep(bp, buf_mtxp, slpflag | PDROP | (PRIBIO + 1), "buf_getblk", &ts); /* @@ -2124,20 +2731,22 @@ start_locked: /* * buffer in core and not busy */ - if ( (bp->b_upl) ) - panic("buffer has UPL, but not marked BUSY: %x", bp); SET(bp->b_lflags, BL_BUSY); SET(bp->b_flags, B_CACHE); -#ifdef JOE_DEBUG - bp->b_owner = current_thread(); - bp->b_tag = 1; -#endif + buf_busycount++; + bremfree_locked(bp); bufstats.bufs_incore++; lck_mtx_unlock(buf_mtxp); +#ifdef JOE_DEBUG + bp->b_owner = current_thread(); + bp->b_tag = 1; +#endif + if ( (bp->b_upl) ) + panic("buffer has UPL, but not marked BUSY: %p", bp); - if ( !ret_only_valid) + if ( !ret_only_valid && bp->b_bufsize != size) allocbuf(bp, size); upl_flags = 0; @@ -2171,7 +2780,7 @@ start_locked: } else CLR(bp->b_flags, (B_DONE | B_CACHE | B_WASDIRTY | B_DELWRI)); - kret = ubc_upl_map(upl, (vm_address_t *)&(bp->b_datap)); + kret = ubc_upl_map(upl, (vm_offset_t*)&(bp->b_datap)); if (kret != KERN_SUCCESS) panic("getblk: ubc_upl_map() failed with (%d)", kret); @@ -2198,12 +2807,11 @@ start_locked: lck_mtx_unlock(buf_mtxp); return (NULL); } - - if ((UBCINVALID(vp)) || !(UBCINFOEXISTS(vp))) + if ((vnode_isreg(vp) == 0) || (UBCINFOEXISTS(vp) == 0) /*|| (vnode_issystem(vp) == 1)*/) operation = BLK_META; if ((bp = getnewbuf(slpflag, slptimeo, &queue)) == NULL) - goto start_locked; + goto start; /* * getnewbuf may block for a number of different reasons... @@ -2212,7 +2820,7 @@ start_locked: * the hash... 
if we see it incore at this point we dump * the buffer we were working on and start over */ - if (incore_locked(vp, blkno)) { + if (incore_locked(vp, blkno, dp)) { SET(bp->b_flags, B_INVAL); binshash(bp, &invalhash); @@ -2241,9 +2849,9 @@ start_locked: */ binshash(bp, BUFHASH(vp, blkno)); - lck_mtx_unlock(buf_mtxp); + bgetvp_locked(vp, bp); - bgetvp(vp, bp); + lck_mtx_unlock(buf_mtxp); allocbuf(bp, size); @@ -2259,7 +2867,7 @@ start_locked: * in bufstats are protected with either * buf_mtxp or iobuffer_mtxp */ - OSAddAtomic(1, &bufstats.bufs_miss); + OSAddAtomicLong(1, &bufstats.bufs_miss); break; case BLK_WRITE: @@ -2275,7 +2883,7 @@ start_locked: int bmap_flags; if ( (bp->b_upl) ) - panic("bp already has UPL: %x",bp); + panic("bp already has UPL: %p",bp); f_offset = ubc_blktooff(vp, blkno); @@ -2289,8 +2897,8 @@ start_locked: if (kret != KERN_SUCCESS) panic("Failed to create UPL"); -#ifdef UPL_DEBUG - upl_ubc_alias_set(upl, bp, 4); +#if UPL_DEBUG + upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 4); #endif /* UPL_DEBUG */ bp->b_upl = upl; @@ -2303,7 +2911,7 @@ start_locked: SET(bp->b_flags, B_CACHE | B_DONE); - OSAddAtomic(1, &bufstats.bufs_vmhits); + OSAddAtomicLong(1, &bufstats.bufs_vmhits); bp->b_validoff = 0; bp->b_dirtyoff = 0; @@ -2334,9 +2942,9 @@ start_locked: if ((long)contig_bytes < bp->b_bcount) bp->b_blkno = bp->b_lblkno; } else { - OSAddAtomic(1, &bufstats.bufs_miss); + OSAddAtomicLong(1, &bufstats.bufs_miss); } - kret = ubc_upl_map(upl, (vm_address_t *)&(bp->b_datap)); + kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap)); if (kret != KERN_SUCCESS) panic("getblk: ubc_upl_map() failed with (%d)", kret); @@ -2349,15 +2957,10 @@ start_locked: } } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_END, - (int)bp, (int)bp->b_datap, bp->b_flags, 3, 0); + bp, bp->b_datap, bp->b_flags, 3, 0); #ifdef JOE_DEBUG - bp->b_stackgetblk[0] = __builtin_return_address(0); - bp->b_stackgetblk[1] = __builtin_return_address(1); - bp->b_stackgetblk[2] = __builtin_return_address(2); - bp->b_stackgetblk[3] = __builtin_return_address(3); - bp->b_stackgetblk[4] = __builtin_return_address(4); - bp->b_stackgetblk[5] = __builtin_return_address(5); + (void) OSBacktrace(&bp->b_stackgetblk[0], 6); #endif return (bp); } @@ -2366,16 +2969,17 @@ start_locked: * Get an empty, disassociated buffer of given size. 
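 *
 * (Editorial sketch, not part of the original change: buf_geteblk() always
 *  returns a buffer, sleeping if it must; a typical scratch-buffer use.)
 *
 *	bp = buf_geteblk(size);
 *	bzero((void *)buf_dataptr(bp), size);
 *	// ... use the scratch space ...
 *	buf_brelse(bp);			// B_INVAL is set, so it returns to the empty queue
 *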
*/ buf_t -buf_geteblk(size) - int size; +buf_geteblk(int size) { - buf_t bp; + buf_t bp = NULL; int queue = BQ_EMPTY; - lck_mtx_lock(buf_mtxp); + do { + lck_mtx_lock_spin(buf_mtxp); + + bp = getnewbuf(0, 0, &queue); + } while (bp == NULL); - while ((bp = getnewbuf(0, 0, &queue)) == 0) - ; SET(bp->b_flags, (B_META|B_INVAL)); #if DIAGNOSTIC @@ -2393,60 +2997,22 @@ buf_geteblk(size) return (bp); } -/* - * Zones for the meta data buffers - */ - -#define MINMETA 512 -#define MAXMETA 4096 - -struct meta_zone_entry { - zone_t mz_zone; - vm_size_t mz_size; - vm_size_t mz_max; - char *mz_name; -}; - -struct meta_zone_entry meta_zones[] = { - {NULL, (MINMETA * 1), 128 * (MINMETA * 1), "buf.512" }, - {NULL, (MINMETA * 2), 64 * (MINMETA * 2), "buf.1024" }, - {NULL, (MINMETA * 4), 16 * (MINMETA * 4), "buf.2048" }, - {NULL, (MINMETA * 8), 512 * (MINMETA * 8), "buf.4096" }, - {NULL, 0, 0, "" } /* End */ -}; - -/* - * Initialize the meta data zones - */ -static void -bufzoneinit(void) +uint32_t +buf_redundancy_flags(buf_t bp) { - int i; - - for (i = 0; meta_zones[i].mz_size != 0; i++) { - meta_zones[i].mz_zone = - zinit(meta_zones[i].mz_size, - meta_zones[i].mz_max, - PAGE_SIZE, - meta_zones[i].mz_name); - } - buf_hdr_zone = zinit(sizeof(struct buf), 32, PAGE_SIZE, "buf headers"); + return bp->b_redundancy_flags; } -static __inline__ zone_t -getbufzone(size_t size) +void +buf_set_redundancy_flags(buf_t bp, uint32_t flags) { - int i; - - if ((size % 512) || (size < MINMETA) || (size > MAXMETA)) - panic("getbufzone: incorect size = %d", size); - - for (i = 0; meta_zones[i].mz_size != 0; i++) { - if (meta_zones[i].mz_size >= size) - break; - } + SET(bp->b_redundancy_flags, flags); +} - return (meta_zones[i].mz_zone); +void +buf_clear_redundancy_flags(buf_t bp, uint32_t flags) +{ + CLR(bp->b_redundancy_flags, flags); } /* @@ -2488,10 +3054,11 @@ allocbuf(buf_t bp, int size) if (nsize <= MAXMETA) { desired_size = nsize; z = getbufzone(nsize); - bp->b_datap = (uintptr_t)zalloc(z); + /* b_datap not really a ptr */ + *(void **)(&bp->b_datap) = zalloc(z); } else { bp->b_datap = (uintptr_t)NULL; - kmem_alloc(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size); + kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size); CLR(bp->b_flags, B_ZALLOC); } bcopy((void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize); @@ -2504,7 +3071,7 @@ allocbuf(buf_t bp, int size) if ((vm_size_t)bp->b_bufsize < desired_size) { /* reallocate to a bigger size */ bp->b_datap = (uintptr_t)NULL; - kmem_alloc(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size); + kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size); bcopy((const void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize); kmem_free(kernel_map, elem, bp->b_bufsize); } else { @@ -2516,11 +3083,15 @@ allocbuf(buf_t bp, int size) if (nsize <= MAXMETA) { desired_size = nsize; z = getbufzone(nsize); - bp->b_datap = (uintptr_t)zalloc(z); + /* b_datap not really a ptr */ + *(void **)(&bp->b_datap) = zalloc(z); SET(bp->b_flags, B_ZALLOC); } else - kmem_alloc(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size); + kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size); } + + if (bp->b_datap == 0) + panic("allocbuf: NULL b_datap"); } bp->b_bufsize = desired_size; bp->b_bcount = size; @@ -2546,7 +3117,8 @@ allocbuf(buf_t bp, int size) * on which it was found. 
* * buf_mtxp is held upon entry - * returns with buf_mtxp locked + * returns with buf_mtxp locked if new buf available + * returns with buf_mtxp UNlocked if new buf NOT available */ static buf_t @@ -2564,19 +3136,30 @@ start: /* * invalid request gets empty queue */ - if ((*queue > BQUEUES) || (*queue < 0) + if ((*queue >= BQUEUES) || (*queue < 0) || (*queue == BQ_LAUNDRY) || (*queue == BQ_LOCKED)) *queue = BQ_EMPTY; + + if (*queue == BQ_EMPTY && (bp = bufqueues[*queue].tqh_first)) + goto found; + /* - * (*queue == BQUEUES) means no preference + * need to grow number of bufs, add another one rather than recycling */ - if (*queue != BQUEUES) { - /* Try for the requested queue first */ - bp = bufqueues[*queue].tqh_first; - if (bp) - goto found; + if (nbuf_headers < max_nbuf_headers) { + /* + * Increment count now as lock + * is dropped for allocation. + * That avoids over commits + */ + nbuf_headers++; + goto add_newbufs; } + /* Try for the requested queue first */ + bp = bufqueues[*queue].tqh_first; + if (bp) + goto found; /* Unable to use requested queue */ age_bp = bufqueues[BQ_AGE].tqh_first; @@ -2593,23 +3176,37 @@ start: *queue = BQ_EMPTY; goto found; } + /* + * We have seen is this is hard to trigger. + * This is an overcommit of nbufs but needed + * in some scenarios with diskiamges + */ + +add_newbufs: lck_mtx_unlock(buf_mtxp); /* Create a new temporary buffer header */ bp = (struct buf *)zalloc(buf_hdr_zone); - - lck_mtx_lock(buf_mtxp); - + if (bp) { bufhdrinit(bp); + bp->b_whichq = BQ_EMPTY; + bp->b_timestamp = buf_timestamp(); BLISTNONE(bp); - binshash(bp, &invalhash); SET(bp->b_flags, B_HDRALLOC); *queue = BQ_EMPTY; + } + lck_mtx_lock_spin(buf_mtxp); + + if (bp) { + binshash(bp, &invalhash); binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY); buf_hdr_count++; goto found; } + /* subtract already accounted bufcount */ + nbuf_headers--; + bufstats.bufs_sleeps++; /* wait for a free buffer of any kind */ @@ -2618,9 +3215,9 @@ start: ts.tv_sec = (slptimeo/1000); /* the hz value is 100; which leads to 10ms */ ts.tv_nsec = (slptimeo % 1000) * NSEC_PER_USEC * 1000 * 10; - msleep(&needbuffer, buf_mtxp, slpflag|(PRIBIO+1), (char *)"getnewbuf", &ts); - return (0); + msleep(&needbuffer, buf_mtxp, slpflag | PDROP | (PRIBIO+1), "getnewbuf", &ts); + return (NULL); } /* Buffer available either on AGE or LRU or META */ @@ -2681,10 +3278,10 @@ start: } found: if (ISSET(bp->b_flags, B_LOCKED) || ISSET(bp->b_lflags, BL_BUSY)) - panic("getnewbuf: bp @ 0x%x is LOCKED or BUSY! (flags 0x%x)\n", bp, bp->b_flags); + panic("getnewbuf: bp @ %p is LOCKED or BUSY! (flags 0x%x)\n", bp, bp->b_flags); /* Clean it */ - if (bcleanbuf(bp)) { + if (bcleanbuf(bp, FALSE)) { /* * moved to the laundry thread, buffer not ready */ @@ -2697,24 +3294,19 @@ found: /* * Clean a buffer. - * Returns 0 is buffer is ready to use, + * Returns 0 if buffer is ready to use, * Returns 1 if issued a buf_bawrite() to indicate * that the buffer is not ready. * * buf_mtxp is held upon entry * returns with buf_mtxp locked */ -static int -bcleanbuf(buf_t bp) +int +bcleanbuf(buf_t bp, boolean_t discard) { - ucred_t cred; - - /* Remove from the queue */ bremfree_locked(bp); - /* Buffer is no longer on free lists. 
*/ - SET(bp->b_lflags, BL_BUSY); #ifdef JOE_DEBUG bp->b_owner = current_thread(); bp->b_tag = 2; @@ -2724,84 +3316,93 @@ bcleanbuf(buf_t bp) * it on the LAUNDRY queue, and return 1 */ if (ISSET(bp->b_flags, B_DELWRI)) { - binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY); - blaundrycnt++; + if (discard) { + SET(bp->b_lflags, BL_WANTDEALLOC); + } + + bmovelaundry(bp); lck_mtx_unlock(buf_mtxp); - wakeup(&blaundrycnt); - /* and give it a chance to run */ + wakeup(&bufqueues[BQ_LAUNDRY]); + /* + * and give it a chance to run + */ (void)thread_block(THREAD_CONTINUE_NULL); - lck_mtx_lock(buf_mtxp); + lck_mtx_lock_spin(buf_mtxp); + return (1); } +#ifdef JOE_DEBUG + bp->b_owner = current_thread(); + bp->b_tag = 8; +#endif + /* + * Buffer is no longer on any free list... we own it + */ + SET(bp->b_lflags, BL_BUSY); + buf_busycount++; + bremhash(bp); - lck_mtx_unlock(buf_mtxp); - - BLISTNONE(bp); /* * disassociate us from our vnode, if we had one... */ if (bp->b_vp) - brelvp(bp); - - if (ISSET(bp->b_flags, B_META)) { - vm_offset_t elem; + brelvp_locked(bp); - elem = (vm_offset_t)bp->b_datap; - bp->b_datap = (uintptr_t)0xdeadbeef; + lck_mtx_unlock(buf_mtxp); - if (ISSET(bp->b_flags, B_ZALLOC)) { - zone_t z; + BLISTNONE(bp); - z = getbufzone(bp->b_bufsize); - zfree(z, (void *)elem); - } else - kmem_free(kernel_map, elem, bp->b_bufsize); - } + if (ISSET(bp->b_flags, B_META)) + buf_free_meta_store(bp); trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno); - /* clear out various other fields */ - bp->b_bufsize = 0; - bp->b_datap = (uintptr_t)NULL; - bp->b_upl = (void *)NULL; - /* - * preserve the state of whether this buffer - * was allocated on the fly or not... - * the only other flag that should be set at - * this point is BL_BUSY... - */ + buf_release_credentials(bp); + + /* If discarding, just move to the empty queue */ + if (discard) { + lck_mtx_lock_spin(buf_mtxp); + CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA)); + bp->b_whichq = BQ_EMPTY; + binshash(bp, &invalhash); + binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY); + CLR(bp->b_lflags, BL_BUSY); + buf_busycount--; + } else { + /* Not discarding: clean up and prepare for reuse */ + bp->b_bufsize = 0; + bp->b_datap = (uintptr_t)NULL; + bp->b_upl = (void *)NULL; + /* + * preserve the state of whether this buffer + * was allocated on the fly or not... + * the only other flag that should be set at + * this point is BL_BUSY... 
+ */ #ifdef JOE_DEBUG - bp->b_owner = current_thread(); - bp->b_tag = 3; + bp->b_owner = current_thread(); + bp->b_tag = 3; +#endif + bp->b_lflags = BL_BUSY; + bp->b_flags = (bp->b_flags & B_HDRALLOC); + bp->b_dev = NODEV; + bp->b_blkno = bp->b_lblkno = 0; + bp->b_iodone = NULL; + bp->b_error = 0; + bp->b_resid = 0; + bp->b_bcount = 0; + bp->b_dirtyoff = bp->b_dirtyend = 0; + bp->b_validoff = bp->b_validend = 0; +#ifdef CONFIG_PROTECT + bp->b_cpentry = 0; #endif - bp->b_lflags = BL_BUSY; - bp->b_flags = (bp->b_flags & B_HDRALLOC); - bp->b_dev = NODEV; - bp->b_blkno = bp->b_lblkno = 0; - bp->b_iodone = NULL; - bp->b_error = 0; - bp->b_resid = 0; - bp->b_bcount = 0; - bp->b_dirtyoff = bp->b_dirtyend = 0; - bp->b_validoff = bp->b_validend = 0; - /* nuke any credentials we were holding */ - cred = bp->b_rcred; - if (cred != NOCRED) { - bp->b_rcred = NOCRED; - kauth_cred_rele(cred); - } - cred = bp->b_wcred; - if (cred != NOCRED) { - bp->b_wcred = NOCRED; - kauth_cred_rele(cred); + lck_mtx_lock_spin(buf_mtxp); } - lck_mtx_lock(buf_mtxp); - return (0); } @@ -2812,10 +3413,14 @@ buf_invalblkno(vnode_t vp, daddr64_t lblkno, int flags) { buf_t bp; errno_t error; + struct bufhashhdr *dp; + + dp = BUFHASH(vp, lblkno); - lck_mtx_lock(buf_mtxp); relook: - if ((bp = incore_locked(vp, lblkno)) == (struct buf *)0) { + lck_mtx_lock_spin(buf_mtxp); + + if ((bp = incore_locked(vp, lblkno, dp)) == (struct buf *)0) { lck_mtx_unlock(buf_mtxp); return (0); } @@ -2826,15 +3431,17 @@ relook: } SET(bp->b_lflags, BL_WANTED); - error = msleep((caddr_t)bp, buf_mtxp, (PRIBIO + 1), (char *)"buf_invalblkno", 0); + error = msleep((caddr_t)bp, buf_mtxp, PDROP | (PRIBIO + 1), "buf_invalblkno", NULL); - if (error) + if (error) { return (error); + } goto relook; } bremfree_locked(bp); SET(bp->b_lflags, BL_BUSY); SET(bp->b_flags, B_INVAL); + buf_busycount++; #ifdef JOE_DEBUG bp->b_owner = current_thread(); bp->b_tag = 4; @@ -2851,7 +3458,7 @@ buf_drop(buf_t bp) { int need_wakeup = 0; - lck_mtx_lock(buf_mtxp); + lck_mtx_lock_spin(buf_mtxp); if (ISSET(bp->b_lflags, BL_WANTED)) { /* @@ -2860,10 +3467,15 @@ buf_drop(buf_t bp) */ need_wakeup = 1; } +#ifdef JOE_DEBUG + bp->b_owner = current_thread(); + bp->b_tag = 9; +#endif /* * Unlock the buffer. 
*/ CLR(bp->b_lflags, (BL_BUSY | BL_WANTED)); + buf_busycount--; lck_mtx_unlock(buf_mtxp); @@ -2880,7 +3492,7 @@ errno_t buf_acquire(buf_t bp, int flags, int slpflag, int slptimeo) { errno_t error; - lck_mtx_lock(buf_mtxp); + lck_mtx_lock_spin(buf_mtxp); error = buf_acquire_locked(bp, flags, slpflag, slptimeo); @@ -2905,7 +3517,7 @@ buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo) } if (ISSET(bp->b_lflags, BL_BUSY)) { /* - * since the mutex_lock may block, the buffer + * since the lck_mtx_lock may block, the buffer * may become BUSY, so we need to * recheck for a NOWAIT request */ @@ -2916,7 +3528,7 @@ buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo) /* the hz value is 100; which leads to 10ms */ ts.tv_sec = (slptimeo/100); ts.tv_nsec = (slptimeo % 100) * 10 * NSEC_PER_USEC * 1000; - error = msleep((caddr_t)bp, buf_mtxp, slpflag | (PRIBIO + 1), (char *)"buf_acquire", &ts); + error = msleep((caddr_t)bp, buf_mtxp, slpflag | (PRIBIO + 1), "buf_acquire", &ts); if (error) return (error); @@ -2925,6 +3537,8 @@ buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo) if (flags & BAC_REMOVE) bremfree_locked(bp); SET(bp->b_lflags, BL_BUSY); + buf_busycount++; + #ifdef JOE_DEBUG bp->b_owner = current_thread(); bp->b_tag = 5; @@ -2940,13 +3554,17 @@ buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo) errno_t buf_biowait(buf_t bp) { - lck_mtx_lock(buf_mtxp); + while (!ISSET(bp->b_flags, B_DONE)) { - while (!ISSET(bp->b_flags, B_DONE)) - (void) msleep(bp, buf_mtxp, (PRIBIO+1), (char *)"buf_biowait", 0); + lck_mtx_lock_spin(buf_mtxp); - lck_mtx_unlock(buf_mtxp); - + if (!ISSET(bp->b_flags, B_DONE)) { + DTRACE_IO1(wait__start, buf_t, bp); + (void) msleep(bp, buf_mtxp, PDROP | (PRIBIO+1), "buf_biowait", NULL); + DTRACE_IO1(wait__done, buf_t, bp); + } else + lck_mtx_unlock(buf_mtxp); + } /* check for interruption of I/O (e.g. via NFS), then errors. */ if (ISSET(bp->b_flags, B_EINTR)) { CLR(bp->b_flags, B_EINTR); @@ -2957,6 +3575,7 @@ buf_biowait(buf_t bp) return (0); } + /* * Mark I/O complete on a buffer. 
* @@ -2979,12 +3598,31 @@ extern int hard_throttle_on_root; void buf_biodone(buf_t bp) { + mount_t mp; + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_START, - (int)bp, (int)bp->b_datap, bp->b_flags, 0, 0); + bp, bp->b_datap, bp->b_flags, 0, 0); if (ISSET(bp->b_flags, B_DONE)) panic("biodone already"); + if (ISSET(bp->b_flags, B_ERROR)) { + fslog_io_error(bp); + } + + if (bp->b_vp && bp->b_vp->v_mount) { + mp = bp->b_vp->v_mount; + } else { + mp = NULL; + } + + if (mp && (bp->b_flags & B_READ) == 0) { + update_last_io_time(mp); + INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_write_size); + } else if (mp) { + INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_read_size); + } + if (kdebug_enable) { int code = DKIO_DONE; @@ -2998,21 +3636,30 @@ buf_biodone(buf_t bp) else if (bp->b_flags & B_PAGEIO) code |= DKIO_PAGING; + if (bp->b_flags & B_THROTTLED_IO) + code |= DKIO_THROTTLE; + else if (bp->b_flags & B_PASSIVE) + code |= DKIO_PASSIVE; + KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE, - (unsigned int)bp, (unsigned int)bp->b_vp, + bp, (uintptr_t)bp->b_vp, bp->b_resid, bp->b_error, 0); } if ((bp->b_vp != NULLVP) && - ((bp->b_flags & (B_PAGEIO | B_READ)) == (B_PAGEIO | B_READ)) && + ((bp->b_flags & (B_IOSTREAMING | B_PAGEIO | B_READ)) == (B_PAGEIO | B_READ)) && (bp->b_vp->v_mount->mnt_kern_flag & MNTK_ROOTDEV)) { microuptime(&priority_IO_timestamp_for_root); hard_throttle_on_root = 0; } + /* * I/O was done, so don't believe - * the DIRTY state from VM anymore + * the DIRTY state from VM anymore... + * and we need to reset the THROTTLED/PASSIVE + * indicators */ - CLR(bp->b_flags, B_WASDIRTY); + CLR(bp->b_flags, (B_WASDIRTY | B_THROTTLED_IO | B_PASSIVE)); + DTRACE_IO1(done, buf_t, bp); if (!ISSET(bp->b_flags, B_READ) && !ISSET(bp->b_flags, B_RAW)) /* @@ -3024,26 +3671,28 @@ buf_biodone(buf_t bp) if (ISSET(bp->b_flags, (B_CALL | B_FILTER))) { /* if necessary, call out */ void (*iodone_func)(struct buf *, void *) = bp->b_iodone; - void *arg = (void *)bp->b_transaction; + void *arg = bp->b_transaction; int callout = ISSET(bp->b_flags, B_CALL); + if (iodone_func == NULL) + panic("biodone: bp @ %p has NULL b_iodone!\n", bp); + CLR(bp->b_flags, (B_CALL | B_FILTER)); /* filters and callouts are one-shot */ bp->b_iodone = NULL; bp->b_transaction = NULL; - if (iodone_func == NULL) { - panic("biodone: bp @ 0x%x has NULL b_iodone!\n", bp); - } else { - if (callout) - SET(bp->b_flags, B_DONE); /* note that it's done */ - (*iodone_func)(bp, arg); - } if (callout) - /* - * assumes that the call back function takes + SET(bp->b_flags, B_DONE); /* note that it's done */ + + (*iodone_func)(bp, arg); + + if (callout) { + /* + * assumes that the callback function takes * ownership of the bp and deals with releasing it if necessary */ - goto biodone_done; + goto biodone_done; + } /* * in this case the call back function is acting * strictly as a filter... 
it does not take @@ -3071,7 +3720,7 @@ buf_biodone(buf_t bp) * they do get to run, their going to re-set * BL_WANTED and go back to sleep */ - lck_mtx_lock(buf_mtxp); + lck_mtx_lock_spin(buf_mtxp); CLR(bp->b_lflags, BL_WANTED); SET(bp->b_flags, B_DONE); /* note that it's done */ @@ -3082,7 +3731,7 @@ buf_biodone(buf_t bp) } biodone_done: KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_END, - (int)bp, (int)bp->b_datap, bp->b_flags, 0, 0); + (uintptr_t)bp, (uintptr_t)bp->b_datap, bp->b_flags, 0, 0); } /* @@ -3094,7 +3743,7 @@ count_lock_queue(void) buf_t bp; int n = 0; - lck_mtx_lock(buf_mtxp); + lck_mtx_lock_spin(buf_mtxp); for (bp = bufqueues[BQ_LOCKED].tqh_first; bp; bp = bp->b_freelist.tqe_next) @@ -3110,13 +3759,7 @@ count_lock_queue(void) int count_busy_buffers(void) { - buf_t bp; - int nbusy = 0; - - for (bp = &buf[nbuf]; --bp >= buf; ) - if (!ISSET(bp->b_flags, B_INVAL) && ISSET(bp->b_lflags, BL_BUSY)) - nbusy++; - return (nbusy); + return buf_busycount + bufstats.bufs_iobufinuse; } #if DIAGNOSTIC @@ -3129,8 +3772,8 @@ void vfs_bufstats() { int i, j, count; - register struct buf *bp; - register struct bqueues *dp; + struct buf *bp; + struct bqueues *dp; int counts[MAXBSIZE/CLBYTES+1]; static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY", "META", "LAUNDRY" }; @@ -3157,7 +3800,7 @@ vfs_bufstats() } #endif /* DIAGNOSTIC */ -#define NRESERVEDIOBUFS 64 +#define NRESERVEDIOBUFS 128 buf_t @@ -3165,14 +3808,14 @@ alloc_io_buf(vnode_t vp, int priv) { buf_t bp; - lck_mtx_lock(iobuffer_mtxp); + lck_mtx_lock_spin(iobuffer_mtxp); - while (((niobuf - NRESERVEDIOBUFS < bufstats.bufs_iobufinuse) && !priv) || + while (((niobuf_headers - NRESERVEDIOBUFS < bufstats.bufs_iobufinuse) && !priv) || (bp = iobufqueue.tqh_first) == NULL) { bufstats.bufs_iobufsleeps++; need_iobuffer = 1; - (void) msleep(&need_iobuffer, iobuffer_mtxp, (PRIBIO+1), (const char *)"alloc_io_buf", 0); + (void) msleep(&need_iobuffer, iobuffer_mtxp, PSPIN | (PRIBIO+1), (const char *)"alloc_io_buf", NULL); } TAILQ_REMOVE(&iobufqueue, bp, b_freelist); @@ -3194,6 +3837,7 @@ alloc_io_buf(vnode_t vp, int priv) bp->b_datap = 0; bp->b_flags = 0; bp->b_lflags = BL_BUSY | BL_IOBUF; + bp->b_redundancy_flags = 0; bp->b_blkno = bp->b_lblkno = 0; #ifdef JOE_DEBUG bp->b_owner = current_thread(); @@ -3206,6 +3850,9 @@ alloc_io_buf(vnode_t vp, int priv) bp->b_bufsize = 0; bp->b_upl = NULL; bp->b_vp = vp; +#ifdef CONFIG_PROTECT + bp->b_cpentry = 0; +#endif if (vp && (vp->v_type == VBLK || vp->v_type == VCHR)) bp->b_dev = vp->v_rdev; @@ -3227,7 +3874,7 @@ free_io_buf(buf_t bp) bp->b_vp = NULL; bp->b_flags = B_INVAL; - lck_mtx_lock(iobuffer_mtxp); + lck_mtx_lock_spin(iobuffer_mtxp); binsheadfree(bp, &iobufqueue, -1); @@ -3244,6 +3891,9 @@ free_io_buf(buf_t bp) need_iobuffer = 0; need_wakeup = 1; } + if (bufstats.bufs_iobufinuse <= 0) + panic("free_io_buf: bp(%p) - bufstats.bufs_iobufinuse < 0", bp); + bufstats.bufs_iobufinuse--; lck_mtx_unlock(iobuffer_mtxp); @@ -3253,24 +3903,37 @@ free_io_buf(buf_t bp) } +void +buf_list_lock(void) +{ + lck_mtx_lock_spin(buf_mtxp); +} + +void +buf_list_unlock(void) +{ + lck_mtx_unlock(buf_mtxp); +} /* * If getnewbuf() calls bcleanbuf() on the same thread * there is a potential for stack overrun and deadlocks. 
* So we always handoff the work to a worker thread for completion */ -#include -#include -#include static void bcleanbuf_thread_init(void) { + thread_t thread = THREAD_NULL; + /* create worker thread */ - kernel_thread(kernel_task, bcleanbuf_thread); + kernel_thread_start((thread_continue_t)bcleanbuf_thread, NULL, &thread); + thread_deallocate(thread); } +typedef int (*bcleanbufcontinuation)(int); + static void bcleanbuf_thread(void) { @@ -3279,17 +3942,27 @@ bcleanbuf_thread(void) int loopcnt = 0; for (;;) { - lck_mtx_lock(buf_mtxp); + lck_mtx_lock_spin(buf_mtxp); - while (blaundrycnt == 0) - (void)msleep((void *)&blaundrycnt, buf_mtxp, PRIBIO, "blaundry", 0); - - bp = TAILQ_FIRST(&bufqueues[BQ_LAUNDRY]); + while ( (bp = TAILQ_FIRST(&bufqueues[BQ_LAUNDRY])) == NULL) { + (void)msleep0(&bufqueues[BQ_LAUNDRY], buf_mtxp, PRIBIO|PDROP, "blaundry", 0, (bcleanbufcontinuation)bcleanbuf_thread); + } + /* * Remove from the queue */ bremfree_locked(bp); - blaundrycnt--; + + /* + * Buffer is no longer on any free list + */ + SET(bp->b_lflags, BL_BUSY); + buf_busycount++; + +#ifdef JOE_DEBUG + bp->b_owner = current_thread(); + bp->b_tag = 10; +#endif lck_mtx_unlock(buf_mtxp); /* @@ -3298,18 +3971,35 @@ bcleanbuf_thread(void) error = bawrite_internal(bp, 0); if (error) { - lck_mtx_lock(buf_mtxp); + bp->b_whichq = BQ_LAUNDRY; + bp->b_timestamp = buf_timestamp(); + + lck_mtx_lock_spin(buf_mtxp); binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY); blaundrycnt++; - lck_mtx_unlock(buf_mtxp); + /* we never leave a busy page on the laundry queue */ + CLR(bp->b_lflags, BL_BUSY); + buf_busycount--; +#ifdef JOE_DEBUG + bp->b_owner = current_thread(); + bp->b_tag = 11; +#endif - if (loopcnt > 10) { - (void)tsleep((void *)&blaundrycnt, PRIBIO, "blaundry", 1); + lck_mtx_unlock(buf_mtxp); + + if (loopcnt > MAXLAUNDRY) { + /* + * bawrite_internal() can return errors if we're throttled. If we've + * done several I/Os and failed, give the system some time to unthrottle + * the vnode + */ + (void)tsleep((void *)&bufqueues[BQ_LAUNDRY], PRIBIO, "blaundry", 1); loopcnt = 0; } else { - (void)thread_block(THREAD_CONTINUE_NULL); + /* give other threads a chance to run */ + (void)thread_block(THREAD_CONTINUE_NULL); loopcnt++; } } @@ -3359,7 +4049,7 @@ brecover_data(buf_t bp) } bp->b_upl = upl; - kret = ubc_upl_map(upl, (vm_address_t *)&(bp->b_datap)); + kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap)); if (kret != KERN_SUCCESS) panic("getblk: ubc_upl_map() failed with (%d)", kret); @@ -3373,6 +4063,135 @@ dump_buffer: return(0); } +boolean_t +buffer_cache_gc(int all) +{ + buf_t bp; + boolean_t did_large_zfree = FALSE; + boolean_t need_wakeup = FALSE; + int now = buf_timestamp(); + uint32_t found = 0, total_found = 0; + struct bqueues privq; + int thresh_hold = BUF_STALE_THRESHHOLD; + + if (all) + thresh_hold = 0; + /* + * We only care about metadata (incore storage comes from zalloc()). + * No more than 1024 buffers total, and only those not accessed within the + * last 30s. We will also only examine 128 buffers during a single grab + * of the lock in order to limit lock hold time. 
+ */ + lck_mtx_lock(buf_mtxp); + do { + found = 0; + TAILQ_INIT(&privq); + need_wakeup = FALSE; + + while (((bp = TAILQ_FIRST(&bufqueues[BQ_META]))) && + (now > bp->b_timestamp) && + (now - bp->b_timestamp > thresh_hold) && + (found < BUF_MAX_GC_BATCH_SIZE)) { + + /* Remove from free list */ + bremfree_locked(bp); + found++; + +#ifdef JOE_DEBUG + bp->b_owner = current_thread(); + bp->b_tag = 12; +#endif + + /* If dirty, move to laundry queue and remember to do wakeup */ + if (ISSET(bp->b_flags, B_DELWRI)) { + SET(bp->b_lflags, BL_WANTDEALLOC); + + bmovelaundry(bp); + need_wakeup = TRUE; + + continue; + } + + /* + * Mark busy and put on private list. We could technically get + * away without setting BL_BUSY here. + */ + SET(bp->b_lflags, BL_BUSY); + buf_busycount++; + + /* + * Remove from hash and dissociate from vp. + */ + bremhash(bp); + if (bp->b_vp) { + brelvp_locked(bp); + } + + TAILQ_INSERT_TAIL(&privq, bp, b_freelist); + } + + if (found == 0) { + break; + } + + /* Drop lock for batch processing */ + lck_mtx_unlock(buf_mtxp); + + /* Wakeup and yield for laundry if need be */ + if (need_wakeup) { + wakeup(&bufqueues[BQ_LAUNDRY]); + (void)thread_block(THREAD_CONTINUE_NULL); + } + + /* Clean up every buffer on private list */ + TAILQ_FOREACH(bp, &privq, b_freelist) { + /* Take note if we've definitely freed at least a page to a zone */ + if ((ISSET(bp->b_flags, B_ZALLOC)) && (buf_size(bp) >= PAGE_SIZE)) { + did_large_zfree = TRUE; + } + + trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno); + + /* Free Storage */ + buf_free_meta_store(bp); + + /* Release credentials */ + buf_release_credentials(bp); + + /* Prepare for moving to empty queue */ + CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED + | B_AGE | B_ASYNC | B_NOCACHE | B_FUA)); + bp->b_whichq = BQ_EMPTY; + BLISTNONE(bp); + } + + lck_mtx_lock(buf_mtxp); + + /* Back under lock, move them all to invalid hash and clear busy */ + TAILQ_FOREACH(bp, &privq, b_freelist) { + binshash(bp, &invalhash); + CLR(bp->b_lflags, BL_BUSY); + buf_busycount--; + +#ifdef JOE_DEBUG + if (bp->b_owner != current_thread()) { + panic("Buffer stolen from buffer_cache_gc()"); + } + bp->b_owner = current_thread(); + bp->b_tag = 13; +#endif + } + + /* And do a big bulk move to the empty queue */ + TAILQ_CONCAT(&bufqueues[BQ_EMPTY], &privq, b_freelist); + total_found += found; + + } while ((all || (total_found < BUF_MAX_GC_COUNT)) && (found == BUF_MAX_GC_BATCH_SIZE)); + + lck_mtx_unlock(buf_mtxp); + + return did_large_zfree; +} /* @@ -3431,6 +4250,8 @@ bflushq(int whichq, mount_t mp) bp->b_tag = 7; #endif SET(bp->b_lflags, BL_BUSY); + buf_busycount++; + flush_table[buf_count] = bp; buf_count++; total_writes++; @@ -3589,14 +4410,15 @@ bufqdec(int q) } static void -bufq_balance_thread_init() +bufq_balance_thread_init(void) { + thread_t thread = THREAD_NULL; if (bufqscanwait++ == 0) { /* Initalize globals */ MAXNBUF = (sane_size / PAGE_SIZE); - nbufh = nbuf; + nbufh = nbuf_headers; nbuflow = min(nbufh, 100); nbufhigh = min(MAXNBUF, max(nbufh, 2048)); nbuftarget = (sane_size >> 5) / PAGE_SIZE; @@ -3647,7 +4469,8 @@ bufq_balance_thread_init() } /* create worker thread */ - kernel_thread(kernel_task, bufqscan_thread); + kernel_thread_start((thread_continue_t)bufqscan_thread, NULL, &thread); + thread_deallocate(thread); } /* The workloop for the buffer balancing thread */ @@ -3700,7 +4523,6 @@ static int balancebufq(int q) { int moretodo = 0; - int s = splbio(); int n, t; /* reject invalid q */ @@ -3737,7 +4559,7 @@ balancebufq(int q) /* check if 
it's stale */ if ((t - bp->b_timestamp) > bufqlim[q].bl_stale) { - if (bcleanbuf(bp)) { + if (bcleanbuf(bp, FALSE)) { /* buf_bawrite() issued, bp not ready */ moretodo = 1; } else { @@ -3750,7 +4572,6 @@ balancebufq(int q) } out: - splx(s); return (moretodo); }
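Note on the b_redundancy_flags accessors added in the hunks above (buf_redundancy_flags(), buf_set_redundancy_flags(), buf_clear_redundancy_flags()): they are thin wrappers over the SET/CLR bit-mask convention used throughout vfs_bio.c. The userland sketch below illustrates only that shape; struct fake_buf, the BUF_RDFLAG_EXAMPLE_* values, and the local SET/CLR/ISSET definitions are invented stand-ins and are not xnu's own headers.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the SET/CLR/ISSET macros used in vfs_bio.c */
#define SET(t, f)   ((t) |= (f))
#define CLR(t, f)   ((t) &= ~(f))
#define ISSET(t, f) ((t) & (f))

/* Hypothetical flag values; the real ones live in the kernel headers */
#define BUF_RDFLAG_EXAMPLE_A 0x01
#define BUF_RDFLAG_EXAMPLE_B 0x02

struct fake_buf {
	uint32_t b_redundancy_flags;
};

/* Mirrors the shape of the accessors added in this diff */
static uint32_t
fake_buf_redundancy_flags(struct fake_buf *bp)
{
	return bp->b_redundancy_flags;
}

static void
fake_buf_set_redundancy_flags(struct fake_buf *bp, uint32_t flags)
{
	SET(bp->b_redundancy_flags, flags);
}

static void
fake_buf_clear_redundancy_flags(struct fake_buf *bp, uint32_t flags)
{
	CLR(bp->b_redundancy_flags, flags);
}

int
main(void)
{
	struct fake_buf b = { 0 };

	fake_buf_set_redundancy_flags(&b, BUF_RDFLAG_EXAMPLE_A | BUF_RDFLAG_EXAMPLE_B);
	fake_buf_clear_redundancy_flags(&b, BUF_RDFLAG_EXAMPLE_A);

	printf("flags = 0x%x\n", fake_buf_redundancy_flags(&b));	/* prints 0x2 */
	return 0;
}

Clearing one bit leaves the other set, which is why the example prints 0x2; the kernel accessors behave the same way on b_redundancy_flags.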
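The bcleanbuf()/bcleanbuf_thread() hunks above hand dirty (B_DELWRI) buffers off to the laundry queue and wake a dedicated worker rather than issuing the write on the caller's stack, which is what the "stack overrun and deadlocks" comment is guarding against. Below is a minimal POSIX-threads sketch of that handoff pattern, assuming pthreads and <sys/queue.h>; laundry_thread(), hand_off(), and struct work are hypothetical names and do not correspond to kernel symbols.

#include <sys/queue.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work {
	int id;
	TAILQ_ENTRY(work) link;
};

TAILQ_HEAD(workq, work);

static struct workq laundry = TAILQ_HEAD_INITIALIZER(laundry);
static pthread_mutex_t laundry_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t laundry_cv = PTHREAD_COND_INITIALIZER;
static int done;

/* Worker: sleep while the queue is empty, otherwise drain one item at a time */
static void *
laundry_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&laundry_lock);
	for (;;) {
		struct work *w;

		while ((w = TAILQ_FIRST(&laundry)) == NULL && !done)
			pthread_cond_wait(&laundry_cv, &laundry_lock);
		if (w == NULL)
			break;			/* done and queue drained */
		TAILQ_REMOVE(&laundry, w, link);

		/* do the "write" without holding the queue lock */
		pthread_mutex_unlock(&laundry_lock);
		printf("flushed work item %d\n", w->id);
		free(w);
		pthread_mutex_lock(&laundry_lock);
	}
	pthread_mutex_unlock(&laundry_lock);
	return NULL;
}

/* Producer side: hand the item off rather than flushing it on this thread */
static void
hand_off(int id)
{
	struct work *w = malloc(sizeof(*w));

	w->id = id;
	pthread_mutex_lock(&laundry_lock);
	TAILQ_INSERT_TAIL(&laundry, w, link);
	pthread_mutex_unlock(&laundry_lock);
	pthread_cond_signal(&laundry_cv);
}

int
main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, laundry_thread, NULL);
	for (int i = 0; i < 5; i++)
		hand_off(i);

	pthread_mutex_lock(&laundry_lock);
	done = 1;
	pthread_mutex_unlock(&laundry_lock);
	pthread_cond_signal(&laundry_cv);

	pthread_join(tid, NULL);
	return 0;
}

As in the kernel code, the worker only sleeps when the queue is empty and drops the queue lock around the expensive per-item work.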
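buffer_cache_gc() above reclaims stale BQ_META buffers in bounded batches: it collects up to BUF_MAX_GC_BATCH_SIZE entries onto a private list while holding buf_mtxp, drops the lock for the expensive teardown, then reacquires it to finish up. The standalone sketch below shows that batching pattern under the assumption of a pthread mutex and <sys/queue.h>; gc_pass(), GC_BATCH, GC_STALE, and struct item are invented for illustration, and the sketch frees items instead of moving them to an empty queue.

#include <sys/queue.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define GC_BATCH 4	/* stand-in for BUF_MAX_GC_BATCH_SIZE */
#define GC_STALE 30	/* stand-in for BUF_STALE_THRESHHOLD, in seconds */

struct item {
	time_t timestamp;
	TAILQ_ENTRY(item) link;
};

TAILQ_HEAD(itemq, item);

static struct itemq freeq = TAILQ_HEAD_INITIALIZER(freeq);
static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Reclaim stale items in small batches: pull a bounded number of entries
 * onto a private list while the lock is held, drop the lock for the
 * expensive per-item work, then return how many were taken.
 */
static int
gc_pass(void)
{
	struct itemq privq;
	struct item *it;
	int found = 0;
	time_t now = time(NULL);

	TAILQ_INIT(&privq);

	pthread_mutex_lock(&q_lock);
	while ((it = TAILQ_FIRST(&freeq)) != NULL &&
	    now - it->timestamp > GC_STALE &&
	    found < GC_BATCH) {
		TAILQ_REMOVE(&freeq, it, link);
		TAILQ_INSERT_TAIL(&privq, it, link);
		found++;
	}
	pthread_mutex_unlock(&q_lock);

	/* Expensive work happens without the queue lock held */
	while ((it = TAILQ_FIRST(&privq)) != NULL) {
		TAILQ_REMOVE(&privq, it, link);
		free(it);
	}
	return found;
}

int
main(void)
{
	for (int i = 0; i < 10; i++) {
		struct item *it = malloc(sizeof(*it));
		it->timestamp = time(NULL) - 60;	/* pretend it's old */
		TAILQ_INSERT_TAIL(&freeq, it, link);
	}
	while (gc_pass() == GC_BATCH)
		;	/* keep sweeping while full batches are found */
	printf("queue empty: %s\n", TAILQ_EMPTY(&freeq) ? "yes" : "no");
	return 0;
}

Bounding the batch size keeps worst-case lock hold time small even when many stale buffers are eligible, which is the stated goal of the 128-buffer limit in the kernel routine.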