X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/b0d623f7f2ae71ed96e60569f61f9a9a27016e80..a1c7dba18ef36983396c282fe85292db066e39db:/bsd/vfs/vfs_bio.c diff --git a/bsd/vfs/vfs_bio.c b/bsd/vfs/vfs_bio.c index 3f4c4e593..c6e919d9e 100644 --- a/bsd/vfs/vfs_bio.c +++ b/bsd/vfs/vfs_bio.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2008 Apple Inc. All rights reserved. + * Copyright (c) 2000-2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -91,7 +91,8 @@ #endif /* DIAGNOSTIC */ #include #include -#include +#include +#include #include /* fslog_io_error() */ @@ -109,13 +110,9 @@ #include #include +#include -#if BALANCE_QUEUES -static __inline__ void bufqinc(int q); -static __inline__ void bufqdec(int q); -#endif - -static int bcleanbuf(buf_t bp, boolean_t discard); +int bcleanbuf(buf_t bp, boolean_t discard); static int brecover_data(buf_t bp); static boolean_t incore(vnode_t vp, daddr64_t blkno); /* timeout is in msecs */ @@ -125,13 +122,19 @@ static void buf_reassign(buf_t bp, vnode_t newvp); static errno_t buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo); static int buf_iterprepare(vnode_t vp, struct buflists *, int flags); static void buf_itercomplete(vnode_t vp, struct buflists *, int flags); -static boolean_t buffer_cache_gc(void); +static boolean_t buffer_cache_gc(int); +static buf_t buf_brelse_shadow(buf_t bp); +static void buf_free_meta_store(buf_t bp); + +static buf_t buf_create_shadow_internal(buf_t bp, boolean_t force_copy, + uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv); + __private_extern__ int bdwrite_internal(buf_t, int); /* zone allocated buffer headers */ -static void bufzoneinit(void) __attribute__((section("__TEXT, initcode"))); -static void bcleanbuf_thread_init(void) __attribute__((section("__TEXT, initcode"))); +static void bufzoneinit(void); +static void bcleanbuf_thread_init(void); static void bcleanbuf_thread(void); static zone_t buf_hdr_zone; @@ -156,6 +159,7 @@ long nbdwrite = 0; int blaundrycnt = 0; static int boot_nbuf_headers = 0; +static TAILQ_HEAD(delayqueue, buf) delaybufqueue; static TAILQ_HEAD(ioqueue, buf) iobufqueue; static TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES]; @@ -181,17 +185,6 @@ buf_timestamp(void) /* * Insq/Remq for the buffer free lists. */ -#if BALANCE_QUEUES -#define binsheadfree(bp, dp, whichq) do { \ - TAILQ_INSERT_HEAD(dp, bp, b_freelist); \ - bufqinc((whichq)); \ - } while (0) - -#define binstailfree(bp, dp, whichq) do { \ - TAILQ_INSERT_TAIL(dp, bp, b_freelist); \ - bufqinc((whichq)); \ - } while (0) -#else #define binsheadfree(bp, dp, whichq) do { \ TAILQ_INSERT_HEAD(dp, bp, b_freelist); \ } while (0) @@ -199,8 +192,6 @@ buf_timestamp(void) #define binstailfree(bp, dp, whichq) do { \ TAILQ_INSERT_TAIL(dp, bp, b_freelist); \ } while (0) -#endif - #define BHASHENTCHECK(bp) \ if ((bp)->b_hash.le_prev != (struct buf **)0xdeadbeef) \ @@ -231,7 +222,7 @@ int lru_is_stale = LRU_IS_STALE; int age_is_stale = AGE_IS_STALE; int meta_is_stale = META_IS_STALE; - +#define MAXLAUNDRY 10 /* LIST_INSERT_HEAD() with assertions */ static __inline__ void @@ -278,7 +269,28 @@ bremhash(buf_t bp) *bp->b_hash.le_prev = (bp)->b_hash.le_next; } +/* + * buf_mtxp held. 
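+ * Moves 'bp' onto the tail of the BQ_LAUNDRY free list, stamps its
+ * timestamp, and bumps blaundrycnt so the laundry thread can find
+ * the new work.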
+ */ +static __inline__ void +bmovelaundry(buf_t bp) +{ + bp->b_whichq = BQ_LAUNDRY; + bp->b_timestamp = buf_timestamp(); + binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY); + blaundrycnt++; +} +static __inline__ void +buf_release_credentials(buf_t bp) +{ + if (IS_VALID_CRED(bp->b_rcred)) { + kauth_cred_unref(&bp->b_rcred); + } + if (IS_VALID_CRED(bp->b_wcred)) { + kauth_cred_unref(&bp->b_wcred); + } +} int @@ -315,6 +327,17 @@ buf_markdelayed(buf_t bp) { SET(bp->b_flags, B_DONE); } +void +buf_markclean(buf_t bp) { + + if (ISSET(bp->b_flags, B_DELWRI)) { + CLR(bp->b_flags, B_DELWRI); + + OSAddAtomicLong(-1, &nbdwrite); + buf_reassign(bp, bp->b_vp); + } +} + void buf_markeintr(buf_t bp) { @@ -342,6 +365,186 @@ buf_markfua(buf_t bp) { SET(bp->b_flags, B_FUA); } +#if CONFIG_PROTECT +void +buf_setcpaddr(buf_t bp, struct cprotect *entry) { + bp->b_attr.ba_cpentry = entry; +} + +void +buf_setcpoff (buf_t bp, uint64_t foffset) { + bp->b_attr.ba_cp_file_off = foffset; +} + +void * +bufattr_cpaddr(bufattr_t bap) { + return (bap->ba_cpentry); +} + +uint64_t +bufattr_cpoff(bufattr_t bap) { + return (bap->ba_cp_file_off); +} + +void +bufattr_setcpaddr(bufattr_t bap, void *cp_entry_addr) { + bap->ba_cpentry = cp_entry_addr; +} + +void +bufattr_setcpoff(bufattr_t bap, uint64_t foffset) { + bap->ba_cp_file_off = foffset; +} + +#else +void * +bufattr_cpaddr(bufattr_t bap __unused) { + return NULL; +} + +uint64_t +bufattr_cpoff(bufattr_t bap __unused) { + return 0; +} + +void +bufattr_setcpaddr(bufattr_t bap __unused, void *cp_entry_addr __unused) { +} + +void +bufattr_setcpoff(__unused bufattr_t bap, __unused uint64_t foffset) { + return; +} +#endif /* CONFIG_PROTECT */ + +bufattr_t +bufattr_alloc() { + bufattr_t bap; + MALLOC(bap, bufattr_t, sizeof(struct bufattr), M_TEMP, M_WAITOK); + if (bap == NULL) + return NULL; + + bzero(bap, sizeof(struct bufattr)); + return bap; +} + +void +bufattr_free(bufattr_t bap) { + if (bap) + FREE(bap, M_TEMP); +} + +bufattr_t +bufattr_dup(bufattr_t bap) { + bufattr_t new_bufattr; + MALLOC(new_bufattr, bufattr_t, sizeof(struct bufattr), M_TEMP, M_WAITOK); + if (new_bufattr == NULL) + return NULL; + + /* Copy the provided one into the new copy */ + memcpy (new_bufattr, bap, sizeof(struct bufattr)); + return new_bufattr; +} + +int +bufattr_rawencrypted(bufattr_t bap) { + if ( (bap->ba_flags & BA_RAW_ENCRYPTED_IO) ) + return 1; + return 0; +} + +int +bufattr_throttled(bufattr_t bap) { + return (GET_BUFATTR_IO_TIER(bap)); +} + +int +bufattr_passive(bufattr_t bap) { + if ( (bap->ba_flags & BA_PASSIVE) ) + return 1; + return 0; +} + +int +bufattr_nocache(bufattr_t bap) { + if ( (bap->ba_flags & BA_NOCACHE) ) + return 1; + return 0; +} + +int +bufattr_meta(bufattr_t bap) { + if ( (bap->ba_flags & BA_META) ) + return 1; + return 0; +} + +void +bufattr_markmeta(bufattr_t bap) { + SET(bap->ba_flags, BA_META); +} + +int +bufattr_delayidlesleep(bufattr_t bap) +{ + if ( (bap->ba_flags & BA_DELAYIDLESLEEP) ) + return 1; + return 0; +} + +bufattr_t +buf_attr(buf_t bp) { + return &bp->b_attr; +} + +void +buf_markstatic(buf_t bp __unused) { + SET(bp->b_flags, B_STATICCONTENT); +} + +int +buf_static(buf_t bp) { + if ( (bp->b_flags & B_STATICCONTENT) ) + return 1; + return 0; +} + +void +bufattr_markgreedymode(bufattr_t bap) { + SET(bap->ba_flags, BA_GREEDY_MODE); +} + +int +bufattr_greedymode(bufattr_t bap) { + if ( (bap->ba_flags & BA_GREEDY_MODE) ) + return 1; + return 0; +} + +void +bufattr_markisochronous(bufattr_t bap) { + SET(bap->ba_flags, BA_ISOCHRONOUS); +} + +int 
+bufattr_isochronous(bufattr_t bap) { + if ( (bap->ba_flags & BA_ISOCHRONOUS) ) + return 1; + return 0; +} + +void +bufattr_markquickcomplete(bufattr_t bap) { + SET(bap->ba_flags, BA_QUICK_COMPLETE); +} + +int +bufattr_quickcomplete(bufattr_t bap) { + if ( (bap->ba_flags & BA_QUICK_COMPLETE) ) + return 1; + return 0; +} + errno_t buf_error(buf_t bp) { @@ -548,15 +751,179 @@ buf_clone(buf_t bp, int io_offset, int io_size, void (*iodone)(buf_t, void *), v } +int +buf_shadow(buf_t bp) +{ + if (bp->b_lflags & BL_SHADOW) + return 1; + return 0; +} + + +buf_t +buf_create_shadow_priv(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg) +{ + return (buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 1)); +} + +buf_t +buf_create_shadow(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg) +{ + return (buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 0)); +} + + +static buf_t +buf_create_shadow_internal(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv) +{ + buf_t io_bp; + + KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_START, bp, 0, 0, 0, 0); + + if ( !(bp->b_flags & B_META) || (bp->b_lflags & BL_IOBUF)) { + + KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, 0, 0, 0, 0); + return (NULL); + } +#ifdef BUF_MAKE_PRIVATE + if (bp->b_shadow_ref && bp->b_data_ref == 0 && external_storage == 0) + panic("buf_create_shadow: %p is in the private state (%d, %d)", bp, bp->b_shadow_ref, bp->b_data_ref); +#endif + io_bp = alloc_io_buf(bp->b_vp, priv); + + io_bp->b_flags = bp->b_flags & (B_META | B_ZALLOC | B_ASYNC | B_READ | B_FUA); + io_bp->b_blkno = bp->b_blkno; + io_bp->b_lblkno = bp->b_lblkno; + + if (iodone) { + io_bp->b_transaction = arg; + io_bp->b_iodone = iodone; + io_bp->b_flags |= B_CALL; + } + if (force_copy == FALSE) { + io_bp->b_bcount = bp->b_bcount; + io_bp->b_bufsize = bp->b_bufsize; + + if (external_storage) { + io_bp->b_datap = external_storage; +#ifdef BUF_MAKE_PRIVATE + io_bp->b_data_store = NULL; +#endif + } else { + io_bp->b_datap = bp->b_datap; +#ifdef BUF_MAKE_PRIVATE + io_bp->b_data_store = bp; +#endif + } + *(buf_t *)(&io_bp->b_orig) = bp; + + lck_mtx_lock_spin(buf_mtxp); + + io_bp->b_lflags |= BL_SHADOW; + io_bp->b_shadow = bp->b_shadow; + bp->b_shadow = io_bp; + bp->b_shadow_ref++; + +#ifdef BUF_MAKE_PRIVATE + if (external_storage) + io_bp->b_lflags |= BL_EXTERNAL; + else + bp->b_data_ref++; +#endif + lck_mtx_unlock(buf_mtxp); + } else { + if (external_storage) { +#ifdef BUF_MAKE_PRIVATE + io_bp->b_lflags |= BL_EXTERNAL; +#endif + io_bp->b_bcount = bp->b_bcount; + io_bp->b_bufsize = bp->b_bufsize; + io_bp->b_datap = external_storage; + } else { + allocbuf(io_bp, bp->b_bcount); + + io_bp->b_lflags |= BL_IOBUF_ALLOC; + } + bcopy((caddr_t)bp->b_datap, (caddr_t)io_bp->b_datap, bp->b_bcount); + +#ifdef BUF_MAKE_PRIVATE + io_bp->b_data_store = NULL; +#endif + } + KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, io_bp, 0); + + return (io_bp); +} + + +#ifdef BUF_MAKE_PRIVATE +errno_t +buf_make_private(buf_t bp) +{ + buf_t ds_bp; + buf_t t_bp; + struct buf my_buf; + + KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_START, bp, bp->b_shadow_ref, 0, 0, 0); + + if (bp->b_shadow_ref == 0 || bp->b_data_ref == 0 || ISSET(bp->b_lflags, BL_SHADOW)) { + + KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0); + return (EINVAL); + } + my_buf.b_flags = B_META; + my_buf.b_datap = 
(uintptr_t)NULL; + allocbuf(&my_buf, bp->b_bcount); + + bcopy((caddr_t)bp->b_datap, (caddr_t)my_buf.b_datap, bp->b_bcount); + + lck_mtx_lock_spin(buf_mtxp); + + for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) { + if ( !ISSET(bp->b_lflags, BL_EXTERNAL)) + break; + } + ds_bp = t_bp; + + if (ds_bp == NULL && bp->b_data_ref) + panic("buf_make_private: b_data_ref != 0 && ds_bp == NULL"); + + if (ds_bp && (bp->b_data_ref == 0 || bp->b_shadow_ref == 0)) + panic("buf_make_private: ref_count == 0 && ds_bp != NULL"); + + if (ds_bp == NULL) { + lck_mtx_unlock(buf_mtxp); + + buf_free_meta_store(&my_buf); + + KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0); + return (EINVAL); + } + for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) { + if ( !ISSET(t_bp->b_lflags, BL_EXTERNAL)) + t_bp->b_data_store = ds_bp; + } + ds_bp->b_data_ref = bp->b_data_ref; + + bp->b_data_ref = 0; + bp->b_datap = my_buf.b_datap; + + lck_mtx_unlock(buf_mtxp); + + KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, 0, 0); + return (0); +} +#endif + void buf_setfilter(buf_t bp, void (*filter)(buf_t, void *), void *transaction, - void **old_iodone, void **old_transaction) + void (**old_iodone)(buf_t, void *), void **old_transaction) { - if (old_iodone) - *old_iodone = (void *)(bp->b_iodone); + if (old_iodone) + *old_iodone = bp->b_iodone; if (old_transaction) - *old_transaction = (void *)(bp->b_transaction); + *old_transaction = bp->b_transaction; bp->b_transaction = transaction; bp->b_iodone = filter; @@ -750,8 +1117,6 @@ buf_clear(buf_t bp) { bp->b_resid = 0; } - - /* * Read or write a buffer that is not contiguous on disk. * buffer is marked done/error at the conclusion @@ -863,6 +1228,13 @@ buf_strategy(vnode_t devvp, void *ap) vnode_t vp = bp->b_vp; int bmap_flags; errno_t error; +#if CONFIG_DTRACE + int dtrace_io_start_flag = 0; /* We only want to trip the io:::start + * probe once, with the true physical + * block in place (b_blkno) + */ + +#endif if (vp == NULL || vp->v_type == VCHR || vp->v_type == VBLK) panic("buf_strategy: b_vp == NULL || vtype == VCHR | VBLK\n"); @@ -872,7 +1244,6 @@ buf_strategy(vnode_t devvp, void *ap) * end up issuing the I/O... 
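+	 * (recording b_dev up front lets the completion and error
+	 * paths identify the device even when we bail out before
+	 * reaching the strategy call below)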
*/ bp->b_dev = devvp->v_rdev; - DTRACE_IO1(start, buf_t, bp); if (bp->b_flags & B_READ) bmap_flags = VNODE_READ; @@ -888,46 +1259,101 @@ buf_strategy(vnode_t devvp, void *ap) * to deal with filesystem block sizes * that aren't equal to the page size */ + DTRACE_IO1(start, buf_t, bp); return (cluster_bp(bp)); } if (bp->b_blkno == bp->b_lblkno) { - off_t f_offset; + off_t f_offset; size_t contig_bytes; if ((error = VNOP_BLKTOOFF(vp, bp->b_lblkno, &f_offset))) { + DTRACE_IO1(start, buf_t, bp); buf_seterror(bp, error); buf_biodone(bp); - return (error); + return (error); } - if ((error = VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))) { + + if ((error = VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))) { + DTRACE_IO1(start, buf_t, bp); buf_seterror(bp, error); buf_biodone(bp); return (error); } + + DTRACE_IO1(start, buf_t, bp); +#if CONFIG_DTRACE + dtrace_io_start_flag = 1; +#endif /* CONFIG_DTRACE */ + if ((bp->b_blkno == -1) || (contig_bytes == 0)) { /* Set block number to force biodone later */ bp->b_blkno = -1; buf_clear(bp); } - else if ((long)contig_bytes < bp->b_bcount) + else if ((long)contig_bytes < bp->b_bcount) { return (buf_strategy_fragmented(devvp, bp, f_offset, contig_bytes)); + } + } + +#if CONFIG_DTRACE + if (dtrace_io_start_flag == 0) { + DTRACE_IO1(start, buf_t, bp); + dtrace_io_start_flag = 1; } +#endif /* CONFIG_DTRACE */ + if (bp->b_blkno == -1) { buf_biodone(bp); return (0); } } + +#if CONFIG_DTRACE + if (dtrace_io_start_flag == 0) + DTRACE_IO1(start, buf_t, bp); +#endif /* CONFIG_DTRACE */ + +#if CONFIG_PROTECT + /* Capture f_offset in the bufattr*/ + if (bp->b_attr.ba_cpentry != 0) { + /* No need to go here for older EAs */ + if(bp->b_attr.ba_cpentry->cp_flags & CP_OFF_IV_ENABLED) { + off_t f_offset; + if ((error = VNOP_BLKTOOFF(bp->b_vp, bp->b_lblkno, &f_offset))) + return error; + + /* + * Attach the file offset to this buffer. The + * bufattr attributes will be passed down the stack + * until they reach IOFlashStorage. IOFlashStorage + * will retain the offset in a local variable when it + * issues its I/Os to the NAND controller. + * + * Note that LwVM may end up splitting this I/O + * into sub-I/Os if it crosses a chunk boundary. In this + * case, LwVM will update this field when it dispatches + * each I/O to IOFlashStorage. But from our perspective + * we have only issued a single I/O. + */ + bufattr_setcpoff (&(bp->b_attr), (u_int64_t)f_offset); + CP_DEBUG((CPDBG_OFFSET_IO | DBG_FUNC_NONE), (uint32_t) f_offset, (uint32_t) bp->b_lblkno, (uint32_t) bp->b_blkno, (uint32_t) bp->b_bcount, 0); + } + } +#endif + /* * we can issue the I/O because... 
* either B_CLUSTER is set which * means that the I/O is properly set * up to be a multiple of the page size, or * we were able to successfully set up the - * phsyical block mapping + * physical block mapping */ - return (VOCALL(devvp->v_op, VOFFSET(vnop_strategy), ap)); + error = VOCALL(devvp->v_op, VOFFSET(vnop_strategy), ap); + DTRACE_FSINFO(strategy, vnode_t, vp); + return (error); } @@ -1046,6 +1472,7 @@ int buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo) { buf_t bp; + int aflags; int error = 0; int must_rescan = 1; struct buflists local_iterblkhd; @@ -1076,6 +1503,7 @@ buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo) goto try_dirty_list; } while (!LIST_EMPTY(&local_iterblkhd)) { + bp = LIST_FIRST(&local_iterblkhd); LIST_REMOVE(bp, b_vnbufs); @@ -1087,7 +1515,12 @@ buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo) if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META))) continue; - if ( (error = (int)buf_acquire_locked(bp, BAC_REMOVE | BAC_SKIP_LOCKED, slpflag, slptimeo)) ) { + aflags = BAC_REMOVE; + + if ( !(flags & BUF_INVALIDATE_LOCKED) ) + aflags |= BAC_SKIP_LOCKED; + + if ( (error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo)) ) { if (error == EDEADLK) /* * this buffer was marked B_LOCKED... @@ -1115,6 +1548,10 @@ buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo) } lck_mtx_unlock(buf_mtxp); + if (bp->b_flags & B_LOCKED) + KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 0, 0); + + CLR(bp->b_flags, B_LOCKED); SET(bp->b_flags, B_INVAL); buf_brelse(bp); @@ -1149,7 +1586,12 @@ try_dirty_list: if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META))) continue; - if ( (error = (int)buf_acquire_locked(bp, BAC_REMOVE | BAC_SKIP_LOCKED, slpflag, slptimeo)) ) { + aflags = BAC_REMOVE; + + if ( !(flags & BUF_INVALIDATE_LOCKED) ) + aflags |= BAC_SKIP_LOCKED; + + if ( (error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo)) ) { if (error == EDEADLK) /* * this buffer was marked B_LOCKED... @@ -1177,6 +1619,10 @@ try_dirty_list: } lck_mtx_unlock(buf_mtxp); + if (bp->b_flags & B_LOCKED) + KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 1, 0); + + CLR(bp->b_flags, B_LOCKED); SET(bp->b_flags, B_INVAL); if (ISSET(bp->b_flags, B_DELWRI) && (flags & BUF_WRITE_DATA)) @@ -1202,12 +1648,20 @@ try_dirty_list: void buf_flushdirtyblks(vnode_t vp, int wait, int flags, const char *msg) { + + (void) buf_flushdirtyblks_skipinfo(vp, wait, flags, msg); + return; +} + +int +buf_flushdirtyblks_skipinfo(vnode_t vp, int wait, int flags, const char *msg) { buf_t bp; int writes_issued = 0; errno_t error; int busy = 0; struct buflists local_iterblkhd; int lock_flags = BAC_NOWAIT | BAC_REMOVE; + int any_locked = 0; if (flags & BUF_SKIP_LOCKED) lock_flags |= BAC_SKIP_LOCKED; @@ -1221,11 +1675,26 @@ loop: bp = LIST_FIRST(&local_iterblkhd); LIST_REMOVE(bp, b_vnbufs); LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs); - - if ((error = buf_acquire_locked(bp, lock_flags, 0, 0)) == EBUSY) - busy++; - if (error) - continue; + + if ((error = buf_acquire_locked(bp, lock_flags, 0, 0)) == EBUSY) { + busy++; + } + if (error) { + /* + * If we passed in BUF_SKIP_LOCKED or BUF_SKIP_NONLOCKED, + * we may want to do somethign differently if a locked or unlocked + * buffer was encountered (depending on the arg specified). + * In this case, we know that one of those two was set, and the + * buf acquisition failed above. + * + * If it failed with EDEADLK, then save state which can be emitted + * later on to the caller. 
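+			 * (that saved state is the any_locked count this
+			 * function returns.)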
Most callers should not care. + */ + if (error == EDEADLK) { + any_locked++; + } + continue; + } lck_mtx_unlock(buf_mtxp); bp->b_flags &= ~B_LOCKED; @@ -1271,6 +1740,8 @@ loop: goto loop; } } + + return any_locked; } @@ -1339,15 +1810,26 @@ bremfree_locked(buf_t bp) { struct bqueues *dp = NULL; int whichq; - /* - * We only calculate the head of the freelist when removing + + whichq = bp->b_whichq; + + if (whichq == -1) { + if (bp->b_shadow_ref == 0) + panic("bremfree_locked: %p not on freelist", bp); + /* + * there are clones pointing to 'bp'... + * therefore, it was not put on a freelist + * when buf_brelse was last called on 'bp' + */ + return; + } + /* + * We only calculate the head of the freelist when removing * the last element of the list as that is the only time that * it is needed (e.g. to reset the tail pointer). * * NB: This makes an assumption about how tailq's are implemented. */ - whichq = bp->b_whichq; - if (bp->b_freelist.tqe_next == NULL) { dp = &bufqueues[whichq]; @@ -1356,14 +1838,12 @@ bremfree_locked(buf_t bp) } TAILQ_REMOVE(dp, bp, b_freelist); -#if BALANCE_QUEUES - bufqdec(whichq); -#endif if (whichq == BQ_LAUNDRY) blaundrycnt--; bp->b_whichq = -1; bp->b_timestamp = 0; + bp->b_shadow = 0; } /* @@ -1411,7 +1891,7 @@ brelvp_locked(buf_t bp) static void buf_reassign(buf_t bp, vnode_t newvp) { - register struct buflists *listheadp; + struct buflists *listheadp; if (newvp == NULL) { printf("buf_reassign: NULL"); @@ -1481,8 +1961,11 @@ bufinit(void) binsheadfree(bp, dp, BQ_EMPTY); binshash(bp, &invalhash); } - boot_nbuf_headers = nbuf_headers; + + TAILQ_INIT(&iobufqueue); + TAILQ_INIT(&delaybufqueue); + for (; i < nbuf_headers + niobuf_headers; i++) { bp = &buf_headers[i]; bufhdrinit(bp); @@ -1532,17 +2015,8 @@ bufinit(void) panic("Couldn't register buffer cache callout for vm pressure!\n"); } -#if BALANCE_QUEUES - { - static void bufq_balance_thread_init(void) __attribute__((section("__TEXT, initcode"))); - /* create a thread to do dynamic buffer queue balancing */ - bufq_balance_thread_init(); - } -#endif /* notyet */ } - - /* * Zones for the meta data buffers */ @@ -1580,8 +2054,10 @@ bufzoneinit(void) meta_zones[i].mz_max, PAGE_SIZE, meta_zones[i].mz_name); + zone_change(meta_zones[i].mz_zone, Z_CALLERACCT, FALSE); } buf_hdr_zone = zinit(sizeof(struct buf), 32, PAGE_SIZE, "buf headers"); + zone_change(buf_hdr_zone, Z_CALLERACCT, FALSE); } static __inline__ zone_t @@ -1631,8 +2107,9 @@ bio_doread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, int async, trace(TR_BREADMISS, pack(vp, size), blkno); /* Pay for the read. */ - if (p && p->p_stats) + if (p && p->p_stats) { OSIncrementAtomicLong(&p->p_stats->p_ru.ru_inblock); /* XXX */ + } if (async) { /* @@ -1766,9 +2243,10 @@ buf_bwrite(buf_t bp) */ if (wasdelayed) buf_reassign(bp, vp); - else - if (p && p->p_stats) - OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */ + else + if (p && p->p_stats) { + OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */ + } } trace(TR_BUFWRITE, pack(vp, bp->b_bcount), bp->b_lblkno); @@ -1792,8 +2270,9 @@ buf_bwrite(buf_t bp) if (wasdelayed) buf_reassign(bp, vp); else - if (p && p->p_stats) - OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */ + if (p && p->p_stats) { + OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */ + } /* Release the buffer. 
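+		 * (unless the caller set B_NORELSE to keep it, which the
+		 * check just below honors)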
*/ // XXXdbg - only if the unused bit is set @@ -1832,7 +2311,7 @@ vn_bwrite(struct vnop_bwrite_args *ap) * headers, we can get in to the situation where "too" many * buf_bdwrite()s can create situation where the kernel can create * buffers faster than the disks can service. Doing a buf_bawrite() in - * cases were we have "too many" outstanding buf_bdwrite()s avoids that. + * cases where we have "too many" outstanding buf_bdwrite()s avoids that. */ __private_extern__ int bdwrite_internal(buf_t bp, int return_error) @@ -1848,8 +2327,9 @@ bdwrite_internal(buf_t bp, int return_error) */ if (!ISSET(bp->b_flags, B_DELWRI)) { SET(bp->b_flags, B_DELWRI); - if (p && p->p_stats) + if (p && p->p_stats) { OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */ + } OSAddAtomicLong(1, &nbdwrite); buf_reassign(bp, vp); } @@ -1934,6 +2414,126 @@ buf_bawrite(buf_t bp) } + +static void +buf_free_meta_store(buf_t bp) +{ + if (bp->b_bufsize) { + if (ISSET(bp->b_flags, B_ZALLOC)) { + zone_t z; + + z = getbufzone(bp->b_bufsize); + zfree(z, (void *)bp->b_datap); + } else + kmem_free(kernel_map, bp->b_datap, bp->b_bufsize); + + bp->b_datap = (uintptr_t)NULL; + bp->b_bufsize = 0; + } +} + + +static buf_t +buf_brelse_shadow(buf_t bp) +{ + buf_t bp_head; + buf_t bp_temp; + buf_t bp_return = NULL; +#ifdef BUF_MAKE_PRIVATE + buf_t bp_data; + int data_ref = 0; +#endif + int need_wakeup = 0; + + lck_mtx_lock_spin(buf_mtxp); + + bp_head = (buf_t)bp->b_orig; + + if (bp_head->b_whichq != -1) + panic("buf_brelse_shadow: bp_head on freelist %d\n", bp_head->b_whichq); + +#ifdef BUF_MAKE_PRIVATE + if (bp_data = bp->b_data_store) { + bp_data->b_data_ref--; + /* + * snapshot the ref count so that we can check it + * outside of the lock... we only want the guy going + * from 1 -> 0 to try and release the storage + */ + data_ref = bp_data->b_data_ref; + } +#endif + KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_START, bp, bp_head, bp_head->b_shadow_ref, 0, 0); + + bp_head->b_shadow_ref--; + + for (bp_temp = bp_head; bp_temp && bp != bp_temp->b_shadow; bp_temp = bp_temp->b_shadow); + + if (bp_temp == NULL) + panic("buf_brelse_shadow: bp not on list %p", bp_head); + + bp_temp->b_shadow = bp_temp->b_shadow->b_shadow; + +#ifdef BUF_MAKE_PRIVATE + /* + * we're about to free the current 'owner' of the data buffer and + * there is at least one other shadow buf_t still pointing at it + * so transfer it to the first shadow buf left in the chain + */ + if (bp == bp_data && data_ref) { + if ((bp_data = bp_head->b_shadow) == NULL) + panic("buf_brelse_shadow: data_ref mismatch bp(%p)", bp); + + for (bp_temp = bp_data; bp_temp; bp_temp = bp_temp->b_shadow) + bp_temp->b_data_store = bp_data; + bp_data->b_data_ref = data_ref; + } +#endif + if (bp_head->b_shadow_ref == 0 && bp_head->b_shadow) + panic("buf_relse_shadow: b_shadow != NULL && b_shadow_ref == 0 bp(%p)", bp); + if (bp_head->b_shadow_ref && bp_head->b_shadow == 0) + panic("buf_relse_shadow: b_shadow == NULL && b_shadow_ref != 0 bp(%p)", bp); + + if (bp_head->b_shadow_ref == 0) { + if (!ISSET(bp_head->b_lflags, BL_BUSY)) { + + CLR(bp_head->b_flags, B_AGE); + bp_head->b_timestamp = buf_timestamp(); + + if (ISSET(bp_head->b_flags, B_LOCKED)) { + bp_head->b_whichq = BQ_LOCKED; + binstailfree(bp_head, &bufqueues[BQ_LOCKED], BQ_LOCKED); + } else { + bp_head->b_whichq = BQ_META; + binstailfree(bp_head, &bufqueues[BQ_META], BQ_META); + } + } else if (ISSET(bp_head->b_lflags, BL_WAITSHADOW)) { + CLR(bp_head->b_lflags, BL_WAITSHADOW); + + bp_return = bp_head; + } + if (ISSET(bp_head->b_lflags, 
BL_WANTED_REF)) { + CLR(bp_head->b_lflags, BL_WANTED_REF); + need_wakeup = 1; + } + } + lck_mtx_unlock(buf_mtxp); + + if (need_wakeup) + wakeup(bp_head); + +#ifdef BUF_MAKE_PRIVATE + if (bp == bp_data && data_ref == 0) + buf_free_meta_store(bp); + + bp->b_data_store = NULL; +#endif + KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_END, bp, 0, 0, 0, 0); + + return (bp_return); +} + + /* * Release a buffer on to the free lists. * Described in Bach (p. 46). @@ -1958,7 +2558,18 @@ buf_brelse(buf_t bp) bp->b_tag = 0; #endif if (bp->b_lflags & BL_IOBUF) { + buf_t shadow_master_bp = NULL; + + if (ISSET(bp->b_lflags, BL_SHADOW)) + shadow_master_bp = buf_brelse_shadow(bp); + else if (ISSET(bp->b_lflags, BL_IOBUF_ALLOC)) + buf_free_meta_store(bp); free_io_buf(bp); + + if (shadow_master_bp) { + bp = shadow_master_bp; + goto finish_shadow_master; + } return; } @@ -1978,7 +2589,7 @@ buf_brelse(buf_t bp) if (ISSET(bp->b_flags, B_META) && ISSET(bp->b_flags, B_INVAL)) { if (ISSET(bp->b_flags, B_FILTER)) { /* if necessary, call out */ void (*iodone_func)(struct buf *, void *) = bp->b_iodone; - void *arg = (void *)bp->b_transaction; + void *arg = bp->b_transaction; CLR(bp->b_flags, B_FILTER); /* but note callout done */ bp->b_iodone = NULL; @@ -1999,7 +2610,7 @@ buf_brelse(buf_t bp) kern_return_t kret; int upl_flags; - if ( (upl == NULL) ) { + if (upl == NULL) { if ( !ISSET(bp->b_flags, B_INVAL)) { kret = ubc_create_upl(bp->b_vp, ubc_blktooff(bp->b_vp, bp->b_lblkno), @@ -2061,6 +2672,9 @@ buf_brelse(buf_t bp) if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL) || (ISSET(bp->b_lflags, BL_WANTDEALLOC) && !ISSET(bp->b_flags, B_DELWRI))) { + + boolean_t delayed_buf_free_meta_store = FALSE; + /* * If it's invalid or empty, dissociate it from its vnode, * release its storage if B_META, and @@ -2070,34 +2684,34 @@ buf_brelse(buf_t bp) OSAddAtomicLong(-1, &nbdwrite); if (ISSET(bp->b_flags, B_META)) { - if (bp->b_bufsize) { - if (ISSET(bp->b_flags, B_ZALLOC)) { - zone_t z; - - z = getbufzone(bp->b_bufsize); - zfree(z, (void *)bp->b_datap); - } else - kmem_free(kernel_map, bp->b_datap, bp->b_bufsize); - - bp->b_datap = (uintptr_t)NULL; - bp->b_bufsize = 0; - } + if (bp->b_shadow_ref) + delayed_buf_free_meta_store = TRUE; + else + buf_free_meta_store(bp); } /* * nuke any credentials we were holding */ - if (IS_VALID_CRED(bp->b_rcred)) { - kauth_cred_unref(&bp->b_rcred); - } - if (IS_VALID_CRED(bp->b_wcred)) { - kauth_cred_unref(&bp->b_wcred); + buf_release_credentials(bp); + + lck_mtx_lock_spin(buf_mtxp); + + if (bp->b_shadow_ref) { + SET(bp->b_lflags, BL_WAITSHADOW); + + lck_mtx_unlock(buf_mtxp); + + return; } - CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA)); + if (delayed_buf_free_meta_store == TRUE) { - bufq = &bufqueues[BQ_EMPTY]; - bp->b_whichq = BQ_EMPTY; + lck_mtx_unlock(buf_mtxp); +finish_shadow_master: + buf_free_meta_store(bp); - lck_mtx_lock_spin(buf_mtxp); + lck_mtx_lock_spin(buf_mtxp); + } + CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA)); if (bp->b_vp) brelvp_locked(bp); @@ -2106,8 +2720,10 @@ buf_brelse(buf_t bp) BLISTNONE(bp); binshash(bp, &invalhash); - binsheadfree(bp, bufq, BQ_EMPTY); + bp->b_whichq = BQ_EMPTY; + binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY); } else { + /* * It has valid data. Put it on the end of the appropriate * queue, so that it'll stick around for as long as possible. 
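+	 * (if clones still point at this buf_t, the b_shadow_ref checks
+	 * below keep it off the freelists until the last shadow is
+	 * released)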
@@ -2122,13 +2738,32 @@ buf_brelse(buf_t bp) whichq = BQ_LRU; /* valid data */ bufq = &bufqueues[whichq]; - CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE)); - bp->b_whichq = whichq; bp->b_timestamp = buf_timestamp(); - lck_mtx_lock_spin(buf_mtxp); - - binstailfree(bp, bufq, whichq); + lck_mtx_lock_spin(buf_mtxp); + + /* + * the buf_brelse_shadow routine doesn't take 'ownership' + * of the parent buf_t... it updates state that is protected by + * the buf_mtxp, and checks for BL_BUSY to determine whether to + * put the buf_t back on a free list. b_shadow_ref is protected + * by the lock, and since we have not yet cleared B_BUSY, we need + * to check it while holding the lock to insure that one of us + * puts this buf_t back on a free list when it is safe to do so + */ + if (bp->b_shadow_ref == 0) { + CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE)); + bp->b_whichq = whichq; + binstailfree(bp, bufq, whichq); + } else { + /* + * there are still cloned buf_t's pointing + * at this guy... need to keep it off the + * freelists until a buf_brelse is done on + * the last clone + */ + CLR(bp->b_flags, (B_ASYNC | B_NOCACHE)); + } } if (needbuffer) { /* @@ -2214,6 +2849,30 @@ incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp) } +void +buf_wait_for_shadow_io(vnode_t vp, daddr64_t blkno) +{ + buf_t bp; + struct bufhashhdr *dp; + + dp = BUFHASH(vp, blkno); + + lck_mtx_lock_spin(buf_mtxp); + + for (;;) { + if ((bp = incore_locked(vp, blkno, dp)) == NULL) + break; + + if (bp->b_shadow_ref == 0) + break; + + SET(bp->b_lflags, BL_WANTED_REF); + + (void) msleep(bp, buf_mtxp, PSPIN | (PRIBIO+1), "buf_wait_for_shadow", NULL); + } + lck_mtx_unlock(buf_mtxp); +} + /* XXX FIXME -- Update the comment to reflect the UBC changes (please) -- */ /* * Get a block of requested size that is associated with @@ -2560,6 +3219,97 @@ buf_geteblk(int size) return (bp); } +uint32_t +buf_redundancy_flags(buf_t bp) +{ + return bp->b_redundancy_flags; +} + +void +buf_set_redundancy_flags(buf_t bp, uint32_t flags) +{ + SET(bp->b_redundancy_flags, flags); +} + +void +buf_clear_redundancy_flags(buf_t bp, uint32_t flags) +{ + CLR(bp->b_redundancy_flags, flags); +} + + + +static void * +recycle_buf_from_pool(int nsize) +{ + buf_t bp; + void *ptr = NULL; + + lck_mtx_lock_spin(buf_mtxp); + + TAILQ_FOREACH(bp, &bufqueues[BQ_META], b_freelist) { + if (ISSET(bp->b_flags, B_DELWRI) || bp->b_bufsize != nsize) + continue; + ptr = (void *)bp->b_datap; + bp->b_bufsize = 0; + + bcleanbuf(bp, TRUE); + break; + } + lck_mtx_unlock(buf_mtxp); + + return (ptr); +} + + + +int zalloc_nopagewait_failed = 0; +int recycle_buf_failed = 0; + +static void * +grab_memory_for_meta_buf(int nsize) +{ + zone_t z; + void *ptr; + boolean_t was_vmpriv; + + z = getbufzone(nsize); + + /* + * make sure we're NOT priviliged so that + * if a vm_page_grab is needed, it won't + * block if we're out of free pages... 
if + * it blocks, then we can't honor the + * nopagewait request + */ + was_vmpriv = set_vm_privilege(FALSE); + + ptr = zalloc_nopagewait(z); + + if (was_vmpriv == TRUE) + set_vm_privilege(TRUE); + + if (ptr == NULL) { + + zalloc_nopagewait_failed++; + + ptr = recycle_buf_from_pool(nsize); + + if (ptr == NULL) { + + recycle_buf_failed++; + + if (was_vmpriv == FALSE) + set_vm_privilege(TRUE); + + ptr = zalloc(z); + + if (was_vmpriv == FALSE) + set_vm_privilege(FALSE); + } + } + return (ptr); +} /* * With UBC, there is no need to expand / shrink the file data @@ -2586,7 +3336,6 @@ allocbuf(buf_t bp, int size) panic("allocbuf: buffer larger than MAXBSIZE requested"); if (ISSET(bp->b_flags, B_META)) { - zone_t zprev, z; int nsize = roundup(size, MINMETA); if (bp->b_datap) { @@ -2594,14 +3343,16 @@ allocbuf(buf_t bp, int size) if (ISSET(bp->b_flags, B_ZALLOC)) { if (bp->b_bufsize < nsize) { + zone_t zprev; + /* reallocate to a bigger size */ zprev = getbufzone(bp->b_bufsize); if (nsize <= MAXMETA) { desired_size = nsize; - z = getbufzone(nsize); + /* b_datap not really a ptr */ - *(void **)(&bp->b_datap) = zalloc(z); + *(void **)(&bp->b_datap) = grab_memory_for_meta_buf(nsize); } else { bp->b_datap = (uintptr_t)NULL; kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size); @@ -2628,9 +3379,9 @@ allocbuf(buf_t bp, int size) /* new allocation */ if (nsize <= MAXMETA) { desired_size = nsize; - z = getbufzone(nsize); + /* b_datap not really a ptr */ - *(void **)(&bp->b_datap) = zalloc(z); + *(void **)(&bp->b_datap) = grab_memory_for_meta_buf(nsize); SET(bp->b_flags, B_ZALLOC); } else kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size); @@ -2840,14 +3591,14 @@ found: /* * Clean a buffer. - * Returns 0 is buffer is ready to use, + * Returns 0 if buffer is ready to use, * Returns 1 if issued a buf_bawrite() to indicate * that the buffer is not ready. * * buf_mtxp is held upon entry * returns with buf_mtxp locked */ -static int +int bcleanbuf(buf_t bp, boolean_t discard) { /* Remove from the queue */ @@ -2866,10 +3617,7 @@ bcleanbuf(buf_t bp, boolean_t discard) SET(bp->b_lflags, BL_WANTDEALLOC); } - bp->b_whichq = BQ_LAUNDRY; - bp->b_timestamp = buf_timestamp(); - binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY); - blaundrycnt++; + bmovelaundry(bp); lck_mtx_unlock(buf_mtxp); @@ -2905,30 +3653,14 @@ bcleanbuf(buf_t bp, boolean_t discard) BLISTNONE(bp); - if (ISSET(bp->b_flags, B_META)) { - vm_offset_t elem; - - elem = (vm_offset_t)bp->b_datap; - bp->b_datap = (uintptr_t)0xdeadbeef; - - if (ISSET(bp->b_flags, B_ZALLOC)) { - zone_t z; - - z = getbufzone(bp->b_bufsize); - zfree(z, (void *)elem); - } else - kmem_free(kernel_map, elem, bp->b_bufsize); - } + if (ISSET(bp->b_flags, B_META)) + buf_free_meta_store(bp); trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno); - /* nuke any credentials we were holding */ - if (IS_VALID_CRED(bp->b_rcred)) { - kauth_cred_unref(&bp->b_rcred); - } - if (IS_VALID_CRED(bp->b_wcred)) { - kauth_cred_unref(&bp->b_wcred); - } + buf_release_credentials(bp); + + bp->b_redundancy_flags = 0; /* If discarding, just move to the empty queue */ if (discard) { @@ -2964,6 +3696,7 @@ bcleanbuf(buf_t bp, boolean_t discard) bp->b_bcount = 0; bp->b_dirtyoff = bp->b_dirtyend = 0; bp->b_validoff = bp->b_validend = 0; + bzero(&bp->b_attr, sizeof(struct bufattr)); lck_mtx_lock_spin(buf_mtxp); } @@ -3139,24 +3872,6 @@ buf_biowait(buf_t bp) return (0); } -/* - * Wait for the callback operation on a B_CALL buffer to complete. 
- */ -void -buf_biowait_callback(buf_t bp) -{ - while (!ISSET(bp->b_lflags, BL_CALLDONE)) { - - lck_mtx_lock_spin(buf_mtxp); - - if (!ISSET(bp->b_lflags, BL_CALLDONE)) { - DTRACE_IO1(wait__start, buf_t, bp); - (void) msleep(bp, buf_mtxp, PDROP | (PRIBIO+1), "buf_biowait", NULL); - DTRACE_IO1(wait__done, buf_t, bp); - } else - lck_mtx_unlock(buf_mtxp); - } -} /* * Mark I/O complete on a buffer. @@ -3174,13 +3889,12 @@ buf_biowait_callback(buf_t bp) * (for swap pager, that puts swap buffers on the free lists (!!!), * for the vn device, that puts malloc'd buffers on the free lists!) */ -extern struct timeval priority_IO_timestamp_for_root; -extern int hard_throttle_on_root; void buf_biodone(buf_t bp) { mount_t mp; + struct bufattr *bap; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_START, bp, bp->b_datap, bp->b_flags, 0, 0); @@ -3188,9 +3902,7 @@ buf_biodone(buf_t bp) if (ISSET(bp->b_flags, B_DONE)) panic("biodone already"); - if (ISSET(bp->b_flags, B_ERROR)) { - fslog_io_error(bp); - } + bap = &bp->b_attr; if (bp->b_vp && bp->b_vp->v_mount) { mp = bp->b_vp->v_mount; @@ -3205,8 +3917,9 @@ buf_biodone(buf_t bp) INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_read_size); } - if (kdebug_enable) { - int code = DKIO_DONE; + if (kdebug_enable) { + int code = DKIO_DONE; + int io_tier = GET_BUFATTR_IO_TIER(bap); if (bp->b_flags & B_READ) code |= DKIO_READ; @@ -3218,21 +3931,32 @@ buf_biodone(buf_t bp) else if (bp->b_flags & B_PAGEIO) code |= DKIO_PAGING; - KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE, - bp, (uintptr_t)bp->b_vp, - bp->b_resid, bp->b_error, 0); + if (io_tier != 0) + code |= DKIO_THROTTLE; + + code |= ((io_tier << DKIO_TIER_SHIFT) & DKIO_TIER_MASK); + + if (bp->b_flags & B_PASSIVE) + code |= DKIO_PASSIVE; + + if (bap->ba_flags & BA_NOCACHE) + code |= DKIO_NOCACHE; + + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE, + buf_kernel_addrperm_addr(bp), (uintptr_t)VM_KERNEL_ADDRPERM(bp->b_vp), bp->b_resid, bp->b_error, 0); } - if ((bp->b_vp != NULLVP) && - ((bp->b_flags & (B_IOSTREAMING | B_PAGEIO | B_READ)) == (B_PAGEIO | B_READ)) && - (bp->b_vp->v_mount->mnt_kern_flag & MNTK_ROOTDEV)) { - microuptime(&priority_IO_timestamp_for_root); - hard_throttle_on_root = 0; - } + /* * I/O was done, so don't believe - * the DIRTY state from VM anymore + * the DIRTY state from VM anymore... 
+ * and we need to reset the THROTTLED/PASSIVE + * indicators */ - CLR(bp->b_flags, B_WASDIRTY); + CLR(bp->b_flags, (B_WASDIRTY | B_PASSIVE)); + CLR(bap->ba_flags, (BA_META | BA_NOCACHE | BA_DELAYIDLESLEEP)); + + SET_BUFATTR_IO_TIER(bap, 0); + DTRACE_IO1(done, buf_t, bp); if (!ISSET(bp->b_flags, B_READ) && !ISSET(bp->b_flags, B_RAW)) @@ -3245,46 +3969,26 @@ buf_biodone(buf_t bp) if (ISSET(bp->b_flags, (B_CALL | B_FILTER))) { /* if necessary, call out */ void (*iodone_func)(struct buf *, void *) = bp->b_iodone; - void *arg = (void *)bp->b_transaction; + void *arg = bp->b_transaction; int callout = ISSET(bp->b_flags, B_CALL); + if (iodone_func == NULL) + panic("biodone: bp @ %p has NULL b_iodone!\n", bp); + CLR(bp->b_flags, (B_CALL | B_FILTER)); /* filters and callouts are one-shot */ bp->b_iodone = NULL; bp->b_transaction = NULL; - if (iodone_func == NULL) { - panic("biodone: bp @ %p has NULL b_iodone!\n", bp); - } else { - if (callout) - SET(bp->b_flags, B_DONE); /* note that it's done */ - (*iodone_func)(bp, arg); - } - if (callout) { - int need_wakeup = 0; + if (callout) + SET(bp->b_flags, B_DONE); /* note that it's done */ - /* + (*iodone_func)(bp, arg); + + if (callout) { + /* * assumes that the callback function takes * ownership of the bp and deals with releasing it if necessary - * BL_WANTED indicates that we've decided to wait on the - * completion of this I/O in a synchronous manner... we - * still call the callback function, but in addition we - * will do a wakeup... BL_CALLDONE indicates that the callback - * routine has completed and its ok for the waiter to take - * 'ownership' of this bp back */ - lck_mtx_lock_spin(buf_mtxp); - - if (bp->b_lflags & BL_WANTED) { - CLR(bp->b_lflags, BL_WANTED); - need_wakeup = 1; - } - SET(bp->b_lflags, BL_CALLDONE); - - lck_mtx_unlock(buf_mtxp); - - if (need_wakeup) - wakeup(bp); - goto biodone_done; } /* @@ -3328,6 +4032,18 @@ biodone_done: (uintptr_t)bp, (uintptr_t)bp->b_datap, bp->b_flags, 0, 0); } +/* + * Obfuscate buf pointers. + */ +vm_offset_t +buf_kernel_addrperm_addr(void * addr) +{ + if ((vm_offset_t)addr == 0) + return 0; + else + return ((vm_offset_t)addr + buf_kernel_addrperm); +} + /* * Return a count of buffers on the "locked" queue. */ @@ -3349,6 +4065,7 @@ count_lock_queue(void) /* * Return a count of 'busy' buffers. Used at the time of shutdown. 
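+ * (a buffer counts as 'busy' while BL_BUSY is set in b_lflags)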
+ * note: This is also called from the mach side in debug context in kdp.c */ int count_busy_buffers(void) @@ -3366,8 +4083,8 @@ void vfs_bufstats() { int i, j, count; - register struct buf *bp; - register struct bqueues *dp; + struct buf *bp; + struct bqueues *dp; int counts[MAXBSIZE/CLBYTES+1]; static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY", "META", "LAUNDRY" }; @@ -3394,7 +4111,7 @@ vfs_bufstats() } #endif /* DIAGNOSTIC */ -#define NRESERVEDIOBUFS 64 +#define NRESERVEDIOBUFS 128 buf_t @@ -3409,9 +4126,7 @@ alloc_io_buf(vnode_t vp, int priv) bufstats.bufs_iobufsleeps++; need_iobuffer = 1; - (void) msleep(&need_iobuffer, iobuffer_mtxp, PDROP | (PRIBIO+1), (const char *)"alloc_io_buf", NULL); - - lck_mtx_lock_spin(iobuffer_mtxp); + (void) msleep(&need_iobuffer, iobuffer_mtxp, PSPIN | (PRIBIO+1), (const char *)"alloc_io_buf", NULL); } TAILQ_REMOVE(&iobufqueue, bp, b_freelist); @@ -3433,6 +4148,7 @@ alloc_io_buf(vnode_t vp, int priv) bp->b_datap = 0; bp->b_flags = 0; bp->b_lflags = BL_BUSY | BL_IOBUF; + bp->b_redundancy_flags = 0; bp->b_blkno = bp->b_lblkno = 0; #ifdef JOE_DEBUG bp->b_owner = current_thread(); @@ -3445,6 +4161,7 @@ alloc_io_buf(vnode_t vp, int priv) bp->b_bufsize = 0; bp->b_upl = NULL; bp->b_vp = vp; + bzero(&bp->b_attr, sizeof(struct bufattr)); if (vp && (vp->v_type == VBLK || vp->v_type == VCHR)) bp->b_dev = vp->v_rdev; @@ -3466,6 +4183,9 @@ free_io_buf(buf_t bp) bp->b_vp = NULL; bp->b_flags = B_INVAL; + /* Zero out the bufattr and its flags before relinquishing this iobuf */ + bzero (&bp->b_attr, sizeof(struct bufattr)); + lck_mtx_lock_spin(iobuffer_mtxp); binsheadfree(bp, &iobufqueue, -1); @@ -3524,6 +4244,8 @@ bcleanbuf_thread_init(void) thread_deallocate(thread); } +typedef int (*bcleanbufcontinuation)(int); + static void bcleanbuf_thread(void) { @@ -3535,10 +4257,9 @@ bcleanbuf_thread(void) lck_mtx_lock_spin(buf_mtxp); while ( (bp = TAILQ_FIRST(&bufqueues[BQ_LAUNDRY])) == NULL) { - (void)msleep((void *)&bufqueues[BQ_LAUNDRY], buf_mtxp, PDROP | PRIBIO, "blaundry", NULL); - - lck_mtx_lock_spin(buf_mtxp); + (void)msleep0(&bufqueues[BQ_LAUNDRY], buf_mtxp, PRIBIO|PDROP, "blaundry", 0, (bcleanbufcontinuation)bcleanbuf_thread); } + /* * Remove from the queue */ @@ -3570,7 +4291,7 @@ bcleanbuf_thread(void) binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY); blaundrycnt++; - /* we never leave a busy page on the laundary queue */ + /* we never leave a busy page on the laundry queue */ CLR(bp->b_lflags, BL_BUSY); buf_busycount--; #ifdef JOE_DEBUG @@ -3579,12 +4300,18 @@ bcleanbuf_thread(void) #endif lck_mtx_unlock(buf_mtxp); - - if (loopcnt > 10) { - (void)tsleep((void *)&bufqueues[BQ_LAUNDRY], PRIBIO, "blaundry", 1); + + if (loopcnt > MAXLAUNDRY) { + /* + * bawrite_internal() can return errors if we're throttled. 
If we've + * done several I/Os and failed, give the system some time to unthrottle + * the vnode + */ + (void)tsleep((void *)&bufqueues[BQ_LAUNDRY], PRIBIO, "blaundry", 1); loopcnt = 0; } else { - (void)thread_block(THREAD_CONTINUE_NULL); + /* give other threads a chance to run */ + (void)thread_block(THREAD_CONTINUE_NULL); loopcnt++; } } @@ -3648,33 +4375,132 @@ dump_buffer: return(0); } -static boolean_t -buffer_cache_gc(void) +boolean_t +buffer_cache_gc(int all) { buf_t bp; boolean_t did_large_zfree = FALSE; + boolean_t need_wakeup = FALSE; int now = buf_timestamp(); + uint32_t found = 0; + struct bqueues privq; + int thresh_hold = BUF_STALE_THRESHHOLD; - lck_mtx_lock_spin(buf_mtxp); + if (all) + thresh_hold = 0; + /* + * We only care about metadata (incore storage comes from zalloc()). + * Unless "all" is set (used to evict meta data buffers in preparation + * for deep sleep), we only evict up to BUF_MAX_GC_BATCH_SIZE buffers + * that have not been accessed in the last BUF_STALE_THRESHOLD seconds. + * BUF_MAX_GC_BATCH_SIZE controls both the hold time of the global lock + * "buf_mtxp" and the length of time we spend compute bound in the GC + * thread which calls this function + */ + lck_mtx_lock(buf_mtxp); + + do { + found = 0; + TAILQ_INIT(&privq); + need_wakeup = FALSE; + + while (((bp = TAILQ_FIRST(&bufqueues[BQ_META]))) && + (now > bp->b_timestamp) && + (now - bp->b_timestamp > thresh_hold) && + (found < BUF_MAX_GC_BATCH_SIZE)) { + + /* Remove from free list */ + bremfree_locked(bp); + found++; + +#ifdef JOE_DEBUG + bp->b_owner = current_thread(); + bp->b_tag = 12; +#endif + + /* If dirty, move to laundry queue and remember to do wakeup */ + if (ISSET(bp->b_flags, B_DELWRI)) { + SET(bp->b_lflags, BL_WANTDEALLOC); + + bmovelaundry(bp); + need_wakeup = TRUE; + + continue; + } + + /* + * Mark busy and put on private list. We could technically get + * away without setting BL_BUSY here. + */ + SET(bp->b_lflags, BL_BUSY); + buf_busycount++; + + /* + * Remove from hash and dissociate from vp. + */ + bremhash(bp); + if (bp->b_vp) { + brelvp_locked(bp); + } + + TAILQ_INSERT_TAIL(&privq, bp, b_freelist); + } + + if (found == 0) { + break; + } + + /* Drop lock for batch processing */ + lck_mtx_unlock(buf_mtxp); + + /* Wakeup and yield for laundry if need be */ + if (need_wakeup) { + wakeup(&bufqueues[BQ_LAUNDRY]); + (void)thread_block(THREAD_CONTINUE_NULL); + } + + /* Clean up every buffer on private list */ + TAILQ_FOREACH(bp, &privq, b_freelist) { + /* Take note if we've definitely freed at least a page to a zone */ + if ((ISSET(bp->b_flags, B_ZALLOC)) && (buf_size(bp) >= PAGE_SIZE)) { + did_large_zfree = TRUE; + } - /* We only care about metadata (incore storage comes from zalloc()) */ - bp = TAILQ_FIRST(&bufqueues[BQ_META]); + trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno); - /* Only collect buffers unused in the last N seconds. Note: ordered by timestamp. 
*/ - while ((bp != NULL) && ((now - bp->b_timestamp) > BUF_STALE_THRESHHOLD)) { - int result, size; - boolean_t is_zalloc; + /* Free Storage */ + buf_free_meta_store(bp); - size = buf_size(bp); - is_zalloc = ISSET(bp->b_flags, B_ZALLOC); + /* Release credentials */ + buf_release_credentials(bp); - result = bcleanbuf(bp, TRUE); - if ((result == 0) && is_zalloc && (size >= PAGE_SIZE)) { - /* We've definitely freed at least a page to a zone */ - did_large_zfree = TRUE; + /* Prepare for moving to empty queue */ + CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED + | B_AGE | B_ASYNC | B_NOCACHE | B_FUA)); + bp->b_whichq = BQ_EMPTY; + BLISTNONE(bp); } - bp = TAILQ_FIRST(&bufqueues[BQ_META]); - } + lck_mtx_lock(buf_mtxp); + + /* Back under lock, move them all to invalid hash and clear busy */ + TAILQ_FOREACH(bp, &privq, b_freelist) { + binshash(bp, &invalhash); + CLR(bp->b_lflags, BL_BUSY); + buf_busycount--; + +#ifdef JOE_DEBUG + if (bp->b_owner != current_thread()) { + panic("Buffer stolen from buffer_cache_gc()"); + } + bp->b_owner = current_thread(); + bp->b_tag = 13; +#endif + } + + /* And do a big bulk move to the empty queue */ + TAILQ_CONCAT(&bufqueues[BQ_EMPTY], &privq, b_freelist); + + } while (all && (found == BUF_MAX_GC_BATCH_SIZE)); lck_mtx_unlock(buf_mtxp); @@ -3769,334 +4595,3 @@ bflushq(int whichq, mount_t mp) return (total_writes); } #endif - - -#if BALANCE_QUEUES - -/* XXX move this to a separate file */ - -/* - * NOTE: THIS CODE HAS NOT BEEN UPDATED - * WITH RESPECT TO THE NEW LOCKING MODEL - */ - - -/* - * Dynamic Scaling of the Buffer Queues - */ - -typedef long long blsize_t; - -blsize_t MAXNBUF; /* initialize to (sane_size / PAGE_SIZE) */ -/* Global tunable limits */ -blsize_t nbufh; /* number of buffer headers */ -blsize_t nbuflow; /* minimum number of buffer headers required */ -blsize_t nbufhigh; /* maximum number of buffer headers allowed */ -blsize_t nbuftarget; /* preferred number of buffer headers */ - -/* - * assertions: - * - * 1. 0 < nbuflow <= nbufh <= nbufhigh - * 2. nbufhigh <= MAXNBUF - * 3. 0 < nbuflow <= nbuftarget <= nbufhigh - * 4. nbufh can not be set by sysctl(). - */ - -/* Per queue tunable limits */ - -struct bufqlim { - blsize_t bl_nlow; /* minimum number of buffer headers required */ - blsize_t bl_num; /* number of buffer headers on the queue */ - blsize_t bl_nlhigh; /* maximum number of buffer headers allowed */ - blsize_t bl_target; /* preferred number of buffer headers */ - long bl_stale; /* Seconds after which a buffer is considered stale */ -} bufqlim[BQUEUES]; - -/* - * assertions: - * - * 1. 0 <= bl_nlow <= bl_num <= bl_nlhigh - * 2. bl_nlhigh <= MAXNBUF - * 3. bufqlim[BQ_META].bl_nlow != 0 - * 4. bufqlim[BQ_META].bl_nlow > (number of possible concurrent - * file system IO operations) - * 5. bl_num can not be set by sysctl(). - * 6. bl_nhigh <= nbufhigh - */ - -/* - * Rationale: - * ---------- - * Defining it blsize_t as long permits 2^31 buffer headers per queue. - * Which can describe (2^31 * PAGE_SIZE) memory per queue. - * - * These limits are exported to by means of sysctl(). - * It was decided to define blsize_t as a 64 bit quantity. - * This will make sure that we will not be required to change it - * as long as we do not exceed 64 bit address space for the kernel. - * - * low and high numbers parameters initialized at compile time - * and boot arguments can be used to override them. sysctl() - * would not change the value. sysctl() can get all the values - * but can set only target. num is the current level. 
- * - * Advantages of having a "bufqscan" thread doing the balancing are, - * Keep enough bufs on BQ_EMPTY. - * getnewbuf() by default will always select a buffer from the BQ_EMPTY. - * getnewbuf() perfoms best if a buffer was found there. - * Also this minimizes the possibility of starting IO - * from getnewbuf(). That's a performance win, too. - * - * Localize complex logic [balancing as well as time aging] - * to balancebufq(). - * - * Simplify getnewbuf() logic by elimination of time aging code. - */ - -/* - * Algorithm: - * ----------- - * The goal of the dynamic scaling of the buffer queues to to keep - * the size of the LRU close to bl_target. Buffers on a queue would - * be time aged. - * - * There would be a thread which will be responsible for "balancing" - * the buffer cache queues. - * - * The scan order would be: AGE, LRU, META, EMPTY. - */ - -long bufqscanwait = 0; - -static void bufqscan_thread(); -static int balancebufq(int q); -static int btrimempty(int n); -static __inline__ int initbufqscan(void); -static __inline__ int nextbufq(int q); -static void buqlimprt(int all); - - -static __inline__ void -bufqinc(int q) -{ - if ((q < 0) || (q >= BQUEUES)) - return; - - bufqlim[q].bl_num++; - return; -} - -static __inline__ void -bufqdec(int q) -{ - if ((q < 0) || (q >= BQUEUES)) - return; - - bufqlim[q].bl_num--; - return; -} - -static void -bufq_balance_thread_init(void) -{ - thread_t thread = THREAD_NULL; - - if (bufqscanwait++ == 0) { - - /* Initalize globals */ - MAXNBUF = (sane_size / PAGE_SIZE); - nbufh = nbuf_headers; - nbuflow = min(nbufh, 100); - nbufhigh = min(MAXNBUF, max(nbufh, 2048)); - nbuftarget = (sane_size >> 5) / PAGE_SIZE; - nbuftarget = max(nbuflow, nbuftarget); - nbuftarget = min(nbufhigh, nbuftarget); - - /* - * Initialize the bufqlim - */ - - /* LOCKED queue */ - bufqlim[BQ_LOCKED].bl_nlow = 0; - bufqlim[BQ_LOCKED].bl_nlhigh = 32; - bufqlim[BQ_LOCKED].bl_target = 0; - bufqlim[BQ_LOCKED].bl_stale = 30; - - /* LRU queue */ - bufqlim[BQ_LRU].bl_nlow = 0; - bufqlim[BQ_LRU].bl_nlhigh = nbufhigh/4; - bufqlim[BQ_LRU].bl_target = nbuftarget/4; - bufqlim[BQ_LRU].bl_stale = LRU_IS_STALE; - - /* AGE queue */ - bufqlim[BQ_AGE].bl_nlow = 0; - bufqlim[BQ_AGE].bl_nlhigh = nbufhigh/4; - bufqlim[BQ_AGE].bl_target = nbuftarget/4; - bufqlim[BQ_AGE].bl_stale = AGE_IS_STALE; - - /* EMPTY queue */ - bufqlim[BQ_EMPTY].bl_nlow = 0; - bufqlim[BQ_EMPTY].bl_nlhigh = nbufhigh/4; - bufqlim[BQ_EMPTY].bl_target = nbuftarget/4; - bufqlim[BQ_EMPTY].bl_stale = 600000; - - /* META queue */ - bufqlim[BQ_META].bl_nlow = 0; - bufqlim[BQ_META].bl_nlhigh = nbufhigh/4; - bufqlim[BQ_META].bl_target = nbuftarget/4; - bufqlim[BQ_META].bl_stale = META_IS_STALE; - - /* LAUNDRY queue */ - bufqlim[BQ_LOCKED].bl_nlow = 0; - bufqlim[BQ_LOCKED].bl_nlhigh = 32; - bufqlim[BQ_LOCKED].bl_target = 0; - bufqlim[BQ_LOCKED].bl_stale = 30; - - buqlimprt(1); - } - - /* create worker thread */ - kernel_thread_start((thread_continue_t)bufqscan_thread, NULL, &thread); - thread_deallocate(thread); -} - -/* The workloop for the buffer balancing thread */ -static void -bufqscan_thread() -{ - int moretodo = 0; - - for(;;) { - do { - int q; /* buffer queue to process */ - - q = initbufqscan(); - for (; q; ) { - moretodo |= balancebufq(q); - q = nextbufq(q); - } - } while (moretodo); - -#if DIAGNOSTIC - vfs_bufstats(); - buqlimprt(0); -#endif - (void)tsleep((void *)&bufqscanwait, PRIBIO, "bufqscanwait", 60 * hz); - moretodo = 0; - } -} - -/* Seed for the buffer queue balancing */ -static __inline__ int -initbufqscan() -{ - 
/* Start with AGE queue */ - return (BQ_AGE); -} - -/* Pick next buffer queue to balance */ -static __inline__ int -nextbufq(int q) -{ - int order[] = { BQ_AGE, BQ_LRU, BQ_META, BQ_EMPTY, 0 }; - - q++; - q %= sizeof(order); - return (order[q]); -} - -/* function to balance the buffer queues */ -static int -balancebufq(int q) -{ - int moretodo = 0; - int n, t; - - /* reject invalid q */ - if ((q < 0) || (q >= BQUEUES)) - goto out; - - /* LOCKED or LAUNDRY queue MUST not be balanced */ - if ((q == BQ_LOCKED) || (q == BQ_LAUNDRY)) - goto out; - - n = (bufqlim[q].bl_num - bufqlim[q].bl_target); - - /* If queue has less than target nothing more to do */ - if (n < 0) - goto out; - - if ( n > 8 ) { - /* Balance only a small amount (12.5%) at a time */ - n >>= 3; - } - - /* EMPTY queue needs special handling */ - if (q == BQ_EMPTY) { - moretodo |= btrimempty(n); - goto out; - } - - t = buf_timestamp(): - - for (; n > 0; n--) { - struct buf *bp = bufqueues[q].tqh_first; - if (!bp) - break; - - /* check if it's stale */ - if ((t - bp->b_timestamp) > bufqlim[q].bl_stale) { - if (bcleanbuf(bp, FALSE)) { - /* buf_bawrite() issued, bp not ready */ - moretodo = 1; - } else { - /* release the cleaned buffer to BQ_EMPTY */ - SET(bp->b_flags, B_INVAL); - buf_brelse(bp); - } - } else - break; - } - -out: - return (moretodo); -} - -static int -btrimempty(int n) -{ - /* - * When struct buf are allocated dynamically, this would - * reclaim upto 'n' struct buf from the empty queue. - */ - - return (0); -} - -static void -buqlimprt(int all) -{ - int i; - static char *bname[BQUEUES] = - { "LOCKED", "LRU", "AGE", "EMPTY", "META", "LAUNDRY" }; - - if (all) - for (i = 0; i < BQUEUES; i++) { - printf("%s : ", bname[i]); - printf("min = %ld, ", (long)bufqlim[i].bl_nlow); - printf("cur = %ld, ", (long)bufqlim[i].bl_num); - printf("max = %ld, ", (long)bufqlim[i].bl_nlhigh); - printf("target = %ld, ", (long)bufqlim[i].bl_target); - printf("stale after %ld seconds\n", bufqlim[i].bl_stale); - } - else - for (i = 0; i < BQUEUES; i++) { - printf("%s : ", bname[i]); - printf("cur = %ld, ", (long)bufqlim[i].bl_num); - } -} - -#endif - -
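A minimal usage sketch for the shadow-buffer KPI introduced by this patch: a
caller (for example, a journaling filesystem) clones a busy metadata buf_t
with buf_create_shadow() so a snapshot can be written out while the master
buffer keeps changing. The buf_create_shadow(), buf_error(), buf_brelse()
and buf_wait_for_shadow_io() calls are the ones added or shown above;
struct jnl_transaction is a hypothetical bookkeeping type, and the direct
VNOP_STRATEGY() call stands in for whatever issue path the filesystem
actually uses.

    #include <sys/types.h>
    #include <sys/errno.h>
    #include <sys/buf.h>
    #include <sys/vnode_if.h>

    struct jnl_transaction {        /* hypothetical bookkeeping */
            int     tr_error;
    };

    /*
     * Completion callback for the shadow: record any error, then release
     * the shadow. buf_brelse() on the last shadow is what drops the
     * master's b_shadow_ref to zero and lets it go back on a free list
     * (see buf_brelse_shadow() in the patch).
     */
    static void
    journal_finish_io(buf_t sbp, void *arg)
    {
            struct jnl_transaction *tr = arg;

            if (buf_error(sbp))
                    tr->tr_error = buf_error(sbp);
            buf_brelse(sbp);
    }

    static int
    journal_write_block(buf_t bp, struct jnl_transaction *tr)
    {
            buf_t   sbp;

            /*
             * force_copy == FALSE and external_storage == 0: the shadow
             * shares b_datap with 'bp', and the master's b_shadow_ref
             * keeps it off the freelists until the last shadow is
             * released.
             */
            sbp = buf_create_shadow(bp, FALSE, 0, journal_finish_io, tr);
            if (sbp == NULL)        /* only non-iobuf B_META bufs qualify */
                    return (EINVAL);

            buf_markasync(sbp);
            return (VNOP_STRATEGY(sbp));
    }

A reader that must observe the settled contents of a block can call the new
buf_wait_for_shadow_io(vp, blkno), which sleeps on BL_WANTED_REF until the
incore buffer's b_shadow_ref drains to zero.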