/*
- * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
- *
+ *
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
#include <kern/locks.h>
#include <kern/thread.h>
-#include <sys/fslog.h> /* fslog_io_error() */
-#include <sys/disk.h> /* dk_error_description_t */
+#include <sys/fslog.h> /* fslog_io_error() */
+#include <sys/disk.h> /* dk_error_description_t */
#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
-#include <kern/sched_prim.h> /* thread_block() */
+#include <kern/sched_prim.h> /* thread_block() */
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <sys/sdt.h>
-int bcleanbuf(buf_t bp, boolean_t discard);
-static int brecover_data(buf_t bp);
+int bcleanbuf(buf_t bp, boolean_t discard);
+static int brecover_data(buf_t bp);
static boolean_t incore(vnode_t vp, daddr64_t blkno);
/* timeout is in msecs */
-static buf_t getnewbuf(int slpflag, int slptimeo, int *queue);
-static void bremfree_locked(buf_t bp);
-static void buf_reassign(buf_t bp, vnode_t newvp);
-static errno_t buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo);
-static int buf_iterprepare(vnode_t vp, struct buflists *, int flags);
-static void buf_itercomplete(vnode_t vp, struct buflists *, int flags);
+static buf_t getnewbuf(int slpflag, int slptimeo, int *queue);
+static void bremfree_locked(buf_t bp);
+static void buf_reassign(buf_t bp, vnode_t newvp);
+static errno_t buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo);
+static int buf_iterprepare(vnode_t vp, struct buflists *, int flags);
+static void buf_itercomplete(vnode_t vp, struct buflists *, int flags);
static boolean_t buffer_cache_gc(int);
-static buf_t buf_brelse_shadow(buf_t bp);
-static void buf_free_meta_store(buf_t bp);
+static buf_t buf_brelse_shadow(buf_t bp);
+static void buf_free_meta_store(buf_t bp);
-static buf_t buf_create_shadow_internal(buf_t bp, boolean_t force_copy,
- uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv);
+static buf_t buf_create_shadow_internal(buf_t bp, boolean_t force_copy,
+ uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv);
int bdwrite_internal(buf_t, int);
extern void disk_conditioner_delay(buf_t, int, int, uint64_t);
/* zone allocated buffer headers */
-static void bufzoneinit(void);
-static void bcleanbuf_thread_init(void);
-static void bcleanbuf_thread(void);
+static void bufzoneinit(void);
+static void bcleanbuf_thread_init(void);
+static void bcleanbuf_thread(void);
-static zone_t buf_hdr_zone;
-static int buf_hdr_count;
+static zone_t buf_hdr_zone;
+static int buf_hdr_count;
/*
* Definitions for the buffer hash lists.
*/
-#define BUFHASH(dvp, lbn) \
+#define BUFHASH(dvp, lbn) \
(&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
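/*
 * The hash key mixes the vnode pointer with the logical block number;
 * lookups (incore()/incore_locked() below) probe the chain at
 * BUFHASH(vp, blkno) for a buffer with a matching b_vp and b_lblkno.
 */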
-LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
-u_long bufhash;
+LIST_HEAD(bufhashhdr, buf) * bufhashtbl, invalhash;
+u_long bufhash;
-static buf_t incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp);
+static buf_t incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp);
/* Definitions for the buffer stats. */
struct bufstats bufstats;
static int needbuffer;
static int need_iobuffer;
-static lck_grp_t *buf_mtx_grp;
-static lck_attr_t *buf_mtx_attr;
+static lck_grp_t *buf_mtx_grp;
+static lck_attr_t *buf_mtx_attr;
static lck_grp_attr_t *buf_mtx_grp_attr;
-static lck_mtx_t *iobuffer_mtxp;
-static lck_mtx_t *buf_mtxp;
-static lck_mtx_t *buf_gc_callout;
+static lck_mtx_t *iobuffer_mtxp;
+static lck_mtx_t *buf_mtxp;
+static lck_mtx_t *buf_gc_callout;
static int buf_busycount;
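/*
 * buf_timestamp() returns the system uptime in whole seconds; this coarse
 * clock is what free-list aging compares against the *_IS_STALE
 * thresholds defined below.
 */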
static __inline__ int
buf_timestamp(void)
{
- struct timeval t;
+ struct timeval t;
microuptime(&t);
- return (t.tv_sec);
+ return t.tv_sec;
}
/*
* Insq/Remq for the buffer free lists.
*/
-#define binsheadfree(bp, dp, whichq) do { \
- TAILQ_INSERT_HEAD(dp, bp, b_freelist); \
- } while (0)
+#define binsheadfree(bp, dp, whichq) do { \
+ TAILQ_INSERT_HEAD(dp, bp, b_freelist); \
+ } while (0)
-#define binstailfree(bp, dp, whichq) do { \
- TAILQ_INSERT_TAIL(dp, bp, b_freelist); \
- } while (0)
+#define binstailfree(bp, dp, whichq) do { \
+ TAILQ_INSERT_TAIL(dp, bp, b_freelist); \
+ } while (0)
-#define BHASHENTCHECK(bp) \
- if ((bp)->b_hash.le_prev != (struct buf **)0xdeadbeef) \
- panic("%p: b_hash.le_prev is not deadbeef", (bp));
+#define BHASHENTCHECK(bp) \
+ if ((bp)->b_hash.le_prev != (struct buf **)0xdeadbeef) \
+ panic("%p: b_hash.le_prev is not deadbeef", (bp));
-#define BLISTNONE(bp) \
- (bp)->b_hash.le_next = (struct buf *)0; \
+#define BLISTNONE(bp) \
+ (bp)->b_hash.le_next = (struct buf *)0; \
(bp)->b_hash.le_prev = (struct buf **)0xdeadbeef;
/*
* Insq/Remq for the vnode usage lists.
*/
-#define bufinsvn(bp, dp) LIST_INSERT_HEAD(dp, bp, b_vnbufs)
-#define bufremvn(bp) { \
- LIST_REMOVE(bp, b_vnbufs); \
- (bp)->b_vnbufs.le_next = NOLIST; \
+#define bufinsvn(bp, dp) LIST_INSERT_HEAD(dp, bp, b_vnbufs)
+#define bufremvn(bp) { \
+ LIST_REMOVE(bp, b_vnbufs); \
+ (bp)->b_vnbufs.le_next = NOLIST; \
}
/*
- * Time in seconds before a buffer on a list is
- * considered as a stale buffer
+ * Time in seconds before a buffer on a list is
+ * considered as a stale buffer
*/
#define LRU_IS_STALE 120 /* default value for the LRU */
#define AGE_IS_STALE 60 /* default value for the AGE */
int age_is_stale = AGE_IS_STALE;
int meta_is_stale = META_IS_STALE;
-#define MAXLAUNDRY 10
+#define MAXLAUNDRY 10
/* LIST_INSERT_HEAD() with assertions */
static __inline__ void
blistenterhead(struct bufhashhdr * head, buf_t bp)
{
- if ((bp->b_hash.le_next = (head)->lh_first) != NULL)
+ if ((bp->b_hash.le_next = (head)->lh_first) != NULL) {
(head)->lh_first->b_hash.le_prev = &(bp)->b_hash.le_next;
+ }
(head)->lh_first = bp;
bp->b_hash.le_prev = &(head)->lh_first;
- if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef)
+ if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef) {
panic("blistenterhead: le_prev is deadbeef");
+ }
}
-static __inline__ void
+static __inline__ void
binshash(buf_t bp, struct bufhashhdr *dp)
{
#if DIAGNOSTIC
- buf_t nbp;
+ buf_t nbp;
#endif /* DIAGNOSTIC */
BHASHENTCHECK(bp);
#if DIAGNOSTIC
nbp = dp->lh_first;
- for(; nbp != NULL; nbp = nbp->b_hash.le_next) {
- if(nbp == bp)
+ for (; nbp != NULL; nbp = nbp->b_hash.le_next) {
+ if (nbp == bp) {
panic("buf already in hashlist");
+ }
}
#endif /* DIAGNOSTIC */
blistenterhead(dp, bp);
}
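/*
 * Remove a buffer from its hash chain.  A b_hash.le_prev equal to the
 * 0xdeadbeef sentinel means the buffer is not on any chain, so hitting
 * it here indicates a double removal.
 */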
-static __inline__ void
-bremhash(buf_t bp)
+static __inline__ void
+bremhash(buf_t bp)
{
- if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef)
+ if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef) {
panic("bremhash le_prev is deadbeef");
- if (bp->b_hash.le_next == bp)
+ }
+ if (bp->b_hash.le_next == bp) {
panic("bremhash: next points to self");
+ }
- if (bp->b_hash.le_next != NULL)
+ if (bp->b_hash.le_next != NULL) {
bp->b_hash.le_next->b_hash.le_prev = bp->b_hash.le_prev;
+ }
*bp->b_hash.le_prev = (bp)->b_hash.le_next;
}
int
-buf_valid(buf_t bp) {
-
- if ( (bp->b_flags & (B_DONE | B_DELWRI)) )
- return 1;
+buf_valid(buf_t bp)
+{
+ if ((bp->b_flags & (B_DONE | B_DELWRI))) {
+ return 1;
+ }
return 0;
}
int
-buf_fromcache(buf_t bp) {
-
- if ( (bp->b_flags & B_CACHE) )
- return 1;
+buf_fromcache(buf_t bp)
+{
+ if ((bp->b_flags & B_CACHE)) {
+ return 1;
+ }
return 0;
}
void
-buf_markinvalid(buf_t bp) {
-
- SET(bp->b_flags, B_INVAL);
+buf_markinvalid(buf_t bp)
+{
+ SET(bp->b_flags, B_INVAL);
}
void
-buf_markdelayed(buf_t bp) {
-
+buf_markdelayed(buf_t bp)
+{
if (!ISSET(bp->b_flags, B_DELWRI)) {
SET(bp->b_flags, B_DELWRI);
OSAddAtomicLong(1, &nbdwrite);
buf_reassign(bp, bp->b_vp);
}
- SET(bp->b_flags, B_DONE);
+ SET(bp->b_flags, B_DONE);
}
void
-buf_markclean(buf_t bp) {
-
+buf_markclean(buf_t bp)
+{
if (ISSET(bp->b_flags, B_DELWRI)) {
CLR(bp->b_flags, B_DELWRI);
}
void
-buf_markeintr(buf_t bp) {
-
- SET(bp->b_flags, B_EINTR);
+buf_markeintr(buf_t bp)
+{
+ SET(bp->b_flags, B_EINTR);
}
void
-buf_markaged(buf_t bp) {
-
- SET(bp->b_flags, B_AGE);
+buf_markaged(buf_t bp)
+{
+ SET(bp->b_flags, B_AGE);
}
int
-buf_fua(buf_t bp) {
-
- if ((bp->b_flags & B_FUA) == B_FUA)
- return 1;
+buf_fua(buf_t bp)
+{
+ if ((bp->b_flags & B_FUA) == B_FUA) {
+ return 1;
+ }
return 0;
}
-void
-buf_markfua(buf_t bp) {
-
- SET(bp->b_flags, B_FUA);
+void
+buf_markfua(buf_t bp)
+{
+ SET(bp->b_flags, B_FUA);
}
#if CONFIG_PROTECT
-cpx_t bufattr_cpx(bufattr_t bap)
+cpx_t
+bufattr_cpx(bufattr_t bap)
{
return bap->ba_cpx;
}
-void bufattr_setcpx(bufattr_t bap, cpx_t cpx)
+void
+bufattr_setcpx(bufattr_t bap, cpx_t cpx)
{
bap->ba_cpx = cpx;
}
void
-buf_setcpoff (buf_t bp, uint64_t foffset) {
+buf_setcpoff(buf_t bp, uint64_t foffset)
+{
bp->b_attr.ba_cp_file_off = foffset;
}
uint64_t
-bufattr_cpoff(bufattr_t bap) {
+bufattr_cpoff(bufattr_t bap)
+{
return bap->ba_cp_file_off;
}
void
-bufattr_setcpoff(bufattr_t bap, uint64_t foffset) {
+bufattr_setcpoff(bufattr_t bap, uint64_t foffset)
+{
bap->ba_cp_file_off = foffset;
}
#else // !CONFIG_PROTECT
uint64_t
-bufattr_cpoff(bufattr_t bap __unused) {
+bufattr_cpoff(bufattr_t bap __unused)
+{
return 0;
}
void
-bufattr_setcpoff(__unused bufattr_t bap, __unused uint64_t foffset) {
+bufattr_setcpoff(__unused bufattr_t bap, __unused uint64_t foffset)
+{
return;
}
-struct cpx *bufattr_cpx(__unused bufattr_t bap)
+struct cpx *
+bufattr_cpx(__unused bufattr_t bap)
{
return NULL;
}
-void bufattr_setcpx(__unused bufattr_t bap, __unused struct cpx *cpx)
+void
+bufattr_setcpx(__unused bufattr_t bap, __unused struct cpx *cpx)
{
}
#endif /* !CONFIG_PROTECT */
bufattr_t
-bufattr_alloc() {
+bufattr_alloc()
+{
bufattr_t bap;
MALLOC(bap, bufattr_t, sizeof(struct bufattr), M_TEMP, M_WAITOK);
- if (bap == NULL)
+ if (bap == NULL) {
return NULL;
+ }
bzero(bap, sizeof(struct bufattr));
return bap;
}
void
-bufattr_free(bufattr_t bap) {
- if (bap)
+bufattr_free(bufattr_t bap)
+{
+ if (bap) {
FREE(bap, M_TEMP);
+ }
}
bufattr_t
-bufattr_dup(bufattr_t bap) {
+bufattr_dup(bufattr_t bap)
+{
bufattr_t new_bufattr;
MALLOC(new_bufattr, bufattr_t, sizeof(struct bufattr), M_TEMP, M_WAITOK);
- if (new_bufattr == NULL)
+ if (new_bufattr == NULL) {
return NULL;
+ }
/* Copy the provided one into the new copy */
- memcpy (new_bufattr, bap, sizeof(struct bufattr));
+ memcpy(new_bufattr, bap, sizeof(struct bufattr));
return new_bufattr;
}
int
-bufattr_rawencrypted(bufattr_t bap) {
- if ( (bap->ba_flags & BA_RAW_ENCRYPTED_IO) )
+bufattr_rawencrypted(bufattr_t bap)
+{
+ if ((bap->ba_flags & BA_RAW_ENCRYPTED_IO)) {
return 1;
+ }
return 0;
}
int
-bufattr_throttled(bufattr_t bap) {
- return (GET_BUFATTR_IO_TIER(bap));
+bufattr_throttled(bufattr_t bap)
+{
+ return GET_BUFATTR_IO_TIER(bap);
}
int
-bufattr_passive(bufattr_t bap) {
- if ( (bap->ba_flags & BA_PASSIVE) )
+bufattr_passive(bufattr_t bap)
+{
+ if ((bap->ba_flags & BA_PASSIVE)) {
return 1;
+ }
return 0;
}
int
-bufattr_nocache(bufattr_t bap) {
- if ( (bap->ba_flags & BA_NOCACHE) )
+bufattr_nocache(bufattr_t bap)
+{
+ if ((bap->ba_flags & BA_NOCACHE)) {
return 1;
+ }
return 0;
}
int
-bufattr_meta(bufattr_t bap) {
- if ( (bap->ba_flags & BA_META) )
+bufattr_meta(bufattr_t bap)
+{
+ if ((bap->ba_flags & BA_META)) {
return 1;
+ }
return 0;
}
void
-bufattr_markmeta(bufattr_t bap) {
- SET(bap->ba_flags, BA_META);
+bufattr_markmeta(bufattr_t bap)
+{
+ SET(bap->ba_flags, BA_META);
}
int
#if !CONFIG_EMBEDDED
-bufattr_delayidlesleep(bufattr_t bap)
+bufattr_delayidlesleep(bufattr_t bap)
#else /* !CONFIG_EMBEDDED */
-bufattr_delayidlesleep(__unused bufattr_t bap)
+bufattr_delayidlesleep(__unused bufattr_t bap)
#endif /* !CONFIG_EMBEDDED */
{
#if !CONFIG_EMBEDDED
- if ( (bap->ba_flags & BA_DELAYIDLESLEEP) )
+ if ((bap->ba_flags & BA_DELAYIDLESLEEP)) {
return 1;
+ }
#endif /* !CONFIG_EMBEDDED */
return 0;
}
bufattr_t
-buf_attr(buf_t bp) {
+buf_attr(buf_t bp)
+{
return &bp->b_attr;
}
-void
-buf_markstatic(buf_t bp __unused) {
+void
+buf_markstatic(buf_t bp __unused)
+{
SET(bp->b_flags, B_STATICCONTENT);
}
int
-buf_static(buf_t bp) {
- if ( (bp->b_flags & B_STATICCONTENT) )
- return 1;
- return 0;
+buf_static(buf_t bp)
+{
+ if ((bp->b_flags & B_STATICCONTENT)) {
+ return 1;
+ }
+ return 0;
}
-void
-bufattr_markgreedymode(bufattr_t bap) {
+void
+bufattr_markgreedymode(bufattr_t bap)
+{
SET(bap->ba_flags, BA_GREEDY_MODE);
}
int
-bufattr_greedymode(bufattr_t bap) {
- if ( (bap->ba_flags & BA_GREEDY_MODE) )
- return 1;
- return 0;
+bufattr_greedymode(bufattr_t bap)
+{
+ if ((bap->ba_flags & BA_GREEDY_MODE)) {
+ return 1;
+ }
+ return 0;
}
-void
-bufattr_markisochronous(bufattr_t bap) {
+void
+bufattr_markisochronous(bufattr_t bap)
+{
SET(bap->ba_flags, BA_ISOCHRONOUS);
}
int
-bufattr_isochronous(bufattr_t bap) {
- if ( (bap->ba_flags & BA_ISOCHRONOUS) )
- return 1;
- return 0;
+bufattr_isochronous(bufattr_t bap)
+{
+ if ((bap->ba_flags & BA_ISOCHRONOUS)) {
+ return 1;
+ }
+ return 0;
}
-void
-bufattr_markquickcomplete(bufattr_t bap) {
+void
+bufattr_markquickcomplete(bufattr_t bap)
+{
SET(bap->ba_flags, BA_QUICK_COMPLETE);
}
int
-bufattr_quickcomplete(bufattr_t bap) {
- if ( (bap->ba_flags & BA_QUICK_COMPLETE) )
- return 1;
- return 0;
+bufattr_quickcomplete(bufattr_t bap)
+{
+ if ((bap->ba_flags & BA_QUICK_COMPLETE)) {
+ return 1;
+ }
+ return 0;
}
-errno_t
-buf_error(buf_t bp) {
-
- return (bp->b_error);
+void
+bufattr_markioscheduled(bufattr_t bap)
+{
+ SET(bap->ba_flags, BA_IO_SCHEDULED);
}
-void
-buf_seterror(buf_t bp, errno_t error) {
- if ((bp->b_error = error))
- SET(bp->b_flags, B_ERROR);
- else
- CLR(bp->b_flags, B_ERROR);
+int
+bufattr_ioscheduled(bufattr_t bap)
+{
+ if ((bap->ba_flags & BA_IO_SCHEDULED)) {
+ return 1;
+ }
+ return 0;
}
-void
-buf_setflags(buf_t bp, int32_t flags) {
+errno_t
+buf_error(buf_t bp)
+{
+ return bp->b_error;
+}
- SET(bp->b_flags, (flags & BUF_X_WRFLAGS));
+void
+buf_seterror(buf_t bp, errno_t error)
+{
+ if ((bp->b_error = error)) {
+ SET(bp->b_flags, B_ERROR);
+ } else {
+ CLR(bp->b_flags, B_ERROR);
+ }
}
void
-buf_clearflags(buf_t bp, int32_t flags) {
+buf_setflags(buf_t bp, int32_t flags)
+{
+ SET(bp->b_flags, (flags & BUF_X_WRFLAGS));
+}
- CLR(bp->b_flags, (flags & BUF_X_WRFLAGS));
+void
+buf_clearflags(buf_t bp, int32_t flags)
+{
+ CLR(bp->b_flags, (flags & BUF_X_WRFLAGS));
}
int32_t
-buf_flags(buf_t bp) {
-
- return ((bp->b_flags & BUF_X_RDFLAGS));
+buf_flags(buf_t bp)
+{
+ return bp->b_flags & BUF_X_RDFLAGS;
}
void
-buf_reset(buf_t bp, int32_t io_flags) {
-
- CLR(bp->b_flags, (B_READ | B_WRITE | B_ERROR | B_DONE | B_INVAL | B_ASYNC | B_NOCACHE | B_FUA));
+buf_reset(buf_t bp, int32_t io_flags)
+{
+ CLR(bp->b_flags, (B_READ | B_WRITE | B_ERROR | B_DONE | B_INVAL | B_ASYNC | B_NOCACHE | B_FUA));
SET(bp->b_flags, (io_flags & (B_ASYNC | B_READ | B_WRITE | B_NOCACHE)));
bp->b_error = 0;
}
uint32_t
-buf_count(buf_t bp) {
-
- return (bp->b_bcount);
+buf_count(buf_t bp)
+{
+ return bp->b_bcount;
}
void
-buf_setcount(buf_t bp, uint32_t bcount) {
-
- bp->b_bcount = bcount;
+buf_setcount(buf_t bp, uint32_t bcount)
+{
+ bp->b_bcount = bcount;
}
uint32_t
-buf_size(buf_t bp) {
-
- return (bp->b_bufsize);
+buf_size(buf_t bp)
+{
+ return bp->b_bufsize;
}
void
-buf_setsize(buf_t bp, uint32_t bufsize) {
-
- bp->b_bufsize = bufsize;
+buf_setsize(buf_t bp, uint32_t bufsize)
+{
+ bp->b_bufsize = bufsize;
}
uint32_t
-buf_resid(buf_t bp) {
-
- return (bp->b_resid);
+buf_resid(buf_t bp)
+{
+ return bp->b_resid;
}
void
-buf_setresid(buf_t bp, uint32_t resid) {
-
- bp->b_resid = resid;
+buf_setresid(buf_t bp, uint32_t resid)
+{
+ bp->b_resid = resid;
}
uint32_t
-buf_dirtyoff(buf_t bp) {
-
- return (bp->b_dirtyoff);
+buf_dirtyoff(buf_t bp)
+{
+ return bp->b_dirtyoff;
}
uint32_t
-buf_dirtyend(buf_t bp) {
-
- return (bp->b_dirtyend);
+buf_dirtyend(buf_t bp)
+{
+ return bp->b_dirtyend;
}
void
-buf_setdirtyoff(buf_t bp, uint32_t dirtyoff) {
-
- bp->b_dirtyoff = dirtyoff;
+buf_setdirtyoff(buf_t bp, uint32_t dirtyoff)
+{
+ bp->b_dirtyoff = dirtyoff;
}
void
-buf_setdirtyend(buf_t bp, uint32_t dirtyend) {
-
- bp->b_dirtyend = dirtyend;
+buf_setdirtyend(buf_t bp, uint32_t dirtyend)
+{
+ bp->b_dirtyend = dirtyend;
}
uintptr_t
-buf_dataptr(buf_t bp) {
-
- return (bp->b_datap);
+buf_dataptr(buf_t bp)
+{
+ return bp->b_datap;
}
void
-buf_setdataptr(buf_t bp, uintptr_t data) {
-
- bp->b_datap = data;
+buf_setdataptr(buf_t bp, uintptr_t data)
+{
+ bp->b_datap = data;
}
vnode_t
-buf_vnode(buf_t bp) {
-
- return (bp->b_vp);
+buf_vnode(buf_t bp)
+{
+ return bp->b_vp;
}
void
-buf_setvnode(buf_t bp, vnode_t vp) {
-
- bp->b_vp = vp;
+buf_setvnode(buf_t bp, vnode_t vp)
+{
+ bp->b_vp = vp;
}
void *
buf_callback(buf_t bp)
{
- if ( !(bp->b_flags & B_CALL) )
- return ((void *) NULL);
+ if (!(bp->b_flags & B_CALL)) {
+ return (void *) NULL;
+ }
- return ((void *)bp->b_iodone);
+ return (void *)bp->b_iodone;
}
{
assert(!ISSET(bp->b_flags, B_FILTER) && ISSET(bp->b_lflags, BL_BUSY));
- if (callback)
- bp->b_flags |= (B_CALL | B_ASYNC);
- else
- bp->b_flags &= ~B_CALL;
+ if (callback) {
+ bp->b_flags |= (B_CALL | B_ASYNC);
+ } else {
+ bp->b_flags &= ~B_CALL;
+ }
bp->b_transaction = transaction;
bp->b_iodone = callback;
- return (0);
+ return 0;
}
errno_t
buf_setupl(buf_t bp, upl_t upl, uint32_t offset)
{
+ if (!(bp->b_lflags & BL_IOBUF)) {
+ return EINVAL;
+ }
- if ( !(bp->b_lflags & BL_IOBUF) )
- return (EINVAL);
-
- if (upl)
- bp->b_flags |= B_CLUSTER;
- else
- bp->b_flags &= ~B_CLUSTER;
+ if (upl) {
+ bp->b_flags |= B_CLUSTER;
+ } else {
+ bp->b_flags &= ~B_CLUSTER;
+ }
bp->b_upl = upl;
bp->b_uploffset = offset;
- return (0);
+ return 0;
}
buf_t
buf_clone(buf_t bp, int io_offset, int io_size, void (*iodone)(buf_t, void *), void *arg)
{
- buf_t io_bp;
+ buf_t io_bp;
- if (io_offset < 0 || io_size < 0)
- return (NULL);
+ if (io_offset < 0 || io_size < 0) {
+ return NULL;
+ }
- if ((unsigned)(io_offset + io_size) > (unsigned)bp->b_bcount)
- return (NULL);
+ if ((unsigned)(io_offset + io_size) > (unsigned)bp->b_bcount) {
+ return NULL;
+ }
if (bp->b_flags & B_CLUSTER) {
- if (io_offset && ((bp->b_uploffset + io_offset) & PAGE_MASK))
- return (NULL);
+ if (io_offset && ((bp->b_uploffset + io_offset) & PAGE_MASK)) {
+ return NULL;
+ }
- if (((bp->b_uploffset + io_offset + io_size) & PAGE_MASK) && ((io_offset + io_size) < bp->b_bcount))
- return (NULL);
+ if (((bp->b_uploffset + io_offset + io_size) & PAGE_MASK) && ((io_offset + io_size) < bp->b_bcount)) {
+ return NULL;
+ }
}
io_bp = alloc_io_buf(bp->b_vp, 0);
io_bp->b_flags = bp->b_flags & (B_COMMIT_UPL | B_META | B_PAGEIO | B_CLUSTER | B_PHYS | B_RAW | B_ASYNC | B_READ | B_FUA);
if (iodone) {
- io_bp->b_transaction = arg;
+ io_bp->b_transaction = arg;
io_bp->b_iodone = iodone;
io_bp->b_flags |= B_CALL;
}
if (bp->b_flags & B_CLUSTER) {
- io_bp->b_upl = bp->b_upl;
+ io_bp->b_upl = bp->b_upl;
io_bp->b_uploffset = bp->b_uploffset + io_offset;
} else {
- io_bp->b_datap = (uintptr_t)(((char *)bp->b_datap) + io_offset);
+ io_bp->b_datap = (uintptr_t)(((char *)bp->b_datap) + io_offset);
}
io_bp->b_bcount = io_size;
- return (io_bp);
+ return io_bp;
}
int
buf_shadow(buf_t bp)
{
- if (bp->b_lflags & BL_SHADOW)
+ if (bp->b_lflags & BL_SHADOW) {
return 1;
+ }
return 0;
}
buf_t
buf_create_shadow_priv(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg)
{
- return (buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 1));
+ return buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 1);
}
buf_t
buf_create_shadow(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg)
{
- return (buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 0));
+ return buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 0);
}
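/*
 * Create a shadow buffer that aliases an existing B_META buffer.
 * The parent's b_shadow_ref counts outstanding shadows; externally
 * supplied storage is flagged BL_EXTERNAL, otherwise the parent's
 * b_data_ref is bumped to record the extra user of its data store.
 */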
static buf_t
buf_create_shadow_internal(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv)
{
- buf_t io_bp;
+ buf_t io_bp;
KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_START, bp, 0, 0, 0, 0);
- if ( !(bp->b_flags & B_META) || (bp->b_lflags & BL_IOBUF)) {
-
+ if (!(bp->b_flags & B_META) || (bp->b_lflags & BL_IOBUF)) {
KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, 0, 0, 0, 0);
- return (NULL);
+ return NULL;
}
#ifdef BUF_MAKE_PRIVATE
- if (bp->b_shadow_ref && bp->b_data_ref == 0 && external_storage == 0)
+ if (bp->b_shadow_ref && bp->b_data_ref == 0 && external_storage == 0) {
panic("buf_create_shadow: %p is in the private state (%d, %d)", bp, bp->b_shadow_ref, bp->b_data_ref);
+ }
#endif
io_bp = alloc_io_buf(bp->b_vp, priv);
io_bp->b_lblkno = bp->b_lblkno;
if (iodone) {
- io_bp->b_transaction = arg;
+ io_bp->b_transaction = arg;
io_bp->b_iodone = iodone;
io_bp->b_flags |= B_CALL;
}
bp->b_shadow_ref++;
#ifdef BUF_MAKE_PRIVATE
- if (external_storage)
+ if (external_storage) {
io_bp->b_lflags |= BL_EXTERNAL;
- else
+ } else {
bp->b_data_ref++;
+ }
#endif
lck_mtx_unlock(buf_mtxp);
} else {
}
KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, io_bp, 0);
- return (io_bp);
+ return io_bp;
}
errno_t
buf_make_private(buf_t bp)
{
- buf_t ds_bp;
- buf_t t_bp;
+ buf_t ds_bp;
+ buf_t t_bp;
struct buf my_buf;
KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_START, bp, bp->b_shadow_ref, 0, 0, 0);
if (bp->b_shadow_ref == 0 || bp->b_data_ref == 0 || ISSET(bp->b_lflags, BL_SHADOW)) {
-
KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0);
- return (EINVAL);
+ return EINVAL;
}
my_buf.b_flags = B_META;
my_buf.b_datap = (uintptr_t)NULL;
lck_mtx_lock_spin(buf_mtxp);
for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) {
- if ( !ISSET(bp->b_lflags, BL_EXTERNAL))
+ if (!ISSET(bp->b_lflags, BL_EXTERNAL)) {
break;
+ }
}
ds_bp = t_bp;
- if (ds_bp == NULL && bp->b_data_ref)
+ if (ds_bp == NULL && bp->b_data_ref) {
panic("buf_make_private: b_data_ref != 0 && ds_bp == NULL");
+ }
- if (ds_bp && (bp->b_data_ref == 0 || bp->b_shadow_ref == 0))
+ if (ds_bp && (bp->b_data_ref == 0 || bp->b_shadow_ref == 0)) {
panic("buf_make_private: ref_count == 0 && ds_bp != NULL");
+ }
if (ds_bp == NULL) {
lck_mtx_unlock(buf_mtxp);
buf_free_meta_store(&my_buf);
KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0);
- return (EINVAL);
+ return EINVAL;
}
for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) {
- if ( !ISSET(t_bp->b_lflags, BL_EXTERNAL))
+ if (!ISSET(t_bp->b_lflags, BL_EXTERNAL)) {
t_bp->b_data_store = ds_bp;
+ }
}
ds_bp->b_data_ref = bp->b_data_ref;
lck_mtx_unlock(buf_mtxp);
KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, 0, 0);
- return (0);
+ return 0;
}
#endif
void
buf_setfilter(buf_t bp, void (*filter)(buf_t, void *), void *transaction,
- void (**old_iodone)(buf_t, void *), void **old_transaction)
+ void(**old_iodone)(buf_t, void *), void **old_transaction)
{
assert(ISSET(bp->b_lflags, BL_BUSY));
- if (old_iodone)
+ if (old_iodone) {
*old_iodone = bp->b_iodone;
- if (old_transaction)
+ }
+ if (old_transaction) {
*old_transaction = bp->b_transaction;
+ }
bp->b_transaction = transaction;
bp->b_iodone = filter;
- if (filter)
- bp->b_flags |= B_FILTER;
- else
- bp->b_flags &= ~B_FILTER;
+ if (filter) {
+ bp->b_flags |= B_FILTER;
+ } else {
+ bp->b_flags &= ~B_FILTER;
+ }
}
daddr64_t
-buf_blkno(buf_t bp) {
-
- return (bp->b_blkno);
+buf_blkno(buf_t bp)
+{
+ return bp->b_blkno;
}
daddr64_t
-buf_lblkno(buf_t bp) {
-
- return (bp->b_lblkno);
+buf_lblkno(buf_t bp)
+{
+ return bp->b_lblkno;
}
void
-buf_setblkno(buf_t bp, daddr64_t blkno) {
-
- bp->b_blkno = blkno;
+buf_setblkno(buf_t bp, daddr64_t blkno)
+{
+ bp->b_blkno = blkno;
}
void
-buf_setlblkno(buf_t bp, daddr64_t lblkno) {
-
- bp->b_lblkno = lblkno;
+buf_setlblkno(buf_t bp, daddr64_t lblkno)
+{
+ bp->b_lblkno = lblkno;
}
dev_t
-buf_device(buf_t bp) {
-
- return (bp->b_dev);
+buf_device(buf_t bp)
+{
+ return bp->b_dev;
}
errno_t
-buf_setdevice(buf_t bp, vnode_t vp) {
-
- if ((vp->v_type != VBLK) && (vp->v_type != VCHR))
- return EINVAL;
+buf_setdevice(buf_t bp, vnode_t vp)
+{
+ if ((vp->v_type != VBLK) && (vp->v_type != VCHR)) {
+ return EINVAL;
+ }
bp->b_dev = vp->v_rdev;
return 0;
void *
-buf_drvdata(buf_t bp) {
-
- return (bp->b_drvdata);
+buf_drvdata(buf_t bp)
+{
+ return bp->b_drvdata;
}
void
-buf_setdrvdata(buf_t bp, void *drvdata) {
-
- bp->b_drvdata = drvdata;
+buf_setdrvdata(buf_t bp, void *drvdata)
+{
+ bp->b_drvdata = drvdata;
}
void *
-buf_fsprivate(buf_t bp) {
-
- return (bp->b_fsprivate);
+buf_fsprivate(buf_t bp)
+{
+ return bp->b_fsprivate;
}
void
-buf_setfsprivate(buf_t bp, void *fsprivate) {
-
- bp->b_fsprivate = fsprivate;
+buf_setfsprivate(buf_t bp, void *fsprivate)
+{
+ bp->b_fsprivate = fsprivate;
}
kauth_cred_t
-buf_rcred(buf_t bp) {
-
- return (bp->b_rcred);
+buf_rcred(buf_t bp)
+{
+ return bp->b_rcred;
}
kauth_cred_t
-buf_wcred(buf_t bp) {
-
- return (bp->b_wcred);
+buf_wcred(buf_t bp)
+{
+ return bp->b_wcred;
}
void *
-buf_upl(buf_t bp) {
-
- return (bp->b_upl);
+buf_upl(buf_t bp)
+{
+ return bp->b_upl;
}
uint32_t
-buf_uploffset(buf_t bp) {
-
- return ((uint32_t)(bp->b_uploffset));
+buf_uploffset(buf_t bp)
+{
+ return (uint32_t)(bp->b_uploffset);
}
proc_t
-buf_proc(buf_t bp) {
-
- return (bp->b_proc);
+buf_proc(buf_t bp)
+{
+ return bp->b_proc;
}
errno_t
buf_map(buf_t bp, caddr_t *io_addr)
{
- buf_t real_bp;
- vm_offset_t vaddr;
- kern_return_t kret;
+ buf_t real_bp;
+ vm_offset_t vaddr;
+ kern_return_t kret;
- if ( !(bp->b_flags & B_CLUSTER)) {
- *io_addr = (caddr_t)bp->b_datap;
- return (0);
+ if (!(bp->b_flags & B_CLUSTER)) {
+ *io_addr = (caddr_t)bp->b_datap;
+ return 0;
}
real_bp = (buf_t)(bp->b_real_bp);
if (real_bp && real_bp->b_datap) {
- /*
+ /*
* b_real_bp is only valid if B_CLUSTER is SET
	 * if it's non-zero, then someone did a cluster_bp call
* if the backing physical pages were already mapped
* in before the call to cluster_bp (non-zero b_datap),
	 * then we just use that mapping
*/
- *io_addr = (caddr_t)real_bp->b_datap;
- return (0);
+ *io_addr = (caddr_t)real_bp->b_datap;
+ return 0;
}
kret = ubc_upl_map(bp->b_upl, &vaddr); /* Map it in */
if (kret != KERN_SUCCESS) {
- *io_addr = NULL;
+ *io_addr = NULL;
- return(ENOMEM);
+ return ENOMEM;
}
- vaddr += bp->b_uploffset;
+ vaddr += bp->b_uploffset;
*io_addr = (caddr_t)vaddr;
- return (0);
+ return 0;
}
errno_t
buf_unmap(buf_t bp)
{
- buf_t real_bp;
- kern_return_t kret;
+ buf_t real_bp;
+ kern_return_t kret;
- if ( !(bp->b_flags & B_CLUSTER))
- return (0);
+ if (!(bp->b_flags & B_CLUSTER)) {
+ return 0;
+ }
/*
* see buf_map for the explanation
*/
real_bp = (buf_t)(bp->b_real_bp);
- if (real_bp && real_bp->b_datap)
- return (0);
+ if (real_bp && real_bp->b_datap) {
+ return 0;
+ }
if ((bp->b_lflags & BL_IOBUF) &&
((bp->b_flags & (B_PAGEIO | B_READ)) != (B_PAGEIO | B_READ))) {
- /*
+ /*
* ignore pageins... the 'right' thing will
* happen due to the way we handle speculative
* clusters...
* will clear the reference bit that got
* turned on when we touched the mapping
*/
- bp->b_flags |= B_AGE;
+ bp->b_flags |= B_AGE;
}
kret = ubc_upl_unmap(bp->b_upl);
- if (kret != KERN_SUCCESS)
- return (EINVAL);
- return (0);
+ if (kret != KERN_SUCCESS) {
+ return EINVAL;
+ }
+ return 0;
}
void
-buf_clear(buf_t bp) {
- caddr_t baddr;
-
- if (buf_map(bp, &baddr) == 0) {
- bzero(baddr, bp->b_bcount);
+buf_clear(buf_t bp)
+{
+ caddr_t baddr;
+
+ if (buf_map(bp, &baddr) == 0) {
+ bzero(baddr, bp->b_bcount);
buf_unmap(bp);
}
bp->b_resid = 0;
static int
buf_strategy_fragmented(vnode_t devvp, buf_t bp, off_t f_offset, size_t contig_bytes)
{
- vnode_t vp = buf_vnode(bp);
- buf_t io_bp; /* For reading or writing a single block */
- int io_direction;
- int io_resid;
- size_t io_contig_bytes;
- daddr64_t io_blkno;
- int error = 0;
- int bmap_flags;
+ vnode_t vp = buf_vnode(bp);
+ buf_t io_bp; /* For reading or writing a single block */
+ int io_direction;
+ int io_resid;
+ size_t io_contig_bytes;
+ daddr64_t io_blkno;
+ int error = 0;
+ int bmap_flags;
/*
* save our starting point... the bp was already mapped
* i.e. this can never be a 'permanent' mapping
*/
bp->b_blkno = bp->b_lblkno;
-
+
/*
* Get an io buffer to do the deblocking
*/
io_bp->b_lblkno = bp->b_lblkno;
io_bp->b_datap = bp->b_datap;
- io_resid = bp->b_bcount;
- io_direction = bp->b_flags & B_READ;
+ io_resid = bp->b_bcount;
+ io_direction = bp->b_flags & B_READ;
io_contig_bytes = contig_bytes;
-
- if (bp->b_flags & B_READ)
- bmap_flags = VNODE_READ;
- else
- bmap_flags = VNODE_WRITE;
+
+ if (bp->b_flags & B_READ) {
+ bmap_flags = VNODE_READ;
+ } else {
+ bmap_flags = VNODE_WRITE;
+ }
for (;;) {
- if (io_blkno == -1)
- /*
+ if (io_blkno == -1) {
+ /*
			 * this is unexpected, but we'll allow for it
*/
- bzero((caddr_t)io_bp->b_datap, (int)io_contig_bytes);
- else {
- io_bp->b_bcount = io_contig_bytes;
+ bzero((caddr_t)io_bp->b_datap, (int)io_contig_bytes);
+ } else {
+ io_bp->b_bcount = io_contig_bytes;
io_bp->b_bufsize = io_contig_bytes;
io_bp->b_resid = io_contig_bytes;
io_bp->b_blkno = io_blkno;
* Call the device to do the I/O and wait for it. Make sure the appropriate party is charged for write
*/
- if (!ISSET(bp->b_flags, B_READ))
- OSAddAtomic(1, &devvp->v_numoutput);
+ if (!ISSET(bp->b_flags, B_READ)) {
+ OSAddAtomic(1, &devvp->v_numoutput);
+ }
- if ((error = VNOP_STRATEGY(io_bp)))
- break;
- if ((error = (int)buf_biowait(io_bp)))
- break;
+ if ((error = VNOP_STRATEGY(io_bp))) {
+ break;
+ }
+ if ((error = (int)buf_biowait(io_bp))) {
+ break;
+ }
if (io_bp->b_resid) {
- io_resid -= (io_contig_bytes - io_bp->b_resid);
+ io_resid -= (io_contig_bytes - io_bp->b_resid);
break;
}
}
- if ((io_resid -= io_contig_bytes) == 0)
- break;
+ if ((io_resid -= io_contig_bytes) == 0) {
+ break;
+ }
f_offset += io_contig_bytes;
io_bp->b_datap += io_contig_bytes;
/*
* Map the current position to a physical block number
*/
- if ((error = VNOP_BLOCKMAP(vp, f_offset, io_resid, &io_blkno, &io_contig_bytes, NULL, bmap_flags, NULL)))
- break;
+ if ((error = VNOP_BLOCKMAP(vp, f_offset, io_resid, &io_blkno, &io_contig_bytes, NULL, bmap_flags, NULL))) {
+ break;
+ }
}
buf_free(io_bp);
-
- if (error)
- buf_seterror(bp, error);
+
+ if (error) {
+ buf_seterror(bp, error);
+ }
bp->b_resid = io_resid;
/*
* This I/O is now complete
errno_t
buf_strategy(vnode_t devvp, void *ap)
{
- buf_t bp = ((struct vnop_strategy_args *)ap)->a_bp;
- vnode_t vp = bp->b_vp;
- int bmap_flags;
- errno_t error;
+ buf_t bp = ((struct vnop_strategy_args *)ap)->a_bp;
+ vnode_t vp = bp->b_vp;
+ int bmap_flags;
+ errno_t error;
#if CONFIG_DTRACE
- int dtrace_io_start_flag = 0; /* We only want to trip the io:::start
- * probe once, with the true physical
- * block in place (b_blkno)
- */
+ int dtrace_io_start_flag = 0; /* We only want to trip the io:::start
+ * probe once, with the true physical
+ * block in place (b_blkno)
+ */
-#endif
+#endif
- if (vp == NULL || vp->v_type == VCHR || vp->v_type == VBLK)
- panic("buf_strategy: b_vp == NULL || vtype == VCHR | VBLK\n");
+ if (vp == NULL || vp->v_type == VCHR || vp->v_type == VBLK) {
+ panic("buf_strategy: b_vp == NULL || vtype == VCHR | VBLK\n");
+ }
/*
* associate the physical device with
* with this buf_t even if we don't
*/
bp->b_dev = devvp->v_rdev;
- if (bp->b_flags & B_READ)
- bmap_flags = VNODE_READ;
- else
- bmap_flags = VNODE_WRITE;
-
- if ( !(bp->b_flags & B_CLUSTER)) {
+ if (bp->b_flags & B_READ) {
+ bmap_flags = VNODE_READ;
+ } else {
+ bmap_flags = VNODE_WRITE;
+ }
- if ( (bp->b_upl) ) {
- /*
+ if (!(bp->b_flags & B_CLUSTER)) {
+ if ((bp->b_upl)) {
+ /*
* we have a UPL associated with this bp
* go through cluster_bp which knows how
* to deal with filesystem block sizes
* that aren't equal to the page size
*/
DTRACE_IO1(start, buf_t, bp);
- return (cluster_bp(bp));
+ return cluster_bp(bp);
}
if (bp->b_blkno == bp->b_lblkno) {
- off_t f_offset;
- size_t contig_bytes;
-
+ off_t f_offset;
+ size_t contig_bytes;
+
if ((error = VNOP_BLKTOOFF(vp, bp->b_lblkno, &f_offset))) {
DTRACE_IO1(start, buf_t, bp);
- buf_seterror(bp, error);
+ buf_seterror(bp, error);
buf_biodone(bp);
- return (error);
+ return error;
}
- if ((error = VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))) {
+ if ((error = VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))) {
DTRACE_IO1(start, buf_t, bp);
- buf_seterror(bp, error);
+ buf_seterror(bp, error);
buf_biodone(bp);
- return (error);
+ return error;
}
DTRACE_IO1(start, buf_t, bp);
#if CONFIG_DTRACE
dtrace_io_start_flag = 1;
-#endif /* CONFIG_DTRACE */
+#endif /* CONFIG_DTRACE */
if ((bp->b_blkno == -1) || (contig_bytes == 0)) {
/* Set block number to force biodone later */
bp->b_blkno = -1;
- buf_clear(bp);
- }
- else if ((long)contig_bytes < bp->b_bcount) {
- return (buf_strategy_fragmented(devvp, bp, f_offset, contig_bytes));
+ buf_clear(bp);
+ } else if ((long)contig_bytes < bp->b_bcount) {
+ return buf_strategy_fragmented(devvp, bp, f_offset, contig_bytes);
}
}
-
+
#if CONFIG_DTRACE
if (dtrace_io_start_flag == 0) {
DTRACE_IO1(start, buf_t, bp);
dtrace_io_start_flag = 1;
}
#endif /* CONFIG_DTRACE */
-
+
if (bp->b_blkno == -1) {
- buf_biodone(bp);
- return (0);
+ buf_biodone(bp);
+ return 0;
}
}
#if CONFIG_DTRACE
- if (dtrace_io_start_flag == 0)
+ if (dtrace_io_start_flag == 0) {
DTRACE_IO1(start, buf_t, bp);
+ }
#endif /* CONFIG_DTRACE */
-
+
#if CONFIG_PROTECT
/* Capture f_offset in the bufattr*/
cpx_t cpx = bufattr_cpx(buf_attr(bp));
if (cpx) {
/* No need to go here for older EAs */
- if(cpx_use_offset_for_iv(cpx) && !cpx_synthetic_offset_for_iv(cpx)) {
+ if (cpx_use_offset_for_iv(cpx) && !cpx_synthetic_offset_for_iv(cpx)) {
off_t f_offset;
- if ((error = VNOP_BLKTOOFF(bp->b_vp, bp->b_lblkno, &f_offset)))
+ if ((error = VNOP_BLKTOOFF(bp->b_vp, bp->b_lblkno, &f_offset))) {
return error;
+ }
- /*
+ /*
* Attach the file offset to this buffer. The
* bufattr attributes will be passed down the stack
- * until they reach the storage driver (whether
+ * until they reach the storage driver (whether
* IOFlashStorage, ASP, or IONVMe). The driver
* will retain the offset in a local variable when it
- * issues its I/Os to the NAND controller.
- *
- * Note that LwVM may end up splitting this I/O
+ * issues its I/Os to the NAND controller.
+ *
+ * Note that LwVM may end up splitting this I/O
* into sub-I/Os if it crosses a chunk boundary. In this
* case, LwVM will update this field when it dispatches
* each I/O to IOFlashStorage. But from our perspective
* we have only issued a single I/O.
*
- * In the case of APFS we do not bounce through another
+ * In the case of APFS we do not bounce through another
* intermediate layer (such as CoreStorage). APFS will
* issue the I/Os directly to the block device / IOMedia
- * via buf_strategy on the specfs node.
+ * via buf_strategy on the specfs node.
*/
buf_setcpoff(bp, f_offset);
CP_DEBUG((CPDBG_OFFSET_IO | DBG_FUNC_NONE), (uint32_t) f_offset, (uint32_t) bp->b_lblkno, (uint32_t) bp->b_blkno, (uint32_t) bp->b_bcount, 0);
*/
error = VOCALL(devvp->v_op, VOFFSET(vnop_strategy), ap);
DTRACE_FSINFO(strategy, vnode_t, vp);
- return (error);
+ return error;
}
buf_t
buf_alloc(vnode_t vp)
{
- return(alloc_io_buf(vp, is_vm_privileged()));
+ return alloc_io_buf(vp, is_vm_privileged());
}
void
-buf_free(buf_t bp) {
-
- free_io_buf(bp);
+buf_free(buf_t bp)
+{
+ free_io_buf(bp);
}
/*
- * iterate buffers for the specified vp.
+ * iterate buffers for the specified vp.
* if BUF_SCAN_DIRTY is set, do the dirty list
* if BUF_SCAN_CLEAN is set, do the clean list
* if neither flag is set, default to BUF_SCAN_DIRTY
void
buf_iterate(vnode_t vp, int (*callout)(buf_t, void *), int flags, void *arg)
{
- buf_t bp;
- int retval;
- struct buflists local_iterblkhd;
- int lock_flags = BAC_NOWAIT | BAC_REMOVE;
- int notify_busy = flags & BUF_NOTIFY_BUSY;
+ buf_t bp;
+ int retval;
+ struct buflists local_iterblkhd;
+ int lock_flags = BAC_NOWAIT | BAC_REMOVE;
+ int notify_busy = flags & BUF_NOTIFY_BUSY;
struct buf_iterate_info_t list[2];
- int num_lists, i;
+ int num_lists, i;
- if (flags & BUF_SKIP_LOCKED)
- lock_flags |= BAC_SKIP_LOCKED;
- if (flags & BUF_SKIP_NONLOCKED)
- lock_flags |= BAC_SKIP_NONLOCKED;
+ if (flags & BUF_SKIP_LOCKED) {
+ lock_flags |= BAC_SKIP_LOCKED;
+ }
+ if (flags & BUF_SKIP_NONLOCKED) {
+ lock_flags |= BAC_SKIP_NONLOCKED;
+ }
- if ( !(flags & (BUF_SCAN_DIRTY | BUF_SCAN_CLEAN)))
- flags |= BUF_SCAN_DIRTY;
+ if (!(flags & (BUF_SCAN_DIRTY | BUF_SCAN_CLEAN))) {
+ flags |= BUF_SCAN_DIRTY;
+ }
num_lists = 0;
if (flags & BUF_SCAN_DIRTY) {
- list[num_lists].flag = VBI_DIRTY;
+ list[num_lists].flag = VBI_DIRTY;
list[num_lists].listhead = &vp->v_dirtyblkhd;
num_lists++;
}
for (i = 0; i < num_lists; i++) {
lck_mtx_lock(buf_mtxp);
-
- if (buf_iterprepare(vp, &local_iterblkhd, list[i].flag)) {
+
+ if (buf_iterprepare(vp, &local_iterblkhd, list[i].flag)) {
lck_mtx_unlock(buf_mtxp);
continue;
}
switch (retval) {
case BUF_RETURNED:
- if (bp)
+ if (bp) {
buf_brelse(bp);
+ }
break;
case BUF_CLAIMED:
break;
case BUF_RETURNED_DONE:
- if (bp)
+ if (bp) {
buf_brelse(bp);
+ }
lck_mtx_lock(buf_mtxp);
goto out;
case BUF_CLAIMED_DONE:
}
lck_mtx_lock(buf_mtxp);
} /* while list has more nodes */
- out:
+out:
buf_itercomplete(vp, &local_iterblkhd, list[i].flag);
lck_mtx_unlock(buf_mtxp);
} /* for each list */
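/*
 * Invalidate all buffers associated with a vnode, walking both the clean
 * and the dirty lists.  BUF_WRITE_DATA pushes dirty data to disk first,
 * BUF_SKIP_META leaves metadata buffers alone, and any busy buffer that
 * forces buf_mtxp to be dropped triggers a rescan of both lists.
 */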
int
buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo)
{
- buf_t bp;
- int aflags;
- int error = 0;
- int must_rescan = 1;
- struct buflists local_iterblkhd;
+ buf_t bp;
+ int aflags;
+ int error = 0;
+ int must_rescan = 1;
+ struct buflists local_iterblkhd;
- if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd))
- return (0);
+ if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd)) {
+ return 0;
+ }
lck_mtx_lock(buf_mtxp);
for (;;) {
- if (must_rescan == 0)
- /*
+ if (must_rescan == 0) {
+ /*
* the lists may not be empty, but all that's left at this
* point are metadata or B_LOCKED buffers which are being
* skipped... we know this because we made it through both
* the clean and dirty lists without dropping buf_mtxp...
* each time we drop buf_mtxp we bump "must_rescan"
*/
- break;
- if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd))
- break;
+ break;
+ }
+ if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd)) {
+ break;
+ }
must_rescan = 0;
/*
* iterate the clean list
*/
if (buf_iterprepare(vp, &local_iterblkhd, VBI_CLEAN)) {
- goto try_dirty_list;
+ goto try_dirty_list;
}
while (!LIST_EMPTY(&local_iterblkhd)) {
-
bp = LIST_FIRST(&local_iterblkhd);
LIST_REMOVE(bp, b_vnbufs);
/*
* some filesystems distinguish meta data blocks with a negative logical block #
*/
- if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META)))
+ if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META))) {
continue;
+ }
aflags = BAC_REMOVE;
- if ( !(flags & BUF_INVALIDATE_LOCKED) )
+ if (!(flags & BUF_INVALIDATE_LOCKED)) {
aflags |= BAC_SKIP_LOCKED;
+ }
- if ( (error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo)) ) {
- if (error == EDEADLK)
- /*
- * this buffer was marked B_LOCKED...
+ if ((error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo))) {
+ if (error == EDEADLK) {
+ /*
+ * this buffer was marked B_LOCKED...
				 * we didn't drop buf_mtxp, so
* we don't need to rescan
*/
- continue;
- if (error == EAGAIN) {
- /*
+ continue;
+ }
+ if (error == EAGAIN) {
+ /*
* found a busy buffer... we blocked and
* dropped buf_mtxp, so we're going to
* need to rescan after this pass is completed
*/
- must_rescan++;
- continue;
+ must_rescan++;
+ continue;
}
/*
* got some kind of 'real' error out of the msleep
buf_itercomplete(vp, &local_iterblkhd, VBI_CLEAN);
lck_mtx_unlock(buf_mtxp);
- return (error);
+ return error;
}
lck_mtx_unlock(buf_mtxp);
- if (bp->b_flags & B_LOCKED)
+ if (bp->b_flags & B_LOCKED) {
KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 0, 0);
+ }
CLR(bp->b_flags, B_LOCKED);
SET(bp->b_flags, B_INVAL);
/*
* some filesystems distinguish meta data blocks with a negative logical block #
*/
- if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META)))
+ if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META))) {
continue;
+ }
aflags = BAC_REMOVE;
- if ( !(flags & BUF_INVALIDATE_LOCKED) )
+ if (!(flags & BUF_INVALIDATE_LOCKED)) {
aflags |= BAC_SKIP_LOCKED;
+ }
- if ( (error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo)) ) {
- if (error == EDEADLK)
- /*
- * this buffer was marked B_LOCKED...
+ if ((error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo))) {
+ if (error == EDEADLK) {
+ /*
+ * this buffer was marked B_LOCKED...
				 * we didn't drop buf_mtxp, so
* we don't need to rescan
*/
- continue;
- if (error == EAGAIN) {
- /*
+ continue;
+ }
+ if (error == EAGAIN) {
+ /*
* found a busy buffer... we blocked and
* dropped buf_mtxp, so we're going to
* need to rescan after this pass is completed
*/
- must_rescan++;
- continue;
+ must_rescan++;
+ continue;
}
/*
* got some kind of 'real' error out of the msleep
buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);
lck_mtx_unlock(buf_mtxp);
- return (error);
+ return error;
}
lck_mtx_unlock(buf_mtxp);
- if (bp->b_flags & B_LOCKED)
+ if (bp->b_flags & B_LOCKED) {
KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 1, 0);
+ }
CLR(bp->b_flags, B_LOCKED);
SET(bp->b_flags, B_INVAL);
- if (ISSET(bp->b_flags, B_DELWRI) && (flags & BUF_WRITE_DATA))
+ if (ISSET(bp->b_flags, B_DELWRI) && (flags & BUF_WRITE_DATA)) {
(void) VNOP_BWRITE(bp);
- else
+ } else {
buf_brelse(bp);
+ }
lck_mtx_lock(buf_mtxp);
/*
}
lck_mtx_unlock(buf_mtxp);
- return (0);
+ return 0;
}
void
-buf_flushdirtyblks(vnode_t vp, int wait, int flags, const char *msg) {
-
+buf_flushdirtyblks(vnode_t vp, int wait, int flags, const char *msg)
+{
(void) buf_flushdirtyblks_skipinfo(vp, wait, flags, msg);
return;
}
int
-buf_flushdirtyblks_skipinfo(vnode_t vp, int wait, int flags, const char *msg) {
- buf_t bp;
- int writes_issued = 0;
- errno_t error;
- int busy = 0;
- struct buflists local_iterblkhd;
- int lock_flags = BAC_NOWAIT | BAC_REMOVE;
+buf_flushdirtyblks_skipinfo(vnode_t vp, int wait, int flags, const char *msg)
+{
+ buf_t bp;
+ int writes_issued = 0;
+ errno_t error;
+ int busy = 0;
+ struct buflists local_iterblkhd;
+ int lock_flags = BAC_NOWAIT | BAC_REMOVE;
int any_locked = 0;
- if (flags & BUF_SKIP_LOCKED)
- lock_flags |= BAC_SKIP_LOCKED;
- if (flags & BUF_SKIP_NONLOCKED)
- lock_flags |= BAC_SKIP_NONLOCKED;
+ if (flags & BUF_SKIP_LOCKED) {
+ lock_flags |= BAC_SKIP_LOCKED;
+ }
+ if (flags & BUF_SKIP_NONLOCKED) {
+ lock_flags |= BAC_SKIP_NONLOCKED;
+ }
loop:
lck_mtx_lock(buf_mtxp);
- if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY) == 0) {
- while (!LIST_EMPTY(&local_iterblkhd)) {
+ if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY) == 0) {
+ while (!LIST_EMPTY(&local_iterblkhd)) {
bp = LIST_FIRST(&local_iterblkhd);
LIST_REMOVE(bp, b_vnbufs);
LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs);
busy++;
}
if (error) {
- /*
+ /*
* If we passed in BUF_SKIP_LOCKED or BUF_SKIP_NONLOCKED,
			 * we may want to do something differently if a locked or unlocked
* buffer was encountered (depending on the arg specified).
* In this case, we know that one of those two was set, and the
- * buf acquisition failed above.
- *
+ * buf acquisition failed above.
+ *
* If it failed with EDEADLK, then save state which can be emitted
* later on to the caller. Most callers should not care.
*/
* Wait for I/O associated with indirect blocks to complete,
* since there is no way to quickly wait for them below.
*/
- if ((bp->b_vp == vp) || (wait == 0))
- (void) buf_bawrite(bp);
- else
- (void) VNOP_BWRITE(bp);
+ if ((bp->b_vp == vp) || (wait == 0)) {
+ (void) buf_bawrite(bp);
+ } else {
+ (void) VNOP_BWRITE(bp);
+ }
writes_issued++;
lck_mtx_lock(buf_mtxp);
buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);
}
lck_mtx_unlock(buf_mtxp);
-
+
if (wait) {
- (void)vnode_waitforwrites(vp, 0, 0, 0, msg);
+ (void)vnode_waitforwrites(vp, 0, 0, 0, msg);
if (vp->v_dirtyblkhd.lh_first && busy) {
- /*
+ /*
* we had one or more BUSY buffers on
* the dirtyblock list... most likely
* these are due to delayed writes that
* were moved to the bclean queue but
* have not yet been 'written'.
- * if we issued some writes on the
+ * if we issued some writes on the
* previous pass, we try again immediately
* if we didn't, we'll sleep for some time
* to allow the state to change...
*/
- if (writes_issued == 0) {
- (void)tsleep((caddr_t)&vp->v_numoutput,
- PRIBIO + 1, "vnode_flushdirtyblks", hz/20);
+ if (writes_issued == 0) {
+ (void)tsleep((caddr_t)&vp->v_numoutput,
+ PRIBIO + 1, "vnode_flushdirtyblks", hz / 20);
}
writes_issued = 0;
busy = 0;
{
struct buflists * listheadp;
- if (flags & VBI_DIRTY)
+ if (flags & VBI_DIRTY) {
listheadp = &vp->v_dirtyblkhd;
- else
+ } else {
listheadp = &vp->v_cleanblkhd;
-
- while (vp->v_iterblkflags & VBI_ITER) {
- vp->v_iterblkflags |= VBI_ITERWANT;
- msleep(&vp->v_iterblkflags, buf_mtxp, 0, "buf_iterprepare", NULL);
+ }
+
+ while (vp->v_iterblkflags & VBI_ITER) {
+ vp->v_iterblkflags |= VBI_ITERWANT;
+ msleep(&vp->v_iterblkflags, buf_mtxp, 0, "buf_iterprepare", NULL);
}
if (LIST_EMPTY(listheadp)) {
- LIST_INIT(iterheadp);
- return(EINVAL);
+ LIST_INIT(iterheadp);
+ return EINVAL;
}
vp->v_iterblkflags |= VBI_ITER;
iterheadp->lh_first = listheadp->lh_first;
- listheadp->lh_first->b_vnbufs.le_prev = &iterheadp->lh_first;
+ listheadp->lh_first->b_vnbufs.le_prev = &iterheadp->lh_first;
LIST_INIT(listheadp);
- return(0);
+ return 0;
}
/*
struct buflists * listheadp;
buf_t bp;
- if (flags & VBI_DIRTY)
+ if (flags & VBI_DIRTY) {
listheadp = &vp->v_dirtyblkhd;
- else
+ } else {
listheadp = &vp->v_cleanblkhd;
+ }
while (!LIST_EMPTY(iterheadp)) {
bp = LIST_FIRST(iterheadp);
}
vp->v_iterblkflags &= ~VBI_ITER;
- if (vp->v_iterblkflags & VBI_ITERWANT) {
+ if (vp->v_iterblkflags & VBI_ITERWANT) {
vp->v_iterblkflags &= ~VBI_ITERWANT;
wakeup(&vp->v_iterblkflags);
}
whichq = bp->b_whichq;
if (whichq == -1) {
- if (bp->b_shadow_ref == 0)
+ if (bp->b_shadow_ref == 0) {
panic("bremfree_locked: %p not on freelist", bp);
+ }
/*
* there are clones pointing to 'bp'...
* therefore, it was not put on a freelist
* NB: This makes an assumption about how tailq's are implemented.
*/
if (bp->b_freelist.tqe_next == NULL) {
- dp = &bufqueues[whichq];
+ dp = &bufqueues[whichq];
- if (dp->tqh_last != &bp->b_freelist.tqe_next)
+ if (dp->tqh_last != &bp->b_freelist.tqe_next) {
panic("bremfree: lost tail");
+ }
}
TAILQ_REMOVE(dp, bp, b_freelist);
- if (whichq == BQ_LAUNDRY)
- blaundrycnt--;
+ if (whichq == BQ_LAUNDRY) {
+ blaundrycnt--;
+ }
bp->b_whichq = -1;
- bp->b_timestamp = 0;
+ bp->b_timestamp = 0;
bp->b_shadow = 0;
}
static void
bgetvp_locked(vnode_t vp, buf_t bp)
{
-
- if (bp->b_vp != vp)
+ if (bp->b_vp != vp) {
panic("bgetvp_locked: not free");
+ }
- if (vp->v_type == VBLK || vp->v_type == VCHR)
+ if (vp->v_type == VBLK || vp->v_type == VCHR) {
bp->b_dev = vp->v_rdev;
- else
+ } else {
bp->b_dev = NODEV;
+ }
/*
* Insert onto list for new vnode.
*/
/*
* Delete from old vnode list, if on one.
*/
- if (bp->b_vnbufs.le_next != NOLIST)
+ if (bp->b_vnbufs.le_next != NOLIST) {
bufremvn(bp);
+ }
bp->b_vp = (vnode_t)NULL;
}
/*
* Delete from old vnode list, if on one.
*/
- if (bp->b_vnbufs.le_next != NOLIST)
+ if (bp->b_vnbufs.le_next != NOLIST) {
bufremvn(bp);
+ }
/*
* If dirty, put on list of dirty buffers;
* otherwise insert onto list of clean buffers.
*/
- if (ISSET(bp->b_flags, B_DELWRI))
+ if (ISSET(bp->b_flags, B_DELWRI)) {
listheadp = &newvp->v_dirtyblkhd;
- else
+ } else {
listheadp = &newvp->v_cleanblkhd;
+ }
bufinsvn(bp, listheadp);
lck_mtx_unlock(buf_mtxp);
__private_extern__ void
bufinit(void)
{
- buf_t bp;
+ buf_t bp;
struct bqueues *dp;
- int i;
+ int i;
nbuf_headers = 0;
/* Initialize the buffer queues ('freelists') and the hash table */
- for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
+ for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) {
TAILQ_INIT(dp);
+ }
bufhashtbl = hashinit(nbuf_hashelements, M_CACHE, &bufhash);
buf_busycount = 0;
*/
buf_mtx_grp_attr = lck_grp_attr_alloc_init();
buf_mtx_grp = lck_grp_alloc_init("buffer cache", buf_mtx_grp_attr);
-
+
/*
* allocate the lock attribute
*/
/*
* allocate and initialize mutex's for the buffer and iobuffer pools
*/
- buf_mtxp = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr);
- iobuffer_mtxp = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr);
+ buf_mtxp = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr);
+ iobuffer_mtxp = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr);
buf_gc_callout = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr);
- if (iobuffer_mtxp == NULL)
- panic("couldn't create iobuffer mutex");
+ if (iobuffer_mtxp == NULL) {
+ panic("couldn't create iobuffer mutex");
+ }
- if (buf_mtxp == NULL)
- panic("couldn't create buf mutex");
+ if (buf_mtxp == NULL) {
+ panic("couldn't create buf mutex");
+ }
- if (buf_gc_callout == NULL)
+ if (buf_gc_callout == NULL) {
panic("couldn't create buf_gc_callout mutex");
+ }
/*
* allocate and initialize cluster specific global locks...
cluster_init();
printf("using %d buffer headers and %d cluster IO buffer headers\n",
- nbuf_headers, niobuf_headers);
+ nbuf_headers, niobuf_headers);
/* Set up zones used by the buffer cache */
bufzoneinit();
if (vm_set_buffer_cleanup_callout(buffer_cache_gc) != KERN_SUCCESS) {
panic("Couldn't register buffer cache callout for vm pressure!\n");
}
-
}
/*
};
struct meta_zone_entry meta_zones[] = {
- {NULL, (MINMETA * 1), 128 * (MINMETA * 1), "buf.512" },
- {NULL, (MINMETA * 2), 64 * (MINMETA * 2), "buf.1024" },
- {NULL, (MINMETA * 4), 16 * (MINMETA * 4), "buf.2048" },
- {NULL, (MINMETA * 8), 512 * (MINMETA * 8), "buf.4096" },
- {NULL, (MINMETA * 16), 512 * (MINMETA * 16), "buf.8192" },
- {NULL, (MINMETA * 32), 512 * (MINMETA * 32), "buf.16384" },
- {NULL, 0, 0, "" } /* End */
+ {.mz_zone = NULL, .mz_size = (MINMETA * 1), .mz_max = 128 * (MINMETA * 1), .mz_name = "buf.512" },
+ {.mz_zone = NULL, .mz_size = (MINMETA * 2), .mz_max = 64 * (MINMETA * 2), .mz_name = "buf.1024" },
+ {.mz_zone = NULL, .mz_size = (MINMETA * 4), .mz_max = 16 * (MINMETA * 4), .mz_name = "buf.2048" },
+ {.mz_zone = NULL, .mz_size = (MINMETA * 8), .mz_max = 512 * (MINMETA * 8), .mz_name = "buf.4096" },
+ {.mz_zone = NULL, .mz_size = (MINMETA * 16), .mz_max = 512 * (MINMETA * 16), .mz_name = "buf.8192" },
+ {.mz_zone = NULL, .mz_size = (MINMETA * 32), .mz_max = 512 * (MINMETA * 32), .mz_name = "buf.16384" },
+ {.mz_zone = NULL, .mz_size = 0, .mz_max = 0, .mz_name = "" } /* End */
};
/*
int i;
for (i = 0; meta_zones[i].mz_size != 0; i++) {
- meta_zones[i].mz_zone =
- zinit(meta_zones[i].mz_size,
- meta_zones[i].mz_max,
- PAGE_SIZE,
- meta_zones[i].mz_name);
+ meta_zones[i].mz_zone =
+ zinit(meta_zones[i].mz_size,
+ meta_zones[i].mz_max,
+ PAGE_SIZE,
+ meta_zones[i].mz_name);
zone_change(meta_zones[i].mz_zone, Z_CALLERACCT, FALSE);
}
buf_hdr_zone = zinit(sizeof(struct buf), 32, PAGE_SIZE, "buf headers");
{
int i;
- if ((size % 512) || (size < MINMETA) || (size > MAXMETA))
+ if ((size % 512) || (size < MINMETA) || (size > MAXMETA)) {
panic("getbufzone: incorect size = %lu", size);
+ }
for (i = 0; meta_zones[i].mz_size != 0; i++) {
- if (meta_zones[i].mz_size >= size)
+ if (meta_zones[i].mz_size >= size) {
break;
+ }
}
- return (meta_zones[i].mz_zone);
+ return meta_zones[i].mz_zone;
}
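/*
 * Common read helper for buf_bread(), buf_meta_bread() and the
 * read-ahead paths: get a buffer for the block and, on a cache miss,
 * start the read (asynchronously if requested), charging it to the
 * current process.
 */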
static struct buf *
bio_doread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, int async, int queuetype)
{
- buf_t bp;
+ buf_t bp;
bp = buf_getblk(vp, blkno, size, 0, 0, queuetype);
trace(TR_BREADMISS, pack(vp, size), blkno);
/* Pay for the read. */
- if (p && p->p_stats) {
- OSIncrementAtomicLong(&p->p_stats->p_ru.ru_inblock); /* XXX */
+ if (p && p->p_stats) {
+ OSIncrementAtomicLong(&p->p_stats->p_ru.ru_inblock); /* XXX */
}
if (async) {
- /*
+ /*
* since we asked for an ASYNC I/O
* the biodone will do the brelse
* we don't want to pass back a bp
* that we don't 'own'
*/
- bp = NULL;
+ bp = NULL;
}
} else if (async) {
buf_brelse(bp);
trace(TR_BREADHIT, pack(vp, size), blkno);
- return (bp);
+ return bp;
}
/*
- * Perform the reads for buf_breadn() and buf_meta_breadn().
- * Trivial modification to the breada algorithm presented in Bach (p.55).
+ * Perform the reads for buf_breadn() and buf_meta_breadn().
+ * Trivial modification to the breada algorithm presented in Bach (p.55).
*/
static errno_t
-do_breadn_for_type(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes,
- int nrablks, kauth_cred_t cred, buf_t *bpp, int queuetype)
+do_breadn_for_type(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes,
+ int nrablks, kauth_cred_t cred, buf_t *bpp, int queuetype)
{
- buf_t bp;
- int i;
+ buf_t bp;
+ int i;
bp = *bpp = bio_doread(vp, blkno, size, cred, 0, queuetype);
*/
for (i = 0; i < nrablks; i++) {
/* If it's in the cache, just go on to next one. */
- if (incore(vp, rablks[i]))
+ if (incore(vp, rablks[i])) {
continue;
+ }
/* Get a buffer for the read-ahead block */
(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC, queuetype);
}
/* Otherwise, we had to start a read for it; wait until it's valid. */
- return (buf_biowait(bp));
+ return buf_biowait(bp);
}
errno_t
buf_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp)
{
- buf_t bp;
+ buf_t bp;
/* Get buffer for block. */
bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_READ);
/* Wait for the read to complete, and return result. */
- return (buf_biowait(bp));
+ return buf_biowait(bp);
}
/*
errno_t
buf_meta_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp)
{
- buf_t bp;
+ buf_t bp;
/* Get buffer for block. */
bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_META);
/* Wait for the read to complete, and return result. */
- return (buf_biowait(bp));
+ return buf_biowait(bp);
}
/*
errno_t
buf_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp)
{
- return (do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_READ));
+ return do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_READ);
}
/*
errno_t
buf_meta_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp)
{
- return (do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_META));
+ return do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_META);
}
/*
errno_t
buf_bwrite(buf_t bp)
{
- int sync, wasdelayed;
- errno_t rv;
- proc_t p = current_proc();
- vnode_t vp = bp->b_vp;
+ int sync, wasdelayed;
+ errno_t rv;
+ proc_t p = current_proc();
+ vnode_t vp = bp->b_vp;
if (bp->b_datap == 0) {
- if (brecover_data(bp) == 0)
- return (0);
+ if (brecover_data(bp) == 0) {
+ return 0;
+ }
}
/* Remember buffer type, to switch on it later. */
sync = !ISSET(bp->b_flags, B_ASYNC);
wasdelayed = ISSET(bp->b_flags, B_DELWRI);
CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));
- if (wasdelayed)
+ if (wasdelayed) {
OSAddAtomicLong(-1, &nbdwrite);
+ }
if (!sync) {
/*
* to do this now, because if we don't, the vnode may not
* be properly notified that its I/O has completed.
*/
- if (wasdelayed)
+ if (wasdelayed) {
buf_reassign(bp, vp);
- else
- if (p && p->p_stats) {
- OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */
- }
+ } else if (p && p->p_stats) {
+ OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */
+ }
}
trace(TR_BUFWRITE, pack(vp, bp->b_bcount), bp->b_lblkno);
/* Initiate disk write. Make sure the appropriate party is charged. */
- OSAddAtomic(1, &vp->v_numoutput);
-
+ OSAddAtomic(1, &vp->v_numoutput);
+
VNOP_STRATEGY(bp);
if (sync) {
	 * make sure it's on the correct vnode queue. (async operations
	 * were paid for above.)
*/
- if (wasdelayed)
+ if (wasdelayed) {
buf_reassign(bp, vp);
- else
- if (p && p->p_stats) {
- OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */
- }
+ } else if (p && p->p_stats) {
+ OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */
+ }
/* Release the buffer. */
buf_brelse(bp);
- return (rv);
+ return rv;
} else {
- return (0);
+ return 0;
}
}
int
vn_bwrite(struct vnop_bwrite_args *ap)
{
- return (buf_bwrite(ap->a_bp));
+ return buf_bwrite(ap->a_bp);
}
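/*
 * Illustrative sketch only: vn_bwrite() is the pass-through bwrite used by file
 * systems that need no special handling; a vnode op table simply points its
 * vnop_bwrite slot at it, roughly as below.  The table name and the exact cast
 * are assumptions for the example.
 */
#if 0
static struct vnodeopv_entry_desc example_vnodeop_entries[] = {
	{ &vnop_bwrite_desc, (int (*)(void *))vn_bwrite },      /* bwrite */
	{ NULL, (int (*)(void *))NULL }
};
#endif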
/*
* Described in Leffler, et al. (pp. 208-213).
*
* Note: With the ability to allocate additional buffer
- * headers, we can get in to the situation where "too" many
+	 * headers, we can get into the situation where "too" many
	 * buf_bdwrite()s can create a situation where the kernel can create
* buffers faster than the disks can service. Doing a buf_bawrite() in
* cases where we have "too many" outstanding buf_bdwrite()s avoids that.
int
bdwrite_internal(buf_t bp, int return_error)
{
- proc_t p = current_proc();
- vnode_t vp = bp->b_vp;
+ proc_t p = current_proc();
+ vnode_t vp = bp->b_vp;
/*
* If the block hasn't been seen before:
*/
if (!ISSET(bp->b_flags, B_DELWRI)) {
SET(bp->b_flags, B_DELWRI);
- if (p && p->p_stats) {
- OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */
+ if (p && p->p_stats) {
+ OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */
}
OSAddAtomicLong(1, &nbdwrite);
buf_reassign(bp, vp);
/*
* if we're not LOCKED, but the total number of delayed writes
* has climbed above 75% of the total buffers in the system
- * return an error if the caller has indicated that it can
+ * return an error if the caller has indicated that it can
* handle one in this case, otherwise schedule the I/O now
* this is done to prevent us from allocating tons of extra
* buffers when dealing with virtual disks (i.e. DiskImages),
* buffer is part of a transaction and can't go to disk until
* the LOCKED bit is cleared.
*/
- if (!ISSET(bp->b_flags, B_LOCKED) && nbdwrite > ((nbuf_headers/4)*3)) {
- if (return_error)
- return (EAGAIN);
+ if (!ISSET(bp->b_flags, B_LOCKED) && nbdwrite > ((nbuf_headers / 4) * 3)) {
+ if (return_error) {
+ return EAGAIN;
+ }
/*
* If the vnode has "too many" write operations in progress
* wait for them to finish the IO
*/
(void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, "buf_bdwrite");
- return (buf_bawrite(bp));
+ return buf_bawrite(bp);
}
-
+
/* Otherwise, the "write" is done, so mark and release the buffer. */
SET(bp->b_flags, B_DONE);
buf_brelse(bp);
- return (0);
+ return 0;
}
errno_t
buf_bdwrite(buf_t bp)
{
- return (bdwrite_internal(bp, 0));
+ return bdwrite_internal(bp, 0);
}
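/*
 * Illustrative sketch only: the usual delayed-write pattern that feeds
 * bdwrite_internal() above - read a block, modify it in place, and let
 * buf_bdwrite() mark it B_DELWRI instead of writing synchronously.  The helper
 * name and 4096-byte size are assumptions for the example.
 */
#if 0
static errno_t
example_delayed_update(vnode_t vp, daddr64_t blkno, kauth_cred_t cred)
{
	buf_t   bp;
	errno_t error;

	error = buf_bread(vp, blkno, 4096, cred, &bp);
	if (error) {
		buf_brelse(bp);
		return error;
	}
	/* ... modify the bytes at buf_dataptr(bp) ... */

	/*
	 * Marks the buffer dirty and returns at once; per bdwrite_internal(),
	 * once delayed writes exceed ~75% of nbuf_headers it falls back to
	 * buf_bawrite() so the disks can catch up.
	 */
	return buf_bdwrite(bp);
}
#endif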
-
+
/*
* Asynchronous block write; just an asynchronous buf_bwrite().
*
	 * Note: With the ability to allocate additional buffer
- * headers, we can get in to the situation where "too" many
+	 * headers, we can get into the situation where "too" many
	 * buf_bawrite()s can create a situation where the kernel can create
* buffers faster than the disks can service.
* We limit the number of "in flight" writes a vnode can have to
static int
bawrite_internal(buf_t bp, int throttle)
{
- vnode_t vp = bp->b_vp;
+ vnode_t vp = bp->b_vp;
if (vp) {
- if (throttle)
- /*
+ if (throttle) {
+ /*
* If the vnode has "too many" write operations in progress
* wait for them to finish the IO
*/
- (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, (const char *)"buf_bawrite");
- else if (vp->v_numoutput >= VNODE_ASYNC_THROTTLE)
- /*
- * return to the caller and
+ (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, (const char *)"buf_bawrite");
+ } else if (vp->v_numoutput >= VNODE_ASYNC_THROTTLE) {
+ /*
+ * return to the caller and
* let him decide what to do
*/
- return (EWOULDBLOCK);
+ return EWOULDBLOCK;
+ }
}
SET(bp->b_flags, B_ASYNC);
- return (VNOP_BWRITE(bp));
+ return VNOP_BWRITE(bp);
}
errno_t
buf_bawrite(buf_t bp)
{
- return (bawrite_internal(bp, 1));
+ return bawrite_internal(bp, 1);
}
zone_t z;
z = getbufzone(bp->b_bufsize);
- zfree(z, (void *)bp->b_datap);
- } else
- kmem_free(kernel_map, bp->b_datap, bp->b_bufsize);
+ zfree(z, bp->b_datap);
+ } else {
+ kmem_free(kernel_map, bp->b_datap, bp->b_bufsize);
+ }
bp->b_datap = (uintptr_t)NULL;
bp->b_bufsize = 0;
static buf_t
buf_brelse_shadow(buf_t bp)
{
- buf_t bp_head;
- buf_t bp_temp;
- buf_t bp_return = NULL;
+ buf_t bp_head;
+ buf_t bp_temp;
+ buf_t bp_return = NULL;
#ifdef BUF_MAKE_PRIVATE
- buf_t bp_data;
- int data_ref = 0;
+ buf_t bp_data;
+ int data_ref = 0;
#endif
int need_wakeup = 0;
__IGNORE_WCASTALIGN(bp_head = (buf_t)bp->b_orig);
- if (bp_head->b_whichq != -1)
+ if (bp_head->b_whichq != -1) {
panic("buf_brelse_shadow: bp_head on freelist %d\n", bp_head->b_whichq);
+ }
#ifdef BUF_MAKE_PRIVATE
if (bp_data = bp->b_data_store) {
bp_data->b_data_ref--;
/*
- * snapshot the ref count so that we can check it
+ * snapshot the ref count so that we can check it
* outside of the lock... we only want the guy going
* from 1 -> 0 to try and release the storage
*/
bp_head->b_shadow_ref--;
- for (bp_temp = bp_head; bp_temp && bp != bp_temp->b_shadow; bp_temp = bp_temp->b_shadow);
+ for (bp_temp = bp_head; bp_temp && bp != bp_temp->b_shadow; bp_temp = bp_temp->b_shadow) {
+ ;
+ }
- if (bp_temp == NULL)
+ if (bp_temp == NULL) {
panic("buf_brelse_shadow: bp not on list %p", bp_head);
+ }
bp_temp->b_shadow = bp_temp->b_shadow->b_shadow;
* so transfer it to the first shadow buf left in the chain
*/
if (bp == bp_data && data_ref) {
- if ((bp_data = bp_head->b_shadow) == NULL)
+ if ((bp_data = bp_head->b_shadow) == NULL) {
panic("buf_brelse_shadow: data_ref mismatch bp(%p)", bp);
+ }
- for (bp_temp = bp_data; bp_temp; bp_temp = bp_temp->b_shadow)
+ for (bp_temp = bp_data; bp_temp; bp_temp = bp_temp->b_shadow) {
bp_temp->b_data_store = bp_data;
+ }
bp_data->b_data_ref = data_ref;
}
#endif
- if (bp_head->b_shadow_ref == 0 && bp_head->b_shadow)
- panic("buf_relse_shadow: b_shadow != NULL && b_shadow_ref == 0 bp(%p)", bp);
- if (bp_head->b_shadow_ref && bp_head->b_shadow == 0)
- panic("buf_relse_shadow: b_shadow == NULL && b_shadow_ref != 0 bp(%p)", bp);
+ if (bp_head->b_shadow_ref == 0 && bp_head->b_shadow) {
+		panic("buf_brelse_shadow: b_shadow != NULL && b_shadow_ref == 0 bp(%p)", bp);
+ }
+ if (bp_head->b_shadow_ref && bp_head->b_shadow == 0) {
+		panic("buf_brelse_shadow: b_shadow == NULL && b_shadow_ref != 0 bp(%p)", bp);
+ }
if (bp_head->b_shadow_ref == 0) {
if (!ISSET(bp_head->b_lflags, BL_BUSY)) {
-
CLR(bp_head->b_flags, B_AGE);
bp_head->b_timestamp = buf_timestamp();
}
lck_mtx_unlock(buf_mtxp);
- if (need_wakeup)
+ if (need_wakeup) {
wakeup(bp_head);
+ }
-#ifdef BUF_MAKE_PRIVATE
- if (bp == bp_data && data_ref == 0)
+#ifdef BUF_MAKE_PRIVATE
+ if (bp == bp_data && data_ref == 0) {
buf_free_meta_store(bp);
+ }
bp->b_data_store = NULL;
#endif
KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_END, bp, 0, 0, 0, 0);
- return (bp_return);
+ return bp_return;
}
buf_brelse(buf_t bp)
{
struct bqueues *bufq;
- long whichq;
- upl_t upl;
+ long whichq;
+ upl_t upl;
int need_wakeup = 0;
int need_bp_wakeup = 0;
- if (bp->b_whichq != -1 || !(bp->b_lflags & BL_BUSY))
- panic("buf_brelse: bad buffer = %p\n", bp);
+ if (bp->b_whichq != -1 || !(bp->b_lflags & BL_BUSY)) {
+ panic("buf_brelse: bad buffer = %p\n", bp);
+ }
#ifdef JOE_DEBUG
(void) OSBacktrace(&bp->b_stackbrelse[0], 6);
bp->b_tag = 0;
#endif
if (bp->b_lflags & BL_IOBUF) {
- buf_t shadow_master_bp = NULL;
+ buf_t shadow_master_bp = NULL;
- if (ISSET(bp->b_lflags, BL_SHADOW))
+ if (ISSET(bp->b_lflags, BL_SHADOW)) {
shadow_master_bp = buf_brelse_shadow(bp);
- else if (ISSET(bp->b_lflags, BL_IOBUF_ALLOC))
- buf_free_meta_store(bp);
- free_io_buf(bp);
+ } else if (ISSET(bp->b_lflags, BL_IOBUF_ALLOC)) {
+ buf_free_meta_store(bp);
+ }
+ free_io_buf(bp);
if (shadow_master_bp) {
bp = shadow_master_bp;
}
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_START,
- bp->b_lblkno * PAGE_SIZE, bp, bp->b_datap,
- bp->b_flags, 0);
+ bp->b_lblkno * PAGE_SIZE, bp, bp->b_datap,
+ bp->b_flags, 0);
trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
* the HFS journal code depends on this
*/
if (ISSET(bp->b_flags, B_META) && ISSET(bp->b_flags, B_INVAL)) {
- if (ISSET(bp->b_flags, B_FILTER)) { /* if necessary, call out */
- void (*iodone_func)(struct buf *, void *) = bp->b_iodone;
- void *arg = bp->b_transaction;
+ if (ISSET(bp->b_flags, B_FILTER)) { /* if necessary, call out */
+ void (*iodone_func)(struct buf *, void *) = bp->b_iodone;
+ void *arg = bp->b_transaction;
- CLR(bp->b_flags, B_FILTER); /* but note callout done */
+ CLR(bp->b_flags, B_FILTER); /* but note callout done */
bp->b_iodone = NULL;
bp->b_transaction = NULL;
*/
upl = bp->b_upl;
- if ( !ISSET(bp->b_flags, B_META) && UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) {
+ if (!ISSET(bp->b_flags, B_META) && UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) {
kern_return_t kret;
int upl_flags;
if (upl == NULL) {
- if ( !ISSET(bp->b_flags, B_INVAL)) {
+ if (!ISSET(bp->b_flags, B_INVAL)) {
kret = ubc_create_upl_kernel(bp->b_vp,
- ubc_blktooff(bp->b_vp, bp->b_lblkno),
- bp->b_bufsize,
- &upl,
- NULL,
- UPL_PRECIOUS,
- VM_KERN_MEMORY_FILE);
-
- if (kret != KERN_SUCCESS)
- panic("brelse: Failed to create UPL");
+ ubc_blktooff(bp->b_vp, bp->b_lblkno),
+ bp->b_bufsize,
+ &upl,
+ NULL,
+ UPL_PRECIOUS,
+ VM_KERN_MEMORY_FILE);
+
+ if (kret != KERN_SUCCESS) {
+ panic("brelse: Failed to create UPL");
+ }
#if UPL_DEBUG
upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 5);
#endif /* UPL_DEBUG */
}
} else {
if (bp->b_datap) {
- kret = ubc_upl_unmap(upl);
+ kret = ubc_upl_unmap(upl);
- if (kret != KERN_SUCCESS)
- panic("ubc_upl_unmap failed");
+ if (kret != KERN_SUCCESS) {
+ panic("ubc_upl_unmap failed");
+ }
bp->b_datap = (uintptr_t)NULL;
}
}
if (upl) {
if (bp->b_flags & (B_ERROR | B_INVAL)) {
- if (bp->b_flags & (B_READ | B_INVAL))
- upl_flags = UPL_ABORT_DUMP_PAGES;
- else
- upl_flags = 0;
+ if (bp->b_flags & (B_READ | B_INVAL)) {
+ upl_flags = UPL_ABORT_DUMP_PAGES;
+ } else {
+ upl_flags = 0;
+ }
ubc_upl_abort(upl, upl_flags);
} else {
- if (ISSET(bp->b_flags, B_DELWRI | B_WASDIRTY))
- upl_flags = UPL_COMMIT_SET_DIRTY ;
- else
- upl_flags = UPL_COMMIT_CLEAR_DIRTY ;
+ if (ISSET(bp->b_flags, B_DELWRI | B_WASDIRTY)) {
+ upl_flags = UPL_COMMIT_SET_DIRTY;
+ } else {
+ upl_flags = UPL_COMMIT_CLEAR_DIRTY;
+ }
ubc_upl_commit_range(upl, 0, bp->b_bufsize, upl_flags |
- UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
+ UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
}
bp->b_upl = NULL;
}
} else {
- if ( (upl) )
+ if ((upl)) {
panic("brelse: UPL set for non VREG; vp=%p", bp->b_vp);
- }
+ }
+ }
/*
* If it's locked, don't report an error; try again later.
*/
- if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
+ if (ISSET(bp->b_flags, (B_LOCKED | B_ERROR)) == (B_LOCKED | B_ERROR)) {
CLR(bp->b_flags, B_ERROR);
+ }
/*
* If it's not cacheable, or an error, mark it invalid.
*/
- if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
+ if (ISSET(bp->b_flags, (B_NOCACHE | B_ERROR))) {
SET(bp->b_flags, B_INVAL);
-
- if ((bp->b_bufsize <= 0) ||
- ISSET(bp->b_flags, B_INVAL) ||
- (ISSET(bp->b_lflags, BL_WANTDEALLOC) && !ISSET(bp->b_flags, B_DELWRI))) {
+ }
- boolean_t delayed_buf_free_meta_store = FALSE;
+ if ((bp->b_bufsize <= 0) ||
+ ISSET(bp->b_flags, B_INVAL) ||
+ (ISSET(bp->b_lflags, BL_WANTDEALLOC) && !ISSET(bp->b_flags, B_DELWRI))) {
+ boolean_t delayed_buf_free_meta_store = FALSE;
/*
* If it's invalid or empty, dissociate it from its vnode,
* release its storage if B_META, and
* clean it up a bit and put it on the EMPTY queue
*/
- if (ISSET(bp->b_flags, B_DELWRI))
+ if (ISSET(bp->b_flags, B_DELWRI)) {
OSAddAtomicLong(-1, &nbdwrite);
+ }
if (ISSET(bp->b_flags, B_META)) {
- if (bp->b_shadow_ref)
+ if (bp->b_shadow_ref) {
delayed_buf_free_meta_store = TRUE;
- else
+ } else {
buf_free_meta_store(bp);
+ }
}
/*
* nuke any credentials we were holding
if (bp->b_shadow_ref) {
SET(bp->b_lflags, BL_WAITSHADOW);
-
+
lck_mtx_unlock(buf_mtxp);
-
+
return;
}
if (delayed_buf_free_meta_store == TRUE) {
-
lck_mtx_unlock(buf_mtxp);
finish_shadow_master:
buf_free_meta_store(bp);
}
CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
- if (bp->b_vp)
+ if (bp->b_vp) {
brelvp_locked(bp);
+ }
bremhash(bp);
BLISTNONE(bp);
bp->b_whichq = BQ_EMPTY;
binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
} else {
-
/*
* It has valid data. Put it on the end of the appropriate
* queue, so that it'll stick around for as long as possible.
*/
- if (ISSET(bp->b_flags, B_LOCKED))
- whichq = BQ_LOCKED; /* locked in core */
- else if (ISSET(bp->b_flags, B_META))
- whichq = BQ_META; /* meta-data */
- else if (ISSET(bp->b_flags, B_AGE))
- whichq = BQ_AGE; /* stale but valid data */
- else
- whichq = BQ_LRU; /* valid data */
+ if (ISSET(bp->b_flags, B_LOCKED)) {
+ whichq = BQ_LOCKED; /* locked in core */
+ } else if (ISSET(bp->b_flags, B_META)) {
+ whichq = BQ_META; /* meta-data */
+ } else if (ISSET(bp->b_flags, B_AGE)) {
+ whichq = BQ_AGE; /* stale but valid data */
+ } else {
+ whichq = BQ_LRU; /* valid data */
+ }
bufq = &bufqueues[whichq];
bp->b_timestamp = buf_timestamp();
lck_mtx_lock_spin(buf_mtxp);
-
+
/*
* the buf_brelse_shadow routine doesn't take 'ownership'
* of the parent buf_t... it updates state that is protected by
/*
* there are still cloned buf_t's pointing
* at this guy... need to keep it off the
- * freelists until a buf_brelse is done on
+ * freelists until a buf_brelse is done on
* the last clone
*/
CLR(bp->b_flags, (B_ASYNC | B_NOCACHE));
}
}
if (needbuffer) {
- /*
+ /*
* needbuffer is a global
* we're currently using buf_mtxp to protect it
* delay doing the actual wakeup until after
need_wakeup = 1;
}
if (ISSET(bp->b_lflags, BL_WANTED)) {
- /*
+ /*
* delay the actual wakeup until after we
* clear BL_BUSY and we've dropped buf_mtxp
*/
lck_mtx_unlock(buf_mtxp);
if (need_wakeup) {
- /*
+ /*
* Wake up any processes waiting for any buffer to become free.
*/
- wakeup(&needbuffer);
+ wakeup(&needbuffer);
}
if (need_bp_wakeup) {
- /*
+ /*
	 * Wake up any processes waiting for _this_ buffer to become free.
*/
- wakeup(bp);
+ wakeup(bp);
}
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_END,
- bp, bp->b_datap, bp->b_flags, 0, 0);
+ bp, bp->b_datap, bp->b_flags, 0, 0);
}
/*
static boolean_t
incore(vnode_t vp, daddr64_t blkno)
{
- boolean_t retval;
- struct bufhashhdr *dp;
+ boolean_t retval;
+ struct bufhashhdr *dp;
dp = BUFHASH(vp, blkno);
lck_mtx_lock_spin(buf_mtxp);
- if (incore_locked(vp, blkno, dp))
- retval = TRUE;
- else
- retval = FALSE;
+ if (incore_locked(vp, blkno, dp)) {
+ retval = TRUE;
+ } else {
+ retval = FALSE;
+ }
lck_mtx_unlock(buf_mtxp);
- return (retval);
+ return retval;
}
for (bp = dp->lh_first; bp != NULL; bp = bp->b_hash.le_next) {
if (bp->b_lblkno == blkno && bp->b_vp == vp &&
!ISSET(bp->b_flags, B_INVAL)) {
- return (bp);
+ return bp;
}
}
- return (NULL);
+ return NULL;
}
buf_wait_for_shadow_io(vnode_t vp, daddr64_t blkno)
{
buf_t bp;
- struct bufhashhdr *dp;
+ struct bufhashhdr *dp;
dp = BUFHASH(vp, blkno);
lck_mtx_lock_spin(buf_mtxp);
for (;;) {
- if ((bp = incore_locked(vp, blkno, dp)) == NULL)
+ if ((bp = incore_locked(vp, blkno, dp)) == NULL) {
break;
+ }
- if (bp->b_shadow_ref == 0)
+ if (bp->b_shadow_ref == 0) {
break;
+ }
SET(bp->b_lflags, BL_WANTED_REF);
- (void) msleep(bp, buf_mtxp, PSPIN | (PRIBIO+1), "buf_wait_for_shadow", NULL);
+ (void) msleep(bp, buf_mtxp, PSPIN | (PRIBIO + 1), "buf_wait_for_shadow", NULL);
}
lck_mtx_unlock(buf_mtxp);
}
-
+
/* XXX FIXME -- Update the comment to reflect the UBC changes (please) -- */
/*
* Get a block of requested size that is associated with
int ret_only_valid;
struct timespec ts;
int upl_flags;
- struct bufhashhdr *dp;
+ struct bufhashhdr *dp;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_START,
- (uintptr_t)(blkno * PAGE_SIZE), size, operation, 0, 0);
+ (uintptr_t)(blkno * PAGE_SIZE), size, operation, 0, 0);
ret_only_valid = operation & BLK_ONLYVALID;
operation &= ~BLK_ONLYVALID;
/*
* don't retake the mutex after being awakened...
- * the time out is in msecs
+	 * the timeout is in msecs
*/
- ts.tv_sec = (slptimeo/1000);
+ ts.tv_sec = (slptimeo / 1000);
ts.tv_nsec = (slptimeo % 1000) * 10 * NSEC_PER_USEC * 1000;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 396)) | DBG_FUNC_NONE,
- (uintptr_t)blkno, size, operation, 0, 0);
+ (uintptr_t)blkno, size, operation, 0, 0);
err = msleep(bp, buf_mtxp, slpflag | PDROP | (PRIBIO + 1), "buf_getblk", &ts);
* Callers who call with PCATCH or timeout are
* willing to deal with the NULL pointer
*/
- if (err && ((slpflag & PCATCH) || ((err == EWOULDBLOCK) && slptimeo)))
- return (NULL);
+ if (err && ((slpflag & PCATCH) || ((err == EWOULDBLOCK) && slptimeo))) {
+ return NULL;
+ }
goto start;
- /*NOTREACHED*/
+ /*NOTREACHED*/
default:
- /*
+ /*
* unknown operation requested
*/
panic("getblk: paging or unknown operation for incore busy buffer - %x\n", operation);
/*NOTREACHED*/
break;
- }
+ }
} else {
int clear_bdone;
bremfree_locked(bp);
bufstats.bufs_incore++;
-
+
lck_mtx_unlock(buf_mtxp);
#ifdef JOE_DEBUG
bp->b_owner = current_thread();
bp->b_tag = 1;
#endif
- if ( (bp->b_upl) )
- panic("buffer has UPL, but not marked BUSY: %p", bp);
+ if ((bp->b_upl)) {
+ panic("buffer has UPL, but not marked BUSY: %p", bp);
+ }
clear_bdone = FALSE;
if (!ret_only_valid) {
clear_bdone = TRUE;
}
- if (bp->b_bufsize != size)
+ if (bp->b_bufsize != size) {
allocbuf(bp, size);
+ }
}
upl_flags = 0;
upl_flags |= UPL_WILL_MODIFY;
case BLK_READ:
upl_flags |= UPL_PRECIOUS;
- if (UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) {
+ if (UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) {
kret = ubc_create_upl_kernel(vp,
- ubc_blktooff(vp, bp->b_lblkno),
- bp->b_bufsize,
- &upl,
- &pl,
- upl_flags,
- VM_KERN_MEMORY_FILE);
- if (kret != KERN_SUCCESS)
- panic("Failed to create UPL");
+ ubc_blktooff(vp, bp->b_lblkno),
+ bp->b_bufsize,
+ &upl,
+ &pl,
+ upl_flags,
+ VM_KERN_MEMORY_FILE);
+ if (kret != KERN_SUCCESS) {
+ panic("Failed to create UPL");
+ }
bp->b_upl = upl;
if (upl_valid_page(pl, 0)) {
- if (upl_dirty_page(pl, 0))
- SET(bp->b_flags, B_WASDIRTY);
- else
- CLR(bp->b_flags, B_WASDIRTY);
- } else
- CLR(bp->b_flags, (B_DONE | B_CACHE | B_WASDIRTY | B_DELWRI));
+ if (upl_dirty_page(pl, 0)) {
+ SET(bp->b_flags, B_WASDIRTY);
+ } else {
+ CLR(bp->b_flags, B_WASDIRTY);
+ }
+ } else {
+ CLR(bp->b_flags, (B_DONE | B_CACHE | B_WASDIRTY | B_DELWRI));
+ }
kret = ubc_upl_map(upl, (vm_offset_t*)&(bp->b_datap));
- if (kret != KERN_SUCCESS)
- panic("getblk: ubc_upl_map() failed with (%d)", kret);
+ if (kret != KERN_SUCCESS) {
+ panic("getblk: ubc_upl_map() failed with (%d)", kret);
+ }
}
break;
case BLK_META:
/*
* VM is not involved in IO for the meta data
- * buffer already has valid data
+ * buffer already has valid data
*/
break;
break;
}
- if (clear_bdone)
+ if (clear_bdone) {
CLR(bp->b_flags, B_DONE);
+ }
}
} else { /* not incore() */
int queue = BQ_EMPTY; /* Start with no preference */
-
+
if (ret_only_valid) {
lck_mtx_unlock(buf_mtxp);
- return (NULL);
+ return NULL;
}
- if ((vnode_isreg(vp) == 0) || (UBCINFOEXISTS(vp) == 0) /*|| (vnode_issystem(vp) == 1)*/)
+ if ((vnode_isreg(vp) == 0) || (UBCINFOEXISTS(vp) == 0) /*|| (vnode_issystem(vp) == 1)*/) {
operation = BLK_META;
+ }
- if ((bp = getnewbuf(slpflag, slptimeo, &queue)) == NULL)
+ if ((bp = getnewbuf(slpflag, slptimeo, &queue)) == NULL) {
goto start;
+ }
/*
* getnewbuf may block for a number of different reasons...
* mark the buffer as B_META if indicated
* so that when buffer is released it will goto META queue
*/
- if (operation == BLK_META)
- SET(bp->b_flags, B_META);
+ if (operation == BLK_META) {
+ SET(bp->b_flags, B_META);
+ }
bp->b_blkno = bp->b_lblkno = blkno;
bp->b_vp = vp;
/*
- * Insert in the hash so that incore() can find it
+ * Insert in the hash so that incore() can find it
*/
- binshash(bp, BUFHASH(vp, blkno));
+ binshash(bp, BUFHASH(vp, blkno));
bgetvp_locked(vp, bp);
* in bufstats are protected with either
* buf_mtxp or iobuffer_mtxp
*/
- OSAddAtomicLong(1, &bufstats.bufs_miss);
+ OSAddAtomicLong(1, &bufstats.bufs_miss);
break;
case BLK_WRITE:
*/
upl_flags |= UPL_WILL_MODIFY;
case BLK_READ:
- { off_t f_offset;
- size_t contig_bytes;
- int bmap_flags;
+ { off_t f_offset;
+ size_t contig_bytes;
+ int bmap_flags;
#if DEVELOPMENT || DEBUG
/*
			 * Apple-implemented file systems use UBC exclusively; they should
			 * not call in here.
*/
- const char* excldfs[] = {"hfs", "afpfs", "smbfs", "acfs",
- "exfat", "msdos", "webdav", NULL};
-
- for (int i = 0; excldfs[i] != NULL; i++) {
- if (vp->v_mount &&
- !strcmp(vp->v_mount->mnt_vfsstat.f_fstypename,
- excldfs[i])) {
- panic("%s %s calls buf_getblk",
- excldfs[i],
- operation == BLK_READ ? "BLK_READ" : "BLK_WRITE");
- }
- }
+ const char* excldfs[] = {"hfs", "afpfs", "smbfs", "acfs",
+ "exfat", "msdos", "webdav", NULL};
+
+ for (int i = 0; excldfs[i] != NULL; i++) {
+ if (vp->v_mount &&
+ !strcmp(vp->v_mount->mnt_vfsstat.f_fstypename,
+ excldfs[i])) {
+ panic("%s %s calls buf_getblk",
+ excldfs[i],
+ operation == BLK_READ ? "BLK_READ" : "BLK_WRITE");
+ }
+ }
#endif
- if ( (bp->b_upl) )
- panic("bp already has UPL: %p",bp);
+ if ((bp->b_upl)) {
+ panic("bp already has UPL: %p", bp);
+ }
- f_offset = ubc_blktooff(vp, blkno);
+ f_offset = ubc_blktooff(vp, blkno);
- upl_flags |= UPL_PRECIOUS;
- kret = ubc_create_upl_kernel(vp,
- f_offset,
- bp->b_bufsize,
- &upl,
- &pl,
- upl_flags,
- VM_KERN_MEMORY_FILE);
+ upl_flags |= UPL_PRECIOUS;
+ kret = ubc_create_upl_kernel(vp,
+ f_offset,
+ bp->b_bufsize,
+ &upl,
+ &pl,
+ upl_flags,
+ VM_KERN_MEMORY_FILE);
- if (kret != KERN_SUCCESS)
- panic("Failed to create UPL");
+ if (kret != KERN_SUCCESS) {
+ panic("Failed to create UPL");
+ }
#if UPL_DEBUG
- upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 4);
+ upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 4);
#endif /* UPL_DEBUG */
- bp->b_upl = upl;
-
- if (upl_valid_page(pl, 0)) {
-
- if (operation == BLK_READ)
- bmap_flags = VNODE_READ;
- else
- bmap_flags = VNODE_WRITE;
-
- SET(bp->b_flags, B_CACHE | B_DONE);
-
- OSAddAtomicLong(1, &bufstats.bufs_vmhits);
-
- bp->b_validoff = 0;
- bp->b_dirtyoff = 0;
-
- if (upl_dirty_page(pl, 0)) {
- /* page is dirty */
- SET(bp->b_flags, B_WASDIRTY);
-
- bp->b_validend = bp->b_bcount;
- bp->b_dirtyend = bp->b_bcount;
- } else {
- /* page is clean */
- bp->b_validend = bp->b_bcount;
- bp->b_dirtyend = 0;
- }
- /*
- * try to recreate the physical block number associated with
- * this buffer...
- */
- if (VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))
- panic("getblk: VNOP_BLOCKMAP failed");
- /*
- * if the extent represented by this buffer
- * is not completely physically contiguous on
- * disk, than we can't cache the physical mapping
- * in the buffer header
- */
- if ((long)contig_bytes < bp->b_bcount)
- bp->b_blkno = bp->b_lblkno;
- } else {
- OSAddAtomicLong(1, &bufstats.bufs_miss);
- }
- kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap));
-
- if (kret != KERN_SUCCESS)
- panic("getblk: ubc_upl_map() failed with (%d)", kret);
- break;
- } // end BLK_READ
+ bp->b_upl = upl;
+
+ if (upl_valid_page(pl, 0)) {
+ if (operation == BLK_READ) {
+ bmap_flags = VNODE_READ;
+ } else {
+ bmap_flags = VNODE_WRITE;
+ }
+
+ SET(bp->b_flags, B_CACHE | B_DONE);
+
+ OSAddAtomicLong(1, &bufstats.bufs_vmhits);
+
+ bp->b_validoff = 0;
+ bp->b_dirtyoff = 0;
+
+ if (upl_dirty_page(pl, 0)) {
+ /* page is dirty */
+ SET(bp->b_flags, B_WASDIRTY);
+
+ bp->b_validend = bp->b_bcount;
+ bp->b_dirtyend = bp->b_bcount;
+ } else {
+ /* page is clean */
+ bp->b_validend = bp->b_bcount;
+ bp->b_dirtyend = 0;
+ }
+ /*
+ * try to recreate the physical block number associated with
+ * this buffer...
+ */
+ if (VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL)) {
+ panic("getblk: VNOP_BLOCKMAP failed");
+ }
+ /*
+ * if the extent represented by this buffer
+ * is not completely physically contiguous on
+			 * disk, then we can't cache the physical mapping
+ * in the buffer header
+ */
+ if ((long)contig_bytes < bp->b_bcount) {
+ bp->b_blkno = bp->b_lblkno;
+ }
+ } else {
+ OSAddAtomicLong(1, &bufstats.bufs_miss);
+ }
+ kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap));
+
+ if (kret != KERN_SUCCESS) {
+ panic("getblk: ubc_upl_map() failed with (%d)", kret);
+ }
+ break;} // end BLK_READ
default:
panic("getblk: paging or unknown operation - %x", operation);
/*NOTREACHED*/
} //end buf_t !incore
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_END,
- bp, bp->b_datap, bp->b_flags, 3, 0);
+ bp, bp->b_datap, bp->b_flags, 3, 0);
#ifdef JOE_DEBUG
(void) OSBacktrace(&bp->b_stackgetblk[0], 6);
#endif
- return (bp);
+ return bp;
}
/*
buf_t
buf_geteblk(int size)
{
- buf_t bp = NULL;
+ buf_t bp = NULL;
int queue = BQ_EMPTY;
do {
bp = getnewbuf(0, 0, &queue);
} while (bp == NULL);
- SET(bp->b_flags, (B_META|B_INVAL));
+ SET(bp->b_flags, (B_META | B_INVAL));
#if DIAGNOSTIC
assert(queue == BQ_EMPTY);
allocbuf(bp, size);
- return (bp);
+ return bp;
}
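/*
 * Illustrative sketch only: buf_geteblk() hands back an empty buffer that is
 * not associated with any vnode; because B_META | B_INVAL are set above, the
 * buffer is recycled rather than cached once it is released.  The helper name
 * and the use of buf_dataptr() as the scratch pointer are assumptions for the
 * example.
 */
#if 0
static void
example_scratch_buffer(void)
{
	buf_t bp;

	bp = buf_geteblk(8192);         /* loops internally until a buffer is available */
	/* ... use buf_dataptr(bp) as 8 KiB of temporary storage ... */
	buf_brelse(bp);                 /* B_INVAL sends it straight back to the empty queue */
}
#endif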
uint32_t
static void *
recycle_buf_from_pool(int nsize)
{
- buf_t bp;
- void *ptr = NULL;
+ buf_t bp;
+ void *ptr = NULL;
lck_mtx_lock_spin(buf_mtxp);
TAILQ_FOREACH(bp, &bufqueues[BQ_META], b_freelist) {
- if (ISSET(bp->b_flags, B_DELWRI) || bp->b_bufsize != nsize)
+ if (ISSET(bp->b_flags, B_DELWRI) || bp->b_bufsize != nsize) {
continue;
+ }
ptr = (void *)bp->b_datap;
bp->b_bufsize = 0;
}
lck_mtx_unlock(buf_mtxp);
- return (ptr);
+ return ptr;
}
ptr = zalloc_nopagewait(z);
- if (was_vmpriv == TRUE)
+ if (was_vmpriv == TRUE) {
set_vm_privilege(TRUE);
+ }
if (ptr == NULL) {
-
zalloc_nopagewait_failed++;
ptr = recycle_buf_from_pool(nsize);
if (ptr == NULL) {
-
recycle_buf_failed++;
- if (was_vmpriv == FALSE)
+ if (was_vmpriv == FALSE) {
set_vm_privilege(TRUE);
+ }
ptr = zalloc(z);
- if (was_vmpriv == FALSE)
+ if (was_vmpriv == FALSE) {
set_vm_privilege(FALSE);
+ }
}
}
- return (ptr);
+ return ptr;
}
/*
- * With UBC, there is no need to expand / shrink the file data
+ * With UBC, there is no need to expand / shrink the file data
* buffer. The VM uses the same pages, hence no waste.
* All the file data buffers can have one size.
* In fact expand / shrink would be an expensive operation.
desired_size = roundup(size, CLBYTES);
- if (desired_size < PAGE_SIZE)
+ if (desired_size < PAGE_SIZE) {
desired_size = PAGE_SIZE;
- if (desired_size > MAXBSIZE)
+ }
+ if (desired_size > MAXBSIZE) {
panic("allocbuf: buffer larger than MAXBSIZE requested");
+ }
if (ISSET(bp->b_flags, B_META)) {
int nsize = roundup(size, MINMETA);
vm_offset_t elem = (vm_offset_t)bp->b_datap;
if (ISSET(bp->b_flags, B_ZALLOC)) {
- if (bp->b_bufsize < nsize) {
- zone_t zprev;
+ if (bp->b_bufsize < nsize) {
+ zone_t zprev;
- /* reallocate to a bigger size */
+ /* reallocate to a bigger size */
- zprev = getbufzone(bp->b_bufsize);
+ zprev = getbufzone(bp->b_bufsize);
if (nsize <= MAXMETA) {
- desired_size = nsize;
+ desired_size = nsize;
/* b_datap not really a ptr */
*(void **)(&bp->b_datap) = grab_memory_for_meta_buf(nsize);
} else {
- bp->b_datap = (uintptr_t)NULL;
- kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size, VM_KERN_MEMORY_FILE);
+ bp->b_datap = (uintptr_t)NULL;
+ kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size, VM_KERN_MEMORY_FILE);
CLR(bp->b_flags, B_ZALLOC);
}
bcopy((void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize);
- zfree(zprev, (void *)elem);
+ zfree(zprev, elem);
} else {
- desired_size = bp->b_bufsize;
+ desired_size = bp->b_bufsize;
}
-
} else {
if ((vm_size_t)bp->b_bufsize < desired_size) {
/* reallocate to a bigger size */
- bp->b_datap = (uintptr_t)NULL;
+ bp->b_datap = (uintptr_t)NULL;
kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size, VM_KERN_MEMORY_FILE);
bcopy((const void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize);
- kmem_free(kernel_map, elem, bp->b_bufsize);
+ kmem_free(kernel_map, elem, bp->b_bufsize);
} else {
desired_size = bp->b_bufsize;
}
/* b_datap not really a ptr */
*(void **)(&bp->b_datap) = grab_memory_for_meta_buf(nsize);
SET(bp->b_flags, B_ZALLOC);
- } else
+ } else {
kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size, VM_KERN_MEMORY_FILE);
+ }
}
- if (bp->b_datap == 0)
- panic("allocbuf: NULL b_datap");
+ if (bp->b_datap == 0) {
+ panic("allocbuf: NULL b_datap");
+ }
}
bp->b_bufsize = desired_size;
bp->b_bcount = size;
- return (0);
+ return 0;
}
/*
static buf_t
getnewbuf(int slpflag, int slptimeo, int * queue)
{
- buf_t bp;
- buf_t lru_bp;
- buf_t age_bp;
- buf_t meta_bp;
- int age_time, lru_time, bp_time, meta_time;
- int req = *queue; /* save it for restarts */
+ buf_t bp;
+ buf_t lru_bp;
+ buf_t age_bp;
+ buf_t meta_bp;
+ int age_time, lru_time, bp_time, meta_time;
+ int req = *queue; /* save it for restarts */
struct timespec ts;
start:
* invalid request gets empty queue
*/
if ((*queue >= BQUEUES) || (*queue < 0)
- || (*queue == BQ_LAUNDRY) || (*queue == BQ_LOCKED))
+ || (*queue == BQ_LAUNDRY) || (*queue == BQ_LOCKED)) {
*queue = BQ_EMPTY;
+ }
- if (*queue == BQ_EMPTY && (bp = bufqueues[*queue].tqh_first))
- goto found;
+ if (*queue == BQ_EMPTY && (bp = bufqueues[*queue].tqh_first)) {
+ goto found;
+ }
/*
* need to grow number of bufs, add another one rather than recycling
*/
if (nbuf_headers < max_nbuf_headers) {
/*
- * Increment count now as lock
+ * Increment count now as lock
* is dropped for allocation.
* That avoids over commits
*/
}
/* Try for the requested queue first */
bp = bufqueues[*queue].tqh_first;
- if (bp)
- goto found;
+ if (bp) {
+ goto found;
+ }
/* Unable to use requested queue */
age_bp = bufqueues[BQ_AGE].tqh_first;
}
/*
	 * We have seen that this is hard to trigger.
- * This is an overcommit of nbufs but needed
+ * This is an overcommit of nbufs but needed
	 * in some scenarios with disk images
*/
/* Create a new temporary buffer header */
bp = (struct buf *)zalloc(buf_hdr_zone);
-
+
if (bp) {
bufhdrinit(bp);
bp->b_whichq = BQ_EMPTY;
/* wait for a free buffer of any kind */
needbuffer = 1;
/* hz value is 100 */
- ts.tv_sec = (slptimeo/1000);
+ ts.tv_sec = (slptimeo / 1000);
/* the hz value is 100; which leads to 10ms */
ts.tv_nsec = (slptimeo % 1000) * NSEC_PER_USEC * 1000 * 10;
- msleep(&needbuffer, buf_mtxp, slpflag | PDROP | (PRIBIO+1), "getnewbuf", &ts);
- return (NULL);
+ msleep(&needbuffer, buf_mtxp, slpflag | PDROP | (PRIBIO + 1), "getnewbuf", &ts);
+ return NULL;
}
/* Buffer available either on AGE or LRU or META */
bp = age_bp;
*queue = BQ_AGE;
} else { /* buffer available on both AGE and LRU */
- int t = buf_timestamp();
+ int t = buf_timestamp();
age_time = t - age_bp->b_timestamp;
lru_time = t - lru_bp->b_timestamp;
if (!bp) { /* Neither on AGE nor on LRU */
bp = meta_bp;
*queue = BQ_META;
- } else if (meta_bp) {
- int t = buf_timestamp();
+ } else if (meta_bp) {
+ int t = buf_timestamp();
bp_time = t - bp->b_timestamp;
meta_time = t - meta_bp->b_timestamp;
if (!(bp_time < 0) && !(meta_time < 0)) {
/* time not set backwards */
int bp_is_stale;
- bp_is_stale = (*queue == BQ_LRU) ?
- lru_is_stale : age_is_stale;
+ bp_is_stale = (*queue == BQ_LRU) ?
+ lru_is_stale : age_is_stale;
- if ((meta_time >= meta_is_stale) &&
- (bp_time < bp_is_stale)) {
+ if ((meta_time >= meta_is_stale) &&
+ (bp_time < bp_is_stale)) {
bp = meta_bp;
*queue = BQ_META;
}
}
}
found:
- if (ISSET(bp->b_flags, B_LOCKED) || ISSET(bp->b_lflags, BL_BUSY))
- panic("getnewbuf: bp @ %p is LOCKED or BUSY! (flags 0x%x)\n", bp, bp->b_flags);
+ if (ISSET(bp->b_flags, B_LOCKED) || ISSET(bp->b_lflags, BL_BUSY)) {
+ panic("getnewbuf: bp @ %p is LOCKED or BUSY! (flags 0x%x)\n", bp, bp->b_flags);
+ }
/* Clean it */
if (bcleanbuf(bp, FALSE)) {
*queue = req;
goto start;
}
- return (bp);
+ return bp;
}
-/*
+/*
* Clean a buffer.
* Returns 0 if buffer is ready to use,
- * Returns 1 if issued a buf_bawrite() to indicate
+ * Returns 1 if issued a buf_bawrite() to indicate
* that the buffer is not ready.
- *
+ *
* buf_mtxp is held upon entry
* returns with buf_mtxp locked
*/
lck_mtx_lock_spin(buf_mtxp);
- return (1);
+ return 1;
}
#ifdef JOE_DEBUG
bp->b_owner = current_thread();
*/
SET(bp->b_lflags, BL_BUSY);
buf_busycount++;
-
+
bremhash(bp);
/*
* disassociate us from our vnode, if we had one...
*/
- if (bp->b_vp)
+ if (bp->b_vp) {
brelvp_locked(bp);
+ }
lck_mtx_unlock(buf_mtxp);
BLISTNONE(bp);
- if (ISSET(bp->b_flags, B_META))
+ if (ISSET(bp->b_flags, B_META)) {
buf_free_meta_store(bp);
+ }
trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
buf_release_credentials(bp);
-
+
/* If discarding, just move to the empty queue */
if (discard) {
lck_mtx_lock_spin(buf_mtxp);
lck_mtx_lock_spin(buf_mtxp);
}
- return (0);
+ return 0;
}
errno_t
buf_invalblkno(vnode_t vp, daddr64_t lblkno, int flags)
{
- buf_t bp;
- errno_t error;
+ buf_t bp;
+ errno_t error;
struct bufhashhdr *dp;
dp = BUFHASH(vp, lblkno);
-relook:
+relook:
lck_mtx_lock_spin(buf_mtxp);
if ((bp = incore_locked(vp, lblkno, dp)) == (struct buf *)0) {
- lck_mtx_unlock(buf_mtxp);
- return (0);
+ lck_mtx_unlock(buf_mtxp);
+ return 0;
}
if (ISSET(bp->b_lflags, BL_BUSY)) {
- if ( !ISSET(flags, BUF_WAIT)) {
- lck_mtx_unlock(buf_mtxp);
- return (EBUSY);
+ if (!ISSET(flags, BUF_WAIT)) {
+ lck_mtx_unlock(buf_mtxp);
+ return EBUSY;
}
- SET(bp->b_lflags, BL_WANTED);
+ SET(bp->b_lflags, BL_WANTED);
error = msleep((caddr_t)bp, buf_mtxp, PDROP | (PRIBIO + 1), "buf_invalblkno", NULL);
if (error) {
- return (error);
+ return error;
}
goto relook;
}
lck_mtx_unlock(buf_mtxp);
buf_brelse(bp);
- return (0);
+ return 0;
}
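/*
 * Illustrative sketch only: a typical use of buf_invalblkno() - after a file
 * system frees or reuses an on-disk block, discard any cached copy so a stale
 * image cannot be pushed out later.  The helper name is an assumption for the
 * example.
 */
#if 0
static void
example_discard_cached_block(vnode_t vp, daddr64_t lblkno)
{
	/*
	 * BUF_WAIT sleeps if the cached copy is busy; without it the call
	 * returns EBUSY instead, as in the code above.
	 */
	(void)buf_invalblkno(vp, lblkno, BUF_WAIT);
}
#endif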
void
buf_drop(buf_t bp)
{
- int need_wakeup = 0;
+ int need_wakeup = 0;
lck_mtx_lock_spin(buf_mtxp);
if (ISSET(bp->b_lflags, BL_WANTED)) {
- /*
+ /*
* delay the actual wakeup until after we
* clear BL_BUSY and we've dropped buf_mtxp
*/
lck_mtx_unlock(buf_mtxp);
if (need_wakeup) {
- /*
+ /*
	 * Wake up any processes waiting for _this_ buffer to become free.
*/
- wakeup(bp);
+ wakeup(bp);
}
}
errno_t
-buf_acquire(buf_t bp, int flags, int slpflag, int slptimeo) {
- errno_t error;
+buf_acquire(buf_t bp, int flags, int slpflag, int slptimeo)
+{
+ errno_t error;
- lck_mtx_lock_spin(buf_mtxp);
+ lck_mtx_lock_spin(buf_mtxp);
error = buf_acquire_locked(bp, flags, slpflag, slptimeo);
- lck_mtx_unlock(buf_mtxp);
+ lck_mtx_unlock(buf_mtxp);
- return (error);
+ return error;
}
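/*
 * Illustrative sketch only: buf_acquire()/buf_drop() pair up to take and then
 * release ownership of a buffer without cycling it through buf_brelse().  The
 * helper name is an assumption; the flag shown is one of those handled by
 * buf_acquire_locked() below.
 */
#if 0
static errno_t
example_pin_buffer(buf_t bp)
{
	errno_t error;

	/* don't sleep: EBUSY comes back if someone else holds the buffer */
	error = buf_acquire(bp, BAC_NOWAIT, 0, 0);
	if (error) {
		return error;
	}
	/* ... the buffer is marked BL_BUSY for this caller here ... */
	buf_drop(bp);                   /* clears the busy state and wakes any waiters */
	return 0;
}
#endif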
struct timespec ts;
if (ISSET(bp->b_flags, B_LOCKED)) {
- if ((flags & BAC_SKIP_LOCKED))
- return (EDEADLK);
+ if ((flags & BAC_SKIP_LOCKED)) {
+ return EDEADLK;
+ }
} else {
- if ((flags & BAC_SKIP_NONLOCKED))
- return (EDEADLK);
+ if ((flags & BAC_SKIP_NONLOCKED)) {
+ return EDEADLK;
+ }
}
- if (ISSET(bp->b_lflags, BL_BUSY)) {
- /*
+ if (ISSET(bp->b_lflags, BL_BUSY)) {
+ /*
* since the lck_mtx_lock may block, the buffer
* may become BUSY, so we need to
* recheck for a NOWAIT request
*/
- if (flags & BAC_NOWAIT)
- return (EBUSY);
- SET(bp->b_lflags, BL_WANTED);
+ if (flags & BAC_NOWAIT) {
+ return EBUSY;
+ }
+ SET(bp->b_lflags, BL_WANTED);
/* the hz value is 100; which leads to 10ms */
- ts.tv_sec = (slptimeo/100);
+ ts.tv_sec = (slptimeo / 100);
ts.tv_nsec = (slptimeo % 100) * 10 * NSEC_PER_USEC * 1000;
error = msleep((caddr_t)bp, buf_mtxp, slpflag | (PRIBIO + 1), "buf_acquire", &ts);
- if (error)
- return (error);
- return (EAGAIN);
+ if (error) {
+ return error;
+ }
+ return EAGAIN;
+ }
+ if (flags & BAC_REMOVE) {
+ bremfree_locked(bp);
}
- if (flags & BAC_REMOVE)
- bremfree_locked(bp);
SET(bp->b_lflags, BL_BUSY);
buf_busycount++;
bp->b_owner = current_thread();
bp->b_tag = 5;
#endif
- return (0);
+ return 0;
}
buf_biowait(buf_t bp)
{
while (!ISSET(bp->b_flags, B_DONE)) {
-
lck_mtx_lock_spin(buf_mtxp);
if (!ISSET(bp->b_flags, B_DONE)) {
DTRACE_IO1(wait__start, buf_t, bp);
- (void) msleep(bp, buf_mtxp, PDROP | (PRIBIO+1), "buf_biowait", NULL);
+ (void) msleep(bp, buf_mtxp, PDROP | (PRIBIO + 1), "buf_biowait", NULL);
DTRACE_IO1(wait__done, buf_t, bp);
- } else
+ } else {
lck_mtx_unlock(buf_mtxp);
+ }
}
/* check for interruption of I/O (e.g. via NFS), then errors. */
if (ISSET(bp->b_flags, B_EINTR)) {
CLR(bp->b_flags, B_EINTR);
- return (EINTR);
- } else if (ISSET(bp->b_flags, B_ERROR))
- return (bp->b_error ? bp->b_error : EIO);
- else
- return (0);
+ return EINTR;
+ } else if (ISSET(bp->b_flags, B_ERROR)) {
+ return bp->b_error ? bp->b_error : EIO;
+ } else {
+ return 0;
+ }
}
struct bufattr *bap;
struct timeval real_elapsed;
uint64_t real_elapsed_usec = 0;
-
+
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_START,
- bp, bp->b_datap, bp->b_flags, 0, 0);
+ bp, bp->b_datap, bp->b_flags, 0, 0);
- if (ISSET(bp->b_flags, B_DONE))
+ if (ISSET(bp->b_flags, B_DONE)) {
panic("biodone already");
+ }
bap = &bp->b_attr;
} else {
mp = NULL;
}
-
+
if (ISSET(bp->b_flags, B_ERROR)) {
if (mp && (MNT_ROOTFS & mp->mnt_flag)) {
dk_error_description_t desc;
int code = DKIO_DONE;
int io_tier = GET_BUFATTR_IO_TIER(bap);
- if (bp->b_flags & B_READ)
- code |= DKIO_READ;
- if (bp->b_flags & B_ASYNC)
- code |= DKIO_ASYNC;
+ if (bp->b_flags & B_READ) {
+ code |= DKIO_READ;
+ }
+ if (bp->b_flags & B_ASYNC) {
+ code |= DKIO_ASYNC;
+ }
- if (bp->b_flags & B_META)
- code |= DKIO_META;
- else if (bp->b_flags & B_PAGEIO)
- code |= DKIO_PAGING;
+ if (bp->b_flags & B_META) {
+ code |= DKIO_META;
+ } else if (bp->b_flags & B_PAGEIO) {
+ code |= DKIO_PAGING;
+ }
- if (io_tier != 0)
+ if (io_tier != 0) {
code |= DKIO_THROTTLE;
+ }
code |= ((io_tier << DKIO_TIER_SHIFT) & DKIO_TIER_MASK);
- if (bp->b_flags & B_PASSIVE)
+ if (bp->b_flags & B_PASSIVE) {
code |= DKIO_PASSIVE;
+ }
- if (bap->ba_flags & BA_NOCACHE)
+ if (bap->ba_flags & BA_NOCACHE) {
code |= DKIO_NOCACHE;
+ }
if (bap->ba_flags & BA_IO_TIER_UPGRADE) {
code |= DKIO_TIER_UPGRADE;
}
KDBG_RELEASE_NOPROCFILT(FSDBG_CODE(DBG_DKRW, code),
- buf_kernel_addrperm_addr(bp),
- (uintptr_t)VM_KERNEL_ADDRPERM(bp->b_vp), bp->b_resid,
- bp->b_error);
+ buf_kernel_addrperm_addr(bp),
+ (uintptr_t)VM_KERNEL_ADDRPERM(bp->b_vp), bp->b_resid,
+ bp->b_error);
}
microuptime(&real_elapsed);
DTRACE_IO1(done, buf_t, bp);
- if (!ISSET(bp->b_flags, B_READ) && !ISSET(bp->b_flags, B_RAW))
- /*
+ if (!ISSET(bp->b_flags, B_READ) && !ISSET(bp->b_flags, B_RAW)) {
+ /*
	 * wake up any writers blocked
* on throttle or waiting for I/O
* to drain
*/
vnode_writedone(bp->b_vp);
+ }
- if (ISSET(bp->b_flags, (B_CALL | B_FILTER))) { /* if necessary, call out */
- void (*iodone_func)(struct buf *, void *) = bp->b_iodone;
- void *arg = bp->b_transaction;
+ if (ISSET(bp->b_flags, (B_CALL | B_FILTER))) { /* if necessary, call out */
+ void (*iodone_func)(struct buf *, void *) = bp->b_iodone;
+ void *arg = bp->b_transaction;
int callout = ISSET(bp->b_flags, B_CALL);
- if (iodone_func == NULL)
- panic("biodone: bp @ %p has NULL b_iodone!\n", bp);
+ if (iodone_func == NULL) {
+ panic("biodone: bp @ %p has NULL b_iodone!\n", bp);
+ }
- CLR(bp->b_flags, (B_CALL | B_FILTER)); /* filters and callouts are one-shot */
+ CLR(bp->b_flags, (B_CALL | B_FILTER)); /* filters and callouts are one-shot */
bp->b_iodone = NULL;
bp->b_transaction = NULL;
- if (callout)
- SET(bp->b_flags, B_DONE); /* note that it's done */
-
+ if (callout) {
+ SET(bp->b_flags, B_DONE); /* note that it's done */
+ }
(*iodone_func)(bp, arg);
if (callout) {
* by the HFS journaling code
*/
}
- if (ISSET(bp->b_flags, B_ASYNC)) { /* if async, release it */
- SET(bp->b_flags, B_DONE); /* note that it's done */
+ if (ISSET(bp->b_flags, B_ASYNC)) { /* if async, release it */
+ SET(bp->b_flags, B_DONE); /* note that it's done */
buf_brelse(bp);
- } else { /* or just wakeup the buffer */
- /*
+ } else { /* or just wakeup the buffer */
+ /*
* by taking the mutex, we serialize
* the buf owner calling buf_biowait so that we'll
* only see him in one of 2 states...
	 * they do get to run, they're going to re-set
* BL_WANTED and go back to sleep
*/
- lck_mtx_lock_spin(buf_mtxp);
+ lck_mtx_lock_spin(buf_mtxp);
CLR(bp->b_lflags, BL_WANTED);
- SET(bp->b_flags, B_DONE); /* note that it's done */
+ SET(bp->b_flags, B_DONE); /* note that it's done */
- lck_mtx_unlock(buf_mtxp);
+ lck_mtx_unlock(buf_mtxp);
wakeup(bp);
}
biodone_done:
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_END,
- (uintptr_t)bp, (uintptr_t)bp->b_datap, bp->b_flags, 0, 0);
+ (uintptr_t)bp, (uintptr_t)bp->b_datap, bp->b_flags, 0, 0);
}
/*
vm_offset_t
buf_kernel_addrperm_addr(void * addr)
{
- if ((vm_offset_t)addr == 0)
+ if ((vm_offset_t)addr == 0) {
return 0;
- else
- return ((vm_offset_t)addr + buf_kernel_addrperm);
+ } else {
+ return (vm_offset_t)addr + buf_kernel_addrperm;
+ }
}
/*
int
count_lock_queue(void)
{
- buf_t bp;
- int n = 0;
+ buf_t bp;
+ int n = 0;
lck_mtx_lock_spin(buf_mtxp);
for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
- bp = bp->b_freelist.tqe_next)
+ bp = bp->b_freelist.tqe_next) {
n++;
+ }
lck_mtx_unlock(buf_mtxp);
- return (n);
+ return n;
}
/*
int i, j, count;
struct buf *bp;
struct bqueues *dp;
- int counts[MAXBSIZE/CLBYTES+1];
+ int counts[MAXBSIZE / CLBYTES + 1];
static char *bname[BQUEUES] =
- { "LOCKED", "LRU", "AGE", "EMPTY", "META", "LAUNDRY" };
+ { "LOCKED", "LRU", "AGE", "EMPTY", "META", "LAUNDRY" };
for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
count = 0;
- for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
+ for (j = 0; j <= MAXBSIZE / CLBYTES; j++) {
counts[j] = 0;
+ }
lck_mtx_lock(buf_mtxp);
for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
- counts[bp->b_bufsize/CLBYTES]++;
+ counts[bp->b_bufsize / CLBYTES]++;
count++;
}
lck_mtx_unlock(buf_mtxp);
printf("%s: total-%d", bname[i], count);
- for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
- if (counts[j] != 0)
+ for (j = 0; j <= MAXBSIZE / CLBYTES; j++) {
+ if (counts[j] != 0) {
printf(", %d-%d", j * CLBYTES, counts[j]);
+ }
+ }
printf("\n");
}
}
#endif /* DIAGNOSTIC */
-#define NRESERVEDIOBUFS 128
+#define NRESERVEDIOBUFS 128
#define MNT_VIRTUALDEV_MAX_IOBUFS 16
#define VIRTUALDEV_MAX_IOBUFS ((40*niobuf_headers)/100)
buf_t
alloc_io_buf(vnode_t vp, int priv)
{
- buf_t bp;
+ buf_t bp;
mount_t mp = NULL;
int alloc_for_virtualdev = FALSE;
need_iobuffer = 1;
(void)msleep(&need_iobuffer, iobuffer_mtxp,
- PSPIN | (PRIBIO+1), (const char *)"alloc_io_buf (1)",
+ PSPIN | (PRIBIO + 1), (const char *)"alloc_io_buf (1)",
NULL);
}
}
- while (((niobuf_headers - NRESERVEDIOBUFS < bufstats.bufs_iobufinuse) && !priv) ||
- (bp = iobufqueue.tqh_first) == NULL) {
+ while (((niobuf_headers - NRESERVEDIOBUFS < bufstats.bufs_iobufinuse) && !priv) ||
+ (bp = iobufqueue.tqh_first) == NULL) {
bufstats.bufs_iobufsleeps++;
need_iobuffer = 1;
- (void)msleep(&need_iobuffer, iobuffer_mtxp, PSPIN | (PRIBIO+1),
+ (void)msleep(&need_iobuffer, iobuffer_mtxp, PSPIN | (PRIBIO + 1),
(const char *)"alloc_io_buf (2)", NULL);
}
TAILQ_REMOVE(&iobufqueue, bp, b_freelist);
bufstats.bufs_iobufinuse++;
- if (bufstats.bufs_iobufinuse > bufstats.bufs_iobufmax)
+ if (bufstats.bufs_iobufinuse > bufstats.bufs_iobufmax) {
bufstats.bufs_iobufmax = bufstats.bufs_iobufinuse;
+ }
if (alloc_for_virtualdev) {
mp->mnt_iobufinuse++;
/*
* initialize various fields
* we don't need to hold the mutex since the buffer
- * is now private... the vp should have a reference
+ * is now private... the vp should have a reference
* on it and is not protected by this mutex in any event
*/
- bp->b_timestamp = 0;
+ bp->b_timestamp = 0;
bp->b_proc = NULL;
bp->b_datap = 0;
bp->b_flags = 0;
bp->b_lflags = BL_BUSY | BL_IOBUF;
- if (alloc_for_virtualdev)
+ if (alloc_for_virtualdev) {
bp->b_lflags |= BL_IOBUF_VDEV;
+ }
bp->b_redundancy_flags = 0;
bp->b_blkno = bp->b_lblkno = 0;
#ifdef JOE_DEBUG
bp->b_vp = vp;
bzero(&bp->b_attr, sizeof(struct bufattr));
- if (vp && (vp->v_type == VBLK || vp->v_type == VCHR))
+ if (vp && (vp->v_type == VBLK || vp->v_type == VCHR)) {
bp->b_dev = vp->v_rdev;
- else
+ } else {
bp->b_dev = NODEV;
+ }
- return (bp);
+ return bp;
}
/* Was this iobuf for a diskimage ? */
if (bp->b_lflags & BL_IOBUF_VDEV) {
free_for_virtualdev = TRUE;
- if (bp->b_vp)
+ if (bp->b_vp) {
mp = bp->b_vp->v_mount;
+ }
}
/*
bp->b_flags = B_INVAL;
/* Zero out the bufattr and its flags before relinquishing this iobuf */
- bzero (&bp->b_attr, sizeof(struct bufattr));
-
+ bzero(&bp->b_attr, sizeof(struct bufattr));
+
lck_mtx_lock_spin(iobuffer_mtxp);
binsheadfree(bp, &iobufqueue, -1);
if (need_iobuffer) {
- /*
+ /*
* Wake up any processes waiting because they need an io buffer
*
* do the wakeup after we drop the mutex... it's possible that the
need_iobuffer = 0;
need_wakeup = 1;
}
- if (bufstats.bufs_iobufinuse <= 0)
+ if (bufstats.bufs_iobufinuse <= 0) {
panic("free_io_buf: bp(%p) - bufstats.bufs_iobufinuse < 0", bp);
+ }
bufstats.bufs_iobufinuse--;
if (free_for_virtualdev) {
bufstats.bufs_iobufinuse_vdev--;
- if (mp && mp != dead_mountp)
+ if (mp && mp != dead_mountp) {
mp->mnt_iobufinuse--;
+ }
}
lck_mtx_unlock(iobuffer_mtxp);
- if (need_wakeup)
- wakeup(&need_iobuffer);
+ if (need_wakeup) {
+ wakeup(&need_iobuffer);
+ }
}
void
buf_list_lock(void)
{
- lck_mtx_lock_spin(buf_mtxp);
+ lck_mtx_lock_spin(buf_mtxp);
}
void
buf_list_unlock(void)
{
- lck_mtx_unlock(buf_mtxp);
+ lck_mtx_unlock(buf_mtxp);
}
/*
static void
bcleanbuf_thread_init(void)
{
- thread_t thread = THREAD_NULL;
+ thread_t thread = THREAD_NULL;
/* create worker thread */
kernel_thread_start((thread_continue_t)bcleanbuf_thread, NULL, &thread);
int loopcnt = 0;
for (;;) {
- lck_mtx_lock_spin(buf_mtxp);
+ lck_mtx_lock_spin(buf_mtxp);
- while ( (bp = TAILQ_FIRST(&bufqueues[BQ_LAUNDRY])) == NULL) {
- (void)msleep0(&bufqueues[BQ_LAUNDRY], buf_mtxp, PRIBIO|PDROP, "blaundry", 0, (bcleanbufcontinuation)bcleanbuf_thread);
+ while ((bp = TAILQ_FIRST(&bufqueues[BQ_LAUNDRY])) == NULL) {
+ (void)msleep0(&bufqueues[BQ_LAUNDRY], buf_mtxp, PRIBIO | PDROP, "blaundry", 0, (bcleanbufcontinuation)bcleanbuf_thread);
}
-
+
/*
* Remove from the queue
*/
error = bawrite_internal(bp, 0);
if (error) {
- bp->b_whichq = BQ_LAUNDRY;
+ bp->b_whichq = BQ_LAUNDRY;
bp->b_timestamp = buf_timestamp();
- lck_mtx_lock_spin(buf_mtxp);
+ lck_mtx_lock_spin(buf_mtxp);
binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY);
blaundrycnt++;
#endif
lck_mtx_unlock(buf_mtxp);
-
+
if (loopcnt > MAXLAUNDRY) {
/*
* bawrite_internal() can return errors if we're throttled. If we've
static int
brecover_data(buf_t bp)
{
- int upl_offset;
- upl_t upl;
+ int upl_offset;
+ upl_t upl;
upl_page_info_t *pl;
kern_return_t kret;
- vnode_t vp = bp->b_vp;
+ vnode_t vp = bp->b_vp;
int upl_flags;
- if ( !UBCINFOEXISTS(vp) || bp->b_bufsize == 0)
- goto dump_buffer;
+ if (!UBCINFOEXISTS(vp) || bp->b_bufsize == 0) {
+ goto dump_buffer;
+ }
upl_flags = UPL_PRECIOUS;
- if (! (buf_flags(bp) & B_READ)) {
+ if (!(buf_flags(bp) & B_READ)) {
/*
* "write" operation: let the UPL subsystem know
* that we intend to modify the buffer cache pages we're
*/
upl_flags |= UPL_WILL_MODIFY;
}
-
+
kret = ubc_create_upl_kernel(vp,
- ubc_blktooff(vp, bp->b_lblkno),
- bp->b_bufsize,
- &upl,
- &pl,
- upl_flags,
- VM_KERN_MEMORY_FILE);
- if (kret != KERN_SUCCESS)
- panic("Failed to create UPL");
+ ubc_blktooff(vp, bp->b_lblkno),
+ bp->b_bufsize,
+ &upl,
+ &pl,
+ upl_flags,
+ VM_KERN_MEMORY_FILE);
+ if (kret != KERN_SUCCESS) {
+ panic("Failed to create UPL");
+ }
for (upl_offset = 0; upl_offset < bp->b_bufsize; upl_offset += PAGE_SIZE) {
-
- if (!upl_valid_page(pl, upl_offset / PAGE_SIZE) || !upl_dirty_page(pl, upl_offset / PAGE_SIZE)) {
- ubc_upl_abort(upl, 0);
+ if (!upl_valid_page(pl, upl_offset / PAGE_SIZE) || !upl_dirty_page(pl, upl_offset / PAGE_SIZE)) {
+ ubc_upl_abort(upl, 0);
goto dump_buffer;
}
}
bp->b_upl = upl;
-
+
kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap));
- if (kret != KERN_SUCCESS)
- panic("getblk: ubc_upl_map() failed with (%d)", kret);
- return (1);
+ if (kret != KERN_SUCCESS) {
+ panic("getblk: ubc_upl_map() failed with (%d)", kret);
+ }
+ return 1;
dump_buffer:
bp->b_bufsize = 0;
SET(bp->b_flags, B_INVAL);
buf_brelse(bp);
- return(0);
+ return 0;
}
int
fs_buffer_cache_gc_dispatch_callouts(int all)
{
lck_mtx_lock(buf_gc_callout);
- for(int i = 0; i < FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE; i++) {
+ for (int i = 0; i < FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE; i++) {
if (fs_callouts[i].callout != NULL) {
fs_callouts[i].callout(all, fs_callouts[i].context);
}
lck_mtx_unlock(buf_gc_callout);
}
-static boolean_t
+static boolean_t
buffer_cache_gc(int all)
{
buf_t bp;
struct bqueues privq;
int thresh_hold = BUF_STALE_THRESHHOLD;
- if (all)
+ if (all) {
thresh_hold = 0;
- /*
+ }
+ /*
* We only care about metadata (incore storage comes from zalloc()).
* Unless "all" is set (used to evict meta data buffers in preparation
* for deep sleep), we only evict up to BUF_MAX_GC_BATCH_SIZE buffers
TAILQ_INIT(&privq);
need_wakeup = FALSE;
- while (((bp = TAILQ_FIRST(&bufqueues[BQ_META]))) &&
- (now > bp->b_timestamp) &&
- (now - bp->b_timestamp > thresh_hold) &&
- (found < BUF_MAX_GC_BATCH_SIZE)) {
-
+ while (((bp = TAILQ_FIRST(&bufqueues[BQ_META]))) &&
+ (now > bp->b_timestamp) &&
+ (now - bp->b_timestamp > thresh_hold) &&
+ (found < BUF_MAX_GC_BATCH_SIZE)) {
/* Remove from free list */
bremfree_locked(bp);
found++;
continue;
}
- /*
- * Mark busy and put on private list. We could technically get
+ /*
+ * Mark busy and put on private list. We could technically get
* away without setting BL_BUSY here.
*/
SET(bp->b_lflags, BL_BUSY);
buf_busycount++;
- /*
+ /*
* Remove from hash and dissociate from vp.
*/
bremhash(bp);
/* Take note if we've definitely freed at least a page to a zone */
if ((ISSET(bp->b_flags, B_ZALLOC)) && (buf_size(bp) >= PAGE_SIZE)) {
did_large_zfree = TRUE;
- }
+ }
trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
buf_release_credentials(bp);
/* Prepare for moving to empty queue */
- CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED
- | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
+ CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED
+ | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
bp->b_whichq = BQ_EMPTY;
BLISTNONE(bp);
}
/* And do a big bulk move to the empty queue */
TAILQ_CONCAT(&bufqueues[BQ_EMPTY], &privq, b_freelist);
-
} while (all && (found == BUF_MAX_GC_BATCH_SIZE));
lck_mtx_unlock(buf_mtxp);
static int
bp_cmp(void *a, void *b)
{
- buf_t *bp_a = *(buf_t **)a,
- *bp_b = *(buf_t **)b;
- daddr64_t res;
+ buf_t *bp_a = *(buf_t **)a,
+ *bp_b = *(buf_t **)b;
+ daddr64_t res;
- // don't have to worry about negative block
- // numbers so this is ok to do.
- //
- res = (bp_a->b_blkno - bp_b->b_blkno);
+ // don't have to worry about negative block
+ // numbers so this is ok to do.
+ //
+ res = (bp_a->b_blkno - bp_b->b_blkno);
- return (int)res;
+ return (int)res;
}
int
bflushq(int whichq, mount_t mp)
{
- buf_t bp, next;
- int i, buf_count;
- int total_writes = 0;
+ buf_t bp, next;
+ int i, buf_count;
+ int total_writes = 0;
static buf_t flush_table[NFLUSH];
if (whichq < 0 || whichq >= BQUEUES) {
- return (0);
+ return 0;
}
- restart:
+restart:
lck_mtx_lock(buf_mtxp);
bp = TAILQ_FIRST(&bufqueues[whichq]);
for (buf_count = 0; bp; bp = next) {
- next = bp->b_freelist.tqe_next;
-
- if (bp->b_vp == NULL || bp->b_vp->v_mount != mp) {
- continue;
- }
+ next = bp->b_freelist.tqe_next;
- if (ISSET(bp->b_flags, B_DELWRI) && !ISSET(bp->b_lflags, BL_BUSY)) {
+ if (bp->b_vp == NULL || bp->b_vp->v_mount != mp) {
+ continue;
+ }
- bremfree_locked(bp);
+ if (ISSET(bp->b_flags, B_DELWRI) && !ISSET(bp->b_lflags, BL_BUSY)) {
+ bremfree_locked(bp);
#ifdef JOE_DEBUG
- bp->b_owner = current_thread();
- bp->b_tag = 7;
+ bp->b_owner = current_thread();
+ bp->b_tag = 7;
#endif
- SET(bp->b_lflags, BL_BUSY);
- buf_busycount++;
+ SET(bp->b_lflags, BL_BUSY);
+ buf_busycount++;
- flush_table[buf_count] = bp;
- buf_count++;
- total_writes++;
+ flush_table[buf_count] = bp;
+ buf_count++;
+ total_writes++;
- if (buf_count >= NFLUSH) {
- lck_mtx_unlock(buf_mtxp);
+ if (buf_count >= NFLUSH) {
+ lck_mtx_unlock(buf_mtxp);
- qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp);
+ qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp);
- for (i = 0; i < buf_count; i++) {
- buf_bawrite(flush_table[i]);
- }
- goto restart;
+ for (i = 0; i < buf_count; i++) {
+ buf_bawrite(flush_table[i]);
+ }
+ goto restart;
+ }
}
- }
}
lck_mtx_unlock(buf_mtxp);
if (buf_count > 0) {
- qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp);
+ qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp);
- for (i = 0; i < buf_count; i++) {
- buf_bawrite(flush_table[i]);
- }
+ for (i = 0; i < buf_count; i++) {
+ buf_bawrite(flush_table[i]);
+ }
}
- return (total_writes);
+ return total_writes;
}
#endif