- bp->b_upl = upl;
-
- if (upl_valid_page(pl, 0)) {
-
- if (operation == BLK_READ)
- bmap_flags = VNODE_READ;
- else
- bmap_flags = VNODE_WRITE;
-
- SET(bp->b_flags, B_CACHE | B_DONE);
-
- OSAddAtomicLong(1, &bufstats.bufs_vmhits);
-
- bp->b_validoff = 0;
- bp->b_dirtyoff = 0;
-
- if (upl_dirty_page(pl, 0)) {
- /* page is dirty */
- SET(bp->b_flags, B_WASDIRTY);
-
- bp->b_validend = bp->b_bcount;
- bp->b_dirtyend = bp->b_bcount;
- } else {
- /* page is clean */
- bp->b_validend = bp->b_bcount;
- bp->b_dirtyend = 0;
- }
- /*
- * try to recreate the physical block number associated with
- * this buffer...
- */
- if (VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))
- panic("getblk: VNOP_BLOCKMAP failed");
- /*
- * if the extent represented by this buffer
- * is not completely physically contiguous on
- * disk, than we can't cache the physical mapping
- * in the buffer header
- */
- if ((long)contig_bytes < bp->b_bcount)
- bp->b_blkno = bp->b_lblkno;
- } else {
- OSAddAtomicLong(1, &bufstats.bufs_miss);
- }
- kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap));
-
- if (kret != KERN_SUCCESS)
- panic("getblk: ubc_upl_map() failed with (%d)", kret);
- break;
- }
+ bp->b_upl = upl;
+
+ if (upl_valid_page(pl, 0)) {
+ if (operation == BLK_READ) {
+ bmap_flags = VNODE_READ;
+ } else {
+ bmap_flags = VNODE_WRITE;
+ }
+
+ SET(bp->b_flags, B_CACHE | B_DONE);
+
+ OSAddAtomicLong(1, &bufstats.bufs_vmhits);
+
+ bp->b_validoff = 0;
+ bp->b_dirtyoff = 0;
+
+ if (upl_dirty_page(pl, 0)) {
+ /* page is dirty */
+ SET(bp->b_flags, B_WASDIRTY);
+
+ bp->b_validend = bp->b_bcount;
+ bp->b_dirtyend = bp->b_bcount;
+ } else {
+ /* page is clean */
+ bp->b_validend = bp->b_bcount;
+ bp->b_dirtyend = 0;
+ }
+ /*
+ * try to recreate the physical block number associated with
+ * this buffer...
+ */
+ if (VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL)) {
+ panic("getblk: VNOP_BLOCKMAP failed");
+ }
+ /*
+ * if the extent represented by this buffer
+ * is not completely physically contiguous on
+ * disk, then we can't cache the physical mapping
+ * in the buffer header
+ */
+ if ((long)contig_bytes < bp->b_bcount) {
+ bp->b_blkno = bp->b_lblkno;
+ }
+ } else {
+ OSAddAtomicLong(1, &bufstats.bufs_miss);
+ }
+ kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap));
+
+ if (kret != KERN_SUCCESS) {
+ panic("getblk: ubc_upl_map() failed with (%d)", kret);
+ }
+ break;
+ }