+ return bawrite_internal(bp, 1);
+}
+
+
+
+/*
+ * buf_free_meta_store(bp)
+ *
+ * Release the data storage attached to 'bp': detach b_datap /
+ * b_bufsize first, publish those stores with a memory barrier, and
+ * only then hand the region back to its allocator.  No-op when the
+ * buffer carries no storage (b_bufsize == 0).
+ */
+static void
+buf_free_meta_store(buf_t bp)
+{
+ if (bp->b_bufsize) {
+ /* capture the region locally before detaching it from the buf */
+ uintptr_t datap = bp->b_datap;
+ int bufsize = bp->b_bufsize;
+
+ bp->b_datap = (uintptr_t)NULL;
+ bp->b_bufsize = 0;
+
+ /*
+ * Ensure the assignment of b_datap has global visibility
+ * before we free the region.
+ */
+ OSMemoryBarrier();
+
+ /*
+ * Free with the allocator that matches the buffer's origin:
+ * B_ZALLOC storage goes back to the VFS BIO kalloc heap,
+ * anything else is a kernel_map region released via kmem_free
+ * (presumably allocated that way — allocation site not visible
+ * in this hunk).
+ */
+ if (ISSET(bp->b_flags, B_ZALLOC)) {
+ kheap_free(KHEAP_VFS_BIO, datap, bufsize);
+ } else {
+ kmem_free(kernel_map, datap, bufsize);
+ }
+ }
+}
+
+
+/*
+ * buf_brelse_shadow(bp)
+ *
+ * Release a shadow buf_t: under the buf_mtx spin lock, unlink 'bp'
+ * from the singly-linked shadow chain hanging off its original buffer
+ * (bp->b_orig) and drop the head's b_shadow_ref count.  When the last
+ * shadow goes away the head is either put back on a freelist
+ * (BQ_LOCKED or BQ_META) or, if a thread flagged BL_WAITSHADOW,
+ * handed back to the caller to finish the release.
+ *
+ * Returns the head buf_t in the BL_WAITSHADOW hand-off case,
+ * otherwise NULL.
+ */
+static buf_t
+buf_brelse_shadow(buf_t bp)
+{
+ buf_t bp_head;
+ buf_t bp_temp;
+ buf_t bp_return = NULL;
+#ifdef BUF_MAKE_PRIVATE
+ buf_t bp_data;
+ int data_ref = 0;
+#endif
+ int need_wakeup = 0;
+
+ lck_mtx_lock_spin(&buf_mtx);
+
+ __IGNORE_WCASTALIGN(bp_head = (buf_t)bp->b_orig);
+
+ /* the head must not sit on a freelist while shadows still reference it */
+ if (bp_head->b_whichq != -1) {
+ panic("buf_brelse_shadow: bp_head on freelist %d\n", bp_head->b_whichq);
+ }
+
+#ifdef BUF_MAKE_PRIVATE
+ /*
+ * NOTE(review): assignment in the condition is intended here, but it
+ * lacks the customary extra parentheses — i.e.
+ * if ((bp_data = bp->b_data_store)) — that suppress -Wparentheses;
+ * confirm against the project's warning flags.
+ */
+ if (bp_data = bp->b_data_store) {
+ bp_data->b_data_ref--;
+ /*
+ * snapshot the ref count so that we can check it
+ * outside of the lock... we only want the guy going
+ * from 1 -> 0 to try and release the storage
+ */
+ data_ref = bp_data->b_data_ref;
+ }
+#endif
+ KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_START, bp, bp_head, bp_head->b_shadow_ref, 0, 0);
+
+ bp_head->b_shadow_ref--;
+
+ /* walk the shadow chain to find bp's predecessor (head included) */
+ for (bp_temp = bp_head; bp_temp && bp != bp_temp->b_shadow; bp_temp = bp_temp->b_shadow) {
+ ;
+ }
+
+ if (bp_temp == NULL) {
+ panic("buf_brelse_shadow: bp not on list %p", bp_head);
+ }
+
+ /* unlink bp: bp_temp->b_shadow == bp at this point */
+ bp_temp->b_shadow = bp_temp->b_shadow->b_shadow;
+
+#ifdef BUF_MAKE_PRIVATE
+ /*
+ * we're about to free the current 'owner' of the data buffer and
+ * there is at least one other shadow buf_t still pointing at it
+ * so transfer it to the first shadow buf left in the chain
+ */
+ if (bp == bp_data && data_ref) {
+ if ((bp_data = bp_head->b_shadow) == NULL) {
+ panic("buf_brelse_shadow: data_ref mismatch bp(%p)", bp);
+ }
+
+ /* repoint every remaining shadow at the new owning buf */
+ for (bp_temp = bp_data; bp_temp; bp_temp = bp_temp->b_shadow) {
+ bp_temp->b_data_store = bp_data;
+ }
+ bp_data->b_data_ref = data_ref;
+ }
+#endif
+ /* consistency: an empty chain and a zero ref count must coincide */
+ if (bp_head->b_shadow_ref == 0 && bp_head->b_shadow) {
+ panic("buf_relse_shadow: b_shadow != NULL && b_shadow_ref == 0 bp(%p)", bp);
+ }
+ if (bp_head->b_shadow_ref && bp_head->b_shadow == 0) {
+ panic("buf_relse_shadow: b_shadow == NULL && b_shadow_ref != 0 bp(%p)", bp);
+ }
+
+ if (bp_head->b_shadow_ref == 0) {
+ /* last shadow is gone: requeue the head or hand it to a waiter */
+ if (!ISSET(bp_head->b_lflags, BL_BUSY)) {
+ CLR(bp_head->b_flags, B_AGE);
+ bp_head->b_timestamp = buf_timestamp();
+
+ if (ISSET(bp_head->b_flags, B_LOCKED)) {
+ bp_head->b_whichq = BQ_LOCKED;
+ binstailfree(bp_head, &bufqueues[BQ_LOCKED], BQ_LOCKED);
+ } else {
+ bp_head->b_whichq = BQ_META;
+ binstailfree(bp_head, &bufqueues[BQ_META], BQ_META);
+ }
+ } else if (ISSET(bp_head->b_lflags, BL_WAITSHADOW)) {
+ /*
+ * head is busy and a thread asked to be handed the buf once
+ * the shadows drained; return it so the caller completes
+ * the release
+ */
+ CLR(bp_head->b_lflags, BL_WAITSHADOW);
+
+ bp_return = bp_head;
+ }
+ if (ISSET(bp_head->b_lflags, BL_WANTED_REF)) {
+ /* wake the waiter after the lock is dropped, below */
+ CLR(bp_head->b_lflags, BL_WANTED_REF);
+ need_wakeup = 1;
+ }
+ }
+ lck_mtx_unlock(&buf_mtx);
+
+ if (need_wakeup) {
+ wakeup(bp_head);
+ }
+
+#ifdef BUF_MAKE_PRIVATE
+ /*
+ * this shadow was the owning buf and took the last reference on the
+ * shared data store (outside the lock, per the snapshot above), so
+ * release the storage now
+ */
+ if (bp == bp_data && data_ref == 0) {
+ buf_free_meta_store(bp);
+ }
+
+ bp->b_data_store = NULL;
+#endif
+ KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_END, bp, 0, 0, 0, 0);
+
+ return bp_return;