/*
 * Copyright (c) 2014-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#if HFS_EXTENTS_TEST

#include "../tests/hfs_extents_test.h"
#include "hfs_extents.h"

#else

#include "hfs_extents.h"

// In this file, group refers to a set of 8 extents

static uint32_t hfs_total_blocks(const HFSPlusExtentDescriptor *ext, int count);
static errno_t hfs_ext_iter_next_group(struct hfs_ext_iter *iter);
static errno_t hfs_ext_iter_update(struct hfs_ext_iter *iter,
                                   HFSPlusExtentDescriptor *extents,
                                   int count,
                                   HFSPlusExtentRecord cat_extents);
static errno_t hfs_ext_iter_check_group(hfs_ext_iter_t *iter);

#endif

#define CHECK(x, var, goto_label)                                      \
    do {                                                               \
        var = (x);                                                     \
        if (var) {                                                     \
            printf("%s:%u error: %d\n", __func__, __LINE__, var);      \
            goto goto_label;                                           \
        }                                                              \
    } while (0)

#define min(a, b) \
    ({ typeof (a) _a = (a); typeof (b) _b = (b); _a < _b ? _a : _b; })
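
/*
 * CHECK() is used throughout this file for B-tree and allocation calls:
 * CHECK(BTSearchRecord(...), ret, exit) stores the call's result in @ret
 * and, on a non-zero result, logs it and jumps to the exit label.
 */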
static __attribute__((pure))
const HFSPlusExtentKey *hfs_ext_iter_key(const hfs_ext_iter_t *iter)
{
    return (const HFSPlusExtentKey *)&iter->bt_iter.key;
}

static __attribute__((pure))
HFSPlusExtentKey *hfs_ext_iter_key_mut(hfs_ext_iter_t *iter)
{
    return (HFSPlusExtentKey *)&iter->bt_iter.key;
}

// Returns the total number of blocks for the @count extents provided
uint32_t hfs_total_blocks(const HFSPlusExtentDescriptor *extents, int count)
{
    uint32_t block_count = 0;
    for (int i = 0; i < count; ++i)
        block_count += extents[i].blockCount;
    return block_count;
}
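
/*
 * For example, two extents <startBlock 100, blockCount 10> and
 * <startBlock 200, blockCount 20> total 30 allocation blocks, whether or
 * not they are contiguous on disk.
 */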

/*
 * Checks a group of extents: makes sure that if it's the last group
 * for a fork, that all the remaining extents are properly zeroed and
 * if it's not then checks that all extents are set.  This also sets
 * @group_block_count and @last_in_fork.  Returns ESTALE if
 * the group is inconsistent.
 */
errno_t hfs_ext_iter_check_group(hfs_ext_iter_t *iter)
{
    filefork_t *ff = VTOF(iter->vp);
    const HFSPlusExtentKey *key = hfs_ext_iter_key(iter);
    uint32_t count = 0;
    int i;

    for (i = 0; i < kHFSPlusExtentDensity; ++i) {
        if (!iter->group[i].blockCount)
            break;
        count += iter->group[i].blockCount;
    }

    if (i < kHFSPlusExtentDensity) {
        iter->last_in_fork = true;
        if (key->startBlock + count != ff_allocblocks(ff))
            goto bad;

        // Check remainder of extents
        for (++i; i < kHFSPlusExtentDensity; ++i) {
            if (iter->group[i].blockCount)
                goto bad;
        }
    } else {
        if (key->startBlock + count > ff_allocblocks(ff))
            goto bad;

        iter->last_in_fork = (key->startBlock + count == ff_allocblocks(ff));
    }

    iter->group_block_count = count;

    return 0;

bad:

    printf("hfs_ext_iter_check_group: bad group; start: %u, total blocks: %u\n",
           key->startBlock, ff_allocblocks(ff));

    for (int j = 0; j < kHFSPlusExtentDensity; ++j) {
        printf("%s<%u, %u>", j ? ", " : "",
               iter->group[j].startBlock, iter->group[j].blockCount);
    }

    printf("\n");

    return ESTALE;
}

// NOTE: doesn't copy group data
static void hfs_ext_iter_copy(const hfs_ext_iter_t *src, hfs_ext_iter_t *dst)
{
    dst->vp = src->vp;
    memcpy(&dst->bt_iter.key, &src->bt_iter.key, sizeof(HFSPlusExtentKey));

    dst->file_block = src->file_block;

    dst->bt_iter.hint        = src->bt_iter.hint;
    dst->bt_iter.version     = 0;
    dst->bt_iter.reserved    = 0;
    dst->bt_iter.hitCount    = 0;
    dst->bt_iter.maxLeafRecs = 0;
}

bool hfs_ext_iter_is_catalog_extents(hfs_ext_iter_t *iter)
{
    return hfs_ext_iter_key(iter)->startBlock == 0;
}
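
/*
 * The first group of up to 8 extents for a fork is stored in the fork's
 * catalog record and is keyed with a startBlock of 0; anything beyond it
 * lives in the extents overflow B-tree with a non-zero startBlock.  A zero
 * startBlock in the iterator's key therefore means we are positioned on
 * the catalog-resident extents.
 */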

#if !HFS_EXTENTS_TEST

/*
 * Finds the extent for offset.  It might be in the catalog or the extents
 * file.
 */
errno_t hfs_ext_find(vnode_t vp, off_t offset, hfs_ext_iter_t *iter)
{
    errno_t ret;
    hfsmount_t *hfsmp = VTOHFS(vp);

    iter->vp = vp;

    uint32_t end_block, index;
    HFSPlusExtentKey *key = hfs_ext_iter_key_mut(iter);

    filefork_t *ff = VTOF(vp);

    CHECK(SearchExtentFile(hfsmp, ff, offset,
                           key, iter->group, &index,
                           &iter->bt_iter.hint.nodeNum, &end_block), ret, exit);

    iter->ndx = index;
    iter->file_block = end_block - iter->group[index].blockCount;

    if (!key->keyLength) {
        // We're pointing at the catalog record extents so fix up the key
        key->keyLength = kHFSPlusExtentKeyMaximumLength;
        key->forkType  = (VNODE_IS_RSRC(iter->vp)
                          ? kHFSResourceForkType : kHFSDataForkType);
        key->fileID    = VTOC(iter->vp)->c_fileid;
    }

    CHECK(hfs_ext_iter_check_group(iter), ret, exit);

exit:

    return MacToVFSError(ret);
}

static uint32_t hfs_ext_iter_next_group_block(const hfs_ext_iter_t *iter)
{
    const HFSPlusExtentKey *key = hfs_ext_iter_key(iter);

    return key->startBlock + iter->group_block_count;
}

/*
 * Move the iterator to the next group.  Don't call if there's a chance
 * there is no entry; the caller should check last_in_fork instead.
 */
static errno_t hfs_ext_iter_next_group(hfs_ext_iter_t *iter)
{
    errno_t ret;
    hfsmount_t *hfsmp = VTOHFS(iter->vp);
    filefork_t * const tree = hfsmp->hfs_extents_cp->c_datafork;
    HFSPlusExtentKey *key = hfs_ext_iter_key_mut(iter);
    const bool catalog_extents = hfs_ext_iter_is_catalog_extents(iter);
    const uint32_t next_block = hfs_ext_iter_next_group_block(iter);

    FSBufferDescriptor fbd = {
        .bufferAddress = &iter->group,
        .itemCount = 1,
        .itemSize = sizeof(iter->group)
    };

    if (catalog_extents) {
        key->startBlock = next_block;

        CHECK(BTSearchRecord(tree, &iter->bt_iter, &fbd, NULL,
                             &iter->bt_iter), ret, exit);
    } else {
        const uint32_t file_id   = key->fileID;
        const uint8_t  fork_type = key->forkType;

        CHECK(BTIterateRecord(tree, kBTreeNextRecord, &iter->bt_iter,
                              &fbd, NULL), ret, exit);

        if (key->fileID != file_id
            || key->forkType != fork_type
            || key->startBlock != next_block) {
            // This indicates an inconsistency
            ret = ESTALE;
            goto exit;
        }
    }

    iter->file_block = key->startBlock;
    iter->ndx = 0;

    CHECK(hfs_ext_iter_check_group(iter), ret, exit);

exit:

    return MacToVFSError(ret);
}

/*
 * Updates with the extents provided and sets the key up for the next group.
 * It is assumed that any previous record that might collide has been deleted.
 * NOTE: @extents must point to a buffer that can be zero padded to a
 * multiple of 8 extents.
 */
errno_t hfs_ext_iter_update(hfs_ext_iter_t *iter,
                            HFSPlusExtentDescriptor *extents,
                            int count,
                            HFSPlusExtentRecord cat_extents)
{
    errno_t ret = 0;
    hfsmount_t *hfsmp = VTOHFS(iter->vp);
    cnode_t *cp = VTOC(iter->vp);
    HFSPlusExtentKey *key = hfs_ext_iter_key_mut(iter);
    int ndx = 0;

    if (!extents)
        extents = iter->group;

    if (count % kHFSPlusExtentDensity) {
        // Zero out last group
        bzero(&extents[count], (kHFSPlusExtentDensity
                                - (count % 8)) * sizeof(*extents));
    }
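
    /*
     * For example, with @count == 13, 13 % 8 == 5, so the three unused
     * trailing extents of the second group (extents[13..15]) are zeroed
     * and the record written below is fully initialised.
     */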

    if (hfs_ext_iter_is_catalog_extents(iter)) {
        // Caller is responsible for in-memory updates

        if (cat_extents)
            hfs_ext_copy_rec(extents, cat_extents);

        struct cat_fork fork;

        hfs_fork_copy(&fork, &VTOF(iter->vp)->ff_data, extents);
        hfs_prepare_fork_for_update(VTOF(iter->vp), &fork, &fork, hfsmp->blockSize);

        bool is_rsrc = VNODE_IS_RSRC(iter->vp);
        CHECK(cat_update(hfsmp, &cp->c_desc, &cp->c_attr,
                         is_rsrc ? NULL : &fork,
                         is_rsrc ? &fork : NULL), ret, exit);

        // Set the key to the next group
        key->startBlock = hfs_total_blocks(extents, kHFSPlusExtentDensity);

        ndx = kHFSPlusExtentDensity;
    }

    // Deal with the remainder which must be overflow extents
    for (; ndx < count; ndx += 8) {
        filefork_t * const tree = hfsmp->hfs_extents_cp->c_datafork;

        FSBufferDescriptor fbd = {
            .bufferAddress = &extents[ndx],
            .itemCount = 1,
            .itemSize = sizeof(HFSPlusExtentRecord)
        };

        CHECK(BTInsertRecord(tree, &iter->bt_iter, &fbd,
                             sizeof(HFSPlusExtentRecord)), ret, exit);

        // Set the key to the next group
        key->startBlock += hfs_total_blocks(&extents[ndx], kHFSPlusExtentDensity);
    }

exit:

    return MacToVFSError(ret);
}

#endif // !HFS_EXTENTS_TEST

static void push_ext(HFSPlusExtentDescriptor *extents, int *count,
                     const HFSPlusExtentDescriptor *ext)
{
    if (!ext->blockCount)
        return;

    if (*count && hfs_ext_end(&extents[*count - 1]) == ext->startBlock)
        extents[*count - 1].blockCount += ext->blockCount;
    else
        extents[(*count)++] = *ext;
}
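
/*
 * push_ext() coalesces where it can: assuming hfs_ext_end() returns
 * startBlock + blockCount, pushing <100, 10> and then <110, 5> leaves a
 * single extent <100, 15> in @extents, whereas pushing <100, 10> and then
 * <120, 5> appends a second, separate extent.
 */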

/*
 * NOTE: Here we rely on the replacement extents not being too big as
 * otherwise the number of BTree records that we have to delete could be
 * too large.
 */
errno_t hfs_ext_replace(hfsmount_t *hfsmp, vnode_t vp,
                        uint32_t file_block,
                        const HFSPlusExtentDescriptor *repl,
                        int repl_count,
                        HFSPlusExtentRecord catalog_extents)
{
    errno_t ret;
    filefork_t * const tree = hfsmp->hfs_extents_cp->c_datafork;
    hfs_ext_iter_t *iter_in = NULL, *iter_out;
    HFSPlusExtentDescriptor *extents = NULL;
    int buffered_extents = 0;
    const int max_roll_back_extents = 16384;    // 128k
    HFSPlusExtentDescriptor *roll_back_extents = NULL;
    int roll_back_count = 0;
    const uint32_t end_file_block = file_block + hfs_total_blocks(repl, repl_count);
    filefork_t *ff = VTOF(vp);
    uint32_t start_group_block = 0, block = 0;

    // Indicate we haven't touched catalog extents
    catalog_extents[0].blockCount = 0;

    if (end_file_block > ff_allocblocks(ff))
        return ERANGE;

    iter_in = hfs_malloc(sizeof(*iter_in) * 2);
    iter_out = iter_in + 1;
    HFSPlusExtentKey *key_in = hfs_ext_iter_key_mut(iter_in);

    // Get to where we want to start
    off_t offset = hfs_blk_to_bytes(file_block, hfsmp->blockSize);

    /*
     * If the replacement is at the start of a group, we want to pull in the
     * group before so that we tidy up any padding that we might have done
     * in a prior hfs_ext_replace call.
     */
    if (offset > 0)
        --offset;

    CHECK(hfs_ext_find(vp, offset, iter_in), ret, exit);

    start_group_block = key_in->startBlock;

    roll_back_extents = hfs_malloc(max_roll_back_extents
                                   * sizeof(HFSPlusExtentDescriptor));

    // Move to the first extent in this group
    iter_in->ndx = 0;

    hfs_ext_iter_copy(iter_in, iter_out);

    // Create a buffer for our extents
    buffered_extents = roundup(3 * kHFSPlusExtentDensity + repl_count,
                               kHFSPlusExtentDensity);
    extents = hfs_malloc(sizeof(*extents) * buffered_extents);
    int count = 0;

    /*
     * Iterate through the extents that are affected by this replace operation.
     * We cannot push more than 16 + repl_count extents here; 8 for the group
     * containing the replacement start, repl_count for the replacements and 8
     * for the group containing the end.  If we went back a group due to
     * decrementing the offset above, it's still the same because we know in
     * that case the replacement starts at the beginning of the next group.
     */
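    /*
     * Worked example: with @repl_count == 5 the loop can push at most
     * 16 + 5 == 21 extents, while @buffered_extents above is
     * roundup(3 * 8 + 5, 8) == 32, leaving room for at least one more
     * group on top of the worst case.
     */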
    block = start_group_block;
    for (;;) {
        // Take a copy of these extents for roll back purposes
        hfs_ext_copy_rec(iter_in->group, &roll_back_extents[roll_back_count]);
        roll_back_count += kHFSPlusExtentDensity;

        if (!hfs_ext_iter_is_catalog_extents(iter_in)) {
            // Delete this extent group; we're going to replace it
            CHECK(BTDeleteRecord(tree, &iter_in->bt_iter), ret, exit);
        }

        HFSPlusExtentDescriptor *ext = &iter_in->group[iter_in->ndx];
        if (!ext->blockCount) {
            /*
             * We ran out of existing extents so we just write the
             * extents and we're done.
             */
            goto write_extents;
        }

        // If the current extent does not overlap replacement...
        if (block + ext->blockCount <= file_block || block >= end_file_block) {
            // Keep the current extent exactly as it is
            push_ext(extents, &count, ext);
        } else {
            HFSPlusExtentDescriptor dealloc_ext = *ext;

            if (block <= file_block) {
                /*
                 * The middle or tail of the current extent overlaps
                 * the replacement extents.  Keep the non-overlapping
                 * head of the current extent.
                 */
                uint32_t trimmed_len = file_block - block;

                // Push (keep) non-overlapping head of current extent
                push_ext(extents, &count,
                         &(HFSPlusExtentDescriptor){ ext->startBlock,
                             trimmed_len });

                /*
                 * Deallocate the part of the current extent that
                 * overlaps the replacement extents.  That starts
                 * at @file_block.  For now, assume it goes
                 * through the end of the current extent.  (If the
                 * current extent extends beyond the end of the
                 * replacement extents, we'll update the
                 * blockCount below.)
                 */
                dealloc_ext.startBlock += trimmed_len;
                dealloc_ext.blockCount -= trimmed_len;

                // Insert the replacements
                for (int i = 0; i < repl_count; ++i)
                    push_ext(extents, &count, &repl[i]);
            }

            if (block + ext->blockCount > end_file_block) {
                /*
                 * The head or middle of the current extent overlaps
                 * the replacement extents.  Keep the non-overlapping
                 * tail of the current extent.
                 */
                uint32_t overlap = end_file_block - block;

                // Push (keep) non-overlapping tail of current extent
                push_ext(extents, &count,
                         &(HFSPlusExtentDescriptor){ ext->startBlock + overlap,
                             ext->blockCount - overlap });

                /*
                 * Deallocate the part of current extent that overlaps
                 * the replacement extents.
                 */
                dealloc_ext.blockCount = (ext->startBlock + overlap
                                          - dealloc_ext.startBlock);
            }

            CHECK(BlockDeallocate(hfsmp, dealloc_ext.startBlock,
                                  dealloc_ext.blockCount, 0), ret, exit);
        }

        // Move to next (existing) extent from iterator
        block += ext->blockCount;

        if (++iter_in->ndx >= kHFSPlusExtentDensity) {
            if (block >= end_file_block) {
                if (iter_in->last_in_fork || !(count % kHFSPlusExtentDensity)) {
                    /*
                     * This is the easy case.  We've hit the end or we have a
                     * multiple of 8, so we can just write out the extents we
                     * have and it should all fit within a transaction.
                     */
                    goto write_extents;
                }

                if (count + kHFSPlusExtentDensity > buffered_extents
                    || (roll_back_count
                        + kHFSPlusExtentDensity > max_roll_back_extents)) {
                    /*
                     * We've run out of room for the next group, so drop out
                     * and take a different strategy.
                     */
                    break;
                }
            }

            CHECK(hfs_ext_iter_next_group(iter_in), ret, exit);
        }
    }

    /*
     * We're not at the end so we need to try and pad to a multiple of 8
     * so that we don't have to touch all the subsequent records.  We pad
     * by stealing single blocks.
     */

    int stop_at = 0;

    for (;;) {
        // @in points to the record we're stealing from
        int in = count - 1;

        count = roundup(count, kHFSPlusExtentDensity);

        // @out is where we put the stolen single blocks
        int out = count - 1;

        do {
            if (out == in) {
                // We succeeded in padding; we're done
                goto write_extents;
            }

            /*
             * "Steal" a block, or move a one-block extent within the
             * @extents array.
             *
             * If the extent we're "stealing" from (@in) is only one
             * block long, we'll end up copying it to @out, setting
             * @in's blockCount to zero, and decrementing @in.  So, we
             * either split a multi-block extent; or move it within
             * the @extents array.
             */
            extents[out].blockCount = 1;
            extents[out].startBlock = (extents[in].startBlock
                                       + extents[in].blockCount - 1);
            --out;
        } while (--extents[in].blockCount || --in >= stop_at);
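
        /*
         * Stealing example: if the last real extent is <500, 4>, the first
         * stolen block turns it into <500, 3> and writes the single-block
         * extent <503, 1> into the slot at @out; a one-block extent is
         * instead moved wholesale and @in steps back to the previous
         * extent.
         */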

        // We ran out of extents
        if (roll_back_count + kHFSPlusExtentDensity > max_roll_back_extents) {
            ret = ENOSPC;
            goto exit;
        }

        // Need to shift extents starting at out + 1
        ++out;
        memmove(&extents[stop_at], &extents[out],
                (count - out) * sizeof(*extents));
        count -= out - stop_at;

        // Pull in the next group
        CHECK(hfs_ext_iter_next_group(iter_in), ret, exit);

        // Take a copy of these extents for roll back purposes
        hfs_ext_copy_rec(iter_in->group, &roll_back_extents[roll_back_count]);
        roll_back_count += kHFSPlusExtentDensity;

        // Delete this group; we're going to replace it
        CHECK(BTDeleteRecord(tree, &iter_in->bt_iter), ret, exit);

        if (iter_in->last_in_fork) {
            // Great!  We've hit the end.  Coalesce and write out.
            int old_count = count;

            count = 0;

            /*
             * First coalesce the extents we already have.  Takes
             * advantage of push_ext coalescing the input extent with
             * the last extent in @extents.  If the extents are not
             * contiguous, then this just copies the extents over
             * themselves and sets @count back to @old_count.
             */
            for (int i = 0; i < old_count; ++i)
                push_ext(extents, &count, &extents[i]);

            // Make room if necessary
            const int flush_count = buffered_extents - kHFSPlusExtentDensity;
            if (count > flush_count) {
                CHECK(hfs_ext_iter_update(iter_out, extents,
                                          flush_count, catalog_extents), ret, exit);

                memmove(&extents[0], &extents[flush_count],
                        (count - flush_count) * sizeof(*extents));

                count -= flush_count;
            }

            // Add in the extents we just read in
            for (int i = 0; i < kHFSPlusExtentDensity; ++i) {
                HFSPlusExtentDescriptor *ext = &iter_in->group[i];
                if (!ext->blockCount)
                    break;
                push_ext(extents, &count, ext);
            }

            break;
        } // if (iter_in->last_in_fork)

        /*
         * Otherwise, we're not at the end, so we add these extents and then
         * try and pad out again to a multiple of 8.  We start by making room.
         */
        if (count > buffered_extents - kHFSPlusExtentDensity) {
            // Only write out one group here
            CHECK(hfs_ext_iter_update(iter_out, extents,
                                      kHFSPlusExtentDensity,
                                      catalog_extents), ret, exit);

            memmove(&extents[0], &extents[kHFSPlusExtentDensity],
                    (count - kHFSPlusExtentDensity) * sizeof(*extents));

            count -= kHFSPlusExtentDensity;
        }

        // Record where to stop when padding above
        stop_at = count;

        // Copy in the new extents
        hfs_ext_copy_rec(iter_in->group, &extents[count]);
        count += kHFSPlusExtentDensity;
    }

write_extents:

    // Write the remaining extents
    CHECK(hfs_ext_iter_update(iter_out, extents, count,
                              catalog_extents), ret, exit);

    CHECK(BTFlushPath(hfsmp->hfs_catalog_cp->c_datafork), ret, exit);
    CHECK(BTFlushPath(hfsmp->hfs_extents_cp->c_datafork), ret, exit);

exit:

    if (ret && roll_back_count) {

#define RB_FAILED                                                       \
    do {                                                                \
        printf("hfs_ext_replace:%u: roll back failed\n", __LINE__);     \
        hfs_mark_inconsistent(hfsmp, HFS_ROLLBACK_FAILED);              \
        goto roll_back_failed;                                          \
    } while (0)

        // First delete any groups we inserted
        HFSPlusExtentKey *key_out = hfs_ext_iter_key_mut(iter_out);

        key_in->startBlock = start_group_block;
        if (!key_in->startBlock && key_out->startBlock > key_in->startBlock) {
            key_in->startBlock += hfs_total_blocks(catalog_extents,
                                                   kHFSPlusExtentDensity);
        }

        if (key_out->startBlock > key_in->startBlock) {
            FSBufferDescriptor fbd = {
                .bufferAddress = &iter_in->group,
                .itemCount = 1,
                .itemSize = sizeof(iter_in->group)
            };

            if (BTSearchRecord(tree, &iter_in->bt_iter, &fbd, NULL,
                               &iter_in->bt_iter)) {
                RB_FAILED;
            }

            for (;;) {
                if (BTDeleteRecord(tree, &iter_in->bt_iter))
                    RB_FAILED;

                key_in->startBlock += hfs_total_blocks(iter_in->group,
                                                       kHFSPlusExtentDensity);

                if (key_in->startBlock >= key_out->startBlock)
                    break;

                if (BTSearchRecord(tree, &iter_in->bt_iter, &fbd, NULL,
                                   &iter_in->bt_iter)) {
                    RB_FAILED;
                }
            }
        }

        key_out->startBlock = start_group_block;

        // Roll back all the extents
        if (hfs_ext_iter_update(iter_out, roll_back_extents, roll_back_count,
                                catalog_extents)) {
            RB_FAILED;
        }

        // And we need to reallocate the blocks we deallocated
        const uint32_t end_block = min(block, end_file_block);
        block = start_group_block;
        for (int i = 0; i < roll_back_count && block < end_block; ++i) {
            HFSPlusExtentDescriptor *ext = &roll_back_extents[i];

            if (block + ext->blockCount <= file_block)
                goto next;

            HFSPlusExtentDescriptor alloc_ext = *ext;

            if (block <= file_block) {
                uint32_t trimmed_len = file_block - block;

                alloc_ext.startBlock += trimmed_len;
                alloc_ext.blockCount -= trimmed_len;
            }

            if (block + ext->blockCount > end_file_block) {
                uint32_t overlap = end_file_block - block;

                alloc_ext.blockCount = (ext->startBlock + overlap
                                        - alloc_ext.startBlock);
            }

            if (hfs_block_alloc(hfsmp, &alloc_ext, HFS_ALLOC_ROLL_BACK, NULL))
                RB_FAILED;

        next:
            block += ext->blockCount;
        }

        if (BTFlushPath(hfsmp->hfs_catalog_cp->c_datafork)
            || BTFlushPath(hfsmp->hfs_extents_cp->c_datafork)) {
            RB_FAILED;
        }
    } // if (ret && roll_back_count)

roll_back_failed:

    hfs_free(iter_in, sizeof(*iter_in) * 2);
    hfs_free(extents, sizeof(*extents) * buffered_extents);
    hfs_free(roll_back_extents, (max_roll_back_extents
                                 * sizeof(HFSPlusExtentDescriptor)));

    return MacToVFSError(ret);
}