/*
 * Copyright (c) 2014-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#if HFS_EXTENTS_TEST

#include "../tests/hfs_extents_test.h"
#include "hfs_extents.h"

#else

#include "hfs_extents.h"

// In this file, a group refers to a set of 8 extents (one HFSPlusExtentRecord)

static uint32_t hfs_total_blocks(const HFSPlusExtentDescriptor *ext, int count);
static errno_t hfs_ext_iter_next_group(struct hfs_ext_iter *iter);
static errno_t hfs_ext_iter_update(struct hfs_ext_iter *iter,
								   HFSPlusExtentDescriptor *extents,
								   int count,
								   HFSPlusExtentRecord cat_extents);
static errno_t hfs_ext_iter_check_group(hfs_ext_iter_t *iter);

#endif

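// Evaluates @x, stores the result in @var and, if it is non-zero, logs it and jumps to @goto_label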
#define CHECK(x, var, goto_label) \
	do { \
		var = (x); \
		if (var) { \
			printf("%s:%u error: %d\n", __func__, __LINE__, var); \
			goto goto_label; \
		} \
	} while (0)

#define min(a,b) \
	({ typeof (a) _a = (a); typeof (b) _b = (b); _a < _b ? _a : _b; })

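// Accessors for the HFSPlusExtentKey embedded in the iterator's B-tree iterator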
static __attribute__((pure))
const HFSPlusExtentKey *hfs_ext_iter_key(const hfs_ext_iter_t *iter)
{
	return (const HFSPlusExtentKey *)&iter->bt_iter.key;
}

static __attribute__((pure))
HFSPlusExtentKey *hfs_ext_iter_key_mut(hfs_ext_iter_t *iter)
{
	return (HFSPlusExtentKey *)&iter->bt_iter.key;
}

// Returns the total number of blocks for the @count extents provided
uint32_t hfs_total_blocks(const HFSPlusExtentDescriptor *extents, int count)
{
	uint32_t block_count = 0;
	for (int i = 0; i < count; ++i)
		block_count += extents[i].blockCount;
	return block_count;
}

/*
 * Checks a group of extents: if it's the last group for the fork, makes
 * sure that all the remaining extents are properly zeroed; if it's not,
 * checks that all extents are set. This also sets @group_block_count and
 * @last_in_fork. Returns ESTALE if inconsistent.
 */
errno_t hfs_ext_iter_check_group(hfs_ext_iter_t *iter)
{
	filefork_t *ff = VTOF(iter->vp);
	const HFSPlusExtentKey *key = hfs_ext_iter_key(iter);
	uint32_t count = 0;
	int i;

	for (i = 0; i < kHFSPlusExtentDensity; ++i) {
		if (!iter->group[i].blockCount)
			break;
		count += iter->group[i].blockCount;
	}

	if (i < kHFSPlusExtentDensity) {
		iter->last_in_fork = true;
		if (key->startBlock + count != ff_allocblocks(ff))
			goto bad;

		// Check remainder of extents
		for (++i; i < kHFSPlusExtentDensity; ++i) {
			if (iter->group[i].blockCount)
				goto bad;
		}
	} else {
		if (key->startBlock + count > ff_allocblocks(ff))
			goto bad;

		iter->last_in_fork = (key->startBlock + count == ff_allocblocks(ff));
	}

	iter->group_block_count = count;

	return 0;

bad:

#if DEBUG
	printf("hfs_ext_iter_check_group: bad group; start: %u, total blocks: %u\n",
		   key->startBlock, ff_allocblocks(ff));

	for (int j = 0; j < kHFSPlusExtentDensity; ++j) {
		printf("%s<%u, %u>", j ? ", " : "",
			   iter->group[j].startBlock, iter->group[j].blockCount);
	}

	printf("\n");
#endif

	return ESTALE;
}

// NOTE: doesn't copy group data
static void hfs_ext_iter_copy(const hfs_ext_iter_t *src, hfs_ext_iter_t *dst)
{
	dst->vp = src->vp;
	memcpy(&dst->bt_iter.key, &src->bt_iter.key, sizeof(HFSPlusExtentKey));

	dst->file_block = src->file_block;
	dst->ndx = src->ndx;

	dst->bt_iter.hint = src->bt_iter.hint;
	dst->bt_iter.version = 0;
	dst->bt_iter.reserved = 0;
	dst->bt_iter.hitCount = 0;
	dst->bt_iter.maxLeafRecs = 0;
}

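// The catalog record holds a fork's first extent group, so a key whose startBlock is 0 refers to the catalog extents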
bool hfs_ext_iter_is_catalog_extents(hfs_ext_iter_t *iter)
{
	return hfs_ext_iter_key(iter)->startBlock == 0;
}

#if !HFS_EXTENTS_TEST

/*
 * Finds the extent for offset. It might be in the catalog or the extents
 * file.
 */
errno_t hfs_ext_find(vnode_t vp, off_t offset, hfs_ext_iter_t *iter)
{
	errno_t ret;
	hfsmount_t *hfsmp = VTOHFS(vp);

	iter->vp = vp;

	uint32_t end_block, index;
	HFSPlusExtentKey *key = hfs_ext_iter_key_mut(iter);

	filefork_t *ff = VTOF(vp);

	CHECK(SearchExtentFile(hfsmp, ff, offset,
						   key, iter->group, &index,
						   &iter->bt_iter.hint.nodeNum, &end_block), ret, exit);

	iter->ndx = index;
	iter->file_block = end_block - iter->group[index].blockCount;

	if (!key->keyLength) {
		// We're pointing at the catalog record extents so fix up the key
		key->keyLength = kHFSPlusExtentKeyMaximumLength;
		key->forkType = (VNODE_IS_RSRC(iter->vp)
						 ? kHFSResourceForkType : kHFSDataForkType);
		key->pad = 0;
		key->fileID = VTOC(iter->vp)->c_fileid;
		key->startBlock = 0;
	}

	CHECK(hfs_ext_iter_check_group(iter), ret, exit);

	ret = 0;

exit:

	return MacToVFSError(ret);
}
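
/*
 * A minimal usage sketch (illustrative only; it assumes the caller already
 * holds the usual cnode and truncate locks and that @vp is a file on an
 * HFS+ volume):
 *
 *	hfs_ext_iter_t *iter = hfs_malloc(sizeof(*iter));
 *	errno_t err = hfs_ext_find(vp, 0, iter);
 *	while (!err) {
 *		for (int i = 0; i < kHFSPlusExtentDensity; ++i) {
 *			if (!iter->group[i].blockCount)
 *				break;
 *			// ... inspect iter->group[i] ...
 *		}
 *		if (iter->last_in_fork)
 *			break;
 *		err = hfs_ext_iter_next_group(iter);
 *	}
 *	hfs_free(iter, sizeof(*iter));
 */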

static uint32_t hfs_ext_iter_next_group_block(const hfs_ext_iter_t *iter)
{
	const HFSPlusExtentKey *key = hfs_ext_iter_key(iter);

	return key->startBlock + iter->group_block_count;
}

/*
 * Move the iterator to the next group. Don't call this if there's a chance
 * there is no next entry; the caller should check last_in_fork first.
 */
static errno_t hfs_ext_iter_next_group(hfs_ext_iter_t *iter)
{
	errno_t ret;
	hfsmount_t *hfsmp = VTOHFS(iter->vp);
	filefork_t * const tree = hfsmp->hfs_extents_cp->c_datafork;
	HFSPlusExtentKey *key = hfs_ext_iter_key_mut(iter);
	const bool catalog_extents = hfs_ext_iter_is_catalog_extents(iter);
	const uint32_t next_block = hfs_ext_iter_next_group_block(iter);

	FSBufferDescriptor fbd = {
		.bufferAddress = &iter->group,
		.itemCount = 1,
		.itemSize = sizeof(iter->group)
	};

	if (catalog_extents) {
		key->startBlock = next_block;

		CHECK(BTSearchRecord(tree, &iter->bt_iter, &fbd, NULL,
							 &iter->bt_iter), ret, exit);
	} else {
		const uint32_t file_id = key->fileID;
		const uint8_t fork_type = key->forkType;

		CHECK(BTIterateRecord(tree, kBTreeNextRecord, &iter->bt_iter,
							  &fbd, NULL), ret, exit);

		if (key->fileID != file_id
			|| key->forkType != fork_type
			|| key->startBlock != next_block) {
			// This indicates an inconsistency
			ret = ESTALE;
			goto exit;
		}
	}

	iter->file_block = key->startBlock;
	iter->ndx = 0;

	CHECK(hfs_ext_iter_check_group(iter), ret, exit);

	ret = 0;

exit:

	return MacToVFSError(ret);
}

/*
 * Updates the record at the iterator's position with the extents provided
 * and sets the key up for the next group. It is assumed that any previous
 * record that might collide has been deleted. NOTE: @extents must point to
 * a buffer that can be zero padded to a multiple of 8 extents.
 */
errno_t hfs_ext_iter_update(hfs_ext_iter_t *iter,
							HFSPlusExtentDescriptor *extents,
							int count,
							HFSPlusExtentRecord cat_extents)
{
	errno_t ret;
	hfsmount_t *hfsmp = VTOHFS(iter->vp);
	cnode_t *cp = VTOC(iter->vp);
	HFSPlusExtentKey *key = hfs_ext_iter_key_mut(iter);
	int ndx = 0;

	if (!extents)
		extents = iter->group;

	if (count % kHFSPlusExtentDensity) {
		// Zero out last group
		bzero(&extents[count], (kHFSPlusExtentDensity
								- (count % 8)) * sizeof(*extents));
	}

	if (hfs_ext_iter_is_catalog_extents(iter)) {
		// Caller is responsible for in-memory updates

		if (cat_extents)
			hfs_ext_copy_rec(extents, cat_extents);

		struct cat_fork fork;

		hfs_fork_copy(&fork, &VTOF(iter->vp)->ff_data, extents);
		hfs_prepare_fork_for_update(VTOF(iter->vp), &fork, &fork, hfsmp->blockSize);

		bool is_rsrc = VNODE_IS_RSRC(iter->vp);
		CHECK(cat_update(hfsmp, &cp->c_desc, &cp->c_attr,
						 is_rsrc ? NULL : &fork,
						 is_rsrc ? &fork : NULL), ret, exit);

		// Set the key to the next group
		key->startBlock = hfs_total_blocks(extents, kHFSPlusExtentDensity);

		ndx += 8;
	}

	// Deal with the remainder which must be overflow extents
	for (; ndx < count; ndx += 8) {
		filefork_t * const tree = hfsmp->hfs_extents_cp->c_datafork;

		FSBufferDescriptor fbd = {
			.bufferAddress = &extents[ndx],
			.itemCount = 1,
			.itemSize = sizeof(HFSPlusExtentRecord)
		};

		CHECK(BTInsertRecord(tree, &iter->bt_iter, &fbd,
							 sizeof(HFSPlusExtentRecord)), ret, exit);

		// Set the key to the next group
		key->startBlock += hfs_total_blocks(&extents[ndx], kHFSPlusExtentDensity);
	}

	ret = 0;

exit:

	return ret;
}

#endif // !HFS_EXTENTS_TEST

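// Appends @ext to @extents (advancing @count), coalescing it with the previous
// extent when the two are contiguous on disk. Zero-length extents are ignored.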
static void push_ext(HFSPlusExtentDescriptor *extents, int *count,
					 const HFSPlusExtentDescriptor *ext)
{
	if (!ext->blockCount)
		return;

	if (*count && hfs_ext_end(&extents[*count - 1]) == ext->startBlock)
		extents[*count - 1].blockCount += ext->blockCount;
	else
		extents[(*count)++] = *ext;
}

/*
 * NOTE: Here we rely on the replacement extents not being too big as
 * otherwise the number of BTree records that we have to delete could be
 * too large.
 */
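/*
 * Overview: starting with the group that contains @file_block, the affected
 * extent groups are read (and, if they live in the extents B-tree, deleted),
 * a replacement list is built that keeps any non-overlapping head and tail
 * of the existing extents plus @repl, and the blocks the replacements
 * displace are deallocated. If the rewritten extents don't end on a group
 * boundary and more groups follow, the list is padded to a multiple of 8 by
 * stealing single blocks from earlier extents so the records that follow
 * don't have to be rewritten. On error, the saved groups are re-inserted and
 * the deallocated blocks are reallocated (roll back).
 */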
errno_t hfs_ext_replace(hfsmount_t *hfsmp, vnode_t vp,
						uint32_t file_block,
						const HFSPlusExtentDescriptor *repl,
						int repl_count,
						HFSPlusExtentRecord catalog_extents)
{
	errno_t ret;
	filefork_t * const tree = hfsmp->hfs_extents_cp->c_datafork;
	hfs_ext_iter_t *iter_in = NULL, *iter_out;
	HFSPlusExtentDescriptor *extents = NULL;
	int buffered_extents = 0;
	const int max_roll_back_extents = 16384; // 128k
	HFSPlusExtentDescriptor *roll_back_extents = NULL;
	int roll_back_count = 0;
	const uint32_t end_file_block = file_block + hfs_total_blocks(repl, repl_count);
	filefork_t *ff = VTOF(vp);
	uint32_t start_group_block = 0, block = 0;

	// Indicate we haven't touched catalog extents
	catalog_extents[0].blockCount = 0;

	if (end_file_block > ff_allocblocks(ff))
		return EINVAL;

	iter_in = hfs_malloc(sizeof(*iter_in) * 2);
	iter_out = iter_in + 1;
	HFSPlusExtentKey *key_in = hfs_ext_iter_key_mut(iter_in);

	// Get to where we want to start
	off_t offset = hfs_blk_to_bytes(file_block, hfsmp->blockSize);

	/*
	 * If the replacement is at the start of a group, we want to pull in the
	 * group before so that we tidy up any padding that we might have done
	 * in a prior hfs_ext_replace call.
	 */
	if (offset > 0)
		--offset;

	CHECK(hfs_ext_find(vp, offset, iter_in), ret, exit);

	start_group_block = key_in->startBlock;

	roll_back_extents = hfs_malloc(max_roll_back_extents
								   * sizeof(HFSPlusExtentDescriptor));

	// Move to the first extent in this group
	iter_in->ndx = 0;

	hfs_ext_iter_copy(iter_in, iter_out);

	// Create a buffer for our extents
	buffered_extents = roundup(3 * kHFSPlusExtentDensity + repl_count,
							   kHFSPlusExtentDensity);
	extents = hfs_malloc(sizeof(*extents) * buffered_extents);
	int count = 0;

	/*
	 * Iterate through the extents that are affected by this replace operation.
	 * We cannot push more than 16 + repl_count extents here; 8 for the group
	 * containing the replacement start, repl_count for the replacements and 8
	 * for the group containing the end. If we went back a group due to
	 * decrementing the offset above, it's still the same because we know in
	 * that case the replacement starts at the beginning of the next group.
	 */
	block = start_group_block;
	for (;;) {
		if (!iter_in->ndx) {
			hfs_ext_copy_rec(iter_in->group, &roll_back_extents[roll_back_count]);
			roll_back_count += kHFSPlusExtentDensity;

			if (!hfs_ext_iter_is_catalog_extents(iter_in)) {
				// Delete this extent group; we're going to replace it
				CHECK(BTDeleteRecord(tree, &iter_in->bt_iter), ret, exit);
			}
		}

		HFSPlusExtentDescriptor *ext = &iter_in->group[iter_in->ndx];
		if (!ext->blockCount) {
			/*
			 * We ran out of existing extents so we just write the
			 * extents and we're done.
			 */
			goto finish;
		}

		// If the current extent does not overlap the replacement range...
		if (block + ext->blockCount <= file_block || block >= end_file_block) {
			// Keep the current extent exactly as it is
			push_ext(extents, &count, ext);
		} else {
			HFSPlusExtentDescriptor dealloc_ext = *ext;

			if (block <= file_block) {
				/*
				 * The middle or tail of the current extent overlaps
				 * the replacement extents. Keep the non-overlapping
				 * head of the current extent.
				 */
				uint32_t trimmed_len = file_block - block;

				if (trimmed_len) {
					// Push (keep) non-overlapping head of current extent
					push_ext(extents, &count,
							 &(HFSPlusExtentDescriptor){ ext->startBlock,
														 trimmed_len });

					/*
					 * Deallocate the part of the current extent that
					 * overlaps the replacement extents. That starts
					 * at @file_block. For now, assume it goes
					 * through the end of the current extent. (If the
					 * current extent extends beyond the end of the
					 * replacement extents, we'll update the
					 * blockCount below.)
					 */
					dealloc_ext.startBlock += trimmed_len;
					dealloc_ext.blockCount -= trimmed_len;
				}

				// Insert the replacements
				for (int i = 0; i < repl_count; ++i)
					push_ext(extents, &count, &repl[i]);
			}

			if (block + ext->blockCount > end_file_block) {
				/*
				 * The head or middle of the current extent overlaps
				 * the replacement extents. Keep the non-overlapping
				 * tail of the current extent.
				 */
				uint32_t overlap = end_file_block - block;

				// Push (keep) non-overlapping tail of current extent
				push_ext(extents, &count,
						 &(HFSPlusExtentDescriptor){ ext->startBlock + overlap,
													 ext->blockCount - overlap });

				/*
				 * Deallocate the part of the current extent that overlaps
				 * the replacements.
				 */
				dealloc_ext.blockCount = (ext->startBlock + overlap
										  - dealloc_ext.startBlock);
			}

			CHECK(BlockDeallocate(hfsmp, dealloc_ext.startBlock,
								  dealloc_ext.blockCount, 0), ret, exit);
		}

		// Move to next (existing) extent from iterator
		block += ext->blockCount;

		if (++iter_in->ndx >= kHFSPlusExtentDensity) {
			if (block >= end_file_block) {
				if (iter_in->last_in_fork || !(count % kHFSPlusExtentDensity)) {
					/*
					 * This is the easy case. We've hit the end or we have a
					 * multiple of 8, so we can just write out the extents we
					 * have and it should all fit within a transaction.
					 */

					goto finish;
				}

				if (count + kHFSPlusExtentDensity > buffered_extents
					|| (roll_back_count
						+ kHFSPlusExtentDensity > max_roll_back_extents)) {
					/*
					 * We've run out of room for the next group, so drop out
					 * and take a different strategy.
					 */
					break;
				}
			}

			CHECK(hfs_ext_iter_next_group(iter_in), ret, exit);
		}
	} // for (;;)

	/*
	 * We're not at the end, so we need to try to pad to a multiple of 8
	 * so that we don't have to touch all the subsequent records. We pad
	 * by stealing single blocks.
	 */

	int stop_at = 0;

	for (;;) {
		// @in points to the record we're stealing from
		int in = count - 1;

		count = roundup(count, kHFSPlusExtentDensity);

		// @out is where we put the stolen single blocks
		int out = count - 1;

		do {
			if (out <= in) {
				// We succeeded in padding; we're done
				goto finish;
			}

			/*
			 * "Steal" a block, or move a one-block extent within the
			 * @extents array.
			 *
			 * If the extent we're "stealing" from (@in) is only one
			 * block long, we'll end up copying it to @out, setting
			 * @in's blockCount to zero, and decrementing @in. So, we
			 * either split a multi-block extent; or move it within
			 * the @extents array.
			 */
			extents[out].blockCount = 1;
			extents[out].startBlock = (extents[in].startBlock
									   + extents[in].blockCount - 1);
			--out;
		} while (--extents[in].blockCount || --in >= stop_at);

		// We ran out of extents
		if (roll_back_count + kHFSPlusExtentDensity > max_roll_back_extents) {
			ret = ENOSPC;
			goto exit;
		}

		// Need to shift extents starting at out + 1
		++out;
		memmove(&extents[stop_at], &extents[out],
				(count - out) * sizeof(*extents));
		count -= out - stop_at;

		// Pull in the next group
		CHECK(hfs_ext_iter_next_group(iter_in), ret, exit);

		// Take a copy of these extents for roll back purposes
		hfs_ext_copy_rec(iter_in->group, &roll_back_extents[roll_back_count]);
		roll_back_count += kHFSPlusExtentDensity;

		// Delete this group; we're going to replace it
		CHECK(BTDeleteRecord(tree, &iter_in->bt_iter), ret, exit);

		if (iter_in->last_in_fork) {
			// Great! We've hit the end. Coalesce and write out.
			int old_count = count;
			count = 0;

			/*
			 * First coalesce the extents we already have. Takes
			 * advantage of push_ext coalescing the input extent with
			 * the last extent in @extents. If the extents are not
			 * contiguous, then this just copies the extents over
			 * themselves and sets @count back to @old_count.
			 */
			for (int i = 0; i < old_count; ++i)
				push_ext(extents, &count, &extents[i]);

			// Make room if necessary
			const int flush_count = buffered_extents - kHFSPlusExtentDensity;
			if (count > flush_count) {
				CHECK(hfs_ext_iter_update(iter_out, extents,
										  flush_count, catalog_extents), ret, exit);

				memmove(&extents[0], &extents[flush_count],
						(count - flush_count) * sizeof(*extents));

				count -= flush_count;
			}

			// Add in the extents we just read in
			for (int i = 0; i < kHFSPlusExtentDensity; ++i) {
				HFSPlusExtentDescriptor *ext = &iter_in->group[i];
				if (!ext->blockCount)
					break;
				push_ext(extents, &count, ext);
			}

			goto finish;
		} // if (iter_in->last_in_fork)

		/*
		 * Otherwise, we're not at the end, so we add these extents and then
		 * try to pad out again to a multiple of 8. We start by making room.
		 */
		if (count > buffered_extents - kHFSPlusExtentDensity) {
			// Only write out one group here
			CHECK(hfs_ext_iter_update(iter_out, extents,
									  kHFSPlusExtentDensity,
									  catalog_extents), ret, exit);

			memmove(&extents[0], &extents[kHFSPlusExtentDensity],
					(count - kHFSPlusExtentDensity) * sizeof(*extents));

			count -= kHFSPlusExtentDensity;
		}

		// Record where to stop when padding above
		stop_at = count;

		// Copy in the new extents
		hfs_ext_copy_rec(iter_in->group, &extents[count]);
		count += kHFSPlusExtentDensity;
	} // for (;;)

finish:

	// Write the remaining extents
	CHECK(hfs_ext_iter_update(iter_out, extents, count,
							  catalog_extents), ret, exit);

	CHECK(BTFlushPath(hfsmp->hfs_catalog_cp->c_datafork), ret, exit);
	CHECK(BTFlushPath(hfsmp->hfs_extents_cp->c_datafork), ret, exit);

exit:

	if (ret && roll_back_count) {

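// On any failure while rolling back: log it, mark the volume inconsistent and bail out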
#define RB_FAILED \
	do { \
		printf("hfs_ext_replace:%u: roll back failed\n", __LINE__); \
		hfs_mark_inconsistent(hfsmp, HFS_ROLLBACK_FAILED); \
		goto roll_back_failed; \
	} while (0)

		// First delete any groups we inserted
		HFSPlusExtentKey *key_out = hfs_ext_iter_key_mut(iter_out);

		key_in->startBlock = start_group_block;
		if (!key_in->startBlock && key_out->startBlock > key_in->startBlock) {
			key_in->startBlock += hfs_total_blocks(catalog_extents,
												   kHFSPlusExtentDensity);
		}

		if (key_out->startBlock > key_in->startBlock) {
			FSBufferDescriptor fbd = {
				.bufferAddress = &iter_in->group,
				.itemCount = 1,
				.itemSize = sizeof(iter_in->group)
			};

			if (BTSearchRecord(tree, &iter_in->bt_iter, &fbd, NULL,
							   &iter_in->bt_iter)) {
				RB_FAILED;
			}

			for (;;) {
				if (BTDeleteRecord(tree, &iter_in->bt_iter))
					RB_FAILED;

				key_in->startBlock += hfs_total_blocks(iter_in->group,
													   kHFSPlusExtentDensity);

				if (key_in->startBlock >= key_out->startBlock)
					break;

				if (BTSearchRecord(tree, &iter_in->bt_iter, &fbd, NULL,
								   &iter_in->bt_iter)) {
					RB_FAILED;
				}
			}
		}

		// Position iter_out
		key_out->startBlock = start_group_block;

		// Roll back all the extents
		if (hfs_ext_iter_update(iter_out, roll_back_extents, roll_back_count,
								catalog_extents)) {
			RB_FAILED;
		}

		// And we need to reallocate the blocks we deallocated
		const uint32_t end_block = min(block, end_file_block);
		block = start_group_block;
		for (int i = 0; i < roll_back_count && block < end_block; ++i) {
			HFSPlusExtentDescriptor *ext = &roll_back_extents[i];

			if (block + ext->blockCount <= file_block) {
				// This extent lies entirely before the replacement range;
				// skip it, but keep @block in step with it
				block += ext->blockCount;
				continue;
			}

			HFSPlusExtentDescriptor alloc_ext = *ext;

			if (block <= file_block) {
				uint32_t trimmed_len = file_block - block;

				alloc_ext.startBlock += trimmed_len;
				alloc_ext.blockCount -= trimmed_len;
			}

			if (block + ext->blockCount > end_file_block) {
				uint32_t overlap = end_file_block - block;

				alloc_ext.blockCount = (ext->startBlock + overlap
										- alloc_ext.startBlock);
			}

			if (hfs_block_alloc(hfsmp, &alloc_ext, HFS_ALLOC_ROLL_BACK, NULL))
				RB_FAILED;

			block += ext->blockCount;
		}

		if (BTFlushPath(hfsmp->hfs_catalog_cp->c_datafork)
			|| BTFlushPath(hfsmp->hfs_extents_cp->c_datafork)) {
			RB_FAILED;
		}
	} // if (ret && roll_back_count)

roll_back_failed:

	hfs_free(iter_in, sizeof(*iter_in) * 2);
	hfs_free(extents, sizeof(*extents) * buffered_extents);
	hfs_free(roll_back_extents, (max_roll_back_extents
								 * sizeof(HFSPlusExtentDescriptor)));

	return MacToVFSError(ret);
}