//  Created by Yakov Ben Zaken on 22/03/2018.
#include "lf_hfs_btrees_io.h"
#include "lf_hfs_xattr.h"
#include "lf_hfs_cnode.h"
#include "lf_hfs_endian.h"
#include "lf_hfs_utils.h"
#include "lf_hfs_file_mgr_internal.h"
#include "lf_hfs_btrees_private.h"
#include "lf_hfs_generic_buf.h"
#include "lf_hfs_vfsutils.h"
#include "lf_hfs_vfsops.h"
#include "lf_hfs_readwrite_ops.h"
#include "lf_hfs_file_extent_mapping.h"
#include "lf_hfs_vnops.h"
#include "lf_hfs_journal.h"
static int ClearBTNodes(struct vnode *vp, int blksize, off_t offset, off_t amount);
static int btree_journal_modify_block_end(struct hfsmount *hfsmp, GenericLFBuf *bp);
void btree_swap_node(GenericLFBuf *bp, __unused void *arg);
/*
 * Return btree node size for given vnode.
 *
 * For btree vnode, returns btree node size.
 * For non-btree vnodes, returns 0.
 */
u_int16_t
get_btree_nodesize(struct vnode *vp)
{
    BTreeControlBlockPtr btree;
    u_int16_t node_size = 0;
    if (vnode_issystem(vp)) {
        btree = (BTreeControlBlockPtr) VTOF(vp)->fcbBTCBPtr;
        if (btree) {
            node_size = btree->nodeSize;
        }
    }

    return node_size;
}
OSStatus
SetBTreeBlockSize(FileReference vp, ByteCount blockSize, __unused ItemCount minBlockCount)
{
    BTreeControlBlockPtr bTreePtr;

    hfs_assert(vp != NULL);
    hfs_assert(blockSize >= kMinNodeSize);
    if (blockSize > MAXBSIZE)
        return (fsBTBadNodeSize);

    bTreePtr = (BTreeControlBlockPtr)VTOF(vp)->fcbBTCBPtr;
    bTreePtr->nodeSize = blockSize;

    return (E_NONE);
}
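
/*
 * GetBTreeBlock
 *
 * Read (or, with kGetEmptyBlock, just allocate) the requested B-tree node into
 * a generic buffer and describe it in *block.  Nodes that come back in
 * big-endian (on-disk) order are byte-swapped to host order via hfs_swap_BTNode().
 */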
OSStatus
GetBTreeBlock(FileReference vp, uint64_t blockNum, GetBlockOptions options, BlockDescriptor *block)
{
    OSStatus        retval = E_NONE;
    GenericLFBufPtr bp = NULL;
    u_int8_t        allow_empty_node;

    /* If the btree block is being read using hint, it is
     * fine for the swap code to find zeroed out nodes.
     */
    if (options & kGetBlockHint) {
        allow_empty_node = true;
    } else {
        allow_empty_node = false;
    }
    if (options & kGetEmptyBlock) {
        daddr64_t blkno;
        off_t offset;

        offset = (daddr64_t)blockNum * (daddr64_t)block->blockSize;
        bp = lf_hfs_generic_buf_allocate(vp, blockNum, (uint32_t)block->blockSize,
                                         GEN_BUF_IS_UPTODATE | GEN_BUF_LITTLE_ENDIAN);
        if (bp && !hfs_vnop_blockmap(&(struct vnop_blockmap_args){
                .a_vp      = vp,
                .a_foffset = offset,
                .a_size    = block->blockSize,
                .a_bpn     = &blkno,
                .a_run     = NULL,
                .a_poff    = NULL })) {
        }
    } else {
        bp = lf_hfs_generic_buf_allocate(vp, blockNum, (uint32_t)block->blockSize, 0);
        retval = lf_hfs_generic_buf_read(bp);
    }
    if (bp == NULL)
        retval = -1;    //XXX need better error
    if (retval == E_NONE) {
        block->blockHeader = bp;
        block->buffer = bp->pvData;
        block->blockNum = bp->uBlockN;
        block->blockReadFromDisk = !(bp->uCacheFlags & GEN_BUF_LITTLE_ENDIAN);
        block->isModified = 0;
        /* Check and endian swap B-Tree node (only if it's a valid block) */
        if (!(options & kGetEmptyBlock))
        {
            /* This happens when we first open the b-tree, we might not have all the node data on hand */
            if ((((BTNodeDescriptor *)block->buffer)->kind == kBTHeaderNode) &&
                (((BTHeaderRec *)((char *)block->buffer + 14))->nodeSize != bp->uValidBytes) &&
                (SWAP_BE16 (((BTHeaderRec *)((char *)block->buffer + 14))->nodeSize) != bp->uValidBytes)) {

                /*
                 * Don't swap the node descriptor, record offsets, or other records.
                 * This record will be invalidated and re-read with the correct node
                 * size once the B-tree control block is set up with the node size
                 * from the header record.
                 */
                retval = hfs_swap_BTNode (block, vp, kSwapBTNodeHeaderRecordOnly, allow_empty_node);

            } else {
                /*
                 * In this case, we have enough data in-hand to do basic validation
                 * on the B-Tree node.
                 */
                if (block->blockReadFromDisk)
                {
                    /*
                     * The node was just read from disk, so always swap/check it.
                     * This is necessary on big endian since the test below won't trigger.
                     */
                    retval = hfs_swap_BTNode (block, vp, kSwapBTNodeBigToHost, allow_empty_node);
                }
                else
                {
                    /*
                     * Block wasn't read from disk; it was found in the cache.
                     */
                    if (*((u_int16_t *)((char *)block->buffer + (block->blockSize - sizeof (u_int16_t)))) == 0x0e00) {
                        /*
                         * The node was left in the cache in non-native order, so swap it.
                         * This only happens on little endian, after the node is written
                         * to disk.
                         */
                        retval = hfs_swap_BTNode (block, vp, kSwapBTNodeBigToHost, allow_empty_node);
                    }
                    else if (*((u_int16_t *)((char *)block->buffer + (block->blockSize - sizeof (u_int16_t)))) == 0x000e) {
                        /*
                         * The node was in-cache in native-endianness.  We don't need to do
                         * anything here, because the node is ready to use.  Set retval == 0.
                         */
                        retval = 0;
                    }
                    /*
                     * If the node doesn't have hex 14 (0xe) in the last two bytes of the buffer,
                     * it doesn't necessarily mean that this is a bad node.  Zeroed nodes that are
                     * marked as unused in the b-tree map node would be OK and not have valid content.
                     */
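                    /*
                     * Background on the 0x000e / 0x0e00 test above: record offsets are
                     * stored as big-endian u_int16_t values at the end of a node, and the
                     * very last one is the offset of record 0, which always follows the
                     * 14-byte (0x000e) node descriptor.  On a little-endian host the raw
                     * on-disk bytes therefore read back as 0x0e00, while an
                     * already-swapped in-cache node reads 0x000e.
                     */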
                }
            }

            /*
             * If we got an error, the node is only partially swapped, so release
             * the buffer; the next attempt to get the node will re-read and
             * re-swap it.
             */
            if (retval) {
                lf_hfs_generic_buf_release(bp);

                block->blockHeader = NULL;
                block->buffer = NULL;
            }
        }
    }

    return (retval);
}
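
/*
 * ModifyBlockStart
 *
 * For journaled volumes, tell the journal that the buffer backing blockPtr is
 * about to be modified (journal_modify_block_start) and remember that fact in
 * blockPtr->isModified so ReleaseBTreeBlock can close out the journal entry.
 */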
void ModifyBlockStart(FileReference vp, BlockDescPtr blockPtr)
{
    struct hfsmount *hfsmp = VTOHFS(vp);
    GenericLFBuf *bp = NULL;

    if (hfsmp->jnl == NULL) {
        return;
    }

    bp = (GenericLFBuf *) blockPtr->blockHeader;
    if (bp == NULL) {
        LFHFS_LOG(LEVEL_ERROR, "ModifyBlockStart: null bp for blockdescptr %p?!?\n", blockPtr);
        hfs_assert(0);
        return;
    }

    journal_modify_block_start(hfsmp->jnl, bp);
    blockPtr->isModified = 1;
}
void
btree_swap_node(GenericLFBuf *bp, __unused void *arg)
{
    lf_hfs_generic_buf_lock(bp);

    if (!(bp->uCacheFlags & GEN_BUF_LITTLE_ENDIAN)) {
        goto exit;
    }

    //    struct hfsmount *hfsmp = (struct hfsmount *)arg;
    int retval;
    struct vnode *vp = bp->psVnode;
    BlockDescriptor block;

    /* Prepare the block pointer */
    block.blockHeader = bp;
    block.buffer = bp->pvData;
    block.blockNum = bp->uBlockN;
    block.blockReadFromDisk = !(bp->uCacheFlags & GEN_BUF_LITTLE_ENDIAN);
    block.blockSize = bp->uDataSize;

    /* Swap the data now that this node is ready to go to disk.
     * We allow swapping of zeroed out nodes here because we might
     * be writing a node whose last record just got deleted.
     */
    retval = hfs_swap_BTNode(&block, vp, kSwapBTNodeHostToBig, true);
    if (retval)
    {
        LFHFS_LOG(LEVEL_ERROR, "btree_swap_node: about to write corrupt node!\n");
        hfs_assert(0);
    }

exit:
    lf_hfs_generic_buf_unlock(bp);
}
static int
btree_journal_modify_block_end(struct hfsmount *hfsmp, GenericLFBuf *bp)
{
    return journal_modify_block_end(hfsmp->jnl, bp, btree_swap_node, hfsmp);
}
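
/*
 * journal_modify_block_end() is given btree_swap_node as a callback, so a
 * journaled B-tree node is byte-swapped back to big-endian (on-disk) order at
 * the moment the journal writes it out.  The journaled paths in
 * ReleaseBTreeBlock below rely on this; the non-journaled paths call
 * btree_swap_node() explicitly before writing.
 */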
OSStatus
ReleaseBTreeBlock(FileReference vp, BlockDescPtr blockPtr, ReleaseBlockOptions options)
{
    OSStatus        retval = E_NONE;
    GenericLFBufPtr bp = NULL;
    struct hfsmount *hfsmp = VTOHFS(vp);

    bp = (GenericLFBufPtr) blockPtr->blockHeader;

    if (bp == NULL) {
        retval = -1;
        goto exit;
    }
    if (options & kTrashBlock) {
        if (hfsmp->jnl && (bp->uCacheFlags & GEN_BUF_WRITE_LOCK))
        {
            journal_kill_block(hfsmp->jnl, bp);
        }
        else
        {
            lf_hfs_generic_buf_invalidate(bp);
        }

        /* Don't let anyone else try to use this bp, it's been consumed */
        blockPtr->blockHeader = NULL;

    } else {
        if (options & kForceWriteBlock) {
            if (hfsmp->jnl)
            {
                if (blockPtr->isModified == 0) {
                    LFHFS_LOG(LEVEL_ERROR, "releaseblock: modified is 0 but forcewrite set! bp %p\n", bp);
                    hfs_assert(0);
                }

                retval = btree_journal_modify_block_end(hfsmp, bp);
                blockPtr->isModified = 0;
            }
            else
            {
                btree_swap_node(bp, NULL);
                retval = lf_hfs_generic_buf_write(bp);
                lf_hfs_generic_buf_release(bp);
            }

            /* Don't let anyone else try to use this bp, it's been consumed */
            blockPtr->blockHeader = NULL;
        } else if (options & kMarkBlockDirty) {
            if ((options & kLockTransaction)
                && hfsmp->jnl == NULL)
            {
            }

            if (hfsmp->jnl)
            {
                if (blockPtr->isModified == 0) {
                    LFHFS_LOG(LEVEL_ERROR, "releaseblock: modified is 0 but markdirty set! bp %p\n", bp);
                    hfs_assert(0);
                }
                retval = btree_journal_modify_block_end(hfsmp, bp);
                blockPtr->isModified = 0;
            }
            else
            {
                btree_swap_node(bp, NULL);
                retval = lf_hfs_generic_buf_write(bp);
                lf_hfs_generic_buf_release(bp);
            }

            /* Don't let anyone else try to use this bp, it's been consumed */
            blockPtr->blockHeader = NULL;
        } else {
            btree_swap_node(bp, NULL);

            // check if we had previously called journal_modify_block_start()
            // on this block and if so, abort it (which will call buf_brelse()).
            if (hfsmp->jnl && blockPtr->isModified) {
                // XXXdbg - I don't want to call modify_block_abort()
                //          because I think it may be screwing up the
                //          journal and blowing away a block that has
                //          valid data in it.
                //
                //    journal_modify_block_abort(hfsmp->jnl, bp);
                //panic("hfs: releaseblock called for 0x%x but mod_block_start previously called.\n", bp);
                btree_journal_modify_block_end(hfsmp, bp);
                blockPtr->isModified = 0;
            } else {
                lf_hfs_generic_buf_release(bp);    /* note: B-tree code will clear blockPtr->blockHeader and blockPtr->buffer */
            }

            /* Don't let anyone else try to use this bp, it's been consumed */
            blockPtr->blockHeader = NULL;
        }
    }

exit:
    return (retval);
}
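
/*
 * ExtendBTreeFile
 *
 * Grow a B-tree file to at least minEOF bytes.  Allocations are kept
 * contiguous and trimmed back so the file always grows by a whole number of
 * B-tree nodes; the new nodes are zeroed on disk via ClearBTNodes().
 */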
OSStatus
ExtendBTreeFile(FileReference vp, FSSize minEOF, FSSize maxEOF)
{
#pragma unused (maxEOF)

    OSStatus     retval = 0, ret = 0;
    int64_t      actualBytesAdded, origSize;
    u_int64_t    bytesToAdd;
    u_int32_t    startAllocation;
    u_int32_t    fileblocks;
    u_int32_t    extendFlags;       /* Flags to pass to ExtendFileC */
    BTreeInfoRec btInfo;
    ExtendedVCB  *vcb;
    FCB          *filePtr;
    int64_t      trim = 0;
    int          lockflags = 0;
    filePtr = GetFileControlBlock(vp);

    if ( (off_t)minEOF > filePtr->fcbEOF )
    {
        bytesToAdd = minEOF - filePtr->fcbEOF;

        if (bytesToAdd < filePtr->ff_clumpsize)
            bytesToAdd = filePtr->ff_clumpsize;        //XXX why not always be a multiple of clump size?
    }
    else
    {
        return -1;
    }

    vcb = VTOVCB(vp);
    /*
     * The Extents B-tree can't have overflow extents. ExtendFileC will
     * return an error if an attempt is made to extend the Extents B-tree
     * when the resident extents are exhausted.
     */

    /* Protect allocation bitmap and extents overflow file. */
    lockflags = SFL_BITMAP;
    if (VTOC(vp)->c_fileid != kHFSExtentsFileID)
        lockflags |= SFL_EXTENTS;
    lockflags = hfs_systemfile_lock(vcb, lockflags, HFS_EXCLUSIVE_LOCK);

    (void) BTGetInformation(filePtr, 0, &btInfo);
    /*
     * The b-tree code expects nodes to be contiguous. So when
     * the allocation block size is less than the b-tree node
     * size, we need to force disk allocations to be contiguous.
     */
    if (vcb->blockSize >= btInfo.nodeSize) {
        extendFlags = 0;
    } else {
        /* Ensure that all b-tree nodes are contiguous on disk */
        extendFlags = kEFContigMask;
    }

    origSize = filePtr->fcbEOF;
    fileblocks = filePtr->ff_blocks;
    startAllocation = vcb->nextAllocation;
    // loop trying to get a contiguous chunk that's an integer multiple
    // of the btree node size.  if we can't get a contiguous chunk that
    // is at least the node size then we break out of the loop and let
    // the error propagate back up.
    while ((off_t)bytesToAdd >= btInfo.nodeSize) {
        do {
            retval = ExtendFileC(vcb, filePtr, bytesToAdd, 0,
                                 kEFContigMask | kEFMetadataMask | kEFNoClumpMask,
                                 (int64_t *)&actualBytesAdded);
            if (retval == dskFulErr && actualBytesAdded == 0) {
                bytesToAdd >>= 1;
                if (bytesToAdd < btInfo.nodeSize) {
                    break;
                } else if ((bytesToAdd % btInfo.nodeSize) != 0) {
                    // make sure it's an integer multiple of the nodeSize
                    bytesToAdd -= (bytesToAdd % btInfo.nodeSize);
                }
            }
        } while (retval == dskFulErr && actualBytesAdded == 0);
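        /*
         * The do-loop above halves the request (rounded down to a node-size
         * multiple) each time ExtendFileC() reports a full disk with nothing
         * allocated, giving up once the request would drop below one node.
         */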
        if (retval == dskFulErr && actualBytesAdded == 0 && bytesToAdd <= btInfo.nodeSize) {
            break;
        }

        filePtr->fcbEOF = (u_int64_t)filePtr->ff_blocks * (u_int64_t)vcb->blockSize;
        bytesToAdd = minEOF - filePtr->fcbEOF;
    }
    /*
     * If a new extent was added then move the roving allocator
     * reference forward by the current b-tree file size so
     * there's plenty of room to grow.
     */
    if ((retval == 0) &&
        ((VCBTOHFS(vcb)->hfs_flags & HFS_METADATA_ZONE) == 0) &&
        (vcb->nextAllocation > startAllocation) &&
        ((vcb->nextAllocation + fileblocks) < vcb->allocLimit)) {
        HFS_UPDATE_NEXT_ALLOCATION(vcb, vcb->nextAllocation + fileblocks);
    }

    filePtr->fcbEOF = (u_int64_t)filePtr->ff_blocks * (u_int64_t)vcb->blockSize;

    // XXXdbg ExtendFileC() could have returned an error even though
    // it grew the file to be big enough for our needs.  If this is
    // the case, we don't care about retval so we blow it away.
    if (filePtr->fcbEOF >= (off_t)minEOF && retval != 0) {
        retval = 0;
    }
    // XXXdbg if the file grew but isn't large enough or isn't an
    // even multiple of the nodeSize then trim things back.  if
    // the file isn't large enough we trim back to the original
    // size.  otherwise we trim back to be an even multiple of the
    // btree node size.
    if ((filePtr->fcbEOF < (off_t)minEOF) || ((filePtr->fcbEOF - origSize) % btInfo.nodeSize) != 0) {

        if (filePtr->fcbEOF < (off_t)minEOF) {
            retval = dskFulErr;

            if (filePtr->fcbEOF < origSize) {
                LFHFS_LOG(LEVEL_ERROR, "ExtendBTreeFile: btree file eof %lld less than orig size %lld!\n",
                          filePtr->fcbEOF, origSize);
                hfs_assert(0);
            }

            trim = filePtr->fcbEOF - origSize;
        } else {
            trim = ((filePtr->fcbEOF - origSize) % btInfo.nodeSize);
        }

        ret = TruncateFileC(vcb, filePtr, filePtr->fcbEOF - trim, 0, 0, FTOC(filePtr)->c_fileid, 0);
        filePtr->fcbEOF = (u_int64_t)filePtr->ff_blocks * (u_int64_t)vcb->blockSize;

        // XXXdbg - assert if the file didn't get trimmed back properly
        if ((filePtr->fcbEOF % btInfo.nodeSize) != 0) {
            LFHFS_LOG(LEVEL_ERROR, "ExtendBTreeFile: truncate file didn't! fcbEOF %lld nsize %d fcb %p\n",
                      filePtr->fcbEOF, btInfo.nodeSize, filePtr);
            hfs_assert(0);
        }

        if (ret) {
            LFHFS_LOG(LEVEL_ERROR, "ExtendBTreeFile: error truncating btree files (sz 0x%llx, trim %lld, ret %ld)\n",
                      filePtr->fcbEOF, trim, (long)ret);
            goto out;
        }
    }
    if (VTOC(vp)->c_fileid != kHFSExtentsFileID) {
        /*
         * Get any extents overflow b-tree changes to disk ASAP!
         */
        (void) BTFlushPath(VTOF(vcb->extentsRefNum));
        (void) hfs_fsync(vcb->extentsRefNum, MNT_WAIT, 0);
    }
    hfs_systemfile_unlock(vcb, lockflags);
    lockflags = 0;

    if ((filePtr->fcbEOF % btInfo.nodeSize) != 0) {
        LFHFS_LOG(LEVEL_ERROR, "extendbtree: fcb %p has eof 0x%llx not a multiple of 0x%x (trim %llx)\n",
                  filePtr, filePtr->fcbEOF, btInfo.nodeSize, trim);
        hfs_assert(0);
    }
    /*
     * Update the Alternate MDB or Alternate VolumeHeader
     */
    VTOC(vp)->c_flag |= C_MODIFIED;
    if ((VTOC(vp)->c_fileid == kHFSExtentsFileID)    ||
        (VTOC(vp)->c_fileid == kHFSCatalogFileID)    ||
        (VTOC(vp)->c_fileid == kHFSAttributesFileID)
        ) {
        MarkVCBDirty( vcb );
        (void) hfs_flushvolumeheader(VCBTOHFS(vcb), HFS_FVH_WRITE_ALT);
    } else {
        VTOC(vp)->c_touch_chgtime = TRUE;
        VTOC(vp)->c_touch_modtime = TRUE;
        (void) hfs_update(vp, 0);
    }

    ret = ClearBTNodes(vp, btInfo.nodeSize, origSize, (filePtr->fcbEOF - origSize));

out:
    if (retval == 0)
        retval = ret;

    if (lockflags)
        hfs_systemfile_unlock(vcb, lockflags);

    return retval;
}
/*
 * Clear out (zero) new b-tree nodes on disk.
 */
static int
ClearBTNodes(struct vnode *vp, int blksize, off_t offset, off_t amount)
{
    GenericLFBufPtr bp = NULL;
    daddr64_t blk;
    daddr64_t blkcnt;

    blk = offset / blksize;
    blkcnt = amount / blksize;

    while (blkcnt > 0) {
        bp = lf_hfs_generic_buf_allocate(vp, blk, blksize, GEN_BUF_NON_CACHED);
        if (bp == NULL)
            continue;

        bzero(bp->pvData, blksize);

        // XXXdbg -- skipping the journal since it makes a transaction
        //           become *way* too large
        lf_hfs_generic_buf_write(bp);
        lf_hfs_generic_buf_release(bp);

        --blkcnt;
        ++blk;
    }

    return (0);
}
extern char hfs_attrname[];

/*
 * Create an HFS+ Attribute B-tree File.
 *
 * No global resources should be held.
 */
int
hfs_create_attr_btree(struct hfsmount *hfsmp, u_int32_t nodesize, u_int32_t nodecnt)
{
    struct vnode *vp = NULL;
    struct cat_desc cndesc;
    struct cat_attr cnattr;
    struct cat_fork cfork;
    BlockDescriptor blkdesc;
    BTNodeDescriptor *ndp;
    BTHeaderRec *bthp;
    BTreeControlBlockPtr btcb = NULL;
    GenericLFBufPtr bp = NULL;
    void *buffer;
    u_int8_t *bitmap;
    u_int16_t *index;
    u_int32_t node_num, num_map_nodes;
    u_int32_t bytes_per_map_record;
    u_int32_t temp;
    u_int16_t offset;
    int result;
    int intrans = 0;
    int newvnode_flags = 0;
    /*
     * Serialize creation using HFS_CREATING_BTREE flag.
     */
    hfs_lock_mount (hfsmp);
    if (hfsmp->hfs_flags & HFS_CREATING_BTREE) {
        /* Someone else beat us, wait for them to finish. */
        hfs_unlock_mount (hfsmp);

        if (hfsmp->hfs_attribute_vp) {
            return (0);
        }
        return (EEXIST);
    }
    hfsmp->hfs_flags |= HFS_CREATING_BTREE;
    hfs_unlock_mount (hfsmp);
    /* Check if we're out of usable disk space. */
    if ((hfs_freeblks(hfsmp, 1) == 0)) {
        result = ENOSPC;
        goto exit;
    }
    /*
     * Set up Attribute B-tree vnode
     * (this must be done before we start a transaction
     *  or take any system file locks)
     */
    bzero(&cndesc, sizeof(cndesc));
    cndesc.cd_parentcnid = kHFSRootParentID;
    cndesc.cd_flags |= CD_ISMETA;
    cndesc.cd_nameptr = (const u_int8_t *)hfs_attrname;
    cndesc.cd_namelen = strlen(hfs_attrname);
    cndesc.cd_cnid = kHFSAttributesFileID;

    bzero(&cnattr, sizeof(cnattr));
    cnattr.ca_linkcount = 1;
    cnattr.ca_mode = S_IFREG;
    cnattr.ca_fileid = cndesc.cd_cnid;

    bzero(&cfork, sizeof(cfork));
    cfork.cf_clump = nodesize * nodecnt;

    result = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr,
                             &cfork, &vp, &newvnode_flags);
    if (result) {
        goto exit;
    }
    /*
     * Set up Attribute B-tree control block
     */
    btcb = hfs_mallocz(sizeof(*btcb));

    btcb->nodeSize          = nodesize;
    btcb->maxKeyLength      = kHFSPlusAttrKeyMaximumLength;
    btcb->btreeType         = 0xFF;
    btcb->attributes        = kBTVariableIndexKeysMask | kBTBigKeysMask;
    btcb->version           = kBTreeVersion;
    btcb->writeCount        = 1;
    btcb->flags             = 0;  /* kBTHeaderDirty */
    btcb->fileRefNum        = vp;
    btcb->getBlockProc      = GetBTreeBlock;
    btcb->releaseBlockProc  = ReleaseBTreeBlock;
    btcb->setEndOfForkProc  = ExtendBTreeFile;
    btcb->keyCompareProc    = (KeyCompareProcPtr)hfs_attrkeycompare;

    /*
     * NOTE: We must make sure to zero out this pointer if we error out in this function!
     * If we don't, then unmount will treat it as a valid pointer which can lead to a
     * use-after-free.
     */
    VTOF(vp)->fcbBTCBPtr = btcb;
    /*
     * Allocate some space
     */
    if (hfs_start_transaction(hfsmp) != 0) {
        result = EINVAL;
        goto exit;
    }
    intrans = 1;

    /* Note ExtendBTreeFile will acquire the necessary system file locks. */
    result = ExtendBTreeFile(vp, nodesize, cfork.cf_clump);
    if (result)
        goto exit;

    btcb->totalNodes = (u_int32_t)(VTOF(vp)->ff_size) / nodesize;
    /*
     * Figure out how many map nodes we'll need.
     *
     * bytes_per_map_record = the number of bytes in the map record of a
     * map node.  Since that is the only record in the node, it is the size
     * of the node minus the node descriptor at the start, and two record
     * offsets at the end of the node.  The "- 2" is to round the size down
     * to a multiple of 4 bytes (since sizeof(BTNodeDescriptor) is not a
     * multiple of 4).
     *
     * The value "temp" here is the number of *bits* in the map record of
     * the header node.
     */
    bytes_per_map_record = nodesize - sizeof(BTNodeDescriptor) - 2*sizeof(u_int16_t) - 2;
    temp = 8 * (nodesize - sizeof(BTNodeDescriptor)
                - sizeof(BTHeaderRec)
                - kBTreeHeaderUserBytes
                - 4 * sizeof(u_int16_t));
    if (btcb->totalNodes > temp) {
        num_map_nodes = howmany(btcb->totalNodes - temp, bytes_per_map_record * 8);
    } else {
        num_map_nodes = 0;
    }

    btcb->freeNodes = btcb->totalNodes - 1 - num_map_nodes;
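
    /*
     * Worked example (illustrative only): assuming the usual on-disk sizes
     * sizeof(BTNodeDescriptor) == 14, sizeof(BTHeaderRec) == 106 and
     * kBTreeHeaderUserBytes == 128, an 8192-byte node gives
     *
     *     bytes_per_map_record = 8192 - 14 - 4 - 2               = 8172 bytes
     *     temp                 = 8 * (8192 - 14 - 106 - 128 - 8) = 63488 bits
     *
     * so no map nodes are needed until the tree holds more than 63488 nodes,
     * and each additional map node then tracks 8 * 8172 = 65376 more nodes.
     */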
    /*
     * Initialize the b-tree header on disk
     */
    bp = lf_hfs_generic_buf_allocate(vp, 0, btcb->nodeSize, 0);
    if (bp == NULL) {
        result = EIO;
        goto exit;
    }

    buffer = (void *)bp->pvData;
    blkdesc.buffer = buffer;
    blkdesc.blockHeader = (void *)bp;
    blkdesc.blockReadFromDisk = 0;
    blkdesc.isModified = 0;

    ModifyBlockStart(vp, &blkdesc);
    if (bp->uDataSize != nodesize)
    {
        LFHFS_LOG(LEVEL_ERROR, "hfs_create_attr_btree: bad buffer size (%u)\n", bp->uDataSize);
        hfs_assert(0);
    }

    bzero(buffer, nodesize);
    index = (u_int16_t *)buffer;
    /* FILL IN THE NODE DESCRIPTOR:  */
    ndp = (BTNodeDescriptor *)buffer;
    if (num_map_nodes != 0)
        ndp->fLink = 1;
    ndp->kind = kBTHeaderNode;
    ndp->numRecords = 3;
    offset = sizeof(BTNodeDescriptor);
    index[(nodesize / 2) - 1] = offset;
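
    /*
     * Record offsets are 16-bit values packed at the very end of the node and
     * growing backwards: index[(nodesize / 2) - 1] is the last u_int16_t in the
     * node and holds the offset of record 0 (the header record),
     * index[(nodesize / 2) - 2] holds the offset of record 1, and so on.
     */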
    /* FILL IN THE HEADER RECORD:  */
    bthp = (BTHeaderRec *)((u_int8_t *)buffer + offset);
    bthp->nodeSize       = nodesize;
    bthp->totalNodes     = btcb->totalNodes;
    bthp->freeNodes      = btcb->freeNodes;
    bthp->clumpSize      = cfork.cf_clump;
    bthp->btreeType      = 0xFF;
    bthp->attributes     = kBTVariableIndexKeysMask | kBTBigKeysMask;
    bthp->maxKeyLength   = kHFSPlusAttrKeyMaximumLength;
    bthp->keyCompareType = kHFSBinaryCompare;
    offset += sizeof(BTHeaderRec);
    index[(nodesize / 2) - 2] = offset;

    /* FILL IN THE USER RECORD:  */
    offset += kBTreeHeaderUserBytes;
    index[(nodesize / 2) - 3] = offset;
    /* Mark the header node and map nodes in use in the map record.
     *
     * NOTE: Assumes that the header node's map record has at least
     * (num_map_nodes + 1) bits.
     */
    bitmap = (u_int8_t *) buffer + offset;
    temp = num_map_nodes + 1;    /* +1 for the header node */
    while (temp >= 8) {
        *(bitmap++) = 0xFF;
        temp -= 8;
    }
    *bitmap = ~(0xFF >> temp);
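
    /*
     * For example, with a single map node temp starts at 2, so the final
     * assignment stores ~(0xFF >> 2) == 0xC0: the two most significant bits of
     * the first map byte are set, marking node 0 (this header node) and node 1
     * (the map node) as in use.
     */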
    offset += nodesize - sizeof(BTNodeDescriptor) - sizeof(BTHeaderRec)
              - kBTreeHeaderUserBytes - (4 * sizeof(int16_t));
    index[(nodesize / 2) - 4] = offset;
    if (hfsmp->jnl)
    {
        result = btree_journal_modify_block_end(hfsmp, bp);
    }
    else
    {
        result = lf_hfs_generic_buf_write(bp);
        lf_hfs_generic_buf_release(bp);
    }
    if (result)
        goto exit;
    /* Create the map nodes: node numbers 1 .. num_map_nodes */
    for (node_num = 1; node_num <= num_map_nodes; ++node_num) {
        bp = lf_hfs_generic_buf_allocate(vp, node_num, btcb->nodeSize, 0);
        if (bp == NULL) {
            result = EIO;
            goto exit;
        }
        buffer = (void *)bp->pvData;
        blkdesc.buffer = buffer;
        blkdesc.blockHeader = (void *)bp;
        blkdesc.blockReadFromDisk = 0;
        blkdesc.isModified = 0;

        ModifyBlockStart(vp, &blkdesc);
        bzero(buffer, nodesize);
        index = (u_int16_t *)buffer;

        /* Fill in the node descriptor */
        ndp = (BTNodeDescriptor *)buffer;
        if (node_num != num_map_nodes)
            ndp->fLink = node_num + 1;
        ndp->kind = kBTMapNode;
        ndp->numRecords = 1;
        offset = sizeof(BTNodeDescriptor);
        index[(nodesize / 2) - 1] = offset;

        /* Fill in the map record's offset */
        /* Note: We assume that the map record is all zeroes */
        offset = sizeof(BTNodeDescriptor) + bytes_per_map_record;
        index[(nodesize / 2) - 2] = offset;
        if (hfsmp->jnl)
        {
            result = btree_journal_modify_block_end(hfsmp, bp);
        }
        else
        {
            result = lf_hfs_generic_buf_write(bp);
            lf_hfs_generic_buf_release(bp);
        }
        if (result)
            goto exit;
    }
    /* Update vp/cp for attribute btree */
    hfs_lock_mount (hfsmp);
    hfsmp->hfs_attribute_cp = VTOC(vp);
    hfsmp->hfs_attribute_vp = vp;
    hfs_unlock_mount (hfsmp);

    (void) hfs_flushvolumeheader(hfsmp, HFS_FVH_WRITE_ALT);

    if (intrans) {
        hfs_end_transaction(hfsmp);
        intrans = 0;
    }
    /* Initialize the vnode for virtual attribute data file */
    result = init_attrdata_vnode(hfsmp);
    if (result) {
        LFHFS_LOG(LEVEL_ERROR, "hfs_create_attr_btree: vol=%s init_attrdata_vnode() error=%d\n",
                  hfsmp->vcbVN, result);
    }
exit:

    if (vp && result) {
        /*
         * If we're about to error out, then make sure to zero out the B-Tree control block pointer
         * from the filefork of the EA B-Tree cnode/vnode. Failing to do this will lead to a use
         * after free at unmount or BTFlushPath. Since we're about to error out anyway, this memory
         * will be freed as part of the cleanup below.
         */
        VTOF(vp)->fcbBTCBPtr = NULL;
    }

    if (vp) {
        hfs_unlock(VTOC(vp));
    }
    if (result) {
        if (vp) {
            hfs_vnop_reclaim(vp);
        }
        /* XXX need to give back blocks ? */
    }
    if (intrans) {
        hfs_end_transaction(hfsmp);
    }
    /*
     * All done, clear HFS_CREATING_BTREE, and wake up any sleepers.
     */
    hfs_lock_mount (hfsmp);
    hfsmp->hfs_flags &= ~HFS_CREATING_BTREE;
    hfs_unlock_mount (hfsmp);

    return (result);
}