//
//  lf_hfs_btree_node_reserve.c
//
//  Created by Yakov Ben Zaken on 22/03/2018.
//

#include <pthread.h>

#include "lf_hfs_btrees_private.h"
#include "lf_hfs_utils.h"
#include "lf_hfs_vfsutils.h"
/*
 * B-tree Node Reserve
 *
 * Each kernel thread can have its own reserve of b-tree
 * nodes. This reserve info is kept in a hash table.
 *
 * Don't forget to call BTReleaseReserve when you're finished
 * or you will leave stale node reserves in the hash.
 */
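
/*
 * Typical call sequence (an illustrative sketch only; the real caller
 * is the catalog layer, and error handling is elided). "fcb" is
 * hypothetical here; the cookie is the opaque cat_cookie_t that
 * overlays struct nreserve below:
 *
 *     cat_cookie_t cookie = { 0 };
 *     // low 16 bits of operations = inserts, high 16 bits = deletes
 *     if (BTReserveSpace(fcb, (1 << 16) | 2, &cookie) == 0) {
 *         // ... perform the b-tree insert/delete operations ...
 *         BTReleaseReserve(fcb, &cookie);
 *     }
 */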
/*
 * BE CAREFUL WHEN INCREASING THE SIZE OF THIS STRUCT!
 *
 * It must remain equal in size to the opaque cat_cookie_t
 * struct (in hfs_catalog.h).
 */
struct nreserve {
    LIST_ENTRY(nreserve) nr_hash;   /* hash chain */
    int            nr_nodecnt;      /* count of nodes held in reserve */
    int            nr_newnodes;     /* nodes that were allocated */
    struct vnode  *nr_btvp;         /* b-tree file vnode */
    void          *nr_tag;          /* unique tag (per thread) */
};
#define NR_GET_TAG()    (pthread_self())

#define NR_CACHE    17      /* initial size hint for the reserve hash table */
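
/*
 * Hash on the b-tree vnode and the per-thread tag. The right shifts
 * drop the low-order bits, which carry little entropy for aligned
 * pointers and thread IDs.
 */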
#define NR_HASH(btvp, tag) \
    (&nr_hashtbl[((((intptr_t)(btvp)) >> 8) ^ ((intptr_t)(tag) >> 4)) & nr_hashmask])
LIST_HEAD(nodereserve, nreserve) *nr_hashtbl;

u_long nr_hashmask;     /* filled in by hashinit() */

pthread_mutex_t nr_mutex;
/* Internal Node Reserve Hash Routines (private) */
static void nr_insert(struct vnode *, struct nreserve *nrp, int);
static void nr_delete(struct vnode *, struct nreserve *nrp, int *);
static void nr_update(struct vnode *, int);
/*
 * BTReserveSetup - initialize the node reserve hash table
 */
void BTReserveSetup(void)
{
    if (sizeof(struct nreserve) != sizeof(cat_cookie_t))
    {
        LFHFS_LOG(LEVEL_ERROR, "BTReserveSetup: nreserve size != opaque struct size");
        hfs_assert(0);
    }

    nr_hashtbl = hashinit(NR_CACHE, &nr_hashmask);

    lf_lck_mtx_init(&nr_mutex);
}
/*
 * BTReserveSpace - obtain a node reserve (for current thread)
 *
 * Used by the Catalog Layer (hfs_catalog.c) to reserve space.
 *
 * When data is NULL, we only ensure that there's enough space
 * but it is not reserved (assumes you keep the b-tree lock).
 */
int
BTReserveSpace(FCB *file, int operations, void* data)
{
    BTreeControlBlock *btree;
    int rsrvNodes, availNodes, totalNodes;
    int height;
    int inserts, deletes;
    u_int32_t clumpsize;
    int err = 0;

    btree = (BTreeControlBlockPtr)file->fcbBTCBPtr;
    clumpsize = file->ff_clumpsize;
    REQUIRE_FILE_LOCK(btree->fileRefNum, true);
    /*
     * The node reserve is based on the number of b-tree
     * operations (inserts/deletes) and the height of the
     * tree.
     */
    height = btree->treeDepth;
    if (height < 2)
        height = 2;  /* prevent underflow in rsrvNodes calculation */
    inserts = operations & 0xffff;
    deletes = operations >> 16;
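
    /*
     * For example (illustrative), operations == (1 << 16) | 2 packs
     * one delete into the high 16 bits and two inserts into the low 16.
     */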
    /*
     * Allow for at least one root split.
     *
     * Each delete operation can propagate a big key up the
     * index. This can cause a split at each level up.
     *
     * Each insert operation can cause a local split and a
     * split at each level up.
     */
    rsrvNodes = 1 + (deletes * (height - 2)) + (inserts * (height - 1));
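
    /*
     * Worked example (illustrative): for a tree of height 4 with two
     * inserts and one delete, rsrvNodes = 1 + 1*(4-2) + 2*(4-1) = 9.
     */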
    availNodes = btree->freeNodes - btree->reservedNodes;
    if (rsrvNodes > availNodes) {
        u_int32_t reqblks, freeblks, rsrvblks;
        u_int32_t bt_rsrv;
        struct hfsmount *hfsmp;
        /*
         * For UNIX conformance, we try and reserve the MIN of either 5% of
         * total file blocks or 10MB worth of blocks, for growing existing
         * files. On non-HFS filesystems, creating a new directory entry may
         * not cause additional disk space to be allocated, but on HFS, creating
         * a new entry could cause the b-tree to grow. As a result, we take
         * some precautions here to prevent that on configurations that try to
         * satisfy conformance.
         */
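
        /*
         * Illustrative numbers (assuming HFS_BT_MAXRESERVE is the 10MB
         * cap mentioned above): with a 4KB allocation block size,
         * bt_rsrv = 10MB / 4KB = 2560 blocks, and rsrvblks below ends
         * up as the smaller of that and 5% of allocLimit.
         */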
        hfsmp = VTOVCB(btree->fileRefNum);
        rsrvblks = (uint32_t)(((u_int64_t)hfsmp->allocLimit * 5) / 100);
        if (hfsmp->blockSize > HFS_BT_MAXRESERVE) {
            bt_rsrv = 1;
        } else {
            bt_rsrv = (HFS_BT_MAXRESERVE / hfsmp->blockSize);
        }
        rsrvblks = MIN(rsrvblks, bt_rsrv);

        freeblks = hfs_freeblks(hfsmp, 0);
        if (freeblks <= rsrvblks) {
            /* When running low, disallow adding new items. */
            if ((inserts > 0) && (deletes == 0)) {
                return (ENOSPC);
            }
            freeblks = 0;
        } else {
            freeblks -= rsrvblks;
        }
        reqblks = clumpsize / hfsmp->blockSize;

        if (reqblks > freeblks) {
            reqblks = ((rsrvNodes - availNodes) * btree->nodeSize) / hfsmp->blockSize;
            /* When running low, disallow adding new items. */
            if ((reqblks > freeblks) && (inserts > 0) && (deletes == 0)) {
                return (ENOSPC);
            }
            /* Shrink the clump size to what little space remains. */
            file->ff_clumpsize = freeblks * hfsmp->blockSize;
        }
        totalNodes = rsrvNodes + btree->totalNodes - availNodes;

        /* See if we also need a map node */
        if (totalNodes > (int)CalcMapBits(btree)) {
            ++totalNodes;
        }
        if ((err = ExtendBTree(btree, totalNodes))) {
            goto exit;
        }
    }
    /* Save this reserve if this is a persistent request. */
    if (data) {
        btree->reservedNodes += rsrvNodes;
        nr_insert(btree->fileRefNum, (struct nreserve *)data, rsrvNodes);
    }
exit:
    /* Put clump size back if it was changed. */
    if (file->ff_clumpsize != clumpsize)
        file->ff_clumpsize = clumpsize;

    return (err);
}
/*
 * BTReleaseReserve - release the node reserve held by current thread
 *
 * Used by the Catalog Layer (hfs_catalog.c) to relinquish reserved space.
 */
int
BTReleaseReserve(FCB *file, void* data)
{
    BTreeControlBlock *btree;
    int nodecnt;

    btree = (BTreeControlBlockPtr)file->fcbBTCBPtr;

    REQUIRE_FILE_LOCK(btree->fileRefNum, true);

    nr_delete(btree->fileRefNum, (struct nreserve *)data, &nodecnt);

    if (nodecnt)
        btree->reservedNodes -= nodecnt;

    return (0);
}
/*
 * BTUpdateReserve - update a node reserve for allocations that occurred.
 */
void
BTUpdateReserve(BTreeControlBlockPtr btreePtr, int nodes)
{
    nr_update(btreePtr->fileRefNum, nodes);
}
/*----------------------------------------------------------------------------*/
/* Node Reserve Hash Functions (private) */
/*
 * Insert a new node reserve.
 */
static void
nr_insert(struct vnode * btvp, struct nreserve *nrp, int nodecnt)
{
    struct nodereserve *nrhead;
    struct nreserve *tmp_nrp;
    void * tag = NR_GET_TAG();
    /*
     * Check the cache - there may already be a reserve
     */
    lf_lck_mtx_lock(&nr_mutex);
    nrhead = NR_HASH(btvp, tag);
    for (tmp_nrp = nrhead->lh_first; tmp_nrp;
         tmp_nrp = tmp_nrp->nr_hash.le_next) {
        if ((tmp_nrp->nr_tag == tag) && (tmp_nrp->nr_btvp == btvp)) {
            /* Merge into the existing reserve; mark this cookie unused. */
            nrp->nr_tag = 0;
            tmp_nrp->nr_nodecnt += nodecnt;
            lf_lck_mtx_unlock(&nr_mutex);
            return;
        }
    }
    nrp->nr_nodecnt = nodecnt;
    nrp->nr_newnodes = 0;
    nrp->nr_btvp = btvp;
    nrp->nr_tag = tag;
    LIST_INSERT_HEAD(nrhead, nrp, nr_hash);
    lf_lck_mtx_unlock(&nr_mutex);
}
/*
 * Delete a node reserve.
 */
static void
nr_delete(struct vnode * btvp, struct nreserve *nrp, int *nodecnt)
{
    void * tag = NR_GET_TAG();
    lf_lck_mtx_lock(&nr_mutex);
    if (nrp->nr_tag) {
        if ((nrp->nr_tag != tag) || (nrp->nr_btvp != btvp))
        {
            LFHFS_LOG(LEVEL_ERROR, "nr_delete: invalid NR (%p)", nrp);
            hfs_assert(0);
        }
        LIST_REMOVE(nrp, nr_hash);
        *nodecnt = nrp->nr_nodecnt;
        bzero(nrp, sizeof(struct nreserve));
    } else {
        /* A zero tag means this cookie was never linked into the hash. */
        *nodecnt = 0;
    }
    lf_lck_mtx_unlock(&nr_mutex);
}
/*
 * Update a node reserve for any allocations that occurred.
 */
static void
nr_update(struct vnode * btvp, int nodecnt)
{
    struct nodereserve *nrhead;
    struct nreserve *nrp;
    void* tag = NR_GET_TAG();
    lf_lck_mtx_lock(&nr_mutex);

    nrhead = NR_HASH(btvp, tag);
    for (nrp = nrhead->lh_first; nrp; nrp = nrp->nr_hash.le_next) {
        if ((nrp->nr_tag == tag) && (nrp->nr_btvp == btvp)) {
            nrp->nr_newnodes += nodecnt;
            break;
        }
    }
    lf_lck_mtx_unlock(&nr_mutex);
}