//
//  lf_hfs_btree_node_reserve.c
//  livefiles_hfs
//
//  Created by Yakov Ben Zaken on 22/03/2018.
//

#include <stdio.h>
#include "lf_hfs_btrees_private.h"
#include "lf_hfs_utils.h"
#include "lf_hfs_vfsutils.h"

/*
 * B-tree Node Reserve
 *
 * BTReserveSpace
 * BTReleaseReserve
 * BTUpdateReserve
 *
 * Each kernel thread can have its own reserve of b-tree
 * nodes. This reserve info is kept in a hash table.
 *
 * Don't forget to call BTReleaseReserve when you're finished,
 * or you will leave stale node reserves in the hash.
 */
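
/*
 * Typical usage, as an illustrative sketch only (the real call sites
 * live in hfs_catalog.c and may differ; "fcb" here is a stand-in for
 * the catalog file's FCB):
 *
 *     cat_cookie_t cookie = { 0 };
 *
 *     // Reserve nodes for 2 inserts and 1 delete; see BTReserveSpace
 *     // for how the operations argument is packed.
 *     int error = BTReserveSpace(fcb, (1 << 16) | 2, &cookie);
 *     if (error == 0) {
 *         // ... perform the b-tree operations ...
 *         (void) BTReleaseReserve(fcb, &cookie);   // always release
 *     }
 *
 * Passing NULL for the cookie only checks that enough space exists,
 * without recording a reserve.
 */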


/*
 * BE CAREFUL WHEN INCREASING THE SIZE OF THIS STRUCT!
 *
 * It must remain equal in size to the opaque cat_cookie_t
 * struct (in hfs_catalog.h).
 */
struct nreserve {
    LIST_ENTRY(nreserve) nr_hash;      /* hash chain */
    int                  nr_nodecnt;   /* count of nodes held in reserve */
    int                  nr_newnodes;  /* nodes that were allocated */
    struct vnode        *nr_btvp;      /* b-tree file vnode */
    void                *nr_tag;       /* unique tag (per thread) */
};

#define NR_GET_TAG()    (pthread_self())

#define NR_CACHE 17

#define NR_HASH(btvp, tag) \
    (&nr_hashtbl[((((intptr_t)(btvp)) >> 8) ^ ((intptr_t)(tag) >> 4)) & nr_hashmask])
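
/*
 * Note on NR_HASH (a reading of the macro above, not taken from the
 * original sources): both inputs are pointer-sized values whose low
 * bits are largely constant due to allocation alignment, so the vnode
 * pointer is shifted right by 8 and the thread tag by 4 before the two
 * are XORed together and masked down to a bucket index.
 */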

LIST_HEAD(nodereserve, nreserve) *nr_hashtbl;

u_long nr_hashmask;

pthread_mutex_t nr_mutex;

/* Internal Node Reserve Hash Routines (private) */
static void nr_insert(struct vnode *, struct nreserve *nrp, int);
static void nr_delete(struct vnode *, struct nreserve *nrp, int *);
static void nr_update(struct vnode *, int);


/*
 * BTReserveSetup - initialize the node reserve hash table
 */
void BTReserveSetup(void)
{
    if (sizeof(struct nreserve) != sizeof(cat_cookie_t))
    {
        LFHFS_LOG(LEVEL_ERROR, "BTReserveSetup: nreserve size != opaque struct size");
        hfs_assert(0);
    }

    nr_hashtbl = hashinit(NR_CACHE, &nr_hashmask);

    lf_lck_mtx_init(&nr_mutex);
}


/*
 * BTReserveSpace - obtain a node reserve (for current thread)
 *
 * Used by the Catalog Layer (hfs_catalog.c) to reserve space.
 *
 * When data is NULL, we only ensure that there's enough space,
 * but nothing is actually reserved (this assumes the caller keeps
 * the b-tree lock held).
 */
int
BTReserveSpace(FCB *file, int operations, void* data)
{
    BTreeControlBlock *btree;
    int rsrvNodes, availNodes, totalNodes;
    int height;
    int inserts, deletes;
    u_int32_t clumpsize;
    int err = 0;

    btree = (BTreeControlBlockPtr)file->fcbBTCBPtr;
    clumpsize = file->ff_clumpsize;

    REQUIRE_FILE_LOCK(btree->fileRefNum, true);

    /*
     * The node reserve is based on the number of b-tree
     * operations (inserts/deletes) and the height of the
     * tree.
     */
    height = btree->treeDepth;
    if (height < 2)
        height = 2;  /* prevent underflow in rsrvNodes calculation */
    inserts = operations & 0xffff;
    deletes = operations >> 16;
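
    /*
     * For illustration (inferred from the decode above, not from the
     * original sources): the operations argument packs both counts into
     * a single int, inserts in the low 16 bits and deletes in the high
     * 16 bits, so 2 inserts plus 1 delete is (1 << 16) | 2 == 0x00010002.
     */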

    /*
     * Allow for at least one root split.
     *
     * Each delete operation can propagate a big key up the
     * index. This can cause a split at each level up.
     *
     * Each insert operation can cause a local split and a
     * split at each level up.
     */
    rsrvNodes = 1 + (deletes * (height - 2)) + (inserts * (height - 1));
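
    /*
     * Worked example (illustrative numbers only): for a tree of
     * height 3 with 2 inserts and 1 delete,
     * rsrvNodes = 1 + (1 * (3 - 2)) + (2 * (3 - 1)) = 6.
     */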

    availNodes = btree->freeNodes - btree->reservedNodes;

    if (rsrvNodes > availNodes) {
        u_int32_t reqblks, freeblks, rsrvblks;
        uint32_t bt_rsrv;
        struct hfsmount *hfsmp;

        /*
         * For UNIX conformance, we try to reserve the smaller of 5% of
         * the total file blocks or 10MB worth of blocks for growing
         * existing files. On non-HFS filesystems, creating a new
         * directory entry may not cause additional disk space to be
         * allocated, but on HFS, creating a new entry could cause the
         * b-tree to grow. As a result, we take some precautions here
         * to prevent that on configurations that try to satisfy
         * conformance.
         */
        hfsmp = VTOVCB(btree->fileRefNum);
        rsrvblks = (uint32_t)(((u_int64_t)hfsmp->allocLimit * 5) / 100);
        if (hfsmp->blockSize > HFS_BT_MAXRESERVE) {
            bt_rsrv = 1;
        }
        else {
            bt_rsrv = (HFS_BT_MAXRESERVE / hfsmp->blockSize);
        }
        rsrvblks = MIN(rsrvblks, bt_rsrv);
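
        /*
         * Worked example (illustrative, and assuming per the comment
         * above that HFS_BT_MAXRESERVE is 10MB): with 4KB allocation
         * blocks, bt_rsrv = 10MB / 4KB = 2560 blocks, so any volume
         * whose 5% share exceeds 2560 blocks has its reserve capped
         * at 10MB worth of blocks.
         */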

        freeblks = hfs_freeblks(hfsmp, 0);
        if (freeblks <= rsrvblks) {
            /* When running low, disallow adding new items. */
            if ((inserts > 0) && (deletes == 0)) {
                return (ENOSPC);
            }
            freeblks = 0;
        } else {
            freeblks -= rsrvblks;
        }
        reqblks = clumpsize / hfsmp->blockSize;

        if (reqblks > freeblks) {
            reqblks = ((rsrvNodes - availNodes) * btree->nodeSize) / hfsmp->blockSize;
            /* When running low, disallow adding new items. */
            if ((reqblks > freeblks) && (inserts > 0) && (deletes == 0)) {
                return (ENOSPC);
            }
            file->ff_clumpsize = freeblks * hfsmp->blockSize;
        }
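
        /*
         * Note (a reading of the code above, not from the original
         * sources): shrinking ff_clumpsize caps how far ExtendBTree
         * can grow the file when a full clump would not fit in the
         * remaining free space; the original clump size is restored
         * at "out" below.
         */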
        totalNodes = rsrvNodes + btree->totalNodes - availNodes;

        /* See if we also need a map node */
        if (totalNodes > (int)CalcMapBits(btree)) {
            ++totalNodes;
        }
        if ((err = ExtendBTree(btree, totalNodes))) {
            goto out;
        }
    }
    /* Save this reserve if this is a persistent request. */
    if (data) {
        btree->reservedNodes += rsrvNodes;
        nr_insert(btree->fileRefNum, (struct nreserve *)data, rsrvNodes);
    }
out:
    /* Put clump size back if it was changed. */
    if (file->ff_clumpsize != clumpsize)
        file->ff_clumpsize = clumpsize;

    return (err);
}


/*
 * BTReleaseReserve - release the node reserve held by current thread
 *
 * Used by the Catalog Layer (hfs_catalog.c) to relinquish reserved space.
 */
int
BTReleaseReserve(FCB *file, void* data)
{
    BTreeControlBlock *btree;
    int nodecnt;

    btree = (BTreeControlBlockPtr)file->fcbBTCBPtr;

    REQUIRE_FILE_LOCK(btree->fileRefNum, true);

    nr_delete(btree->fileRefNum, (struct nreserve *)data, &nodecnt);

    if (nodecnt)
        btree->reservedNodes -= nodecnt;

    return (0);
}

/*
 * BTUpdateReserve - update a node reserve for allocations that occurred.
 */
void
BTUpdateReserve(BTreeControlBlockPtr btreePtr, int nodes)
{
    nr_update(btreePtr->fileRefNum, nodes);
}


/*----------------------------------------------------------------------------*/
/* Node Reserve Hash Functions (private) */


int nrinserts = 0;
int nrdeletes = 0;

/*
 * Insert a new node reserve.
 */
static void
nr_insert(struct vnode * btvp, struct nreserve *nrp, int nodecnt)
{
    struct nodereserve *nrhead;
    struct nreserve *tmp_nrp;
    void * tag = NR_GET_TAG();

    /*
     * Check the hash - this thread may already hold a reserve
     * for this b-tree.
     */
    lf_lck_mtx_lock(&nr_mutex);
    nrhead = NR_HASH(btvp, tag);
    for (tmp_nrp = nrhead->lh_first; tmp_nrp;
         tmp_nrp = tmp_nrp->nr_hash.le_next) {
        if ((tmp_nrp->nr_tag == tag) && (tmp_nrp->nr_btvp == btvp)) {
            /*
             * Fold the count into the existing reserve and mark the
             * caller's cookie unused (nr_delete treats it as empty).
             */
            nrp->nr_tag = 0;
            tmp_nrp->nr_nodecnt += nodecnt;
            lf_lck_mtx_unlock(&nr_mutex);
            return;
        }
    }

    nrp->nr_nodecnt = nodecnt;
    nrp->nr_newnodes = 0;
    nrp->nr_btvp = btvp;
    nrp->nr_tag = tag;
    LIST_INSERT_HEAD(nrhead, nrp, nr_hash);
    ++nrinserts;
    lf_lck_mtx_unlock(&nr_mutex);
}

/*
 * Delete a node reserve.
 */
static void
nr_delete(struct vnode * btvp, struct nreserve *nrp, int *nodecnt)
{
    void * tag = NR_GET_TAG();

    lf_lck_mtx_lock(&nr_mutex);
    if (nrp->nr_tag) {
        if ((nrp->nr_tag != tag) || (nrp->nr_btvp != btvp))
        {
            LFHFS_LOG(LEVEL_ERROR, "nr_delete: invalid NR (%p)", nrp);
            hfs_assert(0);
        }
        LIST_REMOVE(nrp, nr_hash);
        *nodecnt = nrp->nr_nodecnt;
        bzero(nrp, sizeof(struct nreserve));
        ++nrdeletes;
    } else {
        *nodecnt = 0;
    }
    lf_lck_mtx_unlock(&nr_mutex);
}


/*
 * Update a node reserve for any allocations that occurred.
 */
static void
nr_update(struct vnode * btvp, int nodecnt)
{
    struct nodereserve *nrhead;
    struct nreserve *nrp;
    void* tag = NR_GET_TAG();

    lf_lck_mtx_lock(&nr_mutex);

    nrhead = NR_HASH(btvp, tag);
    for (nrp = nrhead->lh_first; nrp; nrp = nrp->nr_hash.le_next) {
        if ((nrp->nr_tag == tag) && (nrp->nr_btvp == btvp)) {
            nrp->nr_newnodes += nodecnt;
            break;
        }
    }
    lf_lck_mtx_unlock(&nr_mutex);
}