/*
 * Copyright (c) 2004-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include "BTreesPrivate.h"
#include "sys/malloc.h"
#include <kern/locks.h>


/*
 * B-tree Node Reserve
 *
 * BTReserveSpace
 * BTReleaseReserve
 * BTUpdateReserve
 *
 * Each kernel thread can have its own reserve of b-tree
 * nodes. This reserve info is kept in a hash table.
 *
 * Don't forget to call BTReleaseReserve when you're finished
 * or you will leave stale node reserves in the hash.
 */
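
/*
 * A minimal usage sketch (illustrative only; hfs_catalog.c is the real
 * caller, and `fcb` below is an assumed FCB pointer, not code from this
 * file):
 *
 *     cat_cookie_t cookie;
 *     bzero(&cookie, sizeof(cookie));
 *
 *     // Encode 2 inserts and 1 delete (see BTReserveSpace below).
 *     if (BTReserveSpace(fcb, (1 << 16) | 2, &cookie) == 0) {
 *         // ... perform the catalog b-tree operations ...
 *         (void) BTReleaseReserve(fcb, &cookie);
 *     }
 *
 * Passing NULL instead of &cookie only checks for space; nothing is
 * entered in the hash, so no matching BTReleaseReserve is needed.
 */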


/*
 * BE CAREFUL WHEN INCREASING THE SIZE OF THIS STRUCT!
 *
 * It must remain equal in size to the opaque cat_cookie_t
 * struct (in hfs_catalog.h).
 */
struct nreserve {
    LIST_ENTRY(nreserve) nr_hash;  /* hash chain */
    int nr_nodecnt;                /* count of nodes held in reserve */
    int nr_newnodes;               /* nodes that were allocated */
    struct vnode *nr_btvp;         /* b-tree file vnode */
    void *nr_tag;                  /* unique tag (per thread) */
};
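
/*
 * A compile-time version of the size check done at runtime in
 * BTReserveSetup below; a sketch, assuming a C11 toolchain
 * (cat_cookie_t is already visible here, as the runtime sizeof
 * comparison implies):
 *
 *     _Static_assert(sizeof(struct nreserve) == sizeof(cat_cookie_t),
 *                    "nreserve must stay the same size as cat_cookie_t");
 */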

#define NR_GET_TAG()    (current_thread())

#define NR_CACHE 17

#define NR_HASH(btvp, tag) \
    (&nr_hashtbl[((((intptr_t)(btvp)) >> 8) ^ ((intptr_t)(tag) >> 4)) & nr_hashmask])
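
/*
 * Worked example (assuming the BSD hashinit() below rounds NR_CACHE (17)
 * down to a 16-bucket power-of-two table, leaving nr_hashmask == 15):
 * the vnode pointer is shifted right 8 bits to drop alignment zeros,
 * XORed with the shifted thread tag, and masked to pick one of the 16
 * hash chains.
 */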

LIST_HEAD(nodereserve, nreserve) *nr_hashtbl;

u_long nr_hashmask;

lck_grp_t *nr_lck_grp;
lck_grp_attr_t *nr_lck_grp_attr;
lck_attr_t *nr_lck_attr;

lck_mtx_t nr_mutex;

/* Internal Node Reserve Hash Routines (private) */
static void nr_insert(struct vnode *, struct nreserve *nrp, int);
static void nr_delete(struct vnode *, struct nreserve *nrp, int *);
static void nr_update(struct vnode *, int);


/*
 * BTReserveSetup - initialize the node reserve hash table
 */
void BTReserveSetup(void)
{
    if (sizeof(struct nreserve) != sizeof(cat_cookie_t))
        panic("hfs: BTReserveSetup: nreserve size != opaque struct size");

    nr_hashtbl = hashinit(NR_CACHE, M_TEMP, &nr_hashmask);

    nr_lck_grp_attr = lck_grp_attr_alloc_init();
    nr_lck_grp = lck_grp_alloc_init("btree_node_reserve", nr_lck_grp_attr);

    nr_lck_attr = lck_attr_alloc_init();

    lck_mtx_init(&nr_mutex, nr_lck_grp, nr_lck_attr);
}


/*
 * BTReserveSpace - obtain a node reserve (for the current thread)
 *
 * Used by the Catalog Layer (hfs_catalog.c) to reserve space.
 *
 * When data is NULL, we only ensure that there is enough space,
 * but nothing is reserved (this assumes the caller keeps the
 * b-tree lock held).
 */
int
BTReserveSpace(FCB *file, int operations, void *data)
{
    BTreeControlBlock *btree;
    int rsrvNodes, availNodes, totalNodes;
    int height;
    int inserts, deletes;
    u_int32_t clumpsize;
    int err = 0;

    btree = (BTreeControlBlockPtr)file->fcbBTCBPtr;
    clumpsize = file->ff_clumpsize;

    REQUIRE_FILE_LOCK(btree->fileRefNum, true);

    /*
     * The node reserve is based on the number of b-tree
     * operations (inserts/deletes) and the height of the
     * tree.
     */
    height = btree->treeDepth;
    if (height < 2)
        height = 2;  /* prevent underflow in rsrvNodes calculation */
    inserts = operations & 0xffff;
    deletes = operations >> 16;
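    /* e.g. operations == (1 << 16) | 2 encodes 2 inserts and 1 delete */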

    /*
     * Allow for at least one root split.
     *
     * Each delete operation can propagate a big key up the
     * index, which can cause a split at each level on the way up.
     *
     * Each insert operation can cause a local split and a
     * split at each level up.
     */
    rsrvNodes = 1 + (deletes * (height - 2)) + (inserts * (height - 1));
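
    /*
     * Worked example: with treeDepth 4, 2 inserts and 1 delete,
     * rsrvNodes = 1 + 1*(4-2) + 2*(4-1) = 9 nodes.
     */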

    availNodes = btree->freeNodes - btree->reservedNodes;

    if (rsrvNodes > availNodes) {
        u_int32_t reqblks, freeblks, rsrvblks;
        uint32_t bt_rsrv;
        struct hfsmount *hfsmp;

        /*
         * For UNIX conformance, we try to reserve the smaller of 5% of
         * the total file blocks or 10MB worth of blocks, for growing
         * existing files. On non-HFS filesystems, creating a new directory
         * entry may not cause additional disk space to be allocated, but
         * on HFS, creating a new entry could cause the b-tree to grow.
         * As a result, we take some precautions here to guard against
         * that on configurations that try to satisfy conformance.
         */
        hfsmp = VTOVCB(btree->fileRefNum);
        rsrvblks = ((u_int64_t)hfsmp->allocLimit * 5) / 100;
        if (hfsmp->blockSize > HFS_BT_MAXRESERVE) {
            bt_rsrv = 1;
        } else {
            bt_rsrv = (HFS_BT_MAXRESERVE / hfsmp->blockSize);
        }
        rsrvblks = MIN(rsrvblks, bt_rsrv);
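
        /*
         * Worked example (assuming HFS_BT_MAXRESERVE is the 10MB noted
         * above): with 4KB allocation blocks, bt_rsrv = 10MB / 4KB = 2560
         * blocks, so at most 2560 blocks (or 5% of allocLimit, whichever
         * is smaller) are held back from use here.
         */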

        freeblks = hfs_freeblks(hfsmp, 0);
        if (freeblks <= rsrvblks) {
            /* When running low, disallow adding new items. */
            if ((inserts > 0) && (deletes == 0)) {
                return (ENOSPC);
            }
            freeblks = 0;
        } else {
            freeblks -= rsrvblks;
        }
        reqblks = clumpsize / hfsmp->blockSize;

        if (reqblks > freeblks) {
            reqblks = ((rsrvNodes - availNodes) * btree->nodeSize) / hfsmp->blockSize;
            /* When running low, disallow adding new items. */
            if ((reqblks > freeblks) && (inserts > 0) && (deletes == 0)) {
                return (ENOSPC);
            }
            file->ff_clumpsize = freeblks * hfsmp->blockSize;
        }
        totalNodes = rsrvNodes + btree->totalNodes - availNodes;

        /* See if we also need a map node */
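        /*
         * (A map node extends the tree's allocation bitmap; if the grown
         * tree would exceed what the current map can track, one extra
         * node is set aside for the map itself.)
         */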
        if (totalNodes > (int)CalcMapBits(btree)) {
            ++totalNodes;
        }
        if ((err = ExtendBTree(btree, totalNodes))) {
            goto out;
        }
    }
    /* Save this reserve if this is a persistent request. */
    if (data) {
        btree->reservedNodes += rsrvNodes;
        nr_insert(btree->fileRefNum, (struct nreserve *)data, rsrvNodes);
    }
out:
    /* Put clump size back if it was changed. */
    if (file->ff_clumpsize != clumpsize)
        file->ff_clumpsize = clumpsize;

    return (err);
}


/*
 * BTReleaseReserve - release the node reserve held by the current thread
 *
 * Used by the Catalog Layer (hfs_catalog.c) to relinquish reserved space.
 */
int
BTReleaseReserve(FCB *file, void *data)
{
    BTreeControlBlock *btree;
    int nodecnt;

    btree = (BTreeControlBlockPtr)file->fcbBTCBPtr;

    REQUIRE_FILE_LOCK(btree->fileRefNum, true);

    nr_delete(btree->fileRefNum, (struct nreserve *)data, &nodecnt);

    if (nodecnt)
        btree->reservedNodes -= nodecnt;

    return (0);
}


/*
 * BTUpdateReserve - update a node reserve for allocations that occurred.
 */
void
BTUpdateReserve(BTreeControlBlockPtr btreePtr, int nodes)
{
    nr_update(btreePtr->fileRefNum, nodes);
}


/*----------------------------------------------------------------------------*/
/* Node Reserve Hash Functions (private) */


int nrinserts = 0;
int nrdeletes = 0;

/*
 * Insert a new node reserve.
 */
static void
nr_insert(struct vnode *btvp, struct nreserve *nrp, int nodecnt)
{
    struct nodereserve *nrhead;
    struct nreserve *tmp_nrp;
    void *tag = NR_GET_TAG();

    /*
     * Check the cache - there may already be a reserve
     * for this b-tree file and thread.
     */
    lck_mtx_lock(&nr_mutex);
    nrhead = NR_HASH(btvp, tag);
    for (tmp_nrp = nrhead->lh_first; tmp_nrp;
         tmp_nrp = tmp_nrp->nr_hash.le_next) {
        if ((tmp_nrp->nr_tag == tag) && (tmp_nrp->nr_btvp == btvp)) {
            /*
             * The caller's cookie goes unused; fold the count
             * into the existing reserve instead.
             */
            nrp->nr_tag = 0;
            tmp_nrp->nr_nodecnt += nodecnt;
            lck_mtx_unlock(&nr_mutex);
            return;
        }
    }

    nrp->nr_nodecnt = nodecnt;
    nrp->nr_newnodes = 0;
    nrp->nr_btvp = btvp;
    nrp->nr_tag = tag;
    LIST_INSERT_HEAD(nrhead, nrp, nr_hash);
    ++nrinserts;
    lck_mtx_unlock(&nr_mutex);
}

/*
 * Delete a node reserve.
 */
static void
nr_delete(struct vnode *btvp, struct nreserve *nrp, int *nodecnt)
{
    void *tag = NR_GET_TAG();

    lck_mtx_lock(&nr_mutex);
    if (nrp->nr_tag) {
        if ((nrp->nr_tag != tag) || (nrp->nr_btvp != btvp))
            panic("hfs: nr_delete: invalid NR (%p)", nrp);
        LIST_REMOVE(nrp, nr_hash);
        *nodecnt = nrp->nr_nodecnt;
        bzero(nrp, sizeof(struct nreserve));
        ++nrdeletes;
    } else {
        /*
         * A zero tag means this cookie was never hashed
         * (the non-persistent case), so there is nothing to remove.
         */
        *nodecnt = 0;
    }
    lck_mtx_unlock(&nr_mutex);
}


/*
 * Update a node reserve for any allocations that occurred.
 */
static void
nr_update(struct vnode *btvp, int nodecnt)
{
    struct nodereserve *nrhead;
    struct nreserve *nrp;
    void *tag = NR_GET_TAG();

    lck_mtx_lock(&nr_mutex);

    nrhead = NR_HASH(btvp, tag);
    for (nrp = nrhead->lh_first; nrp; nrp = nrp->nr_hash.le_next) {
        if ((nrp->nr_tag == tag) && (nrp->nr_btvp == btvp)) {
            nrp->nr_newnodes += nodecnt;
            break;
        }
    }
    lck_mtx_unlock(&nr_mutex);
}