]> git.saurik.com Git - apple/xnu.git/blob - bsd/hfs/hfscommon/BTree/BTreeNodeReserve.c
605554033d0ddc90abc3db476cc85a77a0b8ce75
[apple/xnu.git] / bsd / hfs / hfscommon / BTree / BTreeNodeReserve.c
1 /*
2 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 #include "../headers/BTreesPrivate.h"
24 #include "sys/malloc.h"
25 #include <kern/locks.h>
26
27
28 /*
29 * B-tree Node Reserve
30 *
31 * BTReserveSpace
32 * BTReleaseReserve
33 * BTUpdateReserve
34 * BTAvailableNodes
35 *
36 * Each kernel thread can have it's own reserve of b-tree
37 * nodes. This reserve info is kept in a hash table.
38 *
39 * Don't forget to call BTReleaseReserve when you're finished
40 * or you will leave stale node reserves in the hash.
41 */
42
43
/*
 * BE CAREFUL WHEN INCREASING THE SIZE OF THIS STRUCT!
 *
 * It must remain equal in size to the opaque cat_cookie_t
 * struct (in hfs_catalog.h).  BTReserveSetup panics at boot
 * if the two sizes ever diverge, since the catalog layer
 * passes these around as opaque cookies.
 */
struct nreserve {
	LIST_ENTRY(nreserve) nr_hash;	/* hash chain */
	int  nr_nodecnt;		/* count of nodes held in reserve */
	int  nr_newnodes;		/* nodes that were allocated */
	struct	vnode *nr_btvp;		/* b-tree file vnode */
	void  *nr_tag;			/* unique tag (per thread); 0 marks an unused/duplicate cookie */
};
57
/* A reserve is keyed by (b-tree vnode, owning thread). */
#define NR_GET_TAG()	(current_thread())

/* Bucket count hint passed to hashinit(). */
#define	NR_CACHE 17

/*
 * Hash a (vnode, tag) pair to a bucket head.
 * NOTE(review): the pointer-to-int casts truncate on LP64;
 * that only affects hash distribution, not correctness — confirm.
 */
#define NR_HASH(btvp, tag) \
	(&nr_hashtbl[((((int)(btvp)) >> 8) ^ ((int)(tag) >> 4)) & nr_hashmask])

/* Hash table of active reserves, allocated in BTReserveSetup. */
LIST_HEAD(nodereserve, nreserve) *nr_hashtbl;

u_long nr_hashmask;

/* Lock machinery protecting the hash table (see nr_mutex). */
lck_grp_t * nr_lck_grp;
lck_grp_attr_t * nr_lck_grp_attr;
lck_attr_t * nr_lck_attr;

/* Serializes all access to nr_hashtbl and the nreserve entries. */
lck_mtx_t  nr_mutex;

/* Internal Node Reserve Hash Routines (private) */
static void nr_insert (struct vnode *, struct nreserve *nrp, int);
static void nr_delete (struct vnode *, struct nreserve *nrp, int *);
static int  nr_lookup (struct vnode *);
static void nr_update (struct vnode *, int);
80
81
/*
 * BTReserveSetup - initialize the node reserve hash table
 *
 * Must run once, before any other routine in this file, since it
 * creates the hash table, lock group and mutex they all rely on.
 */
__private_extern__
void
BTReserveSetup()
{
	/*
	 * The catalog layer hands reserves around as opaque
	 * cat_cookie_t values, so a cookie must be big enough
	 * to hold a struct nreserve (see comment on the struct).
	 */
	if (sizeof(struct nreserve) != sizeof(cat_cookie_t))
		panic("BTReserveSetup: nreserve size != opaque struct size");

	nr_hashtbl = hashinit(NR_CACHE, M_HFSMNT, &nr_hashmask);

	/* The lock group must exist before the mutex is initialized with it. */
	nr_lck_grp_attr= lck_grp_attr_alloc_init();
	lck_grp_attr_setstat(nr_lck_grp_attr);
	nr_lck_grp = lck_grp_alloc_init("btree_node_reserve", nr_lck_grp_attr);

	nr_lck_attr = lck_attr_alloc_init();
	lck_attr_setdebug(nr_lck_attr);

	lck_mtx_init(&nr_mutex, nr_lck_grp, nr_lck_attr);
}
103
104
105 /*
106 * BTAvailNodes - obtain the actual available nodes (for current thread)
107 *
108 */
109 __private_extern__
110 SInt32
111 BTAvailableNodes(BTreeControlBlock *btree)
112 {
113 SInt32 availNodes;
114
115 availNodes = (SInt32)btree->freeNodes - (SInt32)btree->reservedNodes;
116
117 return (availNodes + nr_lookup(btree->fileRefNum));
118 }
119
120
121 /*
122 * BTReserveSpace - obtain a node reserve (for current thread)
123 *
124 * Used by the Catalog Layer (hfs_catalog.c) to reserve space.
125 */
126 __private_extern__
127 int
128 BTReserveSpace(FCB *file, int operations, void* data)
129 {
130 BTreeControlBlock *btree;
131 int rsrvNodes, availNodes, totalNodes;
132 int height;
133 int inserts, deletes;
134 int err = 0;
135
136 btree = (BTreeControlBlockPtr)file->fcbBTCBPtr;
137
138 REQUIRE_FILE_LOCK(btree->fileRefNum, true);
139
140 /*
141 * The node reserve is based on the number of b-tree
142 * operations (insert/deletes) and the height of the
143 * tree.
144 */
145 height = btree->treeDepth;
146 inserts = operations & 0xffff;
147 deletes = operations >> 16;
148
149 rsrvNodes = 1; /* allow for at least one root split */
150 if (deletes)
151 rsrvNodes += (deletes * (height - 1)) - 1;
152 if (inserts)
153 rsrvNodes += (inserts * height) + 1;
154
155 availNodes = btree->freeNodes - btree->reservedNodes;
156
157 if (rsrvNodes > availNodes) {
158 totalNodes = rsrvNodes + btree->totalNodes - availNodes;
159
160 /* See if we also need a map node */
161 if (totalNodes > (int)CalcMapBits(btree))
162 ++totalNodes;
163 if ((err = ExtendBTree(btree, totalNodes)))
164 return (err);
165 }
166
167 btree->reservedNodes += rsrvNodes;
168 nr_insert(btree->fileRefNum, (struct nreserve *)data, rsrvNodes);
169 return (0);
170 }
171
172
173 /*
174 * BTReleaseReserve - release the node reserve held by current thread
175 *
176 * Used by the Catalog Layer (hfs_catalog.c) to relinquish reserved space.
177 */
178 __private_extern__
179 int
180 BTReleaseReserve(FCB *file, void* data)
181 {
182 BTreeControlBlock *btree;
183 int nodecnt;
184
185 btree = (BTreeControlBlockPtr)file->fcbBTCBPtr;
186
187 REQUIRE_FILE_LOCK(btree->fileRefNum, true);
188
189 nr_delete(btree->fileRefNum, (struct nreserve *)data, &nodecnt);
190
191 if (nodecnt)
192 btree->reservedNodes -= nodecnt;
193
194 return (0);
195 }
196
/*
 * BTUpdateReserve - update a node reserve for allocations that occurred.
 *
 * Credits 'nodes' newly-allocated nodes against the calling thread's
 * reserve on this b-tree (nr_newnodes in nr_update), so nr_lookup —
 * and therefore BTAvailableNodes — no longer counts them as extra
 * available nodes.
 */
__private_extern__
void
BTUpdateReserve(BTreeControlBlockPtr btreePtr, int nodes)
{
	nr_update(btreePtr->fileRefNum, nodes);
}
206
207
/*----------------------------------------------------------------------------*/
/* Node Reserve Hash Functions (private) */


/*
 * Lifetime insert/delete counters.  Only incremented in this file,
 * never read — presumably kept for debugging; confirm before removing.
 */
int nrinserts = 0;
int nrdeletes = 0;
214
215 /*
216 * Insert a new node reserve.
217 */
218 static void
219 nr_insert(struct vnode * btvp, struct nreserve *nrp, int nodecnt)
220 {
221 struct nodereserve *nrhead;
222 struct nreserve *tmp_nrp;
223 void * tag = NR_GET_TAG();
224
225 /*
226 * Check the cache - there may already be a reserve
227 */
228 lck_mtx_lock(&nr_mutex);
229 nrhead = NR_HASH(btvp, tag);
230 for (tmp_nrp = nrhead->lh_first; tmp_nrp;
231 tmp_nrp = tmp_nrp->nr_hash.le_next) {
232 if ((tmp_nrp->nr_tag == tag) && (tmp_nrp->nr_btvp == btvp)) {
233 nrp->nr_tag = 0;
234 lck_mtx_unlock(&nr_mutex);
235 return;
236 }
237 }
238
239 nrp->nr_nodecnt = nodecnt;
240 nrp->nr_newnodes = 0;
241 nrp->nr_btvp = btvp;
242 nrp->nr_tag = tag;
243 LIST_INSERT_HEAD(nrhead, nrp, nr_hash);
244 ++nrinserts;
245 lck_mtx_unlock(&nr_mutex);
246 }
247
248 /*
249 * Delete a node reserve.
250 */
251 static void
252 nr_delete(struct vnode * btvp, struct nreserve *nrp, int *nodecnt)
253 {
254 void * tag = NR_GET_TAG();
255
256 lck_mtx_lock(&nr_mutex);
257 if (nrp->nr_tag) {
258 if ((nrp->nr_tag != tag) || (nrp->nr_btvp != btvp))
259 panic("nr_delete: invalid NR (%08x)", nrp);
260 LIST_REMOVE(nrp, nr_hash);
261 *nodecnt = nrp->nr_nodecnt;
262 bzero(nrp, sizeof(struct nreserve));
263 ++nrdeletes;
264 } else {
265 *nodecnt = 0;
266 }
267 lck_mtx_unlock(&nr_mutex);
268 }
269
270 /*
271 * Lookup a node reserve.
272 */
273 static int
274 nr_lookup(struct vnode * btvp)
275 {
276 struct nodereserve *nrhead;
277 struct nreserve *nrp;
278 void* tag = NR_GET_TAG();
279
280 lck_mtx_lock(&nr_mutex);
281
282 nrhead = NR_HASH(btvp, tag);
283 for (nrp = nrhead->lh_first; nrp; nrp = nrp->nr_hash.le_next) {
284 if ((nrp->nr_tag == tag) && (nrp->nr_btvp == btvp)) {
285 lck_mtx_unlock(&nr_mutex);
286 return (nrp->nr_nodecnt - nrp->nr_newnodes);
287 }
288 }
289 lck_mtx_unlock(&nr_mutex);
290 return (0);
291 }
292
293 /*
294 * Update a node reserve for any allocations that occurred.
295 */
296 static void
297 nr_update(struct vnode * btvp, int nodecnt)
298 {
299 struct nodereserve *nrhead;
300 struct nreserve *nrp;
301 void* tag = NR_GET_TAG();
302
303 lck_mtx_lock(&nr_mutex);
304
305 nrhead = NR_HASH(btvp, tag);
306 for (nrp = nrhead->lh_first; nrp; nrp = nrp->nr_hash.le_next) {
307 if ((nrp->nr_tag == tag) && (nrp->nr_btvp == btvp)) {
308 nrp->nr_newnodes += nodecnt;
309 break;
310 }
311 }
312 lck_mtx_unlock(&nr_mutex);
313 }