/*
 * Copyright (c) 2006 Apple Computer, Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
#include "../headers/BTreesPrivate.h"
#include "sys/malloc.h"
#include <kern/locks.h>

/*
 * B-tree Node Reserve
 *
 * BTReserveSpace
 * BTReleaseReserve
 * BTUpdateReserve
 * BTAvailableNodes
 *
 * Each kernel thread can have its own reserve of b-tree
 * nodes. This reserve info is kept in a hash table.
 *
 * Don't forget to call BTReleaseReserve when you're finished
 * or you will leave stale node reserves in the hash.
 */
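
/*
 * Illustrative call sequence (a sketch only; catalog_fcb is a hypothetical
 * name, not code lifted from hfs_catalog.c). The caller packs the insert
 * count into the low 16 bits of 'operations' and the delete count into the
 * high 16 bits, holds the b-tree file lock (both routines assert it), and
 * passes the same opaque cookie to the release call:
 *
 *	cat_cookie_t cookie;
 *	int error;
 *
 *	bzero(&cookie, sizeof(cookie));
 *	error = BTReserveSpace(catalog_fcb, 2 | (1 << 16), &cookie);
 *	if (error == 0) {
 *		// ... up to 2 inserts and 1 delete against the b-tree ...
 *		(void) BTReleaseReserve(catalog_fcb, &cookie);
 *	}
 */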

/*
 * BE CAREFUL WHEN INCREASING THE SIZE OF THIS STRUCT!
 *
 * It must remain equal in size to the opaque cat_cookie_t
 * struct (in hfs_catalog.h).
 */
struct nreserve {
	LIST_ENTRY(nreserve) nr_hash;	/* hash chain */
	int  nr_nodecnt;		/* count of nodes held in reserve */
	int  nr_newnodes;		/* nodes that were allocated */
	struct vnode *nr_btvp;		/* b-tree file vnode */
	void  *nr_tag;			/* unique tag (per thread) */
};
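
/*
 * Rough size check (an assumption about layout, not enforced beyond the
 * panic in BTReserveSetup): with 32-bit pointers the fields above come to
 * 24 bytes: two pointers for the LIST_ENTRY, two ints, the vnode pointer
 * and the tag pointer. cat_cookie_t is presumably declared as an opaque
 * byte array of the same size.
 */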

#define NR_GET_TAG()	(current_thread())

#define NR_CACHE 17

#define NR_HASH(btvp, tag) \
	(&nr_hashtbl[((((int)(btvp)) >> 8) ^ ((int)(tag) >> 4)) & nr_hashmask])
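
/*
 * Bucket selection: bits of the b-tree vnode pointer are XORed with bits
 * of the thread tag and masked with nr_hashmask, so a given vnode/thread
 * pair always hashes to the same chain. Assuming the BSD hashinit() used
 * below rounds NR_CACHE (17) down to a power of two, there are 16 buckets
 * and nr_hashmask is 0xf.
 */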

LIST_HEAD(nodereserve, nreserve) *nr_hashtbl;

u_long nr_hashmask;

lck_grp_t * nr_lck_grp;
lck_grp_attr_t * nr_lck_grp_attr;
lck_attr_t * nr_lck_attr;

lck_mtx_t  nr_mutex;

/* Internal Node Reserve Hash Routines (private) */
static void nr_insert (struct vnode *, struct nreserve *nrp, int);
static void nr_delete (struct vnode *, struct nreserve *nrp, int *);
static int  nr_lookup (struct vnode *);
static void nr_update (struct vnode *, int);


/*
 * BTReserveSetup - initialize the node reserve hash table
 */
__private_extern__
void
BTReserveSetup()
{
	if (sizeof(struct nreserve) != sizeof(cat_cookie_t))
		panic("BTReserveSetup: nreserve size != opaque struct size");

	nr_hashtbl = hashinit(NR_CACHE, M_HFSMNT, &nr_hashmask);

	nr_lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_attr_setstat(nr_lck_grp_attr);
	nr_lck_grp = lck_grp_alloc_init("btree_node_reserve", nr_lck_grp_attr);

	nr_lck_attr = lck_attr_alloc_init();
	lck_attr_setdebug(nr_lck_attr);

	lck_mtx_init(&nr_mutex, nr_lck_grp, nr_lck_attr);
}
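
/*
 * Note: every routine in this file depends on nr_hashtbl and nr_mutex, so
 * this setup must run before any reserve is taken; presumably it is called
 * once during HFS initialization.
 */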


/*
 * BTAvailableNodes - obtain the actual available nodes (for current thread)
 */
__private_extern__
SInt32
BTAvailableNodes(BTreeControlBlock *btree)
{
	SInt32 availNodes;

	availNodes = (SInt32)btree->freeNodes - (SInt32)btree->reservedNodes;

	return (availNodes + nr_lookup(btree->fileRefNum));
}
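
/*
 * Worked example (hypothetical numbers): with freeNodes == 10 and
 * reservedNodes == 9 across all threads, a thread that itself still holds
 * 4 unused reserved nodes sees (10 - 9) + 4 = 5 available nodes.
 */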


/*
 * BTReserveSpace - obtain a node reserve (for current thread)
 *
 * Used by the Catalog Layer (hfs_catalog.c) to reserve space.
 */
__private_extern__
int
BTReserveSpace(FCB *file, int operations, void* data)
{
	BTreeControlBlock *btree;
	int rsrvNodes, availNodes, totalNodes;
	int height;
	int inserts, deletes;
	int err = 0;

	btree = (BTreeControlBlockPtr)file->fcbBTCBPtr;

	REQUIRE_FILE_LOCK(btree->fileRefNum, true);

	/*
	 * The node reserve is based on the number of b-tree
	 * operations (inserts/deletes) and the height of the
	 * tree.
	 */
	height = btree->treeDepth;
	inserts = operations & 0xffff;
	deletes = operations >> 16;

	rsrvNodes = 1;	/* allow for at least one root split */
	if (deletes)
		rsrvNodes += (deletes * (height - 1)) - 1;
	if (inserts)
		rsrvNodes += (inserts * height) + 1;
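	/*
	 * Worked example (hypothetical numbers): with treeDepth == 3,
	 * 2 inserts and 1 delete, the reserve is
	 * 1 + ((1 * (3 - 1)) - 1) + ((2 * 3) + 1) = 9 nodes.
	 */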

	availNodes = btree->freeNodes - btree->reservedNodes;

	if (rsrvNodes > availNodes) {
		totalNodes = rsrvNodes + btree->totalNodes - availNodes;

		/* See if we also need a map node */
		if (totalNodes > (int)CalcMapBits(btree))
			++totalNodes;
		if ((err = ExtendBTree(btree, totalNodes)))
			return (err);
	}

	btree->reservedNodes += rsrvNodes;
	nr_insert(btree->fileRefNum, (struct nreserve *)data, rsrvNodes);
	return (0);
}


/*
 * BTReleaseReserve - release the node reserve held by current thread
 *
 * Used by the Catalog Layer (hfs_catalog.c) to relinquish reserved space.
 */
__private_extern__
int
BTReleaseReserve(FCB *file, void* data)
{
	BTreeControlBlock *btree;
	int nodecnt;

	btree = (BTreeControlBlockPtr)file->fcbBTCBPtr;

	REQUIRE_FILE_LOCK(btree->fileRefNum, true);

	nr_delete(btree->fileRefNum, (struct nreserve *)data, &nodecnt);

	if (nodecnt)
		btree->reservedNodes -= nodecnt;

	return (0);
}

/*
 * BTUpdateReserve - update a node reserve for allocations that occurred.
 */
__private_extern__
void
BTUpdateReserve(BTreeControlBlockPtr btreePtr, int nodes)
{
	nr_update(btreePtr->fileRefNum, nodes);
}
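
/*
 * Note: 'nodes' counts nodes newly allocated while the reserve is held;
 * since nr_lookup reports (nr_nodecnt - nr_newnodes), recording them here
 * keeps BTAvailableNodes from counting already-consumed reserve space.
 * The b-tree node allocation path is presumably the caller.
 */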


/*----------------------------------------------------------------------------*/
/* Node Reserve Hash Functions (private) */


int nrinserts = 0;
int nrdeletes = 0;

/*
 * Insert a new node reserve.
 */
static void
nr_insert(struct vnode * btvp, struct nreserve *nrp, int nodecnt)
{
	struct nodereserve *nrhead;
	struct nreserve *tmp_nrp;
	void * tag = NR_GET_TAG();

	/*
	 * Check the cache - there may already be a reserve
	 */
	lck_mtx_lock(&nr_mutex);
	nrhead = NR_HASH(btvp, tag);
	for (tmp_nrp = nrhead->lh_first; tmp_nrp;
	     tmp_nrp = tmp_nrp->nr_hash.le_next) {
		if ((tmp_nrp->nr_tag == tag) && (tmp_nrp->nr_btvp == btvp)) {
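			/*
			 * A reserve already exists for this vnode/thread;
			 * zero the caller's cookie tag so nr_delete knows
			 * it was never linked into the hash.
			 */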
			nrp->nr_tag = 0;
			lck_mtx_unlock(&nr_mutex);
			return;
		}
	}

	nrp->nr_nodecnt = nodecnt;
	nrp->nr_newnodes = 0;
	nrp->nr_btvp = btvp;
	nrp->nr_tag = tag;
	LIST_INSERT_HEAD(nrhead, nrp, nr_hash);
	++nrinserts;
	lck_mtx_unlock(&nr_mutex);
}

/*
 * Delete a node reserve.
 */
static void
nr_delete(struct vnode * btvp, struct nreserve *nrp, int *nodecnt)
{
	void * tag = NR_GET_TAG();

	lck_mtx_lock(&nr_mutex);
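	/*
	 * nr_tag is zero if nr_insert found an existing reserve and this
	 * cookie was never linked into the hash.
	 */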
	if (nrp->nr_tag) {
		if ((nrp->nr_tag != tag) || (nrp->nr_btvp != btvp))
			panic("nr_delete: invalid NR (%p)", nrp);
		LIST_REMOVE(nrp, nr_hash);
		*nodecnt = nrp->nr_nodecnt;
		bzero(nrp, sizeof(struct nreserve));
		++nrdeletes;
	} else {
		*nodecnt = 0;
	}
	lck_mtx_unlock(&nr_mutex);
}

/*
 * Lookup a node reserve.
 */
static int
nr_lookup(struct vnode * btvp)
{
	struct nodereserve *nrhead;
	struct nreserve *nrp;
	void* tag = NR_GET_TAG();

	lck_mtx_lock(&nr_mutex);

	nrhead = NR_HASH(btvp, tag);
	for (nrp = nrhead->lh_first; nrp; nrp = nrp->nr_hash.le_next) {
		if ((nrp->nr_tag == tag) && (nrp->nr_btvp == btvp)) {
			lck_mtx_unlock(&nr_mutex);
			return (nrp->nr_nodecnt - nrp->nr_newnodes);
		}
	}
	lck_mtx_unlock(&nr_mutex);
	return (0);
}

/*
 * Update a node reserve for any allocations that occurred.
 */
static void
nr_update(struct vnode * btvp, int nodecnt)
{
	struct nodereserve *nrhead;
	struct nreserve *nrp;
	void* tag = NR_GET_TAG();

	lck_mtx_lock(&nr_mutex);

	nrhead = NR_HASH(btvp, tag);
	for (nrp = nrhead->lh_first; nrp; nrp = nrp->nr_hash.le_next) {
		if ((nrp->nr_tag == tag) && (nrp->nr_btvp == btvp)) {
			nrp->nr_newnodes += nodecnt;
			break;
		}
	}
	lck_mtx_unlock(&nr_mutex);
}