/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include "../headers/BTreesPrivate.h"
#include "sys/malloc.h"
#include <kern/locks.h>


/*
 * B-tree Node Reserve
 *
 * BTReserveSpace
 * BTReleaseReserve
 * BTUpdateReserve
 * BTAvailableNodes
 *
 * Each kernel thread can have its own reserve of b-tree
 * nodes. This reserve info is kept in a hash table.
 *
 * Don't forget to call BTReleaseReserve when you're finished,
 * or you will leave stale node reserves in the hash.
 */
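
/*
 * Typical usage (an illustrative sketch, not part of this file; the
 * FCB name below is hypothetical, and the real callers live in the
 * Catalog Layer, hfs_catalog.c):
 */
#if 0
	int err;
	cat_cookie_t cookie;	/* opaque, same size as struct nreserve */

	/* Reserve room for 2 inserts and 1 delete: deletes are packed
	   into the high 16 bits of 'operations', inserts into the low. */
	err = BTReserveSpace(catalogFCB, (1 << 16) | 2, (void *)&cookie);
	if (err)
		return (err);

	/* ... perform the b-tree insertions/deletions ... */

	/* Always release, or a stale reserve is left in the hash
	   for this thread. */
	(void) BTReleaseReserve(catalogFCB, (void *)&cookie);
#endif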


/*
 * BE CAREFUL WHEN INCREASING THE SIZE OF THIS STRUCT!
 *
 * It must remain equal in size to the opaque cat_cookie_t
 * struct (in hfs_catalog.h).
 */
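/*
 * (Callers allocate an opaque cat_cookie_t on their stack and pass it
 * in; this code casts it to a struct nreserve, and BTReserveSetup()
 * panics at startup if the two sizes ever diverge.)
 */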
struct nreserve {
	LIST_ENTRY(nreserve) nr_hash;	/* hash chain */
	int  nr_nodecnt;		/* count of nodes held in reserve */
	int  nr_newnodes;		/* nodes that were allocated */
	struct vnode *nr_btvp;		/* b-tree file vnode */
	void *nr_tag;			/* unique tag (per thread) */
};

#define NR_GET_TAG()	(current_thread())

#define	NR_CACHE 17

#define NR_HASH(btvp, tag) \
	(&nr_hashtbl[((((uintptr_t)(btvp)) >> 8) ^ ((uintptr_t)(tag) >> 4)) & nr_hashmask])
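/*
 * (The shifts discard low-order bits of the vnode pointer and thread
 * tag, which are mostly constant due to allocation alignment, before
 * XORing the two values into a bucket index.)
 */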

LIST_HEAD(nodereserve, nreserve) *nr_hashtbl;

u_long nr_hashmask;

lck_grp_t * nr_lck_grp;
lck_grp_attr_t * nr_lck_grp_attr;
lck_attr_t * nr_lck_attr;

lck_mtx_t  nr_mutex;

/* Internal Node Reserve Hash Routines (private) */
static void nr_insert (struct vnode *, struct nreserve *nrp, int);
static void nr_delete (struct vnode *, struct nreserve *nrp, int *);
static int  nr_lookup (struct vnode *);
static void nr_update (struct vnode *, int);


/*
 * BTReserveSetup - initialize the node reserve hash table
 */
__private_extern__
void
BTReserveSetup(void)
{
	if (sizeof(struct nreserve) != sizeof(cat_cookie_t))
		panic("BTReserveSetup: nreserve size != opaque struct size");

	nr_hashtbl = hashinit(NR_CACHE, M_HFSMNT, &nr_hashmask);

	nr_lck_grp_attr = lck_grp_attr_alloc_init();
	nr_lck_grp = lck_grp_alloc_init("btree_node_reserve", nr_lck_grp_attr);

	nr_lck_attr = lck_attr_alloc_init();

	lck_mtx_init(&nr_mutex, nr_lck_grp, nr_lck_attr);
}


/*
 * BTAvailableNodes - obtain the actual available nodes (for current thread)
 */
__private_extern__
SInt32
BTAvailableNodes(BTreeControlBlock *btree)
{
	SInt32 availNodes;

	availNodes = (SInt32)btree->freeNodes - (SInt32)btree->reservedNodes;

	return (availNodes + nr_lookup(btree->fileRefNum));
}
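
/*
 * Example: with freeNodes = 10 and reservedNodes = 6, a thread holding
 * no reserve sees 4 available nodes, while a thread whose own reserve
 * is 4 of those 6 sees 8, since a thread's own reserve is available
 * to it.
 */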


/*
 * BTReserveSpace - obtain a node reserve (for current thread)
 *
 * Used by the Catalog Layer (hfs_catalog.c) to reserve space.
 */
__private_extern__
int
BTReserveSpace(FCB *file, int operations, void* data)
{
	BTreeControlBlock *btree;
	int rsrvNodes, availNodes, totalNodes;
	int height;
	int inserts, deletes;
	int err = 0;

	btree = (BTreeControlBlockPtr)file->fcbBTCBPtr;

	REQUIRE_FILE_LOCK(btree->fileRefNum, true);

	/*
	 * The node reserve is based on the number of b-tree
	 * operations (inserts/deletes) and the height of the
	 * tree.
	 */
	height = btree->treeDepth;
	inserts = operations & 0xffff;
	deletes = operations >> 16;
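	/*
	 * (The two counts are packed into a single int by the caller:
	 * operations = (deletes << 16) | inserts, so 0x00010002 requests
	 * room for 2 inserts and 1 delete.)
	 */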

	rsrvNodes = 1;		/* allow for at least one root split */
	if (deletes)
		rsrvNodes += (deletes * (height - 1)) - 1;
	if (inserts)
		rsrvNodes += (inserts * height) + 1;
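
	/*
	 * Example: with treeDepth = 3, 2 inserts and 1 delete reserve
	 * 1 + ((1 * (3 - 1)) - 1) + ((2 * 3) + 1) = 9 nodes: an insert
	 * may split one node per level, a delete may touch every level
	 * below the root, and one node is kept for a root split.
	 */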

	availNodes = btree->freeNodes - btree->reservedNodes;

	if (rsrvNodes > availNodes) {
		totalNodes = rsrvNodes + btree->totalNodes - availNodes;

		/* See if we also need a map node */
		if (totalNodes > (int)CalcMapBits(btree))
			++totalNodes;
		if ((err = ExtendBTree(btree, totalNodes)))
			return (err);
	}

	btree->reservedNodes += rsrvNodes;
	nr_insert(btree->fileRefNum, (struct nreserve *)data, rsrvNodes);
	return (0);
}


/*
 * BTReleaseReserve - release the node reserve held by current thread
 *
 * Used by the Catalog Layer (hfs_catalog.c) to relinquish reserved space.
 */
__private_extern__
int
BTReleaseReserve(FCB *file, void* data)
{
	BTreeControlBlock *btree;
	int nodecnt;

	btree = (BTreeControlBlockPtr)file->fcbBTCBPtr;

	REQUIRE_FILE_LOCK(btree->fileRefNum, true);

	nr_delete(btree->fileRefNum, (struct nreserve *)data, &nodecnt);

	if (nodecnt)
		btree->reservedNodes -= nodecnt;

	return (0);
}

/*
 * BTUpdateReserve - update a node reserve for allocations that occurred.
 */
__private_extern__
void
BTUpdateReserve(BTreeControlBlockPtr btreePtr, int nodes)
{
	nr_update(btreePtr->fileRefNum, nodes);
}


/*----------------------------------------------------------------------------*/
/* Node Reserve Hash Functions (private) */


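/* Counters tracking hash insert/delete activity (handy when checking
   for leaked reserves). */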
int nrinserts = 0;
int nrdeletes = 0;

/*
 * Insert a new node reserve.
 */
static void
nr_insert(struct vnode * btvp, struct nreserve *nrp, int nodecnt)
{
	struct nodereserve *nrhead;
	struct nreserve *tmp_nrp;
	void * tag = NR_GET_TAG();

	/*
	 * Check the cache - there may already be a reserve
	 */
	lck_mtx_lock(&nr_mutex);
	nrhead = NR_HASH(btvp, tag);
	for (tmp_nrp = nrhead->lh_first; tmp_nrp;
	     tmp_nrp = tmp_nrp->nr_hash.le_next) {
		if ((tmp_nrp->nr_tag == tag) && (tmp_nrp->nr_btvp == btvp)) {
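			/* This thread already holds a reserve on this b-tree;
			   zero the tag so nr_delete treats this cookie as unused. */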
			nrp->nr_tag = 0;
			lck_mtx_unlock(&nr_mutex);
			return;
		}
	}

	nrp->nr_nodecnt = nodecnt;
	nrp->nr_newnodes = 0;
	nrp->nr_btvp = btvp;
	nrp->nr_tag = tag;
	LIST_INSERT_HEAD(nrhead, nrp, nr_hash);
	++nrinserts;
	lck_mtx_unlock(&nr_mutex);
}

/*
 * Delete a node reserve.
 */
static void
nr_delete(struct vnode * btvp, struct nreserve *nrp, int *nodecnt)
{
	void * tag = NR_GET_TAG();

	lck_mtx_lock(&nr_mutex);
	if (nrp->nr_tag) {
		if ((nrp->nr_tag != tag) || (nrp->nr_btvp != btvp))
			panic("nr_delete: invalid NR (%p)", nrp);
		LIST_REMOVE(nrp, nr_hash);
		*nodecnt = nrp->nr_nodecnt;
		bzero(nrp, sizeof(struct nreserve));
		++nrdeletes;
	} else {
		*nodecnt = 0;
	}
	lck_mtx_unlock(&nr_mutex);
}

/*
 * Lookup a node reserve.
 */
static int
nr_lookup(struct vnode * btvp)
{
	struct nodereserve *nrhead;
	struct nreserve *nrp;
	void* tag = NR_GET_TAG();

	lck_mtx_lock(&nr_mutex);

	nrhead = NR_HASH(btvp, tag);
	for (nrp = nrhead->lh_first; nrp; nrp = nrp->nr_hash.le_next) {
		if ((nrp->nr_tag == tag) && (nrp->nr_btvp == btvp)) {
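			/* Return only the unconsumed portion of the reserve;
			   nr_newnodes counts nodes already allocated against it
			   (recorded by nr_update). */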
			lck_mtx_unlock(&nr_mutex);
			return (nrp->nr_nodecnt - nrp->nr_newnodes);
		}
	}
	lck_mtx_unlock(&nr_mutex);
	return (0);
}

/*
 * Update a node reserve for any allocations that occurred.
 */
static void
nr_update(struct vnode * btvp, int nodecnt)
{
	struct nodereserve *nrhead;
	struct nreserve *nrp;
	void* tag = NR_GET_TAG();

	lck_mtx_lock(&nr_mutex);

	nrhead = NR_HASH(btvp, tag);
	for (nrp = nrhead->lh_first; nrp; nrp = nrp->nr_hash.le_next) {
		if ((nrp->nr_tag == tag) && (nrp->nr_btvp == btvp)) {
			nrp->nr_newnodes += nodecnt;
			break;
		}
	}
	lck_mtx_unlock(&nr_mutex);
}