]> git.saurik.com Git - apple/xnu.git/blob - bsd/hfs/hfscommon/BTree/BTreeNodeReserve.c
xnu-517.3.7.tar.gz
[apple/xnu.git] / bsd / hfs / hfscommon / BTree / BTreeNodeReserve.c
1 /*
2 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 #include "../headers/BTreesPrivate.h"
26 #include "sys/malloc.h"
27
28
29 /*
30 * B-tree Node Reserve
31 *
32 * BTReserveSpace
33 * BTReleaseReserve
34 * BTUpdateReserve
35 * BTAvailableNodes
36 *
 * Each kernel thread can have its own reserve of b-tree
38 * nodes. This reserve info is kept in a hash table.
39 *
40 * Don't forget to call BTReleaseReserve when you're finished
41 * or you will leave stale node reserves in the hash.
42 */
43
44
/*
 * BE CAREFUL WHEN INCREASING THE SIZE OF THIS STRUCT!
 *
 * It must remain equal in size to the opaque cat_cookie_t
 * struct (in hfs_catalog.h) -- BTReserveSetup panics at boot
 * if the sizes ever diverge.
 */
struct nreserve {
	LIST_ENTRY(nreserve) nr_hash;	/* hash chain */
	int  nr_nodecnt;		/* count of nodes held in reserve */
	int  nr_newnodes;		/* nodes that were allocated */
	struct	vnode *nr_btvp;		/* b-tree file vnode */
	void  *nr_tag;			/* unique tag (per thread) */
};
58
/* Per-thread tag used to key reserves in the hash. */
#define NR_GET_TAG()	(current_act())

/* Number of hash buckets requested from hashinit(). */
#define	NR_CACHE 17

/*
 * Hash a (b-tree vnode, thread tag) pair to its bucket.
 * Cast through u_long (pointer-sized) rather than int so the
 * upper pointer bits are not truncated on LP64 kernels; on
 * 32-bit kernels the bucket selection is unchanged.
 */
#define NR_HASH(btvp, tag) \
	(&nr_hashtbl[((((u_long)(btvp)) >> 8) ^ ((u_long)(tag) >> 4)) & nr_hashmask])
65
/* Hash table of per-thread node reserves, keyed by (b-tree vnode, thread tag). */
LIST_HEAD(nodereserve, nreserve) *nr_hashtbl;

/* Bucket-index mask for nr_hashtbl; filled in by hashinit(). */
u_long nr_hashmask;
69
70
/* Internal Node Reserve Hash Routines (private) */
static void nr_insert (struct vnode *, struct nreserve *nrp, int);	/* record a reserve */
static void nr_delete (struct vnode *, struct nreserve *nrp, int *);	/* remove a reserve, return its size */
static int  nr_lookup (struct vnode *);					/* nodes held by current thread */
static void nr_update (struct vnode *, int);				/* note allocations against a reserve */
76
77
78 /*
79 * BTReserveSetup - initialize the node reserve hash table
80 */
81 __private_extern__
82 void
83 BTReserveSetup()
84 {
85 if (sizeof(struct nreserve) != sizeof(cat_cookie_t))
86 panic("BTReserveSetup: nreserve size != opaque struct size");
87
88 nr_hashtbl = hashinit(NR_CACHE, M_HFSMNT, &nr_hashmask);
89 }
90
91
92 /*
93 * BTAvailNodes - obtain the actual available nodes (for current thread)
94 *
95 */
96 __private_extern__
97 SInt32
98 BTAvailableNodes(BTreeControlBlock *btree)
99 {
100 SInt32 availNodes;
101
102 availNodes = (SInt32)btree->freeNodes - (SInt32)btree->reservedNodes;
103
104 return (availNodes + nr_lookup(btree->fileRefNum));
105 }
106
107
108 /*
109 * BTReserveSpace - obtain a node reserve (for current thread)
110 *
111 * Used by the Catalog Layer (hfs_catalog.c) to reserve space.
112 */
113 __private_extern__
114 int
115 BTReserveSpace(FCB *file, int operations, void* data)
116 {
117 BTreeControlBlock *btree;
118 int rsrvNodes, availNodes, totalNodes;
119 int height;
120 int inserts, deletes;
121 int err = 0;
122
123 btree = (BTreeControlBlockPtr)file->fcbBTCBPtr;
124
125 REQUIRE_FILE_LOCK(btree->fileRefNum, true);
126
127 /*
128 * The node reserve is based on the number of b-tree
129 * operations (insert/deletes) and the height of the
130 * tree.
131 */
132 height = btree->treeDepth;
133 inserts = operations & 0xffff;
134 deletes = operations >> 16;
135
136 rsrvNodes = 1; /* allow for at least one root split */
137 if (deletes)
138 rsrvNodes += (deletes * (height - 1)) - 1;
139 if (inserts)
140 rsrvNodes += (inserts * height) + 1;
141
142 availNodes = btree->freeNodes - btree->reservedNodes;
143
144 if (rsrvNodes > availNodes) {
145 totalNodes = rsrvNodes + btree->totalNodes - availNodes;
146
147 /* See if we also need a map node */
148 if (totalNodes > CalcMapBits(btree))
149 ++totalNodes;
150 if ((err = ExtendBTree(btree, totalNodes)))
151 return (err);
152 }
153
154 btree->reservedNodes += rsrvNodes;
155 nr_insert(btree->fileRefNum, (struct nreserve *)data, rsrvNodes);
156 return (0);
157 }
158
159
160 /*
161 * BTReleaseReserve - release the node reserve held by current thread
162 *
163 * Used by the Catalog Layer (hfs_catalog.c) to relinquish reserved space.
164 */
165 __private_extern__
166 int
167 BTReleaseReserve(FCB *file, void* data)
168 {
169 BTreeControlBlock *btree;
170 int nodecnt;
171
172 btree = (BTreeControlBlockPtr)file->fcbBTCBPtr;
173
174 REQUIRE_FILE_LOCK(btree->fileRefNum, true);
175
176 nr_delete(btree->fileRefNum, (struct nreserve *)data, &nodecnt);
177
178 if (nodecnt)
179 btree->reservedNodes -= nodecnt;
180
181 return (0);
182 }
183
/*
 * BTUpdateReserve - update a node reserve for allocations that occurred.
 *
 * Thin wrapper: charges 'nodes' freshly-allocated nodes against the
 * current thread's reserve on this b-tree (see nr_update).
 */
__private_extern__
void
BTUpdateReserve(BTreeControlBlockPtr btreePtr, int nodes)
{
	nr_update(btreePtr->fileRefNum, nodes);
}
193
194
/*----------------------------------------------------------------------------*/
/* Node Reserve Hash Functions (private) */


/* Debug counters: lifetime totals of reserves added to / removed from the hash. */
int nrinserts = 0;
int nrdeletes = 0;
201
202 /*
203 * Insert a new node reserve.
204 */
205 static void
206 nr_insert(struct vnode * btvp, struct nreserve *nrp, int nodecnt)
207 {
208 struct nodereserve *nrhead;
209 struct nreserve *tmp_nrp;
210 void * tag = NR_GET_TAG();
211
212 /*
213 * Check the cache - there may already be a reserve
214 */
215 nrhead = NR_HASH(btvp, tag);
216 for (tmp_nrp = nrhead->lh_first; tmp_nrp;
217 tmp_nrp = tmp_nrp->nr_hash.le_next) {
218 if ((tmp_nrp->nr_tag == tag) && (tmp_nrp->nr_btvp == btvp)) {
219 nrp->nr_tag = 0;
220 return;
221 }
222 }
223
224 nrp->nr_nodecnt = nodecnt;
225 nrp->nr_newnodes = 0;
226 nrp->nr_btvp = btvp;
227 nrp->nr_tag = tag;
228 LIST_INSERT_HEAD(nrhead, nrp, nr_hash);
229 ++nrinserts;
230 }
231
232 /*
233 * Delete a node reserve.
234 */
235 static void
236 nr_delete(struct vnode * btvp, struct nreserve *nrp, int *nodecnt)
237 {
238 void * tag = NR_GET_TAG();
239
240 if (nrp->nr_tag) {
241 if ((nrp->nr_tag != tag) || (nrp->nr_btvp != btvp))
242 panic("nr_delete: invalid NR (%08x)", nrp);
243 LIST_REMOVE(nrp, nr_hash);
244 *nodecnt = nrp->nr_nodecnt;
245 bzero(nrp, sizeof(struct nreserve));
246 ++nrdeletes;
247 } else {
248 *nodecnt = 0;
249 }
250 }
251
252 /*
253 * Lookup a node reserve.
254 */
255 static int
256 nr_lookup(struct vnode * btvp)
257 {
258 struct nodereserve *nrhead;
259 struct nreserve *nrp;
260 void* tag = NR_GET_TAG();
261
262 nrhead = NR_HASH(btvp, tag);
263 for (nrp = nrhead->lh_first; nrp; nrp = nrp->nr_hash.le_next) {
264 if ((nrp->nr_tag == tag) && (nrp->nr_btvp == btvp))
265 return (nrp->nr_nodecnt - nrp->nr_newnodes);
266 }
267 return (0);
268 }
269
270 /*
271 * Update a node reserve for any allocations that occured.
272 */
273 static void
274 nr_update(struct vnode * btvp, int nodecnt)
275 {
276 struct nodereserve *nrhead;
277 struct nreserve *nrp;
278 void* tag = NR_GET_TAG();
279
280 nrhead = NR_HASH(btvp, tag);
281 for (nrp = nrhead->lh_first; nrp; nrp = nrp->nr_hash.le_next) {
282 if ((nrp->nr_tag == tag) && (nrp->nr_btvp == btvp)) {
283 nrp->nr_newnodes += nodecnt;
284 break;
285 }
286 }
287 }