/*
 * bsd/hfs/hfscommon/BTree/BTreeNodeReserve.c
 * (Apple xnu-792.24.17; recovered from the git.saurik.com apple/xnu.git mirror)
 */
/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include "../headers/BTreesPrivate.h"
#include "sys/malloc.h"

#include <sys/types.h>
#include <kern/locks.h>

/*
 * B-tree Node Reserve
 *
 * BTReserveSpace
 * BTReleaseReserve
 * BTUpdateReserve
 * BTAvailableNodes
 *
 * Each kernel thread can have its own reserve of b-tree
 * nodes. This reserve info is kept in a hash table.
 *
 * Don't forget to call BTReleaseReserve when you're finished
 * or you will leave stale node reserves in the hash.
 */
41
42
43/*
44 * BE CAREFUL WHEN INCREASING THE SIZE OF THIS STRUCT!
45 *
46 * It must remain equal in size to the opaque cat_cookie_t
47 * struct (in hfs_catalog.h).
48 */
49struct nreserve {
50 LIST_ENTRY(nreserve) nr_hash; /* hash chain */
51 int nr_nodecnt; /* count of nodes held in reserve */
52 int nr_newnodes; /* nodes that were allocated */
53 struct vnode *nr_btvp; /* b-tree file vnode */
54 void *nr_tag; /* unique tag (per thread) */
55};
56
91447636 57#define NR_GET_TAG() (current_thread())
55e303ae
A
58
59#define NR_CACHE 17
60
61#define NR_HASH(btvp, tag) \
62 (&nr_hashtbl[((((int)(btvp)) >> 8) ^ ((int)(tag) >> 4)) & nr_hashmask])
63
64LIST_HEAD(nodereserve, nreserve) *nr_hashtbl;
65
66u_long nr_hashmask;
67
91447636
A
68lck_grp_t * nr_lck_grp;
69lck_grp_attr_t * nr_lck_grp_attr;
70lck_attr_t * nr_lck_attr;
71
72lck_mtx_t nr_mutex;
55e303ae
A
73
74/* Internal Node Reserve Hash Routines (private) */
75static void nr_insert (struct vnode *, struct nreserve *nrp, int);
76static void nr_delete (struct vnode *, struct nreserve *nrp, int *);
77static int nr_lookup (struct vnode *);
78static void nr_update (struct vnode *, int);
79
80
81/*
82 * BTReserveSetup - initialize the node reserve hash table
83 */
84__private_extern__
85void
86BTReserveSetup()
87{
88 if (sizeof(struct nreserve) != sizeof(cat_cookie_t))
89 panic("BTReserveSetup: nreserve size != opaque struct size");
90
91 nr_hashtbl = hashinit(NR_CACHE, M_HFSMNT, &nr_hashmask);
91447636
A
92
93 nr_lck_grp_attr= lck_grp_attr_alloc_init();
6601e61a 94 lck_grp_attr_setstat(nr_lck_grp_attr);
91447636
A
95 nr_lck_grp = lck_grp_alloc_init("btree_node_reserve", nr_lck_grp_attr);
96
97 nr_lck_attr = lck_attr_alloc_init();
6601e61a 98 lck_attr_setdebug(nr_lck_attr);
91447636
A
99
100 lck_mtx_init(&nr_mutex, nr_lck_grp, nr_lck_attr);
55e303ae
A
101}
102
103
104/*
105 * BTAvailNodes - obtain the actual available nodes (for current thread)
106 *
107 */
108__private_extern__
109SInt32
110BTAvailableNodes(BTreeControlBlock *btree)
111{
112 SInt32 availNodes;
113
114 availNodes = (SInt32)btree->freeNodes - (SInt32)btree->reservedNodes;
115
116 return (availNodes + nr_lookup(btree->fileRefNum));
117}
118
119
120/*
121 * BTReserveSpace - obtain a node reserve (for current thread)
122 *
123 * Used by the Catalog Layer (hfs_catalog.c) to reserve space.
124 */
125__private_extern__
126int
127BTReserveSpace(FCB *file, int operations, void* data)
128{
129 BTreeControlBlock *btree;
130 int rsrvNodes, availNodes, totalNodes;
131 int height;
132 int inserts, deletes;
133 int err = 0;
134
135 btree = (BTreeControlBlockPtr)file->fcbBTCBPtr;
136
137 REQUIRE_FILE_LOCK(btree->fileRefNum, true);
138
139 /*
140 * The node reserve is based on the number of b-tree
141 * operations (insert/deletes) and the height of the
142 * tree.
143 */
144 height = btree->treeDepth;
145 inserts = operations & 0xffff;
146 deletes = operations >> 16;
147
148 rsrvNodes = 1; /* allow for at least one root split */
149 if (deletes)
150 rsrvNodes += (deletes * (height - 1)) - 1;
151 if (inserts)
152 rsrvNodes += (inserts * height) + 1;
153
154 availNodes = btree->freeNodes - btree->reservedNodes;
155
156 if (rsrvNodes > availNodes) {
157 totalNodes = rsrvNodes + btree->totalNodes - availNodes;
158
159 /* See if we also need a map node */
91447636 160 if (totalNodes > (int)CalcMapBits(btree))
55e303ae
A
161 ++totalNodes;
162 if ((err = ExtendBTree(btree, totalNodes)))
163 return (err);
164 }
165
166 btree->reservedNodes += rsrvNodes;
167 nr_insert(btree->fileRefNum, (struct nreserve *)data, rsrvNodes);
168 return (0);
169}
170
171
172/*
173 * BTReleaseReserve - release the node reserve held by current thread
174 *
175 * Used by the Catalog Layer (hfs_catalog.c) to relinquish reserved space.
176 */
177__private_extern__
178int
179BTReleaseReserve(FCB *file, void* data)
180{
181 BTreeControlBlock *btree;
182 int nodecnt;
183
184 btree = (BTreeControlBlockPtr)file->fcbBTCBPtr;
185
186 REQUIRE_FILE_LOCK(btree->fileRefNum, true);
187
188 nr_delete(btree->fileRefNum, (struct nreserve *)data, &nodecnt);
189
190 if (nodecnt)
191 btree->reservedNodes -= nodecnt;
192
193 return (0);
194}
195
196/*
91447636 197 * BTUpdateReserve - update a node reserve for allocations that occurred.
55e303ae
A
198 */
199__private_extern__
200void
201BTUpdateReserve(BTreeControlBlockPtr btreePtr, int nodes)
202{
203 nr_update(btreePtr->fileRefNum, nodes);
204}
205
206
207/*----------------------------------------------------------------------------*/
208/* Node Reserve Hash Functions (private) */
209
210
211int nrinserts = 0;
212int nrdeletes = 0;
213
214/*
215 * Insert a new node reserve.
216 */
217static void
218nr_insert(struct vnode * btvp, struct nreserve *nrp, int nodecnt)
219{
220 struct nodereserve *nrhead;
221 struct nreserve *tmp_nrp;
222 void * tag = NR_GET_TAG();
223
224 /*
225 * Check the cache - there may already be a reserve
226 */
91447636 227 lck_mtx_lock(&nr_mutex);
55e303ae
A
228 nrhead = NR_HASH(btvp, tag);
229 for (tmp_nrp = nrhead->lh_first; tmp_nrp;
230 tmp_nrp = tmp_nrp->nr_hash.le_next) {
231 if ((tmp_nrp->nr_tag == tag) && (tmp_nrp->nr_btvp == btvp)) {
232 nrp->nr_tag = 0;
91447636 233 lck_mtx_unlock(&nr_mutex);
55e303ae
A
234 return;
235 }
236 }
237
238 nrp->nr_nodecnt = nodecnt;
239 nrp->nr_newnodes = 0;
240 nrp->nr_btvp = btvp;
241 nrp->nr_tag = tag;
242 LIST_INSERT_HEAD(nrhead, nrp, nr_hash);
243 ++nrinserts;
91447636 244 lck_mtx_unlock(&nr_mutex);
55e303ae
A
245}
246
247/*
248 * Delete a node reserve.
249 */
250static void
251nr_delete(struct vnode * btvp, struct nreserve *nrp, int *nodecnt)
252{
253 void * tag = NR_GET_TAG();
254
91447636 255 lck_mtx_lock(&nr_mutex);
55e303ae
A
256 if (nrp->nr_tag) {
257 if ((nrp->nr_tag != tag) || (nrp->nr_btvp != btvp))
258 panic("nr_delete: invalid NR (%08x)", nrp);
259 LIST_REMOVE(nrp, nr_hash);
260 *nodecnt = nrp->nr_nodecnt;
261 bzero(nrp, sizeof(struct nreserve));
262 ++nrdeletes;
263 } else {
264 *nodecnt = 0;
265 }
91447636 266 lck_mtx_unlock(&nr_mutex);
55e303ae
A
267}
268
269/*
270 * Lookup a node reserve.
271 */
272static int
273nr_lookup(struct vnode * btvp)
274{
275 struct nodereserve *nrhead;
276 struct nreserve *nrp;
277 void* tag = NR_GET_TAG();
278
91447636
A
279 lck_mtx_lock(&nr_mutex);
280
55e303ae
A
281 nrhead = NR_HASH(btvp, tag);
282 for (nrp = nrhead->lh_first; nrp; nrp = nrp->nr_hash.le_next) {
91447636
A
283 if ((nrp->nr_tag == tag) && (nrp->nr_btvp == btvp)) {
284 lck_mtx_unlock(&nr_mutex);
55e303ae 285 return (nrp->nr_nodecnt - nrp->nr_newnodes);
91447636 286 }
55e303ae 287 }
91447636 288 lck_mtx_unlock(&nr_mutex);
55e303ae
A
289 return (0);
290}
291
292/*
91447636 293 * Update a node reserve for any allocations that occurred.
55e303ae
A
294 */
295static void
296nr_update(struct vnode * btvp, int nodecnt)
297{
298 struct nodereserve *nrhead;
299 struct nreserve *nrp;
300 void* tag = NR_GET_TAG();
301
91447636
A
302 lck_mtx_lock(&nr_mutex);
303
55e303ae
A
304 nrhead = NR_HASH(btvp, tag);
305 for (nrp = nrhead->lh_first; nrp; nrp = nrp->nr_hash.le_next) {
306 if ((nrp->nr_tag == tag) && (nrp->nr_btvp == btvp)) {
307 nrp->nr_newnodes += nodecnt;
308 break;
309 }
310 }
91447636 311 lck_mtx_unlock(&nr_mutex);
55e303ae 312}