/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include "../headers/BTreesPrivate.h"
#include "sys/malloc.h"


/*
 * B-tree Node Reserve
 *
 * BTReserveSpace
 * BTReleaseReserve
 * BTUpdateReserve
 * BTAvailableNodes
 *
 * Each kernel thread can have its own reserve of b-tree
 * nodes. This reserve info is kept in a hash table.
 *
 * Don't forget to call BTReleaseReserve when you're finished,
 * or you will leave stale node reserves in the hash.
 */
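
/*
 * Illustrative usage sketch (not part of the original code): a caller such
 * as the Catalog Layer pairs BTReserveSpace with BTReleaseReserve around a
 * batch of b-tree operations.  The variable names and call site below are
 * assumptions for illustration only; the cookie is a cat_cookie_t
 * (hfs_catalog.h), which this file requires to match struct nreserve.
 *
 *	cat_cookie_t cookie;
 *	int error;
 *
 *	bzero(&cookie, sizeof(cookie));
 *	// low 16 bits = inserts, high 16 bits = deletes; here 1 insert, 0 deletes
 *	error = BTReserveSpace(catalogFCB, 1, &cookie);
 *	if (error == 0) {
 *		// ... perform the catalog b-tree insert ...
 *		(void) BTReleaseReserve(catalogFCB, &cookie);
 *	}
 */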


/*
 * BE CAREFUL WHEN INCREASING THE SIZE OF THIS STRUCT!
 *
 * It must remain equal in size to the opaque cat_cookie_t
 * struct (in hfs_catalog.h).
 */
struct nreserve {
	LIST_ENTRY(nreserve) nr_hash;	/* hash chain */
	int  nr_nodecnt;		/* count of nodes held in reserve */
	int  nr_newnodes;		/* nodes that were allocated */
	struct vnode *nr_btvp;		/* b-tree file vnode */
	void *nr_tag;			/* unique tag (per thread) */
};
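
/*
 * Sketch only (not in the original source): the size constraint above is
 * enforced at runtime by the panic in BTReserveSetup().  A build-time check
 * along these lines could catch a mismatch earlier; the name
 * nreserve_size_check is hypothetical.
 *
 *	typedef char nreserve_size_check[
 *	    (sizeof(struct nreserve) == sizeof(cat_cookie_t)) ? 1 : -1];
 */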

#define NR_GET_TAG()	(current_act())

#define NR_CACHE 17

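/* Hash a b-tree vnode and per-thread tag to a bucket in the reserve hash table. */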
#define NR_HASH(btvp, tag) \
	(&nr_hashtbl[((((int)(btvp)) >> 8) ^ ((int)(tag) >> 4)) & nr_hashmask])

LIST_HEAD(nodereserve, nreserve) *nr_hashtbl;

u_long nr_hashmask;


/* Internal Node Reserve Hash Routines (private) */
static void nr_insert (struct vnode *, struct nreserve *nrp, int);
static void nr_delete (struct vnode *, struct nreserve *nrp, int *);
static int  nr_lookup (struct vnode *);
static void nr_update (struct vnode *, int);


/*
 * BTReserveSetup - initialize the node reserve hash table
 */
__private_extern__
void
BTReserveSetup()
{
	if (sizeof(struct nreserve) != sizeof(cat_cookie_t))
		panic("BTReserveSetup: nreserve size != opaque struct size");

	nr_hashtbl = hashinit(NR_CACHE, M_HFSMNT, &nr_hashmask);
}


/*
 * BTAvailableNodes - obtain the actual available nodes (for current thread)
 */
__private_extern__
SInt32
BTAvailableNodes(BTreeControlBlock *btree)
{
	SInt32 availNodes;

	availNodes = (SInt32)btree->freeNodes - (SInt32)btree->reservedNodes;

	return (availNodes + nr_lookup(btree->fileRefNum));
}


/*
 * BTReserveSpace - obtain a node reserve (for current thread)
 *
 * Used by the Catalog Layer (hfs_catalog.c) to reserve space.
 * The 'operations' argument packs the number of inserts in its
 * low 16 bits and the number of deletes in its high 16 bits.
 */
__private_extern__
int
BTReserveSpace(FCB *file, int operations, void* data)
{
	BTreeControlBlock *btree;
	int rsrvNodes, availNodes, totalNodes;
	int height;
	int inserts, deletes;
	int err = 0;

	btree = (BTreeControlBlockPtr)file->fcbBTCBPtr;

	REQUIRE_FILE_LOCK(btree->fileRefNum, true);

	/*
	 * The node reserve is based on the number of b-tree
	 * operations (inserts/deletes) and the height of the
	 * tree.
	 */
	height = btree->treeDepth;
	inserts = operations & 0xffff;
	deletes = operations >> 16;

	rsrvNodes = 1;	/* allow for at least one root split */
	if (deletes)
		rsrvNodes += (deletes * (height - 1)) - 1;
	if (inserts)
		rsrvNodes += (inserts * height) + 1;
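	/*
	 * Worked example (illustrative numbers only): for a tree of depth 3,
	 * one insert and no deletes gives rsrvNodes = 1 + (1 * 3) + 1 = 5;
	 * one delete and no inserts gives rsrvNodes = 1 + (1 * (3 - 1)) - 1 = 2.
	 */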

	availNodes = btree->freeNodes - btree->reservedNodes;

	if (rsrvNodes > availNodes) {
		totalNodes = rsrvNodes + btree->totalNodes - availNodes;

		/* See if we also need a map node */
		if (totalNodes > CalcMapBits(btree))
			++totalNodes;
		if ((err = ExtendBTree(btree, totalNodes)))
			return (err);
	}

	btree->reservedNodes += rsrvNodes;
	nr_insert(btree->fileRefNum, (struct nreserve *)data, rsrvNodes);
	return (0);
}


/*
 * BTReleaseReserve - release the node reserve held by current thread
 *
 * Used by the Catalog Layer (hfs_catalog.c) to relinquish reserved space.
 */
__private_extern__
int
BTReleaseReserve(FCB *file, void* data)
{
	BTreeControlBlock *btree;
	int nodecnt;

	btree = (BTreeControlBlockPtr)file->fcbBTCBPtr;

	REQUIRE_FILE_LOCK(btree->fileRefNum, true);

	nr_delete(btree->fileRefNum, (struct nreserve *)data, &nodecnt);

	if (nodecnt)
		btree->reservedNodes -= nodecnt;

	return (0);
}

/*
 * BTUpdateReserve - update a node reserve for allocations that occurred.
 */
__private_extern__
void
BTUpdateReserve(BTreeControlBlockPtr btreePtr, int nodes)
{
	nr_update(btreePtr->fileRefNum, nodes);
}


/*----------------------------------------------------------------------------*/
/* Node Reserve Hash Functions (private) */


int nrinserts = 0;
int nrdeletes = 0;

/*
 * Insert a new node reserve.
 */
static void
nr_insert(struct vnode * btvp, struct nreserve *nrp, int nodecnt)
{
	struct nodereserve *nrhead;
	struct nreserve *tmp_nrp;
	void * tag = NR_GET_TAG();

	/*
	 * Check the cache - there may already be a reserve
	 */
	nrhead = NR_HASH(btvp, tag);
	for (tmp_nrp = nrhead->lh_first; tmp_nrp;
	     tmp_nrp = tmp_nrp->nr_hash.le_next) {
		if ((tmp_nrp->nr_tag == tag) && (tmp_nrp->nr_btvp == btvp)) {
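			/* A reserve already exists for this thread and b-tree;
			   mark this cookie unused so nr_delete() will ignore it. */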
			nrp->nr_tag = 0;
			return;
		}
	}

	nrp->nr_nodecnt = nodecnt;
	nrp->nr_newnodes = 0;
	nrp->nr_btvp = btvp;
	nrp->nr_tag = tag;
	LIST_INSERT_HEAD(nrhead, nrp, nr_hash);
	++nrinserts;
}

/*
 * Delete a node reserve.
 */
static void
nr_delete(struct vnode * btvp, struct nreserve *nrp, int *nodecnt)
{
	void * tag = NR_GET_TAG();

	if (nrp->nr_tag) {
		if ((nrp->nr_tag != tag) || (nrp->nr_btvp != btvp))
			panic("nr_delete: invalid NR (%08x)", nrp);
		LIST_REMOVE(nrp, nr_hash);
		*nodecnt = nrp->nr_nodecnt;
		bzero(nrp, sizeof(struct nreserve));
		++nrdeletes;
	} else {
		*nodecnt = 0;
	}
}

/*
 * Lookup a node reserve.
 */
static int
nr_lookup(struct vnode * btvp)
{
	struct nodereserve *nrhead;
	struct nreserve *nrp;
	void* tag = NR_GET_TAG();

	nrhead = NR_HASH(btvp, tag);
	for (nrp = nrhead->lh_first; nrp; nrp = nrp->nr_hash.le_next) {
		if ((nrp->nr_tag == tag) && (nrp->nr_btvp == btvp))
			return (nrp->nr_nodecnt - nrp->nr_newnodes);
	}
	return (0);
}

/*
 * Update a node reserve for any allocations that occurred.
 */
static void
nr_update(struct vnode * btvp, int nodecnt)
{
	struct nodereserve *nrhead;
	struct nreserve *nrp;
	void* tag = NR_GET_TAG();

	nrhead = NR_HASH(btvp, tag);
	for (nrp = nrhead->lh_first; nrp; nrp = nrp->nr_hash.le_next) {
		if ((nrp->nr_tag == tag) && (nrp->nr_btvp == btvp)) {
			nrp->nr_newnodes += nodecnt;
			break;
		}
	}
}