/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
#include "../headers/BTreesPrivate.h"
#include "sys/malloc.h"
#include <kern/locks.h>


/*
 * B-tree Node Reserve
 *
 * BTReserveSpace
 * BTReleaseReserve
 * BTUpdateReserve
 * BTAvailableNodes
 *
 * Each kernel thread can have its own reserve of b-tree
 * nodes. This reserve info is kept in a hash table.
 *
 * Don't forget to call BTReleaseReserve when you're finished
 * or you will leave stale node reserves in the hash.
 */
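
/*
 * Illustrative usage sketch (not part of the original code): a caller,
 * such as the Catalog layer, brackets a batch of b-tree operations with
 * BTReserveSpace and BTReleaseReserve.  The FCB name catalogFCB and the
 * insert/delete counts are hypothetical; the cookie is the opaque
 * cat_cookie_t that aliases struct nreserve below.
 *
 *    cat_cookie_t cookie;
 *    int inserts = 2, deletes = 1;
 *    int error;
 *
 *    bzero(&cookie, sizeof(cookie));
 *    error = BTReserveSpace(catalogFCB, (deletes << 16) | inserts, &cookie);
 *    if (error == 0) {
 *            ... perform the catalog b-tree inserts/deletes ...
 *            (void) BTReleaseReserve(catalogFCB, &cookie);
 *    }
 */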


/*
 * BE CAREFUL WHEN INCREASING THE SIZE OF THIS STRUCT!
 *
 * It must remain equal in size to the opaque cat_cookie_t
 * struct (in hfs_catalog.h).
 */
struct nreserve {
    LIST_ENTRY(nreserve) nr_hash;  /* hash chain */
    int  nr_nodecnt;               /* count of nodes held in reserve */
    int  nr_newnodes;              /* nodes that were allocated */
    struct vnode *nr_btvp;         /* b-tree file vnode */
    void *nr_tag;                  /* unique tag (per thread) */
};
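
/*
 * A compile-time guard for the size constraint above could look like the
 * sketch below (an assumption, not in the original source, which relies
 * on the runtime panic in BTReserveSetup instead):
 *
 *    typedef char nreserve_size_check[
 *            (sizeof(struct nreserve) == sizeof(cat_cookie_t)) ? 1 : -1];
 */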

#define NR_GET_TAG()    (current_thread())

#define NR_CACHE 17
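
/*
 * Reserves are keyed by the pair (b-tree vnode, per-thread tag); the tag
 * is simply the current thread pointer from NR_GET_TAG().  NR_HASH picks
 * the hash bucket for that pair.
 */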
#define NR_HASH(btvp, tag) \
    (&nr_hashtbl[((((int)(btvp)) >> 8) ^ ((int)(tag) >> 4)) & nr_hashmask])

LIST_HEAD(nodereserve, nreserve) *nr_hashtbl;

u_long nr_hashmask;

lck_grp_t * nr_lck_grp;
lck_grp_attr_t * nr_lck_grp_attr;
lck_attr_t * nr_lck_attr;

lck_mtx_t nr_mutex;

/* Internal Node Reserve Hash Routines (private) */
static void nr_insert (struct vnode *, struct nreserve *nrp, int);
static void nr_delete (struct vnode *, struct nreserve *nrp, int *);
static int  nr_lookup (struct vnode *);
static void nr_update (struct vnode *, int);


/*
 * BTReserveSetup - initialize the node reserve hash table
 */
__private_extern__
void
BTReserveSetup()
{
    if (sizeof(struct nreserve) != sizeof(cat_cookie_t))
        panic("BTReserveSetup: nreserve size != opaque struct size");

    nr_hashtbl = hashinit(NR_CACHE, M_HFSMNT, &nr_hashmask);

    nr_lck_grp_attr = lck_grp_attr_alloc_init();
    nr_lck_grp = lck_grp_alloc_init("btree_node_reserve", nr_lck_grp_attr);

    nr_lck_attr = lck_attr_alloc_init();

    lck_mtx_init(&nr_mutex, nr_lck_grp, nr_lck_attr);
}


/*
 * BTAvailableNodes - obtain the actual available nodes (for current thread)
 *
 */
__private_extern__
SInt32
BTAvailableNodes(BTreeControlBlock *btree)
{
    SInt32 availNodes;

    availNodes = (SInt32)btree->freeNodes - (SInt32)btree->reservedNodes;

    return (availNodes + nr_lookup(btree->fileRefNum));
}


/*
 * BTReserveSpace - obtain a node reserve (for current thread)
 *
 * Used by the Catalog Layer (hfs_catalog.c) to reserve space.
 */
__private_extern__
int
BTReserveSpace(FCB *file, int operations, void* data)
{
    BTreeControlBlock *btree;
    int rsrvNodes, availNodes, totalNodes;
    int height;
    int inserts, deletes;
    int err = 0;

    btree = (BTreeControlBlockPtr)file->fcbBTCBPtr;

    REQUIRE_FILE_LOCK(btree->fileRefNum, true);

    /*
     * The node reserve is based on the number of b-tree
     * operations (inserts/deletes) and the height of the
     * tree.
     */
    height = btree->treeDepth;
    inserts = operations & 0xffff;
    deletes = operations >> 16;

    rsrvNodes = 1;  /* allow for at least one root split */
    if (deletes)
        rsrvNodes += (deletes * (height - 1)) - 1;
    if (inserts)
        rsrvNodes += (inserts * height) + 1;
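
    /*
     * Worked example (illustrative, not in the original source): with
     * treeDepth == 3, 2 inserts and 1 delete, this computes
     * rsrvNodes = 1 + (1 * (3 - 1) - 1) + (2 * 3 + 1) = 9.
     */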

    availNodes = btree->freeNodes - btree->reservedNodes;

    if (rsrvNodes > availNodes) {
        totalNodes = rsrvNodes + btree->totalNodes - availNodes;

        /* See if we also need a map node */
        if (totalNodes > (int)CalcMapBits(btree))
            ++totalNodes;
        if ((err = ExtendBTree(btree, totalNodes)))
            return (err);
    }

    btree->reservedNodes += rsrvNodes;
    nr_insert(btree->fileRefNum, (struct nreserve *)data, rsrvNodes);
    return (0);
}


/*
 * BTReleaseReserve - release the node reserve held by current thread
 *
 * Used by the Catalog Layer (hfs_catalog.c) to relinquish reserved space.
 */
__private_extern__
int
BTReleaseReserve(FCB *file, void* data)
{
    BTreeControlBlock *btree;
    int nodecnt;

    btree = (BTreeControlBlockPtr)file->fcbBTCBPtr;

    REQUIRE_FILE_LOCK(btree->fileRefNum, true);

    nr_delete(btree->fileRefNum, (struct nreserve *)data, &nodecnt);

    if (nodecnt)
        btree->reservedNodes -= nodecnt;

    return (0);
}

/*
 * BTUpdateReserve - update a node reserve for allocations that occurred.
 */
__private_extern__
void
BTUpdateReserve(BTreeControlBlockPtr btreePtr, int nodes)
{
    nr_update(btreePtr->fileRefNum, nodes);
}


/*----------------------------------------------------------------------------*/
/* Node Reserve Hash Functions (private) */

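
/* Debug counters for node reserve hash insertions and deletions. */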
int nrinserts = 0;
int nrdeletes = 0;

/*
 * Insert a new node reserve.
 */
static void
nr_insert(struct vnode * btvp, struct nreserve *nrp, int nodecnt)
{
    struct nodereserve *nrhead;
    struct nreserve *tmp_nrp;
    void * tag = NR_GET_TAG();

    /*
     * Check the cache - there may already be a reserve
     */
    lck_mtx_lock(&nr_mutex);
    nrhead = NR_HASH(btvp, tag);
    for (tmp_nrp = nrhead->lh_first; tmp_nrp;
         tmp_nrp = tmp_nrp->nr_hash.le_next) {
        if ((tmp_nrp->nr_tag == tag) && (tmp_nrp->nr_btvp == btvp)) {
            nrp->nr_tag = 0;
            lck_mtx_unlock(&nr_mutex);
            return;
        }
    }

    nrp->nr_nodecnt = nodecnt;
    nrp->nr_newnodes = 0;
    nrp->nr_btvp = btvp;
    nrp->nr_tag = tag;
    LIST_INSERT_HEAD(nrhead, nrp, nr_hash);
    ++nrinserts;
    lck_mtx_unlock(&nr_mutex);
}

/*
 * Delete a node reserve.
 */
static void
nr_delete(struct vnode * btvp, struct nreserve *nrp, int *nodecnt)
{
    void * tag = NR_GET_TAG();

    lck_mtx_lock(&nr_mutex);
    if (nrp->nr_tag) {
        if ((nrp->nr_tag != tag) || (nrp->nr_btvp != btvp))
            panic("nr_delete: invalid NR (%p)", nrp);
        LIST_REMOVE(nrp, nr_hash);
        *nodecnt = nrp->nr_nodecnt;
        bzero(nrp, sizeof(struct nreserve));
        ++nrdeletes;
    } else {
        *nodecnt = 0;
    }
    lck_mtx_unlock(&nr_mutex);
}

/*
 * Lookup a node reserve.
 */
static int
nr_lookup(struct vnode * btvp)
{
    struct nodereserve *nrhead;
    struct nreserve *nrp;
    void* tag = NR_GET_TAG();

    lck_mtx_lock(&nr_mutex);

    nrhead = NR_HASH(btvp, tag);
    for (nrp = nrhead->lh_first; nrp; nrp = nrp->nr_hash.le_next) {
        if ((nrp->nr_tag == tag) && (nrp->nr_btvp == btvp)) {
            lck_mtx_unlock(&nr_mutex);
            return (nrp->nr_nodecnt - nrp->nr_newnodes);
        }
    }
    lck_mtx_unlock(&nr_mutex);
    return (0);
}

/*
 * Update a node reserve for any allocations that occurred.
 */
static void
nr_update(struct vnode * btvp, int nodecnt)
{
    struct nodereserve *nrhead;
    struct nreserve *nrp;
    void* tag = NR_GET_TAG();

    lck_mtx_lock(&nr_mutex);

    nrhead = NR_HASH(btvp, tag);
    for (nrp = nrhead->lh_first; nrp; nrp = nrp->nr_hash.le_next) {
        if ((nrp->nr_tag == tag) && (nrp->nr_btvp == btvp)) {
            nrp->nr_newnodes += nodecnt;
            break;
        }
    }
    lck_mtx_unlock(&nr_mutex);
}