/*
 * Copyright (c) 2006 Apple Computer, Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
#include "../headers/BTreesPrivate.h"
#include "sys/malloc.h"
#include <kern/locks.h>
/*
 * Each kernel thread can have its own reserve of b-tree
 * nodes. This reserve info is kept in a hash table.
 *
 * Don't forget to call BTReleaseReserve when you're finished
 * or you will leave stale node reserves in the hash.
 */
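
/*
 * Illustrative usage sketch, not code from this file: a caller (e.g.
 * the Catalog Layer) brackets a batch of b-tree operations with a
 * reserve. 'fcb', 'inserts', 'deletes', and 'error' are hypothetical
 * caller-side names; the cookie supplies the opaque storage that
 * backs a struct nreserve.
 *
 *	cat_cookie_t cookie;
 *	int error;
 *
 *	bzero(&cookie, sizeof(cookie));
 *	error = BTReserveSpace(fcb, (deletes << 16) | inserts, &cookie);
 *	if (error)
 *		return (error);
 *	... perform the b-tree inserts/deletes ...
 *	(void) BTReleaseReserve(fcb, &cookie);
 */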
/*
 * BE CAREFUL WHEN INCREASING THE SIZE OF THIS STRUCT!
 *
 * It must remain equal in size to the opaque cat_cookie_t
 * struct (in hfs_catalog.h).
 */
struct nreserve {
	LIST_ENTRY(nreserve) nr_hash;	/* hash chain */
	int nr_nodecnt;			/* count of nodes held in reserve */
	int nr_newnodes;		/* nodes that were allocated */
	struct vnode *nr_btvp;		/* b-tree file vnode */
	void *nr_tag;			/* unique tag (per thread) */
};
#define NR_GET_TAG()	(current_thread())

#define NR_CACHE	17	/* initial size of the reserve hash table */

#define NR_HASH(btvp, tag) \
	(&nr_hashtbl[((((int)(btvp)) >> 8) ^ ((int)(tag) >> 4)) & nr_hashmask])
LIST_HEAD(nodereserve, nreserve) *nr_hashtbl;

u_long nr_hashmask;
lck_grp_t * nr_lck_grp;
lck_grp_attr_t * nr_lck_grp_attr;
lck_attr_t * nr_lck_attr;

lck_mtx_t nr_mutex;
/* Internal Node Reserve Hash Routines (private) */
static void nr_insert (struct vnode *, struct nreserve *nrp, int);
static void nr_delete (struct vnode *, struct nreserve *nrp, int *);
static int  nr_lookup (struct vnode *);
static void nr_update (struct vnode *, int);
/*
 * BTReserveSetup - initialize the node reserve hash table
 */
void
BTReserveSetup()
{
	if (sizeof(struct nreserve) != sizeof(cat_cookie_t))
		panic("BTReserveSetup: nreserve size != opaque struct size");

	nr_hashtbl = hashinit(NR_CACHE, M_HFSMNT, &nr_hashmask);

	nr_lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_attr_setstat(nr_lck_grp_attr);
	nr_lck_grp = lck_grp_alloc_init("btree_node_reserve", nr_lck_grp_attr);

	nr_lck_attr = lck_attr_alloc_init();
	lck_attr_setdebug(nr_lck_attr);

	lck_mtx_init(&nr_mutex, nr_lck_grp, nr_lck_attr);
}
/*
 * BTAvailableNodes - obtain the actual available nodes (for current thread)
 */
SInt32
BTAvailableNodes(BTreeControlBlock *btree)
{
	SInt32 availNodes;

	availNodes = (SInt32)btree->freeNodes - (SInt32)btree->reservedNodes;

	return (availNodes + nr_lookup(btree->fileRefNum));
}
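
/*
 * Worked example with illustrative numbers: if freeNodes = 10 and
 * reservedNodes = 8, a thread holding a reserve of 5 nodes, 1 of
 * which has since been allocated, sees (10 - 8) + (5 - 1) = 6
 * available nodes; a thread with no reserve sees only 2.
 */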
/*
 * BTReserveSpace - obtain a node reserve (for current thread)
 *
 * Used by the Catalog Layer (hfs_catalog.c) to reserve space.
 */
int
BTReserveSpace(FCB *file, int operations, void* data)
{
	BTreeControlBlock *btree;
	int rsrvNodes, availNodes, totalNodes;
	int height;
	int inserts, deletes;
	int err = 0;

	btree = (BTreeControlBlockPtr)file->fcbBTCBPtr;

	REQUIRE_FILE_LOCK(btree->fileRefNum, true);
	/*
	 * The node reserve is based on the number of b-tree
	 * operations (insert/deletes) and the height of the
	 * tree.
	 */
	height = btree->treeDepth;
	inserts = operations & 0xffff;
	deletes = operations >> 16;

	rsrvNodes = 1;	/* allow for at least one root split */
	if (deletes)
		rsrvNodes += (deletes * (height - 1)) - 1;
	if (inserts)
		rsrvNodes += (inserts * height) + 1;
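
	/*
	 * Worked example (illustrative): packing 2 inserts and 1 delete
	 * into 'operations' on a tree of height 3 gives
	 * rsrvNodes = 1 + ((1 * (3 - 1)) - 1) + ((2 * 3) + 1) = 9.
	 */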
	availNodes = btree->freeNodes - btree->reservedNodes;
	if (rsrvNodes > availNodes) {
		totalNodes = rsrvNodes + btree->totalNodes - availNodes;

		/* See if we also need a map node */
		if (totalNodes > (int)CalcMapBits(btree))
			++totalNodes;
		if ((err = ExtendBTree(btree, totalNodes)))
			return (err);
	}
	btree->reservedNodes += rsrvNodes;
	nr_insert(btree->fileRefNum, (struct nreserve *)data, rsrvNodes);
	return (0);
}
/*
 * BTReleaseReserve - release the node reserve held by current thread
 *
 * Used by the Catalog Layer (hfs_catalog.c) to relinquish reserved space.
 */
int
BTReleaseReserve(FCB *file, void* data)
{
	BTreeControlBlock *btree;
	int nodecnt;

	btree = (BTreeControlBlockPtr)file->fcbBTCBPtr;

	REQUIRE_FILE_LOCK(btree->fileRefNum, true);

	nr_delete(btree->fileRefNum, (struct nreserve *)data, &nodecnt);

	if (nodecnt)
		btree->reservedNodes -= nodecnt;

	return (0);
}
/*
 * BTUpdateReserve - update a node reserve for allocations that occurred.
 */
void
BTUpdateReserve(BTreeControlBlockPtr btreePtr, int nodes)
{
	nr_update(btreePtr->fileRefNum, nodes);
}
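
/*
 * Hypothetical call site sketch: when the b-tree code allocates a new
 * node while the current thread holds a reserve, it is expected to
 * report the allocation so nr_lookup() stops counting that node:
 *
 *	BTUpdateReserve(btreePtr, 1);
 */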
/*----------------------------------------------------------------------------*/
/* Node Reserve Hash Functions (private) */
/*
 * Insert a new node reserve.
 */
static void
nr_insert(struct vnode * btvp, struct nreserve *nrp, int nodecnt)
{
	struct nodereserve *nrhead;
	struct nreserve *tmp_nrp;
	void * tag = NR_GET_TAG();
	/*
	 * Check the cache - there may already be a reserve
	 */
	lck_mtx_lock(&nr_mutex);
	nrhead = NR_HASH(btvp, tag);
	for (tmp_nrp = nrhead->lh_first; tmp_nrp;
	     tmp_nrp = tmp_nrp->nr_hash.le_next) {
		if ((tmp_nrp->nr_tag == tag) && (tmp_nrp->nr_btvp == btvp)) {
			nrp->nr_tag = 0;	/* mark this cookie unused */
			lck_mtx_unlock(&nr_mutex);
			return;
		}
	}

	nrp->nr_nodecnt = nodecnt;
	nrp->nr_newnodes = 0;
	nrp->nr_btvp = btvp;
	nrp->nr_tag = tag;
	LIST_INSERT_HEAD(nrhead, nrp, nr_hash);
	lck_mtx_unlock(&nr_mutex);
}
/*
 * Delete a node reserve.
 */
static void
nr_delete(struct vnode * btvp, struct nreserve *nrp, int *nodecnt)
{
	void * tag = NR_GET_TAG();

	lck_mtx_lock(&nr_mutex);
	if (nrp->nr_tag) {
		if ((nrp->nr_tag != tag) || (nrp->nr_btvp != btvp))
			panic("nr_delete: invalid NR (%p)", nrp);
		LIST_REMOVE(nrp, nr_hash);
		*nodecnt = nrp->nr_nodecnt;
		bzero(nrp, sizeof(struct nreserve));
	} else {
		*nodecnt = 0;
	}
	lck_mtx_unlock(&nr_mutex);
}
/*
 * Lookup a node reserve.
 */
static int
nr_lookup(struct vnode * btvp)
{
	struct nodereserve *nrhead;
	struct nreserve *nrp;
	void* tag = NR_GET_TAG();

	lck_mtx_lock(&nr_mutex);

	nrhead = NR_HASH(btvp, tag);
	for (nrp = nrhead->lh_first; nrp; nrp = nrp->nr_hash.le_next) {
		if ((nrp->nr_tag == tag) && (nrp->nr_btvp == btvp)) {
			lck_mtx_unlock(&nr_mutex);
			return (nrp->nr_nodecnt - nrp->nr_newnodes);
		}
	}
	lck_mtx_unlock(&nr_mutex);
	return (0);
}
/*
 * Update a node reserve for any allocations that occurred.
 */
static void
nr_update(struct vnode * btvp, int nodecnt)
{
	struct nodereserve *nrhead;
	struct nreserve *nrp;
	void* tag = NR_GET_TAG();

	lck_mtx_lock(&nr_mutex);

	nrhead = NR_HASH(btvp, tag);
	for (nrp = nrhead->lh_first; nrp; nrp = nrp->nr_hash.le_next) {
		if ((nrp->nr_tag == tag) && (nrp->nr_btvp == btvp)) {
			nrp->nr_newnodes += nodecnt;
			break;
		}
	}
	lck_mtx_unlock(&nr_mutex);
}