/*
 * Copyright (c) 2004-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28 #include "BTreesPrivate.h"
29 #include "sys/malloc.h"
30 #include <kern/locks.h>
/*
 * B-tree Node Reserve
 *
 * Each kernel thread can have its own reserve of b-tree
 * nodes. This reserve info is kept in a hash table.
 *
 * Don't forget to call BTReleaseReserve when you're finished
 * or you will leave stale node reserves in the hash.
 */
/*
 * A node reserve held by one kernel thread for one b-tree file.
 *
 * BE CAREFUL WHEN INCREASING THE SIZE OF THIS STRUCT!
 *
 * It must remain equal in size to the opaque cat_cookie_t
 * struct (in hfs_catalog.h) -- BTReserveSetup() panics if the
 * sizes ever diverge.
 */
struct nreserve {
	LIST_ENTRY(nreserve) nr_hash;	/* hash chain */
	int   nr_nodecnt;		/* count of nodes held in reserve */
	int   nr_newnodes;		/* nodes that were allocated */
	struct vnode *nr_btvp;		/* b-tree file vnode */
	void *nr_tag;			/* unique tag (per thread) */
};
62 #define NR_GET_TAG() (current_thread())
66 #define NR_HASH(btvp, tag) \
67 (&nr_hashtbl[((((intptr_t)(btvp)) >> 8) ^ ((intptr_t)(tag) >> 4)) & nr_hashmask])
69 LIST_HEAD(nodereserve
, nreserve
) *nr_hashtbl
;
73 lck_grp_t
* nr_lck_grp
;
74 lck_grp_attr_t
* nr_lck_grp_attr
;
75 lck_attr_t
* nr_lck_attr
;
/* Internal Node Reserve Hash Routines (private) */
static void nr_insert(struct vnode *btvp, struct nreserve *nrp, int nodecnt);
static void nr_delete(struct vnode *btvp, struct nreserve *nrp, int *nodecnt);
static void nr_update(struct vnode *btvp, int nodecnt);
86 * BTReserveSetup - initialize the node reserve hash table
88 void BTReserveSetup(void)
90 if (sizeof(struct nreserve
) != sizeof(cat_cookie_t
))
91 panic("hfs: BTReserveSetup: nreserve size != opaque struct size");
93 nr_hashtbl
= hashinit(NR_CACHE
, M_TEMP
, &nr_hashmask
);
95 nr_lck_grp_attr
= lck_grp_attr_alloc_init();
96 nr_lck_grp
= lck_grp_alloc_init("btree_node_reserve", nr_lck_grp_attr
);
98 nr_lck_attr
= lck_attr_alloc_init();
100 lck_mtx_init(&nr_mutex
, nr_lck_grp
, nr_lck_attr
);
105 * BTReserveSpace - obtain a node reserve (for current thread)
107 * Used by the Catalog Layer (hfs_catalog.c) to reserve space.
109 * When data is NULL, we only insure that there's enough space
110 * but it is not reserved (assumes you keep the b-tree lock).
113 BTReserveSpace(FCB
*file
, int operations
, void* data
)
115 BTreeControlBlock
*btree
;
116 int rsrvNodes
, availNodes
, totalNodes
;
118 int inserts
, deletes
;
122 btree
= (BTreeControlBlockPtr
)file
->fcbBTCBPtr
;
123 clumpsize
= file
->ff_clumpsize
;
125 REQUIRE_FILE_LOCK(btree
->fileRefNum
, true);
128 * The node reserve is based on the number of b-tree
129 * operations (insert/deletes) and the height of the
132 height
= btree
->treeDepth
;
134 height
= 2; /* prevent underflow in rsrvNodes calculation */
135 inserts
= operations
& 0xffff;
136 deletes
= operations
>> 16;
139 * Allow for at least one root split.
141 * Each delete operation can propogate a big key up the
142 * index. This can cause a split at each level up.
144 * Each insert operation can cause a local split and a
145 * split at each level up.
147 rsrvNodes
= 1 + (deletes
* (height
- 2)) + (inserts
* (height
- 1));
149 availNodes
= btree
->freeNodes
- btree
->reservedNodes
;
151 if (rsrvNodes
> availNodes
) {
152 u_int32_t reqblks
, freeblks
, rsrvblks
;
154 struct hfsmount
*hfsmp
;
157 * For UNIX conformance, we try and reserve the MIN of either 5% of
158 * total file blocks or 10MB worth of blocks, for growing existing
159 * files. On non-HFS filesystems, creating a new directory entry may
160 * not cause additional disk space to be allocated, but on HFS, creating
161 * a new entry could cause the b-tree to grow. As a result, we take
162 * some precautions here to prevent that on configurations that try to
163 * satisfy conformance.
165 hfsmp
= VTOVCB(btree
->fileRefNum
);
166 rsrvblks
= ((u_int64_t
)hfsmp
->allocLimit
* 5) / 100;
167 if (hfsmp
->blockSize
> HFS_BT_MAXRESERVE
) {
171 bt_rsrv
= (HFS_BT_MAXRESERVE
/ hfsmp
->blockSize
);
173 rsrvblks
= MIN(rsrvblks
, bt_rsrv
);
175 freeblks
= hfs_freeblks(hfsmp
, 0);
176 if (freeblks
<= rsrvblks
) {
177 /* When running low, disallow adding new items. */
178 if ((inserts
> 0) && (deletes
== 0)) {
183 freeblks
-= rsrvblks
;
185 reqblks
= clumpsize
/ hfsmp
->blockSize
;
187 if (reqblks
> freeblks
) {
188 reqblks
= ((rsrvNodes
- availNodes
) * btree
->nodeSize
) / hfsmp
->blockSize
;
189 /* When running low, disallow adding new items. */
190 if ((reqblks
> freeblks
) && (inserts
> 0) && (deletes
== 0)) {
193 file
->ff_clumpsize
= freeblks
* hfsmp
->blockSize
;
195 totalNodes
= rsrvNodes
+ btree
->totalNodes
- availNodes
;
197 /* See if we also need a map node */
198 if (totalNodes
> (int)CalcMapBits(btree
)) {
201 if ((err
= ExtendBTree(btree
, totalNodes
))) {
205 /* Save this reserve if this is a persistent request. */
207 btree
->reservedNodes
+= rsrvNodes
;
208 nr_insert(btree
->fileRefNum
, (struct nreserve
*)data
, rsrvNodes
);
211 /* Put clump size back if it was changed. */
212 if (file
->ff_clumpsize
!= clumpsize
)
213 file
->ff_clumpsize
= clumpsize
;
220 * BTReleaseReserve - release the node reserve held by current thread
222 * Used by the Catalog Layer (hfs_catalog.c) to relinquish reserved space.
225 BTReleaseReserve(FCB
*file
, void* data
)
227 BTreeControlBlock
*btree
;
230 btree
= (BTreeControlBlockPtr
)file
->fcbBTCBPtr
;
232 REQUIRE_FILE_LOCK(btree
->fileRefNum
, true);
234 nr_delete(btree
->fileRefNum
, (struct nreserve
*)data
, &nodecnt
);
237 btree
->reservedNodes
-= nodecnt
;
243 * BTUpdateReserve - update a node reserve for allocations that occurred.
246 BTUpdateReserve(BTreeControlBlockPtr btreePtr
, int nodes
)
248 nr_update(btreePtr
->fileRefNum
, nodes
);
/*----------------------------------------------------------------------------*/
/* Node Reserve Hash Functions (private) */
260 * Insert a new node reserve.
263 nr_insert(struct vnode
* btvp
, struct nreserve
*nrp
, int nodecnt
)
265 struct nodereserve
*nrhead
;
266 struct nreserve
*tmp_nrp
;
267 void * tag
= NR_GET_TAG();
270 * Check the cache - there may already be a reserve
272 lck_mtx_lock(&nr_mutex
);
273 nrhead
= NR_HASH(btvp
, tag
);
274 for (tmp_nrp
= nrhead
->lh_first
; tmp_nrp
;
275 tmp_nrp
= tmp_nrp
->nr_hash
.le_next
) {
276 if ((tmp_nrp
->nr_tag
== tag
) && (tmp_nrp
->nr_btvp
== btvp
)) {
278 tmp_nrp
->nr_nodecnt
+= nodecnt
;
279 lck_mtx_unlock(&nr_mutex
);
284 nrp
->nr_nodecnt
= nodecnt
;
285 nrp
->nr_newnodes
= 0;
288 LIST_INSERT_HEAD(nrhead
, nrp
, nr_hash
);
290 lck_mtx_unlock(&nr_mutex
);
294 * Delete a node reserve.
297 nr_delete(struct vnode
* btvp
, struct nreserve
*nrp
, int *nodecnt
)
299 void * tag
= NR_GET_TAG();
301 lck_mtx_lock(&nr_mutex
);
303 if ((nrp
->nr_tag
!= tag
) || (nrp
->nr_btvp
!= btvp
))
304 panic("hfs: nr_delete: invalid NR (%p)", nrp
);
305 LIST_REMOVE(nrp
, nr_hash
);
306 *nodecnt
= nrp
->nr_nodecnt
;
307 bzero(nrp
, sizeof(struct nreserve
));
312 lck_mtx_unlock(&nr_mutex
);
317 * Update a node reserve for any allocations that occurred.
320 nr_update(struct vnode
* btvp
, int nodecnt
)
322 struct nodereserve
*nrhead
;
323 struct nreserve
*nrp
;
324 void* tag
= NR_GET_TAG();
326 lck_mtx_lock(&nr_mutex
);
328 nrhead
= NR_HASH(btvp
, tag
);
329 for (nrp
= nrhead
->lh_first
; nrp
; nrp
= nrp
->nr_hash
.le_next
) {
330 if ((nrp
->nr_tag
== tag
) && (nrp
->nr_btvp
== btvp
)) {
331 nrp
->nr_newnodes
+= nodecnt
;
335 lck_mtx_unlock(&nr_mutex
);