/*
- * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2004-2008 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#include "../headers/BTreesPrivate.h"
#include "sys/malloc.h"
+#include <kern/locks.h>
/*
* BTReserveSpace
* BTReleaseReserve
* BTUpdateReserve
- * BTAvailableNodes
*
* Each kernel thread can have its own reserve of b-tree
* nodes. This reserve info is kept in a hash table.
void *nr_tag; /* unique tag (per thread) */
};
-#define NR_GET_TAG() (current_act())
+#define NR_GET_TAG() (current_thread())
#define NR_CACHE 17
#define NR_HASH(btvp, tag) \
- (&nr_hashtbl[((((int)(btvp)) >> 8) ^ ((int)(tag) >> 4)) & nr_hashmask])
+ (&nr_hashtbl[((((intptr_t)(btvp)) >> 8) ^ ((intptr_t)(tag) >> 4)) & nr_hashmask])
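+/*
+ * (The pointer casts go through intptr_t rather than int so the
+ * hash stays well-defined on LP64 kernels, where a pointer is
+ * wider than an int.)
+ */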
LIST_HEAD(nodereserve, nreserve) *nr_hashtbl;
u_long nr_hashmask;
+lck_grp_t * nr_lck_grp;
+lck_grp_attr_t * nr_lck_grp_attr;
+lck_attr_t * nr_lck_attr;
+
+lck_mtx_t nr_mutex;
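+
+/*
+ * nr_mutex serializes all access to the node reserve hash table;
+ * nr_insert, nr_delete and nr_update below hold it across their
+ * list walks.
+ */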
/* Internal Node Reserve Hash Routines (private) */
static void nr_insert (struct vnode *, struct nreserve *nrp, int);
static void nr_delete (struct vnode *, struct nreserve *nrp, int *);
-static int nr_lookup (struct vnode *);
static void nr_update (struct vnode *, int);
BTReserveSetup()
{
if (sizeof(struct nreserve) != sizeof(cat_cookie_t))
- panic("BTReserveSetup: nreserve size != opaque struct size");
+ panic("hfs: BTReserveSetup: nreserve size != opaque struct size");
nr_hashtbl = hashinit(NR_CACHE, M_HFSMNT, &nr_hashmask);
-}
-/*
- * BTAvailNodes - obtain the actual available nodes (for current thread)
- *
- */
-__private_extern__
-SInt32
-BTAvailableNodes(BTreeControlBlock *btree)
-{
- SInt32 availNodes;
-
- availNodes = (SInt32)btree->freeNodes - (SInt32)btree->reservedNodes;
- return (availNodes + nr_lookup(btree->fileRefNum));
+ nr_lck_grp_attr = lck_grp_attr_alloc_init();
+ nr_lck_grp = lck_grp_alloc_init("btree_node_reserve", nr_lck_grp_attr);
+ nr_lck_attr = lck_attr_alloc_init();
+ lck_mtx_init(&nr_mutex, nr_lck_grp, nr_lck_attr);
}
* BTReserveSpace - obtain a node reserve (for current thread)
*
* Used by the Catalog Layer (hfs_catalog.c) to reserve space.
+ *
+ * When data is NULL, we only ensure that there's enough space,
+ * but nothing is reserved (this assumes the caller keeps the
+ * b-tree lock).
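+ *
+ * Illustrative calling pattern (identifier names here are
+ * hypothetical; the real callers live in the catalog layer):
+ *
+ *     cat_cookie_t cookie;
+ *     bzero(&cookie, sizeof(cookie));
+ *     if ((error = BTReserveSpace(fcb, ops, &cookie)) == 0) {
+ *         ... b-tree inserts/deletes ...
+ *         BTReleaseReserve(fcb, &cookie);
+ *     }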
*/
__private_extern__
int
int rsrvNodes, availNodes, totalNodes;
int height;
int inserts, deletes;
+ u_int32_t clumpsize;
int err = 0;
btree = (BTreeControlBlockPtr)file->fcbBTCBPtr;
+ clumpsize = file->ff_clumpsize;
REQUIRE_FILE_LOCK(btree->fileRefNum, true);
* tree.
*/
height = btree->treeDepth;
+ if (height < 2)
+ height = 2; /* prevent underflow in rsrvNodes calculation */
inserts = operations & 0xffff;
deletes = operations >> 16;
- rsrvNodes = 1; /* allow for at least one root split */
- if (deletes)
- rsrvNodes += (deletes * (height - 1)) - 1;
- if (inserts)
- rsrvNodes += (inserts * height) + 1;
+ /*
+ * Allow for at least one root split.
+ *
+ * Each delete operation can propagate a big key up the
+ * index. This can cause a split at each level up.
+ *
+ * Each insert operation can cause a local split and a
+ * split at each level up.
+ */
+ rsrvNodes = 1 + (deletes * (height - 2)) + (inserts * (height - 1));
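+ /*
+ * Worked example: for a tree of height 3, two inserts and one
+ * delete reserve 1 + (1 * (3 - 2)) + (2 * (3 - 1)) = 6 nodes.
+ */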
availNodes = btree->freeNodes - btree->reservedNodes;
if (rsrvNodes > availNodes) {
+ u_int32_t reqblks, freeblks, rsrvblks;
+ uint32_t bt_rsrv;
+ struct hfsmount *hfsmp;
+
+ /*
+ * For UNIX conformance, we try to reserve the smaller of 5% of the
+ * volume's allocation blocks or 10MB worth of blocks for growing
+ * existing files. On non-HFS filesystems, creating a new directory
+ * entry may not cause additional disk space to be allocated, but on
+ * HFS, creating a new entry could cause the b-tree to grow. As a
+ * result, we take some precautions here to prevent that on
+ * configurations that try to satisfy conformance.
+ */
+ hfsmp = VTOVCB(btree->fileRefNum);
+ rsrvblks = ((u_int64_t)hfsmp->allocLimit * 5) / 100;
+ if (hfsmp->blockSize > HFS_BT_MAXRESERVE) {
+ bt_rsrv = 1;
+ }
+ else {
+ bt_rsrv = (HFS_BT_MAXRESERVE / hfsmp->blockSize);
+ }
+ rsrvblks = MIN(rsrvblks, bt_rsrv);
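+ /*
+ * For example, with 4K allocation blocks and the 10MB
+ * HFS_BT_MAXRESERVE noted above, bt_rsrv is 2560 blocks, so
+ * rsrvblks is capped at the smaller of 5% of the volume and
+ * 2560 blocks.
+ */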
+
+ freeblks = hfs_freeblks(hfsmp, 0);
+ if (freeblks <= rsrvblks) {
+ /* When running low, disallow adding new items. */
+ if ((inserts > 0) && (deletes == 0)) {
+ return (ENOSPC);
+ }
+ freeblks = 0;
+ } else {
+ freeblks -= rsrvblks;
+ }
+ reqblks = clumpsize / hfsmp->blockSize;
+
+ if (reqblks > freeblks) {
+ reqblks = ((rsrvNodes - availNodes) * btree->nodeSize) / hfsmp->blockSize;
+ /* When running low, disallow adding new items. */
+ if ((reqblks > freeblks) && (inserts > 0) && (deletes == 0)) {
+ return (ENOSPC);
+ }
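+ /*
+ * Cap the clump size so the ExtendBTree() call below grows
+ * the b-tree by no more than the remaining free space; the
+ * original value is restored at 'out:'.
+ */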
+ file->ff_clumpsize = freeblks * hfsmp->blockSize;
+ }
totalNodes = rsrvNodes + btree->totalNodes - availNodes;
/* See if we also need a map node */
- if (totalNodes > CalcMapBits(btree))
+ if (totalNodes > (int)CalcMapBits(btree)) {
++totalNodes;
- if ((err = ExtendBTree(btree, totalNodes)))
- return (err);
- }
+ }
+ if ((err = ExtendBTree(btree, totalNodes))) {
+ goto out;
+ }
+ }
- btree->reservedNodes += rsrvNodes;
- nr_insert(btree->fileRefNum, (struct nreserve *)data, rsrvNodes);
- return (0);
+ /* Save this reserve if this is a persistent request. */
+ if (data) {
+ btree->reservedNodes += rsrvNodes;
+ nr_insert(btree->fileRefNum, (struct nreserve *)data, rsrvNodes);
+ }
+out:
+ /* Put clump size back if it was changed. */
+ if (file->ff_clumpsize != clumpsize)
+ file->ff_clumpsize = clumpsize;
+
+ return (err);
}
}
/*
- * BTUpdateReserve - update a node reserve for allocations that occured.
+ * BTUpdateReserve - update a node reserve for allocations that occurred.
*/
__private_extern__
void
/*
* Check the cache - there may already be a reserve
*/
+ lck_mtx_lock(&nr_mutex);
nrhead = NR_HASH(btvp, tag);
for (tmp_nrp = nrhead->lh_first; tmp_nrp;
tmp_nrp = tmp_nrp->nr_hash.le_next) {
if ((tmp_nrp->nr_tag == tag) && (tmp_nrp->nr_btvp == btvp)) {
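+ /*
+ * A reserve already exists for this thread and b-tree:
+ * fold the new count into it and zero nrp's tag so a
+ * later nr_delete on nrp finds nothing to release.
+ */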
nrp->nr_tag = 0;
+ tmp_nrp->nr_nodecnt += nodecnt;
+ lck_mtx_unlock(&nr_mutex);
return;
}
}
nrp->nr_tag = tag;
LIST_INSERT_HEAD(nrhead, nrp, nr_hash);
++nrinserts;
+ lck_mtx_unlock(&nr_mutex);
}
/*
{
void * tag = NR_GET_TAG();
+ lck_mtx_lock(&nr_mutex);
if (nrp->nr_tag) {
if ((nrp->nr_tag != tag) || (nrp->nr_btvp != btvp))
- panic("nr_delete: invalid NR (%08x)", nrp);
+ panic("hfs: nr_delete: invalid NR (%p)", nrp);
LIST_REMOVE(nrp, nr_hash);
*nodecnt = nrp->nr_nodecnt;
bzero(nrp, sizeof(struct nreserve));
} else {
*nodecnt = 0;
}
+ lck_mtx_unlock(&nr_mutex);
}
-/*
- * Lookup a node reserve.
- */
-static int
-nr_lookup(struct vnode * btvp)
-{
- struct nodereserve *nrhead;
- struct nreserve *nrp;
- void* tag = NR_GET_TAG();
-
- nrhead = NR_HASH(btvp, tag);
- for (nrp = nrhead->lh_first; nrp; nrp = nrp->nr_hash.le_next) {
- if ((nrp->nr_tag == tag) && (nrp->nr_btvp == btvp))
- return (nrp->nr_nodecnt - nrp->nr_newnodes);
- }
- return (0);
-}
/*
- * Update a node reserve for any allocations that occured.
+ * Update a node reserve for any allocations that occurred.
*/
static void
nr_update(struct vnode * btvp, int nodecnt)
struct nreserve *nrp;
void* tag = NR_GET_TAG();
+ lck_mtx_lock(&nr_mutex);
+
nrhead = NR_HASH(btvp, tag);
for (nrp = nrhead->lh_first; nrp; nrp = nrp->nr_hash.le_next) {
if ((nrp->nr_tag == tag) && (nrp->nr_btvp == btvp)) {
break;
}
}
+ lck_mtx_unlock(&nr_mutex);
}