/*
 * Copyright (c) 2002-2012 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
lck_grp_attr_t * chash_lck_grp_attr;
lck_attr_t * chash_lck_attr;
-/*
- * Structures associated with cnode caching.
- */
-LIST_HEAD(cnodehashhead, cnode) *cnodehashtbl;
-u_long cnodehash; /* size of hash table - 1 */
-#define CNODEHASH(device, inum) (&cnodehashtbl[((device) + (inum)) & cnodehash])
-lck_mtx_t hfs_chash_mutex;
+#define CNODEHASH(hfsmp, inum) (&hfsmp->hfs_cnodehashtbl[(inum) & hfsmp->hfs_cnodehash])
+
/*
* Initialize cnode hash table.
chash_lck_grp_attr= lck_grp_attr_alloc_init();
chash_lck_grp = lck_grp_alloc_init("cnode_hash", chash_lck_grp_attr);
chash_lck_attr = lck_attr_alloc_init();
+}
+
+static void hfs_chash_lock(struct hfsmount *hfsmp)
+{
+ lck_mtx_lock(&hfsmp->hfs_chash_mutex);
+}
+
+static void hfs_chash_lock_spin(struct hfsmount *hfsmp)
+{
+ lck_mtx_lock_spin(&hfsmp->hfs_chash_mutex);
+}
- lck_mtx_init(&hfs_chash_mutex, chash_lck_grp, chash_lck_attr);
+static void hfs_chash_lock_convert (__unused struct hfsmount *hfsmp)
+{
+ lck_mtx_convert_spin(&hfsmp->hfs_chash_mutex);
}
-static void hfs_chash_lock(void)
+static void hfs_chash_unlock(struct hfsmount *hfsmp)
{
- lck_mtx_lock(&hfs_chash_mutex);
+ lck_mtx_unlock(&hfsmp->hfs_chash_mutex);
}
-static void hfs_chash_unlock(void)
+__private_extern__
+void
+hfs_chashinit_finish(struct hfsmount *hfsmp)
{
- lck_mtx_unlock(&hfs_chash_mutex);
+ lck_mtx_init(&hfsmp->hfs_chash_mutex, chash_lck_grp, chash_lck_attr);
+
+ hfsmp->hfs_cnodehashtbl = hashinit(desiredvnodes / 4, M_HFSMNT, &hfsmp->hfs_cnodehash);
}
__private_extern__
void
-hfs_chashinit_finish()
+hfs_delete_chash(struct hfsmount *hfsmp)
{
- hfs_chash_lock();
- if (!cnodehashtbl)
- cnodehashtbl = hashinit(desiredvnodes, M_HFSMNT, &cnodehash);
- hfs_chash_unlock();
+ lck_mtx_destroy(&hfsmp->hfs_chash_mutex, chash_lck_grp);
+
+ FREE(hfsmp->hfs_cnodehashtbl, M_HFSMNT);
}
*
* If it is in core, but locked, wait for it.
*/
-__private_extern__
struct vnode *
-hfs_chash_getvnode(dev_t dev, ino_t inum, int wantrsrc, int skiplock)
+hfs_chash_getvnode(struct hfsmount *hfsmp, ino_t inum, int wantrsrc, int skiplock, int allow_deleted)
{
struct cnode *cp;
struct vnode *vp;
* allocated, wait for it to be finished and then try again.
*/
loop:
- hfs_chash_lock();
- for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
- if ((cp->c_fileid != inum) || (cp->c_dev != dev))
+ hfs_chash_lock_spin(hfsmp);
+
+ for (cp = CNODEHASH(hfsmp, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
+ if (cp->c_fileid != inum)
continue;
/* Wait if cnode is being created or reclaimed. */
if (ISSET(cp->c_hflag, H_ALLOC | H_TRANSIT | H_ATTACH)) {
SET(cp->c_hflag, H_WAITING);
- (void) msleep(cp, &hfs_chash_mutex, PDROP | PINOD,
+ (void) msleep(cp, &hfsmp->hfs_chash_mutex, PDROP | PINOD,
"hfs_chash_getvnode", 0);
goto loop;
}
goto exit;
vid = vnode_vid(vp);
- hfs_chash_unlock();
+ hfs_chash_unlock(hfsmp);
if ((error = vnode_getwithvid(vp, vid))) {
/*
*/
return (NULL);
}
- if (!skiplock && hfs_lock(cp, HFS_EXCLUSIVE_LOCK) != 0) {
+ if (!skiplock && hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0) {
vnode_put(vp);
return (NULL);
}
* lock on the cnode which would allow the node to be
* unlinked
*/
- if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
- if (!skiplock)
- hfs_unlock(cp);
- vnode_put(vp);
-
- return (NULL);
- }
+ if (!allow_deleted) {
+ if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
+ if (!skiplock) {
+ hfs_unlock(cp);
+ }
+ vnode_put(vp);
+ return (NULL);
+ }
+ }
return (vp);
}
exit:
- hfs_chash_unlock();
+ hfs_chash_unlock(hfsmp);
return (NULL);
}
/*
* Use the device, fileid pair to snoop an incore cnode.
+ *
+ * A cnode can exists in chash even after it has been
+ * deleted from the catalog, so this function returns
+ * ENOENT if C_NOEXIST is set in the cnode's flag.
+ *
*/
-__private_extern__
int
-hfs_chash_snoop(dev_t dev, ino_t inum, int (*callout)(const struct cat_desc *,
+hfs_chash_snoop(struct hfsmount *hfsmp, ino_t inum, int existence_only, int (*callout)(const struct cat_desc *,
const struct cat_attr *, void *), void * arg)
{
struct cnode *cp;
* If a cnode is in the process of being cleaned out or being
* allocated, wait for it to be finished and then try again.
*/
- hfs_chash_lock();
- for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
- if ((cp->c_fileid != inum) || (cp->c_dev != dev))
+ hfs_chash_lock(hfsmp);
+
+ for (cp = CNODEHASH(hfsmp, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
+ if (cp->c_fileid != inum)
continue;
+
+ /*
+ * Under normal circumstances, we would want to return ENOENT if a cnode is in
+ * the hash and it is marked C_NOEXISTS or C_DELETED. However, if the CNID
+ * namespace has wrapped around, then we have the possibility of collisions.
+ * In that case, we may use this function to validate whether or not we
+ * should trust the nextCNID value in the hfs mount point.
+ *
+ * If we didn't do this, then it would be possible for a cnode that is no longer backed
+ * by anything on-disk (C_NOEXISTS) to still exist in the hash along with its
+ * vnode. The cat_create routine could then create a new entry in the catalog
+ * re-using that CNID. Then subsequent hfs_getnewvnode calls will repeatedly fail
+ * trying to look it up/validate it because it is marked C_NOEXISTS. So we want
+ * to prevent that from happening as much as possible.
+ */
+ if (existence_only) {
+ result = 0;
+ break;
+ }
+
+ /* Skip cnodes that have been removed from the catalog */
+ if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
+ break;
+ }
/* Skip cnodes being created or reclaimed. */
if (!ISSET(cp->c_hflag, H_ALLOC | H_TRANSIT | H_ATTACH)) {
result = callout(&cp->c_desc, &cp->c_attr, arg);
}
break;
}
- hfs_chash_unlock();
+ hfs_chash_unlock(hfsmp);
+
return (result);
}
*
* If the cnode is C_DELETED, then return NULL since that
* inum is no longer valid for lookups (open-unlinked file).
+ *
+ * If the cnode is C_DELETED but also marked C_RENAMED, then that means
+ * the cnode was renamed over and a new entry exists in its place. The caller
+ * should re-drive the lookup to get the newer entry. In that case, we'll still
+ * return NULL for the cnode, but also return GNV_CHASH_RENAMED in the output flags
+ * of this function to indicate the caller that they should re-drive.
*/
-__private_extern__
struct cnode *
-hfs_chash_getcnode(dev_t dev, ino_t inum, struct vnode **vpp, int wantrsrc, int skiplock)
+hfs_chash_getcnode(struct hfsmount *hfsmp, ino_t inum, struct vnode **vpp,
+ int wantrsrc, int skiplock, int *out_flags, int *hflags)
{
struct cnode *cp;
struct cnode *ncp = NULL;
* allocated, wait for it to be finished and then try again.
*/
loop:
- hfs_chash_lock();
+ hfs_chash_lock_spin(hfsmp);
loop_with_lock:
- for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
- if ((cp->c_fileid != inum) || (cp->c_dev != dev))
+ for (cp = CNODEHASH(hfsmp, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
+ if (cp->c_fileid != inum)
continue;
/*
* Wait if cnode is being created, attached to or reclaimed.
if (ISSET(cp->c_hflag, H_ALLOC | H_ATTACH | H_TRANSIT)) {
SET(cp->c_hflag, H_WAITING);
- (void) msleep(cp, &hfs_chash_mutex, PINOD,
+ (void) msleep(cp, &hfsmp->hfs_chash_mutex, PINOD,
"hfs_chash_getcnode", 0);
goto loop_with_lock;
}
* The desired vnode isn't there so tag the cnode.
*/
SET(cp->c_hflag, H_ATTACH);
+ *hflags |= H_ATTACH;
- hfs_chash_unlock();
+ hfs_chash_unlock(hfsmp);
} else {
vid = vnode_vid(vp);
- hfs_chash_unlock();
+ hfs_chash_unlock(hfsmp);
if (vnode_getwithvid(vp, vid))
goto loop;
}
if (ncp) {
- /*
+ /*
* someone else won the race to create
* this cnode and add it to the hash
* just dump our allocation
*/
- FREE_ZONE(ncp, sizeof(struct cnode), M_HFSNODE);
+ FREE_ZONE(ncp, sizeof(struct cnode), M_HFSNODE);
ncp = NULL;
}
if (!skiplock) {
- hfs_lock(cp, HFS_FORCE_LOCK);
+ hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
}
/*
* is no longer valid for lookups.
*/
if ((cp->c_flag & (C_NOEXISTS | C_DELETED)) && !wantrsrc) {
+ int renamed = 0;
+ if (cp->c_flag & C_RENAMED) {
+ renamed = 1;
+ }
if (!skiplock)
hfs_unlock(cp);
if (vp != NULLVP) {
vnode_put(vp);
} else {
- hfs_chash_lock();
- CLR(cp->c_hflag, H_ATTACH);
+ hfs_chash_lock_spin(hfsmp);
+ CLR(cp->c_hflag, H_ATTACH);
+ *hflags &= ~H_ATTACH;
if (ISSET(cp->c_hflag, H_WAITING)) {
CLR(cp->c_hflag, H_WAITING);
wakeup((caddr_t)cp);
}
- hfs_chash_unlock();
+ hfs_chash_unlock(hfsmp);
}
vp = NULL;
cp = NULL;
+ if (renamed) {
+ *out_flags = GNV_CHASH_RENAMED;
+ }
}
*vpp = vp;
return (cp);
panic("%s - should never get here when skiplock is set \n", __FUNCTION__);
if (ncp == NULL) {
- hfs_chash_unlock();
+ hfs_chash_unlock(hfsmp);
MALLOC_ZONE(ncp, struct cnode *, sizeof(struct cnode), M_HFSNODE, M_WAITOK);
/*
*/
goto loop;
}
+ hfs_chash_lock_convert(hfsmp);
+
bzero(ncp, sizeof(struct cnode));
SET(ncp->c_hflag, H_ALLOC);
+ *hflags |= H_ALLOC;
ncp->c_fileid = inum;
- ncp->c_dev = dev;
TAILQ_INIT(&ncp->c_hintlist); /* make the list empty */
TAILQ_INIT(&ncp->c_originlist);
lck_rw_init(&ncp->c_rwlock, hfs_rwlock_group, hfs_lock_attr);
if (!skiplock)
- (void) hfs_lock(ncp, HFS_EXCLUSIVE_LOCK);
+ (void) hfs_lock(ncp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
/* Insert the new cnode with it's H_ALLOC flag set */
- LIST_INSERT_HEAD(CNODEHASH(dev, inum), ncp, c_hash);
- hfs_chash_unlock();
+ LIST_INSERT_HEAD(CNODEHASH(hfsmp, inum), ncp, c_hash);
+ hfs_chash_unlock(hfsmp);
*vpp = NULL;
return (ncp);
__private_extern__
void
-hfs_chashwakeup(struct cnode *cp, int hflags)
+hfs_chashwakeup(struct hfsmount *hfsmp, struct cnode *cp, int hflags)
{
- hfs_chash_lock();
+ hfs_chash_lock_spin(hfsmp);
CLR(cp->c_hflag, hflags);
CLR(cp->c_hflag, H_WAITING);
wakeup((caddr_t)cp);
}
- hfs_chash_unlock();
+ hfs_chash_unlock(hfsmp);
}
*/
__private_extern__
void
-hfs_chash_rehash(struct cnode *cp1, struct cnode *cp2)
+hfs_chash_rehash(struct hfsmount *hfsmp, struct cnode *cp1, struct cnode *cp2)
{
- hfs_chash_lock();
+ hfs_chash_lock_spin(hfsmp);
LIST_REMOVE(cp1, c_hash);
LIST_REMOVE(cp2, c_hash);
- LIST_INSERT_HEAD(CNODEHASH(cp1->c_dev, cp1->c_fileid), cp1, c_hash);
- LIST_INSERT_HEAD(CNODEHASH(cp2->c_dev, cp2->c_fileid), cp2, c_hash);
+ LIST_INSERT_HEAD(CNODEHASH(hfsmp, cp1->c_fileid), cp1, c_hash);
+ LIST_INSERT_HEAD(CNODEHASH(hfsmp, cp2->c_fileid), cp2, c_hash);
- hfs_chash_unlock();
+ hfs_chash_unlock(hfsmp);
}
*/
__private_extern__
int
-hfs_chashremove(struct cnode *cp)
+hfs_chashremove(struct hfsmount *hfsmp, struct cnode *cp)
{
- hfs_chash_lock();
+ hfs_chash_lock_spin(hfsmp);
/* Check if a vnode is getting attached */
if (ISSET(cp->c_hflag, H_ATTACH)) {
- hfs_chash_unlock();
+ hfs_chash_unlock(hfsmp);
return (EBUSY);
}
if (cp->c_hash.le_next || cp->c_hash.le_prev) {
cp->c_hash.le_next = NULL;
cp->c_hash.le_prev = NULL;
}
- hfs_chash_unlock();
+ hfs_chash_unlock(hfsmp);
+
return (0);
}
*/
__private_extern__
void
-hfs_chash_abort(struct cnode *cp)
+hfs_chash_abort(struct hfsmount *hfsmp, struct cnode *cp)
{
- hfs_chash_lock();
+ hfs_chash_lock_spin(hfsmp);
LIST_REMOVE(cp, c_hash);
cp->c_hash.le_next = NULL;
CLR(cp->c_hflag, H_WAITING);
wakeup((caddr_t)cp);
}
- hfs_chash_unlock();
+ hfs_chash_unlock(hfsmp);
}
*/
__private_extern__
void
-hfs_chash_mark_in_transit(struct cnode *cp)
+hfs_chash_mark_in_transit(struct hfsmount *hfsmp, struct cnode *cp)
{
- hfs_chash_lock();
+ hfs_chash_lock_spin(hfsmp);
SET(cp->c_hflag, H_TRANSIT);
- hfs_chash_unlock();
+ hfs_chash_unlock(hfsmp);
}
/* Search a cnode in the hash. This function does not return cnode which
*/
static
struct cnode *
-hfs_chash_search_cnid(dev_t dev, cnid_t cnid)
+hfs_chash_search_cnid(struct hfsmount *hfsmp, cnid_t cnid)
{
struct cnode *cp;
- for (cp = CNODEHASH(dev, cnid)->lh_first; cp; cp = cp->c_hash.le_next) {
- if ((cp->c_fileid == cnid) && (cp->c_dev == dev)) {
+ for (cp = CNODEHASH(hfsmp, cnid)->lh_first; cp; cp = cp->c_hash.le_next) {
+ if (cp->c_fileid == cnid) {
break;
}
}
*/
__private_extern__
int
-hfs_chash_set_childlinkbit(dev_t dev, cnid_t cnid)
+hfs_chash_set_childlinkbit(struct hfsmount *hfsmp, cnid_t cnid)
{
int retval = -1;
struct cnode *cp;
- hfs_chash_lock();
- cp = hfs_chash_search_cnid(dev, cnid);
+ hfs_chash_lock_spin(hfsmp);
+
+ cp = hfs_chash_search_cnid(hfsmp, cnid);
if (cp) {
if (cp->c_attr.ca_recflags & kHFSHasChildLinkMask) {
retval = 0;
retval = 1;
}
}
- hfs_chash_unlock();
+ hfs_chash_unlock(hfsmp);
return retval;
}