/*
- * Copyright (c) 2002-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2002-2012 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
lck_grp_attr_t * chash_lck_grp_attr;
lck_attr_t * chash_lck_attr;
-/*
- * Structures associated with cnode caching.
- */
-LIST_HEAD(cnodehashhead, cnode) *cnodehashtbl;
-u_long cnodehash; /* size of hash table - 1 */
-#define CNODEHASH(device, inum) (&cnodehashtbl[((device) + (inum)) & cnodehash])
-lck_mtx_t hfs_chash_mutex;
+#define CNODEHASH(hfsmp, inum) (&hfsmp->hfs_cnodehashtbl[(inum) & hfsmp->hfs_cnodehash])
/*
+/*
+ * One-time (boot) initialization for the cnode hash subsystem:
+ * allocate the lock group and lock attributes shared by every
+ * mount's cnode-hash mutex.  Per-mount state (hash table + mutex)
+ * is now set up separately in hfs_chashinit_finish().
+ */
void
hfs_chashinit()
{
-	cnodehashtbl = hashinit(desiredvnodes, M_HFSMNT, &cnodehash);
-
	chash_lck_grp_attr= lck_grp_attr_alloc_init();
-	lck_grp_attr_setstat(chash_lck_grp_attr);
	chash_lck_grp = lck_grp_alloc_init("cnode_hash", chash_lck_grp_attr);
-
	chash_lck_attr = lck_attr_alloc_init();
-	//lck_attr_setdebug(chash_lck_attr);
+}
+
+/* Acquire this mount's cnode-hash mutex (full, blocking acquire). */
+static void hfs_chash_lock(struct hfsmount *hfsmp)
+{
+	lck_mtx_lock(&hfsmp->hfs_chash_mutex);
+}
+
+/*
+ * Acquire the cnode-hash mutex in spin mode (lck_mtx_lock_spin);
+ * pair with hfs_chash_lock_convert() before any potentially
+ * blocking work under the lock.
+ */
+static void hfs_chash_lock_spin(struct hfsmount *hfsmp)
+{
+	lck_mtx_lock_spin(&hfsmp->hfs_chash_mutex);
+}
+
+/*
+ * Convert the cnode-hash mutex from spin mode (taken via
+ * hfs_chash_lock_spin) to a full mutex before blocking work.
+ * The parameter was previously annotated __unused, but it is
+ * in fact dereferenced below, so the annotation is removed.
+ */
+static void hfs_chash_lock_convert (struct hfsmount *hfsmp)
+{
+	lck_mtx_convert_spin(&hfsmp->hfs_chash_mutex);
+}
+
+/* Release this mount's cnode-hash mutex. */
+static void hfs_chash_unlock(struct hfsmount *hfsmp)
+{
+	lck_mtx_unlock(&hfsmp->hfs_chash_mutex);
+}
+
+/*
+ * Per-mount cnode-hash setup: initialize the mount's hash mutex and
+ * allocate its hash table.  hashinit() sizes the table from
+ * desiredvnodes / 4 and stores the bucket mask in hfs_cnodehash
+ * (consumed by the CNODEHASH() macro).  Torn down by hfs_delete_chash().
+ */
+__private_extern__
+void
+hfs_chashinit_finish(struct hfsmount *hfsmp)
+{
+	lck_mtx_init(&hfsmp->hfs_chash_mutex, chash_lck_grp, chash_lck_attr);
+
+	hfsmp->hfs_cnodehashtbl = hashinit(desiredvnodes / 4, M_HFSMNT, &hfsmp->hfs_cnodehash);
+}
+
+/*
+ * Per-mount cnode-hash teardown: destroy the hash mutex and release
+ * the hash table allocated in hfs_chashinit_finish().  The caller is
+ * expected to have drained all cnodes from the hash before this point
+ * -- TODO(review): confirm against the unmount path.
+ */
+__private_extern__
+void
+hfs_delete_chash(struct hfsmount *hfsmp)
+{
+	lck_mtx_destroy(&hfsmp->hfs_chash_mutex, chash_lck_grp);
-	lck_mtx_init(&hfs_chash_mutex, chash_lck_grp, chash_lck_attr);
+	FREE(hfsmp->hfs_cnodehashtbl, M_HFSMNT);
}
*
* If it is in core, but locked, wait for it.
*/
-__private_extern__
struct vnode *
-hfs_chash_getvnode(dev_t dev, ino_t inum, int wantrsrc, int skiplock)
+hfs_chash_getvnode(struct hfsmount *hfsmp, ino_t inum, int wantrsrc, int skiplock, int allow_deleted)
{
struct cnode *cp;
struct vnode *vp;
int error;
- uint32_t vid;
+ u_int32_t vid;
/*
* Go through the hash list
* allocated, wait for it to be finished and then try again.
*/
loop:
- lck_mtx_lock(&hfs_chash_mutex);
- for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
- if ((cp->c_fileid != inum) || (cp->c_dev != dev))
+ hfs_chash_lock_spin(hfsmp);
+
+ for (cp = CNODEHASH(hfsmp, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
+ if (cp->c_fileid != inum)
continue;
/* Wait if cnode is being created or reclaimed. */
if (ISSET(cp->c_hflag, H_ALLOC | H_TRANSIT | H_ATTACH)) {
SET(cp->c_hflag, H_WAITING);
- (void) msleep(cp, &hfs_chash_mutex, PDROP | PINOD,
+ (void) msleep(cp, &hfsmp->hfs_chash_mutex, PDROP | PINOD,
"hfs_chash_getvnode", 0);
goto loop;
}
- /*
- * Skip cnodes that are not in the name space anymore
- * note that this check is done outside of the proper
- * lock to catch nodes already in this state... this
- * state must be rechecked after we acquire the cnode lock
- */
- if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
- continue;
- }
/* Obtain the desired vnode. */
vp = wantrsrc ? cp->c_rsrc_vp : cp->c_vp;
if (vp == NULLVP)
goto exit;
vid = vnode_vid(vp);
- lck_mtx_unlock(&hfs_chash_mutex);
+ hfs_chash_unlock(hfsmp);
if ((error = vnode_getwithvid(vp, vid))) {
/*
*/
return (NULL);
}
- if (!skiplock && hfs_lock(cp, HFS_EXCLUSIVE_LOCK) != 0) {
+ if (!skiplock && hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0) {
vnode_put(vp);
return (NULL);
}
/*
* Skip cnodes that are not in the name space anymore
- * we need to check again with the cnode lock held
- * because we may have blocked acquiring the vnode ref
- * or the lock on the cnode which would allow the node
- * to be unlinked
+ * we need to check with the cnode lock held because
+ * we may have blocked acquiring the vnode ref or the
+ * lock on the cnode which would allow the node to be
+ * unlinked
*/
- if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
- if (!skiplock)
- hfs_unlock(cp);
- vnode_put(vp);
-
- return (NULL);
- }
+ if (!allow_deleted) {
+ if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
+ if (!skiplock) {
+ hfs_unlock(cp);
+ }
+ vnode_put(vp);
+ return (NULL);
+ }
+ }
return (vp);
}
exit:
- lck_mtx_unlock(&hfs_chash_mutex);
+ hfs_chash_unlock(hfsmp);
return (NULL);
}
/*
- * Use the device, fileid pair to find the incore cnode.
- * If no cnode if found one is created
+ * Use the fileid to snoop an incore cnode within this mount.
*
- * If it is in core, but locked, wait for it.
+ * A cnode can exist in the chash even after it has been
+ * deleted from the catalog, so this function returns
+ * ENOENT if C_NOEXISTS is set in the cnode's flags.
+ *
*/
-__private_extern__
int
-hfs_chash_snoop(dev_t dev, ino_t inum, int (*callout)(const struct cat_desc *,
+hfs_chash_snoop(struct hfsmount *hfsmp, ino_t inum, int existence_only, int (*callout)(const struct cat_desc *,
const struct cat_attr *, void *), void * arg)
{
struct cnode *cp;
* If a cnode is in the process of being cleaned out or being
* allocated, wait for it to be finished and then try again.
*/
- lck_mtx_lock(&hfs_chash_mutex);
- for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
- if ((cp->c_fileid != inum) || (cp->c_dev != dev))
+ hfs_chash_lock(hfsmp);
+
+ for (cp = CNODEHASH(hfsmp, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
+ if (cp->c_fileid != inum)
continue;
+
+ /*
+ * Under normal circumstances, we would want to return ENOENT if a cnode is in
+ * the hash and it is marked C_NOEXISTS or C_DELETED. However, if the CNID
+ * namespace has wrapped around, then we have the possibility of collisions.
+ * In that case, we may use this function to validate whether or not we
+ * should trust the nextCNID value in the hfs mount point.
+ *
+ * If we didn't do this, then it would be possible for a cnode that is no longer backed
+ * by anything on-disk (C_NOEXISTS) to still exist in the hash along with its
+ * vnode. The cat_create routine could then create a new entry in the catalog
+ * re-using that CNID. Then subsequent hfs_getnewvnode calls will repeatedly fail
+ * trying to look it up/validate it because it is marked C_NOEXISTS. So we want
+ * to prevent that from happening as much as possible.
+ */
+ if (existence_only) {
+ result = 0;
+ break;
+ }
+
+ /* Skip cnodes that have been removed from the catalog */
+ if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
+ break;
+ }
/* Skip cnodes being created or reclaimed. */
if (!ISSET(cp->c_hflag, H_ALLOC | H_TRANSIT | H_ATTACH)) {
result = callout(&cp->c_desc, &cp->c_attr, arg);
}
break;
}
- lck_mtx_unlock(&hfs_chash_mutex);
+ hfs_chash_unlock(hfsmp);
+
return (result);
}
* If no cnode if found one is created
*
* If it is in core, but locked, wait for it.
+ *
+ * If the cnode is C_DELETED, then return NULL since that
+ * inum is no longer valid for lookups (open-unlinked file).
+ *
+ * If the cnode is C_DELETED but also marked C_RENAMED, then that means
+ * the cnode was renamed over and a new entry exists in its place. The caller
+ * should re-drive the lookup to get the newer entry. In that case, we'll still
+ * return NULL for the cnode, but also return GNV_CHASH_RENAMED in the output flags
+ * of this function to indicate to the caller that they should re-drive the lookup.
*/
-__private_extern__
struct cnode *
-hfs_chash_getcnode(dev_t dev, ino_t inum, struct vnode **vpp, int wantrsrc, int skiplock)
+hfs_chash_getcnode(struct hfsmount *hfsmp, ino_t inum, struct vnode **vpp,
+ int wantrsrc, int skiplock, int *out_flags, int *hflags)
{
struct cnode *cp;
struct cnode *ncp = NULL;
vnode_t vp;
- uint32_t vid;
+ u_int32_t vid;
/*
* Go through the hash list
* allocated, wait for it to be finished and then try again.
*/
loop:
- lck_mtx_lock(&hfs_chash_mutex);
+ hfs_chash_lock_spin(hfsmp);
loop_with_lock:
- for (cp = CNODEHASH(dev, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
- if ((cp->c_fileid != inum) || (cp->c_dev != dev))
+ for (cp = CNODEHASH(hfsmp, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
+ if (cp->c_fileid != inum)
continue;
/*
* Wait if cnode is being created, attached to or reclaimed.
if (ISSET(cp->c_hflag, H_ALLOC | H_ATTACH | H_TRANSIT)) {
SET(cp->c_hflag, H_WAITING);
- (void) msleep(cp, &hfs_chash_mutex, PINOD,
+ (void) msleep(cp, &hfsmp->hfs_chash_mutex, PINOD,
"hfs_chash_getcnode", 0);
goto loop_with_lock;
}
- /*
- * Skip cnodes that are not in the name space anymore
- * note that this check is done outside of the proper
- * lock to catch nodes already in this state... this
- * state must be rechecked after we acquire the cnode lock
- */
- if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
- continue;
- }
vp = wantrsrc ? cp->c_rsrc_vp : cp->c_vp;
if (vp == NULL) {
/*
* The desired vnode isn't there so tag the cnode.
*/
SET(cp->c_hflag, H_ATTACH);
+ *hflags |= H_ATTACH;
- lck_mtx_unlock(&hfs_chash_mutex);
+ hfs_chash_unlock(hfsmp);
} else {
vid = vnode_vid(vp);
- lck_mtx_unlock(&hfs_chash_mutex);
+ hfs_chash_unlock(hfsmp);
if (vnode_getwithvid(vp, vid))
goto loop;
}
if (ncp) {
- /*
+ /*
* someone else won the race to create
* this cnode and add it to the hash
* just dump our allocation
*/
- FREE_ZONE(ncp, sizeof(struct cnode), M_HFSNODE);
+ FREE_ZONE(ncp, sizeof(struct cnode), M_HFSNODE);
ncp = NULL;
}
- if (!skiplock && hfs_lock(cp, HFS_EXCLUSIVE_LOCK) != 0) {
- if (vp != NULLVP)
- vnode_put(vp);
- lck_mtx_lock(&hfs_chash_mutex);
- if (vp == NULLVP)
- CLR(cp->c_hflag, H_ATTACH);
- goto loop_with_lock;
+ if (!skiplock) {
+ hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
}
+
/*
* Skip cnodes that are not in the name space anymore
- * we need to check again with the cnode lock held
- * because we may have blocked acquiring the vnode ref
- * or the lock on the cnode which would allow the node
- * to be unlinked
+ * we need to check with the cnode lock held because
+ * we may have blocked acquiring the vnode ref or the
+ * lock on the cnode which would allow the node to be
+ * unlinked.
+ *
+ * Don't return a cnode in this case since the inum
+ * is no longer valid for lookups.
*/
- if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
+ if ((cp->c_flag & (C_NOEXISTS | C_DELETED)) && !wantrsrc) {
+ int renamed = 0;
+ if (cp->c_flag & C_RENAMED) {
+ renamed = 1;
+ }
if (!skiplock)
hfs_unlock(cp);
- if (vp != NULLVP)
+ if (vp != NULLVP) {
vnode_put(vp);
- lck_mtx_lock(&hfs_chash_mutex);
-
- if (vp == NULLVP)
- CLR(cp->c_hflag, H_ATTACH);
- goto loop_with_lock;
+ } else {
+ hfs_chash_lock_spin(hfsmp);
+ CLR(cp->c_hflag, H_ATTACH);
+ *hflags &= ~H_ATTACH;
+ if (ISSET(cp->c_hflag, H_WAITING)) {
+ CLR(cp->c_hflag, H_WAITING);
+ wakeup((caddr_t)cp);
+ }
+ hfs_chash_unlock(hfsmp);
+ }
+ vp = NULL;
+ cp = NULL;
+ if (renamed) {
+ *out_flags = GNV_CHASH_RENAMED;
+ }
}
*vpp = vp;
return (cp);
/*
* Allocate a new cnode
*/
- if (skiplock)
+ if (skiplock && !wantrsrc)
panic("%s - should never get here when skiplock is set \n", __FUNCTION__);
if (ncp == NULL) {
- lck_mtx_unlock(&hfs_chash_mutex);
+ hfs_chash_unlock(hfsmp);
MALLOC_ZONE(ncp, struct cnode *, sizeof(struct cnode), M_HFSNODE, M_WAITOK);
/*
*/
goto loop;
}
+ hfs_chash_lock_convert(hfsmp);
+
bzero(ncp, sizeof(struct cnode));
SET(ncp->c_hflag, H_ALLOC);
+ *hflags |= H_ALLOC;
ncp->c_fileid = inum;
- ncp->c_dev = dev;
TAILQ_INIT(&ncp->c_hintlist); /* make the list empty */
+ TAILQ_INIT(&ncp->c_originlist);
lck_rw_init(&ncp->c_rwlock, hfs_rwlock_group, hfs_lock_attr);
if (!skiplock)
- (void) hfs_lock(ncp, HFS_EXCLUSIVE_LOCK);
+ (void) hfs_lock(ncp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
/* Insert the new cnode with it's H_ALLOC flag set */
- LIST_INSERT_HEAD(CNODEHASH(dev, inum), ncp, c_hash);
- lck_mtx_unlock(&hfs_chash_mutex);
+ LIST_INSERT_HEAD(CNODEHASH(hfsmp, inum), ncp, c_hash);
+ hfs_chash_unlock(hfsmp);
*vpp = NULL;
return (ncp);
__private_extern__
void
-hfs_chashwakeup(struct cnode *cp, int hflags)
+hfs_chashwakeup(struct hfsmount *hfsmp, struct cnode *cp, int hflags)
{
- lck_mtx_lock(&hfs_chash_mutex);
+ hfs_chash_lock_spin(hfsmp);
CLR(cp->c_hflag, hflags);
CLR(cp->c_hflag, H_WAITING);
wakeup((caddr_t)cp);
}
- lck_mtx_unlock(&hfs_chash_mutex);
+ hfs_chash_unlock(hfsmp);
}
*/
+/*
+ * Remove both cnodes from their current hash chains and re-insert
+ * them under their current fileids (used when the fileids of two
+ * cnodes change).  Done atomically under the hash mutex.
+ */
__private_extern__
void
-hfs_chash_rehash(struct cnode *cp1, struct cnode *cp2)
+hfs_chash_rehash(struct hfsmount *hfsmp, struct cnode *cp1, struct cnode *cp2)
{
-	lck_mtx_lock(&hfs_chash_mutex);
+	hfs_chash_lock_spin(hfsmp);
	LIST_REMOVE(cp1, c_hash);
	LIST_REMOVE(cp2, c_hash);
-	LIST_INSERT_HEAD(CNODEHASH(cp1->c_dev, cp1->c_fileid), cp1, c_hash);
-	LIST_INSERT_HEAD(CNODEHASH(cp2->c_dev, cp2->c_fileid), cp2, c_hash);
+	LIST_INSERT_HEAD(CNODEHASH(hfsmp, cp1->c_fileid), cp1, c_hash);
+	LIST_INSERT_HEAD(CNODEHASH(hfsmp, cp2->c_fileid), cp2, c_hash);
-	lck_mtx_unlock(&hfs_chash_mutex);
+	hfs_chash_unlock(hfsmp);
}
*/
+/*
+ * Remove a cnode from the hash.  Returns EBUSY (without removing)
+ * if a vnode is currently being attached to the cnode; returns 0
+ * otherwise.
+ */
__private_extern__
int
-hfs_chashremove(struct cnode *cp)
+hfs_chashremove(struct hfsmount *hfsmp, struct cnode *cp)
{
-	lck_mtx_lock(&hfs_chash_mutex);
+	hfs_chash_lock_spin(hfsmp);
	/* Check if a vnode is getting attached */
	if (ISSET(cp->c_hflag, H_ATTACH)) {
-		lck_mtx_unlock(&hfs_chash_mutex);
+		hfs_chash_unlock(hfsmp);
		return (EBUSY);
	}
-	LIST_REMOVE(cp, c_hash);
-	cp->c_hash.le_next = NULL;
-	cp->c_hash.le_prev = NULL;
-
-	lck_mtx_unlock(&hfs_chash_mutex);
+	/* Only unlink when the cnode is actually on a hash chain. */
+	if (cp->c_hash.le_next || cp->c_hash.le_prev) {
+		LIST_REMOVE(cp, c_hash);
+		cp->c_hash.le_next = NULL;
+		cp->c_hash.le_prev = NULL;
+	}
+	hfs_chash_unlock(hfsmp);
+
	return (0);
}
*/
__private_extern__
void
-hfs_chash_abort(struct cnode *cp)
+hfs_chash_abort(struct hfsmount *hfsmp, struct cnode *cp)
{
- lck_mtx_lock(&hfs_chash_mutex);
+ hfs_chash_lock_spin(hfsmp);
LIST_REMOVE(cp, c_hash);
cp->c_hash.le_next = NULL;
CLR(cp->c_hflag, H_WAITING);
wakeup((caddr_t)cp);
}
- lck_mtx_unlock(&hfs_chash_mutex);
+ hfs_chash_unlock(hfsmp);
}
/*
- * mark a cnode as in transistion
+ * mark a cnode as in transition
*/
+/*
+ * Set H_TRANSIT on the cnode under the hash mutex; hash lookups that
+ * see this flag will sleep until the transition completes.
+ */
__private_extern__
void
-hfs_chash_mark_in_transit(struct cnode *cp)
+hfs_chash_mark_in_transit(struct hfsmount *hfsmp, struct cnode *cp)
{
-	lck_mtx_lock(&hfs_chash_mutex);
+	hfs_chash_lock_spin(hfsmp);
	SET(cp->c_hflag, H_TRANSIT);
-	lck_mtx_unlock(&hfs_chash_mutex);
+	hfs_chash_unlock(hfsmp);
+}
+
+/* Look up a cnode by CNID in this mount's cnode hash.  Cnodes that
+ * are being created, reclaimed, or having a vnode attached
+ * (H_ALLOC | H_TRANSIT | H_ATTACH) are treated as not found.  This
+ * function does not take the cnode hash mutex; the caller must
+ * already hold it.  Returns the cnode on success, NULL otherwise.
+ */
+static
+struct cnode *
+hfs_chash_search_cnid(struct hfsmount *hfsmp, cnid_t cnid)
+{
+	struct cnode *found = NULL;
+	struct cnode *iter;
+
+	for (iter = CNODEHASH(hfsmp, cnid)->lh_first; iter != NULL; iter = iter->c_hash.le_next) {
+		if (iter->c_fileid == cnid) {
+			found = iter;
+			break;
+		}
+	}
+
+	/* A matching cnode that is in flux is not usable. */
+	if ((found != NULL) && ISSET(found->c_hflag, H_ALLOC | H_TRANSIT | H_ATTACH)) {
+		found = NULL;
+	}
+
+	return found;
+}
+
+/* Search the hash for the cnode with the given CNID on this mount.
+ * If found and its kHFSHasChildLinkBit is clear, set the bit.  No
+ * new cnode is created when the lookup fails.
+ *
+ * Return values -
+ *	-1 : The cnode was not found.
+ *	 0 : The cnode was found, and the kHFSHasChildLinkBit was already set.
+ *	 1 : The cnode was found, the kHFSHasChildLinkBit was not set, and the
+ *	     function had to set that bit.
+ */
+__private_extern__
+int
+hfs_chash_set_childlinkbit(struct hfsmount *hfsmp, cnid_t cnid)
+{
+	struct cnode *cp;
+	int retval = -1;
+
+	hfs_chash_lock_spin(hfsmp);
+
+	cp = hfs_chash_search_cnid(hfsmp, cnid);
+	if (cp != NULL) {
+		if ((cp->c_attr.ca_recflags & kHFSHasChildLinkMask) == 0) {
+			cp->c_attr.ca_recflags |= kHFSHasChildLinkMask;
+			retval = 1;
+		} else {
+			retval = 0;
+		}
+	}
+	hfs_chash_unlock(hfsmp);
+
+	return retval;
+}