/*
- * Copyright (c) 2002-2008 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2002-2012 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
lck_mtx_lock_spin(&hfsmp->hfs_chash_mutex);
}
-#ifdef i386
-static void hfs_chash_lock_convert (struct hfsmount *hfsmp)
-#else
static void hfs_chash_lock_convert (__unused struct hfsmount *hfsmp)
-#endif
{
lck_mtx_convert_spin(&hfsmp->hfs_chash_mutex);
}
*/
return (NULL);
}
- if (!skiplock && hfs_lock(cp, HFS_EXCLUSIVE_LOCK) != 0) {
+ if (!skiplock && hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0) {
vnode_put(vp);
return (NULL);
}
*/
if (!allow_deleted) {
if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
- if (!skiplock)
- hfs_unlock(cp);
+ if (!skiplock) {
+ hfs_unlock(cp);
+ }
vnode_put(vp);
-
return (NULL);
- }
+ }
}
return (vp);
}
*
*/
int
-hfs_chash_snoop(struct hfsmount *hfsmp, ino_t inum, int (*callout)(const struct cat_desc *,
+hfs_chash_snoop(struct hfsmount *hfsmp, ino_t inum, int existence_only, int (*callout)(const struct cat_desc *,
const struct cat_attr *, void *), void * arg)
{
struct cnode *cp;
for (cp = CNODEHASH(hfsmp, inum)->lh_first; cp; cp = cp->c_hash.le_next) {
if (cp->c_fileid != inum)
continue;
- /* Skip cnodes that have been removed from the catalog */
+
+ /*
+ * Under normal circumstances, we would want to return ENOENT if a cnode is in
+ * the hash and it is marked C_NOEXISTS or C_DELETED. However, if the CNID
+ * namespace has wrapped around, then we have the possibility of collisions.
+ * In that case, we may use this function to validate whether or not we
+ * should trust the nextCNID value in the hfs mount point.
+ *
+ * If we didn't do this, then it would be possible for a cnode that is no longer backed
+ * by anything on-disk (C_NOEXISTS) to still exist in the hash along with its
+ * vnode. The cat_create routine could then create a new entry in the catalog
+ * re-using that CNID. Then subsequent hfs_getnewvnode calls will repeatedly fail
+ * trying to look it up/validate it because it is marked C_NOEXISTS. So we want
+ * to prevent that from happening as much as possible.
+ */
+ if (existence_only) {
+ result = 0;
+ break;
+ }
+
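+ /*
+  * A minimal usage sketch (names here are hypothetical): a caller that only
+  * needs to know whether a candidate CNID is still claimed by an in-core
+  * cnode can pass existence_only and no callout, for example:
+  *
+  *     if (hfs_chash_snoop(hfsmp, (ino_t)candidate_cnid, 1, NULL, NULL) == 0) {
+  *             the CNID is still in use; pick another one
+  *     }
+  *
+  * A return of 0 means a matching cnode was found in the hash; the callout
+  * and arg are only consulted on the non-existence_only path.
+  */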
+ /* Skip cnodes that have been removed from the catalog */
if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
break;
}
goto loop;
}
if (ncp) {
- /*
+ /*
* someone else won the race to create
* this cnode and add it to the hash
* just dump our allocation
*/
- FREE_ZONE(ncp, sizeof(struct cnode), M_HFSNODE);
+ FREE_ZONE(ncp, sizeof(struct cnode), M_HFSNODE);
ncp = NULL;
}
if (!skiplock) {
- hfs_lock(cp, HFS_FORCE_LOCK);
+ hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
}
/*
vnode_put(vp);
} else {
hfs_chash_lock_spin(hfsmp);
- CLR(cp->c_hflag, H_ATTACH);
+ CLR(cp->c_hflag, H_ATTACH);
*hflags &= ~H_ATTACH;
-
if (ISSET(cp->c_hflag, H_WAITING)) {
CLR(cp->c_hflag, H_WAITING);
wakeup((caddr_t)cp);
if (ncp == NULL) {
hfs_chash_unlock(hfsmp);
- MALLOC_ZONE(ncp, struct cnode *, sizeof(struct cnode), M_HFSNODE, M_WAITOK);
+
+ MALLOC_ZONE(ncp, struct cnode *, sizeof(struct cnode), M_HFSNODE, M_WAITOK);
/*
* since we dropped the chash lock,
* we need to go back and re-verify
lck_rw_init(&ncp->c_rwlock, hfs_rwlock_group, hfs_lock_attr);
if (!skiplock)
- (void) hfs_lock(ncp, HFS_EXCLUSIVE_LOCK);
+ (void) hfs_lock(ncp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
/* Insert the new cnode with its H_ALLOC flag set */
LIST_INSERT_HEAD(CNODEHASH(hfsmp, inum), ncp, c_hash);