/*
- * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/dirent.h>
#include <sys/stat.h>
#include <sys/buf.h>
+#include <sys/buf_internal.h>
#include <sys/mount.h>
#include <sys/vnode_if.h>
#include <sys/vnode_internal.h>
#include <sys/uio_internal.h>
#include <sys/fsctl.h>
#include <sys/cprotect.h>
-
+#include <sys/xattr.h>
#include <string.h>
+#include <sys/fsevents.h>
+#include <kern/kalloc.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>
int hfs_removefile(struct vnode *, struct vnode *, struct componentname *,
int, int, int, struct vnode *, int);
+/* Used here and in cnode teardown -- for symlinks */
+int hfs_removefile_callback(struct buf *bp, void *hfsmp);
+
int hfs_movedata (struct vnode *, struct vnode*);
static int hfs_move_fork (struct filefork *srcfork, struct cnode *src,
struct filefork *dstfork, struct cnode *dst);
+decmpfs_cnode* hfs_lazy_init_decmpfs_cnode (struct cnode *cp);
#if FIFO
static int hfsfifo_read(struct vnop_read_args *);
-
/*****************************************************************************
*
* Common Operations on vnodes
*
*****************************************************************************/
+/*
+ * Is the given cnode either the .journal or .journal_info_block file on
+ * a volume with an active journal? Many VNOPs use this to deny access
+ * to those files.
+ *
+ * Note: the .journal file on a volume with an external journal still
+ * returns true here, even though it does not actually hold the contents
+ * of the volume's journal.
+ */
+static _Bool
+hfs_is_journal_file(struct hfsmount *hfsmp, struct cnode *cp)
+{
+ // A NULL jnl pointer means no active journal, so nothing can match.
+ if (hfsmp->jnl != NULL &&
+ (cp->c_fileid == hfsmp->hfs_jnlinfoblkid ||
+ cp->c_fileid == hfsmp->hfs_jnlfileid)) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
/*
* Create a regular file.
*/
/* maybe we should take the hfs cnode lock here, and if so, use the skiplock parameter to tell us not to */
- if (!skiplock) hfs_lock(cp, HFS_SHARED_LOCK);
+ if (!skiplock) hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
struct vnode *c_vp = cp->c_vp;
if (c_vp) {
/* we already have a data vnode */
/*
* hfs_lazy_init_decmpfs_cnode(): returns the decmpfs_cnode for a cnode,
- * allocating it if necessary; returns NULL if there was an allocation error
+ * allocating it if necessary; returns NULL if there was an allocation error.
+ * function is non-static so that it can be used from the FCNTL handler.
*/
-static decmpfs_cnode *
+decmpfs_cnode *
hfs_lazy_init_decmpfs_cnode(struct cnode *cp)
{
if (!cp->c_decmp) {
int ret = 0;
/* fast check to see if file is compressed. If flag is clear, just answer no */
- if (!(cp->c_flags & UF_COMPRESSED)) {
+ if (!(cp->c_bsdflags & UF_COMPRESSED)) {
return 0;
}
}
#endif /* HFS_COMPRESSION */
+
+//
+// This function gets the doc_tombstone structure for the
+// current thread. If the thread doesn't have one, the
+// structure is allocated.
+//
+static struct doc_tombstone *
+get_uthread_doc_tombstone(void)
+{
+ struct uthread *ut;
+ ut = get_bsdthread_info(current_thread());
+
+ // Lazily allocate the per-thread tombstone on first use; zero-fill it
+ // so all tombstone fields start out cleared.
+ if (ut->t_tombstone == NULL) {
+ ut->t_tombstone = kalloc(sizeof(struct doc_tombstone));
+ if (ut->t_tombstone) {
+ memset(ut->t_tombstone, 0, sizeof(struct doc_tombstone));
+ }
+ }
+
+ // Note: returns NULL if the kalloc above failed.
+ return ut->t_tombstone;
+}
+
+//
+// This routine clears out the current tombstone for the
+// current thread and if necessary passes the doc-id of
+// the tombstone on to the dst_cnode.
+//
+// If the doc-id transfers to dst_cnode, we also generate
+// a doc-id changed fsevent. Unlike all the other fsevents,
+// doc-id changed events can only be generated here in HFS
+// where we have the necessary info.
+//
+static void
+clear_tombstone_docid(struct doc_tombstone *ut, struct hfsmount *hfsmp, struct cnode *dst_cnode)
+{
+ // Remember the doc-id being cleared; a non-zero value (together with a
+ // non-NULL dst_cnode) is what triggers the transfer/fsevent work below.
+ uint32_t old_id = ut->t_lastop_document_id;
+
+ ut->t_lastop_document_id = 0;
+ ut->t_lastop_parent = NULL;
+ ut->t_lastop_parent_vid = 0;
+ ut->t_lastop_filename[0] = '\0';
+
+ //
+ // If the lastop item is still the same and needs to be cleared,
+ // clear it.
+ //
+ // (The vid check verifies the vnode has not been recycled since the
+ // tombstone was saved.)
+ if (dst_cnode && old_id && ut->t_lastop_item && vnode_vid(ut->t_lastop_item) == ut->t_lastop_item_vid) {
+ //
+ // clear the document_id from the file that used to have it.
+ // XXXdbg - we need to lock the other vnode and make sure to
+ // update it on disk.
+ //
+ struct cnode *ocp = VTOC(ut->t_lastop_item);
+ struct FndrExtendedFileInfo *ofip = (struct FndrExtendedFileInfo *)((char *)&ocp->c_attr.ca_finderinfo + 16);
+
+ // printf("clearing doc-id from ino %d\n", ocp->c_desc.cd_cnid);
+ ofip->document_id = 0;
+ ocp->c_bsdflags &= ~UF_TRACKED;
+ ocp->c_flag |= C_MODIFIED | C_FORCEUPDATE; // mark it dirty
+ /* cat_update(hfsmp, &ocp->c_desc, &ocp->c_attr, NULL, NULL); */
+
+ }
+
+#if CONFIG_FSE
+ // Announce that the doc-id moved from the old file to dst_cnode.
+ if (dst_cnode && old_id) {
+ struct FndrExtendedFileInfo *fip = (struct FndrExtendedFileInfo *)((char *)&dst_cnode->c_attr.ca_finderinfo + 16);
+
+ add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
+ FSE_ARG_DEV, hfsmp->hfs_raw_dev,
+ FSE_ARG_INO, (ino64_t)ut->t_lastop_fileid, // src inode #
+ FSE_ARG_INO, (ino64_t)dst_cnode->c_fileid, // dst inode #
+ FSE_ARG_INT32, (uint32_t)fip->document_id,
+ FSE_ARG_DONE);
+ }
+#endif
+ // last, clear these now that we're all done
+ ut->t_lastop_item = NULL;
+ ut->t_lastop_fileid = 0;
+ ut->t_lastop_item_vid = 0;
+}
+
+
+//
+// This function is used to filter out operations on temp
+// filenames. We have to filter out operations on certain
+// temp filenames to work-around questionable application
+// behavior from apps like Autocad that perform unusual
+// sequences of file system operations for a "safe save".
+//
+static int
+is_ignorable_temp_name(const char *nameptr, int len)
+{
+ // len == 0 means "compute the length for me".
+ if (len == 0) {
+ len = strlen(nameptr);
+ }
+
+ // Ignorable temp names: an "atmp" prefix, or a ".bak" / ".tmp" suffix.
+ if ( strncmp(nameptr, "atmp", 4) == 0
+ || (len > 4 && strncmp(nameptr+len-4, ".bak", 4) == 0)
+ || (len > 4 && strncmp(nameptr+len-4, ".tmp", 4) == 0)) {
+ return 1;
+ }
+
+ return 0;
+}
+
+//
+// Decide if we need to save a tombstone or not. Normally we always
+// save a tombstone - but if there already is one and the name we're
+// given is an ignorable name, then we will not save a tombstone.
+//
+static int
+should_save_docid_tombstone(struct doc_tombstone *ut, struct vnode *vp, struct componentname *cnp)
+{
+ // No name to record, so there is nothing to save.
+ if (cnp->cn_nameptr == NULL) {
+ return 0;
+ }
+
+ // Skip saving when a tombstone already exists for this very vnode and
+ // the incoming name is one of the ignorable temp names.
+ if (ut->t_lastop_document_id && ut->t_lastop_item == vp && is_ignorable_temp_name(cnp->cn_nameptr, cnp->cn_namelen)) {
+ return 0;
+ }
+
+ return 1;
+}
+
+
+//
+// This function saves a tombstone for the given vnode and name. The
+// tombstone represents the parent directory and name where the document
+// used to live and the document-id of that file. This info is recorded
+// in the doc_tombstone structure hanging off the uthread (which assumes
+// that all safe-save operations happen on the same thread).
+//
+// If later on the same parent/name combo comes back into existence then
+// we'll preserve the doc-id from this vnode onto the new vnode.
+//
+static void
+save_tombstone(struct hfsmount *hfsmp, struct vnode *dvp, struct vnode *vp, struct componentname *cnp, int for_unlink)
+{
+ struct cnode *cp = VTOC(vp);
+ struct doc_tombstone *ut;
+ ut = get_uthread_doc_tombstone();
+ // NOTE(review): get_uthread_doc_tombstone() can return NULL on allocation
+ // failure, yet ut is dereferenced unconditionally below -- confirm callers
+ // can tolerate this or add a guard.
+
+ if (for_unlink && vp->v_type == VREG && cp->c_linkcount > 1) {
+ //
+ // a regular file that is being unlinked and that is also
+ // hardlinked should not clear the UF_TRACKED state or
+ // mess with the tombstone because somewhere else in the
+ // file system the file is still alive.
+ //
+ return;
+ }
+
+ // Record where the document lived (parent dir + name) and which file
+ // held it, so a later create at the same location can inherit the id.
+ ut->t_lastop_parent = dvp;
+ ut->t_lastop_parent_vid = vnode_vid(dvp);
+ ut->t_lastop_fileid = cp->c_fileid;
+ if (for_unlink) {
+ ut->t_lastop_item = NULL;
+ ut->t_lastop_item_vid = 0;
+ } else {
+ ut->t_lastop_item = vp;
+ ut->t_lastop_item_vid = vnode_vid(vp);
+ }
+
+ strlcpy((char *)&ut->t_lastop_filename[0], cnp->cn_nameptr, sizeof(ut->t_lastop_filename));
+
+ // The document_id lives in the extended Finder info (16 bytes into
+ // ca_finderinfo).
+ struct FndrExtendedFileInfo *fip = (struct FndrExtendedFileInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
+ ut->t_lastop_document_id = fip->document_id;
+
+ if (for_unlink) {
+ // clear this so it's never returned again
+ fip->document_id = 0;
+ cp->c_bsdflags &= ~UF_TRACKED;
+
+ if (ut->t_lastop_document_id) {
+ (void) cat_update(hfsmp, &cp->c_desc, &cp->c_attr, NULL, NULL);
+
+#if CONFIG_FSE
+ // this event is more of a "pending-delete"
+ add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
+ FSE_ARG_DEV, hfsmp->hfs_raw_dev,
+ FSE_ARG_INO, (ino64_t)cp->c_fileid, // src inode #
+ FSE_ARG_INO, (ino64_t)0, // dst inode #
+ FSE_ARG_INT32, ut->t_lastop_document_id, // document id
+ FSE_ARG_DONE);
+#endif
+ }
+ }
+}
+
+
/*
* Open a file/directory.
*/
/*
* Files marked append-only must be opened for appending.
*/
- if ((cp->c_flags & APPEND) && !vnode_isdir(vp) &&
+ if ((cp->c_bsdflags & APPEND) && !vnode_isdir(vp) &&
(ap->a_mode & (FWRITE | O_APPEND)) == FWRITE)
return (EPERM);
if (vnode_isreg(vp) && !UBCINFOEXISTS(vp))
return (EBUSY); /* file is in use by the kernel */
- /* Don't allow journal file to be opened externally. */
- if (cp->c_fileid == hfsmp->hfs_jnlfileid)
+ /* Don't allow journal to be opened externally. */
+ if (hfs_is_journal_file(hfsmp, cp))
return (EPERM);
- /* If we're going to write to the file, initialize quotas. */
-#if QUOTA
- if ((ap->a_mode & FWRITE) && (hfsmp->hfs_flags & HFS_QUOTAS))
- (void)hfs_getinoquota(cp);
-#endif /* QUOTA */
-
- /*
- * On the first (non-busy) open of a fragmented
- * file attempt to de-frag it (if its less than 20MB).
- */
if ((hfsmp->hfs_flags & HFS_READ_ONLY) ||
(hfsmp->jnl == NULL) ||
#if NAMEDSTREAMS
return (0);
}
- if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK)))
+ if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
return (error);
+
+#if QUOTA
+ /* If we're going to write to the file, initialize quotas. */
+ if ((ap->a_mode & FWRITE) && (hfsmp->hfs_flags & HFS_QUOTAS))
+ (void)hfs_getinoquota(cp);
+#endif /* QUOTA */
+
+ /*
+ * On the first (non-busy) open of a fragmented
+ * file attempt to de-frag it (if it's less than 20MB).
+ */
fp = VTOF(vp);
if (fp->ff_blocks &&
fp->ff_extents[7].blockCount != 0 &&
vfs_context_proc(ap->a_context));
}
}
+
hfs_unlock(cp);
return (0);
int tooktrunclock = 0;
int knownrefs = 0;
- if ( hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) != 0)
+ if ( hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0)
return (0);
cp = VTOC(vp);
hfsmp = VTOHFS(vp);
// release cnode lock; must acquire truncate lock BEFORE cnode lock
hfs_unlock(cp);
- hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK);
+ hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
tooktrunclock = 1;
- if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) != 0) {
- hfs_unlock_truncate(cp, 0);
+ if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0) {
+ hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
// bail out if we can't re-acquire cnode lock
return 0;
}
}
if (tooktrunclock){
- hfs_unlock_truncate(cp, 0);
+ hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
}
hfs_unlock(cp);
vap->va_uid = cp->c_uid;
vap->va_gid = cp->c_gid;
vap->va_mode = cp->c_mode;
- vap->va_flags = cp->c_flags;
+ vap->va_flags = cp->c_bsdflags;
vap->va_supported |= VNODE_ATTR_AUTH & ~VNODE_ATTR_va_acl;
if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) {
*/
if ((vap->va_active & VNODE_ATTR_TIMES) &&
(cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime)) {
- if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK)))
+ if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
return (error);
hfs_touchtimes(hfsmp, cp);
}
else {
- if ((error = hfs_lock(cp, HFS_SHARED_LOCK)))
+ if ((error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT)))
return (error);
}
}
}
-
/* XXX is this really a good 'optimal I/O size'? */
vap->va_iosize = hfsmp->hfs_logBlockSize;
vap->va_uid = cp->c_uid;
vap->va_gid = cp->c_gid;
vap->va_mode = cp->c_mode;
- vap->va_flags = cp->c_flags;
+ vap->va_flags = cp->c_bsdflags;
/*
* Exporting file IDs from HFS Plus:
vap->va_data_size = data_size;
vap->va_supported |= VNODE_ATTR_va_data_size;
#endif
-
+
+ if (VATTR_IS_ACTIVE(vap, va_gen)) {
+ if (UBCINFOEXISTS(vp) && (vp->v_ubcinfo->ui_flags & UI_ISMAPPED)) {
+ /* While file is mmapped the generation count is invalid.
+ * However, bump the value so that the write-gen counter
+ * will be different once the file is unmapped (since,
+ * when unmapped the pageouts may not yet have happened)
+ */
+ if (vp->v_ubcinfo->ui_flags & UI_MAPPEDWRITE) {
+ hfs_incr_gencount (cp);
+ }
+ vap->va_gen = 0;
+ } else {
+ vap->va_gen = hfs_get_gencount(cp);
+ }
+
+ VATTR_SET_SUPPORTED(vap, va_gen);
+ }
+ if (VATTR_IS_ACTIVE(vap, va_document_id)) {
+ vap->va_document_id = hfs_get_document_id(cp);
+ VATTR_SET_SUPPORTED(vap, va_document_id);
+ }
+
/* Mark them all at once instead of individual VATTR_SET_SUPPORTED calls. */
vap->va_supported |= VNODE_ATTR_va_create_time | VNODE_ATTR_va_modify_time |
VNODE_ATTR_va_change_time| VNODE_ATTR_va_backup_time |
if ((cp->c_flag & C_HARDLINK) &&
((cp->c_desc.cd_namelen == 0) || (vap->va_linkid != cp->c_cnid))) {
- /* If we have no name and our link ID is the raw inode number, then we may
+ /*
+ * If we have no name and our link ID is the raw inode number, then we may
* have an open-unlinked file. Go to the next link in this case.
*/
if ((cp->c_desc.cd_namelen == 0) && (vap->va_linkid == cp->c_fileid)) {
error = decmpfs_update_attributes(vp, vap);
if (error)
return error;
-
+#endif
//
// if this is not a size-changing setattr and it is not just
// an atime update, then check for a snapshot.
//
if (!VATTR_IS_ACTIVE(vap, va_data_size) && !(vap->va_active == VNODE_ATTR_va_access_time)) {
- check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_METADATA_MOD, NULL);
+ check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_METADATA_MOD, NSPACE_REARM_NO_ARG);
}
-#endif
-
#if CONFIG_PROTECT
- if ((error = cp_handle_vnop(VTOC(vp), CP_WRITE_ACCESS)) != 0) {
+ if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
return (error);
}
#endif /* CONFIG_PROTECT */
hfsmp = VTOHFS(vp);
- /* Don't allow modification of the journal file. */
- if (hfsmp->hfs_jnlfileid == VTOC(vp)->c_fileid) {
+ /* Don't allow modification of the journal. */
+ if (hfs_is_journal_file(hfsmp, VTOC(vp))) {
return (EPERM);
}
+ //
+ // Check if we'll need a document_id and if so, get it before we lock
+ // the cnode to avoid any possible deadlock with the root vnode which has
+ // to get locked to get the document id
+ //
+ u_int32_t document_id=0;
+ if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & UF_TRACKED) && !(VTOC(vp)->c_bsdflags & UF_TRACKED)) {
+ struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&(VTOC(vp)->c_attr.ca_finderinfo) + 16);
+ //
+ // If the document_id is not set, get a new one. It will be set
+ // on the file down below once we hold the cnode lock.
+ //
+ if (fip->document_id == 0) {
+ if (hfs_generate_document_id(hfsmp, &document_id) != 0) {
+ document_id = 0;
+ }
+ }
+ }
+
+
/*
* File size change request.
* We are guaranteed that this is not a directory, and that
#endif
/* Take truncate lock before taking cnode lock. */
- hfs_lock_truncate(VTOC(vp), HFS_EXCLUSIVE_LOCK);
+ hfs_lock_truncate(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
/* Perform the ubc_setsize before taking the cnode lock. */
ubc_setsize(vp, vap->va_data_size);
- if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK))) {
- hfs_unlock_truncate(VTOC(vp), 0);
+ if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
+ hfs_unlock_truncate(VTOC(vp), HFS_LOCK_DEFAULT);
#if HFS_COMPRESSION
decmpfs_unlock_compressed_data(dp, 1);
#endif
error = hfs_truncate(vp, vap->va_data_size, vap->va_vaflags & 0xffff, 1, 0, ap->a_context);
- hfs_unlock_truncate(cp, 0);
+ hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
#if HFS_COMPRESSION
decmpfs_unlock_compressed_data(dp, 1);
#endif
goto out;
}
if (cp == NULL) {
- if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK)))
+ if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
return (error);
cp = VTOC(vp);
}
u_int16_t *fdFlags;
#if HFS_COMPRESSION
- if ((cp->c_flags ^ vap->va_flags) & UF_COMPRESSED) {
+ if ((cp->c_bsdflags ^ vap->va_flags) & UF_COMPRESSED) {
/*
* the UF_COMPRESSED was toggled, so reset our cached compressed state
* but we don't want to actually do the update until we've released the cnode lock down below
decmpfs_reset_state = 1;
}
#endif
+ if ((vap->va_flags & UF_TRACKED) && !(cp->c_bsdflags & UF_TRACKED)) {
+ struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
+
+ //
+ // we're marking this item UF_TRACKED. if the document_id is
+ // not set, get a new one and put it on the file.
+ //
+ if (fip->document_id == 0) {
+ if (document_id != 0) {
+ // printf("SETATTR: assigning doc-id %d to %s (ino %d)\n", document_id, vp->v_name, cp->c_desc.cd_cnid);
+ fip->document_id = (uint32_t)document_id;
+#if CONFIG_FSE
+ add_fsevent(FSE_DOCID_CHANGED, ap->a_context,
+ FSE_ARG_DEV, hfsmp->hfs_raw_dev,
+ FSE_ARG_INO, (ino64_t)0, // src inode #
+ FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
+ FSE_ARG_INT32, document_id,
+ FSE_ARG_DONE);
+#endif
+ } else {
+ // printf("hfs: could not acquire a new document_id for %s (ino %d)\n", vp->v_name, cp->c_desc.cd_cnid);
+ }
+ }
+
+ } else if (!(vap->va_flags & UF_TRACKED) && (cp->c_bsdflags & UF_TRACKED)) {
+ //
+ // UF_TRACKED is being cleared so clear the document_id
+ //
+ struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
+ if (fip->document_id) {
+ // printf("SETATTR: clearing doc-id %d from %s (ino %d)\n", fip->document_id, vp->v_name, cp->c_desc.cd_cnid);
+#if CONFIG_FSE
+ add_fsevent(FSE_DOCID_CHANGED, ap->a_context,
+ FSE_ARG_DEV, hfsmp->hfs_raw_dev,
+ FSE_ARG_INO, (ino64_t)cp->c_fileid, // src inode #
+ FSE_ARG_INO, (ino64_t)0, // dst inode #
+ FSE_ARG_INT32, fip->document_id, // document id
+ FSE_ARG_DONE);
+#endif
+ fip->document_id = 0;
+ cp->c_bsdflags &= ~UF_TRACKED;
+ }
+ }
- cp->c_flags = vap->va_flags;
+ cp->c_bsdflags = vap->va_flags;
cp->c_touch_chgtime = TRUE;
+
/*
* Mirror the UF_HIDDEN flag to the invisible bit of the Finder Info.
if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
return (0);
- // XXXdbg - don't allow modification of the journal or journal_info_block
- if (VTOHFS(vp)->jnl && cp && cp->c_datafork) {
- struct HFSPlusExtentDescriptor *extd;
-
- extd = &cp->c_datafork->ff_extents[0];
- if (extd->startBlock == VTOVCB(vp)->vcbJinfoBlock || extd->startBlock == VTOHFS(vp)->jnl_start) {
- return EPERM;
- }
+ // Don't allow modification of the journal or journal_info_block
+ if (hfs_is_journal_file(VTOHFS(vp), cp)) {
+ return EPERM;
}
#if OVERRIDE_UNKNOWN_PERMISSIONS
}
/* If immutable bit set, nobody gets to write it. */
- if (considerFlags && (cp->c_flags & IMMUTABLE))
+ if (considerFlags && (cp->c_bsdflags & IMMUTABLE))
return (EPERM);
/* Otherwise, user id 0 always gets access. */
/*
- * The hfs_exchange routine swaps the fork data in two files by
- * exchanging some of the information in the cnode. It is used
- * to preserve the file ID when updating an existing file, in
- * case the file is being tracked through its file ID. Typically
- * its used after creating a new file during a safe-save.
+ * hfs_vnop_exchange:
+ *
+ * Inputs:
+ * 'from' vnode/cnode
+ * 'to' vnode/cnode
+ * options flag bits
+ * vfs_context
+ *
+ * Discussion:
+ * hfs_vnop_exchange is used to service the exchangedata(2) system call.
+ * Per the requirements of that system call, this function "swaps" some
+ * of the information that lives in one catalog record for some that
+ * lives in another. Note that not everything is swapped; in particular,
+ * the extent information stored in each cnode is kept local to that
+ * cnode. This allows existing file descriptor references to continue
+ * to operate on the same content, regardless of the location in the
+ * namespace that the file may have moved to. See inline comments
+ * in the function for more information.
*/
int
hfs_vnop_exchange(ap)
const unsigned char *to_nameptr;
char from_iname[32];
char to_iname[32];
- u_int32_t tempflag;
+ uint32_t to_flag_special;
+ uint32_t from_flag_special;
cnid_t from_parid;
cnid_t to_parid;
int lockflags;
cat_cookie_t cookie;
time_t orig_from_ctime, orig_to_ctime;
- /* The files must be on the same volume. */
- if (vnode_mount(from_vp) != vnode_mount(to_vp))
- return (EXDEV);
-
- if (from_vp == to_vp)
- return (EINVAL);
+ /*
+ * VFS does the following checks:
+ * 1. Validate that both are files.
+ * 2. Validate that both are on the same mount.
+ * 3. Validate that they're not the same vnode.
+ */
orig_from_ctime = VTOC(from_vp)->c_ctime;
orig_to_ctime = VTOC(to_vp)->c_ctime;
+
+#if CONFIG_PROTECT
+ /*
+ * Do not allow exchangedata/F_MOVEDATAEXTENTS on data-protected filesystems
+ * because the EAs will not be swapped. As a result, the persistent keys would not
+ * match and the files will be garbage.
+ */
+ if (cp_fs_protected (vnode_mount(from_vp))) {
+ return EINVAL;
+ }
+#endif
+
#if HFS_COMPRESSION
if ( hfs_file_is_compressed(VTOC(from_vp), 0) ) {
if ( 0 != ( error = decmpfs_decompress_file(from_vp, VTOCMP(from_vp), -1, 0, 1) ) ) {
if ((ap->a_options & FSOPT_EXCHANGE_DATA_ONLY) == 0) {
check_for_tracked_file(from_vp, orig_from_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
check_for_tracked_file(to_vp, orig_to_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
- }
- else {
+ } else {
/*
* We're doing a data-swap.
* Take the truncate lock/cnode lock, then verify there are no mmap references.
* Allow the rest of the codeflow to re-acquire the cnode locks in order.
*/
- hfs_lock_truncate (VTOC(from_vp), HFS_SHARED_LOCK);
+ hfs_lock_truncate (VTOC(from_vp), HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
- if ((error = hfs_lock(VTOC(from_vp), HFS_EXCLUSIVE_LOCK))) {
- hfs_unlock_truncate (VTOC(from_vp), 0);
+ if ((error = hfs_lock(VTOC(from_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
+ hfs_unlock_truncate (VTOC(from_vp), HFS_LOCK_DEFAULT);
return error;
}
if (vnode_isinuse(from_vp, 1)) {
error = EBUSY;
hfs_unlock(VTOC(from_vp));
- hfs_unlock_truncate (VTOC(from_vp), 0);
+ hfs_unlock_truncate (VTOC(from_vp), HFS_LOCK_DEFAULT);
return error;
}
error = hfs_filedone (from_vp, ap->a_context);
VTOC(from_vp)->c_flag &= ~C_SWAPINPROGRESS;
hfs_unlock(VTOC(from_vp));
- hfs_unlock_truncate(VTOC(from_vp), 0);
+ hfs_unlock_truncate(VTOC(from_vp), HFS_LOCK_DEFAULT);
if (error) {
return error;
}
}
-
if ((error = hfs_lockpair(VTOC(from_vp), VTOC(to_vp), HFS_EXCLUSIVE_LOCK)))
return (error);
to_cp = VTOC(to_vp);
hfsmp = VTOHFS(from_vp);
- /* Only normal files can be exchanged. */
- if (!vnode_isreg(from_vp) || !vnode_isreg(to_vp) ||
- VNODE_IS_RSRC(from_vp) || VNODE_IS_RSRC(to_vp)) {
+ /* Resource forks cannot be exchanged. */
+ if ( VNODE_IS_RSRC(from_vp) || VNODE_IS_RSRC(to_vp)) {
error = EINVAL;
goto exit;
}
- // XXXdbg - don't allow modification of the journal or journal_info_block
- if (hfsmp->jnl) {
- struct HFSPlusExtentDescriptor *extd;
-
- if (from_cp->c_datafork) {
- extd = &from_cp->c_datafork->ff_extents[0];
- if (extd->startBlock == VTOVCB(from_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {
- error = EPERM;
- goto exit;
- }
- }
-
- if (to_cp->c_datafork) {
- extd = &to_cp->c_datafork->ff_extents[0];
- if (extd->startBlock == VTOVCB(to_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {
- error = EPERM;
- goto exit;
- }
- }
+ // Don't allow modification of the journal or journal_info_block
+ if (hfs_is_journal_file(hfsmp, from_cp) ||
+ hfs_is_journal_file(hfsmp, to_cp)) {
+ error = EPERM;
+ goto exit;
}
-
+
/*
* Ok, now that all of the pre-flighting is done, call the underlying
* function if needed.
goto exit;
}
-
+
if ((error = hfs_start_transaction(hfsmp)) != 0) {
goto exit;
}
to_parid = to_cp->c_parentcnid;
}
- /* Do the exchange */
+ /*
+ * ExchangeFileIDs swaps the extent information attached to two
+ * different file IDs. It also swaps the extent information that
+ * may live in the extents-overflow B-Tree.
+ *
+ * We do this in a transaction as this may require a lot of B-Tree nodes
+ * to do completely, particularly if one of the files in question
+ * has a lot of extents.
+ *
+ * For example, assume "file1" has fileID 50, and "file2" has fileID 52.
+ * For the on-disk records, which are assumed to be synced, we will
+ * first swap the resident inline-8 extents as part of the catalog records.
+ * Then we will swap any extents overflow records for each file.
+ *
+ * When this function is done, "file1" will have fileID 52, and "file2" will
+ * have fileID 50.
+ */
error = ExchangeFileIDs(hfsmp, from_nameptr, to_nameptr, from_parid,
to_parid, from_cp->c_hint, to_cp->c_hint);
hfs_systemfile_unlock(hfsmp, lockflags);
if (to_vp)
cache_purge(to_vp);
- /* Save a copy of from attributes before swapping. */
+ /* Bump both source and destination write counts before any swaps. */
+ {
+ hfs_incr_gencount (from_cp);
+ hfs_incr_gencount (to_cp);
+ }
+
+
+ /* Save a copy of "from" attributes before swapping. */
bcopy(&from_cp->c_desc, &tempdesc, sizeof(struct cat_desc));
bcopy(&from_cp->c_attr, &tempattr, sizeof(struct cat_attr));
- tempflag = from_cp->c_flag & (C_HARDLINK | C_HASXATTRS);
+
+ /* Save whether or not each cnode is a hardlink or has EAs */
+ from_flag_special = from_cp->c_flag & (C_HARDLINK | C_HASXATTRS);
+ to_flag_special = to_cp->c_flag & (C_HARDLINK | C_HASXATTRS);
+
+ /* Drop the special bits from each cnode */
+ from_cp->c_flag &= ~(C_HARDLINK | C_HASXATTRS);
+ to_cp->c_flag &= ~(C_HARDLINK | C_HASXATTRS);
/*
- * Swap the descriptors and all non-fork related attributes.
- * (except the modify date)
+ * Complete the in-memory portion of the copy.
+ *
+ * ExchangeFileIDs swaps the on-disk records involved. We complete the
+ * operation by swapping the in-memory contents of the two files here.
+ * We swap the cnode descriptors, which contain name, BSD attributes,
+ * timestamps, etc, about the file.
+ *
+ * NOTE: We do *NOT* swap the fileforks of the two cnodes. We have
+ * already swapped the on-disk extent information. As long as we swap the
+ * IDs, the in-line resident 8 extents that live in the filefork data
+ * structure will point to the right data for the new file ID if we leave
+ * them alone.
+ *
+ * As a result, any file descriptor that points to a particular
+ * vnode (even though it should change names), will continue
+ * to point to the same content.
*/
+
+ /* Copy the "to" -> "from" cnode */
bcopy(&to_cp->c_desc, &from_cp->c_desc, sizeof(struct cat_desc));
from_cp->c_hint = 0;
- from_cp->c_fileid = from_cp->c_cnid;
+ /*
+ * If 'to' was a hardlink, then we copied over its link ID/CNID/(namespace ID)
+ * when we bcopy'd the descriptor above. However, the cnode attributes
+ * are not bcopied. As a result, make sure to swap the file IDs of each item.
+ *
+ * Further, other hardlink attributes must be moved along in this swap:
+ * the linkcount, the linkref, and the firstlink all need to move
+ * along with the file IDs. See note below regarding the flags and
+ * what moves vs. what does not.
+ *
+ * For Reference:
+ * linkcount == total # of hardlinks.
+ * linkref == the indirect inode pointer.
+ * firstlink == the first hardlink in the chain (written to the raw inode).
+ * These three are tied to the fileID and must move along with the rest of the data.
+ */
+ from_cp->c_fileid = to_cp->c_attr.ca_fileid;
+
from_cp->c_itime = to_cp->c_itime;
from_cp->c_btime = to_cp->c_btime;
from_cp->c_atime = to_cp->c_atime;
from_cp->c_ctime = to_cp->c_ctime;
from_cp->c_gid = to_cp->c_gid;
from_cp->c_uid = to_cp->c_uid;
- from_cp->c_flags = to_cp->c_flags;
+ from_cp->c_bsdflags = to_cp->c_bsdflags;
from_cp->c_mode = to_cp->c_mode;
from_cp->c_linkcount = to_cp->c_linkcount;
- from_cp->c_flag = to_cp->c_flag & (C_HARDLINK | C_HASXATTRS);
+ from_cp->c_attr.ca_linkref = to_cp->c_attr.ca_linkref;
+ from_cp->c_attr.ca_firstlink = to_cp->c_attr.ca_firstlink;
+
+ /*
+ * The cnode flags need to stay with the cnode and not get transferred
+ * over along with everything else because they describe the content; they are
+ * not attributes that reflect changes specific to the file ID. In general,
+ * fields that are tied to the file ID are the ones that will move.
+ *
+ * This reflects the fact that the file may have borrowed blocks, dirty metadata,
+ * or other extents, which may not yet have been written to the catalog. If
+ * they were, they would have been transferred in the ExchangeFileIDs call above.
+ *
+ * The flags that are special are:
+ * C_HARDLINK, C_HASXATTRS
+ *
+ * These flags move with the item and file ID in the namespace since their
+ * state is tied to that of the file ID.
+ *
+ * So to transfer the flags, we have to take the following steps
+ * 1) Store in a localvar whether or not the special bits are set.
+ * 2) Drop the special bits from the current flags
+ * 3) swap the special flag bits to their destination
+ */
+ from_cp->c_flag |= to_flag_special;
from_cp->c_attr.ca_recflags = to_cp->c_attr.ca_recflags;
bcopy(to_cp->c_finderinfo, from_cp->c_finderinfo, 32);
+
+ /* Copy the "from" -> "to" cnode */
bcopy(&tempdesc, &to_cp->c_desc, sizeof(struct cat_desc));
to_cp->c_hint = 0;
- to_cp->c_fileid = to_cp->c_cnid;
+ /*
+ * Pull the file ID from the tempattr we copied above. We can't assume
+ * it is the same as the CNID.
+ */
+ to_cp->c_fileid = tempattr.ca_fileid;
to_cp->c_itime = tempattr.ca_itime;
to_cp->c_btime = tempattr.ca_btime;
to_cp->c_atime = tempattr.ca_atime;
to_cp->c_ctime = tempattr.ca_ctime;
to_cp->c_gid = tempattr.ca_gid;
to_cp->c_uid = tempattr.ca_uid;
- to_cp->c_flags = tempattr.ca_flags;
+ to_cp->c_bsdflags = tempattr.ca_flags;
to_cp->c_mode = tempattr.ca_mode;
to_cp->c_linkcount = tempattr.ca_linkcount;
- to_cp->c_flag = tempflag;
+ to_cp->c_attr.ca_linkref = tempattr.ca_linkref;
+ to_cp->c_attr.ca_firstlink = tempattr.ca_firstlink;
+
+ /*
+ * Only OR in the "from" flags into our cnode flags below.
+ * Leave the rest of the flags alone.
+ */
+ to_cp->c_flag |= from_flag_special;
+
to_cp->c_attr.ca_recflags = tempattr.ca_recflags;
bcopy(tempattr.ca_finderinfo, to_cp->c_finderinfo, 32);
+
/* Rehash the cnodes using their new file IDs */
hfs_chash_rehash(hfsmp, from_cp, to_cp);
* When a file moves out of "Cleanup At Startup"
* we can drop its NODUMP status.
*/
- if ((from_cp->c_flags & UF_NODUMP) &&
+ if ((from_cp->c_bsdflags & UF_NODUMP) &&
(from_cp->c_parentcnid != to_cp->c_parentcnid)) {
- from_cp->c_flags &= ~UF_NODUMP;
+ from_cp->c_bsdflags &= ~UF_NODUMP;
from_cp->c_touch_chgtime = TRUE;
}
- if ((to_cp->c_flags & UF_NODUMP) &&
+ if ((to_cp->c_bsdflags & UF_NODUMP) &&
(to_cp->c_parentcnid != from_cp->c_parentcnid)) {
- to_cp->c_flags &= ~UF_NODUMP;
+ to_cp->c_bsdflags &= ~UF_NODUMP;
to_cp->c_touch_chgtime = TRUE;
}
int compressed = hfs_file_is_compressed(VTOC(vp), 1); /* 1 == don't take the cnode lock */
time_t orig_ctime = VTOC(vp)->c_ctime;
- if (!compressed && (VTOC(vp)->c_flags & UF_COMPRESSED)) {
+ if (!compressed && (VTOC(vp)->c_bsdflags & UF_COMPRESSED)) {
error = check_for_dataless_file(vp, NAMESPACE_HANDLER_READ_OP);
if (error != 0) {
return error;
if (ap->a_fflags & PROT_WRITE) {
check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
+
+ /* even though we're manipulating a cnode field here, we're only monotonically increasing
+ * the generation counter. The vnode can't be recycled (because we hold a FD in order to cause the
+ * map to happen). So it's safe to do this without holding the cnode lock. The caller's only
+ * requirement is that the number has been changed.
+ */
+ struct cnode *cp = VTOC(vp);
+ if (S_ISREG(cp->c_attr.ca_mode) || S_ISLNK(cp->c_attr.ca_mode)) {
+ hfs_incr_gencount(cp);
+ }
}
}
*
*/
int hfs_movedata (struct vnode *from_vp, struct vnode *to_vp) {
-
+
struct cnode *from_cp;
struct cnode *to_cp;
struct hfsmount *hfsmp = NULL;
int lockflags = 0;
int overflow_blocks;
int rsrc = 0;
-
-
+
+
/* Get the HFS pointers */
from_cp = VTOC(from_vp);
to_cp = VTOC(to_vp);
hfsmp = VTOHFS(from_vp);
-
+
/* Verify that neither source/dest file is open-unlinked */
if (from_cp->c_flag & (C_DELETED | C_NOEXISTS)) {
error = EBUSY;
if (from_cp->c_rsrc_vp == from_vp) {
rsrc = 1;
}
-
+
/*
* We assume that the destination file is already empty.
* Verify that it is.
goto movedata_exit;
}
}
-
+
/* If the source has the rsrc open, make sure the destination is also the rsrc */
if (rsrc) {
if (to_vp != to_cp->c_rsrc_vp) {
if (to_vp != to_cp->c_vp) {
error = EINVAL;
goto movedata_exit;
- }
+ }
}
-
+
/*
* See if the source file has overflow extents. If it doesn't, we don't
* need to call into MoveData, and the catalog will be enough.
else {
overflow_blocks = overflow_extents(from_cp->c_datafork);
}
-
+
if ((error = hfs_start_transaction (hfsmp)) != 0) {
goto movedata_exit;
}
started_tr = 1;
-
+
/* Lock the system files: catalog, extents, attributes */
lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
-
+
/* Copy over any catalog allocation data into the new spot. */
if (rsrc) {
if ((error = hfs_move_fork (from_cp->c_rsrcfork, from_cp, to_cp->c_rsrcfork, to_cp))){
goto movedata_exit;
}
}
-
+
/*
* Note that because all we're doing is moving the extents around, we can
* probably do this in a single transaction: Each extent record (group of 8)
error = MoveData (hfsmp, from_cp->c_cnid, to_cp->c_cnid, 0);
}
}
-
+
if (error) {
/* Reverse the operation. Copy the fork data back into the source */
if (rsrc) {
struct cat_fork *src_rsrc = NULL;
struct cat_fork *dst_data = NULL;
struct cat_fork *dst_rsrc = NULL;
-
+
/* Touch the times*/
to_cp->c_touch_acctime = TRUE;
to_cp->c_touch_chgtime = TRUE;
to_cp->c_touch_modtime = TRUE;
-
+
from_cp->c_touch_acctime = TRUE;
from_cp->c_touch_chgtime = TRUE;
from_cp->c_touch_modtime = TRUE;
-
+
hfs_touchtimes(hfsmp, to_cp);
hfs_touchtimes(hfsmp, from_cp);
-
+
if (from_cp->c_datafork) {
src_data = &from_cp->c_datafork->ff_data;
}
if (from_cp->c_rsrcfork) {
src_rsrc = &from_cp->c_rsrcfork->ff_data;
}
-
+
if (to_cp->c_datafork) {
dst_data = &to_cp->c_datafork->ff_data;
}
if (to_cp->c_rsrcfork) {
dst_rsrc = &to_cp->c_rsrcfork->ff_data;
}
-
+
/* Update the catalog nodes */
(void) cat_update(hfsmp, &from_cp->c_desc, &from_cp->c_attr,
- src_data, src_rsrc);
-
+ src_data, src_rsrc);
+
(void) cat_update(hfsmp, &to_cp->c_desc, &to_cp->c_attr,
- dst_data, dst_rsrc);
-
+ dst_data, dst_rsrc);
+
}
/* unlock the system files */
hfs_systemfile_unlock(hfsmp, lockflags);
-
-
+
+
movedata_exit:
if (started_tr) {
hfs_end_transaction(hfsmp);
}
-
+
return error;
-
+
}
/*
* non overflow-extent extents into the destination here.
*/
static int hfs_move_fork (struct filefork *srcfork, struct cnode *src_cp,
- struct filefork *dstfork, struct cnode *dst_cp) {
+ struct filefork *dstfork, struct cnode *dst_cp) {
struct rl_entry *invalid_range;
int size = sizeof(struct HFSPlusExtentDescriptor);
size = size * kHFSPlusExtentDensity;
-
+
/* If the dstfork has any invalid ranges, bail out */
invalid_range = TAILQ_FIRST(&dstfork->ff_invalidranges);
if (invalid_range != NULL) {
return EFBIG;
}
-
+
if (dstfork->ff_data.cf_size != 0 || dstfork->ff_data.cf_new_size != 0) {
return EFBIG;
}
-
+
/* First copy the invalid ranges */
while ((invalid_range = TAILQ_FIRST(&srcfork->ff_invalidranges))) {
off_t start = invalid_range->rl_start;
off_t end = invalid_range->rl_end;
-
+
/* Remove it from the srcfork and add it to dstfork */
rl_remove(start, end, &srcfork->ff_invalidranges);
rl_add(start, end, &dstfork->ff_invalidranges);
}
-
+
/*
* Ignore the ff_union. We don't move symlinks or system files.
* Now copy the in-catalog extent information
dstfork->ff_data.cf_new_size = srcfork->ff_data.cf_new_size;
dstfork->ff_data.cf_vblocks = srcfork->ff_data.cf_vblocks;
dstfork->ff_data.cf_blocks = srcfork->ff_data.cf_blocks;
-
+
/* just memcpy the whole array of extents to the new location. */
memcpy (dstfork->ff_data.cf_extents, srcfork->ff_data.cf_extents, size);
-
+
/*
* Copy the cnode attribute data.
*
*/
src_cp->c_blocks -= srcfork->ff_data.cf_vblocks;
src_cp->c_blocks -= srcfork->ff_data.cf_blocks;
-
+
dst_cp->c_blocks += srcfork->ff_data.cf_vblocks;
dst_cp->c_blocks += srcfork->ff_data.cf_blocks;
-
+
/* Now delete the entries in the source fork */
srcfork->ff_data.cf_size = 0;
srcfork->ff_data.cf_new_size = 0;
bzero (srcfork->ff_data.cf_extents, size);
return 0;
}
-
-
+
/*
* cnode must be locked
int wait; /* all other attributes (e.g. atime, etc.) */
int lockflag;
int took_trunc_lock = 0;
+ int locked_buffers = 0;
/*
* Applications which only care about data integrity rather than full
}
} else if (UBCINFOEXISTS(vp)) {
hfs_unlock(cp);
- hfs_lock_truncate(cp, HFS_SHARED_LOCK);
+ hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
took_trunc_lock = 1;
if (fp->ff_unallocblocks != 0) {
- hfs_unlock_truncate(cp, 0);
+ hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
- hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK);
+ hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
}
/* Don't hold cnode lock when calling into cluster layer. */
(void) cluster_push(vp, waitdata ? IO_SYNC : 0);
- hfs_lock(cp, HFS_FORCE_LOCK);
+ hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
}
/*
* When MNT_WAIT is requested and the zero fill timeout
*/
if (fp && (((cp->c_flag & C_ALWAYS_ZEROFILL) && !TAILQ_EMPTY(&fp->ff_invalidranges)) ||
((wait || (cp->c_flag & C_ZFWANTSYNC)) &&
- ((cp->c_flags & UF_NODUMP) == 0) &&
+ ((cp->c_bsdflags & UF_NODUMP) == 0) &&
UBCINFOEXISTS(vp) && (vnode_issystem(vp) ==0) &&
cp->c_zftimeout != 0))) {
if (!took_trunc_lock || (cp->c_truncatelockowner == HFS_SHARED_OWNER)) {
hfs_unlock(cp);
if (took_trunc_lock) {
- hfs_unlock_truncate(cp, 0);
+ hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
}
- hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK);
- hfs_lock(cp, HFS_FORCE_LOCK);
+ hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
+ hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
took_trunc_lock = 1;
}
while ((invalid_range = TAILQ_FIRST(&fp->ff_invalidranges))) {
(void) cluster_write(vp, (struct uio *) 0,
fp->ff_size, end + 1, start, (off_t)0,
IO_HEADZEROFILL | IO_NOZERODIRTY | IO_NOCACHE);
- hfs_lock(cp, HFS_FORCE_LOCK);
+ hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
cp->c_flag |= C_MODIFIED;
}
hfs_unlock(cp);
(void) cluster_push(vp, waitdata ? IO_SYNC : 0);
- hfs_lock(cp, HFS_FORCE_LOCK);
+ hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
}
cp->c_flag &= ~C_ZFWANTSYNC;
cp->c_zftimeout = 0;
}
datasync:
if (took_trunc_lock) {
- hfs_unlock_truncate(cp, 0);
+ hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
took_trunc_lock = 0;
}
/*
/*
* Flush all dirty buffers associated with a vnode.
+ * Record how many of them were dirty AND locked (if necessary).
*/
- buf_flushdirtyblks(vp, waitdata, lockflag, "hfs_fsync");
+ locked_buffers = buf_flushdirtyblks_skipinfo(vp, waitdata, lockflag, "hfs_fsync");
+ if ((lockflag & BUF_SKIP_LOCKED) && (locked_buffers) && (vnode_vtype(vp) == VLNK)) {
+ /*
+ * If there are dirty symlink buffers, then we may need to take action
+ * to prevent issues later on if we are journaled. If we're fsyncing a
+ * symlink vnode then we are in one of three cases:
+ *
+ * 1) automatic sync has fired. In this case, we don't want the behavior to change.
+ *
+ * 2) Someone has opened the FD for the symlink (not what it points to)
+ * and has issued an fsync against it. This should be rare, and we don't
+ * want the behavior to change.
+ *
+ * 3) We are being called by a vclean which is trying to reclaim this
+ * symlink vnode. If this is the case, then allowing this fsync to
+ * proceed WITHOUT flushing the journal could result in the vclean
+ * invalidating the buffer's blocks before the journal transaction is
+ * written to disk. To prevent this, we force a journal flush
+ * if the vnode is in the middle of a recycle (VL_TERMINATE or VL_DEAD is set).
+ */
+ if (vnode_isrecycled(vp)) {
+ fullsync = 1;
+ }
+ }
metasync:
if (vnode_isreg(vp) && vnode_issystem(vp)) {
hfs_unlockpair (dcp, cp);
return ENOENT;
}
+
+ //
+ // if the item is tracked but doesn't have a document_id, assign one and generate an fsevent for it
+ //
+ if ((cp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id == 0) {
+ uint32_t newid;
+
+ hfs_unlockpair(dcp, cp);
+
+ if (hfs_generate_document_id(VTOHFS(vp), &newid) == 0) {
+ hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
+ ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id = newid;
+#if CONFIG_FSE
+ add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
+ FSE_ARG_DEV, VTOHFS(vp)->hfs_raw_dev,
+ FSE_ARG_INO, (ino64_t)0, // src inode #
+ FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
+ FSE_ARG_INT32, newid,
+ FSE_ARG_DONE);
+#endif
+ } else {
+ // XXXdbg - couldn't get a new docid... what to do? can't really fail the rm...
+ hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
+ }
+ }
+
error = hfs_removedir(dvp, vp, ap->a_cnp, 0, 0);
hfs_unlockpair(dcp, cp);
* the current directory and thus be
* non-empty.)
*/
- if ((dcp->c_flags & APPEND) || (cp->c_flags & (IMMUTABLE | APPEND))) {
+ if ((dcp->c_bsdflags & APPEND) || (cp->c_bsdflags & (IMMUTABLE | APPEND))) {
error = EPERM;
goto out;
}
}
error = cat_delete(hfsmp, &desc, &cp->c_attr);
- if (error == 0) {
+
+ if (!error) {
+ //
+ // if skip_reserve == 1 then we're being called from hfs_vnop_rename() and thus
+ // we don't need to touch the document_id as it's handled by the rename code.
+ // otherwise it's a normal remove and we need to save the document id in the
+ // per thread struct and clear it from the cnode.
+ //
+ struct doc_tombstone *ut;
+ ut = get_uthread_doc_tombstone();
+ if (!skip_reserve && (cp->c_bsdflags & UF_TRACKED) && should_save_docid_tombstone(ut, vp, cnp)) {
+
+ if (ut->t_lastop_document_id) {
+ clear_tombstone_docid(ut, hfsmp, NULL);
+ }
+ save_tombstone(hfsmp, dvp, vp, cnp, 1);
+
+ }
+
/* The parent lost a child */
if (dcp->c_entries > 0)
dcp->c_entries--;
DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
dcp->c_dirchangecnt++;
+ {
+ struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)dcp->c_finderinfo + 16);
+ extinfo->write_gen_counter = OSSwapHostToBigInt32(OSSwapBigToHostInt32(extinfo->write_gen_counter) + 1);
+ }
dcp->c_touch_chgtime = TRUE;
dcp->c_touch_modtime = TRUE;
hfs_touchtimes(hfsmp, cp);
struct cnode *dcp = VTOC(dvp);
struct cnode *cp;
struct vnode *rvp = NULL;
- struct hfsmount *hfsmp = VTOHFS(vp);
int error=0, recycle_rsrc=0;
- int drop_rsrc_vnode = 0;
+ int recycle_vnode = 0;
+ uint32_t rsrc_vid = 0;
time_t orig_ctime;
if (dvp == vp) {
}
orig_ctime = VTOC(vp)->c_ctime;
- if (!vnode_isnamedstream(vp)) {
+ if (!vnode_isnamedstream(vp) && ((ap->a_flags & VNODE_REMOVE_SKIP_NAMESPACE_EVENT) == 0)) {
error = check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
if (error) {
// XXXdbg - decide on a policy for handling namespace handler failures!
cp = VTOC(vp);
- /*
- * We need to grab the cnode lock on 'cp' before the lockpair()
- * to get an iocount on the rsrc fork BEFORE we enter hfs_removefile.
- * To prevent other deadlocks, it's best to call hfs_vgetrsrc in a way that
- * allows it to drop the cnode lock that it expects to be held coming in.
- * If we don't, we could commit a lock order violation, causing a deadlock.
- * In order to safely get the rsrc vnode with an iocount, we need to only hold the
- * lock on the file temporarily. Unlike hfs_vnop_rename, we don't have to worry
- * about one rsrc fork getting recycled for another, but we do want to ensure
- * that there are no deadlocks due to lock ordering issues.
- *
+relock:
+
+ hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
+
+ if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) {
+ hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
+ if (rvp) {
+ vnode_put (rvp);
+ }
+ return (error);
+ }
+ //
+ // if the item is tracked but doesn't have a document_id, assign one and generate an fsevent for it
+ //
+ if ((cp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id == 0) {
+ uint32_t newid;
+
+ hfs_unlockpair(dcp, cp);
+
+ if (hfs_generate_document_id(VTOHFS(vp), &newid) == 0) {
+ hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
+ ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id = newid;
+#if CONFIG_FSE
+ add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
+ FSE_ARG_DEV, VTOHFS(vp)->hfs_raw_dev,
+ FSE_ARG_INO, (ino64_t)0, // src inode #
+ FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
+ FSE_ARG_INT32, newid,
+ FSE_ARG_DONE);
+#endif
+ } else {
+ // XXXdbg - couldn't get a new docid... what to do? can't really fail the rm...
+ hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
+ }
+ }
+
+ /*
+ * Lazily determine whether there is a valid resource fork
+ * vnode attached to 'cp' if it is a regular file or symlink.
+ * If the vnode does not exist, then we may proceed without having to
+ * create it.
+ *
+ * If, however, it does exist, then we need to acquire an iocount on the
+ * vnode after acquiring its vid. This ensures that if we have to do I/O
+ * against it, it can't get recycled from underneath us in the middle
+ * of this call.
+ *
* Note: this function may be invoked for directory hardlinks, so just skip these
* steps if 'vp' is a directory.
*/
-
if ((vp->v_type == VLNK) || (vp->v_type == VREG)) {
-
- if ((error = hfs_lock (cp, HFS_EXCLUSIVE_LOCK))) {
- return (error);
- }
-
- error = hfs_vgetrsrc(hfsmp, vp, &rvp, TRUE, TRUE);
- hfs_unlock(cp);
- if (error) {
- /* we may have gotten an rsrc vp even though we got an error */
- if (rvp) {
- vnode_put(rvp);
+ if ((cp->c_rsrc_vp) && (rvp == NULL)) {
+ /* We need to acquire the rsrc vnode */
+ rvp = cp->c_rsrc_vp;
+ rsrc_vid = vnode_vid (rvp);
+
+ /* Unlock everything to acquire iocount on the rsrc vnode */
+ hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
+ hfs_unlockpair (dcp, cp);
+ /* Use the vid to maintain identity on rvp */
+ if (vnode_getwithvid(rvp, rsrc_vid)) {
+ /*
+ * If this fails, then it was recycled or
+ * reclaimed in the interim. Reset fields and
+ * start over.
+ */
rvp = NULL;
+ rsrc_vid = 0;
}
- return (error);
+ goto relock;
}
- drop_rsrc_vnode = 1;
- }
- /* Now that we may have an iocount on rvp, do the lock pair */
-
- hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK);
-
- if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) {
- hfs_unlock_truncate(cp, 0);
- /* drop the iocount on rvp if necessary */
- if (drop_rsrc_vnode) {
- vnode_put (rvp);
- }
- return (error);
}
/*
goto rm_done;
}
- error = hfs_removefile(dvp, vp, ap->a_cnp, ap->a_flags, 0, 0, rvp, 0);
+ error = hfs_removefile(dvp, vp, ap->a_cnp, ap->a_flags, 0, 0, NULL, 0);
/*
* If the remove succeeded in deleting the file, then we may need to mark
* If the cnode was instead marked C_NOEXISTS, then there wouldn't be any
* more work.
*/
- if ((error == 0) && (rvp)) {
- recycle_rsrc = 1;
+ if (error == 0) {
+ if (rvp) {
+ recycle_rsrc = 1;
+ }
+ /*
+ * If the target was actually removed from the catalog schedule it for
+ * full reclamation/inactivation. We hold an iocount on it so it should just
+ * get marked with MARKTERM
+ */
+ if (cp->c_flag & C_NOEXISTS) {
+ recycle_vnode = 1;
+ }
}
+
/*
* Drop the truncate lock before unlocking the cnode
* (which can potentially perform a vnode_put and
* truncate lock)
*/
rm_done:
- hfs_unlock_truncate(cp, 0);
+ hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
hfs_unlockpair(dcp, cp);
if (recycle_rsrc) {
/* inactive or reclaim on rvp will clean up the blocks from the rsrc fork */
vnode_recycle(rvp);
}
-
- if (drop_rsrc_vnode) {
+ if (recycle_vnode) {
+ vnode_recycle (vp);
+ }
+
+ if (rvp) {
/* drop iocount on rsrc fork, was obtained at beginning of fxn */
vnode_put(rvp);
}
}
-static int
+int
hfs_removefile_callback(struct buf *bp, void *hfsmp) {
if ( !(buf_flags(bp) & B_META))
* This function may be used to remove directories if they have
* lots of EA's -- note the 'allow_dirs' argument.
*
- * The 'rvp' argument is used to pass in a resource fork vnode with
- * an iocount to prevent it from getting recycled during usage. If it
- * is NULL, then it is assumed the caller is a VNOP that cannot operate
- * on resource forks, like hfs_vnop_symlink or hfs_removedir. Otherwise in
- * a VNOP that takes multiple vnodes, we could violate lock order and
- * cause a deadlock.
+ * This function is able to delete blocks & fork data for the resource
+ * fork even if it does not exist in core (and have a backing vnode).
+ * It should infer the correct behavior based on the number of blocks
+ * in the cnode and whether or not the resource fork pointer exists or
+ * not. As a result, one only need pass in the 'vp' corresponding to the
+ * data fork of this file (or main vnode in the case of a directory).
+ * Passing in a resource fork will result in an error.
+ *
+ * Because we do not create any vnodes in this function, we are not at
+ * risk of deadlocking against ourselves by double-locking.
*
* Requires cnode and truncate locks to be held.
*/
int
hfs_removefile(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
int flags, int skip_reserve, int allow_dirs,
- struct vnode *rvp, int only_unlink)
+ __unused struct vnode *rvp, int only_unlink)
{
struct cnode *cp;
struct cnode *dcp;
+ struct vnode *rsrc_vp = NULL;
struct hfsmount *hfsmp;
struct cat_desc desc;
struct timeval tv;
int started_tr = 0;
int isbigfile = 0, defer_remove=0, isdir=0;
int update_vh = 0;
-
+
cp = VTOC(vp);
dcp = VTOC(dvp);
hfsmp = VTOHFS(vp);
if (VNODE_IS_RSRC(vp)) {
return (EPERM);
}
+ else {
+ /*
+ * We know it's a data fork.
+ * Probe the cnode to see if we have a valid resource fork
+ * in hand or not.
+ */
+ rsrc_vp = cp->c_rsrc_vp;
+ }
+
/* Don't allow deleting the journal or journal_info_block. */
- if (hfsmp->jnl &&
- (cp->c_fileid == hfsmp->hfs_jnlfileid || cp->c_fileid == hfsmp->hfs_jnlinfoblkid)) {
+ if (hfs_is_journal_file(hfsmp, cp)) {
return (EPERM);
}
+
+ /*
+ * If removing a symlink, then we need to ensure that the
+ * data blocks for the symlink are not still in-flight or pending.
+ * If so, we will unlink the symlink here, making its blocks
+ * available for re-allocation by a subsequent transaction. That is OK, but
+ * then the I/O for the data blocks could go out before the journal
+ * transaction that created it was flushed, leading to I/O ordering issues.
+ */
+ if (vp->v_type == VLNK) {
+ /*
+ * This will block if the asynchronous journal flush is in progress.
+ * If this symlink is not being renamed over and doesn't have any open FDs,
+ * then we'll remove it from the journal's bufs below in kill_block.
+ */
+ buf_wait_for_shadow_io (vp, 0);
+ }
+
/*
* Hard links require special handling.
*/
return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
}
}
+
/* Directories should call hfs_rmdir! (unless they have a lot of attributes) */
if (vnode_isdir(vp)) {
if (allow_dirs == 0)
/* Remove our entry from the namei cache. */
cache_purge(vp);
-
+
/*
- * We expect the caller, if operating on files,
- * will have passed in a resource fork vnode with
- * an iocount, even if there was no content.
- * We only do the hfs_truncate on the rsrc fork
- * if we know that it DID have content, however.
- * This has the bonus of not requiring us to defer
- * its removal, unless it is in use.
+ * If the caller was operating on a file (as opposed to a
+ * directory with EAs), then we need to figure out
+ * whether or not it has a valid resource fork vnode.
+ *
+ * If there was a valid resource fork vnode, then we need
+ * to use hfs_truncate to eliminate its data. If there is
+ * no vnode, then we hold the cnode lock which would
+ * prevent it from being created. As a result,
+ * we can use the data deletion functions which do not
+ * require that a cnode/vnode pair exist.
*/
/* Check if this file is being used. */
if (isdir == 0) {
dataforkbusy = vnode_isinuse(vp, 0);
- /* Only need to defer resource fork removal if in use and has content */
- if (rvp && (cp->c_blocks - VTOF(vp)->ff_blocks)) {
- rsrcforkbusy = vnode_isinuse(rvp, 0);
+ /*
+ * At this point, we know that 'vp' points to the
+ * a data fork because we checked it up front. And if
+ * there is no rsrc fork, rsrc_vp will be NULL.
+ */
+ if (rsrc_vp && (cp->c_blocks - VTOF(vp)->ff_blocks)) {
+ rsrcforkbusy = vnode_isinuse(rsrc_vp, 0);
}
}
if (!dataforkbusy && cp->c_datafork->ff_blocks && !isbigfile) {
cp->c_flag |= C_NEED_DATA_SETSIZE;
}
- if (!rsrcforkbusy && rvp) {
+ if (!rsrcforkbusy && rsrc_vp) {
cp->c_flag |= C_NEED_RSRC_SETSIZE;
}
}
}
update_vh = 1;
}
- if (!rsrcforkbusy && rvp) {
- error = hfs_prepare_release_storage (hfsmp, rvp);
+
+ /*
+ * If the resource fork vnode does not exist, we can skip this step.
+ */
+ if (!rsrcforkbusy && rsrc_vp) {
+ error = hfs_prepare_release_storage (hfsmp, rsrc_vp);
if (error) {
goto out;
}
DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
}
dcp->c_dirchangecnt++;
+ {
+ struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)dcp->c_finderinfo + 16);
+ extinfo->write_gen_counter = OSSwapHostToBigInt32(OSSwapBigToHostInt32(extinfo->write_gen_counter) + 1);
+ }
dcp->c_ctime = tv.tv_sec;
dcp->c_mtime = tv.tv_sec;
(void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
goto out;
}
- else /* Not busy */ {
-
+ else {
+ /*
+ * Nobody is using this item; we can safely remove everything.
+ */
+ struct filefork *temp_rsrc_fork = NULL;
#if QUOTA
off_t savedbytes;
int blksize = hfsmp->blockSize;
#endif
u_int32_t fileid = cp->c_fileid;
-
+
+ /*
+ * Figure out if we need to read the resource fork data into
+ * core before wiping out the catalog record.
+ *
+ * 1) Must not be a directory
+ * 2) cnode's c_rsrcfork ptr must be NULL.
+ * 3) rsrc fork must have actual blocks
+ */
+ if ((isdir == 0) && (cp->c_rsrcfork == NULL) &&
+ (cp->c_blocks - VTOF(vp)->ff_blocks)) {
+ /*
+ * The resource fork vnode & filefork did not exist.
+ * Create a temporary one for use in this function only.
+ */
+ MALLOC_ZONE (temp_rsrc_fork, struct filefork *, sizeof (struct filefork), M_HFSFORK, M_WAITOK);
+ bzero(temp_rsrc_fork, sizeof(struct filefork));
+ temp_rsrc_fork->ff_cp = cp;
+ rl_init(&temp_rsrc_fork->ff_invalidranges);
+ }
+
lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
+
+ /* Look up the resource fork first, if necessary */
+ if (temp_rsrc_fork) {
+ error = cat_lookup (hfsmp, &desc, 1, 0, (struct cat_desc*) NULL,
+ (struct cat_attr*) NULL, &temp_rsrc_fork->ff_data, NULL);
+ if (error) {
+ FREE_ZONE (temp_rsrc_fork, sizeof(struct filefork), M_HFSFORK);
+ hfs_systemfile_unlock (hfsmp, lockflags);
+ goto out;
+ }
+ }
+
if (!skip_reserve) {
if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) {
+ if (temp_rsrc_fork) {
+ FREE_ZONE (temp_rsrc_fork, sizeof(struct filefork), M_HFSFORK);
+ }
hfs_systemfile_unlock(hfsmp, lockflags);
goto out;
}
error = cat_delete(hfsmp, &desc, &cp->c_attr);
if (error && error != ENXIO && error != ENOENT) {
- printf("hfs_removefile: deleting file %s (%d), err: %d\n",
- cp->c_desc.cd_nameptr, cp->c_attr.ca_fileid, error);
+ printf("hfs_removefile: deleting file %s (id=%d) vol=%s err=%d\n",
+ cp->c_desc.cd_nameptr, cp->c_attr.ca_fileid, hfsmp->vcbVN, error);
}
if (error == 0) {
if (dcp->c_entries > 0)
dcp->c_entries--;
dcp->c_dirchangecnt++;
+ {
+ struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)dcp->c_finderinfo + 16);
+ extinfo->write_gen_counter = OSSwapHostToBigInt32(OSSwapBigToHostInt32(extinfo->write_gen_counter) + 1);
+ }
dcp->c_ctime = tv.tv_sec;
dcp->c_mtime = tv.tv_sec;
(void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
}
hfs_systemfile_unlock(hfsmp, lockflags);
+
if (error) {
+ if (temp_rsrc_fork) {
+ FREE_ZONE (temp_rsrc_fork, sizeof(struct filefork), M_HFSFORK);
+ }
goto out;
}
(void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
}
- if (cp->c_rsrcfork && (cp->c_rsrcfork->ff_blocks > 0)) {
- savedbytes = ((off_t)cp->c_rsrcfork->ff_blocks * (off_t)blksize);
- (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
+ /*
+ * We may have just deleted the catalog record for a resource fork even
+ * though it did not exist in core as a vnode. However, just because there
+ * was a resource fork pointer in the cnode does not mean that it had any blocks.
+ */
+ if (temp_rsrc_fork || cp->c_rsrcfork) {
+ if (cp->c_rsrcfork) {
+ if (cp->c_rsrcfork->ff_blocks > 0) {
+ savedbytes = ((off_t)cp->c_rsrcfork->ff_blocks * (off_t)blksize);
+ (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
+ }
+ }
+ else {
+ /* we must have used a temporary fork */
+ savedbytes = ((off_t)temp_rsrc_fork->ff_blocks * (off_t)blksize);
+ (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
+ }
}
if (hfsmp->hfs_flags & HFS_QUOTAS) {
}
#endif
-
/*
* If we didn't get any errors deleting the catalog entry, then go ahead
* and release the backing store now. The filefork pointers are still valid.
- */
- error = hfs_release_storage (hfsmp, cp->c_datafork, cp->c_rsrcfork, fileid);
-
+ */
+ if (temp_rsrc_fork) {
+ error = hfs_release_storage (hfsmp, cp->c_datafork, temp_rsrc_fork, fileid);
+ }
+ else {
+ /* if cp->c_rsrcfork == NULL, hfs_release_storage will skip over it. */
+ error = hfs_release_storage (hfsmp, cp->c_datafork, cp->c_rsrcfork, fileid);
+ }
if (error) {
/*
* If we encountered an error updating the extents and bitmap,
/* reset update_vh to 0, since hfs_release_storage should have done it for us */
update_vh = 0;
}
-
+
+ /* Get rid of the temporary rsrc fork */
+ if (temp_rsrc_fork) {
+ FREE_ZONE (temp_rsrc_fork, sizeof(struct filefork), M_HFSFORK);
+ }
+
cp->c_flag |= C_NOEXISTS;
cp->c_flag &= ~C_DELETED;
}
+ //
+ // if skip_reserve == 1 then we're being called from hfs_vnop_rename() and thus
+ // we don't need to touch the document_id as it's handled by the rename code.
+ // otherwise it's a normal remove and we need to save the document id in the
+ // per thread struct and clear it from the cnode.
+ //
+ struct doc_tombstone *ut;
+ ut = get_uthread_doc_tombstone();
+ if (!error && !skip_reserve && (cp->c_bsdflags & UF_TRACKED) && should_save_docid_tombstone(ut, vp, cnp)) {
+
+ if (ut->t_lastop_document_id) {
+ clear_tombstone_docid(ut, hfsmp, NULL);
+ }
+ save_tombstone(hfsmp, dvp, vp, cnp, 1);
+
+ }
+
+
/*
* All done with this cnode's descriptor...
*
cdp->cd_flags &= ~CD_HASBUF;
}
+
/*
* Rename a cnode.
*
struct vnode *tdvp = ap->a_tdvp;
struct vnode *fvp = ap->a_fvp;
struct vnode *fdvp = ap->a_fdvp;
- struct vnode *fvp_rsrc = NULLVP;
- struct vnode *tvp_rsrc = NULLVP;
+ /*
+ * Note that we only need locals for the target/destination's
+ * resource fork vnode (and only if necessary). We don't care if the
+ * source has a resource fork vnode or not.
+ */
+ struct vnode *tvp_rsrc = NULLVP;
+ uint32_t tvp_rsrc_vid = 0;
struct componentname *tcnp = ap->a_tcnp;
struct componentname *fcnp = ap->a_fcnp;
struct proc *p = vfs_context_proc(ap->a_context);
int lockflags;
int error;
time_t orig_from_ctime, orig_to_ctime;
+ int emit_rename = 1;
+ int emit_delete = 1;
+ int is_tracked = 0;
+ int unlocked;
orig_from_ctime = VTOC(fvp)->c_ctime;
if (tvp && VTOC(tvp)) {
orig_to_ctime = ~0;
}
- check_for_tracked_file(fvp, orig_from_ctime, NAMESPACE_HANDLER_RENAME_OP, NULL);
-
- if (tvp && VTOC(tvp)) {
- check_for_tracked_file(tvp, orig_to_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
- }
-
+ hfsmp = VTOHFS(tdvp);
/*
- * Before grabbing the four locks, we may need to get an iocount on the resource fork
- * vnodes in question, just like hfs_vnop_remove. If fvp and tvp are not
- * directories, then go ahead and grab the resource fork vnodes now
- * one at a time. We don't actively need the fvp_rsrc to do the rename operation,
- * but we need the iocount to prevent the vnode from getting recycled/reclaimed
- * during the middle of the VNOP.
+ * Do special case checks here. If fvp == tvp then we need to check the
+ * cnode with locks held.
*/
-
-
- if ((vnode_isreg(fvp)) || (vnode_islnk(fvp))) {
-
- if ((error = hfs_lock (VTOC(fvp), HFS_EXCLUSIVE_LOCK))) {
- return (error);
- }
- /*
- * We care if we race against rename/delete with this cp, so we'll error out
- * if the file becomes open-unlinked during this call.
- */
- error = hfs_vgetrsrc(VTOHFS(fvp), fvp, &fvp_rsrc, TRUE, TRUE);
- hfs_unlock (VTOC(fvp));
- if (error) {
- if (fvp_rsrc) {
- vnode_put(fvp_rsrc);
- }
+ if (fvp == tvp) {
+ int is_hardlink = 0;
+ /*
+ * In this case, we do *NOT* ever emit a DELETE event.
+ * We may not necessarily emit a RENAME event.
+ */
+ emit_delete = 0;
+ if ((error = hfs_lock(VTOC(fvp), HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) {
return error;
}
- }
+ /* Check to see if the item is a hardlink or not */
+ is_hardlink = (VTOC(fvp)->c_flag & C_HARDLINK);
+ hfs_unlock (VTOC(fvp));
- if (tvp && (vnode_isreg(tvp) || vnode_islnk(tvp))) {
/*
- * Lock failure is OK on tvp, since we may race with a remove on the dst.
- * But this shouldn't stop rename from proceeding, so only try to
- * grab the resource fork if the lock succeeded.
+ * If the item is not a hardlink, then case sensitivity must be off, otherwise
+ * two names should not resolve to the same cnode unless they were case variants.
*/
- if (hfs_lock (VTOC(tvp), HFS_EXCLUSIVE_LOCK) == 0) {
- tcp = VTOC(tvp);
- /*
- * We only care if we get an open-unlinked file on the dst so we
- * know to null out tvp/tcp to make the rename operation act
- * as if they never existed. Because they're effectively out of the
- * namespace already it's fine to do this. If this is true, then
- * make sure to unlock the cnode and drop the iocount only after the unlock.
+ if (is_hardlink) {
+ emit_rename = 0;
+ /*
+ * Hardlinks are a little trickier. We only want to emit a rename event
+ * if the item is a hardlink, the parent directories are the same, case sensitivity
+ * is off, and the case folded names are the same. See the fvp == tvp case below for more
+ * info.
*/
- error = hfs_vgetrsrc(VTOHFS(tvp), tvp, &tvp_rsrc, TRUE, TRUE);
- hfs_unlock (tcp);
- if (error) {
- /*
- * Since we specify TRUE for error_on_unlinked in hfs_vgetrsrc,
- * we can get a rsrc fork vnode even if it returns an error.
- */
- tcp = NULL;
- tvp = NULL;
- if (tvp_rsrc) {
- vnode_put (tvp_rsrc);
- tvp_rsrc = NULL;
+ if ((fdvp == tdvp) && ((hfsmp->hfs_flags & HFS_CASE_SENSITIVE) == 0)) {
+ if (hfs_namecmp((const u_int8_t *)fcnp->cn_nameptr, fcnp->cn_namelen,
+ (const u_int8_t *)tcnp->cn_nameptr, tcnp->cn_namelen) == 0) {
+ /* Then in this case only it is ok to emit a rename */
+ emit_rename = 1;
}
- /* just bypass truncate lock and act as if we never got tcp/tvp */
- goto retry;
}
}
}
+ if (emit_rename) {
+ /* c_bsdflags should only be assessed while holding the cnode lock.
+ * This is not done consistently throughout the code and can result
+ * in race. This will be fixed via rdar://12181064
+ */
+ if (VTOC(fvp)->c_bsdflags & UF_TRACKED) {
+ is_tracked = 1;
+ }
+ check_for_tracked_file(fvp, orig_from_ctime, NAMESPACE_HANDLER_RENAME_OP, NULL);
+ }
+ if (tvp && VTOC(tvp)) {
+ if (emit_delete) {
+ check_for_tracked_file(tvp, orig_to_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
+ }
+ }
+
+retry:
/* When tvp exists, take the truncate lock for hfs_removefile(). */
if (tvp && (vnode_isreg(tvp) || vnode_islnk(tvp))) {
- hfs_lock_truncate(VTOC(tvp), HFS_EXCLUSIVE_LOCK);
+ hfs_lock_truncate(VTOC(tvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
took_trunc_lock = 1;
}
- retry:
+relock:
error = hfs_lockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL,
HFS_EXCLUSIVE_LOCK, &error_cnode);
if (error) {
if (took_trunc_lock) {
- hfs_unlock_truncate(VTOC(tvp), 0);
+ hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT);
took_trunc_lock = 0;
}
+
+ /*
+ * We hit an error path. If we were trying to re-acquire the locks
+ * after coming through here once, we might have already obtained
+ * an iocount on tvp's resource fork vnode. Drop that before dealing
+ * with the failure. Note this is safe -- since we are in an
+ * error handling path, we can't be holding the cnode locks.
+ */
+ if (tvp_rsrc) {
+ vnode_put (tvp_rsrc);
+ tvp_rsrc_vid = 0;
+ tvp_rsrc = NULL;
+ }
+
/*
* tvp might no longer exist. If the cause of the lock failure
* was tvp, then we can try again with tvp/tcp set to NULL.
tvp = NULL;
goto retry;
}
- /* otherwise, drop iocounts on the rsrc forks and bail out */
- if (fvp_rsrc) {
- vnode_put (fvp_rsrc);
- }
- if (tvp_rsrc) {
- vnode_put (tvp_rsrc);
+
+ if (emit_rename && is_tracked) {
+ resolve_nspace_item(fvp, NAMESPACE_HANDLER_RENAME_FAILED_OP | NAMESPACE_HANDLER_TRACK_EVENT);
}
+
return (error);
}
fcp = VTOC(fvp);
tdcp = VTOC(tdvp);
tcp = tvp ? VTOC(tvp) : NULL;
- hfsmp = VTOHFS(tdvp);
+
+ //
+ // if the item is tracked but doesn't have a document_id, assign one and generate an fsevent for it
+ //
+ unlocked = 0;
+ if ((fcp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16))->document_id == 0) {
+ uint32_t newid;
+
+ hfs_unlockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL);
+ unlocked = 1;
+
+ if (hfs_generate_document_id(hfsmp, &newid) == 0) {
+ hfs_lock(fcp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
+ ((struct FndrExtendedDirInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16))->document_id = newid;
+#if CONFIG_FSE
+ add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
+ FSE_ARG_DEV, hfsmp->hfs_raw_dev,
+ FSE_ARG_INO, (ino64_t)0, // src inode #
+ FSE_ARG_INO, (ino64_t)fcp->c_fileid, // dst inode #
+ FSE_ARG_INT32, newid,
+ FSE_ARG_DONE);
+#endif
+ hfs_unlock(fcp);
+ } else {
+ // XXXdbg - couldn't get a new docid... what to do? can't really fail the rename...
+ }
+
+ //
+		// check if we're going to need to fix tcp as well. if we aren't, go back and relock
+ // everything. otherwise continue on and fix up tcp as well before relocking.
+ //
+ if (tcp == NULL || !(tcp->c_bsdflags & UF_TRACKED) || ((struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16))->document_id != 0) {
+ goto relock;
+ }
+ }
+
+ //
+ // same thing for tcp if it's set
+ //
+ if (tcp && (tcp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16))->document_id == 0) {
+ uint32_t newid;
+
+ if (!unlocked) {
+ hfs_unlockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL);
+ unlocked = 1;
+ }
+
+ if (hfs_generate_document_id(hfsmp, &newid) == 0) {
+ hfs_lock(tcp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
+ ((struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16))->document_id = newid;
+#if CONFIG_FSE
+ add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
+ FSE_ARG_DEV, hfsmp->hfs_raw_dev,
+ FSE_ARG_INO, (ino64_t)0, // src inode #
+ FSE_ARG_INO, (ino64_t)tcp->c_fileid, // dst inode #
+ FSE_ARG_INT32, newid,
+ FSE_ARG_DONE);
+#endif
+ hfs_unlock(tcp);
+ } else {
+ // XXXdbg - couldn't get a new docid... what to do? can't really fail the rename...
+ }
+
+ // go back up and relock everything. next time through the if statement won't be true
+ // and we'll skip over this block of code.
+ goto relock;
+ }
+
+
+
+ /*
+ * Acquire iocounts on the destination's resource fork vnode
+ * if necessary. If dst/src are files and the dst has a resource
+ * fork vnode, then we need to try and acquire an iocount on the rsrc vnode.
+ * If it does not exist, then we don't care and can skip it.
+ */
+ if ((vnode_isreg(fvp)) || (vnode_islnk(fvp))) {
+ if ((tvp) && (tcp->c_rsrc_vp) && (tvp_rsrc == NULL)) {
+ tvp_rsrc = tcp->c_rsrc_vp;
+ /*
+ * We can look at the vid here because we're holding the
+ * cnode lock on the underlying cnode for this rsrc vnode.
+ */
+ tvp_rsrc_vid = vnode_vid (tvp_rsrc);
+
+ /* Unlock everything to acquire iocount on this rsrc vnode */
+ if (took_trunc_lock) {
+ hfs_unlock_truncate (VTOC(tvp), HFS_LOCK_DEFAULT);
+ took_trunc_lock = 0;
+ }
+ hfs_unlockfour(fdcp, fcp, tdcp, tcp);
+
+ if (vnode_getwithvid (tvp_rsrc, tvp_rsrc_vid)) {
+ /* iocount acquisition failed. Reset fields and start over.. */
+ tvp_rsrc_vid = 0;
+ tvp_rsrc = NULL;
+ }
+ goto retry;
+ }
+ }
+
+
/* Ensure we didn't race src or dst parent directories with rmdir. */
if (fdcp->c_flag & (C_NOEXISTS | C_DELETED)) {
// never existed in the first place.
//
if (took_trunc_lock) {
- hfs_unlock_truncate(VTOC(tvp), 0);
+ hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT);
took_trunc_lock = 0;
}
error = 0;
/*
* Make sure "from" vnode and its parent are changeable.
*/
- if ((fcp->c_flags & (IMMUTABLE | APPEND)) || (fdcp->c_flags & APPEND)) {
+ if ((fcp->c_bsdflags & (IMMUTABLE | APPEND)) || (fdcp->c_bsdflags & APPEND)) {
error = EPERM;
goto out;
}
goto out;
}
+ /* Don't allow modification of the journal or journal_info_block */
+ if (hfs_is_journal_file(hfsmp, fcp) ||
+ (tcp && hfs_is_journal_file(hfsmp, tcp))) {
+ error = EPERM;
+ goto out;
+ }
+
#if QUOTA
if (tvp)
(void)hfs_getinoquota(tcp);
lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
- if (cat_lookup(hfsmp, &tmpdesc, 0, NULL, NULL, NULL, &real_cnid) != 0) {
+ if (cat_lookup(hfsmp, &tmpdesc, 0, 0, NULL, NULL, NULL, &real_cnid) != 0) {
hfs_systemfile_unlock(hfsmp, lockflags);
goto out;
}
* capable of clearing out unused blocks for an open-unlinked file or dir.
*/
if (tvp) {
+ //
+ // if the destination has a document id, we need to preserve it
+ //
+ if (fvp != tvp) {
+ uint32_t document_id;
+ struct FndrExtendedDirInfo *ffip = (struct FndrExtendedDirInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16);
+ struct FndrExtendedDirInfo *tfip = (struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16);
+
+ if (ffip->document_id && tfip->document_id) {
+ // both documents are tracked. only save a tombstone from tcp and do nothing else.
+ save_tombstone(hfsmp, tdvp, tvp, tcnp, 0);
+ } else {
+ struct doc_tombstone *ut;
+ ut = get_uthread_doc_tombstone();
+
+ document_id = tfip->document_id;
+ tfip->document_id = 0;
+
+ if (document_id != 0) {
+ // clear UF_TRACKED as well since tcp is now no longer tracked
+ tcp->c_bsdflags &= ~UF_TRACKED;
+ (void) cat_update(hfsmp, &tcp->c_desc, &tcp->c_attr, NULL, NULL);
+ }
+
+ if (ffip->document_id == 0 && document_id != 0) {
+ // printf("RENAME: preserving doc-id %d onto %s (from ino %d, to ino %d)\n", document_id, tcp->c_desc.cd_nameptr, tcp->c_desc.cd_cnid, fcp->c_desc.cd_cnid);
+ fcp->c_bsdflags |= UF_TRACKED;
+ ffip->document_id = document_id;
+
+ (void) cat_update(hfsmp, &fcp->c_desc, &fcp->c_attr, NULL, NULL);
+#if CONFIG_FSE
+ add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
+ FSE_ARG_DEV, hfsmp->hfs_raw_dev,
+ FSE_ARG_INO, (ino64_t)tcp->c_fileid, // src inode #
+ FSE_ARG_INO, (ino64_t)fcp->c_fileid, // dst inode #
+ FSE_ARG_INT32, (uint32_t)ffip->document_id,
+ FSE_ARG_DONE);
+#endif
+ } else if ((fcp->c_bsdflags & UF_TRACKED) && should_save_docid_tombstone(ut, fvp, fcnp)) {
+
+ if (ut->t_lastop_document_id) {
+ clear_tombstone_docid(ut, hfsmp, NULL);
+ }
+ save_tombstone(hfsmp, fdvp, fvp, fcnp, 0);
+
+ //printf("RENAME: (dest-exists): saving tombstone doc-id %lld @ %s (ino %d)\n",
+ // ut->t_lastop_document_id, ut->t_lastop_filename, fcp->c_desc.cd_cnid);
+ }
+ }
+ }
+
/*
* When fvp matches tvp they could be case variants
* or matching hard links.
*/
if (fvp == tvp) {
if (!(fcp->c_flag & C_HARDLINK)) {
+ /*
+ * If they're not hardlinks, then fvp == tvp must mean we
+ * are using case-insensitive HFS because case-sensitive would
+ * not use the same vnode for both. In this case we just update
+ * the catalog for: a -> A
+ */
goto skip_rm; /* simple case variant */
- } else if ((fdvp != tdvp) ||
+ }
+ /* For all cases below, we must be using hardlinks */
+ else if ((fdvp != tdvp) ||
(hfsmp->hfs_flags & HFS_CASE_SENSITIVE)) {
+ /*
+ * If the parent directories are not the same, AND the two items
+ * are hardlinks, posix says to do nothing:
+ * dir1/fred <-> dir2/bob and the op was mv dir1/fred -> dir2/bob
+ * We just return 0 in this case.
+ *
+ * If case sensitivity is on, and we are using hardlinks
+ * then renaming is supposed to do nothing.
+ * dir1/fred <-> dir2/FRED, and op == mv dir1/fred -> dir2/FRED
+ */
goto out; /* matching hardlinks, nothing to do */
} else if (hfs_namecmp((const u_int8_t *)fcnp->cn_nameptr, fcnp->cn_namelen,
(const u_int8_t *)tcnp->cn_nameptr, tcnp->cn_namelen) == 0) {
+ /*
+ * If we get here, then the following must be true:
+ * a) We are running case-insensitive HFS+.
+ * b) Both paths 'fvp' and 'tvp' are in the same parent directory.
+ * c) the two names are case-variants of each other.
+ *
+ * In this case, we are really only dealing with a single catalog record
+ * whose name is being updated.
+ *
+ * op is dir1/fred -> dir1/FRED
+ *
+ * We need to special case the name matching, because if
+ * dir1/fred <-> dir1/bob were the two links, and the
+ * op was dir1/fred -> dir1/bob
+ * That would fail/do nothing.
+ */
goto skip_rm; /* case-variant hardlink in the same dir */
} else {
goto out; /* matching hardlink, nothing to do */
error = hfs_removedir(tdvp, tvp, tcnp, HFSRM_SKIP_RESERVE, 1);
}
else {
- error = hfs_removefile(tdvp, tvp, tcnp, 0, HFSRM_SKIP_RESERVE, 0, tvp_rsrc, 1);
+ error = hfs_removefile(tdvp, tvp, tcnp, 0, HFSRM_SKIP_RESERVE, 0, NULL, 1);
/*
* If the destination file had a resource fork vnode, then we need to get rid of
* as quickly as possible.
*/
vnode_recycle(tvp);
+ } else {
+ struct doc_tombstone *ut;
+ ut = get_uthread_doc_tombstone();
+
+ //
+ // There is nothing at the destination. If the file being renamed is
+ // tracked, save a "tombstone" of the document_id. If the file is
+ // not a tracked file, then see if it needs to inherit a tombstone.
+ //
+ // NOTE: we do not save a tombstone if the file being renamed begins
+		// with "atmp" which is done to work around AutoCAD's bizarre
+		// 5-step unsafe save behavior
+ //
+ if (fcp->c_bsdflags & UF_TRACKED) {
+ if (should_save_docid_tombstone(ut, fvp, fcnp)) {
+ save_tombstone(hfsmp, fdvp, fvp, fcnp, 0);
+
+ //printf("RENAME: (no dest): saving tombstone doc-id %lld @ %s (ino %d)\n",
+ // ut->t_lastop_document_id, ut->t_lastop_filename, fcp->c_desc.cd_cnid);
+ } else {
+ // intentionally do nothing
+ }
+ } else if ( ut->t_lastop_document_id != 0
+ && tdvp == ut->t_lastop_parent
+ && vnode_vid(tdvp) == ut->t_lastop_parent_vid
+ && strcmp((char *)ut->t_lastop_filename, (char *)tcnp->cn_nameptr) == 0) {
+
+ //printf("RENAME: %s (ino %d) inheriting doc-id %lld\n", tcnp->cn_nameptr, fcp->c_desc.cd_cnid, ut->t_lastop_document_id);
+ struct FndrExtendedFileInfo *fip = (struct FndrExtendedFileInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16);
+ fcp->c_bsdflags |= UF_TRACKED;
+ fip->document_id = ut->t_lastop_document_id;
+ cat_update(hfsmp, &fcp->c_desc, &fcp->c_attr, NULL, NULL);
+
+ clear_tombstone_docid(ut, hfsmp, fcp); // will send the docid-changed fsevent
+
+ } else if (ut->t_lastop_document_id && should_save_docid_tombstone(ut, fvp, fcnp) && should_save_docid_tombstone(ut, tvp, tcnp)) {
+ // no match, clear the tombstone
+ //printf("RENAME: clearing the tombstone %lld @ %s\n", ut->t_lastop_document_id, ut->t_lastop_filename);
+ clear_tombstone_docid(ut, hfsmp, NULL);
+ }
+
}
skip_rm:
/*
replace_desc(fcp, &out_desc);
fcp->c_parentcnid = tdcp->c_fileid;
fcp->c_hint = 0;
-
+
/* Now indicate this cnode needs to have date-added written to the finderinfo */
fcp->c_flag |= C_NEEDS_DATEADDED;
(void) hfs_update (fvp, 0);
}
tdcp->c_entries++;
tdcp->c_dirchangecnt++;
+ {
+ struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)tdcp->c_finderinfo + 16);
+ extinfo->write_gen_counter = OSSwapHostToBigInt32(OSSwapBigToHostInt32(extinfo->write_gen_counter) + 1);
+ }
if (fdcp->c_entries > 0)
fdcp->c_entries--;
fdcp->c_dirchangecnt++;
fdcp->c_flag |= C_FORCEUPDATE; // XXXdbg - force it out!
(void) hfs_update(fdvp, 0);
}
+ {
+ struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)fdcp->c_finderinfo + 16);
+ extinfo->write_gen_counter = OSSwapHostToBigInt32(OSSwapBigToHostInt32(extinfo->write_gen_counter) + 1);
+ }
+
tdcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
tdcp->c_touch_chgtime = TRUE;
tdcp->c_touch_modtime = TRUE;
tdcp->c_flag |= C_FORCEUPDATE; // XXXdbg - force it out!
(void) hfs_update(tdvp, 0);
+
+ /* Update the vnode's name now that the rename has completed. */
+ vnode_update_identity(fvp, tdvp, tcnp->cn_nameptr, tcnp->cn_namelen,
+ tcnp->cn_hash, (VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME));
+
+ /*
+ * At this point, we may have a resource fork vnode attached to the
+ * 'from' vnode. If it exists, we will want to update its name, because
+ * it contains the old name + _PATH_RSRCFORKSPEC. ("/..namedfork/rsrc").
+ *
+ * Note that the only thing we need to update here is the name attached to
+ * the vnode, since a resource fork vnode does not have a separate resource
+ * cnode -- it's still 'fcp'.
+ */
+ if (fcp->c_rsrc_vp) {
+ char* rsrc_path = NULL;
+ int len;
+
+ /* Create a new temporary buffer that's going to hold the new name */
+ MALLOC_ZONE (rsrc_path, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
+ len = snprintf (rsrc_path, MAXPATHLEN, "%s%s", tcnp->cn_nameptr, _PATH_RSRCFORKSPEC);
+ len = MIN(len, MAXPATHLEN);
+
+ /*
+ * vnode_update_identity will do the following for us:
+ * 1) release reference on the existing rsrc vnode's name.
+ * 2) copy/insert new name into the name cache
+ * 3) attach the new name to the resource vnode
+ * 4) update the vnode's vid
+ */
+ vnode_update_identity (fcp->c_rsrc_vp, fvp, rsrc_path, len, 0, (VNODE_UPDATE_NAME | VNODE_UPDATE_CACHE));
+
+ /* Free the memory associated with the resource fork's name */
+ FREE_ZONE (rsrc_path, MAXPATHLEN, M_NAMEI);
+ }
out:
if (got_cookie) {
cat_postflight(hfsmp, &cookie, p);
}
if (took_trunc_lock) {
- hfs_unlock_truncate(VTOC(tvp), 0);
+ hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT);
}
hfs_unlockfour(fdcp, fcp, tdcp, tcp);
/* Now vnode_put the resource forks vnodes if necessary */
if (tvp_rsrc) {
vnode_put(tvp_rsrc);
- }
- if (fvp_rsrc) {
- vnode_put(fvp_rsrc);
+ tvp_rsrc = NULL;
}
/* After tvp is removed the only acceptable error is EIO */
if (error && tvp_deleted)
error = EIO;
+ if (emit_rename && is_tracked) {
+ if (error) {
+ resolve_nspace_item(fvp, NAMESPACE_HANDLER_RENAME_FAILED_OP | NAMESPACE_HANDLER_TRACK_EVENT);
+ } else {
+ resolve_nspace_item(fvp, NAMESPACE_HANDLER_RENAME_SUCCESS_OP | NAMESPACE_HANDLER_TRACK_EVENT);
+ }
+ }
+
return (error);
}
goto out;
}
vp = *vpp;
- if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK))) {
+ if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
goto out;
}
cp = VTOC(vp);
/* hfs_removefile() requires holding the truncate lock */
hfs_unlock(cp);
- hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK);
- hfs_lock(cp, HFS_FORCE_LOCK);
+ hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
+ hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
if (hfs_start_transaction(hfsmp) != 0) {
started_tr = 0;
- hfs_unlock_truncate(cp, TRUE);
+ hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
goto out;
}
(void) hfs_removefile(dvp, vp, ap->a_cnp, 0, 0, 0, NULL, 0);
- hfs_unlock_truncate(cp, 0);
+ hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
goto out;
}
*
* In fact, the offset used by HFS is essentially an index (26 bits)
* with a tag (6 bits). The tag is for associating the next request
- * with the current request. This enables us to have multiple threads
+ * with the current request. This enables us to have multiple threads
* reading the directory while the directory is also being modified.
*
* Each tag/index pair is tied to a unique directory hint. The hint
if (uio_iovcnt(uio) > 1)
return (EINVAL);
- if (VTOC(vp)->c_flags & UF_COMPRESSED) {
+ if (VTOC(vp)->c_bsdflags & UF_COMPRESSED) {
int compressed = hfs_file_is_compressed(VTOC(vp), 0); /* 0 == take the cnode lock */
if (VTOCMP(vp) != NULL && !compressed) {
error = check_for_dataless_file(vp, NAMESPACE_HANDLER_READ_OP);
hfsmp = VTOHFS(vp);
/* Note that the dirhint calls require an exclusive lock. */
- if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK)))
+ if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
return (error);
/* Pick up cnid hint (if any). */
}
/* Pack the buffer with dirent entries. */
- error = cat_getdirentries(hfsmp, cp->c_entries, dirhint, uio, extended, &items, &eofflag);
+ error = cat_getdirentries(hfsmp, cp->c_entries, dirhint, uio, ap->a_flags, &items, &eofflag);
if (index == 0 && error == 0) {
cp->c_dirthreadhint = dirhint->dh_threadhint;
if (!vnode_islnk(vp))
return (EINVAL);
- if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK)))
+ if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
return (error);
cp = VTOC(vp);
fp = VTOF(vp);
vfs_context_t a_context;
} */ *ap;
{
+
+ int std_hfs = (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD);
switch (ap->a_name) {
case _PC_LINK_MAX:
- if (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD)
- *ap->a_retval = 1;
- else
+ if (std_hfs == 0){
*ap->a_retval = HFS_LINK_MAX;
+ }
+#if CONFIG_HFS_STD
+ else {
+ *ap->a_retval = 1;
+ }
+#endif
break;
case _PC_NAME_MAX:
- if (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD)
- *ap->a_retval = kHFSMaxFileNameChars; /* 31 */
- else
+ if (std_hfs == 0) {
*ap->a_retval = kHFSPlusMaxFileNameChars; /* 255 */
+ }
+#if CONFIG_HFS_STD
+ else {
+ *ap->a_retval = kHFSMaxFileNameChars; /* 31 */
+ }
+#endif
break;
case _PC_PATH_MAX:
*ap->a_retval = PATH_MAX; /* 1024 */
*ap->a_retval = 200112; /* _POSIX_NO_TRUNC */
break;
case _PC_NAME_CHARS_MAX:
- if (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD)
- *ap->a_retval = kHFSMaxFileNameChars; /* 31 */
- else
+ if (std_hfs == 0) {
*ap->a_retval = kHFSPlusMaxFileNameChars; /* 255 */
+ }
+#if CONFIG_HFS_STD
+ else {
+ *ap->a_retval = kHFSMaxFileNameChars; /* 31 */
+ }
+#endif
break;
case _PC_CASE_SENSITIVE:
if (VTOHFS(ap->a_vp)->hfs_flags & HFS_CASE_SENSITIVE)
*ap->a_retval = 1;
break;
case _PC_FILESIZEBITS:
- if (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD)
+ /* number of bits to store max file size */
+ if (std_hfs == 0) {
+ *ap->a_retval = 64;
+ }
+#if CONFIG_HFS_STD
+ else {
*ap->a_retval = 32;
- else
- *ap->a_retval = 64; /* number of bits to store max file size */
+ }
+#endif
break;
case _PC_XATTR_SIZE_BITS:
/* Number of bits to store maximum extended attribute size */
struct hfsmount *hfsmp;
int lockflags;
int error;
+ uint32_t tstate = 0;
p = current_proc();
hfsmp = VTOHFS(vp);
cp->c_touch_modtime = 0;
return (0);
}
-
+ if (kdebug_enable) {
+ if (cp->c_touch_acctime)
+ tstate |= DBG_HFS_UPDATE_ACCTIME;
+ if (cp->c_touch_modtime)
+ tstate |= DBG_HFS_UPDATE_MODTIME;
+ if (cp->c_touch_chgtime)
+ tstate |= DBG_HFS_UPDATE_CHGTIME;
+
+ if (cp->c_flag & C_MODIFIED)
+ tstate |= DBG_HFS_UPDATE_MODIFIED;
+ if (cp->c_flag & C_FORCEUPDATE)
+ tstate |= DBG_HFS_UPDATE_FORCE;
+ if (cp->c_flag & C_NEEDS_DATEADDED)
+ tstate |= DBG_HFS_UPDATE_DATEADDED;
+ }
hfs_touchtimes(hfsmp, cp);
/* Nothing to update. */
return (0);
}
- if ((error = hfs_start_transaction(hfsmp)) != 0) {
- return error;
- }
-
- /*
- * Modify the values passed to cat_update based on whether or not
- * the file has invalid ranges or borrowed blocks.
- */
- if (dataforkp) {
- off_t numbytes = 0;
-
- /* copy the datafork into a temporary copy so we don't pollute the cnode's */
- bcopy(dataforkp, &datafork, sizeof(datafork));
- dataforkp = &datafork;
+ KERNEL_DEBUG_CONSTANT(0x3018000 | DBG_FUNC_START, vp, tstate, 0, 0, 0);
- /*
- * If there are borrowed blocks, ensure that they are subtracted
- * from the total block count before writing the cnode entry to disk.
- * Only extents that have actually been marked allocated in the bitmap
- * should be reflected in the total block count for this fork.
- */
- if (cp->c_datafork->ff_unallocblocks != 0) {
- // make sure that we don't assign a negative block count
- if (cp->c_datafork->ff_blocks < cp->c_datafork->ff_unallocblocks) {
- panic("hfs: ff_blocks %d is less than unalloc blocks %d\n",
- cp->c_datafork->ff_blocks, cp->c_datafork->ff_unallocblocks);
- }
+ if ((error = hfs_start_transaction(hfsmp)) != 0) {
- /* Also cap the LEOF to the total number of bytes that are allocated. */
- datafork.cf_blocks = (cp->c_datafork->ff_blocks - cp->c_datafork->ff_unallocblocks);
- datafork.cf_size = datafork.cf_blocks * HFSTOVCB(hfsmp)->blockSize;
- }
-
- /*
- * For files with invalid ranges (holes) the on-disk
- * field representing the size of the file (cf_size)
- * must be no larger than the start of the first hole.
- * However, note that if the first invalid range exists
- * solely within borrowed blocks, then our LEOF and block
- * count should both be zero. As a result, set it to the
- * min of the current cf_size and the start of the first
- * invalid range, because it may have already been reduced
- * to zero by the borrowed blocks check above.
- */
- if (!TAILQ_EMPTY(&cp->c_datafork->ff_invalidranges)) {
- numbytes = TAILQ_FIRST(&cp->c_datafork->ff_invalidranges)->rl_start;
- datafork.cf_size = MIN((numbytes), (datafork.cf_size));
- }
+ KERNEL_DEBUG_CONSTANT(0x3018000 | DBG_FUNC_END, vp, tstate, error, -1, 0);
+ return error;
}
+ /*
+ * Modify the values passed to cat_update based on whether or not
+ * the file has invalid ranges or borrowed blocks.
+ */
+ if (dataforkp) {
+ off_t numbytes = 0;
+
+ /* copy the datafork into a temporary copy so we don't pollute the cnode's */
+ bcopy(dataforkp, &datafork, sizeof(datafork));
+ dataforkp = &datafork;
+
+ /*
+ * If there are borrowed blocks, ensure that they are subtracted
+ * from the total block count before writing the cnode entry to disk.
+ * Only extents that have actually been marked allocated in the bitmap
+ * should be reflected in the total block count for this fork.
+ */
+ if (cp->c_datafork->ff_unallocblocks != 0) {
+ // make sure that we don't assign a negative block count
+ if (cp->c_datafork->ff_blocks < cp->c_datafork->ff_unallocblocks) {
+ panic("hfs: ff_blocks %d is less than unalloc blocks %d\n",
+ cp->c_datafork->ff_blocks, cp->c_datafork->ff_unallocblocks);
+ }
+
+ /* Also cap the LEOF to the total number of bytes that are allocated. */
+ datafork.cf_blocks = (cp->c_datafork->ff_blocks - cp->c_datafork->ff_unallocblocks);
+ datafork.cf_size = datafork.cf_blocks * HFSTOVCB(hfsmp)->blockSize;
+ }
+
+ /*
+ * For files with invalid ranges (holes) the on-disk
+ * field representing the size of the file (cf_size)
+ * must be no larger than the start of the first hole.
+ * However, note that if the first invalid range exists
+ * solely within borrowed blocks, then our LEOF and block
+ * count should both be zero. As a result, set it to the
+ * min of the current cf_size and the start of the first
+ * invalid range, because it may have already been reduced
+ * to zero by the borrowed blocks check above.
+ */
+ if (!TAILQ_EMPTY(&cp->c_datafork->ff_invalidranges)) {
+ numbytes = TAILQ_FIRST(&cp->c_datafork->ff_invalidranges)->rl_start;
+ datafork.cf_size = MIN((numbytes), (datafork.cf_size));
+ }
+ }
+
/*
* For resource forks with delayed allocations, make sure
* the block count and file size match the number of blocks
rsrcfork.cf_size = rsrcfork.cf_blocks * HFSTOVCB(hfsmp)->blockSize;
rsrcforkp = &rsrcfork;
}
+ if (kdebug_enable) {
+ long dbg_parms[NUMPARMS];
+ int dbg_namelen;
+
+ dbg_namelen = NUMPARMS * sizeof(long);
+ vn_getpath(vp, (char *)dbg_parms, &dbg_namelen);
+
+ if (dbg_namelen < (int)sizeof(dbg_parms))
+ memset((char *)dbg_parms + dbg_namelen, 0, sizeof(dbg_parms) - dbg_namelen);
+
+ kdebug_lookup_gen_events(dbg_parms, dbg_namelen, (void *)vp, TRUE);
+ }
/*
* Lock the Catalog b-tree file.
hfs_end_transaction(hfsmp);
+ KERNEL_DEBUG_CONSTANT(0x3018000 | DBG_FUNC_END, vp, tstate, error, 0, 0);
+
return (error);
}
enum vtype vnodetype;
int mode;
int newvnode_flags = 0;
- int nocache = 0;
u_int32_t gnv_flags = 0;
+ int protectable_target = 0;
+ int nocache = 0;
- if ((error = hfs_lock(VTOC(dvp), HFS_EXCLUSIVE_LOCK)))
+#if CONFIG_PROTECT
+ struct cprotect *entry = NULL;
+ int32_t cp_class = -1;
+ if (VATTR_IS_ACTIVE(vap, va_dataprotect_class)) {
+ cp_class = (int32_t)vap->va_dataprotect_class;
+ }
+ int protected_mount = 0;
+#endif
+
+
+ if ((error = hfs_lock(VTOC(dvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
return (error);
/* set the cnode pointer only after successfully acquiring lock */
}
dcp->c_flag |= C_DIR_MODIFICATION;
-
+
hfsmp = VTOHFS(dvp);
+
*vpp = NULL;
tvp = NULL;
out_desc.cd_flags = 0;
vnodetype = VREG;
mode = MAKEIMODE(vnodetype, vap->va_mode);
-#if CONFIG_PROTECT
- /* If we're creating a regular file on a CP filesystem, then delay caching */
- if ((vnodetype == VREG ) && (cp_fs_protected (VTOVFS(dvp)))) {
- nocache = 1;
+ if (S_ISDIR (mode) || S_ISREG (mode)) {
+ protectable_target = 1;
}
-#endif
+
/* Check if were out of usable disk space. */
if ((hfs_freeblks(hfsmp, 1) == 0) && (vfs_context_suser(ctx) != 0)) {
error = ENOSPC;
} else {
attr.ca_itime = tv.tv_sec;
}
+#if CONFIG_HFS_STD
if ((hfsmp->hfs_flags & HFS_STANDARD) && gTimeZone.tz_dsttime) {
attr.ca_itime += 3600; /* Same as what hfs_update does */
}
+#endif
attr.ca_atime = attr.ca_ctime = attr.ca_mtime = attr.ca_itime;
attr.ca_atimeondisk = attr.ca_atime;
if (VATTR_IS_ACTIVE(vap, va_flags)) {
VATTR_SET_SUPPORTED(vap, va_flags);
attr.ca_flags = vap->va_flags;
}
-
+
/*
* HFS+ only: all files get ThreadExists
* HFSX only: dirs get HasFolderCount
}
}
- /* Add the date added to the item */
+#if CONFIG_PROTECT
+ if (cp_fs_protected(hfsmp->hfs_mp)) {
+ protected_mount = 1;
+ }
+ /*
+ * On a content-protected HFS+/HFSX filesystem, files and directories
+ * cannot be created without atomically setting/creating the EA that
+ * contains the protection class metadata and keys at the same time, in
+ * the same transaction. As a result, pre-set the "EAs exist" flag
+ * on the cat_attr for protectable catalog record creations. This will
+ * cause the cnode creation routine in hfs_getnewvnode to mark the cnode
+ * as having EAs.
+ */
+ if ((protected_mount) && (protectable_target)) {
+ attr.ca_recflags |= kHFSHasAttributesMask;
+ /* delay entering in the namecache */
+ nocache = 1;
+ }
+#endif
+
+
+ /*
+ * Add the date added to the item. See above, as
+ * all of the dates are set to the itime.
+ */
hfs_write_dateadded (&attr, attr.ca_atime);
+ /* Initialize the gen counter to 1 */
+ hfs_write_gencount(&attr, (uint32_t)1);
+
attr.ca_uid = vap->va_uid;
attr.ca_gid = vap->va_gid;
VATTR_SET_SUPPORTED(vap, va_mode);
in_desc.cd_hint = dcp->c_childhint;
in_desc.cd_encoding = 0;
+#if CONFIG_PROTECT
+ /*
+ * To preserve file creation atomicity with regards to the content protection EA,
+ * we must create the file in the catalog and then write out its EA in the same
+ * transaction.
+ *
+ * We only denote the target class in this EA; key generation is not completed
+ * until the file has been inserted into the catalog and will be done
+ * in a separate transaction.
+ */
+ if ((protected_mount) && (protectable_target)) {
+ error = cp_setup_newentry(hfsmp, dcp, cp_class, attr.ca_mode, &entry);
+ if (error) {
+ goto exit;
+ }
+ }
+#endif
+
if ((error = hfs_start_transaction(hfsmp)) != 0) {
goto exit;
}
// to check that any fileID it wants to use does not have orphaned
// attributes in it.
lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
+ cnid_t new_id;
/* Reserve some space in the Catalog file. */
if ((error = cat_preflight(hfsmp, CAT_CREATE, NULL, 0))) {
hfs_systemfile_unlock(hfsmp, lockflags);
goto exit;
}
- error = cat_create(hfsmp, &in_desc, &attr, &out_desc);
+
+ if ((error = cat_acquire_cnid(hfsmp, &new_id))) {
+ hfs_systemfile_unlock (hfsmp, lockflags);
+ goto exit;
+ }
+
+ error = cat_create(hfsmp, new_id, &in_desc, &attr, &out_desc);
if (error == 0) {
/* Update the parent directory */
dcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
dcp->c_entries++;
+ {
+ struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)dcp->c_finderinfo + 16);
+ extinfo->write_gen_counter = OSSwapHostToBigInt32(OSSwapBigToHostInt32(extinfo->write_gen_counter) + 1);
+ }
if (vnodetype == VDIR) {
INC_FOLDERCOUNT(hfsmp, dcp->c_attr);
}
dcp->c_dirchangecnt++;
+ {
+ struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)dcp->c_finderinfo + 16);
+ extinfo->write_gen_counter = OSSwapHostToBigInt32(OSSwapBigToHostInt32(extinfo->write_gen_counter) + 1);
+ }
dcp->c_ctime = tv.tv_sec;
dcp->c_mtime = tv.tv_sec;
(void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
+
+#if CONFIG_PROTECT
+ /*
+ * If we are creating a content protected file, now is when
+ * we create the EA. We must create it in the same transaction
+ * that creates the file. We can also guarantee that the file
+ * MUST exist because we are still holding the catalog lock
+ * at this point.
+ */
+ if ((attr.ca_fileid != 0) && (protected_mount) && (protectable_target)) {
+ error = cp_setxattr (NULL, entry, hfsmp, attr.ca_fileid, XATTR_CREATE);
+
+ if (error) {
+ int delete_err;
+ /*
+ * If we fail the EA creation, then we need to delete the file.
+ * Luckily, we are still holding all of the right locks.
+ */
+ delete_err = cat_delete (hfsmp, &out_desc, &attr);
+ if (delete_err == 0) {
+ /* Update the parent directory */
+ if (dcp->c_entries > 0)
+ dcp->c_entries--;
+ dcp->c_dirchangecnt++;
+ dcp->c_ctime = tv.tv_sec;
+ dcp->c_mtime = tv.tv_sec;
+ (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
+ }
+
+ /* Emit EINVAL if we fail to create EA*/
+ error = EINVAL;
+ }
+ }
+#endif
}
hfs_systemfile_unlock(hfsmp, lockflags);
if (error)
started_tr = 0;
}
+#if CONFIG_PROTECT
+ /*
+ * At this point, we must have encountered success with writing the EA.
+ * Destroy our temporary cprotect (which had no keys).
+ */
+
+ if ((attr.ca_fileid != 0) && (protected_mount) && (protectable_target)) {
+ cp_entry_destroy (entry);
+ entry = NULL;
+ }
+#endif
+
/* Do not create vnode for whiteouts */
if (S_ISWHT(mode)) {
goto exit;
goto exit;
cp = VTOC(tvp);
+
+ struct doc_tombstone *ut;
+ ut = get_uthread_doc_tombstone();
+ if ( ut->t_lastop_document_id != 0
+ && ut->t_lastop_parent == dvp
+ && ut->t_lastop_parent_vid == vnode_vid(dvp)
+ && strcmp((char *)ut->t_lastop_filename, (char *)cp->c_desc.cd_nameptr) == 0) {
+ struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
+
+ //printf("CREATE: preserving doc-id %lld on %s\n", ut->t_lastop_document_id, ut->t_lastop_filename);
+ fip->document_id = (uint32_t)(ut->t_lastop_document_id & 0xffffffff);
+
+ cp->c_bsdflags |= UF_TRACKED;
+ // mark the cnode dirty
+ cp->c_flag |= C_MODIFIED | C_FORCEUPDATE;
+
+ if ((error = hfs_start_transaction(hfsmp)) == 0) {
+ lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
+
+ (void) cat_update(hfsmp, &cp->c_desc, &cp->c_attr, NULL, NULL);
+
+ hfs_systemfile_unlock (hfsmp, lockflags);
+ (void) hfs_end_transaction(hfsmp);
+ }
+
+ clear_tombstone_docid(ut, hfsmp, cp); // will send the docid-changed fsevent
+ } else if (ut->t_lastop_document_id != 0) {
+ int len = cnp->cn_namelen;
+ if (len == 0) {
+ len = strlen(cnp->cn_nameptr);
+ }
+
+ if (is_ignorable_temp_name(cnp->cn_nameptr, cnp->cn_namelen)) {
+ // printf("CREATE: not clearing tombstone because %s is a temp name.\n", cnp->cn_nameptr);
+ } else {
+ // Clear the tombstone because the thread is not recreating the same path
+ // printf("CREATE: clearing tombstone because %s is NOT a temp name.\n", cnp->cn_nameptr);
+ clear_tombstone_docid(ut, hfsmp, NULL);
+ }
+ }
+
*vpp = tvp;
#if CONFIG_PROTECT
- error = cp_entry_create_keys(cp);
/*
- * If we fail to create keys, then do NOT allow this vnode to percolate out into the
- * namespace. Delete it and return the errno that cp_entry_create_keys generated.
- * Luckily, we can do this without issues because the entry was newly created
- * and we're still holding the directory cnode lock. Because we prevented it from
- * getting inserted into the namecache upon vnode creation, all accesss to this file
- * would have to go through the directory, whose lock we are still holding.
+ * Now that we have a vnode-in-hand, generate keys for this namespace item.
+ * If we fail to create the keys, then attempt to delete the item from the
+ * namespace. If we can't delete the item, that's not desirable but also not fatal..
+ * All of the places which deal with restoring/unwrapping keys must also be
+ * prepared to encounter an entry that does not have keys.
*/
- if (error) {
- /*
- * If we fail to remove/recycle the item here, we can't do much about it. Log
- * a message to the console and then we can backtrack it. The ultimate error
- * that will get emitted to userland will be from the failure to create the EA blob.
- */
- int err = hfs_removefile (dvp, tvp, cnp, 0, 0, 0, NULL, 0);
- if (err) {
- printf("hfs_makenode: removefile failed (%d) for CP file %p\n", err, tvp);
- }
- hfs_unlock (cp);
- err = vnode_recycle (tvp);
- if (err) {
- printf("hfs_makenode: vnode_recycle failed (%d) for CP file %p\n", err, tvp);
- }
- /* Drop the iocount on the new vnode to force reclamation/recycling */
- vnode_put (tvp);
- cp = NULL;
- *vpp = NULL;
- }
- else {
- /* insert item into name cache if it wasn't already inserted.*/
- if (nocache) {
- cache_enter (dvp, tvp, cnp);
+ if ((protectable_target) && (protected_mount)) {
+ struct cprotect *keyed_entry = NULL;
+
+ if (cp->c_cpentry == NULL) {
+ panic ("hfs_makenode: no cpentry for cnode (%p)", cp);
}
- }
+ error = cp_generate_keys (hfsmp, cp, cp->c_cpentry->cp_pclass, &keyed_entry);
+ if (error == 0) {
+ /*
+ * Upon success, the keys were generated and written out.
+ * Update the cp pointer in the cnode.
+ */
+ cp_replace_entry (cp, keyed_entry);
+ if (nocache) {
+ cache_enter (dvp, tvp, cnp);
+ }
+ }
+ else {
+ /* If key creation OR the setxattr failed, emit EPERM to userland */
+ error = EPERM;
+
+ /*
+ * Beware! This slightly violates the lock ordering for the
+ * cnode/vnode 'tvp'. Ordinarily, you must acquire the truncate lock
+ * which guards file size changes before acquiring the normal cnode lock
+ * and calling hfs_removefile on an item.
+ *
+ * However, in this case, we are still holding the directory lock so
+ * 'tvp' is not lookup-able and it was a newly created vnode so it
+ * cannot have any content yet. The only reason we are initiating
+ * the removefile is because we could not generate content protection keys
+ * for this namespace item. Note also that we pass a '1' in the allow_dirs
+ * argument for hfs_removefile because we may be creating a directory here.
+ *
+ * All this to say that while it is technically a violation it is
+ * impossible to race with another thread for this cnode so it is safe.
+ */
+ int err = hfs_removefile (dvp, tvp, cnp, 0, 0, 1, NULL, 0);
+ if (err) {
+ printf("hfs_makenode: removefile failed (%d) for CP entry %p\n", err, tvp);
+ }
+
+ /* Release the cnode lock and mark the vnode for termination */
+ hfs_unlock (cp);
+ err = vnode_recycle (tvp);
+ if (err) {
+ printf("hfs_makenode: vnode_recycle failed (%d) for CP entry %p\n", err, tvp);
+ }
+
+ /* Drop the iocount on the new vnode to force reclamation/recycling */
+ vnode_put (tvp);
+ cp = NULL;
+ *vpp = NULL;
+ }
+ }
#endif
-/*
- * If CONFIG_PROTECT is not enabled, then all items will get automatically added into
- * the namecache, as nocache will be set to 0.
- */
#if QUOTA
/*
* function) to see if creating this cnode/vnode would cause us to go over quota.
*/
if (hfsmp->hfs_flags & HFS_QUOTAS) {
- (void) hfs_getinoquota(cp);
+ if (cp) {
+ /* cp could have been zeroed earlier */
+ (void) hfs_getinoquota(cp);
+ }
}
#endif
exit:
cat_releasedesc(&out_desc);
+#if CONFIG_PROTECT
+ /*
+ * We may have jumped here in error-handling various situations above.
+ * If we haven't already dumped the temporary CP used to initialize
+ * the file atomically, then free it now. cp_entry_destroy should null
+ * out the pointer if it was called already.
+ */
+ if (entry) {
+ cp_entry_destroy (entry);
+ entry = NULL;
+ }
+#endif
+
/*
* Make sure we release cnode lock on dcp.
*/
error = vnode_getwithvid(rvp, vid);
if (can_drop_lock) {
- (void) hfs_lock(cp, HFS_FORCE_LOCK);
+ (void) hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
/*
* When we relinquished our cnode lock, the cnode could have raced
if (name)
printf("hfs_vgetrsrc: couldn't get resource"
- " fork for %s, err %d\n", name, error);
+ " fork for %s, vol=%s, err=%d\n", name, hfsmp->vcbVN, error);
return (error);
}
} else {
lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
+ /*
+ * We call cat_idlookup (instead of cat_lookup) below because we can't
+ * trust the descriptor in the provided cnode for lookups at this point.
+ * Between the time of the original lookup of this vnode and now, the
+ * descriptor could have gotten swapped or replaced. If this occurred,
+ * the parent/name combo originally desired may not necessarily be provided
+ * if we use the descriptor. Even worse, if the vnode represents
+ * a hardlink, we could have removed one of the links from the namespace
+ * but left the descriptor alone, since hfs_unlink does not invalidate
+ * the descriptor in the cnode if other links still point to the inode.
+ *
+ * Consider the following (slightly contrived) scenario:
+ * /tmp/a <--> /tmp/b (hardlinks).
+ * 1. Thread A: open rsrc fork on /tmp/b.
+ * 1a. Thread A: does lookup, goes out to lunch right before calling getnamedstream.
+ * 2. Thread B does 'mv /foo/b /tmp/b'
+	 * 2a. Thread B succeeds.
+ * 3. Thread A comes back and wants rsrc fork info for /tmp/b.
+ *
+ * Even though the hardlink backing /tmp/b is now eliminated, the descriptor
+ * is not removed/updated during the unlink process. So, if you were to
+ * do a lookup on /tmp/b, you'd acquire an entirely different record's resource
+ * fork.
+ *
+ * As a result, we use the fileid, which should be invariant for the lifetime
+ * of the cnode (possibly barring calls to exchangedata).
+ *
+ * Addendum: We can't do the above for HFS standard since we aren't guaranteed to
+ * have thread records for files. They were only required for directories. So
+ * we need to do the lookup with the catalog name. This is OK since hardlinks were
+ * never allowed on HFS standard.
+ */
+
/* Get resource fork data */
- error = cat_lookup(hfsmp, descptr, 1, (struct cat_desc *)0,
- (struct cat_attr *)0, &rsrcfork, NULL);
+ if ((hfsmp->hfs_flags & HFS_STANDARD) == 0) {
+ error = cat_idlookup (hfsmp, cp->c_fileid, 0, 1, NULL, NULL, &rsrcfork);
+ }
+#if CONFIG_HFS_STD
+ else {
+ /*
+ * HFS standard only:
+ *
+ * Get the resource fork for this item with a cat_lookup call, but do not
+ * force a case lookup since HFS standard is case-insensitive only. We
+ * don't want the descriptor; just the fork data here. If we tried to
+ * do a ID lookup (via thread record -> catalog record), then we might fail
+ * prematurely since, as noted above, thread records were not strictly required
+ * on files in HFS.
+ */
+ error = cat_lookup (hfsmp, descptr, 1, 0, (struct cat_desc*)NULL,
+ (struct cat_attr*)NULL, &rsrcfork, NULL);
+ }
+#endif
hfs_systemfile_unlock(hfsmp, lockflags);
if (error) {
struct cnode *cp;
if (vnode_isinuse(ap->a_vp, 0)) {
- if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) == 0) {
+ if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) == 0) {
cp = VTOC(vp);
hfs_touchtimes(VTOHFS(vp), cp);
hfs_unlock(cp);
struct cnode *cp;
if (vnode_isinuse(ap->a_vp, 1)) {
- if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) == 0) {
+ if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) == 0) {
cp = VTOC(vp);
hfs_touchtimes(VTOHFS(vp), cp);
hfs_unlock(cp);
#endif /* FIFO */
+/*
+ * Getter for the document_id
+ * the document_id is stored in FndrExtendedFileInfo/FndrExtendedDirInfo
+ */
+static u_int32_t
+hfs_get_document_id_internal(const uint8_t *finderinfo, mode_t mode)
+{
+	const uint8_t *finfo = NULL;
+	u_int32_t doc_id = 0;
+
+	/*
+	 * The document_id lives in the extended Finder info, i.e. the second
+	 * 16 bytes of the 32-byte FinderInfo blob, for both files and dirs.
+	 */
+	finfo = finderinfo + 16;
+
+	/*
+	 * Fix: the original test was (S_ISDIR(mode) || S_ISREG(mode)) followed
+	 * by "else if (S_ISDIR(mode))", leaving the directory branch
+	 * unreachable. Both layouts keep document_id at the same offset, but
+	 * overlay the struct that actually matches the item's type.
+	 */
+	if (S_ISREG(mode)) {
+		const struct FndrExtendedFileInfo *extinfo = (const struct FndrExtendedFileInfo *)finfo;
+		doc_id = extinfo->document_id;
+	} else if (S_ISDIR(mode)) {
+		const struct FndrExtendedDirInfo *extinfo = (const struct FndrExtendedDirInfo *)finfo;
+		doc_id = extinfo->document_id;
+	}
+
+	/* Anything else (symlinks, specials) has no document_id: return 0 */
+	return doc_id;
+}
+
+
+/* Return the document id recorded in a cnode's Finder info */
+u_int32_t
+hfs_get_document_id(struct cnode *cp)
+{
+	return hfs_get_document_id_internal((const uint8_t *)cp->c_finderinfo, cp->c_attr.ca_mode);
+}
+
+/* Variant for callers that already hold a raw Finder-info blob plus mode */
+u_int32_t
+hfs_get_document_id_from_blob(const uint8_t *finderinfo, mode_t mode)
+{
+	return hfs_get_document_id_internal(finderinfo, mode);
+}
+
/*
* Synchronize a file's in-core state with that on disk.
*/
}
#if CONFIG_PROTECT
- if ((error = cp_handle_vnop(VTOC(vp), CP_WRITE_ACCESS)) != 0) {
+ if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
return (error);
}
#endif /* CONFIG_PROTECT */
* We need to allow ENOENT lock errors since unlink
 * system call can call VNOP_FSYNC during vclean.
*/
- error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK);
+ error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
if (error)
return (0);
}
int (**hfs_vnodeop_p)(void *);
-int (**hfs_std_vnodeop_p) (void *);
#define VOPFUNC int (*)(void *)
+
+#if CONFIG_HFS_STD
+int (**hfs_std_vnodeop_p) (void *);
static int hfs_readonly_op (__unused void* ap) { return (EROFS); }
/*
{ &vnop_pathconf_desc, (VOPFUNC)hfs_vnop_pathconf }, /* pathconf */
{ &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
{ &vnop_allocate_desc, (VOPFUNC)hfs_readonly_op }, /* allocate (READONLY) */
+#if CONFIG_SEARCHFS
{ &vnop_searchfs_desc, (VOPFUNC)hfs_vnop_search }, /* search fs */
+#else
+ { &vnop_searchfs_desc, (VOPFUNC)err_searchfs }, /* search fs */
+#endif
{ &vnop_bwrite_desc, (VOPFUNC)hfs_readonly_op }, /* bwrite (READONLY) */
{ &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* pagein */
{ &vnop_pageout_desc,(VOPFUNC) hfs_readonly_op }, /* pageout (READONLY) */
struct vnodeopv_desc hfs_std_vnodeop_opv_desc =
{ &hfs_std_vnodeop_p, hfs_standard_vnodeop_entries };
-
+#endif
/* VNOP table for HFS+ */
struct vnodeopv_entry_desc hfs_vnodeop_entries[] = {
{ &vnop_pathconf_desc, (VOPFUNC)hfs_vnop_pathconf }, /* pathconf */
{ &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
{ &vnop_allocate_desc, (VOPFUNC)hfs_vnop_allocate }, /* allocate */
+#if CONFIG_SEARCHFS
{ &vnop_searchfs_desc, (VOPFUNC)hfs_vnop_search }, /* search fs */
+#else
+ { &vnop_searchfs_desc, (VOPFUNC)err_searchfs }, /* search fs */
+#endif
{ &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite }, /* bwrite */
{ &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* pagein */
{ &vnop_pageout_desc,(VOPFUNC) hfs_vnop_pageout }, /* pageout */
{ &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
{ &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
{ &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
+ { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
+ { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
+ { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
+ { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
{ (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
};
struct vnodeopv_desc hfs_specop_opv_desc =
{ &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
{ &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
{ &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
+ { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
+ { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
+ { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
+ { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
{ (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
};
struct vnodeopv_desc hfs_fifoop_opv_desc =