+ if ((myPerms & flags) != flags) {
+ myResult = 0;
+ myErr = EACCES;
+ goto ExitThisRoutine; /* no access */
+ }
+
+ /* up the hierarchy we go */
+ thisNodeID = catkey.hfsPlus.parentID;
+ }
+ }
+
+ /* if here, we have access to this node */
+ myResult = 1;
+
+ ExitThisRoutine:
+ if (parents && myErr == 0 && scope_index == -1) {
+ myErr = ESRCH;
+ }
+
+ if (myErr) {
+ myResult = 0;
+ }
+ *err = myErr;
+
+ /* cache the parent directory(ies) */
+ for (i = 0; i < ids_to_cache; i++) {
+ if (myErr == 0 && parents && (scope_idx_start == -1 || i > scope_idx_start)) {
+ add_node(cache, -1, parent_ids[i], ESRCH);
+ } else {
+ add_node(cache, -1, parent_ids[i], myErr);
+ }
+ }
+
+ return (myResult);
+}
+
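+/*
+ * Check access to each of a user-supplied list of file IDs on behalf of
+ * the bulk-access ioctls. One result per file ID is written back to the
+ * caller's access array; a zero entry means access is granted.
+ */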
+static int
+do_bulk_access_check(struct hfsmount *hfsmp, struct vnode *vp,
+ struct vnop_ioctl_args *ap, int arg_size, vfs_context_t context)
+{
+ boolean_t is64bit;
+
+ /*
+ * NOTE: on entry, the vnode has an io_ref. In case this vnode
+ * happens to be in our list of file_ids, we note it and avoid
+ * calling hfs_chashget_nowait() on that id, as that would cause
+ * a "locking against myself" panic.
+ */
+ Boolean check_leaf = true;
+
+ struct user64_ext_access_t *user_access_structp;
+ struct user64_ext_access_t tmp_user_access;
+ struct access_cache cache;
+
+ int error = 0, prev_parent_check_ok = 1;
+ unsigned int i;
+
+ short flags;
+ unsigned int num_files = 0;
+ int map_size = 0;
+ int num_parents = 0;
+ int *file_ids = NULL;
+ short *access = NULL;
+ char *bitmap = NULL;
+ cnid_t *parents = NULL;
+ int leaf_index;
+
+ cnid_t cnid;
+ cnid_t prevParent_cnid = 0;
+ unsigned int myPerms;
+ short myaccess = 0;
+ struct cat_attr cnattr;
+ CatalogKey catkey;
+ struct cnode *skip_cp = VTOC(vp);
+ kauth_cred_t cred = vfs_context_ucred(context);
+ proc_t p = vfs_context_proc(context);
+
+ is64bit = proc_is64bit(p);
+
+ /* initialize the local cache and buffers */
+ cache.numcached = 0;
+ cache.cachehits = 0;
+ cache.lookups = 0;
+ cache.acache = NULL;
+ cache.haveaccess = NULL;
+
+ /* struct copyin done during dispatch... need to copy file_id array separately */
+ if (ap->a_data == NULL) {
+ error = EINVAL;
+ goto err_exit_bulk_access;
+ }
+
+ if (is64bit) {
+ if (arg_size != sizeof(struct user64_ext_access_t)) {
+ error = EINVAL;
+ goto err_exit_bulk_access;
+ }
+
+ user_access_structp = (struct user64_ext_access_t *)ap->a_data;
+
+ } else if (arg_size == sizeof(struct user32_access_t)) {
+ struct user32_access_t *accessp = (struct user32_access_t *)ap->a_data;
+
+ // convert an old style bulk-access struct to the new style
+ tmp_user_access.flags = accessp->flags;
+ tmp_user_access.num_files = accessp->num_files;
+ tmp_user_access.map_size = 0;
+ tmp_user_access.file_ids = CAST_USER_ADDR_T(accessp->file_ids);
+ tmp_user_access.bitmap = USER_ADDR_NULL;
+ tmp_user_access.access = CAST_USER_ADDR_T(accessp->access);
+ tmp_user_access.num_parents = 0;
+ user_access_structp = &tmp_user_access;
+
+ } else if (arg_size == sizeof(struct user32_ext_access_t)) {
+ struct user32_ext_access_t *accessp = (struct user32_ext_access_t *)ap->a_data;
+
+ // up-cast from a 32-bit version of the struct
+ tmp_user_access.flags = accessp->flags;
+ tmp_user_access.num_files = accessp->num_files;
+ tmp_user_access.map_size = accessp->map_size;
+ tmp_user_access.num_parents = accessp->num_parents;
+
+ tmp_user_access.file_ids = CAST_USER_ADDR_T(accessp->file_ids);
+ tmp_user_access.bitmap = CAST_USER_ADDR_T(accessp->bitmap);
+ tmp_user_access.access = CAST_USER_ADDR_T(accessp->access);
+ tmp_user_access.parents = CAST_USER_ADDR_T(accessp->parents);
+
+ user_access_structp = &tmp_user_access;
+ } else {
+ error = EINVAL;
+ goto err_exit_bulk_access;
+ }
+
+ map_size = user_access_structp->map_size;
+ num_files = user_access_structp->num_files;
+ num_parents = user_access_structp->num_parents;
+
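+ /* sanity-check the counts; note a zero num_files falls out through err_exit_bulk_access with error still 0 */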
+ if (num_files < 1) {
+ goto err_exit_bulk_access;
+ }
+ if (num_files > 1024) {
+ error = EINVAL;
+ goto err_exit_bulk_access;
+ }
+
+ if (num_parents > 1024) {
+ error = EINVAL;
+ goto err_exit_bulk_access;
+ }
+
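+ /* allocate the request buffers and the lookup cache; counts are bounded by the checks above */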
+ file_ids = (int *) kalloc(sizeof(int) * num_files);
+ access = (short *) kalloc(sizeof(short) * num_files);
+ if (map_size) {
+ bitmap = (char *) kalloc(sizeof(char) * map_size);
+ }
+
+ if (num_parents) {
+ parents = (cnid_t *) kalloc(sizeof(cnid_t) * num_parents);
+ }
+
+ cache.acache = (unsigned int *) kalloc(sizeof(int) * NUM_CACHE_ENTRIES);
+ cache.haveaccess = (unsigned char *) kalloc(sizeof(unsigned char) * NUM_CACHE_ENTRIES);
+
+ if (file_ids == NULL || access == NULL || (map_size != 0 && bitmap == NULL) || (num_parents != 0 && parents == NULL) || cache.acache == NULL || cache.haveaccess == NULL) {
+ if (file_ids) {
+ kfree(file_ids, sizeof(int) * num_files);
+ }
+ if (bitmap) {
+ kfree(bitmap, sizeof(char) * map_size);
+ }
+ if (access) {
+ kfree(access, sizeof(short) * num_files);
+ }
+ if (cache.acache) {
+ kfree(cache.acache, sizeof(int) * NUM_CACHE_ENTRIES);
+ }
+ if (cache.haveaccess) {
+ kfree(cache.haveaccess, sizeof(unsigned char) * NUM_CACHE_ENTRIES);
+ }
+ if (parents) {
+ kfree(parents, sizeof(cnid_t) * num_parents);
+ }
+ return ENOMEM;
+ }
+
+ // make sure the bitmap is zeroed out...
+ if (bitmap) {
+ bzero(bitmap, (sizeof(char) * map_size));
+ }
+
+ if ((error = copyin(user_access_structp->file_ids, (caddr_t)file_ids,
+ num_files * sizeof(int)))) {
+ goto err_exit_bulk_access;
+ }
+
+ if (num_parents) {
+ if ((error = copyin(user_access_structp->parents, (caddr_t)parents,
+ num_parents * sizeof(cnid_t)))) {
+ goto err_exit_bulk_access;
+ }
+ }
+
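+ /* default to a read-access check if the caller didn't specify an access mode */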
+ flags = user_access_structp->flags;
+ if ((flags & (F_OK | R_OK | W_OK | X_OK)) == 0) {
+ flags = R_OK;
+ }
+
+ /* check if we've been passed leaf node ids or parent ids */
+ if (flags & PARENT_IDS_FLAG) {
+ check_leaf = false;
+ }
+
+ /* Check access to each file_id passed in */
+ for (i = 0; i < num_files; i++) {
+ leaf_index = -1;
+ cnid = (cnid_t) file_ids[i];
+
+ /* when the check isn't scoped to parent dirs, root always has access */
+ if ((!parents) && (!suser(cred, NULL))) {
+ access[i] = 0;
+ continue;
+ }
+
+ if (check_leaf) {
+ /* do the lookup (checks the cnode hash, then the catalog) */
+ error = do_attr_lookup(hfsmp, &cache, cnid, skip_cp, &catkey, &cnattr);
+ if (error) {
+ access[i] = (short) error;
+ continue;
+ }
+
+ if (parents) {
+ // Check if the leaf matches one of the parent scopes
+ leaf_index = cache_binSearch(parents, num_parents-1, cnid, NULL);
+ if (leaf_index >= 0 && parents[leaf_index] == cnid)
+ prev_parent_check_ok = 0;
+ else if (leaf_index >= 0)
+ prev_parent_check_ok = 1;
+ }
+
+ // if the file has ACLs, do the full permission check
+ if ((cnattr.ca_recflags & kHFSHasSecurityMask) != 0) {
+ struct vnode *cvp;
+ int myErr = 0;
+ /* get the vnode for this cnid */
+ myErr = hfs_vget(hfsmp, cnid, &cvp, 0, 0);
+ if (myErr) {
+ access[i] = myErr;
+ continue;
+ }
+
+ hfs_unlock(VTOC(cvp));
+
+ if (vnode_vtype(cvp) == VDIR) {
+ myErr = vnode_authorize(cvp, NULL, (KAUTH_VNODE_SEARCH | KAUTH_VNODE_LIST_DIRECTORY), context);
+ } else {
+ myErr = vnode_authorize(cvp, NULL, KAUTH_VNODE_READ_DATA, context);
+ }
+
+ vnode_put(cvp);
+ if (myErr) {
+ access[i] = myErr;
+ continue;
+ }
+ } else {
+ /* before calling CheckAccess(), check the target file for read access */
+ myPerms = DerivePermissionSummary(cnattr.ca_uid, cnattr.ca_gid,
+ cnattr.ca_mode, hfsmp->hfs_mp, cred, p);
+
+ /* fail fast if no access */
+ if ((myPerms & flags) == 0) {
+ access[i] = EACCES;
+ continue;
+ }
+ }
+ } else {
+ /* we were passed an array of parent ids */
+ catkey.hfsPlus.parentID = cnid;
+ }
+
+ /* if the previous entry had the same parent and had access, we're done */
+ if (i > 0 && catkey.hfsPlus.parentID == prevParent_cnid && access[i-1] == 0 && prev_parent_check_ok) {
+ cache.cachehits++;
+ access[i] = 0;
+ continue;
+ }
+
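+ /* walk up the parent hierarchy, consulting (and filling) the per-call access cache */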
+ myaccess = do_access_check(hfsmp, &error, &cache, catkey.hfsPlus.parentID,
+ skip_cp, p, cred, context, bitmap, map_size, parents, num_parents);
+
+ if (myaccess || (error == ESRCH && leaf_index != -1)) {
+ access[i] = 0; // have access.. no errors to report
+ } else {
+ access[i] = (error != 0 ? (short) error : EACCES);
+ }
+
+ prevParent_cnid = catkey.hfsPlus.parentID;
+ }
+
+ /* copyout the access array */
+ if ((error = copyout((caddr_t)access, user_access_structp->access,
+ num_files * sizeof (short)))) {
+ goto err_exit_bulk_access;
+ }
+ if (map_size && bitmap) {
+ if ((error = copyout((caddr_t)bitmap, user_access_structp->bitmap,
+ map_size * sizeof (char)))) {
+ goto err_exit_bulk_access;
+ }
+ }
+
+ err_exit_bulk_access:
+
+ if (file_ids)
+ kfree(file_ids, sizeof(int) * num_files);
+ if (parents)
+ kfree(parents, sizeof(cnid_t) * num_parents);
+ if (bitmap)
+ kfree(bitmap, sizeof(char) * map_size);
+ if (access)
+ kfree(access, sizeof(short) * num_files);
+ if (cache.acache)
+ kfree(cache.acache, sizeof(int) * NUM_CACHE_ENTRIES);
+ if (cache.haveaccess)
+ kfree(cache.haveaccess, sizeof(unsigned char) * NUM_CACHE_ENTRIES);
+
+ return (error);
+}
+
+
+/* end "bulk-access" support */
+
+
+/*
+ * Callback for use with freeze ioctl.
+ */
+static int
+hfs_freezewrite_callback(struct vnode *vp, __unused void *cargs)
+{
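+ /* block until all in-flight writes on this vnode have drained */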
+ vnode_waitforwrites(vp, 0, 0, 0, "hfs freeze");
+
+ return 0;
+}
+
+/*
+ * Control filesystem operating characteristics.
+ */
+int
+hfs_vnop_ioctl( struct vnop_ioctl_args /* {
+ vnode_t a_vp;
+ int a_command;
+ caddr_t a_data;
+ int a_fflag;
+ vfs_context_t a_context;
+ } */ *ap)
+{
+ struct vnode * vp = ap->a_vp;
+ struct hfsmount *hfsmp = VTOHFS(vp);
+ vfs_context_t context = ap->a_context;
+ kauth_cred_t cred = vfs_context_ucred(context);
+ proc_t p = vfs_context_proc(context);
+ struct vfsstatfs *vfsp;
+ boolean_t is64bit;
+ off_t jnl_start, jnl_size;
+ struct hfs_journal_info *jip;
+#if HFS_COMPRESSION
+ int compressed = 0;
+ off_t uncompressed_size = -1;
+ int decmpfs_error = 0;
+
+ if (ap->a_command == F_RDADVISE) {
+ /* we need to inspect the decmpfs state of the file as early as possible */
+ compressed = hfs_file_is_compressed(VTOC(vp), 0);
+ if (compressed) {
+ if (VNODE_IS_RSRC(vp)) {
+ /* if this is the resource fork, treat it as if it were empty */
+ uncompressed_size = 0;
+ } else {
+ decmpfs_error = hfs_uncompressed_size_of_compressed_file(NULL, vp, 0, &uncompressed_size, 0);
+ if (decmpfs_error != 0) {
+ /* failed to get the uncompressed size; we'll check for this later */
+ uncompressed_size = -1;
+ }
+ }
+ }
+ }
+#endif /* HFS_COMPRESSION */
+
+ is64bit = proc_is64bit(p);
+
+#if CONFIG_PROTECT
+ {
+ int error = 0;
+ if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
+ return error;
+ }
+ }
+#endif /* CONFIG_PROTECT */
+
+ switch (ap->a_command) {
+
+ case HFS_GETPATH:
+ {
+ struct vnode *file_vp;
+ cnid_t cnid;
+ int outlen;
+ char *bufptr;
+ int error;
+ int flags = 0;
+
+ /* Caller must be owner of file system. */
+ vfsp = vfs_statfs(HFSTOVFS(hfsmp));
+ if (suser(cred, NULL) &&
+ kauth_cred_getuid(cred) != vfsp->f_owner) {
+ return (EACCES);
+ }
+ /* Target vnode must be file system's root. */
+ if (!vnode_isvroot(vp)) {
+ return (EINVAL);
+ }
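+ /* a_data carries the target cnid as a decimal string on entry and receives the built path on return */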
+ bufptr = (char *)ap->a_data;
+ cnid = strtoul(bufptr, NULL, 10);
+ if (ap->a_fflag & HFS_GETPATH_VOLUME_RELATIVE) {
+ flags |= BUILDPATH_VOLUME_RELATIVE;
+ }
+
+ /* We need to call hfs_vfs_vget to leverage the code that will
+ * fix the origin list for us if needed, as opposed to calling
+ * hfs_vget, since we will need the parent for the build_path()
+ * call below.
+ */
+
+ if ((error = hfs_vfs_vget(HFSTOVFS(hfsmp), cnid, &file_vp, context))) {
+ return (error);
+ }
+ error = build_path(file_vp, bufptr, sizeof(pathname_t), &outlen, flags, context);
+ vnode_put(file_vp);
+
+ return (error);
+ }
+
+ case HFS_GET_WRITE_GEN_COUNTER:
+ {
+ struct cnode *cp = NULL;
+ int error;
+ u_int32_t *counter = (u_int32_t *)ap->a_data;
+
+ cp = VTOC(vp);
+
+ if (!vnode_isdir(vp) && !(vnode_isreg(vp)) &&
+ !(vnode_islnk(vp))) {
+ error = EBADF;
+ *counter = 0;
+ return error;
+ }
+
+ error = hfs_lock (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
+ if (error == 0) {
+ struct ubc_info *uip;
+ int is_mapped_writable = 0;
+
+ if (UBCINFOEXISTS(vp)) {
+ uip = vp->v_ubcinfo;
+ if ((uip->ui_flags & UI_ISMAPPED) && (uip->ui_flags & UI_MAPPEDWRITE)) {
+ is_mapped_writable = 1;
+ }
+ }
+
+ if (S_ISREG(cp->c_attr.ca_mode) || S_ISLNK(cp->c_attr.ca_mode)) {
+ uint32_t gcount = hfs_get_gencount(cp);
+ //
+ // Even though we return EBUSY for files that are mmap'ed
+ // we also want to bump the value so that the write-gen
+ // counter will always be different once the file is unmapped
+ // (since the file may be unmapped but the pageouts have not
+ // yet happened).
+ //
+ if (is_mapped_writable) {
+ hfs_incr_gencount (cp);
+ gcount = hfs_get_gencount(cp);
+ }
+
+ *counter = gcount;
+ } else if (S_ISDIR(cp->c_attr.ca_mode)) {
+ *counter = hfs_get_gencount(cp);
+ } else {
+ /* not a file or dir? silently return */
+ *counter = 0;
+ }
+ hfs_unlock (cp);
+
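+ /* a live writable mapping means more dirty data may arrive, so report EBUSY to the caller */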
+ if (is_mapped_writable) {
+ error = EBUSY;
+ }
+ }
+
+ return error;
+ }
+
+ case HFS_GET_DOCUMENT_ID:
+ {
+ struct cnode *cp = NULL;
+ int error=0;
+ u_int32_t *document_id = (u_int32_t *)ap->a_data;
+
+ cp = VTOC(vp);
+
+ if (cp->c_desc.cd_cnid == kHFSRootFolderID) {
+ // the root-dir always has document id '2' (aka kHFSRootFolderID)
+ *document_id = kHFSRootFolderID;
+
+ } else if ((S_ISDIR(cp->c_attr.ca_mode) || S_ISREG(cp->c_attr.ca_mode) || S_ISLNK(cp->c_attr.ca_mode))) {
+ int mark_it = 0;
+ uint32_t tmp_doc_id;
+
+ //
+ // we can use the FndrExtendedFileInfo because the doc-id is the first
+ // field in both it and the FndrExtendedDirInfo struct, whose layout is
+ // fixed and cannot change
+ //
+ struct FndrExtendedFileInfo *extinfo = (struct FndrExtendedFileInfo *)((u_int8_t*)cp->c_finderinfo + 16);
+
+ hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
+
+ //
+ // if the cnode isn't UF_TRACKED and the doc-id-allocate flag isn't set
+ // then just return a zero for the doc-id
+ //
+ if (!(cp->c_bsdflags & UF_TRACKED) && !(ap->a_fflag & HFS_DOCUMENT_ID_ALLOCATE)) {
+ *document_id = 0;
+ hfs_unlock(cp);
+ return 0;
+ }
+
+ //
+ // if the cnode isn't UF_TRACKED and the doc-id-allocate flag IS set,
+ // then set mark_it so we know to set the UF_TRACKED flag once the
+ // cnode is locked.
+ //
+ if (!(cp->c_bsdflags & UF_TRACKED) && (ap->a_fflag & HFS_DOCUMENT_ID_ALLOCATE)) {
+ mark_it = 1;
+ }
+
+ tmp_doc_id = extinfo->document_id; // get a copy of this
+
+ hfs_unlock(cp); // in case we have to call hfs_generate_document_id()
+
+ //
+ // If the document_id isn't set, get a new one and then set it.
+ // Note: we first get the document id, then lock the cnode to
+ // avoid any deadlock potential between cp and the root vnode.
+ //
+ uint32_t new_id;
+ if (tmp_doc_id == 0 && (error = hfs_generate_document_id(hfsmp, &new_id)) == 0) {
+
+ if ((error = hfs_lock (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) == 0) {
+ extinfo->document_id = tmp_doc_id = new_id;
+ //printf("ASSIGNING: doc-id %d to ino %d\n", extinfo->document_id, cp->c_fileid);
+
+ if (mark_it) {
+ cp->c_bsdflags |= UF_TRACKED;
+ }
+
+ // mark the cnode dirty
+ cp->c_flag |= C_MODIFIED | C_FORCEUPDATE;
+
+ int lockflags;
+ if ((error = hfs_start_transaction(hfsmp)) == 0) {
+ lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
+
+ (void) cat_update(hfsmp, &cp->c_desc, &cp->c_attr, NULL, NULL);
+
+ hfs_systemfile_unlock (hfsmp, lockflags);
+ (void) hfs_end_transaction(hfsmp);
+ }
+
+#if CONFIG_FSE
+ add_fsevent(FSE_DOCID_CHANGED, context,
+ FSE_ARG_DEV, hfsmp->hfs_raw_dev,
+ FSE_ARG_INO, (ino64_t)0, // src inode #
+ FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
+ FSE_ARG_INT32, extinfo->document_id,
+ FSE_ARG_DONE);
+
+ hfs_unlock (cp); // so we can send the STAT_CHANGED event without deadlocking
+
+ if (need_fsevent(FSE_STAT_CHANGED, vp)) {
+ add_fsevent(FSE_STAT_CHANGED, context, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
+ }
+#else
+ hfs_unlock (cp);
+#endif
+ }
+ }
+
+ *document_id = tmp_doc_id;
+ } else {
+ *document_id = 0;
+ }
+
+ return error;
+ }
+
+ case HFS_TRANSFER_DOCUMENT_ID:
+ {
+ struct cnode *cp = NULL;
+ int error;
+ u_int32_t to_fd = *(u_int32_t *)ap->a_data;
+ struct fileproc *to_fp;
+ struct vnode *to_vp;
+ struct cnode *to_cp;
+
+ cp = VTOC(vp);
+
+ if ((error = fp_getfvp(p, to_fd, &to_fp, &to_vp)) != 0) {
+ //printf("could not get the vnode for fd %d (err %d)\n", to_fd, error);
+ return error;
+ }
+ if ( (error = vnode_getwithref(to_vp)) ) {
+ file_drop(to_fd);
+ return error;
+ }
+
+ if (VTOHFS(to_vp) != hfsmp) {
+ error = EXDEV;
+ goto transfer_cleanup;
+ }
+
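+ /* take both cnode locks so the document id moves from source to destination atomically */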
+ int need_unlock = 1;
+ to_cp = VTOC(to_vp);
+ error = hfs_lockpair(cp, to_cp, HFS_EXCLUSIVE_LOCK);
+ if (error != 0) {
+ //printf("could not lock the pair of cnodes (error %d)\n", error);
+ goto transfer_cleanup;
+ }
+
+ if (!(cp->c_bsdflags & UF_TRACKED)) {
+ error = EINVAL;
+ } else if (to_cp->c_bsdflags & UF_TRACKED) {
+ //
+ // if the destination is already tracked, return an error
+ // as otherwise it's a silent deletion of the target's
+ // document-id
+ //
+ error = EEXIST;
+ } else if (S_ISDIR(cp->c_attr.ca_mode) || S_ISREG(cp->c_attr.ca_mode) || S_ISLNK(cp->c_attr.ca_mode)) {
+ //
+ // we can use the FndrExtendedFileInfo because the doc-id is the first
+ // field in both it and the FndrExtendedDirInfo struct, whose layout is
+ // fixed and cannot change
+ //
+ struct FndrExtendedFileInfo *f_extinfo = (struct FndrExtendedFileInfo *)((u_int8_t*)cp->c_finderinfo + 16);
+ struct FndrExtendedFileInfo *to_extinfo = (struct FndrExtendedFileInfo *)((u_int8_t*)to_cp->c_finderinfo + 16);
+
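+ /* the source has no document id yet; generate one so there is something to transfer */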
+ if (f_extinfo->document_id == 0) {
+ uint32_t new_id;
+
+ hfs_unlockpair(cp, to_cp); // have to unlock to be able to get a new-id
+
+ if ((error = hfs_generate_document_id(hfsmp, &new_id)) == 0) {
+ //
+ // re-lock the pair now that we have the document-id
+ //
+ hfs_lockpair(cp, to_cp, HFS_EXCLUSIVE_LOCK);
+ f_extinfo->document_id = new_id;
+ } else {
+ goto transfer_cleanup;
+ }
+ }
+
+ to_extinfo->document_id = f_extinfo->document_id;
+ f_extinfo->document_id = 0;
+ //printf("TRANSFERRING: doc-id %d from ino %d to ino %d\n", to_extinfo->document_id, cp->c_fileid, to_cp->c_fileid);
+
+ // make sure the destination is also UF_TRACKED
+ to_cp->c_bsdflags |= UF_TRACKED;
+ cp->c_bsdflags &= ~UF_TRACKED;
+
+ // mark the cnodes dirty
+ cp->c_flag |= C_MODIFIED | C_FORCEUPDATE;
+ to_cp->c_flag |= C_MODIFIED | C_FORCEUPDATE;
+
+ int lockflags;
+ if ((error = hfs_start_transaction(hfsmp)) == 0) {
+
+ lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
+
+ (void) cat_update(hfsmp, &cp->c_desc, &cp->c_attr, NULL, NULL);
+ (void) cat_update(hfsmp, &to_cp->c_desc, &to_cp->c_attr, NULL, NULL);
+
+ hfs_systemfile_unlock (hfsmp, lockflags);
+ (void) hfs_end_transaction(hfsmp);
+ }
+
+#if CONFIG_FSE
+ add_fsevent(FSE_DOCID_CHANGED, context,
+ FSE_ARG_DEV, hfsmp->hfs_raw_dev,
+ FSE_ARG_INO, (ino64_t)cp->c_fileid, // src inode #
+ FSE_ARG_INO, (ino64_t)to_cp->c_fileid, // dst inode #
+ FSE_ARG_INT32, to_extinfo->document_id,
+ FSE_ARG_DONE);
+
+ hfs_unlockpair(cp, to_cp); // unlock this so we can send the fsevents
+ need_unlock = 0;
+
+ if (need_fsevent(FSE_STAT_CHANGED, vp)) {
+ add_fsevent(FSE_STAT_CHANGED, context, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
+ }
+ if (need_fsevent(FSE_STAT_CHANGED, to_vp)) {
+ add_fsevent(FSE_STAT_CHANGED, context, FSE_ARG_VNODE, to_vp, FSE_ARG_DONE);
+ }
+#else
+ hfs_unlockpair(cp, to_cp); // unlock this so we can send the fsevents
+ need_unlock = 0;
+#endif
+ }
+
+ if (need_unlock) {
+ hfs_unlockpair(cp, to_cp);
+ }
+
+ transfer_cleanup:
+ vnode_put(to_vp);
+ file_drop(to_fd);
+
+ return error;
+ }
+
+ case HFS_PREV_LINK:
+ case HFS_NEXT_LINK:
+ {
+ cnid_t linkfileid;
+ cnid_t nextlinkid;
+ cnid_t prevlinkid;
+ int error;
+
+ /* Caller must be owner of file system. */
+ vfsp = vfs_statfs(HFSTOVFS(hfsmp));
+ if (suser(cred, NULL) &&
+ kauth_cred_getuid(cred) != vfsp->f_owner) {
+ return (EACCES);
+ }
+ /* Target vnode must be file system's root. */
+ if (!vnode_isvroot(vp)) {
+ return (EINVAL);
+ }
+ linkfileid = *(cnid_t *)ap->a_data;
+ if (linkfileid < kHFSFirstUserCatalogNodeID) {
+ return (EINVAL);
+ }
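+ /* fetch this link's neighbors in the hardlink chain */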
+ if ((error = hfs_lookup_siblinglinks(hfsmp, linkfileid, &prevlinkid, &nextlinkid))) {
+ return (error);
+ }
+ if (ap->a_command == HFS_NEXT_LINK) {
+ *(cnid_t *)ap->a_data = nextlinkid;
+ } else {
+ *(cnid_t *)ap->a_data = prevlinkid;
+ }
+ return (0);
+ }
+
+ case HFS_RESIZE_PROGRESS: {
+
+ vfsp = vfs_statfs(HFSTOVFS(hfsmp));
+ if (suser(cred, NULL) &&
+ kauth_cred_getuid(cred) != vfsp->f_owner) {
+ return (EACCES); /* must be owner of file system */
+ }
+ if (!vnode_isvroot(vp)) {
+ return (EINVAL);
+ }
+ /* file system must not be mounted read-only */
+ if (hfsmp->hfs_flags & HFS_READ_ONLY) {
+ return (EROFS);
+ }
+
+ return hfs_resize_progress(hfsmp, (u_int32_t *)ap->a_data);
+ }
+
+ case HFS_RESIZE_VOLUME: {
+ u_int64_t newsize;
+ u_int64_t cursize;
+
+ vfsp = vfs_statfs(HFSTOVFS(hfsmp));
+ if (suser(cred, NULL) &&
+ kauth_cred_getuid(cred) != vfsp->f_owner) {
+ return (EACCES); /* must be owner of file system */
+ }
+ if (!vnode_isvroot(vp)) {
+ return (EINVAL);
+ }
+
+ /* filesystem must not be mounted read only */
+ if (hfsmp->hfs_flags & HFS_READ_ONLY) {
+ return (EROFS);