+ return index;
+}
+
+
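+/*
+ * Look up parent_id in the access cache. Returns 1 and sets *indexp to the
+ * matching slot if the id is cached; otherwise returns 0 and sets *indexp to
+ * the slot where a new entry should be inserted.
+ */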
+static int
+lookup_bucket(struct access_cache *cache, int *indexp, cnid_t parent_id)
+{
+ unsigned int hi;
+ int matches = 0;
+ int index, no_match_index;
+
+ if (cache->numcached == 0) {
+ *indexp = 0;
+ return 0; // table is empty, so insert at index=0 and report no match
+ }
+
+ if (cache->numcached > NUM_CACHE_ENTRIES) {
+ cache->numcached = NUM_CACHE_ENTRIES;
+ }
+
+ hi = cache->numcached - 1;
+
+ index = cache_binSearch(cache->acache, hi, parent_id, &no_match_index);
+
+ /* if no existing entry found, find index for new one */
+ if (index == -1) {
+ index = no_match_index;
+ matches = 0;
+ } else {
+ matches = 1;
+ }
+
+ *indexp = index;
+ return matches;
+}
+
+/*
+ * Add a node to the access_cache at the given index (or do a lookup first
+ * to find the index if -1 is passed in). We currently do a replace rather
+ * than an insert if the cache is full.
+ */
+static void
+add_node(struct access_cache *cache, int index, cnid_t nodeID, int access)
+{
+ int lookup_index = -1;
+
+ /* need to do a lookup first if -1 passed for index */
+ if (index == -1) {
+ if (lookup_bucket(cache, &lookup_index, nodeID)) {
+ if (cache->haveaccess[lookup_index] != access && cache->haveaccess[lookup_index] == ESRCH) {
+ // only update an entry if the previous access was ESRCH (i.e. a scope checking error)
+ cache->haveaccess[lookup_index] = access;
+ }
+
+ /* mission accomplished */
+ return;
+ } else {
+ index = lookup_index;
+ }
+
+ }
+
+ /* if the cache is full, do a replace rather than an insert */
+ if (cache->numcached >= NUM_CACHE_ENTRIES) {
+ cache->numcached = NUM_CACHE_ENTRIES-1;
+
+ if (index > cache->numcached) {
+ index = cache->numcached;
+ }
+ }
+
+ if (index < cache->numcached && index < NUM_CACHE_ENTRIES && nodeID > cache->acache[index]) {
+ index++;
+ }
+
+ if (index >= 0 && index < cache->numcached) {
+ /* only do bcopy if we're inserting */
+ bcopy( cache->acache+index, cache->acache+(index+1), (cache->numcached - index)*sizeof(int) );
+ bcopy( cache->haveaccess+index, cache->haveaccess+(index+1), (cache->numcached - index)*sizeof(unsigned char) );
+ }
+
+ cache->acache[index] = nodeID;
+ cache->haveaccess[index] = access;
+ cache->numcached++;
+}
+
+
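+/* Attributes captured from an in-core cnode by snoop_callback */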
+struct cinfo {
+ uid_t uid;
+ gid_t gid;
+ mode_t mode;
+ cnid_t parentcnid;
+ u_int16_t recflags;
+};
+
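+/*
+ * hfs_chash_snoop() callback: copy the cnode's ownership, mode, record flags,
+ * and parent cnid into the caller's struct cinfo.
+ */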
+static int
+snoop_callback(const cnode_t *cp, void *arg)
+{
+ struct cinfo *cip = arg;
+
+ cip->uid = cp->c_uid;
+ cip->gid = cp->c_gid;
+ cip->mode = cp->c_mode;
+ cip->parentcnid = cp->c_parentcnid;
+ cip->recflags = cp->c_attr.ca_recflags;
+
+ return (0);
+}
+
+/*
+ * Look up the cnid's attr info (uid, gid, and mode) as well as its parent id. If the item
+ * isn't in core, then go to the catalog.
+ */
+static int
+do_attr_lookup(struct hfsmount *hfsmp, struct access_cache *cache, cnid_t cnid,
+ struct cnode *skip_cp, CatalogKey *keyp, struct cat_attr *cnattrp)
+{
+ int error = 0;
+
+ /* if this id matches the one the fsctl was called with, skip the lookup */
+ if (cnid == skip_cp->c_cnid) {
+ cnattrp->ca_uid = skip_cp->c_uid;
+ cnattrp->ca_gid = skip_cp->c_gid;
+ cnattrp->ca_mode = skip_cp->c_mode;
+ cnattrp->ca_recflags = skip_cp->c_attr.ca_recflags;
+ keyp->hfsPlus.parentID = skip_cp->c_parentcnid;
+ } else {
+ struct cinfo c_info;
+
+ /* otherwise, check the cnode hash in case the file/dir is in core */
+ error = hfs_chash_snoop(hfsmp, cnid, 0, snoop_callback, &c_info);
+
+ if (error == EACCES) {
+ // File is deleted
+ return ENOENT;
+ } else if (!error) {
+ cnattrp->ca_uid = c_info.uid;
+ cnattrp->ca_gid = c_info.gid;
+ cnattrp->ca_mode = c_info.mode;
+ cnattrp->ca_recflags = c_info.recflags;
+ keyp->hfsPlus.parentID = c_info.parentcnid;
+ } else {
+ int lockflags;
+
+ if (throttle_io_will_be_throttled(-1, HFSTOVFS(hfsmp)))
+ throttle_lowpri_io(1);
+
+ lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
+
+ /* lookup this cnid in the catalog */
+ error = cat_getkeyplusattr(hfsmp, cnid, keyp, cnattrp);
+
+ hfs_systemfile_unlock(hfsmp, lockflags);
+
+ cache->lookups++;
+ }
+ }
+
+ return (error);
+}
+
+
+/*
+ * Compute whether we have access to the given directory (nodeID) and all its parents. Cache
+ * up to CACHE_LEVELS as we progress towards the root.
+ */
+static int
+do_access_check(struct hfsmount *hfsmp, int *err, struct access_cache *cache, HFSCatalogNodeID nodeID,
+ struct cnode *skip_cp, struct proc *theProcPtr, kauth_cred_t myp_ucred,
+ struct vfs_context *my_context,
+ char *bitmap,
+ uint32_t map_size,
+ cnid_t* parents,
+ uint32_t num_parents)
+{
+ int myErr = 0;
+ int myResult;
+ HFSCatalogNodeID thisNodeID;
+ unsigned int myPerms;
+ struct cat_attr cnattr;
+ int cache_index = -1, scope_index = -1, scope_idx_start = -1;
+ CatalogKey catkey;
+
+ int i = 0, ids_to_cache = 0;
+ int parent_ids[CACHE_LEVELS];
+
+ thisNodeID = nodeID;
+ while (thisNodeID >= kRootDirID) {
+ myResult = 0; /* default to "no access" */
+
+ /* check the cache before resorting to hitting the catalog */
+
+ /* ASSUMPTION: access info of cached entries is "final"... i.e. no need
+ * to look any further after hitting cached dir */
+
+ if (lookup_bucket(cache, &cache_index, thisNodeID)) {
+ cache->cachehits++;
+ myErr = cache->haveaccess[cache_index];
+ if (scope_index != -1) {
+ if (myErr == ESRCH) {
+ myErr = 0;
+ }
+ } else {
+ scope_index = 0; // so we'll just use the cache result
+ scope_idx_start = ids_to_cache;
+ }
+ myResult = (myErr == 0) ? 1 : 0;
+ goto ExitThisRoutine;
+ }
+
+
+ if (parents) {
+ int tmp;
+ tmp = cache_binSearch(parents, num_parents-1, thisNodeID, NULL);
+ if (scope_index == -1)
+ scope_index = tmp;
+ if (tmp != -1 && scope_idx_start == -1 && ids_to_cache < CACHE_LEVELS) {
+ scope_idx_start = ids_to_cache;
+ }
+ }
+
+ /* remember which parents we want to cache */
+ if (ids_to_cache < CACHE_LEVELS) {
+ parent_ids[ids_to_cache] = thisNodeID;
+ ids_to_cache++;
+ }
+ // Inefficient (uses modulo); a proper hash function would avoid relying on the node id being "nice"...
+ if (bitmap && map_size) {
+ bitmap[(thisNodeID / 8) % map_size] |= (1 << (thisNodeID & 7));
+ }
+
+
+ /* do the lookup (checks the cnode hash, then the catalog) */
+ myErr = do_attr_lookup(hfsmp, cache, thisNodeID, skip_cp, &catkey, &cnattr);
+ if (myErr) {
+ goto ExitThisRoutine; /* no access */
+ }
+
+ /* Root always gets access. */
+ if (suser(myp_ucred, NULL) == 0) {
+ thisNodeID = catkey.hfsPlus.parentID;
+ myResult = 1;
+ continue;
+ }
+
+ // if the thing has ACLs, do the full permission check
+ if ((cnattr.ca_recflags & kHFSHasSecurityMask) != 0) {
+ struct vnode *vp;
+
+ /* get the vnode for this cnid */
+ myErr = hfs_vget(hfsmp, thisNodeID, &vp, 0, 0);
+ if ( myErr ) {
+ myResult = 0;
+ goto ExitThisRoutine;
+ }
+
+ thisNodeID = VTOC(vp)->c_parentcnid;
+
+ hfs_unlock(VTOC(vp));
+
+ if (vnode_vtype(vp) == VDIR) {
+ myErr = vnode_authorize(vp, NULL, (KAUTH_VNODE_SEARCH | KAUTH_VNODE_LIST_DIRECTORY), my_context);
+ } else {
+ myErr = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_DATA, my_context);
+ }
+
+ vnode_put(vp);
+ if (myErr) {
+ myResult = 0;
+ goto ExitThisRoutine;
+ }
+ } else {
+ unsigned int flags;
+ int mode = cnattr.ca_mode & S_IFMT;
+ myPerms = DerivePermissionSummary(cnattr.ca_uid, cnattr.ca_gid, cnattr.ca_mode, hfsmp->hfs_mp, myp_ucred, theProcPtr);
+
+ if (mode == S_IFDIR) {
+ flags = R_OK | X_OK;
+ } else {
+ flags = R_OK;
+ }
+ if ( (myPerms & flags) != flags) {
+ myResult = 0;
+ myErr = EACCES;
+ goto ExitThisRoutine; /* no access */
+ }
+
+ /* up the hierarchy we go */
+ thisNodeID = catkey.hfsPlus.parentID;
+ }
+ }
+
+ /* if here, we have access to this node */
+ myResult = 1;
+
+ ExitThisRoutine:
+ if (parents && myErr == 0 && scope_index == -1) {
+ myErr = ESRCH;
+ }
+
+ if (myErr) {
+ myResult = 0;
+ }
+ *err = myErr;
+
+ /* cache the parent directory(ies) */
+ for (i = 0; i < ids_to_cache; i++) {
+ if (myErr == 0 && parents && (scope_idx_start == -1 || i > scope_idx_start)) {
+ add_node(cache, -1, parent_ids[i], ESRCH);
+ } else {
+ add_node(cache, -1, parent_ids[i], myErr);
+ }
+ }
+
+ return (myResult);
+}
+
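+/*
+ * Handle the bulk-access fsctl: copy in an array of file IDs (or parent IDs,
+ * if PARENT_IDS_FLAG is set), check the requested access for each one, and
+ * copy the per-file results back out to user space.
+ */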
+static int
+do_bulk_access_check(struct hfsmount *hfsmp, struct vnode *vp,
+ struct vnop_ioctl_args *ap, int arg_size, vfs_context_t context)
+{
+ boolean_t is64bit;
+
+ /*
+ * NOTE: on entry, the vnode has an io_ref. In case this vnode
+ * happens to be in our list of file_ids, we'll note it and avoid
+ * calling hfs_chashget_nowait() on that id, as that will cause a
+ * "locking against myself" panic.
+ */
+ Boolean check_leaf = true;
+
+ struct user64_ext_access_t *user_access_structp;
+ struct user64_ext_access_t tmp_user_access;
+ struct access_cache cache;
+
+ int error = 0, prev_parent_check_ok = 1;
+ unsigned int i;
+
+ short flags;
+ unsigned int num_files = 0;
+ int map_size = 0;
+ int num_parents = 0;
+ int *file_ids = NULL;
+ short *access = NULL;
+ char *bitmap = NULL;
+ cnid_t *parents = NULL;
+ int leaf_index;
+
+ cnid_t cnid;
+ cnid_t prevParent_cnid = 0;
+ unsigned int myPerms;
+ short myaccess = 0;
+ struct cat_attr cnattr;
+ CatalogKey catkey;
+ struct cnode *skip_cp = VTOC(vp);
+ kauth_cred_t cred = vfs_context_ucred(context);
+ proc_t p = vfs_context_proc(context);
+
+ is64bit = proc_is64bit(p);
+
+ /* initialize the local cache and buffers */
+ cache.numcached = 0;
+ cache.cachehits = 0;
+ cache.lookups = 0;
+ cache.acache = NULL;
+ cache.haveaccess = NULL;
+
+ /* struct copyin done during dispatch... need to copy file_id array separately */
+ if (ap->a_data == NULL) {
+ error = EINVAL;
+ goto err_exit_bulk_access;
+ }
+
+ if (is64bit) {
+ if (arg_size != sizeof(struct user64_ext_access_t)) {
+ error = EINVAL;
+ goto err_exit_bulk_access;
+ }
+
+ user_access_structp = (struct user64_ext_access_t *)ap->a_data;
+
+ } else if (arg_size == sizeof(struct user32_access_t)) {
+ struct user32_access_t *accessp = (struct user32_access_t *)ap->a_data;
+
+ // convert an old style bulk-access struct to the new style
+ tmp_user_access.flags = accessp->flags;
+ tmp_user_access.num_files = accessp->num_files;
+ tmp_user_access.map_size = 0;
+ tmp_user_access.file_ids = CAST_USER_ADDR_T(accessp->file_ids);
+ tmp_user_access.bitmap = USER_ADDR_NULL;
+ tmp_user_access.access = CAST_USER_ADDR_T(accessp->access);
+ tmp_user_access.num_parents = 0;
+ user_access_structp = &tmp_user_access;
+
+ } else if (arg_size == sizeof(struct user32_ext_access_t)) {
+ struct user32_ext_access_t *accessp = (struct user32_ext_access_t *)ap->a_data;
+
+ // up-cast from a 32-bit version of the struct
+ tmp_user_access.flags = accessp->flags;
+ tmp_user_access.num_files = accessp->num_files;
+ tmp_user_access.map_size = accessp->map_size;
+ tmp_user_access.num_parents = accessp->num_parents;
+
+ tmp_user_access.file_ids = CAST_USER_ADDR_T(accessp->file_ids);
+ tmp_user_access.bitmap = CAST_USER_ADDR_T(accessp->bitmap);
+ tmp_user_access.access = CAST_USER_ADDR_T(accessp->access);
+ tmp_user_access.parents = CAST_USER_ADDR_T(accessp->parents);
+
+ user_access_structp = &tmp_user_access;
+ } else {
+ error = EINVAL;
+ goto err_exit_bulk_access;
+ }
+
+ map_size = user_access_structp->map_size;
+
+ num_files = user_access_structp->num_files;
+
+ num_parents = user_access_structp->num_parents;
+
+ if (num_files < 1) {
+ goto err_exit_bulk_access;
+ }
+ if (num_files > 1024) {
+ error = EINVAL;
+ goto err_exit_bulk_access;
+ }
+
+ if (num_parents > 1024) {
+ error = EINVAL;
+ goto err_exit_bulk_access;
+ }
+
+ file_ids = (int *) kalloc(sizeof(int) * num_files);
+ access = (short *) kalloc(sizeof(short) * num_files);
+ if (map_size) {
+ bitmap = (char *) kalloc(sizeof(char) * map_size);
+ }
+
+ if (num_parents) {
+ parents = (cnid_t *) kalloc(sizeof(cnid_t) * num_parents);
+ }
+
+ cache.acache = (unsigned int *) kalloc(sizeof(int) * NUM_CACHE_ENTRIES);
+ cache.haveaccess = (unsigned char *) kalloc(sizeof(unsigned char) * NUM_CACHE_ENTRIES);
+
+ if (file_ids == NULL || access == NULL || (map_size != 0 && bitmap == NULL) || cache.acache == NULL || cache.haveaccess == NULL) {
+ if (file_ids) {
+ kfree(file_ids, sizeof(int) * num_files);
+ }
+ if (bitmap) {
+ kfree(bitmap, sizeof(char) * map_size);
+ }
+ if (access) {
+ kfree(access, sizeof(short) * num_files);
+ }
+ if (cache.acache) {
+ kfree(cache.acache, sizeof(int) * NUM_CACHE_ENTRIES);
+ }
+ if (cache.haveaccess) {
+ kfree(cache.haveaccess, sizeof(unsigned char) * NUM_CACHE_ENTRIES);
+ }
+ if (parents) {
+ kfree(parents, sizeof(cnid_t) * num_parents);
+ }
+ return ENOMEM;
+ }
+
+ // make sure the bitmap is zeroed out...
+ if (bitmap) {
+ bzero(bitmap, (sizeof(char) * map_size));
+ }
+
+ if ((error = copyin(user_access_structp->file_ids, (caddr_t)file_ids,
+ num_files * sizeof(int)))) {
+ goto err_exit_bulk_access;
+ }
+
+ if (num_parents) {
+ if ((error = copyin(user_access_structp->parents, (caddr_t)parents,
+ num_parents * sizeof(cnid_t)))) {
+ goto err_exit_bulk_access;
+ }
+ }
+
+ flags = user_access_structp->flags;
+ if ((flags & (F_OK | R_OK | W_OK | X_OK)) == 0) {
+ flags = R_OK;
+ }
+
+ /* check if we've been passed leaf node ids or parent ids */
+ if (flags & PARENT_IDS_FLAG) {
+ check_leaf = false;
+ }
+
+ /* Check access to each file_id passed in */
+ for (i = 0; i < num_files; i++) {
+ leaf_index = -1;
+ cnid = (cnid_t) file_ids[i];
+
+ /* root always has access */
+ if ((!parents) && (!suser(cred, NULL))) {
+ access[i] = 0;
+ continue;
+ }
+
+ if (check_leaf) {
+ /* do the lookup (checks the cnode hash, then the catalog) */
+ error = do_attr_lookup(hfsmp, &cache, cnid, skip_cp, &catkey, &cnattr);
+ if (error) {
+ access[i] = (short) error;
+ continue;
+ }
+
+ if (parents) {
+ // Check if the leaf matches one of the parent scopes
+ leaf_index = cache_binSearch(parents, num_parents-1, cnid, NULL);
+ if (leaf_index >= 0 && parents[leaf_index] == cnid)
+ prev_parent_check_ok = 0;
+ else if (leaf_index >= 0)
+ prev_parent_check_ok = 1;
+ }
+
+ // if the thing has ACLs, do the full permission check
+ if ((cnattr.ca_recflags & kHFSHasSecurityMask) != 0) {
+ struct vnode *cvp;
+ int myErr = 0;
+ /* get the vnode for this cnid */
+ myErr = hfs_vget(hfsmp, cnid, &cvp, 0, 0);
+ if ( myErr ) {
+ access[i] = myErr;
+ continue;
+ }
+
+ hfs_unlock(VTOC(cvp));
+
+ if (vnode_vtype(cvp) == VDIR) {
+ myErr = vnode_authorize(cvp, NULL, (KAUTH_VNODE_SEARCH | KAUTH_VNODE_LIST_DIRECTORY), context);
+ } else {
+ myErr = vnode_authorize(cvp, NULL, KAUTH_VNODE_READ_DATA, context);
+ }
+
+ vnode_put(cvp);
+ if (myErr) {
+ access[i] = myErr;
+ continue;
+ }
+ } else {
+ /* before calling CheckAccess(), check the target file for read access */
+ myPerms = DerivePermissionSummary(cnattr.ca_uid, cnattr.ca_gid,
+ cnattr.ca_mode, hfsmp->hfs_mp, cred, p);
+
+ /* fail fast if no access */
+ if ((myPerms & flags) == 0) {
+ access[i] = EACCES;
+ continue;
+ }
+ }
+ } else {
+ /* we were passed an array of parent ids */
+ catkey.hfsPlus.parentID = cnid;
+ }
+
+ /* if the last guy had the same parent and had access, we're done */
+ if (i > 0 && catkey.hfsPlus.parentID == prevParent_cnid && access[i-1] == 0 && prev_parent_check_ok) {
+ cache.cachehits++;
+ access[i] = 0;
+ continue;
+ }
+
+ myaccess = do_access_check(hfsmp, &error, &cache, catkey.hfsPlus.parentID,
+ skip_cp, p, cred, context, bitmap, map_size, parents, num_parents);
+
+ if (myaccess || (error == ESRCH && leaf_index != -1)) {
+ access[i] = 0; // have access.. no errors to report
+ } else {
+ access[i] = (error != 0 ? (short) error : EACCES);
+ }
+
+ prevParent_cnid = catkey.hfsPlus.parentID;
+ }
+
+ /* copyout the access array */
+ if ((error = copyout((caddr_t)access, user_access_structp->access,
+ num_files * sizeof (short)))) {
+ goto err_exit_bulk_access;
+ }
+ if (map_size && bitmap) {
+ if ((error = copyout((caddr_t)bitmap, user_access_structp->bitmap,
+ map_size * sizeof (char)))) {
+ goto err_exit_bulk_access;
+ }
+ }
+
+
+ err_exit_bulk_access:
+
+ if (file_ids)
+ kfree(file_ids, sizeof(int) * num_files);
+ if (parents)
+ kfree(parents, sizeof(cnid_t) * num_parents);
+ if (bitmap)
+ kfree(bitmap, sizeof(char) * map_size);
+ if (access)
+ kfree(access, sizeof(short) * num_files);
+ if (cache.acache)
+ kfree(cache.acache, sizeof(int) * NUM_CACHE_ENTRIES);
+ if (cache.haveaccess)
+ kfree(cache.haveaccess, sizeof(unsigned char) * NUM_CACHE_ENTRIES);
+
+ return (error);
+}
+
+
+/* end "bulk-access" support */
+
+
+/*
+ * Control filesystem operating characteristics.
+ */
+int
+hfs_vnop_ioctl( struct vnop_ioctl_args /* {
+ vnode_t a_vp;
+ long a_command;
+ caddr_t a_data;
+ int a_fflag;
+ vfs_context_t a_context;
+ } */ *ap)
+{
+ struct vnode * vp = ap->a_vp;
+ struct hfsmount *hfsmp = VTOHFS(vp);
+ vfs_context_t context = ap->a_context;
+ kauth_cred_t cred = vfs_context_ucred(context);
+ proc_t p = vfs_context_proc(context);
+ struct vfsstatfs *vfsp;
+ boolean_t is64bit;
+ off_t jnl_start, jnl_size;
+ struct hfs_journal_info *jip;
+#if HFS_COMPRESSION
+ int compressed = 0;
+ off_t uncompressed_size = -1;
+ int decmpfs_error = 0;
+
+ if (ap->a_command == F_RDADVISE) {
+ /* we need to inspect the decmpfs state of the file as early as possible */
+ compressed = hfs_file_is_compressed(VTOC(vp), 0);
+ if (compressed) {
+ if (VNODE_IS_RSRC(vp)) {
+ /* if this is the resource fork, treat it as if it were empty */
+ uncompressed_size = 0;
+ } else {
+ decmpfs_error = hfs_uncompressed_size_of_compressed_file(NULL, vp, 0, &uncompressed_size, 0);
+ if (decmpfs_error != 0) {
+ /* failed to get the uncompressed size, we'll check for this later */
+ uncompressed_size = -1;
+ }
+ }
+ }
+ }
+#endif /* HFS_COMPRESSION */
+
+ is64bit = proc_is64bit(p);
+
+#if CONFIG_PROTECT
+ {
+ int error = 0;
+ if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
+ return error;
+ }
+ }
+#endif /* CONFIG_PROTECT */
+
+ switch (ap->a_command) {
+
+ case HFS_GETPATH:
+ {
+ struct vnode *file_vp;
+ cnid_t cnid;
+ int outlen;
+ char *bufptr;
+ int error;
+ int flags = 0;
+
+ /* Caller must be owner of file system. */
+ vfsp = vfs_statfs(HFSTOVFS(hfsmp));
+ if (suser(cred, NULL) &&
+ kauth_cred_getuid(cred) != vfsp->f_owner) {
+ return (EACCES);
+ }
+ /* Target vnode must be file system's root. */
+ if (!vnode_isvroot(vp)) {
+ return (EINVAL);
+ }
+ bufptr = (char *)ap->a_data;
+ cnid = strtoul(bufptr, NULL, 10);
+ if (ap->a_fflag & HFS_GETPATH_VOLUME_RELATIVE) {
+ flags |= BUILDPATH_VOLUME_RELATIVE;
+ }
+
+ /* We need to call hfs_vfs_vget to leverage the code that will
+ * fix the origin list for us if needed, as opposed to calling
+ * hfs_vget, since we will need the parent for the build_path call.
+ */
+
+ if ((error = hfs_vfs_vget(HFSTOVFS(hfsmp), cnid, &file_vp, context))) {
+ return (error);
+ }
+ error = build_path(file_vp, bufptr, sizeof(pathname_t), &outlen, flags, context);
+ vnode_put(file_vp);
+
+ return (error);
+ }
+
+ case HFS_TRANSFER_DOCUMENT_ID:
+ {
+ struct cnode *cp = NULL;
+ int error;
+ u_int32_t to_fd = *(u_int32_t *)ap->a_data;
+ struct fileproc *to_fp;
+ struct vnode *to_vp;
+ struct cnode *to_cp;
+
+ cp = VTOC(vp);
+
+ if ((error = fp_getfvp(p, to_fd, &to_fp, &to_vp)) != 0) {
+ //printf("could not get the vnode for fd %d (err %d)\n", to_fd, error);
+ return error;
+ }
+ if ( (error = vnode_getwithref(to_vp)) ) {
+ file_drop(to_fd);
+ return error;
+ }
+
+ if (VTOHFS(to_vp) != hfsmp) {
+ error = EXDEV;
+ goto transfer_cleanup;
+ }
+
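+ /* track whether the cnode pair is still locked so the exit path can unlock it */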
+ int need_unlock = 1;
+ to_cp = VTOC(to_vp);
+ error = hfs_lockpair(cp, to_cp, HFS_EXCLUSIVE_LOCK);
+ if (error != 0) {
+ //printf("could not lock the pair of cnodes (error %d)\n", error);
+ goto transfer_cleanup;
+ }
+
+ if (!(cp->c_bsdflags & UF_TRACKED)) {
+ error = EINVAL;
+ } else if (to_cp->c_bsdflags & UF_TRACKED) {
+ //
+ // if the destination is already tracked, return an error
+ // as otherwise it's a silent deletion of the target's
+ // document-id
+ //
+ error = EEXIST;
+ } else if (S_ISDIR(cp->c_attr.ca_mode) || S_ISREG(cp->c_attr.ca_mode) || S_ISLNK(cp->c_attr.ca_mode)) {
+ //
+ // we can use the FndrExtendedFileInfo because the doc-id is the first
+ // thing in both it and the ExtendedDirInfo struct, which is fixed in
+ // format and cannot change layout
+ //
+ struct FndrExtendedFileInfo *f_extinfo = (struct FndrExtendedFileInfo *)((u_int8_t*)cp->c_finderinfo + 16);
+ struct FndrExtendedFileInfo *to_extinfo = (struct FndrExtendedFileInfo *)((u_int8_t*)to_cp->c_finderinfo + 16);
+
+ if (f_extinfo->document_id == 0) {
+ uint32_t new_id;
+
+ hfs_unlockpair(cp, to_cp); // have to unlock to be able to get a new-id
+
+ if ((error = hfs_generate_document_id(hfsmp, &new_id)) == 0) {
+ //
+ // re-lock the pair now that we have the document-id
+ //
+ hfs_lockpair(cp, to_cp, HFS_EXCLUSIVE_LOCK);
+ f_extinfo->document_id = new_id;
+ } else {
+ goto transfer_cleanup;
+ }
+ }
+
+ to_extinfo->document_id = f_extinfo->document_id;
+ f_extinfo->document_id = 0;
+ //printf("TRANSFERRING: doc-id %d from ino %d to ino %d\n", to_extinfo->document_id, cp->c_fileid, to_cp->c_fileid);
+
+ // make sure the destination is also UF_TRACKED
+ to_cp->c_bsdflags |= UF_TRACKED;
+ cp->c_bsdflags &= ~UF_TRACKED;
+
+ // mark the cnodes dirty
+ cp->c_flag |= C_MODIFIED | C_FORCEUPDATE;
+ to_cp->c_flag |= C_MODIFIED | C_FORCEUPDATE;
+
+ int lockflags;
+ if ((error = hfs_start_transaction(hfsmp)) == 0) {
+
+ lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
+
+ (void) cat_update(hfsmp, &cp->c_desc, &cp->c_attr, NULL, NULL);
+ (void) cat_update(hfsmp, &to_cp->c_desc, &to_cp->c_attr, NULL, NULL);
+
+ hfs_systemfile_unlock (hfsmp, lockflags);
+ (void) hfs_end_transaction(hfsmp);
+ }
+
+#if CONFIG_FSE
+ add_fsevent(FSE_DOCID_CHANGED, context,
+ FSE_ARG_DEV, hfsmp->hfs_raw_dev,
+ FSE_ARG_INO, (ino64_t)cp->c_fileid, // src inode #
+ FSE_ARG_INO, (ino64_t)to_cp->c_fileid, // dst inode #
+ FSE_ARG_INT32, to_extinfo->document_id,
+ FSE_ARG_DONE);
+
+ hfs_unlockpair(cp, to_cp); // unlock this so we can send the fsevents
+ need_unlock = 0;
+
+ if (need_fsevent(FSE_STAT_CHANGED, vp)) {
+ add_fsevent(FSE_STAT_CHANGED, context, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
+ }
+ if (need_fsevent(FSE_STAT_CHANGED, to_vp)) {
+ add_fsevent(FSE_STAT_CHANGED, context, FSE_ARG_VNODE, to_vp, FSE_ARG_DONE);
+ }
+#else
+ hfs_unlockpair(cp, to_cp); // unlock this so we can send the fsevents
+ need_unlock = 0;
+#endif
+ }
+
+ if (need_unlock) {
+ hfs_unlockpair(cp, to_cp);
+ }
+
+ transfer_cleanup:
+ vnode_put(to_vp);
+ file_drop(to_fd);
+
+ return error;
+ }
+
+
+
+ case HFS_PREV_LINK:
+ case HFS_NEXT_LINK:
+ {
+ cnid_t linkfileid;
+ cnid_t nextlinkid;
+ cnid_t prevlinkid;
+ int error;
+
+ /* Caller must be owner of file system. */
+ vfsp = vfs_statfs(HFSTOVFS(hfsmp));
+ if (suser(cred, NULL) &&
+ kauth_cred_getuid(cred) != vfsp->f_owner) {
+ return (EACCES);
+ }
+ /* Target vnode must be file system's root. */
+ if (!vnode_isvroot(vp)) {
+ return (EINVAL);
+ }
+ linkfileid = *(cnid_t *)ap->a_data;
+ if (linkfileid < kHFSFirstUserCatalogNodeID) {
+ return (EINVAL);
+ }
+ if ((error = hfs_lookup_siblinglinks(hfsmp, linkfileid, &prevlinkid, &nextlinkid))) {
+ return (error);
+ }
+ if (ap->a_command == HFS_NEXT_LINK) {
+ *(cnid_t *)ap->a_data = nextlinkid;
+ } else {
+ *(cnid_t *)ap->a_data = prevlinkid;
+ }
+ return (0);
+ }
+
+ case HFS_RESIZE_PROGRESS: {
+
+ vfsp = vfs_statfs(HFSTOVFS(hfsmp));
+ if (suser(cred, NULL) &&
+ kauth_cred_getuid(cred) != vfsp->f_owner) {
+ return (EACCES); /* must be owner of file system */
+ }
+ if (!vnode_isvroot(vp)) {
+ return (EINVAL);
+ }
+ /* file system must not be mounted read-only */
+ if (hfsmp->hfs_flags & HFS_READ_ONLY) {
+ return (EROFS);
+ }
+
+ return hfs_resize_progress(hfsmp, (u_int32_t *)ap->a_data);
+ }
+
+ case HFS_RESIZE_VOLUME: {
+ u_int64_t newsize;
+ u_int64_t cursize;
+
+ vfsp = vfs_statfs(HFSTOVFS(hfsmp));
+ if (suser(cred, NULL) &&
+ kauth_cred_getuid(cred) != vfsp->f_owner) {
+ return (EACCES); /* must be owner of file system */
+ }
+ if (!vnode_isvroot(vp)) {
+ return (EINVAL);
+ }
+
+ /* file system must not be mounted read-only */
+ if (hfsmp->hfs_flags & HFS_READ_ONLY) {
+ return (EROFS);
+ }
+ newsize = *(u_int64_t *)ap->a_data;
+ cursize = (u_int64_t)hfsmp->totalBlocks * (u_int64_t)hfsmp->blockSize;
+
+ if (newsize > cursize) {
+ return hfs_extendfs(hfsmp, *(u_int64_t *)ap->a_data, context);
+ } else if (newsize < cursize) {
+ return hfs_truncatefs(hfsmp, *(u_int64_t *)ap->a_data, context);
+ } else {
+ return (0);
+ }
+ }
+ case HFS_CHANGE_NEXT_ALLOCATION: {
+ int error = 0; /* Assume success */
+ u_int32_t location;
+
+ if (vnode_vfsisrdonly(vp)) {
+ return (EROFS);
+ }
+ vfsp = vfs_statfs(HFSTOVFS(hfsmp));
+ if (suser(cred, NULL) &&
+ kauth_cred_getuid(cred) != vfsp->f_owner) {
+ return (EACCES); /* must be owner of file system */
+ }
+ if (!vnode_isvroot(vp)) {
+ return (EINVAL);
+ }
+ hfs_lock_mount(hfsmp);
+ location = *(u_int32_t *)ap->a_data;
+ if ((location >= hfsmp->allocLimit) &&
+ (location != HFS_NO_UPDATE_NEXT_ALLOCATION)) {
+ error = EINVAL;
+ goto fail_change_next_allocation;
+ }
+ /* Return previous value. */
+ *(u_int32_t *)ap->a_data = hfsmp->nextAllocation;
+ if (location == HFS_NO_UPDATE_NEXT_ALLOCATION) {
+ /* On magic value for location, set nextAllocation to next block
+ * after metadata zone and set flag in mount structure to indicate
+ * that nextAllocation should not be updated again.
+ */
+ if (hfsmp->hfs_metazone_end != 0) {
+ HFS_UPDATE_NEXT_ALLOCATION(hfsmp, hfsmp->hfs_metazone_end + 1);
+ }
+ hfsmp->hfs_flags |= HFS_SKIP_UPDATE_NEXT_ALLOCATION;
+ } else {
+ hfsmp->hfs_flags &= ~HFS_SKIP_UPDATE_NEXT_ALLOCATION;
+ HFS_UPDATE_NEXT_ALLOCATION(hfsmp, location);
+ }
+ MarkVCBDirty(hfsmp);
+fail_change_next_allocation:
+ hfs_unlock_mount(hfsmp);
+ return (error);
+ }
+
+#if HFS_SPARSE_DEV
+ case HFS_SETBACKINGSTOREINFO: {
+ struct vnode * bsfs_rootvp;
+ struct vnode * di_vp;
+ struct hfs_backingstoreinfo *bsdata;
+ int error = 0;
+
+ if (hfsmp->hfs_flags & HFS_READ_ONLY) {
+ return (EROFS);
+ }
+ if (hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) {
+ return (EALREADY);
+ }
+ vfsp = vfs_statfs(HFSTOVFS(hfsmp));
+ if (suser(cred, NULL) &&
+ kauth_cred_getuid(cred) != vfsp->f_owner) {
+ return (EACCES); /* must be owner of file system */
+ }
+ bsdata = (struct hfs_backingstoreinfo *)ap->a_data;
+ if (bsdata == NULL) {
+ return (EINVAL);
+ }
+ if ((error = file_vnode(bsdata->backingfd, &di_vp))) {
+ return (error);
+ }
+ if ((error = vnode_getwithref(di_vp))) {
+ file_drop(bsdata->backingfd);
+ return(error);
+ }
+
+ if (vnode_mount(vp) == vnode_mount(di_vp)) {
+ (void)vnode_put(di_vp);
+ file_drop(bsdata->backingfd);
+ return (EINVAL);
+ }
+
+ /*
+ * Obtain the backing fs root vnode and keep a reference
+ * on it. This reference will be dropped in hfs_unmount.
+ */
+ error = VFS_ROOT(vnode_mount(di_vp), &bsfs_rootvp, NULL); /* XXX use context! */
+ if (error) {
+ (void)vnode_put(di_vp);
+ file_drop(bsdata->backingfd);
+ return (error);
+ }
+ vnode_ref(bsfs_rootvp);
+ vnode_put(bsfs_rootvp);
+
+ hfs_lock_mount(hfsmp);
+ hfsmp->hfs_backingfs_rootvp = bsfs_rootvp;
+ hfsmp->hfs_flags |= HFS_HAS_SPARSE_DEVICE;
+ hfsmp->hfs_sparsebandblks = bsdata->bandsize / hfsmp->blockSize * 4;
+ hfs_unlock_mount(hfsmp);
+
+ /* We check the MNTK_VIRTUALDEV bit instead of marking the dependent process */
+
+ /*
+ * If the sparse image is on a sparse image file (as opposed to a sparse
+ * bundle), then we may need to limit the free space to the maximum size
+ * of a file on that volume. So we query (using pathconf), and if we get
+ * a meaningful result, we cache the number of blocks for later use in
+ * hfs_freeblks().
+ */
+ hfsmp->hfs_backingfs_maxblocks = 0;
+ if (vnode_vtype(di_vp) == VREG) {
+ int terr;
+ int hostbits;
+ terr = vn_pathconf(di_vp, _PC_FILESIZEBITS, &hostbits, context);
+ if (terr == 0 && hostbits != 0 && hostbits < 64) {
+ u_int64_t hostfilesizemax = ((u_int64_t)1) << hostbits;
+
+ hfsmp->hfs_backingfs_maxblocks = hostfilesizemax / hfsmp->blockSize;
+ }
+ }
+
+ /* The free extent cache is managed differently for sparse devices.
+ * There is a window between when the volume is mounted and when the
+ * device is marked as sparse, so the free extent cache for this
+ * volume is currently initialized as for a normal volume (sorted by
+ * block count). Reset the cache so that it will be rebuilt again
+ * for sparse device (sorted by start block).
+ */
+ ResetVCBFreeExtCache(hfsmp);
+
+ (void)vnode_put(di_vp);
+ file_drop(bsdata->backingfd);
+ return (0);
+ }
+ case HFS_CLRBACKINGSTOREINFO: {
+ struct vnode * tmpvp;
+
+ vfsp = vfs_statfs(HFSTOVFS(hfsmp));
+ if (suser(cred, NULL) &&
+ kauth_cred_getuid(cred) != vfsp->f_owner) {
+ return (EACCES); /* must be owner of file system */
+ }
+ if (hfsmp->hfs_flags & HFS_READ_ONLY) {
+ return (EROFS);
+ }
+
+ if ((hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) &&
+ hfsmp->hfs_backingfs_rootvp) {
+
+ hfs_lock_mount(hfsmp);