+	case HFS_RESIZE_VOLUME: {
+		u_int64_t newsize;
+		u_int64_t cursize;
+
+		/* Caller must be the superuser or the owner of the mounted volume. */
+		vfsp = vfs_statfs(HFSTOVFS(hfsmp));
+		if (suser(cred, NULL) &&
+		    kauth_cred_getuid(cred) != vfsp->f_owner) {
+			return (EACCES); /* must be owner of file system */
+		}
+		/* Resizing is only accepted on the volume's root vnode. */
+		if (!vnode_isvroot(vp)) {
+			return (EINVAL);
+		}
+		/* Requested size in bytes vs. the current size (blocks * block size). */
+		newsize = *(u_int64_t *)ap->a_data;
+		cursize = (u_int64_t)hfsmp->totalBlocks * (u_int64_t)hfsmp->blockSize;
+
+		/* Grow, shrink, or no-op depending on the requested size. */
+		if (newsize > cursize) {
+			return hfs_extendfs(hfsmp, *(u_int64_t *)ap->a_data, context);
+		} else if (newsize < cursize) {
+			return hfs_truncatefs(hfsmp, *(u_int64_t *)ap->a_data, context);
+		} else {
+			return (0);
+		}
+	}
+	case HFS_CHANGE_NEXT_ALLOCATION: {
+		u_int32_t location;
+
+		/* Cannot move the allocation pointer on a read-only mount. */
+		if (vnode_vfsisrdonly(vp)) {
+			return (EROFS);
+		}
+		/* Caller must be the superuser or the owner of the mounted volume. */
+		vfsp = vfs_statfs(HFSTOVFS(hfsmp));
+		if (suser(cred, NULL) &&
+		    kauth_cred_getuid(cred) != vfsp->f_owner) {
+			return (EACCES); /* must be owner of file system */
+		}
+		if (!vnode_isvroot(vp)) {
+			return (EINVAL);
+		}
+		/* New value must be a valid allocation-block number on this volume. */
+		location = *(u_int32_t *)ap->a_data;
+		if (location > hfsmp->totalBlocks - 1) {
+			return (EINVAL);
+		}
+		/* Return previous value. */
+		/* NOTE(review): nextAllocation is read here before the mount lock is
+		 * taken below, so the value returned could be stale relative to the
+		 * store that follows — confirm this window is acceptable. */
+		*(u_int32_t *)ap->a_data = hfsmp->nextAllocation;
+		HFS_MOUNT_LOCK(hfsmp, TRUE);
+		hfsmp->nextAllocation = location;
+		/* presumably 0xFF00 marks the VCB dirty so the change is flushed — verify flag semantics */
+		hfsmp->vcbFlags |= 0xFF00;
+		HFS_MOUNT_UNLOCK(hfsmp, TRUE);
+		return (0);
+	}
+
+#ifdef HFS_SPARSE_DEV
+	/* Associate a backing-store file with this (sparse-device) volume. */
+	case HFS_SETBACKINGSTOREINFO: {
+		struct vnode * bsfs_rootvp;
+		struct vnode * di_vp;
+		struct hfs_backingstoreinfo *bsdata;
+		int error = 0;
+
+		/* Only one backing store may be configured per mount. */
+		if (hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) {
+			return (EALREADY);
+		}
+		/* Caller must be the superuser or the owner of the mounted volume.
+		 * NOTE(review): unlike the cases above, there is no vnode_isvroot()
+		 * check here — confirm that is intentional. */
+		vfsp = vfs_statfs(HFSTOVFS(hfsmp));
+		if (suser(cred, NULL) &&
+		    kauth_cred_getuid(cred) != vfsp->f_owner) {
+			return (EACCES); /* must be owner of file system */
+		}
+		bsdata = (struct hfs_backingstoreinfo *)ap->a_data;
+		if (bsdata == NULL) {
+			return (EINVAL);
+		}
+		/* Resolve the caller's fd to a vnode and take an iocount on it. */
+		if ((error = file_vnode(bsdata->backingfd, &di_vp))) {
+			return (error);
+		}
+		if ((error = vnode_getwithref(di_vp))) {
+			file_drop(bsdata->backingfd);
+			return(error);
+		}
+
+		/* The backing file must live on a different file system. */
+		if (vnode_mount(vp) == vnode_mount(di_vp)) {
+			(void)vnode_put(di_vp);
+			file_drop(bsdata->backingfd);
+			return (EINVAL);
+		}
+
+		/*
+		 * Obtain the backing fs root vnode and keep a reference
+		 * on it. This reference will be dropped in hfs_unmount.
+		 */
+		error = VFS_ROOT(vnode_mount(di_vp), &bsfs_rootvp, NULL); /* XXX use context! */
+		if (error) {
+			(void)vnode_put(di_vp);
+			file_drop(bsdata->backingfd);
+			return (error);
+		}
+		/* Swap the iocount from VFS_ROOT for a long-lived usecount.
+		 * NOTE(review): vnode_ref()'s return value is not checked here. */
+		vnode_ref(bsfs_rootvp);
+		vnode_put(bsfs_rootvp);
+
+		hfsmp->hfs_backingfs_rootvp = bsfs_rootvp;
+		hfsmp->hfs_flags |= HFS_HAS_SPARSE_DEVICE;
+		hfsmp->hfs_sparsebandblks = bsdata->bandsize / HFSTOVCB(hfsmp)->blockSize;
+		hfsmp->hfs_sparsebandblks *= 4;
+
+		(void)vnode_put(di_vp);
+		file_drop(bsdata->backingfd);
+		return (0);
+	}
+	/* Tear down the backing-store association set up above. */
+	case HFS_CLRBACKINGSTOREINFO: {
+		struct vnode * tmpvp;
+
+		/* Caller must be the superuser or the owner of the mounted volume. */
+		vfsp = vfs_statfs(HFSTOVFS(hfsmp));
+		if (suser(cred, NULL) &&
+		    kauth_cred_getuid(cred) != vfsp->f_owner) {
+			return (EACCES); /* must be owner of file system */
+		}
+		if ((hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) &&
+		    hfsmp->hfs_backingfs_rootvp) {
+
+			/* Clear the mount state first, then drop the usecount
+			 * taken by HFS_SETBACKINGSTOREINFO. */
+			hfsmp->hfs_flags &= ~HFS_HAS_SPARSE_DEVICE;
+			tmpvp = hfsmp->hfs_backingfs_rootvp;
+			hfsmp->hfs_backingfs_rootvp = NULLVP;
+			hfsmp->hfs_sparsebandblks = 0;
+			vnode_rele(tmpvp);
+		}
+		return (0);
+	}
+#endif /* HFS_SPARSE_DEV */
+
+	case F_FREEZE_FS: {
+		struct mount *mp;
+		task_t task;
+
+		/* Freezing is restricted to the superuser. */
+		if (!is_suser())
+			return (EACCES);
+
+		mp = vnode_mount(vp);
+		hfsmp = VFSTOHFS(mp);
+
+		/* Only journaled volumes support freeze. */
+		if (!(hfsmp->jnl))
+			return (ENOTSUP);
+
+		/*
+		 * hfs_insync is taken exclusive here and intentionally left
+		 * held on return; it is released by F_THAW_FS below (see also
+		 * the thaw path in hfs_vnop_close referenced there).
+		 */
+		lck_rw_lock_exclusive(&hfsmp->hfs_insync);
+
+		task = current_task();
+		task_working_set_disable(task);
+
+		// flush things before we get started to try and prevent
+		// dirty data from being paged out while we're frozen.
+		// note: can't do this after taking the lock as it will
+		// deadlock against ourselves.
+		vnode_iterate(mp, 0, hfs_freezewrite_callback, NULL);
+		/* Global exclusive lock is likewise held until thaw. */
+		hfs_global_exclusive_lock_acquire(hfsmp);
+		journal_flush(hfsmp->jnl);
+
+		// don't need to iterate on all vnodes, we just need to
+		// wait for writes to the system files and the device vnode
+		if (HFSTOVCB(hfsmp)->extentsRefNum)
+			vnode_waitforwrites(HFSTOVCB(hfsmp)->extentsRefNum, 0, 0, 0, "hfs freeze");
+		if (HFSTOVCB(hfsmp)->catalogRefNum)
+			vnode_waitforwrites(HFSTOVCB(hfsmp)->catalogRefNum, 0, 0, 0, "hfs freeze");
+		if (HFSTOVCB(hfsmp)->allocationsRefNum)
+			vnode_waitforwrites(HFSTOVCB(hfsmp)->allocationsRefNum, 0, 0, 0, "hfs freeze");
+		if (hfsmp->hfs_attribute_vp)
+			vnode_waitforwrites(hfsmp->hfs_attribute_vp, 0, 0, 0, "hfs freeze");
+		vnode_waitforwrites(hfsmp->hfs_devvp, 0, 0, 0, "hfs freeze");
+
+		/* Record the freezer so only it (or close) may thaw. */
+		hfsmp->hfs_freezing_proc = current_proc();
+
+		return (0);
+	}
+
+	case F_THAW_FS: {
+		/* Thawing is restricted to the superuser. */
+		if (!is_suser())
+			return (EACCES);
+
+		// if we're not the one who froze the fs then we
+		// can't thaw it.
+		if (hfsmp->hfs_freezing_proc != current_proc()) {
+			return EPERM;
+		}
+
+		// NOTE: if you add code here, also go check the
+		// code that "thaws" the fs in hfs_vnop_close()
+		//
+		/* Release the locks acquired (and held) by F_FREEZE_FS. */
+		hfsmp->hfs_freezing_proc = NULL;
+		hfs_global_exclusive_lock_release(hfsmp);
+		lck_rw_unlock_exclusive(&hfsmp->hfs_insync);
+
+		return (0);
+	}
+
+#define HFSIOC_BULKACCESS _IOW('h', 9, struct access_t)
+#define HFS_BULKACCESS_FSCTL IOCBASECMD(HFSIOC_BULKACCESS)
+
+	/*
+	 * Bulk access check: for an array of file IDs (or parent IDs),
+	 * report per-entry whether the given uid/groups would be granted
+	 * the requested access, writing results back to user space.
+	 */
+	case HFS_BULKACCESS_FSCTL:
+	case HFS_BULKACCESS: {
+		/*
+		 * NOTE: on entry, the vnode is locked. Incase this vnode
+		 * happens to be in our list of file_ids, we'll note it
+		 * avoid calling hfs_chashget_nowait() on that id as that
+		 * will cause a "locking against myself" panic.
+		 */
+		Boolean check_leaf = true;
+
+		struct user_access_t *user_access_structp;
+		struct user_access_t tmp_user_access_t;
+		struct access_cache cache;
+
+		int error = 0, i;
+
+		dev_t dev = VTOC(vp)->c_dev;
+
+		short flags;
+		struct ucred myucred; /* XXX ILLEGAL */
+		int num_files;
+		int *file_ids = NULL;
+		short *access = NULL;
+
+		cnid_t cnid;
+		cnid_t prevParent_cnid = 0;
+		unsigned long myPerms;
+		short myaccess = 0;
+		struct cat_attr cnattr;
+		CatalogKey catkey;
+		struct cnode *skip_cp = VTOC(vp);
+		struct vfs_context my_context;
+
+		/* first, return error if not run as root */
+		if (cred->cr_ruid != 0) {
+			return EPERM;
+		}
+
+		/* initialize the local cache and buffers */
+		cache.numcached = 0;
+		cache.cachehits = 0;
+		cache.lookups = 0;
+
+		/* Borrow path buffers as scratch space for the id/result arrays. */
+		file_ids = (int *) get_pathbuff();
+		access = (short *) get_pathbuff();
+		cache.acache = (int *) get_pathbuff();
+		cache.haveaccess = (Boolean *) get_pathbuff();
+
+		if (file_ids == NULL || access == NULL || cache.acache == NULL || cache.haveaccess == NULL) {
+			release_pathbuff((char *) file_ids);
+			release_pathbuff((char *) access);
+			release_pathbuff((char *) cache.acache);
+			release_pathbuff((char *) cache.haveaccess);
+
+			return ENOMEM;
+		}
+
+		/* struct copyin done during dispatch... need to copy file_id array separately */
+		if (ap->a_data == NULL) {
+			error = EINVAL;
+			goto err_exit_bulk_access;
+		}
+
+		/* 64-bit callers pass user_access_t directly; munge the 32-bit form. */
+		if (is64bit) {
+			user_access_structp = (struct user_access_t *)ap->a_data;
+		}
+		else {
+			struct access_t * accessp = (struct access_t *)ap->a_data;
+			tmp_user_access_t.uid = accessp->uid;
+			tmp_user_access_t.flags = accessp->flags;
+			tmp_user_access_t.num_groups = accessp->num_groups;
+			tmp_user_access_t.num_files = accessp->num_files;
+			tmp_user_access_t.file_ids = CAST_USER_ADDR_T(accessp->file_ids);
+			tmp_user_access_t.groups = CAST_USER_ADDR_T(accessp->groups);
+			tmp_user_access_t.access = CAST_USER_ADDR_T(accessp->access);
+			user_access_structp = &tmp_user_access_t;
+		}
+
+		num_files = user_access_structp->num_files;
+		/* NOTE(review): num_files < 1 jumps to cleanup with error still 0,
+		 * so an empty request returns success — confirm this is intended. */
+		if (num_files < 1) {
+			goto err_exit_bulk_access;
+		}
+		if (num_files > 256) {
+			error = EINVAL;
+			goto err_exit_bulk_access;
+		}
+
+		if ((error = copyin(user_access_structp->file_ids, (caddr_t)file_ids,
+				num_files * sizeof(int)))) {
+			goto err_exit_bulk_access;
+		}
+
+		/* fill in the ucred structure */
+		flags = user_access_structp->flags;
+		if ((flags & (F_OK | R_OK | W_OK | X_OK)) == 0) {
+			flags = R_OK;
+		}
+
+		/* check if we've been passed leaf node ids or parent ids */
+		if (flags & PARENT_IDS_FLAG) {
+			check_leaf = false;
+		}
+
+		/* Build a throwaway credential for the uid/groups being checked. */
+		memset(&myucred, 0, sizeof(myucred));
+		myucred.cr_ref = 1;
+		myucred.cr_uid = myucred.cr_ruid = myucred.cr_svuid = user_access_structp->uid;
+		myucred.cr_ngroups = user_access_structp->num_groups;
+		if (myucred.cr_ngroups < 1 || myucred.cr_ngroups > 16) {
+			myucred.cr_ngroups = 0;
+		} else if ((error = copyin(user_access_structp->groups, (caddr_t)myucred.cr_groups,
+				myucred.cr_ngroups * sizeof(gid_t)))) {
+			goto err_exit_bulk_access;
+		}
+		myucred.cr_rgid = myucred.cr_svgid = myucred.cr_groups[0];
+		myucred.cr_gmuid = myucred.cr_uid;
+
+		my_context.vc_proc = p;
+		my_context.vc_ucred = &myucred;
+
+		/* Check access to each file_id passed in */
+		for (i = 0; i < num_files; i++) {
+#if 0
+			cnid = (cnid_t) file_ids[i];
+
+			/* root always has access */
+			if (!suser(&myucred, NULL)) {
+				access[i] = 0;
+				continue;
+			}
+
+			if (check_leaf) {
+
+				/* do the lookup (checks the cnode hash, then the catalog) */
+				error = do_attr_lookup(hfsmp, &cache, dev, cnid, skip_cp, &catkey, &cnattr, p);
+				if (error) {
+					access[i] = (short) error;
+					continue;
+				}
+
+				/* before calling CheckAccess(), check the target file for read access */
+				myPerms = DerivePermissionSummary(cnattr.ca_uid, cnattr.ca_gid,
+					cnattr.ca_mode, hfsmp->hfs_mp, &myucred, p );
+
+
+				/* fail fast if no access */
+				if ((myPerms & flags) == 0) {
+					access[i] = EACCES;
+					continue;
+				}
+			} else {
+				/* we were passed an array of parent ids */
+				catkey.hfsPlus.parentID = cnid;
+			}
+
+			/* if the last guy had the same parent and had access, we're done */
+			if (i > 0 && catkey.hfsPlus.parentID == prevParent_cnid && access[i-1] == 0) {
+				cache.cachehits++;
+				access[i] = 0;
+				continue;
+			}
+
+			myaccess = do_access_check(hfsmp, &error, &cache, catkey.hfsPlus.parentID,
+				skip_cp, p, &myucred, dev);
+
+			if ( myaccess ) {
+				access[i] = 0; // have access.. no errors to report
+			} else {
+				access[i] = (error != 0 ? (short) error : EACCES);
+			}
+
+			prevParent_cnid = catkey.hfsPlus.parentID;
+#else
+			int myErr;
+
+			cnid = (cnid_t)file_ids[i];
+
+			/* Walk from the given cnid up toward the root, authorizing
+			 * search/read at each level; stop on the first denial. */
+			while (cnid >= kRootDirID) {
+				/* get the vnode for this cnid */
+				/* NOTE(review): hfs_vget() overwrites 'vp' — the ioctl's
+				 * own vnode pointer — so 'vp' no longer refers to the
+				 * original target after this loop; verify nothing below
+				 * the loop depends on the original 'vp'. */
+				myErr = hfs_vget(hfsmp, cnid, &vp, 0);
+				if ( myErr ) {
+					access[i] = EACCES;
+					break;
+				}
+
+				cnid = VTOC(vp)->c_parentcnid;
+
+				hfs_unlock(VTOC(vp));
+				if (vnode_vtype(vp) == VDIR) {
+					myErr = vnode_authorize(vp, NULL, (KAUTH_VNODE_SEARCH | KAUTH_VNODE_LIST_DIRECTORY), &my_context);
+				} else {
+					myErr = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_DATA, &my_context);
+				}
+				vnode_put(vp);
+				access[i] = myErr;
+				if (myErr) {
+					break;
+				}
+			}
+#endif
+		}
+
+		/* copyout the access array */
+		if ((error = copyout((caddr_t)access, user_access_structp->access,
+				num_files * sizeof (short)))) {
+			goto err_exit_bulk_access;
+		}
+
+	err_exit_bulk_access:
+
+		//printf("on exit (err %d), numfiles/numcached/cachehits/lookups is %d/%d/%d/%d\n", error, num_files, cache.numcached, cache.cachehits, cache.lookups);
+
+		release_pathbuff((char *) cache.acache);
+		release_pathbuff((char *) cache.haveaccess);
+		release_pathbuff((char *) file_ids);
+		release_pathbuff((char *) access);
+
+		return (error);
+	} /* HFS_BULKACCESS */
+
+	case HFS_SETACLSTATE: {
+		int state;
+
+		if (ap->a_data == NULL) {
+			return (EINVAL);
+		}
+
+		vfsp = vfs_statfs(HFSTOVFS(hfsmp));
+		/* state: 1 = enable ACLs, 0 = disable ACLs. */
+		state = *(int *)ap->a_data;
+
+		// super-user can enable or disable acl's on a volume.
+		// the volume owner can only enable acl's
+		if (!is_suser() && (state == 0 || kauth_cred_getuid(cred) != vfsp->f_owner)) {
+			return (EPERM);
+		}
+		if (state == 0 || state == 1)
+			return hfs_setextendedsecurity(hfsmp, state);
+		else
+			return (EINVAL);
+	}
+
+	case F_FULLFSYNC: {
+		int error;
+
+		/* Take the cnode lock, then do a full (wait-for-media) fsync. */
+		error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK);
+		if (error == 0) {
+			error = hfs_fsync(vp, MNT_NOWAIT, TRUE, p);
+			hfs_unlock(VTOC(vp));
+		}
+
+		return error;
+	}
+
+	case F_CHKCLEAN: {
+		register struct cnode *cp;
+		int error;
+
+		/* Only meaningful for regular files. */
+		if (!vnode_isreg(vp))
+			return EINVAL;
+
+		error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK);
+		if (error == 0) {
+			cp = VTOC(vp);
+			/*
+			 * used by regression test to determine if
+			 * all the dirty pages (via write) have been cleaned
+			 * after a call to 'fsync'.
+			 */
+			error = is_file_clean(vp, VTOF(vp)->ff_size);
+			hfs_unlock(cp);
+		}
+		return (error);
+	}
+
+	case F_RDADVISE: {
+		register struct radvisory *ra;
+		struct filefork *fp;
+		int error;
+
+		/* Read-ahead advice only applies to regular files. */
+		if (!vnode_isreg(vp))
+			return EINVAL;
+
+		ra = (struct radvisory *)(ap->a_data);
+		fp = VTOF(vp);
+
+		/* Protect against a size change. */
+		hfs_lock_truncate(VTOC(vp), TRUE);
+
+		/* Reject advice that starts at or beyond EOF. */
+		if (ra->ra_offset >= fp->ff_size) {
+			error = EFBIG;
+		} else {
+			error = advisory_read(vp, fp->ff_size, ra->ra_offset, ra->ra_count);
+		}
+
+		hfs_unlock_truncate(VTOC(vp));
+		return (error);
+	}
+
+	/*
+	 * Read or write the first 1K of the device (the boot blocks),
+	 * transferring through the buffer cache one device block at a time.
+	 */
+	case F_READBOOTSTRAP:
+	case F_WRITEBOOTSTRAP:
+	{
+	    struct vnode *devvp = NULL;
+	    user_fbootstraptransfer_t *user_bootstrapp;
+	    int devBlockSize;
+	    int error;
+	    uio_t auio;
+	    daddr64_t blockNumber;
+	    u_long blockOffset;
+	    u_long xfersize;
+	    struct buf *bp;
+	    user_fbootstraptransfer_t user_bootstrap;
+
+	    /* Only the volume root vnode may address the boot blocks. */
+	    if (!vnode_isvroot(vp))
+		return (EINVAL);
+	    /* LP64 - when caller is a 64 bit process then we are passed a pointer
+	     * to a user_fbootstraptransfer_t else we get a pointer to a
+	     * fbootstraptransfer_t which we munge into a user_fbootstraptransfer_t
+	     */
+	    if (is64bit) {
+		user_bootstrapp = (user_fbootstraptransfer_t *)ap->a_data;
+	    }
+	    else {
+		fbootstraptransfer_t *bootstrapp = (fbootstraptransfer_t *)ap->a_data;
+		user_bootstrapp = &user_bootstrap;
+		user_bootstrap.fbt_offset = bootstrapp->fbt_offset;
+		user_bootstrap.fbt_length = bootstrapp->fbt_length;
+		user_bootstrap.fbt_buffer = CAST_USER_ADDR_T(bootstrapp->fbt_buffer);
+	    }
+	    /* Transfers are limited to the first 1K of the volume.
+	     * NOTE(review): offset + length could wrap if the caller passes
+	     * huge values — confirm the fields are range-checked upstream. */
+	    if (user_bootstrapp->fbt_offset + user_bootstrapp->fbt_length > 1024)
+		return EINVAL;
+
+	    devvp = VTOHFS(vp)->hfs_devvp;
+	    auio = uio_create(1, user_bootstrapp->fbt_offset,
+			      is64bit ? UIO_USERSPACE64 : UIO_USERSPACE32,
+			      (ap->a_command == F_WRITEBOOTSTRAP) ? UIO_WRITE : UIO_READ);
+	    uio_addiov(auio, user_bootstrapp->fbt_buffer, user_bootstrapp->fbt_length);
+
+	    devBlockSize = vfs_devblocksize(vnode_mount(vp));
+
+	    /* Read-modify-write each device block covered by the transfer. */
+	    while (uio_resid(auio) > 0) {
+		blockNumber = uio_offset(auio) / devBlockSize;
+		error = (int)buf_bread(devvp, blockNumber, devBlockSize, cred, &bp);
+		if (error) {
+		    if (bp) buf_brelse(bp);
+		    uio_free(auio);
+		    return error;
+		};
+
+		blockOffset = uio_offset(auio) % devBlockSize;
+		xfersize = devBlockSize - blockOffset;
+		error = uiomove((caddr_t)buf_dataptr(bp) + blockOffset, (int)xfersize, auio);
+		if (error) {
+		    buf_brelse(bp);
+		    uio_free(auio);
+		    return error;
+		};
+		if (uio_rw(auio) == UIO_WRITE) {
+		    /* Synchronously write the modified block back. */
+		    error = VNOP_BWRITE(bp);
+		    if (error) {
+			uio_free(auio);
+			return error;
+		    }
+		} else {
+		    buf_brelse(bp);
+		};
+	    };
+	    uio_free(auio);
+	};
+	return 0;
+
+	case _IOC(IOC_OUT,'h', 4, 0):     /* Create date in local time */
+	{
+		/* Return the volume's creation date, sized for the caller's ABI. */
+		if (is64bit) {
+			*(user_time_t *)(ap->a_data) = (user_time_t) (to_bsd_time(VTOVCB(vp)->localCreateDate));
+		}
+		else {
+			*(time_t *)(ap->a_data) = to_bsd_time(VTOVCB(vp)->localCreateDate);
+		}
+		return 0;
+	}
+
+	/* Copy the time of the current mount out to the caller. */
+	case HFS_GET_MOUNT_TIME:
+		return copyout(&hfsmp->hfs_mount_time, CAST_USER_ADDR_T(ap->a_data), sizeof(hfsmp->hfs_mount_time));
+		break;
+
+	/* Copy the modification time recorded at last mount out to the caller. */
+	case HFS_GET_LAST_MTIME:
+		return copyout(&hfsmp->hfs_last_mounted_mtime, CAST_USER_ADDR_T(ap->a_data), sizeof(hfsmp->hfs_last_mounted_mtime));
+		break;
+
+	/* Replace the volume's Finder info and flush the volume header. */
+	case HFS_SET_BOOT_INFO:
+		if (!vnode_isvroot(vp))
+			return(EINVAL);
+		if (!kauth_cred_issuser(cred) && (kauth_cred_getuid(cred) != vfs_statfs(HFSTOVFS(hfsmp))->f_owner))
+			return(EACCES);	/* must be superuser or owner of filesystem */
+		HFS_MOUNT_LOCK(hfsmp, TRUE);
+		bcopy(ap->a_data, &hfsmp->vcbFndrInfo, sizeof(hfsmp->vcbFndrInfo));
+		HFS_MOUNT_UNLOCK(hfsmp, TRUE);
+		(void) hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
+		break;
+
+	/* Copy the volume's Finder info out to the caller. */
+	case HFS_GET_BOOT_INFO:
+		if (!vnode_isvroot(vp))
+			return(EINVAL);
+		HFS_MOUNT_LOCK(hfsmp, TRUE);
+		bcopy(&hfsmp->vcbFndrInfo, ap->a_data, sizeof(hfsmp->vcbFndrInfo));
+		HFS_MOUNT_UNLOCK(hfsmp, TRUE);
+		break;
+
+	default:
+		return (ENOTTY);
+	}
+
+	/* Should never get here */
+	return 0;
+}
+
+/*
+ * select
+ */
+int
+hfs_vnop_select(__unused struct vnop_select_args *ap)
+/*
+	struct vnop_select_args {
+		vnode_t a_vp;
+		int  a_which;
+		int  a_fflags;
+		void *a_wql;
+		vfs_context_t a_context;
+	};
+*/
+{
+	/*
+	 * Unconditionally report the vnode as ready for I/O.
+	 * A more faithful implementation would actually check
+	 * whether the requested kind of I/O can proceed.
+	 */
+	return 1;
+}
+
+/*
+ * Converts a logical block number to a physical block, and optionally returns
+ * the number of remaining blocks in a run. The logical block is based on hfsNode.logBlockSize.
+ * The physical block number is based on the device block size; currently it is 512.
+ * The block run is returned in logical blocks, and is the REMAINING number of blocks.
+ */
+int
+hfs_bmap(struct vnode *vp, daddr_t bn, struct vnode **vpp, daddr64_t *bnp, int *runp)
+{
+ struct cnode *cp = VTOC(vp);
+ struct filefork *fp = VTOF(vp);
+ struct hfsmount *hfsmp = VTOHFS(vp);
+ int retval = E_NONE;
+ daddr_t logBlockSize;
+ size_t bytesContAvail = 0;
+ off_t blockposition;
+ int lockExtBtree;
+ int lockflags = 0;
+
+ /*
+ * Check for underlying vnode requests and ensure that logical
+ * to physical mapping is requested.
+ */
+ if (vpp != NULL)
+ *vpp = cp->c_devvp;
+ if (bnp == NULL)
+ return (0);
+
+ logBlockSize = GetLogicalBlockSize(vp);
+ blockposition = (off_t)bn * (off_t)logBlockSize;
+
+ lockExtBtree = overflow_extents(fp);
+
+ if (lockExtBtree)
+ lockflags = hfs_systemfile_lock(hfsmp, SFL_EXTENTS, HFS_SHARED_LOCK);
+
+ retval = MacToVFSError(
+ MapFileBlockC (HFSTOVCB(hfsmp),
+ (FCB*)fp,
+ MAXPHYSIO,