+
+ /*
+ * We only allow the filesystem to be reloaded if it
+ * is currently mounted read-only.
+ */
+ if ((flags & MNT_RELOAD) &&
+ ((mp->mnt_flag & MNT_RDONLY) == 0)) {
+ error = ENOTSUP;
+ goto out;
+ }
+
+ /*
+ * Only root, or the user that did the original mount, is
+ * permitted to update it.
+ */
+ if (mp->mnt_vfsstat.f_owner != kauth_cred_getuid(vfs_context_ucred(ctx)) &&
+ (!vfs_context_issuser(ctx))) {
+ error = EPERM;
+ goto out;
+ }
+#if CONFIG_MACF
+ error = mac_mount_check_remount(ctx, mp);
+ if (error != 0) {
+ goto out;
+ }
+#endif
+
+out:
+ if (error) {
+ lck_rw_done(&mp->mnt_rwlock);
+ }
+
+ return error;
+}
+
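+/*
+ * Release the exclusive mount rwlock taken by mount_begin_update().
+ */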
+static void
+mount_end_update(mount_t mp)
+{
+ lck_rw_done(&mp->mnt_rwlock);
+}
+
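+/*
+ * Return, with an iocount held, the root vnode of the imageboot source at
+ * the given nesting height; fail with EINVAL for an out-of-range height or
+ * ENOENT if no root vnode is recorded at that height.
+ */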
+static int
+get_imgsrc_rootvnode(uint32_t height, vnode_t *rvpp)
+{
+ vnode_t vp;
+
+ if (height >= MAX_IMAGEBOOT_NESTING) {
+ return EINVAL;
+ }
+
+ vp = imgsrc_rootvnodes[height];
+ if ((vp != NULLVP) && (vnode_get(vp) == 0)) {
+ *rvpp = vp;
+ return 0;
+ } else {
+ return ENOENT;
+ }
+}
+
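+/*
+ * Move the already-mounted imageboot source filesystem (selected by nesting
+ * height) from its original mount point onto the covered vnode 'vp'.  Only
+ * root may do this, the move is only permitted once per mount, and the
+ * update is serialized against other mount updates via the mount rwlock.
+ */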
+static int
+relocate_imageboot_source(vnode_t pvp, vnode_t vp, struct componentname *cnp,
+ const char *fsname, vfs_context_t ctx,
+ boolean_t is64bit, user_addr_t fsmountargs, boolean_t by_index)
+{
+ int error;
+ mount_t mp;
+ boolean_t placed = FALSE;
+ vnode_t devvp = NULLVP;
+ struct vfstable *vfsp;
+ user_addr_t devpath;
+ char *old_mntonname;
+ vnode_t rvp;
+ uint32_t height;
+ uint32_t flags;
+
+ /* If we didn't imageboot, nothing to move */
+ if (imgsrc_rootvnodes[0] == NULLVP) {
+ return EINVAL;
+ }
+
+ /* Only root can do this */
+ if (!vfs_context_issuser(ctx)) {
+ return EPERM;
+ }
+
+ IMGSRC_DEBUG("looking for root vnode.\n");
+
+ /*
+ * Get root vnode of filesystem we're moving.
+ */
+ if (by_index) {
+ if (is64bit) {
+ struct user64_mnt_imgsrc_args mia64;
+ error = copyin(fsmountargs, &mia64, sizeof(mia64));
+ if (error != 0) {
+ IMGSRC_DEBUG("Failed to copy in arguments.\n");
+ return error;
+ }
+
+ height = mia64.mi_height;
+ flags = mia64.mi_flags;
+ devpath = mia64.mi_devpath;
+ } else {
+ struct user32_mnt_imgsrc_args mia32;
+ error = copyin(fsmountargs, &mia32, sizeof(mia32));
+ if (error != 0) {
+ IMGSRC_DEBUG("Failed to copy in arguments.\n");
+ return error;
+ }
+
+ height = mia32.mi_height;
+ flags = mia32.mi_flags;
+ devpath = mia32.mi_devpath;
+ }
+ } else {
+ /*
+ * For binary compatibility--assumes one level of nesting.
+ */
+ if (is64bit) {
+ if ((error = copyin(fsmountargs, (caddr_t)&devpath, sizeof(devpath))) != 0) {
+ return error;
+ }
+ } else {
+ user32_addr_t tmp;
+ if ((error = copyin(fsmountargs, (caddr_t)&tmp, sizeof(tmp))) != 0) {
+ return error;
+ }
+
+ /* munge into LP64 addr */
+ devpath = CAST_USER_ADDR_T(tmp);
+ }
+
+ height = 0;
+ flags = 0;
+ }
+
+ if (flags != 0) {
+ IMGSRC_DEBUG("%s: Got nonzero flags.\n", __FUNCTION__);
+ return EINVAL;
+ }
+
+ error = get_imgsrc_rootvnode(height, &rvp);
+ if (error != 0) {
+ IMGSRC_DEBUG("getting root vnode failed with %d\n", error);
+ return error;
+ }
+
+ IMGSRC_DEBUG("got root vnode.\n");
+
+ MALLOC(old_mntonname, char*, MAXPATHLEN, M_TEMP, M_WAITOK);
+
+ /* Can only move once */
+ mp = vnode_mount(rvp);
+ if ((mp->mnt_kern_flag & MNTK_HAS_MOVED) == MNTK_HAS_MOVED) {
+ IMGSRC_DEBUG("Already moved.\n");
+ error = EBUSY;
+ goto out0;
+ }
+
+ IMGSRC_DEBUG("Starting update.\n");
+
+ /* Get exclusive rwlock on mount, authorize update on mp */
+ error = mount_begin_update(mp, ctx, 0);
+ if (error != 0) {
+ IMGSRC_DEBUG("Starting update failed with %d\n", error);
+ goto out0;
+ }
+
+ /*
+ * It can only be moved once. Flag is set under the rwlock,
+ * so we're now safe to proceed.
+ */
+ if ((mp->mnt_kern_flag & MNTK_HAS_MOVED) == MNTK_HAS_MOVED) {
+ IMGSRC_DEBUG("Already moved [2]\n");
+ error = EBUSY;
+ goto out1;
+ }
+
+ IMGSRC_DEBUG("Preparing coveredvp.\n");
+
+ /* Mark covered vnode as mount in progress, authorize placing mount on top */
+ error = prepare_coveredvp(vp, ctx, cnp, fsname, FALSE);
+ if (error != 0) {
+ IMGSRC_DEBUG("Preparing coveredvp failed with %d.\n", error);
+ goto out1;
+ }
+
+ IMGSRC_DEBUG("Covered vp OK.\n");
+
+ /* Sanity check the name caller has provided */
+ vfsp = mp->mnt_vtable;
+ if (strncmp(vfsp->vfc_name, fsname, MFSNAMELEN) != 0) {
+ IMGSRC_DEBUG("Wrong fs name.\n");
+ error = EINVAL;
+ goto out2;
+ }
+
+ /* Check the device vnode and update mount-from name, for local filesystems */
+ if (vfsp->vfc_vfsflags & VFC_VFSLOCALARGS) {
+ IMGSRC_DEBUG("Local, doing device validation.\n");
+
+ if (devpath != USER_ADDR_NULL) {
+ error = authorize_devpath_and_update_mntfromname(mp, devpath, &devvp, ctx);
+ if (error) {
+ IMGSRC_DEBUG("authorize_devpath_and_update_mntfromname() failed.\n");
+ goto out2;
+ }
+
+ vnode_put(devvp);
+ }
+ }
+
+ /*
+ * Place mp on top of vnode, ref the vnode, call checkdirs(),
+ * and increment the name cache's mount generation
+ */
+
+ IMGSRC_DEBUG("About to call place_mount_and_checkdirs().\n");
+ error = place_mount_and_checkdirs(mp, vp, ctx);
+ if (error != 0) {
+ goto out2;
+ }
+
+ placed = TRUE;
+
+ strlcpy(old_mntonname, mp->mnt_vfsstat.f_mntonname, MAXPATHLEN);
+ strlcpy(mp->mnt_vfsstat.f_mntonname, cnp->cn_pnbuf, MAXPATHLEN);
+
+ /* Forbid future moves */
+ mount_lock(mp);
+ mp->mnt_kern_flag |= MNTK_HAS_MOVED;
+ mount_unlock(mp);
+
+ /* Finally, add to mount list, completely ready to go */
+ if (mount_list_add(mp) != 0) {
+ /*
+ * The system is shutting down trying to umount
+ * everything, so fail with a plausible errno.
+ */
+ error = EBUSY;
+ goto out3;
+ }
+
+ mount_end_update(mp);
+ vnode_put(rvp);
+ FREE(old_mntonname, M_TEMP);
+
+ vfs_notify_mount(pvp);
+
+ return 0;
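+ /*
+ * Error unwinding: out3 restores the original mount-on name and clears
+ * MNTK_HAS_MOVED, out2 undoes the covered-vnode setup, out1 drops the
+ * mount rwlock, and out0 releases the root vnode and the name buffer.
+ */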
+out3:
+ strlcpy(mp->mnt_vfsstat.f_mntonname, old_mntonname, MAXPATHLEN);
+
+ mount_lock(mp);
+ mp->mnt_kern_flag &= ~(MNTK_HAS_MOVED);
+ mount_unlock(mp);
+
+out2:
+ /*
+ * Placing the mp on the vnode clears VMOUNT,
+ * so cleanup is different after that point
+ */
+ if (placed) {
+ /* Rele the vp, clear VMOUNT and v_mountedhere */
+ undo_place_on_covered_vp(mp, vp);
+ } else {
+ vnode_lock_spin(vp);
+ CLR(vp->v_flag, VMOUNT);
+ vnode_unlock(vp);
+ }
+out1:
+ mount_end_update(mp);
+
+out0:
+ vnode_put(rvp);
+ FREE(old_mntonname, M_TEMP);
+ return error;
+}
+
+#endif /* CONFIG_IMGSRC_ACCESS */
+
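+/*
+ * Enable disk quotas on an HFS mount: for each quota type, probe for the
+ * quota option file under the mount point and, if it exists, issue
+ * Q_QUOTAON with the path of the corresponding quota file.
+ */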
+void
+enablequotas(struct mount *mp, vfs_context_t ctx)
+{
+ struct nameidata qnd;
+ int type;
+ char qfpath[MAXPATHLEN];
+ const char *qfname = QUOTAFILENAME;
+ const char *qfopsname = QUOTAOPSNAME;
+ const char *qfextension[] = INITQFNAMES;
+
+ /* XXX Should be an MNTK_ flag, instead of strncmp()'s */
+ if (strncmp(mp->mnt_vfsstat.f_fstypename, "hfs", sizeof("hfs")) != 0) {
+ return;
+ }
+ /*
+ * Enable filesystem disk quotas if necessary.
+ * We ignore errors as this should not interfere with the final mount.
+ */
+ for (type = 0; type < MAXQUOTAS; type++) {
+ snprintf(qfpath, sizeof(qfpath), "%s/%s.%s", mp->mnt_vfsstat.f_mntonname, qfopsname, qfextension[type]);
+ NDINIT(&qnd, LOOKUP, OP_MOUNT, FOLLOW, UIO_SYSSPACE,
+ CAST_USER_ADDR_T(qfpath), ctx);
+ if (namei(&qnd) != 0) {
+ continue; /* option file to trigger quotas is not present */
+ }
+ vnode_put(qnd.ni_vp);
+ nameidone(&qnd);
+ snprintf(qfpath, sizeof(qfpath), "%s/%s.%s", mp->mnt_vfsstat.f_mntonname, qfname, qfextension[type]);
+
+ (void) VFS_QUOTACTL(mp, QCMD(Q_QUOTAON, type), 0, qfpath, ctx);
+ }
+ return;
+}
+
+
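+/*
+ * Per-process callback for checkdirs(): if this process's current or root
+ * directory is the vnode just covered by the new mount, retarget it to the
+ * root of the mounted filesystem, moving the usecount reference over.
+ */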
+static int
+checkdirs_callback(proc_t p, void * arg)
+{
+ struct cdirargs * cdrp = (struct cdirargs * )arg;
+ vnode_t olddp = cdrp->olddp;
+ vnode_t newdp = cdrp->newdp;
+ struct filedesc *fdp;
+ vnode_t tvp;
+ vnode_t fdp_cvp;
+ vnode_t fdp_rvp;
+ int cdir_changed = 0;
+ int rdir_changed = 0;
+
+ /*
+ * XXX Also needs to iterate each thread in the process to see if it
+ * XXX is using a per-thread current working directory, and, if so,
+ * XXX update that as well.
+ */
+
+ proc_fdlock(p);
+ fdp = p->p_fd;
+ if (fdp == (struct filedesc *)0) {
+ proc_fdunlock(p);
+ return(PROC_RETURNED);
+ }
+ fdp_cvp = fdp->fd_cdir;
+ fdp_rvp = fdp->fd_rdir;
+ proc_fdunlock(p);
+
+ if (fdp_cvp == olddp) {
+ vnode_ref(newdp);
+ tvp = fdp->fd_cdir;
+ fdp_cvp = newdp;
+ cdir_changed = 1;
+ vnode_rele(tvp);
+ }
+ if (fdp_rvp == olddp) {
+ vnode_ref(newdp);
+ tvp = fdp->fd_rdir;
+ fdp_rvp = newdp;
+ rdir_changed = 1;
+ vnode_rele(tvp);
+ }
+ if (cdir_changed || rdir_changed) {
+ proc_fdlock(p);
+ fdp->fd_cdir = fdp_cvp;
+ fdp->fd_rdir = fdp_rvp;
+ proc_fdunlock(p);
+ }
+ return(PROC_RETURNED);
+}
+
+
+
+/*
+ * Scan all active processes to see if any of them have a current
+ * or root directory onto which the new filesystem has just been
+ * mounted. If so, replace them with the new mount point.
+ */
+static int
+checkdirs(vnode_t olddp, vfs_context_t ctx)
+{
+ vnode_t newdp;
+ vnode_t tvp;
+ int err;
+ struct cdirargs cdr;
+
+ if (olddp->v_usecount == 1)
+ return(0);
+ err = VFS_ROOT(olddp->v_mountedhere, &newdp, ctx);
+
+ if (err != 0) {
+#if DIAGNOSTIC
+ panic("mount: lost mount: error %d", err);
+#endif
+ return(err);
+ }
+
+ cdr.olddp = olddp;
+ cdr.newdp = newdp;
+ /* do not block for exec/fork trans as the vp in cwd & rootdir are not changing */
+ proc_iterate(PROC_ALLPROCLIST | PROC_NOWAITTRANS, checkdirs_callback, (void *)&cdr, NULL, NULL);
+
+ if (rootvnode == olddp) {
+ vnode_ref(newdp);
+ tvp = rootvnode;
+ rootvnode = newdp;
+ vnode_rele(tvp);
+ }
+
+ vnode_put(newdp);
+ return(0);
+}
+
+/*
+ * Unmount a file system.
+ *
+ * Note: unmount takes a path to the vnode mounted on as argument,
+ * not the special file (as before).
+ */
+/* ARGSUSED */
+int
+unmount(__unused proc_t p, struct unmount_args *uap, __unused int32_t *retval)
+{
+ vnode_t vp;
+ struct mount *mp;
+ int error;
+ struct nameidata nd;
+ vfs_context_t ctx = vfs_context_current();
+
+ NDINIT(&nd, LOOKUP, OP_UNMOUNT, FOLLOW | AUDITVNPATH1,
+ UIO_USERSPACE, uap->path, ctx);
+ error = namei(&nd);
+ if (error)
+ return (error);
+ vp = nd.ni_vp;
+ mp = vp->v_mount;
+ nameidone(&nd);
+
+#if CONFIG_MACF
+ error = mac_mount_check_umount(ctx, mp);
+ if (error != 0) {
+ vnode_put(vp);
+ return (error);
+ }
+#endif
+ /*
+ * Must be the root of the filesystem
+ */
+ if ((vp->v_flag & VROOT) == 0) {
+ vnode_put(vp);
+ return (EINVAL);
+ }
+ mount_ref(mp, 0);
+ vnode_put(vp);
+ /* safedounmount consumes the mount ref */
+ return (safedounmount(mp, uap->flags, ctx));
+}
+
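+/*
+ * Unmount the file system identified by fsid.  A mount ref is taken before
+ * the iteration reference from the lookup is dropped; safedounmount()
+ * consumes that mount ref.
+ */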
+int
+vfs_unmountbyfsid(fsid_t * fsid, int flags, vfs_context_t ctx)
+{
+ mount_t mp;
+
+ mp = mount_list_lookupby_fsid(fsid, 0, 1);
+ if (mp == (mount_t)0) {
+ return(ENOENT);
+ }
+ mount_ref(mp, 0);
+ mount_iterdrop(mp);
+ /* safedounmount consumes the mount ref */
+ return(safedounmount(mp, flags, ctx));
+}
+
+
+/*
+ * The mount struct comes with a mount ref which will be consumed.
+ * Do the actual file system unmount and prevent some common foot-shooting.
+ */
+int
+safedounmount(struct mount *mp, int flags, vfs_context_t ctx)
+{
+ int error;
+ proc_t p = vfs_context_proc(ctx);
+
+ /*
+ * If the file system is not responding, MNT_NOBLOCK is set, and this
+ * is not a forced unmount, then return EBUSY.
+ */
+ if ((mp->mnt_kern_flag & MNT_LNOTRESP) &&
+ (flags & MNT_NOBLOCK) && ((flags & MNT_FORCE) == 0)) {
+ error = EBUSY;
+ goto out;
+ }
+
+ /*
+ * Skip authorization if the mount is tagged as permissive and
+ * this is not a forced-unmount attempt.
+ */
+ if (!(((mp->mnt_kern_flag & MNTK_PERMIT_UNMOUNT) != 0) && ((flags & MNT_FORCE) == 0))) {
+ /*
+ * Only root, or the user that did the original mount, is
+ * permitted to unmount this filesystem.
+ */
+ if ((mp->mnt_vfsstat.f_owner != kauth_cred_getuid(kauth_cred_get())) &&
+ (error = suser(kauth_cred_get(), &p->p_acflag)))
+ goto out;
+ }
+ /*
+ * Don't allow unmounting the root file system.
+ */
+ if (mp->mnt_flag & MNT_ROOTFS) {
+ error = EBUSY; /* the root is always busy */
+ goto out;
+ }
+
+#ifdef CONFIG_IMGSRC_ACCESS
+ if (mp->mnt_kern_flag & MNTK_BACKS_ROOT) {
+ error = EBUSY;
+ goto out;
+ }
+#endif /* CONFIG_IMGSRC_ACCESS */
+
+ return (dounmount(mp, flags, 1, ctx));
+
+out:
+ mount_drop(mp, 0);
+ return(error);
+}
+
+/*
+ * Do the actual file system unmount.
+ */
+int
+dounmount(struct mount *mp, int flags, int withref, vfs_context_t ctx)
+{
+ vnode_t coveredvp = (vnode_t)0;
+ int error;
+ int needwakeup = 0;
+ int forcedunmount = 0;
+ int lflags = 0;
+ struct vnode *devvp = NULLVP;
+#if CONFIG_TRIGGERS
+ proc_t p = vfs_context_proc(ctx);
+ int did_vflush = 0;
+ int pflags_save = 0;
+#endif /* CONFIG_TRIGGERS */
+
+ mount_lock(mp);
+
+ /*
+ * If an unmount is already in progress, just return EBUSY.
+ * Even a forced unmount cannot override it.
+ */
+ if (mp->mnt_lflag & MNT_LUNMOUNT) {
+ if (withref != 0)
+ mount_drop(mp, 1);
+ mount_unlock(mp);
+ return (EBUSY);
+ }
+
+ if (flags & MNT_FORCE) {
+ forcedunmount = 1;
+ mp->mnt_lflag |= MNT_LFORCE;
+ }
+
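+ /*
+ * With MNT_NOBLOCK set, flag the calling process P_NOREMOTEHANG so it
+ * is not left hanging on an unresponsive remote filesystem during the
+ * unmount; the previous flag state is saved in pflags_save so it can
+ * be restored later.
+ */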
+#if CONFIG_TRIGGERS
+ if (flags & MNT_NOBLOCK && p != kernproc)
+ pflags_save = OSBitOrAtomic(P_NOREMOTEHANG, &p->p_flag);
+#endif
+
+ mp->mnt_kern_flag |= MNTK_UNMOUNT;
+ mp->mnt_lflag |= MNT_LUNMOUNT;
+ mp->mnt_flag &= ~MNT_ASYNC;
+ /*
+ * Anyone currently in the fast path who trips over the cached
+ * rootvp will be dumped out and forced into the slow path to
+ * regenerate a new cached value.
+ */
+ mp->mnt_realrootvp = NULLVP;
+ mount_unlock(mp);
+
+ if (forcedunmount && (flags & MNT_LNOSUB) == 0) {
+ /*
+ * Force unmount any submounts of this filesystem.
+ * If any unmounts fail, just leave them dangling.
+ * Passing MNT_LNOSUB avoids recursion.
+ */
+ (void) dounmount_submounts(mp, flags | MNT_LNOSUB, ctx);
+ }
+
+ /*
+ * Taking the name_cache_lock exclusively will ensure that everyone
+ * is out of the fast path who might be trying to use a now-stale
+ * copy of vp->v_mountedhere->mnt_realrootvp.  Bumping
+ * mount_generation causes the cached values to be invalidated.
+ */
+ name_cache_lock();
+ mount_generation++;
+ name_cache_unlock();
+
+ lck_rw_lock_exclusive(&mp->mnt_rwlock);
+ if (withref != 0)
+ mount_drop(mp, 0);
+#if CONFIG_FSE
+ fsevent_unmount(mp); /* has to come first! */
+#endif
+ error = 0;
+ if (forcedunmount == 0) {
+ ubc_umount(mp); /* release cached vnodes */
+ if ((mp->mnt_flag & MNT_RDONLY) == 0) {
+ error = VFS_SYNC(mp, MNT_WAIT, ctx);