/*
- * Copyright (c) 1995-2012 Apple Inc. All rights reserved.
+ * Copyright (c) 1995-2017 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
- *
+ *
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
#include <sys/fsctl.h>
#include <sys/ubc_internal.h>
#include <sys/disk.h>
+#include <sys/content_protection.h>
+#include <sys/clonefile.h>
+#include <sys/snapshot.h>
+#include <sys/priv.h>
#include <machine/cons.h>
#include <machine/limits.h>
#include <miscfs/specfs/specdev.h>
+#include <vfs/vfs_disk_conditioner.h>
+
#include <security/audit/audit.h>
#include <bsm/audit_kevents.h>
#include <kern/task.h>
#include <vm/vm_pageout.h>
+#include <vm/vm_protos.h>
#include <libkern/OSAtomic.h>
#include <pexpert/pexpert.h>
+#include <IOKit/IOBSD.h>
+
+#if ROUTEFS
+#include <miscfs/routefs/routefs.h>
+#endif /* ROUTEFS */
#if CONFIG_MACF
#include <security/mac.h>
#include <security/mac_framework.h>
#endif
-#if CONFIG_FSE
+#if CONFIG_FSE
#define GET_PATH(x) \
- (x) = get_pathbuff();
+ (x) = get_pathbuff();
#define RELEASE_PATH(x) \
release_pathbuff(x);
-#else
+#else
#define GET_PATH(x) \
- MALLOC_ZONE((x), char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
+ MALLOC_ZONE((x), char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
#define RELEASE_PATH(x) \
FREE_ZONE((x), MAXPATHLEN, M_NAMEI);
#endif /* CONFIG_FSE */
+#ifndef HFS_GET_BOOT_INFO
+#define HFS_GET_BOOT_INFO (FCNTL_FS_SPECIFIC_BASE + 0x00004)
+#endif
+
+#ifndef HFS_SET_BOOT_INFO
+#define HFS_SET_BOOT_INFO (FCNTL_FS_SPECIFIC_BASE + 0x00005)
+#endif
+
+#ifndef APFSIOC_REVERT_TO_SNAPSHOT
+#define APFSIOC_REVERT_TO_SNAPSHOT _IOW('J', 1, u_int64_t)
+#endif
+
+extern void disk_conditioner_unmount(mount_t mp);
+
/* struct for checkdirs iteration */
struct cdirargs {
vnode_t olddp;
static int getutimes(user_addr_t usrtvp, struct timespec *tsp);
static int setutimes(vfs_context_t ctx, vnode_t vp, const struct timespec *ts, int nullflag);
static int sync_callback(mount_t, void *);
-static int munge_statfs(struct mount *mp, struct vfsstatfs *sfsp,
- user_addr_t bufp, int *sizep, boolean_t is_64_bit,
+static int munge_statfs(struct mount *mp, struct vfsstatfs *sfsp,
+ user_addr_t bufp, int *sizep, boolean_t is_64_bit,
boolean_t partial_copy);
static int statfs64_common(struct mount *mp, struct vfsstatfs *sfsp,
user_addr_t bufp);
int prepare_coveredvp(vnode_t vp, vfs_context_t ctx, struct componentname *cnp, const char *fsname, boolean_t skip_auth);
+struct fd_vn_data * fg_vn_data_alloc(void);
+
+/*
+ * Max retries for ENOENT returns from vn_authorize_{rmdir, unlink, rename}
+ * Concurrent lookups (or lookups by ids) on hard links can cause the
+ * vn_getpath (which does not re-enter the filesystem as vn_getpath_fsenter
+ * does) to return ENOENT as the path cannot be returned from the name cache
+ * alone. We have no option but to retry and hope to get one namei->reverse path
+ * generation done without an intervening lookup, lookup by id on the hard link
+ * item. This is only an issue for MAC hooks which cannot reenter the filesystem
+ * which currently are the MAC hooks for rename, unlink and rmdir.
+ */
+#define MAX_AUTHORIZE_ENOENT_RETRIES 1024
+
+static int rmdirat_internal(vfs_context_t, int, user_addr_t, enum uio_seg);
+
+static int fsgetpath_internal(vfs_context_t, int, uint64_t, vm_size_t, caddr_t, int *);
+
#ifdef CONFIG_IMGSRC_ACCESS
static int authorize_devpath_and_update_mntfromname(mount_t mp, user_addr_t devpath, vnode_t *devvpp, vfs_context_t ctx);
static int place_mount_and_checkdirs(mount_t mp, vnode_t vp, vfs_context_t ctx);
static int relocate_imageboot_source(vnode_t pvp, vnode_t vp, struct componentname *cnp, const char *fsname, vfs_context_t ctx, boolean_t is64bit, user_addr_t fsmountargs, boolean_t by_index);
#endif /* CONFIG_IMGSRC_ACCESS */
+//snapshot functions
+#if CONFIG_MNT_ROOTSNAP
+static int snapshot_root(int dirfd, user_addr_t name, uint32_t flags, vfs_context_t ctx);
+#else
+static int snapshot_root(int dirfd, user_addr_t name, uint32_t flags, vfs_context_t ctx) __attribute__((unused));
+#endif
+
int (*union_dircheckp)(struct vnode **, struct fileproc *, vfs_context_t);
__private_extern__
int sync_internal(void);
__private_extern__
-int unlink1(vfs_context_t, struct nameidata *, int);
+int unlink1(vfs_context_t, vnode_t, user_addr_t, enum uio_seg, int);
+
+extern lck_grp_t *fd_vn_lck_grp;
+extern lck_grp_attr_t *fd_vn_lck_grp_attr;
+extern lck_attr_t *fd_vn_lck_attr;
/*
* incremented each time a mount or unmount operation occurs
extern const struct fileops vnops;
#if CONFIG_APPLEDOUBLE
-extern errno_t rmdir_remove_orphaned_appleDouble(vnode_t, vfs_context_t, int *);
+extern errno_t rmdir_remove_orphaned_appleDouble(vnode_t, vfs_context_t, int *);
#endif /* CONFIG_APPLEDOUBLE */
/*
* Virtual File System System Calls
*/
-#if NFSCLIENT
+#if NFSCLIENT || DEVFS || ROUTEFS
/*
* Private in-kernel mounting spi (NFS only, not exported)
*/
boolean_t did_namei;
int error;
- NDINIT(&nd, LOOKUP, OP_MOUNT, FOLLOW | AUDITVNPATH1 | WANTPARENT,
+ NDINIT(&nd, LOOKUP, OP_MOUNT, FOLLOW | AUDITVNPATH1 | WANTPARENT,
UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
/*
return (error);
}
-#endif /* NFSCLIENT */
+#endif /* NFSCLIENT || DEVFS || ROUTEFS */
/*
* Mount a file system.
return (__mac_mount(p, &muap, retval));
}
+/*
+ * fmount: variant of mount(2) that names the directory to be covered by an
+ * open file descriptor (uap->fd) rather than by a path.
+ *
+ * Parameters:	p	(unused) calling process
+ *		uap	user arguments: fd, type, flags, data
+ *		retval	(unused)
+ *
+ * Returns:	0	Success
+ *		ENOTSUP	imgsrc/rootfs flags are not supported here
+ *		EPERM	union mounts are not permitted via fmount
+ *		EINVAL	the covered vnode has no resolvable parent
+ *		other	errors from copyinstr/file_vnode/vnode_getwithref/
+ *			vn_getpath/mount_common
+ */
+int
+fmount(__unused proc_t p, struct fmount_args *uap, __unused int32_t *retval)
+{
+ struct componentname cn;
+ vfs_context_t ctx = vfs_context_current();
+ size_t dummy = 0;
+ int error;
+ int flags = uap->flags;
+ char fstypename[MFSNAMELEN];
+ char *labelstr = NULL; /* regular mount call always sets it to NULL for __mac_mount() */
+ vnode_t pvp;
+ vnode_t vp;
+
+ AUDIT_ARG(fd, uap->fd);
+ AUDIT_ARG(fflags, flags);
+ /* fstypename will get audited by mount_common */
+
+ /* Sanity check the flags */
+ if (flags & (MNT_IMGSRC_BY_INDEX|MNT_ROOTFS)) {
+ return (ENOTSUP);
+ }
+
+ if (flags & MNT_UNION) {
+ return (EPERM);
+ }
+
+ /* Copy in the filesystem type name; the returned length is unused */
+ error = copyinstr(uap->type, fstypename, MFSNAMELEN, &dummy);
+ if (error) {
+ return (error);
+ }
+
+ /* Translate the descriptor into the vnode that will be covered */
+ if ((error = file_vnode(uap->fd, &vp)) != 0) {
+ return (error);
+ }
+
+ if ((error = vnode_getwithref(vp)) != 0) {
+ file_drop(uap->fd);
+ return (error);
+ }
+
+ /* mount_common() needs the parent of the covered vnode as well */
+ pvp = vnode_getparent(vp);
+ if (pvp == NULL) {
+ vnode_put(vp);
+ file_drop(uap->fd);
+ return (EINVAL);
+ }
+
+ /*
+ * Reconstruct the mount-on path into a componentname buffer;
+ * mount_common() records it (e.g. as f_mntonname).
+ */
+ memset(&cn, 0, sizeof(struct componentname));
+ MALLOC(cn.cn_pnbuf, char *, MAXPATHLEN, M_TEMP, M_WAITOK);
+ cn.cn_pnlen = MAXPATHLEN;
+
+ if((error = vn_getpath(vp, cn.cn_pnbuf, &cn.cn_pnlen)) != 0) {
+ FREE(cn.cn_pnbuf, M_TEMP);
+ vnode_put(pvp);
+ vnode_put(vp);
+ file_drop(uap->fd);
+ return (error);
+ }
+
+ error = mount_common(fstypename, pvp, vp, &cn, uap->data, flags, 0, labelstr, FALSE, ctx);
+
+ /* Release the path buffer, both iocounts, and the fd reference */
+ FREE(cn.cn_pnbuf, M_TEMP);
+ vnode_put(pvp);
+ vnode_put(vp);
+ file_drop(uap->fd);
+
+ return (error);
+}
+
void
-vfs_notify_mount(vnode_t pdvp)
+vfs_notify_mount(vnode_t pdvp)
{
vfs_event_signal(NULL, VQ_MOUNT, (intptr_t)NULL);
lock_vnode_and_post(pdvp, NOTE_WRITE);
*
* Parameters: p Process requesting the mount
* uap User argument descriptor (see below)
- * retval (ignored)
+ * retval (ignored)
*
* Indirect: uap->type Filesystem type
* uap->path Path to mount
- * uap->data Mount arguments
- * uap->mac_p MAC info
+ * uap->data Mount arguments
+ * uap->mac_p MAC info
* uap->flags Mount flags
- *
+ *
*
* Returns: 0 Success
* !0 Not success
char *labelstr = NULL;
int flags = uap->flags;
int error;
-#if CONFIG_IMGSRC_ACCESS || CONFIG_MACF
+#if CONFIG_IMGSRC_ACCESS || CONFIG_MACF
boolean_t is_64bit = IS_64BIT_PROCESS(p);
#else
#pragma unused(p)
/*
* Get the vnode to be covered
*/
- NDINIT(&nd, LOOKUP, OP_MOUNT, NOTRIGGER | FOLLOW | AUDITVNPATH1 | WANTPARENT,
+ NDINIT(&nd, LOOKUP, OP_MOUNT, FOLLOW | AUDITVNPATH1 | WANTPARENT,
UIO_USERSPACE, uap->path, ctx);
error = namei(&nd);
if (error) {
need_nameidone = 1;
vp = nd.ni_vp;
pvp = nd.ni_dvp;
-
+
#ifdef CONFIG_IMGSRC_ACCESS
/* Mounting image source cannot be batched with other operations */
if (flags == MNT_IMGSRC_BY_INDEX) {
AUDIT_ARG(fflags, flags);
+#if SECURE_KERNEL
+ if (flags & MNT_UNION) {
+ /* No union mounts on release kernels */
+ error = EPERM;
+ goto out;
+ }
+#endif
+
if ((vp->v_flag & VROOT) &&
(vp->v_mount->mnt_flag & MNT_ROOTFS)) {
if (!(flags & MNT_UNION)) {
flags |= MNT_UPDATE;
}
else {
- /*
+ /*
* For a union mount on '/', treat it as fresh
- * mount instead of update.
- * Otherwise, union mouting on '/' used to panic the
- * system before, since mnt_vnodecovered was found to
- * be NULL for '/' which is required for unionlookup
+ * mount instead of update.
+ * Otherwise, union mounting on '/' used to panic the
+ * system before, since mnt_vnodecovered was found to
+ * be NULL for '/' which is required for unionlookup
* after it gets ENOENT on union mount.
*/
flags = (flags & ~(MNT_UPDATE));
}
-#if 0
-//#ifdef SECURE_KERNEL
+#if SECURE_KERNEL
if ((flags & MNT_RDONLY) == 0) {
/* Release kernels are not allowed to mount "/" as rw */
error = EPERM;
- goto out;
+ goto out;
}
-//#endif
#endif
/*
* See 7392553 for more details on why this check exists.
* Suffice to say: If this check is ON and something tries
* to mount the rootFS RW, we'll turn off the codesign
- * bitmap optimization.
- */
+ * bitmap optimization.
+ */
#if CHECK_CS_VALIDATION_BITMAP
if ((flags & MNT_RDONLY) == 0 ) {
root_fs_upgrade_try = TRUE;
/*
* common mount implementation (final stage of mounting)
-
+
* Arguments:
* fstypename file system type (ie it's vfs name)
* pvp parent of covered vnode
* If content protection is enabled, update mounts are not
* allowed to turn it off.
*/
- if ((mp->mnt_flag & MNT_CPROTECT) &&
+ if ((mp->mnt_flag & MNT_CPROTECT) &&
((flags & MNT_CPROTECT) == 0)) {
error = EINVAL;
goto out1;
}
-#ifdef CONFIG_IMGSRC_ACCESS
+#ifdef CONFIG_IMGSRC_ACCESS
/* Can't downgrade the backer of the root FS */
if ((mp->mnt_kern_flag & MNTK_BACKS_ROOT) &&
(!vfs_isrdonly(mp)) && (flags & MNT_RDONLY)) {
vfsp = mp->mnt_vtable;
goto update;
}
+
/*
* For non-root users, silently enforce MNT_NOSUID and MNT_NODEV, and
* MNT_NOEXEC if mount point is already MNT_NOEXEC.
mp->mnt_vtable = vfsp;
//mp->mnt_stat.f_type = vfsp->vfc_typenum;
mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
- strncpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSTYPENAMELEN);
- strncpy(mp->mnt_vfsstat.f_mntonname, cnp->cn_pnbuf, MAXPATHLEN);
+ strlcpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSTYPENAMELEN);
+ strlcpy(mp->mnt_vfsstat.f_mntonname, cnp->cn_pnbuf, MAXPATHLEN);
mp->mnt_vnodecovered = vp;
mp->mnt_vfsstat.f_owner = kauth_cred_getuid(vfs_context_ucred(ctx));
mp->mnt_throttle_mask = LOWPRI_MAX_NUM_DEV - 1;
/* XXX 3762912 hack to support HFS filesystem 'owner' - filesystem may update later */
vfs_setowner(mp, KAUTH_UID_NONE, KAUTH_GID_NONE);
-#if NFSCLIENT
+#if NFSCLIENT || DEVFS || ROUTEFS
if (kernelmount)
mp->mnt_kern_flag |= MNTK_KERNEL_MOUNT;
if ((internal_flags & KERNEL_MOUNT_PERMIT_UNMOUNT) != 0)
mp->mnt_kern_flag |= MNTK_PERMIT_UNMOUNT;
-#endif /* NFSCLIENT */
+#endif /* NFSCLIENT || DEVFS || ROUTEFS */
update:
+
/*
* Set the mount level flags.
*/
MNT_UNKNOWNPERMISSIONS | MNT_DONTBROWSE |
MNT_AUTOMOUNTED | MNT_DEFWRITE | MNT_NOATIME |
MNT_QUARANTINE | MNT_CPROTECT);
+
+#if SECURE_KERNEL
+#if !CONFIG_MNT_SUID
+ /*
+ * On release builds of iOS based platforms, always enforce NOSUID on
+ * all mounts. We do this here because we can catch update mounts as well as
+ * non-update mounts in this case.
+ */
+ mp->mnt_flag |= (MNT_NOSUID);
+#endif
+#endif
+
mp->mnt_flag |= flags & (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC |
MNT_UNKNOWNPERMISSIONS | MNT_DONTBROWSE |
/*
* Process device path for local file systems if requested
*/
- if (vfsp->vfc_vfsflags & VFC_VFSLOCALARGS) {
+ if (vfsp->vfc_vfsflags & VFC_VFSLOCALARGS &&
+ !(internal_flags & KERNEL_MOUNT_SNAPSHOT)) {
if (vfs_context_is64bit(ctx)) {
if ( (error = copyin(fsmountargs, (caddr_t)&devpath, sizeof(devpath))) )
- goto out1;
+ goto out1;
fsmountargs += sizeof(devpath);
} else {
user32_addr_t tmp;
if ( (error = copyin(fsmountargs, (caddr_t)&tmp, sizeof(tmp))) )
- goto out1;
+ goto out1;
/* munge into LP64 addr */
devpath = CAST_USER_ADDR_T(tmp);
fsmountargs += sizeof(tmp);
if ( (error = namei(&nd)) )
goto out1;
- strncpy(mp->mnt_vfsstat.f_mntfromname, nd.ni_cnd.cn_pnbuf, MAXPATHLEN);
+ strlcpy(mp->mnt_vfsstat.f_mntfromname, nd.ni_cnd.cn_pnbuf, MAXPATHLEN);
devvp = nd.ni_vp;
nameidone(&nd);
*/
if ( (error = vfs_mountedon(devvp)) )
goto out3;
-
+
if (vcount(devvp) > 1 && !(vfs_flags(mp) & MNT_ROOTFS)) {
error = EBUSY;
goto out3;
vnode_getalways(device_vnode);
if (suser(vfs_context_ucred(ctx), NULL) &&
- (error = vnode_authorize(device_vnode, NULL,
+ (error = vnode_authorize(device_vnode, NULL,
KAUTH_VNODE_READ_DATA | KAUTH_VNODE_WRITE_DATA,
ctx)) != 0) {
vnode_put(device_vnode);
/*
* Mount the filesystem.
*/
- error = VFS_MOUNT(mp, device_vnode, fsmountargs, ctx);
+ if (internal_flags & KERNEL_MOUNT_SNAPSHOT) {
+ error = VFS_IOCTL(mp, VFSIOC_MOUNT_SNAPSHOT,
+ (caddr_t)fsmountargs, 0, ctx);
+ } else {
+ error = VFS_MOUNT(mp, device_vnode, fsmountargs, ctx);
+ }
if (flags & MNT_UPDATE) {
if (mp->mnt_kern_flag & MNTK_WANTRDWR)
/* Unmount the filesystem as cdir/rdirs cannot be updated */
goto out4;
}
- /*
- * there is no cleanup code here so I have made it void
+ /*
+ * there is no cleanup code here so I have made it void
* we need to revisit this
*/
(void)VFS_START(mp, 0, ctx);
VFSATTR_INIT(&vfsattr);
VFSATTR_WANTED(&vfsattr, f_capabilities);
if (strncmp(mp->mnt_vfsstat.f_fstypename, "webdav", sizeof("webdav")) != 0 &&
- vfs_getattr(mp, &vfsattr, ctx) == 0 &&
+ vfs_getattr(mp, &vfsattr, ctx) == 0 &&
VFSATTR_IS_SUPPORTED(&vfsattr, f_capabilities)) {
if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR) &&
(vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR)) {
/* Legacy MNT_DOVOLFS flag also implies path from id lookups. */
mp->mnt_kern_flag |= MNTK_PATH_FROM_ID;
}
+
+ if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_DIR_HARDLINKS) &&
+ (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_DIR_HARDLINKS)) {
+ mp->mnt_kern_flag |= MNTK_DIR_HARDLINKS;
+ }
}
if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR) {
mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
* defaults will have been set, so no reason to bail or care
*/
vfs_init_io_attributes(device_vnode, mp);
- }
+ }
/* Now that mount is setup, notify the listeners */
vfs_notify_mount(pvp);
+ IOBSDMountChange(mp, kIOMountChangeMount);
+
} else {
/* If we fail a fresh mount, there should be no vnodes left hooked into the mountpoint. */
if (mp->mnt_vnodelist.tqh_first != NULL) {
- panic("mount_common(): mount of %s filesystem failed with %d, but vnode list is not empty.",
+ panic("mount_common(): mount of %s filesystem failed with %d, but vnode list is not empty.",
mp->mnt_vtable->vfc_name, error);
}
}
lck_rw_done(&mp->mnt_rwlock);
is_rwlock_locked = FALSE;
-
+
/*
* if we get here, we have a mount structure that needs to be freed,
* but since the coveredvp hasn't yet been updated to point at it,
/* Error condition exits */
out4:
(void)VFS_UNMOUNT(mp, MNT_FORCE, ctx);
-
- /*
+
+ /*
* If the mount has been placed on the covered vp,
* it may have been discovered by now, so we have
* to treat this just like an unmount
if (is_rwlock_locked == TRUE) {
lck_rw_done(&mp->mnt_rwlock);
}
-
+
if (mntalloc) {
if (mp->mnt_crossref)
mount_dropcrossref(mp, vp, 0);
return(error);
}
-/*
+/*
* Flush in-core data, check for competing mount attempts,
* and set VMOUNT
*/
VATTR_WANTED(&va, va_uid);
if ((error = vnode_getattr(vp, &va, ctx)) ||
(va.va_uid != kauth_cred_getuid(vfs_context_ucred(ctx)) &&
- (!vfs_context_issuser(ctx)))) {
+ (!vfs_context_issuser(ctx)))) {
error = EPERM;
goto out;
}
#define IMGSRC_DEBUG(args...) printf(args)
#else
#define IMGSRC_DEBUG(args...) do { } while(0)
-#endif
+#endif
static int
authorize_devpath_and_update_mntfromname(mount_t mp, user_addr_t devpath, vnode_t *devvpp, vfs_context_t ctx)
* permitted to update it.
*/
if (mp->mnt_vfsstat.f_owner != kauth_cred_getuid(vfs_context_ucred(ctx)) &&
- (!vfs_context_issuser(ctx))) {
+ (!vfs_context_issuser(ctx))) {
error = EPERM;
goto out;
}
return error;
}
-static void
+static void
mount_end_update(mount_t mp)
{
lck_rw_done(&mp->mnt_rwlock);
}
static int
-relocate_imageboot_source(vnode_t pvp, vnode_t vp, struct componentname *cnp,
- const char *fsname, vfs_context_t ctx,
+relocate_imageboot_source(vnode_t pvp, vnode_t vp, struct componentname *cnp,
+ const char *fsname, vfs_context_t ctx,
boolean_t is64bit, user_addr_t fsmountargs, boolean_t by_index)
{
int error;
goto out0;
}
- /*
+ /*
* It can only be moved once. Flag is set under the rwlock,
* so we're now safe to proceed.
*/
IMGSRC_DEBUG("Already moved [2]\n");
goto out1;
}
-
-
+
+
IMGSRC_DEBUG("Preparing coveredvp.\n");
/* Mark covered vnode as mount in progress, authorize placing mount on top */
IMGSRC_DEBUG("Preparing coveredvp failed with %d.\n", error);
goto out1;
}
-
+
IMGSRC_DEBUG("Covered vp OK.\n");
/* Sanity check the name caller has provided */
}
}
- /*
+ /*
* Place mp on top of vnode, ref the vnode, call checkdirs(),
- * and increment the name cache's mount generation
+ * and increment the name cache's mount generation
*/
IMGSRC_DEBUG("About to call place_mount_and_checkdirs().\n");
placed = TRUE;
- strncpy(old_mntonname, mp->mnt_vfsstat.f_mntonname, MAXPATHLEN);
- strncpy(mp->mnt_vfsstat.f_mntonname, cnp->cn_pnbuf, MAXPATHLEN);
+ strlcpy(old_mntonname, mp->mnt_vfsstat.f_mntonname, MAXPATHLEN);
+ strlcpy(mp->mnt_vfsstat.f_mntonname, cnp->cn_pnbuf, MAXPATHLEN);
/* Forbid future moves */
mount_lock(mp);
return 0;
out3:
- strncpy(mp->mnt_vfsstat.f_mntonname, old_mntonname, MAXPATHLEN);
+ strlcpy(mp->mnt_vfsstat.f_mntonname, old_mntonname, MAXPATHLEN);
mount_lock(mp);
mp->mnt_kern_flag &= ~(MNTK_HAS_MOVED);
mount_unlock(mp);
out2:
- /*
+ /*
* Placing the mp on the vnode clears VMOUNT,
- * so cleanup is different after that point
+ * so cleanup is different after that point
*/
if (placed) {
/* Rele the vp, clear VMOUNT and v_mountedhere */
if (strncmp(mp->mnt_vfsstat.f_fstypename, "hfs", sizeof("hfs")) != 0 ) {
return;
}
- /*
+ /*
* Enable filesystem disk quotas if necessary.
* We ignore errors as this should not interfere with final mount
*/
static int
-checkdirs_callback(proc_t p, void * arg)
+checkdirs_callback(proc_t p, void * arg)
{
struct cdirargs * cdrp = (struct cdirargs * )arg;
vnode_t olddp = cdrp->olddp;
vnode_t tvp;
int err;
struct cdirargs cdr;
- struct uthread * uth = get_bsdthread_info(current_thread());
if (olddp->v_usecount == 1)
return(0);
- if (uth != (struct uthread *)0)
- uth->uu_notrigger = 1;
err = VFS_ROOT(olddp->v_mountedhere, &newdp, ctx);
- if (uth != (struct uthread *)0)
- uth->uu_notrigger = 0;
if (err != 0) {
#if DIAGNOSTIC
struct nameidata nd;
vfs_context_t ctx = vfs_context_current();
- NDINIT(&nd, LOOKUP, OP_UNMOUNT, NOTRIGGER | FOLLOW | AUDITVNPATH1,
+ NDINIT(&nd, LOOKUP, OP_UNMOUNT, FOLLOW | AUDITVNPATH1,
UIO_USERSPACE, uap->path, ctx);
error = namei(&nd);
if (error)
}
int
-vfs_unmountbyfsid(fsid_t * fsid, int flags, vfs_context_t ctx)
+vfs_unmountbyfsid(fsid_t *fsid, int flags, vfs_context_t ctx)
{
mount_t mp;
}
/*
- * Skip authorization if the mount is tagged as permissive and
+ * Skip authorization if the mount is tagged as permissive and
* this is not a forced-unmount attempt.
*/
if (!(((mp->mnt_kern_flag & MNTK_PERMIT_UNMOUNT) != 0) && ((flags & MNT_FORCE) == 0))) {
int pflags_save = 0;
#endif /* CONFIG_TRIGGERS */
- if (flags & MNT_FORCE)
- forcedunmount = 1;
+#if CONFIG_FSE
+ if (!(flags & MNT_FORCE)) {
+ fsevent_unmount(mp, ctx); /* has to come first! */
+ }
+#endif
mount_lock(mp);
- /* XXX post jaguar fix LK_DRAIN - then clean this up */
- if ((flags & MNT_FORCE)) {
- mp->mnt_kern_flag |= MNTK_FRCUNMOUNT;
- mp->mnt_lflag |= MNT_LFORCE;
- }
+
+ /*
+ * If already an unmount in progress just return EBUSY.
+ * Even a forced unmount cannot override.
+ */
if (mp->mnt_lflag & MNT_LUNMOUNT) {
- mp->mnt_lflag |= MNT_LWAIT;
- if(withref != 0)
+ if (withref != 0)
mount_drop(mp, 1);
- msleep((caddr_t)mp, &mp->mnt_mlock, (PVFS | PDROP), "dounmount", NULL);
- /*
- * The prior unmount attempt has probably succeeded.
- * Do not dereference mp here - returning EBUSY is safest.
- */
+ mount_unlock(mp);
return (EBUSY);
}
+ if (flags & MNT_FORCE) {
+ forcedunmount = 1;
+ mp->mnt_lflag |= MNT_LFORCE;
+ }
+
#if CONFIG_TRIGGERS
if (flags & MNT_NOBLOCK && p != kernproc)
pflags_save = OSBitOrAtomic(P_NOREMOTEHANG, &p->p_flag);
*/
mp->mnt_realrootvp = NULLVP;
mount_unlock(mp);
-
+
+ if (forcedunmount && (flags & MNT_LNOSUB) == 0) {
+ /*
+ * Force unmount any mounts in this filesystem.
+ * If any unmounts fail - just leave them dangling.
+ * Avoids recursion.
+ */
+ (void) dounmount_submounts(mp, flags | MNT_LNOSUB, ctx);
+ }
+
/*
* taking the name_cache_lock exclusively will
* insure that everyone is out of the fast path who
lck_rw_lock_exclusive(&mp->mnt_rwlock);
if (withref != 0)
mount_drop(mp, 0);
-#if CONFIG_FSE
- fsevent_unmount(mp); /* has to come first! */
-#endif
error = 0;
if (forcedunmount == 0) {
ubc_umount(mp); /* release cached vnodes */
}
}
+ /* free disk_conditioner_info structure for this mount */
+ disk_conditioner_unmount(mp);
+
+ IOBSDMountChange(mp, kIOMountChangeUnmount);
+
#if CONFIG_TRIGGERS
vfs_nested_trigger_unmounts(mp, flags, ctx);
did_vflush = 1;
-#endif
+#endif
if (forcedunmount)
lflags |= FORCECLOSE;
error = vflush(mp, NULLVP, SKIPSWAP | SKIPSYSTEM | SKIPROOT | lflags);
/* mark the mount point hook in the vp but not drop the ref yet */
if ((coveredvp = mp->mnt_vnodecovered) != NULLVP) {
- vnode_getwithref(coveredvp);
+ /*
+ * The covered vnode needs special handling. Trying to get an
+ * iocount must not block here as this may lead to deadlocks
+ * if the Filesystem to which the covered vnode belongs is
+ * undergoing forced unmounts. Since we hold a usecount, the
+ * vnode cannot be reused (it can, however, still be terminated)
+ */
+ vnode_getalways(coveredvp);
vnode_lock_spin(coveredvp);
mp->mnt_crossref++;
coveredvp->v_mountedhere = (struct mount *)0;
+ CLR(coveredvp->v_flag, VMOUNT);
vnode_unlock(coveredvp);
vnode_put(coveredvp);
out:
if (mp->mnt_lflag & MNT_LWAIT) {
mp->mnt_lflag &= ~MNT_LWAIT;
- needwakeup = 1;
+ needwakeup = 1;
}
#if CONFIG_TRIGGERS
OSBitAndAtomic(~((uint32_t) P_NOREMOTEHANG), &p->p_flag);
}
- /*
+ /*
* Callback and context are set together under the mount lock, and
- * never cleared, so we're safe to examine them here, drop the lock,
+ * never cleared, so we're safe to examine them here, drop the lock,
* and call out.
*/
if (mp->mnt_triggercallback != NULL) {
} else {
mount_unlock(mp);
}
-#else
+#else
mount_unlock(mp);
#endif /* CONFIG_TRIGGERS */
if (!error) {
if ((coveredvp != NULLVP)) {
- vnode_t pvp;
+ vnode_t pvp = NULLVP;
- vnode_getwithref(coveredvp);
- pvp = vnode_getparent(coveredvp);
- vnode_rele(coveredvp);
+ /*
+ * The covered vnode needs special handling. Trying to
+ * get an iocount must not block here as this may lead
+ * to deadlocks if the Filesystem to which the covered
+ * vnode belongs is undergoing forced unmounts. Since we
+ * hold a usecount, the vnode cannot be reused
+ * (it can, however, still be terminated).
+ */
+ vnode_getalways(coveredvp);
mount_dropcrossref(mp, coveredvp, 0);
+ /*
+ * We'll _try_ to detect if this really needs to be
+ * done. The coveredvp can only be in termination (or
+ * terminated) if the coveredvp's mount point is in a
+ * forced unmount (or has been) since we still hold the
+ * ref.
+ */
+ if (!vnode_isrecycled(coveredvp)) {
+ pvp = vnode_getparent(coveredvp);
#if CONFIG_TRIGGERS
- if (coveredvp->v_resolve)
- vnode_trigger_rearm(coveredvp, ctx);
-#endif
+ if (coveredvp->v_resolve) {
+ vnode_trigger_rearm(coveredvp, ctx);
+ }
+#endif
+ }
+
+ vnode_rele(coveredvp);
vnode_put(coveredvp);
+ coveredvp = NULLVP;
if (pvp) {
lock_vnode_and_post(pvp, NOTE_WRITE);
return (error);
}
+/*
+ * Unmount any mounts in this filesystem.
+ *
+ * Called (from dounmount) during a forced unmount of 'mp' with
+ * MNT_LNOSUB set to avoid recursion.  A snapshot of candidate submount
+ * fsids is taken under the mount list lock, then the submounts are
+ * unmounted in reverse (deepest-first) order with the lock dropped.
+ * Failed unmounts are deliberately ignored (left dangling).
+ */
+void
+dounmount_submounts(struct mount *mp, int flags, vfs_context_t ctx)
+{
+ mount_t smp;
+ fsid_t *fsids, fsid;
+ int fsids_sz;
+ int count = 0, i, m = 0;
+ vnode_t vp;
+
+ mount_list_lock();
+
+ // Get an array to hold the submounts fsids.
+ TAILQ_FOREACH(smp, &mountlist, mnt_list)
+ count++;
+ fsids_sz = count * sizeof(fsid_t);
+ /* M_NOWAIT: we hold the mount list lock; on failure just do nothing */
+ MALLOC(fsids, fsid_t *, fsids_sz, M_TEMP, M_NOWAIT);
+ if (fsids == NULL) {
+ mount_list_unlock();
+ goto out;
+ }
+ fsids[0] = mp->mnt_vfsstat.f_fsid; // Prime the pump
+
+ /*
+ * Fill the array with submount fsids.
+ * Since mounts are always added to the tail of the mount list, the
+ * list is always in mount order.
+ * For each mount check if the mounted-on vnode belongs to a
+ * mount that's already added to our array of mounts to be unmounted.
+ */
+ for (smp = TAILQ_NEXT(mp, mnt_list); smp; smp = TAILQ_NEXT(smp, mnt_list)) {
+ vp = smp->mnt_vnodecovered;
+ if (vp == NULL)
+ continue;
+ fsid = vnode_mount(vp)->mnt_vfsstat.f_fsid; // Underlying fsid
+ /* 'm' is the index of the last fsid collected so far */
+ for (i = 0; i <= m; i++) {
+ if (fsids[i].val[0] == fsid.val[0] &&
+ fsids[i].val[1] == fsid.val[1]) {
+ fsids[++m] = smp->mnt_vfsstat.f_fsid;
+ break;
+ }
+ }
+ }
+ mount_list_unlock();
+
+ // Unmount the submounts in reverse order. Ignore errors.
+ /* Index 0 is 'mp' itself, so the loop stops at i == 1 */
+ for (i = m; i > 0; i--) {
+ smp = mount_list_lookupby_fsid(&fsids[i], 0, 1);
+ if (smp) {
+ mount_ref(smp, 0);
+ mount_iterdrop(smp);
+ (void) dounmount(smp, flags, 1, ctx);
+ }
+ }
+out:
+ if (fsids)
+ FREE(fsids, M_TEMP);
+}
+
void
mount_dropcrossref(mount_t mp, vnode_t dp, int need_put)
{
panic("mount cross refs -ve");
if ((mp != dp->v_mountedhere) && (mp->mnt_crossref == 0)) {
-
+
if (need_put)
vnode_put_locked(dp);
vnode_unlock(dp);
*/
#if DIAGNOSTIC
int syncprt = 0;
-struct ctldebug debug0 = { "syncprt", &syncprt };
#endif
int print_vmpage_stat=0;
-static int
-sync_callback(mount_t mp, void * arg)
+/*
+ * vfs_iterate() callback that flushes a single mount.  Read-only mounts
+ * are skipped.  MNT_ASYNC is cleared for the duration of the VFS_SYNC so
+ * the flush is not deferred, then restored if it was set.
+ *
+ * NOTE(review): 'arg' is tagged __unused yet is still read to choose
+ * MNT_WAIT vs MNT_NOWAIT.  The attribute means "may be unused" so this is
+ * harmless, but confirm the tag is intentional.
+ */
+static int
+sync_callback(mount_t mp, __unused void *arg)
{
- int asyncflag;
-
 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
- asyncflag = mp->mnt_flag & MNT_ASYNC;
- mp->mnt_flag &= ~MNT_ASYNC;
- VFS_SYNC(mp, arg ? MNT_WAIT : MNT_NOWAIT, vfs_context_current());
- if (asyncflag)
-  mp->mnt_flag |= MNT_ASYNC;
+ int asyncflag = mp->mnt_flag & MNT_ASYNC;
+
+ mp->mnt_flag &= ~MNT_ASYNC;
+ VFS_SYNC(mp, arg ? MNT_WAIT : MNT_NOWAIT, vfs_context_kernel());
+ if (asyncflag)
+  mp->mnt_flag |= MNT_ASYNC;
 }
- return(VFS_RETURNED);
-}
+ return (VFS_RETURNED);
+}
/* ARGSUSED */
+/*
+ * sync(2): flush dirty data on every writable mounted filesystem.
+ * Passes a NULL arg to sync_callback, i.e. a non-blocking (MNT_NOWAIT)
+ * flush of each mount.
+ */
int
sync(__unused proc_t p, __unused struct sync_args *uap, __unused int32_t *retval)
{
- vfs_iterate(LK_NOWAIT, sync_callback, (void *)0);
+ vfs_iterate(LK_NOWAIT, sync_callback, NULL);
- if(print_vmpage_stat) {
+ if (print_vmpage_stat) {
+ vm_countdirtypages();
+ }
+
+#if DIAGNOSTIC
+ if (syncprt)
+ vfs_bufstats();
+#endif /* DIAGNOSTIC */
+ return 0;
+}
+
+typedef enum {
+ SYNC_ALL = 0,
+ SYNC_ONLY_RELIABLE_MEDIA = 1,
+ SYNC_ONLY_UNRELIABLE_MEDIA = 2
+} sync_type_t;
+
+/*
+ * vfs_iterate() callback for the two-pass flush done by sync_thread.
+ * When 'arg' is non-NULL it points to a sync_type_t selecting which class
+ * of media to flush on this pass; mounts of the other class are skipped.
+ * A mount counts as "reliable" when it is local and not a virtual device.
+ *
+ * Note: the media-class tests use '==' comparisons; an assignment here
+ * (sync_type = ...) would be truthy and silently skip all reliable mounts.
+ */
+static int
+sync_internal_callback(mount_t mp, void *arg)
+{
+ if (arg) {
+ int is_reliable = !(mp->mnt_kern_flag & MNTK_VIRTUALDEV) &&
+ (mp->mnt_flag & MNT_LOCAL);
+ sync_type_t sync_type = *((sync_type_t *)arg);
+
+ if ((sync_type == SYNC_ONLY_RELIABLE_MEDIA) && !is_reliable)
+ return (VFS_RETURNED);
+ else if ((sync_type == SYNC_ONLY_UNRELIABLE_MEDIA) && is_reliable)
+ return (VFS_RETURNED);
+ }
+
+ /* Delegate the flush; NULL arg => MNT_NOWAIT inside sync_callback */
+ (void)sync_callback(mp, NULL);
+
+ return (VFS_RETURNED);
+}
+
+int sync_thread_state = 0;
+int sync_timeout_seconds = 5;
+
+#define SYNC_THREAD_RUN 0x0001
+#define SYNC_THREAD_RUNNING 0x0002
+
+/*
+ * Worker thread behind sync_internal().  Loops as long as SYNC_THREAD_RUN
+ * is set; each iteration runs two vfs_iterate() passes over all mounts:
+ * reliable media first, then unreliable media (see
+ * sync_internal_callback).  On exit it wakes any waiters sleeping on
+ * &sync_thread_state before clearing SYNC_THREAD_RUNNING and releasing
+ * the mutex — see the comment below for why that ordering is required.
+ */
+static void
+sync_thread(__unused void *arg, __unused wait_result_t wr)
+{
+ sync_type_t sync_type;
+
+ lck_mtx_lock(sync_mtx_lck);
+ while (sync_thread_state & SYNC_THREAD_RUN) {
+ sync_thread_state &= ~SYNC_THREAD_RUN;
+ lck_mtx_unlock(sync_mtx_lck);
+
+ sync_type = SYNC_ONLY_RELIABLE_MEDIA;
+ vfs_iterate(LK_NOWAIT, sync_internal_callback, &sync_type);
+ sync_type = SYNC_ONLY_UNRELIABLE_MEDIA;
+ vfs_iterate(LK_NOWAIT, sync_internal_callback, &sync_type);
+
+ lck_mtx_lock(sync_mtx_lck);
+ }
+ /*
+ * This wakeup _has_ to be issued before the lock is released otherwise
+ * we may end up waking up a thread in sync_internal which is
+ * expecting a wakeup from a thread it just created and not from this
+ * thread which is about to exit.
+ */
+ wakeup(&sync_thread_state);
+ sync_thread_state &= ~SYNC_THREAD_RUNNING;
+ lck_mtx_unlock(sync_mtx_lck);
+
+ if (print_vmpage_stat) {
 vm_countdirtypages();
 }
 if (syncprt)
 vfs_bufstats();
#endif /* DIAGNOSTIC */
- return (0);
}
+/* Rate-limits the "sync timed out" console message (at most every 120s) */
+struct timeval sync_timeout_last_print = {0, 0};
+
/*
- * Change filesystem quotas.
+ * An in-kernel sync for power management to call.
+ * This function always returns within sync_timeout seconds.
 */
-#if QUOTA
-static int quotactl_funneled(proc_t p, struct quotactl_args *uap, int32_t *retval);
-
-int
-quotactl(proc_t p, struct quotactl_args *uap, int32_t *retval)
+__private_extern__ int
+sync_internal(void)
{
- boolean_t funnel_state;
+ thread_t thd;
 int error;
-
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- error = quotactl_funneled(p, uap, retval);
- thread_funnel_set(kernel_flock, funnel_state);
- return(error);
-}
+ int thread_created = FALSE;
+ struct timespec ts = {sync_timeout_seconds, 0};
-static int
-quotactl_funneled(proc_t p, struct quotactl_args *uap, __unused int32_t *retval)
+ /*
+ * Request a sync pass; spawn the worker thread only if one is not
+ * already running (SYNC_THREAD_RUNNING).
+ */
+ lck_mtx_lock(sync_mtx_lck);
+ sync_thread_state |= SYNC_THREAD_RUN;
+ if (!(sync_thread_state & SYNC_THREAD_RUNNING)) {
+ int kr;
+
+ sync_thread_state |= SYNC_THREAD_RUNNING;
+ kr = kernel_thread_start(sync_thread, NULL, &thd);
+ if (kr != KERN_SUCCESS) {
+ sync_thread_state &= ~SYNC_THREAD_RUNNING;
+ lck_mtx_unlock(sync_mtx_lck);
+ printf("sync_thread failed\n");
+ return (0);
+ }
+ thread_created = TRUE;
+ }
+
+ /*
+ * Wait (interruptibly, bounded by 'ts') for sync_thread's wakeup;
+ * PDROP releases sync_mtx_lck on return regardless of outcome.
+ */
+ error = msleep((caddr_t)&sync_thread_state, sync_mtx_lck,
+ (PVFS | PDROP | PCATCH), "sync_thread", &ts);
+ if (error) {
+ struct timeval now;
+
+ microtime(&now);
+ if (now.tv_sec - sync_timeout_last_print.tv_sec > 120) {
+ printf("sync timed out: %d sec\n", sync_timeout_seconds);
+ sync_timeout_last_print.tv_sec = now.tv_sec;
+ }
+ }
+
+ /* 'thd' is only valid when we started the thread above */
+ if (thread_created)
+ thread_deallocate(thd);
+
+ return (0);
+} /* end of sync_internal call */
+
+/*
+ * Change filesystem quotas.
+ */
+#if QUOTA
+int
+quotactl(proc_t p, struct quotactl_args *uap, __unused int32_t *retval)
{
struct mount *mp;
- int error, quota_cmd, quota_status;
+ int error, quota_cmd, quota_status = 0;
caddr_t datap;
size_t fnamelen;
struct nameidata nd;
vfs_context_t ctx = vfs_context_current();
- struct dqblk my_dqblk;
+ struct dqblk my_dqblk = {};
AUDIT_ARG(uid, uap->uid);
AUDIT_ARG(cmd, uap->cmd);
if (error == 0) {
if (proc_is64bit(p)) {
struct user_dqblk my_dqblk64;
+
+ memset(&my_dqblk64, 0, sizeof(my_dqblk64));
munge_dqblk(&my_dqblk, &my_dqblk64, TRUE);
error = copyout((caddr_t)&my_dqblk64, uap->arg, sizeof (my_dqblk64));
}
vfs_context_t ctx = vfs_context_current();
vnode_t vp;
- NDINIT(&nd, LOOKUP, OP_STATFS, NOTRIGGER | FOLLOW | AUDITVNPATH1,
+ NDINIT(&nd, LOOKUP, OP_STATFS, FOLLOW | AUDITVNPATH1,
UIO_USERSPACE, uap->path, ctx);
error = namei(&nd);
- if (error)
+ if (error != 0)
return (error);
vp = nd.ni_vp;
mp = vp->v_mount;
sp = &mp->mnt_vfsstat;
nameidone(&nd);
+#if CONFIG_MACF
+ error = mac_mount_check_stat(ctx, mp);
+ if (error != 0)
+ return (error);
+#endif
+
error = vfs_update_vfsstat(mp, ctx, VFS_USER_EVENT);
- if (error != 0) {
+ if (error != 0) {
vnode_put(vp);
return (error);
}
error = EBADF;
goto out;
}
+
+#if CONFIG_MACF
+ error = mac_mount_check_stat(vfs_context_current(), mp);
+ if (error != 0)
+ goto out;
+#endif
+
sp = &mp->mnt_vfsstat;
- if ((error = vfs_update_vfsstat(mp,vfs_context_current(),VFS_USER_EVENT)) != 0) {
+ if ((error = vfs_update_vfsstat(mp, vfs_context_current(), VFS_USER_EVENT)) != 0) {
goto out;
}
return (error);
}
-/*
- * Common routine to handle copying of statfs64 data to user space
+/*
+ * Common routine to handle copying of statfs64 data to user space
*/
-static int
+static int
statfs64_common(struct mount *mp, struct vfsstatfs *sfsp, user_addr_t bufp)
{
int error;
struct statfs64 sfs;
-
+
bzero(&sfs, sizeof(sfs));
sfs.f_bsize = sfsp->f_bsize;
return(error);
}
-/*
- * Get file system statistics in 64-bit mode
+/*
+ * Get file system statistics in 64-bit mode
*/
int
statfs64(__unused struct proc *p, struct statfs64_args *uap, __unused int32_t *retval)
vfs_context_t ctxp = vfs_context_current();
vnode_t vp;
- NDINIT(&nd, LOOKUP, OP_STATFS, NOTRIGGER | FOLLOW | AUDITVNPATH1,
+ NDINIT(&nd, LOOKUP, OP_STATFS, FOLLOW | AUDITVNPATH1,
UIO_USERSPACE, uap->path, ctxp);
error = namei(&nd);
- if (error)
+ if (error != 0)
return (error);
vp = nd.ni_vp;
mp = vp->v_mount;
sp = &mp->mnt_vfsstat;
nameidone(&nd);
+#if CONFIG_MACF
+ error = mac_mount_check_stat(ctxp, mp);
+ if (error != 0)
+ return (error);
+#endif
+
error = vfs_update_vfsstat(mp, ctxp, VFS_USER_EVENT);
- if (error != 0) {
+ if (error != 0) {
vnode_put(vp);
return (error);
}
return (error);
}
-/*
- * Get file system statistics in 64-bit mode
+/*
+ * Get file system statistics in 64-bit mode
*/
int
fstatfs64(__unused struct proc *p, struct fstatfs64_args *uap, __unused int32_t *retval)
error = EBADF;
goto out;
}
+
+#if CONFIG_MACF
+ error = mac_mount_check_stat(vfs_context_current(), mp);
+ if (error != 0)
+ goto out;
+#endif
+
sp = &mp->mnt_vfsstat;
if ((error = vfs_update_vfsstat(mp, vfs_context_current(), VFS_USER_EVENT)) != 0) {
goto out;
static int
getfsstat_callback(mount_t mp, void * arg)
{
-
+
struct getfsstat_struct *fstp = (struct getfsstat_struct *)arg;
struct vfsstatfs *sp;
int error, my_size;
vfs_context_t ctx = vfs_context_current();
if (fstp->sfsp && fstp->count < fstp->maxcount) {
+#if CONFIG_MACF
+ error = mac_mount_check_stat(ctx, mp);
+ if (error != 0) {
+ fstp->error = error;
+ return(VFS_RETURNED_DONE);
+ }
+#endif
sp = &mp->mnt_vfsstat;
/*
* If MNT_NOWAIT is specified, do not refresh the
*
* Parameters: p (ignored)
* uap User argument descriptor (see below)
- * retval Count of file system statistics (N stats)
+ * retval Count of file system statistics (N stats)
*
* Indirect: uap->bufsize Buffer size
* uap->macsize MAC info size
* uap->buf Buffer where information will be returned
* uap->mac MAC info
* uap->flags File system flags
- *
+ *
*
* Returns: 0 Success
* !0 Not success
fst.error = 0;
fst.maxcount = maxcount;
-
+
vfs_iterate(0, getfsstat_callback, &fst);
if (mp)
int error;
if (fstp->sfsp && fstp->count < fstp->maxcount) {
+#if CONFIG_MACF
+ error = mac_mount_check_stat(vfs_context_current(), mp);
+ if (error != 0) {
+ fstp->error = error;
+ return(VFS_RETURNED_DONE);
+ }
+#endif
sp = &mp->mnt_vfsstat;
/*
* If MNT_NOWAIT is specified, do not refresh the fsstat
return (0);
}
+/*
+ * Gets the vnode associated with the file descriptor passed
+ * as input.
+ *
+ * INPUT
+ * ctx - vfs context of caller
+ * fd - file descriptor for which vnode is required.
+ * vpp - Pointer to pointer to vnode to be returned.
+ *
+ * The vnode is returned with an iocount so any vnode obtained
+ * by this call needs a vnode_put
+ *
+ */
+int
+vnode_getfromfd(vfs_context_t ctx, int fd, vnode_t *vpp)
+{
+ int error;
+ vnode_t vp;
+ struct fileproc *fp;
+ proc_t p = vfs_context_proc(ctx);
+
+ *vpp = NULLVP;
+
+ error = fp_getfvp(p, fd, &fp, &vp);
+ if (error)
+ return (error);
+
+ error = vnode_getwithref(vp);
+ if (error) {
+ (void)fp_drop(p, fd, fp, 0);
+ return (error);
+ }
+
+ (void)fp_drop(p, fd, fp, 0);
+ *vpp = vp;
+ return (error);
+}
+
+/*
+ * Wrapper function around namei to start lookup from a directory
+ * specified by a file descriptor ni_dirfd.
+ *
+ * In addition to all the errors returned by namei, this call can
+ * return ENOTDIR if the file descriptor does not refer to a directory,
+ * and EBADF if the file descriptor is not valid.
+ */
+int
+nameiat(struct nameidata *ndp, int dirfd)
+{
+ if ((dirfd != AT_FDCWD) &&
+ !(ndp->ni_flag & NAMEI_CONTLOOKUP) &&
+ !(ndp->ni_cnd.cn_flags & USEDVP)) {
+ int error = 0;
+ char c;
+
+ if (UIO_SEG_IS_USER_SPACE(ndp->ni_segflg)) {
+ error = copyin(ndp->ni_dirp, &c, sizeof(char));
+ if (error)
+ return (error);
+ } else {
+ c = *((char *)(ndp->ni_dirp));
+ }
+
+ if (c != '/') {
+ vnode_t dvp_at;
+
+ error = vnode_getfromfd(ndp->ni_cnd.cn_context, dirfd,
+ &dvp_at);
+ if (error)
+ return (error);
+
+ if (vnode_vtype(dvp_at) != VDIR) {
+ vnode_put(dvp_at);
+ return (ENOTDIR);
+ }
+
+ ndp->ni_dvp = dvp_at;
+ ndp->ni_cnd.cn_flags |= USEDVP;
+ error = namei(ndp);
+ ndp->ni_cnd.cn_flags &= ~USEDVP;
+ vnode_put(dvp_at);
+ return (error);
+ }
+ }
+
+ return (namei(ndp));
+}
+
/*
* Change current working directory to a given file descriptor.
*/
vnode_t tvp;
vfs_context_t ctx = vfs_context_current();
- NDINIT(&nd, LOOKUP, OP_CHDIR, FOLLOW | AUDITVNPATH1,
+ NDINIT(&nd, LOOKUP, OP_CHDIR, FOLLOW | AUDITVNPATH1,
UIO_USERSPACE, uap->path, ctx);
error = change_dir(&nd, ctx);
if (error)
if ((error = suser(kauth_cred_get(), &p->p_acflag)))
return (error);
- NDINIT(&nd, LOOKUP, OP_CHROOT, FOLLOW | AUDITVNPATH1,
+ NDINIT(&nd, LOOKUP, OP_CHROOT, FOLLOW | AUDITVNPATH1,
UIO_USERSPACE, uap->path, ctx);
error = change_dir(&nd, ctx);
if (error)
return (error);
}
+/*
+ * Allocate per-fd vnode data (for directories) to be associated with the file glob.
+ */
+struct fd_vn_data *
+fg_vn_data_alloc(void)
+{
+ struct fd_vn_data *fvdata;
+
+ /* Allocate per fd vnode data */
+ MALLOC(fvdata, struct fd_vn_data *, (sizeof(struct fd_vn_data)),
+ M_FD_VN_DATA, M_WAITOK | M_ZERO);
+ lck_mtx_init(&fvdata->fv_lock, fd_vn_lck_grp, fd_vn_lck_attr);
+ return fvdata;
+}
+
+/*
+ * Free the vnode data (for directories) associated with the file glob.
+ */
+void
+fg_vn_data_free(void *fgvndata)
+{
+ struct fd_vn_data *fvdata = (struct fd_vn_data *)fgvndata;
+
+ if (fvdata->fv_buf)
+ FREE(fvdata->fv_buf, M_FD_DIRBUF);
+ lck_mtx_destroy(&fvdata->fv_lock, fd_vn_lck_grp);
+ FREE(fvdata, M_FD_VN_DATA);
+}
+
/*
* Check permissions, allocate an open file structure,
* and call the device open routine if any.
int flags, oflags;
int type, indx, error;
struct flock lf;
- int no_controlling_tty = 0;
- int deny_controlling_tty = 0;
- struct session *sessp = SESSION_NULL;
+ struct vfs_context context;
oflags = uflags;
if ((oflags & O_ACCMODE) == O_ACCMODE)
return(EINVAL);
+
flags = FFLAGS(uflags);
+ CLR(flags, FENCRYPTED);
+ CLR(flags, FUNENCRYPTED);
AUDIT_ARG(fflags, oflags);
AUDIT_ARG(mode, vap->va_mode);
}
uu->uu_dupfd = -indx - 1;
- if (!(p->p_flag & P_CONTROLT)) {
- sessp = proc_session(p);
- no_controlling_tty = 1;
- /*
- * If conditions would warrant getting a controlling tty if
- * the device being opened is a tty (see ttyopen in tty.c),
- * but the open flags deny it, set a flag in the session to
- * prevent it.
- */
- if (SESS_LEADER(p, sessp) &&
- sessp->s_ttyvp == NULL &&
- (flags & O_NOCTTY)) {
- session_lock(sessp);
- sessp->s_flags |= S_NOCTTY;
- session_unlock(sessp);
- deny_controlling_tty = 1;
- }
- }
-
if ((error = vn_open_auth(ndp, &flags, vap))) {
if ((error == ENODEV || error == ENXIO) && (uu->uu_dupfd >= 0)){ /* XXX from fdopen */
if ((error = dupfdopen(p->p_fd, indx, uu->uu_dupfd, flags, error)) == 0) {
fp_drop(p, indx, NULL, 0);
*retval = indx;
- if (deny_controlling_tty) {
- session_lock(sessp);
- sessp->s_flags &= ~S_NOCTTY;
- session_unlock(sessp);
- }
- if (sessp != SESSION_NULL)
- session_rele(sessp);
return (0);
}
}
if (error == ERESTART)
error = EINTR;
fp_free(p, indx, fp);
-
- if (deny_controlling_tty) {
- session_lock(sessp);
- sessp->s_flags &= ~S_NOCTTY;
- session_unlock(sessp);
- }
- if (sessp != SESSION_NULL)
- session_rele(sessp);
return (error);
}
uu->uu_dupfd = 0;
vp = ndp->ni_vp;
- fp->f_fglob->fg_flag = flags & (FMASK | O_EVTONLY);
+ fp->f_fglob->fg_flag = flags & (FMASK | O_EVTONLY | FENCRYPTED | FUNENCRYPTED);
fp->f_fglob->fg_ops = &vnops;
fp->f_fglob->fg_data = (caddr_t)vp;
-#if CONFIG_PROTECT
- if (VATTR_IS_ACTIVE (vap, va_dataprotect_flags)) {
- if (vap->va_dataprotect_flags & VA_DP_RAWENCRYPTED) {
- fp->f_fglob->fg_flag |= FENCRYPTED;
- }
- }
-#endif
-
if (flags & (O_EXLOCK | O_SHLOCK)) {
lf.l_whence = SEEK_SET;
lf.l_start = 0;
fp->f_fglob->fg_flag |= FHASLOCK;
}
+#if DEVELOPMENT || DEBUG
+ /*
+ * XXX VSWAP: Check for entitlements or special flag here
+ * so we can restrict access appropriately.
+ */
+#else /* DEVELOPMENT || DEBUG */
+
+ if (vnode_isswap(vp) && (flags & (FWRITE | O_TRUNC)) && (ctx != vfs_context_kernel())) {
+ /* block attempt to write/truncate swapfile */
+ error = EPERM;
+ goto bad;
+ }
+#endif /* DEVELOPMENT || DEBUG */
+
/* try to truncate by setting the size attribute */
if ((flags & O_TRUNC) && ((error = vnode_setsize(vp, (off_t)0, 0, ctx)) != 0))
goto bad;
/*
- * If the open flags denied the acquisition of a controlling tty,
- * clear the flag in the session structure that prevented the lower
- * level code from assigning one.
+ * For directories we hold some additional information in the fd.
*/
- if (deny_controlling_tty) {
- session_lock(sessp);
- sessp->s_flags &= ~S_NOCTTY;
- session_unlock(sessp);
+ if (vnode_vtype(vp) == VDIR) {
+ fp->f_fglob->fg_vn_data = fg_vn_data_alloc();
+ } else {
+ fp->f_fglob->fg_vn_data = NULL;
}
+ vnode_put(vp);
+
/*
- * If a controlling tty was set by the tty line discipline, then we
- * want to set the vp of the tty into the session structure. We have
- * a race here because we can't get to the vp for the tp in ttyopen,
- * because it's not passed as a parameter in the open path.
+ * The first terminal open (without a O_NOCTTY) by a session leader
+ * results in it being set as the controlling terminal.
*/
- if (no_controlling_tty && (p->p_flag & P_CONTROLT)) {
- vnode_t ttyvp;
-
- /*
- * We already have a ref from vn_open_auth(), so we can demand another reference.
- */
- error = vnode_ref_ext(vp, 0, VNODE_REF_FORCE);
- if (error != 0) {
- panic("vnode_ref_ext() with VNODE_REF_FORCE failed?!");
- }
+ if (vnode_istty(vp) && !(p->p_flag & P_CONTROLT) &&
+ !(flags & O_NOCTTY)) {
+ int tmp = 0;
- session_lock(sessp);
- ttyvp = sessp->s_ttyvp;
- sessp->s_ttyvp = vp;
- sessp->s_ttyvid = vnode_vid(vp);
- session_unlock(sessp);
- if (ttyvp != NULLVP)
- vnode_rele(ttyvp);
+ (void)(*fp->f_fglob->fg_ops->fo_ioctl)(fp, (int)TIOCSCTTY,
+ (caddr_t)&tmp, ctx);
}
- vnode_put(vp);
-
proc_fdlock(p);
if (flags & O_CLOEXEC)
*fdflags(p, indx) |= UF_EXCLOSE;
if (flags & O_CLOFORK)
*fdflags(p, indx) |= UF_FORKCLOSE;
procfdtbl_releasefd(p, indx, NULL);
+
+#if CONFIG_SECLUDED_MEMORY
+ if (secluded_for_filecache &&
+ FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_VNODE &&
+ vnode_vtype(vp) == VREG) {
+ memory_object_control_t moc;
+
+ moc = ubc_getobject(vp, UBC_FLAGS_NONE);
+
+ if (moc == MEMORY_OBJECT_CONTROL_NULL) {
+ /* nothing to do... */
+ } else if (fp->f_fglob->fg_flag & FWRITE) {
+ /* writable -> no longer eligible for secluded pages */
+ memory_object_mark_eligible_for_secluded(moc,
+ FALSE);
+ } else if (secluded_for_filecache == 1) {
+ char pathname[32] = { 0, };
+ size_t copied;
+ /* XXX FBDP: better way to detect /Applications/ ? */
+ if (UIO_SEG_IS_USER_SPACE(ndp->ni_segflg)) {
+ copyinstr(ndp->ni_dirp,
+ pathname,
+ sizeof (pathname),
+ &copied);
+ } else {
+ copystr(CAST_DOWN(void *, ndp->ni_dirp),
+ pathname,
+ sizeof (pathname),
+ &copied);
+ }
+ pathname[sizeof (pathname) - 1] = '\0';
+ if (strncmp(pathname,
+ "/Applications/",
+ strlen("/Applications/")) == 0 &&
+ strncmp(pathname,
+ "/Applications/Camera.app/",
+ strlen("/Applications/Camera.app/")) != 0) {
+ /*
+ * not writable
+ * AND from "/Applications/"
+ * AND not from "/Applications/Camera.app/"
+ * ==> eligible for secluded
+ */
+ memory_object_mark_eligible_for_secluded(moc,
+ TRUE);
+ }
+ } else if (secluded_for_filecache == 2) {
+#if __arm64__
+#define DYLD_SHARED_CACHE_NAME "dyld_shared_cache_arm64"
+#elif __arm__
+#define DYLD_SHARED_CACHE_NAME "dyld_shared_cache_armv7"
+#else
+/* not implemented... */
+#endif
+ if (!strncmp(vp->v_name,
+ DYLD_SHARED_CACHE_NAME,
+ strlen(DYLD_SHARED_CACHE_NAME)) ||
+ !strncmp(vp->v_name,
+ "dyld",
+ strlen(vp->v_name)) ||
+ !strncmp(vp->v_name,
+ "launchd",
+ strlen(vp->v_name)) ||
+ !strncmp(vp->v_name,
+ "Camera",
+ strlen(vp->v_name)) ||
+ !strncmp(vp->v_name,
+ "mediaserverd",
+ strlen(vp->v_name)) ||
+ !strncmp(vp->v_name,
+ "SpringBoard",
+ strlen(vp->v_name)) ||
+ !strncmp(vp->v_name,
+ "backboardd",
+ strlen(vp->v_name))) {
+ /*
+ * This file matters when launching Camera:
+ * do not store its contents in the secluded
+ * pool that will be drained on Camera launch.
+ */
+ memory_object_mark_eligible_for_secluded(moc,
+ FALSE);
+ }
+ }
+ }
+#endif /* CONFIG_SECLUDED_MEMORY */
+
fp_drop(p, indx, fp, 1);
proc_fdunlock(p);
*retval = indx;
- if (sessp != SESSION_NULL)
- session_rele(sessp);
return (0);
bad:
- if (deny_controlling_tty) {
- session_lock(sessp);
- sessp->s_flags &= ~S_NOCTTY;
- session_unlock(sessp);
- }
- if (sessp != SESSION_NULL)
- session_rele(sessp);
-
- struct vfs_context context = *vfs_context_current();
+ context = *vfs_context_current();
context.vc_ucred = fp->f_fglob->fg_cred;
+ if ((fp->f_fglob->fg_flag & FHASLOCK) &&
+ (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_VNODE)) {
+ lf.l_whence = SEEK_SET;
+ lf.l_start = 0;
+ lf.l_len = 0;
+ lf.l_type = F_UNLCK;
+
+ (void)VNOP_ADVLOCK(
+ vp, (caddr_t)fp->f_fglob, F_UNLCK, &lf, F_FLOCK, ctx, NULL);
+ }
+
vn_close(vp, fp->f_fglob->fg_flag, &context);
vnode_put(vp);
fp_free(p, indx, fp);
return (error);
}
+/*
+ * While most of the *at syscall handlers can call nameiat() which
+ * is a wrapper around namei, the use of namei and initialisation
+ * of nameidata are far removed and in different functions - namei
+ * gets called in vn_open_auth for open1. So we'll just do here what
+ * nameiat() does.
+ */
+static int
+open1at(vfs_context_t ctx, struct nameidata *ndp, int uflags,
+ struct vnode_attr *vap, fp_allocfn_t fp_zalloc, void *cra, int32_t *retval,
+ int dirfd)
+{
+ if ((dirfd != AT_FDCWD) && !(ndp->ni_cnd.cn_flags & USEDVP)) {
+ int error;
+ char c;
+
+ if (UIO_SEG_IS_USER_SPACE(ndp->ni_segflg)) {
+ error = copyin(ndp->ni_dirp, &c, sizeof(char));
+ if (error)
+ return (error);
+ } else {
+ c = *((char *)(ndp->ni_dirp));
+ }
+
+ if (c != '/') {
+ vnode_t dvp_at;
+
+ error = vnode_getfromfd(ndp->ni_cnd.cn_context, dirfd,
+ &dvp_at);
+ if (error)
+ return (error);
+
+ if (vnode_vtype(dvp_at) != VDIR) {
+ vnode_put(dvp_at);
+ return (ENOTDIR);
+ }
+
+ ndp->ni_dvp = dvp_at;
+ ndp->ni_cnd.cn_flags |= USEDVP;
+ error = open1(ctx, ndp, uflags, vap, fp_zalloc, cra,
+ retval);
+ vnode_put(dvp_at);
+ return (error);
+ }
+ }
+
+ return (open1(ctx, ndp, uflags, vap, fp_zalloc, cra, retval));
+}
+
/*
* open_extended: open a file given a path name; with extended argument list (including extended security (ACL)).
*
return ciferror;
}
-/*
+/*
* Go through the data-protected atomically controlled open (2)
- *
+ *
* int open_dprotected_np(user_addr_t path, int flags, int class, int dpflags, int mode)
*/
int open_dprotected_np (__unused proc_t p, struct open_dprotected_np_args *uap, int32_t *retval) {
int class = uap->class;
int dpflags = uap->dpflags;
- /*
+ /*
* Follow the same path as normal open(2)
* Look up the item if it exists, and acquire the vnode.
*/
struct nameidata nd;
int cmode;
int error;
-
+
VATTR_INIT(&va);
/* Mask off all but regular access permissions */
cmode = ((uap->mode &~ fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT;
NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, UIO_USERSPACE,
uap->path, vfs_context_current());
- /*
- * Initialize the extra fields in vnode_attr to pass down our
+ /*
+ * Initialize the extra fields in vnode_attr to pass down our
* extra fields.
* 1. target cprotect class.
- * 2. set a flag to mark it as requiring open-raw-encrypted semantics.
- */
- if (flags & O_CREAT) {
- VATTR_SET(&va, va_dataprotect_class, class);
- }
-
- if (dpflags & O_DP_GETRAWENCRYPTED) {
+ * 2. set a flag to mark it as requiring open-raw-encrypted semantics.
+ */
+ if (flags & O_CREAT) {
+ /* lower level kernel code validates that the class is valid before applying it. */
+ if (class != PROTECTION_CLASS_DEFAULT) {
+ /*
+ * PROTECTION_CLASS_DEFAULT implies that we make the class for this
+ * file behave the same as open (2)
+ */
+ VATTR_SET(&va, va_dataprotect_class, class);
+ }
+ }
+
+ if (dpflags & (O_DP_GETRAWENCRYPTED|O_DP_GETRAWUNENCRYPTED)) {
if ( flags & (O_RDWR | O_WRONLY)) {
/* Not allowed to write raw encrypted bytes */
- return EINVAL;
- }
- VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWENCRYPTED);
+ return EINVAL;
+ }
+ if (uap->dpflags & O_DP_GETRAWENCRYPTED) {
+ VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWENCRYPTED);
+ }
+ if (uap->dpflags & O_DP_GETRAWUNENCRYPTED) {
+ VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWUNENCRYPTED);
+ }
}
error = open1(vfs_context_current(), &nd, uap->flags, &va,
return error;
}
+static int
+openat_internal(vfs_context_t ctx, user_addr_t path, int flags, int mode,
+ int fd, enum uio_seg segflg, int *retval)
+{
+ struct filedesc *fdp = (vfs_context_proc(ctx))->p_fd;
+ struct vnode_attr va;
+ struct nameidata nd;
+ int cmode;
+
+ VATTR_INIT(&va);
+ /* Mask off all but regular access permissions */
+ cmode = ((mode &~ fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT;
+ VATTR_SET(&va, va_mode, cmode & ACCESSPERMS);
+
+ NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1,
+ segflg, path, ctx);
+
+ return (open1at(ctx, &nd, flags, &va, fileproc_alloc_init, NULL,
+ retval, fd));
+}
int
open(proc_t p, struct open_args *uap, int32_t *retval)
}
int
-open_nocancel(proc_t p, struct open_nocancel_args *uap, int32_t *retval)
+open_nocancel(__unused proc_t p, struct open_nocancel_args *uap,
+ int32_t *retval)
{
- struct filedesc *fdp = p->p_fd;
- struct vnode_attr va;
- struct nameidata nd;
- int cmode;
-
- VATTR_INIT(&va);
- /* Mask off all but regular access permissions */
- cmode = ((uap->mode &~ fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT;
- VATTR_SET(&va, va_mode, cmode & ACCESSPERMS);
-
- NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, UIO_USERSPACE,
- uap->path, vfs_context_current());
+ return (openat_internal(vfs_context_current(), uap->path, uap->flags,
+ uap->mode, AT_FDCWD, UIO_USERSPACE, retval));
+}
- return (open1(vfs_context_current(), &nd, uap->flags, &va,
- fileproc_alloc_init, NULL, retval));
+int
+openat_nocancel(__unused proc_t p, struct openat_nocancel_args *uap,
+ int32_t *retval)
+{
+ return (openat_internal(vfs_context_current(), uap->path, uap->flags,
+ uap->mode, uap->fd, UIO_USERSPACE, retval));
}
+int
+openat(proc_t p, struct openat_args *uap, int32_t *retval)
+{
+ __pthread_testcancel(1);
+ return(openat_nocancel(p, (struct openat_nocancel_args *)uap, retval));
+}
/*
- * Create a special file.
+ * openbyid_np: open a file given a file system id and a file system object id
+ * the hfs file system object id is an fsobj_id_t {uint32, uint32};
+ * for file systems that don't support object ids it is a node id (uint64_t).
+ *
+ * Parameters: p Process requesting the open
+ * uap User argument descriptor (see below)
+ * retval Pointer to an area to receive the
+ * return value from the system call
+ *
+ * Indirect: uap->path Path to open (same as 'open')
+ *
+ * uap->fsid id of target file system
+ * uap->objid id of target file system object
+ * uap->flags Flags to open (same as 'open')
+ *
+ * Returns: 0 Success
+ * !0 errno value
+ *
+ *
+ * XXX: We should enumerate the possible errno values here, and where
+ * in the code they originated.
*/
-static int mkfifo1(vfs_context_t ctx, user_addr_t upath, struct vnode_attr *vap);
-
int
-mknod(proc_t p, struct mknod_args *uap, __unused int32_t *retval)
+openbyid_np(__unused proc_t p, struct openbyid_np_args *uap, int *retval)
{
- struct vnode_attr va;
- vfs_context_t ctx = vfs_context_current();
+ fsid_t fsid;
+ uint64_t objid;
int error;
- struct nameidata nd;
- vnode_t vp, dvp;
-
- VATTR_INIT(&va);
- VATTR_SET(&va, va_mode, (uap->mode & ALLPERMS) & ~p->p_fd->fd_cmask);
- VATTR_SET(&va, va_rdev, uap->dev);
+ char *buf = NULL;
+ int buflen = MAXPATHLEN;
+ int pathlen = 0;
+ vfs_context_t ctx = vfs_context_current();
- /* If it's a mknod() of a FIFO, call mkfifo1() instead */
- if ((uap->mode & S_IFMT) == S_IFIFO)
- return(mkfifo1(ctx, uap->path, &va));
+ if ((error = priv_check_cred(vfs_context_ucred(ctx), PRIV_VFS_OPEN_BY_ID, 0))) {
+ return (error);
+ }
- AUDIT_ARG(mode, uap->mode);
- AUDIT_ARG(value32, uap->dev);
+ if ((error = copyin(uap->fsid, (caddr_t)&fsid, sizeof(fsid)))) {
+ return (error);
+ }
- if ((error = suser(vfs_context_ucred(ctx), &p->p_acflag)))
+ /* uap->objid is an fsobj_id_t defined as struct {uint32_t, uint32_t} */
+ if ((error = copyin(uap->objid, (caddr_t)&objid, sizeof(uint64_t)))) {
return (error);
- NDINIT(&nd, CREATE, OP_MKNOD, LOCKPARENT | AUDITVNPATH1,
- UIO_USERSPACE, uap->path, ctx);
+ }
+
+ AUDIT_ARG(value32, fsid.val[0]);
+ AUDIT_ARG(value64, objid);
+
+ /* resolve path from fsid, objid */
+ do {
+ MALLOC(buf, char *, buflen + 1, M_TEMP, M_WAITOK);
+ if (buf == NULL) {
+ return (ENOMEM);
+ }
+
+ error = fsgetpath_internal(
+ ctx, fsid.val[0], objid,
+ buflen, buf, &pathlen);
+
+ if (error) {
+ FREE(buf, M_TEMP);
+ buf = NULL;
+ }
+ } while (error == ENOSPC && (buflen += MAXPATHLEN));
+
+ if (error) {
+ return error;
+ }
+
+ buf[pathlen] = 0;
+
+ error = openat_internal(
+ ctx, (user_addr_t)buf, uap->oflags, 0, AT_FDCWD, UIO_SYSSPACE, retval);
+
+ FREE(buf, M_TEMP);
+
+ return error;
+}
+
+
+/*
+ * Create a special file.
+ */
+static int mkfifo1(vfs_context_t ctx, user_addr_t upath, struct vnode_attr *vap);
+
+int
+mknod(proc_t p, struct mknod_args *uap, __unused int32_t *retval)
+{
+ struct vnode_attr va;
+ vfs_context_t ctx = vfs_context_current();
+ int error;
+ struct nameidata nd;
+ vnode_t vp, dvp;
+
+ VATTR_INIT(&va);
+ VATTR_SET(&va, va_mode, (uap->mode & ALLPERMS) & ~p->p_fd->fd_cmask);
+ VATTR_SET(&va, va_rdev, uap->dev);
+
+ /* If it's a mknod() of a FIFO, call mkfifo1() instead */
+ if ((uap->mode & S_IFMT) == S_IFIFO)
+ return(mkfifo1(ctx, uap->path, &va));
+
+ AUDIT_ARG(mode, uap->mode);
+ AUDIT_ARG(value32, uap->dev);
+
+ if ((error = suser(vfs_context_ucred(ctx), &p->p_acflag)))
+ return (error);
+ NDINIT(&nd, CREATE, OP_MKNOD, LOCKPARENT | AUDITVNPATH1,
+ UIO_USERSPACE, uap->path, ctx);
error = namei(&nd);
if (error)
return (error);
}
switch (uap->mode & S_IFMT) {
- case S_IFMT: /* used by badsect to flag bad sectors */
- VATTR_SET(&va, va_type, VBAD);
- break;
case S_IFCHR:
VATTR_SET(&va, va_type, VCHR);
break;
int error;
struct nameidata nd;
- NDINIT(&nd, CREATE, OP_MKFIFO, LOCKPARENT | AUDITVNPATH1,
+ NDINIT(&nd, CREATE, OP_MKFIFO, LOCKPARENT | AUDITVNPATH1,
UIO_USERSPACE, upath, ctx);
error = namei(&nd);
if (error)
len += strlcpy(&path[len], leafname, MAXPATHLEN-len) + 1;
if (len > MAXPATHLEN) {
char *ptr;
-
+
// the string got truncated!
*truncated_path = 1;
ptr = my_strrchr(path, '/');
if (ret != ENOSPC) {
printf("safe_getpath: failed to get the path for vp %p (%s) : err %d\n",
dvp, dvp->v_name ? dvp->v_name : "no-name", ret);
- }
+ }
*truncated_path = 1;
-
+
do {
if (mydvp->v_parent != NULL) {
mydvp = mydvp->v_parent;
len = 2;
mydvp = NULL;
}
-
+
if (mydvp == NULL) {
break;
}
* VNOP_LINK:???
*/
/* ARGSUSED */
-int
-link(__unused proc_t p, struct link_args *uap, __unused int32_t *retval)
+static int
+linkat_internal(vfs_context_t ctx, int fd1, user_addr_t path, int fd2,
+ user_addr_t link, int flag, enum uio_seg segflg)
{
vnode_t vp, dvp, lvp;
struct nameidata nd;
- vfs_context_t ctx = vfs_context_current();
+ int follow;
int error;
#if CONFIG_FSE
fse_info finfo;
vp = dvp = lvp = NULLVP;
/* look up the object we are linking to */
- NDINIT(&nd, LOOKUP, OP_LOOKUP, FOLLOW | AUDITVNPATH1,
- UIO_USERSPACE, uap->path, ctx);
- error = namei(&nd);
+ follow = (flag & AT_SYMLINK_FOLLOW) ? FOLLOW : NOFOLLOW;
+ NDINIT(&nd, LOOKUP, OP_LOOKUP, AUDITVNPATH1 | follow,
+ segflg, path, ctx);
+
+ error = nameiat(&nd, fd1);
if (error)
return (error);
vp = nd.ni_vp;
* However, some file systems may have limited support.
*/
if (vp->v_type == VDIR) {
- if (!(vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSDIRLINKS)) {
+ if (!ISSET(vp->v_mount->mnt_kern_flag, MNTK_DIR_HARDLINKS)) {
error = EPERM; /* POSIX */
goto out;
}
+
/* Linking to a directory requires ownership. */
if (!kauth_cred_issuser(vfs_context_ucred(ctx))) {
struct vnode_attr dva;
#endif
nd.ni_cnd.cn_nameiop = CREATE;
nd.ni_cnd.cn_flags = LOCKPARENT | AUDITVNPATH2 | CN_NBMOUNTLOOK;
- nd.ni_dirp = uap->link;
- error = namei(&nd);
+ nd.ni_dirp = link;
+ error = nameiat(&nd, fd2);
if (error != 0)
goto out;
dvp = nd.ni_dvp;
error = EXDEV;
goto out2;
}
-
+
/* authorize creation of the target note */
if ((error = vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_FILE, ctx)) != 0)
goto out2;
}
link_name_len = MAXPATHLEN;
- vn_getpath(vp, link_to_path, &link_name_len);
-
- /*
- * Call out to allow 3rd party notification of rename.
- * Ignore result of kauth_authorize_fileop call.
- */
- kauth_authorize_fileop(vfs_context_ucred(ctx), KAUTH_FILEOP_LINK,
- (uintptr_t)link_to_path, (uintptr_t)target_path);
+ if (vn_getpath(vp, link_to_path, &link_name_len) == 0) {
+ /*
+ * Call out to allow 3rd party notification of rename.
+ * Ignore result of kauth_authorize_fileop call.
+ */
+ kauth_authorize_fileop(vfs_context_ucred(ctx), KAUTH_FILEOP_LINK,
+ (uintptr_t)link_to_path,
+ (uintptr_t)target_path);
+ }
if (link_to_path != NULL) {
RELEASE_PATH(link_to_path);
}
return (error);
}
+int
+link(__unused proc_t p, struct link_args *uap, __unused int32_t *retval)
+{
+ return (linkat_internal(vfs_context_current(), AT_FDCWD, uap->path,
+ AT_FDCWD, uap->link, AT_SYMLINK_FOLLOW, UIO_USERSPACE));
+}
+
+int
+linkat(__unused proc_t p, struct linkat_args *uap, __unused int32_t *retval)
+{
+ if (uap->flag & ~AT_SYMLINK_FOLLOW)
+ return (EINVAL);
+
+ return (linkat_internal(vfs_context_current(), uap->fd1, uap->path,
+ uap->fd2, uap->link, uap->flag, UIO_USERSPACE));
+}
+
/*
* Make a symbolic link.
*
* We could add support for ACLs here too...
*/
/* ARGSUSED */
-int
-symlink(proc_t p, struct symlink_args *uap, __unused int32_t *retval)
+static int
+symlinkat_internal(vfs_context_t ctx, user_addr_t path_data, int fd,
+ user_addr_t link, enum uio_seg segflg)
{
struct vnode_attr va;
char *path;
int error;
struct nameidata nd;
- vfs_context_t ctx = vfs_context_current();
vnode_t vp, dvp;
size_t dummy=0;
-
- MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
- error = copyinstr(uap->path, path, MAXPATHLEN, &dummy);
+ proc_t p;
+
+ error = 0;
+ if (UIO_SEG_IS_USER_SPACE(segflg)) {
+ MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
+ error = copyinstr(path_data, path, MAXPATHLEN, &dummy);
+ } else {
+ path = (char *)path_data;
+ }
if (error)
goto out;
AUDIT_ARG(text, path); /* This is the link string */
- NDINIT(&nd, CREATE, OP_SYMLINK, LOCKPARENT | AUDITVNPATH1,
- UIO_USERSPACE, uap->link, ctx);
- error = namei(&nd);
+ NDINIT(&nd, CREATE, OP_SYMLINK, LOCKPARENT | AUDITVNPATH1,
+ segflg, link, ctx);
+
+ error = nameiat(&nd, fd);
if (error)
goto out;
dvp = nd.ni_dvp;
vp = nd.ni_vp;
+ p = vfs_context_proc(ctx);
VATTR_INIT(&va);
VATTR_SET(&va, va_type, VLNK);
VATTR_SET(&va, va_mode, ACCESSPERMS & ~p->p_fd->fd_cmask);
+
#if CONFIG_MACF
error = mac_vnode_check_create(ctx,
dvp, &nd.ni_cnd, &va);
error = VNOP_SYMLINK(dvp, &vp, &nd.ni_cnd, &va, path, ctx);
#if CONFIG_MACF
- if (error == 0)
+ if (error == 0 && vp)
error = vnode_label(vnode_mount(vp), dvp, vp, &nd.ni_cnd, VNODE_LABEL_CREATE, ctx);
#endif
/* do fallback attribute handling */
- if (error == 0)
+ if (error == 0 && vp)
error = vnode_setattr_fallback(vp, &va, ctx);
if (error == 0) {
int update_flags = 0;
+ /* check if a new vnode was created, else try to get one */
if (vp == NULL) {
nd.ni_cnd.cn_nameiop = LOOKUP;
#if CONFIG_TRIGGERS
nd.ni_op = OP_LOOKUP;
#endif
nd.ni_cnd.cn_flags = 0;
- error = namei(&nd);
+ error = nameiat(&nd, fd);
vp = nd.ni_vp;
if (vp == NULL)
goto skipit;
}
-
+
#if 0 /* XXX - kauth_todo - is KAUTH_FILEOP_SYMLINK needed? */
- /* call out to allow 3rd party notification of rename.
+ /* call out to allow 3rd party notification of rename.
* Ignore result of kauth_authorize_fileop call.
*/
if (kauth_authorize_fileop_has_listeners() &&
namei(&nd) == 0) {
char *new_link_path = NULL;
int len;
-
+
/* build the path to the new link file */
new_link_path = get_pathbuff();
len = MAXPATHLEN;
new_link_path[len - 1] = '/';
strlcpy(&new_link_path[len], nd.ni_cnd.cn_nameptr, MAXPATHLEN-len);
}
-
- kauth_authorize_fileop(vfs_context_ucred(ctx), KAUTH_FILEOP_SYMLINK,
+
+ kauth_authorize_fileop(vfs_context_ucred(ctx), KAUTH_FILEOP_SYMLINK,
(uintptr_t)path, (uintptr_t)new_link_path);
if (new_link_path != NULL)
release_pathbuff(new_link_path);
}
-#endif
+#endif
// Make sure the name & parent pointers are hooked up
if (vp->v_name == NULL)
update_flags |= VNODE_UPDATE_NAME;
if (vp->v_parent == NULLVP)
update_flags |= VNODE_UPDATE_PARENT;
-
+
if (update_flags)
vnode_update_identity(vp, dvp, nd.ni_cnd.cn_nameptr, nd.ni_cnd.cn_namelen, nd.ni_cnd.cn_hash, update_flags);
vnode_put(vp);
vnode_put(dvp);
out:
- FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
+ if (path && (path != (char *)path_data))
+ FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
return (error);
}
+int
+symlink(__unused proc_t p, struct symlink_args *uap, __unused int32_t *retval)
+{
+ return (symlinkat_internal(vfs_context_current(), uap->path, AT_FDCWD,
+ uap->link, UIO_USERSPACE));
+}
+
+int
+symlinkat(__unused proc_t p, struct symlinkat_args *uap,
+ __unused int32_t *retval)
+{
+ return (symlinkat_internal(vfs_context_current(), uap->path1, uap->fd,
+ uap->path2, UIO_USERSPACE));
+}
+
/*
* Delete a whiteout from the filesystem.
- * XXX authorization not implmented for whiteouts
+ * No longer supported.
*/
int
-undelete(__unused proc_t p, struct undelete_args *uap, __unused int32_t *retval)
+undelete(__unused proc_t p, __unused struct undelete_args *uap, __unused int32_t *retval)
{
- int error;
- struct nameidata nd;
- vfs_context_t ctx = vfs_context_current();
- vnode_t vp, dvp;
-
- NDINIT(&nd, DELETE, OP_UNLINK, LOCKPARENT | DOWHITEOUT | AUDITVNPATH1,
- UIO_USERSPACE, uap->path, ctx);
- error = namei(&nd);
- if (error)
- return (error);
- dvp = nd.ni_dvp;
- vp = nd.ni_vp;
-
- if (vp == NULLVP && (nd.ni_cnd.cn_flags & ISWHITEOUT)) {
- error = VNOP_WHITEOUT(dvp, &nd.ni_cnd, DELETE, ctx);
- } else
- error = EEXIST;
-
- /*
- * nameidone has to happen before we vnode_put(dvp)
- * since it may need to release the fs_nodelock on the dvp
- */
- nameidone(&nd);
-
- if (vp)
- vnode_put(vp);
- vnode_put(dvp);
-
- return (error);
+ return (ENOTSUP);
}
-
/*
* Delete a name from the filesystem.
*/
/* ARGSUSED */
-int
-unlink1(vfs_context_t ctx, struct nameidata *ndp, int unlink_flags)
+static int
+unlinkat_internal(vfs_context_t ctx, int fd, vnode_t start_dvp,
+ user_addr_t path_arg, enum uio_seg segflg, int unlink_flags)
{
+ struct nameidata nd;
vnode_t vp, dvp;
int error;
struct componentname *cnp;
fse_info finfo;
struct vnode_attr va;
#endif
- int flags = 0;
- int need_event = 0;
- int has_listeners = 0;
- int truncated_path=0;
+ int flags;
+ int need_event;
+ int has_listeners;
+ int truncated_path;
int batched;
- struct vnode_attr *vap = NULL;
+ struct vnode_attr *vap;
+ int do_retry;
+ int retry_count = 0;
+ int cn_flags;
+
+ cn_flags = LOCKPARENT;
+ if (!(unlink_flags & VNODE_REMOVE_NO_AUDIT_PATH))
+ cn_flags |= AUDITVNPATH1;
+ /* If a starting dvp is passed, it trumps any fd passed. */
+ if (start_dvp)
+ cn_flags |= USEDVP;
#if NAMEDRSRCFORK
/* unlink or delete is allowed on rsrc forks and named streams */
- ndp->ni_cnd.cn_flags |= CN_ALLOWRSRCFORK;
+ cn_flags |= CN_ALLOWRSRCFORK;
#endif
- ndp->ni_cnd.cn_flags |= LOCKPARENT;
- ndp->ni_flag |= NAMEI_COMPOUNDREMOVE;
- cnp = &ndp->ni_cnd;
+retry:
+ do_retry = 0;
+ flags = 0;
+ need_event = 0;
+ has_listeners = 0;
+ truncated_path = 0;
+ vap = NULL;
-lookup_continue:
- error = namei(ndp);
+ NDINIT(&nd, DELETE, OP_UNLINK, cn_flags, segflg, path_arg, ctx);
+
+ nd.ni_dvp = start_dvp;
+ nd.ni_flag |= NAMEI_COMPOUNDREMOVE;
+ cnp = &nd.ni_cnd;
+
+continue_lookup:
+ error = nameiat(&nd, fd);
if (error)
return (error);
- dvp = ndp->ni_dvp;
- vp = ndp->ni_vp;
+ dvp = nd.ni_dvp;
+ vp = nd.ni_vp;
/* With Carbon delete semantics, busy files cannot be deleted */
if (unlink_flags & VNODE_REMOVE_NODELETEBUSY) {
flags |= VNODE_REMOVE_NODELETEBUSY;
}
-
+
/* Skip any potential upcalls if told to. */
if (unlink_flags & VNODE_REMOVE_SKIP_NAMESPACE_EVENT) {
flags |= VNODE_REMOVE_SKIP_NAMESPACE_EVENT;
error = EBUSY;
}
+#if DEVELOPMENT || DEBUG
+ /*
+ * XXX VSWAP: Check for entitlements or special flag here
+ * so we can restrict access appropriately.
+ */
+#else /* DEVELOPMENT || DEBUG */
+
+ if (vnode_isswap(vp) && (ctx != vfs_context_kernel())) {
+ error = EPERM;
+ goto out;
+ }
+#endif /* DEVELOPMENT || DEBUG */
+
if (!batched) {
error = vn_authorize_unlink(dvp, vp, cnp, ctx, NULL);
if (error) {
+ if (error == ENOENT) {
+ assert(retry_count < MAX_AUTHORIZE_ENOENT_RETRIES);
+ if (retry_count < MAX_AUTHORIZE_ENOENT_RETRIES) {
+ do_retry = 1;
+ retry_count++;
+ }
+ }
goto out;
}
}
goto out;
}
}
- len = safe_getpath(dvp, ndp->ni_cnd.cn_nameptr, path, MAXPATHLEN, &truncated_path);
+ len = safe_getpath(dvp, nd.ni_cnd.cn_nameptr, path, MAXPATHLEN, &truncated_path);
}
#if NAMEDRSRCFORK
- if (ndp->ni_cnd.cn_flags & CN_WANTSRSRCFORK)
+ if (nd.ni_cnd.cn_flags & CN_WANTSRSRCFORK)
error = vnode_removenamedstream(dvp, vp, XATTR_RESOURCEFORK_NAME, 0, ctx);
else
#endif
{
- error = vn_remove(dvp, &ndp->ni_vp, ndp, flags, vap, ctx);
- vp = ndp->ni_vp;
+ error = vn_remove(dvp, &nd.ni_vp, &nd, flags, vap, ctx);
+ vp = nd.ni_vp;
if (error == EKEEPLOOKING) {
if (!batched) {
panic("EKEEPLOOKING, but not a filesystem that supports compound VNOPs?");
}
- if ((ndp->ni_flag & NAMEI_CONTLOOKUP) == 0) {
+ if ((nd.ni_flag & NAMEI_CONTLOOKUP) == 0) {
panic("EKEEPLOOKING, but continue flag not set?");
}
error = EISDIR;
goto out;
}
- goto lookup_continue;
+ goto continue_lookup;
+ } else if (error == ENOENT && batched) {
+ assert(retry_count < MAX_AUTHORIZE_ENOENT_RETRIES);
+ if (retry_count < MAX_AUTHORIZE_ENOENT_RETRIES) {
+ /*
+ * For compound VNOPs, the authorization callback may
+ * return ENOENT in case of racing hardlink lookups
+ * hitting the name cache, redrive the lookup.
+ */
+ do_retry = 1;
+ retry_count += 1;
+ goto out;
+ }
}
}
/*
- * Call out to allow 3rd party notification of delete.
+ * Call out to allow 3rd party notification of delete.
* Ignore result of kauth_authorize_fileop call.
*/
if (!error) {
if (has_listeners) {
- kauth_authorize_fileop(vfs_context_ucred(ctx),
- KAUTH_FILEOP_DELETE,
+ kauth_authorize_fileop(vfs_context_ucred(ctx),
+ KAUTH_FILEOP_DELETE,
(uintptr_t)vp,
(uintptr_t)path);
}
RELEASE_PATH(path);
#if NAMEDRSRCFORK
- /* recycle the deleted rsrc fork vnode to force a reclaim, which
+ /* recycle the deleted rsrc fork vnode to force a reclaim, which
* will cause its shadow file to go away if necessary.
*/
if (vp && (vnode_isnamedstream(vp)) &&
(vp->v_parent != NULLVP) &&
vnode_isshadow(vp)) {
vnode_recycle(vp);
- }
+ }
#endif
/*
* nameidone has to happen before we vnode_put(dvp)
* since it may need to release the fs_nodelock on the dvp
*/
- nameidone(ndp);
+ nameidone(&nd);
vnode_put(dvp);
if (vp) {
vnode_put(vp);
}
+
+ if (do_retry) {
+ goto retry;
+ }
+
return (error);
}
-/*
- * Delete a name from the filesystem using POSIX semantics.
- */
int
-unlink(__unused proc_t p, struct unlink_args *uap, __unused int32_t *retval)
+unlink1(vfs_context_t ctx, vnode_t start_dvp, user_addr_t path_arg,
+ enum uio_seg segflg, int unlink_flags)
{
- struct nameidata nd;
- vfs_context_t ctx = vfs_context_current();
-
- NDINIT(&nd, DELETE, OP_UNLINK, AUDITVNPATH1, UIO_USERSPACE,
- uap->path, ctx);
- return unlink1(ctx, &nd, 0);
+ return (unlinkat_internal(ctx, AT_FDCWD, start_dvp, path_arg, segflg,
+ unlink_flags));
}
/*
int
delete(__unused proc_t p, struct delete_args *uap, __unused int32_t *retval)
{
- struct nameidata nd;
- vfs_context_t ctx = vfs_context_current();
+ return (unlinkat_internal(vfs_context_current(), AT_FDCWD, NULLVP,
+ uap->path, UIO_USERSPACE, VNODE_REMOVE_NODELETEBUSY));
+}
- NDINIT(&nd, DELETE, OP_UNLINK, AUDITVNPATH1, UIO_USERSPACE,
- uap->path, ctx);
- return unlink1(ctx, &nd, VNODE_REMOVE_NODELETEBUSY);
+/*
+ * Delete a name from the filesystem using POSIX semantics.
+ */
+int
+unlink(__unused proc_t p, struct unlink_args *uap, __unused int32_t *retval)
+{
+ return (unlinkat_internal(vfs_context_current(), AT_FDCWD, NULLVP,
+ uap->path, UIO_USERSPACE, 0));
+}
+
+int
+unlinkat(__unused proc_t p, struct unlinkat_args *uap, __unused int32_t *retval)
+{
+ if (uap->flag & ~AT_REMOVEDIR)
+ return (EINVAL);
+
+ if (uap->flag & AT_REMOVEDIR)
+ return (rmdirat_internal(vfs_context_current(), uap->fd,
+ uap->path, UIO_USERSPACE));
+ else
+ return (unlinkat_internal(vfs_context_current(), uap->fd,
+ NULLVP, uap->path, UIO_USERSPACE, 0));
}
/*
break;
case L_SET:
break;
+ case SEEK_HOLE:
+ error = VNOP_IOCTL(vp, FSIOC_FIOSEEKHOLE, (caddr_t)&offset, 0, ctx);
+ break;
+ case SEEK_DATA:
+ error = VNOP_IOCTL(vp, FSIOC_FIOSEEKDATA, (caddr_t)&offset, 0, ctx);
+ break;
default:
error = EINVAL;
}
}
}
- /*
+ /*
* An lseek can affect whether data is "available to read." Use
* hint of NOTE_NONE so no EVFILT_VNODE events fire
*/
/* take advantage of definition of uflags */
action = uflags >> 8;
}
-
+
#if CONFIG_MACF
error = mac_vnode_check_access(ctx, vp, uflags);
if (error)
* access_extended: Check access permissions in bulk.
*
* Description: uap->entries Pointer to an array of accessx
- * descriptor structs, plus one or
- * more NULL terminated strings (see
+ * descriptor structs, plus one or
+ * more NULL terminated strings (see
* "Notes" section below).
* uap->size Size of the area pointed to by
* uap->entries.
*
* since we must have at least one string, and the string must
* be at least one character plus the NULL terminator in length.
- *
+ *
* XXX: Need to support the check-as uid argument
*/
int
goto out;
}
+ /* Also do not let ad_name_offset point to something beyond the size of the input */
+ if (input[i].ad_name_offset >= uap->size) {
+ error = EINVAL;
+ goto out;
+ }
+
/*
* An offset of 0 means use the previous descriptor's offset;
* this is used to chain multiple requests for the same file
error = ENOMEM;
goto out;
}
- MALLOC(result, errno_t *, desc_actual * sizeof(errno_t), M_TEMP, M_WAITOK);
+ MALLOC(result, errno_t *, desc_actual * sizeof(errno_t), M_TEMP, M_WAITOK | M_ZERO);
if (result == NULL) {
error = ENOMEM;
goto out;
vnode_put(dvp);
dvp = NULL;
}
-
+
/*
* Scan forward in the descriptor list to see if we
* need the parent vnode. We will need it if we are
for (j = i + 1; (j < desc_actual) && (input[j].ad_name_offset == 0); j++)
if (input[j].ad_flags & _DELETE_OK)
wantdelete = 1;
-
+
niopts = FOLLOW | AUDITVNPATH1;
/* need parent for vnode_authorize for deletion test */
/* copy out results */
error = copyout(result, uap->results, desc_actual * sizeof(errno_t));
-
+
out:
if (input && input != stack_input)
FREE(input, M_TEMP);
* namei:???
* access1:
*/
-int
-access(__unused proc_t p, struct access_args *uap, __unused int32_t *retval)
+static int
+faccessat_internal(vfs_context_t ctx, int fd, user_addr_t path, int amode,
+ int flag, enum uio_seg segflg)
{
int error;
struct nameidata nd;
#endif
/*
- * Access is defined as checking against the process'
- * real identity, even if operations are checking the
- * effective identity. So we need to tweak the credential
- * in the context.
+ * Unless the AT_EACCESS option is used, Access is defined as checking
+ * against the process' real identity, even if operations are checking
+ * the effective identity. So we need to tweak the credential
+ * in the context for that case.
*/
- context.vc_ucred = kauth_cred_copy_real(kauth_cred_get());
- context.vc_thread = current_thread();
+ if (!(flag & AT_EACCESS))
+ context.vc_ucred = kauth_cred_copy_real(kauth_cred_get());
+ else
+ context.vc_ucred = ctx->vc_ucred;
+ context.vc_thread = ctx->vc_thread;
+
niopts = FOLLOW | AUDITVNPATH1;
/* need parent for vnode_authorize for deletion test */
- if (uap->flags & _DELETE_OK)
+ if (amode & _DELETE_OK)
niopts |= WANTPARENT;
- NDINIT(&nd, LOOKUP, OP_ACCESS, niopts, UIO_USERSPACE,
- uap->path, &context);
+ NDINIT(&nd, LOOKUP, OP_ACCESS, niopts, segflg,
+ path, &context);
#if NAMEDRSRCFORK
/* access(F_OK) calls are allowed for resource forks. */
- if (uap->flags == F_OK)
+ if (amode == F_OK)
nd.ni_cnd.cn_flags |= CN_ALLOWRSRCFORK;
#endif
- error = namei(&nd);
+ error = nameiat(&nd, fd);
if (error)
goto out;
#if NAMEDRSRCFORK
- /* Grab reference on the shadow stream file vnode to
+ /* Grab reference on the shadow stream file vnode to
* force an inactive on release which will mark it
* for recycle.
*/
}
#endif
- error = access1(nd.ni_vp, nd.ni_dvp, uap->flags, &context);
+ error = access1(nd.ni_vp, nd.ni_dvp, amode, &context);
#if NAMEDRSRCFORK
if (is_namedstream) {
#endif
vnode_put(nd.ni_vp);
- if (uap->flags & _DELETE_OK)
+ if (amode & _DELETE_OK)
vnode_put(nd.ni_dvp);
nameidone(&nd);
-
+
out:
- kauth_cred_unref(&context.vc_ucred);
- return(error);
+ if (!(flag & AT_EACCESS))
+ kauth_cred_unref(&context.vc_ucred);
+ return (error);
}
+int
+access(__unused proc_t p, struct access_args *uap, __unused int32_t *retval)
+{
+ return (faccessat_internal(vfs_context_current(), AT_FDCWD,
+ uap->path, uap->flags, 0, UIO_USERSPACE));
+}
+
+int
+faccessat(__unused proc_t p, struct faccessat_args *uap,
+ __unused int32_t *retval)
+{
+ if (uap->flag & ~AT_EACCESS)
+ return (EINVAL);
+
+ return (faccessat_internal(vfs_context_current(), uap->fd,
+ uap->path, uap->amode, uap->flag, UIO_USERSPACE));
+}
/*
* Returns: 0 Success
* vn_stat:???
*/
static int
-stat2(vfs_context_t ctx, struct nameidata *ndp, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsecurity_size, int isstat64)
+fstatat_internal(vfs_context_t ctx, user_addr_t path, user_addr_t ub,
+ user_addr_t xsecurity, user_addr_t xsecurity_size, int isstat64,
+ enum uio_seg segflg, int fd, int flag)
{
+ struct nameidata nd;
+ int follow;
union {
struct stat sb;
struct stat64 sb64;
- } source;
+ } source = {};
union {
struct user64_stat user64_sb;
struct user32_stat user32_sb;
struct user64_stat64 user64_sb64;
struct user32_stat64 user32_sb64;
- } dest;
+ } dest = {};
caddr_t sbp;
int error, my_size;
kauth_filesec_t fsec;
size_t xsecurity_bufsize;
void * statptr;
+ follow = (flag & AT_SYMLINK_NOFOLLOW) ? NOFOLLOW : FOLLOW;
+ NDINIT(&nd, LOOKUP, OP_GETATTR, follow | AUDITVNPATH1,
+ segflg, path, ctx);
+
#if NAMEDRSRCFORK
int is_namedstream = 0;
/* stat calls are allowed for resource forks. */
- ndp->ni_cnd.cn_flags |= CN_ALLOWRSRCFORK;
+ nd.ni_cnd.cn_flags |= CN_ALLOWRSRCFORK;
#endif
- error = namei(ndp);
+ error = nameiat(&nd, fd);
if (error)
return (error);
fsec = KAUTH_FILESEC_NONE;
statptr = (void *)&source;
#if NAMEDRSRCFORK
- /* Grab reference on the shadow stream file vnode to
- * force an inactive on release which will mark it
+ /* Grab reference on the shadow stream file vnode to
+ * force an inactive on release which will mark it
* for recycle.
*/
- if (vnode_isnamedstream(ndp->ni_vp) &&
- (ndp->ni_vp->v_parent != NULLVP) &&
- vnode_isshadow(ndp->ni_vp)) {
+ if (vnode_isnamedstream(nd.ni_vp) &&
+ (nd.ni_vp->v_parent != NULLVP) &&
+ vnode_isshadow(nd.ni_vp)) {
is_namedstream = 1;
- vnode_ref(ndp->ni_vp);
+ vnode_ref(nd.ni_vp);
}
#endif
- error = vn_stat(ndp->ni_vp, statptr, (xsecurity != USER_ADDR_NULL ? &fsec : NULL), isstat64, ctx);
+ error = vn_stat(nd.ni_vp, statptr, (xsecurity != USER_ADDR_NULL ? &fsec : NULL), isstat64, ctx);
#if NAMEDRSRCFORK
if (is_namedstream) {
- vnode_rele(ndp->ni_vp);
+ vnode_rele(nd.ni_vp);
}
#endif
- vnode_put(ndp->ni_vp);
- nameidone(ndp);
+ vnode_put(nd.ni_vp);
+ nameidone(&nd);
if (error)
return (error);
source.sb64.st_qspare[0] = 0LL;
source.sb64.st_qspare[1] = 0LL;
if (IS_64BIT_PROCESS(vfs_context_proc(ctx))) {
- munge_user64_stat64(&source.sb64, &dest.user64_sb64);
+ munge_user64_stat64(&source.sb64, &dest.user64_sb64);
my_size = sizeof(dest.user64_sb64);
sbp = (caddr_t)&dest.user64_sb64;
} else {
- munge_user32_stat64(&source.sb64, &dest.user32_sb64);
+ munge_user32_stat64(&source.sb64, &dest.user32_sb64);
my_size = sizeof(dest.user32_sb64);
sbp = (caddr_t)&dest.user32_sb64;
}
source.sb.st_qspare[0] = 0LL;
source.sb.st_qspare[1] = 0LL;
if (IS_64BIT_PROCESS(vfs_context_proc(ctx))) {
- munge_user64_stat(&source.sb, &dest.user64_sb);
+ munge_user64_stat(&source.sb, &dest.user64_sb);
my_size = sizeof(dest.user64_sb);
sbp = (caddr_t)&dest.user64_sb;
} else {
- munge_user32_stat(&source.sb, &dest.user32_sb);
+ munge_user32_stat(&source.sb, &dest.user32_sb);
my_size = sizeof(dest.user32_sb);
sbp = (caddr_t)&dest.user32_sb;
}
}
/*
- * Get file status; this version follows links.
- *
- * Returns: 0 Success
- * stat2:??? [see stat2() in this file]
- */
-static int
-stat1(user_addr_t path, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsecurity_size, int isstat64)
-{
- struct nameidata nd;
- vfs_context_t ctx = vfs_context_current();
-
- NDINIT(&nd, LOOKUP, OP_GETATTR, NOTRIGGER | FOLLOW | AUDITVNPATH1,
- UIO_USERSPACE, path, ctx);
- return(stat2(ctx, &nd, ub, xsecurity, xsecurity_size, isstat64));
-}
-
-/*
- * stat_extended: Get file status; with extended security (ACL).
+ * stat_extended: Get file status; with extended security (ACL).
*
* Parameters: p (ignored)
* uap User argument descriptor (see below)
- * retval (ignored)
+ * retval (ignored)
*
* Indirect: uap->path Path of file to get status from
* uap->ub User buffer (holds file status info)
* uap->xsecurity ACL to get (extended security)
* uap->xsecurity_size Size of ACL
- *
+ *
* Returns: 0 Success
* !0 errno value
*
*/
int
-stat_extended(__unused proc_t p, struct stat_extended_args *uap, __unused int32_t *retval)
+stat_extended(__unused proc_t p, struct stat_extended_args *uap,
+ __unused int32_t *retval)
{
- return (stat1(uap->path, uap->ub, uap->xsecurity, uap->xsecurity_size, 0));
+ return (fstatat_internal(vfs_context_current(), uap->path, uap->ub,
+ uap->xsecurity, uap->xsecurity_size, 0, UIO_USERSPACE, AT_FDCWD,
+ 0));
}
/*
* Returns: 0 Success
- * stat1:??? [see stat1() in this file]
+ * fstatat_internal:??? [see fstatat_internal() in this file]
*/
int
stat(__unused proc_t p, struct stat_args *uap, __unused int32_t *retval)
{
- return(stat1(uap->path, uap->ub, 0, 0, 0));
+ return (fstatat_internal(vfs_context_current(), uap->path, uap->ub,
+ 0, 0, 0, UIO_USERSPACE, AT_FDCWD, 0));
}
int
stat64(__unused proc_t p, struct stat64_args *uap, __unused int32_t *retval)
{
- return(stat1(uap->path, uap->ub, 0, 0, 1));
+ return (fstatat_internal(vfs_context_current(), uap->path, uap->ub,
+ 0, 0, 1, UIO_USERSPACE, AT_FDCWD, 0));
}
/*
*
* Parameters: p (ignored)
* uap User argument descriptor (see below)
- * retval (ignored)
+ * retval (ignored)
*
* Indirect: uap->path Path of file to get status from
* uap->ub User buffer (holds file status info)
* uap->xsecurity ACL to get (extended security)
* uap->xsecurity_size Size of ACL
- *
+ *
* Returns: 0 Success
* !0 errno value
*
int
stat64_extended(__unused proc_t p, struct stat64_extended_args *uap, __unused int32_t *retval)
{
- return (stat1(uap->path, uap->ub, uap->xsecurity, uap->xsecurity_size, 1));
-}
-/*
- * Get file status; this version does not follow links.
- */
-static int
-lstat1(user_addr_t path, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsecurity_size, int isstat64)
-{
- struct nameidata nd;
- vfs_context_t ctx = vfs_context_current();
-
- NDINIT(&nd, LOOKUP, OP_GETATTR, NOTRIGGER | NOFOLLOW | AUDITVNPATH1,
- UIO_USERSPACE, path, ctx);
-
- return(stat2(ctx, &nd, ub, xsecurity, xsecurity_size, isstat64));
+ return (fstatat_internal(vfs_context_current(), uap->path, uap->ub,
+ uap->xsecurity, uap->xsecurity_size, 1, UIO_USERSPACE, AT_FDCWD,
+ 0));
}
/*
*
* Parameters: p (ignored)
* uap User argument descriptor (see below)
- * retval (ignored)
+ * retval (ignored)
*
* Indirect: uap->path Path of file to get status from
* uap->ub User buffer (holds file status info)
* uap->xsecurity ACL to get (extended security)
* uap->xsecurity_size Size of ACL
- *
+ *
* Returns: 0 Success
* !0 errno value
*
int
lstat_extended(__unused proc_t p, struct lstat_extended_args *uap, __unused int32_t *retval)
{
- return (lstat1(uap->path, uap->ub, uap->xsecurity, uap->xsecurity_size, 0));
+ return (fstatat_internal(vfs_context_current(), uap->path, uap->ub,
+ uap->xsecurity, uap->xsecurity_size, 0, UIO_USERSPACE, AT_FDCWD,
+ AT_SYMLINK_NOFOLLOW));
}
+/*
+ * Get file status; this version does not follow links.
+ */
int
lstat(__unused proc_t p, struct lstat_args *uap, __unused int32_t *retval)
{
- return(lstat1(uap->path, uap->ub, 0, 0, 0));
+ return (fstatat_internal(vfs_context_current(), uap->path, uap->ub,
+ 0, 0, 0, UIO_USERSPACE, AT_FDCWD, AT_SYMLINK_NOFOLLOW));
}
int
lstat64(__unused proc_t p, struct lstat64_args *uap, __unused int32_t *retval)
{
- return(lstat1(uap->path, uap->ub, 0, 0, 1));
+ return (fstatat_internal(vfs_context_current(), uap->path, uap->ub,
+ 0, 0, 1, UIO_USERSPACE, AT_FDCWD, AT_SYMLINK_NOFOLLOW));
}
/*
*
* Parameters: p (ignored)
* uap User argument descriptor (see below)
- * retval (ignored)
+ * retval (ignored)
*
* Indirect: uap->path Path of file to get status from
* uap->ub User buffer (holds file status info)
* uap->xsecurity ACL to get (extended security)
* uap->xsecurity_size Size of ACL
- *
+ *
* Returns: 0 Success
* !0 errno value
*
int
lstat64_extended(__unused proc_t p, struct lstat64_extended_args *uap, __unused int32_t *retval)
{
- return (lstat1(uap->path, uap->ub, uap->xsecurity, uap->xsecurity_size, 1));
+ return (fstatat_internal(vfs_context_current(), uap->path, uap->ub,
+ uap->xsecurity, uap->xsecurity_size, 1, UIO_USERSPACE, AT_FDCWD,
+ AT_SYMLINK_NOFOLLOW));
+}
+
+int
+fstatat(__unused proc_t p, struct fstatat_args *uap, __unused int32_t *retval)
+{
+ if (uap->flag & ~AT_SYMLINK_NOFOLLOW)
+ return (EINVAL);
+
+ return (fstatat_internal(vfs_context_current(), uap->path, uap->ub,
+ 0, 0, 0, UIO_USERSPACE, uap->fd, uap->flag));
+}
+
+int
+fstatat64(__unused proc_t p, struct fstatat64_args *uap,
+ __unused int32_t *retval)
+{
+ if (uap->flag & ~AT_SYMLINK_NOFOLLOW)
+ return (EINVAL);
+
+ return (fstatat_internal(vfs_context_current(), uap->path, uap->ub,
+ 0, 0, 1, UIO_USERSPACE, uap->fd, uap->flag));
}
/*
struct nameidata nd;
vfs_context_t ctx = vfs_context_current();
- NDINIT(&nd, LOOKUP, OP_PATHCONF, FOLLOW | AUDITVNPATH1,
+ NDINIT(&nd, LOOKUP, OP_PATHCONF, FOLLOW | AUDITVNPATH1,
UIO_USERSPACE, uap->path, ctx);
error = namei(&nd);
if (error)
* Return target name of a symbolic link.
*/
/* ARGSUSED */
-int
-readlink(proc_t p, struct readlink_args *uap, int32_t *retval)
+static int
+readlinkat_internal(vfs_context_t ctx, int fd, user_addr_t path,
+ enum uio_seg seg, user_addr_t buf, size_t bufsize, enum uio_seg bufseg,
+ int *retval)
{
vnode_t vp;
uio_t auio;
- int spacetype = proc_is64bit(p) ? UIO_USERSPACE64 : UIO_USERSPACE32;
int error;
struct nameidata nd;
- vfs_context_t ctx = vfs_context_current();
char uio_buf[ UIO_SIZEOF(1) ];
- NDINIT(&nd, LOOKUP, OP_READLINK, NOFOLLOW | AUDITVNPATH1,
- UIO_USERSPACE, uap->path, ctx);
- error = namei(&nd);
+ NDINIT(&nd, LOOKUP, OP_READLINK, NOFOLLOW | AUDITVNPATH1,
+ seg, path, ctx);
+
+ error = nameiat(&nd, fd);
if (error)
return (error);
vp = nd.ni_vp;
nameidone(&nd);
- auio = uio_createwithbuffer(1, 0, spacetype, UIO_READ,
- &uio_buf[0], sizeof(uio_buf));
- uio_addiov(auio, uap->buf, uap->count);
- if (vp->v_type != VLNK)
+ auio = uio_createwithbuffer(1, 0, bufseg, UIO_READ,
+ &uio_buf[0], sizeof(uio_buf));
+ uio_addiov(auio, buf, bufsize);
+ if (vp->v_type != VLNK) {
error = EINVAL;
- else {
+ } else {
#if CONFIG_MACF
- error = mac_vnode_check_readlink(ctx,
- vp);
+ error = mac_vnode_check_readlink(ctx, vp);
#endif
if (error == 0)
- error = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_DATA, ctx);
+ error = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_DATA,
+ ctx);
if (error == 0)
error = VNOP_READLINK(vp, auio, ctx);
}
vnode_put(vp);
- /* Safe: uio_resid() is bounded above by "count", and "count" is an int */
- *retval = uap->count - (int)uio_resid(auio);
+ *retval = bufsize - (int)uio_resid(auio);
return (error);
}
+int
+readlink(proc_t p, struct readlink_args *uap, int32_t *retval)
+{
+ enum uio_seg procseg;
+
+ procseg = IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32;
+ return (readlinkat_internal(vfs_context_current(), AT_FDCWD,
+ CAST_USER_ADDR_T(uap->path), procseg, CAST_USER_ADDR_T(uap->buf),
+ uap->count, procseg, retval));
+}
+
+int
+readlinkat(proc_t p, struct readlinkat_args *uap, int32_t *retval)
+{
+ enum uio_seg procseg;
+
+ procseg = IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32;
+ return (readlinkat_internal(vfs_context_current(), uap->fd, uap->path,
+ procseg, uap->buf, uap->bufsize, procseg, retval));
+}
+
/*
* Change file flags.
+ *
+ * NOTE: this will vnode_put() `vp'
*/
static int
chflags1(vnode_t vp, int flags, vfs_context_t ctx)
goto out;
error = vnode_setattr(vp, &va, ctx);
+#if CONFIG_MACF
+ if (error == 0)
+ mac_vnode_notify_setflags(ctx, vp, flags);
+#endif
+
if ((error == 0) && !VATTR_IS_SUPPORTED(&va, va_flags)) {
error = ENOTSUP;
}
struct nameidata nd;
AUDIT_ARG(fflags, uap->flags);
- NDINIT(&nd, LOOKUP, OP_SETATTR, FOLLOW | AUDITVNPATH1,
+ NDINIT(&nd, LOOKUP, OP_SETATTR, FOLLOW | AUDITVNPATH1,
UIO_USERSPACE, uap->path, ctx);
error = namei(&nd);
if (error)
vp = nd.ni_vp;
nameidone(&nd);
+ /* we don't vnode_put() here because chflags1 does internally */
error = chflags1(vp, uap->flags, ctx);
return(error);
AUDIT_ARG(vnpath, vp, ARG_VNODE1);
+ /* we don't vnode_put() here because chflags1 does internally */
error = chflags1(vp, uap->flags, vfs_context_current());
file_drop(uap->fd);
* translated to EPERM before being returned.
*/
static int
-chmod2(vfs_context_t ctx, vnode_t vp, struct vnode_attr *vap)
+chmod_vnode(vfs_context_t ctx, vnode_t vp, struct vnode_attr *vap)
{
kauth_action_t action;
int error;
-
+
AUDIT_ARG(mode, vap->va_mode);
/* XXX audit new args */
if (VATTR_IS_ACTIVE(vap, va_mode) &&
(error = mac_vnode_check_setmode(ctx, vp, (mode_t)vap->va_mode)) != 0)
return (error);
+
+ if (VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid)) {
+ if ((error = mac_vnode_check_setowner(ctx, vp,
+ VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : -1,
+ VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : -1)))
+ return (error);
+ }
+
+ if (VATTR_IS_ACTIVE(vap, va_acl) &&
+ (error = mac_vnode_check_setacl(ctx, vp, vap->va_acl)))
+ return (error);
#endif
/* make sure that the caller is allowed to set this security information */
error = EPERM;
return(error);
}
-
- error = vnode_setattr(vp, vap, ctx);
+
+ if ((error = vnode_setattr(vp, vap, ctx)) != 0)
+ return (error);
+
+#if CONFIG_MACF
+ if (VATTR_IS_ACTIVE(vap, va_mode))
+ mac_vnode_notify_setmode(ctx, vp, (mode_t)vap->va_mode);
+
+ if (VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid))
+ mac_vnode_notify_setowner(ctx, vp,
+ VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : -1,
+ VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : -1);
+
+ if (VATTR_IS_ACTIVE(vap, va_acl))
+ mac_vnode_notify_setacl(ctx, vp, vap->va_acl);
+#endif
return (error);
}
*
* Returns: 0 Success
* namei:??? [anything namei can return]
- * chmod2:??? [anything chmod2 can return]
+ * chmod_vnode:??? [anything chmod_vnode can return]
*/
static int
-chmod1(vfs_context_t ctx, user_addr_t path, struct vnode_attr *vap)
+chmodat(vfs_context_t ctx, user_addr_t path, struct vnode_attr *vap,
+ int fd, int flag, enum uio_seg segflg)
{
struct nameidata nd;
- int error;
+ int follow, error;
- NDINIT(&nd, LOOKUP, OP_SETATTR, FOLLOW | AUDITVNPATH1,
- UIO_USERSPACE, path, ctx);
- if ((error = namei(&nd)))
+ follow = (flag & AT_SYMLINK_NOFOLLOW) ? NOFOLLOW : FOLLOW;
+ NDINIT(&nd, LOOKUP, OP_SETATTR, follow | AUDITVNPATH1,
+ segflg, path, ctx);
+ if ((error = nameiat(&nd, fd)))
return (error);
- error = chmod2(ctx, nd.ni_vp, vap);
+ error = chmod_vnode(ctx, nd.ni_vp, vap);
vnode_put(nd.ni_vp);
nameidone(&nd);
return(error);
}
/*
- * chmod_extended: Change the mode of a file given a path name; with extended
+ * chmod_extended: Change the mode of a file given a path name; with extended
* argument list (including extended security (ACL)).
*
* Parameters: p Process requesting the open
KAUTH_DEBUG("CHMOD - setting ACL with %d entries", va.va_acl->acl_entrycount);
}
- error = chmod1(vfs_context_current(), uap->path, &va);
+ error = chmodat(vfs_context_current(), uap->path, &va, AT_FDCWD, 0,
+ UIO_USERSPACE);
if (xsecdst != NULL)
kauth_filesec_free(xsecdst);
/*
* Returns: 0 Success
- * chmod1:??? [anything chmod1 can return]
+ * chmodat:??? [anything chmodat can return]
*/
-int
-chmod(__unused proc_t p, struct chmod_args *uap, __unused int32_t *retval)
+static int
+fchmodat_internal(vfs_context_t ctx, user_addr_t path, int mode, int fd,
+ int flag, enum uio_seg segflg)
{
struct vnode_attr va;
VATTR_INIT(&va);
- VATTR_SET(&va, va_mode, uap->mode & ALLPERMS);
+ VATTR_SET(&va, va_mode, mode & ALLPERMS);
+
+ return (chmodat(ctx, path, &va, fd, flag, segflg));
+}
+
+int
+chmod(__unused proc_t p, struct chmod_args *uap, __unused int32_t *retval)
+{
+ return (fchmodat_internal(vfs_context_current(), uap->path, uap->mode,
+ AT_FDCWD, 0, UIO_USERSPACE));
+}
+
+int
+fchmodat(__unused proc_t p, struct fchmodat_args *uap, __unused int32_t *retval)
+{
+ if (uap->flag & ~AT_SYMLINK_NOFOLLOW)
+ return (EINVAL);
- return(chmod1(vfs_context_current(), uap->path, &va));
+ return (fchmodat_internal(vfs_context_current(), uap->path, uap->mode,
+ uap->fd, uap->flag, UIO_USERSPACE));
}
/*
}
AUDIT_ARG(vnpath, vp, ARG_VNODE1);
- error = chmod2(vfs_context_current(), vp, vap);
+ error = chmod_vnode(vfs_context_current(), vp, vap);
(void)vnode_put(vp);
file_drop(fd);
*
* Parameters: p Process requesting to change file mode
* uap User argument descriptor (see below)
- * retval (ignored)
+ * retval (ignored)
*
* Indirect: uap->mode File mode to set (same as 'chmod')
* uap->uid UID to set
* uap->gid GID to set
* uap->xsecurity ACL to set (or delete)
* uap->fd File descriptor of file to change mode
- *
+ *
* Returns: 0 Success
* !0 errno value
*
error = fchmod1(p, uap->fd, &va);
-
+
switch(uap->xsecurity) {
case USER_ADDR_NULL:
case CAST_USER_ADDR_T(-1):
*/
/* ARGSUSED */
static int
-chown1(vfs_context_t ctx, struct chown_args *uap, __unused int32_t *retval, int follow)
+fchownat_internal(vfs_context_t ctx, int fd, user_addr_t path, uid_t uid,
+ gid_t gid, int flag, enum uio_seg segflg)
{
vnode_t vp;
struct vnode_attr va;
int error;
struct nameidata nd;
+ int follow;
kauth_action_t action;
- AUDIT_ARG(owner, uap->uid, uap->gid);
+ AUDIT_ARG(owner, uid, gid);
- NDINIT(&nd, LOOKUP, OP_SETATTR,
- (follow ? FOLLOW : 0) | NOTRIGGER | AUDITVNPATH1,
- UIO_USERSPACE, uap->path, ctx);
- error = namei(&nd);
+ follow = (flag & AT_SYMLINK_NOFOLLOW) ? NOFOLLOW : FOLLOW;
+ NDINIT(&nd, LOOKUP, OP_SETATTR, follow | AUDITVNPATH1, segflg,
+ path, ctx);
+ error = nameiat(&nd, fd);
if (error)
return (error);
vp = nd.ni_vp;
nameidone(&nd);
VATTR_INIT(&va);
- if (uap->uid != VNOVAL)
- VATTR_SET(&va, va_uid, uap->uid);
- if (uap->gid != VNOVAL)
- VATTR_SET(&va, va_gid, uap->gid);
+ if (uid != (uid_t)VNOVAL)
+ VATTR_SET(&va, va_uid, uid);
+ if (gid != (gid_t)VNOVAL)
+ VATTR_SET(&va, va_gid, gid);
#if CONFIG_MACF
- error = mac_vnode_check_setowner(ctx, vp, uap->uid, uap->gid);
+ error = mac_vnode_check_setowner(ctx, vp, uid, gid);
if (error)
goto out;
#endif
if (action && ((error = vnode_authorize(vp, NULL, action, ctx)) != 0))
goto out;
error = vnode_setattr(vp, &va, ctx);
-
+
+#if CONFIG_MACF
+ if (error == 0)
+ mac_vnode_notify_setowner(ctx, vp, uid, gid);
+#endif
+
out:
/*
* EACCES is only allowed from namei(); permissions failure should
*/
if (error == EACCES)
error = EPERM;
-
+
vnode_put(vp);
return (error);
}
int
-chown(__unused proc_t p, struct chown_args *uap, int32_t *retval)
+chown(__unused proc_t p, struct chown_args *uap, __unused int32_t *retval)
{
- return chown1(vfs_context_current(), uap, retval, 1);
+ return (fchownat_internal(vfs_context_current(), AT_FDCWD, uap->path,
+ uap->uid, uap->gid, 0, UIO_USERSPACE));
}
int
-lchown(__unused proc_t p, struct lchown_args *uap, int32_t *retval)
+lchown(__unused proc_t p, struct lchown_args *uap, __unused int32_t *retval)
{
- /* Argument list identical, but machine generated; cast for chown1() */
- return chown1(vfs_context_current(), (struct chown_args *)uap, retval, 0);
+ return (fchownat_internal(vfs_context_current(), AT_FDCWD, uap->path,
+ uap->owner, uap->group, AT_SYMLINK_NOFOLLOW, UIO_USERSPACE));
+}
+
+int
+fchownat(__unused proc_t p, struct fchownat_args *uap, __unused int32_t *retval)
+{
+ if (uap->flag & ~AT_SYMLINK_NOFOLLOW)
+ return (EINVAL);
+
+ return (fchownat_internal(vfs_context_current(), uap->fd, uap->path,
+ uap->uid, uap->gid, uap->flag, UIO_USERSPACE));
}
/*
}
error = vnode_setattr(vp, &va, ctx);
+#if CONFIG_MACF
+ if (error == 0)
+ mac_vnode_notify_setowner(ctx, vp, uap->uid, uap->gid);
+#endif
+
out:
(void)vnode_put(vp);
file_drop(uap->fd);
}
error = vnode_setattr(vp, &va, ctx);
+#if CONFIG_MACF
+ if (error == 0)
+ mac_vnode_notify_setutimes(ctx, vp, ts[0], ts[1]);
+#endif
+
out:
return error;
}
vfs_context_t ctx = vfs_context_current();
/*
- * AUDIT: Needed to change the order of operations to do the
+ * AUDIT: Needed to change the order of operations to do the
* name lookup first because auditing wants the path.
*/
- NDINIT(&nd, LOOKUP, OP_SETATTR, FOLLOW | AUDITVNPATH1,
+ NDINIT(&nd, LOOKUP, OP_SETATTR, FOLLOW | AUDITVNPATH1,
UIO_USERSPACE, uap->path, ctx);
error = namei(&nd);
if (error)
if (uap->length < 0)
return(EINVAL);
- NDINIT(&nd, LOOKUP, OP_TRUNCATE, FOLLOW | AUDITVNPATH1,
+ NDINIT(&nd, LOOKUP, OP_TRUNCATE, FOLLOW | AUDITVNPATH1,
UIO_USERSPACE, uap->path, ctx);
if ((error = namei(&nd)))
return (error);
if ((action != 0) && ((error = vnode_authorize(vp, NULL, action, ctx)) != 0))
goto out;
error = vnode_setattr(vp, &va, ctx);
+
+#if CONFIG_MACF
+ if (error == 0)
+ mac_vnode_notify_truncate(ctx, NOCRED, vp);
+#endif
+
out:
vnode_put(vp);
return (error);
AUDIT_ARG(fd, uap->fd);
if (uap->length < 0)
return(EINVAL);
-
+
if ( (error = fp_lookup(p,fd,&fp,0)) ) {
return(error);
}
VATTR_INIT(&va);
VATTR_SET(&va, va_data_size, uap->length);
error = vnode_setattr(vp, &va, ctx);
+
+#if CONFIG_MACF
+ if (error == 0)
+ mac_vnode_notify_truncate(ctx, fp->f_fglob->fg_cred, vp);
+#endif
+
(void)vnode_put(vp);
out:
file_drop(fd);
* thread cancellation points.
*/
/* ARGSUSED */
-int
+int
fsync_nocancel(proc_t p, struct fsync_nocancel_args *uap, __unused int32_t *retval)
{
return(fsync_common(p, (struct fsync_args *)uap, MNT_WAIT));
#if NAMEDRSRCFORK
/* Sync resource fork shadow file if necessary. */
if ((error == 0) &&
- (vp->v_flag & VISNAMEDSTREAM) &&
+ (vp->v_flag & VISNAMEDSTREAM) &&
(vp->v_parent != NULLVP) &&
vnode_isshadow(vp) &&
(fp->f_flags & FP_WRITTEN)) {
}
/*
- * Duplicate files. Source must be a file, target must be a file or
+ * Duplicate files. Source must be a file, target must be a file or
* must not exist.
*
* XXX Copyfile authorisation checking is woefully inadequate, and will not
struct nameidata fromnd, tond;
int error;
vfs_context_t ctx = vfs_context_current();
+#if CONFIG_MACF
+ struct filedesc *fdp = (vfs_context_proc(ctx))->p_fd;
+ struct vnode_attr va;
+#endif
/* Check that the flags are valid. */
return(EINVAL);
}
- NDINIT(&fromnd, LOOKUP, OP_COPYFILE, SAVESTART | AUDITVNPATH1,
+ NDINIT(&fromnd, LOOKUP, OP_COPYFILE, AUDITVNPATH1,
UIO_USERSPACE, uap->from, ctx);
if ((error = namei(&fromnd)))
return (error);
goto out;
}
}
+
if (fvp->v_type == VDIR || (tvp && tvp->v_type == VDIR)) {
error = EISDIR;
goto out;
}
+ /* This calls existing MAC hooks for open */
+ if ((error = vn_authorize_open_existing(fvp, &fromnd.ni_cnd, FREAD, ctx,
+ NULL))) {
+ goto out;
+ }
+
+ if (tvp) {
+ /*
+ * See unlinkat_internal for an explanation of the potential
+ * ENOENT from the MAC hook but the gist is that the MAC hook
+ * can fail because vn_getpath isn't able to return the full
+ * path. We choose to ignore this failure.
+ */
+ error = vn_authorize_unlink(tdvp, tvp, &tond.ni_cnd, ctx, NULL);
+ if (error && error != ENOENT)
+ goto out;
+ error = 0;
+ }
+
+#if CONFIG_MACF
+ VATTR_INIT(&va);
+ VATTR_SET(&va, va_type, fvp->v_type);
+ /* Mask off all but regular access permissions */
+ VATTR_SET(&va, va_mode,
+ ((((uap->mode &~ fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT) & ACCESSPERMS));
+ error = mac_vnode_check_create(ctx, tdvp, &tond.ni_cnd, &va);
+ if (error)
+ goto out;
+#endif /* CONFIG_MACF */
+
if ((error = vnode_authorize(tdvp, NULL, KAUTH_VNODE_ADD_FILE, ctx)) != 0)
goto out;
out1:
vnode_put(fvp);
- if (fromnd.ni_startdir)
- vnode_put(fromnd.ni_startdir);
nameidone(&fromnd);
if (error == -1)
return (error);
}
+#define CLONE_SNAPSHOT_FALLBACKS_ENABLED 1
/*
- * Rename files. Source and destination must either both be directories,
- * or both not be directories. If target is a directory, it must be empty.
+ * Helper function for doing clones. The caller is expected to provide an
+ * iocounted source vnode and release it.
*/
-/* ARGSUSED */
-int
-rename(__unused proc_t p, struct rename_args *uap, __unused int32_t *retval)
+static int
+clonefile_internal(vnode_t fvp, boolean_t data_read_authorised, int dst_dirfd,
+ user_addr_t dst, uint32_t flags, vfs_context_t ctx)
{
vnode_t tvp, tdvp;
- vnode_t fvp, fdvp;
- struct nameidata *fromnd, *tond;
- vfs_context_t ctx = vfs_context_current();
+ struct nameidata tond;
int error;
- int do_retry;
- int mntrename;
- int need_event;
- const char *oname = NULL;
- char *from_name = NULL, *to_name = NULL;
- int from_len=0, to_len=0;
- int holding_mntlock;
- mount_t locked_mp = NULL;
- vnode_t oparent = NULLVP;
-#if CONFIG_FSE
- fse_info from_finfo, to_finfo;
-#endif
- int from_truncated=0, to_truncated;
- int batched = 0;
- struct vnode_attr *fvap, *tvap;
- int continuing = 0;
- /* carving out a chunk for structs that are too big to be on stack. */
- struct {
- struct nameidata from_node, to_node;
- struct vnode_attr fv_attr, tv_attr;
- } * __rename_data;
- MALLOC(__rename_data, void *, sizeof(*__rename_data), M_TEMP, M_WAITOK);
- fromnd = &__rename_data->from_node;
- tond = &__rename_data->to_node;
-
- holding_mntlock = 0;
- do_retry = 0;
-retry:
- fvp = tvp = NULL;
- fdvp = tdvp = NULL;
- fvap = tvap = NULL;
- mntrename = FALSE;
+ int follow;
+ boolean_t free_src_acl;
+ boolean_t attr_cleanup;
+ enum vtype v_type;
+ kauth_action_t action;
+ struct componentname *cnp;
+ uint32_t defaulted;
+ struct vnode_attr va;
+ struct vnode_attr nva;
+ uint32_t vnop_flags;
+
+ v_type = vnode_vtype(fvp);
+ switch (v_type) {
+ case VLNK:
+ /* FALLTHRU */
+ case VREG:
+ action = KAUTH_VNODE_ADD_FILE;
+ break;
+ case VDIR:
+ if (vnode_isvroot(fvp) || vnode_ismount(fvp) ||
+ fvp->v_mountedhere) {
+ return (EINVAL);
+ }
+ action = KAUTH_VNODE_ADD_SUBDIRECTORY;
+ break;
+ default:
+ return (EINVAL);
+ }
- NDINIT(fromnd, DELETE, OP_UNLINK, WANTPARENT | AUDITVNPATH1,
- UIO_USERSPACE, uap->from, ctx);
- fromnd->ni_flag = NAMEI_COMPOUNDRENAME;
-
- NDINIT(tond, RENAME, OP_RENAME, WANTPARENT | AUDITVNPATH2 | CN_NBMOUNTLOOK,
- UIO_USERSPACE, uap->to, ctx);
- tond->ni_flag = NAMEI_COMPOUNDRENAME;
-
-continue_lookup:
- if ((fromnd->ni_flag & NAMEI_CONTLOOKUP) != 0 || !continuing) {
- if ( (error = namei(fromnd)) )
- goto out1;
- fdvp = fromnd->ni_dvp;
- fvp = fromnd->ni_vp;
+ AUDIT_ARG(fd2, dst_dirfd);
+ AUDIT_ARG(value32, flags);
- if (fvp && fvp->v_type == VDIR)
- tond->ni_cnd.cn_flags |= WILLBEDIR;
- }
+ follow = (flags & CLONE_NOFOLLOW) ? NOFOLLOW : FOLLOW;
+ NDINIT(&tond, CREATE, OP_LINK, follow | WANTPARENT | AUDITVNPATH2,
+ UIO_USERSPACE, dst, ctx);
+ if ((error = nameiat(&tond, dst_dirfd)))
+ return (error);
+ cnp = &tond.ni_cnd;
+ tdvp = tond.ni_dvp;
+ tvp = tond.ni_vp;
- if ((tond->ni_flag & NAMEI_CONTLOOKUP) != 0 || !continuing) {
- if ( (error = namei(tond)) ) {
- /*
- * Translate error code for rename("dir1", "dir2/.").
- */
- if (error == EISDIR && fvp->v_type == VDIR)
- error = EINVAL;
- goto out1;
- }
- tdvp = tond->ni_dvp;
- tvp = tond->ni_vp;
- }
+ free_src_acl = FALSE;
+ attr_cleanup = FALSE;
- batched = vnode_compound_rename_available(fdvp);
- if (!fvp) {
- /*
- * Claim: this check will never reject a valid rename.
- * For success, either fvp must be on the same mount as tdvp, or fvp must sit atop a vnode on the same mount as tdvp.
- * Suppose fdvp and tdvp are not on the same mount.
- * If fvp is on the same mount as tdvp, then fvp is not on the same mount as fdvp, so fvp is the root of its filesystem. If fvp is the root,
- * then you can't move it to within another dir on the same mountpoint.
- * If fvp sits atop a vnode on the same mount as fdvp, then that vnode must be part of the same mount as fdvp, which is a contradiction.
- *
- * If this check passes, then we are safe to pass these vnodes to the same FS.
- */
- if (fdvp->v_mount != tdvp->v_mount) {
- error = EXDEV;
- goto out1;
- }
- goto skipped_lookup;
+ if (tvp != NULL) {
+ error = EEXIST;
+ goto out;
}
- if (!batched) {
- error = vn_authorize_rename(fdvp, fvp, &fromnd->ni_cnd, tdvp, tvp, &tond->ni_cnd, ctx, NULL);
- if (error) {
- if (error == ENOENT) {
- /*
- * We encountered a race where after doing the namei, tvp stops
- * being valid. If so, simply re-drive the rename call from the
- * top.
- */
- do_retry = 1;
- }
- goto out1;
- }
+ if (vnode_mount(tdvp) != vnode_mount(fvp)) {
+ error = EXDEV;
+ goto out;
}
- /*
- * If the source and destination are the same (i.e. they're
- * links to the same vnode) and the target file system is
- * case sensitive, then there is nothing to do.
- *
- * XXX Come back to this.
- */
- if (fvp == tvp) {
- int pathconf_val;
-
- /*
- * Note: if _PC_CASE_SENSITIVE selector isn't supported,
- * then assume that this file system is case sensitive.
- */
- if (VNOP_PATHCONF(fvp, _PC_CASE_SENSITIVE, &pathconf_val, ctx) != 0 ||
- pathconf_val != 0) {
- goto out1;
- }
- }
+#if CONFIG_MACF
+ if ((error = mac_vnode_check_clone(ctx, tdvp, fvp, cnp)))
+ goto out;
+#endif
+ if ((error = vnode_authorize(tdvp, NULL, action, ctx)))
+ goto out;
+
+ action = KAUTH_VNODE_GENERIC_READ_BITS;
+ if (data_read_authorised)
+ action &= ~KAUTH_VNODE_READ_DATA;
+ if ((error = vnode_authorize(fvp, NULL, action, ctx)))
+ goto out;
/*
- * Allow the renaming of mount points.
- * - target must not exist
- * - target must reside in the same directory as source
- * - union mounts cannot be renamed
- * - "/" cannot be renamed
- *
- * XXX Handle this in VFS after a continued lookup (if we missed
- * in the cache to start off)
+	 * Certain attributes may need to be changed from the source;
+	 * we ask for those here.
*/
- if ((fvp->v_flag & VROOT) &&
- (fvp->v_type == VDIR) &&
- (tvp == NULL) &&
- (fvp->v_mountedhere == NULL) &&
- (fdvp == tdvp) &&
- ((fvp->v_mount->mnt_flag & (MNT_UNION | MNT_ROOTFS)) == 0) &&
- (fvp->v_mount->mnt_vnodecovered != NULLVP)) {
- vnode_t coveredvp;
-
- /* switch fvp to the covered vnode */
- coveredvp = fvp->v_mount->mnt_vnodecovered;
- if ( (vnode_getwithref(coveredvp)) ) {
- error = ENOENT;
- goto out1;
- }
- vnode_put(fvp);
+ VATTR_INIT(&va);
+ VATTR_WANTED(&va, va_uid);
+ VATTR_WANTED(&va, va_gid);
+ VATTR_WANTED(&va, va_mode);
+ VATTR_WANTED(&va, va_flags);
+ VATTR_WANTED(&va, va_acl);
- fvp = coveredvp;
- mntrename = TRUE;
+ if ((error = vnode_getattr(fvp, &va, ctx)) != 0)
+ goto out;
+
+ VATTR_INIT(&nva);
+ VATTR_SET(&nva, va_type, v_type);
+ if (VATTR_IS_SUPPORTED(&va, va_acl) && va.va_acl != NULL) {
+ VATTR_SET(&nva, va_acl, va.va_acl);
+ free_src_acl = TRUE;
}
- /*
- * Check for cross-device rename.
- */
- if ((fvp->v_mount != tdvp->v_mount) ||
- (tvp && (fvp->v_mount != tvp->v_mount))) {
- error = EXDEV;
- goto out1;
+
+ /* Handle ACL inheritance, initialize vap. */
+ if (v_type == VLNK) {
+ error = vnode_authattr_new(tdvp, &nva, 0, ctx);
+ } else {
+ error = vn_attribute_prepare(tdvp, &nva, &defaulted, ctx);
+ if (error)
+ goto out;
+ attr_cleanup = TRUE;
}
+ vnop_flags = VNODE_CLONEFILE_DEFAULT;
/*
- * If source is the same as the destination (that is the
- * same inode number) then there is nothing to do...
- * EXCEPT if the underlying file system supports case
- * insensitivity and is case preserving. In this case
- * the file system needs to handle the special case of
- * getting the same vnode as target (fvp) and source (tvp).
- *
- * Only file systems that support pathconf selectors _PC_CASE_SENSITIVE
- * and _PC_CASE_PRESERVING can have this exception, and they need to
- * handle the special case of getting the same vnode as target and
- * source. NOTE: Then the target is unlocked going into vnop_rename,
- * so not to cause locking problems. There is a single reference on tvp.
- *
- * NOTE - that fvp == tvp also occurs if they are hard linked and
- * that correct behaviour then is just to return success without doing
- * anything.
- *
- * XXX filesystem should take care of this itself, perhaps...
+	 * We've got initial values for all security parameters.
+	 * If we are superuser, then we can change owners to be the
+	 * same as the source. Both superuser and the owner have default
+	 * WRITE_SECURITY privileges so all other fields can be taken
+	 * from source as well.
*/
- if (fvp == tvp && fdvp == tdvp) {
- if (fromnd->ni_cnd.cn_namelen == tond->ni_cnd.cn_namelen &&
- !bcmp(fromnd->ni_cnd.cn_nameptr, tond->ni_cnd.cn_nameptr,
- fromnd->ni_cnd.cn_namelen)) {
- goto out1;
- }
+ if (!(flags & CLONE_NOOWNERCOPY) && vfs_context_issuser(ctx)) {
+ if (VATTR_IS_SUPPORTED(&va, va_uid))
+ VATTR_SET(&nva, va_uid, va.va_uid);
+ if (VATTR_IS_SUPPORTED(&va, va_gid))
+ VATTR_SET(&nva, va_gid, va.va_gid);
+ } else {
+ vnop_flags |= VNODE_CLONEFILE_NOOWNERCOPY;
}
- if (holding_mntlock && fvp->v_mount != locked_mp) {
- /*
- * we're holding a reference and lock
- * on locked_mp, but it no longer matches
- * what we want to do... so drop our hold
- */
- mount_unlock_renames(locked_mp);
- mount_drop(locked_mp, 0);
- holding_mntlock = 0;
+ if (VATTR_IS_SUPPORTED(&va, va_mode))
+ VATTR_SET(&nva, va_mode, va.va_mode);
+ if (VATTR_IS_SUPPORTED(&va, va_flags)) {
+ VATTR_SET(&nva, va_flags,
+ ((va.va_flags & ~(UF_DATAVAULT | SF_RESTRICTED)) | /* Turn off from source */
+ (nva.va_flags & (UF_DATAVAULT | SF_RESTRICTED))));
}
- if (tdvp != fdvp && fvp->v_type == VDIR) {
- /*
- * serialize renames that re-shape
- * the tree... if holding_mntlock is
- * set, then we're ready to go...
- * otherwise we
- * first need to drop the iocounts
- * we picked up, second take the
- * lock to serialize the access,
- * then finally start the lookup
- * process over with the lock held
- */
- if (!holding_mntlock) {
- /*
- * need to grab a reference on
- * the mount point before we
- * drop all the iocounts... once
- * the iocounts are gone, the mount
- * could follow
- */
- locked_mp = fvp->v_mount;
- mount_ref(locked_mp, 0);
-
- /*
- * nameidone has to happen before we vnode_put(tvp)
- * since it may need to release the fs_nodelock on the tvp
- */
- nameidone(tond);
- if (tvp)
- vnode_put(tvp);
- vnode_put(tdvp);
+ error = VNOP_CLONEFILE(fvp, tdvp, &tvp, cnp, &nva, vnop_flags, ctx);
- /*
- * nameidone has to happen before we vnode_put(fdvp)
- * since it may need to release the fs_nodelock on the fvp
- */
- nameidone(fromnd);
+ if (!error && tvp) {
+ int update_flags = 0;
+#if CONFIG_FSE
+ int fsevent;
+#endif /* CONFIG_FSE */
- vnode_put(fvp);
- vnode_put(fdvp);
+#if CONFIG_MACF
+ (void)vnode_label(vnode_mount(tvp), tdvp, tvp, cnp,
+ VNODE_LABEL_CREATE, ctx);
+#endif
+ /*
+ * If some of the requested attributes weren't handled by the
+ * VNOP, use our fallback code.
+ */
+ if (!VATTR_ALL_SUPPORTED(&va))
+ (void)vnode_setattr_fallback(tvp, &nva, ctx);
- mount_lock_renames(locked_mp);
- holding_mntlock = 1;
+ // Make sure the name & parent pointers are hooked up
+ if (tvp->v_name == NULL)
+ update_flags |= VNODE_UPDATE_NAME;
+ if (tvp->v_parent == NULLVP)
+ update_flags |= VNODE_UPDATE_PARENT;
- goto retry;
- }
- } else {
- /*
- * when we dropped the iocounts to take
- * the lock, we allowed the identity of
- * the various vnodes to change... if they did,
- * we may no longer be dealing with a rename
- * that reshapes the tree... once we're holding
- * the iocounts, the vnodes can't change type
- * so we're free to drop the lock at this point
- * and continue on
- */
- if (holding_mntlock) {
- mount_unlock_renames(locked_mp);
- mount_drop(locked_mp, 0);
- holding_mntlock = 0;
+ if (update_flags) {
+ (void)vnode_update_identity(tvp, tdvp, cnp->cn_nameptr,
+ cnp->cn_namelen, cnp->cn_hash, update_flags);
}
- }
-
- // save these off so we can later verify that fvp is the same
- oname = fvp->v_name;
- oparent = fvp->v_parent;
-skipped_lookup:
#if CONFIG_FSE
- need_event = need_fsevent(FSE_RENAME, fdvp);
- if (need_event) {
- if (fvp) {
- get_fse_info(fvp, &from_finfo, ctx);
- } else {
- error = vfs_get_notify_attributes(&__rename_data->fv_attr);
- if (error) {
- goto out1;
- }
-
- fvap = &__rename_data->fv_attr;
+ switch (vnode_vtype(tvp)) {
+ case VLNK:
+ /* FALLTHRU */
+ case VREG:
+ fsevent = FSE_CREATE_FILE;
+ break;
+ case VDIR:
+ fsevent = FSE_CREATE_DIR;
+ break;
+ default:
+ goto out;
}
- if (tvp) {
- get_fse_info(tvp, &to_finfo, ctx);
- } else if (batched) {
- error = vfs_get_notify_attributes(&__rename_data->tv_attr);
- if (error) {
- goto out1;
- }
-
- tvap = &__rename_data->tv_attr;
+ if (need_fsevent(fsevent, tvp)) {
+ /*
+ * The following is a sequence of three explicit events.
+ * A pair of FSE_CLONE events representing the source and destination
+ * followed by an FSE_CREATE_[FILE | DIR] for the destination.
+ * fseventsd may coalesce the destination clone and create events
+ * into a single event resulting in the following sequence for a client
+ * FSE_CLONE (src)
+ * FSE_CLONE | FSE_CREATE (dst)
+ */
+ add_fsevent(FSE_CLONE, ctx, FSE_ARG_VNODE, fvp, FSE_ARG_VNODE, tvp,
+ FSE_ARG_DONE);
+ add_fsevent(fsevent, ctx, FSE_ARG_VNODE, tvp,
+ FSE_ARG_DONE);
}
- }
-#else
- need_event = 0;
#endif /* CONFIG_FSE */
+ }
- if (need_event || kauth_authorize_fileop_has_listeners()) {
- if (from_name == NULL) {
- GET_PATH(from_name);
- if (from_name == NULL) {
- error = ENOMEM;
- goto out1;
- }
- }
+out:
+ if (attr_cleanup)
+ vn_attribute_cleanup(&nva, defaulted);
+ if (free_src_acl && va.va_acl)
+ kauth_acl_free(va.va_acl);
+ nameidone(&tond);
+ if (tvp)
+ vnode_put(tvp);
+ vnode_put(tdvp);
+ return (error);
+}
- from_len = safe_getpath(fdvp, fromnd->ni_cnd.cn_nameptr, from_name, MAXPATHLEN, &from_truncated);
+/*
+ * clone files or directories, target must not exist.
+ *
+ * The clone source is looked up via nameiat() relative to uap->src_dirfd
+ * (CLONE_NOFOLLOW selects NOFOLLOW for the lookup); the actual clone is
+ * performed by clonefile_internal().
+ */
+/* ARGSUSED */
+int
+clonefileat(__unused proc_t p, struct clonefileat_args *uap,
+    __unused int32_t *retval)
+{
+	vnode_t fvp;
+	struct nameidata fromnd;
+	int follow;
+	int error;
+	vfs_context_t ctx = vfs_context_current();
-		if (to_name == NULL) {
-			GET_PATH(to_name);
-			if (to_name == NULL) {
-				error = ENOMEM;
-				goto out1;
-			}
-		}
+	/* Check that the flags are valid. */
+	if (uap->flags & ~(CLONE_NOFOLLOW | CLONE_NOOWNERCOPY))
+		return (EINVAL);
-		to_len = safe_getpath(tdvp, tond->ni_cnd.cn_nameptr, to_name, MAXPATHLEN, &to_truncated);
-	}
-
-	error = vn_rename(fdvp, &fvp, &fromnd->ni_cnd, fvap,
-	                  tdvp, &tvp, &tond->ni_cnd, tvap,
-	                  0, ctx);
+	AUDIT_ARG(fd, uap->src_dirfd);
-	if (holding_mntlock) {
-		/*
-		 * we can drop our serialization
-		 * lock now
-		 */
-		mount_unlock_renames(locked_mp);
-		mount_drop(locked_mp, 0);
-		holding_mntlock = 0;
-	}
-	if (error) {
-		if (error == EKEEPLOOKING) {
-			if ((fromnd->ni_flag & NAMEI_CONTLOOKUP) == 0) {
-				if ((tond->ni_flag & NAMEI_CONTLOOKUP) == 0) {
-					panic("EKEEPLOOKING without NAMEI_CONTLOOKUP on either ndp?");
-				}
-			}
+	follow = (uap->flags & CLONE_NOFOLLOW) ? NOFOLLOW : FOLLOW;
+	NDINIT(&fromnd, LOOKUP, OP_COPYFILE, follow | AUDITVNPATH1,
+	    UIO_USERSPACE, uap->src, ctx);
+	if ((error = nameiat(&fromnd, uap->src_dirfd)))
+		return (error);
-			fromnd->ni_vp = fvp;
-			tond->ni_vp = tvp;
-
-			goto continue_lookup;
-		}
+	/* namei state can be released; fvp keeps the iocount namei took. */
+	fvp = fromnd.ni_vp;
+	nameidone(&fromnd);
+	/* FALSE: clonefile_internal() must still authorize read of fvp's data. */
+	error = clonefile_internal(fvp, FALSE, uap->dst_dirfd, uap->dst,
+	    uap->flags, ctx);
+
+	vnode_put(fvp);
+	return (error);
+}
+
+/*
+ * fclonefileat: like clonefileat(2), but the clone source is given by an
+ * open file descriptor (uap->src_fd) instead of a path.  The fd must have
+ * been opened for reading; clonefile_internal() is therefore told that
+ * read-data authorization on the source is already granted.
+ */
+int
+fclonefileat(__unused proc_t p, struct fclonefileat_args *uap,
+    __unused int32_t *retval)
+{
+	vnode_t fvp;
+	struct fileproc *fp;
+	int error;
+	vfs_context_t ctx = vfs_context_current();
+
+	/* Check that the flags are valid. */
+	if (uap->flags & ~(CLONE_NOFOLLOW | CLONE_NOOWNERCOPY))
+		return (EINVAL);
+
+	AUDIT_ARG(fd, uap->src_fd);
+	/* NOTE(review): 'p' is marked __unused yet is passed to fp_getfvp(). */
+	error = fp_getfvp(p, uap->src_fd, &fp, &fvp);
+	if (error)
+		return (error);
+
+	/* The source fd must be readable since the clone copies its data. */
+	if ((fp->f_fglob->fg_flag & FREAD) == 0) {
+		AUDIT_ARG(vnpath_withref, fvp, ARG_VNODE1);
+		error = EBADF;
+		goto out;
+	}
+
+	if ((error = vnode_getwithref(fvp)))
+		goto out;
+
+	AUDIT_ARG(vnpath, fvp, ARG_VNODE1);
+
+	/* TRUE: read-data access was already authorized when the fd was opened. */
+	error = clonefile_internal(fvp, TRUE, uap->dst_dirfd, uap->dst,
+	    uap->flags, ctx);
+
+	vnode_put(fvp);
+out:
+	file_drop(uap->src_fd);
+	return (error);
+}
+
+/*
+ * Rename files. Source and destination must either both be directories,
+ * or both not be directories. If target is a directory, it must be empty.
+ */
+/* ARGSUSED */
+static int
+renameat_internal(vfs_context_t ctx, int fromfd, user_addr_t from,
+ int tofd, user_addr_t to, int segflg, vfs_rename_flags_t flags)
+{
+ if (flags & ~VFS_RENAME_FLAGS_MASK)
+ return EINVAL;
+
+ if (ISSET(flags, VFS_RENAME_SWAP) && ISSET(flags, VFS_RENAME_EXCL))
+ return EINVAL;
+
+ vnode_t tvp, tdvp;
+ vnode_t fvp, fdvp;
+ struct nameidata *fromnd, *tond;
+ int error;
+ int do_retry;
+ int retry_count;
+ int mntrename;
+ int need_event;
+ const char *oname = NULL;
+ char *from_name = NULL, *to_name = NULL;
+ int from_len=0, to_len=0;
+ int holding_mntlock;
+ mount_t locked_mp = NULL;
+ vnode_t oparent = NULLVP;
+#if CONFIG_FSE
+ fse_info from_finfo, to_finfo;
+#endif
+ int from_truncated=0, to_truncated;
+ int batched = 0;
+ struct vnode_attr *fvap, *tvap;
+ int continuing = 0;
+ /* carving out a chunk for structs that are too big to be on stack. */
+ struct {
+ struct nameidata from_node, to_node;
+ struct vnode_attr fv_attr, tv_attr;
+ } * __rename_data;
+ MALLOC(__rename_data, void *, sizeof(*__rename_data), M_TEMP, M_WAITOK);
+ fromnd = &__rename_data->from_node;
+ tond = &__rename_data->to_node;
+
+ holding_mntlock = 0;
+ do_retry = 0;
+ retry_count = 0;
+retry:
+ fvp = tvp = NULL;
+ fdvp = tdvp = NULL;
+ fvap = tvap = NULL;
+ mntrename = FALSE;
+
+ NDINIT(fromnd, DELETE, OP_UNLINK, WANTPARENT | AUDITVNPATH1,
+ segflg, from, ctx);
+ fromnd->ni_flag = NAMEI_COMPOUNDRENAME;
+
+ NDINIT(tond, RENAME, OP_RENAME, WANTPARENT | AUDITVNPATH2 | CN_NBMOUNTLOOK,
+ segflg, to, ctx);
+ tond->ni_flag = NAMEI_COMPOUNDRENAME;
+
+continue_lookup:
+ if ((fromnd->ni_flag & NAMEI_CONTLOOKUP) != 0 || !continuing) {
+ if ( (error = nameiat(fromnd, fromfd)) )
+ goto out1;
+ fdvp = fromnd->ni_dvp;
+ fvp = fromnd->ni_vp;
+
+ if (fvp && fvp->v_type == VDIR)
+ tond->ni_cnd.cn_flags |= WILLBEDIR;
+ }
+
+ if ((tond->ni_flag & NAMEI_CONTLOOKUP) != 0 || !continuing) {
+ if ( (error = nameiat(tond, tofd)) ) {
+ /*
+ * Translate error code for rename("dir1", "dir2/.").
+ */
+ if (error == EISDIR && fvp->v_type == VDIR)
+ error = EINVAL;
+ goto out1;
+ }
+ tdvp = tond->ni_dvp;
+ tvp = tond->ni_vp;
+ }
+
+#if DEVELOPMENT || DEBUG
+ /*
+ * XXX VSWAP: Check for entitlements or special flag here
+ * so we can restrict access appropriately.
+ */
+#else /* DEVELOPMENT || DEBUG */
+
+ if (fromnd->ni_vp && vnode_isswap(fromnd->ni_vp) && (ctx != vfs_context_kernel())) {
+ error = EPERM;
+ goto out1;
+ }
+
+ if (tond->ni_vp && vnode_isswap(tond->ni_vp) && (ctx != vfs_context_kernel())) {
+ error = EPERM;
+ goto out1;
+ }
+#endif /* DEVELOPMENT || DEBUG */
+
+ if (!tvp && ISSET(flags, VFS_RENAME_SWAP)) {
+ error = ENOENT;
+ goto out1;
+ }
+
+ if (tvp && ISSET(flags, VFS_RENAME_EXCL)) {
+ error = EEXIST;
+ goto out1;
+ }
+
+ batched = vnode_compound_rename_available(fdvp);
+
+#if CONFIG_FSE
+ need_event = need_fsevent(FSE_RENAME, fdvp);
+ if (need_event) {
+ if (fvp) {
+ get_fse_info(fvp, &from_finfo, ctx);
+ } else {
+ error = vfs_get_notify_attributes(&__rename_data->fv_attr);
+ if (error) {
+ goto out1;
+ }
+
+ fvap = &__rename_data->fv_attr;
+ }
+
+ if (tvp) {
+ get_fse_info(tvp, &to_finfo, ctx);
+ } else if (batched) {
+ error = vfs_get_notify_attributes(&__rename_data->tv_attr);
+ if (error) {
+ goto out1;
+ }
+
+ tvap = &__rename_data->tv_attr;
+ }
+ }
+#else
+ need_event = 0;
+#endif /* CONFIG_FSE */
+
+ if (need_event || kauth_authorize_fileop_has_listeners()) {
+ if (from_name == NULL) {
+ GET_PATH(from_name);
+ if (from_name == NULL) {
+ error = ENOMEM;
+ goto out1;
+ }
+ }
+
+ from_len = safe_getpath(fdvp, fromnd->ni_cnd.cn_nameptr, from_name, MAXPATHLEN, &from_truncated);
+
+ if (to_name == NULL) {
+ GET_PATH(to_name);
+ if (to_name == NULL) {
+ error = ENOMEM;
+ goto out1;
+ }
+ }
+
+ to_len = safe_getpath(tdvp, tond->ni_cnd.cn_nameptr, to_name, MAXPATHLEN, &to_truncated);
+ }
+ if (!fvp) {
+ /*
+ * Claim: this check will never reject a valid rename.
+ * For success, either fvp must be on the same mount as tdvp, or fvp must sit atop a vnode on the same mount as tdvp.
+ * Suppose fdvp and tdvp are not on the same mount.
+ * If fvp is on the same mount as tdvp, then fvp is not on the same mount as fdvp, so fvp is the root of its filesystem. If fvp is the root,
+ * then you can't move it to within another dir on the same mountpoint.
+ * If fvp sits atop a vnode on the same mount as fdvp, then that vnode must be part of the same mount as fdvp, which is a contradiction.
+ *
+ * If this check passes, then we are safe to pass these vnodes to the same FS.
+ */
+ if (fdvp->v_mount != tdvp->v_mount) {
+ error = EXDEV;
+ goto out1;
+ }
+ goto skipped_lookup;
+ }
+
+ if (!batched) {
+ error = vn_authorize_renamex_with_paths(fdvp, fvp, &fromnd->ni_cnd, from_name, tdvp, tvp, &tond->ni_cnd, to_name, ctx, flags, NULL);
+ if (error) {
+ if (error == ENOENT) {
+ assert(retry_count < MAX_AUTHORIZE_ENOENT_RETRIES);
+ if (retry_count < MAX_AUTHORIZE_ENOENT_RETRIES) {
+ /*
+ * We encountered a race where after doing the namei, tvp stops
+ * being valid. If so, simply re-drive the rename call from the
+ * top.
+ */
+ do_retry = 1;
+ retry_count += 1;
+ }
+ }
+ goto out1;
+ }
+ }
+
+ /*
+ * If the source and destination are the same (i.e. they're
+ * links to the same vnode) and the target file system is
+ * case sensitive, then there is nothing to do.
+ *
+ * XXX Come back to this.
+ */
+ if (fvp == tvp) {
+ int pathconf_val;
+
+ /*
+ * Note: if _PC_CASE_SENSITIVE selector isn't supported,
+ * then assume that this file system is case sensitive.
+ */
+ if (VNOP_PATHCONF(fvp, _PC_CASE_SENSITIVE, &pathconf_val, ctx) != 0 ||
+ pathconf_val != 0) {
+ goto out1;
+ }
+ }
+
+ /*
+ * Allow the renaming of mount points.
+ * - target must not exist
+ * - target must reside in the same directory as source
+ * - union mounts cannot be renamed
+ * - "/" cannot be renamed
+ *
+ * XXX Handle this in VFS after a continued lookup (if we missed
+ * in the cache to start off)
+ *
+ * N.B. If RENAME_SWAP is being used, then @tvp != NULL and so
+ * we'll skip past here. The file system is responsible for
+ * checking that @tvp is not a descendent of @fvp and vice versa
+ * so it should always return EINVAL if either @tvp or @fvp is the
+ * root of a volume.
+ */
+ if ((fvp->v_flag & VROOT) &&
+ (fvp->v_type == VDIR) &&
+ (tvp == NULL) &&
+ (fvp->v_mountedhere == NULL) &&
+ (fdvp == tdvp) &&
+ ((fvp->v_mount->mnt_flag & (MNT_UNION | MNT_ROOTFS)) == 0) &&
+ (fvp->v_mount->mnt_vnodecovered != NULLVP)) {
+ vnode_t coveredvp;
+
+ /* switch fvp to the covered vnode */
+ coveredvp = fvp->v_mount->mnt_vnodecovered;
+ if ( (vnode_getwithref(coveredvp)) ) {
+ error = ENOENT;
+ goto out1;
+ }
+ vnode_put(fvp);
+
+ fvp = coveredvp;
+ mntrename = TRUE;
+ }
+ /*
+ * Check for cross-device rename.
+ */
+ if ((fvp->v_mount != tdvp->v_mount) ||
+ (tvp && (fvp->v_mount != tvp->v_mount))) {
+ error = EXDEV;
+ goto out1;
+ }
+
+ /*
+ * If source is the same as the destination (that is the
+ * same inode number) then there is nothing to do...
+ * EXCEPT if the underlying file system supports case
+ * insensitivity and is case preserving. In this case
+ * the file system needs to handle the special case of
+ * getting the same vnode as target (fvp) and source (tvp).
+ *
+ * Only file systems that support pathconf selectors _PC_CASE_SENSITIVE
+ * and _PC_CASE_PRESERVING can have this exception, and they need to
+ * handle the special case of getting the same vnode as target and
+ * source. NOTE: Then the target is unlocked going into vnop_rename,
+ * so not to cause locking problems. There is a single reference on tvp.
+ *
+ * NOTE - that fvp == tvp also occurs if they are hard linked and
+ * that correct behaviour then is just to return success without doing
+ * anything.
+ *
+ * XXX filesystem should take care of this itself, perhaps...
+ */
+ if (fvp == tvp && fdvp == tdvp) {
+ if (fromnd->ni_cnd.cn_namelen == tond->ni_cnd.cn_namelen &&
+ !bcmp(fromnd->ni_cnd.cn_nameptr, tond->ni_cnd.cn_nameptr,
+ fromnd->ni_cnd.cn_namelen)) {
+ goto out1;
+ }
+ }
+
+ if (holding_mntlock && fvp->v_mount != locked_mp) {
+ /*
+ * we're holding a reference and lock
+ * on locked_mp, but it no longer matches
+ * what we want to do... so drop our hold
+ */
+ mount_unlock_renames(locked_mp);
+ mount_drop(locked_mp, 0);
+ holding_mntlock = 0;
+ }
+ if (tdvp != fdvp && fvp->v_type == VDIR) {
+ /*
+ * serialize renames that re-shape
+ * the tree... if holding_mntlock is
+ * set, then we're ready to go...
+ * otherwise we
+ * first need to drop the iocounts
+ * we picked up, second take the
+ * lock to serialize the access,
+ * then finally start the lookup
+ * process over with the lock held
+ */
+ if (!holding_mntlock) {
+ /*
+ * need to grab a reference on
+ * the mount point before we
+ * drop all the iocounts... once
+ * the iocounts are gone, the mount
+ * could follow
+ */
+ locked_mp = fvp->v_mount;
+ mount_ref(locked_mp, 0);
+
+ /*
+ * nameidone has to happen before we vnode_put(tvp)
+ * since it may need to release the fs_nodelock on the tvp
+ */
+ nameidone(tond);
+
+ if (tvp)
+ vnode_put(tvp);
+ vnode_put(tdvp);
+
+ /*
+ * nameidone has to happen before we vnode_put(fdvp)
+ * since it may need to release the fs_nodelock on the fvp
+ */
+ nameidone(fromnd);
+
+ vnode_put(fvp);
+ vnode_put(fdvp);
+
+ mount_lock_renames(locked_mp);
+ holding_mntlock = 1;
+
+ goto retry;
+ }
+ } else {
+ /*
+ * when we dropped the iocounts to take
+ * the lock, we allowed the identity of
+ * the various vnodes to change... if they did,
+ * we may no longer be dealing with a rename
+ * that reshapes the tree... once we're holding
+ * the iocounts, the vnodes can't change type
+ * so we're free to drop the lock at this point
+ * and continue on
+ */
+ if (holding_mntlock) {
+ mount_unlock_renames(locked_mp);
+ mount_drop(locked_mp, 0);
+ holding_mntlock = 0;
+ }
+ }
+
+ // save these off so we can later verify that fvp is the same
+ oname = fvp->v_name;
+ oparent = fvp->v_parent;
+
+skipped_lookup:
+ error = vn_rename(fdvp, &fvp, &fromnd->ni_cnd, fvap,
+ tdvp, &tvp, &tond->ni_cnd, tvap,
+ flags, ctx);
+
+ if (holding_mntlock) {
/*
- * We may encounter a race in the VNOP where the destination didn't
- * exist when we did the namei, but it does by the time we go and
+ * we can drop our serialization
+ * lock now
+ */
+ mount_unlock_renames(locked_mp);
+ mount_drop(locked_mp, 0);
+ holding_mntlock = 0;
+ }
+ if (error) {
+ if (error == EKEEPLOOKING) {
+ if ((fromnd->ni_flag & NAMEI_CONTLOOKUP) == 0) {
+ if ((tond->ni_flag & NAMEI_CONTLOOKUP) == 0) {
+ panic("EKEEPLOOKING without NAMEI_CONTLOOKUP on either ndp?");
+ }
+ }
+
+ fromnd->ni_vp = fvp;
+ tond->ni_vp = tvp;
+
+ goto continue_lookup;
+ }
+
+ /*
+ * We may encounter a race in the VNOP where the destination didn't
+ * exist when we did the namei, but it does by the time we go and
* try to create the entry. In this case, we should re-drive this rename
* call from the top again. Currently, only HFS bubbles out ERECYCLE,
- * but other filesystems susceptible to this race could return it, too.
+ * but other filesystems susceptible to this race could return it, too.
*/
if (error == ERECYCLE) {
do_retry = 1;
}
+ /*
+ * For compound VNOPs, the authorization callback may return
+ * ENOENT in case of racing hardlink lookups hitting the name
+ * cache, redrive the lookup.
+ */
+ if (batched && error == ENOENT) {
+ assert(retry_count < MAX_AUTHORIZE_ENOENT_RETRIES);
+ if (retry_count < MAX_AUTHORIZE_ENOENT_RETRIES) {
+ do_retry = 1;
+ retry_count += 1;
+ }
+ }
+
goto out1;
- }
-
- /* call out to allow 3rd party notification of rename.
+ }
+
+ /* call out to allow 3rd party notification of rename.
* Ignore result of kauth_authorize_fileop call.
*/
- kauth_authorize_fileop(vfs_context_ucred(ctx),
- KAUTH_FILEOP_RENAME,
+ kauth_authorize_fileop(vfs_context_ucred(ctx),
+ KAUTH_FILEOP_RENAME,
(uintptr_t)from_name, (uintptr_t)to_name);
+ if (flags & VFS_RENAME_SWAP) {
+ kauth_authorize_fileop(vfs_context_ucred(ctx),
+ KAUTH_FILEOP_RENAME,
+ (uintptr_t)to_name, (uintptr_t)from_name);
+ }
#if CONFIG_FSE
if (from_name != NULL && to_name != NULL) {
vnode_get_fse_info_from_vap(fvp, &from_finfo, fvap);
}
- if (tvp) {
- add_fsevent(FSE_RENAME, ctx,
- FSE_ARG_STRING, from_len, from_name,
- FSE_ARG_FINFO, &from_finfo,
- FSE_ARG_STRING, to_len, to_name,
- FSE_ARG_FINFO, &to_finfo,
- FSE_ARG_DONE);
+ if (tvp) {
+ add_fsevent(FSE_RENAME, ctx,
+ FSE_ARG_STRING, from_len, from_name,
+ FSE_ARG_FINFO, &from_finfo,
+ FSE_ARG_STRING, to_len, to_name,
+ FSE_ARG_FINFO, &to_finfo,
+ FSE_ARG_DONE);
+ if (flags & VFS_RENAME_SWAP) {
+ /*
+ * Strictly speaking, swap is the equivalent of
+ * *three* renames. FSEvents clients should only take
+ * the events as a hint, so we only bother reporting
+ * two.
+ */
+ add_fsevent(FSE_RENAME, ctx,
+ FSE_ARG_STRING, to_len, to_name,
+ FSE_ARG_FINFO, &to_finfo,
+ FSE_ARG_STRING, from_len, from_name,
+ FSE_ARG_FINFO, &from_finfo,
+ FSE_ARG_DONE);
+ }
} else {
add_fsevent(FSE_RENAME, ctx,
FSE_ARG_STRING, from_len, from_name,
}
}
#endif /* CONFIG_FSE */
-
+
/*
* update filesystem's mount point data
*/
}
MALLOC_ZONE(tobuf, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
- error = copyinstr(uap->to, tobuf, MAXPATHLEN, &len);
+ if (UIO_SEG_IS_USER_SPACE(segflg))
+ error = copyinstr(to, tobuf, MAXPATHLEN, &len);
+ else
+ error = copystr((void *)to, tobuf, MAXPATHLEN, &len);
if (!error) {
/* find current mount point prefix */
pathend = &mp->mnt_vfsstat.f_mntonname[0];
vfs_unbusy(mp);
}
/*
- * fix up name & parent pointers. note that we first
+ * fix up name & parent pointers. note that we first
* check that fvp has the same name/parent pointers it
* had before the rename call... this is a 'weak' check
* at best...
vnode_put(fvp);
vnode_put(fdvp);
}
-
-
+
/*
* If things changed after we did the namei, then we will re-drive
* this rename call from the top.
goto retry;
}
- FREE(__rename_data, M_TEMP);
- return (error);
+ FREE(__rename_data, M_TEMP);
+ return (error);
+}
+
+int
+rename(__unused proc_t p, struct rename_args *uap, __unused int32_t *retval)
+{
+ return (renameat_internal(vfs_context_current(), AT_FDCWD, uap->from,
+ AT_FDCWD, uap->to, UIO_USERSPACE, 0));
+}
+
+int renameatx_np(__unused proc_t p, struct renameatx_np_args *uap, __unused int32_t *retval)
+{
+ return renameat_internal(
+ vfs_context_current(),
+ uap->fromfd, uap->from,
+ uap->tofd, uap->to,
+ UIO_USERSPACE, uap->flags);
+}
+
+int
+renameat(__unused proc_t p, struct renameat_args *uap, __unused int32_t *retval)
+{
+ return (renameat_internal(vfs_context_current(), uap->fromfd, uap->from,
+ uap->tofd, uap->to, UIO_USERSPACE, 0));
}
/*
*/
/* ARGSUSED */
static int
-mkdir1(vfs_context_t ctx, user_addr_t path, struct vnode_attr *vap)
+mkdir1at(vfs_context_t ctx, user_addr_t path, struct vnode_attr *vap, int fd,
+ enum uio_seg segflg)
{
vnode_t vp, dvp;
int error;
struct nameidata nd;
AUDIT_ARG(mode, vap->va_mode);
- NDINIT(&nd, CREATE, OP_MKDIR, LOCKPARENT | AUDITVNPATH1, UIO_USERSPACE,
+ NDINIT(&nd, CREATE, OP_MKDIR, LOCKPARENT | AUDITVNPATH1, segflg,
path, ctx);
nd.ni_cnd.cn_flags |= WILLBEDIR;
nd.ni_flag = NAMEI_COMPOUNDMKDIR;
continue_lookup:
- error = namei(&nd);
+ error = nameiat(&nd, fd);
if (error)
return (error);
dvp = nd.ni_dvp;
vp = nd.ni_vp;
- if (vp != NULL) {
- error = EEXIST;
- goto out;
- }
-
+ if (vp != NULL) {
+ error = EEXIST;
+ goto out;
+ }
+
batched = vnode_compound_mkdir_available(dvp);
VATTR_SET(vap, va_type, VDIR);
-
+
/*
* XXX
* Don't authorize in VFS for compound VNOP.... mkdir -p today assumes that it will
* EACCESS/EPERM--so if we authorize for mkdir on "/" for "mkdir -p /tmp/foo/bar/baz"
* it will fail in a spurious manner. Need to figure out if this is valid behavior.
*/
- if ((error = vn_authorize_mkdir(dvp, &nd.ni_cnd, vap, ctx, NULL)) != 0) {
+ if ((error = vn_authorize_mkdir(dvp, &nd.ni_cnd, vap, ctx, NULL)) != 0) {
if (error == EACCES || error == EPERM) {
int error2;
vnode_put(dvp);
dvp = NULLVP;
- /*
- * Try a lookup without "NAMEI_COMPOUNDVNOP" to make sure we return EEXIST
+ /*
+ * Try a lookup without "NAMEI_COMPOUNDVNOP" to make sure we return EEXIST
* rather than EACCESS if the target exists.
*/
- NDINIT(&nd, LOOKUP, OP_MKDIR, AUDITVNPATH1, UIO_USERSPACE,
- path, ctx);
- error2 = namei(&nd);
+ NDINIT(&nd, LOOKUP, OP_MKDIR, AUDITVNPATH1, segflg,
+ path, ctx);
+ error2 = nameiat(&nd, fd);
if (error2) {
goto out;
} else {
}
/*
- * make the directory
+ * make the directory
*/
- if ((error = vn_create(dvp, &vp, &nd, vap, 0, 0, NULL, ctx)) != 0) {
+ if ((error = vn_create(dvp, &vp, &nd, vap, 0, 0, NULL, ctx)) != 0) {
if (error == EKEEPLOOKING) {
nd.ni_vp = vp;
goto continue_lookup;
}
- goto out;
+ goto out;
}
-
+
// Make sure the name & parent pointers are hooked up
if (vp->v_name == NULL)
update_flags |= VNODE_UPDATE_NAME;
if (vp)
vnode_put(vp);
- if (dvp)
+ if (dvp)
vnode_put(dvp);
return (error);
*
* Parameters: p Process requesting to create the directory
* uap User argument descriptor (see below)
- * retval (ignored)
+ * retval (ignored)
*
* Indirect: uap->path Path of directory to create
* uap->mode Access permissions to set
* uap->xsecurity ACL to set
- *
+ *
* Returns: 0 Success
* !0 Not success
*
return ciferror;
VATTR_INIT(&va);
- VATTR_SET(&va, va_mode, (uap->mode & ACCESSPERMS) & ~p->p_fd->fd_cmask);
+ VATTR_SET(&va, va_mode, (uap->mode & ACCESSPERMS) & ~p->p_fd->fd_cmask);
if (xsecdst != NULL)
VATTR_SET(&va, va_acl, &xsecdst->fsec_acl);
- ciferror = mkdir1(vfs_context_current(), uap->path, &va);
+ ciferror = mkdir1at(vfs_context_current(), uap->path, &va, AT_FDCWD,
+ UIO_USERSPACE);
if (xsecdst != NULL)
kauth_filesec_free(xsecdst);
return ciferror;
struct vnode_attr va;
VATTR_INIT(&va);
- VATTR_SET(&va, va_mode, (uap->mode & ACCESSPERMS) & ~p->p_fd->fd_cmask);
+ VATTR_SET(&va, va_mode, (uap->mode & ACCESSPERMS) & ~p->p_fd->fd_cmask);
- return(mkdir1(vfs_context_current(), uap->path, &va));
+ return (mkdir1at(vfs_context_current(), uap->path, &va, AT_FDCWD,
+ UIO_USERSPACE));
}
-/*
- * Remove a directory file.
- */
-/* ARGSUSED */
int
-rmdir(__unused proc_t p, struct rmdir_args *uap, __unused int32_t *retval)
+mkdirat(proc_t p, struct mkdirat_args *uap, __unused int32_t *retval)
+{
+ struct vnode_attr va;
+
+ VATTR_INIT(&va);
+ VATTR_SET(&va, va_mode, (uap->mode & ACCESSPERMS) & ~p->p_fd->fd_cmask);
+
+ return(mkdir1at(vfs_context_current(), uap->path, &va, uap->fd,
+ UIO_USERSPACE));
+}
+
+static int
+rmdirat_internal(vfs_context_t ctx, int fd, user_addr_t dirpath,
+ enum uio_seg segflg)
{
vnode_t vp, dvp;
int error;
int has_listeners = 0;
int need_event = 0;
int truncated = 0;
- vfs_context_t ctx = vfs_context_current();
#if CONFIG_FSE
struct vnode_attr va;
#endif /* CONFIG_FSE */
struct vnode_attr *vap = NULL;
+ int restart_count = 0;
int batched;
int restart_flag;
- /*
+ /*
* This loop exists to restart rmdir in the unlikely case that two
* processes are simultaneously trying to remove the same directory
* containing orphaned appleDouble files.
*/
do {
NDINIT(&nd, DELETE, OP_RMDIR, LOCKPARENT | AUDITVNPATH1,
- UIO_USERSPACE, uap->path, ctx);
+ segflg, dirpath, ctx);
nd.ni_flag = NAMEI_COMPOUNDRMDIR;
continue_lookup:
restart_flag = 0;
vap = NULL;
- error = namei(&nd);
+ error = nameiat(&nd, fd);
if (error)
return (error);
goto out;
}
+#if DEVELOPMENT || DEBUG
+ /*
+ * XXX VSWAP: Check for entitlements or special flag here
+ * so we can restrict access appropriately.
+ */
+#else /* DEVELOPMENT || DEBUG */
+
+ if (vnode_isswap(vp) && (ctx != vfs_context_kernel())) {
+ error = EPERM;
+ goto out;
+ }
+#endif /* DEVELOPMENT || DEBUG */
+
/*
* Removed a check here; we used to abort if vp's vid
* was not the same as what we'd seen the last time around.
* I do not think that check was valid, because if we retry
* and all dirents are gone, the directory could legitimately
* be recycled but still be present in a situation where we would
- * have had permission to delete. Therefore, we won't make
+ * have had permission to delete. Therefore, we won't make
* an effort to preserve that check now that we may not have a
* vp here.
*/
if (!batched) {
error = vn_authorize_rmdir(dvp, vp, &nd.ni_cnd, ctx, NULL);
if (error) {
+ if (error == ENOENT) {
+ assert(restart_count < MAX_AUTHORIZE_ENOENT_RETRIES);
+ if (restart_count < MAX_AUTHORIZE_ENOENT_RETRIES) {
+ restart_flag = 1;
+ restart_count += 1;
+ }
+ }
goto out;
}
}
if (error == EKEEPLOOKING) {
goto continue_lookup;
+ } else if (batched && error == ENOENT) {
+ assert(restart_count < MAX_AUTHORIZE_ENOENT_RETRIES);
+ if (restart_count < MAX_AUTHORIZE_ENOENT_RETRIES) {
+ /*
+ * For compound VNOPs, the authorization callback
+ * may return ENOENT in case of racing hard link lookups,
+ * redrive the lookup.
+ */
+ restart_flag = 1;
+ restart_count += 1;
+ goto out;
+ }
}
#if CONFIG_APPLEDOUBLE
/*
/*
- * Assuming everything went well, we will try the RMDIR again
+ * Assuming everything went well, we will try the RMDIR again
*/
if (!error)
error = vn_rmdir(dvp, &vp, &nd, vap, ctx);
}
#endif /* CONFIG_APPLEDOUBLE */
/*
- * Call out to allow 3rd party notification of delete.
+ * Call out to allow 3rd party notification of delete.
* Ignore result of kauth_authorize_fileop call.
*/
if (!error) {
if (has_listeners) {
- kauth_authorize_fileop(vfs_context_ucred(ctx),
- KAUTH_FILEOP_DELETE,
+ kauth_authorize_fileop(vfs_context_ucred(ctx),
+ KAUTH_FILEOP_DELETE,
(uintptr_t)vp,
(uintptr_t)path);
}
nameidone(&nd);
vnode_put(dvp);
- if (vp)
+ if (vp)
vnode_put(vp);
if (restart_flag == 0) {
}
+/*
+ * Remove a directory file.
+ */
+/* ARGSUSED */
+int
+rmdir(__unused proc_t p, struct rmdir_args *uap, __unused int32_t *retval)
+{
+ return (rmdirat_internal(vfs_context_current(), AT_FDCWD,
+ CAST_USER_ADDR_T(uap->path), UIO_USERSPACE));
+}
+
/* Get direntry length padded to 8 byte alignment */
#define DIRENT64_LEN(namlen) \
((sizeof(struct direntry) + (namlen) - (MAXPATHLEN-1) + 7) & ~7)
-static errno_t
+/* Get dirent length padded to 4 byte alignment */
+#define DIRENT_LEN(namelen) \
+ ((sizeof(struct dirent) + (namelen + 1) - (__DARWIN_MAXNAMLEN + 1) + 3) & ~3)
+
+/* Get the end of this dirent */
+#define DIRENT_END(dep) \
+ (((char *)(dep)) + (dep)->d_reclen - 1)
+
+errno_t
vnode_readdir64(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
int *numdirent, vfs_context_t ctxp)
{
/* Check if fs natively supports VNODE_READDIR_EXTENDED */
- if ((vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSREADDIR_EXTENDED) &&
+ if ((vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSREADDIR_EXTENDED) &&
((vp->v_mount->mnt_kern_flag & MNTK_DENY_READDIREXT) == 0)) {
return VNOP_READDIR(vp, uio, flags, eofflag, numdirent, ctxp);
} else {
size_t bufsize;
void * bufptr;
uio_t auio;
- struct direntry entry64;
+ struct direntry *entry64;
struct dirent *dep;
int bytesread;
int error;
/*
- * Our kernel buffer needs to be smaller since re-packing
- * will expand each dirent. The worse case (when the name
- * length is 3) corresponds to a struct direntry size of 32
+ * We're here because the underlying file system does not
+ * support direntries or we mounted denying support so we must
+ * fall back to dirents and convert them to direntries.
+ *
+ * Our kernel buffer needs to be smaller since re-packing will
+ * expand each dirent. The worst case (when the name length
+ * is 3 or less) corresponds to a struct direntry size of 32
* bytes (8-byte aligned) and a struct dirent size of 12 bytes
* (4-byte aligned). So having a buffer that is 3/8 the size
* will prevent us from reading more than we can pack.
*
* Since this buffer is wired memory, we will limit the
- * buffer size to a maximum of 32K. We would really like to
+ * buffer size to a maximum of 32K. We would really like to
* use 32K in the MIN(), but we use magic number 87371 to
- * prevent uio_resid() * 3 / 8 from overflowing.
+ * prevent uio_resid() * 3 / 8 from overflowing.
*/
bufsize = 3 * MIN((user_size_t)uio_resid(uio), 87371u) / 8;
MALLOC(bufptr, void *, bufsize, M_TEMP, M_WAITOK);
dep = (struct dirent *)bufptr;
bytesread = bufsize - uio_resid(auio);
+ MALLOC(entry64, struct direntry *, sizeof(struct direntry),
+ M_TEMP, M_WAITOK);
/*
* Convert all the entries and copy them out to user's buffer.
*/
while (error == 0 && (char *)dep < ((char *)bufptr + bytesread)) {
+ size_t enbufsize = DIRENT64_LEN(dep->d_namlen);
+
+ if (DIRENT_END(dep) > ((char *)bufptr + bytesread) ||
+ DIRENT_LEN(dep->d_namlen) > dep->d_reclen) {
+ printf("%s: %s: Bad dirent recived from directory %s\n", __func__,
+ vp->v_mount->mnt_vfsstat.f_mntonname,
+ vp->v_name ? vp->v_name : "<unknown>");
+ error = EIO;
+ break;
+ }
+
+ bzero(entry64, enbufsize);
/* Convert a dirent to a dirent64. */
- entry64.d_ino = dep->d_ino;
- entry64.d_seekoff = 0;
- entry64.d_reclen = DIRENT64_LEN(dep->d_namlen);
- entry64.d_namlen = dep->d_namlen;
- entry64.d_type = dep->d_type;
- bcopy(dep->d_name, entry64.d_name, dep->d_namlen + 1);
+ entry64->d_ino = dep->d_ino;
+ entry64->d_seekoff = 0;
+ entry64->d_reclen = enbufsize;
+ entry64->d_namlen = dep->d_namlen;
+ entry64->d_type = dep->d_type;
+ bcopy(dep->d_name, entry64->d_name, dep->d_namlen + 1);
/* Move to next entry. */
dep = (struct dirent *)((char *)dep + dep->d_reclen);
/* Copy entry64 to user's buffer. */
- error = uiomove((caddr_t)&entry64, entry64.d_reclen, uio);
+ error = uiomove((caddr_t)entry64, entry64->d_reclen, uio);
}
/* Update the real offset using the offset we got from VNOP_READDIR. */
}
uio_free(auio);
FREE(bufptr, M_TEMP);
+ FREE(entry64, M_TEMP);
return (error);
}
}
error = union_dircheckp(&vp, fp, &context);
if (error == -1)
goto unionread;
- if (error)
+ if (error) {
+ (void)vnode_put(vp);
goto out;
+ }
}
if ((vp->v_mount->mnt_flag & MNT_UNION)) {
if (offset) {
*offset = loff;
}
-
+
*bytesread = bufsize - uio_resid(auio);
out:
file_drop(fd);
*
* Indirect: uap->newmask umask to set
* uap->xsecurity ACL to set
- *
+ *
* Returns: 0 Success
* !0 Not success
*
struct fileproc *fp;
uio_t auio = NULL;
int spacetype = proc_is64bit(p) ? UIO_USERSPACE64 : UIO_USERSPACE32;
- uint32_t count, savecount;
- uint32_t newstate;
+ uint32_t count = 0, savecount = 0;
+ uint32_t newstate = 0;
int error, eofflag;
- uint32_t loff;
- struct attrlist attributelist;
+ uint32_t loff = 0;
+ struct attrlist attributelist;
vfs_context_t ctx = vfs_context_current();
int fd = uap->fd;
char uio_buf[ UIO_SIZEOF(1) ];
kauth_action_t action;
AUDIT_ARG(fd, fd);
-
+
/* Get the attributes into kernel space */
if ((error = copyin(uap->alist, (caddr_t)&attributelist, sizeof(attributelist)))) {
return(error);
loff = fp->f_fglob->fg_offset;
auio = uio_createwithbuffer(1, loff, spacetype, UIO_READ, &uio_buf[0], sizeof(uio_buf));
uio_addiov(auio, uap->buffer, uap->buffersize);
-
+
/*
* If the only item requested is file names, we can let that past with
* just LIST_DIRECTORY. If they want any other attributes, that means
if ((attributelist.commonattr & ~ATTR_CMN_NAME) ||
attributelist.fileattr || attributelist.dirattr)
action |= KAUTH_VNODE_SEARCH;
-
+
if ((error = vnode_authorize(vp, NULL, action, ctx)) == 0) {
/* Believe it or not, uap->options only has 32-bits of valid
(void)vnode_put(vp);
- if (error)
+ if (error)
goto out;
fp->f_fglob->fg_offset = uio_offset(auio); /* should be multiple of dirent, not variable */
#if CONFIG_FSE
fse_info f_finfo, s_finfo;
#endif
-
+
nameiflags = 0;
if ((uap->options & FSOPT_NOFOLLOW) == 0) nameiflags |= FOLLOW;
nameidone(&fnd);
fvp = fnd.ni_vp;
- NDINIT(&snd, LOOKUP, OP_EXCHANGEDATA, CN_NBMOUNTLOOK | nameiflags | AUDITVNPATH2,
+ NDINIT(&snd, LOOKUP, OP_EXCHANGEDATA, CN_NBMOUNTLOOK | nameiflags | AUDITVNPATH2,
UIO_USERSPACE, uap->path2, ctx);
error = namei(&snd);
if (svp == fvp) {
error = EINVAL;
goto out;
- }
+ }
/*
* if the files are on different volumes, return an error
if (
#if CONFIG_FSE
- need_fsevent(FSE_EXCHANGE, fvp) ||
+ need_fsevent(FSE_EXCHANGE, fvp) ||
#endif
kauth_authorize_fileop_has_listeners()) {
GET_PATH(fpath);
flen = safe_getpath(fvp, NULL, fpath, MAXPATHLEN, &from_truncated);
slen = safe_getpath(svp, NULL, spath, MAXPATHLEN, &to_truncated);
-
+
#if CONFIG_FSE
get_fse_info(fvp, &f_finfo, ctx);
get_fse_info(svp, &s_finfo, ctx);
const char *tmpname;
if (fpath != NULL && spath != NULL) {
- /* call out to allow 3rd party notification of exchangedata.
+ /* call out to allow 3rd party notification of exchangedata.
* Ignore result of kauth_authorize_fileop call.
*/
- kauth_authorize_fileop(vfs_context_ucred(ctx), KAUTH_FILEOP_EXCHANGE,
+ kauth_authorize_fileop(vfs_context_ucred(ctx), KAUTH_FILEOP_EXCHANGE,
(uintptr_t)fpath, (uintptr_t)spath);
}
name_cache_lock();
tmpname = fvp->v_name;
fvp->v_name = svp->v_name;
svp->v_name = tmpname;
-
+
if (fvp->v_parent != svp->v_parent) {
vnode_t tmp;
uint32_t
freespace_mb(vnode_t vp)
{
- vfs_update_vfsstat(vp->v_mount, vfs_context_current(), VFS_USER_EVENT);
+ vfs_update_vfsstat(vp->v_mount, vfs_context_current(), VFS_USER_EVENT);
return (((uint64_t)vp->v_mount->mnt_vfsstat.f_bavail *
vp->v_mount->mnt_vfsstat.f_bsize) >> 20);
}
searchblock.returnbuffer = CAST_USER_ADDR_T(tmp_searchblock.returnbuffer);
searchblock.returnbuffersize = tmp_searchblock.returnbuffersize;
searchblock.maxmatches = tmp_searchblock.maxmatches;
- /*
+ /*
* These casts are safe. We will promote the tv_sec into a 64 bit long if necessary
* from a 32 bit long, and tv_usec is already a signed 32 bit int.
*/
if (error)
return(error);
- /* Do a sanity check on sizeofsearchparams1 and sizeofsearchparams2.
+ /* Do a sanity check on sizeofsearchparams1 and sizeofsearchparams2.
*/
- if (searchblock.sizeofsearchparams1 > SEARCHFS_MAX_SEARCHPARMS ||
+ if (searchblock.sizeofsearchparams1 > SEARCHFS_MAX_SEARCHPARMS ||
searchblock.sizeofsearchparams2 > SEARCHFS_MAX_SEARCHPARMS)
return(EINVAL);
-
+
/* Now malloc a big bunch of space to hold the search parameters, the attrlists and the search state. */
/* It all has to do into local memory and it's not that big so we might as well put it all together. */
/* Searchparams1 shall be first so we might as well use that to hold the base address of the allocated*/
/* block. */
-
+ /* */
+ /* NOTE: we allocate an extra 8 bytes to account for the difference in size of the searchstate */
+ /* due to the changes in rdar://problem/12438273. That way if a 3rd party file system */
+ /* assumes the size is still 556 bytes it will continue to work */
+
mallocsize = searchblock.sizeofsearchparams1 + searchblock.sizeofsearchparams2 +
- sizeof(struct attrlist) + sizeof(struct searchstate);
+ sizeof(struct attrlist) + sizeof(struct searchstate) + (2*sizeof(uint32_t));
MALLOC(searchparams1, void *, mallocsize, M_TEMP, M_WAITOK);
if ((error = copyin(searchblock.returnattrs, (caddr_t) returnattrs, sizeof(struct attrlist))))
goto freeandexit;
-
+
if ((error = copyin(uap->state, (caddr_t) state, sizeof(struct searchstate))))
goto freeandexit;
*/
if (uap->options & SRCHFS_START)
state->ss_union_layer = 0;
- else
+ else
uap->options |= state->ss_union_flags;
state->ss_union_flags = 0;
/*
* Because searchparams1 and searchparams2 may contain an ATTR_CMN_NAME search parameter,
* which is passed in with an attrreference_t, we need to inspect the buffer manually here.
- * The KPI does not provide us the ability to pass in the length of the buffers searchparams1
- * and searchparams2. To obviate the need for all searchfs-supporting filesystems to
+ * The KPI does not provide us the ability to pass in the length of the buffers searchparams1
+ * and searchparams2. To obviate the need for all searchfs-supporting filesystems to
* validate the user-supplied data offset of the attrreference_t, we'll do it here.
*/
if (searchblock.searchattrs.commonattr & ATTR_CMN_NAME) {
attrreference_t* string_ref;
u_int32_t* start_length;
- user64_size_t param_length;
+ user64_size_t param_length;
/* validate searchparams1 */
- param_length = searchblock.sizeofsearchparams1;
+ param_length = searchblock.sizeofsearchparams1;
/* skip the word that specifies length of the buffer */
start_length= (u_int32_t*) searchparams1;
start_length= start_length+1;
/* ensure no negative offsets or too big offsets */
if (string_ref->attr_dataoffset < 0 ) {
error = EINVAL;
- goto freeandexit;
+ goto freeandexit;
}
if (string_ref->attr_length > MAXPATHLEN) {
error = EINVAL;
goto freeandexit;
}
-
+
/* Check for pointer overflow in the string ref */
if (((char*) string_ref + string_ref->attr_dataoffset) < (char*) string_ref) {
error = EINVAL;
* Switch to the root vnode for the volume
*/
error = VFS_ROOT(vnode_mount(vp), &tvp, ctx);
+ vnode_put(vp);
if (error)
goto freeandexit;
- vnode_put(vp);
vp = tvp;
/*
tvp = vp;
vp = vp->v_mount->mnt_vnodecovered;
if (vp == NULL) {
- vp = tvp;
+ vnode_put(tvp);
error = ENOENT;
goto freeandexit;
}
- vnode_getwithref(vp);
+ error = vnode_getwithref(vp);
vnode_put(tvp);
+ if (error)
+ goto freeandexit;
}
#if CONFIG_MACF
}
#endif
-
+
/*
- * If searchblock.maxmatches == 0, then skip the search. This has happened
+ * If searchblock.maxmatches == 0, then skip the search. This has happened
* before and sometimes the underlying code doesnt deal with it well.
*/
if (searchblock.maxmatches == 0) {
/*
* Allright, we have everything we need, so lets make that call.
- *
+ *
* We keep special track of the return value from the file system:
* EAGAIN is an acceptable error condition that shouldn't keep us
* from copying out any results...
auio,
(struct searchstate *) &state->ss_fsstate,
ctx);
-
+
/*
* If it's a union mount we need to be called again
* to search the mounted-on filesystem.
if ((error = suulong(uap->nummatches, (uint64_t)nummatches)) != 0)
goto freeandexit;
-
+
error = fserror;
freeandexit:
#define NSPACE_ITEM_NSPACE_EVENT 0x0040
#define NSPACE_ITEM_SNAPSHOT_EVENT 0x0080
-#define NSPACE_ITEM_TRACK_EVENT 0x0100
-#define NSPACE_ITEM_ALL_EVENT_TYPES (NSPACE_ITEM_NSPACE_EVENT | NSPACE_ITEM_SNAPSHOT_EVENT | NSPACE_ITEM_TRACK_EVENT)
+#define NSPACE_ITEM_ALL_EVENT_TYPES (NSPACE_ITEM_NSPACE_EVENT | NSPACE_ITEM_SNAPSHOT_EVENT)
//#pragma optimization_level 0
typedef enum {
NSPACE_HANDLER_NSPACE = 0,
NSPACE_HANDLER_SNAPSHOT = 1,
- NSPACE_HANDLER_TRACK = 2,
NSPACE_HANDLER_COUNT,
} nspace_type_t;
return (event_flags & NSPACE_ITEM_ALL_EVENT_TYPES) == NSPACE_ITEM_NSPACE_EVENT;
case NSPACE_HANDLER_SNAPSHOT:
return (event_flags & NSPACE_ITEM_ALL_EVENT_TYPES) == NSPACE_ITEM_SNAPSHOT_EVENT;
- case NSPACE_HANDLER_TRACK:
- return (event_flags & NSPACE_ITEM_ALL_EVENT_TYPES) == NSPACE_ITEM_TRACK_EVENT;
default:
printf("nspace_flags_matches_handler: invalid type %u\n", (int)nspace_type);
return 0;
return NSPACE_ITEM_NSPACE_EVENT;
case NSPACE_HANDLER_SNAPSHOT:
return NSPACE_ITEM_SNAPSHOT_EVENT;
- case NSPACE_HANDLER_TRACK:
- return NSPACE_ITEM_TRACK_EVENT;
default:
printf("nspace_item_flags_for_type: invalid type %u\n", (int)nspace_type);
return 0;
case NSPACE_HANDLER_NSPACE:
return FREAD | FWRITE | O_EVTONLY;
case NSPACE_HANDLER_SNAPSHOT:
- case NSPACE_HANDLER_TRACK:
return FREAD | O_EVTONLY;
default:
printf("nspace_open_flags_for_type: invalid type %u\n", (int)nspace_type);
return NSPACE_HANDLER_NSPACE;
case NAMESPACE_HANDLER_SNAPSHOT_EVENT:
return NSPACE_HANDLER_SNAPSHOT;
- case NAMESPACE_HANDLER_TRACK_EVENT:
- return NSPACE_HANDLER_TRACK;
default:
printf("nspace_type_for_op: invalid op mask %llx\n", op & NAMESPACE_HANDLER_EVENT_TYPE_MASK);
return NSPACE_HANDLER_NSPACE;
nspace_proc_exit(struct proc *p)
{
int i, event_mask = 0;
-
+
for (i = 0; i < NSPACE_HANDLER_COUNT; i++) {
if (p == nspace_handlers[i].handler_proc) {
event_mask |= nspace_item_flags_for_type(i);
if (event_mask == 0) {
return;
}
-
+
+ lck_mtx_lock(&nspace_handler_lock);
if (event_mask & NSPACE_ITEM_SNAPSHOT_EVENT) {
// if this process was the snapshot handler, zero snapshot_timeout
snapshot_timestamp = 0;
}
-
+
//
// unblock anyone that's waiting for the handler that died
//
- lck_mtx_lock(&nspace_handler_lock);
for(i=0; i < MAX_NSPACE_ITEMS; i++) {
if (nspace_items[i].flags & (NSPACE_ITEM_NEW | NSPACE_ITEM_PROCESSING)) {
nspace_items[i].vid = 0;
nspace_items[i].flags = NSPACE_ITEM_DONE;
nspace_items[i].token = 0;
-
+
wakeup((caddr_t)&(nspace_items[i].vp));
}
}
}
-
+
wakeup((caddr_t)&nspace_item_idx);
lck_mtx_unlock(&nspace_handler_lock);
}
-int
+int
resolve_nspace_item(struct vnode *vp, uint64_t op)
{
return resolve_nspace_item_ext(vp, op, NULL);
}
-int
+int
resolve_nspace_item_ext(struct vnode *vp, uint64_t op, void *arg)
{
int i, error, keep_waiting;
} else {
nspace_items[i].refcount++;
}
-
+
if (i >= MAX_NSPACE_ITEMS) {
ts.tv_sec = nspace_handler_timeout;
ts.tv_nsec = 0;
nspace_items[i].token = 0;
nspace_items[i].refcount = 1;
-
+
wakeup((caddr_t)&nspace_item_idx);
}
// hmmm, why did we get woken up?
printf("woken up for token %d but it's not done, cancelled or timedout and error == 0.\n",
nspace_items[i].token);
- }
+ }
if (--nspace_items[i].refcount == 0) {
nspace_items[i].vp = NULL; // clear this so that no one will match on it again
return error;
}
+int nspace_snapshot_event(vnode_t vp, time_t ctime, uint64_t op_type, void *arg)
+{
+ int snapshot_error = 0;
+
+ if (vp == NULL) {
+ return 0;
+ }
+
+ /* Swap files are special; skip them */
+ if (vnode_isswap(vp)) {
+ return 0;
+ }
+
+ if (ctime != 0 && snapshot_timestamp != 0 && (ctime <= snapshot_timestamp || vnode_needssnapshots(vp))) {
+ // the change time is within this epoch
+ int error;
+
+ error = resolve_nspace_item_ext(vp, op_type | NAMESPACE_HANDLER_SNAPSHOT_EVENT, arg);
+ if (error == EDEADLK) {
+ snapshot_error = 0;
+ } else if (error) {
+ if (error == EAGAIN) {
+ printf("nspace_snapshot_event: timed out waiting for namespace handler...\n");
+ } else if (error == EINTR) {
+ // printf("nspace_snapshot_event: got a signal while waiting for namespace handler...\n");
+ snapshot_error = EINTR;
+ }
+ }
+ }
+
+ return snapshot_error;
+}
int
get_nspace_item_status(struct vnode *vp, int32_t *status)
lck_mtx_unlock(&nspace_handler_lock);
return 0;
}
-
+
#if 0
static int
if ((error = vnode_authorize(vp, NULL, action, ctx)) != 0)
return error;
-
+
//
// if the vnode is tagged VOPENEVT and the current process
return error;
}
- /* Call out to allow 3rd party notification of open.
+ /* Call out to allow 3rd party notification of open.
* Ignore result of kauth_authorize_fileop call.
*/
#if CONFIG_MACF
mac_vnode_notify_open(ctx, vp, fmode);
#endif
- kauth_authorize_fileop(vfs_context_ucred(ctx), KAUTH_FILEOP_OPEN,
+ kauth_authorize_fileop(vfs_context_ucred(ctx), KAUTH_FILEOP_OPEN,
(uintptr_t)vp, 0);
static int
wait_for_namespace_event(namespace_handler_data *nhd, nspace_type_t nspace_type)
{
- int i, error=0, unblock=0;
+ int i;
+ int error = 0;
+ int unblock = 0;
task_t curtask;
-
+
lck_mtx_lock(&nspace_handler_exclusion_lock);
if (nspace_handlers[nspace_type].handler_busy) {
lck_mtx_unlock(&nspace_handler_exclusion_lock);
return EBUSY;
}
+
nspace_handlers[nspace_type].handler_busy = 1;
lck_mtx_unlock(&nspace_handler_exclusion_lock);
-
- /*
+
+ /*
* Any process that gets here will be one of the namespace handlers.
* As such, they should be prevented from acquiring DMG vnodes during vnode reclamation
* as we can cause deadlocks to occur, because the namespace handler may prevent
- * VNOP_INACTIVE from proceeding. Mark the current task as a P_DEPENDENCY_CAPABLE
+ * VNOP_INACTIVE from proceeding. Mark the current task as a P_DEPENDENCY_CAPABLE
* process.
*/
curtask = current_task();
- bsd_set_dependency_capable (curtask);
-
+ bsd_set_dependency_capable (curtask);
+
lck_mtx_lock(&nspace_handler_lock);
if (nspace_handlers[nspace_type].handler_proc == NULL) {
nspace_handlers[nspace_type].handler_tid = thread_tid(current_thread());
nspace_handlers[nspace_type].handler_proc = current_proc();
}
-
+
+ if (nspace_type == NSPACE_HANDLER_SNAPSHOT &&
+ (snapshot_timestamp == 0 || snapshot_timestamp == ~0)) {
+ error = EINVAL;
+ }
+
while (error == 0) {
-
- for(i=0; i < MAX_NSPACE_ITEMS; i++) {
+
+ /* Try to find matching namespace item */
+ for (i = 0; i < MAX_NSPACE_ITEMS; i++) {
if (nspace_items[i].flags & NSPACE_ITEM_NEW) {
- if (!nspace_flags_matches_handler(nspace_items[i].flags, nspace_type)) {
- continue;
+ if (nspace_flags_matches_handler(nspace_items[i].flags, nspace_type)) {
+ break;
}
+ }
+ }
+
+ if (i >= MAX_NSPACE_ITEMS) {
+ /* Nothing is there yet. Wait for wake up and retry */
+ error = msleep((caddr_t)&nspace_item_idx, &nspace_handler_lock, PVFS|PCATCH, "namespace-items", 0);
+ if ((nspace_type == NSPACE_HANDLER_SNAPSHOT) && (snapshot_timestamp == 0 || snapshot_timestamp == ~0)) {
+ /* Prevent infinite loop if snapshot handler exited */
+ error = EINVAL;
break;
}
+ continue;
}
-
- if (i < MAX_NSPACE_ITEMS) {
- nspace_items[i].flags &= ~NSPACE_ITEM_NEW;
- nspace_items[i].flags |= NSPACE_ITEM_PROCESSING;
- nspace_items[i].token = ++nspace_token_id;
-
- if (nspace_items[i].vp) {
- struct fileproc *fp;
- int32_t indx, fmode;
- struct proc *p = current_proc();
- vfs_context_t ctx = vfs_context_current();
- struct vnode_attr va;
+ nspace_items[i].flags &= ~NSPACE_ITEM_NEW;
+ nspace_items[i].flags |= NSPACE_ITEM_PROCESSING;
+ nspace_items[i].token = ++nspace_token_id;
- /*
- * Use vnode pointer to acquire a file descriptor for
- * hand-off to userland
- */
- fmode = nspace_open_flags_for_type(nspace_type);
- error = vnode_getwithvid(nspace_items[i].vp, nspace_items[i].vid);
- if (error) {
- unblock = 1;
- break;
- }
- error = vn_open_with_vp(nspace_items[i].vp, fmode, ctx);
- if (error) {
- unblock = 1;
- vnode_put(nspace_items[i].vp);
- break;
- }
-
- if ((error = falloc(p, &fp, &indx, ctx))) {
- vn_close(nspace_items[i].vp, fmode, ctx);
- vnode_put(nspace_items[i].vp);
- unblock = 1;
- break;
- }
-
- fp->f_fglob->fg_flag = fmode;
- fp->f_fglob->fg_ops = &vnops;
- fp->f_fglob->fg_data = (caddr_t)nspace_items[i].vp;
-
- proc_fdlock(p);
- procfdtbl_releasefd(p, indx, NULL);
- fp_drop(p, indx, fp, 1);
- proc_fdunlock(p);
-
- /*
- * All variants of the namespace handler struct support these three fields:
- * token, flags, and the FD pointer
- */
- error = copyout(&nspace_items[i].token, nhd->token, sizeof(uint32_t));
- error = copyout(&nspace_items[i].op, nhd->flags, sizeof(uint64_t));
- error = copyout(&indx, nhd->fdptr, sizeof(uint32_t));
-
- /*
- * Handle optional fields:
- * extended version support an info ptr (offset, length), and the
- *
- * namedata version supports a unique per-link object ID
- *
- */
- if (nhd->infoptr) {
- uio_t uio = (uio_t)nspace_items[i].arg;
- uint64_t u_offset, u_length;
-
- if (uio) {
- u_offset = uio_offset(uio);
- u_length = uio_resid(uio);
- } else {
- u_offset = 0;
- u_length = 0;
- }
- error = copyout(&u_offset, nhd->infoptr, sizeof(uint64_t));
- error = copyout(&u_length, nhd->infoptr+sizeof(uint64_t), sizeof(uint64_t));
- }
+ assert(nspace_items[i].vp);
+ struct fileproc *fp;
+ int32_t indx;
+ int32_t fmode;
+ struct proc *p = current_proc();
+ vfs_context_t ctx = vfs_context_current();
+ struct vnode_attr va;
+ bool vn_get_succsessful = false;
+ bool vn_open_successful = false;
+ bool fp_alloc_successful = false;
- if (nhd->objid) {
- VATTR_INIT(&va);
- VATTR_WANTED(&va, va_linkid);
- error = vnode_getattr(nspace_items[i].vp, &va, ctx);
- if (error == 0 ) {
- uint64_t linkid = 0;
- if (VATTR_IS_SUPPORTED (&va, va_linkid)) {
- linkid = (uint64_t)va.va_linkid;
- }
- error = copyout (&linkid, nhd->objid, sizeof(uint64_t));
- }
- }
+ /*
+ * Use vnode pointer to acquire a file descriptor for
+ * hand-off to userland
+ */
+ fmode = nspace_open_flags_for_type(nspace_type);
+ error = vnode_getwithvid(nspace_items[i].vp, nspace_items[i].vid);
+ if (error) goto cleanup;
+ vn_get_succsessful = true;
- if (error) {
- vn_close(nspace_items[i].vp, fmode, ctx);
- fp_free(p, indx, fp);
- unblock = 1;
- }
-
- vnode_put(nspace_items[i].vp);
-
- break;
+ error = vn_open_with_vp(nspace_items[i].vp, fmode, ctx);
+ if (error) goto cleanup;
+ vn_open_successful = true;
+
+ error = falloc(p, &fp, &indx, ctx);
+ if (error) goto cleanup;
+ fp_alloc_successful = true;
+
+ fp->f_fglob->fg_flag = fmode;
+ fp->f_fglob->fg_ops = &vnops;
+ fp->f_fglob->fg_data = (caddr_t)nspace_items[i].vp;
+
+ proc_fdlock(p);
+ procfdtbl_releasefd(p, indx, NULL);
+ fp_drop(p, indx, fp, 1);
+ proc_fdunlock(p);
+
+ /*
+ * All variants of the namespace handler struct support these three fields:
+ * token, flags, and the FD pointer
+ */
+ error = copyout(&nspace_items[i].token, nhd->token, sizeof(uint32_t));
+ if (error) goto cleanup;
+ error = copyout(&nspace_items[i].op, nhd->flags, sizeof(uint64_t));
+ if (error) goto cleanup;
+ error = copyout(&indx, nhd->fdptr, sizeof(uint32_t));
+ if (error) goto cleanup;
+
+ /*
+ * Handle optional fields:
+ * extended version support an info ptr (offset, length), and the
+ *
+ * namedata version supports a unique per-link object ID
+ *
+ */
+ if (nhd->infoptr) {
+ uio_t uio = (uio_t)nspace_items[i].arg;
+ uint64_t u_offset, u_length;
+
+ if (uio) {
+ u_offset = uio_offset(uio);
+ u_length = uio_resid(uio);
} else {
- printf("wait_for_nspace_event: failed (nspace_items[%d] == %p error %d, name %s)\n",
- i, nspace_items[i].vp, error, nspace_items[i].vp->v_name);
+ u_offset = 0;
+ u_length = 0;
}
-
- } else {
- error = msleep((caddr_t)&nspace_item_idx, &nspace_handler_lock, PVFS|PCATCH, "namespace-items", 0);
- if ((nspace_type == NSPACE_HANDLER_SNAPSHOT) && (snapshot_timestamp == 0 || snapshot_timestamp == ~0)) {
- error = EINVAL;
- break;
+ error = copyout(&u_offset, nhd->infoptr, sizeof(uint64_t));
+ if (error) goto cleanup;
+ error = copyout(&u_length, nhd->infoptr + sizeof(uint64_t), sizeof(uint64_t));
+ if (error) goto cleanup;
+ }
+
+ if (nhd->objid) {
+ VATTR_INIT(&va);
+ VATTR_WANTED(&va, va_linkid);
+ error = vnode_getattr(nspace_items[i].vp, &va, ctx);
+ if (error) goto cleanup;
+
+ uint64_t linkid = 0;
+ if (VATTR_IS_SUPPORTED (&va, va_linkid)) {
+ linkid = (uint64_t)va.va_linkid;
}
-
+ error = copyout(&linkid, nhd->objid, sizeof(uint64_t));
+ }
+cleanup:
+ if (error) {
+ if (fp_alloc_successful) fp_free(p, indx, fp);
+ if (vn_open_successful) vn_close(nspace_items[i].vp, fmode, ctx);
+ unblock = 1;
}
+
+ if (vn_get_succsessful) vnode_put(nspace_items[i].vp);
+
+ break;
}
-
+
if (unblock) {
if (nspace_items[i].vp && (nspace_items[i].vp->v_flag & VNEEDSSNAPSHOT)) {
vnode_lock_spin(nspace_items[i].vp);
nspace_items[i].vid = 0;
nspace_items[i].flags = NSPACE_ITEM_DONE;
nspace_items[i].token = 0;
-
+
wakeup((caddr_t)&(nspace_items[i].vp));
}
-
+
if (nspace_type == NSPACE_HANDLER_SNAPSHOT) {
// just go through every snapshot event and unblock it immediately.
if (error && (snapshot_timestamp == 0 || snapshot_timestamp == ~0)) {
- for(i=0; i < MAX_NSPACE_ITEMS; i++) {
+ for(i = 0; i < MAX_NSPACE_ITEMS; i++) {
if (nspace_items[i].flags & NSPACE_ITEM_NEW) {
if (nspace_flags_matches_handler(nspace_items[i].flags, nspace_type)) {
nspace_items[i].vp = NULL;
nspace_items[i].vid = 0;
nspace_items[i].flags = NSPACE_ITEM_DONE;
nspace_items[i].token = 0;
-
- wakeup((caddr_t)&(nspace_items[i].vp));
+
+ wakeup((caddr_t)&(nspace_items[i].vp));
}
}
}
}
}
-
+
lck_mtx_unlock(&nspace_handler_lock);
-
+
lck_mtx_lock(&nspace_handler_exclusion_lock);
nspace_handlers[nspace_type].handler_busy = 0;
lck_mtx_unlock(&nspace_handler_exclusion_lock);
-
+
return error;
}
{
int error = 0;
namespace_handler_data nhd;
-
+
bzero (&nhd, sizeof(namespace_handler_data));
- if (nspace_type == NSPACE_HANDLER_SNAPSHOT &&
- (snapshot_timestamp == 0 || snapshot_timestamp == ~0)) {
- return EINVAL;
- }
-
if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) {
return error;
}
-
+
error = validate_namespace_args (is64bit, size);
if (error) {
return error;
}
-
+
/* Copy in the userland pointers into our kernel-only struct */
if (is64bit) {
}
/* Otherwise the fields were pre-zeroed when we did the bzero above. */
}
- }
+ }
else {
/* 32 bit userland structures */
nhd.token = CAST_USER_ADDR_T(((user32_namespace_handler_info *)data)->token);
nhd.flags = CAST_USER_ADDR_T(((user32_namespace_handler_info *)data)->flags);
nhd.fdptr = CAST_USER_ADDR_T(((user32_namespace_handler_info *)data)->fdptr);
-
+
if (size > (sizeof(user32_namespace_handler_info))) {
if (size >= (sizeof(user32_namespace_handler_info_ext))) {
nhd.infoptr = CAST_USER_ADDR_T(((user32_namespace_handler_info_ext *)data)->infoptr);
/* Otherwise the fields were pre-zeroed when we did the bzero above. */
}
}
-
+
return wait_for_namespace_event(&nhd, nspace_type);
}
+/*
+ * Compatibility shim for historically mis-issued fsctl commands.
+ *
+ * Older userland issued several fsctl commands with the direction and
+ * size bits stripped, so only the base command (IOCBASECMD) survived.
+ * Map those bogus values back to their full FSIOC_* / disk-conditioner /
+ * Spotlight command words so the switch in fsctl_internal() can match
+ * on complete commands.  Any unrecognized value is returned unchanged
+ * and falls through to normal handling.
+ */
+static unsigned long
+fsctl_bogus_command_compat(unsigned long cmd)
+{
+
+ switch (cmd) {
+ case IOCBASECMD(FSIOC_SYNC_VOLUME):
+ return (FSIOC_SYNC_VOLUME);
+ case IOCBASECMD(FSIOC_ROUTEFS_SETROUTEID):
+ return (FSIOC_ROUTEFS_SETROUTEID);
+ case IOCBASECMD(FSIOC_SET_PACKAGE_EXTS):
+ return (FSIOC_SET_PACKAGE_EXTS);
+ case IOCBASECMD(FSIOC_NAMESPACE_HANDLER_GET):
+ return (FSIOC_NAMESPACE_HANDLER_GET);
+ case IOCBASECMD(FSIOC_OLD_SNAPSHOT_HANDLER_GET):
+ return (FSIOC_OLD_SNAPSHOT_HANDLER_GET);
+ case IOCBASECMD(FSIOC_SNAPSHOT_HANDLER_GET_EXT):
+ return (FSIOC_SNAPSHOT_HANDLER_GET_EXT);
+ case IOCBASECMD(FSIOC_NAMESPACE_HANDLER_UPDATE):
+ return (FSIOC_NAMESPACE_HANDLER_UPDATE);
+ case IOCBASECMD(FSIOC_NAMESPACE_HANDLER_UNBLOCK):
+ return (FSIOC_NAMESPACE_HANDLER_UNBLOCK);
+ case IOCBASECMD(FSIOC_NAMESPACE_HANDLER_CANCEL):
+ return (FSIOC_NAMESPACE_HANDLER_CANCEL);
+ case IOCBASECMD(FSIOC_NAMESPACE_HANDLER_SET_SNAPSHOT_TIME):
+ return (FSIOC_NAMESPACE_HANDLER_SET_SNAPSHOT_TIME);
+ case IOCBASECMD(FSIOC_NAMESPACE_ALLOW_DMG_SNAPSHOT_EVENTS):
+ return (FSIOC_NAMESPACE_ALLOW_DMG_SNAPSHOT_EVENTS);
+ case IOCBASECMD(FSIOC_SET_FSTYPENAME_OVERRIDE):
+ return (FSIOC_SET_FSTYPENAME_OVERRIDE);
+ case IOCBASECMD(DISK_CONDITIONER_IOC_GET):
+ return (DISK_CONDITIONER_IOC_GET);
+ case IOCBASECMD(DISK_CONDITIONER_IOC_SET):
+ return (DISK_CONDITIONER_IOC_SET);
+ case IOCBASECMD(FSIOC_FIOSEEKHOLE):
+ return (FSIOC_FIOSEEKHOLE);
+ case IOCBASECMD(FSIOC_FIOSEEKDATA):
+ return (FSIOC_FIOSEEKDATA);
+ case IOCBASECMD(SPOTLIGHT_IOC_GET_MOUNT_TIME):
+ return (SPOTLIGHT_IOC_GET_MOUNT_TIME);
+ case IOCBASECMD(SPOTLIGHT_IOC_GET_LAST_MTIME):
+ return (SPOTLIGHT_IOC_GET_LAST_MTIME);
+ }
+
+ /* not one of the known bogus encodings — pass through untouched */
+ return (cmd);
+}
+
/*
* Make a filesystem-specific control call:
*/
boolean_t is64bit;
u_int size;
#define STK_PARAMS 128
- char stkbuf[STK_PARAMS];
+ char stkbuf[STK_PARAMS] = {0};
caddr_t data, memp;
vnode_t vp = *arg_vp;
+ cmd = fsctl_bogus_command_compat(cmd);
+
size = IOCPARM_LEN(cmd);
if (size > IOCPARM_MAX) return (EINVAL);
is64bit = proc_is64bit(p);
memp = NULL;
+
if (size > sizeof (stkbuf)) {
if ((memp = (caddr_t)kalloc(size)) == 0) return ENOMEM;
data = memp;
} else {
data = &stkbuf[0];
};
-
+
if (cmd & IOC_IN) {
if (size) {
error = copyin(udata, data, size);
- if (error) goto FSCtl_Exit;
+ if (error) {
+ if (memp) {
+ kfree (memp, size);
+ }
+ return error;
+ }
} else {
if (is64bit) {
*(user_addr_t *)data = udata;
}
/* Check to see if it's a generic command */
- if (IOCBASECMD(cmd) == FSCTL_SYNC_VOLUME) {
- mount_t mp = vp->v_mount;
- int arg = *(uint32_t*)data;
-
- /* record vid of vp so we can drop it below. */
- uint32_t vvid = vp->v_id;
+ switch (cmd) {
- /*
- * Then grab mount_iterref so that we can release the vnode.
- * Without this, a thread may call vnode_iterate_prepare then
- * get into a deadlock because we've never released the root vp
- */
- error = mount_iterref (mp, 0);
- if (error) {
- goto FSCtl_Exit;
+ case FSIOC_SYNC_VOLUME: {
+ mount_t mp = vp->v_mount;
+ int arg = *(uint32_t*)data;
+
+ /* record vid of vp so we can drop it below. */
+ uint32_t vvid = vp->v_id;
+
+ /*
+ * Then grab mount_iterref so that we can release the vnode.
+ * Without this, a thread may call vnode_iterate_prepare then
+ * get into a deadlock because we've never released the root vp
+ */
+ error = mount_iterref (mp, 0);
+ if (error) {
+ break;
+ }
+ vnode_put(vp);
+
+ /* issue the sync for this volume */
+ (void)sync_callback(mp, (arg & FSCTL_SYNC_WAIT) ? &arg : NULL);
+
+ /*
+ * Then release the mount_iterref once we're done syncing; it's not
+ * needed for the VNOP_IOCTL below
+ */
+ mount_iterdrop(mp);
+
+ if (arg & FSCTL_SYNC_FULLSYNC) {
+ /* re-obtain vnode iocount on the root vp, if possible */
+ error = vnode_getwithvid (vp, vvid);
+ if (error == 0) {
+ error = VNOP_IOCTL(vp, F_FULLFSYNC, (caddr_t)NULL, 0, ctx);
+ vnode_put (vp);
+ }
+ }
+ /* mark the argument VP as having been released */
+ *arg_vp = NULL;
}
- vnode_put(vp);
+ break;
- /* issue the sync for this volume */
- (void)sync_callback(mp, (arg & FSCTL_SYNC_WAIT) ? &arg : NULL);
-
- /*
- * Then release the mount_iterref once we're done syncing; it's not
- * needed for the VNOP_IOCTL below
- */
- mount_iterdrop(mp);
+ case FSIOC_ROUTEFS_SETROUTEID: {
+#if ROUTEFS
+ char routepath[MAXPATHLEN];
+ size_t len = 0;
- if (arg & FSCTL_SYNC_FULLSYNC) {
- /* re-obtain vnode iocount on the root vp, if possible */
- error = vnode_getwithvid (vp, vvid);
- if (error == 0) {
- error = VNOP_IOCTL(vp, F_FULLFSYNC, (caddr_t)NULL, 0, ctx);
- vnode_put (vp);
+ if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) {
+ break;
+ }
+ bzero(routepath, MAXPATHLEN);
+ error = copyinstr(udata, &routepath[0], MAXPATHLEN, &len);
+ if (error) {
+ break;
+ }
+ error = routefs_kernel_mount(routepath);
+ if (error) {
+ break;
}
+#endif
+ }
+ break;
+
+ case FSIOC_SET_PACKAGE_EXTS: {
+ user_addr_t ext_strings;
+ uint32_t num_entries;
+ uint32_t max_width;
+
+ if ((error = priv_check_cred(kauth_cred_get(), PRIV_PACKAGE_EXTENSIONS, 0)))
+ break;
+
+ if ( (is64bit && size != sizeof(user64_package_ext_info))
+ || (is64bit == 0 && size != sizeof(user32_package_ext_info))) {
+
+ // either you're 64-bit and passed a 64-bit struct or
+ // you're 32-bit and passed a 32-bit struct. otherwise
+ // it's not ok.
+ error = EINVAL;
+ break;
+ }
+
+ if (is64bit) {
+ ext_strings = ((user64_package_ext_info *)data)->strings;
+ num_entries = ((user64_package_ext_info *)data)->num_entries;
+ max_width = ((user64_package_ext_info *)data)->max_width;
+ } else {
+ ext_strings = CAST_USER_ADDR_T(((user32_package_ext_info *)data)->strings);
+ num_entries = ((user32_package_ext_info *)data)->num_entries;
+ max_width = ((user32_package_ext_info *)data)->max_width;
+ }
+ error = set_package_extensions_table(ext_strings, num_entries, max_width);
}
- /* mark the argument VP as having been released */
- *arg_vp = NULL;
+ break;
- } else if (IOCBASECMD(cmd) == FSCTL_SET_PACKAGE_EXTS) {
- user_addr_t ext_strings;
- uint32_t num_entries;
- uint32_t max_width;
-
- if ( (is64bit && size != sizeof(user64_package_ext_info))
- || (is64bit == 0 && size != sizeof(user32_package_ext_info))) {
+ /* namespace handlers */
+ case FSIOC_NAMESPACE_HANDLER_GET: {
+ error = process_namespace_fsctl(NSPACE_HANDLER_NSPACE, is64bit, size, data);
+ }
+ break;
- // either you're 64-bit and passed a 64-bit struct or
- // you're 32-bit and passed a 32-bit struct. otherwise
- // it's not ok.
- error = EINVAL;
- goto FSCtl_Exit;
+ /* Snapshot handlers */
+ case FSIOC_OLD_SNAPSHOT_HANDLER_GET: {
+ error = process_namespace_fsctl(NSPACE_HANDLER_SNAPSHOT, is64bit, size, data);
}
+ break;
- if (is64bit) {
- ext_strings = ((user64_package_ext_info *)data)->strings;
- num_entries = ((user64_package_ext_info *)data)->num_entries;
- max_width = ((user64_package_ext_info *)data)->max_width;
- } else {
- ext_strings = CAST_USER_ADDR_T(((user32_package_ext_info *)data)->strings);
- num_entries = ((user32_package_ext_info *)data)->num_entries;
- max_width = ((user32_package_ext_info *)data)->max_width;
+ case FSIOC_SNAPSHOT_HANDLER_GET_EXT: {
+ error = process_namespace_fsctl(NSPACE_HANDLER_SNAPSHOT, is64bit, size, data);
}
-
- error = set_package_extensions_table(ext_strings, num_entries, max_width);
+ break;
+ case FSIOC_NAMESPACE_HANDLER_UPDATE: {
+ uint32_t token, val;
+ int i;
- }
+ if ((error = suser(kauth_cred_get(), &(p->p_acflag)))) {
+ break;
+ }
- /* namespace handlers */
- else if (IOCBASECMD(cmd) == FSCTL_NAMESPACE_HANDLER_GET) {
- error = process_namespace_fsctl(NSPACE_HANDLER_NSPACE, is64bit, size, data);
- }
+ if (!nspace_is_special_process(p)) {
+ error = EINVAL;
+ break;
+ }
- /* Snapshot handlers */
- else if (IOCBASECMD(cmd) == FSCTL_OLD_SNAPSHOT_HANDLER_GET) {
- error = process_namespace_fsctl(NSPACE_HANDLER_SNAPSHOT, is64bit, size, data);
- } else if (IOCBASECMD(cmd) == FSCTL_SNAPSHOT_HANDLER_GET_EXT) {
- error = process_namespace_fsctl(NSPACE_HANDLER_SNAPSHOT, is64bit, size, data);
- }
+ token = ((uint32_t *)data)[0];
+ val = ((uint32_t *)data)[1];
- /* Tracked File Handlers */
- else if (IOCBASECMD(cmd) == FSCTL_TRACKED_HANDLER_GET) {
- error = process_namespace_fsctl(NSPACE_HANDLER_TRACK, is64bit, size, data);
- }
- else if (IOCBASECMD(cmd) == FSCTL_NAMESPACE_HANDLER_GETDATA) {
- error = process_namespace_fsctl(NSPACE_HANDLER_TRACK, is64bit, size, data);
- } else if (IOCBASECMD(cmd) == FSCTL_NAMESPACE_HANDLER_UPDATE) {
- uint32_t token, val;
- int i;
+ lck_mtx_lock(&nspace_handler_lock);
- if ((error = suser(kauth_cred_get(), &(p->p_acflag)))) {
- goto FSCtl_Exit;
- }
+ for(i=0; i < MAX_NSPACE_ITEMS; i++) {
+ if (nspace_items[i].token == token) {
+ break; /* exit for loop, not case stmt */
+ }
+ }
- if (!nspace_is_special_process(p)) {
- error = EINVAL;
- goto FSCtl_Exit;
+ if (i >= MAX_NSPACE_ITEMS) {
+ error = ENOENT;
+ } else {
+ //
+ // if this bit is set, when resolve_nspace_item() times out
+ // it will loop and go back to sleep.
+ //
+ nspace_items[i].flags |= NSPACE_ITEM_RESET_TIMER;
+ }
+
+ lck_mtx_unlock(&nspace_handler_lock);
+
+ if (error) {
+ printf("nspace-handler-update: did not find token %u\n", token);
+ }
}
+ break;
- token = ((uint32_t *)data)[0];
- val = ((uint32_t *)data)[1];
+ case FSIOC_NAMESPACE_HANDLER_UNBLOCK: {
+ uint32_t token, val;
+ int i;
- lck_mtx_lock(&nspace_handler_lock);
+ if ((error = suser(kauth_cred_get(), &(p->p_acflag)))) {
+ break;
+ }
- for(i=0; i < MAX_NSPACE_ITEMS; i++) {
- if (nspace_items[i].token == token) {
+ if (!nspace_is_special_process(p)) {
+ error = EINVAL;
break;
}
- }
- if (i >= MAX_NSPACE_ITEMS) {
- error = ENOENT;
- } else {
- //
- // if this bit is set, when resolve_nspace_item() times out
- // it will loop and go back to sleep.
- //
- nspace_items[i].flags |= NSPACE_ITEM_RESET_TIMER;
- }
+ token = ((uint32_t *)data)[0];
+ val = ((uint32_t *)data)[1];
- lck_mtx_unlock(&nspace_handler_lock);
+ lck_mtx_lock(&nspace_handler_lock);
- if (error) {
- printf("nspace-handler-update: did not find token %u\n", token);
- }
+ for(i=0; i < MAX_NSPACE_ITEMS; i++) {
+ if (nspace_items[i].token == token) {
+ break; /* exit for loop, not case statement */
+ }
+ }
+
+ if (i >= MAX_NSPACE_ITEMS) {
+ printf("nspace-handler-unblock: did not find token %u\n", token);
+ error = ENOENT;
+ } else {
+ if (val == 0 && nspace_items[i].vp) {
+ vnode_lock_spin(nspace_items[i].vp);
+ nspace_items[i].vp->v_flag &= ~VNEEDSSNAPSHOT;
+ vnode_unlock(nspace_items[i].vp);
+ }
- } else if (IOCBASECMD(cmd) == FSCTL_NAMESPACE_HANDLER_UNBLOCK) {
- uint32_t token, val;
- int i;
+ nspace_items[i].vp = NULL;
+ nspace_items[i].arg = NULL;
+ nspace_items[i].op = 0;
+ nspace_items[i].vid = 0;
+ nspace_items[i].flags = NSPACE_ITEM_DONE;
+ nspace_items[i].token = 0;
- if ((error = suser(kauth_cred_get(), &(p->p_acflag)))) {
- goto FSCtl_Exit;
- }
+ wakeup((caddr_t)&(nspace_items[i].vp));
+ }
- if (!nspace_is_special_process(p)) {
- error = EINVAL;
- goto FSCtl_Exit;
+ lck_mtx_unlock(&nspace_handler_lock);
}
+ break;
- token = ((uint32_t *)data)[0];
- val = ((uint32_t *)data)[1];
-
- lck_mtx_lock(&nspace_handler_lock);
+ case FSIOC_NAMESPACE_HANDLER_CANCEL: {
+ uint32_t token, val;
+ int i;
- for(i=0; i < MAX_NSPACE_ITEMS; i++) {
- if (nspace_items[i].token == token) {
+ if ((error = suser(kauth_cred_get(), &(p->p_acflag)))) {
break;
}
- }
- if (i >= MAX_NSPACE_ITEMS) {
- printf("nspace-handler-unblock: did not find token %u\n", token);
- error = ENOENT;
- } else {
- if (val == 0 && nspace_items[i].vp) {
- vnode_lock_spin(nspace_items[i].vp);
- nspace_items[i].vp->v_flag &= ~VNEEDSSNAPSHOT;
- vnode_unlock(nspace_items[i].vp);
+ if (!nspace_is_special_process(p)) {
+ error = EINVAL;
+ break;
}
- nspace_items[i].vp = NULL;
- nspace_items[i].arg = NULL;
- nspace_items[i].op = 0;
- nspace_items[i].vid = 0;
- nspace_items[i].flags = NSPACE_ITEM_DONE;
- nspace_items[i].token = 0;
-
- wakeup((caddr_t)&(nspace_items[i].vp));
- }
+ token = ((uint32_t *)data)[0];
+ val = ((uint32_t *)data)[1];
- lck_mtx_unlock(&nspace_handler_lock);
+ lck_mtx_lock(&nspace_handler_lock);
- } else if (IOCBASECMD(cmd) == FSCTL_NAMESPACE_HANDLER_CANCEL) {
- uint32_t token, val;
- int i;
+ for(i=0; i < MAX_NSPACE_ITEMS; i++) {
+ if (nspace_items[i].token == token) {
+ break; /* exit for loop, not case stmt */
+ }
+ }
- if ((error = suser(kauth_cred_get(), &(p->p_acflag)))) {
- goto FSCtl_Exit;
- }
+ if (i >= MAX_NSPACE_ITEMS) {
+ printf("nspace-handler-cancel: did not find token %u\n", token);
+ error = ENOENT;
+ } else {
+ if (nspace_items[i].vp) {
+ vnode_lock_spin(nspace_items[i].vp);
+ nspace_items[i].vp->v_flag &= ~VNEEDSSNAPSHOT;
+ vnode_unlock(nspace_items[i].vp);
+ }
- if (!nspace_is_special_process(p)) {
- error = EINVAL;
- goto FSCtl_Exit;
- }
+ nspace_items[i].vp = NULL;
+ nspace_items[i].arg = NULL;
+ nspace_items[i].vid = 0;
+ nspace_items[i].token = val;
+ nspace_items[i].flags &= ~NSPACE_ITEM_PROCESSING;
+ nspace_items[i].flags |= NSPACE_ITEM_CANCELLED;
- token = ((uint32_t *)data)[0];
- val = ((uint32_t *)data)[1];
+ wakeup((caddr_t)&(nspace_items[i].vp));
+ }
- lck_mtx_lock(&nspace_handler_lock);
+ lck_mtx_unlock(&nspace_handler_lock);
+ }
+ break;
- for(i=0; i < MAX_NSPACE_ITEMS; i++) {
- if (nspace_items[i].token == token) {
+ case FSIOC_NAMESPACE_HANDLER_SET_SNAPSHOT_TIME: {
+ if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) {
break;
}
+
+ // we explicitly do not do the namespace_handler_proc check here
+
+ lck_mtx_lock(&nspace_handler_lock);
+ snapshot_timestamp = ((uint32_t *)data)[0];
+ wakeup(&nspace_item_idx);
+ lck_mtx_unlock(&nspace_handler_lock);
+ printf("nspace-handler-set-snapshot-time: %d\n", (int)snapshot_timestamp);
+
}
+ break;
- if (i >= MAX_NSPACE_ITEMS) {
- printf("nspace-handler-cancel: did not find token %u\n", token);
- error = ENOENT;
- } else {
- if (nspace_items[i].vp) {
- vnode_lock_spin(nspace_items[i].vp);
- nspace_items[i].vp->v_flag &= ~VNEEDSSNAPSHOT;
- vnode_unlock(nspace_items[i].vp);
+ case FSIOC_NAMESPACE_ALLOW_DMG_SNAPSHOT_EVENTS:
+ {
+ if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) {
+ break;
}
- nspace_items[i].vp = NULL;
- nspace_items[i].arg = NULL;
- nspace_items[i].vid = 0;
- nspace_items[i].token = val;
- nspace_items[i].flags &= ~NSPACE_ITEM_PROCESSING;
- nspace_items[i].flags |= NSPACE_ITEM_CANCELLED;
+ lck_mtx_lock(&nspace_handler_lock);
+ nspace_allow_virtual_devs = ((uint32_t *)data)[0];
+ lck_mtx_unlock(&nspace_handler_lock);
+ printf("nspace-snapshot-handler will%s allow events on disk-images\n",
+ nspace_allow_virtual_devs ? "" : " NOT");
+ error = 0;
- wakeup((caddr_t)&(nspace_items[i].vp));
}
+ break;
- lck_mtx_unlock(&nspace_handler_lock);
- } else if (IOCBASECMD(cmd) == FSCTL_NAMESPACE_HANDLER_SET_SNAPSHOT_TIME) {
- if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) {
- goto FSCtl_Exit;
+ case FSIOC_SET_FSTYPENAME_OVERRIDE:
+ {
+ if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) {
+ break;
+ }
+ if (vp->v_mount) {
+ mount_lock(vp->v_mount);
+ if (data[0] != 0) {
+ strlcpy(&vp->v_mount->fstypename_override[0], data, MFSTYPENAMELEN);
+ vp->v_mount->mnt_kern_flag |= MNTK_TYPENAME_OVERRIDE;
+ if (vfs_isrdonly(vp->v_mount) && strcmp(vp->v_mount->fstypename_override, "mtmfs") == 0) {
+ vp->v_mount->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
+ vp->v_mount->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
+ }
+ } else {
+ if (strcmp(vp->v_mount->fstypename_override, "mtmfs") == 0) {
+ vp->v_mount->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
+ }
+ vp->v_mount->mnt_kern_flag &= ~MNTK_TYPENAME_OVERRIDE;
+ vp->v_mount->fstypename_override[0] = '\0';
+ }
+ mount_unlock(vp->v_mount);
+ }
}
+ break;
- // we explicitly do not do the namespace_handler_proc check here
-
- lck_mtx_lock(&nspace_handler_lock);
- snapshot_timestamp = ((uint32_t *)data)[0];
- wakeup(&nspace_item_idx);
- lck_mtx_unlock(&nspace_handler_lock);
- printf("nspace-handler-set-snapshot-time: %d\n", (int)snapshot_timestamp);
+ case DISK_CONDITIONER_IOC_GET: {
+ error = disk_conditioner_get_info(vp->v_mount, (disk_conditioner_info *)data);
+ }
+ break;
- } else if (IOCBASECMD(cmd) == FSCTL_NAMESPACE_ALLOW_DMG_SNAPSHOT_EVENTS) {
- if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) {
- goto FSCtl_Exit;
+ case DISK_CONDITIONER_IOC_SET: {
+ error = disk_conditioner_set_info(vp->v_mount, (disk_conditioner_info *)data);
}
+ break;
- lck_mtx_lock(&nspace_handler_lock);
- nspace_allow_virtual_devs = ((uint32_t *)data)[0];
- lck_mtx_unlock(&nspace_handler_lock);
- printf("nspace-snapshot-handler will%s allow events on disk-images\n",
- nspace_allow_virtual_devs ? "" : " NOT");
- error = 0;
-
- } else if (IOCBASECMD(cmd) == FSCTL_SET_FSTYPENAME_OVERRIDE) {
- if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) {
- goto FSCtl_Exit;
- }
- if (vp->v_mount) {
- mount_lock(vp->v_mount);
- if (data[0] != 0) {
- strlcpy(&vp->v_mount->fstypename_override[0], data, MFSTYPENAMELEN);
- vp->v_mount->mnt_kern_flag |= MNTK_TYPENAME_OVERRIDE;
- if (vfs_isrdonly(vp->v_mount) && strcmp(vp->v_mount->fstypename_override, "mtmfs") == 0) {
- vp->v_mount->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
- vp->v_mount->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
- }
- } else {
- if (strcmp(vp->v_mount->fstypename_override, "mtmfs") == 0) {
- vp->v_mount->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
- }
- vp->v_mount->mnt_kern_flag &= ~MNTK_TYPENAME_OVERRIDE;
- vp->v_mount->fstypename_override[0] = '\0';
+ default: {
+ /* other, known commands shouldn't be passed down here */
+ switch (cmd) {
+ case F_PUNCHHOLE:
+ case F_TRIM_ACTIVE_FILE:
+ case F_RDADVISE:
+ case F_TRANSCODEKEY:
+ case F_GETPROTECTIONLEVEL:
+ case F_GETDEFAULTPROTLEVEL:
+ case F_MAKECOMPRESSED:
+ case F_SET_GREEDY_MODE:
+ case F_SETSTATICCONTENT:
+ case F_SETIOTYPE:
+ case F_SETBACKINGSTORE:
+ case F_GETPATH_MTMINFO:
+ case APFSIOC_REVERT_TO_SNAPSHOT:
+ case FSIOC_FIOSEEKHOLE:
+ case FSIOC_FIOSEEKDATA:
+ case HFS_GET_BOOT_INFO:
+ case HFS_SET_BOOT_INFO:
+ case FIOPINSWAP:
+ case F_CHKCLEAN:
+ case F_FULLFSYNC:
+ case F_BARRIERFSYNC:
+ case F_FREEZE_FS:
+ case F_THAW_FS:
+ error = EINVAL;
+ goto outdrop;
}
- mount_unlock(vp->v_mount);
+ /* Invoke the filesystem-specific code */
+ error = VNOP_IOCTL(vp, cmd, data, options, ctx);
}
- } else {
- /* Invoke the filesystem-specific code */
- error = VNOP_IOCTL(vp, IOCBASECMD(cmd), data, options, ctx);
- }
-
-
+
+ } /* end switch stmt */
+
/*
- * Copy any data to user, size was
+ * if no errors, copy any data to user. Size was
* already set and checked above.
*/
- if (error == 0 && (cmd & IOC_OUT) && size)
+ if (error == 0 && (cmd & IOC_OUT) && size)
error = copyout(data, udata, size);
-
-FSCtl_Exit:
- if (memp) kfree(memp, size);
-
+
+outdrop:
+ if (memp) {
+ kfree(memp, size);
+ }
+
return error;
}
fsctl (proc_t p, struct fsctl_args *uap, __unused int32_t *retval)
{
int error;
- struct nameidata nd;
+ struct nameidata nd;
u_long nameiflags;
vnode_t vp = NULL;
vfs_context_t ctx = vfs_context_current();
AUDIT_ARG(fd, uap->fd);
AUDIT_ARG(cmd, uap->cmd);
AUDIT_ARG(value32, uap->options);
-
+
/* Get the vnode for the file we are getting info on: */
if ((error = file_vnode(uap->fd, &vp)))
- goto done;
+ return error;
fd = uap->fd;
if ((error = vnode_getwithref(vp))) {
- goto done;
+ file_drop(fd);
+ return error;
}
#if CONFIG_MACF
- error = mac_mount_check_fsctl(ctx, vnode_mount(vp), uap->cmd);
- if (error) {
- goto done;
+ if ((error = mac_mount_check_fsctl(ctx, vnode_mount(vp), uap->cmd))) {
+ file_drop(fd);
+ vnode_put(vp);
+ return error;
}
#endif
error = fsctl_internal(p, &vp, uap->cmd, (user_addr_t)uap->data, uap->options, ctx);
-done:
- if (fd != -1)
- file_drop(fd);
+ file_drop(fd);
- if (vp)
+ /*validate vp; fsctl_internal() can drop iocount and reset vp to NULL*/
+ if (vp) {
vnode_put(vp);
+ }
+
return error;
}
/* end of fsctl system call */
-/*
- * An in-kernel sync for power management to call.
- */
-__private_extern__ int
-sync_internal(void)
-{
- int error;
-
- struct sync_args data;
-
- int retval[2];
-
-
- error = sync(current_proc(), &data, &retval[0]);
-
-
- return (error);
-} /* end of sync_internal call */
-
-
/*
* Retrieve the data of an extended attribute.
*/
vp = nd.ni_vp;
nameidone(&nd);
- if ((error = copyinstr(uap->attrname, attrname, sizeof(attrname), &namelen) != 0)) {
+ error = copyinstr(uap->attrname, attrname, sizeof(attrname), &namelen);
+ if (error != 0) {
goto out;
}
if (xattr_protected(attrname)) {
/*
* the specific check for 0xffffffff is a hack to preserve
 * binary compatibility in K64 with applications that discovered
- * that passing in a buf pointer and a size of -1 resulted in
+ * that passing in a buf pointer and a size of -1 resulted in
* just the size of the indicated extended attribute being returned.
* this isn't part of the documented behavior, but because of the
 * original implementation's check for "uap->size > 0", this behavior
* was allowed. In K32 that check turned into a signed comparison
* even though uap->size is unsigned... in K64, we blow by that
* check because uap->size is unsigned and doesn't get sign smeared
- * in the munger for a 32 bit user app. we also need to add a
+ * in the munger for a 32 bit user app. we also need to add a
* check to limit the maximum size of the buffer being passed in...
 * unfortunately, the underlying filesystems seem to just malloc
* the requested size even if the actual extended attribute is tiny.
if (uap->value) {
if (uap->size > (size_t)XATTR_MAXSIZE)
uap->size = XATTR_MAXSIZE;
-
+
auio = uio_createwithbuffer(1, uap->position, spacetype, UIO_READ,
&uio_buf[0], sizeof(uio_buf));
uio_addiov(auio, uap->value, uap->size);
file_drop(uap->fd);
return(error);
}
- if ((error = copyinstr(uap->attrname, attrname, sizeof(attrname), &namelen) != 0)) {
+ error = copyinstr(uap->attrname, attrname, sizeof(attrname), &namelen);
+ if (error != 0) {
goto out;
}
if (xattr_protected(attrname)) {
if (uap->options & (XATTR_NOSECURITY | XATTR_NODEFAULT))
return (EINVAL);
- if ((error = copyinstr(uap->attrname, attrname, sizeof(attrname), &namelen) != 0)) {
+ error = copyinstr(uap->attrname, attrname, sizeof(attrname), &namelen);
+ if (error != 0) {
if (error == EPERM) {
/* if the string won't fit in attrname, copyinstr emits EPERM */
return (ENAMETOOLONG);
if (uap->options & (XATTR_NOFOLLOW | XATTR_NOSECURITY | XATTR_NODEFAULT))
return (EINVAL);
- if ((error = copyinstr(uap->attrname, attrname, sizeof(attrname), &namelen) != 0)) {
- return (error);
+ error = copyinstr(uap->attrname, attrname, sizeof(attrname), &namelen);
+ if (error != 0) {
+ if (error == EPERM) {
+ /* if the string won't fit in attrname, copyinstr emits EPERM */
+ return (ENAMETOOLONG);
+ }
+ /* Otherwise return the default error from copyinstr to detect ERANGE, etc */
+ return error;
}
if (xattr_protected(attrname))
return(EPERM);
if (uap->options & (XATTR_NOSECURITY | XATTR_NODEFAULT))
return (EINVAL);
- nameiflags = ((uap->options & XATTR_NOFOLLOW) ? 0 : FOLLOW) | NOTRIGGER;
+ nameiflags = (uap->options & XATTR_NOFOLLOW) ? 0 : FOLLOW;
NDINIT(&nd, LOOKUP, OP_LISTXATTR, nameiflags, spacetype, uap->path, ctx);
if ((error = namei(&nd))) {
return (error);
return(error);
}
if (uap->namebuf != 0 && uap->bufsize > 0) {
- auio = uio_createwithbuffer(1, 0, spacetype,
+ auio = uio_createwithbuffer(1, 0, spacetype,
UIO_READ, &uio_buf[0], sizeof(uio_buf));
uio_addiov(auio, uap->namebuf, uap->bufsize);
}
return (error);
}
-/*
- * Obtain the full pathname of a file system object by id.
- *
- * This is a private SPI used by the File Manager.
- */
-__private_extern__
-int
-fsgetpath(__unused proc_t p, struct fsgetpath_args *uap, user_ssize_t *retval)
+static int fsgetpath_internal(
+ vfs_context_t ctx, int volfs_id, uint64_t objid,
+ vm_size_t bufsize, caddr_t buf, int *pathlen)
{
- vnode_t vp;
+ int error;
struct mount *mp = NULL;
- vfs_context_t ctx = vfs_context_current();
- fsid_t fsid;
- char *realpath;
- int bpflags;
+ vnode_t vp;
int length;
- int error;
+ int bpflags;
+ /* maximum number of times to retry build_path */
+ unsigned int retries = 0x10;
- if ((error = copyin(uap->fsid, (caddr_t)&fsid, sizeof(fsid)))) {
- return (error);
- }
- AUDIT_ARG(value32, fsid.val[0]);
- AUDIT_ARG(value64, uap->objid);
- /* Restrict output buffer size for now. */
- if (uap->bufsize > PAGE_SIZE) {
+ if (bufsize > PAGE_SIZE) {
return (EINVAL);
- }
- MALLOC(realpath, char *, uap->bufsize, M_TEMP, M_WAITOK);
- if (realpath == NULL) {
+ }
+
+ if (buf == NULL) {
return (ENOMEM);
}
- /* Find the target mountpoint. */
- if ((mp = mount_lookupby_volfsid(fsid.val[0], 1)) == NULL) {
+
+retry:
+ if ((mp = mount_lookupby_volfsid(volfs_id, 1)) == NULL) {
error = ENOTSUP; /* unexpected failure */
- goto out;
+ return ENOTSUP;
}
+
unionget:
- /* Find the target vnode. */
- if (uap->objid == 2) {
+ if (objid == 2) {
error = VFS_ROOT(mp, &vp, ctx);
} else {
- error = VFS_VGET(mp, (ino64_t)uap->objid, &vp, ctx);
+ error = VFS_VGET(mp, (ino64_t)objid, &vp, ctx);
}
if (error == ENOENT && (mp->mnt_flag & MNT_UNION)) {
vfs_unbusy(tmp);
if (vfs_busy(mp, LK_NOWAIT) == 0)
goto unionget;
- } else
+ } else {
vfs_unbusy(mp);
+ }
if (error) {
- goto out;
+ return error;
}
+
#if CONFIG_MACF
error = mac_vnode_check_fsgetpath(ctx, vp);
if (error) {
vnode_put(vp);
- goto out;
+ return error;
}
#endif
+
/* Obtain the absolute path to this vnode. */
bpflags = vfs_context_suser(ctx) ? BUILDPATH_CHECKACCESS : 0;
bpflags |= BUILDPATH_CHECK_MOVED;
- error = build_path(vp, realpath, uap->bufsize, &length, bpflags, ctx);
+ error = build_path(vp, buf, bufsize, &length, bpflags, ctx);
vnode_put(vp);
+
if (error) {
+ /* there was a race building the path, try a few more times */
+ if (error == EAGAIN) {
+ --retries;
+ if (retries > 0)
+ goto retry;
+
+ error = ENOENT;
+ }
goto out;
}
- AUDIT_ARG(text, realpath);
+
+ AUDIT_ARG(text, buf);
if (kdebug_enable) {
long dbg_parms[NUMPARMS];
- int dbg_namelen;
+ int dbg_namelen;
- dbg_namelen = (int)sizeof(dbg_parms);
+ dbg_namelen = (int)sizeof(dbg_parms);
+
+ if (length < dbg_namelen) {
+ memcpy((char *)dbg_parms, buf, length);
+ memset((char *)dbg_parms + length, 0, dbg_namelen - length);
+
+ dbg_namelen = length;
+ } else {
+ memcpy((char *)dbg_parms, buf + (length - dbg_namelen), dbg_namelen);
+ }
+
+ kdebug_vfs_lookup(dbg_parms, dbg_namelen, (void *)vp,
+ KDBG_VFS_LOOKUP_FLAG_LOOKUP);
+ }
+
+ *pathlen = (user_ssize_t)length; /* may be superseded by error */
+
+out:
+ return (error);
+}
+
+/*
+ * Obtain the full pathname of a file system object by id.
+ */
+int
+fsgetpath(__unused proc_t p, struct fsgetpath_args *uap, user_ssize_t *retval)
+{
+ vfs_context_t ctx = vfs_context_current();
+ fsid_t fsid;
+ char *realpath;
+ int length;
+ int error;
+
+ if ((error = copyin(uap->fsid, (caddr_t)&fsid, sizeof(fsid)))) {
+ return (error);
+ }
+ AUDIT_ARG(value32, fsid.val[0]);
+ AUDIT_ARG(value64, uap->objid);
+ /* Restrict output buffer size for now. */
- if (length < dbg_namelen) {
- memcpy((char *)dbg_parms, realpath, length);
- memset((char *)dbg_parms + length, 0, dbg_namelen - length);
+ if (uap->bufsize > PAGE_SIZE) {
+ return (EINVAL);
+ }
+ MALLOC(realpath, char *, uap->bufsize, M_TEMP, M_WAITOK | M_ZERO);
+ if (realpath == NULL) {
+ return (ENOMEM);
+ }
- dbg_namelen = length;
- } else
- memcpy((char *)dbg_parms, realpath + (length - dbg_namelen), dbg_namelen);
+ error = fsgetpath_internal(
+ ctx, fsid.val[0], uap->objid,
+ uap->bufsize, realpath, &length);
- kdebug_lookup_gen_events(dbg_parms, dbg_namelen, (void *)vp, TRUE);
+ if (error) {
+ goto out;
}
+
error = copyout((caddr_t)realpath, uap->buf, length);
*retval = (user_ssize_t)length; /* may be superseded by error */
* EFAULT
*/
static int
-munge_statfs(struct mount *mp, struct vfsstatfs *sfsp,
- user_addr_t bufp, int *sizep, boolean_t is_64_bit,
+munge_statfs(struct mount *mp, struct vfsstatfs *sfsp,
+ user_addr_t bufp, int *sizep, boolean_t is_64_bit,
boolean_t partial_copy)
{
int error;
sfs.f_fsid = sfsp->f_fsid;
sfs.f_owner = sfsp->f_owner;
if (mp->mnt_kern_flag & MNTK_TYPENAME_OVERRIDE) {
- strlcpy(&sfs.f_fstypename[0], &mp->fstypename_override[0], MFSTYPENAMELEN);
+ strlcpy(&sfs.f_fstypename[0], &mp->fstypename_override[0], MFSNAMELEN);
} else {
strlcpy(&sfs.f_fstypename[0], &sfsp->f_fstypename[0], MFSNAMELEN);
}
my_size = copy_size = sizeof(sfs);
bzero(&sfs, my_size);
-
+
sfs.f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
sfs.f_type = mp->mnt_vtable->vfc_typenum;
sfs.f_reserved1 = (short)sfsp->f_fssubtype;
-
+
/*
* It's possible for there to be more than 2^^31 blocks in the filesystem, so we
* have to fudge the numbers here in that case. We inflate the blocksize in order
* to reflect the filesystem size as best we can.
*/
- if ((sfsp->f_blocks > INT_MAX)
- /* Hack for 4061702 . I think the real fix is for Carbon to
+ if ((sfsp->f_blocks > INT_MAX)
+ /* Hack for 4061702 . I think the real fix is for Carbon to
* look for some volume capability and not depend on hidden
- * semantics agreed between a FS and carbon.
+ * semantics agreed between a FS and carbon.
* f_blocks, f_bfree, and f_bavail set to -1 is the trigger
* for Carbon to set bNoVolumeSizes volume attribute.
- * Without this the webdavfs files cannot be copied onto
+ * Without this the webdavfs files cannot be copied onto
* disk as they look huge. This change should not affect
* XSAN as they should not setting these to -1..
*/
sfs.f_fsid = sfsp->f_fsid;
sfs.f_owner = sfsp->f_owner;
if (mp->mnt_kern_flag & MNTK_TYPENAME_OVERRIDE) {
- strlcpy(&sfs.f_fstypename[0], &mp->fstypename_override[0], MFSTYPENAMELEN);
+ strlcpy(&sfs.f_fstypename[0], &mp->fstypename_override[0], MFSNAMELEN);
} else {
strlcpy(&sfs.f_fstypename[0], &sfsp->f_fstypename[0], MFSNAMELEN);
}
}
error = copyout((caddr_t)&sfs, bufp, copy_size);
}
-
+
if (sizep != NULL) {
*sizep = my_size;
}
usbp->st_qspare[1] = sbp->st_qspare[1];
}
-void munge_user32_stat(struct stat *sbp, struct user32_stat *usbp)
-{
- bzero(usbp, sizeof(*usbp));
+void munge_user32_stat(struct stat *sbp, struct user32_stat *usbp)
+{
+ bzero(usbp, sizeof(*usbp));
+
+ usbp->st_dev = sbp->st_dev;
+ usbp->st_ino = sbp->st_ino;
+ usbp->st_mode = sbp->st_mode;
+ usbp->st_nlink = sbp->st_nlink;
+ usbp->st_uid = sbp->st_uid;
+ usbp->st_gid = sbp->st_gid;
+ usbp->st_rdev = sbp->st_rdev;
+#ifndef _POSIX_C_SOURCE
+ usbp->st_atimespec.tv_sec = sbp->st_atimespec.tv_sec;
+ usbp->st_atimespec.tv_nsec = sbp->st_atimespec.tv_nsec;
+ usbp->st_mtimespec.tv_sec = sbp->st_mtimespec.tv_sec;
+ usbp->st_mtimespec.tv_nsec = sbp->st_mtimespec.tv_nsec;
+ usbp->st_ctimespec.tv_sec = sbp->st_ctimespec.tv_sec;
+ usbp->st_ctimespec.tv_nsec = sbp->st_ctimespec.tv_nsec;
+#else
+ usbp->st_atime = sbp->st_atime;
+ usbp->st_atimensec = sbp->st_atimensec;
+ usbp->st_mtime = sbp->st_mtime;
+ usbp->st_mtimensec = sbp->st_mtimensec;
+ usbp->st_ctime = sbp->st_ctime;
+ usbp->st_ctimensec = sbp->st_ctimensec;
+#endif
+ usbp->st_size = sbp->st_size;
+ usbp->st_blocks = sbp->st_blocks;
+ usbp->st_blksize = sbp->st_blksize;
+ usbp->st_flags = sbp->st_flags;
+ usbp->st_gen = sbp->st_gen;
+ usbp->st_lspare = sbp->st_lspare;
+ usbp->st_qspare[0] = sbp->st_qspare[0];
+ usbp->st_qspare[1] = sbp->st_qspare[1];
+}
+
+/*
+ * copy stat64 structure into user_stat64 structure.
+ */
+void munge_user64_stat64(struct stat64 *sbp, struct user64_stat64 *usbp)
+{
+ bzero(usbp, sizeof(*usbp));
+
+ usbp->st_dev = sbp->st_dev;
+ usbp->st_ino = sbp->st_ino;
+ usbp->st_mode = sbp->st_mode;
+ usbp->st_nlink = sbp->st_nlink;
+ usbp->st_uid = sbp->st_uid;
+ usbp->st_gid = sbp->st_gid;
+ usbp->st_rdev = sbp->st_rdev;
+#ifndef _POSIX_C_SOURCE
+ usbp->st_atimespec.tv_sec = sbp->st_atimespec.tv_sec;
+ usbp->st_atimespec.tv_nsec = sbp->st_atimespec.tv_nsec;
+ usbp->st_mtimespec.tv_sec = sbp->st_mtimespec.tv_sec;
+ usbp->st_mtimespec.tv_nsec = sbp->st_mtimespec.tv_nsec;
+ usbp->st_ctimespec.tv_sec = sbp->st_ctimespec.tv_sec;
+ usbp->st_ctimespec.tv_nsec = sbp->st_ctimespec.tv_nsec;
+ usbp->st_birthtimespec.tv_sec = sbp->st_birthtimespec.tv_sec;
+ usbp->st_birthtimespec.tv_nsec = sbp->st_birthtimespec.tv_nsec;
+#else
+ usbp->st_atime = sbp->st_atime;
+ usbp->st_atimensec = sbp->st_atimensec;
+ usbp->st_mtime = sbp->st_mtime;
+ usbp->st_mtimensec = sbp->st_mtimensec;
+ usbp->st_ctime = sbp->st_ctime;
+ usbp->st_ctimensec = sbp->st_ctimensec;
+ usbp->st_birthtime = sbp->st_birthtime;
+ usbp->st_birthtimensec = sbp->st_birthtimensec;
+#endif
+ usbp->st_size = sbp->st_size;
+ usbp->st_blocks = sbp->st_blocks;
+ usbp->st_blksize = sbp->st_blksize;
+ usbp->st_flags = sbp->st_flags;
+ usbp->st_gen = sbp->st_gen;
+ usbp->st_lspare = sbp->st_lspare;
+ usbp->st_qspare[0] = sbp->st_qspare[0];
+ usbp->st_qspare[1] = sbp->st_qspare[1];
+}
+
+void munge_user32_stat64(struct stat64 *sbp, struct user32_stat64 *usbp)
+{
+ bzero(usbp, sizeof(*usbp));
+
+ usbp->st_dev = sbp->st_dev;
+ usbp->st_ino = sbp->st_ino;
+ usbp->st_mode = sbp->st_mode;
+ usbp->st_nlink = sbp->st_nlink;
+ usbp->st_uid = sbp->st_uid;
+ usbp->st_gid = sbp->st_gid;
+ usbp->st_rdev = sbp->st_rdev;
+#ifndef _POSIX_C_SOURCE
+ usbp->st_atimespec.tv_sec = sbp->st_atimespec.tv_sec;
+ usbp->st_atimespec.tv_nsec = sbp->st_atimespec.tv_nsec;
+ usbp->st_mtimespec.tv_sec = sbp->st_mtimespec.tv_sec;
+ usbp->st_mtimespec.tv_nsec = sbp->st_mtimespec.tv_nsec;
+ usbp->st_ctimespec.tv_sec = sbp->st_ctimespec.tv_sec;
+ usbp->st_ctimespec.tv_nsec = sbp->st_ctimespec.tv_nsec;
+ usbp->st_birthtimespec.tv_sec = sbp->st_birthtimespec.tv_sec;
+ usbp->st_birthtimespec.tv_nsec = sbp->st_birthtimespec.tv_nsec;
+#else
+ usbp->st_atime = sbp->st_atime;
+ usbp->st_atimensec = sbp->st_atimensec;
+ usbp->st_mtime = sbp->st_mtime;
+ usbp->st_mtimensec = sbp->st_mtimensec;
+ usbp->st_ctime = sbp->st_ctime;
+ usbp->st_ctimensec = sbp->st_ctimensec;
+ usbp->st_birthtime = sbp->st_birthtime;
+ usbp->st_birthtimensec = sbp->st_birthtimensec;
+#endif
+ usbp->st_size = sbp->st_size;
+ usbp->st_blocks = sbp->st_blocks;
+ usbp->st_blksize = sbp->st_blksize;
+ usbp->st_flags = sbp->st_flags;
+ usbp->st_gen = sbp->st_gen;
+ usbp->st_lspare = sbp->st_lspare;
+ usbp->st_qspare[0] = sbp->st_qspare[0];
+ usbp->st_qspare[1] = sbp->st_qspare[1];
+}
+
+/*
+ * Purge buffer cache for simulating cold starts
+ */
+static int vnode_purge_callback(struct vnode *vp, __unused void *cargs)
+{
+ ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL /* off_t *resid_off */, UBC_PUSHALL | UBC_INVALIDATE);
+
+ return VNODE_RETURNED;
+}
+
+static int vfs_purge_callback(mount_t mp, __unused void * arg)
+{
+ vnode_iterate(mp, VNODE_WAIT | VNODE_ITERATE_ALL, vnode_purge_callback, NULL);
+
+ return VFS_RETURNED;
+}
+
+int
+vfs_purge(__unused struct proc *p, __unused struct vfs_purge_args *uap, __unused int32_t *retval)
+{
+ if (!kauth_cred_issuser(kauth_cred_get()))
+ return EPERM;
+
+ vfs_iterate(0/* flags */, vfs_purge_callback, NULL);
+
+ return 0;
+}
+
+/*
+ * gets the vnode associated with the (unnamed) snapshot directory
+ * for a Filesystem. The snapshot directory vnode is returned with
+ * an iocount on it.
+ */
+int
+vnode_get_snapdir(vnode_t rvp, vnode_t *sdvpp, vfs_context_t ctx)
+{
+ return (VFS_VGET_SNAPDIR(vnode_mount(rvp), sdvpp, ctx));
+}
+
+/*
+ * Get the snapshot vnode.
+ *
+ * If successful, the call returns with an iocount on *rvpp, *sdvpp and
+ * needs nameidone() on ndp.
+ *
+ * If the snapshot vnode exists it is returned in ndp->ni_vp.
+ *
+ * If it returns with an error, *rvpp, *sdvpp are NULL and nameidone() is
+ * not needed.
+ */
+static int
+vnode_get_snapshot(int dirfd, vnode_t *rvpp, vnode_t *sdvpp,
+ user_addr_t name, struct nameidata *ndp, int32_t op,
+#if !CONFIG_TRIGGERS
+ __unused
+#endif
+ enum path_operation pathop,
+ vfs_context_t ctx)
+{
+ int error, i;
+ caddr_t name_buf;
+ size_t name_len;
+ struct vfs_attr vfa;
+
+ *sdvpp = NULLVP;
+ *rvpp = NULLVP;
+
+ error = vnode_getfromfd(ctx, dirfd, rvpp);
+ if (error)
+ return (error);
+
+ if (!vnode_isvroot(*rvpp)) {
+ error = EINVAL;
+ goto out;
+ }
+
+ /* Make sure the filesystem supports snapshots */
+ VFSATTR_INIT(&vfa);
+ VFSATTR_WANTED(&vfa, f_capabilities);
+ if ((vfs_getattr(vnode_mount(*rvpp), &vfa, ctx) != 0) ||
+ !VFSATTR_IS_SUPPORTED(&vfa, f_capabilities) ||
+ !((vfa.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] &
+ VOL_CAP_INT_SNAPSHOT)) ||
+ !((vfa.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] &
+ VOL_CAP_INT_SNAPSHOT))) {
+ error = ENOTSUP;
+ goto out;
+ }
+
+ error = vnode_get_snapdir(*rvpp, sdvpp, ctx);
+ if (error)
+ goto out;
+
+ MALLOC(name_buf, caddr_t, MAXPATHLEN, M_TEMP, M_WAITOK);
+ error = copyinstr(name, name_buf, MAXPATHLEN, &name_len);
+ if (error)
+ goto out1;
+
+ /*
+ * Some sanity checks- name can't be empty, "." or ".." or have slashes.
+ * (the length returned by copyinstr includes the terminating NUL)
+ */
+ if ((name_len == 1) || (name_len == 2 && name_buf[0] == '.') ||
+ (name_len == 3 && name_buf[0] == '.' && name_buf[1] == '.')) {
+ error = EINVAL;
+ goto out1;
+ }
+ for (i = 0; i < (int)name_len && name_buf[i] != '/'; i++);
+ if (i < (int)name_len) {
+ error = EINVAL;
+ goto out1;
+ }
+
+#if CONFIG_MACF
+ if (op == CREATE) {
+ error = mac_mount_check_snapshot_create(ctx, vnode_mount(*rvpp),
+ name_buf);
+ } else if (op == DELETE) {
+ error = mac_mount_check_snapshot_delete(ctx, vnode_mount(*rvpp),
+ name_buf);
+ }
+ if (error)
+ goto out1;
+#endif
+
+ /* Check if the snapshot already exists ... */
+ NDINIT(ndp, op, pathop, USEDVP | NOCACHE | AUDITVNPATH1,
+ UIO_SYSSPACE, CAST_USER_ADDR_T(name_buf), ctx);
+ ndp->ni_dvp = *sdvpp;
+
+ error = namei(ndp);
+out1:
+ FREE(name_buf, M_TEMP);
+out:
+ if (error) {
+ if (*sdvpp) {
+ vnode_put(*sdvpp);
+ *sdvpp = NULLVP;
+ }
+ if (*rvpp) {
+ vnode_put(*rvpp);
+ *rvpp = NULLVP;
+ }
+ }
+ return (error);
+}
+
+/*
+ * create a filesystem snapshot (for supporting filesystems)
+ *
+ * A much simplified version of openat(dirfd, name, O_CREAT | O_EXCL)
+ * We get to the (unnamed) snapshot directory vnode and create the vnode
+ * for the snapshot in it.
+ *
+ * Restrictions:
+ *
+ * a) Passed in name for snapshot cannot have slashes.
+ * b) name can't be "." or ".."
+ *
+ * Since this requires superuser privileges, vnode_authorize calls are not
+ * made.
+ */
+static int
+snapshot_create(int dirfd, user_addr_t name, __unused uint32_t flags,
+ vfs_context_t ctx)
+{
+ vnode_t rvp, snapdvp;
+ int error;
+ struct nameidata namend;
+
+ error = vnode_get_snapshot(dirfd, &rvp, &snapdvp, name, &namend, CREATE,
+ OP_LINK, ctx);
+ if (error)
+ return (error);
+
+ if (namend.ni_vp) {
+ vnode_put(namend.ni_vp);
+ error = EEXIST;
+ } else {
+ struct vnode_attr va;
+ vnode_t vp = NULLVP;
+
+ VATTR_INIT(&va);
+ VATTR_SET(&va, va_type, VREG);
+ VATTR_SET(&va, va_mode, 0);
+
+ error = vn_create(snapdvp, &vp, &namend, &va,
+ VN_CREATE_NOAUTH | VN_CREATE_NOINHERIT, 0, NULL, ctx);
+ if (!error && vp)
+ vnode_put(vp);
+ }
+
+ nameidone(&namend);
+ vnode_put(snapdvp);
+ vnode_put(rvp);
+ return (error);
+}
+
+/*
+ * Delete a Filesystem snapshot
+ *
+ * get the vnode for the unnamed snapshot directory and the snapshot and
+ * delete the snapshot.
+ */
+static int
+snapshot_delete(int dirfd, user_addr_t name, __unused uint32_t flags,
+ vfs_context_t ctx)
+{
+ vnode_t rvp, snapdvp;
+ int error;
+ struct nameidata namend;
+
+ error = vnode_get_snapshot(dirfd, &rvp, &snapdvp, name, &namend, DELETE,
+ OP_UNLINK, ctx);
+ if (error)
+ goto out;
+
+ error = VNOP_REMOVE(snapdvp, namend.ni_vp, &namend.ni_cnd,
+ VNODE_REMOVE_SKIP_NAMESPACE_EVENT, ctx);
- usbp->st_dev = sbp->st_dev;
- usbp->st_ino = sbp->st_ino;
- usbp->st_mode = sbp->st_mode;
- usbp->st_nlink = sbp->st_nlink;
- usbp->st_uid = sbp->st_uid;
- usbp->st_gid = sbp->st_gid;
- usbp->st_rdev = sbp->st_rdev;
-#ifndef _POSIX_C_SOURCE
- usbp->st_atimespec.tv_sec = sbp->st_atimespec.tv_sec;
- usbp->st_atimespec.tv_nsec = sbp->st_atimespec.tv_nsec;
- usbp->st_mtimespec.tv_sec = sbp->st_mtimespec.tv_sec;
- usbp->st_mtimespec.tv_nsec = sbp->st_mtimespec.tv_nsec;
- usbp->st_ctimespec.tv_sec = sbp->st_ctimespec.tv_sec;
- usbp->st_ctimespec.tv_nsec = sbp->st_ctimespec.tv_nsec;
-#else
- usbp->st_atime = sbp->st_atime;
- usbp->st_atimensec = sbp->st_atimensec;
- usbp->st_mtime = sbp->st_mtime;
- usbp->st_mtimensec = sbp->st_mtimensec;
- usbp->st_ctime = sbp->st_ctime;
- usbp->st_ctimensec = sbp->st_ctimensec;
-#endif
- usbp->st_size = sbp->st_size;
- usbp->st_blocks = sbp->st_blocks;
- usbp->st_blksize = sbp->st_blksize;
- usbp->st_flags = sbp->st_flags;
- usbp->st_gen = sbp->st_gen;
- usbp->st_lspare = sbp->st_lspare;
- usbp->st_qspare[0] = sbp->st_qspare[0];
- usbp->st_qspare[1] = sbp->st_qspare[1];
+ vnode_put(namend.ni_vp);
+ nameidone(&namend);
+ vnode_put(snapdvp);
+ vnode_put(rvp);
+out:
+ return (error);
}
/*
- * copy stat64 structure into user_stat64 structure.
+ * Revert a filesystem to a snapshot
+ *
+ * Marks the filesystem to revert to the given snapshot on next mount.
*/
-void munge_user64_stat64(struct stat64 *sbp, struct user64_stat64 *usbp)
-{
- bzero(usbp, sizeof(*usbp));
+static int
+snapshot_revert(int dirfd, user_addr_t name, __unused uint32_t flags,
+ vfs_context_t ctx)
+{
+ int error;
+ vnode_t rvp;
+ mount_t mp;
+ struct fs_snapshot_revert_args revert_data;
+ struct componentname cnp;
+ caddr_t name_buf;
+ size_t name_len;
+
+ error = vnode_getfromfd(ctx, dirfd, &rvp);
+ if (error) {
+ return (error);
+ }
+ mp = vnode_mount(rvp);
- usbp->st_dev = sbp->st_dev;
- usbp->st_ino = sbp->st_ino;
- usbp->st_mode = sbp->st_mode;
- usbp->st_nlink = sbp->st_nlink;
- usbp->st_uid = sbp->st_uid;
- usbp->st_gid = sbp->st_gid;
- usbp->st_rdev = sbp->st_rdev;
-#ifndef _POSIX_C_SOURCE
- usbp->st_atimespec.tv_sec = sbp->st_atimespec.tv_sec;
- usbp->st_atimespec.tv_nsec = sbp->st_atimespec.tv_nsec;
- usbp->st_mtimespec.tv_sec = sbp->st_mtimespec.tv_sec;
- usbp->st_mtimespec.tv_nsec = sbp->st_mtimespec.tv_nsec;
- usbp->st_ctimespec.tv_sec = sbp->st_ctimespec.tv_sec;
- usbp->st_ctimespec.tv_nsec = sbp->st_ctimespec.tv_nsec;
- usbp->st_birthtimespec.tv_sec = sbp->st_birthtimespec.tv_sec;
- usbp->st_birthtimespec.tv_nsec = sbp->st_birthtimespec.tv_nsec;
-#else
- usbp->st_atime = sbp->st_atime;
- usbp->st_atimensec = sbp->st_atimensec;
- usbp->st_mtime = sbp->st_mtime;
- usbp->st_mtimensec = sbp->st_mtimensec;
- usbp->st_ctime = sbp->st_ctime;
- usbp->st_ctimensec = sbp->st_ctimensec;
- usbp->st_birthtime = sbp->st_birthtime;
- usbp->st_birthtimensec = sbp->st_birthtimensec;
+ MALLOC(name_buf, caddr_t, MAXPATHLEN, M_TEMP, M_WAITOK);
+ error = copyinstr(name, name_buf, MAXPATHLEN, &name_len);
+ if (error) {
+ FREE(name_buf, M_TEMP);
+ vnode_put(rvp);
+ return (error);
+ }
+
+#if CONFIG_MACF
+ error = mac_mount_check_snapshot_revert(ctx, mp, name_buf);
+ if (error) {
+ FREE(name_buf, M_TEMP);
+ vnode_put(rvp);
+ return (error);
+ }
#endif
- usbp->st_size = sbp->st_size;
- usbp->st_blocks = sbp->st_blocks;
- usbp->st_blksize = sbp->st_blksize;
- usbp->st_flags = sbp->st_flags;
- usbp->st_gen = sbp->st_gen;
- usbp->st_lspare = sbp->st_lspare;
- usbp->st_qspare[0] = sbp->st_qspare[0];
- usbp->st_qspare[1] = sbp->st_qspare[1];
+
+ /*
+ * Grab mount_iterref so that we can release the vnode,
+ * since VFSIOC_REVERT_SNAPSHOT could conceivably cause a sync.
+ */
+ error = mount_iterref (mp, 0);
+ vnode_put(rvp);
+ if (error) {
+ FREE(name_buf, M_TEMP);
+ return (error);
+ }
+
+ memset(&cnp, 0, sizeof(cnp));
+ cnp.cn_pnbuf = (char *)name_buf;
+ cnp.cn_nameiop = LOOKUP;
+ cnp.cn_flags = ISLASTCN | HASBUF;
+ cnp.cn_pnlen = MAXPATHLEN;
+ cnp.cn_nameptr = cnp.cn_pnbuf;
+ cnp.cn_namelen = (int)name_len;
+ revert_data.sr_cnp = &cnp;
+
+ error = VFS_IOCTL(mp, VFSIOC_REVERT_SNAPSHOT, (caddr_t)&revert_data, 0, ctx);
+ mount_iterdrop(mp);
+ FREE(name_buf, M_TEMP);
+
+ if (error) {
+ /* If there was any error, try again using VNOP_IOCTL */
+
+ vnode_t snapdvp;
+ struct nameidata namend;
+
+ error = vnode_get_snapshot(dirfd, &rvp, &snapdvp, name, &namend, LOOKUP,
+ OP_LOOKUP, ctx);
+ if (error) {
+ return (error);
+ }
+
+
+ error = VNOP_IOCTL(namend.ni_vp, APFSIOC_REVERT_TO_SNAPSHOT, (caddr_t) NULL,
+ 0, ctx);
+
+ vnode_put(namend.ni_vp);
+ nameidone(&namend);
+ vnode_put(snapdvp);
+ vnode_put(rvp);
+ }
+
+ return (error);
}
-void munge_user32_stat64(struct stat64 *sbp, struct user32_stat64 *usbp)
+/*
+ * rename a Filesystem snapshot
+ *
+ * get the vnode for the unnamed snapshot directory and the snapshot and
+ * rename the snapshot. This is a very specialised (and simple) case of
+ * rename(2) (which has to deal with a lot more complications). It differs
+ * slightly from rename(2) in that EEXIST is returned if the new name exists.
+ */
+static int
+snapshot_rename(int dirfd, user_addr_t old, user_addr_t new,
+ __unused uint32_t flags, vfs_context_t ctx)
{
- bzero(usbp, sizeof(*usbp));
+ vnode_t rvp, snapdvp;
+ int error, i;
+ caddr_t newname_buf;
+ size_t name_len;
+ vnode_t fvp;
+ struct nameidata *fromnd, *tond;
+ /* carving out a chunk for structs that are too big to be on stack. */
+ struct {
+ struct nameidata from_node;
+ struct nameidata to_node;
+ } * __rename_data;
- usbp->st_dev = sbp->st_dev;
- usbp->st_ino = sbp->st_ino;
- usbp->st_mode = sbp->st_mode;
- usbp->st_nlink = sbp->st_nlink;
- usbp->st_uid = sbp->st_uid;
- usbp->st_gid = sbp->st_gid;
- usbp->st_rdev = sbp->st_rdev;
-#ifndef _POSIX_C_SOURCE
- usbp->st_atimespec.tv_sec = sbp->st_atimespec.tv_sec;
- usbp->st_atimespec.tv_nsec = sbp->st_atimespec.tv_nsec;
- usbp->st_mtimespec.tv_sec = sbp->st_mtimespec.tv_sec;
- usbp->st_mtimespec.tv_nsec = sbp->st_mtimespec.tv_nsec;
- usbp->st_ctimespec.tv_sec = sbp->st_ctimespec.tv_sec;
- usbp->st_ctimespec.tv_nsec = sbp->st_ctimespec.tv_nsec;
- usbp->st_birthtimespec.tv_sec = sbp->st_birthtimespec.tv_sec;
- usbp->st_birthtimespec.tv_nsec = sbp->st_birthtimespec.tv_nsec;
-#else
- usbp->st_atime = sbp->st_atime;
- usbp->st_atimensec = sbp->st_atimensec;
- usbp->st_mtime = sbp->st_mtime;
- usbp->st_mtimensec = sbp->st_mtimensec;
- usbp->st_ctime = sbp->st_ctime;
- usbp->st_ctimensec = sbp->st_ctimensec;
- usbp->st_birthtime = sbp->st_birthtime;
- usbp->st_birthtimensec = sbp->st_birthtimensec;
+ MALLOC(__rename_data, void *, sizeof(*__rename_data), M_TEMP, M_WAITOK);
+ fromnd = &__rename_data->from_node;
+ tond = &__rename_data->to_node;
+
+ error = vnode_get_snapshot(dirfd, &rvp, &snapdvp, old, fromnd, DELETE,
+ OP_UNLINK, ctx);
+ if (error)
+ goto out;
+ fvp = fromnd->ni_vp;
+
+ MALLOC(newname_buf, caddr_t, MAXPATHLEN, M_TEMP, M_WAITOK);
+ error = copyinstr(new, newname_buf, MAXPATHLEN, &name_len);
+ if (error)
+ goto out1;
+
+ /*
+ * Some sanity checks- new name can't be empty, "." or ".." or have
+ * slashes.
+ * (the length returned by copyinstr includes the terminating NUL)
+ *
+	 * The FS rename VNOP is supposed to handle this but we'll pick it
+ * off here itself.
+ */
+ if ((name_len == 1) || (name_len == 2 && newname_buf[0] == '.') ||
+ (name_len == 3 && newname_buf[0] == '.' && newname_buf[1] == '.')) {
+ error = EINVAL;
+ goto out1;
+ }
+ for (i = 0; i < (int)name_len && newname_buf[i] != '/'; i++);
+ if (i < (int)name_len) {
+ error = EINVAL;
+ goto out1;
+ }
+
+#if CONFIG_MACF
+ error = mac_mount_check_snapshot_create(ctx, vnode_mount(rvp),
+ newname_buf);
+ if (error)
+ goto out1;
#endif
- usbp->st_size = sbp->st_size;
- usbp->st_blocks = sbp->st_blocks;
- usbp->st_blksize = sbp->st_blksize;
- usbp->st_flags = sbp->st_flags;
- usbp->st_gen = sbp->st_gen;
- usbp->st_lspare = sbp->st_lspare;
- usbp->st_qspare[0] = sbp->st_qspare[0];
- usbp->st_qspare[1] = sbp->st_qspare[1];
+
+ NDINIT(tond, RENAME, OP_RENAME, USEDVP | NOCACHE | AUDITVNPATH2,
+ UIO_SYSSPACE, CAST_USER_ADDR_T(newname_buf), ctx);
+ tond->ni_dvp = snapdvp;
+
+ error = namei(tond);
+ if (error) {
+ goto out2;
+ } else if (tond->ni_vp) {
+ /*
+ * snapshot rename behaves differently than rename(2) - if the
+ * new name exists, EEXIST is returned.
+ */
+ vnode_put(tond->ni_vp);
+ error = EEXIST;
+ goto out2;
+ }
+
+ error = VNOP_RENAME(snapdvp, fvp, &fromnd->ni_cnd, snapdvp, NULLVP,
+ &tond->ni_cnd, ctx);
+
+out2:
+ nameidone(tond);
+out1:
+ FREE(newname_buf, M_TEMP);
+ vnode_put(fvp);
+ vnode_put(snapdvp);
+ vnode_put(rvp);
+ nameidone(fromnd);
+out:
+ FREE(__rename_data, M_TEMP);
+ return (error);
}
/*
- * Purge buffer cache for simulating cold starts
+ * Mount a Filesystem snapshot
+ *
+ * get the vnode for the unnamed snapshot directory and the snapshot and
+ * mount the snapshot.
*/
-static int vnode_purge_callback(struct vnode *vp, __unused void *cargs)
+static int
+snapshot_mount(int dirfd, user_addr_t name, user_addr_t directory,
+ __unused user_addr_t mnt_data, __unused uint32_t flags, vfs_context_t ctx)
{
- ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL /* off_t *resid_off */, UBC_PUSHALL | UBC_INVALIDATE);
+ vnode_t rvp, snapdvp, snapvp, vp, pvp;
+ int error;
+ struct nameidata *snapndp, *dirndp;
+ /* carving out a chunk for structs that are too big to be on stack. */
+ struct {
+ struct nameidata snapnd;
+ struct nameidata dirnd;
+ } * __snapshot_mount_data;
- return VNODE_RETURNED;
-}
+ MALLOC(__snapshot_mount_data, void *, sizeof(*__snapshot_mount_data),
+ M_TEMP, M_WAITOK);
+ snapndp = &__snapshot_mount_data->snapnd;
+ dirndp = &__snapshot_mount_data->dirnd;
-static int vfs_purge_callback(mount_t mp, __unused void * arg)
-{
- vnode_iterate(mp, VNODE_WAIT | VNODE_ITERATE_ALL, vnode_purge_callback, NULL);
+ error = vnode_get_snapshot(dirfd, &rvp, &snapdvp, name, snapndp, LOOKUP,
+ OP_LOOKUP, ctx);
+ if (error)
+ goto out;
- return VFS_RETURNED;
+ snapvp = snapndp->ni_vp;
+ if (!vnode_mount(rvp) || (vnode_mount(rvp) == dead_mountp)) {
+ error = EIO;
+ goto out1;
+ }
+
+ /* Get the vnode to be covered */
+ NDINIT(dirndp, LOOKUP, OP_MOUNT, FOLLOW | AUDITVNPATH1 | WANTPARENT,
+ UIO_USERSPACE, directory, ctx);
+ error = namei(dirndp);
+ if (error)
+ goto out1;
+
+ vp = dirndp->ni_vp;
+ pvp = dirndp->ni_dvp;
+
+ if ((vp->v_flag & VROOT) && (vp->v_mount->mnt_flag & MNT_ROOTFS)) {
+ error = EINVAL;
+ } else {
+ mount_t mp = vnode_mount(rvp);
+ struct fs_snapshot_mount_args smnt_data;
+
+ smnt_data.sm_mp = mp;
+ smnt_data.sm_cnp = &snapndp->ni_cnd;
+ error = mount_common(mp->mnt_vfsstat.f_fstypename, pvp, vp,
+ &dirndp->ni_cnd, CAST_USER_ADDR_T(&smnt_data), flags & MNT_DONTBROWSE,
+ KERNEL_MOUNT_SNAPSHOT, NULL, FALSE, ctx);
+ }
+
+ vnode_put(vp);
+ vnode_put(pvp);
+ nameidone(dirndp);
+out1:
+ vnode_put(snapvp);
+ vnode_put(snapdvp);
+ vnode_put(rvp);
+ nameidone(snapndp);
+out:
+ FREE(__snapshot_mount_data, M_TEMP);
+ return (error);
+}
+
+/*
+ * Root from a snapshot of the filesystem
+ *
+ * Marks the filesystem to root from the given snapshot on next boot.
+ */
+static int
+snapshot_root(int dirfd, user_addr_t name, __unused uint32_t flags,
+ vfs_context_t ctx)
+{
+ int error;
+ vnode_t rvp;
+ mount_t mp;
+ struct fs_snapshot_root_args root_data;
+ struct componentname cnp;
+ caddr_t name_buf;
+ size_t name_len;
+
+ error = vnode_getfromfd(ctx, dirfd, &rvp);
+ if (error) {
+ return (error);
+ }
+ mp = vnode_mount(rvp);
+
+ MALLOC(name_buf, caddr_t, MAXPATHLEN, M_TEMP, M_WAITOK);
+ error = copyinstr(name, name_buf, MAXPATHLEN, &name_len);
+ if (error) {
+ FREE(name_buf, M_TEMP);
+ vnode_put(rvp);
+ return (error);
+ }
+
+ // XXX MAC checks ?
+
+ /*
+ * Grab mount_iterref so that we can release the vnode,
+ * since VFSIOC_ROOT_SNAPSHOT could conceivably cause a sync.
+ */
+ error = mount_iterref (mp, 0);
+ vnode_put(rvp);
+ if (error) {
+ FREE(name_buf, M_TEMP);
+ return (error);
+ }
+
+ memset(&cnp, 0, sizeof(cnp));
+ cnp.cn_pnbuf = (char *)name_buf;
+ cnp.cn_nameiop = LOOKUP;
+ cnp.cn_flags = ISLASTCN | HASBUF;
+ cnp.cn_pnlen = MAXPATHLEN;
+ cnp.cn_nameptr = cnp.cn_pnbuf;
+ cnp.cn_namelen = (int)name_len;
+ root_data.sr_cnp = &cnp;
+
+ error = VFS_IOCTL(mp, VFSIOC_ROOT_SNAPSHOT, (caddr_t)&root_data, 0, ctx);
+
+ mount_iterdrop(mp);
+ FREE(name_buf, M_TEMP);
+
+ return (error);
}
+/*
+ * FS snapshot operations dispatcher
+ */
int
-vfs_purge(__unused struct proc *p, __unused struct vfs_purge_args *uap, __unused int32_t *retval)
+fs_snapshot(__unused proc_t p, struct fs_snapshot_args *uap,
+ __unused int32_t *retval)
{
- if (!kauth_cred_issuser(kauth_cred_get()))
- return EPERM;
+ int error;
+ vfs_context_t ctx = vfs_context_current();
- vfs_iterate(0/* flags */, vfs_purge_callback, NULL);
+ AUDIT_ARG(fd, uap->dirfd);
+ AUDIT_ARG(value32, uap->op);
- return 0;
-}
+ error = priv_check_cred(vfs_context_ucred(ctx), PRIV_VFS_SNAPSHOT, 0);
+ if (error)
+ return (error);
+
+ switch (uap->op) {
+ case SNAPSHOT_OP_CREATE:
+ error = snapshot_create(uap->dirfd, uap->name1, uap->flags, ctx);
+ break;
+ case SNAPSHOT_OP_DELETE:
+ error = snapshot_delete(uap->dirfd, uap->name1, uap->flags, ctx);
+ break;
+ case SNAPSHOT_OP_RENAME:
+ error = snapshot_rename(uap->dirfd, uap->name1, uap->name2,
+ uap->flags, ctx);
+ break;
+ case SNAPSHOT_OP_MOUNT:
+ error = snapshot_mount(uap->dirfd, uap->name1, uap->name2,
+ uap->data, uap->flags, ctx);
+ break;
+ case SNAPSHOT_OP_REVERT:
+ error = snapshot_revert(uap->dirfd, uap->name1, uap->flags, ctx);
+ break;
+#if CONFIG_MNT_ROOTSNAP
+ case SNAPSHOT_OP_ROOT:
+ error = snapshot_root(uap->dirfd, uap->name1, uap->flags, ctx);
+ break;
+#endif /* CONFIG_MNT_ROOTSNAP */
+ default:
+ error = ENOSYS;
+ }
+ return (error);
+}