/*
- * Copyright (c) 1995-2012 Apple Inc. All rights reserved.
+ * Copyright (c) 1995-2015 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/fsctl.h>
#include <sys/ubc_internal.h>
#include <sys/disk.h>
+#include <sys/content_protection.h>
#include <machine/cons.h>
#include <machine/limits.h>
#include <miscfs/specfs/specdev.h>
#include <libkern/OSAtomic.h>
#include <pexpert/pexpert.h>
+#include <IOKit/IOBSD.h>
#if CONFIG_MACF
#include <security/mac.h>
static int getutimes(user_addr_t usrtvp, struct timespec *tsp);
static int setutimes(vfs_context_t ctx, vnode_t vp, const struct timespec *ts, int nullflag);
static int sync_callback(mount_t, void *);
+static void sync_thread(void *, __unused wait_result_t);
+static int sync_async(int);
static int munge_statfs(struct mount *mp, struct vfsstatfs *sfsp,
user_addr_t bufp, int *sizep, boolean_t is_64_bit,
boolean_t partial_copy);
int prepare_coveredvp(vnode_t vp, vfs_context_t ctx, struct componentname *cnp, const char *fsname, boolean_t skip_auth);
+struct fd_vn_data * fg_vn_data_alloc(void);
+
+/*
+ * Max retries for ENOENT returns from vn_authorize_{rmdir, unlink, rename}
+ * Concurrent lookups (or lookups by ids) on hard links can cause the
+ * vn_getpath (which does not re-enter the filesystem as vn_getpath_fsenter
+ * does) to return ENOENT as the path cannot be returned from the name cache
+ * alone. We have no option but to retry and hope to get one namei->reverse path
+ * generation done without an intervening lookup, lookup by id on the hard link
+ * item. This is only an issue for MAC hooks which cannot reenter the filesystem
+ * which currently are the MAC hooks for rename, unlink and rmdir.
+ */
+#define MAX_AUTHORIZE_ENOENT_RETRIES 1024
+
+static int rmdirat_internal(vfs_context_t, int, user_addr_t, enum uio_seg);
+
+static int fsgetpath_internal(vfs_context_t, int, uint64_t, vm_size_t, caddr_t, int *);
+
#ifdef CONFIG_IMGSRC_ACCESS
static int authorize_devpath_and_update_mntfromname(mount_t mp, user_addr_t devpath, vnode_t *devvpp, vfs_context_t ctx);
static int place_mount_and_checkdirs(mount_t mp, vnode_t vp, vfs_context_t ctx);
int sync_internal(void);
__private_extern__
-int open1(vfs_context_t, struct nameidata *, int, struct vnode_attr *, int32_t *);
+int unlink1(vfs_context_t, vnode_t, user_addr_t, enum uio_seg, int);
-__private_extern__
-int unlink1(vfs_context_t, struct nameidata *, int);
+extern lck_grp_t *fd_vn_lck_grp;
+extern lck_grp_attr_t *fd_vn_lck_grp_attr;
+extern lck_attr_t *fd_vn_lck_attr;
/*
* incremented each time a mount or unmount operation occurs
/* counts number of mount and unmount operations */
unsigned int vfs_nummntops=0;
-extern struct fileops vnops;
+extern const struct fileops vnops;
+#if CONFIG_APPLEDOUBLE
extern errno_t rmdir_remove_orphaned_appleDouble(vnode_t, vfs_context_t, int *);
+#endif /* CONFIG_APPLEDOUBLE */
+typedef uint32_t vfs_rename_flags_t;
+#if CONFIG_SECLUDED_RENAME
+enum {
+ VFS_SECLUDE_RENAME = 0x00000001
+};
+#endif
/*
* Virtual File System System Calls
*/
-#if NFSCLIENT
+#if NFSCLIENT || DEVFS
/*
* Private in-kernel mounting spi (NFS only, not exported)
*/
return (error);
}
-#endif /* NFSCLIENT */
+#endif /* NFSCLIENT || DEVFS */
/*
* Mount a file system.
int
__mac_mount(struct proc *p, register struct __mac_mount_args *uap, __unused int32_t *retval)
{
- vnode_t pvp, vp;
+ vnode_t pvp = NULL;
+ vnode_t vp = NULL;
+ int need_nameidone = 0;
vfs_context_t ctx = vfs_context_current();
char fstypename[MFSNAMELEN];
struct nameidata nd;
char *labelstr = NULL;
int flags = uap->flags;
int error;
+#if CONFIG_IMGSRC_ACCESS || CONFIG_MACF
boolean_t is_64bit = IS_64BIT_PROCESS(p);
-
+#else
+#pragma unused(p)
+#endif
/*
* Get the fs type name from user space
*/
/*
* Get the vnode to be covered
*/
- NDINIT(&nd, LOOKUP, OP_MOUNT, NOTRIGGER | FOLLOW | AUDITVNPATH1 | WANTPARENT,
+ NDINIT(&nd, LOOKUP, OP_MOUNT, FOLLOW | AUDITVNPATH1 | WANTPARENT,
UIO_USERSPACE, uap->path, ctx);
error = namei(&nd);
- if (error)
- return (error);
+ if (error) {
+ goto out;
+ }
+ need_nameidone = 1;
vp = nd.ni_vp;
pvp = nd.ni_dvp;
AUDIT_ARG(fflags, flags);
+#if SECURE_KERNEL
+ if (flags & MNT_UNION) {
+ /* No union mounts on release kernels */
+ error = EPERM;
+ goto out;
+ }
+#endif
+
if ((vp->v_flag & VROOT) &&
- (vp->v_mount->mnt_flag & MNT_ROOTFS)) {
+ (vp->v_mount->mnt_flag & MNT_ROOTFS)) {
+ if (!(flags & MNT_UNION)) {
flags |= MNT_UPDATE;
- /*
- * See 7392553 for more details on why this check exists.
- * Suffice to say: If this check is ON and something tries
- * to mount the rootFS RW, we'll turn off the codesign
- * bitmap optimization.
- */
+ }
+ else {
+ /*
+ * For a union mount on '/', treat it as fresh
+ * mount instead of update.
+ * Otherwise, union mounting on '/' used to panic the
+ * system before, since mnt_vnodecovered was found to
+ * be NULL for '/' which is required for unionlookup
+ * after it gets ENOENT on union mount.
+ */
+ flags = (flags & ~(MNT_UPDATE));
+ }
+
+#if SECURE_KERNEL
+ if ((flags & MNT_RDONLY) == 0) {
+ /* Release kernels are not allowed to mount "/" as rw */
+ error = EPERM;
+ goto out;
+ }
+#endif
+ /*
+ * See 7392553 for more details on why this check exists.
+ * Suffice to say: If this check is ON and something tries
+ * to mount the rootFS RW, we'll turn off the codesign
+ * bitmap optimization.
+ */
#if CHECK_CS_VALIDATION_BITMAP
- if ( !(flags & MNT_RDONLY) ) {
+ if ((flags & MNT_RDONLY) == 0 ) {
root_fs_upgrade_try = TRUE;
}
#endif
error = mount_common(fstypename, pvp, vp, &nd.ni_cnd, uap->data, flags, 0,
labelstr, FALSE, ctx);
+
out:
+
#if CONFIG_MACF
if (labelstr)
FREE(labelstr, M_MACTEMP);
#endif /* CONFIG_MACF */
- vnode_put(vp);
- vnode_put(pvp);
- nameidone(&nd);
+ if (vp) {
+ vnode_put(vp);
+ }
+ if (pvp) {
+ vnode_put(pvp);
+ }
+ if (need_nameidone) {
+ nameidone(&nd);
+ }
return (error);
}
struct componentname *cnp, user_addr_t fsmountargs, int flags, uint32_t internal_flags,
char *labelstr, boolean_t kernelmount, vfs_context_t ctx)
{
+#if !CONFIG_MACF
+#pragma unused(labelstr)
+#endif
struct vnode *devvp = NULLVP;
struct vnode *device_vnode = NULLVP;
#if CONFIG_MACF
mp->mnt_vtable = vfsp;
//mp->mnt_stat.f_type = vfsp->vfc_typenum;
mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
- strncpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSTYPENAMELEN);
- strncpy(mp->mnt_vfsstat.f_mntonname, cnp->cn_pnbuf, MAXPATHLEN);
+ strlcpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSTYPENAMELEN);
+ strlcpy(mp->mnt_vfsstat.f_mntonname, cnp->cn_pnbuf, MAXPATHLEN);
mp->mnt_vnodecovered = vp;
mp->mnt_vfsstat.f_owner = kauth_cred_getuid(vfs_context_ucred(ctx));
mp->mnt_throttle_mask = LOWPRI_MAX_NUM_DEV - 1;
/* XXX 3762912 hack to support HFS filesystem 'owner' - filesystem may update later */
vfs_setowner(mp, KAUTH_UID_NONE, KAUTH_GID_NONE);
-#if NFSCLIENT
+#if NFSCLIENT || DEVFS
if (kernelmount)
mp->mnt_kern_flag |= MNTK_KERNEL_MOUNT;
if ((internal_flags & KERNEL_MOUNT_PERMIT_UNMOUNT) != 0)
mp->mnt_kern_flag |= MNTK_PERMIT_UNMOUNT;
-#endif /* NFSCLIENT */
+#endif /* NFSCLIENT || DEVFS */
update:
/*
if ( (error = namei(&nd)) )
goto out1;
- strncpy(mp->mnt_vfsstat.f_mntfromname, nd.ni_cnd.cn_pnbuf, MAXPATHLEN);
+ strlcpy(mp->mnt_vfsstat.f_mntfromname, nd.ni_cnd.cn_pnbuf, MAXPATHLEN);
devvp = nd.ni_vp;
nameidone(&nd);
/* Now that mount is setup, notify the listeners */
vfs_notify_mount(pvp);
+ IOBSDMountChange(mp, kIOMountChangeMount);
+
} else {
/* If we fail a fresh mount, there should be no vnodes left hooked into the mountpoint. */
if (mp->mnt_vnodelist.tqh_first != NULL) {
int
prepare_coveredvp(vnode_t vp, vfs_context_t ctx, struct componentname *cnp, const char *fsname, boolean_t skip_auth)
{
+#if !CONFIG_MACF
+#pragma unused(cnp,fsname)
+#endif
struct vnode_attr va;
int error;
placed = TRUE;
- strncpy(old_mntonname, mp->mnt_vfsstat.f_mntonname, MAXPATHLEN);
- strncpy(mp->mnt_vfsstat.f_mntonname, cnp->cn_pnbuf, MAXPATHLEN);
+ strlcpy(old_mntonname, mp->mnt_vfsstat.f_mntonname, MAXPATHLEN);
+ strlcpy(mp->mnt_vfsstat.f_mntonname, cnp->cn_pnbuf, MAXPATHLEN);
/* Forbid future moves */
mount_lock(mp);
return 0;
out3:
- strncpy(mp->mnt_vfsstat.f_mntonname, old_mntonname, MAXPATHLEN);
+ strlcpy(mp->mnt_vfsstat.f_mntonname, old_mntonname, MAXPATHLEN);
mount_lock(mp);
mp->mnt_kern_flag &= ~(MNTK_HAS_MOVED);
vnode_t tvp;
int err;
struct cdirargs cdr;
- struct uthread * uth = get_bsdthread_info(current_thread());
if (olddp->v_usecount == 1)
return(0);
- if (uth != (struct uthread *)0)
- uth->uu_notrigger = 1;
err = VFS_ROOT(olddp->v_mountedhere, &newdp, ctx);
- if (uth != (struct uthread *)0)
- uth->uu_notrigger = 0;
if (err != 0) {
#if DIAGNOSTIC
struct nameidata nd;
vfs_context_t ctx = vfs_context_current();
- NDINIT(&nd, LOOKUP, OP_UNMOUNT, NOTRIGGER | FOLLOW | AUDITVNPATH1,
+ NDINIT(&nd, LOOKUP, OP_UNMOUNT, FOLLOW | AUDITVNPATH1,
UIO_USERSPACE, uap->path, ctx);
error = namei(&nd);
if (error)
int lflags = 0;
struct vnode *devvp = NULLVP;
#if CONFIG_TRIGGERS
+ proc_t p = vfs_context_proc(ctx);
int did_vflush = 0;
+ int pflags_save = 0;
#endif /* CONFIG_TRIGGERS */
- if (flags & MNT_FORCE)
- forcedunmount = 1;
-
mount_lock(mp);
- /* XXX post jaguar fix LK_DRAIN - then clean this up */
- if ((flags & MNT_FORCE)) {
- mp->mnt_kern_flag |= MNTK_FRCUNMOUNT;
- mp->mnt_lflag |= MNT_LFORCE;
- }
+
+ /*
+ * If an unmount is already in progress, just return EBUSY.
+ * Even a forced unmount cannot override.
+ */
if (mp->mnt_lflag & MNT_LUNMOUNT) {
- mp->mnt_lflag |= MNT_LWAIT;
- if(withref != 0)
+ if (withref != 0)
mount_drop(mp, 1);
- msleep((caddr_t)mp, &mp->mnt_mlock, (PVFS | PDROP), "dounmount", NULL);
- /*
- * The prior unmount attempt has probably succeeded.
- * Do not dereference mp here - returning EBUSY is safest.
- */
+ mount_unlock(mp);
return (EBUSY);
}
+
+ if (flags & MNT_FORCE) {
+ forcedunmount = 1;
+ mp->mnt_lflag |= MNT_LFORCE;
+ }
+
+#if CONFIG_TRIGGERS
+ if (flags & MNT_NOBLOCK && p != kernproc)
+ pflags_save = OSBitOrAtomic(P_NOREMOTEHANG, &p->p_flag);
+#endif
+
mp->mnt_kern_flag |= MNTK_UNMOUNT;
mp->mnt_lflag |= MNT_LUNMOUNT;
mp->mnt_flag &=~ MNT_ASYNC;
mp->mnt_realrootvp = NULLVP;
mount_unlock(mp);
+ if (forcedunmount && (flags & MNT_LNOSUB) == 0) {
+ /*
+ * Force unmount any mounts in this filesystem.
+ * If any unmounts fail - just leave them dangling.
+ * Avoids recursion.
+ */
+ (void) dounmount_submounts(mp, flags | MNT_LNOSUB, ctx);
+ }
+
/*
* taking the name_cache_lock exclusively will
* insure that everyone is out of the fast path who
}
}
+ IOBSDMountChange(mp, kIOMountChangeUnmount);
+
#if CONFIG_TRIGGERS
vfs_nested_trigger_unmounts(mp, flags, ctx);
did_vflush = 1;
/* mark the mount point hook in the vp but not drop the ref yet */
if ((coveredvp = mp->mnt_vnodecovered) != NULLVP) {
- vnode_getwithref(coveredvp);
+ /*
+ * The covered vnode needs special handling. Trying to get an
+ * iocount must not block here as this may lead to deadlocks
+ * if the Filesystem to which the covered vnode belongs is
+ * undergoing forced unmounts. Since we hold a usecount, the
+ * vnode cannot be reused (it can, however, still be terminated)
+ */
+ vnode_getalways(coveredvp);
vnode_lock_spin(coveredvp);
mp->mnt_crossref++;
coveredvp->v_mountedhere = (struct mount *)0;
+ CLR(coveredvp->v_flag, VMOUNT);
vnode_unlock(coveredvp);
vnode_put(coveredvp);
needwakeup = 1;
}
-
#if CONFIG_TRIGGERS
+ if (flags & MNT_NOBLOCK && p != kernproc) {
+ // Restore P_NOREMOTEHANG bit to its previous value
+ if ((pflags_save & P_NOREMOTEHANG) == 0)
+ OSBitAndAtomic(~((uint32_t) P_NOREMOTEHANG), &p->p_flag);
+ }
+
/*
* Callback and context are set together under the mount lock, and
* never cleared, so we're safe to examine them here, drop the lock,
if (!error) {
if ((coveredvp != NULLVP)) {
- vnode_t pvp;
+ vnode_t pvp = NULLVP;
- vnode_getwithref(coveredvp);
- pvp = vnode_getparent(coveredvp);
- vnode_rele(coveredvp);
+ /*
+ * The covered vnode needs special handling. Trying to
+ * get an iocount must not block here as this may lead
+ * to deadlocks if the Filesystem to which the covered
+ * vnode belongs is undergoing forced unmounts. Since we
+ * hold a usecount, the vnode cannot be reused
+ * (it can, however, still be terminated).
+ */
+ vnode_getalways(coveredvp);
mount_dropcrossref(mp, coveredvp, 0);
+ /*
+ * We'll _try_ to detect if this really needs to be
+ * done. The coveredvp can only be in termination (or
+ * terminated) if the coveredvp's mount point is in a
+ * forced unmount (or has been) since we still hold the
+ * ref.
+ */
+ if (!vnode_isrecycled(coveredvp)) {
+ pvp = vnode_getparent(coveredvp);
#if CONFIG_TRIGGERS
- if (coveredvp->v_resolve)
- vnode_trigger_rearm(coveredvp, ctx);
-#endif
+ if (coveredvp->v_resolve) {
+ vnode_trigger_rearm(coveredvp, ctx);
+ }
+#endif
+ }
+
+ vnode_rele(coveredvp);
vnode_put(coveredvp);
+ coveredvp = NULLVP;
if (pvp) {
lock_vnode_and_post(pvp, NOTE_WRITE);
return (error);
}
+/*
+ * Unmount any mounts in this filesystem.
+ */
+void
+dounmount_submounts(struct mount *mp, int flags, vfs_context_t ctx)
+{
+ mount_t smp;
+ fsid_t *fsids, fsid;
+ int fsids_sz;
+ int count = 0, i, m = 0;
+ vnode_t vp;
+
+ mount_list_lock();
+
+ // Get an array to hold the submounts fsids.
+ TAILQ_FOREACH(smp, &mountlist, mnt_list)
+ count++;
+ fsids_sz = count * sizeof(fsid_t);
+ MALLOC(fsids, fsid_t *, fsids_sz, M_TEMP, M_NOWAIT);
+ if (fsids == NULL) {
+ mount_list_unlock();
+ goto out;
+ }
+ fsids[0] = mp->mnt_vfsstat.f_fsid; // Prime the pump
+
+ /*
+ * Fill the array with submount fsids.
+ * Since mounts are always added to the tail of the mount list, the
+ * list is always in mount order.
+ * For each mount check if the mounted-on vnode belongs to a
+ * mount that's already added to our array of mounts to be unmounted.
+ */
+ for (smp = TAILQ_NEXT(mp, mnt_list); smp; smp = TAILQ_NEXT(smp, mnt_list)) {
+ vp = smp->mnt_vnodecovered;
+ if (vp == NULL)
+ continue;
+ fsid = vnode_mount(vp)->mnt_vfsstat.f_fsid; // Underlying fsid
+ for (i = 0; i <= m; i++) {
+ if (fsids[i].val[0] == fsid.val[0] &&
+ fsids[i].val[1] == fsid.val[1]) {
+ fsids[++m] = smp->mnt_vfsstat.f_fsid;
+ break;
+ }
+ }
+ }
+ mount_list_unlock();
+
+ // Unmount the submounts in reverse order. Ignore errors.
+ for (i = m; i > 0; i--) {
+ smp = mount_list_lookupby_fsid(&fsids[i], 0, 1);
+ if (smp) {
+ mount_ref(smp, 0);
+ mount_iterdrop(smp);
+ (void) dounmount(smp, flags, 1, ctx);
+ }
+ }
+out:
+ if (fsids)
+ FREE(fsids, M_TEMP);
+}
+
void
mount_dropcrossref(mount_t mp, vnode_t dp, int need_put)
{
*/
#if DIAGNOSTIC
int syncprt = 0;
-struct ctldebug debug0 = { "syncprt", &syncprt };
#endif
int print_vmpage_stat=0;
+int sync_timeout = 60; // Sync time limit (sec)
static int
-sync_callback(mount_t mp, void * arg)
+sync_callback(mount_t mp, __unused void *arg)
{
- int asyncflag;
-
if ((mp->mnt_flag & MNT_RDONLY) == 0) {
- asyncflag = mp->mnt_flag & MNT_ASYNC;
- mp->mnt_flag &= ~MNT_ASYNC;
- VFS_SYNC(mp, arg ? MNT_WAIT : MNT_NOWAIT, vfs_context_current());
- if (asyncflag)
- mp->mnt_flag |= MNT_ASYNC;
- }
- return(VFS_RETURNED);
-}
+ int asyncflag = mp->mnt_flag & MNT_ASYNC;
+ mp->mnt_flag &= ~MNT_ASYNC;
+ VFS_SYNC(mp, arg ? MNT_WAIT : MNT_NOWAIT, vfs_context_kernel());
+ if (asyncflag)
+ mp->mnt_flag |= MNT_ASYNC;
+ }
-#include <kern/clock.h>
-
-clock_sec_t sync_wait_time = 0;
+ return (VFS_RETURNED);
+}
/* ARGSUSED */
int
sync(__unused proc_t p, __unused struct sync_args *uap, __unused int32_t *retval)
{
- clock_nsec_t nsecs;
-
- vfs_iterate(LK_NOWAIT, sync_callback, (void *)0);
+ vfs_iterate(LK_NOWAIT, sync_callback, NULL);
- {
- static fsid_t fsid = { { 0, 0 } };
-
- clock_get_calendar_microtime(&sync_wait_time, &nsecs);
- vfs_event_signal(&fsid, VQ_SYNCEVENT, (intptr_t)NULL);
- wakeup((caddr_t)&sync_wait_time);
+ if (print_vmpage_stat) {
+ vm_countdirtypages();
}
- {
- if(print_vmpage_stat) {
+#if DIAGNOSTIC
+ if (syncprt)
+ vfs_bufstats();
+#endif /* DIAGNOSTIC */
+ return 0;
+}
+
+static void
+sync_thread(void *arg, __unused wait_result_t wr)
+{
+ int *timeout = (int *) arg;
+
+ vfs_iterate(LK_NOWAIT, sync_callback, NULL);
+
+ if (timeout)
+ wakeup((caddr_t) timeout);
+ if (print_vmpage_stat) {
vm_countdirtypages();
}
- }
+
#if DIAGNOSTIC
if (syncprt)
vfs_bufstats();
#endif /* DIAGNOSTIC */
- return (0);
}
/*
- * Change filesystem quotas.
+ * Sync in a separate thread so we can time out if it blocks.
*/
-#if QUOTA
-static int quotactl_funneled(proc_t p, struct quotactl_args *uap, int32_t *retval);
-
-int
-quotactl(proc_t p, struct quotactl_args *uap, int32_t *retval)
+static int
+sync_async(int timeout)
{
- boolean_t funnel_state;
+ thread_t thd;
int error;
-
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- error = quotactl_funneled(p, uap, retval);
- thread_funnel_set(kernel_flock, funnel_state);
- return(error);
+ struct timespec ts = {timeout, 0};
+
+ lck_mtx_lock(sync_mtx_lck);
+ if (kernel_thread_start(sync_thread, &timeout, &thd) != KERN_SUCCESS) {
+ printf("sync_thread failed\n");
+ lck_mtx_unlock(sync_mtx_lck);
+ return (0);
+ }
+
+ error = msleep((caddr_t) &timeout, sync_mtx_lck, (PVFS | PDROP | PCATCH), "sync_thread", &ts);
+ if (error) {
+ printf("sync timed out: %d sec\n", timeout);
+ }
+ thread_deallocate(thd);
+
+ return (0);
}
-static int
-quotactl_funneled(proc_t p, struct quotactl_args *uap, __unused int32_t *retval)
+/*
+ * An in-kernel sync for power management to call.
+ */
+__private_extern__ int
+sync_internal(void)
+{
+ (void) sync_async(sync_timeout);
+
+ return 0;
+} /* end of sync_internal call */
+
+/*
+ * Change filesystem quotas.
+ */
+#if QUOTA
+int
+quotactl(proc_t p, struct quotactl_args *uap, __unused int32_t *retval)
{
struct mount *mp;
int error, quota_cmd, quota_status;
/* uap->arg is a pointer to a dqblk structure we need to copy out to */
if (error == 0) {
if (proc_is64bit(p)) {
- struct user_dqblk my_dqblk64;
+ struct user_dqblk my_dqblk64 = {.dqb_bhardlimit = 0};
munge_dqblk(&my_dqblk, &my_dqblk64, TRUE);
error = copyout((caddr_t)&my_dqblk64, uap->arg, sizeof (my_dqblk64));
}
vfs_context_t ctx = vfs_context_current();
vnode_t vp;
- NDINIT(&nd, LOOKUP, OP_STATFS, NOTRIGGER | FOLLOW | AUDITVNPATH1,
+ NDINIT(&nd, LOOKUP, OP_STATFS, FOLLOW | AUDITVNPATH1,
UIO_USERSPACE, uap->path, ctx);
error = namei(&nd);
if (error)
nameidone(&nd);
error = vfs_update_vfsstat(mp, ctx, VFS_USER_EVENT);
- vnode_put(vp);
- if (error != 0)
+ if (error != 0) {
+ vnode_put(vp);
return (error);
+ }
error = munge_statfs(mp, sp, uap->buf, NULL, IS_64BIT_PROCESS(p), TRUE);
+ vnode_put(vp);
return (error);
}
vfs_context_t ctxp = vfs_context_current();
vnode_t vp;
- NDINIT(&nd, LOOKUP, OP_STATFS, NOTRIGGER | FOLLOW | AUDITVNPATH1,
+ NDINIT(&nd, LOOKUP, OP_STATFS, FOLLOW | AUDITVNPATH1,
UIO_USERSPACE, uap->path, ctxp);
error = namei(&nd);
if (error)
nameidone(&nd);
error = vfs_update_vfsstat(mp, ctxp, VFS_USER_EVENT);
- vnode_put(vp);
- if (error != 0)
+ if (error != 0) {
+ vnode_put(vp);
return (error);
+ }
error = statfs64_common(mp, sp, uap->buf);
+ vnode_put(vp);
return (error);
}
fstp->sfsp += my_size;
if (fstp->mp) {
+#if CONFIG_MACF
error = mac_mount_label_get(mp, *fstp->mp);
if (error) {
fstp->error = error;
return(VFS_RETURNED_DONE);
}
+#endif
fstp->mp++;
}
}
return (0);
}
+/*
+ * Gets the vnode associated with the file descriptor passed
+ * in as input.
+ *
+ * INPUT
+ * ctx - vfs context of caller
+ * fd - file descriptor for which vnode is required.
+ * vpp - Pointer to pointer to vnode to be returned.
+ *
+ * The vnode is returned with an iocount so any vnode obtained
+ * by this call needs a vnode_put
+ *
+ */
+static int
+vnode_getfromfd(vfs_context_t ctx, int fd, vnode_t *vpp)
+{
+ int error;
+ vnode_t vp;
+ struct fileproc *fp;
+ proc_t p = vfs_context_proc(ctx);
+
+ *vpp = NULLVP;
+
+ error = fp_getfvp(p, fd, &fp, &vp);
+ if (error)
+ return (error);
+
+ error = vnode_getwithref(vp);
+ if (error) {
+ (void)fp_drop(p, fd, fp, 0);
+ return (error);
+ }
+
+ (void)fp_drop(p, fd, fp, 0);
+ *vpp = vp;
+ return (error);
+}
+
+/*
+ * Wrapper function around namei to start lookup from a directory
+ * specified by a file descriptor ni_dirfd.
+ *
+ * In addition to all the errors returned by namei, this call can
+ * return ENOTDIR if the file descriptor does not refer to a directory.
+ * and EBADF if the file descriptor is not valid.
+ */
+int
+nameiat(struct nameidata *ndp, int dirfd)
+{
+ if ((dirfd != AT_FDCWD) &&
+ !(ndp->ni_flag & NAMEI_CONTLOOKUP) &&
+ !(ndp->ni_cnd.cn_flags & USEDVP)) {
+ int error = 0;
+ char c;
+
+ if (UIO_SEG_IS_USER_SPACE(ndp->ni_segflg)) {
+ error = copyin(ndp->ni_dirp, &c, sizeof(char));
+ if (error)
+ return (error);
+ } else {
+ c = *((char *)(ndp->ni_dirp));
+ }
+
+ if (c != '/') {
+ vnode_t dvp_at;
+
+ error = vnode_getfromfd(ndp->ni_cnd.cn_context, dirfd,
+ &dvp_at);
+ if (error)
+ return (error);
+
+ if (vnode_vtype(dvp_at) != VDIR) {
+ vnode_put(dvp_at);
+ return (ENOTDIR);
+ }
+
+ ndp->ni_dvp = dvp_at;
+ ndp->ni_cnd.cn_flags |= USEDVP;
+ error = namei(ndp);
+ ndp->ni_cnd.cn_flags &= ~USEDVP;
+ vnode_put(dvp_at);
+ return (error);
+ }
+ }
+
+ return (namei(ndp));
+}
+
/*
* Change current working directory to a given file descriptor.
*/
return (error);
}
+/*
+ * Allocate the vnode data (for directories) associated with the file glob.
+ */
+struct fd_vn_data *
+fg_vn_data_alloc(void)
+{
+ struct fd_vn_data *fvdata;
+
+ /* Allocate per fd vnode data */
+ MALLOC(fvdata, struct fd_vn_data *, (sizeof(struct fd_vn_data)),
+ M_FD_VN_DATA, M_WAITOK | M_ZERO);
+ lck_mtx_init(&fvdata->fv_lock, fd_vn_lck_grp, fd_vn_lck_attr);
+ return fvdata;
+}
+
+/*
+ * Free the vnode data (for directories) associated with the file glob.
+ */
+void
+fg_vn_data_free(void *fgvndata)
+{
+ struct fd_vn_data *fvdata = (struct fd_vn_data *)fgvndata;
+
+ if (fvdata->fv_buf)
+ FREE(fvdata->fv_buf, M_FD_DIRBUF);
+ lck_mtx_destroy(&fvdata->fv_lock, fd_vn_lck_grp);
+ FREE(fvdata, M_FD_VN_DATA);
+}
+
/*
* Check permissions, allocate an open file structure,
* and call the device open routine if any.
* XXX Need to implement uid, gid
*/
int
-open1(vfs_context_t ctx, struct nameidata *ndp, int uflags, struct vnode_attr *vap, int32_t *retval)
+open1(vfs_context_t ctx, struct nameidata *ndp, int uflags,
+ struct vnode_attr *vap, fp_allocfn_t fp_zalloc, void *cra,
+ int32_t *retval)
{
proc_t p = vfs_context_proc(ctx);
uthread_t uu = get_bsdthread_info(vfs_context_thread(ctx));
- struct filedesc *fdp = p->p_fd;
struct fileproc *fp;
vnode_t vp;
int flags, oflags;
- struct fileproc *nfp;
int type, indx, error;
struct flock lf;
- int no_controlling_tty = 0;
- int deny_controlling_tty = 0;
- struct session *sessp = SESSION_NULL;
- struct vfs_context context = *vfs_context_current(); /* local copy */
+ struct vfs_context context;
oflags = uflags;
if ((oflags & O_ACCMODE) == O_ACCMODE)
return(EINVAL);
+
flags = FFLAGS(uflags);
+ CLR(flags, FENCRYPTED);
+ CLR(flags, FUNENCRYPTED);
AUDIT_ARG(fflags, oflags);
AUDIT_ARG(mode, vap->va_mode);
- if ( (error = falloc(p, &nfp, &indx, ctx)) ) {
+ if ((error = falloc_withalloc(p,
+ &fp, &indx, ctx, fp_zalloc, cra)) != 0) {
return (error);
}
- fp = nfp;
uu->uu_dupfd = -indx - 1;
- if (!(p->p_flag & P_CONTROLT)) {
- sessp = proc_session(p);
- no_controlling_tty = 1;
- /*
- * If conditions would warrant getting a controlling tty if
- * the device being opened is a tty (see ttyopen in tty.c),
- * but the open flags deny it, set a flag in the session to
- * prevent it.
- */
- if (SESS_LEADER(p, sessp) &&
- sessp->s_ttyvp == NULL &&
- (flags & O_NOCTTY)) {
- session_lock(sessp);
- sessp->s_flags |= S_NOCTTY;
- session_unlock(sessp);
- deny_controlling_tty = 1;
- }
- }
-
if ((error = vn_open_auth(ndp, &flags, vap))) {
if ((error == ENODEV || error == ENXIO) && (uu->uu_dupfd >= 0)){ /* XXX from fdopen */
- if ((error = dupfdopen(fdp, indx, uu->uu_dupfd, flags, error)) == 0) {
+ if ((error = dupfdopen(p->p_fd, indx, uu->uu_dupfd, flags, error)) == 0) {
fp_drop(p, indx, NULL, 0);
*retval = indx;
- if (deny_controlling_tty) {
- session_lock(sessp);
- sessp->s_flags &= ~S_NOCTTY;
- session_unlock(sessp);
- }
- if (sessp != SESSION_NULL)
- session_rele(sessp);
return (0);
}
}
if (error == ERESTART)
error = EINTR;
fp_free(p, indx, fp);
-
- if (deny_controlling_tty) {
- session_lock(sessp);
- sessp->s_flags &= ~S_NOCTTY;
- session_unlock(sessp);
- }
- if (sessp != SESSION_NULL)
- session_rele(sessp);
return (error);
}
uu->uu_dupfd = 0;
vp = ndp->ni_vp;
- fp->f_fglob->fg_flag = flags & (FMASK | O_EVTONLY);
- fp->f_fglob->fg_type = DTYPE_VNODE;
+ fp->f_fglob->fg_flag = flags & (FMASK | O_EVTONLY | FENCRYPTED | FUNENCRYPTED);
fp->f_fglob->fg_ops = &vnops;
fp->f_fglob->fg_data = (caddr_t)vp;
-#if CONFIG_PROTECT
- if (VATTR_IS_ACTIVE (vap, va_dataprotect_flags)) {
- if (vap->va_dataprotect_flags & VA_DP_RAWENCRYPTED) {
- fp->f_fglob->fg_flag |= FENCRYPTED;
- }
- }
-#endif
-
if (flags & (O_EXLOCK | O_SHLOCK)) {
lf.l_whence = SEEK_SET;
lf.l_start = 0;
if (error)
goto bad;
#endif
- if ((error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, F_SETLK, &lf, type, ctx)))
+ if ((error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, F_SETLK, &lf, type, ctx, NULL)))
goto bad;
fp->f_fglob->fg_flag |= FHASLOCK;
}
goto bad;
/*
- * If the open flags denied the acquisition of a controlling tty,
- * clear the flag in the session structure that prevented the lower
- * level code from assigning one.
+ * For directories we hold some additional information in the fd.
*/
- if (deny_controlling_tty) {
- session_lock(sessp);
- sessp->s_flags &= ~S_NOCTTY;
- session_unlock(sessp);
+ if (vnode_vtype(vp) == VDIR) {
+ fp->f_fglob->fg_vn_data = fg_vn_data_alloc();
+ } else {
+ fp->f_fglob->fg_vn_data = NULL;
}
+ vnode_put(vp);
+
/*
- * If a controlling tty was set by the tty line discipline, then we
- * want to set the vp of the tty into the session structure. We have
- * a race here because we can't get to the vp for the tp in ttyopen,
- * because it's not passed as a parameter in the open path.
+ * The first terminal open (without an O_NOCTTY) by a session leader
+ * results in it being set as the controlling terminal.
*/
- if (no_controlling_tty && (p->p_flag & P_CONTROLT)) {
- vnode_t ttyvp;
-
- /*
- * We already have a ref from vn_open_auth(), so we can demand another reference.
- */
- error = vnode_ref_ext(vp, 0, VNODE_REF_FORCE);
- if (error != 0) {
- panic("vnode_ref_ext() with VNODE_REF_FORCE failed?!");
- }
+ if (vnode_istty(vp) && !(p->p_flag & P_CONTROLT) &&
+ !(flags & O_NOCTTY)) {
+ int tmp = 0;
- session_lock(sessp);
- ttyvp = sessp->s_ttyvp;
- sessp->s_ttyvp = vp;
- sessp->s_ttyvid = vnode_vid(vp);
- session_unlock(sessp);
- if (ttyvp != NULLVP)
- vnode_rele(ttyvp);
+ (void)(*fp->f_fglob->fg_ops->fo_ioctl)(fp, (int)TIOCSCTTY,
+ (caddr_t)&tmp, ctx);
}
- vnode_put(vp);
-
proc_fdlock(p);
if (flags & O_CLOEXEC)
*fdflags(p, indx) |= UF_EXCLOSE;
+ if (flags & O_CLOFORK)
+ *fdflags(p, indx) |= UF_FORKCLOSE;
procfdtbl_releasefd(p, indx, NULL);
fp_drop(p, indx, fp, 1);
proc_fdunlock(p);
*retval = indx;
- if (sessp != SESSION_NULL)
- session_rele(sessp);
return (0);
bad:
- if (deny_controlling_tty) {
- session_lock(sessp);
- sessp->s_flags &= ~S_NOCTTY;
- session_unlock(sessp);
- }
- if (sessp != SESSION_NULL)
- session_rele(sessp);
-
- /* Modify local copy (to not damage thread copy) */
+ context = *vfs_context_current();
context.vc_ucred = fp->f_fglob->fg_cred;
+
+ if ((fp->f_fglob->fg_flag & FHASLOCK) &&
+ (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_VNODE)) {
+ lf.l_whence = SEEK_SET;
+ lf.l_start = 0;
+ lf.l_len = 0;
+ lf.l_type = F_UNLCK;
+
+ (void)VNOP_ADVLOCK(
+ vp, (caddr_t)fp->f_fglob, F_UNLCK, &lf, F_FLOCK, ctx, NULL);
+ }
vn_close(vp, fp->f_fglob->fg_flag, &context);
vnode_put(vp);
fp_free(p, indx, fp);
return (error);
+}
+
+/*
+ * While most of the *at syscall handlers can call nameiat() which
+ * is a wrapper around namei, the use of namei and initialisation
+ * of nameidata are far removed and in different functions - namei
+ * gets called in vn_open_auth for open1. So we'll just do here what
+ * nameiat() does.
+ */
+static int
+open1at(vfs_context_t ctx, struct nameidata *ndp, int uflags,
+ struct vnode_attr *vap, fp_allocfn_t fp_zalloc, void *cra, int32_t *retval,
+ int dirfd)
+{
+ if ((dirfd != AT_FDCWD) && !(ndp->ni_cnd.cn_flags & USEDVP)) {
+ int error;
+ char c;
+
+ if (UIO_SEG_IS_USER_SPACE(ndp->ni_segflg)) {
+ error = copyin(ndp->ni_dirp, &c, sizeof(char));
+ if (error)
+ return (error);
+ } else {
+ c = *((char *)(ndp->ni_dirp));
+ }
+
+ if (c != '/') {
+ vnode_t dvp_at;
+
+ error = vnode_getfromfd(ndp->ni_cnd.cn_context, dirfd,
+ &dvp_at);
+ if (error)
+ return (error);
+ if (vnode_vtype(dvp_at) != VDIR) {
+ vnode_put(dvp_at);
+ return (ENOTDIR);
+ }
+
+ ndp->ni_dvp = dvp_at;
+ ndp->ni_cnd.cn_flags |= USEDVP;
+ error = open1(ctx, ndp, uflags, vap, fp_zalloc, cra,
+ retval);
+ vnode_put(dvp_at);
+ return (error);
+ }
+ }
+
+ return (open1(ctx, ndp, uflags, vap, fp_zalloc, cra, retval));
}
/*
NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, UIO_USERSPACE,
uap->path, vfs_context_current());
- ciferror = open1(vfs_context_current(), &nd, uap->flags, &va, retval);
+ ciferror = open1(vfs_context_current(), &nd, uap->flags, &va,
+ fileproc_alloc_init, NULL, retval);
if (xsecdst != NULL)
kauth_filesec_free(xsecdst);
* 2. set a flag to mark it as requiring open-raw-encrypted semantics.
*/
if (flags & O_CREAT) {
- VATTR_SET(&va, va_dataprotect_class, class);
+ /* lower level kernel code validates that the class is valid before applying it. */
+ if (class != PROTECTION_CLASS_DEFAULT) {
+ /*
+ * PROTECTION_CLASS_DEFAULT implies that we make the class for this
+ * file behave the same as open (2)
+ */
+ VATTR_SET(&va, va_dataprotect_class, class);
+ }
}
- if (dpflags & O_DP_GETRAWENCRYPTED) {
+ if (dpflags & (O_DP_GETRAWENCRYPTED|O_DP_GETRAWUNENCRYPTED)) {
if ( flags & (O_RDWR | O_WRONLY)) {
/* Not allowed to write raw encrypted bytes */
return EINVAL;
}
- VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWENCRYPTED);
+ if (uap->dpflags & O_DP_GETRAWENCRYPTED) {
+ VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWENCRYPTED);
+ }
+ if (uap->dpflags & O_DP_GETRAWUNENCRYPTED) {
+ VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWUNENCRYPTED);
+ }
}
- error = open1(vfs_context_current(), &nd, uap->flags, &va, retval);
+ error = open1(vfs_context_current(), &nd, uap->flags, &va,
+ fileproc_alloc_init, NULL, retval);
return error;
}
+static int
+openat_internal(vfs_context_t ctx, user_addr_t path, int flags, int mode,
+ int fd, enum uio_seg segflg, int *retval)
+{
+ struct filedesc *fdp = (vfs_context_proc(ctx))->p_fd;
+ struct vnode_attr va;
+ struct nameidata nd;
+ int cmode;
+
+ VATTR_INIT(&va);
+ /* Mask off all but regular access permissions */
+ cmode = ((mode &~ fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT;
+ VATTR_SET(&va, va_mode, cmode & ACCESSPERMS);
+
+ NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1,
+ segflg, path, ctx);
+
+ return (open1at(ctx, &nd, flags, &va, fileproc_alloc_init, NULL,
+ retval, fd));
+}
int
open(proc_t p, struct open_args *uap, int32_t *retval)
}
int
-open_nocancel(proc_t p, struct open_nocancel_args *uap, int32_t *retval)
+open_nocancel(__unused proc_t p, struct open_nocancel_args *uap,
+ int32_t *retval)
{
- struct filedesc *fdp = p->p_fd;
- struct vnode_attr va;
- struct nameidata nd;
- int cmode;
+ return (openat_internal(vfs_context_current(), uap->path, uap->flags,
+ uap->mode, AT_FDCWD, UIO_USERSPACE, retval));
+}
- VATTR_INIT(&va);
- /* Mask off all but regular access permissions */
- cmode = ((uap->mode &~ fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT;
- VATTR_SET(&va, va_mode, cmode & ACCESSPERMS);
+int
+openat_nocancel(__unused proc_t p, struct openat_nocancel_args *uap,
+ int32_t *retval)
+{
+ return (openat_internal(vfs_context_current(), uap->path, uap->flags,
+ uap->mode, uap->fd, UIO_USERSPACE, retval));
+}
- NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, UIO_USERSPACE,
- uap->path, vfs_context_current());
+int
+openat(proc_t p, struct openat_args *uap, int32_t *retval)
+{
+ __pthread_testcancel(1);
+ return(openat_nocancel(p, (struct openat_nocancel_args *)uap, retval));
+}
+
+/*
+ * openbyid_np: open a file given a file system id and a file system object id.
+ * The hfs file system object id is an fsobj_id_t {uint32, uint32};
+ * for file systems that don't support object ids, it is a node id (uint64_t).
+ *
+ * Parameters: p Process requesting the open
+ * uap User argument descriptor (see below)
+ * retval Pointer to an area to receive the
+ * return value from the system call
+ *
+ * Indirect: uap->path Path to open (same as 'open')
+ *
+ * uap->fsid id of target file system
+ * uap->objid id of target file system object
+ * uap->flags Flags to open (same as 'open')
+ *
+ * Returns: 0 Success
+ * !0 errno value
+ *
+ *
+ * XXX: We should enumerate the possible errno values here, and where
+ * in the code they originated.
+ */
+int
+openbyid_np(__unused proc_t p, struct openbyid_np_args *uap, int *retval)
+{
+ fsid_t fsid;
+ uint64_t objid;
+ int error;
+ char *buf = NULL;
+ int buflen = MAXPATHLEN;
+ int pathlen = 0;
+ vfs_context_t ctx = vfs_context_current();
+
+ if ((error = copyin(uap->fsid, (caddr_t)&fsid, sizeof(fsid)))) {
+ return (error);
+ }
+
+ /* uap->objid is an fsobj_id_t defined as struct {uint32_t, uint32_t} */
+ if ((error = copyin(uap->objid, (caddr_t)&objid, sizeof(uint64_t)))) {
+ return (error);
+ }
+
+ AUDIT_ARG(value32, fsid.val[0]);
+ AUDIT_ARG(value64, objid);
+
+ /* resolve path from fsid, objid */
+ do {
+ MALLOC(buf, char *, buflen + 1, M_TEMP, M_WAITOK);
+ if (buf == NULL) {
+ return (ENOMEM);
+ }
+
+ error = fsgetpath_internal(
+ ctx, fsid.val[0], objid,
+ buflen, buf, &pathlen);
+
+ if (error) {
+ FREE(buf, M_TEMP);
+ buf = NULL;
+ }
+ } while (error == ENOSPC && (buflen += MAXPATHLEN));
+
+ if (error) {
+ return error;
+ }
+
+ buf[pathlen] = 0;
+
+ error = openat_internal(
+ ctx, (user_addr_t)buf, uap->oflags, 0, AT_FDCWD, UIO_SYSSPACE, retval);
- return(open1(vfs_context_current(), &nd, uap->flags, &va, retval));
+ FREE(buf, M_TEMP);
+
+ return error;
}
}
switch (uap->mode & S_IFMT) {
- case S_IFMT: /* used by badsect to flag bad sectors */
- VATTR_SET(&va, va_type, VBAD);
- break;
case S_IFCHR:
VATTR_SET(&va, va_type, VCHR);
break;
* VNOP_LINK:???
*/
/* ARGSUSED */
-int
-link(__unused proc_t p, struct link_args *uap, __unused int32_t *retval)
+static int
+linkat_internal(vfs_context_t ctx, int fd1, user_addr_t path, int fd2,
+ user_addr_t link, int flag, enum uio_seg segflg)
{
vnode_t vp, dvp, lvp;
struct nameidata nd;
- vfs_context_t ctx = vfs_context_current();
+ int follow;
int error;
#if CONFIG_FSE
fse_info finfo;
vp = dvp = lvp = NULLVP;
/* look up the object we are linking to */
- NDINIT(&nd, LOOKUP, OP_LOOKUP, FOLLOW | AUDITVNPATH1,
- UIO_USERSPACE, uap->path, ctx);
- error = namei(&nd);
+ follow = (flag & AT_SYMLINK_FOLLOW) ? FOLLOW : NOFOLLOW;
+ NDINIT(&nd, LOOKUP, OP_LOOKUP, AUDITVNPATH1 | follow,
+ segflg, path, ctx);
+
+ error = nameiat(&nd, fd1);
if (error)
return (error);
vp = nd.ni_vp;
#endif
nd.ni_cnd.cn_nameiop = CREATE;
nd.ni_cnd.cn_flags = LOCKPARENT | AUDITVNPATH2 | CN_NBMOUNTLOOK;
- nd.ni_dirp = uap->link;
- error = namei(&nd);
+ nd.ni_dirp = link;
+ error = nameiat(&nd, fd2);
if (error != 0)
goto out;
dvp = nd.ni_dvp;
if (error)
goto out2;
+#if CONFIG_MACF
+ (void)mac_vnode_notify_link(ctx, vp, dvp, &nd.ni_cnd);
+#endif
+
#if CONFIG_FSE
need_event = need_fsevent(FSE_CREATE_FILE, dvp);
#else
}
link_name_len = MAXPATHLEN;
- vn_getpath(vp, link_to_path, &link_name_len);
-
- /*
- * Call out to allow 3rd party notification of rename.
- * Ignore result of kauth_authorize_fileop call.
- */
- kauth_authorize_fileop(vfs_context_ucred(ctx), KAUTH_FILEOP_LINK,
- (uintptr_t)link_to_path, (uintptr_t)target_path);
+ if (vn_getpath(vp, link_to_path, &link_name_len) == 0) {
+ /*
+ * Call out to allow 3rd party notification of rename.
+ * Ignore result of kauth_authorize_fileop call.
+ */
+ kauth_authorize_fileop(vfs_context_ucred(ctx), KAUTH_FILEOP_LINK,
+ (uintptr_t)link_to_path,
+ (uintptr_t)target_path);
+ }
if (link_to_path != NULL) {
RELEASE_PATH(link_to_path);
}
return (error);
}
+/*
+ * link() — legacy path-based hard link; equivalent to
+ * linkat(AT_FDCWD, path, AT_FDCWD, link, AT_SYMLINK_FOLLOW).
+ */
+int
+link(__unused proc_t p, struct link_args *uap, __unused int32_t *retval)
+{
+	return (linkat_internal(vfs_context_current(), AT_FDCWD, uap->path,
+	    AT_FDCWD, uap->link, AT_SYMLINK_FOLLOW, UIO_USERSPACE));
+}
+
+/*
+ * linkat() — create a hard link with paths resolved relative to fd1/fd2.
+ * Only AT_SYMLINK_FOLLOW is accepted in 'flag'; any other bit is EINVAL.
+ */
+int
+linkat(__unused proc_t p, struct linkat_args *uap, __unused int32_t *retval)
+{
+	if (uap->flag & ~AT_SYMLINK_FOLLOW)
+		return (EINVAL);
+
+	return (linkat_internal(vfs_context_current(), uap->fd1, uap->path,
+	    uap->fd2, uap->link, uap->flag, UIO_USERSPACE));
+}
+
/*
* Make a symbolic link.
*
* We could add support for ACLs here too...
*/
/* ARGSUSED */
-int
-symlink(proc_t p, struct symlink_args *uap, __unused int32_t *retval)
+static int
+symlinkat_internal(vfs_context_t ctx, user_addr_t path_data, int fd,
+ user_addr_t link, enum uio_seg segflg)
{
struct vnode_attr va;
char *path;
int error;
struct nameidata nd;
- vfs_context_t ctx = vfs_context_current();
vnode_t vp, dvp;
+ uint32_t dfflags; // Directory file flags
size_t dummy=0;
-
- MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
- error = copyinstr(uap->path, path, MAXPATHLEN, &dummy);
+ proc_t p;
+
+ error = 0;
+ if (UIO_SEG_IS_USER_SPACE(segflg)) {
+ MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
+ error = copyinstr(path_data, path, MAXPATHLEN, &dummy);
+ } else {
+ path = (char *)path_data;
+ }
if (error)
goto out;
AUDIT_ARG(text, path); /* This is the link string */
- NDINIT(&nd, CREATE, OP_SYMLINK, LOCKPARENT | AUDITVNPATH1,
- UIO_USERSPACE, uap->link, ctx);
- error = namei(&nd);
+ NDINIT(&nd, CREATE, OP_SYMLINK, LOCKPARENT | AUDITVNPATH1,
+ segflg, link, ctx);
+
+ error = nameiat(&nd, fd);
if (error)
goto out;
dvp = nd.ni_dvp;
vp = nd.ni_vp;
+ p = vfs_context_proc(ctx);
VATTR_INIT(&va);
VATTR_SET(&va, va_type, VLNK);
VATTR_SET(&va, va_mode, ACCESSPERMS & ~p->p_fd->fd_cmask);
+
+ /*
+ * Handle inheritance of restricted flag
+ */
+ error = vnode_flags(dvp, &dfflags, ctx);
+ if (error)
+ goto skipit;
+ if (dfflags & SF_RESTRICTED)
+ VATTR_SET(&va, va_flags, SF_RESTRICTED);
+
#if CONFIG_MACF
error = mac_vnode_check_create(ctx,
dvp, &nd.ni_cnd, &va);
if (error == 0)
error = VNOP_SYMLINK(dvp, &vp, &nd.ni_cnd, &va, path, ctx);
+#if CONFIG_MACF
+ if (error == 0 && vp)
+ error = vnode_label(vnode_mount(vp), dvp, vp, &nd.ni_cnd, VNODE_LABEL_CREATE, ctx);
+#endif
+
/* do fallback attribute handling */
- if (error == 0)
+ if (error == 0 && vp)
error = vnode_setattr_fallback(vp, &va, ctx);
-
+
if (error == 0) {
int update_flags = 0;
+ /*check if a new vnode was created, else try to get one*/
if (vp == NULL) {
nd.ni_cnd.cn_nameiop = LOOKUP;
#if CONFIG_TRIGGERS
nd.ni_op = OP_LOOKUP;
#endif
nd.ni_cnd.cn_flags = 0;
- error = namei(&nd);
+ error = nameiat(&nd, fd);
vp = nd.ni_vp;
if (vp == NULL)
goto skipit;
}
-
+
#if 0 /* XXX - kauth_todo - is KAUTH_FILEOP_SYMLINK needed? */
- /* call out to allow 3rd party notification of rename.
+ /* call out to allow 3rd party notification of rename.
* Ignore result of kauth_authorize_fileop call.
*/
if (kauth_authorize_fileop_has_listeners() &&
namei(&nd) == 0) {
char *new_link_path = NULL;
int len;
-
+
/* build the path to the new link file */
new_link_path = get_pathbuff();
len = MAXPATHLEN;
new_link_path[len - 1] = '/';
strlcpy(&new_link_path[len], nd.ni_cnd.cn_nameptr, MAXPATHLEN-len);
}
-
- kauth_authorize_fileop(vfs_context_ucred(ctx), KAUTH_FILEOP_SYMLINK,
+
+ kauth_authorize_fileop(vfs_context_ucred(ctx), KAUTH_FILEOP_SYMLINK,
(uintptr_t)path, (uintptr_t)new_link_path);
if (new_link_path != NULL)
release_pathbuff(new_link_path);
}
-#endif
+#endif
// Make sure the name & parent pointers are hooked up
if (vp->v_name == NULL)
update_flags |= VNODE_UPDATE_NAME;
if (vp->v_parent == NULLVP)
update_flags |= VNODE_UPDATE_PARENT;
-
+
if (update_flags)
vnode_update_identity(vp, dvp, nd.ni_cnd.cn_nameptr, nd.ni_cnd.cn_namelen, nd.ni_cnd.cn_hash, update_flags);
vnode_put(vp);
vnode_put(dvp);
out:
- FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
+ if (path && (path != (char *)path_data))
+ FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
return (error);
}
+/*
+ * symlink() — legacy path-based interface; equivalent to
+ * symlinkat(path, AT_FDCWD, link).
+ */
+int
+symlink(__unused proc_t p, struct symlink_args *uap, __unused int32_t *retval)
+{
+	return (symlinkat_internal(vfs_context_current(), uap->path, AT_FDCWD,
+	    uap->link, UIO_USERSPACE));
+}
+
+/*
+ * symlinkat() — create a symbolic link with the link path resolved
+ * relative to uap->fd.  path1 is the link target, path2 the link name.
+ */
+int
+symlinkat(__unused proc_t p, struct symlinkat_args *uap,
+    __unused int32_t *retval)
+{
+	return (symlinkat_internal(vfs_context_current(), uap->path1, uap->fd,
+	    uap->path2, UIO_USERSPACE));
+}
+
/*
* Delete a whiteout from the filesystem.
- * XXX authorization not implmented for whiteouts
+ * No longer supported.
*/
int
-undelete(__unused proc_t p, struct undelete_args *uap, __unused int32_t *retval)
+undelete(__unused proc_t p, __unused struct undelete_args *uap, __unused int32_t *retval)
{
-	int error;
-	struct nameidata nd;
-	vfs_context_t ctx = vfs_context_current();
-	vnode_t vp, dvp;
-
-	NDINIT(&nd, DELETE, OP_UNLINK, LOCKPARENT | DOWHITEOUT | AUDITVNPATH1,
-	    UIO_USERSPACE, uap->path, ctx);
-	error = namei(&nd);
-	if (error)
-		return (error);
-	dvp = nd.ni_dvp;
-	vp = nd.ni_vp;
-
-	if (vp == NULLVP && (nd.ni_cnd.cn_flags & ISWHITEOUT)) {
-		error = VNOP_WHITEOUT(dvp, &nd.ni_cnd, DELETE, ctx);
-	} else
-		error = EEXIST;
-
-	/*
-	 * nameidone has to happen before we vnode_put(dvp)
-	 * since it may need to release the fs_nodelock on the dvp
-	 */
-	nameidone(&nd);
-
-	if (vp)
-		vnode_put(vp);
-	vnode_put(dvp);
-
-	return (error);
+	/* whiteout support has been removed; this syscall always fails */
+	return (ENOTSUP);
}
-
/*
* Delete a name from the filesystem.
*/
/* ARGSUSED */
-int
-unlink1(vfs_context_t ctx, struct nameidata *ndp, int unlink_flags)
+static int
+unlinkat_internal(vfs_context_t ctx, int fd, vnode_t start_dvp,
+ user_addr_t path_arg, enum uio_seg segflg, int unlink_flags)
{
+ struct nameidata nd;
vnode_t vp, dvp;
int error;
struct componentname *cnp;
fse_info finfo;
struct vnode_attr va;
#endif
- int flags = 0;
- int need_event = 0;
- int has_listeners = 0;
- int truncated_path=0;
+ int flags;
+ int need_event;
+ int has_listeners;
+ int truncated_path;
int batched;
- struct vnode_attr *vap = NULL;
+ struct vnode_attr *vap;
+ int do_retry;
+ int retry_count = 0;
+ int cn_flags;
+
+ cn_flags = LOCKPARENT;
+ if (!(unlink_flags & VNODE_REMOVE_NO_AUDIT_PATH))
+ cn_flags |= AUDITVNPATH1;
+ /* If a starting dvp is passed, it trumps any fd passed. */
+ if (start_dvp)
+ cn_flags |= USEDVP;
#if NAMEDRSRCFORK
/* unlink or delete is allowed on rsrc forks and named streams */
- ndp->ni_cnd.cn_flags |= CN_ALLOWRSRCFORK;
+ cn_flags |= CN_ALLOWRSRCFORK;
#endif
- ndp->ni_cnd.cn_flags |= LOCKPARENT;
- ndp->ni_flag |= NAMEI_COMPOUNDREMOVE;
- cnp = &ndp->ni_cnd;
+retry:
+ do_retry = 0;
+ flags = 0;
+ need_event = 0;
+ has_listeners = 0;
+ truncated_path = 0;
+ vap = NULL;
+
+ NDINIT(&nd, DELETE, OP_UNLINK, cn_flags, segflg, path_arg, ctx);
+
+ nd.ni_dvp = start_dvp;
+ nd.ni_flag |= NAMEI_COMPOUNDREMOVE;
+ cnp = &nd.ni_cnd;
lookup_continue:
- error = namei(ndp);
+ error = nameiat(&nd, fd);
if (error)
return (error);
- dvp = ndp->ni_dvp;
- vp = ndp->ni_vp;
+ dvp = nd.ni_dvp;
+ vp = nd.ni_vp;
/* With Carbon delete semantics, busy files cannot be deleted */
flags |= VNODE_REMOVE_NODELETEBUSY;
}
- /* If we're told to, then skip any potential future upcalls */
+ /* Skip any potential upcalls if told to. */
if (unlink_flags & VNODE_REMOVE_SKIP_NAMESPACE_EVENT) {
flags |= VNODE_REMOVE_SKIP_NAMESPACE_EVENT;
}
-
if (vp) {
batched = vnode_compound_remove_available(vp);
/*
if (!batched) {
error = vn_authorize_unlink(dvp, vp, cnp, ctx, NULL);
if (error) {
+ if (error == ENOENT) {
+ assert(retry_count < MAX_AUTHORIZE_ENOENT_RETRIES);
+ if (retry_count < MAX_AUTHORIZE_ENOENT_RETRIES) {
+ do_retry = 1;
+ retry_count++;
+ }
+ }
goto out;
}
}
goto out;
}
}
- len = safe_getpath(dvp, ndp->ni_cnd.cn_nameptr, path, MAXPATHLEN, &truncated_path);
+ len = safe_getpath(dvp, nd.ni_cnd.cn_nameptr, path, MAXPATHLEN, &truncated_path);
}
#if NAMEDRSRCFORK
- if (ndp->ni_cnd.cn_flags & CN_WANTSRSRCFORK)
+ if (nd.ni_cnd.cn_flags & CN_WANTSRSRCFORK)
error = vnode_removenamedstream(dvp, vp, XATTR_RESOURCEFORK_NAME, 0, ctx);
else
#endif
{
- error = vn_remove(dvp, &ndp->ni_vp, ndp, flags, vap, ctx);
- vp = ndp->ni_vp;
+ error = vn_remove(dvp, &nd.ni_vp, &nd, flags, vap, ctx);
+ vp = nd.ni_vp;
if (error == EKEEPLOOKING) {
if (!batched) {
panic("EKEEPLOOKING, but not a filesystem that supports compound VNOPs?");
}
- if ((ndp->ni_flag & NAMEI_CONTLOOKUP) == 0) {
+ if ((nd.ni_flag & NAMEI_CONTLOOKUP) == 0) {
panic("EKEEPLOOKING, but continue flag not set?");
}
goto out;
}
goto lookup_continue;
+ } else if (error == ENOENT && batched) {
+ assert(retry_count < MAX_AUTHORIZE_ENOENT_RETRIES);
+ if (retry_count < MAX_AUTHORIZE_ENOENT_RETRIES) {
+ /*
+ * For compound VNOPs, the authorization callback may
+ * return ENOENT in case of racing hardlink lookups
+ * hitting the name cache, redrive the lookup.
+ */
+ do_retry = 1;
+ retry_count += 1;
+ goto out;
+ }
}
}
* nameidone has to happen before we vnode_put(dvp)
* since it may need to release the fs_nodelock on the dvp
*/
- nameidone(ndp);
+ nameidone(&nd);
vnode_put(dvp);
if (vp) {
vnode_put(vp);
}
+
+ if (do_retry) {
+ goto retry;
+ }
+
return (error);
}
-/*
- * Delete a name from the filesystem using POSIX semantics.
- */
int
-unlink(__unused proc_t p, struct unlink_args *uap, __unused int32_t *retval)
+unlink1(vfs_context_t ctx, vnode_t start_dvp, user_addr_t path_arg,
+ enum uio_seg segflg, int unlink_flags)
{
- struct nameidata nd;
- vfs_context_t ctx = vfs_context_current();
-
- NDINIT(&nd, DELETE, OP_UNLINK, AUDITVNPATH1, UIO_USERSPACE,
- uap->path, ctx);
- return unlink1(ctx, &nd, 0);
+ return (unlinkat_internal(ctx, AT_FDCWD, start_dvp, path_arg, segflg,
+ unlink_flags));
}
/*
int
delete(__unused proc_t p, struct delete_args *uap, __unused int32_t *retval)
{
- struct nameidata nd;
- vfs_context_t ctx = vfs_context_current();
+ return (unlinkat_internal(vfs_context_current(), AT_FDCWD, NULLVP,
+ uap->path, UIO_USERSPACE, VNODE_REMOVE_NODELETEBUSY));
+}
- NDINIT(&nd, DELETE, OP_UNLINK, AUDITVNPATH1, UIO_USERSPACE,
- uap->path, ctx);
- return unlink1(ctx, &nd, VNODE_REMOVE_NODELETEBUSY);
+/*
+ * Delete a name from the filesystem using POSIX semantics.
+ * Relative paths resolve against the current working directory (AT_FDCWD).
+ */
+int
+unlink(__unused proc_t p, struct unlink_args *uap, __unused int32_t *retval)
+{
+	return (unlinkat_internal(vfs_context_current(), AT_FDCWD, NULLVP,
+	    uap->path, UIO_USERSPACE, 0));
+}
+
+/*
+ * unlinkat() — remove a directory entry relative to uap->fd.
+ * AT_REMOVEDIR selects rmdir semantics; any other flag bit is EINVAL.
+ */
+int
+unlinkat(__unused proc_t p, struct unlinkat_args *uap, __unused int32_t *retval)
+{
+	if (uap->flag & ~AT_REMOVEDIR)
+		return (EINVAL);
+
+	if (uap->flag & AT_REMOVEDIR)
+		return (rmdirat_internal(vfs_context_current(), uap->fd,
+		    uap->path, UIO_USERSPACE));
+	else
+		return (unlinkat_internal(vfs_context_current(), uap->fd,
+		    NULLVP, uap->path, UIO_USERSPACE, 0));
+}
/*
* namei:???
* access1:
*/
-int
-access(__unused proc_t p, struct access_args *uap, __unused int32_t *retval)
+static int
+faccessat_internal(vfs_context_t ctx, int fd, user_addr_t path, int amode,
+ int flag, enum uio_seg segflg)
{
int error;
struct nameidata nd;
#endif
/*
- * Access is defined as checking against the process'
- * real identity, even if operations are checking the
- * effective identity. So we need to tweak the credential
- * in the context.
+ * Unless the AT_EACCESS option is used, Access is defined as checking
+ * against the process' real identity, even if operations are checking
+ * the effective identity. So we need to tweak the credential
+ * in the context for that case.
*/
- context.vc_ucred = kauth_cred_copy_real(kauth_cred_get());
- context.vc_thread = current_thread();
+ if (!(flag & AT_EACCESS))
+ context.vc_ucred = kauth_cred_copy_real(kauth_cred_get());
+ else
+ context.vc_ucred = ctx->vc_ucred;
+ context.vc_thread = ctx->vc_thread;
+
niopts = FOLLOW | AUDITVNPATH1;
/* need parent for vnode_authorize for deletion test */
- if (uap->flags & _DELETE_OK)
+ if (amode & _DELETE_OK)
niopts |= WANTPARENT;
- NDINIT(&nd, LOOKUP, OP_ACCESS, niopts, UIO_USERSPACE,
- uap->path, &context);
+ NDINIT(&nd, LOOKUP, OP_ACCESS, niopts, segflg,
+ path, &context);
#if NAMEDRSRCFORK
/* access(F_OK) calls are allowed for resource forks. */
- if (uap->flags == F_OK)
+ if (amode == F_OK)
nd.ni_cnd.cn_flags |= CN_ALLOWRSRCFORK;
#endif
- error = namei(&nd);
+ error = nameiat(&nd, fd);
if (error)
goto out;
}
#endif
- error = access1(nd.ni_vp, nd.ni_dvp, uap->flags, &context);
+ error = access1(nd.ni_vp, nd.ni_dvp, amode, &context);
#if NAMEDRSRCFORK
if (is_namedstream) {
#endif
vnode_put(nd.ni_vp);
- if (uap->flags & _DELETE_OK)
+ if (amode & _DELETE_OK)
vnode_put(nd.ni_dvp);
nameidone(&nd);
out:
- kauth_cred_unref(&context.vc_ucred);
- return(error);
+ if (!(flag & AT_EACCESS))
+ kauth_cred_unref(&context.vc_ucred);
+ return (error);
+}
+
+/*
+ * access() — check uap->flags permissions against the caller's real
+ * identity; equivalent to faccessat(AT_FDCWD, path, amode, 0).
+ */
+int
+access(__unused proc_t p, struct access_args *uap, __unused int32_t *retval)
+{
+	return (faccessat_internal(vfs_context_current(), AT_FDCWD,
+	    uap->path, uap->flags, 0, UIO_USERSPACE));
}
+/*
+ * faccessat() — access check relative to uap->fd.  AT_EACCESS checks
+ * against the effective identity instead of the real one; any other
+ * flag bit is rejected with EINVAL.
+ */
+int
+faccessat(__unused proc_t p, struct faccessat_args *uap,
+    __unused int32_t *retval)
+{
+	if (uap->flag & ~AT_EACCESS)
+		return (EINVAL);
+
+	return (faccessat_internal(vfs_context_current(), uap->fd,
+	    uap->path, uap->amode, uap->flag, UIO_USERSPACE));
+}
/*
* Returns: 0 Success
* vn_stat:???
*/
static int
-stat2(vfs_context_t ctx, struct nameidata *ndp, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsecurity_size, int isstat64)
+fstatat_internal(vfs_context_t ctx, user_addr_t path, user_addr_t ub,
+ user_addr_t xsecurity, user_addr_t xsecurity_size, int isstat64,
+ enum uio_seg segflg, int fd, int flag)
{
+ struct nameidata nd;
+ int follow;
union {
struct stat sb;
struct stat64 sb64;
size_t xsecurity_bufsize;
void * statptr;
+ follow = (flag & AT_SYMLINK_NOFOLLOW) ? NOFOLLOW : FOLLOW;
+ NDINIT(&nd, LOOKUP, OP_GETATTR, follow | AUDITVNPATH1,
+ segflg, path, ctx);
+
#if NAMEDRSRCFORK
int is_namedstream = 0;
/* stat calls are allowed for resource forks. */
- ndp->ni_cnd.cn_flags |= CN_ALLOWRSRCFORK;
+ nd.ni_cnd.cn_flags |= CN_ALLOWRSRCFORK;
#endif
- error = namei(ndp);
+ error = nameiat(&nd, fd);
if (error)
return (error);
fsec = KAUTH_FILESEC_NONE;
* force an inactive on release which will mark it
* for recycle.
*/
- if (vnode_isnamedstream(ndp->ni_vp) &&
- (ndp->ni_vp->v_parent != NULLVP) &&
- vnode_isshadow(ndp->ni_vp)) {
+ if (vnode_isnamedstream(nd.ni_vp) &&
+ (nd.ni_vp->v_parent != NULLVP) &&
+ vnode_isshadow(nd.ni_vp)) {
is_namedstream = 1;
- vnode_ref(ndp->ni_vp);
+ vnode_ref(nd.ni_vp);
}
#endif
- error = vn_stat(ndp->ni_vp, statptr, (xsecurity != USER_ADDR_NULL ? &fsec : NULL), isstat64, ctx);
+ error = vn_stat(nd.ni_vp, statptr, (xsecurity != USER_ADDR_NULL ? &fsec : NULL), isstat64, ctx);
#if NAMEDRSRCFORK
if (is_namedstream) {
- vnode_rele(ndp->ni_vp);
+ vnode_rele(nd.ni_vp);
}
#endif
- vnode_put(ndp->ni_vp);
- nameidone(ndp);
+ vnode_put(nd.ni_vp);
+ nameidone(&nd);
if (error)
return (error);
return (error);
}
-/*
- * Get file status; this version follows links.
- *
- * Returns: 0 Success
- * stat2:??? [see stat2() in this file]
- */
-static int
-stat1(user_addr_t path, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsecurity_size, int isstat64)
-{
- struct nameidata nd;
- vfs_context_t ctx = vfs_context_current();
-
- NDINIT(&nd, LOOKUP, OP_GETATTR, NOTRIGGER | FOLLOW | AUDITVNPATH1,
- UIO_USERSPACE, path, ctx);
- return(stat2(ctx, &nd, ub, xsecurity, xsecurity_size, isstat64));
-}
-
/*
* stat_extended: Get file status; with extended security (ACL).
*
*
*/
int
-stat_extended(__unused proc_t p, struct stat_extended_args *uap, __unused int32_t *retval)
+stat_extended(__unused proc_t p, struct stat_extended_args *uap,
+ __unused int32_t *retval)
{
- return (stat1(uap->path, uap->ub, uap->xsecurity, uap->xsecurity_size, 0));
+ return (fstatat_internal(vfs_context_current(), uap->path, uap->ub,
+ uap->xsecurity, uap->xsecurity_size, 0, UIO_USERSPACE, AT_FDCWD,
+ 0));
}
/*
* Returns: 0 Success
- * stat1:??? [see stat1() in this file]
+ * fstatat_internal:??? [see fstatat_internal() in this file]
*/
int
stat(__unused proc_t p, struct stat_args *uap, __unused int32_t *retval)
{
-	return(stat1(uap->path, uap->ub, 0, 0, 0));
+	/* follows symlinks; equivalent to fstatat(AT_FDCWD, path, ub, 0) */
+	return (fstatat_internal(vfs_context_current(), uap->path, uap->ub,
+	    0, 0, 0, UIO_USERSPACE, AT_FDCWD, 0));
}
int
stat64(__unused proc_t p, struct stat64_args *uap, __unused int32_t *retval)
{
-	return(stat1(uap->path, uap->ub, 0, 0, 1));
+	/* 64-bit stat structure variant (isstat64 = 1); follows symlinks */
+	return (fstatat_internal(vfs_context_current(), uap->path, uap->ub,
+	    0, 0, 1, UIO_USERSPACE, AT_FDCWD, 0));
}
/*
int
stat64_extended(__unused proc_t p, struct stat64_extended_args *uap, __unused int32_t *retval)
{
- return (stat1(uap->path, uap->ub, uap->xsecurity, uap->xsecurity_size, 1));
-}
-/*
- * Get file status; this version does not follow links.
- */
-static int
-lstat1(user_addr_t path, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsecurity_size, int isstat64)
-{
- struct nameidata nd;
- vfs_context_t ctx = vfs_context_current();
-
- NDINIT(&nd, LOOKUP, OP_GETATTR, NOTRIGGER | NOFOLLOW | AUDITVNPATH1,
- UIO_USERSPACE, path, ctx);
-
- return(stat2(ctx, &nd, ub, xsecurity, xsecurity_size, isstat64));
+ return (fstatat_internal(vfs_context_current(), uap->path, uap->ub,
+ uap->xsecurity, uap->xsecurity_size, 1, UIO_USERSPACE, AT_FDCWD,
+ 0));
}
/*
int
lstat_extended(__unused proc_t p, struct lstat_extended_args *uap, __unused int32_t *retval)
{
- return (lstat1(uap->path, uap->ub, uap->xsecurity, uap->xsecurity_size, 0));
+ return (fstatat_internal(vfs_context_current(), uap->path, uap->ub,
+ uap->xsecurity, uap->xsecurity_size, 0, UIO_USERSPACE, AT_FDCWD,
+ AT_SYMLINK_NOFOLLOW));
}
+/*
+ * Get file status; this version does not follow links
+ * (AT_SYMLINK_NOFOLLOW is passed to fstatat_internal).
+ */
int
lstat(__unused proc_t p, struct lstat_args *uap, __unused int32_t *retval)
{
-	return(lstat1(uap->path, uap->ub, 0, 0, 0));
+	return (fstatat_internal(vfs_context_current(), uap->path, uap->ub,
+	    0, 0, 0, UIO_USERSPACE, AT_FDCWD, AT_SYMLINK_NOFOLLOW));
}
int
lstat64(__unused proc_t p, struct lstat64_args *uap, __unused int32_t *retval)
{
-	return(lstat1(uap->path, uap->ub, 0, 0, 1));
+	/* 64-bit stat structure variant; does not follow symlinks */
+	return (fstatat_internal(vfs_context_current(), uap->path, uap->ub,
+	    0, 0, 1, UIO_USERSPACE, AT_FDCWD, AT_SYMLINK_NOFOLLOW));
}
/*
int
lstat64_extended(__unused proc_t p, struct lstat64_extended_args *uap, __unused int32_t *retval)
{
- return (lstat1(uap->path, uap->ub, uap->xsecurity, uap->xsecurity_size, 1));
+ return (fstatat_internal(vfs_context_current(), uap->path, uap->ub,
+ uap->xsecurity, uap->xsecurity_size, 1, UIO_USERSPACE, AT_FDCWD,
+ AT_SYMLINK_NOFOLLOW));
+}
+
+/*
+ * fstatat() — stat relative to uap->fd; only AT_SYMLINK_NOFOLLOW is
+ * accepted in 'flag', anything else is EINVAL.
+ */
+int
+fstatat(__unused proc_t p, struct fstatat_args *uap, __unused int32_t *retval)
+{
+	if (uap->flag & ~AT_SYMLINK_NOFOLLOW)
+		return (EINVAL);
+
+	return (fstatat_internal(vfs_context_current(), uap->path, uap->ub,
+	    0, 0, 0, UIO_USERSPACE, uap->fd, uap->flag));
+}
+
+/*
+ * fstatat64() — 64-bit stat structure variant of fstatat (isstat64 = 1).
+ */
+int
+fstatat64(__unused proc_t p, struct fstatat64_args *uap,
+    __unused int32_t *retval)
+{
+	if (uap->flag & ~AT_SYMLINK_NOFOLLOW)
+		return (EINVAL);
+
+	return (fstatat_internal(vfs_context_current(), uap->path, uap->ub,
+	    0, 0, 1, UIO_USERSPACE, uap->fd, uap->flag));
}
/*
* Return target name of a symbolic link.
*/
/* ARGSUSED */
-int
-readlink(proc_t p, struct readlink_args *uap, int32_t *retval)
+static int
+readlinkat_internal(vfs_context_t ctx, int fd, user_addr_t path,
+ enum uio_seg seg, user_addr_t buf, size_t bufsize, enum uio_seg bufseg,
+ int *retval)
{
vnode_t vp;
uio_t auio;
- int spacetype = proc_is64bit(p) ? UIO_USERSPACE64 : UIO_USERSPACE32;
int error;
struct nameidata nd;
- vfs_context_t ctx = vfs_context_current();
char uio_buf[ UIO_SIZEOF(1) ];
- NDINIT(&nd, LOOKUP, OP_READLINK, NOFOLLOW | AUDITVNPATH1,
- UIO_USERSPACE, uap->path, ctx);
- error = namei(&nd);
+ NDINIT(&nd, LOOKUP, OP_READLINK, NOFOLLOW | AUDITVNPATH1,
+ seg, path, ctx);
+
+ error = nameiat(&nd, fd);
if (error)
return (error);
vp = nd.ni_vp;
nameidone(&nd);
- auio = uio_createwithbuffer(1, 0, spacetype, UIO_READ,
- &uio_buf[0], sizeof(uio_buf));
- uio_addiov(auio, uap->buf, uap->count);
- if (vp->v_type != VLNK)
+ auio = uio_createwithbuffer(1, 0, bufseg, UIO_READ,
+ &uio_buf[0], sizeof(uio_buf));
+ uio_addiov(auio, buf, bufsize);
+ if (vp->v_type != VLNK) {
error = EINVAL;
- else {
+ } else {
#if CONFIG_MACF
- error = mac_vnode_check_readlink(ctx,
- vp);
+ error = mac_vnode_check_readlink(ctx, vp);
#endif
if (error == 0)
- error = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_DATA, ctx);
+ error = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_DATA,
+ ctx);
if (error == 0)
error = VNOP_READLINK(vp, auio, ctx);
}
vnode_put(vp);
- /* Safe: uio_resid() is bounded above by "count", and "count" is an int */
- *retval = uap->count - (int)uio_resid(auio);
+ *retval = bufsize - (int)uio_resid(auio);
return (error);
}
+/*
+ * readlink() — legacy path-based interface; the user buffer segment is
+ * chosen by the process's 32/64-bit personality.  *retval is the number
+ * of bytes placed in the buffer.
+ */
+int
+readlink(proc_t p, struct readlink_args *uap, int32_t *retval)
+{
+	enum uio_seg procseg;
+
+	procseg = IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32;
+	return (readlinkat_internal(vfs_context_current(), AT_FDCWD,
+	    CAST_USER_ADDR_T(uap->path), procseg, CAST_USER_ADDR_T(uap->buf),
+	    uap->count, procseg, retval));
+}
+
+/*
+ * readlinkat() — read a symlink's target with the path resolved
+ * relative to uap->fd.
+ */
+int
+readlinkat(proc_t p, struct readlinkat_args *uap, int32_t *retval)
+{
+	enum uio_seg procseg;
+
+	procseg = IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32;
+	return (readlinkat_internal(vfs_context_current(), uap->fd, uap->path,
+	    procseg, uap->buf, uap->bufsize, procseg, retval));
+}
+
+
/*
* Change file flags.
*/
* translated to EPERM before being returned.
*/
static int
-chmod2(vfs_context_t ctx, vnode_t vp, struct vnode_attr *vap)
+chmod_vnode(vfs_context_t ctx, vnode_t vp, struct vnode_attr *vap)
{
kauth_action_t action;
int error;
*
* Returns: 0 Success
* namei:??? [anything namei can return]
- * chmod2:??? [anything chmod2 can return]
+ * chmod_vnode:??? [anything chmod_vnode can return]
*/
static int
-chmod1(vfs_context_t ctx, user_addr_t path, struct vnode_attr *vap)
+chmodat(vfs_context_t ctx, user_addr_t path, struct vnode_attr *vap,
+ int fd, int flag, enum uio_seg segflg)
{
struct nameidata nd;
- int error;
+ int follow, error;
- NDINIT(&nd, LOOKUP, OP_SETATTR, FOLLOW | AUDITVNPATH1,
- UIO_USERSPACE, path, ctx);
- if ((error = namei(&nd)))
+ follow = (flag & AT_SYMLINK_NOFOLLOW) ? NOFOLLOW : FOLLOW;
+ NDINIT(&nd, LOOKUP, OP_SETATTR, follow | AUDITVNPATH1,
+ segflg, path, ctx);
+ if ((error = nameiat(&nd, fd)))
return (error);
- error = chmod2(ctx, nd.ni_vp, vap);
+ error = chmod_vnode(ctx, nd.ni_vp, vap);
vnode_put(nd.ni_vp);
nameidone(&nd);
return(error);
KAUTH_DEBUG("CHMOD - setting ACL with %d entries", va.va_acl->acl_entrycount);
}
- error = chmod1(vfs_context_current(), uap->path, &va);
+ error = chmodat(vfs_context_current(), uap->path, &va, AT_FDCWD, 0,
+ UIO_USERSPACE);
if (xsecdst != NULL)
kauth_filesec_free(xsecdst);
/*
* Returns: 0 Success
- * chmod1:??? [anything chmod1 can return]
+ * chmodat:??? [anything chmodat can return]
*/
-int
-chmod(__unused proc_t p, struct chmod_args *uap, __unused int32_t *retval)
+static int
+fchmodat_internal(vfs_context_t ctx, user_addr_t path, int mode, int fd,
+ int flag, enum uio_seg segflg)
{
struct vnode_attr va;
VATTR_INIT(&va);
- VATTR_SET(&va, va_mode, uap->mode & ALLPERMS);
+ VATTR_SET(&va, va_mode, mode & ALLPERMS);
+
+ return (chmodat(ctx, path, &va, fd, flag, segflg));
+}
+
+/*
+ * chmod() — legacy path-based interface; equivalent to
+ * fchmodat(AT_FDCWD, path, mode, 0).
+ */
+int
+chmod(__unused proc_t p, struct chmod_args *uap, __unused int32_t *retval)
+{
+	return (fchmodat_internal(vfs_context_current(), uap->path, uap->mode,
+	    AT_FDCWD, 0, UIO_USERSPACE));
+}
+
+/*
+ * fchmodat() — chmod relative to uap->fd; only AT_SYMLINK_NOFOLLOW is
+ * accepted in 'flag', anything else is EINVAL.
+ */
+int
+fchmodat(__unused proc_t p, struct fchmodat_args *uap, __unused int32_t *retval)
+{
+	if (uap->flag & ~AT_SYMLINK_NOFOLLOW)
+		return (EINVAL);
-	return(chmod1(vfs_context_current(), uap->path, &va));
+	return (fchmodat_internal(vfs_context_current(), uap->path, uap->mode,
+	    uap->fd, uap->flag, UIO_USERSPACE));
}
/*
}
AUDIT_ARG(vnpath, vp, ARG_VNODE1);
- error = chmod2(vfs_context_current(), vp, vap);
+ error = chmod_vnode(vfs_context_current(), vp, vap);
(void)vnode_put(vp);
file_drop(fd);
case USER_ADDR_NULL:
VATTR_SET(&va, va_acl, NULL);
break;
+ case CAST_USER_ADDR_T((void *)1): /* _FILESEC_REMOVE_ACL */
+ VATTR_SET(&va, va_acl, NULL);
+ break;
+ /* not being set */
case CAST_USER_ADDR_T(-1):
break;
default:
*/
/* ARGSUSED */
static int
-chown1(vfs_context_t ctx, struct chown_args *uap, __unused int32_t *retval, int follow)
+fchownat_internal(vfs_context_t ctx, int fd, user_addr_t path, uid_t uid,
+ gid_t gid, int flag, enum uio_seg segflg)
{
vnode_t vp;
struct vnode_attr va;
int error;
struct nameidata nd;
+ int follow;
kauth_action_t action;
- AUDIT_ARG(owner, uap->uid, uap->gid);
+ AUDIT_ARG(owner, uid, gid);
- NDINIT(&nd, LOOKUP, OP_SETATTR,
- (follow ? FOLLOW : 0) | NOTRIGGER | AUDITVNPATH1,
- UIO_USERSPACE, uap->path, ctx);
- error = namei(&nd);
+ follow = (flag & AT_SYMLINK_NOFOLLOW) ? NOFOLLOW : FOLLOW;
+ NDINIT(&nd, LOOKUP, OP_SETATTR, follow | AUDITVNPATH1, segflg,
+ path, ctx);
+ error = nameiat(&nd, fd);
if (error)
return (error);
vp = nd.ni_vp;
nameidone(&nd);
VATTR_INIT(&va);
- if (uap->uid != VNOVAL)
- VATTR_SET(&va, va_uid, uap->uid);
- if (uap->gid != VNOVAL)
- VATTR_SET(&va, va_gid, uap->gid);
+ if (uid != (uid_t)VNOVAL)
+ VATTR_SET(&va, va_uid, uid);
+ if (gid != (gid_t)VNOVAL)
+ VATTR_SET(&va, va_gid, gid);
#if CONFIG_MACF
- error = mac_vnode_check_setowner(ctx, vp, uap->uid, uap->gid);
+ error = mac_vnode_check_setowner(ctx, vp, uid, gid);
if (error)
goto out;
#endif
*/
if (error == EACCES)
error = EPERM;
-
+
vnode_put(vp);
return (error);
}
int
-chown(__unused proc_t p, struct chown_args *uap, int32_t *retval)
+chown(__unused proc_t p, struct chown_args *uap, __unused int32_t *retval)
+{
+	/* follows symlinks; equivalent to fchownat(AT_FDCWD, path, uid, gid, 0) */
+	return (fchownat_internal(vfs_context_current(), AT_FDCWD, uap->path,
+	    uap->uid, uap->gid, 0, UIO_USERSPACE));
+}
+
+/* lchown() — like chown(), but does not follow a trailing symlink */
+int
+lchown(__unused proc_t p, struct lchown_args *uap, __unused int32_t *retval)
{
-	return chown1(vfs_context_current(), uap, retval, 0);
+	return (fchownat_internal(vfs_context_current(), AT_FDCWD, uap->path,
+	    uap->owner, uap->group, AT_SYMLINK_NOFOLLOW, UIO_USERSPACE));
}
+/*
+ * fchownat() — chown relative to uap->fd; only AT_SYMLINK_NOFOLLOW is
+ * accepted in 'flag', anything else is EINVAL.
+ */
int
-lchown(__unused proc_t p, struct lchown_args *uap, int32_t *retval)
+fchownat(__unused proc_t p, struct fchownat_args *uap, __unused int32_t *retval)
{
-	/* Argument list identical, but machine generated; cast for chown1() */
-	return chown1(vfs_context_current(), (struct chown_args *)uap, retval, 0);
+	if (uap->flag & ~AT_SYMLINK_NOFOLLOW)
+		return (EINVAL);
+
+	return (fchownat_internal(vfs_context_current(), uap->fd, uap->path,
+	    uap->uid, uap->gid, uap->flag, UIO_USERSPACE));
}
/*
return(error);
}
- if (fp->f_fglob->fg_type == DTYPE_PSXSHM) {
+ switch (FILEGLOB_DTYPE(fp->f_fglob)) {
+ case DTYPE_PSXSHM:
error = pshm_truncate(p, fp, uap->fd, uap->length, retval);
goto out;
- }
- if (fp->f_fglob->fg_type != DTYPE_VNODE) {
+ case DTYPE_VNODE:
+ break;
+ default:
error = EINVAL;
goto out;
}
return(EINVAL);
}
- NDINIT(&fromnd, LOOKUP, OP_COPYFILE, SAVESTART | AUDITVNPATH1,
+ NDINIT(&fromnd, LOOKUP, OP_COPYFILE, AUDITVNPATH1,
UIO_USERSPACE, uap->from, ctx);
if ((error = namei(&fromnd)))
return (error);
out1:
vnode_put(fvp);
- if (fromnd.ni_startdir)
- vnode_put(fromnd.ni_startdir);
nameidone(&fromnd);
if (error == -1)
* or both not be directories. If target is a directory, it must be empty.
*/
/* ARGSUSED */
-int
-rename(__unused proc_t p, struct rename_args *uap, __unused int32_t *retval)
+static int
+renameat_internal(vfs_context_t ctx, int fromfd, user_addr_t from,
+ int tofd, user_addr_t to, int segflg, vfs_rename_flags_t flags)
{
vnode_t tvp, tdvp;
vnode_t fvp, fdvp;
struct nameidata *fromnd, *tond;
- vfs_context_t ctx = vfs_context_current();
int error;
int do_retry;
+ int retry_count;
int mntrename;
int need_event;
const char *oname = NULL;
holding_mntlock = 0;
do_retry = 0;
+ retry_count = 0;
retry:
fvp = tvp = NULL;
fdvp = tdvp = NULL;
mntrename = FALSE;
NDINIT(fromnd, DELETE, OP_UNLINK, WANTPARENT | AUDITVNPATH1,
- UIO_USERSPACE, uap->from, ctx);
+ segflg, from, ctx);
fromnd->ni_flag = NAMEI_COMPOUNDRENAME;
-
+
NDINIT(tond, RENAME, OP_RENAME, WANTPARENT | AUDITVNPATH2 | CN_NBMOUNTLOOK,
- UIO_USERSPACE, uap->to, ctx);
+ segflg, to, ctx);
tond->ni_flag = NAMEI_COMPOUNDRENAME;
-
+
continue_lookup:
if ((fromnd->ni_flag & NAMEI_CONTLOOKUP) != 0 || !continuing) {
- if ( (error = namei(fromnd)) )
+ if ( (error = nameiat(fromnd, fromfd)) )
goto out1;
fdvp = fromnd->ni_dvp;
fvp = fromnd->ni_vp;
}
if ((tond->ni_flag & NAMEI_CONTLOOKUP) != 0 || !continuing) {
- if ( (error = namei(tond)) ) {
+ if ( (error = nameiat(tond, tofd)) ) {
/*
* Translate error code for rename("dir1", "dir2/.").
*/
- if (error == EISDIR && fvp->v_type == VDIR)
+ if (error == EISDIR && fvp->v_type == VDIR)
error = EINVAL;
goto out1;
}
tdvp = tond->ni_dvp;
tvp = tond->ni_vp;
- }
+ }
batched = vnode_compound_rename_available(fdvp);
if (!fvp) {
- /*
+ /*
* Claim: this check will never reject a valid rename.
* For success, either fvp must be on the same mount as tdvp, or fvp must sit atop a vnode on the same mount as tdvp.
* Suppose fdvp and tdvp are not on the same mount.
- * If fvp is on the same mount as tdvp, then fvp is not on the same mount as fdvp, so fvp is the root of its filesystem. If fvp is the root,
+ * If fvp is on the same mount as tdvp, then fvp is not on the same mount as fdvp, so fvp is the root of its filesystem. If fvp is the root,
* then you can't move it to within another dir on the same mountpoint.
* If fvp sits atop a vnode on the same mount as fdvp, then that vnode must be part of the same mount as fdvp, which is a contradiction.
*
error = vn_authorize_rename(fdvp, fvp, &fromnd->ni_cnd, tdvp, tvp, &tond->ni_cnd, ctx, NULL);
if (error) {
if (error == ENOENT) {
- /*
- * We encountered a race where after doing the namei, tvp stops
- * being valid. If so, simply re-drive the rename call from the
- * top.
- */
- do_retry = 1;
+ assert(retry_count < MAX_AUTHORIZE_ENOENT_RETRIES);
+ if (retry_count < MAX_AUTHORIZE_ENOENT_RETRIES) {
+ /*
+ * We encountered a race where after doing the namei, tvp stops
+ * being valid. If so, simply re-drive the rename call from the
+ * top.
+ */
+ do_retry = 1;
+ retry_count += 1;
+ }
}
goto out1;
}
*/
if (fvp == tvp) {
int pathconf_val;
-
+
/*
* Note: if _PC_CASE_SENSITIVE selector isn't supported,
* then assume that this file system is case sensitive.
if (VNOP_PATHCONF(fvp, _PC_CASE_SENSITIVE, &pathconf_val, ctx) != 0 ||
pathconf_val != 0) {
goto out1;
- }
+ }
}
/*
((fvp->v_mount->mnt_flag & (MNT_UNION | MNT_ROOTFS)) == 0) &&
(fvp->v_mount->mnt_vnodecovered != NULLVP)) {
vnode_t coveredvp;
-
+
/* switch fvp to the covered vnode */
coveredvp = fvp->v_mount->mnt_vnodecovered;
if ( (vnode_getwithref(coveredvp)) ) {
* source. NOTE: Then the target is unlocked going into vnop_rename,
* so not to cause locking problems. There is a single reference on tvp.
*
- * NOTE - that fvp == tvp also occurs if they are hard linked and
+ * NOTE - that fvp == tvp also occurs if they are hard linked and
* that correct behaviour then is just to return success without doing
* anything.
*
} else {
/*
* when we dropped the iocounts to take
- * the lock, we allowed the identity of
+ * the lock, we allowed the identity of
* the various vnodes to change... if they did,
* we may no longer be dealing with a rename
* that reshapes the tree... once we're holding
skipped_lookup:
#if CONFIG_FSE
need_event = need_fsevent(FSE_RENAME, fdvp);
- if (need_event) {
+ if (need_event) {
if (fvp) {
get_fse_info(fvp, &from_finfo, ctx);
} else {
}
to_len = safe_getpath(tdvp, tond->ni_cnd.cn_nameptr, to_name, MAXPATHLEN, &to_truncated);
- }
-
+ }
+#if CONFIG_SECLUDED_RENAME
+ if (flags & VFS_SECLUDE_RENAME) {
+ fromnd->ni_cnd.cn_flags |= CN_SECLUDE_RENAME;
+ }
+#else
+ #pragma unused(flags)
+#endif
error = vn_rename(fdvp, &fvp, &fromnd->ni_cnd, fvap,
tdvp, &tvp, &tond->ni_cnd, tvap,
0, ctx);
fromnd->ni_vp = fvp;
tond->ni_vp = tvp;
-
+
goto continue_lookup;
}
/*
- * We may encounter a race in the VNOP where the destination didn't
- * exist when we did the namei, but it does by the time we go and
+ * We may encounter a race in the VNOP where the destination didn't
+ * exist when we did the namei, but it does by the time we go and
* try to create the entry. In this case, we should re-drive this rename
* call from the top again. Currently, only HFS bubbles out ERECYCLE,
- * but other filesystems susceptible to this race could return it, too.
+ * but other filesystems susceptible to this race could return it, too.
*/
if (error == ERECYCLE) {
do_retry = 1;
}
+ /*
+ * For compound VNOPs, the authorization callback may return
+ * ENOENT in case of racing hardlink lookups hitting the name
+ * cache, redrive the lookup.
+ */
+ if (batched && error == ENOENT) {
+ assert(retry_count < MAX_AUTHORIZE_ENOENT_RETRIES);
+ if (retry_count < MAX_AUTHORIZE_ENOENT_RETRIES) {
+ do_retry = 1;
+ retry_count += 1;
+ }
+ }
+
goto out1;
- }
-
- /* call out to allow 3rd party notification of rename.
+ }
+
+ /* call out to allow 3rd party notification of rename.
* Ignore result of kauth_authorize_fileop call.
*/
- kauth_authorize_fileop(vfs_context_ucred(ctx),
- KAUTH_FILEOP_RENAME,
+ kauth_authorize_fileop(vfs_context_ucred(ctx),
+ KAUTH_FILEOP_RENAME,
(uintptr_t)from_name, (uintptr_t)to_name);
#if CONFIG_FSE
}
}
#endif /* CONFIG_FSE */
-
+
/*
* update filesystem's mount point data
*/
}
MALLOC_ZONE(tobuf, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
- error = copyinstr(uap->to, tobuf, MAXPATHLEN, &len);
+ if (UIO_SEG_IS_USER_SPACE(segflg))
+ error = copyinstr(to, tobuf, MAXPATHLEN, &len);
+ else
+ error = copystr((void *)to, tobuf, MAXPATHLEN, &len);
if (!error) {
/* find current mount point prefix */
pathend = &mp->mnt_vfsstat.f_mntonname[0];
vfs_unbusy(mp);
}
/*
- * fix up name & parent pointers. note that we first
+ * fix up name & parent pointers. note that we first
* check that fvp has the same name/parent pointers it
* had before the rename call... this is a 'weak' check
* at best...
vnode_put(fvp);
vnode_put(fdvp);
}
-
-
+
/*
* If things changed after we did the namei, then we will re-drive
* this rename call from the top.
return (error);
}
+int
+rename(__unused proc_t p, struct rename_args *uap, __unused int32_t *retval)
+{
+ return (renameat_internal(vfs_context_current(), AT_FDCWD, uap->from,
+ AT_FDCWD, uap->to, UIO_USERSPACE, 0));
+}
+
+#if CONFIG_SECLUDED_RENAME
+int rename_ext(__unused proc_t p, struct rename_ext_args *uap, __unused int32_t *retval)
+{
+ return renameat_internal(
+ vfs_context_current(),
+ AT_FDCWD, uap->from,
+ AT_FDCWD, uap->to,
+ UIO_USERSPACE, uap->flags);
+}
+#endif
+
+int
+renameat(__unused proc_t p, struct renameat_args *uap, __unused int32_t *retval)
+{
+ return (renameat_internal(vfs_context_current(), uap->fromfd, uap->from,
+ uap->tofd, uap->to, UIO_USERSPACE, 0));
+}
+
/*
* Make a directory file.
*
*/
/* ARGSUSED */
static int
-mkdir1(vfs_context_t ctx, user_addr_t path, struct vnode_attr *vap)
+mkdir1at(vfs_context_t ctx, user_addr_t path, struct vnode_attr *vap, int fd,
+ enum uio_seg segflg)
{
vnode_t vp, dvp;
int error;
struct nameidata nd;
AUDIT_ARG(mode, vap->va_mode);
- NDINIT(&nd, CREATE, OP_MKDIR, LOCKPARENT | AUDITVNPATH1, UIO_USERSPACE,
+ NDINIT(&nd, CREATE, OP_MKDIR, LOCKPARENT | AUDITVNPATH1, segflg,
path, ctx);
nd.ni_cnd.cn_flags |= WILLBEDIR;
nd.ni_flag = NAMEI_COMPOUNDMKDIR;
continue_lookup:
- error = namei(&nd);
+ error = nameiat(&nd, fd);
if (error)
return (error);
dvp = nd.ni_dvp;
vp = nd.ni_vp;
- if (vp != NULL) {
- error = EEXIST;
- goto out;
- }
-
+ if (vp != NULL) {
+ error = EEXIST;
+ goto out;
+ }
+
batched = vnode_compound_mkdir_available(dvp);
VATTR_SET(vap, va_type, VDIR);
-
+
/*
* XXX
* Don't authorize in VFS for compound VNOP.... mkdir -p today assumes that it will
* EACCESS/EPERM--so if we authorize for mkdir on "/" for "mkdir -p /tmp/foo/bar/baz"
* it will fail in a spurious manner. Need to figure out if this is valid behavior.
*/
- if ((error = vn_authorize_mkdir(dvp, &nd.ni_cnd, vap, ctx, NULL)) != 0) {
+ if ((error = vn_authorize_mkdir(dvp, &nd.ni_cnd, vap, ctx, NULL)) != 0) {
if (error == EACCES || error == EPERM) {
int error2;
vnode_put(dvp);
dvp = NULLVP;
- /*
- * Try a lookup without "NAMEI_COMPOUNDVNOP" to make sure we return EEXIST
+ /*
+ * Try a lookup without "NAMEI_COMPOUNDVNOP" to make sure we return EEXIST
* rather than EACCESS if the target exists.
*/
- NDINIT(&nd, LOOKUP, OP_MKDIR, AUDITVNPATH1, UIO_USERSPACE,
- path, ctx);
- error2 = namei(&nd);
+ NDINIT(&nd, LOOKUP, OP_MKDIR, AUDITVNPATH1, segflg,
+ path, ctx);
+ error2 = nameiat(&nd, fd);
if (error2) {
goto out;
} else {
}
/*
- * make the directory
+ * make the directory
*/
- if ((error = vn_create(dvp, &vp, &nd, vap, 0, 0, NULL, ctx)) != 0) {
+ if ((error = vn_create(dvp, &vp, &nd, vap, 0, 0, NULL, ctx)) != 0) {
if (error == EKEEPLOOKING) {
nd.ni_vp = vp;
goto continue_lookup;
}
- goto out;
+ goto out;
}
-
+
// Make sure the name & parent pointers are hooked up
if (vp->v_name == NULL)
update_flags |= VNODE_UPDATE_NAME;
if (vp)
vnode_put(vp);
- if (dvp)
+ if (dvp)
vnode_put(dvp);
return (error);
*
* Parameters: p Process requesting to create the directory
* uap User argument descriptor (see below)
- * retval (ignored)
+ * retval (ignored)
*
* Indirect: uap->path Path of directory to create
* uap->mode Access permissions to set
* uap->xsecurity ACL to set
- *
+ *
* Returns: 0 Success
* !0 Not success
*
return ciferror;
VATTR_INIT(&va);
- VATTR_SET(&va, va_mode, (uap->mode & ACCESSPERMS) & ~p->p_fd->fd_cmask);
+ VATTR_SET(&va, va_mode, (uap->mode & ACCESSPERMS) & ~p->p_fd->fd_cmask);
if (xsecdst != NULL)
VATTR_SET(&va, va_acl, &xsecdst->fsec_acl);
- ciferror = mkdir1(vfs_context_current(), uap->path, &va);
+ ciferror = mkdir1at(vfs_context_current(), uap->path, &va, AT_FDCWD,
+ UIO_USERSPACE);
if (xsecdst != NULL)
kauth_filesec_free(xsecdst);
return ciferror;
struct vnode_attr va;
VATTR_INIT(&va);
- VATTR_SET(&va, va_mode, (uap->mode & ACCESSPERMS) & ~p->p_fd->fd_cmask);
+ VATTR_SET(&va, va_mode, (uap->mode & ACCESSPERMS) & ~p->p_fd->fd_cmask);
- return(mkdir1(vfs_context_current(), uap->path, &va));
+ return (mkdir1at(vfs_context_current(), uap->path, &va, AT_FDCWD,
+ UIO_USERSPACE));
}
-/*
- * Remove a directory file.
- */
-/* ARGSUSED */
int
-rmdir(__unused proc_t p, struct rmdir_args *uap, __unused int32_t *retval)
+mkdirat(proc_t p, struct mkdirat_args *uap, __unused int32_t *retval)
+{
+ struct vnode_attr va;
+
+ VATTR_INIT(&va);
+ VATTR_SET(&va, va_mode, (uap->mode & ACCESSPERMS) & ~p->p_fd->fd_cmask);
+
+ return(mkdir1at(vfs_context_current(), uap->path, &va, uap->fd,
+ UIO_USERSPACE));
+}
+
+static int
+rmdirat_internal(vfs_context_t ctx, int fd, user_addr_t dirpath,
+ enum uio_seg segflg)
{
vnode_t vp, dvp;
int error;
int has_listeners = 0;
int need_event = 0;
int truncated = 0;
- vfs_context_t ctx = vfs_context_current();
#if CONFIG_FSE
struct vnode_attr va;
#endif /* CONFIG_FSE */
struct vnode_attr *vap = NULL;
+ int restart_count = 0;
int batched;
int restart_flag;
- /*
+ /*
* This loop exists to restart rmdir in the unlikely case that two
* processes are simultaneously trying to remove the same directory
* containing orphaned appleDouble files.
*/
do {
NDINIT(&nd, DELETE, OP_RMDIR, LOCKPARENT | AUDITVNPATH1,
- UIO_USERSPACE, uap->path, ctx);
+ segflg, dirpath, ctx);
nd.ni_flag = NAMEI_COMPOUNDRMDIR;
continue_lookup:
restart_flag = 0;
vap = NULL;
- error = namei(&nd);
+ error = nameiat(&nd, fd);
if (error)
return (error);
* I do not think that check was valid, because if we retry
* and all dirents are gone, the directory could legitimately
* be recycled but still be present in a situation where we would
- * have had permission to delete. Therefore, we won't make
+ * have had permission to delete. Therefore, we won't make
* an effort to preserve that check now that we may not have a
* vp here.
*/
if (!batched) {
error = vn_authorize_rmdir(dvp, vp, &nd.ni_cnd, ctx, NULL);
if (error) {
+ if (error == ENOENT) {
+ assert(restart_count < MAX_AUTHORIZE_ENOENT_RETRIES);
+ if (restart_count < MAX_AUTHORIZE_ENOENT_RETRIES) {
+ restart_flag = 1;
+ restart_count += 1;
+ }
+ }
goto out;
}
}
if (error == EKEEPLOOKING) {
goto continue_lookup;
+ } else if (batched && error == ENOENT) {
+ assert(restart_count < MAX_AUTHORIZE_ENOENT_RETRIES);
+ if (restart_count < MAX_AUTHORIZE_ENOENT_RETRIES) {
+ /*
+ * For compound VNOPs, the authorization callback
+				 * may return ENOENT in case of racing hard link lookups,
+ * redrive the lookup.
+ */
+ restart_flag = 1;
+ restart_count += 1;
+ goto out;
+ }
}
-
+#if CONFIG_APPLEDOUBLE
/*
* Special case to remove orphaned AppleDouble
* files. I don't like putting this in the kernel,
/*
- * Assuming everything went well, we will try the RMDIR again
+ * Assuming everything went well, we will try the RMDIR again
*/
if (!error)
error = vn_rmdir(dvp, &vp, &nd, vap, ctx);
}
-
+#endif /* CONFIG_APPLEDOUBLE */
/*
- * Call out to allow 3rd party notification of delete.
+ * Call out to allow 3rd party notification of delete.
* Ignore result of kauth_authorize_fileop call.
*/
if (!error) {
if (has_listeners) {
- kauth_authorize_fileop(vfs_context_ucred(ctx),
- KAUTH_FILEOP_DELETE,
+ kauth_authorize_fileop(vfs_context_ucred(ctx),
+ KAUTH_FILEOP_DELETE,
(uintptr_t)vp,
(uintptr_t)path);
}
nameidone(&nd);
vnode_put(dvp);
- if (vp)
+ if (vp)
vnode_put(vp);
if (restart_flag == 0) {
}
+/*
+ * Remove a directory file.
+ */
+/* ARGSUSED */
+int
+rmdir(__unused proc_t p, struct rmdir_args *uap, __unused int32_t *retval)
+{
+ return (rmdirat_internal(vfs_context_current(), AT_FDCWD,
+ CAST_USER_ADDR_T(uap->path), UIO_USERSPACE));
+}
+
/* Get direntry length padded to 8 byte alignment */
#define DIRENT64_LEN(namlen) \
((sizeof(struct direntry) + (namlen) - (MAXPATHLEN-1) + 7) & ~7)
-static errno_t
+errno_t
vnode_readdir64(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
int *numdirent, vfs_context_t ctxp)
{
size_t bufsize;
void * bufptr;
uio_t auio;
- struct direntry entry64;
+ struct direntry *entry64;
struct dirent *dep;
int bytesread;
int error;
dep = (struct dirent *)bufptr;
bytesread = bufsize - uio_resid(auio);
+ MALLOC(entry64, struct direntry *, sizeof(struct direntry),
+ M_TEMP, M_WAITOK);
/*
* Convert all the entries and copy them out to user's buffer.
*/
while (error == 0 && (char *)dep < ((char *)bufptr + bytesread)) {
+ size_t enbufsize = DIRENT64_LEN(dep->d_namlen);
+
+ bzero(entry64, enbufsize);
/* Convert a dirent to a dirent64. */
- entry64.d_ino = dep->d_ino;
- entry64.d_seekoff = 0;
- entry64.d_reclen = DIRENT64_LEN(dep->d_namlen);
- entry64.d_namlen = dep->d_namlen;
- entry64.d_type = dep->d_type;
- bcopy(dep->d_name, entry64.d_name, dep->d_namlen + 1);
+ entry64->d_ino = dep->d_ino;
+ entry64->d_seekoff = 0;
+ entry64->d_reclen = enbufsize;
+ entry64->d_namlen = dep->d_namlen;
+ entry64->d_type = dep->d_type;
+ bcopy(dep->d_name, entry64->d_name, dep->d_namlen + 1);
/* Move to next entry. */
dep = (struct dirent *)((char *)dep + dep->d_reclen);
/* Copy entry64 to user's buffer. */
- error = uiomove((caddr_t)&entry64, entry64.d_reclen, uio);
+ error = uiomove((caddr_t)entry64, entry64->d_reclen, uio);
}
/* Update the real offset using the offset we got from VNOP_READDIR. */
}
uio_free(auio);
FREE(bufptr, M_TEMP);
+ FREE(entry64, M_TEMP);
return (error);
}
}
+#define GETDIRENTRIES_MAXBUFSIZE (128 * 1024 * 1024U)
+
/*
* Read a block of directory entries in a file system independent format.
*/
goto out;
}
+ if (bufsize > GETDIRENTRIES_MAXBUFSIZE)
+ bufsize = GETDIRENTRIES_MAXBUFSIZE;
+
#if CONFIG_MACF
error = mac_file_check_change_offset(vfs_context_ucred(&context), fp->f_fglob);
if (error)
goto out;
}
- if ((vp->v_flag & VROOT) && (vp->v_mount->mnt_flag & MNT_UNION)) {
+ if ((vp->v_mount->mnt_flag & MNT_UNION)) {
struct vnode *tvp = vp;
- vp = vp->v_mount->mnt_vnodecovered;
- vnode_getwithref(vp);
- vnode_ref(vp);
- fp->f_fglob->fg_data = (caddr_t) vp;
- fp->f_fglob->fg_offset = 0;
- vnode_rele(tvp);
- vnode_put(tvp);
- goto unionread;
- }
- }
+ if (lookup_traverse_union(tvp, &vp, &context) == 0) {
+ vnode_ref(vp);
+ fp->f_fglob->fg_data = (caddr_t) vp;
+ fp->f_fglob->fg_offset = 0;
+ vnode_rele(tvp);
+ vnode_put(tvp);
+ goto unionread;
+ }
+ vp = tvp;
+ }
+ }
vnode_put(vp);
if (offset) {
/*
-* Obtain attribute information on objects in a directory while enumerating
-* the directory. This call does not yet support union mounted directories.
-* TO DO
-* 1.union mounted directories.
-*/
-
+ * Obtain attribute information on objects in a directory while enumerating
+ * the directory.
+ */
/* ARGSUSED */
int
getdirentriesattr (proc_t p, struct getdirentriesattr_args *uap, int32_t *retval)
struct fileproc *fp;
uio_t auio = NULL;
int spacetype = proc_is64bit(p) ? UIO_USERSPACE64 : UIO_USERSPACE32;
- uint32_t count;
+ uint32_t count, savecount;
uint32_t newstate;
int error, eofflag;
uint32_t loff;
if ((error = copyin(uap->count, (caddr_t)&count, sizeof(count)))) {
return(error);
}
+ savecount = count;
if ( (error = fp_getfvp(p, fd, &fp, &vp)) ) {
return (error);
}
AUDIT_ARG(vnpath, vp, ARG_VNODE1);
+unionread:
if (vp->v_type != VDIR) {
(void)vnode_put(vp);
error = EINVAL;
/* set up the uio structure which will contain the users return buffer */
loff = fp->f_fglob->fg_offset;
- auio = uio_createwithbuffer(1, loff, spacetype, UIO_READ,
- &uio_buf[0], sizeof(uio_buf));
+ auio = uio_createwithbuffer(1, loff, spacetype, UIO_READ, &uio_buf[0], sizeof(uio_buf));
uio_addiov(auio, uap->buffer, uap->buffersize);
/*
/* Believe it or not, uap->options only has 32-bits of valid
* info, so truncate before extending again */
- error = VNOP_READDIRATTR(vp, &attributelist, auio,
- count,
- (u_long)(uint32_t)uap->options, &newstate, &eofflag,
- &count, ctx);
+
+ error = VNOP_READDIRATTR(vp, &attributelist, auio, count,
+ (u_long)(uint32_t)uap->options, &newstate, &eofflag, &count, ctx);
+ }
+
+ if (error) {
+ (void) vnode_put(vp);
+ goto out;
+ }
+
+ /*
+ * If we've got the last entry of a directory in a union mount
+ * then reset the eofflag and pretend there's still more to come.
+ * The next call will again set eofflag and the buffer will be empty,
+ * so traverse to the underlying directory and do the directory
+ * read there.
+ */
+ if (eofflag && vp->v_mount->mnt_flag & MNT_UNION) {
+ if (uio_resid(auio) < (user_ssize_t) uap->buffersize) { // Got some entries
+ eofflag = 0;
+ } else { // Empty buffer
+ struct vnode *tvp = vp;
+ if (lookup_traverse_union(tvp, &vp, ctx) == 0) {
+ vnode_ref_ext(vp, fp->f_fglob->fg_flag & O_EVTONLY, 0);
+ fp->f_fglob->fg_data = (caddr_t) vp;
+ fp->f_fglob->fg_offset = 0; // reset index for new dir
+ count = savecount;
+ vnode_rele_internal(tvp, fp->f_fglob->fg_flag & O_EVTONLY, 0, 0);
+ vnode_put(tvp);
+ goto unionread;
+ }
+ vp = tvp;
+ }
}
+
(void)vnode_put(vp);
if (error)
file_drop(fd);
return (error); /* return error earlier, an retval of 0 or 1 now */
-} /* end of getdirentryattr system call */
+} /* end of getdirentriesattr system call */
/*
* Exchange data between two files
goto out;
}
- /*
- * if the two vnodes are not files, return an error.
- */
- if ( (vnode_isreg(svp) == 0) || (vnode_isreg(fvp) == 0) ) {
+ /* If they're not files, return an error */
+ if ( (vnode_isreg(fvp) == 0) || (vnode_isreg(svp) == 0)) {
error = EINVAL;
goto out;
}
-
#if CONFIG_MACF
error = mac_vnode_check_exchangedata(ctx,
fvp, svp);
return (error);
}
+/*
+ * Return (in MB) the amount of freespace on the given vnode's volume.
+ */
+uint32_t freespace_mb(vnode_t vp);
+
+uint32_t
+freespace_mb(vnode_t vp)
+{
+ vfs_update_vfsstat(vp->v_mount, vfs_context_current(), VFS_USER_EVENT);
+ return (((uint64_t)vp->v_mount->mnt_vfsstat.f_bavail *
+ vp->v_mount->mnt_vfsstat.f_bsize) >> 20);
+}
+
#if CONFIG_SEARCHFS
/* ARGSUSED */
int
searchfs(proc_t p, struct searchfs_args *uap, __unused int32_t *retval)
{
- vnode_t vp;
- int error=0;
+ vnode_t vp, tvp;
+ int i, error=0;
int fserror = 0;
struct nameidata nd;
struct user64_fssearchblock searchblock;
vfs_context_t ctx = vfs_context_current();
char uio_buf[ UIO_SIZEOF(1) ];
- /* Start by copying in fsearchblock paramater list */
+ /* Start by copying in fsearchblock parameter list */
if (IS_64BIT_PROCESS(p)) {
error = copyin(uap->searchblock, (caddr_t) &searchblock, sizeof(searchblock));
timelimit.tv_sec = searchblock.timelimit.tv_sec;
/* It all has to do into local memory and it's not that big so we might as well put it all together. */
/* Searchparams1 shall be first so we might as well use that to hold the base address of the allocated*/
/* block. */
-
+ /* */
+ /* NOTE: we allocate an extra 8 bytes to account for the difference in size of the searchstate */
+ /* due to the changes in rdar://problem/12438273. That way if a 3rd party file system */
+	/*         assumes the size is still 556 bytes, it will continue to work */
+
mallocsize = searchblock.sizeofsearchparams1 + searchblock.sizeofsearchparams2 +
- sizeof(struct attrlist) + sizeof(struct searchstate);
+ sizeof(struct attrlist) + sizeof(struct searchstate) + (2*sizeof(uint32_t));
MALLOC(searchparams1, void *, mallocsize, M_TEMP, M_WAITOK);
if ((error = copyin(uap->state, (caddr_t) state, sizeof(struct searchstate))))
goto freeandexit;
+ /*
+ * When searching a union mount, need to set the
+ * start flag at the first call on each layer to
+ * reset state for the new volume.
+ */
+ if (uap->options & SRCHFS_START)
+ state->ss_union_layer = 0;
+ else
+ uap->options |= state->ss_union_flags;
+ state->ss_union_flags = 0;
/*
* Because searchparams1 and searchparams2 may contain an ATTR_CMN_NAME search parameter,
}
/* set up the uio structure which will contain the users return buffer */
- auio = uio_createwithbuffer(1, 0, spacetype, UIO_READ,
- &uio_buf[0], sizeof(uio_buf));
- uio_addiov(auio, searchblock.returnbuffer, searchblock.returnbuffersize);
+ auio = uio_createwithbuffer(1, 0, spacetype, UIO_READ, &uio_buf[0], sizeof(uio_buf));
+ uio_addiov(auio, searchblock.returnbuffer, searchblock.returnbuffersize);
nameiflags = 0;
if ((uap->options & FSOPT_NOFOLLOW) == 0) nameiflags |= FOLLOW;
error = namei(&nd);
if (error)
goto freeandexit;
-
+ vp = nd.ni_vp;
nameidone(&nd);
- vp = nd.ni_vp;
+
+ /*
+ * Switch to the root vnode for the volume
+ */
+ error = VFS_ROOT(vnode_mount(vp), &tvp, ctx);
+ vnode_put(vp);
+ if (error)
+ goto freeandexit;
+ vp = tvp;
+
+ /*
+ * If it's a union mount, the path lookup takes
+ * us to the top layer. But we may need to descend
+ * to a lower layer. For non-union mounts the layer
+ * is always zero.
+ */
+ for (i = 0; i < (int) state->ss_union_layer; i++) {
+ if ((vp->v_mount->mnt_flag & MNT_UNION) == 0)
+ break;
+ tvp = vp;
+ vp = vp->v_mount->mnt_vnodecovered;
+ if (vp == NULL) {
+ vnode_put(tvp);
+ error = ENOENT;
+ goto freeandexit;
+ }
+ vnode_getwithref(vp);
+ vnode_put(tvp);
+ }
#if CONFIG_MACF
error = mac_vnode_check_searchfs(ctx, vp, &searchblock.searchattrs);
/*
* If searchblock.maxmatches == 0, then skip the search. This has happened
- * before and sometimes the underlyning code doesnt deal with it well.
+	 * before and sometimes the underlying code doesn't deal with it well.
*/
if (searchblock.maxmatches == 0) {
nummatches = 0;
}
/*
- Allright, we have everything we need, so lets make that call.
-
- We keep special track of the return value from the file system:
- EAGAIN is an acceptable error condition that shouldn't keep us
- from copying out any results...
+	 * Alright, we have everything we need, so let's make that call.
+ *
+ * We keep special track of the return value from the file system:
+ * EAGAIN is an acceptable error condition that shouldn't keep us
+ * from copying out any results...
*/
fserror = VNOP_SEARCHFS(vp,
- searchparams1,
- searchparams2,
- &searchblock.searchattrs,
- (u_long)searchblock.maxmatches,
- &timelimit,
- returnattrs,
- &nummatches,
- (u_long)uap->scriptcode,
- (u_long)uap->options,
- auio,
- state,
- ctx);
+ searchparams1,
+ searchparams2,
+ &searchblock.searchattrs,
+ (u_long)searchblock.maxmatches,
+ &timelimit,
+ returnattrs,
+ &nummatches,
+ (u_long)uap->scriptcode,
+ (u_long)uap->options,
+ auio,
+ (struct searchstate *) &state->ss_fsstate,
+ ctx);
+ /*
+ * If it's a union mount we need to be called again
+ * to search the mounted-on filesystem.
+ */
+ if ((vp->v_mount->mnt_flag & MNT_UNION) && fserror == 0) {
+ state->ss_union_flags = SRCHFS_START;
+ state->ss_union_layer++; // search next layer down
+ fserror = EAGAIN;
+ }
+
saveandexit:
vnode_put(vp);
if ((error = copyout((caddr_t) state, uap->state, sizeof(struct searchstate))) != 0)
goto freeandexit;
- if ((error = suulong(uap->nummatches, (uint64_t)nummatches)) != 0)
+ if ((error = suulong(uap->nummatches, (uint64_t)nummatches)) != 0)
goto freeandexit;
error = fserror;
#define NSPACE_ITEM_NSPACE_EVENT 0x0040
#define NSPACE_ITEM_SNAPSHOT_EVENT 0x0080
-#define NSPACE_ITEM_TRACK_EVENT 0x0100
-#define NSPACE_ITEM_ALL_EVENT_TYPES (NSPACE_ITEM_NSPACE_EVENT | NSPACE_ITEM_SNAPSHOT_EVENT | NSPACE_ITEM_TRACK_EVENT)
+#define NSPACE_ITEM_ALL_EVENT_TYPES (NSPACE_ITEM_NSPACE_EVENT | NSPACE_ITEM_SNAPSHOT_EVENT)
//#pragma optimization_level 0
typedef enum {
NSPACE_HANDLER_NSPACE = 0,
NSPACE_HANDLER_SNAPSHOT = 1,
- NSPACE_HANDLER_TRACK = 2,
NSPACE_HANDLER_COUNT,
} nspace_type_t;
nspace_handler_t nspace_handlers[NSPACE_HANDLER_COUNT];
+/* namespace fsctl functions */
+static int nspace_flags_matches_handler(uint32_t event_flags, nspace_type_t nspace_type);
+static int nspace_item_flags_for_type(nspace_type_t nspace_type);
+static int nspace_open_flags_for_type(nspace_type_t nspace_type);
+static nspace_type_t nspace_type_for_op(uint64_t op);
+static int nspace_is_special_process(struct proc *proc);
+static int vn_open_with_vp(vnode_t vp, int fmode, vfs_context_t ctx);
+static int wait_for_namespace_event(namespace_handler_data *nhd, nspace_type_t nspace_type);
+static int validate_namespace_args (int is64bit, int size);
+static int process_namespace_fsctl(nspace_type_t nspace_type, int is64bit, u_int size, caddr_t data);
+
+
static inline int nspace_flags_matches_handler(uint32_t event_flags, nspace_type_t nspace_type)
{
switch(nspace_type) {
return (event_flags & NSPACE_ITEM_ALL_EVENT_TYPES) == NSPACE_ITEM_NSPACE_EVENT;
case NSPACE_HANDLER_SNAPSHOT:
return (event_flags & NSPACE_ITEM_ALL_EVENT_TYPES) == NSPACE_ITEM_SNAPSHOT_EVENT;
- case NSPACE_HANDLER_TRACK:
- return (event_flags & NSPACE_ITEM_ALL_EVENT_TYPES) == NSPACE_ITEM_TRACK_EVENT;
default:
printf("nspace_flags_matches_handler: invalid type %u\n", (int)nspace_type);
return 0;
return NSPACE_ITEM_NSPACE_EVENT;
case NSPACE_HANDLER_SNAPSHOT:
return NSPACE_ITEM_SNAPSHOT_EVENT;
- case NSPACE_HANDLER_TRACK:
- return NSPACE_ITEM_TRACK_EVENT;
default:
printf("nspace_item_flags_for_type: invalid type %u\n", (int)nspace_type);
return 0;
case NSPACE_HANDLER_NSPACE:
return FREAD | FWRITE | O_EVTONLY;
case NSPACE_HANDLER_SNAPSHOT:
- case NSPACE_HANDLER_TRACK:
return FREAD | O_EVTONLY;
default:
printf("nspace_open_flags_for_type: invalid type %u\n", (int)nspace_type);
return NSPACE_HANDLER_NSPACE;
case NAMESPACE_HANDLER_SNAPSHOT_EVENT:
return NSPACE_HANDLER_SNAPSHOT;
- case NAMESPACE_HANDLER_TRACK_EVENT:
- return NSPACE_HANDLER_TRACK;
default:
printf("nspace_type_for_op: invalid op mask %llx\n", op & NAMESPACE_HANDLER_EVENT_TYPE_MASK);
return NSPACE_HANDLER_NSPACE;
//
if (vp != nspace_items[i].vp) {
nspace_items[i].vp = vp;
- nspace_items[i].arg = arg;
+ nspace_items[i].arg = (arg == NSPACE_REARM_NO_ARG) ? NULL : arg; // arg is {NULL, true, uio *} - only pass uio thru to the user
nspace_items[i].op = op;
nspace_items[i].vid = vnode_vid(vp);
nspace_items[i].flags = NSPACE_ITEM_NEW;
}
static int
-wait_for_namespace_event(namespace_handler_info_ext *nhi, nspace_type_t nspace_type)
+wait_for_namespace_event(namespace_handler_data *nhd, nspace_type_t nspace_type)
{
int i, error=0, unblock=0;
task_t curtask;
int32_t indx, fmode;
struct proc *p = current_proc();
vfs_context_t ctx = vfs_context_current();
-
+ struct vnode_attr va;
+
+
+ /*
+ * Use vnode pointer to acquire a file descriptor for
+ * hand-off to userland
+ */
fmode = nspace_open_flags_for_type(nspace_type);
-
error = vnode_getwithvid(nspace_items[i].vp, nspace_items[i].vid);
if (error) {
unblock = 1;
}
fp->f_fglob->fg_flag = fmode;
- fp->f_fglob->fg_type = DTYPE_VNODE;
fp->f_fglob->fg_ops = &vnops;
fp->f_fglob->fg_data = (caddr_t)nspace_items[i].vp;
proc_fdlock(p);
procfdtbl_releasefd(p, indx, NULL);
fp_drop(p, indx, fp, 1);
- proc_fdunlock(p);
-
- error = copyout(&nspace_items[i].token, nhi->token, sizeof(uint32_t));
- error = copyout(&nspace_items[i].op, nhi->flags, sizeof(uint64_t));
- error = copyout(&indx, nhi->fdptr, sizeof(uint32_t));
- if (nhi->infoptr) {
+ proc_fdunlock(p);
+
+ /*
+ * All variants of the namespace handler struct support these three fields:
+ * token, flags, and the FD pointer
+ */
+ error = copyout(&nspace_items[i].token, nhd->token, sizeof(uint32_t));
+ error = copyout(&nspace_items[i].op, nhd->flags, sizeof(uint64_t));
+ error = copyout(&indx, nhd->fdptr, sizeof(uint32_t));
+
+ /*
+ * Handle optional fields:
+ * extended version support an info ptr (offset, length), and the
+ *
+ * namedata version supports a unique per-link object ID
+ *
+ */
+ if (nhd->infoptr) {
uio_t uio = (uio_t)nspace_items[i].arg;
uint64_t u_offset, u_length;
u_offset = 0;
u_length = 0;
}
- error = copyout(&u_offset, nhi->infoptr, sizeof(uint64_t));
- error = copyout(&u_length, nhi->infoptr+sizeof(uint64_t), sizeof(uint64_t));
+ error = copyout(&u_offset, nhd->infoptr, sizeof(uint64_t));
+ error = copyout(&u_length, nhd->infoptr+sizeof(uint64_t), sizeof(uint64_t));
}
+
+ if (nhd->objid) {
+ VATTR_INIT(&va);
+ VATTR_WANTED(&va, va_linkid);
+ error = vnode_getattr(nspace_items[i].vp, &va, ctx);
+ if (error == 0 ) {
+ uint64_t linkid = 0;
+ if (VATTR_IS_SUPPORTED (&va, va_linkid)) {
+ linkid = (uint64_t)va.va_linkid;
+ }
+ error = copyout (&linkid, nhd->objid, sizeof(uint64_t));
+ }
+ }
+
if (error) {
vn_close(nspace_items[i].vp, fmode, ctx);
fp_free(p, indx, fp);
return error;
}
+static inline int validate_namespace_args (int is64bit, int size) {
+
+ if (is64bit) {
+ /* Must be one of these */
+ if (size == sizeof(user64_namespace_handler_info)) {
+ goto sizeok;
+ }
+ if (size == sizeof(user64_namespace_handler_info_ext)) {
+ goto sizeok;
+ }
+ if (size == sizeof(user64_namespace_handler_data)) {
+ goto sizeok;
+ }
+ return EINVAL;
+ }
+ else {
+ /* 32 bit -- must be one of these */
+ if (size == sizeof(user32_namespace_handler_info)) {
+ goto sizeok;
+ }
+ if (size == sizeof(user32_namespace_handler_info_ext)) {
+ goto sizeok;
+ }
+ if (size == sizeof(user32_namespace_handler_data)) {
+ goto sizeok;
+ }
+ return EINVAL;
+ }
+
+sizeok:
+
+ return 0;
+
+}
+/*
+ * process_namespace_fsctl: common entry point for the namespace, snapshot
+ * and tracked handler fsctls.  Validates the userland buffer size for the
+ * caller's bitness, copies the userland pointers into a kernel-only
+ * namespace_handler_data struct, and then blocks in
+ * wait_for_namespace_event() until a namespace event is delivered (or an
+ * error occurs).
+ */
static int process_namespace_fsctl(nspace_type_t nspace_type, int is64bit, u_int size, caddr_t data)
{
	int error = 0;
-	namespace_handler_info_ext nhi;
+	namespace_handler_data nhd;
-	if (nspace_type == NSPACE_HANDLER_SNAPSHOT && (snapshot_timestamp == 0 || snapshot_timestamp == ~0)) {
+	/* Pre-zero so any optional fields not supplied by userland stay 0 */
+	bzero (&nhd, sizeof(namespace_handler_data));
+
+	if (nspace_type == NSPACE_HANDLER_SNAPSHOT &&
+		(snapshot_timestamp == 0 || snapshot_timestamp == ~0)) {
		return EINVAL;
	}
		return error;
	}
-	if ( (is64bit && size != sizeof(user64_namespace_handler_info) && size != sizeof(user64_namespace_handler_info_ext))
-		|| (is64bit == 0 && size != sizeof(user32_namespace_handler_info) && size != sizeof(user32_namespace_handler_info_ext))) {
-
-		// either you're 64-bit and passed a 64-bit struct or
-		// you're 32-bit and passed a 32-bit struct. otherwise
-		// it's not ok.
-		return EINVAL;
+	error = validate_namespace_args (is64bit, size);
+	if (error) {
+		return error;
	}
+	/* Copy in the userland pointers into our kernel-only struct */
+
	if (is64bit) {
-		nhi.token = (user_addr_t)((user64_namespace_handler_info *)data)->token;
-		nhi.flags = (user_addr_t)((user64_namespace_handler_info *)data)->flags;
-		nhi.fdptr = (user_addr_t)((user64_namespace_handler_info *)data)->fdptr;
-		if (size == sizeof(user64_namespace_handler_info_ext)) {
-			nhi.infoptr = (user_addr_t)((user64_namespace_handler_info_ext *)data)->infoptr;
-		} else {
-			nhi.infoptr = 0;
+		/* 64 bit userland structures */
+		nhd.token = (user_addr_t)((user64_namespace_handler_info *)data)->token;
+		nhd.flags = (user_addr_t)((user64_namespace_handler_info *)data)->flags;
+		nhd.fdptr = (user_addr_t)((user64_namespace_handler_info *)data)->fdptr;
+
+		/* If the size is greater than the standard info struct, add in extra fields */
+		if (size > (sizeof(user64_namespace_handler_info))) {
+			if (size >= (sizeof(user64_namespace_handler_info_ext))) {
+				nhd.infoptr = (user_addr_t)((user64_namespace_handler_info_ext *)data)->infoptr;
+			}
+			if (size == (sizeof(user64_namespace_handler_data))) {
+				nhd.objid = (user_addr_t)((user64_namespace_handler_data*)data)->objid;
+			}
+			/* Otherwise the fields were pre-zeroed when we did the bzero above. */
		}
-	} else {
-		nhi.token = CAST_USER_ADDR_T(((user32_namespace_handler_info *)data)->token);
-		nhi.flags = CAST_USER_ADDR_T(((user32_namespace_handler_info *)data)->flags);
-		nhi.fdptr = CAST_USER_ADDR_T(((user32_namespace_handler_info *)data)->fdptr);
-		if (size == sizeof(user32_namespace_handler_info_ext)) {
-			nhi.infoptr = CAST_USER_ADDR_T(((user32_namespace_handler_info_ext *)data)->infoptr);
-		} else {
-			nhi.infoptr = 0;
+	}
+	else {
+		/* 32 bit userland structures */
+		nhd.token = CAST_USER_ADDR_T(((user32_namespace_handler_info *)data)->token);
+		nhd.flags = CAST_USER_ADDR_T(((user32_namespace_handler_info *)data)->flags);
+		nhd.fdptr = CAST_USER_ADDR_T(((user32_namespace_handler_info *)data)->fdptr);
+
+		if (size > (sizeof(user32_namespace_handler_info))) {
+			if (size >= (sizeof(user32_namespace_handler_info_ext))) {
+				nhd.infoptr = CAST_USER_ADDR_T(((user32_namespace_handler_info_ext *)data)->infoptr);
+			}
+			if (size == (sizeof(user32_namespace_handler_data))) {
+				nhd.objid = (user_addr_t)((user32_namespace_handler_data*)data)->objid;
+			}
+			/* Otherwise the fields were pre-zeroed when we did the bzero above. */
		}
	}
-	return wait_for_namespace_event(&nhi, nspace_type);
+	return wait_for_namespace_event(&nhd, nspace_type);
}
/*
is64bit = proc_is64bit(p);
memp = NULL;
+
+
+ /*
+ * ensure the buffer is large enough for underlying calls
+ */
+#ifndef HFSIOC_GETPATH
+ typedef char pn_t[MAXPATHLEN];
+#define HFSIOC_GETPATH _IOWR('h', 13, pn_t)
+#endif
+
+#ifndef HFS_GETPATH
+#define HFS_GETPATH IOCBASECMD(HFSIOC_GETPATH)
+#endif
+ if (IOCBASECMD(cmd) == HFS_GETPATH) {
+		/* Force size to MAXPATHLEN regardless of the user-supplied size */
+ size = MAXPATHLEN;
+ }
+
if (size > sizeof (stkbuf)) {
if ((memp = (caddr_t)kalloc(size)) == 0) return ENOMEM;
data = memp;
if (cmd & IOC_IN) {
if (size) {
error = copyin(udata, data, size);
- if (error) goto FSCtl_Exit;
+ if (error) {
+ if (memp) {
+ kfree (memp, size);
+ }
+ return error;
+ }
} else {
if (is64bit) {
*(user_addr_t *)data = udata;
}
/* Check to see if it's a generic command */
- if (IOCBASECMD(cmd) == FSCTL_SYNC_VOLUME) {
- mount_t mp = vp->v_mount;
- int arg = *(uint32_t*)data;
-
- /* record vid of vp so we can drop it below. */
- uint32_t vvid = vp->v_id;
+ switch (IOCBASECMD(cmd)) {
- /*
- * Then grab mount_iterref so that we can release the vnode.
- * Without this, a thread may call vnode_iterate_prepare then
- * get into a deadlock because we've never released the root vp
- */
- error = mount_iterref (mp, 0);
- if (error) {
- goto FSCtl_Exit;
- }
- vnode_put(vp);
+ case FSCTL_SYNC_VOLUME: {
+ mount_t mp = vp->v_mount;
+ int arg = *(uint32_t*)data;
- /* issue the sync for this volume */
- (void)sync_callback(mp, (arg & FSCTL_SYNC_WAIT) ? &arg : NULL);
-
- /*
- * Then release the mount_iterref once we're done syncing; it's not
- * needed for the VNOP_IOCTL below
- */
- mount_iterdrop(mp);
+ /* record vid of vp so we can drop it below. */
+ uint32_t vvid = vp->v_id;
- if (arg & FSCTL_SYNC_FULLSYNC) {
- /* re-obtain vnode iocount on the root vp, if possible */
- error = vnode_getwithvid (vp, vvid);
- if (error == 0) {
- error = VNOP_IOCTL(vp, F_FULLFSYNC, (caddr_t)NULL, 0, ctx);
- vnode_put (vp);
+ /*
+ * Then grab mount_iterref so that we can release the vnode.
+ * Without this, a thread may call vnode_iterate_prepare then
+ * get into a deadlock because we've never released the root vp
+ */
+ error = mount_iterref (mp, 0);
+ if (error) {
+ break;
}
- }
- /* mark the argument VP as having been released */
- *arg_vp = NULL;
-
- } else if (IOCBASECMD(cmd) == FSCTL_SET_PACKAGE_EXTS) {
- user_addr_t ext_strings;
- uint32_t num_entries;
- uint32_t max_width;
-
- if ( (is64bit && size != sizeof(user64_package_ext_info))
- || (is64bit == 0 && size != sizeof(user32_package_ext_info))) {
-
- // either you're 64-bit and passed a 64-bit struct or
- // you're 32-bit and passed a 32-bit struct. otherwise
- // it's not ok.
- error = EINVAL;
- goto FSCtl_Exit;
- }
+ vnode_put(vp);
- if (is64bit) {
- ext_strings = ((user64_package_ext_info *)data)->strings;
- num_entries = ((user64_package_ext_info *)data)->num_entries;
- max_width = ((user64_package_ext_info *)data)->max_width;
- } else {
- ext_strings = CAST_USER_ADDR_T(((user32_package_ext_info *)data)->strings);
- num_entries = ((user32_package_ext_info *)data)->num_entries;
- max_width = ((user32_package_ext_info *)data)->max_width;
- }
-
- error = set_package_extensions_table(ext_strings, num_entries, max_width);
+ /* issue the sync for this volume */
+ (void)sync_callback(mp, (arg & FSCTL_SYNC_WAIT) ? &arg : NULL);
- } else if (IOCBASECMD(cmd) == FSCTL_WAIT_FOR_SYNC) {
- error = tsleep((caddr_t)&sync_wait_time, PVFS|PCATCH, "sync-wait", 0);
- if (error == 0) {
- *(uint32_t *)data = (uint32_t)sync_wait_time;
- error = 0;
- } else {
- error *= -1;
- }
-
- } else if (IOCBASECMD(cmd) == FSCTL_NAMESPACE_HANDLER_GET) {
- error = process_namespace_fsctl(NSPACE_HANDLER_NSPACE, is64bit, size, data);
- } else if (IOCBASECMD(cmd) == FSCTL_OLD_SNAPSHOT_HANDLER_GET) {
- error = process_namespace_fsctl(NSPACE_HANDLER_SNAPSHOT, is64bit, size, data);
- } else if (IOCBASECMD(cmd) == FSCTL_SNAPSHOT_HANDLER_GET_EXT) {
- error = process_namespace_fsctl(NSPACE_HANDLER_SNAPSHOT, is64bit, size, data);
- } else if (IOCBASECMD(cmd) == FSCTL_TRACKED_HANDLER_GET) {
- error = process_namespace_fsctl(NSPACE_HANDLER_TRACK, is64bit, size, data);
- } else if (IOCBASECMD(cmd) == FSCTL_NAMESPACE_HANDLER_UPDATE) {
- uint32_t token, val;
- int i;
-
- if ((error = suser(kauth_cred_get(), &(p->p_acflag)))) {
- goto FSCtl_Exit;
- }
-
- if (!nspace_is_special_process(p)) {
- error = EINVAL;
- goto FSCtl_Exit;
+ /*
+ * Then release the mount_iterref once we're done syncing; it's not
+ * needed for the VNOP_IOCTL below
+ */
+ mount_iterdrop(mp);
+
+ if (arg & FSCTL_SYNC_FULLSYNC) {
+ /* re-obtain vnode iocount on the root vp, if possible */
+ error = vnode_getwithvid (vp, vvid);
+ if (error == 0) {
+ error = VNOP_IOCTL(vp, F_FULLFSYNC, (caddr_t)NULL, 0, ctx);
+ vnode_put (vp);
+ }
+ }
+ /* mark the argument VP as having been released */
+ *arg_vp = NULL;
}
+ break;
- token = ((uint32_t *)data)[0];
- val = ((uint32_t *)data)[1];
+ case FSCTL_SET_PACKAGE_EXTS: {
+ user_addr_t ext_strings;
+ uint32_t num_entries;
+ uint32_t max_width;
- lck_mtx_lock(&nspace_handler_lock);
+ if ( (is64bit && size != sizeof(user64_package_ext_info))
+ || (is64bit == 0 && size != sizeof(user32_package_ext_info))) {
- for(i=0; i < MAX_NSPACE_ITEMS; i++) {
- if (nspace_items[i].token == token) {
+ // either you're 64-bit and passed a 64-bit struct or
+ // you're 32-bit and passed a 32-bit struct. otherwise
+ // it's not ok.
+ error = EINVAL;
break;
}
+
+ if (is64bit) {
+ ext_strings = ((user64_package_ext_info *)data)->strings;
+ num_entries = ((user64_package_ext_info *)data)->num_entries;
+ max_width = ((user64_package_ext_info *)data)->max_width;
+ } else {
+ ext_strings = CAST_USER_ADDR_T(((user32_package_ext_info *)data)->strings);
+ num_entries = ((user32_package_ext_info *)data)->num_entries;
+ max_width = ((user32_package_ext_info *)data)->max_width;
+ }
+ error = set_package_extensions_table(ext_strings, num_entries, max_width);
}
+ break;
- if (i >= MAX_NSPACE_ITEMS) {
- error = ENOENT;
- } else {
- //
- // if this bit is set, when resolve_nspace_item() times out
- // it will loop and go back to sleep.
- //
- nspace_items[i].flags |= NSPACE_ITEM_RESET_TIMER;
+ /* namespace handlers */
+ case FSCTL_NAMESPACE_HANDLER_GET: {
+ error = process_namespace_fsctl(NSPACE_HANDLER_NSPACE, is64bit, size, data);
}
+ break;
- lck_mtx_unlock(&nspace_handler_lock);
+ /* Snapshot handlers */
+ case FSCTL_OLD_SNAPSHOT_HANDLER_GET: {
+ error = process_namespace_fsctl(NSPACE_HANDLER_SNAPSHOT, is64bit, size, data);
+ }
+ break;
- if (error) {
- printf("nspace-handler-update: did not find token %u\n", token);
+ case FSCTL_SNAPSHOT_HANDLER_GET_EXT: {
+ error = process_namespace_fsctl(NSPACE_HANDLER_SNAPSHOT, is64bit, size, data);
}
+ break;
- } else if (IOCBASECMD(cmd) == FSCTL_NAMESPACE_HANDLER_UNBLOCK) {
- uint32_t token, val;
- int i;
+ case FSCTL_NAMESPACE_HANDLER_UPDATE: {
+ uint32_t token, val;
+ int i;
- if ((error = suser(kauth_cred_get(), &(p->p_acflag)))) {
- goto FSCtl_Exit;
- }
+ if ((error = suser(kauth_cred_get(), &(p->p_acflag)))) {
+ break;
+ }
- if (!nspace_is_special_process(p)) {
- error = EINVAL;
- goto FSCtl_Exit;
- }
+ if (!nspace_is_special_process(p)) {
+ error = EINVAL;
+ break;
+ }
- token = ((uint32_t *)data)[0];
- val = ((uint32_t *)data)[1];
+ token = ((uint32_t *)data)[0];
+ val = ((uint32_t *)data)[1];
- lck_mtx_lock(&nspace_handler_lock);
+ lck_mtx_lock(&nspace_handler_lock);
- for(i=0; i < MAX_NSPACE_ITEMS; i++) {
- if (nspace_items[i].token == token) {
+ for(i=0; i < MAX_NSPACE_ITEMS; i++) {
+ if (nspace_items[i].token == token) {
+ break; /* exit for loop, not case stmt */
+ }
+ }
+
+ if (i >= MAX_NSPACE_ITEMS) {
+ error = ENOENT;
+ } else {
+ //
+ // if this bit is set, when resolve_nspace_item() times out
+ // it will loop and go back to sleep.
+ //
+ nspace_items[i].flags |= NSPACE_ITEM_RESET_TIMER;
+ }
+
+ lck_mtx_unlock(&nspace_handler_lock);
+
+ if (error) {
+ printf("nspace-handler-update: did not find token %u\n", token);
+ }
+ }
+ break;
+
+ case FSCTL_NAMESPACE_HANDLER_UNBLOCK: {
+ uint32_t token, val;
+ int i;
+
+ if ((error = suser(kauth_cred_get(), &(p->p_acflag)))) {
break;
}
- }
- if (i >= MAX_NSPACE_ITEMS) {
- printf("nspace-handler-unblock: did not find token %u\n", token);
- error = ENOENT;
- } else {
- if (val == 0 && nspace_items[i].vp) {
- vnode_lock_spin(nspace_items[i].vp);
- nspace_items[i].vp->v_flag &= ~VNEEDSSNAPSHOT;
- vnode_unlock(nspace_items[i].vp);
+ if (!nspace_is_special_process(p)) {
+ error = EINVAL;
+ break;
}
- nspace_items[i].vp = NULL;
- nspace_items[i].arg = NULL;
- nspace_items[i].op = 0;
- nspace_items[i].vid = 0;
- nspace_items[i].flags = NSPACE_ITEM_DONE;
- nspace_items[i].token = 0;
+ token = ((uint32_t *)data)[0];
+ val = ((uint32_t *)data)[1];
- wakeup((caddr_t)&(nspace_items[i].vp));
- }
+ lck_mtx_lock(&nspace_handler_lock);
- lck_mtx_unlock(&nspace_handler_lock);
+ for(i=0; i < MAX_NSPACE_ITEMS; i++) {
+ if (nspace_items[i].token == token) {
+ break; /* exit for loop, not case statement */
+ }
+ }
- } else if (IOCBASECMD(cmd) == FSCTL_NAMESPACE_HANDLER_CANCEL) {
- uint32_t token, val;
- int i;
+ if (i >= MAX_NSPACE_ITEMS) {
+ printf("nspace-handler-unblock: did not find token %u\n", token);
+ error = ENOENT;
+ } else {
+ if (val == 0 && nspace_items[i].vp) {
+ vnode_lock_spin(nspace_items[i].vp);
+ nspace_items[i].vp->v_flag &= ~VNEEDSSNAPSHOT;
+ vnode_unlock(nspace_items[i].vp);
+ }
- if ((error = suser(kauth_cred_get(), &(p->p_acflag)))) {
- goto FSCtl_Exit;
- }
+ nspace_items[i].vp = NULL;
+ nspace_items[i].arg = NULL;
+ nspace_items[i].op = 0;
+ nspace_items[i].vid = 0;
+ nspace_items[i].flags = NSPACE_ITEM_DONE;
+ nspace_items[i].token = 0;
- if (!nspace_is_special_process(p)) {
- error = EINVAL;
- goto FSCtl_Exit;
- }
+ wakeup((caddr_t)&(nspace_items[i].vp));
+ }
- token = ((uint32_t *)data)[0];
- val = ((uint32_t *)data)[1];
+ lck_mtx_unlock(&nspace_handler_lock);
+ }
+ break;
- lck_mtx_lock(&nspace_handler_lock);
+ case FSCTL_NAMESPACE_HANDLER_CANCEL: {
+ uint32_t token, val;
+ int i;
- for(i=0; i < MAX_NSPACE_ITEMS; i++) {
- if (nspace_items[i].token == token) {
+ if ((error = suser(kauth_cred_get(), &(p->p_acflag)))) {
break;
}
- }
- if (i >= MAX_NSPACE_ITEMS) {
- printf("nspace-handler-cancel: did not find token %u\n", token);
- error = ENOENT;
- } else {
- if (nspace_items[i].vp) {
- vnode_lock_spin(nspace_items[i].vp);
- nspace_items[i].vp->v_flag &= ~VNEEDSSNAPSHOT;
- vnode_unlock(nspace_items[i].vp);
+ if (!nspace_is_special_process(p)) {
+ error = EINVAL;
+ break;
}
- nspace_items[i].vp = NULL;
- nspace_items[i].arg = NULL;
- nspace_items[i].vid = 0;
- nspace_items[i].token = val;
- nspace_items[i].flags &= ~NSPACE_ITEM_PROCESSING;
- nspace_items[i].flags |= NSPACE_ITEM_CANCELLED;
+ token = ((uint32_t *)data)[0];
+ val = ((uint32_t *)data)[1];
- wakeup((caddr_t)&(nspace_items[i].vp));
- }
+ lck_mtx_lock(&nspace_handler_lock);
- lck_mtx_unlock(&nspace_handler_lock);
- } else if (IOCBASECMD(cmd) == FSCTL_NAMESPACE_HANDLER_SET_SNAPSHOT_TIME) {
- if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) {
- goto FSCtl_Exit;
- }
+ for(i=0; i < MAX_NSPACE_ITEMS; i++) {
+ if (nspace_items[i].token == token) {
+ break; /* exit for loop, not case stmt */
+ }
+ }
- // we explicitly do not do the namespace_handler_proc check here
+ if (i >= MAX_NSPACE_ITEMS) {
+ printf("nspace-handler-cancel: did not find token %u\n", token);
+ error = ENOENT;
+ } else {
+ if (nspace_items[i].vp) {
+ vnode_lock_spin(nspace_items[i].vp);
+ nspace_items[i].vp->v_flag &= ~VNEEDSSNAPSHOT;
+ vnode_unlock(nspace_items[i].vp);
+ }
- lck_mtx_lock(&nspace_handler_lock);
- snapshot_timestamp = ((uint32_t *)data)[0];
- wakeup(&nspace_item_idx);
- lck_mtx_unlock(&nspace_handler_lock);
- printf("nspace-handler-set-snapshot-time: %d\n", (int)snapshot_timestamp);
+ nspace_items[i].vp = NULL;
+ nspace_items[i].arg = NULL;
+ nspace_items[i].vid = 0;
+ nspace_items[i].token = val;
+ nspace_items[i].flags &= ~NSPACE_ITEM_PROCESSING;
+ nspace_items[i].flags |= NSPACE_ITEM_CANCELLED;
+
+ wakeup((caddr_t)&(nspace_items[i].vp));
+ }
+
+ lck_mtx_unlock(&nspace_handler_lock);
+ }
+ break;
+
+ case FSCTL_NAMESPACE_HANDLER_SET_SNAPSHOT_TIME: {
+ if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) {
+ break;
+ }
+
+ // we explicitly do not do the namespace_handler_proc check here
+
+ lck_mtx_lock(&nspace_handler_lock);
+ snapshot_timestamp = ((uint32_t *)data)[0];
+ wakeup(&nspace_item_idx);
+ lck_mtx_unlock(&nspace_handler_lock);
+ printf("nspace-handler-set-snapshot-time: %d\n", (int)snapshot_timestamp);
+
+ }
+ break;
+
+ case FSCTL_NAMESPACE_ALLOW_DMG_SNAPSHOT_EVENTS:
+ {
+ if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) {
+ break;
+ }
+
+ lck_mtx_lock(&nspace_handler_lock);
+ nspace_allow_virtual_devs = ((uint32_t *)data)[0];
+ lck_mtx_unlock(&nspace_handler_lock);
+ printf("nspace-snapshot-handler will%s allow events on disk-images\n",
+ nspace_allow_virtual_devs ? "" : " NOT");
+ error = 0;
- } else if (IOCBASECMD(cmd) == FSCTL_NAMESPACE_ALLOW_DMG_SNAPSHOT_EVENTS) {
- if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) {
- goto FSCtl_Exit;
}
+ break;
- lck_mtx_lock(&nspace_handler_lock);
- nspace_allow_virtual_devs = ((uint32_t *)data)[0];
- lck_mtx_unlock(&nspace_handler_lock);
- printf("nspace-snapshot-handler will%s allow events on disk-images\n",
- nspace_allow_virtual_devs ? "" : " NOT");
- error = 0;
-
- } else if (IOCBASECMD(cmd) == FSCTL_SET_FSTYPENAME_OVERRIDE) {
- if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) {
- goto FSCtl_Exit;
- }
- if (vp->v_mount) {
- mount_lock(vp->v_mount);
- if (data[0] != 0) {
- strlcpy(&vp->v_mount->fstypename_override[0], data, MFSTYPENAMELEN);
- vp->v_mount->mnt_kern_flag |= MNTK_TYPENAME_OVERRIDE;
- if (vfs_isrdonly(vp->v_mount) && strcmp(vp->v_mount->fstypename_override, "mtmfs") == 0) {
- vp->v_mount->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
- vp->v_mount->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
- }
- } else {
- if (strcmp(vp->v_mount->fstypename_override, "mtmfs") == 0) {
- vp->v_mount->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
+ case FSCTL_SET_FSTYPENAME_OVERRIDE:
+ {
+ if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) {
+ break;
+ }
+ if (vp->v_mount) {
+ mount_lock(vp->v_mount);
+ if (data[0] != 0) {
+ strlcpy(&vp->v_mount->fstypename_override[0], data, MFSTYPENAMELEN);
+ vp->v_mount->mnt_kern_flag |= MNTK_TYPENAME_OVERRIDE;
+ if (vfs_isrdonly(vp->v_mount) && strcmp(vp->v_mount->fstypename_override, "mtmfs") == 0) {
+ vp->v_mount->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
+ vp->v_mount->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
+ }
+ } else {
+ if (strcmp(vp->v_mount->fstypename_override, "mtmfs") == 0) {
+ vp->v_mount->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
+ }
+ vp->v_mount->mnt_kern_flag &= ~MNTK_TYPENAME_OVERRIDE;
+ vp->v_mount->fstypename_override[0] = '\0';
}
- vp->v_mount->mnt_kern_flag &= ~MNTK_TYPENAME_OVERRIDE;
- vp->v_mount->fstypename_override[0] = '\0';
+ mount_unlock(vp->v_mount);
}
- mount_unlock(vp->v_mount);
}
- } else {
- /* Invoke the filesystem-specific code */
- error = VNOP_IOCTL(vp, IOCBASECMD(cmd), data, options, ctx);
- }
-
-
+ break;
+
+ default: {
+ /* Invoke the filesystem-specific code */
+ error = VNOP_IOCTL(vp, IOCBASECMD(cmd), data, options, ctx);
+ }
+
+ } /* end switch stmt */
+
/*
- * Copy any data to user, size was
+ * if no errors, copy any data to user. Size was
* already set and checked above.
*/
if (error == 0 && (cmd & IOC_OUT) && size)
error = copyout(data, udata, size);
-FSCtl_Exit:
- if (memp) kfree(memp, size);
+ if (memp) {
+ kfree(memp, size);
+ }
return error;
}
/* Get the vnode for the file we are getting info on: */
if ((error = file_vnode(uap->fd, &vp)))
- goto done;
+ return error;
fd = uap->fd;
if ((error = vnode_getwithref(vp))) {
- goto done;
+ file_drop(fd);
+ return error;
}
#if CONFIG_MACF
- error = mac_mount_check_fsctl(ctx, vnode_mount(vp), uap->cmd);
- if (error) {
- goto done;
+ if ((error = mac_mount_check_fsctl(ctx, vnode_mount(vp), uap->cmd))) {
+ file_drop(fd);
+ vnode_put(vp);
+ return error;
}
#endif
error = fsctl_internal(p, &vp, uap->cmd, (user_addr_t)uap->data, uap->options, ctx);
-done:
- if (fd != -1)
- file_drop(fd);
+ file_drop(fd);
- if (vp)
+	/* validate vp; fsctl_internal() can drop the iocount and reset vp to NULL */
+ if (vp) {
vnode_put(vp);
+ }
+
return error;
}
/* end of fsctl system call */
-/*
- * An in-kernel sync for power management to call.
- */
-__private_extern__ int
-sync_internal(void)
-{
- int error;
-
- struct sync_args data;
-
- int retval[2];
-
-
- error = sync(current_proc(), &data, &retval[0]);
-
-
- return (error);
-} /* end of sync_internal call */
-
-
/*
* Retrieve the data of an extended attribute.
*/
return (EINVAL);
if ((error = copyinstr(uap->attrname, attrname, sizeof(attrname), &namelen) != 0)) {
- return (error);
+ if (error == EPERM) {
+ /* if the string won't fit in attrname, copyinstr emits EPERM */
+ return (ENAMETOOLONG);
+ }
+ /* Otherwise return the default error from copyinstr to detect ERANGE, etc */
+ return error;
}
if (xattr_protected(attrname))
return(EPERM);
if (uap->options & (XATTR_NOSECURITY | XATTR_NODEFAULT))
return (EINVAL);
- nameiflags = ((uap->options & XATTR_NOFOLLOW) ? 0 : FOLLOW) | NOTRIGGER;
+ nameiflags = (uap->options & XATTR_NOFOLLOW) ? 0 : FOLLOW;
NDINIT(&nd, LOOKUP, OP_LISTXATTR, nameiflags, spacetype, uap->path, ctx);
if ((error = namei(&nd))) {
return (error);
return (error);
}
+/*
+ * fsgetpath_internal: resolve a <volfs id, object id> pair into an
+ * absolute path.
+ *
+ * ctx      - caller's vfs context
+ * volfs_id - volfs id identifying the target mount
+ * objid    - file id of the object (2 denotes the volume root)
+ * bufsize  - size of `buf`; must not exceed PAGE_SIZE
+ * buf      - kernel buffer that receives the NUL-terminated path
+ * pathlen  - out: length of the path written into `buf`
+ *
+ * Returns 0 on success, otherwise an errno value.
+ */
+static int fsgetpath_internal(
+	vfs_context_t ctx, int volfs_id, uint64_t objid,
+	vm_size_t bufsize, caddr_t buf, int *pathlen)
+{
+	int error;
+	struct mount *mp = NULL;
+	vnode_t vp;
+	int length;
+	int bpflags;
+
+	if (bufsize > PAGE_SIZE) {
+		return (EINVAL);
+	}
+
+	if (buf == NULL) {
+		return (ENOMEM);
+	}
+
+	if ((mp = mount_lookupby_volfsid(volfs_id, 1)) == NULL) {
+		/* NOTE(review): the assignment to `error` is dead; we return directly */
+		error = ENOTSUP;  /* unexpected failure */
+		return ENOTSUP;
+	}
+
+unionget:
+	/* file id 2 always refers to the root of the volume */
+	if (objid == 2) {
+		error = VFS_ROOT(mp, &vp, ctx);
+	} else {
+		error = VFS_VGET(mp, (ino64_t)objid, &vp, ctx);
+	}
+
+	if (error == ENOENT && (mp->mnt_flag & MNT_UNION)) {
+		/*
+		 * If the fileid isn't found and we're in a union
+		 * mount volume, then see if the fileid is in the
+		 * mounted-on volume.
+		 */
+		struct mount *tmp = mp;
+		mp = vnode_mount(tmp->mnt_vnodecovered);
+		vfs_unbusy(tmp);
+		if (vfs_busy(mp, LK_NOWAIT) == 0)
+			goto unionget;
+	} else {
+		vfs_unbusy(mp);
+	}
+
+	if (error) {
+		return error;
+	}
+
+#if CONFIG_MACF
+	error = mac_vnode_check_fsgetpath(ctx, vp);
+	if (error) {
+		vnode_put(vp);
+		return error;
+	}
+#endif
+
+	/* Obtain the absolute path to this vnode. */
+	bpflags = vfs_context_suser(ctx) ? BUILDPATH_CHECKACCESS : 0;
+	bpflags |= BUILDPATH_CHECK_MOVED;
+	error = build_path(vp, buf, bufsize, &length, bpflags, ctx);
+	vnode_put(vp);
+
+	if (error) {
+		goto out;
+	}
+
+	AUDIT_ARG(text, buf);
+
+	if (kdebug_enable) {
+		long dbg_parms[NUMPARMS];
+		int dbg_namelen;
+
+		dbg_namelen = (int)sizeof(dbg_parms);
+
+		/* keep the trailing portion of the path if it doesn't all fit */
+		if (length < dbg_namelen) {
+			memcpy((char *)dbg_parms, buf, length);
+			memset((char *)dbg_parms + length, 0, dbg_namelen - length);
+
+			dbg_namelen = length;
+		} else {
+			memcpy((char *)dbg_parms, buf + (length - dbg_namelen), dbg_namelen);
+		}
+
+		/* NOTE(review): vp's iocount was dropped above; the pointer appears to
+		 * be passed to kdebug only as an identifying tag — confirm it is never
+		 * dereferenced there. */
+		kdebug_lookup_gen_events(dbg_parms, dbg_namelen, (void *)vp, TRUE);
+	}
+
+	/* NOTE(review): pathlen is an int *, so the user_ssize_t cast here is
+	 * immediately narrowed back to int. */
+	*pathlen = (user_ssize_t)length; /* may be superseded by error */
+
+out:
+	return (error);
+}
+
/*
* Obtain the full pathname of a file system object by id.
*
int
fsgetpath(__unused proc_t p, struct fsgetpath_args *uap, user_ssize_t *retval)
{
- vnode_t vp;
- struct mount *mp = NULL;
vfs_context_t ctx = vfs_context_current();
fsid_t fsid;
char *realpath;
- int bpflags;
int length;
int error;
AUDIT_ARG(value32, fsid.val[0]);
AUDIT_ARG(value64, uap->objid);
/* Restrict output buffer size for now. */
+
if (uap->bufsize > PAGE_SIZE) {
return (EINVAL);
}
if (realpath == NULL) {
return (ENOMEM);
}
- /* Find the target mountpoint. */
- if ((mp = mount_lookupby_volfsid(fsid.val[0], 1)) == NULL) {
- error = ENOTSUP; /* unexpected failure */
- goto out;
- }
- /* Find the target vnode. */
- if (uap->objid == 2) {
- error = VFS_ROOT(mp, &vp, ctx);
- } else {
- error = VFS_VGET(mp, (ino64_t)uap->objid, &vp, ctx);
- }
- vfs_unbusy(mp);
- if (error) {
- goto out;
- }
-#if CONFIG_MACF
- error = mac_vnode_check_fsgetpath(ctx, vp);
- if (error) {
- vnode_put(vp);
- goto out;
- }
-#endif
- /* Obtain the absolute path to this vnode. */
- bpflags = vfs_context_suser(ctx) ? BUILDPATH_CHECKACCESS : 0;
- bpflags |= BUILDPATH_CHECK_MOVED;
- error = build_path(vp, realpath, uap->bufsize, &length, bpflags, ctx);
- vnode_put(vp);
+
+ error = fsgetpath_internal(
+ ctx, fsid.val[0], uap->objid,
+ uap->bufsize, realpath, &length);
+
if (error) {
goto out;
}
- AUDIT_ARG(text, realpath);
+
error = copyout((caddr_t)realpath, uap->buf, length);
*retval = (user_ssize_t)length; /* may be superseded by error */
sfs.f_fsid = sfsp->f_fsid;
sfs.f_owner = sfsp->f_owner;
if (mp->mnt_kern_flag & MNTK_TYPENAME_OVERRIDE) {
- strlcpy(&sfs.f_fstypename[0], &mp->fstypename_override[0], MFSTYPENAMELEN);
+ strlcpy(&sfs.f_fstypename[0], &mp->fstypename_override[0], MFSNAMELEN);
} else {
strlcpy(&sfs.f_fstypename[0], &sfsp->f_fstypename[0], MFSNAMELEN);
}
sfs.f_fsid = sfsp->f_fsid;
sfs.f_owner = sfsp->f_owner;
if (mp->mnt_kern_flag & MNTK_TYPENAME_OVERRIDE) {
- strlcpy(&sfs.f_fstypename[0], &mp->fstypename_override[0], MFSTYPENAMELEN);
+ strlcpy(&sfs.f_fstypename[0], &mp->fstypename_override[0], MFSNAMELEN);
} else {
strlcpy(&sfs.f_fstypename[0], &sfsp->f_fstypename[0], MFSNAMELEN);
}
usbp->st_qspare[0] = sbp->st_qspare[0];
usbp->st_qspare[1] = sbp->st_qspare[1];
}
+
+/*
+ * Purge buffer cache for simulating cold starts
+ */
+static int vnode_purge_callback(struct vnode *vp, __unused void *cargs)
+{
+	/* Push all dirty pages and invalidate the entire range for this vnode.
+	 * The return value of ubc_msync is deliberately ignored: the purge is
+	 * best-effort and iteration continues regardless. */
+	ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL /* off_t *resid_off */, UBC_PUSHALL | UBC_INVALIDATE);
+
+	return VNODE_RETURNED;
+}
+
+/* Per-mount helper: walk every vnode on the mount (waiting for busy ones)
+ * and purge its cached pages via vnode_purge_callback. */
+static int vfs_purge_callback(mount_t mp, __unused void * arg)
+{
+	vnode_iterate(mp, VNODE_WAIT | VNODE_ITERATE_ALL, vnode_purge_callback, NULL);
+
+	return VFS_RETURNED;
+}
+
+/*
+ * vfs_purge system call: flush and invalidate cached file data on every
+ * mounted filesystem, simulating a cold start.  Restricted to the
+ * superuser; always returns 0 once the iteration has been kicked off.
+ */
+int
+vfs_purge(__unused struct proc *p, __unused struct vfs_purge_args *uap, __unused int32_t *retval)
+{
+	if (!kauth_cred_issuser(kauth_cred_get()))
+		return EPERM;
+
+	vfs_iterate(0/* flags */, vfs_purge_callback, NULL);
+
+	return 0;
+}
+