/*
- * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <security/mac_framework.h>
#endif
+#include <sys/sdt.h>
+
#define ESUCCESS 0
#undef mount_t
#undef vnode_t
#define COMPAT_ONLY
-
-#ifndef __LP64__
-#define THREAD_SAFE_FS(VP) \
- ((VP)->v_unsafefs ? 0 : 1)
-#endif /* __LP64__ */
-
#define NATIVE_XATTR(VP) \
((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
+#if CONFIG_APPLEDOUBLE
static void xattrfile_remove(vnode_t dvp, const char *basename,
vfs_context_t ctx, int force);
static void xattrfile_setattr(vnode_t dvp, const char * basename,
struct vnode_attr * vap, vfs_context_t ctx);
+#endif /* CONFIG_APPLEDOUBLE */
+
+static errno_t post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp);
/*
* vnode_setneedinactive
}
-#ifndef __LP64__
-int
-lock_fsnode(vnode_t vp, int *funnel_state)
-{
- if (funnel_state)
- *funnel_state = thread_funnel_set(kernel_flock, TRUE);
-
- if (vp->v_unsafefs) {
- if (vp->v_unsafefs->fsnodeowner == current_thread()) {
- vp->v_unsafefs->fsnode_count++;
- } else {
- lck_mtx_lock(&vp->v_unsafefs->fsnodelock);
-
- if (vp->v_lflag & (VL_TERMWANT | VL_TERMINATE | VL_DEAD)) {
- lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);
-
- if (funnel_state)
- (void) thread_funnel_set(kernel_flock, *funnel_state);
- return (ENOENT);
- }
- vp->v_unsafefs->fsnodeowner = current_thread();
- vp->v_unsafefs->fsnode_count = 1;
- }
- }
- return (0);
-}
-
-
-void
-unlock_fsnode(vnode_t vp, int *funnel_state)
-{
- if (vp->v_unsafefs) {
- if (--vp->v_unsafefs->fsnode_count == 0) {
- vp->v_unsafefs->fsnodeowner = NULL;
- lck_mtx_unlock(&vp->v_unsafefs->fsnodelock);
- }
- }
- if (funnel_state)
- (void) thread_funnel_set(kernel_flock, *funnel_state);
-}
-#endif /* __LP64__ */
-
-
-
/* ====================================================================== */
/* ************ EXTERNAL KERNEL APIS ********************************** */
/* ====================================================================== */
VFS_MOUNT(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
{
int error;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0))
return(ENOTSUP);
-#ifndef __LP64__
- thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- }
-#endif /* __LP64__ */
-
if (vfs_context_is64bit(ctx)) {
if (vfs_64bitready(mp)) {
error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
}
-#ifndef __LP64__
- if (!thread_safe) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
- }
-#endif /* __LP64__ */
-
return (error);
}
VFS_START(mount_t mp, int flags, vfs_context_t ctx)
{
int error;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0))
return(ENOTSUP);
-#ifndef __LP64__
- thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
-
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- }
-#endif /* __LP64__ */
-
error = (*mp->mnt_op->vfs_start)(mp, flags, ctx);
-#ifndef __LP64__
- if (!thread_safe) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
- }
-#endif /* __LP64__ */
-
return (error);
}
VFS_UNMOUNT(mount_t mp, int flags, vfs_context_t ctx)
{
int error;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0))
return(ENOTSUP);
-#ifndef __LP64__
- thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
-
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- }
-#endif /* __LP64__ */
-
error = (*mp->mnt_op->vfs_unmount)(mp, flags, ctx);
-#ifndef __LP64__
- if (!thread_safe) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
- }
-#endif /* __LP64__ */
-
return (error);
}
*
* The return codes documented above are those which may currently
* be returned by HFS from hfs_vfs_root, which is a simple wrapper
- * for a call to hfs_vget on the volume mount poit, not including
+ * for a call to hfs_vget on the volume mount point, not including
* additional error codes which may be propagated from underlying
* routines called by hfs_vget.
*/
VFS_ROOT(mount_t mp, struct vnode ** vpp, vfs_context_t ctx)
{
int error;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0))
return(ENOTSUP);
ctx = vfs_context_current();
}
-#ifndef __LP64__
- thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- }
-#endif /* __LP64__ */
-
error = (*mp->mnt_op->vfs_root)(mp, vpp, ctx);
-#ifndef __LP64__
- if (!thread_safe) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
- }
-#endif /* __LP64__ */
-
return (error);
}
VFS_QUOTACTL(mount_t mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t ctx)
{
int error;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0))
return(ENOTSUP);
-#ifndef __LP64__
- thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- }
-#endif /* __LP64__ */
-
error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, ctx);
-#ifndef __LP64__
- if (!thread_safe) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
- }
-#endif /* __LP64__ */
-
return (error);
}
VFS_GETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
{
int error;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0))
return(ENOTSUP);
ctx = vfs_context_current();
}
-#ifndef __LP64__
- thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- }
-#endif /* __LP64__ */
-
error = (*mp->mnt_op->vfs_getattr)(mp, vfa, ctx);
-#ifndef __LP64__
- if (!thread_safe) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
- }
-#endif /* __LP64__ */
-
return(error);
}
VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
{
int error;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0))
return(ENOTSUP);
ctx = vfs_context_current();
}
-#ifndef __LP64__
- thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- }
-#endif /* __LP64__ */
-
error = (*mp->mnt_op->vfs_setattr)(mp, vfa, ctx);
-#ifndef __LP64__
- if (!thread_safe) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
- }
-#endif /* __LP64__ */
-
return(error);
}
VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx)
{
int error;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0))
return(ENOTSUP);
ctx = vfs_context_current();
}
-#ifndef __LP64__
- thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- }
-#endif /* __LP64__ */
-
error = (*mp->mnt_op->vfs_sync)(mp, flags, ctx);
-#ifndef __LP64__
- if (!thread_safe) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
- }
-#endif /* __LP64__ */
-
return(error);
}
VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx)
{
int error;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0))
return(ENOTSUP);
ctx = vfs_context_current();
}
-#ifndef __LP64__
- thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- }
-#endif /* __LP64__ */
-
error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, ctx);
-#ifndef __LP64__
- if (!thread_safe) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
- }
-#endif /* __LP64__ */
-
return(error);
}
int
-VFS_FHTOVP(mount_t mp, int fhlen, unsigned char * fhp, vnode_t * vpp, vfs_context_t ctx)
+VFS_FHTOVP(mount_t mp, int fhlen, unsigned char *fhp, vnode_t *vpp, vfs_context_t ctx)
{
int error;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0))
return(ENOTSUP);
ctx = vfs_context_current();
}
-#ifndef __LP64__
- thread_safe = (mp->mnt_vtable->vfc_vfsflags & VFC_VFSTHREADSAFE);
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- }
-#endif /* __LP64__ */
-
error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, ctx);
-#ifndef __LP64__
- if (!thread_safe) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
- }
-#endif /* __LP64__ */
-
return(error);
}
int
-VFS_VPTOFH(struct vnode * vp, int *fhlenp, unsigned char * fhp, vfs_context_t ctx)
+VFS_VPTOFH(struct vnode *vp, int *fhlenp, unsigned char *fhp, vfs_context_t ctx)
{
int error;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0))
return(ENOTSUP);
ctx = vfs_context_current();
}
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- }
-#endif /* __LP64__ */
-
error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, ctx);
-#ifndef __LP64__
- if (!thread_safe) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
- }
-#endif /* __LP64__ */
-
return(error);
}
+/*
+ * VFS_IOCTL: forward a mount-level ioctl to the filesystem's
+ * vfs_ioctl entry point.
+ *
+ * Returns ENOTSUP when the mount is dead or the filesystem registered
+ * no vfs_ioctl handler; otherwise returns whatever the handler returns.
+ * A NULL context is substituted with the current thread's context
+ * (GNU "?:" operand elision).
+ */
+int VFS_IOCTL(struct mount *mp, u_long command, caddr_t data,
+ int flags, vfs_context_t context)
+{
+ if (mp == dead_mountp || !mp->mnt_op->vfs_ioctl)
+ return ENOTSUP;
+
+ return mp->mnt_op->vfs_ioctl(mp, command, data, flags,
+ context ?: vfs_context_current());
+}
+
+/*
+ * VFS_VGET_SNAPDIR: ask the filesystem for the vnode of its snapshot
+ * directory via the vfs_vget_snapdir entry point.
+ *
+ * Returns ENOTSUP when the mount is dead or the filesystem provides no
+ * vfs_vget_snapdir handler.  A NULL ctx falls back to the current
+ * thread's context.  On success *vpp holds the snapshot-directory vnode.
+ */
+int
+VFS_VGET_SNAPDIR(mount_t mp, vnode_t *vpp, vfs_context_t ctx)
+{
+ int error;
+
+ if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget_snapdir == 0))
+ return(ENOTSUP);
+
+ if (ctx == NULL)
+ ctx = vfs_context_current();
+
+ error = (*mp->mnt_op->vfs_vget_snapdir)(mp, vpp, ctx);
+
+ return (error);
+}
/* returns the cached throttle mask for the mount_t */
uint64_t
/* returns a copy of vfs type name for the mount_t */
void
-vfs_name(mount_t mp, char * buffer)
+vfs_name(mount_t mp, char *buffer)
{
+ /*
+ * NOTE(review): strncpy does not NUL-terminate when vfc_name is
+ * exactly MFSNAMELEN bytes long -- confirm callers pass a buffer of
+ * at least MFSNAMELEN and tolerate an unterminated result (compare
+ * vnode_vfsname, which was switched to strlcpy).
+ */
strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
}
int
vfs_isforce(mount_t mp)
{
- if ((mp->mnt_lflag & MNT_LFORCE) || (mp->mnt_kern_flag & MNTK_FRCUNMOUNT))
+ if (mp->mnt_lflag & MNT_LFORCE)
return(1);
else
return(0);
mount_unlock(mp);
}
-void
-vfs_markdependency(mount_t mp)
-{
- proc_t p = current_proc();
- mount_lock(mp);
- mp->mnt_dependent_process = p;
- mp->mnt_dependent_pid = proc_pid(p);
- mount_unlock(mp);
-}
-
-
int
vfs_authopaque(mount_t mp)
{
mount_unlock(mp);
}
+/* Mark the mount as unusable for swap files (sets MNTK_NOSWAP under the mount lock). */
+void
+vfs_setnoswap(mount_t mp)
+{
+ mount_lock(mp);
+ mp->mnt_kern_flag |= MNTK_NOSWAP;
+ mount_unlock(mp);
+}
+
+/* Allow swap files on the mount again (clears MNTK_NOSWAP under the mount lock). */
+void
+vfs_clearnoswap(mount_t mp)
+{
+ mount_lock(mp);
+ mp->mnt_kern_flag &= ~MNTK_NOSWAP;
+ mount_unlock(mp);
+}
+
int
vfs_extendedsecurity(mount_t mp)
{
mount_unlock(mp);
}
-
+/* query whether the mount point supports native EAs */
+/*
+ * Returns the raw MNTK_EXTENDED_ATTRS flag bit -- nonzero when the
+ * filesystem stores extended attributes natively, 0 otherwise (callers
+ * must treat the result as boolean, not as exactly 1).
+ */
+int
+vfs_nativexattrs(mount_t mp) {
+ return (mp->mnt_kern_flag & MNTK_EXTENDED_ATTRS);
+}
+
/*
* return the block size of the underlying
* device associated with mount_t
void
vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
{
- if (mp == NULL) {
- ioattrp->io_maxreadcnt = MAXPHYS;
+ ioattrp->io_reserved[0] = NULL;
+ ioattrp->io_reserved[1] = NULL;
+ if (mp == NULL) {
+ ioattrp->io_maxreadcnt = MAXPHYS;
ioattrp->io_maxwritecnt = MAXPHYS;
ioattrp->io_segreadcnt = 32;
ioattrp->io_segwritecnt = 32;
ioattrp->io_maxsegwritesize = MAXPHYS;
ioattrp->io_devblocksize = DEV_BSIZE;
ioattrp->io_flags = 0;
+ ioattrp->io_max_swappin_available = 0;
} else {
- ioattrp->io_maxreadcnt = mp->mnt_maxreadcnt;
+ ioattrp->io_maxreadcnt = mp->mnt_maxreadcnt;
ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
ioattrp->io_segreadcnt = mp->mnt_segreadcnt;
ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
ioattrp->io_devblocksize = mp->mnt_devblocksize;
ioattrp->io_flags = mp->mnt_ioflags;
+ ioattrp->io_max_swappin_available = mp->mnt_max_swappin_available;
}
- ioattrp->io_reserved[0] = NULL;
- ioattrp->io_reserved[1] = NULL;
}
mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
mp->mnt_devblocksize = ioattrp->io_devblocksize;
mp->mnt_ioflags = ioattrp->io_flags;
+ mp->mnt_max_swappin_available = ioattrp->io_max_swappin_available;
}
/*
typedef int (*PFI)(void *);
extern int vfs_opv_numops;
errno_t
-vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t * handle)
+vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t *handle)
{
struct vfstable *newvfstbl = NULL;
int i,j;
|| (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL))
return(EINVAL);
-#ifdef __LP64__
- /* Non-threadsafe filesystems are not supported for K64 */
+ /* Non-threadsafe filesystems are not supported */
if ((vfe->vfe_flags & (VFS_TBLTHREADSAFE | VFS_TBLFSNODELOCK)) == 0) {
return (EINVAL);
}
-#endif /* __LP64__ */
MALLOC(newvfstbl, void *, sizeof(struct vfstable), M_TEMP,
M_WAITOK);
newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
if ((vfe->vfe_flags & VFS_TBLNOTYPENUM))
- newvfstbl->vfc_typenum = maxvfsconf++;
+ newvfstbl->vfc_typenum = maxvfstypenum++;
else
newvfstbl->vfc_typenum = vfe->vfe_fstypenum;
newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEINV2;
if (vfe->vfe_flags & VFS_TBLVNOP_PAGEOUTV2)
newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEOUTV2;
-#ifndef __LP64__
- if (vfe->vfe_flags & VFS_TBLTHREADSAFE)
- newvfstbl->vfc_vfsflags |= VFC_VFSTHREADSAFE;
- if (vfe->vfe_flags & VFS_TBLFSNODELOCK)
- newvfstbl->vfc_vfsflags |= VFC_VFSTHREADSAFE;
-#endif /* __LP64__ */
if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL)
newvfstbl->vfc_flags |= MNT_LOCAL;
if ((vfe->vfe_flags & VFS_TBLLOCALVOL) && (vfe->vfe_flags & VFS_TBLGENERICMNTARGS) == 0)
newvfstbl->vfc_vfsflags |= VFC_VFSREADDIR_EXTENDED;
if (vfe->vfe_flags & VFS_TBLNOMACLABEL)
newvfstbl->vfc_vfsflags |= VFC_VFSNOMACLABEL;
+ if (vfe->vfe_flags & VFS_TBLVNOP_NOUPDATEID_RENAME)
+ newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_NOUPDATEID_RENAME;
+ if (vfe->vfe_flags & VFS_TBLVNOP_SECLUDE_RENAME)
+ newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_SECLUDE_RENAME;
+ if (vfe->vfe_flags & VFS_TBLCANMOUNTROOT)
+ newvfstbl->vfc_vfsflags |= VFC_VFSCANMOUNTROOT;
/*
* Allocate and init the vectors.
newvfstbl->vfc_descptr = descptr;
newvfstbl->vfc_descsize = descsize;
+ newvfstbl->vfc_sysctl = NULL;
for (i= 0; i< desccount; i++ ) {
opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);
+ /* Skip known-disabled operations, warning so the FS author notices */
+ if (opve_descp->opve_op->vdesc_flags & VDESC_DISABLED) {
+ printf("vfs_fsadd: Ignoring reference in %p to disabled operation %s.\n",
+ vfe->vfe_opvdescs[i], opve_descp->opve_op->vdesc_name);
+ continue;
+ }
+
/*
* Sanity check: is this operation listed
* in the list of operations? We check this
* list of supported operations.
*/
if (opve_descp->opve_op->vdesc_offset == 0 &&
- opve_descp->opve_op->vdesc_offset != VOFFSET(vnop_default)) {
+ opve_descp->opve_op != VDESC(vnop_default)) {
printf("vfs_fsadd: operation %s not listed in %s.\n",
opve_descp->opve_op->vdesc_name,
"vfs_op_descs");
*handle = vfstable_add(newvfstbl);
- if (newvfstbl->vfc_typenum <= maxvfsconf )
- maxvfsconf = newvfstbl->vfc_typenum + 1;
+ if (newvfstbl->vfc_typenum <= maxvfstypenum )
+ maxvfstypenum = newvfstbl->vfc_typenum + 1;
if (newvfstbl->vfc_vfsops->vfs_init) {
struct vfsconf vfsc;
* file system was added
*/
errno_t
-vfs_fsremove(vfstable_t handle)
+vfs_fsremove(vfstable_t handle)
{
struct vfstable * vfstbl = (struct vfstable *)handle;
void *old_desc = NULL;
return(err);
}
+/*
+ * Record the owning uid/gid for the mounted filesystem.
+ * NOTE(review): the fields are written without taking mount_lock,
+ * unlike the neighboring flag setters -- confirm callers serialize
+ * these stores or that torn reads are acceptable here.
+ */
+void vfs_setowner(mount_t mp, uid_t uid, gid_t gid)
+{
+ mp->mnt_fsowner = uid;
+ mp->mnt_fsgroup = gid;
+}
+
+/*
+ * Callers should be careful how they use this; accessing
+ * mnt_last_write_completed_timestamp is not thread-safe. Writing to
+ * it isn't either. Point is: be prepared to deal with strange values
+ * being returned.
+ */
+/*
+ * Returns the number of microseconds (uptime-based) since the last
+ * write to the mount completed, or 0 if writes are still in flight
+ * (mnt_pending_write_size nonzero).
+ */
+uint64_t vfs_idle_time(mount_t mp)
+{
+ /* Writes outstanding: the mount is not idle at all. */
+ if (mp->mnt_pending_write_size)
+ return 0;
+
+ struct timeval now;
+
+ microuptime(&now);
+
+ /* Elapsed time in microseconds; may be garbage under races (see above). */
+ return ((now.tv_sec
+ - mp->mnt_last_write_completed_timestamp.tv_sec) * 1000000
+ + now.tv_usec - mp->mnt_last_write_completed_timestamp.tv_usec);
+}
+
int
vfs_context_pid(vfs_context_t ctx)
{
return(kauth_cred_issuser(vfs_context_ucred(ctx)));
}
+/* Returns nonzero iff ctx is the kernel's own static context (identity compare). */
+int vfs_context_iskernel(vfs_context_t ctx)
+{
+ return ctx == &kerncontext;
+}
+
/*
* Given a context, for all fields of vfs_context_t which
* are not held with a reference, set those fields to the
return 0;
}
+/* Returns 1 when mnt is non-NULL and flagged as the swap mount (MNTK_SWAP_MOUNT), else 0. */
+int vfs_isswapmount(mount_t mnt)
+{
+ return mnt && ISSET(mnt->mnt_kern_flag, MNTK_SWAP_MOUNT) ? 1 : 0;
+}
+
/* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
return (vp->v_mount);
}
+#if CONFIG_IOSCHED
+/*
+ * Return the device vnode backing vp's mount (mnt_devvp), or NULL when
+ * vp has no mount.  Only compiled for I/O-scheduling configurations.
+ */
+vnode_t
+vnode_mountdevvp(vnode_t vp)
+{
+ if (vp->v_mount)
+ return (vp->v_mount->mnt_devvp);
+ else
+ return ((vnode_t)0);
+}
+#endif
+
mount_t
vnode_mountedhere(vnode_t vp)
{
vnode_unlock(vp);
}
+/* Returns 1 if the vnode carries the VFASTDEVCANDIDATE flag, else 0. */
+int
+vnode_isfastdevicecandidate(vnode_t vp)
+{
+ return ((vp->v_flag & VFASTDEVCANDIDATE)? 1 : 0);
+}
+
+/* Set VFASTDEVCANDIDATE on the vnode, under the vnode spin lock. */
+void
+vnode_setfastdevicecandidate(vnode_t vp)
+{
+ vnode_lock_spin(vp);
+ vp->v_flag |= VFASTDEVCANDIDATE;
+ vnode_unlock(vp);
+}
+
+/* Clear VFASTDEVCANDIDATE on the vnode, under the vnode spin lock. */
+void
+vnode_clearfastdevicecandidate(vnode_t vp)
+{
+ vnode_lock_spin(vp);
+ vp->v_flag &= ~VFASTDEVCANDIDATE;
+ vnode_unlock(vp);
+}
+
+/* Returns 1 if the vnode carries the VAUTOCANDIDATE flag, else 0. */
+int
+vnode_isautocandidate(vnode_t vp)
+{
+ return ((vp->v_flag & VAUTOCANDIDATE)? 1 : 0);
+}
+
+/* Set VAUTOCANDIDATE on the vnode, under the vnode spin lock. */
+void
+vnode_setautocandidate(vnode_t vp)
+{
+ vnode_lock_spin(vp);
+ vp->v_flag |= VAUTOCANDIDATE;
+ vnode_unlock(vp);
+}
+
+/* Clear VAUTOCANDIDATE on the vnode, under the vnode spin lock. */
+void
+vnode_clearautocandidate(vnode_t vp)
+{
+ vnode_lock_spin(vp);
+ vp->v_flag &= ~VAUTOCANDIDATE;
+ vnode_unlock(vp);
+}
+
+
+
/* mark vnode_t to skip vflush() is SKIPSYSTEM */
void
vp->v_parent = dvp;
}
-const char *
-vnode_name(vnode_t vp)
-{
- /* we try to keep v_name a reasonable name for the node */
- return(vp->v_name);
-}
-
void
vnode_setname(vnode_t vp, char * name)
{
void
vnode_vfsname(vnode_t vp, char * buf)
{
+ /*
+ * strlcpy (unlike the strncpy it replaces) always NUL-terminates,
+ * so buf is a valid C string even when vfc_name fills MFSNAMELEN.
+ */
- strncpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
+ strlcpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
}
/* return the FS type number */
fsec = NULL;
fsec_uio = NULL;
- error = 0;
-
+
/* find out how big the EA is */
- if (vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx) != 0) {
+ error = vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx);
+ if (error != 0) {
/* no EA, no filesec */
if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN))
error = 0;
if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
fsec = NULL;
- if ((vp->v_type == VDIR) || (vp->v_type == VLNK) || (vp->v_type == VREG)) {
+ if (XATTR_VNODE_SUPPORTED(vp)) {
/* try to get the filesec */
if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0)
goto out;
error = EROFS;
goto out;
}
+
+#if DEVELOPMENT || DEBUG
+ /*
+ * XXX VSWAP: Check for entitlements or special flag here
+ * so we can restrict access appropriately.
+ */
+#else /* DEVELOPMENT || DEBUG */
+
+ if (vnode_isswap(vp) && (ctx != vfs_context_kernel())) {
+ error = EPERM;
+ goto out;
+ }
+#endif /* DEVELOPMENT || DEBUG */
+
#if NAMEDSTREAMS
/* For streams, va_data_size is the only setable attribute. */
if ((vp->v_flag & VISNAMEDSTREAM) && (vap->va_active != VNODE_ATTR_va_data_size)) {
goto out;
}
#endif
+ /* Check for truncation */
+ if(VATTR_IS_ACTIVE(vap, va_data_size)) {
+ switch(vp->v_type) {
+ case VREG:
+ /* For regular files it's ok */
+ break;
+ case VDIR:
+ /* Not allowed to truncate directories */
+ error = EISDIR;
+ goto out;
+ default:
+ /* For everything else we will clear the bit and let underlying FS decide on the rest */
+ VATTR_CLEAR_ACTIVE(vap, va_data_size);
+ if (vap->va_active)
+ break;
+ /* If it was the only bit set, return success, to handle cases like redirect to /dev/null */
+ return (0);
+ }
+ }
/*
* If ownership is being ignored on this volume, we silently discard
goto out;
}
+ /* Never allow the setting of any unsupported superuser flags. */
+ if (VATTR_IS_ACTIVE(vap, va_flags)) {
+ vap->va_flags &= (SF_SUPPORTED | UF_SETTABLE);
+ }
+
error = VNOP_SETATTR(vp, vap, ctx);
if ((error == 0) && !VATTR_ALL_SUPPORTED(vap))
* Fail for file types that we don't permit extended security
* to be set on.
*/
- if ((vp->v_type != VDIR) && (vp->v_type != VLNK) && (vp->v_type != VREG)) {
+ if (!XATTR_VNODE_SUPPORTED(vp)) {
VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
error = EINVAL;
goto out;
{
int _err;
struct vnop_lookup_args a;
- vnode_t vp;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_lookup_desc;
a.a_dvp = dvp;
a.a_cnp = cnp;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(dvp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
_err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);
-
- vp = *vpp;
-
-#ifndef __LP64__
- if (!thread_safe) {
- if ( (cnp->cn_flags & ISLASTCN) ) {
- if ( (cnp->cn_flags & LOCKPARENT) ) {
- if ( !(cnp->cn_flags & FSNODELOCKHELD) ) {
- /*
- * leave the fsnode lock held on
- * the directory, but restore the funnel...
- * also indicate that we need to drop the
- * fsnode_lock when we're done with the
- * system call processing for this path
- */
- cnp->cn_flags |= FSNODELOCKHELD;
-
- (void) thread_funnel_set(kernel_flock, funnel_state);
- return (_err);
- }
- }
- }
- unlock_fsnode(dvp, &funnel_state);
+ if (_err == 0 && *vpp) {
+ DTRACE_FSINFO(lookup, vnode_t, *vpp);
}
-#endif /* __LP64__ */
return (_err);
}
uint32_t tmp_status = 0;
struct componentname *cnp = &ndp->ni_cnd;
- want_create = (flags & VNOP_COMPOUND_OPEN_DO_CREATE);
+ want_create = (flags & O_CREAT);
a.a_desc = &vnop_compound_open_desc;
a.a_dvp = dvp;
}
_err = (*dvp->v_op[vnop_compound_open_desc.vdesc_offset])(&a);
+ if (want_create) {
+ if (_err == 0 && *vpp) {
+ DTRACE_FSINFO(compound_open, vnode_t, *vpp);
+ } else {
+ DTRACE_FSINFO(compound_open, vnode_t, dvp);
+ }
+ } else {
+ DTRACE_FSINFO(compound_open, vnode_t, *vpp);
+ }
did_create = (*a.a_status & COMPOUND_OPEN_STATUS_DID_CREATE);
}
if (did_create) {
+#if CONFIG_APPLEDOUBLE
if (!NATIVE_XATTR(dvp)) {
/*
* Remove stale Apple Double file (if any).
*/
xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
}
-
+#endif /* CONFIG_APPLEDOUBLE */
/* On create, provide kqueue notification */
post_event_if_success(dvp, _err, NOTE_WRITE);
}
{
int _err;
struct vnop_create_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_create_desc;
a.a_dvp = dvp;
a.a_vap = vap;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(dvp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
- return (_err);
- }
+ _err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
+ if (_err == 0 && *vpp) {
+ DTRACE_FSINFO(create, vnode_t, *vpp);
}
-#endif /* __LP64__ */
- _err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
+#if CONFIG_APPLEDOUBLE
if (_err == 0 && !NATIVE_XATTR(dvp)) {
/*
* Remove stale Apple Double file (if any).
*/
xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
}
-
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(dvp, &funnel_state);
- }
-#endif /* __LP64__ */
+#endif /* CONFIG_APPLEDOUBLE */
post_event_if_success(dvp, _err, NOTE_WRITE);
};
#endif /* 0*/
+/*
+ * VNOP_WHITEOUT is obsolete: the removed body below used to forward to
+ * the filesystem's vnop_whiteout entry (with pre-LP64 funnel locking);
+ * the stub now unconditionally reports the operation as unsupported.
+ */
errno_t
-VNOP_WHITEOUT(vnode_t dvp, struct componentname * cnp, int flags, vfs_context_t ctx)
+VNOP_WHITEOUT(__unused vnode_t dvp, __unused struct componentname *cnp,
+ __unused int flags, __unused vfs_context_t ctx)
{
- int _err;
- struct vnop_whiteout_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
-
- a.a_desc = &vnop_whiteout_desc;
- a.a_dvp = dvp;
- a.a_cnp = cnp;
- a.a_flags = flags;
- a.a_context = ctx;
-
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(dvp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
- _err = (*dvp->v_op[vnop_whiteout_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(dvp, &funnel_state);
- }
-#endif /* __LP64__ */
-
- post_event_if_success(dvp, _err, NOTE_WRITE);
-
- return (_err);
+ return (ENOTSUP); // XXX OBSOLETE
}
- #if 0
+#if 0
/*
*#
*#% mknod dvp L U U
int _err;
struct vnop_mknod_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_mknod_desc;
a.a_dvp = dvp;
a.a_vap = vap;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(dvp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
_err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(dvp, &funnel_state);
+ if (_err == 0 && *vpp) {
+ DTRACE_FSINFO(mknod, vnode_t, *vpp);
}
-#endif /* __LP64__ */
post_event_if_success(dvp, _err, NOTE_WRITE);
{
int _err;
struct vnop_open_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
if (ctx == NULL) {
ctx = vfs_context_current();
a.a_mode = mode;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
- if ( (_err = lock_fsnode(vp, NULL)) ) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
- return (_err);
- }
- }
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
- unlock_fsnode(vp, NULL);
- }
- (void) thread_funnel_set(kernel_flock, funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(open, vnode_t, vp);
return (_err);
}
{
int _err;
struct vnop_close_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
if (ctx == NULL) {
ctx = vfs_context_current();
a.a_fflag = fflag;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
- if ( (_err = lock_fsnode(vp, NULL)) ) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
- return (_err);
- }
- }
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
- unlock_fsnode(vp, NULL);
- }
- (void) thread_funnel_set(kernel_flock, funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(close, vnode_t, vp);
return (_err);
}
{
int _err;
struct vnop_access_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
if (ctx == NULL) {
ctx = vfs_context_current();
a.a_action = action;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(vp, &funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(access, vnode_t, vp);
return (_err);
}
{
int _err;
struct vnop_getattr_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_getattr_desc;
a.a_vp = vp;
a.a_vap = vap;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(vp, &funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(getattr, vnode_t, vp);
return (_err);
}
{
int _err;
struct vnop_setattr_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_setattr_desc;
a.a_vp = vp;
a.a_vap = vap;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
+ DTRACE_FSINFO(setattr, vnode_t, vp);
+#if CONFIG_APPLEDOUBLE
/*
* Shadow uid/gid/mod change to extended attribute file.
*/
vnode_putname(vname);
}
}
-
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(vp, &funnel_state);
- }
-#endif /* __LP64__ */
+#endif /* CONFIG_APPLEDOUBLE */
/*
* If we have changed any of the things about the file that are likely
{
int _err;
struct vnop_read_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
+#if CONFIG_DTRACE
+ user_ssize_t resid = uio_resid(uio);
+#endif
if (ctx == NULL) {
- ctx = vfs_context_current();
+ return EINVAL;
}
a.a_desc = &vnop_read_desc;
a.a_ioflag = ioflag;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
- if ( (_err = lock_fsnode(vp, NULL)) ) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
- return (_err);
- }
- }
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
- unlock_fsnode(vp, NULL);
- }
- (void) thread_funnel_set(kernel_flock, funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO_IO(read,
+ vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
return (_err);
}
{
struct vnop_write_args a;
int _err;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
+#if CONFIG_DTRACE
+ user_ssize_t resid = uio_resid(uio);
+#endif
if (ctx == NULL) {
- ctx = vfs_context_current();
+ return EINVAL;
}
a.a_desc = &vnop_write_desc;
a.a_ioflag = ioflag;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
- if ( (_err = lock_fsnode(vp, NULL)) ) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
- return (_err);
- }
- }
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
- unlock_fsnode(vp, NULL);
- }
- (void) thread_funnel_set(kernel_flock, funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO_IO(write,
+ vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
post_event_if_success(vp, _err, NOTE_WRITE);
{
int _err;
struct vnop_ioctl_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
if (ctx == NULL) {
ctx = vfs_context_current();
* We have to be able to use the root filesystem's device vnode even when
* devfs isn't mounted (yet/anymore), so we can't go looking at its mount
* structure. If there is no data pointer, it doesn't matter whether
- * the device is 64-bit ready. Any command (like DKIOCSYNCHRONIZECACHE)
+ * the device is 64-bit ready. Any command (like DKIOCSYNCHRONIZE)
* which passes NULL for its data pointer can therefore be used during
* mount or unmount of the root filesystem.
*
a.a_fflag = fflag;
a.a_context= ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
- if ( (_err = lock_fsnode(vp, NULL)) ) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
- return (_err);
- }
- }
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
- unlock_fsnode(vp, NULL);
- }
- (void) thread_funnel_set(kernel_flock, funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(ioctl, vnode_t, vp);
return (_err);
}
{
int _err;
struct vnop_select_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
if (ctx == NULL) {
ctx = vfs_context_current();
a.a_context = ctx;
a.a_wql = wql;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
- if ( (_err = lock_fsnode(vp, NULL)) ) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
- return (_err);
- }
- }
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) {
- unlock_fsnode(vp, NULL);
- }
- (void) thread_funnel_set(kernel_flock, funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(select, vnode_t, vp);
return (_err);
}
{
int _err;
struct vnop_exchange_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
- vnode_t lock_first = NULL, lock_second = NULL;
-#endif /* __LP64__ */
a.a_desc = &vnop_exchange_desc;
a.a_fvp = fvp;
a.a_options = options;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(fvp);
- if (!thread_safe) {
- /*
- * Lock in vnode address order to avoid deadlocks
- */
- if (fvp < tvp) {
- lock_first = fvp;
- lock_second = tvp;
- } else {
- lock_first = tvp;
- lock_second = fvp;
- }
- if ( (_err = lock_fsnode(lock_first, &funnel_state)) ) {
- return (_err);
- }
- if ( (_err = lock_fsnode(lock_second, NULL)) ) {
- unlock_fsnode(lock_first, &funnel_state);
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
_err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(lock_second, NULL);
- unlock_fsnode(lock_first, &funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(exchange, vnode_t, fvp);
/* Don't post NOTE_WRITE because file descriptors follow the data ... */
post_event_if_success(fvp, _err, NOTE_ATTRIB);
{
struct vnop_revoke_args a;
int _err;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_revoke_desc;
a.a_vp = vp;
a.a_flags = flags;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(revoke, vnode_t, vp);
return (_err);
}
{
int _err;
struct vnop_mmap_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_mmap_desc;
a.a_vp = vp;
a.a_fflags = fflags;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(vp, &funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(mmap, vnode_t, vp);
return (_err);
}
{
int _err;
struct vnop_mnomap_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_mnomap_desc;
a.a_vp = vp;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(vp, &funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(mnomap, vnode_t, vp);
return (_err);
}
{
struct vnop_fsync_args a;
int _err;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_fsync_desc;
a.a_vp = vp;
a.a_waitfor = waitfor;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(vp, &funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(fsync, vnode_t, vp);
return (_err);
}
{
int _err;
struct vnop_remove_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_remove_desc;
a.a_dvp = dvp;
a.a_flags = flags;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(dvp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
_err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
+ DTRACE_FSINFO(remove, vnode_t, vp);
if (_err == 0) {
vnode_setneedinactive(vp);
-
+#if CONFIG_APPLEDOUBLE
if ( !(NATIVE_XATTR(dvp)) ) {
/*
* Remove any associated extended attribute file (._ AppleDouble file).
*/
xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
}
+#endif /* CONFIG_APPLEDOUBLE */
}
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(vp, &funnel_state);
- }
-#endif /* __LP64__ */
-
post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
post_event_if_success(dvp, _err, NOTE_WRITE);
a.a_remove_authorizer = vn_authorize_unlink;
_err = (*dvp->v_op[vnop_compound_remove_desc.vdesc_offset])(&a);
+ if (_err == 0 && *vpp) {
+ DTRACE_FSINFO(compound_remove, vnode_t, *vpp);
+ } else {
+ DTRACE_FSINFO(compound_remove, vnode_t, dvp);
+ }
if (_err == 0) {
vnode_setneedinactive(*vpp);
-
+#if CONFIG_APPLEDOUBLE
if ( !(NATIVE_XATTR(dvp)) ) {
/*
* Remove any associated extended attribute file (._ AppleDouble file).
*/
xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 1);
}
+#endif /* CONFIG_APPLEDOUBLE */
}
post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
{
int _err;
struct vnop_link_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
+#if CONFIG_APPLEDOUBLE
/*
* For file systems with non-native extended attributes,
* disallow linking to an existing "._" Apple Double file.
return (_err);
}
}
+#endif /* CONFIG_APPLEDOUBLE */
+
a.a_desc = &vnop_link_desc;
a.a_vp = vp;
a.a_tdvp = tdvp;
a.a_cnp = cnp;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
_err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(vp, &funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(link, vnode_t, vp);
post_event_if_success(vp, _err, NOTE_LINK);
post_event_if_success(tdvp, _err, NOTE_WRITE);
errno_t
vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
- uint32_t flags, vfs_context_t ctx)
+ vfs_rename_flags_t flags, vfs_context_t ctx)
{
int _err;
+ struct nameidata *fromnd = NULL;
+ struct nameidata *tond = NULL;
+#if CONFIG_APPLEDOUBLE
vnode_t src_attr_vp = NULLVP;
vnode_t dst_attr_vp = NULLVP;
- struct nameidata fromnd;
- struct nameidata tond;
char smallname1[48];
char smallname2[48];
char *xfromname = NULL;
char *xtoname = NULL;
+#endif /* CONFIG_APPLEDOUBLE */
int batched;
+ uint32_t tdfflags; // Target directory file flags
batched = vnode_compound_rename_available(fdvp);
-#ifndef __LP64__
- vnode_t fdvp_unsafe = (THREAD_SAFE_FS(fdvp) ? NULLVP : fdvp);
-#endif /* __LP64__ */
-
if (!batched) {
if (*fvpp == NULLVP)
panic("Not batched, and no fvp?");
}
+#if CONFIG_APPLEDOUBLE
/*
* We need to preflight any potential AppleDouble file for the source file
* before doing the rename operation, since we could potentially be doing
* is only for AppleDouble files.
*/
if (xfromname != NULL) {
- NDINIT(&fromnd, RENAME, OP_RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK,
+ MALLOC(fromnd, struct nameidata *, sizeof (struct nameidata), M_TEMP, M_WAITOK);
+ NDINIT(fromnd, RENAME, OP_RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK,
UIO_SYSSPACE, CAST_USER_ADDR_T(xfromname), ctx);
- fromnd.ni_dvp = fdvp;
- error = namei(&fromnd);
+ fromnd->ni_dvp = fdvp;
+ error = namei(fromnd);
/*
* If there was an error looking up source attribute file,
*/
if (error == 0) {
- if (fromnd.ni_vp) {
+ if (fromnd->ni_vp) {
/* src_attr_vp indicates need to call vnode_put / nameidone later */
- src_attr_vp = fromnd.ni_vp;
-
- if (fromnd.ni_vp->v_type != VREG) {
+ src_attr_vp = fromnd->ni_vp;
+
+ if (fromnd->ni_vp->v_type != VREG) {
src_attr_vp = NULLVP;
- vnode_put(fromnd.ni_vp);
+ vnode_put(fromnd->ni_vp);
}
}
/*
* have a vnode here, so we drop our namei buffer for the source attribute file
*/
if (src_attr_vp == NULLVP) {
- nameidone(&fromnd);
+ nameidone(fromnd);
}
}
}
}
+#endif /* CONFIG_APPLEDOUBLE */
if (batched) {
_err = VNOP_COMPOUND_RENAME(fdvp, fvpp, fcnp, fvap, tdvp, tvpp, tcnp, tvap, flags, ctx);
if (_err != 0) {
printf("VNOP_COMPOUND_RENAME() returned %d\n", _err);
}
-
} else {
- _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
+ if (flags) {
+ _err = VNOP_RENAMEX(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, flags, ctx);
+ if (_err == ENOTSUP && flags == VFS_RENAME_SECLUDE) {
+ // Legacy...
+ if ((*fvpp)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_SECLUDE_RENAME) {
+ fcnp->cn_flags |= CN_SECLUDE_RENAME;
+ _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
+ }
+ }
+ } else
+ _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
+ }
+
+ /*
+ * If moved to a new directory that is restricted,
+ * set the restricted flag on the item moved.
+ */
+ if (_err == 0) {
+ _err = vnode_flags(tdvp, &tdfflags, ctx);
+ if (_err == 0 && (tdfflags & SF_RESTRICTED)) {
+ uint32_t fflags;
+ _err = vnode_flags(*fvpp, &fflags, ctx);
+ if (_err == 0 && !(fflags & SF_RESTRICTED)) {
+ struct vnode_attr va;
+ VATTR_INIT(&va);
+ VATTR_SET(&va, va_flags, fflags | SF_RESTRICTED);
+ _err = vnode_setattr(*fvpp, &va, ctx);
+ }
+ }
}
+#if CONFIG_MACF
if (_err == 0) {
mac_vnode_notify_rename(ctx, *fvpp, tdvp, tcnp);
}
+#endif
+#if CONFIG_APPLEDOUBLE
/*
* Rename any associated extended attribute file (._ AppleDouble file).
*/
* Note that tdvp already has an iocount reference. Make sure to check that we
* get a valid vnode from namei.
*/
- NDINIT(&tond, RENAME, OP_RENAME,
+ MALLOC(tond, struct nameidata *, sizeof(struct nameidata), M_TEMP, M_WAITOK);
+ NDINIT(tond, RENAME, OP_RENAME,
NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE,
CAST_USER_ADDR_T(xtoname), ctx);
- tond.ni_dvp = tdvp;
- error = namei(&tond);
+ tond->ni_dvp = tdvp;
+ error = namei(tond);
if (error)
- goto out;
+ goto ad_error;
- if (tond.ni_vp) {
- dst_attr_vp = tond.ni_vp;
+ if (tond->ni_vp) {
+ dst_attr_vp = tond->ni_vp;
}
if (src_attr_vp) {
+ const char *old_name = src_attr_vp->v_name;
+ vnode_t old_parent = src_attr_vp->v_parent;
+
if (batched) {
- error = VNOP_COMPOUND_RENAME(fdvp, &src_attr_vp, &fromnd.ni_cnd, NULL,
- tdvp, &dst_attr_vp, &tond.ni_cnd, NULL,
+ error = VNOP_COMPOUND_RENAME(fdvp, &src_attr_vp, &fromnd->ni_cnd, NULL,
+ tdvp, &dst_attr_vp, &tond->ni_cnd, NULL,
0, ctx);
} else {
- error = VNOP_RENAME(fdvp, src_attr_vp, &fromnd.ni_cnd,
- tdvp, dst_attr_vp, &tond.ni_cnd, ctx);
+ error = VNOP_RENAME(fdvp, src_attr_vp, &fromnd->ni_cnd,
+ tdvp, dst_attr_vp, &tond->ni_cnd, ctx);
}
+ if (error == 0 && old_name == src_attr_vp->v_name &&
+ old_parent == src_attr_vp->v_parent) {
+ int update_flags = VNODE_UPDATE_NAME;
+
+ if (fdvp != tdvp)
+ update_flags |= VNODE_UPDATE_PARENT;
+
+ if ((src_attr_vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_NOUPDATEID_RENAME) == 0) {
+ vnode_update_identity(src_attr_vp, tdvp,
+ tond->ni_cnd.cn_nameptr,
+ tond->ni_cnd.cn_namelen,
+ tond->ni_cnd.cn_hash,
+ update_flags);
+ }
+ }
+
/* kevent notifications for moving resource files
* _err is zero if we're here, so no need to notify directories, code
* below will do that. only need to post the rename on the source and
args.a_desc = &vnop_remove_desc;
args.a_dvp = tdvp;
args.a_vp = dst_attr_vp;
- args.a_cnp = &tond.ni_cnd;
+ args.a_cnp = &tond->ni_cnd;
args.a_context = ctx;
-#ifndef __LP64__
- if (fdvp_unsafe != NULLVP)
- error = lock_fsnode(dst_attr_vp, NULL);
-#endif /* __LP64__ */
if (error == 0) {
error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);
-#ifndef __LP64__
- if (fdvp_unsafe != NULLVP)
- unlock_fsnode(dst_attr_vp, NULL);
-#endif /* __LP64__ */
-
if (error == 0)
vnode_setneedinactive(dst_attr_vp);
}
post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
}
}
-out:
+ad_error:
if (src_attr_vp) {
vnode_put(src_attr_vp);
- nameidone(&fromnd);
+ nameidone(fromnd);
}
if (dst_attr_vp) {
vnode_put(dst_attr_vp);
- nameidone(&tond);
+ nameidone(tond);
}
-
if (xfromname && xfromname != &smallname1[0]) {
FREE(xfromname, M_TEMP);
}
if (xtoname && xtoname != &smallname2[0]) {
FREE(xtoname, M_TEMP);
}
-
+#endif /* CONFIG_APPLEDOUBLE */
+ if (fromnd) {
+ FREE(fromnd, M_TEMP);
+ }
+ if (tond) {
+ FREE(tond, M_TEMP);
+ }
return _err;
}
vfs_context_t ctx)
{
int _err = 0;
- int events;
struct vnop_rename_args a;
-#ifndef __LP64__
- int funnel_state = 0;
- vnode_t lock_first = NULL, lock_second = NULL;
- vnode_t fdvp_unsafe = NULLVP;
- vnode_t tdvp_unsafe = NULLVP;
-#endif /* __LP64__ */
a.a_desc = &vnop_rename_desc;
a.a_fdvp = fdvp;
a.a_tcnp = tcnp;
a.a_context = ctx;
-#ifndef __LP64__
- if (!THREAD_SAFE_FS(fdvp))
- fdvp_unsafe = fdvp;
- if (!THREAD_SAFE_FS(tdvp))
- tdvp_unsafe = tdvp;
+ /* do the rename of the main file. */
+ _err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
+ DTRACE_FSINFO(rename, vnode_t, fdvp);
- if (fdvp_unsafe != NULLVP) {
- /*
- * Lock parents in vnode address order to avoid deadlocks
- * note that it's possible for the fdvp to be unsafe,
- * but the tdvp to be safe because tvp could be a directory
- * in the root of a filesystem... in that case, tdvp is the
- * in the filesystem that this root is mounted on
- */
- if (tdvp_unsafe == NULL || fdvp_unsafe == tdvp_unsafe) {
- lock_first = fdvp_unsafe;
- lock_second = NULL;
- } else if (fdvp_unsafe < tdvp_unsafe) {
- lock_first = fdvp_unsafe;
- lock_second = tdvp_unsafe;
- } else {
- lock_first = tdvp_unsafe;
- lock_second = fdvp_unsafe;
- }
- if ( (_err = lock_fsnode(lock_first, &funnel_state)) )
- return (_err);
+ if (_err)
+ return _err;
- if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) {
- unlock_fsnode(lock_first, &funnel_state);
- return (_err);
- }
+ return post_rename(fdvp, fvp, tdvp, tvp);
+}
- /*
- * Lock both children in vnode address order to avoid deadlocks
- */
- if (tvp == NULL || tvp == fvp) {
- lock_first = fvp;
- lock_second = NULL;
- } else if (fvp < tvp) {
- lock_first = fvp;
- lock_second = tvp;
- } else {
- lock_first = tvp;
- lock_second = fvp;
- }
- if ( (_err = lock_fsnode(lock_first, NULL)) )
- goto out1;
+static errno_t
+post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp)
+{
+ if (tvp && tvp != fvp)
+ vnode_setneedinactive(tvp);
- if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) {
- unlock_fsnode(lock_first, NULL);
- goto out1;
+ /* Wrote at least one directory. If transplanted a dir, also changed link counts */
+ int events = NOTE_WRITE;
+ if (vnode_isdir(fvp)) {
+ /* Link count on dir changed only if we are moving a dir and...
+ * --Moved to new dir, not overwriting there
+ * --Kept in same dir and DID overwrite
+ */
+ if (((fdvp != tdvp) && (!tvp)) || ((fdvp == tdvp) && (tvp))) {
+ events |= NOTE_LINK;
}
}
-#endif /* __LP64__ */
-
- /* do the rename of the main file. */
- _err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
-#ifndef __LP64__
- if (fdvp_unsafe != NULLVP) {
- if (lock_second != NULL)
- unlock_fsnode(lock_second, NULL);
- unlock_fsnode(lock_first, NULL);
+ lock_vnode_and_post(fdvp, events);
+ if (fdvp != tdvp) {
+ lock_vnode_and_post(tdvp, events);
}
-#endif /* __LP64__ */
- if (_err == 0) {
- if (tvp && tvp != fvp)
- vnode_setneedinactive(tvp);
+ /* If you're replacing the target, post a deletion for it */
+ if (tvp)
+ {
+ lock_vnode_and_post(tvp, NOTE_DELETE);
}
-#ifndef __LP64__
-out1:
- if (fdvp_unsafe != NULLVP) {
- if (tdvp_unsafe != NULLVP)
- unlock_fsnode(tdvp_unsafe, NULL);
- unlock_fsnode(fdvp_unsafe, &funnel_state);
- }
-#endif /* __LP64__ */
+ lock_vnode_and_post(fvp, NOTE_RENAME);
- /* Wrote at least one directory. If transplanted a dir, also changed link counts */
- if (0 == _err) {
- events = NOTE_WRITE;
- if (vnode_isdir(fvp)) {
- /* Link count on dir changed only if we are moving a dir and...
- * --Moved to new dir, not overwriting there
- * --Kept in same dir and DID overwrite
- */
- if (((fdvp != tdvp) && (!tvp)) || ((fdvp == tdvp) && (tvp))) {
- events |= NOTE_LINK;
- }
- }
+ return 0;
+}
- lock_vnode_and_post(fdvp, events);
- if (fdvp != tdvp) {
- lock_vnode_and_post(tdvp, events);
- }
+#if 0
+/*
+ *#
+ *#% renamex fdvp U U U
+ *#% renamex fvp U U U
+ *#% renamex tdvp L U U
+ *#% renamex tvp X U U
+ *#
+ */
+struct vnop_renamex_args {
+ struct vnodeop_desc *a_desc;
+ vnode_t a_fdvp;
+ vnode_t a_fvp;
+ struct componentname *a_fcnp;
+ vnode_t a_tdvp;
+ vnode_t a_tvp;
+ struct componentname *a_tcnp;
+ vfs_rename_flags_t a_flags;
+ vfs_context_t a_context;
+};
+#endif /* 0*/
+errno_t
+VNOP_RENAMEX(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
+ struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
+ vfs_rename_flags_t flags, vfs_context_t ctx)
+{
+ int _err = 0;
+ struct vnop_renamex_args a;
- /* If you're replacing the target, post a deletion for it */
- if (tvp)
- {
- lock_vnode_and_post(tvp, NOTE_DELETE);
- }
+ a.a_desc = &vnop_renamex_desc;
+ a.a_fdvp = fdvp;
+ a.a_fvp = fvp;
+ a.a_fcnp = fcnp;
+ a.a_tdvp = tdvp;
+ a.a_tvp = tvp;
+ a.a_tcnp = tcnp;
+ a.a_flags = flags;
+ a.a_context = ctx;
- lock_vnode_and_post(fvp, NOTE_RENAME);
- }
+ /* do the rename of the main file. */
+ _err = (*fdvp->v_op[vnop_renamex_desc.vdesc_offset])(&a);
+ DTRACE_FSINFO(renamex, vnode_t, fdvp);
- return (_err);
+ if (_err)
+ return _err;
+
+ return post_rename(fdvp, fvp, tdvp, tvp);
}
+
int
VNOP_COMPOUND_RENAME(
struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
/* do the rename of the main file. */
_err = (*fdvp->v_op[vnop_compound_rename_desc.vdesc_offset])(&a);
+ DTRACE_FSINFO(compound_rename, vnode_t, fdvp);
if (_err == 0) {
if (*tvpp && *tvpp != *fvpp)
}
/* Wrote at least one directory. If transplanted a dir, also changed link counts */
- if (0 == _err && *fvpp != *tvpp) {
+ if (_err == 0 && *fvpp != *tvpp) {
if (!*fvpp) {
panic("No fvpp after compound rename?");
}
}
}
- #if 0
+#if 0
/*
*#
*#% mkdir dvp L U U
{
int _err;
struct vnop_mkdir_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_mkdir_desc;
a.a_dvp = dvp;
a.a_vap = vap;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(dvp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
_err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
+ if (_err == 0 && *vpp) {
+ DTRACE_FSINFO(mkdir, vnode_t, *vpp);
+ }
+#if CONFIG_APPLEDOUBLE
if (_err == 0 && !NATIVE_XATTR(dvp)) {
/*
* Remove stale Apple Double file (if any).
*/
xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
}
-
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(dvp, &funnel_state);
- }
-#endif /* __LP64__ */
+#endif /* CONFIG_APPLEDOUBLE */
post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
a.a_reserved = NULL;
_err = (*dvp->v_op[vnop_compound_mkdir_desc.vdesc_offset])(&a);
+ if (_err == 0 && *vpp) {
+ DTRACE_FSINFO(compound_mkdir, vnode_t, *vpp);
+ }
+#if CONFIG_APPLEDOUBLE
if (_err == 0 && !NATIVE_XATTR(dvp)) {
/*
* Remove stale Apple Double file (if any).
*/
xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
}
+#endif /* CONFIG_APPLEDOUBLE */
post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
{
int _err;
struct vnop_rmdir_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_rmdir_desc;
a.a_dvp = dvp;
a.a_cnp = cnp;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(dvp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);
+ DTRACE_FSINFO(rmdir, vnode_t, vp);
if (_err == 0) {
vnode_setneedinactive(vp);
-
+#if CONFIG_APPLEDOUBLE
if ( !(NATIVE_XATTR(dvp)) ) {
/*
* Remove any associated extended attribute file (._ AppleDouble file).
*/
xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
}
+#endif
}
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(vp, &funnel_state);
- }
-#endif /* __LP64__ */
-
/* If you delete a dir, it loses its "." reference --> NOTE_LINK */
post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
no_vp = (*vpp == NULLVP);
_err = (*dvp->v_op[vnop_compound_rmdir_desc.vdesc_offset])(&a);
+ if (_err == 0 && *vpp) {
+ DTRACE_FSINFO(compound_rmdir, vnode_t, *vpp);
+ }
+#if CONFIG_APPLEDOUBLE
if (_err == 0 && !NATIVE_XATTR(dvp)) {
/*
* Remove stale Apple Double file (if any).
*/
xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
}
+#endif
if (*vpp) {
post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
return (_err);
}
+#if CONFIG_APPLEDOUBLE
/*
* Remove a ._ AppleDouble file
*/
nameidone(&nd);
if (xvp->v_type == VREG) {
-#ifndef __LP64__
- int thread_safe = THREAD_SAFE_FS(dvp);
-#endif /* __LP64__ */
struct vnop_setattr_args a;
a.a_desc = &vnop_setattr_desc;
a.a_vap = vap;
a.a_context = ctx;
-#ifndef __LP64__
- if (!thread_safe) {
- if ( (lock_fsnode(xvp, NULL)) )
- goto out1;
- }
-#endif /* __LP64__ */
-
(void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(xvp, NULL);
- }
-#endif /* __LP64__ */
}
-
-#ifndef __LP64__
-out1:
-#endif /* __LP64__ */
vnode_put(xvp);
-
out2:
if (filename && filename != &smallname[0]) {
FREE(filename, M_TEMP);
}
}
+#endif /* CONFIG_APPLEDOUBLE */
#if 0
/*
{
int _err;
struct vnop_symlink_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_symlink_desc;
a.a_dvp = dvp;
a.a_target = target;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(dvp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(dvp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
_err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
+ DTRACE_FSINFO(symlink, vnode_t, dvp);
+#if CONFIG_APPLEDOUBLE
if (_err == 0 && !NATIVE_XATTR(dvp)) {
/*
* Remove stale Apple Double file (if any). Posts its own knotes
*/
xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
}
+#endif /* CONFIG_APPLEDOUBLE */
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(dvp, &funnel_state);
- }
-#endif /* __LP64__ */
-
post_event_if_success(dvp, _err, NOTE_WRITE);
return (_err);
{
int _err;
struct vnop_readdir_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
+#if CONFIG_DTRACE
+ user_ssize_t resid = uio_resid(uio);
+#endif
a.a_desc = &vnop_readdir_desc;
a.a_vp = vp;
a.a_eofflag = eofflag;
a.a_numdirent = numdirent;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
-
- if (!thread_safe) {
- if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
_err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
+ DTRACE_FSINFO_IO(readdir,
+ vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(vp, &funnel_state);
- }
-#endif /* __LP64__ */
return (_err);
}
{
int _err;
struct vnop_readdirattr_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
+#if CONFIG_DTRACE
+ user_ssize_t resid = uio_resid(uio);
+#endif
a.a_desc = &vnop_readdirattr_desc;
a.a_vp = vp;
a.a_actualcount = actualcount;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);
+ DTRACE_FSINFO_IO(readdirattr,
+ vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(vp, &funnel_state);
- }
-#endif /* __LP64__ */
+ return (_err);
+}
+
+#if 0
+struct vnop_getattrlistbulk_args {
+ struct vnodeop_desc *a_desc;
+ vnode_t a_vp;
+ struct attrlist *a_alist;
+ struct vnode_attr *a_vap;
+ struct uio *a_uio;
+ void *a_private;
+ uint64_t a_options;
+ int *a_eofflag;
+ uint32_t *a_actualcount;
+ vfs_context_t a_context;
+};
+#endif /* 0*/
+errno_t
+VNOP_GETATTRLISTBULK(struct vnode *vp, struct attrlist *alist,
+ struct vnode_attr *vap, struct uio *uio, void *private, uint64_t options,
+ int32_t *eofflag, int32_t *actualcount, vfs_context_t ctx)
+{
+ int _err;
+ struct vnop_getattrlistbulk_args a;
+#if CONFIG_DTRACE
+ user_ssize_t resid = uio_resid(uio);
+#endif
+
+ a.a_desc = &vnop_getattrlistbulk_desc;
+ a.a_vp = vp;
+ a.a_alist = alist;
+ a.a_vap = vap;
+ a.a_uio = uio;
+ a.a_private = private;
+ a.a_options = options;
+ a.a_eofflag = eofflag;
+ a.a_actualcount = actualcount;
+ a.a_context = ctx;
+
+ _err = (*vp->v_op[vnop_getattrlistbulk_desc.vdesc_offset])(&a);
+ DTRACE_FSINFO_IO(getattrlistbulk,
+ vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
return (_err);
}
{
int _err;
struct vnop_readlink_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
-
+#if CONFIG_DTRACE
+ user_ssize_t resid = uio_resid(uio);
+#endif
a.a_desc = &vnop_readlink_desc;
a.a_vp = vp;
a.a_uio = uio;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(vp, &funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO_IO(readlink,
+ vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
return (_err);
}
{
int _err;
struct vnop_inactive_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_inactive_desc;
a.a_vp = vp;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(vp, &funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(inactive, vnode_t, vp);
#if NAMEDSTREAMS
/* For file systems that do not support namedstream natively, mark
{
int _err;
struct vnop_reclaim_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_reclaim_desc;
a.a_vp = vp;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(reclaim, vnode_t, vp);
return (_err);
}
{
int _err;
struct vnop_pathconf_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_pathconf_desc;
a.a_vp = vp;
a.a_retval = retval;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(vp, &funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(pathconf, vnode_t, vp);
return (_err);
}
};
#endif /* 0*/
errno_t
-VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx)
+VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx, struct timespec *timeout)
{
int _err;
struct vnop_advlock_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_advlock_desc;
a.a_vp = vp;
a.a_fl = fl;
a.a_flags = flags;
a.a_context = ctx;
-
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- }
-#endif /* __LP64__ */
+ a.a_timeout = timeout;
/* Disallow advisory locking on non-seekable vnodes */
if (vnode_isfifo(vp)) {
if ((vp->v_flag & VLOCKLOCAL)) {
/* Advisory locking done at this layer */
_err = lf_advlock(&a);
+ } else if (flags & F_OFD_LOCK) {
+ /* Non-local locking doesn't work for OFD locks */
+ _err = err_advlock(&a);
} else {
/* Advisory locking done by underlying filesystem */
_err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
}
+ DTRACE_FSINFO(advlock, vnode_t, vp);
+ if (op == F_UNLCK && flags == F_FLOCK)
+ post_event_if_success(vp, _err, NOTE_FUNLOCK);
}
-#ifndef __LP64__
- if (!thread_safe) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
- }
-#endif /* __LP64__ */
-
return (_err);
}
{
int _err;
struct vnop_allocate_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_allocate_desc;
a.a_vp = vp;
a.a_offset = offset;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
+ DTRACE_FSINFO(allocate, vnode_t, vp);
#if CONFIG_FSE
if (_err == 0) {
add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
}
#endif
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(vp, &funnel_state);
- }
-#endif /* __LP64__ */
-
return (_err);
}
{
int _err;
struct vnop_pagein_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_pagein_desc;
a.a_vp = vp;
a.a_flags = flags;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
+ DTRACE_FSINFO(pagein, vnode_t, vp);
-#ifndef __LP64__
- if (!thread_safe) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
- }
-#endif /* __LP64__ */
-
return (_err);
}
{
int _err;
struct vnop_pageout_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_pageout_desc;
a.a_vp = vp;
a.a_flags = flags;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(pageout, vnode_t, vp);
post_event_if_success(vp, _err, NOTE_WRITE);
}
}
+#if CONFIG_SEARCHFS
#if 0
/*
{
int _err;
struct vnop_searchfs_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_searchfs_desc;
a.a_vp = vp;
a.a_searchstate = searchstate;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(vp, &funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(searchfs, vnode_t, vp);
return (_err);
}
+#endif /* CONFIG_SEARCHFS */
#if 0
/*
a.a_flags = flags;
a.a_context = ctx;
_err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
+ DTRACE_FSINFO(copyfile, vnode_t, fvp);
+ return (_err);
+}
+
+#if 0
+struct vnop_clonefile_args {
+ struct vnodeop_desc *a_desc;
+ vnode_t a_fvp;
+ vnode_t a_dvp;
+ vnode_t *a_vpp;
+ struct componentname *a_cnp;
+ struct vnode_attr *a_vap;
+ uint32_t a_flags;
+ vfs_context_t a_context;
+ int (*a_dir_clone_authorizer)( /* Authorization callback */
+ struct vnode_attr *vap, /* attribute to be authorized */
+ kauth_action_t action, /* action for which attribute is to be authorized */
+ struct vnode_attr *dvap, /* target directory attributes */
+ vnode_t sdvp, /* source directory vnode pointer (optional) */
+ mount_t mp, /* mount point of filesystem */
+ dir_clone_authorizer_op_t vattr_op, /* specific operation requested : setup, authorization or cleanup */
+ vfs_context_t ctx, /* As passed to VNOP */
+ void *reserved); /* Always NULL */
+ void *a_reserved; /* Currently unused */
+};
+#endif /* 0 */
+
+/*
+ * VNOP_CLONEFILE - dispatch a clonefile operation to the filesystem.
+ *
+ * Clones 'fvp' into directory 'dvp' under the name carried in 'cnp',
+ * creating attributes from 'vap'; on success the filesystem returns the
+ * newly created vnode through '*vpp'.  Dispatch is through the *target
+ * directory's* v_op table (dvp), not fvp's.
+ */
+errno_t
+VNOP_CLONEFILE(vnode_t fvp, vnode_t dvp, vnode_t *vpp,
+    struct componentname *cnp, struct vnode_attr *vap, uint32_t flags,
+    vfs_context_t ctx)
+{
+	int _err;
+	struct vnop_clonefile_args a;
+	a.a_desc = &vnop_clonefile_desc;
+	a.a_fvp = fvp;
+	a.a_dvp = dvp;
+	a.a_vpp = vpp;
+	a.a_cnp = cnp;
+	a.a_vap = vap;
+	a.a_flags = flags;
+	a.a_context = ctx;
+
+	/*
+	 * Directory clones need per-entry attribute authorization; hand the
+	 * filesystem the generic callback.  Non-directories get no callback.
+	 */
+	if (vnode_vtype(fvp) == VDIR)
+		a.a_dir_clone_authorizer = vnode_attr_authorize_dir_clone;
+	else
+		a.a_dir_clone_authorizer = NULL;
+
+	_err = (*dvp->v_op[vnop_clonefile_desc.vdesc_offset])(&a);
+
+	/* Fire the DTrace fsinfo probe only when a result vnode exists. */
+	if (_err == 0 && *vpp)
+		DTRACE_FSINFO(clonefile, vnode_t, *vpp);
+
+	/* Notify kqueue watchers of the target directory on success. */
+	post_event_if_success(dvp, _err, NOTE_WRITE);
+
	return (_err);
}
{
struct vnop_getxattr_args a;
int error;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_getxattr_desc;
a.a_vp = vp;
a.a_options = options;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- if ( (error = lock_fsnode(vp, &funnel_state)) ) {
- return (error);
- }
- }
-#endif /* __LP64__ */
-
error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(vp, &funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(getxattr, vnode_t, vp);
return (error);
}
{
struct vnop_setxattr_args a;
int error;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_setxattr_desc;
a.a_vp = vp;
a.a_options = options;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- if ( (error = lock_fsnode(vp, &funnel_state)) ) {
- return (error);
- }
- }
-#endif /* __LP64__ */
-
error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(vp, &funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(setxattr, vnode_t, vp);
if (error == 0)
vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
{
struct vnop_removexattr_args a;
int error;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_removexattr_desc;
a.a_vp = vp;
a.a_options = options;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- if ( (error = lock_fsnode(vp, &funnel_state)) ) {
- return (error);
- }
- }
-#endif /* __LP64__ */
-
error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(vp, &funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(removexattr, vnode_t, vp);
post_event_if_success(vp, error, NOTE_ATTRIB);
{
struct vnop_listxattr_args a;
int error;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_listxattr_desc;
a.a_vp = vp;
a.a_options = options;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- if ( (error = lock_fsnode(vp, &funnel_state)) ) {
- return (error);
- }
- }
-#endif /* __LP64__ */
-
error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(vp, &funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(listxattr, vnode_t, vp);
return (error);
}
{
int _err;
struct vnop_blktooff_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_blktooff_desc;
a.a_vp = vp;
a.a_lblkno = lblkno;
a.a_offset = offset;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(blktooff, vnode_t, vp);
return (_err);
}
{
int _err;
struct vnop_offtoblk_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = &vnop_offtoblk_desc;
a.a_vp = vp;
a.a_offset = offset;
a.a_lblkno = lblkno;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(offtoblk, vnode_t, vp);
return (_err);
}
{
int _err;
struct vnop_blockmap_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
+ size_t localrun = 0;
if (ctx == NULL) {
ctx = vfs_context_current();
a.a_foffset = foffset;
a.a_size = size;
a.a_bpn = bpn;
- a.a_run = run;
+ a.a_run = &localrun;
a.a_poff = poff;
a.a_flags = flags;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
+ DTRACE_FSINFO(blockmap, vnode_t, vp);
+
+ /*
+ * We used a local variable to request information from the underlying
+ * filesystem about the length of the I/O run in question. If
+ * we get malformed output from the filesystem, we cap it to the length
+ * requested, at most. Update 'run' on the way out.
+ */
+ if (_err == 0) {
+ if (localrun > size) {
+ localrun = size;
+ }
-#ifndef __LP64__
- if (!thread_safe) {
- (void) thread_funnel_set(kernel_flock, funnel_state);
+ if (run) {
+ *run = localrun;
+ }
}
-#endif /* __LP64__ */
return (_err);
}
{
int _err;
struct vnop_strategy_args a;
+ vnode_t vp = buf_vnode(bp);
a.a_desc = &vnop_strategy_desc;
a.a_bp = bp;
- _err = (*buf_vnode(bp)->v_op[vnop_strategy_desc.vdesc_offset])(&a);
+ _err = (*vp->v_op[vnop_strategy_desc.vdesc_offset])(&a);
+ DTRACE_FSINFO(strategy, vnode_t, vp);
return (_err);
}
{
int _err;
struct vnop_bwrite_args a;
+ vnode_t vp = buf_vnode(bp);
a.a_desc = &vnop_bwrite_desc;
a.a_bp = bp;
- _err = (*buf_vnode(bp)->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
+ _err = (*vp->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
+ DTRACE_FSINFO(bwrite, vnode_t, vp);
return (_err);
}
{
int _err;
struct vnop_kqfilt_add_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = VDESC(vnop_kqfilt_add);
a.a_vp = vp;
a.a_kn = kn;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
+ DTRACE_FSINFO(kqfilt_add, vnode_t, vp);
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(vp, &funnel_state);
- }
-#endif /* __LP64__ */
-
return(_err);
}
{
int _err;
struct vnop_kqfilt_remove_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = VDESC(vnop_kqfilt_remove);
a.a_vp = vp;
a.a_ident = ident;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(vp, &funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(kqfilt_remove, vnode_t, vp);
return(_err);
}
{
int _err;
struct vnop_monitor_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = VDESC(vnop_monitor);
a.a_vp = vp;
a.a_handle = handle;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_monitor_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(vp, &funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(monitor, vnode_t, vp);
return(_err);
}
{
int _err;
struct vnop_setlabel_args a;
-#ifndef __LP64__
- int thread_safe;
- int funnel_state = 0;
-#endif /* __LP64__ */
a.a_desc = VDESC(vnop_setlabel);
a.a_vp = vp;
a.a_vl = label;
a.a_context = ctx;
-#ifndef __LP64__
- thread_safe = THREAD_SAFE_FS(vp);
- if (!thread_safe) {
- if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
- return (_err);
- }
- }
-#endif /* __LP64__ */
-
_err = (*vp->v_op[vnop_setlabel_desc.vdesc_offset])(&a);
-
-#ifndef __LP64__
- if (!thread_safe) {
- unlock_fsnode(vp, &funnel_state);
- }
-#endif /* __LP64__ */
+ DTRACE_FSINFO(setlabel, vnode_t, vp);
return(_err);
}
/*
 * VNOP_GETNAMEDSTREAM - dispatch a getnamedstream operation to the
 * filesystem backing 'vp'; the named-stream vnode is returned via 'svpp'.
 * NOTE(review): this diff view elides unchanged lines, including those
 * that load 'name' and 'operation' into the args structure.
 */
errno_t
VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperation operation, int flags, vfs_context_t ctx)
{
+	int _err;
	struct vnop_getnamedstream_args a;
-#ifndef __LP64__
-	if (!THREAD_SAFE_FS(vp))
-		return (ENOTSUP);
-#endif /* __LP64__ */
-
	a.a_desc = &vnop_getnamedstream_desc;
	a.a_vp = vp;
	a.a_svpp = svpp;
	a.a_flags = flags;
	a.a_context = ctx;
-	return (*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a);
	/* Capture the error so the DTrace fsinfo probe can fire before return. */
+	_err = (*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a);
+	DTRACE_FSINFO(getnamedstream, vnode_t, vp);
+	return (_err);
}
/*
/*
 * VNOP_MAKENAMEDSTREAM - dispatch a makenamedstream operation to the
 * filesystem backing 'vp': create a named stream on 'vp' and return its
 * vnode via 'svpp'.  NOTE(review): this diff view elides unchanged lines,
 * including the one that loads 'name' into the args structure.
 */
errno_t
VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs_context_t ctx)
{
+	int _err;
	struct vnop_makenamedstream_args a;
-#ifndef __LP64__
-	if (!THREAD_SAFE_FS(vp))
-		return (ENOTSUP);
-#endif /* __LP64__ */
-
	a.a_desc = &vnop_makenamedstream_desc;
	a.a_vp = vp;
	a.a_svpp = svpp;
	a.a_flags = flags;
	a.a_context = ctx;
-	return (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a);
	/* Capture the error so the DTrace fsinfo probe can fire before return. */
+	_err = (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a);
+	DTRACE_FSINFO(makenamedstream, vnode_t, vp);
+	return (_err);
}
/*
 * VNOP_REMOVENAMEDSTREAM - dispatch a removenamedstream operation to the
 * filesystem backing 'vp': remove named-stream vnode 'svp' from 'vp'.
 * NOTE(review): this diff view elides unchanged lines, including the one
 * that loads 'name' into the args structure.
 */
errno_t
VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs_context_t ctx)
{
+	int _err;
	struct vnop_removenamedstream_args a;
-#ifndef __LP64__
-	if (!THREAD_SAFE_FS(vp))
-		return (ENOTSUP);
-#endif /* __LP64__ */
-
	a.a_desc = &vnop_removenamedstream_desc;
	a.a_vp = vp;
	a.a_svp = svp;
	a.a_flags = flags;
	a.a_context = ctx;
-	return (*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a);
	/* Capture the error so the DTrace fsinfo probe can fire before return. */
+	_err = (*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a);
+	DTRACE_FSINFO(removenamedstream, vnode_t, vp);
+	return (_err);
}
#endif