X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/c910b4d9d2451126ae3917b931cd4390c11e1d52..fe8ab488e9161c46dd9885d58fc52996dc0249ff:/bsd/vfs/kpi_vfs.c diff --git a/bsd/vfs/kpi_vfs.c b/bsd/vfs/kpi_vfs.c index 44c482c8f..1a71f6b69 100644 --- a/bsd/vfs/kpi_vfs.c +++ b/bsd/vfs/kpi_vfs.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2008 Apple Inc. All rights reserved. + * Copyright (c) 2000-2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -98,6 +98,7 @@ #include #include #include +#include #include #include #include @@ -119,27 +120,40 @@ #include #endif +#include + #define ESUCCESS 0 #undef mount_t #undef vnode_t #define COMPAT_ONLY - -#define THREAD_SAFE_FS(VP) \ - ((VP)->v_unsafefs ? 0 : 1) - #define NATIVE_XATTR(VP) \ ((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0) +#if CONFIG_APPLEDOUBLE static void xattrfile_remove(vnode_t dvp, const char *basename, - vfs_context_t ctx, int thread_safe, int force); + vfs_context_t ctx, int force); static void xattrfile_setattr(vnode_t dvp, const char * basename, - struct vnode_attr * vap, vfs_context_t ctx, - int thread_safe); - + struct vnode_attr * vap, vfs_context_t ctx); +#endif /* CONFIG_APPLEDOUBLE */ -static void +/* + * vnode_setneedinactive + * + * Description: Indicate that when the last iocount on this vnode goes away, + * and the usecount is also zero, we should inform the filesystem + * via VNOP_INACTIVE. + * + * Parameters: vnode_t vnode to mark + * + * Returns: Nothing + * + * Notes: Notably used when we're deleting a file--we need not have a + * usecount, so VNOP_INACTIVE may not get called by anyone. We + * want it called when we drop our iocount. + */ +void vnode_setneedinactive(vnode_t vp) { cache_purge(vp); @@ -150,72 +164,21 @@ vnode_setneedinactive(vnode_t vp) } -int -lock_fsnode(vnode_t vp, int *funnel_state) -{ - if (funnel_state) - *funnel_state = thread_funnel_set(kernel_flock, TRUE); - - if (vp->v_unsafefs) { - if (vp->v_unsafefs->fsnodeowner == current_thread()) { - vp->v_unsafefs->fsnode_count++; - } else { - lck_mtx_lock(&vp->v_unsafefs->fsnodelock); - - if (vp->v_lflag & (VL_TERMWANT | VL_TERMINATE | VL_DEAD)) { - lck_mtx_unlock(&vp->v_unsafefs->fsnodelock); - - if (funnel_state) - (void) thread_funnel_set(kernel_flock, *funnel_state); - return (ENOENT); - } - vp->v_unsafefs->fsnodeowner = current_thread(); - vp->v_unsafefs->fsnode_count = 1; - } - } - return (0); -} - - -void -unlock_fsnode(vnode_t vp, int *funnel_state) -{ - if (vp->v_unsafefs) { - if (--vp->v_unsafefs->fsnode_count == 0) { - vp->v_unsafefs->fsnodeowner = NULL; - lck_mtx_unlock(&vp->v_unsafefs->fsnodelock); - } - } - if (funnel_state) - (void) thread_funnel_set(kernel_flock, *funnel_state); -} - - - /* ====================================================================== */ /* ************ EXTERNAL KERNEL APIS ********************************** */ /* ====================================================================== */ /* - * prototypes for exported VFS operations + * implementations of exported VFS operations */ int VFS_MOUNT(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx) { int error; - int thread_safe; - int funnel_state = 0; if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0)) return(ENOTSUP); - thread_safe = mp->mnt_vtable->vfc_threadsafe; - - - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - } - if (vfs_context_is64bit(ctx)) { if (vfs_64bitready(mp)) { error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx); @@ -228,9 +191,6 
@@ VFS_MOUNT(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx) error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx); } - if (!thread_safe) { - (void) thread_funnel_set(kernel_flock, funnel_state); - } return (error); } @@ -238,21 +198,12 @@ int VFS_START(mount_t mp, int flags, vfs_context_t ctx) { int error; - int thread_safe; - int funnel_state = 0; if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0)) return(ENOTSUP); - thread_safe = mp->mnt_vtable->vfc_threadsafe; - - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - } error = (*mp->mnt_op->vfs_start)(mp, flags, ctx); - if (!thread_safe) { - (void) thread_funnel_set(kernel_flock, funnel_state); - } + return (error); } @@ -260,21 +211,12 @@ int VFS_UNMOUNT(mount_t mp, int flags, vfs_context_t ctx) { int error; - int thread_safe; - int funnel_state = 0; if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0)) return(ENOTSUP); - thread_safe = mp->mnt_vtable->vfc_threadsafe; - - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - } error = (*mp->mnt_op->vfs_unmount)(mp, flags, ctx); - if (!thread_safe) { - (void) thread_funnel_set(kernel_flock, funnel_state); - } + return (error); } @@ -299,8 +241,6 @@ int VFS_ROOT(mount_t mp, struct vnode ** vpp, vfs_context_t ctx) { int error; - int thread_safe; - int funnel_state = 0; if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0)) return(ENOTSUP); @@ -308,15 +248,9 @@ VFS_ROOT(mount_t mp, struct vnode ** vpp, vfs_context_t ctx) if (ctx == NULL) { ctx = vfs_context_current(); } - thread_safe = mp->mnt_vtable->vfc_threadsafe; - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - } error = (*mp->mnt_op->vfs_root)(mp, vpp, ctx); - if (!thread_safe) { - (void) thread_funnel_set(kernel_flock, funnel_state); - } + return (error); } @@ -324,21 +258,12 @@ int VFS_QUOTACTL(mount_t mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t ctx) { int error; - int thread_safe; - int funnel_state = 0; if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0)) return(ENOTSUP); - thread_safe = mp->mnt_vtable->vfc_threadsafe; - - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - } error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, ctx); - if (!thread_safe) { - (void) thread_funnel_set(kernel_flock, funnel_state); - } + return (error); } @@ -346,8 +271,6 @@ int VFS_GETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx) { int error; - int thread_safe; - int funnel_state = 0; if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0)) return(ENOTSUP); @@ -356,15 +279,8 @@ VFS_GETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx) ctx = vfs_context_current(); } - thread_safe = mp->mnt_vtable->vfc_threadsafe; - - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - } error = (*mp->mnt_op->vfs_getattr)(mp, vfa, ctx); - if (!thread_safe) { - (void) thread_funnel_set(kernel_flock, funnel_state); - } + return(error); } @@ -372,8 +288,6 @@ int VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx) { int error; - int thread_safe; - int funnel_state = 0; if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0)) return(ENOTSUP); @@ -382,15 +296,8 @@ VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx) ctx = vfs_context_current(); } - thread_safe = mp->mnt_vtable->vfc_threadsafe; - - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - } error = (*mp->mnt_op->vfs_setattr)(mp, vfa, ctx); - if (!thread_safe) { - 
(void) thread_funnel_set(kernel_flock, funnel_state); - } + return(error); } @@ -398,8 +305,6 @@ int VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx) { int error; - int thread_safe; - int funnel_state = 0; if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0)) return(ENOTSUP); @@ -407,15 +312,9 @@ VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx) if (ctx == NULL) { ctx = vfs_context_current(); } - thread_safe = mp->mnt_vtable->vfc_threadsafe; - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - } error = (*mp->mnt_op->vfs_sync)(mp, flags, ctx); - if (!thread_safe) { - (void) thread_funnel_set(kernel_flock, funnel_state); - } + return(error); } @@ -423,8 +322,6 @@ int VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx) { int error; - int thread_safe; - int funnel_state = 0; if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0)) return(ENOTSUP); @@ -432,15 +329,9 @@ VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx) if (ctx == NULL) { ctx = vfs_context_current(); } - thread_safe = mp->mnt_vtable->vfc_threadsafe; - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - } error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, ctx); - if (!thread_safe) { - (void) thread_funnel_set(kernel_flock, funnel_state); - } + return(error); } @@ -448,8 +339,6 @@ int VFS_FHTOVP(mount_t mp, int fhlen, unsigned char * fhp, vnode_t * vpp, vfs_context_t ctx) { int error; - int thread_safe; - int funnel_state = 0; if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0)) return(ENOTSUP); @@ -457,15 +346,9 @@ VFS_FHTOVP(mount_t mp, int fhlen, unsigned char * fhp, vnode_t * vpp, vfs_contex if (ctx == NULL) { ctx = vfs_context_current(); } - thread_safe = mp->mnt_vtable->vfc_threadsafe; - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - } error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, ctx); - if (!thread_safe) { - (void) thread_funnel_set(kernel_flock, funnel_state); - } + return(error); } @@ -473,8 +356,6 @@ int VFS_VPTOFH(struct vnode * vp, int *fhlenp, unsigned char * fhp, vfs_context_t ctx) { int error; - int thread_safe; - int funnel_state = 0; if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0)) return(ENOTSUP); @@ -482,19 +363,20 @@ VFS_VPTOFH(struct vnode * vp, int *fhlenp, unsigned char * fhp, vfs_context_t ct if (ctx == NULL) { ctx = vfs_context_current(); } - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - } error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, ctx); - if (!thread_safe) { - (void) thread_funnel_set(kernel_flock, funnel_state); - } + return(error); } +/* returns the cached throttle mask for the mount_t */ +uint64_t +vfs_throttle_mask(mount_t mp) +{ + return(mp->mnt_throttle_mask); +} + /* returns a copy of vfs type name for the mount_t */ void vfs_name(mount_t mp, char * buffer) @@ -509,6 +391,12 @@ vfs_typenum(mount_t mp) return(mp->mnt_vtable->vfc_typenum); } +/* Safe to cast to "struct label*"; returns "void*" to limit dependence of mount.h on security headers. */ +void* +vfs_mntlabel(mount_t mp) +{ + return (void*)mp->mnt_mntlabel; +} /* returns command modifier flags of mount_t ie. 
MNT_CMDFLAGS */ uint64_t @@ -584,20 +472,30 @@ vfs_isreload(mount_t mp) return ((mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD)); } -/* Is mount_t marked for reload (ie MNT_FORCE) */ +/* Is mount_t marked for forced unmount (ie MNT_FORCE or MNTK_FRCUNMOUNT) */ int vfs_isforce(mount_t mp) { - if ((mp->mnt_lflag & MNT_LFORCE) || (mp->mnt_kern_flag & MNTK_FRCUNMOUNT)) + if (mp->mnt_lflag & MNT_LFORCE) return(1); else return(0); } +int +vfs_isunmount(mount_t mp) +{ + if ((mp->mnt_lflag & MNT_LUNMOUNT)) { + return 1; + } else { + return 0; + } +} + int vfs_64bitready(mount_t mp) { - if ((mp->mnt_vtable->vfc_64bitready)) + if ((mp->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY)) return(1); else return(0); @@ -635,17 +533,6 @@ vfs_clearauthcache_ttl(mount_t mp) mount_unlock(mp); } -void -vfs_markdependency(mount_t mp) -{ - proc_t p = current_proc(); - mount_lock(mp); - mp->mnt_dependent_process = p; - mp->mnt_dependent_pid = proc_pid(p); - mount_unlock(mp); -} - - int vfs_authopaque(mount_t mp) { @@ -792,7 +679,12 @@ vfs_setfsprivate(mount_t mp, void *mntdata) mount_unlock(mp); } - +/* query whether the mount point supports native EAs */ +int +vfs_nativexattrs(mount_t mp) { + return (mp->mnt_kern_flag & MNTK_EXTENDED_ATTRS); +} + /* * return the block size of the underlying * device associated with mount_t @@ -803,6 +695,40 @@ vfs_devblocksize(mount_t mp) { return(mp->mnt_devblocksize); } +/* + * Returns vnode with an iocount that must be released with vnode_put() + */ +vnode_t +vfs_vnodecovered(mount_t mp) +{ + vnode_t vp = mp->mnt_vnodecovered; + if ((vp == NULL) || (vnode_getwithref(vp) != 0)) { + return NULL; + } else { + return vp; + } +} + +/* + * Returns device vnode backing a mountpoint with an iocount (if valid vnode exists). + * The iocount must be released with vnode_put(). Note that this KPI is subtle + * with respect to the validity of using this device vnode for anything substantial + * (which is discouraged). If commands are sent to the device driver without + * taking proper steps to ensure that the device is still open, chaos may ensue. + * Similarly, this routine should only be called if there is some guarantee that + * the mount itself is still valid. 
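/*
 * Usage sketch (illustrative, not part of this change): both vfs_vnodecovered()
 * and vfs_devvp() hand back a vnode holding an iocount, so a caller must
 * balance each successful call with vnode_put().  "mp" is a hypothetical
 * mount_t on which the caller already holds a reference, per the caveats in
 * the comments above.
 */
static void
inspect_mount_vnodes(mount_t mp)
{
	vnode_t cvp = vfs_vnodecovered(mp);
	vnode_t dvp = vfs_devvp(mp);

	if (cvp != NULLVP) {
		/* ... examine the covered vnode while the iocount pins it ... */
		vnode_put(cvp);
	}
	if (dvp != NULLVP) {
		/* ... e.g. query the backing device, mindful of the caveats above ... */
		vnode_put(dvp);
	}
}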
+ */ +vnode_t +vfs_devvp(mount_t mp) +{ + vnode_t vp = mp->mnt_devvp; + + if ((vp != NULLVP) && (vnode_get(vp) == 0)) { + return vp; + } + + return NULLVP; +} /* * return the io attributes associated with mount_t @@ -863,7 +789,6 @@ extern int vfs_opv_numops; errno_t vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t * handle) { -#pragma unused(data) struct vfstable *newvfstbl = NULL; int i,j; int (***opv_desc_vector_p)(void *); @@ -882,10 +807,14 @@ vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t * handle) return(EINVAL); desccount = vfe->vfe_vopcnt; - if ((desccount <=0) || ((desccount > 5)) || (vfe->vfe_vfsops == (struct vfsops *)NULL) + if ((desccount <=0) || ((desccount > 8)) || (vfe->vfe_vfsops == (struct vfsops *)NULL) || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL)) return(EINVAL); + /* Non-threadsafe filesystems are not supported */ + if ((vfe->vfe_flags & (VFS_TBLTHREADSAFE | VFS_TBLFSNODELOCK)) == 0) { + return (EINVAL); + } MALLOC(newvfstbl, void *, sizeof(struct vfstable), M_TEMP, M_WAITOK); @@ -893,7 +822,7 @@ vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t * handle) newvfstbl->vfc_vfsops = vfe->vfe_vfsops; strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN); if ((vfe->vfe_flags & VFS_TBLNOTYPENUM)) - newvfstbl->vfc_typenum = maxvfsconf++; + newvfstbl->vfc_typenum = maxvfstypenum++; else newvfstbl->vfc_typenum = vfe->vfe_fstypenum; @@ -901,14 +830,13 @@ vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t * handle) newvfstbl->vfc_flags = 0; newvfstbl->vfc_mountroot = NULL; newvfstbl->vfc_next = NULL; - newvfstbl->vfc_threadsafe = 0; newvfstbl->vfc_vfsflags = 0; if (vfe->vfe_flags & VFS_TBL64BITREADY) - newvfstbl->vfc_64bitready= 1; - if (vfe->vfe_flags & VFS_TBLTHREADSAFE) - newvfstbl->vfc_threadsafe= 1; - if (vfe->vfe_flags & VFS_TBLFSNODELOCK) - newvfstbl->vfc_threadsafe= 1; + newvfstbl->vfc_vfsflags |= VFC_VFS64BITREADY; + if (vfe->vfe_flags & VFS_TBLVNOP_PAGEINV2) + newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEINV2; + if (vfe->vfe_flags & VFS_TBLVNOP_PAGEOUTV2) + newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEOUTV2; if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL) newvfstbl->vfc_flags |= MNT_LOCAL; if ((vfe->vfe_flags & VFS_TBLLOCALVOL) && (vfe->vfe_flags & VFS_TBLGENERICMNTARGS) == 0) @@ -924,6 +852,8 @@ vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t * handle) newvfstbl->vfc_vfsflags |= VFC_VFSREADDIR_EXTENDED; if (vfe->vfe_flags & VFS_TBLNOMACLABEL) newvfstbl->vfc_vfsflags |= VFC_VFSNOMACLABEL; + if (vfe->vfe_flags & VFS_TBLVNOP_NOUPDATEID_RENAME) + newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_NOUPDATEID_RENAME; /* * Allocate and init the vectors. @@ -942,6 +872,7 @@ vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t * handle) newvfstbl->vfc_descptr = descptr; newvfstbl->vfc_descsize = descsize; + newvfstbl->vfc_sysctl = NULL; for (i= 0; i< desccount; i++ ) { opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p; @@ -958,7 +889,7 @@ vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t * handle) /* * Sanity check: is this operation listed * in the list of operations? We check this - * by seeing if its offest is zero. Since + * by seeing if its offset is zero. Since * the default routine should always be listed * first, it should be the only one with a zero * offset. 
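/*
 * Registration sketch (illustrative; every "examplefs_" name is hypothetical).
 * As enforced in vfs_fsadd() above, a plug-in filesystem must advertise one
 * of VFS_TBLTHREADSAFE or VFS_TBLFSNODELOCK, and may supply between 1 and 8
 * vnodeopv_desc vectors; otherwise registration fails with EINVAL.
 */
extern struct vfsops examplefs_vfsops;				/* assumed to exist */
extern struct vnodeopv_desc examplefs_vnodeop_opv_desc;		/* assumed to exist */

static vfstable_t examplefs_handle;

static errno_t
examplefs_register(void)
{
	struct vnodeopv_desc *opv[] = { &examplefs_vnodeop_opv_desc };
	struct vfs_fsentry vfe;

	bzero(&vfe, sizeof(vfe));
	vfe.vfe_vfsops = &examplefs_vfsops;
	vfe.vfe_vopcnt = 1;					/* 1..8 vectors allowed */
	vfe.vfe_opvdescs = opv;
	strlcpy(vfe.vfe_fsname, "examplefs", sizeof(vfe.vfe_fsname));
	vfe.vfe_flags = VFS_TBLTHREADSAFE | VFS_TBL64BITREADY | VFS_TBLNOTYPENUM;

	return vfs_fsadd(&vfe, &examplefs_handle);
}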
Any other operation with a zero @@ -1011,12 +942,22 @@ vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t * handle) *handle = vfstable_add(newvfstbl); - if (newvfstbl->vfc_typenum <= maxvfsconf ) - maxvfsconf = newvfstbl->vfc_typenum + 1; - numused_vfsslots++; + if (newvfstbl->vfc_typenum <= maxvfstypenum ) + maxvfstypenum = newvfstbl->vfc_typenum + 1; + + if (newvfstbl->vfc_vfsops->vfs_init) { + struct vfsconf vfsc; + bzero(&vfsc, sizeof(struct vfsconf)); + vfsc.vfc_reserved1 = 0; + bcopy((*handle)->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name)); + vfsc.vfc_typenum = (*handle)->vfc_typenum; + vfsc.vfc_refcount = (*handle)->vfc_refcount; + vfsc.vfc_flags = (*handle)->vfc_flags; + vfsc.vfc_reserved2 = 0; + vfsc.vfc_reserved3 = 0; - if (newvfstbl->vfc_vfsops->vfs_init) - (*newvfstbl->vfc_vfsops->vfs_init)((struct vfsconf *)handle); + (*newvfstbl->vfc_vfsops->vfs_init)(&vfsc); + } FREE(newvfstbl, M_TEMP); @@ -1041,7 +982,6 @@ vfs_fsremove(vfstable_t handle) mount_list_unlock(); return EBUSY; } - mount_list_unlock(); /* * save the old descriptor; the free cannot occur unconditionally, @@ -1052,6 +992,8 @@ vfs_fsremove(vfstable_t handle) } err = vfstable_del(vfstbl); + mount_list_unlock(); + /* free the descriptor if the delete was successful */ if (err == 0 && old_desc) { FREE(old_desc, M_TEMP); @@ -1060,25 +1002,6 @@ vfs_fsremove(vfstable_t handle) return(err); } -/* - * This returns a reference to mount_t - * which should be dropped using vfs_mountrele(). - * Not doing so will leak a mountpoint - * and associated data structures. - */ -errno_t -vfs_mountref(__unused mount_t mp ) /* gives a reference */ -{ - return(0); -} - -/* This drops the reference on mount_t that was acquired */ -errno_t -vfs_mountrele(__unused mount_t mp ) /* drops reference */ -{ - return(0); -} - int vfs_context_pid(vfs_context_t ctx) { @@ -1092,6 +1015,8 @@ vfs_context_suser(vfs_context_t ctx) } /* + * Return bit field of signals posted to all threads in the context's process. + * * XXX Signals should be tied to threads, not processes, for most uses of this * XXX call. */ @@ -1259,7 +1184,19 @@ vfs_context_cwd(vfs_context_t ctx) return(cwd); } - +/* + * vfs_context_create + * + * Description: Allocate and initialize a new context. + * + * Parameters: vfs_context_t: Context to copy, or NULL for new + * + * Returns: Pointer to new context + * + * Notes: Copy cred and thread from argument, if available; else + * initialize with current thread and new cred. Returns + * with a reference held on the credential. + */ vfs_context_t vfs_context_create(vfs_context_t ctx) { @@ -1343,7 +1280,7 @@ vfs_context_rele(vfs_context_t ctx) } -ucred_t +kauth_cred_t vfs_context_ucred(vfs_context_t ctx) { return (ctx->vc_ucred); @@ -1358,6 +1295,26 @@ vfs_context_issuser(vfs_context_t ctx) return(kauth_cred_issuser(vfs_context_ucred(ctx))); } +/* + * Given a context, for all fields of vfs_context_t which + * are not held with a reference, set those fields to the + * values for the current execution context. Currently, this + * just means the vc_thread. + * + * Returns: 0 for success, nonzero for failure + * + * The intended use is: + * 1. vfs_context_create() gets the caller a context + * 2. vfs_context_bind() sets the unrefcounted data + * 3. vfs_context_rele() releases the context + * + */ +int +vfs_context_bind(vfs_context_t ctx) +{ + ctx->vc_thread = current_thread(); + return 0; +} /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */ @@ -1389,7 +1346,7 @@ vnode_makeimode(int indx, int mode) * vnode manipulation functions. 
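/*
 * Sketch of the context lifecycle described above (assumed usage, not part of
 * this change): create a context, rebind its unreferenced fields to the
 * current thread with the new vfs_context_bind(), and release it when done.
 */
static void
context_lifecycle_example(void)
{
	vfs_context_t ctx = vfs_context_create(NULL);	/* new cred + current thread */

	(void)vfs_context_bind(ctx);	/* refresh vc_thread to current_thread() */

	/* ... issue VFS_*/VNOP_* calls with ctx ... */

	(void)vfs_context_rele(ctx);	/* drop the credential reference */
}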
*/ -/* returns system root vnode reference; It should be dropped using vrele() */ +/* returns system root vnode iocount; It should be released using vnode_put() */ vnode_t vfs_rootvnode(void) { @@ -1409,14 +1366,23 @@ vnode_vid(vnode_t vp) return ((uint32_t)(vp->v_id)); } -/* returns a mount reference; drop it with vfs_mountrelease() */ mount_t vnode_mount(vnode_t vp) { return (vp->v_mount); } -/* returns a mount reference iff vnode_t is a dir and is a mount point */ +#if CONFIG_IOSCHED +vnode_t +vnode_mountdevvp(vnode_t vp) +{ + if (vp->v_mount) + return (vp->v_mount->mnt_devvp); + else + return ((vnode_t)0); +} +#endif + mount_t vnode_mountedhere(vnode_t vp) { @@ -1478,6 +1444,13 @@ vnode_isswap(vnode_t vp) return ((vp->v_flag & VSWAP)? 1 : 0); } +/* is vnode_t a tty */ +int +vnode_istty(vnode_t vp) +{ + return ((vp->v_flag & VISTTY) ? 1 : 0); +} + /* if vnode_t mount operation in progress */ int vnode_ismount(vnode_t vp) @@ -1497,6 +1470,48 @@ vnode_isrecycled(vnode_t vp) return(ret); } +/* vnode was created by background task requesting rapid aging + and has not since been referenced by a normal task */ +int +vnode_israge(vnode_t vp) +{ + return ((vp->v_flag & VRAGE)? 1 : 0); +} + +int +vnode_needssnapshots(vnode_t vp) +{ + return ((vp->v_flag & VNEEDSSNAPSHOT)? 1 : 0); +} + + +/* Check the process/thread to see if we should skip atime updates */ +int +vfs_ctx_skipatime (vfs_context_t ctx) { + struct uthread *ut; + proc_t proc; + thread_t thr; + + proc = vfs_context_proc(ctx); + thr = vfs_context_thread (ctx); + + /* Validate pointers in case we were invoked via a kernel context */ + if (thr && proc) { + ut = get_bsdthread_info (thr); + + if (proc->p_lflag & P_LRAGE_VNODES) { + return 1; + } + + if (ut) { + if (ut->uu_flag & UT_RAGE_VNODES) { + return 1; + } + } + } + return 0; +} + /* is vnode_t marked to not keep data cached once it's been consumed */ int vnode_isnocache(vnode_t vp) @@ -1554,6 +1569,46 @@ vnode_islnk(vnode_t vp) return ((vp->v_type == VLNK)? 1 : 0); } +int +vnode_lookup_continue_needed(vnode_t vp, struct componentname *cnp) +{ + struct nameidata *ndp = cnp->cn_ndp; + + if (ndp == NULL) { + panic("vnode_lookup_continue_needed(): cnp->cn_ndp is NULL\n"); + } + + if (vnode_isdir(vp)) { + if (vp->v_mountedhere != NULL) { + goto yes; + } + +#if CONFIG_TRIGGERS + if (vp->v_resolve) { + goto yes; + } +#endif /* CONFIG_TRIGGERS */ + + } + + + if (vnode_islnk(vp)) { + /* From lookup(): || *ndp->ni_next == '/') No need for this, we know we're NULL-terminated here */ + if (cnp->cn_flags & FOLLOW) { + goto yes; + } + if (ndp->ni_flag & NAMEI_TRAILINGSLASH) { + goto yes; + } + } + + return 0; + +yes: + ndp->ni_flag |= NAMEI_CONTLOOKUP; + return EKEEPLOOKING; +} + /* is vnode_t a fifo ? */ int vnode_isfifo(vnode_t vp) @@ -1568,6 +1623,12 @@ vnode_isblk(vnode_t vp) return ((vp->v_type == VBLK)? 1 : 0); } +int +vnode_isspec(vnode_t vp) +{ + return (((vp->v_type == VCHR) || (vp->v_type == VBLK)) ? 1 : 0); +} + /* is vnode_t a char device? */ int vnode_ischr(vnode_t vp) @@ -1582,6 +1643,18 @@ vnode_issock(vnode_t vp) return ((vp->v_type == VSOCK)? 1 : 0); } +/* is vnode_t a device with multiple active vnodes referring to it? */ +int +vnode_isaliased(vnode_t vp) +{ + enum vtype vt = vp->v_type; + if (!((vt == VCHR) || (vt == VBLK))) { + return 0; + } else { + return (vp->v_specflags & SI_ALIASED); + } +} + /* is vnode_t a named stream? 
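/*
 * Sketch of how a filesystem's lookup VNOP might use
 * vnode_lookup_continue_needed() above (the "examplefs_" name is
 * hypothetical).  It is only valid on a componentname handed in by VFS,
 * since the helper relies on cnp->cn_ndp.  When the vnode just found is a
 * mount point, a trigger, or a symlink the caller wants followed, the helper
 * tags the nameidata with NAMEI_CONTLOOKUP and returns EKEEPLOOKING so VFS
 * resumes the walk itself.
 */
static int
examplefs_lookup_tail(vnode_t found_vp, struct componentname *cnp, vnode_t *vpp)
{
	int error;

	*vpp = found_vp;
	error = vnode_lookup_continue_needed(found_vp, cnp);
	if (error != 0) {
		return error;	/* normally EKEEPLOOKING */
	}
	return 0;
}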
*/ int vnode_isnamedstream( @@ -1599,22 +1672,38 @@ vnode_isnamedstream( #endif } -int +int vnode_isshadow( #if NAMEDSTREAMS - vnode_t vp + vnode_t vp #else - __unused vnode_t vp + __unused vnode_t vp #endif - ) + ) { #if NAMEDSTREAMS - return ((vp->v_flag & VISSHADOW) ? 1 : 0); + return ((vp->v_flag & VISSHADOW) ? 1 : 0); #else - return (0); + return (0); #endif } +/* does vnode have associated named stream vnodes ? */ +int +vnode_hasnamedstreams( +#if NAMEDSTREAMS + vnode_t vp +#else + __unused vnode_t vp +#endif + ) +{ +#if NAMEDSTREAMS + return ((vp->v_lflag & VL_HASSTREAMS) ? 1 : 0); +#else + return (0); +#endif +} /* TBD: set vnode_t to not cache data after it is consumed once; used for quota */ void vnode_setnocache(vnode_t vp) @@ -1734,13 +1823,6 @@ vnode_setparent(vnode_t vp, vnode_t dvp) vp->v_parent = dvp; } -const char * -vnode_name(vnode_t vp) -{ - /* we try to keep v_name a reasonable name for the node */ - return(vp->v_name); -} - void vnode_setname(vnode_t vp, char * name) { @@ -1765,7 +1847,10 @@ int vnode_vfs64bitready(vnode_t vp) { - if ((vp->v_mount->mnt_vtable->vfc_64bitready)) + /* + * Checking for dead_mountp is a bit of a hack for SnowLeopard: + */ + if ((vp->v_mount != dead_mountp) && (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY)) return(1); else return(0); @@ -1815,6 +1900,37 @@ vnode_vfsisrdonly(vnode_t vp) return ((vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0); } +int +vnode_compound_rename_available(vnode_t vp) +{ + return vnode_compound_op_available(vp, COMPOUND_VNOP_RENAME); +} +int +vnode_compound_rmdir_available(vnode_t vp) +{ + return vnode_compound_op_available(vp, COMPOUND_VNOP_RMDIR); +} +int +vnode_compound_mkdir_available(vnode_t vp) +{ + return vnode_compound_op_available(vp, COMPOUND_VNOP_MKDIR); +} +int +vnode_compound_remove_available(vnode_t vp) +{ + return vnode_compound_op_available(vp, COMPOUND_VNOP_REMOVE); +} +int +vnode_compound_open_available(vnode_t vp) +{ + return vnode_compound_op_available(vp, COMPOUND_VNOP_OPEN); +} + +int +vnode_compound_op_available(vnode_t vp, compound_vnop_id_t opid) +{ + return ((vp->v_mount->mnt_compound_ops & opid) != 0); +} /* * Returns vnode ref to current working directory; if a per-thread current @@ -2038,7 +2154,7 @@ vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl); - uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), sizeof(struct kauth_filesec) - sizeof(struct kauth_acl)); + uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), KAUTH_FILESEC_SIZE(0) - KAUTH_ACL_SIZE(KAUTH_FILESEC_NOACL)); uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize); error = vn_setxattr(vp, KAUTH_FILESEC_XATTR, @@ -2107,7 +2223,7 @@ vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) { fsec = NULL; - if ((vp->v_type == VDIR) || (vp->v_type == VLNK) || (vp->v_type == VREG)) { + if (XATTR_VNODE_SUPPORTED(vp)) { /* try to get the filesec */ if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) goto out; @@ -2350,8 +2466,8 @@ vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) #if CONFIG_FSE // only send a stat_changed event if this is more than - // just an access time update - if (error == 0 && (vap->va_active != VNODE_ATTR_BIT(va_access_time))) { + // just an access or backup time update + if (error == 0 && (vap->va_active != VNODE_ATTR_BIT(va_access_time)) && (vap->va_active != 
VNODE_ATTR_BIT(va_backup_time))) { if (is_perm_change) { if (need_fsevent(FSE_CHOWN, vp)) { add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE); @@ -2415,7 +2531,7 @@ vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) * Fail for file types that we don't permit extended security * to be set on. */ - if ((vp->v_type != VDIR) && (vp->v_type != VLNK) && (vp->v_type != VREG)) { + if (!XATTR_VNODE_SUPPORTED(vp)) { VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp)); error = EINVAL; goto out; @@ -2506,18 +2622,124 @@ out: } /* - * Definition of vnode operations. + * Upcall for a filesystem to tell VFS about an EVFILT_VNODE-type + * event on a vnode. */ +int +vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap) +{ + /* These are the same as the corresponding knotes, at least for now. Cheating a little. */ + uint32_t knote_mask = (VNODE_EVENT_WRITE | VNODE_EVENT_DELETE | VNODE_EVENT_RENAME + | VNODE_EVENT_LINK | VNODE_EVENT_EXTEND | VNODE_EVENT_ATTRIB); + uint32_t dir_contents_mask = (VNODE_EVENT_DIR_CREATED | VNODE_EVENT_FILE_CREATED + | VNODE_EVENT_DIR_REMOVED | VNODE_EVENT_FILE_REMOVED); + uint32_t knote_events = (events & knote_mask); -#if 0 -/* - *# - *#% lookup dvp L ? ? - *#% lookup vpp - L - - */ -struct vnop_lookup_args { - struct vnodeop_desc *a_desc; - vnode_t a_dvp; + /* Permissions are not explicitly part of the kqueue model */ + if (events & VNODE_EVENT_PERMS) { + knote_events |= NOTE_ATTRIB; + } + + /* Directory contents information just becomes NOTE_WRITE */ + if ((vnode_isdir(vp)) && (events & dir_contents_mask)) { + knote_events |= NOTE_WRITE; + } + + if (knote_events) { + lock_vnode_and_post(vp, knote_events); +#if CONFIG_FSE + if (vap != NULL) { + create_fsevent_from_kevent(vp, events, vap); + } +#else + (void)vap; +#endif + } + + return 0; +} + + + +int +vnode_isdyldsharedcache(vnode_t vp) +{ + return ((vp->v_flag & VSHARED_DYLD) ? 1 : 0); +} + + +/* + * For a filesystem that isn't tracking its own vnode watchers: + * check whether a vnode is being monitored. + */ +int +vnode_ismonitored(vnode_t vp) { + return (vp->v_knotes.slh_first != NULL); +} + +/* + * Initialize a struct vnode_attr and activate the attributes required + * by the vnode_notify() call. + */ +int +vfs_get_notify_attributes(struct vnode_attr *vap) +{ + VATTR_INIT(vap); + vap->va_active = VNODE_NOTIFY_ATTRS; + return 0; +} + +#if CONFIG_TRIGGERS +int +vfs_settriggercallback(fsid_t *fsid, vfs_trigger_callback_t vtc, void *data, uint32_t flags __unused, vfs_context_t ctx) +{ + int error; + mount_t mp; + + mp = mount_list_lookupby_fsid(fsid, 0 /* locked */, 1 /* withref */); + if (mp == NULL) { + return ENOENT; + } + + error = vfs_busy(mp, LK_NOWAIT); + mount_iterdrop(mp); + + if (error != 0) { + return ENOENT; + } + + mount_lock(mp); + if (mp->mnt_triggercallback != NULL) { + error = EBUSY; + mount_unlock(mp); + goto out; + } + + mp->mnt_triggercallback = vtc; + mp->mnt_triggerdata = data; + mount_unlock(mp); + + mp->mnt_triggercallback(mp, VTC_REPLACE, data, ctx); + +out: + vfs_unbusy(mp); + return 0; +} +#endif /* CONFIG_TRIGGERS */ + +/* + * Definition of vnode operations. + */ + +#if 0 +/* + *# + *#% lookup dvp L ? ? 
+ *#% lookup vpp - L - + */ +struct vnop_lookup_args { + struct vnodeop_desc *a_desc; + vnode_t a_dvp; vnode_t *a_vpp; struct componentname *a_cnp; vfs_context_t a_context; @@ -2551,57 +2773,113 @@ VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t { int _err; struct vnop_lookup_args a; - vnode_t vp; - int thread_safe; - int funnel_state = 0; a.a_desc = &vnop_lookup_desc; a.a_dvp = dvp; a.a_vpp = vpp; a.a_cnp = cnp; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(dvp); - if (!thread_safe) { - if ( (_err = lock_fsnode(dvp, &funnel_state)) ) { - return (_err); + _err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a); + if (_err == 0 && *vpp) { + DTRACE_FSINFO(lookup, vnode_t, *vpp); + } + + return (_err); +} + +#if 0 +struct vnop_compound_open_args { + struct vnodeop_desc *a_desc; + vnode_t a_dvp; + vnode_t *a_vpp; + struct componentname *a_cnp; + int32_t a_flags; + int32_t a_fmode; + struct vnode_attr *a_vap; + vfs_context_t a_context; + void *a_reserved; +}; +#endif /* 0 */ + +int +VNOP_COMPOUND_OPEN(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, int32_t fmode, uint32_t *statusp, struct vnode_attr *vap, vfs_context_t ctx) +{ + int _err; + struct vnop_compound_open_args a; + int did_create = 0; + int want_create; + uint32_t tmp_status = 0; + struct componentname *cnp = &ndp->ni_cnd; + + want_create = (flags & O_CREAT); + + a.a_desc = &vnop_compound_open_desc; + a.a_dvp = dvp; + a.a_vpp = vpp; /* Could be NULL */ + a.a_cnp = cnp; + a.a_flags = flags; + a.a_fmode = fmode; + a.a_status = (statusp != NULL) ? statusp : &tmp_status; + a.a_vap = vap; + a.a_context = ctx; + a.a_open_create_authorizer = vn_authorize_create; + a.a_open_existing_authorizer = vn_authorize_open_existing; + a.a_reserved = NULL; + + if (dvp == NULLVP) { + panic("No dvp?"); + } + if (want_create && !vap) { + panic("Want create, but no vap?"); + } + if (!want_create && vap) { + panic("Don't want create, but have a vap?"); + } + + _err = (*dvp->v_op[vnop_compound_open_desc.vdesc_offset])(&a); + if (want_create) { + if (_err == 0 && *vpp) { + DTRACE_FSINFO(compound_open, vnode_t, *vpp); + } else { + DTRACE_FSINFO(compound_open, vnode_t, dvp); } + } else { + DTRACE_FSINFO(compound_open, vnode_t, *vpp); } - _err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a); - vp = *vpp; - - if (!thread_safe) { - if ( (cnp->cn_flags & ISLASTCN) ) { - if ( (cnp->cn_flags & LOCKPARENT) ) { - if ( !(cnp->cn_flags & FSNODELOCKHELD) ) { - /* - * leave the fsnode lock held on - * the directory, but restore the funnel... - * also indicate that we need to drop the - * fsnode_lock when we're done with the - * system call processing for this path - */ - cnp->cn_flags |= FSNODELOCKHELD; - - (void) thread_funnel_set(kernel_flock, funnel_state); - return (_err); - } - } + did_create = (*a.a_status & COMPOUND_OPEN_STATUS_DID_CREATE); + + if (did_create && !want_create) { + panic("Filesystem did a create, even though none was requested?"); + } + + if (did_create) { +#if CONFIG_APPLEDOUBLE + if (!NATIVE_XATTR(dvp)) { + /* + * Remove stale Apple Double file (if any). + */ + xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0); } - unlock_fsnode(dvp, &funnel_state); +#endif /* CONFIG_APPLEDOUBLE */ + /* On create, provide kqueue notification */ + post_event_if_success(dvp, _err, NOTE_WRITE); + } + + lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, did_create); +#if 0 /* FSEvents... 
*/ + if (*vpp && _err && _err != EKEEPLOOKING) { + vnode_put(*vpp); + *vpp = NULLVP; } +#endif /* 0 */ + return (_err); + } #if 0 -/* - *# - *#% create dvp L L L - *#% create vpp - L - - *# - */ - struct vnop_create_args { struct vnodeop_desc *a_desc; vnode_t a_dvp; @@ -2616,8 +2894,6 @@ VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode { int _err; struct vnop_create_args a; - int thread_safe; - int funnel_state = 0; a.a_desc = &vnop_create_desc; a.a_dvp = dvp; @@ -2625,23 +2901,23 @@ VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode a.a_cnp = cnp; a.a_vap = vap; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(dvp); - if (!thread_safe) { - if ( (_err = lock_fsnode(dvp, &funnel_state)) ) { - return (_err); - } - } _err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a); + if (_err == 0 && *vpp) { + DTRACE_FSINFO(create, vnode_t, *vpp); + } + +#if CONFIG_APPLEDOUBLE if (_err == 0 && !NATIVE_XATTR(dvp)) { /* * Remove stale Apple Double file (if any). */ - xattrfile_remove(dvp, cnp->cn_nameptr, ctx, thread_safe, 0); - } - if (!thread_safe) { - unlock_fsnode(dvp, &funnel_state); + xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0); } +#endif /* CONFIG_APPLEDOUBLE */ + + post_event_if_success(dvp, _err, NOTE_WRITE); + return (_err); } @@ -2662,33 +2938,13 @@ struct vnop_whiteout_args { }; #endif /* 0*/ errno_t -VNOP_WHITEOUT(vnode_t dvp, struct componentname * cnp, int flags, vfs_context_t ctx) +VNOP_WHITEOUT(__unused vnode_t dvp, __unused struct componentname *cnp, + __unused int flags, __unused vfs_context_t ctx) { - int _err; - struct vnop_whiteout_args a; - int thread_safe; - int funnel_state = 0; - - a.a_desc = &vnop_whiteout_desc; - a.a_dvp = dvp; - a.a_cnp = cnp; - a.a_flags = flags; - a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(dvp); - - if (!thread_safe) { - if ( (_err = lock_fsnode(dvp, &funnel_state)) ) { - return (_err); - } - } - _err = (*dvp->v_op[vnop_whiteout_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(dvp, &funnel_state); - } - return (_err); + return (ENOTSUP); // XXX OBSOLETE } - #if 0 +#if 0 /* *# *#% mknod dvp L U U @@ -2710,8 +2966,6 @@ VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_ int _err; struct vnop_mknod_args a; - int thread_safe; - int funnel_state = 0; a.a_desc = &vnop_mknod_desc; a.a_dvp = dvp; @@ -2719,17 +2973,14 @@ VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_ a.a_cnp = cnp; a.a_vap = vap; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(dvp); - if (!thread_safe) { - if ( (_err = lock_fsnode(dvp, &funnel_state)) ) { - return (_err); - } - } _err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(dvp, &funnel_state); + if (_err == 0 && *vpp) { + DTRACE_FSINFO(mknod, vnode_t, *vpp); } + + post_event_if_success(dvp, _err, NOTE_WRITE); + return (_err); } @@ -2747,38 +2998,22 @@ struct vnop_open_args { }; #endif /* 0*/ errno_t -VNOP_OPEN(vnode_t vp, int mode, vfs_context_t ctx) +VNOP_OPEN(vnode_t vp, int mode, vfs_context_t ctx) { int _err; struct vnop_open_args a; - int thread_safe; - int funnel_state = 0; if (ctx == NULL) { ctx = vfs_context_current(); - } + } a.a_desc = &vnop_open_desc; a.a_vp = vp; a.a_mode = mode; - a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) { - if ( (_err = lock_fsnode(vp, NULL)) ) { - (void) 
thread_funnel_set(kernel_flock, funnel_state); - return (_err); - } - } - } + a.a_context = ctx; + _err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a); - if (!thread_safe) { - if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) { - unlock_fsnode(vp, NULL); - } - (void) thread_funnel_set(kernel_flock, funnel_state); - } + DTRACE_FSINFO(open, vnode_t, vp); + return (_err); } @@ -2800,8 +3035,6 @@ VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t ctx) { int _err; struct vnop_close_args a; - int thread_safe; - int funnel_state = 0; if (ctx == NULL) { ctx = vfs_context_current(); @@ -2810,24 +3043,10 @@ VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t ctx) a.a_vp = vp; a.a_fflag = fflag; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) { - if ( (_err = lock_fsnode(vp, NULL)) ) { - (void) thread_funnel_set(kernel_flock, funnel_state); - return (_err); - } - } - } + _err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a); - if (!thread_safe) { - if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) { - unlock_fsnode(vp, NULL); - } - (void) thread_funnel_set(kernel_flock, funnel_state); - } + DTRACE_FSINFO(close, vnode_t, vp); + return (_err); } @@ -2849,8 +3068,6 @@ VNOP_ACCESS(vnode_t vp, int action, vfs_context_t ctx) { int _err; struct vnop_access_args a; - int thread_safe; - int funnel_state = 0; if (ctx == NULL) { ctx = vfs_context_current(); @@ -2859,17 +3076,10 @@ VNOP_ACCESS(vnode_t vp, int action, vfs_context_t ctx) a.a_vp = vp; a.a_action = action; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - if ( (_err = lock_fsnode(vp, &funnel_state)) ) { - return (_err); - } - } _err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(vp, &funnel_state); - } + DTRACE_FSINFO(access, vnode_t, vp); + return (_err); } @@ -2891,24 +3101,15 @@ VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx) { int _err; struct vnop_getattr_args a; - int thread_safe; - int funnel_state = 0; /* protected by thread_safe */ a.a_desc = &vnop_getattr_desc; a.a_vp = vp; a.a_vap = vap; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - if ( (_err = lock_fsnode(vp, &funnel_state)) ) { - return (_err); - } - } _err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(vp, &funnel_state); - } + DTRACE_FSINFO(getattr, vnode_t, vp); + return (_err); } @@ -2930,22 +3131,16 @@ VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx) { int _err; struct vnop_setattr_args a; - int thread_safe; - int funnel_state = 0; /* protected by thread_safe */ a.a_desc = &vnop_setattr_desc; a.a_vp = vp; a.a_vap = vap; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - if ( (_err = lock_fsnode(vp, &funnel_state)) ) { - return (_err); - } - } _err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a); + DTRACE_FSINFO(setattr, vnode_t, vp); +#if CONFIG_APPLEDOUBLE /* * Shadow uid/gid/mod change to extended attribute file. 
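/*
 * Caller-side sketch (assumed usage): querying a couple of attributes through
 * the now funnel-free VNOP_GETATTR()/vnode_getattr() path.  "vp" is a vnode_t
 * on which the caller already holds an iocount.
 */
static int
print_size_example(vnode_t vp, vfs_context_t ctx)
{
	struct vnode_attr va;
	int error;

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_data_size);
	VATTR_WANTED(&va, va_modify_time);

	error = vnode_getattr(vp, &va, ctx);
	if (error == 0 && VATTR_IS_SUPPORTED(&va, va_data_size)) {
		printf("size: %lld\n", (long long)va.va_data_size);
	}
	return error;
}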
*/ @@ -2973,16 +3168,15 @@ VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx) dvp = vnode_getparent(vp); vname = vnode_getname(vp); - xattrfile_setattr(dvp, vname, &va, ctx, thread_safe); + xattrfile_setattr(dvp, vname, &va, ctx); if (dvp != NULLVP) vnode_put(dvp); if (vname != NULL) vnode_putname(vname); } } - if (!thread_safe) { - unlock_fsnode(vp, &funnel_state); - } +#endif /* CONFIG_APPLEDOUBLE */ + /* * If we have changed any of the things about the file that are likely * to result in changes to authorization results, blow the vnode auth @@ -2995,9 +3189,23 @@ VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx) VATTR_IS_SUPPORTED(vap, va_flags) || VATTR_IS_SUPPORTED(vap, va_acl) || VATTR_IS_SUPPORTED(vap, va_uuuid) || - VATTR_IS_SUPPORTED(vap, va_guuid))) + VATTR_IS_SUPPORTED(vap, va_guuid))) { vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS); +#if NAMEDSTREAMS + if (vfs_authopaque(vp->v_mount) && vnode_hasnamedstreams(vp)) { + vnode_t svp; + if (vnode_getnamedstream(vp, &svp, XATTR_RESOURCEFORK_NAME, NS_OPEN, 0, ctx) == 0) { + vnode_uncache_authorized_action(svp, KAUTH_INVALIDATE_CACHED_RIGHTS); + vnode_put(svp); + } + } +#endif /* NAMEDSTREAMS */ + } + + + post_event_if_success(vp, _err, NOTE_ATTRIB); + return (_err); } @@ -3021,11 +3229,12 @@ VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx) { int _err; struct vnop_read_args a; - int thread_safe; - int funnel_state = 0; +#if CONFIG_DTRACE + user_ssize_t resid = uio_resid(uio); +#endif if (ctx == NULL) { - ctx = vfs_context_current(); + return EINVAL; } a.a_desc = &vnop_read_desc; @@ -3033,25 +3242,11 @@ VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx) a.a_uio = uio; a.a_ioflag = ioflag; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) { - if ( (_err = lock_fsnode(vp, NULL)) ) { - (void) thread_funnel_set(kernel_flock, funnel_state); - return (_err); - } - } - } _err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a); + DTRACE_FSINFO_IO(read, + vnode_t, vp, user_ssize_t, (resid - uio_resid(uio))); - if (!thread_safe) { - if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) { - unlock_fsnode(vp, NULL); - } - (void) thread_funnel_set(kernel_flock, funnel_state); - } return (_err); } @@ -3075,11 +3270,12 @@ VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx) { struct vnop_write_args a; int _err; - int thread_safe; - int funnel_state = 0; +#if CONFIG_DTRACE + user_ssize_t resid = uio_resid(uio); +#endif if (ctx == NULL) { - ctx = vfs_context_current(); + return EINVAL; } a.a_desc = &vnop_write_desc; @@ -3087,25 +3283,13 @@ VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx) a.a_uio = uio; a.a_ioflag = ioflag; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) { - if ( (_err = lock_fsnode(vp, NULL)) ) { - (void) thread_funnel_set(kernel_flock, funnel_state); - return (_err); - } - } - } _err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a); + DTRACE_FSINFO_IO(write, + vnode_t, vp, user_ssize_t, (resid - uio_resid(uio))); + + post_event_if_success(vp, _err, NOTE_WRITE); - if (!thread_safe) { - if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) { - unlock_fsnode(vp, 
NULL); - } - (void) thread_funnel_set(kernel_flock, funnel_state); - } return (_err); } @@ -3130,15 +3314,27 @@ VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ct { int _err; struct vnop_ioctl_args a; - int thread_safe; - int funnel_state = 0; if (ctx == NULL) { ctx = vfs_context_current(); } - if (vfs_context_is64bit(ctx)) { - if (!vnode_vfs64bitready(vp)) { + /* + * This check should probably have been put in the TTY code instead... + * + * We have to be careful about what we assume during startup and shutdown. + * We have to be able to use the root filesystem's device vnode even when + * devfs isn't mounted (yet/anymore), so we can't go looking at its mount + * structure. If there is no data pointer, it doesn't matter whether + * the device is 64-bit ready. Any command (like DKIOCSYNCHRONIZECACHE) + * which passes NULL for its data pointer can therefore be used during + * mount or unmount of the root filesystem. + * + * Depending on what root filesystems need to do during mount/unmount, we + * may need to loosen this check again in the future. + */ + if (vfs_context_is64bit(ctx) && !(vnode_ischr(vp) || vnode_isblk(vp))) { + if (data != NULL && !vnode_vfs64bitready(vp)) { return(ENOTTY); } } @@ -3149,24 +3345,10 @@ VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ct a.a_data = data; a.a_fflag = fflag; a.a_context= ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) { - if ( (_err = lock_fsnode(vp, NULL)) ) { - (void) thread_funnel_set(kernel_flock, funnel_state); - return (_err); - } - } - } _err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a); - if (!thread_safe) { - if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) { - unlock_fsnode(vp, NULL); - } - (void) thread_funnel_set(kernel_flock, funnel_state); - } + DTRACE_FSINFO(ioctl, vnode_t, vp); + return (_err); } @@ -3191,8 +3373,6 @@ VNOP_SELECT(vnode_t vp, int which , int fflags, void * wql, vfs_context_t ctx) { int _err; struct vnop_select_args a; - int thread_safe; - int funnel_state = 0; if (ctx == NULL) { ctx = vfs_context_current(); @@ -3203,24 +3383,10 @@ VNOP_SELECT(vnode_t vp, int which , int fflags, void * wql, vfs_context_t ctx) a.a_fflags = fflags; a.a_context = ctx; a.a_wql = wql; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) { - if ( (_err = lock_fsnode(vp, NULL)) ) { - (void) thread_funnel_set(kernel_flock, funnel_state); - return (_err); - } - } - } _err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a); - if (!thread_safe) { - if (vp->v_type != VCHR && vp->v_type != VFIFO && vp->v_type != VSOCK) { - unlock_fsnode(vp, NULL); - } - (void) thread_funnel_set(kernel_flock, funnel_state); - } + DTRACE_FSINFO(select, vnode_t, vp); + return (_err); } @@ -3245,41 +3411,20 @@ VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t ctx) { int _err; struct vnop_exchange_args a; - int thread_safe; - int funnel_state = 0; - vnode_t lock_first = NULL, lock_second = NULL; a.a_desc = &vnop_exchange_desc; a.a_fvp = fvp; a.a_tvp = tvp; a.a_options = options; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(fvp); - if (!thread_safe) { - /* - * Lock in vnode address order to avoid deadlocks - */ - if (fvp < tvp) { - lock_first = fvp; - lock_second = tvp; - } else { - lock_first 
= tvp; - lock_second = fvp; - } - if ( (_err = lock_fsnode(lock_first, &funnel_state)) ) { - return (_err); - } - if ( (_err = lock_fsnode(lock_second, NULL)) ) { - unlock_fsnode(lock_first, &funnel_state); - return (_err); - } - } _err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(lock_second, NULL); - unlock_fsnode(lock_first, &funnel_state); - } + DTRACE_FSINFO(exchange, vnode_t, fvp); + + /* Don't post NOTE_WRITE because file descriptors follow the data ... */ + post_event_if_success(fvp, _err, NOTE_ATTRIB); + post_event_if_success(tvp, _err, NOTE_ATTRIB); + return (_err); } @@ -3302,22 +3447,15 @@ VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t ctx) { struct vnop_revoke_args a; int _err; - int thread_safe; - int funnel_state = 0; a.a_desc = &vnop_revoke_desc; a.a_vp = vp; a.a_flags = flags; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - } _err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a); - if (!thread_safe) { - (void) thread_funnel_set(kernel_flock, funnel_state); - } + DTRACE_FSINFO(revoke, vnode_t, vp); + return (_err); } @@ -3340,24 +3478,15 @@ VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t ctx) { int _err; struct vnop_mmap_args a; - int thread_safe; - int funnel_state = 0; a.a_desc = &vnop_mmap_desc; a.a_vp = vp; a.a_fflags = fflags; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - if ( (_err = lock_fsnode(vp, &funnel_state)) ) { - return (_err); - } - } _err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(vp, &funnel_state); - } + DTRACE_FSINFO(mmap, vnode_t, vp); + return (_err); } @@ -3379,23 +3508,14 @@ VNOP_MNOMAP(vnode_t vp, vfs_context_t ctx) { int _err; struct vnop_mnomap_args a; - int thread_safe; - int funnel_state = 0; a.a_desc = &vnop_mnomap_desc; a.a_vp = vp; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - if ( (_err = lock_fsnode(vp, &funnel_state)) ) { - return (_err); - } - } _err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(vp, &funnel_state); - } + DTRACE_FSINFO(mnomap, vnode_t, vp); + return (_err); } @@ -3418,24 +3538,15 @@ VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t ctx) { struct vnop_fsync_args a; int _err; - int thread_safe; - int funnel_state = 0; a.a_desc = &vnop_fsync_desc; a.a_vp = vp; a.a_waitfor = waitfor; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - if ( (_err = lock_fsnode(vp, &funnel_state)) ) { - return (_err); - } - } _err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(vp, &funnel_state); - } + DTRACE_FSINFO(fsync, vnode_t, vp); + return (_err); } @@ -3461,8 +3572,6 @@ VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_ { int _err; struct vnop_remove_args a; - int thread_safe; - int funnel_state = 0; a.a_desc = &vnop_remove_desc; a.a_dvp = dvp; @@ -3470,32 +3579,78 @@ VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_ a.a_cnp = cnp; a.a_flags = flags; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(dvp); - if (!thread_safe) { - if ( (_err = lock_fsnode(vp, &funnel_state)) ) { - return (_err); - } - } _err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a); + DTRACE_FSINFO(remove, vnode_t, vp); if (_err == 0) { vnode_setneedinactive(vp); +#if CONFIG_APPLEDOUBLE + if ( !(NATIVE_XATTR(dvp)) ) { + /* + * Remove any associated extended 
attribute file (._ AppleDouble file). + */ + xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1); + } +#endif /* CONFIG_APPLEDOUBLE */ + } + + post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK); + post_event_if_success(dvp, _err, NOTE_WRITE); + + return (_err); +} + +int +VNOP_COMPOUND_REMOVE(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx) +{ + int _err; + struct vnop_compound_remove_args a; + int no_vp = (*vpp == NULLVP); + a.a_desc = &vnop_compound_remove_desc; + a.a_dvp = dvp; + a.a_vpp = vpp; + a.a_cnp = &ndp->ni_cnd; + a.a_flags = flags; + a.a_vap = vap; + a.a_context = ctx; + a.a_remove_authorizer = vn_authorize_unlink; + + _err = (*dvp->v_op[vnop_compound_remove_desc.vdesc_offset])(&a); + if (_err == 0 && *vpp) { + DTRACE_FSINFO(compound_remove, vnode_t, *vpp); + } else { + DTRACE_FSINFO(compound_remove, vnode_t, dvp); + } + if (_err == 0) { + vnode_setneedinactive(*vpp); +#if CONFIG_APPLEDOUBLE if ( !(NATIVE_XATTR(dvp)) ) { /* * Remove any associated extended attribute file (._ AppleDouble file). */ - xattrfile_remove(dvp, cnp->cn_nameptr, ctx, thread_safe, 1); + xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 1); } +#endif /* CONFIG_APPLEDOUBLE */ } - if (!thread_safe) { - unlock_fsnode(vp, &funnel_state); + + post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK); + post_event_if_success(dvp, _err, NOTE_WRITE); + + if (no_vp) { + lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0); + if (*vpp && _err && _err != EKEEPLOOKING) { + vnode_put(*vpp); + *vpp = NULLVP; + } } + + //printf("VNOP_COMPOUND_REMOVE() returning %d\n", _err); + return (_err); } - #if 0 /* *# @@ -3516,9 +3671,8 @@ VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ct { int _err; struct vnop_link_args a; - int thread_safe; - int funnel_state = 0; +#if CONFIG_APPLEDOUBLE /* * For file systems with non-native extended attributes, * disallow linking to an existing "._" Apple Double file. 
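/*
 * Availability sketch (illustrative): vn_rename() below keys its batched vs.
 * classic behaviour off vnode_compound_rename_available(); the same pattern
 * applies to remove.  The vnode_compound_*_available() helpers defined
 * earlier simply test the compound-VNOP bitmask the filesystem registered on
 * its mount.
 */
static int
can_batch_remove(vnode_t dvp)
{
	/* non-zero: lookup, authorization and unlink can be one compound VNOP */
	return vnode_compound_remove_available(dvp);
}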
@@ -3537,130 +3691,69 @@ VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ct return (_err); } } +#endif /* CONFIG_APPLEDOUBLE */ + a.a_desc = &vnop_link_desc; a.a_vp = vp; a.a_tdvp = tdvp; a.a_cnp = cnp; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - if ( (_err = lock_fsnode(vp, &funnel_state)) ) { - return (_err); - } - } _err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(vp, &funnel_state); - } + DTRACE_FSINFO(link, vnode_t, vp); + + post_event_if_success(vp, _err, NOTE_LINK); + post_event_if_success(tdvp, _err, NOTE_WRITE); + return (_err); } - -#if 0 -/* - *# - *#% rename fdvp U U U - *#% rename fvp U U U - *#% rename tdvp L U U - *#% rename tvp X U U - *# - */ -struct vnop_rename_args { - struct vnodeop_desc *a_desc; - vnode_t a_fdvp; - vnode_t a_fvp; - struct componentname *a_fcnp; - vnode_t a_tdvp; - vnode_t a_tvp; - struct componentname *a_tcnp; - vfs_context_t a_context; -}; -#endif /* 0*/ errno_t -VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, - struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, - vfs_context_t ctx) +vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap, + struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap, + uint32_t flags, vfs_context_t ctx) { int _err; - struct vnop_rename_args a; - int funnel_state = 0; + struct nameidata *fromnd = NULL; + struct nameidata *tond = NULL; +#if CONFIG_APPLEDOUBLE + vnode_t src_attr_vp = NULLVP; + vnode_t dst_attr_vp = NULLVP; char smallname1[48]; char smallname2[48]; char *xfromname = NULL; char *xtoname = NULL; - vnode_t lock_first = NULL, lock_second = NULL; - vnode_t fdvp_unsafe = NULLVP; - vnode_t tdvp_unsafe = NULLVP; - - a.a_desc = &vnop_rename_desc; - a.a_fdvp = fdvp; - a.a_fvp = fvp; - a.a_fcnp = fcnp; - a.a_tdvp = tdvp; - a.a_tvp = tvp; - a.a_tcnp = tcnp; - a.a_context = ctx; - - if (!THREAD_SAFE_FS(fdvp)) - fdvp_unsafe = fdvp; - if (!THREAD_SAFE_FS(tdvp)) - tdvp_unsafe = tdvp; - - if (fdvp_unsafe != NULLVP) { - /* - * Lock parents in vnode address order to avoid deadlocks - * note that it's possible for the fdvp to be unsafe, - * but the tdvp to be safe because tvp could be a directory - * in the root of a filesystem... 
in that case, tdvp is the - * in the filesystem that this root is mounted on - */ - if (tdvp_unsafe == NULL || fdvp_unsafe == tdvp_unsafe) { - lock_first = fdvp_unsafe; - lock_second = NULL; - } else if (fdvp_unsafe < tdvp_unsafe) { - lock_first = fdvp_unsafe; - lock_second = tdvp_unsafe; - } else { - lock_first = tdvp_unsafe; - lock_second = fdvp_unsafe; - } - if ( (_err = lock_fsnode(lock_first, &funnel_state)) ) - return (_err); +#endif /* CONFIG_APPLEDOUBLE */ + int batched; + uint32_t tdfflags; // Target directory file flags - if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) { - unlock_fsnode(lock_first, &funnel_state); - return (_err); - } + batched = vnode_compound_rename_available(fdvp); - /* - * Lock both children in vnode address order to avoid deadlocks - */ - if (tvp == NULL || tvp == fvp) { - lock_first = fvp; - lock_second = NULL; - } else if (fvp < tvp) { - lock_first = fvp; - lock_second = tvp; - } else { - lock_first = tvp; - lock_second = fvp; - } - if ( (_err = lock_fsnode(lock_first, NULL)) ) - goto out1; + if (!batched) { + if (*fvpp == NULLVP) + panic("Not batched, and no fvp?"); + } - if (lock_second != NULL && (_err = lock_fsnode(lock_second, NULL))) { - unlock_fsnode(lock_first, NULL); - goto out1; - } +#if CONFIG_SECLUDED_RENAME + if ((fcnp->cn_flags & CN_SECLUDE_RENAME) && + (((*fvpp)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_SECLUDE_RENAME) == 0)) { + return ENOTSUP; } +#endif + +#if CONFIG_APPLEDOUBLE /* - * Save source and destination names (._ AppleDouble files). - * Skip if source already has a "._" prefix. + * We need to preflight any potential AppleDouble file for the source file + * before doing the rename operation, since we could potentially be doing + * this operation on a network filesystem, and would end up duplicating + * the work. Also, save the source and destination names. Skip it if the + * source has a "._" prefix. */ + if (!NATIVE_XATTR(fdvp) && !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) { size_t len; + int error; /* Get source attribute file name. */ len = fcnp->cn_namelen + 3; @@ -3683,197 +3776,385 @@ VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, strlcpy(xtoname, "._", min(sizeof smallname2, len)); strncat(xtoname, tcnp->cn_nameptr, tcnp->cn_namelen); xtoname[len-1] = '\0'; - } + + /* + * Look up source attribute file, keep reference on it if exists. + * Note that we do the namei with the nameiop of RENAME, which is different than + * in the rename syscall. It's OK if the source file does not exist, since this + * is only for AppleDouble files. + */ + if (xfromname != NULL) { + MALLOC(fromnd, struct nameidata *, sizeof (struct nameidata), M_TEMP, M_WAITOK); + NDINIT(fromnd, RENAME, OP_RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, + UIO_SYSSPACE, CAST_USER_ADDR_T(xfromname), ctx); + fromnd->ni_dvp = fdvp; + error = namei(fromnd); + + /* + * If there was an error looking up source attribute file, + * we'll behave as if it didn't exist. + */ - _err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a); + if (error == 0) { + if (fromnd->ni_vp) { + /* src_attr_vp indicates need to call vnode_put / nameidone later */ + src_attr_vp = fromnd->ni_vp; + + if (fromnd->ni_vp->v_type != VREG) { + src_attr_vp = NULLVP; + vnode_put(fromnd->ni_vp); + } + } + /* + * Either we got an invalid vnode type (not a regular file) or the namei lookup + * suppressed ENOENT as a valid error since we're renaming. 
Either way, we don't + * have a vnode here, so we drop our namei buffer for the source attribute file + */ + if (src_attr_vp == NULLVP) { + nameidone(fromnd); + } + } + } + } +#endif /* CONFIG_APPLEDOUBLE */ - if (fdvp_unsafe != NULLVP) { - if (lock_second != NULL) - unlock_fsnode(lock_second, NULL); - unlock_fsnode(lock_first, NULL); + if (batched) { + _err = VNOP_COMPOUND_RENAME(fdvp, fvpp, fcnp, fvap, tdvp, tvpp, tcnp, tvap, flags, ctx); + if (_err != 0) { + printf("VNOP_COMPOUND_RENAME() returned %d\n", _err); + } + } else { + _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx); } +#if CONFIG_MACF if (_err == 0) { - if (tvp && tvp != fvp) - vnode_setneedinactive(tvp); + mac_vnode_notify_rename(ctx, *fvpp, tdvp, tcnp); + } +#endif + + /* + * If moved to a new directory that is restricted, + * set the restricted flag on the item moved. + */ + if (_err == 0) { + _err = vnode_flags(tdvp, &tdfflags, ctx); + if (_err == 0 && (tdfflags & SF_RESTRICTED)) { + uint32_t fflags; + _err = vnode_flags(*fvpp, &fflags, ctx); + if (_err == 0 && !(fflags & SF_RESTRICTED)) { + struct vnode_attr va; + VATTR_INIT(&va); + VATTR_SET(&va, va_flags, fflags | SF_RESTRICTED); + _err = vnode_setattr(*fvpp, &va, ctx); + } + } } +#if CONFIG_APPLEDOUBLE /* * Rename any associated extended attribute file (._ AppleDouble file). */ if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) { - struct nameidata fromnd, tond; - int killdest = 0; - int error; - + int error = 0; + /* - * Get source attribute file vnode. - * Note that fdvp already has an iocount reference and - * using DELETE will take an additional reference. + * Get destination attribute file vnode. + * Note that tdvp already has an iocount reference. Make sure to check that we + * get a valid vnode from namei. */ - NDINIT(&fromnd, DELETE, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE, - CAST_USER_ADDR_T(xfromname), ctx); - fromnd.ni_dvp = fdvp; - error = namei(&fromnd); - - if (error) { - /* When source doesn't exist there still may be a destination. */ - if (error == ENOENT) { - killdest = 1; + MALLOC(tond, struct nameidata *, sizeof(struct nameidata), M_TEMP, M_WAITOK); + NDINIT(tond, RENAME, OP_RENAME, + NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE, + CAST_USER_ADDR_T(xtoname), ctx); + tond->ni_dvp = tdvp; + error = namei(tond); + + if (error) + goto ad_error; + + if (tond->ni_vp) { + dst_attr_vp = tond->ni_vp; + } + + if (src_attr_vp) { + const char *old_name = src_attr_vp->v_name; + vnode_t old_parent = src_attr_vp->v_parent; + + if (batched) { + error = VNOP_COMPOUND_RENAME(fdvp, &src_attr_vp, &fromnd->ni_cnd, NULL, + tdvp, &dst_attr_vp, &tond->ni_cnd, NULL, + 0, ctx); } else { - goto out; + error = VNOP_RENAME(fdvp, src_attr_vp, &fromnd->ni_cnd, + tdvp, dst_attr_vp, &tond->ni_cnd, ctx); + } + + if (error == 0 && old_name == src_attr_vp->v_name && + old_parent == src_attr_vp->v_parent) { + int update_flags = VNODE_UPDATE_NAME; + + if (fdvp != tdvp) + update_flags |= VNODE_UPDATE_PARENT; + + if ((src_attr_vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_NOUPDATEID_RENAME) == 0) { + vnode_update_identity(src_attr_vp, tdvp, + tond->ni_cnd.cn_nameptr, + tond->ni_cnd.cn_namelen, + tond->ni_cnd.cn_hash, + update_flags); + } + } + + /* kevent notifications for moving resource files + * _err is zero if we're here, so no need to notify directories, code + * below will do that. 
only need to post the rename on the source and + * possibly a delete on the dest + */ + post_event_if_success(src_attr_vp, error, NOTE_RENAME); + if (dst_attr_vp) { + post_event_if_success(dst_attr_vp, error, NOTE_DELETE); } - } else if (fromnd.ni_vp->v_type != VREG) { - vnode_put(fromnd.ni_vp); - nameidone(&fromnd); - killdest = 1; - } - if (killdest) { - struct vnop_remove_args args; + } else if (dst_attr_vp) { /* - * Get destination attribute file vnode. + * Just delete destination attribute file vnode if it exists, since + * we didn't have a source attribute file. * Note that tdvp already has an iocount reference. */ - NDINIT(&tond, DELETE, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE, - CAST_USER_ADDR_T(xtoname), ctx); - tond.ni_dvp = tdvp; - error = namei(&tond); - if (error) { - goto out; - } - if (tond.ni_vp->v_type != VREG) { - vnode_put(tond.ni_vp); - nameidone(&tond); - goto out; - } + + struct vnop_remove_args args; + args.a_desc = &vnop_remove_desc; args.a_dvp = tdvp; - args.a_vp = tond.ni_vp; - args.a_cnp = &tond.ni_cnd; + args.a_vp = dst_attr_vp; + args.a_cnp = &tond->ni_cnd; args.a_context = ctx; - if (fdvp_unsafe != NULLVP) - error = lock_fsnode(tond.ni_vp, NULL); if (error == 0) { - error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args); - - if (fdvp_unsafe != NULLVP) - unlock_fsnode(tond.ni_vp, NULL); + error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args); if (error == 0) - vnode_setneedinactive(tond.ni_vp); + vnode_setneedinactive(dst_attr_vp); } - vnode_put(tond.ni_vp); - nameidone(&tond); - goto out; + + /* kevent notification for deleting the destination's attribute file + * if it existed. Only need to post the delete on the destination, since + * the code below will handle the directories. + */ + post_event_if_success(dst_attr_vp, error, NOTE_DELETE); } + } +ad_error: + if (src_attr_vp) { + vnode_put(src_attr_vp); + nameidone(fromnd); + } + if (dst_attr_vp) { + vnode_put(dst_attr_vp); + nameidone(tond); + } + if (xfromname && xfromname != &smallname1[0]) { + FREE(xfromname, M_TEMP); + } + if (xtoname && xtoname != &smallname2[0]) { + FREE(xtoname, M_TEMP); + } +#endif /* CONFIG_APPLEDOUBLE */ + if (fromnd) { + FREE(fromnd, M_TEMP); + } + if (tond) { + FREE(tond, M_TEMP); + } + return _err; +} - /* - * Get destination attribute file vnode. 
- */ - NDINIT(&tond, RENAME, - NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE, - CAST_USER_ADDR_T(xtoname), ctx); - tond.ni_dvp = tdvp; - error = namei(&tond); - if (error) { - vnode_put(fromnd.ni_vp); - nameidone(&fromnd); - goto out; - } - a.a_desc = &vnop_rename_desc; - a.a_fdvp = fdvp; - a.a_fvp = fromnd.ni_vp; - a.a_fcnp = &fromnd.ni_cnd; - a.a_tdvp = tdvp; - a.a_tvp = tond.ni_vp; - a.a_tcnp = &tond.ni_cnd; - a.a_context = ctx; +#if 0 +/* + *# + *#% rename fdvp U U U + *#% rename fvp U U U + *#% rename tdvp L U U + *#% rename tvp X U U + *# + */ +struct vnop_rename_args { + struct vnodeop_desc *a_desc; + vnode_t a_fdvp; + vnode_t a_fvp; + struct componentname *a_fcnp; + vnode_t a_tdvp; + vnode_t a_tvp; + struct componentname *a_tcnp; + vfs_context_t a_context; +}; +#endif /* 0*/ +errno_t +VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, + struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, + vfs_context_t ctx) +{ + int _err = 0; + int events; + struct vnop_rename_args a; - if (fdvp_unsafe != NULLVP) { - /* - * Lock in vnode address order to avoid deadlocks + a.a_desc = &vnop_rename_desc; + a.a_fdvp = fdvp; + a.a_fvp = fvp; + a.a_fcnp = fcnp; + a.a_tdvp = tdvp; + a.a_tvp = tvp; + a.a_tcnp = tcnp; + a.a_context = ctx; + + /* do the rename of the main file. */ + _err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a); + DTRACE_FSINFO(rename, vnode_t, fdvp); + + if (_err == 0) { + if (tvp && tvp != fvp) + vnode_setneedinactive(tvp); + } + + /* Wrote at least one directory. If transplanted a dir, also changed link counts */ + if (_err == 0) { + events = NOTE_WRITE; + if (vnode_isdir(fvp)) { + /* Link count on dir changed only if we are moving a dir and... + * --Moved to new dir, not overwriting there + * --Kept in same dir and DID overwrite */ - if (tond.ni_vp == NULL || tond.ni_vp == fromnd.ni_vp) { - lock_first = fromnd.ni_vp; - lock_second = NULL; - } else if (fromnd.ni_vp < tond.ni_vp) { - lock_first = fromnd.ni_vp; - lock_second = tond.ni_vp; - } else { - lock_first = tond.ni_vp; - lock_second = fromnd.ni_vp; - } - if ( (error = lock_fsnode(lock_first, NULL)) == 0) { - if (lock_second != NULL && (error = lock_fsnode(lock_second, NULL)) ) - unlock_fsnode(lock_first, NULL); + if (((fdvp != tdvp) && (!tvp)) || ((fdvp == tdvp) && (tvp))) { + events |= NOTE_LINK; } } - if (error == 0) { - const char *oname; - vnode_t oparent; - /* Save these off so we can later verify them (fix up below) */ - oname = fromnd.ni_vp->v_name; - oparent = fromnd.ni_vp->v_parent; + lock_vnode_and_post(fdvp, events); + if (fdvp != tdvp) { + lock_vnode_and_post(tdvp, events); + } - error = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a); + /* If you're replacing the target, post a deletion for it */ + if (tvp) + { + lock_vnode_and_post(tvp, NOTE_DELETE); + } - if (fdvp_unsafe != NULLVP) { - if (lock_second != NULL) - unlock_fsnode(lock_second, NULL); - unlock_fsnode(lock_first, NULL); - } - if (error == 0) { - vnode_setneedinactive(fromnd.ni_vp); - - if (tond.ni_vp && tond.ni_vp != fromnd.ni_vp) - vnode_setneedinactive(tond.ni_vp); - /* - * Fix up name & parent pointers on ._ file - */ - if (oname == fromnd.ni_vp->v_name && - oparent == fromnd.ni_vp->v_parent) { - int update_flags; - - update_flags = VNODE_UPDATE_NAME; - - if (fdvp != tdvp) - update_flags |= VNODE_UPDATE_PARENT; - - vnode_update_identity(fromnd.ni_vp, tdvp, - tond.ni_cnd.cn_nameptr, - tond.ni_cnd.cn_namelen, - tond.ni_cnd.cn_hash, - update_flags); - } + lock_vnode_and_post(fvp, 
NOTE_RENAME); + } + + return (_err); +} + +int +VNOP_COMPOUND_RENAME( + struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap, + struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap, + uint32_t flags, vfs_context_t ctx) +{ + int _err = 0; + int events; + struct vnop_compound_rename_args a; + int no_fvp, no_tvp; + + no_fvp = (*fvpp) == NULLVP; + no_tvp = (*tvpp) == NULLVP; + + a.a_desc = &vnop_compound_rename_desc; + + a.a_fdvp = fdvp; + a.a_fvpp = fvpp; + a.a_fcnp = fcnp; + a.a_fvap = fvap; + + a.a_tdvp = tdvp; + a.a_tvpp = tvpp; + a.a_tcnp = tcnp; + a.a_tvap = tvap; + + a.a_flags = flags; + a.a_context = ctx; + a.a_rename_authorizer = vn_authorize_rename; + a.a_reserved = NULL; + + /* do the rename of the main file. */ + _err = (*fdvp->v_op[vnop_compound_rename_desc.vdesc_offset])(&a); + DTRACE_FSINFO(compound_rename, vnode_t, fdvp); + + if (_err == 0) { + if (*tvpp && *tvpp != *fvpp) + vnode_setneedinactive(*tvpp); + } + + /* Wrote at least one directory. If transplanted a dir, also changed link counts */ + if (_err == 0 && *fvpp != *tvpp) { + if (!*fvpp) { + panic("No fvpp after compound rename?"); + } + + events = NOTE_WRITE; + if (vnode_isdir(*fvpp)) { + /* Link count on dir changed only if we are moving a dir and... + * --Moved to new dir, not overwriting there + * --Kept in same dir and DID overwrite + */ + if (((fdvp != tdvp) && (!*tvpp)) || ((fdvp == tdvp) && (*tvpp))) { + events |= NOTE_LINK; } } - vnode_put(fromnd.ni_vp); - if (tond.ni_vp) { - vnode_put(tond.ni_vp); + + lock_vnode_and_post(fdvp, events); + if (fdvp != tdvp) { + lock_vnode_and_post(tdvp, events); } - nameidone(&tond); - nameidone(&fromnd); + + /* If you're replacing the target, post a deletion for it */ + if (*tvpp) + { + lock_vnode_and_post(*tvpp, NOTE_DELETE); + } + + lock_vnode_and_post(*fvpp, NOTE_RENAME); } -out: - if (xfromname && xfromname != &smallname1[0]) { - FREE(xfromname, M_TEMP); + + if (no_fvp) { + lookup_compound_vnop_post_hook(_err, fdvp, *fvpp, fcnp->cn_ndp, 0); } - if (xtoname && xtoname != &smallname2[0]) { - FREE(xtoname, M_TEMP); + if (no_tvp && *tvpp != NULLVP) { + lookup_compound_vnop_post_hook(_err, tdvp, *tvpp, tcnp->cn_ndp, 0); } -out1: - if (fdvp_unsafe != NULLVP) { - if (tdvp_unsafe != NULLVP) - unlock_fsnode(tdvp_unsafe, NULL); - unlock_fsnode(fdvp_unsafe, &funnel_state); + + if (_err && _err != EKEEPLOOKING) { + if (*fvpp) { + vnode_put(*fvpp); + *fvpp = NULLVP; + } + if (*tvpp) { + vnode_put(*tvpp); + *tvpp = NULLVP; + } } + return (_err); } - #if 0 +int +vn_mkdir(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp, + struct vnode_attr *vap, vfs_context_t ctx) +{ + if (ndp->ni_cnd.cn_nameiop != CREATE) { + panic("Non-CREATE nameiop in vn_mkdir()?"); + } + + if (vnode_compound_mkdir_available(dvp)) { + return VNOP_COMPOUND_MKDIR(dvp, vpp, ndp, vap, ctx); + } else { + return VNOP_MKDIR(dvp, vpp, &ndp->ni_cnd, vap, ctx); + } +} + +#if 0 /* *# *#% mkdir dvp L U U @@ -3895,8 +4176,6 @@ VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, { int _err; struct vnop_mkdir_args a; - int thread_safe; - int funnel_state = 0; a.a_desc = &vnop_mkdir_desc; a.a_dvp = dvp; @@ -3904,26 +4183,83 @@ VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, a.a_cnp = cnp; a.a_vap = vap; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(dvp); - if (!thread_safe) { - if ( (_err = lock_fsnode(dvp, &funnel_state)) ) { - return (_err); - } - } _err = 
(*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a); + if (_err == 0 && *vpp) { + DTRACE_FSINFO(mkdir, vnode_t, *vpp); + } +#if CONFIG_APPLEDOUBLE if (_err == 0 && !NATIVE_XATTR(dvp)) { /* * Remove stale Apple Double file (if any). */ - xattrfile_remove(dvp, cnp->cn_nameptr, ctx, thread_safe, 0); + xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0); } - if (!thread_safe) { - unlock_fsnode(dvp, &funnel_state); - } +#endif /* CONFIG_APPLEDOUBLE */ + + post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE); + return (_err); } +int +VNOP_COMPOUND_MKDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp, + struct vnode_attr *vap, vfs_context_t ctx) +{ + int _err; + struct vnop_compound_mkdir_args a; + + a.a_desc = &vnop_compound_mkdir_desc; + a.a_dvp = dvp; + a.a_vpp = vpp; + a.a_cnp = &ndp->ni_cnd; + a.a_vap = vap; + a.a_flags = 0; + a.a_context = ctx; +#if 0 + a.a_mkdir_authorizer = vn_authorize_mkdir; +#endif /* 0 */ + a.a_reserved = NULL; + + _err = (*dvp->v_op[vnop_compound_mkdir_desc.vdesc_offset])(&a); + if (_err == 0 && *vpp) { + DTRACE_FSINFO(compound_mkdir, vnode_t, *vpp); + } +#if CONFIG_APPLEDOUBLE + if (_err == 0 && !NATIVE_XATTR(dvp)) { + /* + * Remove stale Apple Double file (if any). + */ + xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0); + } +#endif /* CONFIG_APPLEDOUBLE */ + + post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE); + + lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, (_err == 0)); + if (*vpp && _err && _err != EKEEPLOOKING) { + vnode_put(*vpp); + *vpp = NULLVP; + } + + return (_err); +} + +int +vn_rmdir(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, vfs_context_t ctx) +{ + if (vnode_compound_rmdir_available(dvp)) { + return VNOP_COMPOUND_RMDIR(dvp, vpp, ndp, vap, ctx); + } else { + if (*vpp == NULLVP) { + panic("NULL vp, but not a compound VNOP?"); + } + if (vap != NULL) { + panic("Non-NULL vap, but not a compound VNOP?"); + } + return VNOP_RMDIR(dvp, *vpp, &ndp->ni_cnd, ctx); + } +} #if 0 /* @@ -3946,45 +4282,95 @@ VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_c { int _err; struct vnop_rmdir_args a; - int thread_safe; - int funnel_state = 0; a.a_desc = &vnop_rmdir_desc; a.a_dvp = dvp; a.a_vp = vp; a.a_cnp = cnp; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(dvp); - if (!thread_safe) { - if ( (_err = lock_fsnode(vp, &funnel_state)) ) { - return (_err); - } - } _err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a); + DTRACE_FSINFO(rmdir, vnode_t, vp); if (_err == 0) { vnode_setneedinactive(vp); - +#if CONFIG_APPLEDOUBLE if ( !(NATIVE_XATTR(dvp)) ) { /* * Remove any associated extended attribute file (._ AppleDouble file). */ - xattrfile_remove(dvp, cnp->cn_nameptr, ctx, thread_safe, 1); + xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1); } +#endif } - if (!thread_safe) { - unlock_fsnode(vp, &funnel_state); - } + + /* If you delete a dir, it loses its "." 
reference --> NOTE_LINK */ + post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK); + post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE); + return (_err); } +int +VNOP_COMPOUND_RMDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp, + struct vnode_attr *vap, vfs_context_t ctx) +{ + int _err; + struct vnop_compound_rmdir_args a; + int no_vp; + + a.a_desc = &vnop_mkdir_desc; + a.a_dvp = dvp; + a.a_vpp = vpp; + a.a_cnp = &ndp->ni_cnd; + a.a_vap = vap; + a.a_flags = 0; + a.a_context = ctx; + a.a_rmdir_authorizer = vn_authorize_rmdir; + a.a_reserved = NULL; + + no_vp = (*vpp == NULLVP); + + _err = (*dvp->v_op[vnop_compound_rmdir_desc.vdesc_offset])(&a); + if (_err == 0 && *vpp) { + DTRACE_FSINFO(compound_rmdir, vnode_t, *vpp); + } +#if CONFIG_APPLEDOUBLE + if (_err == 0 && !NATIVE_XATTR(dvp)) { + /* + * Remove stale Apple Double file (if any). + */ + xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0); + } +#endif + + if (*vpp) { + post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK); + } + post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE); + + if (no_vp) { + lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0); + +#if 0 /* Removing orphaned ._ files requires a vp.... */ + if (*vpp && _err && _err != EKEEPLOOKING) { + vnode_put(*vpp); + *vpp = NULLVP; + } +#endif /* 0 */ + } + + return (_err); +} + +#if CONFIG_APPLEDOUBLE /* * Remove a ._ AppleDouble file */ #define AD_STALE_SECS (180) static void -xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int thread_safe, int force) { +xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int force) +{ vnode_t xvp; struct nameidata nd; char smallname[64]; @@ -4002,7 +4388,7 @@ xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int thre MALLOC(filename, char *, len, M_TEMP, M_WAITOK); len = snprintf(filename, len, "._%s", basename); } - NDINIT(&nd, DELETE, WANTPARENT | LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE, + NDINIT(&nd, DELETE, OP_UNLINK, WANTPARENT | LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE, CAST_USER_ADDR_T(filename), ctx); nd.ni_dvp = dvp; if (namei(&nd) != 0) @@ -4038,27 +4424,16 @@ xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int thre } } if (force) { - struct vnop_remove_args a; int error; - a.a_desc = &vnop_remove_desc; - a.a_dvp = nd.ni_dvp; - a.a_vp = xvp; - a.a_cnp = &nd.ni_cnd; - a.a_context = ctx; - - if (!thread_safe) { - if ( (lock_fsnode(xvp, NULL)) ) - goto out1; - } - error = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a); - - if (!thread_safe) - unlock_fsnode(xvp, NULL); - + error = VNOP_REMOVE(dvp, xvp, &nd.ni_cnd, 0, ctx); if (error == 0) vnode_setneedinactive(xvp); + + post_event_if_success(xvp, error, NOTE_DELETE); + post_event_if_success(dvp, error, NOTE_WRITE); } + out1: vnode_put(dvp); vnode_put(xvp); @@ -4073,7 +4448,8 @@ out2: */ static void xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap, - vfs_context_t ctx, int thread_safe) { + vfs_context_t ctx) +{ vnode_t xvp; struct nameidata nd; char smallname[64]; @@ -4092,7 +4468,7 @@ xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap, MALLOC(filename, char *, len, M_TEMP, M_WAITOK); len = snprintf(filename, len, "._%s", basename); } - NDINIT(&nd, LOOKUP, NOFOLLOW | USEDVP, UIO_SYSSPACE, + NDINIT(&nd, LOOKUP, OP_SETATTR, NOFOLLOW | USEDVP, UIO_SYSSPACE, CAST_USER_ADDR_T(filename), ctx); nd.ni_dvp = dvp; if (namei(&nd) != 0) @@ -4109,22 +4485,16 @@ xattrfile_setattr(vnode_t dvp, const char * 
basename, struct vnode_attr * vap, a.a_vap = vap; a.a_context = ctx; - if (!thread_safe) { - if ( (lock_fsnode(xvp, NULL)) ) - goto out1; - } (void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(xvp, NULL); - } } -out1: + vnode_put(xvp); out2: if (filename && filename != &smallname[0]) { FREE(filename, M_TEMP); } } +#endif /* CONFIG_APPLEDOUBLE */ #if 0 /* @@ -4150,8 +4520,6 @@ VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, { int _err; struct vnop_symlink_args a; - int thread_safe; - int funnel_state = 0; a.a_desc = &vnop_symlink_desc; a.a_dvp = dvp; @@ -4160,24 +4528,21 @@ VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, a.a_vap = vap; a.a_target = target; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(dvp); - if (!thread_safe) { - if ( (_err = lock_fsnode(dvp, &funnel_state)) ) { - return (_err); - } - } _err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a); + DTRACE_FSINFO(symlink, vnode_t, dvp); +#if CONFIG_APPLEDOUBLE if (_err == 0 && !NATIVE_XATTR(dvp)) { /* - * Remove stale Apple Double file (if any). + * Remove stale Apple Double file (if any). Posts its own knotes */ - xattrfile_remove(dvp, cnp->cn_nameptr, ctx, thread_safe, 0); + xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0); } - if (!thread_safe) { - unlock_fsnode(dvp, &funnel_state); - } - return (_err); +#endif /* CONFIG_APPLEDOUBLE */ + + post_event_if_success(dvp, _err, NOTE_WRITE); + + return (_err); } #if 0 @@ -4203,8 +4568,9 @@ VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag, { int _err; struct vnop_readdir_args a; - int thread_safe; - int funnel_state = 0; +#if CONFIG_DTRACE + user_ssize_t resid = uio_resid(uio); +#endif a.a_desc = &vnop_readdir_desc; a.a_vp = vp; @@ -4213,17 +4579,11 @@ VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag, a.a_eofflag = eofflag; a.a_numdirent = numdirent; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - if ( (_err = lock_fsnode(vp, &funnel_state)) ) { - return (_err); - } - } _err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(vp, &funnel_state); - } + DTRACE_FSINFO_IO(readdir, + vnode_t, vp, user_ssize_t, (resid - uio_resid(uio))); + return (_err); } @@ -4238,23 +4598,24 @@ struct vnop_readdirattr_args { vnode_t a_vp; struct attrlist *a_alist; struct uio *a_uio; - u_long a_maxcount; - u_long a_options; - u_long *a_newstate; + uint32_t a_maxcount; + uint32_t a_options; + uint32_t *a_newstate; int *a_eofflag; - u_long *a_actualcount; + uint32_t *a_actualcount; vfs_context_t a_context; }; #endif /* 0*/ errno_t -VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, u_long maxcount, - u_long options, u_long *newstate, int *eofflag, u_long *actualcount, vfs_context_t ctx) +VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, uint32_t maxcount, + uint32_t options, uint32_t *newstate, int *eofflag, uint32_t *actualcount, vfs_context_t ctx) { int _err; struct vnop_readdirattr_args a; - int thread_safe; - int funnel_state = 0; +#if CONFIG_DTRACE + user_ssize_t resid = uio_resid(uio); +#endif a.a_desc = &vnop_readdirattr_desc; a.a_vp = vp; @@ -4266,17 +4627,54 @@ VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, u_lo a.a_eofflag = eofflag; a.a_actualcount = actualcount; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - if ( (_err = lock_fsnode(vp, &funnel_state)) ) { - return 
(_err); - } - } _err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(vp, &funnel_state); - } + DTRACE_FSINFO_IO(readdirattr, + vnode_t, vp, user_ssize_t, (resid - uio_resid(uio))); + + return (_err); +} + +#if 0 +struct vnop_getattrlistbulk_args { + struct vnodeop_desc *a_desc; + vnode_t a_vp; + struct attrlist *a_alist; + struct vnode_attr *a_vap; + struct uio *a_uio; + void *a_private; + uint64_t a_options; + int *a_eofflag; + uint32_t *a_actualcount; + vfs_context_t a_context; +}; +#endif /* 0*/ +errno_t +VNOP_GETATTRLISTBULK(struct vnode *vp, struct attrlist *alist, + struct vnode_attr *vap, struct uio *uio, void *private, uint64_t options, + int32_t *eofflag, int32_t *actualcount, vfs_context_t ctx) +{ + int _err; + struct vnop_getattrlistbulk_args a; +#if CONFIG_DTRACE + user_ssize_t resid = uio_resid(uio); +#endif + + a.a_desc = &vnop_getattrlistbulk_desc; + a.a_vp = vp; + a.a_alist = alist; + a.a_vap = vap; + a.a_uio = uio; + a.a_private = private; + a.a_options = options; + a.a_eofflag = eofflag; + a.a_actualcount = actualcount; + a.a_context = ctx; + + _err = (*vp->v_op[vnop_getattrlistbulk_desc.vdesc_offset])(&a); + DTRACE_FSINFO_IO(getattrlistbulk, + vnode_t, vp, user_ssize_t, (resid - uio_resid(uio))); + return (_err); } @@ -4318,24 +4716,18 @@ VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t ctx) { int _err; struct vnop_readlink_args a; - int thread_safe; - int funnel_state = 0; - +#if CONFIG_DTRACE + user_ssize_t resid = uio_resid(uio); +#endif a.a_desc = &vnop_readlink_desc; a.a_vp = vp; a.a_uio = uio; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - if ( (_err = lock_fsnode(vp, &funnel_state)) ) { - return (_err); - } - } _err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(vp, &funnel_state); - } + DTRACE_FSINFO_IO(readlink, + vnode_t, vp, user_ssize_t, (resid - uio_resid(uio))); + return (_err); } @@ -4356,34 +4748,24 @@ VNOP_INACTIVE(struct vnode *vp, vfs_context_t ctx) { int _err; struct vnop_inactive_args a; - int thread_safe; - int funnel_state = 0; a.a_desc = &vnop_inactive_desc; a.a_vp = vp; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - - if (!thread_safe) { - if ( (_err = lock_fsnode(vp, &funnel_state)) ) { - return (_err); - } - } + _err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(vp, &funnel_state); - } + DTRACE_FSINFO(inactive, vnode_t, vp); #if NAMEDSTREAMS - /* For file systems that do not support namedstreams natively, mark - * the shadow stream file vnode to be recycled as soon as the last - * reference goes away. To avoid re-entering reclaim code, do not - * call recycle on terminating named stream vnodes. + /* For file systems that do not support named streams natively, mark + * the shadow stream file vnode to be recycled as soon as the last + * reference goes away. To avoid re-entering reclaim code, do not + * call recycle on terminating named stream vnodes. 
*/ if (vnode_isnamedstream(vp) && - (vp->v_parent != NULLVP) && - (vnode_isshadow(vp)) && - ((vp->v_lflag & VL_TERMINATE) == 0)) { + (vp->v_parent != NULLVP) && + vnode_isshadow(vp) && + ((vp->v_lflag & VL_TERMINATE) == 0)) { vnode_recycle(vp); } #endif @@ -4409,21 +4791,14 @@ VNOP_RECLAIM(struct vnode *vp, vfs_context_t ctx) { int _err; struct vnop_reclaim_args a; - int thread_safe; - int funnel_state = 0; a.a_desc = &vnop_reclaim_desc; a.a_vp = vp; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - } _err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a); - if (!thread_safe) { - (void) thread_funnel_set(kernel_flock, funnel_state); - } + DTRACE_FSINFO(reclaim, vnode_t, vp); + return (_err); } @@ -4445,34 +4820,25 @@ struct vnop_pathconf_args { struct vnodeop_desc *a_desc; vnode_t a_vp; int a_name; - register_t *a_retval; + int32_t *a_retval; vfs_context_t a_context; }; #endif /* 0*/ errno_t -VNOP_PATHCONF(struct vnode *vp, int name, register_t *retval, vfs_context_t ctx) +VNOP_PATHCONF(struct vnode *vp, int name, int32_t *retval, vfs_context_t ctx) { int _err; struct vnop_pathconf_args a; - int thread_safe; - int funnel_state = 0; a.a_desc = &vnop_pathconf_desc; a.a_vp = vp; a.a_name = name; a.a_retval = retval; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - if ( (_err = lock_fsnode(vp, &funnel_state)) ) { - return (_err); - } - } _err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(vp, &funnel_state); - } + DTRACE_FSINFO(pathconf, vnode_t, vp); + return (_err); } @@ -4504,13 +4870,10 @@ struct vnop_advlock_args { }; #endif /* 0*/ errno_t -VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx) +VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx, struct timespec *timeout) { int _err; struct vnop_advlock_args a; - int thread_safe; - int funnel_state = 0; - struct uthread * uth; a.a_desc = &vnop_advlock_desc; a.a_vp = vp; @@ -4519,12 +4882,8 @@ VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, a.a_fl = fl; a.a_flags = flags; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); + a.a_timeout = timeout; - uth = get_bsdthread_info(current_thread()); - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - } /* Disallow advisory locking on non-seekable vnodes */ if (vnode_isfifo(vp)) { _err = err_advlock(&a); @@ -4536,10 +4895,9 @@ VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, /* Advisory locking done by underlying filesystem */ _err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a); } + DTRACE_FSINFO(advlock, vnode_t, vp); } - if (!thread_safe) { - (void) thread_funnel_set(kernel_flock, funnel_state); - } + return (_err); } @@ -4567,8 +4925,6 @@ VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesalloc { int _err; struct vnop_allocate_args a; - int thread_safe; - int funnel_state = 0; a.a_desc = &vnop_allocate_desc; a.a_vp = vp; @@ -4577,17 +4933,15 @@ VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesalloc a.a_bytesallocated = bytesallocated; a.a_offset = offset; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - if ( (_err = lock_fsnode(vp, &funnel_state)) ) { - return (_err); - } - } _err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(vp, 
&funnel_state); + DTRACE_FSINFO(allocate, vnode_t, vp); +#if CONFIG_FSE + if (_err == 0) { + add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE); } +#endif + return (_err); } @@ -4601,7 +4955,7 @@ struct vnop_pagein_args { struct vnodeop_desc *a_desc; vnode_t a_vp; upl_t a_pl; - vm_offset_t a_pl_offset; + upl_offset_t a_pl_offset; off_t a_f_offset; size_t a_size; int a_flags; @@ -4609,12 +4963,10 @@ struct vnop_pagein_args { }; #endif /* 0*/ errno_t -VNOP_PAGEIN(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx) +VNOP_PAGEIN(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx) { int _err; struct vnop_pagein_args a; - int thread_safe; - int funnel_state = 0; a.a_desc = &vnop_pagein_desc; a.a_vp = vp; @@ -4624,15 +4976,10 @@ VNOP_PAGEIN(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, s a.a_size = size; a.a_flags = flags; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - } _err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a); - if (!thread_safe) { - (void) thread_funnel_set(kernel_flock, funnel_state); - } + DTRACE_FSINFO(pagein, vnode_t, vp); + return (_err); } @@ -4646,7 +4993,7 @@ struct vnop_pageout_args { struct vnodeop_desc *a_desc; vnode_t a_vp; upl_t a_pl; - vm_offset_t a_pl_offset; + upl_offset_t a_pl_offset; off_t a_f_offset; size_t a_size; int a_flags; @@ -4655,12 +5002,10 @@ struct vnop_pageout_args { #endif /* 0*/ errno_t -VNOP_PAGEOUT(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx) +VNOP_PAGEOUT(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx) { int _err; struct vnop_pageout_args a; - int thread_safe; - int funnel_state = 0; a.a_desc = &vnop_pageout_desc; a.a_vp = vp; @@ -4670,18 +5015,26 @@ VNOP_PAGEOUT(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, a.a_size = size; a.a_flags = flags; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - } _err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a); - if (!thread_safe) { - (void) thread_funnel_set(kernel_flock, funnel_state); - } + DTRACE_FSINFO(pageout, vnode_t, vp); + + post_event_if_success(vp, _err, NOTE_WRITE); + return (_err); } +int +vn_remove(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx) +{ + if (vnode_compound_remove_available(dvp)) { + return VNOP_COMPOUND_REMOVE(dvp, vpp, ndp, flags, vap, ctx); + } else { + return VNOP_REMOVE(dvp, *vpp, &ndp->ni_cnd, flags, ctx); + } +} + +#if CONFIG_SEARCHFS #if 0 /* @@ -4695,12 +5048,12 @@ struct vnop_searchfs_args { void *a_searchparams1; void *a_searchparams2; struct attrlist *a_searchattrs; - u_long a_maxmatches; + uint32_t a_maxmatches; struct timeval *a_timelimit; struct attrlist *a_returnattrs; - u_long *a_nummatches; - u_long a_scriptcode; - u_long a_options; + uint32_t *a_nummatches; + uint32_t a_scriptcode; + uint32_t a_options; struct uio *a_uio; struct searchstate *a_searchstate; vfs_context_t a_context; @@ -4708,12 +5061,10 @@ struct vnop_searchfs_args { #endif /* 0*/ errno_t -VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, u_long maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, u_long 
*nummatches, u_long scriptcode, u_long options, struct uio *uio, struct searchstate *searchstate, vfs_context_t ctx) +VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, uint32_t maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, uint32_t *nummatches, uint32_t scriptcode, uint32_t options, struct uio *uio, struct searchstate *searchstate, vfs_context_t ctx) { int _err; struct vnop_searchfs_args a; - int thread_safe; - int funnel_state = 0; a.a_desc = &vnop_searchfs_desc; a.a_vp = vp; @@ -4729,19 +5080,13 @@ VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct a.a_uio = uio; a.a_searchstate = searchstate; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - if ( (_err = lock_fsnode(vp, &funnel_state)) ) { - return (_err); - } - } _err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(vp, &funnel_state); - } + DTRACE_FSINFO(searchfs, vnode_t, vp); + return (_err); } +#endif /* CONFIG_SEARCHFS */ #if 0 /* @@ -4777,6 +5122,7 @@ VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct c a.a_flags = flags; a.a_context = ctx; _err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a); + DTRACE_FSINFO(copyfile, vnode_t, fvp); return (_err); } @@ -4785,8 +5131,6 @@ VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options { struct vnop_getxattr_args a; int error; - int thread_safe; - int funnel_state = 0; a.a_desc = &vnop_getxattr_desc; a.a_vp = vp; @@ -4796,16 +5140,9 @@ VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options a.a_options = options; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - if ( (error = lock_fsnode(vp, &funnel_state)) ) { - return (error); - } - } error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(vp, &funnel_state); - } + DTRACE_FSINFO(getxattr, vnode_t, vp); + return (error); } @@ -4814,8 +5151,6 @@ VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_ { struct vnop_setxattr_args a; int error; - int thread_safe; - int funnel_state = 0; a.a_desc = &vnop_setxattr_desc; a.a_vp = vp; @@ -4824,18 +5159,14 @@ VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_ a.a_options = options; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - if ( (error = lock_fsnode(vp, &funnel_state)) ) { - return (error); - } - } error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(vp, &funnel_state); - } + DTRACE_FSINFO(setxattr, vnode_t, vp); + if (error == 0) vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS); + + post_event_if_success(vp, error, NOTE_ATTRIB); + return (error); } @@ -4844,8 +5175,6 @@ VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t ctx) { struct vnop_removexattr_args a; int error; - int thread_safe; - int funnel_state = 0; a.a_desc = &vnop_removexattr_desc; a.a_vp = vp; @@ -4853,16 +5182,11 @@ VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t ctx) a.a_options = options; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - if ( (error = lock_fsnode(vp, &funnel_state)) ) { - return (error); - } - } error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(vp, &funnel_state); - } + DTRACE_FSINFO(removexattr, vnode_t, vp); + + 
post_event_if_success(vp, error, NOTE_ATTRIB); + return (error); } @@ -4871,8 +5195,6 @@ VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t c { struct vnop_listxattr_args a; int error; - int thread_safe; - int funnel_state = 0; a.a_desc = &vnop_listxattr_desc; a.a_vp = vp; @@ -4881,16 +5203,9 @@ VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t c a.a_options = options; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - if ( (error = lock_fsnode(vp, &funnel_state)) ) { - return (error); - } - } error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(vp, &funnel_state); - } + DTRACE_FSINFO(listxattr, vnode_t, vp); + return (error); } @@ -4913,22 +5228,15 @@ VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset) { int _err; struct vnop_blktooff_args a; - int thread_safe; - int funnel_state = 0; a.a_desc = &vnop_blktooff_desc; a.a_vp = vp; a.a_lblkno = lblkno; a.a_offset = offset; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - } _err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a); - if (!thread_safe) { - (void) thread_funnel_set(kernel_flock, funnel_state); - } + DTRACE_FSINFO(blktooff, vnode_t, vp); + return (_err); } @@ -4950,22 +5258,15 @@ VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno) { int _err; struct vnop_offtoblk_args a; - int thread_safe; - int funnel_state = 0; a.a_desc = &vnop_offtoblk_desc; a.a_vp = vp; a.a_offset = offset; a.a_lblkno = lblkno; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - } _err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a); - if (!thread_safe) { - (void) thread_funnel_set(kernel_flock, funnel_state); - } + DTRACE_FSINFO(offtoblk, vnode_t, vp); + return (_err); } @@ -4992,8 +5293,7 @@ VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size { int _err; struct vnop_blockmap_args a; - int thread_safe; - int funnel_state = 0; + size_t localrun = 0; if (ctx == NULL) { ctx = vfs_context_current(); @@ -5003,19 +5303,30 @@ VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size a.a_foffset = foffset; a.a_size = size; a.a_bpn = bpn; - a.a_run = run; + a.a_run = &localrun; a.a_poff = poff; a.a_flags = flags; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - funnel_state = thread_funnel_set(kernel_flock, TRUE); - } _err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a); - if (!thread_safe) { - (void) thread_funnel_set(kernel_flock, funnel_state); + DTRACE_FSINFO(blockmap, vnode_t, vp); + + /* + * We used a local variable to request information from the underlying + * filesystem about the length of the I/O run in question. If + * we get malformed output from the filesystem, we cap it to the length + * requested, at most. Update 'run' on the way out. 
+ */ + if (_err == 0) { + if (localrun > size) { + localrun = size; + } + + if (run) { + *run = localrun; + } } + return (_err); } @@ -5031,9 +5342,11 @@ VNOP_STRATEGY(struct buf *bp) { int _err; struct vnop_strategy_args a; + vnode_t vp = buf_vnode(bp); a.a_desc = &vnop_strategy_desc; a.a_bp = bp; - _err = (*buf_vnode(bp)->v_op[vnop_strategy_desc.vdesc_offset])(&a); + _err = (*vp->v_op[vnop_strategy_desc.vdesc_offset])(&a); + DTRACE_FSINFO(strategy, vnode_t, vp); return (_err); } @@ -5048,9 +5361,11 @@ VNOP_BWRITE(struct buf *bp) { int _err; struct vnop_bwrite_args a; + vnode_t vp = buf_vnode(bp); a.a_desc = &vnop_bwrite_desc; a.a_bp = bp; - _err = (*buf_vnode(bp)->v_op[vnop_bwrite_desc.vdesc_offset])(&a); + _err = (*vp->v_op[vnop_bwrite_desc.vdesc_offset])(&a); + DTRACE_FSINFO(bwrite, vnode_t, vp); return (_err); } @@ -5067,24 +5382,15 @@ VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t ctx) { int _err; struct vnop_kqfilt_add_args a; - int thread_safe; - int funnel_state = 0; a.a_desc = VDESC(vnop_kqfilt_add); a.a_vp = vp; a.a_kn = kn; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - if ( (_err = lock_fsnode(vp, &funnel_state)) ) { - return (_err); - } - } _err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(vp, &funnel_state); - } + DTRACE_FSINFO(kqfilt_add, vnode_t, vp); + return(_err); } @@ -5101,24 +5407,34 @@ VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t ctx) { int _err; struct vnop_kqfilt_remove_args a; - int thread_safe; - int funnel_state = 0; a.a_desc = VDESC(vnop_kqfilt_remove); a.a_vp = vp; a.a_ident = ident; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - if ( (_err = lock_fsnode(vp, &funnel_state)) ) { - return (_err); - } - } _err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(vp, &funnel_state); - } + DTRACE_FSINFO(kqfilt_remove, vnode_t, vp); + + return(_err); +} + +errno_t +VNOP_MONITOR(vnode_t vp, uint32_t events, uint32_t flags, void *handle, vfs_context_t ctx) +{ + int _err; + struct vnop_monitor_args a; + + a.a_desc = VDESC(vnop_monitor); + a.a_vp = vp; + a.a_events = events; + a.a_flags = flags; + a.a_handle = handle; + a.a_context = ctx; + + _err = (*vp->v_op[vnop_monitor_desc.vdesc_offset])(&a); + DTRACE_FSINFO(monitor, vnode_t, vp); + return(_err); } @@ -5135,24 +5451,15 @@ VNOP_SETLABEL(struct vnode *vp, struct label *label, vfs_context_t ctx) { int _err; struct vnop_setlabel_args a; - int thread_safe; - int funnel_state = 0; a.a_desc = VDESC(vnop_setlabel); a.a_vp = vp; a.a_vl = label; a.a_context = ctx; - thread_safe = THREAD_SAFE_FS(vp); - if (!thread_safe) { - if ( (_err = lock_fsnode(vp, &funnel_state)) ) { - return (_err); - } - } _err = (*vp->v_op[vnop_setlabel_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(vp, &funnel_state); - } + DTRACE_FSINFO(setlabel, vnode_t, vp); + return(_err); } @@ -5164,10 +5471,9 @@ VNOP_SETLABEL(struct vnode *vp, struct label *label, vfs_context_t ctx) errno_t VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperation operation, int flags, vfs_context_t ctx) { + int _err; struct vnop_getnamedstream_args a; - if (!THREAD_SAFE_FS(vp)) - return (ENOTSUP); a.a_desc = &vnop_getnamedstream_desc; a.a_vp = vp; a.a_svpp = svpp; @@ -5176,7 +5482,9 @@ VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperatio a.a_flags = flags; a.a_context = ctx; - return 
(*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a); + _err = (*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a); + DTRACE_FSINFO(getnamedstream, vnode_t, vp); + return (_err); } /* @@ -5185,10 +5493,9 @@ VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperatio errno_t VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs_context_t ctx) { + int _err; struct vnop_makenamedstream_args a; - if (!THREAD_SAFE_FS(vp)) - return (ENOTSUP); a.a_desc = &vnop_makenamedstream_desc; a.a_vp = vp; a.a_svpp = svpp; @@ -5196,7 +5503,9 @@ VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs a.a_flags = flags; a.a_context = ctx; - return (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a); + _err = (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a); + DTRACE_FSINFO(makenamedstream, vnode_t, vp); + return (_err); } @@ -5206,10 +5515,9 @@ VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs errno_t VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs_context_t ctx) { + int _err; struct vnop_removenamedstream_args a; - if (!THREAD_SAFE_FS(vp)) - return (ENOTSUP); a.a_desc = &vnop_removenamedstream_desc; a.a_vp = vp; a.a_svp = svp; @@ -5217,6 +5525,8 @@ VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs a.a_flags = flags; a.a_context = ctx; - return (*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a); + _err = (*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a); + DTRACE_FSINFO(removenamedstream, vnode_t, vp); + return (_err); } #endif
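
A note on the notification helpers used throughout the new code above: post_event_if_success() and lock_vnode_and_post() are defined elsewhere in the VFS layer and do not appear in these hunks. As a rough sketch (the macro body below is an assumption for illustration, not quoted from the tree), the success-gated variant amounts to:

	/*
	 * Sketch only: post a kqueue event on a vnode, but only when the
	 * preceding VNOP actually succeeded.  lock_vnode_and_post() is the
	 * unconditional helper the wrappers call directly.
	 */
	#define post_event_if_success(_vp, _error, _event)		\
		do {							\
			if ((_error) == 0) {				\
				lock_vnode_and_post((_vp), (_event));	\
			}						\
		} while (0)

Gating on the error keeps failed operations free of notification side effects: a failed VNOP_MKDIR, for example, posts neither NOTE_LINK nor NOTE_WRITE on the parent directory.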
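The VNOP_ADVLOCK() wrapper above also gains a struct timespec *timeout argument. A hypothetical in-kernel caller (the vnode vp, proc pointer p, context ctx, flag combination, and two-second bound below are illustrative placeholders, not taken from this file) would pass NULL to keep the old block-forever behaviour, or a timespec to bound the wait:

	/* Sketch: request a whole-file POSIX write lock, waiting at most
	 * two seconds for it; passing NULL instead of &ts waits forever. */
	struct flock fl = {
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,		/* zero length means "to end of file" */
		.l_type   = F_WRLCK,
	};
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
	int error;

	error = VNOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &fl, F_POSIX | F_WAIT, ctx, &ts);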