X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/91447636331957f3d9b5ca5b508f07c526b0074d..935ed37a5c468c8a1c07408573c08b8b7ef80e8b:/bsd/vfs/kpi_vfs.c diff --git a/bsd/vfs/kpi_vfs.c b/bsd/vfs/kpi_vfs.c index 7f72472a9..79e526ef2 100644 --- a/bsd/vfs/kpi_vfs.c +++ b/bsd/vfs/kpi_vfs.c @@ -1,23 +1,29 @@ /* - * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2008 Apple Inc. All rights reserved. * - * @APPLE_LICENSE_HEADER_START@ + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * - * The contents of this file constitute Original Code as defined in and - * are subject to the Apple Public Source License Version 1.1 (the - * "License"). You may not use this file except in compliance with the - * License. Please obtain a copy of the License at - * http://www.apple.com/publicsource and read it before using this file. + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. * - * This Original Code and all software distributed under the License are - * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the - * License for the specific language governing rights and limitations - * under the License. + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. * - * @APPLE_LICENSE_HEADER_END@ + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ /* @@ -59,13 +65,17 @@ * * @(#)kpi_vfs.c */ +/* + * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce + * support for mandatory and extensible security protections. This notice + * is included in support of clause 2.2 (b) of the Apple Public License, + * Version 2.0. + */ /* * External virtual filesystem routines */ -#undef DIAGNOSTIC -#define DIAGNOSTIC 1 #include #include @@ -95,11 +105,19 @@ #include #include +#include + +#include #include #include #include +#include + +#if CONFIG_MACF +#include +#endif #define ESUCCESS 0 #undef mount_t @@ -112,12 +130,13 @@ ((VP)->v_unsafefs ? 0 : 1) #define NATIVE_XATTR(VP) \ - ((VP)->v_mount ? (VP)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR : 0) + ((VP)->v_mount ? 
(VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0) -static void xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t context, - int thread_safe, int force); -static void xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap, - vfs_context_t context, int thread_safe); +static void xattrfile_remove(vnode_t dvp, const char *basename, + vfs_context_t ctx, int thread_safe, int force); +static void xattrfile_setattr(vnode_t dvp, const char * basename, + struct vnode_attr * vap, vfs_context_t ctx, + int thread_safe); static void @@ -125,7 +144,7 @@ vnode_setneedinactive(vnode_t vp) { cache_purge(vp); - vnode_lock(vp); + vnode_lock_spin(vp); vp->v_lflag |= VL_NEEDINACTIVE; vnode_unlock(vp); } @@ -181,7 +200,7 @@ unlock_fsnode(vnode_t vp, int *funnel_state) * prototypes for exported VFS operations */ int -VFS_MOUNT(struct mount * mp, vnode_t devvp, user_addr_t data, vfs_context_t context) +VFS_MOUNT(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx) { int error; int thread_safe; @@ -197,16 +216,16 @@ VFS_MOUNT(struct mount * mp, vnode_t devvp, user_addr_t data, vfs_context_t cont funnel_state = thread_funnel_set(kernel_flock, TRUE); } - if (vfs_context_is64bit(context)) { + if (vfs_context_is64bit(ctx)) { if (vfs_64bitready(mp)) { - error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, context); + error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx); } else { error = ENOTSUP; } } else { - error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, context); + error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx); } if (!thread_safe) { @@ -216,7 +235,7 @@ VFS_MOUNT(struct mount * mp, vnode_t devvp, user_addr_t data, vfs_context_t cont } int -VFS_START(struct mount * mp, int flags, vfs_context_t context) +VFS_START(mount_t mp, int flags, vfs_context_t ctx) { int error; int thread_safe; @@ -230,7 +249,7 @@ VFS_START(struct mount * mp, int flags, vfs_context_t context) if (!thread_safe) { funnel_state = thread_funnel_set(kernel_flock, TRUE); } - error = (*mp->mnt_op->vfs_start)(mp, flags, context); + error = (*mp->mnt_op->vfs_start)(mp, flags, ctx); if (!thread_safe) { (void) thread_funnel_set(kernel_flock, funnel_state); } @@ -238,7 +257,7 @@ VFS_START(struct mount * mp, int flags, vfs_context_t context) } int -VFS_UNMOUNT(struct mount *mp, int flags, vfs_context_t context) +VFS_UNMOUNT(mount_t mp, int flags, vfs_context_t ctx) { int error; int thread_safe; @@ -252,35 +271,49 @@ VFS_UNMOUNT(struct mount *mp, int flags, vfs_context_t context) if (!thread_safe) { funnel_state = thread_funnel_set(kernel_flock, TRUE); } - error = (*mp->mnt_op->vfs_unmount)(mp, flags, context); + error = (*mp->mnt_op->vfs_unmount)(mp, flags, ctx); if (!thread_safe) { (void) thread_funnel_set(kernel_flock, funnel_state); } return (error); } +/* + * Returns: 0 Success + * ENOTSUP Not supported + * :ENOENT + * :??? + * + * Note: The return codes from the underlying VFS's root routine can't + * be fully enumerated here, since third party VFS authors may not + * limit their error returns to the ones documented here, even + * though this may result in some programs functioning incorrectly. + * + * The return codes documented above are those which may currently + * be returned by HFS from hfs_vfs_root, which is a simple wrapper + * for a call to hfs_vget on the volume mount poit, not including + * additional error codes which may be propagated from underlying + * routines called by hfs_vget. 
+ */ int -VFS_ROOT(struct mount * mp, struct vnode ** vpp, vfs_context_t context) +VFS_ROOT(mount_t mp, struct vnode ** vpp, vfs_context_t ctx) { int error; int thread_safe; int funnel_state = 0; - struct vfs_context acontext; if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0)) return(ENOTSUP); - if (context == NULL) { - acontext.vc_proc = current_proc(); - acontext.vc_ucred = kauth_cred_get(); - context = &acontext; + if (ctx == NULL) { + ctx = vfs_context_current(); } thread_safe = mp->mnt_vtable->vfc_threadsafe; if (!thread_safe) { funnel_state = thread_funnel_set(kernel_flock, TRUE); } - error = (*mp->mnt_op->vfs_root)(mp, vpp, context); + error = (*mp->mnt_op->vfs_root)(mp, vpp, ctx); if (!thread_safe) { (void) thread_funnel_set(kernel_flock, funnel_state); } @@ -288,7 +321,7 @@ VFS_ROOT(struct mount * mp, struct vnode ** vpp, vfs_context_t context) } int -VFS_QUOTACTL(struct mount *mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t context) +VFS_QUOTACTL(mount_t mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t ctx) { int error; int thread_safe; @@ -302,7 +335,7 @@ VFS_QUOTACTL(struct mount *mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t if (!thread_safe) { funnel_state = thread_funnel_set(kernel_flock, TRUE); } - error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, context); + error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, ctx); if (!thread_safe) { (void) thread_funnel_set(kernel_flock, funnel_state); } @@ -310,27 +343,25 @@ VFS_QUOTACTL(struct mount *mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t } int -VFS_GETATTR(struct mount *mp, struct vfs_attr *vfa, vfs_context_t context) +VFS_GETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx) { int error; int thread_safe; int funnel_state = 0; - struct vfs_context acontext; if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0)) return(ENOTSUP); - if (context == NULL) { - acontext.vc_proc = current_proc(); - acontext.vc_ucred = kauth_cred_get(); - context = &acontext; + if (ctx == NULL) { + ctx = vfs_context_current(); } + thread_safe = mp->mnt_vtable->vfc_threadsafe; if (!thread_safe) { funnel_state = thread_funnel_set(kernel_flock, TRUE); } - error = (*mp->mnt_op->vfs_getattr)(mp, vfa, context); + error = (*mp->mnt_op->vfs_getattr)(mp, vfa, ctx); if (!thread_safe) { (void) thread_funnel_set(kernel_flock, funnel_state); } @@ -338,27 +369,25 @@ VFS_GETATTR(struct mount *mp, struct vfs_attr *vfa, vfs_context_t context) } int -VFS_SETATTR(struct mount *mp, struct vfs_attr *vfa, vfs_context_t context) +VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx) { int error; int thread_safe; int funnel_state = 0; - struct vfs_context acontext; if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0)) return(ENOTSUP); - if (context == NULL) { - acontext.vc_proc = current_proc(); - acontext.vc_ucred = kauth_cred_get(); - context = &acontext; + if (ctx == NULL) { + ctx = vfs_context_current(); } + thread_safe = mp->mnt_vtable->vfc_threadsafe; if (!thread_safe) { funnel_state = thread_funnel_set(kernel_flock, TRUE); } - error = (*mp->mnt_op->vfs_setattr)(mp, vfa, context); + error = (*mp->mnt_op->vfs_setattr)(mp, vfa, ctx); if (!thread_safe) { (void) thread_funnel_set(kernel_flock, funnel_state); } @@ -366,27 +395,24 @@ VFS_SETATTR(struct mount *mp, struct vfs_attr *vfa, vfs_context_t context) } int -VFS_SYNC(struct mount *mp, int flags, vfs_context_t context) +VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx) { int error; int thread_safe; int funnel_state = 0; - struct vfs_context acontext; if 
((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0)) return(ENOTSUP); - if (context == NULL) { - acontext.vc_proc = current_proc(); - acontext.vc_ucred = kauth_cred_get(); - context = &acontext; + if (ctx == NULL) { + ctx = vfs_context_current(); } thread_safe = mp->mnt_vtable->vfc_threadsafe; if (!thread_safe) { funnel_state = thread_funnel_set(kernel_flock, TRUE); } - error = (*mp->mnt_op->vfs_sync)(mp, flags, context); + error = (*mp->mnt_op->vfs_sync)(mp, flags, ctx); if (!thread_safe) { (void) thread_funnel_set(kernel_flock, funnel_state); } @@ -394,27 +420,24 @@ VFS_SYNC(struct mount *mp, int flags, vfs_context_t context) } int -VFS_VGET(struct mount * mp, ino64_t ino, struct vnode **vpp, vfs_context_t context) +VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx) { int error; int thread_safe; int funnel_state = 0; - struct vfs_context acontext; if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0)) return(ENOTSUP); - if (context == NULL) { - acontext.vc_proc = current_proc(); - acontext.vc_ucred = kauth_cred_get(); - context = &acontext; + if (ctx == NULL) { + ctx = vfs_context_current(); } thread_safe = mp->mnt_vtable->vfc_threadsafe; if (!thread_safe) { funnel_state = thread_funnel_set(kernel_flock, TRUE); } - error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, context); + error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, ctx); if (!thread_safe) { (void) thread_funnel_set(kernel_flock, funnel_state); } @@ -422,27 +445,24 @@ VFS_VGET(struct mount * mp, ino64_t ino, struct vnode **vpp, vfs_context_t conte } int -VFS_FHTOVP(struct mount * mp, int fhlen, unsigned char * fhp, vnode_t * vpp, vfs_context_t context) +VFS_FHTOVP(mount_t mp, int fhlen, unsigned char * fhp, vnode_t * vpp, vfs_context_t ctx) { int error; int thread_safe; int funnel_state = 0; - struct vfs_context acontext; if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0)) return(ENOTSUP); - if (context == NULL) { - acontext.vc_proc = current_proc(); - acontext.vc_ucred = kauth_cred_get(); - context = &acontext; + if (ctx == NULL) { + ctx = vfs_context_current(); } thread_safe = mp->mnt_vtable->vfc_threadsafe; if (!thread_safe) { funnel_state = thread_funnel_set(kernel_flock, TRUE); } - error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, context); + error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, ctx); if (!thread_safe) { (void) thread_funnel_set(kernel_flock, funnel_state); } @@ -450,27 +470,24 @@ VFS_FHTOVP(struct mount * mp, int fhlen, unsigned char * fhp, vnode_t * vpp, vfs } int -VFS_VPTOFH(struct vnode * vp, int *fhlenp, unsigned char * fhp, vfs_context_t context) +VFS_VPTOFH(struct vnode * vp, int *fhlenp, unsigned char * fhp, vfs_context_t ctx) { int error; int thread_safe; int funnel_state = 0; - struct vfs_context acontext; if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0)) return(ENOTSUP); - if (context == NULL) { - acontext.vc_proc = current_proc(); - acontext.vc_ucred = kauth_cred_get(); - context = &acontext; + if (ctx == NULL) { + ctx = vfs_context_current(); } thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { funnel_state = thread_funnel_set(kernel_flock, TRUE); } - error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, context); + error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, ctx); if (!thread_safe) { (void) thread_funnel_set(kernel_flock, funnel_state); } @@ -506,7 +523,9 @@ vfs_setflags(mount_t mp, uint64_t flags) { uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK)); + mount_lock(mp); mp->mnt_flag |= lflags; + 
mount_unlock(mp); } /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */ @@ -515,7 +534,9 @@ vfs_clearflags(mount_t mp , uint64_t flags) { uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK)); + mount_lock(mp); mp->mnt_flag &= ~lflags; + mount_unlock(mp); } /* Is the mount_t ronly and upgrade read/write requested? */ @@ -567,7 +588,7 @@ vfs_isreload(mount_t mp) int vfs_isforce(mount_t mp) { - if ((mp->mnt_flag & MNT_FORCE) || (mp->mnt_kern_flag & MNTK_FRCUNMOUNT)) + if ((mp->mnt_lflag & MNT_LFORCE) || (mp->mnt_kern_flag & MNTK_FRCUNMOUNT)) return(1); else return(0); @@ -582,6 +603,49 @@ vfs_64bitready(mount_t mp) return(0); } + +int +vfs_authcache_ttl(mount_t mp) +{ + if ( (mp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL)) ) + return (mp->mnt_authcache_ttl); + else + return (CACHED_RIGHT_INFINITE_TTL); +} + +void +vfs_setauthcache_ttl(mount_t mp, int ttl) +{ + mount_lock(mp); + mp->mnt_kern_flag |= MNTK_AUTH_CACHE_TTL; + mp->mnt_authcache_ttl = ttl; + mount_unlock(mp); +} + +void +vfs_clearauthcache_ttl(mount_t mp) +{ + mount_lock(mp); + mp->mnt_kern_flag &= ~MNTK_AUTH_CACHE_TTL; + /* + * back to the default TTL value in case + * MNTK_AUTH_OPAQUE is set on this mount + */ + mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL; + mount_unlock(mp); +} + +void +vfs_markdependency(mount_t mp) +{ + proc_t p = current_proc(); + mount_lock(mp); + mp->mnt_dependent_process = p; + mp->mnt_dependent_pid = proc_pid(p); + mount_unlock(mp); +} + + int vfs_authopaque(mount_t mp) { @@ -679,7 +743,6 @@ int vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx) { int error; - char *vname; if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0) return(error); @@ -724,7 +787,9 @@ vfs_fsprivate(mount_t mp) void vfs_setfsprivate(mount_t mp, void *mntdata) { + mount_lock(mp); mp->mnt_data = mntdata; + mount_unlock(mp); } @@ -753,6 +818,7 @@ vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp) ioattrp->io_maxsegreadsize = MAXPHYS; ioattrp->io_maxsegwritesize = MAXPHYS; ioattrp->io_devblocksize = DEV_BSIZE; + ioattrp->io_flags = 0; } else { ioattrp->io_maxreadcnt = mp->mnt_maxreadcnt; ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt; @@ -761,10 +827,10 @@ vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp) ioattrp->io_maxsegreadsize = mp->mnt_maxsegreadsize; ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize; ioattrp->io_devblocksize = mp->mnt_devblocksize; + ioattrp->io_flags = mp->mnt_ioflags; } - ioattrp->io_reserved[0] = 0; - ioattrp->io_reserved[1] = 0; - ioattrp->io_reserved[2] = 0; + ioattrp->io_reserved[0] = NULL; + ioattrp->io_reserved[1] = NULL; } @@ -783,6 +849,7 @@ vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp) mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize; mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize; mp->mnt_devblocksize = ioattrp->io_devblocksize; + mp->mnt_ioflags = ioattrp->io_flags; } /* @@ -844,11 +911,19 @@ vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t * handle) newvfstbl->vfc_threadsafe= 1; if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL) newvfstbl->vfc_flags |= MNT_LOCAL; - if (vfe->vfe_flags & VFS_TBLLOCALVOL) + if ((vfe->vfe_flags & VFS_TBLLOCALVOL) && (vfe->vfe_flags & VFS_TBLGENERICMNTARGS) == 0) newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS; else newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS; - + + if (vfe->vfe_flags & VFS_TBLNATIVEXATTR) + newvfstbl->vfc_vfsflags |= VFC_VFSNATIVEXATTR; + if (vfe->vfe_flags & VFS_TBLUNMOUNT_PREFLIGHT) + newvfstbl->vfc_vfsflags |= VFC_VFSPREFLIGHT; + if (vfe->vfe_flags & 
VFS_TBLREADDIR_EXTENDED) + newvfstbl->vfc_vfsflags |= VFC_VFSREADDIR_EXTENDED; + if (vfe->vfe_flags & VFS_TBLNOMACLABEL) + newvfstbl->vfc_vfsflags |= VFC_VFSNOMACLABEL; /* * Allocate and init the vectors. @@ -1005,80 +1080,282 @@ vfs_mountrele(__unused mount_t mp ) /* drops reference */ } int -vfs_context_pid(vfs_context_t context) +vfs_context_pid(vfs_context_t ctx) { - return (context->vc_proc->p_pid); + return (proc_pid(vfs_context_proc(ctx))); } int -vfs_context_suser(vfs_context_t context) +vfs_context_suser(vfs_context_t ctx) { - return (suser(context->vc_ucred, 0)); + return (suser(ctx->vc_ucred, NULL)); } + +/* + * XXX Signals should be tied to threads, not processes, for most uses of this + * XXX call. + */ int -vfs_context_issignal(vfs_context_t context, sigset_t mask) +vfs_context_issignal(vfs_context_t ctx, sigset_t mask) { - if (context->vc_proc) - return(proc_pendingsignals(context->vc_proc, mask)); + proc_t p = vfs_context_proc(ctx); + if (p) + return(proc_pendingsignals(p, mask)); return(0); } int -vfs_context_is64bit(vfs_context_t context) +vfs_context_is64bit(vfs_context_t ctx) { - if (context->vc_proc) - return(proc_is64bit(context->vc_proc)); + proc_t proc = vfs_context_proc(ctx); + + if (proc) + return(proc_is64bit(proc)); return(0); } + +/* + * vfs_context_proc + * + * Description: Given a vfs_context_t, return the proc_t associated with it. + * + * Parameters: vfs_context_t The context to use + * + * Returns: proc_t The process for this context + * + * Notes: This function will return the current_proc() if any of the + * following conditions are true: + * + * o The supplied context pointer is NULL + * o There is no Mach thread associated with the context + * o There is no Mach task associated with the Mach thread + * o There is no proc_t associated with the Mach task + * o The proc_t has no per process open file table + * o The proc_t is post-vfork() + * + * This causes this function to return a value matching as + * closely as possible the previous behaviour, while at the + * same time avoiding the task lending that results from vfork() + */ proc_t -vfs_context_proc(vfs_context_t context) +vfs_context_proc(vfs_context_t ctx) +{ + proc_t proc = NULL; + + if (ctx != NULL && ctx->vc_thread != NULL) + proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread); + if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK))) + proc = NULL; + + return(proc == NULL ? current_proc() : proc); +} + +/* + * vfs_context_get_special_port + * + * Description: Return the requested special port from the task associated + * with the given context. + * + * Parameters: vfs_context_t The context to use + * int Index of special port + * ipc_port_t * Pointer to returned port + * + * Returns: kern_return_t see task_get_special_port() + */ +kern_return_t +vfs_context_get_special_port(vfs_context_t ctx, int which, ipc_port_t *portp) +{ + task_t task = NULL; + + if (ctx != NULL && ctx->vc_thread != NULL) + task = get_threadtask(ctx->vc_thread); + + return task_get_special_port(task, which, portp); +} + +/* + * vfs_context_set_special_port + * + * Description: Set the requested special port in the task associated + * with the given context. 
+ * + * Parameters: vfs_context_t The context to use + * int Index of special port + * ipc_port_t New special port + * + * Returns: kern_return_t see task_set_special_port() + */ +kern_return_t +vfs_context_set_special_port(vfs_context_t ctx, int which, ipc_port_t port) +{ + task_t task = NULL; + + if (ctx != NULL && ctx->vc_thread != NULL) + task = get_threadtask(ctx->vc_thread); + + return task_set_special_port(task, which, port); +} + +/* + * vfs_context_thread + * + * Description: Return the Mach thread associated with a vfs_context_t + * + * Parameters: vfs_context_t The context to use + * + * Returns: thread_t The thread for this context, or + * NULL, if there is not one. + * + * Notes: NULL thread_t's are legal, but discouraged. They occur only + * as a result of a static vfs_context_t declaration in a function + * and will result in this function returning NULL. + * + * This is intentional; this function should NOT return the + * current_thread() in this case. + */ +thread_t +vfs_context_thread(vfs_context_t ctx) +{ + return(ctx->vc_thread); +} + + +/* + * vfs_context_cwd + * + * Description: Returns a reference on the vnode for the current working + * directory for the supplied context + * + * Parameters: vfs_context_t The context to use + * + * Returns: vnode_t The current working directory + * for this context + * + * Notes: The function first attempts to obtain the current directory + * from the thread, and if it is not present there, falls back + * to obtaining it from the process instead. If it can't be + * obtained from either place, we return NULLVP. + */ +vnode_t +vfs_context_cwd(vfs_context_t ctx) { - return (context->vc_proc); + vnode_t cwd = NULLVP; + + if(ctx != NULL && ctx->vc_thread != NULL) { + uthread_t uth = get_bsdthread_info(ctx->vc_thread); + proc_t proc; + + /* + * Get the cwd from the thread; if there isn't one, get it + * from the process, instead. + */ + if ((cwd = uth->uu_cdir) == NULLVP && + (proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread)) != NULL && + proc->p_fd != NULL) + cwd = proc->p_fd->fd_cdir; + } + + return(cwd); } + vfs_context_t -vfs_context_create(vfs_context_t context) +vfs_context_create(vfs_context_t ctx) { - struct vfs_context * newcontext; + vfs_context_t newcontext; - newcontext = (struct vfs_context *)kalloc(sizeof(struct vfs_context)); + newcontext = (vfs_context_t)kalloc(sizeof(struct vfs_context)); if (newcontext) { - if (context) { - newcontext->vc_proc = context->vc_proc; - newcontext->vc_ucred = context->vc_ucred; + kauth_cred_t safecred; + if (ctx) { + newcontext->vc_thread = ctx->vc_thread; + safecred = ctx->vc_ucred; } else { - newcontext->vc_proc = proc_self(); - newcontext->vc_ucred = kauth_cred_get(); + newcontext->vc_thread = current_thread(); + safecred = kauth_cred_get(); } - return(newcontext); + if (IS_VALID_CRED(safecred)) + kauth_cred_ref(safecred); + newcontext->vc_ucred = safecred; + return(newcontext); } - return((vfs_context_t)0); + return(NULL); +} + + +vfs_context_t +vfs_context_current(void) +{ + vfs_context_t ctx = NULL; + volatile uthread_t ut = (uthread_t)get_bsdthread_info(current_thread()); + + if (ut != NULL ) { + if (ut->uu_context.vc_ucred != NULL) { + ctx = &ut->uu_context; + } + } + + return(ctx == NULL ? vfs_context_kernel() : ctx); +} + + +/* + * XXX Do not ask + * + * Dangerous hack - adopt the first kernel thread as the current thread, to + * get to the vfs_context_t in the uthread associated with a kernel thread. 
+ * This is used by UDF to make the call into IOCDMediaBSDClient, + * IOBDMediaBSDClient, and IODVDMediaBSDClient to determine whether the + * ioctl() is being called from kernel or user space (and all this because + * we do not pass threads into our ioctl()'s, instead of processes). + * + * This is also used by imageboot_setup(), called early from bsd_init() after + * kernproc has been given a credential. + * + * Note: The use of proc_thread() here is a convenience to avoid inclusion + * of many Mach headers to do the reference directly rather than indirectly; + * we will need to forego this convenience when we reture proc_thread(). + */ +static struct vfs_context kerncontext; +vfs_context_t +vfs_context_kernel(void) +{ + if (kerncontext.vc_ucred == NOCRED) + kerncontext.vc_ucred = kernproc->p_ucred; + if (kerncontext.vc_thread == NULL) + kerncontext.vc_thread = proc_thread(kernproc); + + return(&kerncontext); } + int -vfs_context_rele(vfs_context_t context) +vfs_context_rele(vfs_context_t ctx) { - if (context) - kfree(context, sizeof(struct vfs_context)); + if (ctx) { + if (IS_VALID_CRED(ctx->vc_ucred)) + kauth_cred_unref(&ctx->vc_ucred); + kfree(ctx, sizeof(struct vfs_context)); + } return(0); } ucred_t -vfs_context_ucred(vfs_context_t context) +vfs_context_ucred(vfs_context_t ctx) { - return (context->vc_ucred); + return (ctx->vc_ucred); } /* * Return true if the context is owned by the superuser. */ int -vfs_context_issuser(vfs_context_t context) +vfs_context_issuser(vfs_context_t ctx) { - return(context->vc_ucred->cr_uid == 0); + return(kauth_cred_issuser(vfs_context_ucred(ctx))); } @@ -1169,7 +1446,7 @@ vnode_fsnode(vnode_t vp) void vnode_clearfsnode(vnode_t vp) { - vp->v_data = 0; + vp->v_data = NULL; } dev_t @@ -1194,6 +1471,13 @@ vnode_issystem(vnode_t vp) return ((vp->v_flag & VSYSTEM)? 1 : 0); } +/* is vnode_t a swap file vnode */ +int +vnode_isswap(vnode_t vp) +{ + return ((vp->v_flag & VSWAP)? 1 : 0); +} + /* if vnode_t mount operation in progress */ int vnode_ismount(vnode_t vp) @@ -1207,7 +1491,7 @@ vnode_isrecycled(vnode_t vp) { int ret; - vnode_lock(vp); + vnode_lock_spin(vp); ret = (vp->v_lflag & (VL_TERMINATE|VL_DEAD))? 1 : 0; vnode_unlock(vp); return(ret); @@ -1229,6 +1513,12 @@ vnode_isnoreadahead(vnode_t vp) return ((vp->v_flag & VRAOFF)? 1 : 0); } +int +vnode_is_openevt(vnode_t vp) +{ + return ((vp->v_flag & VOPENEVT)? 1 : 0); +} + /* is vnode_t a standard one? */ int vnode_isstandard(vnode_t vp) @@ -1292,12 +1582,28 @@ vnode_issock(vnode_t vp) return ((vp->v_type == VSOCK)? 1 : 0); } +/* is vnode_t a named stream? */ +int +vnode_isnamedstream( +#if NAMEDSTREAMS + vnode_t vp +#else + __unused vnode_t vp +#endif + ) +{ +#if NAMEDSTREAMS + return ((vp->v_flag & VISNAMEDSTREAM) ? 
1 : 0); +#else + return (0); +#endif +} /* TBD: set vnode_t to not cache data after it is consumed once; used for quota */ void vnode_setnocache(vnode_t vp) { - vnode_lock(vp); + vnode_lock_spin(vp); vp->v_flag |= VNOCACHE_DATA; vnode_unlock(vp); } @@ -1305,15 +1611,32 @@ vnode_setnocache(vnode_t vp) void vnode_clearnocache(vnode_t vp) { - vnode_lock(vp); + vnode_lock_spin(vp); vp->v_flag &= ~VNOCACHE_DATA; vnode_unlock(vp); } +void +vnode_set_openevt(vnode_t vp) +{ + vnode_lock_spin(vp); + vp->v_flag |= VOPENEVT; + vnode_unlock(vp); +} + +void +vnode_clear_openevt(vnode_t vp) +{ + vnode_lock_spin(vp); + vp->v_flag &= ~VOPENEVT; + vnode_unlock(vp); +} + + void vnode_setnoreadahead(vnode_t vp) { - vnode_lock(vp); + vnode_lock_spin(vp); vp->v_flag |= VRAOFF; vnode_unlock(vp); } @@ -1321,7 +1644,7 @@ vnode_setnoreadahead(vnode_t vp) void vnode_clearnoreadahead(vnode_t vp) { - vnode_lock(vp); + vnode_lock_spin(vp); vp->v_flag &= ~VRAOFF; vnode_unlock(vp); } @@ -1331,7 +1654,7 @@ vnode_clearnoreadahead(vnode_t vp) void vnode_setnoflush(vnode_t vp) { - vnode_lock(vp); + vnode_lock_spin(vp); vp->v_flag |= VNOFLUSH; vnode_unlock(vp); } @@ -1339,7 +1662,7 @@ vnode_setnoflush(vnode_t vp) void vnode_clearnoflush(vnode_t vp) { - vnode_lock(vp); + vnode_lock_spin(vp); vp->v_flag &= ~VNOFLUSH; vnode_unlock(vp); } @@ -1355,7 +1678,7 @@ vnode_ismountedon(vnode_t vp) void vnode_setmountedon(vnode_t vp) { - vnode_lock(vp); + vnode_lock_spin(vp); vp->v_specflags |= SI_MOUNTEDON; vnode_unlock(vp); } @@ -1363,7 +1686,7 @@ vnode_setmountedon(vnode_t vp) void vnode_clearmountedon(vnode_t vp) { - vnode_lock(vp); + vnode_lock_spin(vp); vp->v_specflags &= ~SI_MOUNTEDON; vnode_unlock(vp); } @@ -1395,7 +1718,7 @@ vnode_setparent(vnode_t vp, vnode_t dvp) vp->v_parent = dvp; } -char * +const char * vnode_name(vnode_t vp) { /* we try to keep v_name a reasonable name for the node */ @@ -1477,34 +1800,65 @@ vnode_vfsisrdonly(vnode_t vp) } -/* returns vnode ref to current working directory */ +/* + * Returns vnode ref to current working directory; if a per-thread current + * working directory is in effect, return that instead of the per process one. + * + * XXX Published, but not used. + */ vnode_t current_workingdir(void) { - struct proc *p = current_proc(); - struct vnode * vp ; - - if ( (vp = p->p_fd->fd_cdir) ) { - if ( (vnode_getwithref(vp)) ) - return (NULL); - } - return vp; + return vfs_context_cwd(vfs_context_current()); } /* returns vnode ref to current root(chroot) directory */ vnode_t current_rootdir(void) { - struct proc *p = current_proc(); + proc_t proc = current_proc(); struct vnode * vp ; - if ( (vp = p->p_fd->fd_rdir) ) { + if ( (vp = proc->p_fd->fd_rdir) ) { if ( (vnode_getwithref(vp)) ) return (NULL); } return vp; } +/* + * Get a filesec and optional acl contents from an extended attribute. + * Function will attempt to retrive ACL, UUID, and GUID information using a + * read of a named extended attribute (KAUTH_FILESEC_XATTR). + * + * Parameters: vp The vnode on which to operate. + * fsecp The filesec (and ACL, if any) being + * retrieved. + * ctx The vnode context in which the + * operation is to be attempted. + * + * Returns: 0 Success + * !0 errno value + * + * Notes: The kauth_filesec_t in '*fsecp', if retrieved, will be in + * host byte order, as will be the ACL contents, if any. 
+ * Internally, we will cannonize these values from network (PPC) + * byte order after we retrieve them so that the on-disk contents + * of the extended attribute are identical for both PPC and Intel + * (if we were not being required to provide this service via + * fallback, this would be the job of the filesystem + * 'VNOP_GETATTR' call). + * + * We use ntohl() because it has a transitive property on Intel + * machines and no effect on PPC mancines. This guarantees us + * + * XXX: Deleting rather than ignoreing a corrupt security structure is + * probably the only way to reset it without assistance from an + * file system integrity checking tool. Right now we ignore it. + * + * XXX: We should enummerate the possible errno values here, and where + * in the code they originated. + */ static int vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx) { @@ -1513,6 +1867,8 @@ vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx) size_t fsec_size; size_t xsize, rsize; int error; + uint32_t host_fsec_magic; + uint32_t host_acl_entrycount; fsec = NULL; fsec_uio = NULL; @@ -1526,6 +1882,18 @@ vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx) /* either way, we are done */ goto out; } + + /* + * To be valid, a kauth_filesec_t must be large enough to hold a zero + * ACE entrly ACL, and if it's larger than that, it must have the right + * number of bytes such that it contains an atomic number of ACEs, + * rather than partial entries. Otherwise, we ignore it. + */ + if (!KAUTH_FILESEC_VALID(xsize)) { + KAUTH_DEBUG(" ERROR - Bogus kauth_fiilesec_t: %ld bytes", xsize); + error = 0; + goto out; + } /* how many entries would fit? */ fsec_size = KAUTH_FILESEC_COUNT(xsize); @@ -1556,28 +1924,38 @@ vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx) } /* - * Validate security structure. If it's corrupt, we will - * just ignore it. + * Validate security structure; the validation must take place in host + * byte order. If it's corrupt, we will just ignore it. */ + + /* Validate the size before trying to convert it */ if (rsize < KAUTH_FILESEC_SIZE(0)) { KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize); goto out; } - if (fsec->fsec_magic != KAUTH_FILESEC_MAGIC) { - KAUTH_DEBUG("ACL - BAD MAGIC %x", fsec->fsec_magic); - goto out; - } - if ((fsec->fsec_acl.acl_entrycount != KAUTH_FILESEC_NOACL) && - (fsec->fsec_acl.acl_entrycount > KAUTH_ACL_MAX_ENTRIES)) { - KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", fsec->fsec_entrycount); + + /* Validate the magic number before trying to convert it */ + host_fsec_magic = ntohl(KAUTH_FILESEC_MAGIC); + if (fsec->fsec_magic != host_fsec_magic) { + KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic); goto out; } - if ((fsec->fsec_acl.acl_entrycount != KAUTH_FILESEC_NOACL) && - (KAUTH_FILESEC_SIZE(fsec->fsec_acl.acl_entrycount) > rsize)) { - KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", fsec->fsec_acl.acl_entrycount, rsize); - goto out; + + /* Validate the entry count before trying to convert it. 
*/ + host_acl_entrycount = ntohl(fsec->fsec_acl.acl_entrycount); + if (host_acl_entrycount != KAUTH_FILESEC_NOACL) { + if (host_acl_entrycount > KAUTH_ACL_MAX_ENTRIES) { + KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount); + goto out; + } + if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) { + KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize); + goto out; + } } + kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL); + *fsecp = fsec; fsec = NULL; error = 0; @@ -1591,11 +1969,43 @@ out: return(error); } +/* + * Set a filesec and optional acl contents into an extended attribute. + * function will attempt to store ACL, UUID, and GUID information using a + * write to a named extended attribute (KAUTH_FILESEC_XATTR). The 'acl' + * may or may not point to the `fsec->fsec_acl`, depending on whether the + * original caller supplied an acl. + * + * Parameters: vp The vnode on which to operate. + * fsec The filesec being set. + * acl The acl to be associated with 'fsec'. + * ctx The vnode context in which the + * operation is to be attempted. + * + * Returns: 0 Success + * !0 errno value + * + * Notes: Both the fsec and the acl are always valid. + * + * The kauth_filesec_t in 'fsec', if any, is in host byte order, + * as are the acl contents, if they are used. Internally, we will + * cannonize these values into network (PPC) byte order before we + * attempt to write them so that the on-disk contents of the + * extended attribute are identical for both PPC and Intel (if we + * were not being required to provide this service via fallback, + * this would be the job of the filesystem 'VNOP_SETATTR' call). + * We reverse this process on the way out, so we leave with the + * same byte order we started with. + * + * XXX: We should enummerate the possible errno values here, and where + * in the code they originated. + */ static int vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx) { - uio_t fsec_uio; - int error; + uio_t fsec_uio; + int error; + uint32_t saved_acl_copysize; fsec_uio = NULL; @@ -1604,8 +2014,16 @@ vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context error = ENOMEM; goto out; } + /* + * Save the pre-converted ACL copysize, because it gets swapped too + * if we are running with the wrong endianness. + */ + saved_acl_copysize = KAUTH_ACL_COPYSIZE(acl); + + kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl); + uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), sizeof(struct kauth_filesec) - sizeof(struct kauth_acl)); - uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), KAUTH_ACL_COPYSIZE(acl)); + uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize); error = vn_setxattr(vp, KAUTH_FILESEC_XATTR, fsec_uio, @@ -1613,6 +2031,8 @@ vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context ctx); VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error); + kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl); + out: if (fsec_uio != NULL) uio_free(fsec_uio); @@ -1620,6 +2040,15 @@ out: } +/* + * Returns: 0 Success + * ENOMEM Not enough space [only if has filesec] + * VNOP_GETATTR: ??? + * vnode_get_filesec: ??? + * kauth_cred_guid2uid: ??? + * kauth_cred_guid2gid: ??? + * vfs_update_vfsstat: ??? + */ int vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) { @@ -1730,7 +2159,9 @@ vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here. 
*/ if (VATTR_IS_ACTIVE(vap, va_uid)) { - if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) { + if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_uid)) { + nuid = vap->va_uid; + } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) { nuid = vp->v_mount->mnt_fsowner; if (nuid == KAUTH_UID_NONE) nuid = 99; @@ -1745,7 +2176,9 @@ vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) VATTR_RETURN(vap, va_uid, nuid); } if (VATTR_IS_ACTIVE(vap, va_gid)) { - if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) { + if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_gid)) { + ngid = vap->va_gid; + } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) { ngid = vp->v_mount->mnt_fsgroup; if (ngid == KAUTH_GID_NONE) ngid = 99; @@ -1787,7 +2220,7 @@ vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) VATTR_IS_ACTIVE(vap, va_total_alloc)) { /* make sure f_bsize is valid */ if (vp->v_mount->mnt_vfsstat.f_bsize == 0) { - if ((error = vfs_update_vfsstat(vp->v_mount, ctx)) != 0) + if ((error = vfs_update_vfsstat(vp->v_mount, ctx, VFS_KERNEL_EVENT)) != 0) goto out; } @@ -1826,17 +2259,48 @@ out: return(error); } +/* + * Set the attributes on a vnode in a vnode context. + * + * Parameters: vp The vnode whose attributes to set. + * vap A pointer to the attributes to set. + * ctx The vnode context in which the + * operation is to be attempted. + * + * Returns: 0 Success + * !0 errno value + * + * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order. + * + * The contents of the data area pointed to by 'vap' may be + * modified if the vnode is on a filesystem which has been + * mounted with ingore ownership flags, or by the underlyng + * VFS itself, or by the fallback code, if the underlying VFS + * does not support ACL, UUID, or GUUID attributes directly. + * + * XXX: We should enummerate the possible errno values here, and where + * in the code they originated. + */ int vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) { - int error, is_ownership_change=0; + int error, is_perm_change=0; /* * Make sure the filesystem is mounted R/W. * If not, return an error. */ - if (vfs_isrdonly(vp->v_mount)) - return(EROFS); + if (vfs_isrdonly(vp->v_mount)) { + error = EROFS; + goto out; + } +#if NAMEDSTREAMS + /* For streams, va_data_size is the only setable attribute. 
*/ + if ((vp->v_flag & VISNAMEDSTREAM) && (vap->va_active != VNODE_ATTR_va_data_size)) { + error = EPERM; + goto out; + } +#endif /* * If ownership is being ignored on this volume, we silently discard @@ -1847,8 +2311,9 @@ vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) VATTR_CLEAR_ACTIVE(vap, va_gid); } - if (VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid)) { - is_ownership_change = 1; + if ( VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid) + || VATTR_IS_ACTIVE(vap, va_mode) || VATTR_IS_ACTIVE(vap, va_acl)) { + is_perm_change = 1; } /* @@ -1858,7 +2323,8 @@ vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) if (!vfs_extendedsecurity(vnode_mount(vp)) && (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) { KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security"); - return(ENOTSUP); + error = ENOTSUP; + goto out; } error = VNOP_SETATTR(vp, vap, ctx); @@ -1866,36 +2332,48 @@ vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) if ((error == 0) && !VATTR_ALL_SUPPORTED(vap)) error = vnode_setattr_fallback(vp, vap, ctx); - /* - * If we have changed any of the things about the file that are likely - * to result in changes to authorisation results, blow the vnode auth - * cache - */ - if (VATTR_IS_SUPPORTED(vap, va_mode) || - VATTR_IS_SUPPORTED(vap, va_uid) || - VATTR_IS_SUPPORTED(vap, va_gid) || - VATTR_IS_SUPPORTED(vap, va_flags) || - VATTR_IS_SUPPORTED(vap, va_acl) || - VATTR_IS_SUPPORTED(vap, va_uuuid) || - VATTR_IS_SUPPORTED(vap, va_guuid)) - vnode_uncache_credentials(vp); +#if CONFIG_FSE // only send a stat_changed event if this is more than // just an access time update if (error == 0 && (vap->va_active != VNODE_ATTR_BIT(va_access_time))) { - if (need_fsevent(FSE_STAT_CHANGED, vp) || (is_ownership_change && need_fsevent(FSE_CHOWN, vp))) { - if (is_ownership_change == 0) - add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE); - else - add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE); + if (is_perm_change) { + if (need_fsevent(FSE_CHOWN, vp)) { + add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE); + } + } else if(need_fsevent(FSE_STAT_CHANGED, vp)) { + add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE); } } +#endif + +out: return(error); } /* - * Following an operation which sets attributes (setattr, create, etc.) we may - * need to perform fallback operations to get attributes saved. - */ + * Fallback for setting the attributes on a vnode in a vnode context. This + * Function will attempt to store ACL, UUID, and GUID information utilizing + * a read/modify/write operation against an EA used as a backing store for + * the object. + * + * Parameters: vp The vnode whose attributes to set. + * vap A pointer to the attributes to set. + * ctx The vnode context in which the + * operation is to be attempted. + * + * Returns: 0 Success + * !0 errno value + * + * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order, + * as are the fsec and lfsec, if they are used. + * + * The contents of the data area pointed to by 'vap' may be + * modified to indicate that the attribute is supported for + * any given requested attribute. + * + * XXX: We should enummerate the possible errno values here, and where + * in the code they originated. 
+ */ int vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) { @@ -1909,7 +2387,8 @@ vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) /* * Extended security fallback via extended attributes. * - * Note that we do not free the filesec; the caller is expected to do this. + * Note that we do not free the filesec; the caller is expected to + * do this. */ if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || @@ -1917,7 +2396,8 @@ vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback"); /* - * Fail for file types that we don't permit extended security to be set on. + * Fail for file types that we don't permit extended security + * to be set on. */ if ((vp->v_type != VDIR) && (vp->v_type != VLNK) && (vp->v_type != VREG)) { VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp)); @@ -1926,8 +2406,9 @@ vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) } /* - * If we don't have all the extended security items, we need to fetch the existing - * data to perform a read-modify-write operation. + * If we don't have all the extended security items, we need + * to fetch the existing data to perform a read-modify-write + * operation. */ fsec = NULL; if (!VATTR_IS_ACTIVE(vap, va_acl) || @@ -1982,7 +2463,8 @@ vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) } /* - * If the filesec data is all invalid, we can just remove the EA completely. + * If the filesec data is all invalid, we can just remove + * the EA completely. */ if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) && kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) && @@ -2026,8 +2508,30 @@ struct vnop_lookup_args { }; #endif /* 0*/ +/* + * Returns: 0 Success + * lock_fsnode:ENOENT No such file or directory [only for VFS + * that is not thread safe & vnode is + * currently being/has been terminated] + * :ENAMETOOLONG + * :ENOENT + * :EJUSTRETURN + * :EPERM + * :EISDIR + * :ENOTDIR + * :??? + * + * Note: The return codes from the underlying VFS's lookup routine can't + * be fully enumerated here, since third party VFS authors may not + * limit their error returns to the ones documented here, even + * though this may result in some programs functioning incorrectly. + * + * The return codes documented above are those which may currently + * be returned by HFS from hfs_lookup, not including additional + * error code which may be propagated from underlying routines. 
+ */ errno_t -VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t context) +VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t ctx) { int _err; struct vnop_lookup_args a; @@ -2039,11 +2543,9 @@ VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t a.a_dvp = dvp; a.a_vpp = vpp; a.a_cnp = cnp; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(dvp); - vnode_cache_credentials(dvp, context); - if (!thread_safe) { if ( (_err = lock_fsnode(dvp, &funnel_state)) ) { return (_err); @@ -2094,7 +2596,7 @@ struct vnop_create_args { }; #endif /* 0*/ errno_t -VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t context) +VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx) { int _err; struct vnop_create_args a; @@ -2106,7 +2608,7 @@ VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode a.a_vpp = vpp; a.a_cnp = cnp; a.a_vap = vap; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(dvp); if (!thread_safe) { @@ -2119,7 +2621,7 @@ VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode /* * Remove stale Apple Double file (if any). */ - xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 0); + xattrfile_remove(dvp, cnp->cn_nameptr, ctx, thread_safe, 0); } if (!thread_safe) { unlock_fsnode(dvp, &funnel_state); @@ -2144,7 +2646,7 @@ struct vnop_whiteout_args { }; #endif /* 0*/ errno_t -VNOP_WHITEOUT(vnode_t dvp, struct componentname * cnp, int flags, vfs_context_t context) +VNOP_WHITEOUT(vnode_t dvp, struct componentname * cnp, int flags, vfs_context_t ctx) { int _err; struct vnop_whiteout_args a; @@ -2155,7 +2657,7 @@ VNOP_WHITEOUT(vnode_t dvp, struct componentname * cnp, int flags, vfs_context_t a.a_dvp = dvp; a.a_cnp = cnp; a.a_flags = flags; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(dvp); if (!thread_safe) { @@ -2187,7 +2689,7 @@ struct vnop_mknod_args { }; #endif /* 0*/ errno_t -VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t context) +VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx) { int _err; @@ -2200,7 +2702,7 @@ VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_ a.a_vpp = vpp; a.a_cnp = cnp; a.a_vap = vap; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(dvp); if (!thread_safe) { @@ -2229,23 +2731,20 @@ struct vnop_open_args { }; #endif /* 0*/ errno_t -VNOP_OPEN(vnode_t vp, int mode, vfs_context_t context) +VNOP_OPEN(vnode_t vp, int mode, vfs_context_t ctx) { int _err; struct vnop_open_args a; int thread_safe; int funnel_state = 0; - struct vfs_context acontext; - if (context == NULL) { - acontext.vc_proc = current_proc(); - acontext.vc_ucred = kauth_cred_get(); - context = &acontext; + if (ctx == NULL) { + ctx = vfs_context_current(); } a.a_desc = &vnop_open_desc; a.a_vp = vp; a.a_mode = mode; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -2281,23 +2780,20 @@ struct vnop_close_args { }; #endif /* 0*/ errno_t -VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t context) +VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t ctx) { int _err; struct vnop_close_args a; int thread_safe; int funnel_state = 0; - struct vfs_context acontext; - if (context 
== NULL) { - acontext.vc_proc = current_proc(); - acontext.vc_ucred = kauth_cred_get(); - context = &acontext; + if (ctx == NULL) { + ctx = vfs_context_current(); } a.a_desc = &vnop_close_desc; a.a_vp = vp; a.a_fflag = fflag; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -2333,23 +2829,20 @@ struct vnop_access_args { }; #endif /* 0*/ errno_t -VNOP_ACCESS(vnode_t vp, int action, vfs_context_t context) +VNOP_ACCESS(vnode_t vp, int action, vfs_context_t ctx) { int _err; struct vnop_access_args a; int thread_safe; int funnel_state = 0; - struct vfs_context acontext; - if (context == NULL) { - acontext.vc_proc = current_proc(); - acontext.vc_ucred = kauth_cred_get(); - context = &acontext; + if (ctx == NULL) { + ctx = vfs_context_current(); } a.a_desc = &vnop_access_desc; a.a_vp = vp; a.a_action = action; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -2378,17 +2871,17 @@ struct vnop_getattr_args { }; #endif /* 0*/ errno_t -VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t context) +VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx) { int _err; struct vnop_getattr_args a; int thread_safe; - int funnel_state; + int funnel_state = 0; /* protected by thread_safe */ a.a_desc = &vnop_getattr_desc; a.a_vp = vp; a.a_vap = vap; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -2417,17 +2910,17 @@ struct vnop_setattr_args { }; #endif /* 0*/ errno_t -VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t context) +VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx) { int _err; struct vnop_setattr_args a; int thread_safe; - int funnel_state; + int funnel_state = 0; /* protected by thread_safe */ a.a_desc = &vnop_setattr_desc; a.a_vp = vp; a.a_vap = vap; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -2438,7 +2931,7 @@ VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t context) _err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a); /* - * Shadow uid/gid/mod change to extended attibute file. + * Shadow uid/gid/mod change to extended attribute file. 
*/ if (_err == 0 && !NATIVE_XATTR(vp)) { struct vnode_attr va; @@ -2459,12 +2952,12 @@ VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t context) } if (change) { vnode_t dvp; - char *vname; + const char *vname; dvp = vnode_getparent(vp); vname = vnode_getname(vp); - xattrfile_setattr(dvp, vname, &va, context, thread_safe); + xattrfile_setattr(dvp, vname, &va, ctx, thread_safe); if (dvp != NULLVP) vnode_put(dvp); if (vname != NULL) @@ -2474,95 +2967,21 @@ VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t context) if (!thread_safe) { unlock_fsnode(vp, &funnel_state); } - return (_err); -} - -#if 0 -/* - *# - *#% getattrlist vp = = = - *# - */ -struct vnop_getattrlist_args { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - struct attrlist *a_alist; - struct uio *a_uio; - int a_options; - vfs_context_t a_context; -}; -#endif /* 0*/ -errno_t -VNOP_GETATTRLIST(vnode_t vp, struct attrlist * alist, struct uio * uio, int options, vfs_context_t context) -{ - int _err; - struct vnop_getattrlist_args a; - int thread_safe; - int funnel_state = 0; - - a.a_desc = &vnop_getattrlist_desc; - a.a_vp = vp; - a.a_alist = alist; - a.a_uio = uio; - a.a_options = options; - a.a_context = context; - thread_safe = THREAD_SAFE_FS(vp); - - if (!thread_safe) { - if ( (_err = lock_fsnode(vp, &funnel_state)) ) { - return (_err); - } - } - _err = (*vp->v_op[vnop_getattrlist_desc.vdesc_offset])(&a); - if (!thread_safe) { - unlock_fsnode(vp, &funnel_state); - } - return (_err); -} - -#if 0 -/* - *# - *#% setattrlist vp L L L - *# - */ -struct vnop_setattrlist_args { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - struct attrlist *a_alist; - struct uio *a_uio; - int a_options; - vfs_context_t a_context; -}; -#endif /* 0*/ -errno_t -VNOP_SETATTRLIST(vnode_t vp, struct attrlist * alist, struct uio * uio, int options, vfs_context_t context) -{ - int _err; - struct vnop_setattrlist_args a; - int thread_safe; - int funnel_state = 0; - - a.a_desc = &vnop_setattrlist_desc; - a.a_vp = vp; - a.a_alist = alist; - a.a_uio = uio; - a.a_options = options; - a.a_context = context; - thread_safe = THREAD_SAFE_FS(vp); - - if (!thread_safe) { - if ( (_err = lock_fsnode(vp, &funnel_state)) ) { - return (_err); - } - } - _err = (*vp->v_op[vnop_setattrlist_desc.vdesc_offset])(&a); - - vnode_uncache_credentials(vp); + /* + * If we have changed any of the things about the file that are likely + * to result in changes to authorization results, blow the vnode auth + * cache + */ + if (_err == 0 && ( + VATTR_IS_SUPPORTED(vap, va_mode) || + VATTR_IS_SUPPORTED(vap, va_uid) || + VATTR_IS_SUPPORTED(vap, va_gid) || + VATTR_IS_SUPPORTED(vap, va_flags) || + VATTR_IS_SUPPORTED(vap, va_acl) || + VATTR_IS_SUPPORTED(vap, va_uuuid) || + VATTR_IS_SUPPORTED(vap, va_guuid))) + vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS); - if (!thread_safe) { - unlock_fsnode(vp, &funnel_state); - } return (_err); } @@ -2582,25 +3001,22 @@ struct vnop_read_args { }; #endif /* 0*/ errno_t -VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t context) +VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx) { int _err; struct vnop_read_args a; int thread_safe; int funnel_state = 0; - struct vfs_context acontext; - if (context == NULL) { - acontext.vc_proc = current_proc(); - acontext.vc_ucred = kauth_cred_get(); - context = &acontext; + if (ctx == NULL) { + ctx = vfs_context_current(); } a.a_desc = &vnop_read_desc; a.a_vp = vp; a.a_uio = uio; a.a_ioflag = ioflag; - a.a_context = context; + a.a_context = 
ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -2639,25 +3055,22 @@ struct vnop_write_args { }; #endif /* 0*/ errno_t -VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t context) +VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx) { struct vnop_write_args a; int _err; int thread_safe; int funnel_state = 0; - struct vfs_context acontext; - if (context == NULL) { - acontext.vc_proc = current_proc(); - acontext.vc_ucred = kauth_cred_get(); - context = &acontext; + if (ctx == NULL) { + ctx = vfs_context_current(); } a.a_desc = &vnop_write_desc; a.a_vp = vp; a.a_uio = uio; a.a_ioflag = ioflag; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -2697,21 +3110,18 @@ struct vnop_ioctl_args { }; #endif /* 0*/ errno_t -VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t context) +VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ctx) { int _err; struct vnop_ioctl_args a; int thread_safe; int funnel_state = 0; - struct vfs_context acontext; - if (context == NULL) { - acontext.vc_proc = current_proc(); - acontext.vc_ucred = kauth_cred_get(); - context = &acontext; + if (ctx == NULL) { + ctx = vfs_context_current(); } - if (vfs_context_is64bit(context)) { + if (vfs_context_is64bit(ctx)) { if (!vnode_vfs64bitready(vp)) { return(ENOTTY); } @@ -2722,7 +3132,7 @@ VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t co a.a_command = command; a.a_data = data; a.a_fflag = fflag; - a.a_context= context; + a.a_context= ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -2761,24 +3171,21 @@ struct vnop_select_args { }; #endif /* 0*/ errno_t -VNOP_SELECT(vnode_t vp, int which , int fflags, void * wql, vfs_context_t context) +VNOP_SELECT(vnode_t vp, int which , int fflags, void * wql, vfs_context_t ctx) { int _err; struct vnop_select_args a; int thread_safe; int funnel_state = 0; - struct vfs_context acontext; - if (context == NULL) { - acontext.vc_proc = current_proc(); - acontext.vc_ucred = kauth_cred_get(); - context = &acontext; + if (ctx == NULL) { + ctx = vfs_context_current(); } a.a_desc = &vnop_select_desc; a.a_vp = vp; a.a_which = which; a.a_fflags = fflags; - a.a_context = context; + a.a_context = ctx; a.a_wql = wql; thread_safe = THREAD_SAFE_FS(vp); @@ -2818,7 +3225,7 @@ struct vnop_exchange_args { }; #endif /* 0*/ errno_t -VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t context) +VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t ctx) { int _err; struct vnop_exchange_args a; @@ -2830,7 +3237,7 @@ VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t context) a.a_fvp = fvp; a.a_tvp = tvp; a.a_options = options; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(fvp); if (!thread_safe) { @@ -2875,7 +3282,7 @@ struct vnop_revoke_args { }; #endif /* 0*/ errno_t -VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t context) +VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t ctx) { struct vnop_revoke_args a; int _err; @@ -2885,7 +3292,7 @@ VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t context) a.a_desc = &vnop_revoke_desc; a.a_vp = vp; a.a_flags = flags; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -2913,7 +3320,7 @@ struct vnop_mmap_args { }; #endif /* 0*/ errno_t -VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t context) +VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t ctx) { int _err; 
struct vnop_mmap_args a; @@ -2923,7 +3330,7 @@ VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t context) a.a_desc = &vnop_mmap_desc; a.a_vp = vp; a.a_fflags = fflags; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -2952,7 +3359,7 @@ struct vnop_mnomap_args { }; #endif /* 0*/ errno_t -VNOP_MNOMAP(vnode_t vp, vfs_context_t context) +VNOP_MNOMAP(vnode_t vp, vfs_context_t ctx) { int _err; struct vnop_mnomap_args a; @@ -2961,7 +3368,7 @@ VNOP_MNOMAP(vnode_t vp, vfs_context_t context) a.a_desc = &vnop_mnomap_desc; a.a_vp = vp; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -2991,7 +3398,7 @@ struct vnop_fsync_args { }; #endif /* 0*/ errno_t -VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t context) +VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t ctx) { struct vnop_fsync_args a; int _err; @@ -3001,7 +3408,7 @@ VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t context) a.a_desc = &vnop_fsync_desc; a.a_vp = vp; a.a_waitfor = waitfor; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -3034,7 +3441,7 @@ struct vnop_remove_args { }; #endif /* 0*/ errno_t -VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t context) +VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t ctx) { int _err; struct vnop_remove_args a; @@ -3046,7 +3453,7 @@ VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_ a.a_vp = vp; a.a_cnp = cnp; a.a_flags = flags; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(dvp); if (!thread_safe) { @@ -3061,9 +3468,9 @@ VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_ if ( !(NATIVE_XATTR(dvp)) ) { /* - * Remove any associated extended attibute file (._ AppleDouble file). + * Remove any associated extended attribute file (._ AppleDouble file). */ - xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 1); + xattrfile_remove(dvp, cnp->cn_nameptr, ctx, thread_safe, 1); } } if (!thread_safe) { @@ -3089,7 +3496,7 @@ struct vnop_link_args { }; #endif /* 0*/ errno_t -VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t context) +VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ctx) { int _err; struct vnop_link_args a; @@ -3101,7 +3508,7 @@ VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t co * disallow linking to an existing "._" Apple Double file. 
*/ if ( !NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) { - char *vname; + const char *vname; vname = vnode_getname(vp); if (vname != NULL) { @@ -3118,7 +3525,7 @@ VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t co a.a_vp = vp; a.a_tdvp = tdvp; a.a_cnp = cnp; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -3157,7 +3564,7 @@ struct vnop_rename_args { errno_t VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, - vfs_context_t context) + vfs_context_t ctx) { int _err; struct vnop_rename_args a; @@ -3177,7 +3584,7 @@ VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, a.a_tdvp = tdvp; a.a_tvp = tvp; a.a_tcnp = tcnp; - a.a_context = context; + a.a_context = ctx; if (!THREAD_SAFE_FS(fdvp)) fdvp_unsafe = fdvp; @@ -3246,7 +3653,7 @@ VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, } else { xfromname = &smallname1[0]; } - strcpy(xfromname, "._"); + strlcpy(xfromname, "._", min(sizeof smallname1, len)); strncat(xfromname, fcnp->cn_nameptr, fcnp->cn_namelen); xfromname[len-1] = '\0'; @@ -3257,7 +3664,7 @@ VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, } else { xtoname = &smallname2[0]; } - strcpy(xtoname, "._"); + strlcpy(xtoname, "._", min(sizeof smallname2, len)); strncat(xtoname, tcnp->cn_nameptr, tcnp->cn_namelen); xtoname[len-1] = '\0'; } @@ -3275,7 +3682,7 @@ VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, } /* - * Rename any associated extended attibute file (._ AppleDouble file). + * Rename any associated extended attribute file (._ AppleDouble file). */ if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) { struct nameidata fromnd, tond; @@ -3287,8 +3694,8 @@ VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, * Note that fdvp already has an iocount reference and * using DELETE will take an additional reference. */ - NDINIT(&fromnd, DELETE, NOFOLLOW | USEDVP, UIO_SYSSPACE, - CAST_USER_ADDR_T(xfromname), context); + NDINIT(&fromnd, DELETE, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE, + CAST_USER_ADDR_T(xfromname), ctx); fromnd.ni_dvp = fdvp; error = namei(&fromnd); @@ -3311,8 +3718,8 @@ VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, * Get destination attribute file vnode. * Note that tdvp already has an iocount reference. */ - NDINIT(&tond, DELETE, NOFOLLOW | USEDVP, UIO_SYSSPACE, - CAST_USER_ADDR_T(xtoname), context); + NDINIT(&tond, DELETE, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE, + CAST_USER_ADDR_T(xtoname), ctx); tond.ni_dvp = tdvp; error = namei(&tond); if (error) { @@ -3327,7 +3734,7 @@ VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, args.a_dvp = tdvp; args.a_vp = tond.ni_vp; args.a_cnp = &tond.ni_cnd; - args.a_context = context; + args.a_context = ctx; if (fdvp_unsafe != NULLVP) error = lock_fsnode(tond.ni_vp, NULL); @@ -3349,8 +3756,8 @@ VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, * Get destination attribute file vnode. 
*/ NDINIT(&tond, RENAME, - NOCACHE | NOFOLLOW | USEDVP, UIO_SYSSPACE, - CAST_USER_ADDR_T(xtoname), context); + NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE, + CAST_USER_ADDR_T(xtoname), ctx); tond.ni_dvp = tdvp; error = namei(&tond); @@ -3366,7 +3773,7 @@ VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, a.a_tdvp = tdvp; a.a_tvp = tond.ni_vp; a.a_tcnp = &tond.ni_cnd; - a.a_context = context; + a.a_context = ctx; if (fdvp_unsafe != NULLVP) { /* @@ -3388,6 +3795,13 @@ VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, } } if (error == 0) { + const char *oname; + vnode_t oparent; + + /* Save these off so we can later verify them (fix up below) */ + oname = fromnd.ni_vp->v_name; + oparent = fromnd.ni_vp->v_parent; + error = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a); if (fdvp_unsafe != NULLVP) { @@ -3400,6 +3814,24 @@ VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, if (tond.ni_vp && tond.ni_vp != fromnd.ni_vp) vnode_setneedinactive(tond.ni_vp); + /* + * Fix up name & parent pointers on ._ file + */ + if (oname == fromnd.ni_vp->v_name && + oparent == fromnd.ni_vp->v_parent) { + int update_flags; + + update_flags = VNODE_UPDATE_NAME; + + if (fdvp != tdvp) + update_flags |= VNODE_UPDATE_PARENT; + + vnode_update_identity(fromnd.ni_vp, tdvp, + tond.ni_cnd.cn_nameptr, + tond.ni_cnd.cn_namelen, + tond.ni_cnd.cn_hash, + update_flags); + } } } vnode_put(fromnd.ni_vp); @@ -3443,7 +3875,7 @@ struct vnop_mkdir_args { #endif /* 0*/ errno_t VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, - struct vnode_attr *vap, vfs_context_t context) + struct vnode_attr *vap, vfs_context_t ctx) { int _err; struct vnop_mkdir_args a; @@ -3455,7 +3887,7 @@ VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, a.a_vpp = vpp; a.a_cnp = cnp; a.a_vap = vap; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(dvp); if (!thread_safe) { @@ -3468,7 +3900,7 @@ VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, /* * Remove stale Apple Double file (if any). */ - xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 0); + xattrfile_remove(dvp, cnp->cn_nameptr, ctx, thread_safe, 0); } if (!thread_safe) { unlock_fsnode(dvp, &funnel_state); @@ -3494,7 +3926,7 @@ struct vnop_rmdir_args { #endif /* 0*/ errno_t -VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t context) +VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t ctx) { int _err; struct vnop_rmdir_args a; @@ -3505,7 +3937,7 @@ VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_c a.a_dvp = dvp; a.a_vp = vp; a.a_cnp = cnp; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(dvp); if (!thread_safe) { @@ -3520,9 +3952,9 @@ VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_c if ( !(NATIVE_XATTR(dvp)) ) { /* - * Remove any associated extended attibute file (._ AppleDouble file). + * Remove any associated extended attribute file (._ AppleDouble file). 
*/ - xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 1); + xattrfile_remove(dvp, cnp->cn_nameptr, ctx, thread_safe, 1); } } if (!thread_safe) { @@ -3536,7 +3968,7 @@ VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_c */ #define AD_STALE_SECS (180) static void -xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t context, int thread_safe, int force) { +xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int thread_safe, int force) { vnode_t xvp; struct nameidata nd; char smallname[64]; @@ -3554,8 +3986,8 @@ xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t context, int MALLOC(filename, char *, len, M_TEMP, M_WAITOK); len = snprintf(filename, len, "._%s", basename); } - NDINIT(&nd, DELETE, LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE, - CAST_USER_ADDR_T(filename), context); + NDINIT(&nd, DELETE, WANTPARENT | LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE, + CAST_USER_ADDR_T(filename), ctx); nd.ni_dvp = dvp; if (namei(&nd) != 0) goto out2; @@ -3576,7 +4008,7 @@ xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t context, int VATTR_INIT(&va); VATTR_WANTED(&va, va_data_size); VATTR_WANTED(&va, va_modify_time); - if (VNOP_GETATTR(xvp, &va, context) == 0 && + if (VNOP_GETATTR(xvp, &va, ctx) == 0 && VATTR_IS_SUPPORTED(&va, va_data_size) && VATTR_IS_SUPPORTED(&va, va_modify_time) && va.va_data_size != 0) { @@ -3597,7 +4029,7 @@ xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t context, int a.a_dvp = nd.ni_dvp; a.a_vp = xvp; a.a_cnp = &nd.ni_cnd; - a.a_context = context; + a.a_context = ctx; if (!thread_safe) { if ( (lock_fsnode(xvp, NULL)) ) @@ -3612,7 +4044,7 @@ xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t context, int vnode_setneedinactive(xvp); } out1: - /* Note: nd.ni_dvp's iocount is dropped by caller of VNOP_XXXX */ + vnode_put(dvp); vnode_put(xvp); out2: if (filename && filename != &smallname[0]) { @@ -3625,7 +4057,7 @@ out2: */ static void xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap, - vfs_context_t context, int thread_safe) { + vfs_context_t ctx, int thread_safe) { vnode_t xvp; struct nameidata nd; char smallname[64]; @@ -3645,7 +4077,7 @@ xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap, len = snprintf(filename, len, "._%s", basename); } NDINIT(&nd, LOOKUP, NOFOLLOW | USEDVP, UIO_SYSSPACE, - CAST_USER_ADDR_T(filename), context); + CAST_USER_ADDR_T(filename), ctx); nd.ni_dvp = dvp; if (namei(&nd) != 0) goto out2; @@ -3659,7 +4091,7 @@ xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap, a.a_desc = &vnop_setattr_desc; a.a_vp = xvp; a.a_vap = vap; - a.a_context = context; + a.a_context = ctx; if (!thread_safe) { if ( (lock_fsnode(xvp, NULL)) ) @@ -3698,7 +4130,7 @@ struct vnop_symlink_args { #endif /* 0*/ errno_t VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, - struct vnode_attr *vap, char *target, vfs_context_t context) + struct vnode_attr *vap, char *target, vfs_context_t ctx) { int _err; struct vnop_symlink_args a; @@ -3711,7 +4143,7 @@ VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, a.a_cnp = cnp; a.a_vap = vap; a.a_target = target; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(dvp); if (!thread_safe) { @@ -3724,7 +4156,7 @@ VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, /* * Remove stale Apple Double file (if any). 
*/ - xattrfile_remove(dvp, cnp->cn_nameptr, context, thread_safe, 0); + xattrfile_remove(dvp, cnp->cn_nameptr, ctx, thread_safe, 0); } if (!thread_safe) { unlock_fsnode(dvp, &funnel_state); @@ -3751,7 +4183,7 @@ struct vnop_readdir_args { #endif /* 0*/ errno_t VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag, - int *numdirent, vfs_context_t context) + int *numdirent, vfs_context_t ctx) { int _err; struct vnop_readdir_args a; @@ -3764,7 +4196,7 @@ VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag, a.a_flags = flags; a.a_eofflag = eofflag; a.a_numdirent = numdirent; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -3801,7 +4233,7 @@ struct vnop_readdirattr_args { #endif /* 0*/ errno_t VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, u_long maxcount, - u_long options, u_long *newstate, int *eofflag, u_long *actualcount, vfs_context_t context) + u_long options, u_long *newstate, int *eofflag, u_long *actualcount, vfs_context_t ctx) { int _err; struct vnop_readdirattr_args a; @@ -3817,7 +4249,7 @@ VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, u_lo a.a_newstate = newstate; a.a_eofflag = eofflag; a.a_actualcount = actualcount; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -3846,8 +4278,27 @@ struct vnop_readlink_args { }; #endif /* 0 */ +/* + * Returns: 0 Success + * lock_fsnode:ENOENT No such file or directory [only for VFS + * that is not thread safe & vnode is + * currently being/has been terminated] + * :EINVAL + * :??? + * + * Note: The return codes from the underlying VFS's readlink routine + * can't be fully enumerated here, since third party VFS authors + * may not limit their error returns to the ones documented here, + * even though this may result in some programs functioning + * incorrectly. + * + * The return codes documented above are those which may currently + * be returned by HFS from hfs_vnop_readlink, not including + * additional error code which may be propagated from underlying + * routines. + */ errno_t -VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t context) +VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t ctx) { int _err; struct vnop_readlink_args a; @@ -3857,7 +4308,7 @@ VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t context) a.a_desc = &vnop_readlink_desc; a.a_vp = vp; a.a_uio = uio; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -3885,7 +4336,7 @@ struct vnop_inactive_args { }; #endif /* 0*/ errno_t -VNOP_INACTIVE(struct vnode *vp, vfs_context_t context) +VNOP_INACTIVE(struct vnode *vp, vfs_context_t ctx) { int _err; struct vnop_inactive_args a; @@ -3894,7 +4345,7 @@ VNOP_INACTIVE(struct vnode *vp, vfs_context_t context) a.a_desc = &vnop_inactive_desc; a.a_vp = vp; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -3906,6 +4357,21 @@ VNOP_INACTIVE(struct vnode *vp, vfs_context_t context) if (!thread_safe) { unlock_fsnode(vp, &funnel_state); } + +#if NAMEDSTREAMS + /* For file systems that do not support namedstreams natively, mark + * the shadow stream file vnode to be recycled as soon as the last + * reference goes away. To avoid re-entering reclaim code, do not + * call recycle on terminating named stream vnodes. 
+ */ + if (vnode_isnamedstream(vp) && + (vp->v_parent != NULLVP) && + ((vp->v_parent->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS) == 0) && + ((vp->v_lflag & VL_TERMINATE) == 0)) { + vnode_recycle(vp); + } +#endif + return (_err); } @@ -3923,7 +4389,7 @@ struct vnop_reclaim_args { }; #endif /* 0*/ errno_t -VNOP_RECLAIM(struct vnode *vp, vfs_context_t context) +VNOP_RECLAIM(struct vnode *vp, vfs_context_t ctx) { int _err; struct vnop_reclaim_args a; @@ -3932,7 +4398,7 @@ VNOP_RECLAIM(struct vnode *vp, vfs_context_t context) a.a_desc = &vnop_reclaim_desc; a.a_vp = vp; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -3946,6 +4412,13 @@ VNOP_RECLAIM(struct vnode *vp, vfs_context_t context) } +/* + * Returns: 0 Success + * lock_fsnode:ENOENT No such file or directory [only for VFS + * that is not thread safe & vnode is + * currently being/has been terminated] + * :??? [per FS implementation specific] + */ #if 0 /* *# @@ -3961,7 +4434,7 @@ struct vnop_pathconf_args { }; #endif /* 0*/ errno_t -VNOP_PATHCONF(struct vnode *vp, int name, register_t *retval, vfs_context_t context) +VNOP_PATHCONF(struct vnode *vp, int name, register_t *retval, vfs_context_t ctx) { int _err; struct vnop_pathconf_args a; @@ -3972,7 +4445,7 @@ VNOP_PATHCONF(struct vnode *vp, int name, register_t *retval, vfs_context_t cont a.a_vp = vp; a.a_name = name; a.a_retval = retval; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -3987,6 +4460,17 @@ VNOP_PATHCONF(struct vnode *vp, int name, register_t *retval, vfs_context_t cont return (_err); } +/* + * Returns: 0 Success + * err_advlock:ENOTSUP + * lf_advlock:??? + * :??? + * + * Notes: VFS implementations of advisory locking using calls through + * because lock enforcement does not occur + * locally should try to limit themselves to the return codes + * documented above for lf_advlock and err_advlock. 
+ */ #if 0 /* *# @@ -4004,7 +4488,7 @@ struct vnop_advlock_args { }; #endif /* 0*/ errno_t -VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t context) +VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx) { int _err; struct vnop_advlock_args a; @@ -4018,7 +4502,7 @@ VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, a.a_op = op; a.a_fl = fl; a.a_flags = flags; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); uth = get_bsdthread_info(current_thread()); @@ -4063,7 +4547,7 @@ struct vnop_allocate_args { #endif /* 0*/ errno_t -VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t context) +VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t ctx) { int _err; struct vnop_allocate_args a; @@ -4076,7 +4560,7 @@ VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesalloc a.a_flags = flags; a.a_bytesallocated = bytesallocated; a.a_offset = offset; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -4109,7 +4593,7 @@ struct vnop_pagein_args { }; #endif /* 0*/ errno_t -VNOP_PAGEIN(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t context) +VNOP_PAGEIN(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx) { int _err; struct vnop_pagein_args a; @@ -4123,7 +4607,7 @@ VNOP_PAGEIN(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, s a.a_f_offset = f_offset; a.a_size = size; a.a_flags = flags; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -4155,7 +4639,7 @@ struct vnop_pageout_args { #endif /* 0*/ errno_t -VNOP_PAGEOUT(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t context) +VNOP_PAGEOUT(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx) { int _err; struct vnop_pageout_args a; @@ -4169,7 +4653,7 @@ VNOP_PAGEOUT(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, a.a_f_offset = f_offset; a.a_size = size; a.a_flags = flags; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -4208,7 +4692,7 @@ struct vnop_searchfs_args { #endif /* 0*/ errno_t -VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, u_long maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, u_long *nummatches, u_long scriptcode, u_long options, struct uio *uio, struct searchstate *searchstate, vfs_context_t context) +VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, u_long maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, u_long *nummatches, u_long scriptcode, u_long options, struct uio *uio, struct searchstate *searchstate, vfs_context_t ctx) { int _err; struct vnop_searchfs_args a; @@ -4228,7 +4712,7 @@ VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct a.a_options = options; a.a_uio = uio; a.a_searchstate = searchstate; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -4264,7 +4748,7 @@ struct vnop_copyfile_args { #endif /* 
0*/ errno_t VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, - int mode, int flags, vfs_context_t context) + int mode, int flags, vfs_context_t ctx) { int _err; struct vnop_copyfile_args a; @@ -4275,14 +4759,13 @@ VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct c a.a_tcnp = tcnp; a.a_mode = mode; a.a_flags = flags; - a.a_context = context; + a.a_context = ctx; _err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a); return (_err); } - errno_t -VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t context) +VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t ctx) { struct vnop_getxattr_args a; int error; @@ -4295,7 +4778,7 @@ VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options a.a_uio = uio; a.a_size = size; a.a_options = options; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -4311,7 +4794,7 @@ VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options } errno_t -VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t context) +VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t ctx) { struct vnop_setxattr_args a; int error; @@ -4323,7 +4806,7 @@ VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_ a.a_name = name; a.a_uio = uio; a.a_options = options; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -4335,11 +4818,13 @@ VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_ if (!thread_safe) { unlock_fsnode(vp, &funnel_state); } + if (error == 0) + vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS); return (error); } errno_t -VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t context) +VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t ctx) { struct vnop_removexattr_args a; int error; @@ -4350,7 +4835,7 @@ VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t contex a.a_vp = vp; a.a_name = name; a.a_options = options; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -4366,7 +4851,7 @@ VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t contex } errno_t -VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t context) +VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t ctx) { struct vnop_listxattr_args a; int error; @@ -4378,7 +4863,7 @@ VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t c a.a_uio = uio; a.a_size = size; a.a_options = options; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -4487,18 +4972,15 @@ struct vnop_blockmap_args { }; #endif /* 0*/ errno_t -VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t context) +VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t ctx) { int _err; struct vnop_blockmap_args a; int thread_safe; int funnel_state = 0; - struct vfs_context acontext; - if (context == NULL) { - acontext.vc_proc = current_proc(); - acontext.vc_ucred = kauth_cred_get(); - context = &acontext; + if (ctx == 
NULL) { + ctx = vfs_context_current(); } a.a_desc = &vnop_blockmap_desc; a.a_vp = vp; @@ -4508,7 +4990,7 @@ VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size a.a_run = run; a.a_poff = poff; a.a_flags = flags; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -4565,7 +5047,7 @@ struct vnop_kqfilt_add_args { }; #endif errno_t -VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t context) +VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t ctx) { int _err; struct vnop_kqfilt_add_args a; @@ -4575,7 +5057,7 @@ VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t context) a.a_desc = VDESC(vnop_kqfilt_add); a.a_vp = vp; a.a_kn = kn; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -4599,7 +5081,7 @@ struct vnop_kqfilt_remove_args { }; #endif errno_t -VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t context) +VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t ctx) { int _err; struct vnop_kqfilt_remove_args a; @@ -4609,7 +5091,7 @@ VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t context) a.a_desc = VDESC(vnop_kqfilt_remove); a.a_vp = vp; a.a_ident = ident; - a.a_context = context; + a.a_context = ctx; thread_safe = THREAD_SAFE_FS(vp); if (!thread_safe) { @@ -4624,3 +5106,101 @@ VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t context) return(_err); } +#if 0 +struct vnop_setlabel_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + struct label *a_vl; + vfs_context_t a_context; +}; +#endif +errno_t +VNOP_SETLABEL(struct vnode *vp, struct label *label, vfs_context_t ctx) +{ + int _err; + struct vnop_setlabel_args a; + int thread_safe; + int funnel_state = 0; + + a.a_desc = VDESC(vnop_setlabel); + a.a_vp = vp; + a.a_vl = label; + a.a_context = ctx; + thread_safe = THREAD_SAFE_FS(vp); + + if (!thread_safe) { + if ( (_err = lock_fsnode(vp, &funnel_state)) ) { + return (_err); + } + } + _err = (*vp->v_op[vnop_setlabel_desc.vdesc_offset])(&a); + if (!thread_safe) { + unlock_fsnode(vp, &funnel_state); + } + return(_err); +} + + +#if NAMEDSTREAMS +/* + * Get a named streamed + */ +errno_t +VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperation operation, int flags, vfs_context_t ctx) +{ + struct vnop_getnamedstream_args a; + + if (!THREAD_SAFE_FS(vp)) + return (ENOTSUP); + a.a_desc = &vnop_getnamedstream_desc; + a.a_vp = vp; + a.a_svpp = svpp; + a.a_name = name; + a.a_operation = operation; + a.a_flags = flags; + a.a_context = ctx; + + return (*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a); +} + +/* + * Create a named streamed + */ +errno_t +VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs_context_t ctx) +{ + struct vnop_makenamedstream_args a; + + if (!THREAD_SAFE_FS(vp)) + return (ENOTSUP); + a.a_desc = &vnop_makenamedstream_desc; + a.a_vp = vp; + a.a_svpp = svpp; + a.a_name = name; + a.a_flags = flags; + a.a_context = ctx; + + return (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a); +} + + +/* + * Remove a named streamed + */ +errno_t +VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs_context_t ctx) +{ + struct vnop_removenamedstream_args a; + + if (!THREAD_SAFE_FS(vp)) + return (ENOTSUP); + a.a_desc = &vnop_removenamedstream_desc; + a.a_vp = vp; + a.a_svp = svp; + a.a_name = name; + a.a_flags = flags; + a.a_context = ctx; + + return 
(*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a); +} +#endif
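
Reader's note (not part of the patch): after this change, the VNOP_* wrappers in kpi_vfs.c share one common shape -- fall back to vfs_context_current() when the caller passes a NULL context (replacing the old hand-built "struct vfs_context acontext"), fill in the per-operation argument block, take the funnel via lock_fsnode() only for filesystems that are not marked thread-safe, dispatch through the vnode's v_op vector, then drop the funnel. The sketch below is a distilled illustration of that pattern, not code from the file: VNOP_EXAMPLE, vnop_example_desc, and struct vnop_example_args are hypothetical placeholders, and it relies on this file's private helpers (THREAD_SAFE_FS, lock_fsnode, unlock_fsnode), so consult the real wrappers above for exact behavior.

/*
 * Illustrative sketch only -- not part of the patch.  Names marked
 * "hypothetical" do not exist in xnu; the helpers used are the ones
 * defined earlier in this file.
 */
errno_t
VNOP_EXAMPLE(vnode_t vp, int example_arg, vfs_context_t ctx)	/* hypothetical */
{
	int _err;
	struct vnop_example_args a;	/* hypothetical argument block */
	int thread_safe;
	int funnel_state = 0;

	if (ctx == NULL) {
		/* replaces the old hand-rolled "struct vfs_context acontext" setup */
		ctx = vfs_context_current();
	}
	a.a_desc = &vnop_example_desc;	/* hypothetical descriptor */
	a.a_vp = vp;
	a.a_example = example_arg;
	a.a_context = ctx;
	thread_safe = THREAD_SAFE_FS(vp);

	if (!thread_safe) {
		/* non-thread-safe filesystems still funnel through lock_fsnode() */
		if ( (_err = lock_fsnode(vp, &funnel_state)) ) {
			return (_err);
		}
	}
	_err = (*vp->v_op[vnop_example_desc.vdesc_offset])(&a);
	if (!thread_safe) {
		unlock_fsnode(vp, &funnel_state);
	}
	return (_err);
}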