/*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
*
* @(#)union_subr.c 8.20 (Berkeley) 5/20/95
*/
+/*
+ * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
+ * support for mandatory and extensible security protections. This notice
+ * is included in support of clause 2.2 (b) of the Apple Public License,
+ * Version 2.0.
+ */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode_internal.h>
#include <sys/namei.h>
#include <sys/malloc.h>
-#include <sys/file.h>
+#include <sys/file_internal.h>
#include <sys/filedesc.h>
#include <sys/queue.h>
#include <sys/mount_internal.h>
#include <sys/ubc.h>
#include <sys/uio_internal.h>
#include <miscfs/union/union.h>
-
-#if DIAGNOSTIC
-#include <sys/proc.h>
+#include <sys/lock.h>
+#include <sys/kdebug.h>
+#if CONFIG_MACF
+#include <security/mac_framework.h>
#endif
+
+static int union_vn_close(struct vnode *vp, int fmode, vfs_context_t ctx);
+
/* must be power of two, otherwise change UNION_HASH() */
#define NHASH 32
static LIST_HEAD(unhead, union_node) unhead[NHASH];
static int unvplock[NHASH];
+static lck_grp_t * union_lck_grp;
+static lck_grp_attr_t * union_lck_grp_attr;
+static lck_attr_t * union_lck_attr;
+static lck_mtx_t * union_mtxp;
+
+static int union_dircheck(struct vnode **, struct fileproc *, vfs_context_t ctx);
+static void union_newlower(struct union_node *, struct vnode *);
+static void union_newupper(struct union_node *, struct vnode *);
+
+
int
-union_init()
+union_init(__unused struct vfsconf *vfsp)
{
int i;
+ union_lck_grp_attr= lck_grp_attr_alloc_init();
+#if DIAGNOSTIC
+ lck_grp_attr_setstat(union_lck_grp_attr);
+#endif
+ union_lck_grp = lck_grp_alloc_init("union", union_lck_grp_attr);
+ union_lck_attr = lck_attr_alloc_init();
+#if DIAGNOSTIC
+ lck_attr_setdebug(union_lck_attr);
+#endif
+ union_mtxp = lck_mtx_alloc_init(union_lck_grp, union_lck_attr);
+
for (i = 0; i < NHASH; i++)
LIST_INIT(&unhead[i]);
bzero((caddr_t) unvplock, sizeof(unvplock));
+ /* add the hook for getdirentries */
+ union_dircheckp = union_dircheck;
+
+ return (0);
}
+void
+union_lock()
+{
+ lck_mtx_lock(union_mtxp);
+}
+
+void
+union_unlock()
+{
+ lck_mtx_unlock(union_mtxp);
+}
+
+
static int
-union_list_lock(ix)
- int ix;
+union_list_lock(int ix)
{
- if (unvplock[ix] & UN_LOCKED) {
- unvplock[ix] |= UN_WANT;
- sleep((caddr_t) &unvplock[ix], PINOD);
+ if (unvplock[ix] & UNVP_LOCKED) {
+ unvplock[ix] |= UNVP_WANT;
+ msleep((caddr_t) &unvplock[ix], union_mtxp, PINOD, "union_list_lock", NULL);
return (1);
}
- unvplock[ix] |= UN_LOCKED;
+ unvplock[ix] |= UNVP_LOCKED;
return (0);
}
static void
-union_list_unlock(ix)
- int ix;
+union_list_unlock(int ix)
{
- unvplock[ix] &= ~UN_LOCKED;
+ unvplock[ix] &= ~UNVP_LOCKED;
- if (unvplock[ix] & UN_WANT) {
- unvplock[ix] &= ~UN_WANT;
+ if (unvplock[ix] & UNVP_WANT) {
+ unvplock[ix] &= ~UNVP_WANT;
wakeup((caddr_t) &unvplock[ix]);
}
}
+/*
+ * union_updatevp:
+ *
+ * The uppervp, if not NULL, must be referenced and not locked by us
+ * The lowervp, if not NULL, must be referenced.
+ *
+ * If uppervp and lowervp match pointers already installed, then
+ * nothing happens. The passed vp's (when matching) are not adjusted.
+ *
+ * This routine may only be called by union_newupper() and
+ * union_newlower().
+ */
+
+/* always called with union lock held */
void
-union_updatevp(un, uppervp, lowervp)
- struct union_node *un;
- struct vnode *uppervp;
- struct vnode *lowervp;
+union_updatevp(struct union_node *un, struct vnode *uppervp,
+ struct vnode *lowervp)
{
int ohash = UNION_HASH(un->un_uppervp, un->un_lowervp);
int nhash = UNION_HASH(uppervp, lowervp);
int docache = (lowervp != NULLVP || uppervp != NULLVP);
int lhash, uhash;
+ vnode_t freevp;
+ vnode_t freedirvp;
+ caddr_t freepath;
/*
* Ensure locking is ordered from lower to higher
uhash = nhash;
}
- if (lhash != uhash)
+ if (lhash != uhash) {
while (union_list_lock(lhash))
continue;
+ }
while (union_list_lock(uhash))
continue;
union_list_unlock(ohash);
if (un->un_lowervp != lowervp) {
+ freevp = freedirvp = NULLVP;
+ freepath = (caddr_t)0;
if (un->un_lowervp) {
- vnode_put(un->un_lowervp);
+ freevp = un->un_lowervp;
+ un->un_lowervp = lowervp;
if (un->un_path) {
- _FREE(un->un_path, M_TEMP);
+ freepath = un->un_path;
un->un_path = 0;
}
if (un->un_dirvp) {
- vnode_put(un->un_dirvp);
+ freedirvp = un->un_dirvp;
un->un_dirvp = NULLVP;
}
- }
- un->un_lowervp = lowervp;
+ union_unlock();
+ if (freevp)
+ vnode_put(freevp);
+ if (freedirvp)
+ vnode_put(freedirvp);
+ if (freepath)
+ _FREE(un->un_path, M_TEMP);
+ union_lock();
+ } else
+ un->un_lowervp = lowervp;
+ if (lowervp != NULLVP)
+ un->un_lowervid = vnode_vid(lowervp);
un->un_lowersz = VNOVAL;
}
if (un->un_uppervp != uppervp) {
- if (un->un_uppervp)
- vnode_put(un->un_uppervp);
-
+ freevp = NULLVP;
+ if (un->un_uppervp) {
+ freevp = un->un_uppervp;
+ }
un->un_uppervp = uppervp;
+ if (uppervp != NULLVP)
+ un->un_uppervid = vnode_vid(uppervp);
un->un_uppersz = VNOVAL;
+ union_unlock();
+ if (freevp)
+ vnode_put(freevp);
+ union_lock();
}
if (docache && (ohash != nhash)) {
union_list_unlock(nhash);
}
-void
+/*
+ * Set a new lowervp. The passed lowervp must be referenced and will be
+ * stored in the vp in a referenced state.
+ */
+/* always called with union lock held */
+
+static void
union_newlower(un, lowervp)
struct union_node *un;
struct vnode *lowervp;
{
-
union_updatevp(un, un->un_uppervp, lowervp);
}
-void
+/*
+ * Set a new uppervp. The passed uppervp must be locked and will be
+ * stored in the vp in a locked state. The caller should not unlock
+ * uppervp.
+ */
+
+/* always called with union lock held */
+static void
union_newupper(un, uppervp)
struct union_node *un;
struct vnode *uppervp;
{
-
union_updatevp(un, uppervp, un->un_lowervp);
}
* If the size changes, then callback to the vm layer
* giving priority to the upper layer size.
*/
+/* always called with union lock held */
void
union_newsize(vp, uppersz, lowersz)
struct vnode *vp;
printf("union: %s size now %ld\n",
uppersz != VNOVAL ? "upper" : "lower", (long) sz);
#endif
+ union_unlock();
ubc_setsize(vp, sz);
+ union_lock();
}
}
/*
- * allocate a union_node/vnode pair. the vnode is
- * referenced and locked. the new vnode is returned
- * via (vpp). (mp) is the mountpoint of the union filesystem,
- * (dvp) is the parent directory where the upper layer object
- * should exist (but doesn't) and (cnp) is the componentname
- * information which is partially copied to allow the upper
- * layer object to be created at a later time. (uppervp)
- * and (lowervp) reference the upper and lower layer objects
- * being mapped. either, but not both, can be nil.
- * if supplied, (uppervp) is locked.
- * the reference is either maintained in the new union_node
- * object which is allocated, or they are vnode_put'd.
+ * union_allocvp: allocate a union_node and associate it with a
+ * parent union_node and one or two vnodes.
+ *
+ * vpp Holds the returned vnode locked and referenced if no
+ * error occurs.
+ *
+ * mp Holds the mount point. mp may or may not be busied.
+ * allocvp() makes no changes to mp.
+ *
+ * dvp Holds the parent union_node to the one we wish to create.
+ * XXX may only be used to traverse an uncopied lowervp-based
+ * tree? XXX
*
- * all union_nodes are maintained on a singly-linked
- * list. new nodes are only allocated when they cannot
- * be found on this list. entries on the list are
+ * dvp may or may not be locked. allocvp() makes no changes
+ * to dvp.
+ *
+ * upperdvp Holds the parent vnode to uppervp, generally used along
+ * with path component information to create a shadow of
+ * lowervp when uppervp does not exist.
+ *
+ * upperdvp is referenced but unlocked on entry, and will be
+ * dereferenced on return.
+ *
+ * uppervp Holds the new uppervp vnode to be stored in the
+ * union_node we are allocating. uppervp is referenced but
+ * not locked, and will be dereferenced on return.
+ *
+ * lowervp Holds the new lowervp vnode to be stored in the
+ * union_node we are allocating. lowervp is referenced but
+ * not locked, and will be dereferenced on return.
+ *
+ * cnp Holds path component information to be coupled with
+ * lowervp and upperdvp to allow unionfs to create an uppervp
+ * later on. Only used if lowervp is valid. The contents
+ * of cnp is only valid for the duration of the call.
+ *
+ * docache Determine whether this node should be entered in the
+ * cache or whether it should be destroyed as soon as possible.
+ *
+ * All union_nodes are maintained on a singly-linked
+ * list. New nodes are only allocated when they cannot
+ * be found on this list. Entries on the list are
* removed when the vfs reclaim entry is called.
*
- * a single lock is kept for the entire list. this is
+ * A single lock is kept for the entire list. This is
* needed because the getnewvnode() function can block
* waiting for a vnode to become free, in which case there
* may be more than one process trying to get the same
- * vnode. this lock is only taken if we are going to
- * call getnewvnode, since the kernel itself is single-threaded.
+ * vnode. This lock is only taken if we are going to
+ * call getnewvnode(), since the kernel itself is single-threaded.
*
- * if an entry is found on the list, then call vnode_get() to
- * take a reference. this is done because there may be
+ * If an entry is found on the list, then call vnode_get() to
+ * take a reference. This is done because there may be
* zero references to it and so it needs to removed from
* the vnode free list.
*/
+
+/* always called with union lock held */
+
int
-union_allocvp(vpp, mp, undvp, dvp, cnp, uppervp, lowervp, docache)
- struct vnode **vpp;
- struct mount *mp;
- struct vnode *undvp; /* parent union vnode */
- struct vnode *dvp; /* may be null */
- struct componentname *cnp; /* may be null */
- struct vnode *uppervp; /* may be null */
- struct vnode *lowervp; /* may be null */
- int docache;
+union_allocvp(struct vnode **vpp,
+ struct mount *mp,
+ struct vnode *undvp,
+ struct vnode *dvp,
+ struct componentname *cnp,
+ struct vnode *uppervp,
+ struct vnode *lowervp,
+ int docache)
{
int error;
- struct union_node *un;
- struct union_node **pp;
+ struct union_node *un = NULL;
+ struct union_node *unp;
struct vnode *xlowervp = NULLVP;
struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
- int hash;
+ int hash = 0; /* protected by docache */
int markroot;
int try;
- struct union_node *unp;
struct vnode_fsparam vfsp;
enum vtype vtype;
if (uppervp == NULLVP && lowervp == NULLVP)
panic("union: unidentifiable allocation");
+	/*
+	 * if both upper and lower vp are provided and are of different types,
+	 * consider lowervp as NULL
+	 */
if (uppervp && lowervp && (uppervp->v_type != lowervp->v_type)) {
xlowervp = lowervp;
lowervp = NULLVP;
((lowervp == NULLVP) || lowervp == um->um_lowervp)) {
if (lowervp == NULLVP) {
lowervp = um->um_lowervp;
- if (lowervp != NULLVP)
+ if (lowervp != NULLVP) {
+ union_unlock();
vnode_get(lowervp);
+ union_lock();
+ }
}
markroot = VROOT;
}
loop:
if (!docache) {
- un = 0;
+ un = NULL;
} else for (try = 0; try < 3; try++) {
switch (try) {
case 0:
case 2:
if (lowervp == NULLVP)
continue;
+ /* Not sure how this path gets exercised ? */
hash = UNION_HASH(NULLVP, lowervp);
break;
}
un->un_lowervp == NULLVP) &&
(un->un_uppervp == uppervp ||
un->un_uppervp == NULLVP) &&
- (UNIONTOV(un)->v_mount == mp)) {
- if (vnode_get(UNIONTOV(un))) {
- union_list_unlock(hash);
- goto loop;
- }
+ (un->un_mount == mp)) {
break;
}
}
* with uppervp.
*/
- if ((dvp != NULLVP) && (uppervp == dvp)) {
- /*
- * Access ``.'', so (un) will already
- * be locked. Since this process has
- * the lock on (uppervp) no other
- * process can hold the lock on (un).
- */
-#if DIAGNOSTIC
- if ((un->un_flags & UN_LOCKED) == 0)
- panic("union: . not locked");
- else if (current_proc() && un->un_pid != current_proc()->p_pid &&
- un->un_pid > -1 && current_proc()->p_pid > -1)
- panic("union: allocvp not lock owner");
-#endif
- } else {
- if (un->un_flags & UN_LOCKED) {
- vnode_put(UNIONTOV(un));
- un->un_flags |= UN_WANT;
- sleep((caddr_t) &un->un_flags, PINOD);
- goto loop;
+ if (un->un_flags & UN_LOCKED) {
+ un->un_flags |= UN_WANT;
+ msleep((caddr_t) &un->un_flags, union_mtxp, PINOD, "union node locked", 0);
+ goto loop;
+ }
+ un->un_flags |= UN_LOCKED;
+
+ union_unlock();
+ if (UNIONTOV(un) == NULLVP)
+ panic("null vnode in union node\n");
+ if (vnode_get(UNIONTOV(un))) {
+ union_lock();
+ un->un_flags &= ~UN_LOCKED;
+ if ((un->un_flags & UN_WANT) == UN_WANT) {
+ un->un_flags &= ~UN_LOCKED;
+ wakeup(&un->un_flags);
}
- un->un_flags |= UN_LOCKED;
-
-#if DIAGNOSTIC
- if (current_proc())
- un->un_pid = current_proc()->p_pid;
- else
- un->un_pid = -1;
-#endif
+ goto loop;
}
+ union_lock();
/*
* At this point, the union_node is locked,
if (uppervp != un->un_uppervp) {
union_newupper(un, uppervp);
} else if (uppervp) {
+ union_unlock();
vnode_put(uppervp);
- }
-
- if (un->un_uppervp) {
- un->un_flags |= UN_ULOCK;
- un->un_flags &= ~UN_KLOCK;
+ union_lock();
}
/*
union_newlower(un, lowervp);
if (cnp && (lowervp != NULLVP)) {
un->un_hash = cnp->cn_hash;
+ union_unlock();
MALLOC(un->un_path, caddr_t, cnp->cn_namelen+1,
M_TEMP, M_WAITOK);
bcopy(cnp->cn_nameptr, un->un_path,
cnp->cn_namelen);
- un->un_path[cnp->cn_namelen] = '\0';
vnode_get(dvp);
+ union_lock();
+ un->un_path[cnp->cn_namelen] = '\0';
un->un_dirvp = dvp;
}
} else if (lowervp) {
+ union_unlock();
vnode_put(lowervp);
+ union_lock();
}
*vpp = UNIONTOV(un);
+ un->un_flags &= ~UN_LOCKED;
+ if ((un->un_flags & UN_WANT) == UN_WANT) {
+ un->un_flags &= ~UN_WANT;
+ wakeup(&un->un_flags);
+ }
return (0);
}
goto loop;
}
+ union_unlock();
MALLOC(unp, void *, sizeof(struct union_node), M_TEMP, M_WAITOK);
+ union_lock();
+
+ bzero(unp, sizeof(struct union_node));
+ un = unp;
+ un->un_uppervp = uppervp;
+ if (uppervp != NULLVP)
+ un->un_uppervid = vnode_vid(uppervp);
+ un->un_uppersz = VNOVAL;
+ un->un_lowervp = lowervp;
+ if (lowervp != NULLVP)
+ un->un_lowervid = vnode_vid(lowervp);
+ un->un_lowersz = VNOVAL;
+ un->un_pvp = undvp;
+ if (undvp != NULLVP)
+ vnode_get(undvp);
+ un->un_dircache = 0;
+ un->un_openl = 0;
+ un->un_mount = mp;
+ un->un_flags = UN_LOCKED;
+#ifdef FAULTFS
+ if (UNION_FAULTIN(um))
+ un->un_flags |= UN_FAULTFS;
+#endif
+
+ if (docache) {
+ /* Insert with lock held */
+ LIST_INSERT_HEAD(&unhead[hash], un, un_cache);
+ un->un_flags |= UN_CACHED;
+ union_list_unlock(hash);
+ }
+
+ union_unlock();
if (uppervp)
vtype = uppervp->v_type;
else
vtype = lowervp->v_type;
- //bzero(&vfsp, sizeof(struct vnode_fsparam));
+
+ bzero(&vfsp, sizeof(struct vnode_fsparam));
vfsp.vnfs_mp = mp;
vfsp.vnfs_vtype = vtype;
vfsp.vnfs_str = "unionfs";
- vfsp.vnfs_dvp = dvp;
+ vfsp.vnfs_dvp = undvp;
vfsp.vnfs_fsnode = unp;
vfsp.vnfs_cnp = cnp;
vfsp.vnfs_vops = union_vnodeop_p;
error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, vpp);
if (error) {
- FREE(unp, M_TEMP);
+ /* XXXXX Is this right ???? XXXXXXX */
if (uppervp) {
vnode_put(uppervp);
}
if (lowervp)
vnode_put(lowervp);
- goto out;
+ union_lock();
+ if (un->un_flags & UN_CACHED) {
+ un->un_flags &= ~UN_CACHED;
+ LIST_REMOVE(un, un_cache);
+ }
+ if (docache)
+ union_list_unlock(hash);
+
+ FREE(unp, M_TEMP);
+
+ return (error);
}
- (*vpp)->v_tag = VT_UNION;
- un = VTOUNION(*vpp);
- un->un_vnode = *vpp;
- un->un_uppervp = uppervp;
- un->un_uppersz = VNOVAL;
- un->un_lowervp = lowervp;
- un->un_lowersz = VNOVAL;
- un->un_pvp = undvp;
- if (undvp != NULLVP)
- vnode_get(undvp);
- un->un_dircache = 0;
- un->un_openl = 0;
- un->un_flags = UN_LOCKED;
- if (un->un_uppervp)
- un->un_flags |= UN_ULOCK;
-#if DIAGNOSTIC
- if (current_proc())
- un->un_pid = current_proc()->p_pid;
- else
- un->un_pid = -1;
-#endif
if (cnp && (lowervp != NULLVP)) {
un->un_hash = cnp->cn_hash;
un->un_path = _MALLOC(cnp->cn_namelen+1, M_TEMP, M_WAITOK);
un->un_dirvp = 0;
}
- if (docache) {
- LIST_INSERT_HEAD(&unhead[hash], un, un_cache);
- un->un_flags |= UN_CACHED;
- }
-
if (xlowervp)
vnode_put(xlowervp);
-out:
- if (docache)
- union_list_unlock(hash);
+ union_lock();
+
+ vnode_settag(*vpp, VT_UNION);
+ un->un_vnode = *vpp;
+ if (un->un_vnode->v_type == VDIR) {
+ if (un->un_uppervp == NULLVP) {
+ panic("faulting fs and no upper vp for dir?");
+ }
+
+ }
+
+
+ un->un_flags &= ~UN_LOCKED;
+ if ((un->un_flags & UN_WANT) == UN_WANT) {
+ un->un_flags &= ~UN_WANT;
+ wakeup(&un->un_flags);
+ }
+
+ return(error);
- return (error);
}
+/* always called with union lock held */
int
-union_freevp(vp)
- struct vnode *vp;
+union_freevp(struct vnode *vp)
{
struct union_node *un = VTOUNION(vp);
LIST_REMOVE(un, un_cache);
}
+ union_unlock();
if (un->un_pvp != NULLVP)
vnode_put(un->un_pvp);
if (un->un_uppervp != NULLVP)
FREE(vp->v_data, M_TEMP);
vp->v_data = 0;
+ union_lock();
return (0);
}
* using a sequence of reads and writes. both (fvp)
* and (tvp) are locked on entry and exit.
*/
+/* called with no union lock held */
int
-union_copyfile(struct vnode *fvp, struct vnode *tvp, kauth_cred_t cred,
- struct proc *p)
+union_copyfile(struct vnode *fvp, struct vnode *tvp, vfs_context_t context)
{
char *bufp;
struct uio uio;
struct iovec_32 iov;
- struct vfs_context context;
int error = 0;
/*
* give up at the first sign of trouble.
*/
- context.vc_proc = p;
- context.vc_ucred = cred;
#if 1 /* LP64todo - can't use new segment flags until the drivers are ready */
uio.uio_segflg = UIO_SYSSPACE;
iov.iov_len = MAXPHYSIO;
uio_setresid(&uio, iov.iov_len);
uio.uio_rw = UIO_READ;
- error = VNOP_READ(fvp, &uio, 0, &context);
+ error = VNOP_READ(fvp, &uio, 0, context);
if (error == 0) {
uio.uio_iovs.iov32p = &iov;
break;
do {
- error = VNOP_WRITE(tvp, &uio, 0, &context);
+ error = VNOP_WRITE(tvp, &uio, 0, context);
} while ((uio_resid(&uio) > 0) && (error == 0));
}
* (un) is assumed to be locked on entry and remains
* locked on exit.
*/
+/* always called with union lock held */
int
-union_copyup(struct union_node *un, int docopy, kauth_cred_t cred,
- struct proc *p)
+union_copyup(struct union_node *un, int docopy, vfs_context_t context)
{
int error;
struct vnode *lvp, *uvp;
- struct vfs_context context;
+ struct vnode_attr vattr;
+ mode_t cmode = 0;
- error = union_vn_create(&uvp, un, p);
- if (error)
- return (error);
+
+ lvp = un->un_lowervp;
- context.vc_proc = p;
- context.vc_ucred = cred;
+ union_unlock();
+ if (UNNODE_FAULTIN(un)) {
+ /* Need to inherit exec mode in faulting fs */
+ VATTR_INIT(&vattr);
+ VATTR_WANTED(&vattr, va_flags);
+ if (vnode_getattr(lvp, &vattr, context) == 0 )
+ cmode = vattr.va_mode;
+
+ }
+ error = union_vn_create(&uvp, un, cmode, context);
+ if (error) {
+ union_lock();
+ if (error == EEXIST) {
+ if (uvp != NULLVP) {
+ union_newupper(un, uvp);
+ error = 0;
+ }
+ }
+ return (error);
+ }
+
+ union_lock();
/* at this point, uppervp is locked */
union_newupper(un, uvp);
- un->un_flags |= UN_ULOCK;
+ union_unlock();
- lvp = un->un_lowervp;
if (docopy) {
/*
* XX - should not ignore errors
* from vnop_close
*/
- error = VNOP_OPEN(lvp, FREAD, &context);
+ error = VNOP_OPEN(lvp, FREAD, context);
if (error == 0) {
- error = union_copyfile(lvp, uvp, cred, p);
- (void) VNOP_CLOSE(lvp, FREAD, &context);
+ error = union_copyfile(lvp, uvp, context);
+ (void) VNOP_CLOSE(lvp, FREAD, context);
}
#ifdef UNION_DIAGNOSTIC
if (error == 0)
#endif
}
- un->un_flags &= ~UN_ULOCK;
- union_vn_close(uvp, FWRITE, cred, p);
- un->un_flags |= UN_ULOCK;
+ union_vn_close(uvp, FWRITE, context);
/*
* Subsequent IOs will go to the top layer, so
* the right thing with (cred) and (FREAD) though.
* Ignoring error returns is not right, either.
*/
+
+	/* No need to hold the lock as the union node should be locked for this (it is in faultin mode) */
if (error == 0) {
int i;
for (i = 0; i < un->un_openl; i++) {
- (void) VNOP_CLOSE(lvp, FREAD, &context);
- (void) VNOP_OPEN(uvp, FREAD, &context);
+ (void) VNOP_CLOSE(lvp, FREAD, context);
+ (void) VNOP_OPEN(uvp, FREAD, context);
}
un->un_openl = 0;
}
+ union_lock();
+
return (error);
}
+
+int
+union_faultin_copyup(struct vnode **vpp, vnode_t udvp, vnode_t lvp, struct componentname * cnp, vfs_context_t context)
+{
+ int error;
+ struct vnode *uvp;
+ struct vnode_attr vattr;
+ struct vnode_attr *vap;
+ mode_t cmode = 0;
+ int fmode = FFLAGS(O_WRONLY|O_CREAT|O_TRUNC|O_EXCL);
+ struct proc * p = vfs_context_proc(context);
+ struct componentname cn;
+
+
+ vap = &vattr;
+ VATTR_INIT(vap);
+ VATTR_WANTED(vap, va_flags);
+ if (vnode_getattr(lvp, vap, context) == 0 )
+ cmode = vattr.va_mode;
+
+ *vpp = NULLVP;
+
+
+ if (cmode == (mode_t)0)
+ cmode = UN_FILEMODE & ~p->p_fd->fd_cmask;
+ else
+ cmode = cmode & ~p->p_fd->fd_cmask;
+
+
+ /*
+ * Build a new componentname structure (for the same
+	 * reasons outlined in union_mkshadow().
+ * The difference here is that the file is owned by
+ * the current user, rather than by the person who
+ * did the mount, since the current user needs to be
+ * able to write the file (that's why it is being
+ * copied in the first place).
+ */
+ bzero(&cn, sizeof(struct componentname));
+
+ cn.cn_namelen = cnp->cn_namelen;
+ cn.cn_pnbuf = (caddr_t) _MALLOC_ZONE(cn.cn_namelen+1,
+ M_NAMEI, M_WAITOK);
+ cn.cn_pnlen = cn.cn_namelen+1;
+ bcopy(cnp->cn_nameptr, cn.cn_pnbuf, cn.cn_namelen+1);
+ cn.cn_nameiop = CREATE;
+ cn.cn_flags = (HASBUF|SAVENAME|SAVESTART|ISLASTCN|UNIONCREATED);
+ cn.cn_context = context;
+ cn.cn_nameptr = cn.cn_pnbuf;
+ cn.cn_hash = 0;
+ cn.cn_consume = 0;
+
+ /*
+ * Pass dvp unlocked and referenced on call to relookup().
+ *
+ * If an error occurs, dvp will be returned unlocked and dereferenced.
+ */
+ if ((error = relookup(udvp, &uvp, &cn)) != 0) {
+ goto out;
+ }
+
+ /*
+ * If no error occurs, dvp will be returned locked with the reference
+ * left as before, and vpp will be returned referenced and locked.
+ */
+ if (uvp) {
+ *vpp = uvp;
+ error = EEXIST;
+ goto out;
+ }
+
+ /*
+ * Good - there was no race to create the file
+ * so go ahead and create it. The permissions
+ * on the file will be 0666 modified by the
+ * current user's umask. Access to the file, while
+ * it is unioned, will require access to the top *and*
+ * bottom files. Access when not unioned will simply
+ * require access to the top-level file.
+ *
+ * TODO: confirm choice of access permissions.
+ * decide on authorisation behaviour
+ */
+
+ VATTR_INIT(vap);
+ VATTR_SET(vap, va_type, VREG);
+ VATTR_SET(vap, va_mode, cmode);
+
+ cn.cn_flags |= (UNIONCREATED);
+ if ((error = vn_create(udvp, &uvp, &cn, vap, 0, context)) != 0) {
+ goto out;
+ }
+
+
+ if ((error = VNOP_OPEN(uvp, fmode, context)) != 0) {
+ vn_clearunionwait(uvp, 0);
+ vnode_recycle(uvp);
+ vnode_put(uvp);
+ goto out;
+ }
+
+ error = vnode_ref_ext(uvp, fmode);
+ if (error ) {
+ vn_clearunionwait(uvp, 0);
+ VNOP_CLOSE(uvp, fmode, context);
+ vnode_recycle(uvp);
+ vnode_put(uvp);
+ goto out;
+ }
+
+
+ /*
+ * XX - should not ignore errors
+ * from vnop_close
+ */
+ error = VNOP_OPEN(lvp, FREAD, context);
+ if (error == 0) {
+ error = union_copyfile(lvp, uvp, context);
+ (void) VNOP_CLOSE(lvp, FREAD, context);
+ }
+
+ VNOP_CLOSE(uvp, fmode, context);
+ vnode_rele_ext(uvp, fmode, 0);
+ vn_clearunionwait(uvp, 0);
+
+ *vpp = uvp;
+out:
+ if ((cn.cn_flags & HASBUF) == HASBUF) {
+ FREE_ZONE(cn.cn_pnbuf, cn.cn_pnlen, M_NAMEI);
+ cn.cn_flags &= ~HASBUF;
+ }
+ return (error);
+}
+
+
+/*
+ * union_relookup:
+ *
+ * dvp should be locked on entry and will be locked on return. No
+ * net change in the ref count will occur.
+ *
+ * If an error is returned, *vpp will be invalid, otherwise it
+ * will hold a locked, referenced vnode. If *vpp == dvp then
+ * remember that only one exclusive lock is held.
+ */
+
+/* No union lock held for this call */
static int
-union_relookup(um, dvp, vpp, cnp, cn, path, pathlen)
- struct union_mount *um;
- struct vnode *dvp;
- struct vnode **vpp;
- struct componentname *cnp;
- struct componentname *cn;
- char *path;
- int pathlen;
+union_relookup(
+#ifdef XXX_HELP_ME
+ struct union_mount *um,
+#else /* !XXX_HELP_ME */
+ __unused struct union_mount *um,
+#endif /* !XXX_HELP_ME */
+ struct vnode *dvp,
+ struct vnode **vpp,
+ struct componentname *cnp,
+ struct componentname *cn,
+ char *path,
+ int pathlen)
{
int error;
cn->cn_pnbuf[cn->cn_namelen] = '\0';
cn->cn_nameiop = CREATE;
- cn->cn_flags = (LOCKPARENT|HASBUF|SAVENAME|SAVESTART|ISLASTCN);
+ cn->cn_flags = (HASBUF|SAVENAME|SAVESTART|ISLASTCN );
#ifdef XXX_HELP_ME
cn->cn_proc = cnp->cn_proc;
if (um->um_op == UNMNT_ABOVE)
#endif
cn->cn_context = cnp->cn_context; /* XXX !UNMNT_ABOVE case ??? */
cn->cn_nameptr = cn->cn_pnbuf;
- cn->cn_hash = cnp->cn_hash;
+ cn->cn_hash = 0;
cn->cn_consume = cnp->cn_consume;
vnode_get(dvp);
error = relookup(dvp, vpp, cn);
- if (!error)
- vnode_put(dvp);
+ vnode_put(dvp);
return (error);
}
*
* (um) points to the union mount structure for access to the
* the mounting process's credentials.
- * (dvp) is the directory in which to create the shadow directory.
- * it is unlocked on entry and exit.
- * (cnp) is the componentname to be created.
+ * (dvp) is the directory in which to create the shadow directory,
+ * It is locked (but not ref'd) on entry and return.
+ * (cnp) is the component name to be created.
* (vpp) is the returned newly created shadow directory, which
- * is returned locked.
+ * is returned locked and ref'd
*/
+/* No union lock held for this call */
int
union_mkshadow(um, dvp, cnp, vpp)
struct union_mount *um;
struct vnode_attr va;
struct componentname cn;
+ bzero(&cn, sizeof(struct componentname));
+
+
error = union_relookup(um, dvp, vpp, cnp, &cn,
cnp->cn_nameptr, cnp->cn_namelen);
- if (error)
- return (error);
+ if (error)
+ goto out;
if (*vpp) {
- vnode_put(*vpp);
- *vpp = NULLVP;
- return (EEXIST);
+ error = EEXIST;
+ goto out;
}
/*
- * policy: when creating the shadow directory in the
+ * Policy: when creating the shadow directory in the
* upper layer, create it owned by the user who did
* the mount, group from parent directory, and mode
* 777 modified by umask (ie mostly identical to the
* mkdir syscall). (jsp, kb)
*/
+
VATTR_INIT(&va);
VATTR_SET(&va, va_type, VDIR);
VATTR_SET(&va, va_mode, um->um_cmode);
error = vn_create(dvp, vpp, &cn, &va, 0, cnp->cn_context);
+out:
+ if ((cn.cn_flags & HASBUF) == HASBUF) {
+ FREE_ZONE(cn.cn_pnbuf, cn.cn_pnlen, M_NAMEI);
+ cn.cn_flags &= ~HASBUF;
+ }
return (error);
}
* it is locked on entry and exit.
* (cnp) is the componentname to be created.
*/
+/* No union lock held for this call */
int
union_mkwhiteout(um, dvp, cnp, path)
struct union_mount *um;
struct vnode *wvp;
struct componentname cn;
+ bzero(&cn, sizeof(struct componentname));
+
error = union_relookup(um, dvp, &wvp, cnp, &cn, path, strlen(path));
if (error) {
- return (error);
+ goto out;
}
if (wvp) {
- vnode_put(dvp);
- vnode_put(wvp);
- return (EEXIST);
+ error = EEXIST;
+ goto out;
}
error = VNOP_WHITEOUT(dvp, &cn, CREATE, cnp->cn_context);
- vnode_put(dvp);
-
+out:
+ if ((cn.cn_flags & HASBUF) == HASBUF) {
+ FREE_ZONE(cn.cn_pnbuf, cn.cn_pnlen, M_NAMEI);
+ cn.cn_flags &= ~HASBUF;
+ }
return (error);
}
+
/*
* union_vn_create: creates and opens a new shadow file
- * on the upper union layer. this function is similar
- * in spirit to calling vn_open but it avoids calling namei().
- * the problem with calling namei is that a) it locks too many
+ * on the upper union layer. This function is similar
+ * in spirit to calling vn_open() but it avoids calling namei().
+ * The problem with calling namei() is that a) it locks too many
* things, and b) it doesn't start at the "right" directory,
- * whereas relookup is told where to start.
+ * whereas relookup() is told where to start.
+ *
+ * On entry, the vnode associated with un is locked. It remains locked
+ * on return.
+ *
+ * If no error occurs, *vpp contains a locked referenced vnode for your
+ * use. If an error occurs, *vpp is undefined.
*/
+/* called with no union lock held */
int
-union_vn_create(vpp, un, p)
- struct vnode **vpp;
- struct union_node *un;
- struct proc *p;
+union_vn_create(struct vnode **vpp, struct union_node *un, mode_t cmode, vfs_context_t context)
{
struct vnode *vp;
struct vnode_attr vat;
struct vnode_attr *vap = &vat;
- struct vfs_context context;
int fmode = FFLAGS(O_WRONLY|O_CREAT|O_TRUNC|O_EXCL);
int error;
- int cmode = UN_FILEMODE & ~p->p_fd->fd_cmask;
- char *cp;
+ struct proc * p = vfs_context_proc(context);
struct componentname cn;
+ bzero(&cn, sizeof(struct componentname));
*vpp = NULLVP;
- context.vc_proc = p;
- context.vc_ucred = kauth_cred_proc_ref(p);
+ if (cmode == (mode_t)0)
+ cmode = UN_FILEMODE & ~p->p_fd->fd_cmask;
+ else
+ cmode = cmode & ~p->p_fd->fd_cmask;
+
/*
* Build a new componentname structure (for the same
- * reasons outlines in union_mkshadow).
+ * reasons outlined in union_mkshadow()).
* The difference here is that the file is owned by
* the current user, rather than by the person who
* did the mount, since the current user needs to be
cn.cn_pnlen = cn.cn_namelen+1;
bcopy(un->un_path, cn.cn_pnbuf, cn.cn_namelen+1);
cn.cn_nameiop = CREATE;
- cn.cn_flags = (LOCKPARENT|HASBUF|SAVENAME|SAVESTART|ISLASTCN);
- cn.cn_context = &context;
+ if (UNNODE_FAULTIN(un))
+ cn.cn_flags = (HASBUF|SAVENAME|SAVESTART|ISLASTCN|UNIONCREATED);
+ else
+ cn.cn_flags = (HASBUF|SAVENAME|SAVESTART|ISLASTCN);
+ cn.cn_context = context;
cn.cn_nameptr = cn.cn_pnbuf;
cn.cn_hash = un->un_hash;
cn.cn_consume = 0;
+ /*
+ * Pass dvp unlocked and referenced on call to relookup().
+ *
+ * If an error occurs, dvp will be returned unlocked and dereferenced.
+ */
vnode_get(un->un_dirvp);
- if (error = relookup(un->un_dirvp, &vp, &cn)) {
- kauth_cred_unref(&context.vc_ucred);
- return (error);
+ if ((error = relookup(un->un_dirvp, &vp, &cn)) != 0) {
+ vnode_put(un->un_dirvp);
+ goto out;
}
vnode_put(un->un_dirvp);
+ /*
+ * If no error occurs, dvp will be returned locked with the reference
+ * left as before, and vpp will be returned referenced and locked.
+ */
if (vp) {
- vnode_put(un->un_dirvp);
- kauth_cred_unref(&context.vc_ucred);
- vnode_put(vp);
- return (EEXIST);
+ *vpp = vp;
+ error = EEXIST;
+ goto out;
}
/*
VATTR_SET(vap, va_type, VREG);
VATTR_SET(vap, va_mode, cmode);
- if (error = vn_create(un->un_dirvp, &vp, &cn, vap, 0, &context)) {
- kauth_cred_unref(&context.vc_ucred);
- return (error);
+ if ((error = vn_create(un->un_dirvp, &vp, &cn, vap, 0, context)) != 0) {
+ goto out;
}
- if (error = VNOP_OPEN(vp, fmode, &context)) {
+ if ((error = VNOP_OPEN(vp, fmode, context)) != 0) {
vnode_put(vp);
- kauth_cred_unref(&context.vc_ucred);
- return (error);
+ goto out;
}
vnode_lock(vp);
panic("union: v_writecount");
vnode_unlock(vp);
*vpp = vp;
- kauth_cred_unref(&context.vc_ucred);
- return (0);
+ error = 0;
+
+out:
+ if ((cn.cn_flags & HASBUF) == HASBUF) {
+ FREE_ZONE(cn.cn_pnbuf, cn.cn_pnlen, M_NAMEI);
+ cn.cn_flags &= ~HASBUF;
+ }
+ return(error);
}
-int
-union_vn_close(struct vnode *vp, int fmode, kauth_cred_t cred,
- struct proc *p)
+/* called with no union lock held */
+static int
+union_vn_close(struct vnode *vp, int fmode, vfs_context_t context)
{
- struct vfs_context context;
-
- context.vc_proc = p;
- context.vc_ucred = cred;
if (fmode & FWRITE) {
vnode_lock(vp);
--vp->v_writecount;
vnode_unlock(vp);
}
- return (VNOP_CLOSE(vp, fmode, &context));
+ return (VNOP_CLOSE(vp, fmode, context));
}
+/*
+ * union_removed_upper:
+ *
+ * An upper-only file/directory has been removed; un-cache it so
+ * that the unionfs vnode gets reclaimed and the last uppervp reference
+ * disappears.
+ *
+ * Called with union_node unlocked.
+ */
+/* always called with union lock held */
void
union_removed_upper(un)
struct union_node *un;
{
- struct proc *p = current_proc(); /* XXX */
-
union_newupper(un, NULLVP);
if (un->un_flags & UN_CACHED) {
un->un_flags &= ~UN_CACHED;
LIST_REMOVE(un, un_cache);
}
- if (un->un_flags & UN_ULOCK) {
- un->un_flags &= ~UN_ULOCK;
- }
}
#if 0
#endif
/*
- * determine whether a whiteout is needed
+ * Determine whether a whiteout is needed
* during a remove/rmdir operation.
*/
+/* called with no union lock held */
int
union_dowhiteout(struct union_node *un, vfs_context_t ctx)
{
struct vnode_attr va;
- if (un->un_lowervp != NULLVP)
+ if (UNNODE_FAULTIN(un))
+ return(0);
+
+ if ((un->un_lowervp != NULLVP) )
return (1);
VATTR_INIT(&va);
return (0);
}
+/* called with no union lock held */
static void
-union_dircache_r(vp, vppp, cntp)
- struct vnode *vp;
- struct vnode ***vppp;
- int *cntp;
+union_dircache_r(struct vnode *vp, struct vnode ***vppp, int *cntp)
{
struct union_node *un;
union_dircache_r(un->un_lowervp, vppp, cntp);
}
+/* called with no union lock held */
struct vnode *
-union_dircache(vp, p)
- struct vnode *vp;
- struct proc *p;
+union_dircache(struct vnode *vp, __unused vfs_context_t context)
{
int count;
- struct vnode *nvp;
+ struct vnode *nvp, *lvp;
struct vnode **vpp;
- struct vnode **dircache;
+ struct vnode **dircache, **newdircache;
struct union_node *un;
int error;
+ int alloced = 0;
- dircache = VTOUNION(vp)->un_dircache;
+ union_lock();
+ newdircache = NULL;
nvp = NULLVP;
+ un = VTOUNION(vp);
+ dircache = un->un_dircache;
if (dircache == 0) {
+ union_unlock();
count = 0;
union_dircache_r(vp, 0, &count);
count++;
+#if 0
+ /* too bad; we need Union now! */
+#if MAC_XXX
+ panic("MAC Framework doesn't support unionfs (yet)\n");
+#endif /* MAC */
+#endif
+
dircache = (struct vnode **)
_MALLOC(count * sizeof(struct vnode *),
M_TEMP, M_WAITOK);
+ newdircache = dircache;
+ alloced = 1;
vpp = dircache;
union_dircache_r(vp, &vpp, &count);
*vpp = NULLVP;
vpp = dircache + 1;
+ union_lock();
} else {
vpp = dircache;
do {
- if (*vpp++ == VTOUNION(vp)->un_uppervp)
+ if (*vpp++ == un->un_uppervp)
break;
} while (*vpp != NULLVP);
}
- if (*vpp == NULLVP)
+ lvp = *vpp;
+ union_unlock();
+ if (lvp == NULLVP) {
goto out;
+ }
- vnode_get(*vpp);
- error = union_allocvp(&nvp, vp->v_mount, NULLVP, NULLVP, 0, *vpp, NULLVP, 0);
- if (error)
+ vnode_get(lvp);
+ union_lock();
+
+ error = union_allocvp(&nvp, vp->v_mount, NULLVP, NULLVP, 0, lvp, NULLVP, 0);
+ if (error) {
+ union_unlock();
+ vnode_put(lvp);
goto out;
+ }
- VTOUNION(vp)->un_dircache = 0;
+ un->un_dircache = 0;
un = VTOUNION(nvp);
+#if 0
+ if ((alloced != 0) && (un->un_dircache != 0)) {
+ union_unlock();
+ for (vpp = newdircache; *vpp != NULLVP; vpp++)
+ vnode_put(*vpp);
+ _FREE(newdircache, M_TEMP);
+ newdircache = NULL;
+ union_lock();
+ if (nvp != NULLVP)
+ union_freevp(nvp);
+ goto loop;
+ }
+#endif
un->un_dircache = dircache;
+ un->un_flags |= UN_DIRENVN;
+
+ newdircache = NULL;
+ union_unlock();
+ return (nvp);
out:
- return (nvp);
+ /*
+ * If we allocated a new dircache and couldn't attach
+ * it to a new vp, free the resources we allocated.
+ */
+ if (newdircache) {
+ for (vpp = newdircache; *vpp != NULLVP; vpp++)
+ vnode_put(*vpp);
+ _FREE(newdircache, M_TEMP);
+ }
+ return (NULLVP);
+}
+
+/*
+ * Module glue to remove #ifdef UNION from vfs_syscalls.c
+ */
+/* Called with no union lock, the union_dircache takes locks when necessary */
+static int
+union_dircheck(struct vnode **vpp, struct fileproc *fp, vfs_context_t ctx)
+{
+ int error = 0;
+ vnode_t vp = *vpp;
+
+ if (vp->v_op == union_vnodeop_p) {
+ struct vnode *lvp;
+
+ lvp = union_dircache(vp, ctx);
+ if (lvp != NULLVP) {
+ struct vnode_attr va;
+ /*
+ * If the directory is opaque,
+ * then don't show lower entries
+ */
+ VATTR_INIT(&va);
+ VATTR_WANTED(&va, va_flags);
+ error = vnode_getattr(vp, &va, ctx);
+ if (va.va_flags & OPAQUE) {
+ vnode_put(lvp);
+ lvp = NULL;
+ }
+ }
+
+ if (lvp != NULLVP) {
+#if CONFIG_MACF
+ error = mac_vnode_check_open(ctx, lvp, FREAD);
+ if (error) {
+ vnode_put(lvp);
+ return(error);
+ }
+#endif /* MAC */
+ error = VNOP_OPEN(lvp, FREAD, ctx);
+ if (error) {
+ vnode_put(lvp);
+ return(error);
+ }
+ vnode_ref(lvp);
+ fp->f_fglob->fg_data = (caddr_t) lvp;
+ fp->f_fglob->fg_offset = 0;
+
+ error = VNOP_CLOSE(vp, FREAD, ctx);
+ vnode_rele(vp);
+ vnode_put(vp);
+ if (error)
+ return(error);
+
+ *vpp = lvp;
+ return -1; /* goto unionread */
+ }
+ }
+ return error;
}
+
+/* called from inactive with union lock held */
+void
+union_dircache_free(struct union_node *un)
+{
+ struct vnode **vpp;
+
+ vpp = un->un_dircache;
+ un->un_dircache = NULL;
+ union_unlock();
+
+ for (; *vpp != NULLVP; vpp++)
+ vnode_put(*vpp);
+ _FREE(un->un_dircache, M_TEMP);
+ union_lock();
+}
+