/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1992, 1993, 1994, 1995 Jan-Simon Pendry.
 * Copyright (c) 1992, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_vnops.c	8.32 (Berkeley) 6/23/95
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <miscfs/union/union.h>
#include <vfs/vfs_support.h>
#define FIXUP(un, p) { \
    if (((un)->un_flags & UN_ULOCK) == 0) { \
        union_fixup(un, p); \
    } \
}

static void
union_fixup(un, p)
    struct union_node *un;
    struct proc *p;
{

    vn_lock(un->un_uppervp, LK_EXCLUSIVE | LK_RETRY, p);
    un->un_flags |= UN_ULOCK;
}
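/*
 * Note: FIXUP() re-takes the upper vnode's lock whenever the union node
 * has dropped it (UN_ULOCK clear), so that the VOP_ call which follows
 * runs with un_uppervp locked as the underlying filesystem expects.
 * The pass-through operations below (read, fsync, readlink, bmap, cmap,
 * pathconf) pair it with an explicit vn_lock() of the lower vnode,
 * roughly:
 *
 *	if (dolock)
 *		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
 *	else
 *		FIXUP(VTOUNION(ap->a_vp), p);
 */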
static int
union_lookup1(udvp, dvpp, vpp, cnp)
    struct componentname *cnp;
{
    struct proc *p = cnp->cn_proc;

    /*
     * If stepping up the directory tree, check for going
     * back across the mount point, in which case do what
     * lookup would do by stepping back down the mount
     * hierarchy.
     */
    if (cnp->cn_flags & ISDOTDOT) {
        while ((dvp != udvp) && (dvp->v_flag & VROOT)) {
            /*
             * Don't do the NOCROSSMOUNT check
             * at this level.  By definition,
             * union fs deals with namespaces, not
             * filesystems.
             */
            *dvpp = dvp = dvp->v_mount->mnt_vnodecovered;
            vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
        }
    }

    error = VOP_LOOKUP(dvp, &tdvp, cnp);

    /*
     * The parent directory will have been unlocked, unless lookup
     * found the last component.  In which case, re-lock the node
     * here to allow it to be unlocked again (phew) in union_lookup.
     */
    if (dvp != tdvp && !(cnp->cn_flags & ISLASTCN))
        vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);

    /*
     * Lastly check if the current node is a mount point in
     * which case walk up the mount hierarchy making sure not to
     * bump into the root of the mount tree (ie. dvp != udvp).
     */
    while (dvp != udvp && (dvp->v_type == VDIR) &&
           (mp = dvp->v_mountedhere)) {

        if (vfs_busy(mp, 0, 0, p))
            continue;

        error = VFS_ROOT(mp, &tdvp);
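/*
 * In short, union_lookup1() performs a plain VOP_LOOKUP() within a single
 * layer (udvp being that layer's root): for ".." at a mount root it first
 * steps back down to the covered vnode, and after the lookup it follows any
 * filesystem mounted on the result (vfs_busy()/VFS_ROOT()) so the caller
 * always gets the topmost vnode for the name in that layer.
 */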
int
union_lookup(ap)
    struct vop_lookup_args /* {
        struct vnodeop_desc *a_desc;
        struct vnode **a_vpp;
        struct componentname *a_cnp;
    } */ *ap;
{
    struct vnode *uppervp, *lowervp;
    struct vnode *upperdvp, *lowerdvp;
    struct vnode *dvp = ap->a_dvp;
    struct union_node *dun = VTOUNION(dvp);
    struct componentname *cnp = ap->a_cnp;
    struct proc *p = cnp->cn_proc;
    int lockparent = cnp->cn_flags & LOCKPARENT;
    int rdonly = cnp->cn_flags & RDONLY;
    struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
    struct ucred *saved_cred;

    if (cnp->cn_namelen == 3 &&
        cnp->cn_nameptr[2] == '.' &&
        cnp->cn_nameptr[1] == '.' &&
        cnp->cn_nameptr[0] == '.') {
        dvp = *ap->a_vpp = LOWERVP(ap->a_dvp);
        vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
        if (!lockparent || !(cnp->cn_flags & ISLASTCN))
            VOP_UNLOCK(ap->a_dvp, 0, p);
    }

    cnp->cn_flags |= LOCKPARENT;

    upperdvp = dun->un_uppervp;
    lowerdvp = dun->un_lowervp;

    /*
     * do the lookup in the upper level.
     * if that level consumes additional pathnames,
     * then assume that something special is going
     * on and just return that vnode.
     */
    if (upperdvp != NULLVP) {
        uerror = union_lookup1(um->um_uppervp, &upperdvp,
            &uppervp, cnp);
        /*if (uppervp == upperdvp)
            dun->un_flags |= UN_KLOCK;*/

        if (cnp->cn_consume != 0) {
            *ap->a_vpp = uppervp;
            cnp->cn_flags &= ~LOCKPARENT;
        }

        if (uerror == ENOENT || uerror == EJUSTRETURN) {
            if (cnp->cn_flags & ISWHITEOUT) {
                iswhiteout = 1;
            } else if (lowerdvp != NULLVP) {
                lerror = VOP_GETATTR(upperdvp, &va,
                    cnp->cn_cred, cnp->cn_proc);
                if (lerror == 0 && (va.va_flags & OPAQUE))
                    iswhiteout = 1;
            }
        }
    }

    /*
     * in a similar way to the upper layer, do the lookup
     * in the lower layer.  this time, if there is some
     * component magic going on, then vput whatever we got
     * back from the upper layer and return the lower vnode
     * instead.
     */
    if (lowerdvp != NULLVP && !iswhiteout) {
        vn_lock(lowerdvp, LK_EXCLUSIVE | LK_RETRY, p);

        /*
         * Only do a LOOKUP on the bottom node, since
         * we won't be making changes to it anyway.
         */
        nameiop = cnp->cn_nameiop;
        cnp->cn_nameiop = LOOKUP;
        if (um->um_op == UNMNT_BELOW) {
            saved_cred = cnp->cn_cred;
            cnp->cn_cred = um->um_cred;
        }
        lerror = union_lookup1(um->um_lowervp, &lowerdvp,
            &lowervp, cnp);
        if (um->um_op == UNMNT_BELOW)
            cnp->cn_cred = saved_cred;
        cnp->cn_nameiop = nameiop;

        if (lowervp != lowerdvp)
            VOP_UNLOCK(lowerdvp, 0, p);

        if (cnp->cn_consume != 0) {
            if (uppervp != NULLVP) {
                if (uppervp == upperdvp)
                    vrele(uppervp);
                else
                    vput(uppervp);
            }
            *ap->a_vpp = lowervp;
            cnp->cn_flags &= ~LOCKPARENT;
        }
    }

    if ((cnp->cn_flags & ISDOTDOT) && dun->un_pvp != NULLVP) {
        lowervp = LOWERVP(dun->un_pvp);
        if (lowervp != NULLVP) {
            vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY, p);
        }
    }

    cnp->cn_flags &= ~LOCKPARENT;

    /*
     * at this point, we have uerror and lerror indicating
     * possible errors with the lookups in the upper and lower
     * layers.  additionally, uppervp and lowervp are (locked)
     * references to existing vnodes in the upper and lower layers.
     *
     * there are now three cases to consider.
     * 1. if both layers returned an error, then return whatever
     *    error the upper layer generated.
     *
     * 2. if the top layer failed and the bottom layer succeeded
     *    then two subcases occur.
     *    a. the bottom vnode is not a directory, in which
     *       case just return a new union vnode referencing
     *       an empty top layer and the existing bottom layer.
     *    b. the bottom vnode is a directory, in which case
     *       create a new directory in the top-level and
     *       continue as in case 3.
     *
     * 3. if the top layer succeeded then return a new union
     *    vnode referencing whatever the new top layer and
     *    whatever the bottom layer returned.
     */

    if ((uerror != 0) && (lerror != 0)) {
        return (uerror);
    }

    if (uerror != 0 /* && (lerror == 0) */ ) {
        if (lowervp->v_type == VDIR) { /* case 2b. */
            dun->un_flags &= ~UN_ULOCK;
            VOP_UNLOCK(upperdvp, 0, p);
            uerror = union_mkshadow(um, upperdvp, cnp, &uppervp);
            vn_lock(upperdvp, LK_EXCLUSIVE | LK_RETRY, p);
            dun->un_flags |= UN_ULOCK;

            if (lowervp != NULLVP) {
                vput(lowervp);
            }
        }
    }

    if (lowervp != NULLVP)
        VOP_UNLOCK(lowervp, 0, p);

    error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, upperdvp, cnp,
        uppervp, lowervp, 1);

    if (uppervp != NULLVP)
        vput(uppervp);
    if (lowervp != NULLVP)
        vrele(lowervp);

    if (*ap->a_vpp != dvp)
        if (!lockparent || !(cnp->cn_flags & ISLASTCN))
            VOP_UNLOCK(dvp, 0, p);
int
union_create(ap)
    struct vop_create_args /* {
        struct vnode **a_vpp;
        struct componentname *a_cnp;
    } */ *ap;
{
    struct union_node *un = VTOUNION(ap->a_dvp);
    struct vnode *dvp = un->un_uppervp;
    struct componentname *cnp = ap->a_cnp;
    struct proc *p = cnp->cn_proc;

    un->un_flags |= UN_KLOCK;
    mp = ap->a_dvp->v_mount;

    error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap);

    error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP, cnp, vp,
        NULLVP, 1);

int
union_whiteout(ap)
    struct vop_whiteout_args /* {
        struct componentname *a_cnp;
    } */ *ap;
{
    struct union_node *un = VTOUNION(ap->a_dvp);
    struct componentname *cnp = ap->a_cnp;
    struct proc *p = cnp->cn_proc;

    if (un->un_uppervp == NULLVP)
        return (EOPNOTSUPP);

    return (VOP_WHITEOUT(un->un_uppervp, cnp, ap->a_flags));
}

int
union_mknod(ap)
    struct vop_mknod_args /* {
        struct vnode **a_vpp;
        struct componentname *a_cnp;
    } */ *ap;
{
    struct union_node *un = VTOUNION(ap->a_dvp);
    struct vnode *dvp = un->un_uppervp;
    struct componentname *cnp = ap->a_cnp;
    struct proc *p = cnp->cn_proc;

    un->un_flags |= UN_KLOCK;
    mp = ap->a_dvp->v_mount;

    error = VOP_MKNOD(dvp, &vp, cnp, ap->a_vap);

    error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP,
        cnp, vp, NULLVP, 1);
int
union_open(ap)
    struct vop_open_args /* {
        struct vnodeop_desc *a_desc;
        struct ucred *a_cred;
    } */ *ap;
{
    struct union_node *un = VTOUNION(ap->a_vp);
    int mode = ap->a_mode;
    struct ucred *cred = ap->a_cred;
    struct proc *p = ap->a_p;

    /*
     * If there is an existing upper vp then simply open that.
     */
    tvp = un->un_uppervp;

    /*
     * If the lower vnode is being opened for writing, then
     * copy the file contents to the upper vnode and open that,
     * otherwise can simply open the lower vnode.
     */
    tvp = un->un_lowervp;
    if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
        error = union_copyup(un, (mode & O_TRUNC) == 0, cred, p);
        error = VOP_OPEN(un->un_uppervp, mode, cred, p);
    }

    /*
     * Just open the lower vnode
     */
    vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, p);
    error = VOP_OPEN(tvp, mode, cred, p);
    VOP_UNLOCK(tvp, 0, p);

    error = VOP_OPEN(tvp, mode, cred, p);
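/*
 * Summary of the open policy above: an existing upper vnode is always
 * preferred; a lower regular file being opened with FWRITE is first
 * copied to the upper layer (union_copyup(), keeping its contents unless
 * O_TRUNC was requested) and the copy is opened; otherwise the lower
 * vnode itself is locked, opened and unlocked.
 */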
int
union_close(ap)
    struct vop_close_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    struct union_node *un = VTOUNION(ap->a_vp);

    if ((vp = un->un_uppervp) == NULLVP) {
#ifdef UNION_DIAGNOSTIC
        if (un->un_openl <= 0)
            panic("union: un_openl cnt");
#endif
    }

    return (VCALL(vp, VOFFSET(vop_close), ap));
}

/*
 * Check access permission on the union vnode.
 * The access check being enforced is to check
 * against both the underlying vnode, and any
 * copied vnode.  This ensures that no additional
 * file permissions are given away simply because
 * the user caused an implicit file copy.
 */
int
union_access(ap)
    struct vop_access_args /* {
        struct vnodeop_desc *a_desc;
        struct ucred *a_cred;
    } */ *ap;
{
    struct union_node *un = VTOUNION(ap->a_vp);
    struct proc *p = ap->a_p;

    if ((vp = un->un_uppervp) != NULLVP) {
        return (VCALL(vp, VOFFSET(vop_access), ap));
    }

    if ((vp = un->un_lowervp) != NULLVP) {
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
        error = VCALL(vp, VOFFSET(vop_access), ap);

        struct union_mount *um = MOUNTTOUNIONMOUNT(vp->v_mount);

        if (um->um_op == UNMNT_BELOW) {
            ap->a_cred = um->um_cred;
            error = VCALL(vp, VOFFSET(vop_access), ap);
        }
        VOP_UNLOCK(vp, 0, p);
    }
/*
 * We handle getattr only to change the fsid and
 * track object sizes.
 */
int
union_getattr(ap)
    struct vop_getattr_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    struct union_node *un = VTOUNION(ap->a_vp);
    struct vnode *vp = un->un_uppervp;
    struct proc *p = ap->a_p;

    /*
     * Some programs walk the filesystem hierarchy by counting
     * links to directories to avoid stat'ing all the time.
     * This means the link count on directories needs to be "correct".
     * The only way to do that is to call getattr on both layers
     * and fix up the link count.  The link count will not necessarily
     * be accurate but will be large enough to defeat the tree walkers.
     */

    /*
     * It's not clear whether VOP_GETATTR is to be
     * called with the vnode locked or not.  stat() calls
     * it with (vp) locked, and fstat calls it with
     * (vp) unlocked.
     * In the mean time, compensate here by checking
     * the union_node's lock flag.
     */
    if (un->un_flags & UN_LOCKED)
        FIXUP(un, p);

    error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_p);
    union_newsize(ap->a_vp, vap->va_size, VNOVAL);

    } else if (vp->v_type == VDIR) {

    error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_p);
    union_newsize(ap->a_vp, VNOVAL, vap->va_size);

    if ((vap != ap->a_vap) && (vap->va_type == VDIR))
        ap->a_vap->va_nlink += vap->va_nlink;

    ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
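/*
 * Note on the getattr code above: for directories both layers are
 * queried and va_nlink is accumulated, so tree walkers that count
 * directory links are not confused; va_fsid is then overridden with the
 * union mount's own fsid and the cached layer sizes are refreshed via
 * union_newsize().
 */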
int
union_setattr(ap)
    struct vop_setattr_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    struct union_node *un = VTOUNION(ap->a_vp);
    struct proc *p = ap->a_p;

    /*
     * Handle case of truncating lower object to zero size,
     * by creating a zero length upper object.  This is to
     * handle the case of open with O_TRUNC and O_CREAT.
     */
    if ((un->un_uppervp == NULLVP) &&
        /* assert(un->un_lowervp != NULLVP) */
        (un->un_lowervp->v_type == VREG)) {
        error = union_copyup(un, (ap->a_vap->va_size != 0),
            ap->a_cred, ap->a_p);
    }

    /*
     * Try to set attributes in upper layer,
     * otherwise return read-only filesystem error.
     */
    if (un->un_uppervp != NULLVP) {
        error = VOP_SETATTR(un->un_uppervp, ap->a_vap,
            ap->a_cred, ap->a_p);
        if ((error == 0) && (ap->a_vap->va_size != VNOVAL))
            union_newsize(ap->a_vp, ap->a_vap->va_size, VNOVAL);
    }

int
union_read(ap)
    struct vop_read_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    struct proc *p = ap->a_uio->uio_procp;
    struct vnode *vp = OTHERVP(ap->a_vp);
    int dolock = (vp == LOWERVP(ap->a_vp));

    if (dolock)
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
    else
        FIXUP(VTOUNION(ap->a_vp), p);
    error = VOP_READ(vp, ap->a_uio, ap->a_ioflag, ap->a_cred);
    if (dolock)
        VOP_UNLOCK(vp, 0, p);

    /*
     * perhaps the size of the underlying object has changed under
     * our feet.  take advantage of the offset information present
     * in the uio structure.
     */
    struct union_node *un = VTOUNION(ap->a_vp);
    off_t cur = ap->a_uio->uio_offset;

    if (vp == un->un_uppervp) {
        if (cur > un->un_uppersz)
            union_newsize(ap->a_vp, cur, VNOVAL);
    } else {
        if (cur > un->un_lowersz)
            union_newsize(ap->a_vp, VNOVAL, cur);
    }
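/*
 * Reads are directed by OTHERVP() to whichever layer actually backs the
 * object (upper once copied up, lower otherwise); the lower vnode, which
 * is not covered by the union node's own lock, is locked around the call,
 * and the cached layer size is refreshed from uio_offset afterwards.
 */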
int
union_write(ap)
    struct vop_read_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    struct union_node *un = VTOUNION(ap->a_vp);
    struct proc *p = ap->a_uio->uio_procp;

    vp = UPPERVP(ap->a_vp);
    if (vp == NULLVP)
        panic("union: missing upper layer in write");

    error = VOP_WRITE(vp, ap->a_uio, ap->a_ioflag, ap->a_cred);

    /*
     * the size of the underlying object may be changed by the
     * write.
     */
    off_t cur = ap->a_uio->uio_offset;

    if (cur > un->un_uppersz)
        union_newsize(ap->a_vp, cur, VNOVAL);
int
union_lease(ap)
    struct vop_lease_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    register struct vnode *ovp = OTHERVP(ap->a_vp);

    return (VCALL(ovp, VOFFSET(vop_lease), ap));
}

int
union_ioctl(ap)
    struct vop_ioctl_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    register struct vnode *ovp = OTHERVP(ap->a_vp);

    return (VCALL(ovp, VOFFSET(vop_ioctl), ap));
}

int
union_select(ap)
    struct vop_select_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    register struct vnode *ovp = OTHERVP(ap->a_vp);

    return (VCALL(ovp, VOFFSET(vop_select), ap));
}

int
union_revoke(ap)
    struct vop_revoke_args /* {
        struct vnode *a_vp;
        int a_flags;
    } */ *ap;
{
    struct vnode *vp = ap->a_vp;

    VOP_REVOKE(UPPERVP(vp), ap->a_flags);
    VOP_REVOKE(LOWERVP(vp), ap->a_flags);
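/*
 * Revoke is propagated to both layers: the upper and lower vnodes (when
 * present) are each revoked with the caller's flags.
 */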
int
union_mmap(ap)
    struct vop_mmap_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    register struct vnode *ovp = OTHERVP(ap->a_vp);

    return (VCALL(ovp, VOFFSET(vop_mmap), ap));
}

int
union_fsync(ap)
    struct vop_fsync_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    struct proc *p = ap->a_p;
    struct vnode *targetvp = OTHERVP(ap->a_vp);

    if (targetvp != NULLVP) {
        int dolock = (targetvp == LOWERVP(ap->a_vp));

        if (dolock)
            vn_lock(targetvp, LK_EXCLUSIVE | LK_RETRY, p);
        else
            FIXUP(VTOUNION(ap->a_vp), p);
        error = VOP_FSYNC(targetvp, ap->a_cred, ap->a_waitfor, p);
        if (dolock)
            VOP_UNLOCK(targetvp, 0, p);
    }

int
union_seek(ap)
    struct vop_seek_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    register struct vnode *ovp = OTHERVP(ap->a_vp);

    return (VCALL(ovp, VOFFSET(vop_seek), ap));
}
int
union_remove(ap)
    struct vop_remove_args /* {
        struct componentname *a_cnp;
    } */ *ap;
{
    struct union_node *dun = VTOUNION(ap->a_dvp);
    struct union_node *un = VTOUNION(ap->a_vp);
    struct componentname *cnp = ap->a_cnp;
    struct proc *p = cnp->cn_proc;

    if (dun->un_uppervp == NULLVP)
        panic("union remove: null upper vnode");

    if (un->un_uppervp != NULLVP) {
        struct vnode *dvp = dun->un_uppervp;
        struct vnode *vp = un->un_uppervp;

        dun->un_flags |= UN_KLOCK;
        un->un_flags |= UN_KLOCK;

        if (union_dowhiteout(un, cnp->cn_cred, cnp->cn_proc))
            cnp->cn_flags |= DOWHITEOUT;
        error = VOP_REMOVE(dvp, vp, cnp);
        union_removed_upper(un);
    } else {
        error = union_mkwhiteout(
            MOUNTTOUNIONMOUNT(UNIONTOV(dun)->v_mount),
            dun->un_uppervp, ap->a_cnp, un->un_path);
    }
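/*
 * Remove summary: when the object exists in the upper layer it is removed
 * there, with DOWHITEOUT requested if union_dowhiteout() says a lower copy
 * would otherwise show through, and union_removed_upper() detaches the
 * upper vnode from the union node; when only a lower copy exists, a
 * whiteout entry is created in the upper directory via union_mkwhiteout().
 */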
int
union_link(ap)
    struct vop_link_args /* {
        struct vnode *a_tdvp;
        struct componentname *a_cnp;
    } */ *ap;
{
    struct componentname *cnp = ap->a_cnp;
    struct proc *p = cnp->cn_proc;
    struct union_node *un;

    un = VTOUNION(ap->a_tdvp);

    if (ap->a_tdvp->v_op != ap->a_vp->v_op) {
        vp = ap->a_vp;
    } else {
        struct union_node *tun = VTOUNION(ap->a_vp);
        if (tun->un_uppervp == NULLVP) {
            vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY, p);
            if (un->un_uppervp == tun->un_dirvp) {
                un->un_flags &= ~UN_ULOCK;
                VOP_UNLOCK(un->un_uppervp, 0, p);
            }
            error = union_copyup(tun, 1, cnp->cn_cred, p);
            if (un->un_uppervp == tun->un_dirvp) {
                vn_lock(un->un_uppervp,
                    LK_EXCLUSIVE | LK_RETRY, p);
                un->un_flags |= UN_ULOCK;
            }
            VOP_UNLOCK(ap->a_vp, 0, p);
        }
        vp = tun->un_uppervp;
    }

    tdvp = un->un_uppervp;

    un->un_flags |= UN_KLOCK;

    return (VOP_LINK(vp, tdvp, cnp));
}

int
union_rename(ap)
    struct vop_rename_args /* {
        struct vnode *a_fdvp;
        struct vnode *a_fvp;
        struct componentname *a_fcnp;
        struct vnode *a_tdvp;
        struct vnode *a_tvp;
        struct componentname *a_tcnp;
    } */ *ap;
{
    struct vnode *fdvp = ap->a_fdvp;
    struct vnode *fvp = ap->a_fvp;
    struct vnode *tdvp = ap->a_tdvp;
    struct vnode *tvp = ap->a_tvp;

    if (fdvp->v_op == union_vnodeop_p) {	/* always true */
        struct union_node *un = VTOUNION(fdvp);
        if (un->un_uppervp == NULLVP) {
            /*
             * this should never happen in normal
             * operation but might if there was
             * a problem creating the top-level shadow
             * directory.
             */
        }
        fdvp = un->un_uppervp;
    }

    if (fvp->v_op == union_vnodeop_p) {	/* always true */
        struct union_node *un = VTOUNION(fvp);
        if (un->un_uppervp == NULLVP) {
            /* XXX: should do a copyup */
        }
        if (un->un_lowervp != NULLVP)
            ap->a_fcnp->cn_flags |= DOWHITEOUT;
        fvp = un->un_uppervp;
    }

    if (tdvp->v_op == union_vnodeop_p) {
        struct union_node *un = VTOUNION(tdvp);
        if (un->un_uppervp == NULLVP) {
            /*
             * this should never happen in normal
             * operation but might if there was
             * a problem creating the top-level shadow
             * directory.
             */
        }
        tdvp = un->un_uppervp;
        un->un_flags |= UN_KLOCK;
    }

    if (tvp != NULLVP && tvp->v_op == union_vnodeop_p) {
        struct union_node *un = VTOUNION(tvp);

        tvp = un->un_uppervp;
        if (tvp != NULLVP) {
            un->un_flags |= UN_KLOCK;
        }
    }

    return (VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp));
}
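/*
 * Rename summary: each of the four vnodes handed to VOP_RENAME() is
 * replaced by its upper-layer counterpart; if the source still has a
 * lower copy, DOWHITEOUT is set on the from-component so a whiteout is
 * left behind, and a missing upper vnode (e.g. a failed shadow-directory
 * creation) causes the rename to be rejected.
 */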
int
union_mkdir(ap)
    struct vop_mkdir_args /* {
        struct vnode *a_dvp;
        struct vnode **a_vpp;
        struct componentname *a_cnp;
        struct vattr *a_vap;
    } */ *ap;
{
    struct union_node *un = VTOUNION(ap->a_dvp);
    struct vnode *dvp = un->un_uppervp;
    struct componentname *cnp = ap->a_cnp;
    struct proc *p = cnp->cn_proc;

    if (dvp != NULLVP) {
        un->un_flags |= UN_KLOCK;
        VOP_UNLOCK(ap->a_dvp, 0, p);
        error = VOP_MKDIR(dvp, &vp, cnp, ap->a_vap);

        error = union_allocvp(ap->a_vpp, ap->a_dvp->v_mount, ap->a_dvp,
            NULLVP, cnp, vp, NULLVP, 1);
    }

int
union_rmdir(ap)
    struct vop_rmdir_args /* {
        struct vnode *a_dvp;
        struct componentname *a_cnp;
    } */ *ap;
{
    struct union_node *dun = VTOUNION(ap->a_dvp);
    struct union_node *un = VTOUNION(ap->a_vp);
    struct componentname *cnp = ap->a_cnp;
    struct proc *p = cnp->cn_proc;

    if (dun->un_uppervp == NULLVP)
        panic("union rmdir: null upper vnode");

    if (un->un_uppervp != NULLVP) {
        struct vnode *dvp = dun->un_uppervp;
        struct vnode *vp = un->un_uppervp;

        dun->un_flags |= UN_KLOCK;
        un->un_flags |= UN_KLOCK;

        if (union_dowhiteout(un, cnp->cn_cred, cnp->cn_proc))
            cnp->cn_flags |= DOWHITEOUT;
        error = VOP_RMDIR(dvp, vp, ap->a_cnp);
        union_removed_upper(un);
    } else {
        error = union_mkwhiteout(
            MOUNTTOUNIONMOUNT(UNIONTOV(dun)->v_mount),
            dun->un_uppervp, ap->a_cnp, un->un_path);
    }

int
union_symlink(ap)
    struct vop_symlink_args /* {
        struct vnode *a_dvp;
        struct vnode **a_vpp;
        struct componentname *a_cnp;
        struct vattr *a_vap;
    } */ *ap;
{
    struct union_node *un = VTOUNION(ap->a_dvp);
    struct vnode *dvp = un->un_uppervp;
    struct componentname *cnp = ap->a_cnp;
    struct proc *p = cnp->cn_proc;

    if (dvp != NULLVP) {
        struct mount *mp = ap->a_dvp->v_mount;

        un->un_flags |= UN_KLOCK;

        error = VOP_SYMLINK(dvp, &vp, cnp, ap->a_vap, ap->a_target);
        *ap->a_vpp = NULLVP;
    }
/*
 * union_readdir works in concert with getdirentries and
 * readdir(3) to provide a list of entries in the unioned
 * directories.  getdirentries is responsible for walking
 * down the union stack.  readdir(3) is responsible for
 * eliminating duplicate names from the returned data stream.
 */
int
union_readdir(ap)
    struct vop_readdir_args /* {
        struct vnodeop_desc *a_desc;
        struct ucred *a_cred;
    } */ *ap;
{
    struct union_node *un = VTOUNION(ap->a_vp);
    struct vnode *uvp = un->un_uppervp;
    struct proc *p = ap->a_uio->uio_procp;

    return (VCALL(uvp, VOFFSET(vop_readdir), ap));
}

int
union_readlink(ap)
    struct vop_readlink_args /* {
        struct ucred *a_cred;
    } */ *ap;
{
    struct uio *uio = ap->a_uio;
    struct proc *p = uio->uio_procp;
    struct vnode *vp = OTHERVP(ap->a_vp);
    int dolock = (vp == LOWERVP(ap->a_vp));

    if (dolock)
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
    else
        FIXUP(VTOUNION(ap->a_vp), p);
    error = VCALL(vp, VOFFSET(vop_readlink), ap);
    if (dolock)
        VOP_UNLOCK(vp, 0, p);

int
union_abortop(ap)
    struct vop_abortop_args /* {
        struct vnode *a_dvp;
        struct componentname *a_cnp;
    } */ *ap;
{
    struct componentname *cnp = ap->a_cnp;
    struct proc *p = cnp->cn_proc;
    struct vnode *vp = OTHERVP(ap->a_dvp);
    struct union_node *un = VTOUNION(ap->a_dvp);
    int islocked = un->un_flags & UN_LOCKED;
    int dolock = (vp == LOWERVP(ap->a_dvp));

    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
    FIXUP(VTOUNION(ap->a_dvp), p);
    error = VCALL(vp, VOFFSET(vop_abortop), ap);
    if (islocked && dolock)
        VOP_UNLOCK(vp, 0, p);
int
union_inactive(ap)
    struct vop_inactive_args /* {
        struct vnode *a_vp;
        struct proc *a_p;
    } */ *ap;
{
    struct vnode *vp = ap->a_vp;
    struct proc *p = ap->a_p;
    struct union_node *un = VTOUNION(vp);

    /*
     * Do nothing (and _don't_ bypass).
     * Wait to vrele lowervp until reclaim,
     * so that until then our union_node is in the
     * cache and reusable.
     *
     * NEEDSWORK: Someday, consider inactive'ing
     * the lowervp and then trying to reactivate it
     * with capabilities (v_id)
     * like they do in the name lookup cache code.
     * That's too much work for now.
     */

    if (un->un_dircache != 0) {
        for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
            vrele(*vpp);
        _FREE(un->un_dircache, M_TEMP);
        un->un_dircache = 0;
    }

    VOP_UNLOCK(vp, 0, p);

    if ((un->un_flags & UN_CACHED) == 0)
        vgone(vp);

int
union_reclaim(ap)
    struct vop_reclaim_args /* {
        struct vnode *a_vp;
    } */ *ap;
{

    union_freevp(ap->a_vp);

    return (0);
}
int
union_lock(ap)
    struct vop_lock_args *ap;
{
    struct vnode *vp = ap->a_vp;
    struct proc *p = ap->a_p;
    int flags = ap->a_flags;
    struct union_node *un;

    /*
     * Need to do real lockmgr-style locking here.
     * in the mean time, draining won't work quite right,
     * which could lead to a few race conditions.
     * the following test was here, but is not quite right, we
     * still need to take the lock:
    if ((flags & LK_TYPE_MASK) == LK_DRAIN)
        return (0);
     */

    flags &= ~LK_INTERLOCK;

    if (un->un_uppervp != NULLVP) {
        if (((un->un_flags & UN_ULOCK) == 0) &&
            (vp->v_usecount != 0)) {
            error = vn_lock(un->un_uppervp, flags, p);

            un->un_flags |= UN_ULOCK;
        }

        if (un->un_flags & UN_KLOCK) {
            vprint("union: dangling klock", vp);
            panic("union: dangling upper lock (%lx)", vp);
        }
    }

    if (un->un_flags & UN_LOCKED) {
        if (current_proc() && un->un_pid == current_proc()->p_pid &&
            un->un_pid > -1 && current_proc()->p_pid > -1)
            panic("union: locking against myself");

        un->un_flags |= UN_WANT;
        tsleep((caddr_t) &un->un_flags, PINOD, "unionlk2", 0);
    }

    un->un_pid = current_proc()->p_pid;

    un->un_flags |= UN_LOCKED;
/*
 * When operations want to vput() a union node yet retain a lock on
 * the upper vnode (say, to do some further operations like link(),
 * mkdir(), ...), they set UN_KLOCK on the union node, then call
 * vput() which calls VOP_UNLOCK() and comes here.  union_unlock()
 * unlocks the union node (leaving the upper vnode alone), clears the
 * KLOCK flag, and then returns to vput().  The caller then does whatever
 * is left to do with the upper vnode, and ensures that it gets unlocked.
 *
 * If UN_KLOCK isn't set, then the upper vnode is unlocked here.
 */
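/*
 * The callers above (remove, rmdir, link, rename, mkdir, symlink, ...)
 * follow exactly that pattern: they set UN_KLOCK on the union node,
 * issue the underlying VOP_ against the still-locked upper vnode, and
 * let the eventual vput()/VOP_UNLOCK() of the union vnode fall through
 * to union_unlock() with the upper lock intentionally left held.
 */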
int
union_unlock(ap)
    struct vop_unlock_args /* {
        struct vnode *a_vp;
        struct proc *a_p;
    } */ *ap;
{
    struct union_node *un = VTOUNION(ap->a_vp);
    struct proc *p = ap->a_p;

    if ((un->un_flags & UN_LOCKED) == 0)
        panic("union: unlock unlocked node");
    if (current_proc() && un->un_pid != current_proc()->p_pid &&
        current_proc()->p_pid > -1 && un->un_pid > -1)
        panic("union: unlocking other process's union node");

    un->un_flags &= ~UN_LOCKED;

    if ((un->un_flags & (UN_ULOCK|UN_KLOCK)) == UN_ULOCK)
        VOP_UNLOCK(un->un_uppervp, 0, p);

    un->un_flags &= ~(UN_ULOCK|UN_KLOCK);

    if (un->un_flags & UN_WANT) {
        un->un_flags &= ~UN_WANT;
        wakeup((caddr_t) &un->un_flags);
    }
int
union_bmap(ap)
    struct vop_bmap_args /* {
        struct vnode **a_vpp;
    } */ *ap;
{
    struct proc *p = current_proc();	/* XXX */
    struct vnode *vp = OTHERVP(ap->a_vp);
    int dolock = (vp == LOWERVP(ap->a_vp));

    if (dolock)
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
    else
        FIXUP(VTOUNION(ap->a_vp), p);
    error = VCALL(vp, VOFFSET(vop_bmap), ap);
    if (dolock)
        VOP_UNLOCK(vp, 0, p);

int
union_cmap(ap)
    struct vop_cmap_args /* {
        struct vnode *a_vp;
    } */ *ap;
{
    struct proc *p = current_proc();	/* XXX */
    struct vnode *vp = OTHERVP(ap->a_vp);
    int dolock = (vp == LOWERVP(ap->a_vp));

    if (dolock)
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
    else
        FIXUP(VTOUNION(ap->a_vp), p);
    error = VCALL(vp, VOFFSET(vop_cmap), ap);
    if (dolock)
        VOP_UNLOCK(vp, 0, p);
int
union_print(ap)
    struct vop_print_args /* {
        struct vnode *a_vp;
    } */ *ap;
{
    struct vnode *vp = ap->a_vp;

    printf("\ttag VT_UNION, vp=%x, uppervp=%x, lowervp=%x\n",
        vp, UPPERVP(vp), LOWERVP(vp));
    if (UPPERVP(vp) != NULLVP)
        vprint("union: upper", UPPERVP(vp));
    if (LOWERVP(vp) != NULLVP)
        vprint("union: lower", LOWERVP(vp));

    return (0);
}

int
union_islocked(ap)
    struct vop_islocked_args /* {
        struct vnode *a_vp;
    } */ *ap;
{

    return ((VTOUNION(ap->a_vp)->un_flags & UN_LOCKED) ? 1 : 0);
}

int
union_pathconf(ap)
    struct vop_pathconf_args /* {
        struct vnode *a_vp;
    } */ *ap;
{
    struct proc *p = current_proc();	/* XXX */
    struct vnode *vp = OTHERVP(ap->a_vp);
    int dolock = (vp == LOWERVP(ap->a_vp));

    if (dolock)
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
    else
        FIXUP(VTOUNION(ap->a_vp), p);
    error = VCALL(vp, VOFFSET(vop_pathconf), ap);
    if (dolock)
        VOP_UNLOCK(vp, 0, p);

int
union_advlock(ap)
    struct vop_advlock_args /* {
        struct vnode *a_vp;
    } */ *ap;
{
    register struct vnode *ovp = OTHERVP(ap->a_vp);

    return (VCALL(ovp, VOFFSET(vop_advlock), ap));
}
/*
 * XXX - vop_strategy must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
union_strategy(ap)
    struct vop_strategy_args /* {
        struct buf *a_bp;
    } */ *ap;
{
    struct buf *bp = ap->a_bp;
    struct vnode *savedvp;

    savedvp = bp->b_vp;
    bp->b_vp = OTHERVP(bp->b_vp);

    if (bp->b_vp == NULLVP)
        panic("union_strategy: nil vp");
    if (((bp->b_flags & B_READ) == 0) &&
        (bp->b_vp == LOWERVP(savedvp)))
        panic("union_strategy: writing to lowervp");

    error = VOP_STRATEGY(bp);
    bp->b_vp = savedvp;

    return (error);
}
int
union_pagein(ap)
    struct vop_pagein_args /* {
        vm_offset_t a_pl_offset,
        struct ucred *a_cred,
    } */ *ap;
{
    struct proc *p = current_proc();
    struct vnode *vp = OTHERVP(ap->a_vp);

    error = VOP_PAGEIN(vp, ap->a_pl, ap->a_pl_offset, ap->a_f_offset,
        ap->a_size, ap->a_cred, ap->a_flags);

    /*
     * perhaps the size of the underlying object has changed under
     * our feet.  take advantage of the offset information present
     * in the uio structure.
     */
    struct union_node *un = VTOUNION(ap->a_vp);
    off_t cur = ap->a_f_offset + (off_t)ap->a_pl_offset;

    if (vp == un->un_uppervp) {
        if (cur > un->un_uppersz)
            union_newsize(ap->a_vp, cur, VNOVAL);
    } else {
        if (cur > un->un_lowersz)
            union_newsize(ap->a_vp, VNOVAL, cur);
    }

int
union_pageout(ap)
    struct vop_pageout_args /* {
        vm_offset_t a_pl_offset,
        struct ucred *a_cred,
    } */ *ap;
{
    struct union_node *un = VTOUNION(ap->a_vp);

    vp = UPPERVP(ap->a_vp);
    if (vp == NULLVP)
        panic("union: missing upper layer in pageout");

    error = VOP_PAGEOUT(vp, ap->a_pl, ap->a_pl_offset, ap->a_f_offset,
        ap->a_size, ap->a_cred, ap->a_flags);

    /*
     * the size of the underlying object may be changed by the
     * write.
     */
    off_t cur = ap->a_f_offset + (off_t)ap->a_pl_offset;

    if (cur > un->un_uppersz)
        union_newsize(ap->a_vp, cur, VNOVAL);

/* Blktooff derives file offset for the given logical block number */
int
union_blktooff(ap)
    struct vop_blktooff_args /* {
        struct vnode *a_vp;
    } */ *ap;
{
    struct vnode *vp = OTHERVP(ap->a_vp);

    error = VOP_BLKTOOFF(vp, ap->a_lblkno, ap->a_offset);

    return (error);
}

/* Offtoblk derives the logical block number for the given file offset */
int
union_offtoblk(ap)
    struct vop_offtoblk_args /* {
        struct vnode *a_vp;
    } */ *ap;
{
    struct vnode *vp = OTHERVP(ap->a_vp);

    error = VOP_OFFTOBLK(vp, ap->a_offset, ap->a_lblkno);

    return (error);
}
#define VOPFUNC int (*)(void *)

/*
 * Global vfs data structures
 */
int (**union_vnodeop_p)(void *);
struct vnodeopv_entry_desc union_vnodeop_entries[] = {
    { &vop_default_desc, (VOPFUNC)vn_default_error },
    { &vop_lookup_desc, (VOPFUNC)union_lookup },		/* lookup */
    { &vop_create_desc, (VOPFUNC)union_create },		/* create */
    { &vop_whiteout_desc, (VOPFUNC)union_whiteout },		/* whiteout */
    { &vop_mknod_desc, (VOPFUNC)union_mknod },			/* mknod */
    { &vop_open_desc, (VOPFUNC)union_open },			/* open */
    { &vop_close_desc, (VOPFUNC)union_close },			/* close */
    { &vop_access_desc, (VOPFUNC)union_access },		/* access */
    { &vop_getattr_desc, (VOPFUNC)union_getattr },		/* getattr */
    { &vop_setattr_desc, (VOPFUNC)union_setattr },		/* setattr */
    { &vop_read_desc, (VOPFUNC)union_read },			/* read */
    { &vop_write_desc, (VOPFUNC)union_write },			/* write */
    { &vop_lease_desc, (VOPFUNC)union_lease },			/* lease */
    { &vop_ioctl_desc, (VOPFUNC)union_ioctl },			/* ioctl */
    { &vop_select_desc, (VOPFUNC)union_select },		/* select */
    { &vop_revoke_desc, (VOPFUNC)union_revoke },		/* revoke */
    { &vop_mmap_desc, (VOPFUNC)union_mmap },			/* mmap */
    { &vop_fsync_desc, (VOPFUNC)union_fsync },			/* fsync */
    { &vop_seek_desc, (VOPFUNC)union_seek },			/* seek */
    { &vop_remove_desc, (VOPFUNC)union_remove },		/* remove */
    { &vop_link_desc, (VOPFUNC)union_link },			/* link */
    { &vop_rename_desc, (VOPFUNC)union_rename },		/* rename */
    { &vop_mkdir_desc, (VOPFUNC)union_mkdir },			/* mkdir */
    { &vop_rmdir_desc, (VOPFUNC)union_rmdir },			/* rmdir */
    { &vop_symlink_desc, (VOPFUNC)union_symlink },		/* symlink */
    { &vop_readdir_desc, (VOPFUNC)union_readdir },		/* readdir */
    { &vop_readlink_desc, (VOPFUNC)union_readlink },		/* readlink */
    { &vop_abortop_desc, (VOPFUNC)union_abortop },		/* abortop */
    { &vop_inactive_desc, (VOPFUNC)union_inactive },		/* inactive */
    { &vop_reclaim_desc, (VOPFUNC)union_reclaim },		/* reclaim */
    { &vop_lock_desc, (VOPFUNC)union_lock },			/* lock */
    { &vop_unlock_desc, (VOPFUNC)union_unlock },		/* unlock */
    { &vop_bmap_desc, (VOPFUNC)union_bmap },			/* bmap */
    { &vop_strategy_desc, (VOPFUNC)union_strategy },		/* strategy */
    { &vop_print_desc, (VOPFUNC)union_print },			/* print */
    { &vop_islocked_desc, (VOPFUNC)union_islocked },		/* islocked */
    { &vop_pathconf_desc, (VOPFUNC)union_pathconf },		/* pathconf */
    { &vop_advlock_desc, (VOPFUNC)union_advlock },		/* advlock */
    { &vop_blkatoff_desc, (VOPFUNC)union_blkatoff },		/* blkatoff */
    { &vop_valloc_desc, (VOPFUNC)union_valloc },		/* valloc */
    { &vop_vfree_desc, (VOPFUNC)union_vfree },			/* vfree */
    { &vop_truncate_desc, (VOPFUNC)union_truncate },		/* truncate */
    { &vop_update_desc, (VOPFUNC)union_update },		/* update */
    { &vop_bwrite_desc, (VOPFUNC)union_bwrite },		/* bwrite */
    { &vop_pagein_desc, (VOPFUNC)union_pagein },		/* Pagein */
    { &vop_pageout_desc, (VOPFUNC)union_pageout },		/* Pageout */
    { &vop_copyfile_desc, (VOPFUNC)err_copyfile },		/* Copyfile */
    { &vop_blktooff_desc, (VOPFUNC)union_blktooff },		/* blktooff */
    { &vop_offtoblk_desc, (VOPFUNC)union_offtoblk },		/* offtoblk */
    { &vop_cmap_desc, (VOPFUNC)union_cmap },			/* cmap */
    { (struct vnodeop_desc *)NULL, (int(*)())NULL }
};
struct vnodeopv_desc union_vnodeop_opv_desc =
    { &union_vnodeop_p, union_vnodeop_entries };
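/*
 * union_vnodeop_opv_desc pairs the operations table above with the
 * union_vnodeop_p vector; at boot the VFS layer's opv initialization
 * (vfs_opv_init() over the vfs_opv_descs list) fills in the vector, and
 * union_allocvp() then hands out vnodes whose v_op points at it.
 */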