/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1994 Jan-Simon Pendry
 * Copyright (c) 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_subr.c	8.20 (Berkeley) 5/20/95
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/file.h>		/* FREAD/FWRITE, used below */
#include <sys/filedesc.h>
#include <sys/queue.h>
#include <sys/mount.h>
#include <sys/ubc.h>		/* ubc_setsize(), ubc_info_init() */

#include <miscfs/union/union.h>
/* must be power of two, otherwise change UNION_HASH() */
#define NHASH 32

/* unsigned int ... */
#define UNION_HASH(u, l) \
	(((((unsigned long) (u)) + ((unsigned long) l)) >> 8) & (NHASH-1))

static LIST_HEAD(unhead, union_node) unhead[NHASH];
static int unvplock[NHASH];
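
/*
 * Illustrative sketch (not part of the original file): because NHASH is a
 * power of two, masking the shifted sum with (NHASH-1) reduces it modulo
 * NHASH, so the macro maps any (uppervp, lowervp) pair to one of the NHASH
 * buckets declared above:
 */
#if 0	/* example only; uppervp and lowervp are hypothetical locals */
	int hash = UNION_HASH(uppervp, lowervp);	/* 0 <= hash < NHASH */
	struct union_node *un = unhead[hash].lh_first;	/* first node in that bucket */
#endif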
int
union_init()
{
	int i;

	for (i = 0; i < NHASH; i++)
		LIST_INIT(&unhead[i]);
	bzero((caddr_t) unvplock, sizeof(unvplock));
}
static int
union_list_lock(ix)
	int ix;
{

	if (unvplock[ix] & UN_LOCKED) {
		unvplock[ix] |= UN_WANT;
		sleep((caddr_t) &unvplock[ix], PINOD);
		return (1);
	}

	unvplock[ix] |= UN_LOCKED;

	return (0);
}
static void
union_list_unlock(ix)
	int ix;
{

	unvplock[ix] &= ~UN_LOCKED;

	if (unvplock[ix] & UN_WANT) {
		unvplock[ix] &= ~UN_WANT;
		wakeup((caddr_t) &unvplock[ix]);
	}
}
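
/*
 * Illustrative sketch (not part of the original file): union_list_lock()
 * returns non-zero when it had to sleep, in which case the bucket may have
 * changed while the caller slept.  Callers therefore retry until the lock
 * is obtained without sleeping, and release it with union_list_unlock():
 */
#if 0	/* example only; hash is a hypothetical local */
	while (union_list_lock(hash))	/* non-zero means we slept; retry */
		continue;
	/* ... examine or modify unhead[hash] ... */
	union_list_unlock(hash);
#endif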
static void
union_updatevp(un, uppervp, lowervp)
	struct union_node *un;
	struct vnode *uppervp;
	struct vnode *lowervp;
{
	int ohash = UNION_HASH(un->un_uppervp, un->un_lowervp);
	int nhash = UNION_HASH(uppervp, lowervp);
	int docache = (lowervp != NULLVP || uppervp != NULLVP);
	int lhash, hhash, uhash;

	/*
	 * Ensure locking is ordered from lower to higher
	 * to avoid deadlocks.
	 */
	if (nhash < ohash) {
		lhash = nhash;
		uhash = ohash;
	} else {
		lhash = ohash;
		uhash = nhash;
	}

	if (lhash != uhash)
		while (union_list_lock(lhash))
			continue;

	while (union_list_lock(uhash))
		continue;

	if (ohash != nhash || !docache) {
		if (un->un_flags & UN_CACHED) {
			un->un_flags &= ~UN_CACHED;
			LIST_REMOVE(un, un_cache);
		}
	}

	if (ohash != nhash)
		union_list_unlock(ohash);

	if (un->un_lowervp != lowervp) {
		if (un->un_lowervp) {
			vrele(un->un_lowervp);
			if (un->un_path) {
				_FREE(un->un_path, M_TEMP);
				un->un_path = 0;
			}
			if (un->un_dirvp) {
				vrele(un->un_dirvp);
				un->un_dirvp = NULLVP;
			}
		}
		un->un_lowervp = lowervp;
		un->un_lowersz = VNOVAL;
	}

	if (un->un_uppervp != uppervp) {
		if (un->un_uppervp)
			vrele(un->un_uppervp);

		un->un_uppervp = uppervp;
		un->un_uppersz = VNOVAL;
	}

	if (docache && (ohash != nhash)) {
		LIST_INSERT_HEAD(&unhead[nhash], un, un_cache);
		un->un_flags |= UN_CACHED;
	}

	union_list_unlock(nhash);
}
void
union_newlower(un, lowervp)
	struct union_node *un;
	struct vnode *lowervp;
{

	union_updatevp(un, un->un_uppervp, lowervp);
}
void
union_newupper(un, uppervp)
	struct union_node *un;
	struct vnode *uppervp;
{

	union_updatevp(un, uppervp, un->un_lowervp);
}
/*
 * Keep track of size changes in the underlying vnodes.
 * If the size changes, then call back to the vm layer
 * giving priority to the upper layer size.
 */
void
union_newsize(vp, uppersz, lowersz)
	struct vnode *vp;
	off_t uppersz, lowersz;
{
	struct union_node *un;
	off_t sz;

	/* only interested in regular files */
	if (vp->v_type != VREG)
		return;

	un = VTOUNION(vp);
	sz = VNOVAL;

	if ((uppersz != VNOVAL) && (un->un_uppersz != uppersz)) {
		un->un_uppersz = uppersz;
		if (sz == VNOVAL)
			sz = un->un_uppersz;
	}

	if ((lowersz != VNOVAL) && (un->un_lowersz != lowersz)) {
		un->un_lowersz = lowersz;
		if (sz == VNOVAL)
			sz = un->un_lowersz;
	}

	if (sz != VNOVAL) {
#ifdef UNION_DIAGNOSTIC
		printf("union: %s size now %ld\n",
			uppersz != VNOVAL ? "upper" : "lower", (long) sz);
#endif
		ubc_setsize(vp, sz); /* XXX check error */
	}
}
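
/*
 * Illustrative sketch (not from the original source): union_newsize() is
 * driven from the union vnode operations whenever the size of an underlying
 * layer becomes known, for example after fetching attributes from the upper
 * vnode.  The locals below (un, vp, cred, p) are hypothetical:
 */
#if 0	/* example only */
	struct vattr va;

	if (VOP_GETATTR(un->un_uppervp, &va, cred, p) == 0)
		union_newsize(vp, va.va_size, VNOVAL);	/* upper size known, lower unchanged */
#endif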
/*
 * allocate a union_node/vnode pair.  the vnode is
 * referenced and locked.  the new vnode is returned
 * via (vpp).  (mp) is the mountpoint of the union filesystem,
 * (dvp) is the parent directory where the upper layer object
 * should exist (but doesn't) and (cnp) is the componentname
 * information which is partially copied to allow the upper
 * layer object to be created at a later time.  (uppervp)
 * and (lowervp) reference the upper and lower layer objects
 * being mapped.  either, but not both, can be nil.
 * if supplied, (uppervp) is locked.
 * the reference is either maintained in the new union_node
 * object which is allocated, or they are vrele'd.
 *
 * all union_nodes are maintained on a singly-linked
 * list.  new nodes are only allocated when they cannot
 * be found on this list.  entries on the list are
 * removed when the vfs reclaim entry is called.
 *
 * a single lock is kept for the entire list.  this is
 * needed because the getnewvnode() function can block
 * waiting for a vnode to become free, in which case there
 * may be more than one process trying to get the same
 * vnode.  this lock is only taken if we are going to
 * call getnewvnode(), since the kernel itself is single-threaded.
 *
 * if an entry is found on the list, then call vget() to
 * take a reference.  this is done because there may be
 * zero references to it and so it needs to be removed from
 * the vnode free list.
 */
int
union_allocvp(vpp, mp, undvp, dvp, cnp, uppervp, lowervp, docache)
	struct vnode **vpp;
	struct mount *mp;
	struct vnode *undvp;		/* parent union vnode */
	struct vnode *dvp;		/* may be null */
	struct componentname *cnp;	/* may be null */
	struct vnode *uppervp;		/* may be null */
	struct vnode *lowervp;		/* may be null */
	int docache;
{
	int error;
	struct union_node *un;
	struct union_node **pp;
	struct vnode *xlowervp = NULLVP;
	struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
	int hash;
	int vflag;
	int try;
	struct union_node *unp;

	if (uppervp == NULLVP && lowervp == NULLVP)
		panic("union: unidentifiable allocation");

	if (uppervp && lowervp && (uppervp->v_type != lowervp->v_type)) {
		xlowervp = lowervp;
		lowervp = NULLVP;
	}
	/* detect the root vnode (and aliases) */
	vflag = 0;
	if ((uppervp == um->um_uppervp) &&
	    ((lowervp == NULLVP) || lowervp == um->um_lowervp)) {
		if (lowervp == NULLVP) {
			lowervp = um->um_lowervp;
			if (lowervp != NULLVP)
				VREF(lowervp);
		}
		vflag = VROOT;
	}

loop:
	if (!docache) {
		un = 0;
	} else for (try = 0; try < 3; try++) {
		switch (try) {
		case 0:
			if (lowervp == NULLVP)
				continue;
			hash = UNION_HASH(uppervp, lowervp);
			break;

		case 1:
			if (uppervp == NULLVP)
				continue;
			hash = UNION_HASH(uppervp, NULLVP);
			break;

		case 2:
			if (lowervp == NULLVP)
				continue;
			hash = UNION_HASH(NULLVP, lowervp);
			break;
		}

		while (union_list_lock(hash))
			continue;

		for (un = unhead[hash].lh_first; un != 0;
					un = un->un_cache.le_next) {
			if ((un->un_lowervp == lowervp ||
			     un->un_lowervp == NULLVP) &&
			    (un->un_uppervp == uppervp ||
			     un->un_uppervp == NULLVP) &&
			    (UNIONTOV(un)->v_mount == mp)) {
				if (vget(UNIONTOV(un), 0,
				    cnp ? cnp->cn_proc : NULL)) {
					union_list_unlock(hash);
					goto loop;
				}
				break;
			}
		}

		union_list_unlock(hash);

		if (un)
			break;
	}
	if (un) {
		/*
		 * Obtain a lock on the union_node.
		 * uppervp is locked, though un->un_uppervp
		 * may not be.  this doesn't break the locking
		 * hierarchy since in the case that un->un_uppervp
		 * is not yet locked it will be vrele'd and replaced
		 * with uppervp.
		 */

		if ((dvp != NULLVP) && (uppervp == dvp)) {
			/*
			 * Access ``.'', so (un) will already
			 * be locked.  Since this process has
			 * the lock on (uppervp) no other
			 * process can hold the lock on (un).
			 */
			if ((un->un_flags & UN_LOCKED) == 0)
				panic("union: . not locked");
			else if (current_proc() && un->un_pid != current_proc()->p_pid &&
				    un->un_pid > -1 && current_proc()->p_pid > -1)
				panic("union: allocvp not lock owner");
		} else {
			if (un->un_flags & UN_LOCKED) {
				vrele(UNIONTOV(un));
				un->un_flags |= UN_WANT;
				sleep((caddr_t) &un->un_flags, PINOD);
				goto loop;
			}
			un->un_flags |= UN_LOCKED;

			un->un_pid = current_proc()->p_pid;
		}

		/*
		 * At this point, the union_node is locked,
		 * un->un_uppervp may not be locked, and uppervp
		 * is locked or nil.
		 */

		/*
		 * Save information about the upper layer.
		 */
		if (uppervp != un->un_uppervp) {
			union_newupper(un, uppervp);
		} else if (uppervp) {
			vrele(uppervp);
		}

		if (un->un_uppervp) {
			un->un_flags |= UN_ULOCK;
			un->un_flags &= ~UN_KLOCK;
		}

		/*
		 * Save information about the lower layer.
		 * This needs to keep track of pathname
		 * and directory information which union_vn_create
		 * might need.
		 */
		if (lowervp != un->un_lowervp) {
			union_newlower(un, lowervp);
			if (cnp && (lowervp != NULLVP)) {
				un->un_hash = cnp->cn_hash;
				MALLOC(un->un_path, caddr_t, cnp->cn_namelen+1,
						M_TEMP, M_WAITOK);
				bcopy(cnp->cn_nameptr, un->un_path,
						cnp->cn_namelen);
				un->un_path[cnp->cn_namelen] = '\0';
				VREF(dvp);
				un->un_dirvp = dvp;
			}
		} else if (lowervp) {
			vrele(lowervp);
		}
		*vpp = UNIONTOV(un);
		return (0);
	}
	if (docache) {
		/*
		 * otherwise lock the vp list while we call getnewvnode
		 * since that can block.
		 */
		hash = UNION_HASH(uppervp, lowervp);

		if (union_list_lock(hash))
			goto loop;
	}

	MALLOC(unp, void *, sizeof(struct union_node), M_TEMP, M_WAITOK);
	error = getnewvnode(VT_UNION, mp, union_vnodeop_p, vpp);
	if (error) {
		FREE(unp, M_TEMP);
		if (uppervp) {
			if (dvp == uppervp)
				vrele(uppervp);
			else
				vput(uppervp);
		}
		if (lowervp)
			vrele(lowervp);

		goto out;
	}

	(*vpp)->v_data = unp;
	(*vpp)->v_flag |= vflag;
	if (uppervp)
		(*vpp)->v_type = uppervp->v_type;
	else
		(*vpp)->v_type = lowervp->v_type;

	if ((*vpp)->v_type == VREG)
		ubc_info_init(*vpp);

	un = VTOUNION(*vpp);
	un->un_vnode = *vpp;
	un->un_uppervp = uppervp;
	un->un_uppersz = VNOVAL;
	un->un_lowervp = lowervp;
	un->un_lowersz = VNOVAL;
	un->un_pvp = undvp;
	if (undvp != NULLVP)
		VREF(undvp);
	un->un_dircache = 0;
	un->un_openl = 0;
	un->un_flags = UN_LOCKED;
	if (un->un_uppervp)
		un->un_flags |= UN_ULOCK;

	un->un_pid = current_proc()->p_pid;

	if (cnp && (lowervp != NULLVP)) {
		un->un_hash = cnp->cn_hash;
		un->un_path = _MALLOC(cnp->cn_namelen+1, M_TEMP, M_WAITOK);
		bcopy(cnp->cn_nameptr, un->un_path, cnp->cn_namelen);
		un->un_path[cnp->cn_namelen] = '\0';
		VREF(dvp);
		un->un_dirvp = dvp;
	} else {
		un->un_hash = 0;
		un->un_path = 0;
		un->un_dirvp = 0;
	}

	if (docache) {
		LIST_INSERT_HEAD(&unhead[hash], un, un_cache);
		un->un_flags |= UN_CACHED;
	}

	if (xlowervp)
		vrele(xlowervp);

out:
	if (docache)
		union_list_unlock(hash);

	return (error);
}
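
/*
 * Illustrative sketch (not from the original source): a caller that has
 * resolved an object in the upper and/or lower layer wraps the pair in a
 * union vnode roughly as follows.  The names (unionvp, mp, undvp, dvp,
 * cnp, uppervp, lowervp, vpp) are hypothetical locals:
 */
#if 0	/* example only */
	struct vnode *unionvp;

	error = union_allocvp(&unionvp, mp, undvp, dvp, cnp,
			uppervp, lowervp, 1);	/* docache=1: enter/find it in the hash */
	if (error == 0)
		*vpp = unionvp;			/* returned referenced and locked */
#endif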
int
union_freevp(vp)
	struct vnode *vp;
{
	struct union_node *un = VTOUNION(vp);

	if (un->un_flags & UN_CACHED) {
		un->un_flags &= ~UN_CACHED;
		LIST_REMOVE(un, un_cache);
	}

	if (un->un_pvp != NULLVP)
		vrele(un->un_pvp);
	if (un->un_uppervp != NULLVP)
		vrele(un->un_uppervp);
	if (un->un_lowervp != NULLVP)
		vrele(un->un_lowervp);
	if (un->un_dirvp != NULLVP)
		vrele(un->un_dirvp);
	if (un->un_path)
		_FREE(un->un_path, M_TEMP);

	FREE(vp->v_data, M_TEMP);
	vp->v_data = 0;

	return (0);
}
/*
 * copyfile.  copy the vnode (fvp) to the vnode (tvp)
 * using a sequence of reads and writes.  both (fvp)
 * and (tvp) are locked on entry and exit.
 */
int
union_copyfile(fvp, tvp, cred, p)
	struct vnode *fvp;
	struct vnode *tvp;
	struct ucred *cred;
	struct proc *p;
{
	caddr_t buf;
	struct uio uio;
	struct iovec iov;
	int error = 0;

	/*
	 * strategy:
	 * allocate a buffer of size MAXPHYSIO.
	 * loop doing reads and writes, keeping track
	 * of the current uio offset.
	 * give up at the first sign of trouble.
	 */

	uio.uio_procp = p;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_offset = 0;

	VOP_UNLOCK(fvp, 0, p);				/* XXX */
	VOP_LEASE(fvp, p, cred, LEASE_READ);
	vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, p);	/* XXX */
	VOP_UNLOCK(tvp, 0, p);				/* XXX */
	VOP_LEASE(tvp, p, cred, LEASE_WRITE);
	vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, p);	/* XXX */

	buf = _MALLOC(MAXPHYSIO, M_TEMP, M_WAITOK);

	/* ugly loop follows... */
	do {
		off_t offset = uio.uio_offset;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		iov.iov_base = buf;
		iov.iov_len = MAXPHYSIO;
		uio.uio_resid = iov.iov_len;
		uio.uio_rw = UIO_READ;
		error = VOP_READ(fvp, &uio, 0, cred);

		if (error == 0) {
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			iov.iov_base = buf;
			iov.iov_len = MAXPHYSIO - uio.uio_resid;
			uio.uio_offset = offset;
			uio.uio_rw = UIO_WRITE;
			uio.uio_resid = iov.iov_len;

			if (uio.uio_resid == 0)
				break;

			do {
				error = VOP_WRITE(tvp, &uio, 0, cred);
			} while ((uio.uio_resid > 0) && (error == 0));
		}

	} while (error == 0);

	_FREE(buf, M_TEMP);
	return (error);
}
/*
 * (un) is assumed to be locked on entry and remains
 * locked on exit.
 */
int
union_copyup(un, docopy, cred, p)
	struct union_node *un;
	int docopy;
	struct ucred *cred;
	struct proc *p;
{
	int error;
	struct vnode *lvp, *uvp;

	error = union_vn_create(&uvp, un, p);
	if (error)
		return (error);

	/* at this point, uppervp is locked */
	union_newupper(un, uvp);
	un->un_flags |= UN_ULOCK;

	lvp = un->un_lowervp;

	if (docopy) {
		/*
		 * XX - should not ignore errors
		 * from VOP_CLOSE
		 */
		vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY, p);
		error = VOP_OPEN(lvp, FREAD, cred, p);
		if (error == 0) {
			error = union_copyfile(lvp, uvp, cred, p);
			VOP_UNLOCK(lvp, 0, p);
			(void) VOP_CLOSE(lvp, FREAD, cred, p);
		}
#ifdef UNION_DIAGNOSTIC
		if (error == 0)
			uprintf("union: copied up %s\n", un->un_path);
#endif
	}

	un->un_flags &= ~UN_ULOCK;
	VOP_UNLOCK(uvp, 0, p);
	union_vn_close(uvp, FWRITE, cred, p);
	vn_lock(uvp, LK_EXCLUSIVE | LK_RETRY, p);
	un->un_flags |= UN_ULOCK;

	/*
	 * Subsequent IOs will go to the top layer, so
	 * call close on the lower vnode and open on the
	 * upper vnode to ensure that the filesystem keeps
	 * its references counts right.  This doesn't do
	 * the right thing with (cred) and (FREAD) though.
	 * Ignoring error returns is not right, either.
	 */
	if (error == 0) {
		int i;

		for (i = 0; i < un->un_openl; i++) {
			(void) VOP_CLOSE(lvp, FREAD, cred, p);
			(void) VOP_OPEN(uvp, FREAD, cred, p);
		}
		un->un_openl = 0;
	}

	return (error);
}
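
/*
 * Illustrative sketch (not from the original source): copyup is normally
 * triggered when a vnode that exists only in the lower layer is opened for
 * writing.  The open path would do something like the following; the names
 * (un, mode, cred, p) are hypothetical and the condition is simplified:
 */
#if 0	/* example only */
	if ((un->un_uppervp == NULLVP) && (mode & FWRITE) &&
	    (un->un_lowervp->v_type == VREG))
		error = union_copyup(un, 1, cred, p);	/* docopy=1: copy the data too */
#endif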
static int
union_relookup(um, dvp, vpp, cnp, cn, path, pathlen)
	struct union_mount *um;
	struct vnode *dvp;
	struct vnode **vpp;
	struct componentname *cnp;
	struct componentname *cn;
	char *path;
	int pathlen;
{
	int error;

	/*
	 * A new componentname structure must be faked up because
	 * there is no way to know where the upper level cnp came
	 * from or what it is being used for.  This must duplicate
	 * some of the work done by NDINIT, some of the work done
	 * by namei, some of the work done by lookup and some of
	 * the work done by VOP_LOOKUP when given a CREATE flag.
	 * Conclusion: Horrible.
	 *
	 * The pathname buffer will be FREEed by VOP_MKDIR.
	 */
	cn->cn_namelen = pathlen;
	cn->cn_pnbuf = _MALLOC_ZONE(cn->cn_namelen+1, M_NAMEI, M_WAITOK);
	cn->cn_pnlen = cn->cn_namelen+1;
	bcopy(path, cn->cn_pnbuf, cn->cn_namelen);
	cn->cn_pnbuf[cn->cn_namelen] = '\0';

	cn->cn_nameiop = CREATE;
	cn->cn_flags = (LOCKPARENT|HASBUF|SAVENAME|SAVESTART|ISLASTCN);
	cn->cn_proc = cnp->cn_proc;
	if (um->um_op == UNMNT_ABOVE)
		cn->cn_cred = cnp->cn_cred;
	else
		cn->cn_cred = um->um_cred;
	cn->cn_nameptr = cn->cn_pnbuf;
	cn->cn_hash = cnp->cn_hash;
	cn->cn_consume = cnp->cn_consume;

	VREF(dvp);
	error = relookup(dvp, vpp, cn);
	if (!error)
		vrele(dvp);

	return (error);
}
/*
 * Create a shadow directory in the upper layer.
 * The new vnode is returned locked.
 *
 * (um) points to the union mount structure for access to the
 * mounting process's credentials.
 * (dvp) is the directory in which to create the shadow directory.
 * it is unlocked on entry and exit.
 * (cnp) is the componentname to be created.
 * (vpp) is the returned newly created shadow directory, which
 * is returned locked.
 */
int
union_mkshadow(um, dvp, cnp, vpp)
	struct union_mount *um;
	struct vnode *dvp;
	struct componentname *cnp;
	struct vnode **vpp;
{
	int error;
	struct vattr va;
	struct proc *p = cnp->cn_proc;
	struct componentname cn;

	error = union_relookup(um, dvp, vpp, cnp, &cn,
			cnp->cn_nameptr, cnp->cn_namelen);
	if (error)
		return (error);

	if (*vpp) {
		VOP_ABORTOP(dvp, &cn);
		VOP_UNLOCK(dvp, 0, p);
		vrele(*vpp);
		*vpp = NULLVP;
		return (EEXIST);
	}

	/*
	 * policy: when creating the shadow directory in the
	 * upper layer, create it owned by the user who did
	 * the mount, group from parent directory, and mode
	 * 777 modified by umask (ie mostly identical to the
	 * mkdir syscall).  (jsp, kb)
	 */

	VATTR_NULL(&va);
	va.va_type = VDIR;
	va.va_mode = um->um_cmode;

	/* VOP_LEASE: dvp is locked */
	VOP_LEASE(dvp, p, cn.cn_cred, LEASE_WRITE);

	error = VOP_MKDIR(dvp, vpp, &cn, &va);
	return (error);
}
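
/*
 * Illustrative sketch (not from the original source): before a file can be
 * copied up, every directory on its path must already exist in the upper
 * layer.  The lookup path creates missing shadow directories roughly as
 * follows; the names (uppervp, lowervp, upperdvp, um, cnp) are hypothetical
 * and the condition is simplified:
 */
#if 0	/* example only */
	/* lower-layer directory exists but has no upper-layer shadow yet */
	if (uppervp == NULLVP && lowervp != NULLVP && lowervp->v_type == VDIR)
		error = union_mkshadow(um, upperdvp, cnp, &uppervp);
#endif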
/*
 * Create a whiteout entry in the upper layer.
 *
 * (um) points to the union mount structure for access to the
 * mounting process's credentials.
 * (dvp) is the directory in which to create the whiteout.
 * it is locked on entry and exit.
 * (cnp) is the componentname to be created.
 */
int
union_mkwhiteout(um, dvp, cnp, path)
	struct union_mount *um;
	struct vnode *dvp;
	struct componentname *cnp;
	char *path;
{
	int error;
	struct proc *p = cnp->cn_proc;
	struct vnode *wvp;
	struct componentname cn;

	VOP_UNLOCK(dvp, 0, p);
	error = union_relookup(um, dvp, &wvp, cnp, &cn, path, strlen(path));
	if (error) {
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);
	}

	if (wvp) {
		VOP_ABORTOP(dvp, &cn);
		vrele(wvp);
		return (EEXIST);
	}

	/* VOP_LEASE: dvp is locked */
	VOP_LEASE(dvp, p, p->p_ucred, LEASE_WRITE);

	error = VOP_WHITEOUT(dvp, &cn, CREATE);
	if (error)
		VOP_ABORTOP(dvp, &cn);

	return (error);
}
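
/*
 * Illustrative sketch (not from the original source): whiteouts are created
 * by the remove/rmdir paths when union_dowhiteout() (below) reports that the
 * object being removed is still visible in the lower layer.  The names
 * (un, um, dvp, cnp, p) are hypothetical locals:
 */
#if 0	/* example only */
	if (union_dowhiteout(un, cnp->cn_cred, p))
		error = union_mkwhiteout(um, dvp, cnp, un->un_path);
#endif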
/*
 * union_vn_create: creates and opens a new shadow file
 * on the upper union layer.  this function is similar
 * in spirit to calling vn_open but it avoids calling namei().
 * the problem with calling namei is that a) it locks too many
 * things, and b) it doesn't start at the "right" directory,
 * whereas relookup is told where to start.
 */
int
union_vn_create(vpp, un, p)
	struct vnode **vpp;
	struct union_node *un;
	struct proc *p;
{
	struct vnode *vp;
	struct ucred *cred = p->p_ucred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int fmode = FFLAGS(O_WRONLY|O_CREAT|O_TRUNC|O_EXCL);
	int error;
	int cmode = UN_FILEMODE & ~p->p_fd->fd_cmask;
	struct componentname cn;

	/*
	 * Build a new componentname structure (for the same
	 * reasons outlined in union_mkshadow).
	 * The difference here is that the file is owned by
	 * the current user, rather than by the person who
	 * did the mount, since the current user needs to be
	 * able to write the file (that's why it is being
	 * copied in the first place).
	 */
	cn.cn_namelen = strlen(un->un_path);
	cn.cn_pnbuf = (caddr_t) _MALLOC_ZONE(cn.cn_namelen+1,
			M_NAMEI, M_WAITOK);
	cn.cn_pnlen = cn.cn_namelen+1;
	bcopy(un->un_path, cn.cn_pnbuf, cn.cn_namelen+1);
	cn.cn_nameiop = CREATE;
	cn.cn_flags = (LOCKPARENT|HASBUF|SAVENAME|SAVESTART|ISLASTCN);
	cn.cn_proc = p;
	cn.cn_cred = p->p_ucred;
	cn.cn_nameptr = cn.cn_pnbuf;
	cn.cn_hash = un->un_hash;

	VREF(un->un_dirvp);
	if (error = relookup(un->un_dirvp, &vp, &cn))
		return (error);
	vrele(un->un_dirvp);

	if (vp) {
		VOP_ABORTOP(un->un_dirvp, &cn);
		if (un->un_dirvp == vp)
			vrele(un->un_dirvp);
		else
			vput(un->un_dirvp);
		vrele(vp);
		return (EEXIST);
	}

	/*
	 * Good - there was no race to create the file
	 * so go ahead and create it.  The permissions
	 * on the file will be 0666 modified by the
	 * current user's umask.  Access to the file, while
	 * it is unioned, will require access to the top *and*
	 * bottom files.  Access when not unioned will simply
	 * require access to the top-level file.
	 * TODO: confirm choice of access permissions.
	 */
	VATTR_NULL(vap);
	vap->va_type = VREG;
	vap->va_mode = cmode;
	VOP_LEASE(un->un_dirvp, p, cred, LEASE_WRITE);
	if (error = VOP_CREATE(un->un_dirvp, &vp, &cn, vap))
		return (error);

	if (error = VOP_OPEN(vp, fmode, cred, p)) {
		vput(vp);
		return (error);
	}

	if (++vp->v_writecount <= 0)
		panic("union: v_writecount");
	*vpp = vp;
	return (0);
}
int
union_vn_close(vp, fmode, cred, p)
	struct vnode *vp;
	int fmode;
	struct ucred *cred;
	struct proc *p;
{

	if (fmode & FWRITE)
		--vp->v_writecount;
	return (VOP_CLOSE(vp, fmode, cred, p));
}
void
union_removed_upper(un)
	struct union_node *un;
{
	struct proc *p = current_proc();	/* XXX */

	union_newupper(un, NULLVP);

	if (un->un_flags & UN_CACHED) {
		un->un_flags &= ~UN_CACHED;
		LIST_REMOVE(un, un_cache);
	}

	if (un->un_flags & UN_ULOCK) {
		un->un_flags &= ~UN_ULOCK;
		VOP_UNLOCK(un->un_uppervp, 0, p);
	}
}
struct vnode *
union_lowervp(vp)
	struct vnode *vp;
{
	struct union_node *un = VTOUNION(vp);

	if ((un->un_lowervp != NULLVP) &&
	    (vp->v_type == un->un_lowervp->v_type)) {
		if (vget(un->un_lowervp, 0, current_proc()) == 0)
			return (un->un_lowervp);
	}

	return (NULLVP);
}
/*
 * determine whether a whiteout is needed
 * during a remove/rmdir operation.
 */
int
union_dowhiteout(un, cred, p)
	struct union_node *un;
	struct ucred *cred;
	struct proc *p;
{
	struct vattr va;

	if (un->un_lowervp != NULLVP)
		return (1);

	if (VOP_GETATTR(un->un_uppervp, &va, cred, p) == 0 &&
	    (va.va_flags & OPAQUE))
		return (1);

	return (0);
}
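
/*
 * Illustrative sketch (not from the original source): the remove path
 * consults union_dowhiteout() to decide between a plain removal in the
 * upper layer and a removal plus whiteout.  The names (un, dvp, vp, cnp, p)
 * are hypothetical locals and the flow is simplified:
 */
#if 0	/* example only */
	if (union_dowhiteout(un, cnp->cn_cred, p))
		cnp->cn_flags |= DOWHITEOUT;
	error = VOP_REMOVE(dvp, vp, cnp);
#endif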
static void
union_dircache_r(vp, vppp, cntp)
	struct vnode *vp;
	struct vnode ***vppp;
	int *cntp;
{
	struct union_node *un;

	if (vp->v_op != union_vnodeop_p) {
		if (vppp) {
			VREF(vp);
			*(*vppp)++ = vp;
			if (--(*cntp) == 0)
				panic("union: dircache table too small");
		} else {
			(*cntp)++;
		}

		return;
	}

	un = VTOUNION(vp);
	if (un->un_uppervp != NULLVP)
		union_dircache_r(un->un_uppervp, vppp, cntp);
	if (un->un_lowervp != NULLVP)
		union_dircache_r(un->un_lowervp, vppp, cntp);
}
struct vnode *
union_dircache(vp, p)
	struct vnode *vp;
	struct proc *p;
{
	int cnt;
	struct vnode *nvp;
	struct vnode **vpp;
	struct vnode **dircache;
	struct union_node *un;
	int error;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	dircache = VTOUNION(vp)->un_dircache;

	nvp = NULLVP;

	if (dircache == 0) {
		cnt = 0;
		union_dircache_r(vp, 0, &cnt);
		cnt++;
		dircache = (struct vnode **)
				_MALLOC(cnt * sizeof(struct vnode *),
					M_TEMP, M_WAITOK);
		vpp = dircache;
		union_dircache_r(vp, &vpp, &cnt);
		*vpp = NULLVP;
		vpp = dircache + 1;
	} else {
		vpp = dircache;
		do {
			if (*vpp++ == VTOUNION(vp)->un_uppervp)
				break;
		} while (*vpp != NULLVP);
	}

	if (*vpp == NULLVP)
		goto out;

	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, p);
	VREF(*vpp);
	error = union_allocvp(&nvp, vp->v_mount, NULLVP, NULLVP, 0, *vpp, NULLVP, 0);
	if (error)
		goto out;

	VTOUNION(vp)->un_dircache = 0;
	un = VTOUNION(nvp);
	un->un_dircache = dircache;

out:
	VOP_UNLOCK(vp, 0, p);
	return (nvp);
}
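
/*
 * Illustrative sketch (not from the original source): union_dircache() is
 * used by the readdir path to walk the stack of directories one layer at a
 * time; each call hands back a union vnode layered on the next directory in
 * the cached table.  The names (dirvp, vp, p) are hypothetical locals:
 */
#if 0	/* example only */
	struct vnode *dirvp = union_dircache(vp, p);

	if (dirvp != NULLVP) {
		/* read entries from dirvp, then release it when done */
	}
#endif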