/*
 * Copyright (c) 2006 Apple Computer, Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1994 Jan-Simon Pendry
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_subr.c	8.20 (Berkeley) 5/20/95
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/vnode_internal.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/filedesc.h>
#include <sys/queue.h>
#include <sys/mount_internal.h>
#include <sys/uio_internal.h>
#include <miscfs/union/union.h>
/* must be power of two, otherwise change UNION_HASH() */

/* unsigned int ... */
#define UNION_HASH(u, l) \
	(((((unsigned long) (u)) + ((unsigned long) l)) >> 8) & (NHASH-1))
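/*
 * Editorial note (not part of the original source): UNION_HASH() folds the
 * upper and lower vnode pointers into one bucket index, shifting right by 8
 * to discard the low-order bits, which vary little between allocations.  A
 * minimal usage sketch, assuming a pair of vnode pointers already in hand:
 *
 *	int hash = UNION_HASH(uppervp, lowervp);
 *	while (union_list_lock(hash))
 *		continue;
 *	... search or update unhead[hash] ...
 *	union_list_unlock(hash);
 */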
static LIST_HEAD(unhead, union_node) unhead[NHASH];
static int unvplock[NHASH];
	for (i = 0; i < NHASH; i++)
		LIST_INIT(&unhead[i]);
	bzero((caddr_t) unvplock, sizeof(unvplock));
	if (unvplock[ix] & UN_LOCKED) {
		unvplock[ix] |= UN_WANT;
		sleep((caddr_t) &unvplock[ix], PINOD);
		return (1);
	}

	unvplock[ix] |= UN_LOCKED;
union_list_unlock(ix)
	int ix;
{
	unvplock[ix] &= ~UN_LOCKED;

	if (unvplock[ix] & UN_WANT) {
		unvplock[ix] &= ~UN_WANT;
		wakeup((caddr_t) &unvplock[ix]);
	}
}
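/*
 * Editorial note (not part of the original source): union_list_lock() and
 * union_list_unlock() form a simple per-bucket sleep lock.  A nonzero return
 * from union_list_lock() means the caller slept and must retry, which is why
 * callers below spin with "while (union_list_lock(hash)) continue;"; the
 * UN_WANT bit records that the wakeup() above is needed on release.
 */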
union_updatevp(un, uppervp, lowervp)
	struct union_node *un;
	struct vnode *uppervp;
	struct vnode *lowervp;
{
	int ohash = UNION_HASH(un->un_uppervp, un->un_lowervp);
	int nhash = UNION_HASH(uppervp, lowervp);
	int docache = (lowervp != NULLVP || uppervp != NULLVP);
	/*
	 * Ensure locking is ordered from lower to higher
	 * to avoid deadlocks.
	 */
	while (union_list_lock(lhash))
		continue;

	while (union_list_lock(uhash))
		continue;
	if (ohash != nhash || !docache) {
		if (un->un_flags & UN_CACHED) {
			un->un_flags &= ~UN_CACHED;
			LIST_REMOVE(un, un_cache);
		}
	}

	union_list_unlock(ohash);
	if (un->un_lowervp != lowervp) {
		if (un->un_lowervp) {
			vnode_put(un->un_lowervp);
			_FREE(un->un_path, M_TEMP);
			vnode_put(un->un_dirvp);
			un->un_dirvp = NULLVP;
		}
		un->un_lowervp = lowervp;
		un->un_lowersz = VNOVAL;
	}
	if (un->un_uppervp != uppervp) {
		vnode_put(un->un_uppervp);
		un->un_uppervp = uppervp;
		un->un_uppersz = VNOVAL;
	}
	if (docache && (ohash != nhash)) {
		LIST_INSERT_HEAD(&unhead[nhash], un, un_cache);
		un->un_flags |= UN_CACHED;
	}

	union_list_unlock(nhash);
union_newlower(un, lowervp)
	struct union_node *un;
	struct vnode *lowervp;
{
	union_updatevp(un, un->un_uppervp, lowervp);
}
union_newupper(un, uppervp)
	struct union_node *un;
	struct vnode *uppervp;
{
	union_updatevp(un, uppervp, un->un_lowervp);
}
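/*
 * Editorial note (not part of the original source): both wrappers above
 * funnel through union_updatevp(), so rehashing the node into the right
 * bucket, releasing the vnode reference being replaced, and resetting the
 * cached size to VNOVAL all happen in one place.
 */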
/*
 * Keep track of size changes in the underlying vnodes.
 * If the size changes, then callback to the vm layer
 * giving priority to the upper layer size.
 */
union_newsize(vp, uppersz, lowersz)
	struct vnode *vp;
	off_t uppersz, lowersz;
{
	struct union_node *un;
	/* only interested in regular files */
	if (vp->v_type != VREG)
		return;
	if ((uppersz != VNOVAL) && (un->un_uppersz != uppersz)) {
		un->un_uppersz = uppersz;
	}

	if ((lowersz != VNOVAL) && (un->un_lowersz != lowersz)) {
		un->un_lowersz = lowersz;
	}
#ifdef UNION_DIAGNOSTIC
	printf("union: %s size now %ld\n",
	    uppersz != VNOVAL ? "upper" : "lower", (long) sz);
#endif
/*
 * allocate a union_node/vnode pair. the vnode is
 * referenced and locked. the new vnode is returned
 * via (vpp). (mp) is the mountpoint of the union filesystem,
 * (dvp) is the parent directory where the upper layer object
 * should exist (but doesn't) and (cnp) is the componentname
 * information which is partially copied to allow the upper
 * layer object to be created at a later time. (uppervp)
 * and (lowervp) reference the upper and lower layer objects
 * being mapped. either, but not both, can be nil.
 * if supplied, (uppervp) is locked.
 * the reference is either maintained in the new union_node
 * object which is allocated, or they are vnode_put'd.
 *
 * all union_nodes are maintained on a singly-linked
 * list. new nodes are only allocated when they cannot
 * be found on this list. entries on the list are
 * removed when the vfs reclaim entry is called.
 *
 * a single lock is kept for the entire list. this is
 * needed because the getnewvnode() function can block
 * waiting for a vnode to become free, in which case there
 * may be more than one process trying to get the same
 * vnode. this lock is only taken if we are going to
 * call getnewvnode, since the kernel itself is single-threaded.
 *
 * if an entry is found on the list, then call vnode_get() to
 * take a reference. this is done because there may be
 * zero references to it and so it needs to be removed from
 * the vnode free list.
 */
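/*
 * Editorial note (not part of the original source): a cached union_node may
 * have been entered under a hash computed from only one of its layers, so
 * the lookup below tries up to three buckets -- (uppervp, lowervp),
 * (uppervp, NULLVP) and (NULLVP, lowervp) -- before concluding that a new
 * node must be allocated.
 */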
union_allocvp(vpp, mp, undvp, dvp, cnp, uppervp, lowervp, docache)
	struct vnode **vpp;
	struct mount *mp;
	struct vnode *undvp;		/* parent union vnode */
	struct vnode *dvp;		/* may be null */
	struct componentname *cnp;	/* may be null */
	struct vnode *uppervp;		/* may be null */
	struct vnode *lowervp;		/* may be null */
	int docache;
{
	struct union_node *un;
	struct union_node **pp;
	struct vnode *xlowervp = NULLVP;
	struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
	struct union_node *unp;
	struct vnode_fsparam vfsp;
	if (uppervp == NULLVP && lowervp == NULLVP)
		panic("union: unidentifiable allocation");
	if (uppervp && lowervp && (uppervp->v_type != lowervp->v_type)) {
		xlowervp = lowervp;
		lowervp = NULLVP;
	}
	/* detect the root vnode (and aliases) */
	if ((uppervp == um->um_uppervp) &&
	    ((lowervp == NULLVP) || lowervp == um->um_lowervp)) {
		if (lowervp == NULLVP) {
			lowervp = um->um_lowervp;
			if (lowervp != NULLVP)
				vnode_get(lowervp);
		}
	} else for (try = 0; try < 3; try++) {
		switch (try) {
		case 0:
			if (lowervp == NULLVP)
				continue;
			hash = UNION_HASH(uppervp, lowervp);
			break;

		case 1:
			if (uppervp == NULLVP)
				continue;
			hash = UNION_HASH(uppervp, NULLVP);
			break;

		case 2:
			if (lowervp == NULLVP)
				continue;
			hash = UNION_HASH(NULLVP, lowervp);
			break;
		}

		while (union_list_lock(hash))
			continue;
		for (un = unhead[hash].lh_first; un != 0;
		    un = un->un_cache.le_next) {
			if ((un->un_lowervp == lowervp ||
			    un->un_lowervp == NULLVP) &&
			    (un->un_uppervp == uppervp ||
			    un->un_uppervp == NULLVP) &&
			    (UNIONTOV(un)->v_mount == mp)) {
				if (vnode_get(UNIONTOV(un))) {
					union_list_unlock(hash);
					goto loop;
				}
				break;
			}
		}

		union_list_unlock(hash);
	/*
	 * Obtain a lock on the union_node.
	 * uppervp is locked, though un->un_uppervp
	 * may not be. this doesn't break the locking
	 * hierarchy since in the case that un->un_uppervp
	 * is not yet locked it will be vnode_put'd and replaced
	 * with uppervp.
	 */
	if ((dvp != NULLVP) && (uppervp == dvp)) {
		/*
		 * Access ``.'', so (un) will already
		 * be locked. Since this process has
		 * the lock on (uppervp) no other
		 * process can hold the lock on (un).
		 */
		if ((un->un_flags & UN_LOCKED) == 0)
			panic("union: . not locked");
		else if (current_proc() && un->un_pid != current_proc()->p_pid &&
		    un->un_pid > -1 && current_proc()->p_pid > -1)
			panic("union: allocvp not lock owner");
	} else {
		if (un->un_flags & UN_LOCKED) {
			vnode_put(UNIONTOV(un));
			un->un_flags |= UN_WANT;
			sleep((caddr_t) &un->un_flags, PINOD);
			goto loop;
		}
		un->un_flags |= UN_LOCKED;

		un->un_pid = current_proc()->p_pid;
	}
	/*
	 * At this point, the union_node is locked,
	 * un->un_uppervp may not be locked, and uppervp
	 * is locked or nil.
	 */
	/*
	 * Save information about the upper layer.
	 */
	if (uppervp != un->un_uppervp) {
		union_newupper(un, uppervp);
	} else if (uppervp) {
		vnode_put(uppervp);
	}

	if (un->un_uppervp) {
		un->un_flags |= UN_ULOCK;
		un->un_flags &= ~UN_KLOCK;
	}
	/*
	 * Save information about the lower layer.
	 * This needs to keep track of pathname
	 * and directory information which union_vn_create
	 * might need.
	 */
	if (lowervp != un->un_lowervp) {
		union_newlower(un, lowervp);
		if (cnp && (lowervp != NULLVP)) {
			un->un_hash = cnp->cn_hash;
			MALLOC(un->un_path, caddr_t, cnp->cn_namelen+1,
			    M_TEMP, M_WAITOK);
			bcopy(cnp->cn_nameptr, un->un_path,
			    cnp->cn_namelen);
			un->un_path[cnp->cn_namelen] = '\0';
		}
	} else if (lowervp) {
		vnode_put(lowervp);
	}
	/*
	 * otherwise lock the vp list while we call getnewvnode
	 * since that can block.
	 */
	hash = UNION_HASH(uppervp, lowervp);

	if (union_list_lock(hash))
		goto loop;
	MALLOC(unp, void *, sizeof(struct union_node), M_TEMP, M_WAITOK);

	if (uppervp)
		vtype = uppervp->v_type;
	else
		vtype = lowervp->v_type;
	//bzero(&vfsp, sizeof(struct vnode_fsparam));
	vfsp.vnfs_vtype = vtype;
	vfsp.vnfs_str = "unionfs";
	vfsp.vnfs_fsnode = unp;
	vfsp.vnfs_vops = union_vnodeop_p;
	vfsp.vnfs_filesize = 0;
	vfsp.vnfs_flags = VNFS_NOCACHE | VNFS_CANTCACHE;
	vfsp.vnfs_marksystem = 0;
	vfsp.vnfs_markroot = markroot;

	error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, vpp);
	(*vpp)->v_tag = VT_UNION;

	un->un_uppervp = uppervp;
	un->un_uppersz = VNOVAL;
	un->un_lowervp = lowervp;
	un->un_lowersz = VNOVAL;

	un->un_flags = UN_LOCKED;
	if (un->un_uppervp)
		un->un_flags |= UN_ULOCK;

	un->un_pid = current_proc()->p_pid;
	if (cnp && (lowervp != NULLVP)) {
		un->un_hash = cnp->cn_hash;
		un->un_path = _MALLOC(cnp->cn_namelen+1, M_TEMP, M_WAITOK);
		bcopy(cnp->cn_nameptr, un->un_path, cnp->cn_namelen);
		un->un_path[cnp->cn_namelen] = '\0';
	}

	LIST_INSERT_HEAD(&unhead[hash], un, un_cache);
	un->un_flags |= UN_CACHED;

	union_list_unlock(hash);
	struct union_node *un = VTOUNION(vp);

	if (un->un_flags & UN_CACHED) {
		un->un_flags &= ~UN_CACHED;
		LIST_REMOVE(un, un_cache);
	}

	if (un->un_pvp != NULLVP)
		vnode_put(un->un_pvp);
	if (un->un_uppervp != NULLVP)
		vnode_put(un->un_uppervp);
	if (un->un_lowervp != NULLVP)
		vnode_put(un->un_lowervp);
	if (un->un_dirvp != NULLVP)
		vnode_put(un->un_dirvp);

	_FREE(un->un_path, M_TEMP);

	FREE(vp->v_data, M_TEMP);
/*
 * copyfile. copy the vnode (fvp) to the vnode (tvp)
 * using a sequence of reads and writes. both (fvp)
 * and (tvp) are locked on entry and exit.
 */
union_copyfile(struct vnode *fvp, struct vnode *tvp, kauth_cred_t cred,
	struct proc *p)
{
	struct vfs_context context;
	/*
	 * allocate a buffer of size MAXPHYSIO.
	 * loop doing reads and writes, keeping track
	 * of the current uio offset.
	 * give up at the first sign of trouble.
	 */
	context.vc_ucred = cred;
#if 1	/* LP64todo - can't use new segment flags until the drivers are ready */
	uio.uio_segflg = UIO_SYSSPACE;
#else
	uio.uio_segflg = UIO_SYSSPACE32;
#endif

	bufp = _MALLOC(MAXPHYSIO, M_TEMP, M_WAITOK);
	/* ugly loop follows... */
	do {
		off_t offset = uio.uio_offset;

		uio.uio_iovs.iov32p = &iov;
		iov.iov_base = (uintptr_t)bufp;
		iov.iov_len = MAXPHYSIO;
		uio_setresid(&uio, iov.iov_len);
		uio.uio_rw = UIO_READ;
		error = VNOP_READ(fvp, &uio, 0, &context);

		do {
			uio.uio_iovs.iov32p = &iov;
			iov.iov_base = (uintptr_t)bufp;
			iov.iov_len = MAXPHYSIO - uio_resid(&uio);
			uio.uio_offset = offset;
			uio.uio_rw = UIO_WRITE;
			uio_setresid(&uio, iov.iov_len);

			if (uio_resid(&uio) == 0)
				break;

			error = VNOP_WRITE(tvp, &uio, 0, &context);
		} while ((uio_resid(&uio) > 0) && (error == 0));
	} while (error == 0);
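/*
 * Editorial note (not part of the original source): each pass of the outer
 * loop reads up to MAXPHYSIO bytes from fvp, rewinds uio_offset, and writes
 * back exactly what the read produced (MAXPHYSIO minus the residual count);
 * the inner loop repeats the write until it is fully consumed or an error
 * occurs, and a read that returns no data leaves nothing for the writer to do.
 */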
/*
 * (un) is assumed to be locked on entry and remains
 * locked on exit.
 */
union_copyup(struct union_node *un, int docopy, kauth_cred_t cred,
	struct proc *p)
{
	struct vnode *lvp, *uvp;
	struct vfs_context context;

	error = union_vn_create(&uvp, un, p);

	context.vc_ucred = cred;
	/* at this point, uppervp is locked */
	union_newupper(un, uvp);
	un->un_flags |= UN_ULOCK;

	lvp = un->un_lowervp;
	/*
	 * XX - should not ignore errors
	 */
	error = VNOP_OPEN(lvp, FREAD, &context);
	if (error == 0) {
		error = union_copyfile(lvp, uvp, cred, p);
		(void) VNOP_CLOSE(lvp, FREAD, &context);
	}
#ifdef UNION_DIAGNOSTIC
	uprintf("union: copied up %s\n", un->un_path);
#endif

	un->un_flags &= ~UN_ULOCK;
	union_vn_close(uvp, FWRITE, cred, p);
	un->un_flags |= UN_ULOCK;
	/*
	 * Subsequent IOs will go to the top layer, so
	 * call close on the lower vnode and open on the
	 * upper vnode to ensure that the filesystem keeps
	 * its reference counts right. This doesn't do
	 * the right thing with (cred) and (FREAD) though.
	 * Ignoring error returns is not right, either.
	 */
	for (i = 0; i < un->un_openl; i++) {
		(void) VNOP_CLOSE(lvp, FREAD, &context);
		(void) VNOP_OPEN(uvp, FREAD, &context);
	}
union_relookup(um, dvp, vpp, cnp, cn, path, pathlen)
	struct union_mount *um;
	struct componentname *cnp;
	struct componentname *cn;
	/*
	 * A new componentname structure must be faked up because
	 * there is no way to know where the upper level cnp came
	 * from or what it is being used for. This must duplicate
	 * some of the work done by NDINIT, some of the work done
	 * by namei, some of the work done by lookup and some of
	 * the work done by vnop_lookup when given a CREATE flag.
	 * Conclusion: Horrible.
	 */
	cn->cn_namelen = pathlen;
	cn->cn_pnbuf = _MALLOC_ZONE(cn->cn_namelen+1, M_NAMEI, M_WAITOK);
	cn->cn_pnlen = cn->cn_namelen+1;
	bcopy(path, cn->cn_pnbuf, cn->cn_namelen);
	cn->cn_pnbuf[cn->cn_namelen] = '\0';
	cn->cn_nameiop = CREATE;
	cn->cn_flags = (LOCKPARENT|HASBUF|SAVENAME|SAVESTART|ISLASTCN);
	cn->cn_proc = cnp->cn_proc;
	if (um->um_op == UNMNT_ABOVE)
		cn->cn_cred = cnp->cn_cred;
	else
		cn->cn_cred = um->um_cred;
	cn->cn_context = cnp->cn_context;	/* XXX !UNMNT_ABOVE case ??? */
	cn->cn_nameptr = cn->cn_pnbuf;
	cn->cn_hash = cnp->cn_hash;
	cn->cn_consume = cnp->cn_consume;
= relookup(dvp
, vpp
, cn
);
/*
 * Create a shadow directory in the upper layer.
 * The new vnode is returned locked.
 *
 * (um) points to the union mount structure for access to the
 * mounting process's credentials.
 * (dvp) is the directory in which to create the shadow directory.
 * it is unlocked on entry and exit.
 * (cnp) is the componentname to be created.
 * (vpp) is the returned newly created shadow directory, which
 * is returned locked.
 */
union_mkshadow(um, dvp, cnp, vpp)
	struct union_mount *um;
	struct vnode *dvp;
	struct componentname *cnp;
	struct vnode **vpp;
{
	struct vnode_attr va;
	struct componentname cn;
	error = union_relookup(um, dvp, vpp, cnp, &cn,
	    cnp->cn_nameptr, cnp->cn_namelen);
	/*
	 * policy: when creating the shadow directory in the
	 * upper layer, create it owned by the user who did
	 * the mount, group from parent directory, and mode
	 * 777 modified by umask (ie mostly identical to the
	 * mkdir syscall). (jsp, kb)
	 */
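	/*
	 * Editorial note (not part of the original source): in line with the
	 * policy above, only va_type and va_mode are set explicitly below;
	 * um_cmode presumably holds the 0777-modified-by-umask value captured
	 * at mount time, and owner/group are left for vn_create() and the
	 * underlying filesystem to assign.
	 */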
	VATTR_SET(&va, va_type, VDIR);
	VATTR_SET(&va, va_mode, um->um_cmode);

	error = vn_create(dvp, vpp, &cn, &va, 0, cnp->cn_context);
/*
 * Create a whiteout entry in the upper layer.
 *
 * (um) points to the union mount structure for access to the
 * mounting process's credentials.
 * (dvp) is the directory in which to create the whiteout.
 * it is locked on entry and exit.
 * (cnp) is the componentname to be created.
 */
union_mkwhiteout(um, dvp, cnp, path)
	struct union_mount *um;
	struct vnode *dvp;
	struct componentname *cnp;
	char *path;
{
	struct componentname cn;
	error = union_relookup(um, dvp, &wvp, cnp, &cn, path, strlen(path));
	error = VNOP_WHITEOUT(dvp, &cn, CREATE, cnp->cn_context);
/*
 * union_vn_create: creates and opens a new shadow file
 * on the upper union layer. this function is similar
 * in spirit to calling vn_open but it avoids calling namei().
 * the problem with calling namei is that a) it locks too many
 * things, and b) it doesn't start at the "right" directory,
 * whereas relookup is told where to start.
 */
union_vn_create(vpp, un, p)
	struct vnode **vpp;
	struct union_node *un;
	struct proc *p;
{
	kauth_cred_t cred = p->p_ucred;
	struct vnode_attr vat;
	struct vnode_attr *vap = &vat;
	struct vfs_context context;
	int fmode = FFLAGS(O_WRONLY|O_CREAT|O_TRUNC|O_EXCL);
	int cmode = UN_FILEMODE & ~p->p_fd->fd_cmask;
	struct componentname cn;
	context.vc_ucred = p->p_ucred;
	/*
	 * Build a new componentname structure (for the same
	 * reasons outlined in union_mkshadow).
	 * The difference here is that the file is owned by
	 * the current user, rather than by the person who
	 * did the mount, since the current user needs to be
	 * able to write the file (that's why it is being
	 * copied in the first place).
	 */
	cn.cn_namelen = strlen(un->un_path);
	cn.cn_pnbuf = (caddr_t) _MALLOC_ZONE(cn.cn_namelen+1,
	    M_NAMEI, M_WAITOK);
	cn.cn_pnlen = cn.cn_namelen+1;
	bcopy(un->un_path, cn.cn_pnbuf, cn.cn_namelen+1);
	cn.cn_nameiop = CREATE;
	cn.cn_flags = (LOCKPARENT|HASBUF|SAVENAME|SAVESTART|ISLASTCN);
	cn.cn_context = &context;
	cn.cn_nameptr = cn.cn_pnbuf;
	cn.cn_hash = un->un_hash;
	vnode_get(un->un_dirvp);
	if (error = relookup(un->un_dirvp, &vp, &cn))
		return (error);
	vnode_put(un->un_dirvp);

	vnode_put(un->un_dirvp);
	/*
	 * Good - there was no race to create the file
	 * so go ahead and create it. The permissions
	 * on the file will be 0666 modified by the
	 * current user's umask. Access to the file, while
	 * it is unioned, will require access to the top *and*
	 * bottom files. Access when not unioned will simply
	 * require access to the top-level file.
	 *
	 * TODO: confirm choice of access permissions.
	 * decide on authorisation behaviour
	 */
	VATTR_SET(vap, va_type, VREG);
	VATTR_SET(vap, va_mode, cmode);
	if (error = vn_create(un->un_dirvp, &vp, &cn, vap, 0, &context))
		return (error);

	if (error = VNOP_OPEN(vp, fmode, &context)) {
		vnode_put(vp);
		return (error);
	}

	if (++vp->v_writecount <= 0)
		panic("union: v_writecount");
union_vn_close(struct vnode *vp, int fmode, kauth_cred_t cred,
	struct proc *p)
{
	struct vfs_context context;

	context.vc_ucred = cred;

	if (fmode & FWRITE) {
		--vp->v_writecount;
	}
	return (VNOP_CLOSE(vp, fmode, &context));
}
union_removed_upper(un)
	struct union_node *un;
{
	struct proc *p = current_proc();	/* XXX */

	union_newupper(un, NULLVP);

	if (un->un_flags & UN_CACHED) {
		un->un_flags &= ~UN_CACHED;
		LIST_REMOVE(un, un_cache);
	}

	if (un->un_flags & UN_ULOCK) {
		un->un_flags &= ~UN_ULOCK;
	}
	struct union_node *un = VTOUNION(vp);

	if ((un->un_lowervp != NULLVP) &&
	    (vp->v_type == un->un_lowervp->v_type)) {
		if (vnode_get(un->un_lowervp) == 0)
			return (un->un_lowervp);
	}
/*
 * determine whether a whiteout is needed
 * during a remove/rmdir operation.
 */
union_dowhiteout(struct union_node *un, vfs_context_t ctx)
{
	struct vnode_attr va;

	if (un->un_lowervp != NULLVP)
		return (1);

	VATTR_WANTED(&va, va_flags);
	if (vnode_getattr(un->un_uppervp, &va, ctx) == 0 &&
	    (va.va_flags & OPAQUE))
		return (1);
union_dircache_r(vp, vppp, cntp)
	struct vnode ***vppp;
{
	struct union_node *un;

	if (vp->v_op != union_vnodeop_p) {
			panic("union: dircache table too small");
	if (un->un_uppervp != NULLVP)
		union_dircache_r(un->un_uppervp, vppp, cntp);
	if (un->un_lowervp != NULLVP)
		union_dircache_r(un->un_lowervp, vppp, cntp);
union_dircache(vp, p)
	struct vnode **dircache;
	struct union_node *un;

	dircache = VTOUNION(vp)->un_dircache;
	if (dircache == 0) {
		union_dircache_r(vp, 0, &count);
		dircache = (struct vnode **)
		    _MALLOC(count * sizeof(struct vnode *),
			M_TEMP, M_WAITOK);
		union_dircache_r(vp, &vpp, &count);
		if (*vpp++ == VTOUNION(vp)->un_uppervp)
			break;
	} while (*vpp != NULLVP);
	error = union_allocvp(&nvp, vp->v_mount, NULLVP, NULLVP, 0, *vpp, NULLVP, 0);
	VTOUNION(vp)->un_dircache = 0;
	un->un_dircache = dircache;