/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1994 Jan-Simon Pendry
 * Copyright (c) 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_subr.c	8.20 (Berkeley) 5/20/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/vnode_internal.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/queue.h>
#include <sys/mount_internal.h>
#include <sys/stat.h>
#include <sys/ubc.h>
#include <sys/uio_internal.h>
#include <miscfs/union/union.h>

#if DIAGNOSTIC
#include <sys/proc.h>
#endif

/* must be power of two, otherwise change UNION_HASH() */
#define NHASH 32

/* unsigned int ... */
#define UNION_HASH(u, l) \
	(((((unsigned long) (u)) + ((unsigned long) l)) >> 8) & (NHASH-1))

static LIST_HEAD(unhead, union_node) unhead[NHASH];
static int unvplock[NHASH];
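
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * UNION_HASH() folds the upper and lower vnode addresses into one of the
 * NHASH buckets.  Because NHASH is a power of two, the "& (NHASH-1)" step
 * is a cheap modulo; the ">> 8" discards low address bits that are largely
 * constant due to allocator alignment.  The helper below only spells out
 * how an (uppervp, lowervp) pair maps to a bucket index.
 */
#if 0
static int
union_hash_example(struct vnode *uppervp, struct vnode *lowervp)
{
	/* same computation the macro performs, written out long-hand */
	unsigned long sum = (unsigned long) uppervp + (unsigned long) lowervp;
	int bucket = (int) ((sum >> 8) & (NHASH - 1));

	return (bucket);	/* always in the range 0..NHASH-1 */
}
#endif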

int
union_init()
{
	int i;

	for (i = 0; i < NHASH; i++)
		LIST_INIT(&unhead[i]);
	bzero((caddr_t) unvplock, sizeof(unvplock));

	return (0);
}

static int
union_list_lock(ix)
	int ix;
{

	if (unvplock[ix] & UN_LOCKED) {
		unvplock[ix] |= UN_WANT;
		sleep((caddr_t) &unvplock[ix], PINOD);
		return (1);
	}

	unvplock[ix] |= UN_LOCKED;

	return (0);
}

static void
union_list_unlock(ix)
	int ix;
{

	unvplock[ix] &= ~UN_LOCKED;

	if (unvplock[ix] & UN_WANT) {
		unvplock[ix] &= ~UN_WANT;
		wakeup((caddr_t) &unvplock[ix]);
	}
}
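
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the per-bucket lock is advisory -- union_list_lock() returns 1 if the
 * caller slept and must re-evaluate, 0 once the bucket is actually held.
 * Callers therefore spin on it before walking a hash chain, and must pair
 * every successful lock with union_list_unlock().  A hypothetical chain
 * walk would look like this.
 */
#if 0
static struct union_node *
union_find_example(struct vnode *uppervp, struct vnode *lowervp)
{
	int hash = UNION_HASH(uppervp, lowervp);
	struct union_node *un;

	/* keep retrying until this thread holds the bucket */
	while (union_list_lock(hash))
		continue;

	for (un = unhead[hash].lh_first; un != 0; un = un->un_cache.le_next)
		if (un->un_uppervp == uppervp && un->un_lowervp == lowervp)
			break;

	union_list_unlock(hash);
	return (un);	/* NULL if no cached node matched */
}
#endif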

void
union_updatevp(un, uppervp, lowervp)
	struct union_node *un;
	struct vnode *uppervp;
	struct vnode *lowervp;
{
	int ohash = UNION_HASH(un->un_uppervp, un->un_lowervp);
	int nhash = UNION_HASH(uppervp, lowervp);
	int docache = (lowervp != NULLVP || uppervp != NULLVP);
	int lhash, uhash;

	/*
	 * Ensure locking is ordered from lower to higher
	 * to avoid deadlocks.
	 */
	if (nhash < ohash) {
		lhash = nhash;
		uhash = ohash;
	} else {
		lhash = ohash;
		uhash = nhash;
	}

	if (lhash != uhash)
		while (union_list_lock(lhash))
			continue;

	while (union_list_lock(uhash))
		continue;

	if (ohash != nhash || !docache) {
		if (un->un_flags & UN_CACHED) {
			un->un_flags &= ~UN_CACHED;
			LIST_REMOVE(un, un_cache);
		}
	}

	if (ohash != nhash)
		union_list_unlock(ohash);

	if (un->un_lowervp != lowervp) {
		if (un->un_lowervp) {
			vnode_put(un->un_lowervp);
			if (un->un_path) {
				_FREE(un->un_path, M_TEMP);
				un->un_path = 0;
			}
			if (un->un_dirvp) {
				vnode_put(un->un_dirvp);
				un->un_dirvp = NULLVP;
			}
		}
		un->un_lowervp = lowervp;
		un->un_lowersz = VNOVAL;
	}

	if (un->un_uppervp != uppervp) {
		if (un->un_uppervp)
			vnode_put(un->un_uppervp);

		un->un_uppervp = uppervp;
		un->un_uppersz = VNOVAL;
	}

	if (docache && (ohash != nhash)) {
		LIST_INSERT_HEAD(&unhead[nhash], un, un_cache);
		un->un_flags |= UN_CACHED;
	}

	union_list_unlock(nhash);
}

void
union_newlower(un, lowervp)
	struct union_node *un;
	struct vnode *lowervp;
{

	union_updatevp(un, un->un_uppervp, lowervp);
}

void
union_newupper(un, uppervp)
	struct union_node *un;
	struct vnode *uppervp;
{

	union_updatevp(un, uppervp, un->un_lowervp);
}

/*
 * Keep track of size changes in the underlying vnodes.
 * If the size changes, then call back to the VM layer
 * giving priority to the upper layer size.
 */
void
union_newsize(vp, uppersz, lowersz)
	struct vnode *vp;
	off_t uppersz, lowersz;
{
	struct union_node *un;
	off_t sz;

	/* only interested in regular files */
	if (vp->v_type != VREG)
		return;

	un = VTOUNION(vp);
	sz = VNOVAL;

	if ((uppersz != VNOVAL) && (un->un_uppersz != uppersz)) {
		un->un_uppersz = uppersz;
		if (sz == VNOVAL)
			sz = un->un_uppersz;
	}

	if ((lowersz != VNOVAL) && (un->un_lowersz != lowersz)) {
		un->un_lowersz = lowersz;
		if (sz == VNOVAL)
			sz = un->un_lowersz;
	}

	if (sz != VNOVAL) {
#ifdef UNION_DIAGNOSTIC
		printf("union: %s size now %ld\n",
			uppersz != VNOVAL ? "upper" : "lower", (long) sz);
#endif
		ubc_setsize(vp, sz);
	}
}
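
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * union_newsize() is meant to be called whenever a fresh size is learned
 * for one of the two underlying vnodes (e.g. from a getattr or a write in
 * the union layer).  Passing VNOVAL for the layer whose size is unknown
 * leaves that layer's cached size alone.  The caller below is hypothetical.
 */
#if 0
static void
union_newsize_example(struct vnode *unionvp, off_t new_upper_size)
{
	/* upper layer grew (e.g. after a write); lower layer unchanged */
	union_newsize(unionvp, new_upper_size, VNOVAL);
}
#endif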

/*
 * allocate a union_node/vnode pair. the vnode is
 * referenced and locked. the new vnode is returned
 * via (vpp). (mp) is the mountpoint of the union filesystem,
 * (dvp) is the parent directory where the upper layer object
 * should exist (but doesn't) and (cnp) is the componentname
 * information which is partially copied to allow the upper
 * layer object to be created at a later time. (uppervp)
 * and (lowervp) reference the upper and lower layer objects
 * being mapped. either, but not both, can be nil.
 * if supplied, (uppervp) is locked.
 * the reference is either maintained in the new union_node
 * object which is allocated, or they are vnode_put'd.
 *
 * all union_nodes are maintained on a singly-linked
 * list. new nodes are only allocated when they cannot
 * be found on this list. entries on the list are
 * removed when the vfs reclaim entry is called.
 *
 * a single lock is kept for the entire list. this is
 * needed because the getnewvnode() function can block
 * waiting for a vnode to become free, in which case there
 * may be more than one process trying to get the same
 * vnode. this lock is only taken if we are going to
 * call getnewvnode, since the kernel itself is single-threaded.
 *
 * if an entry is found on the list, then call vnode_get() to
 * take a reference. this is done because there may be
 * zero references to it and so it needs to be removed from
 * the vnode free list.
 */
int
union_allocvp(vpp, mp, undvp, dvp, cnp, uppervp, lowervp, docache)
	struct vnode **vpp;
	struct mount *mp;
	struct vnode *undvp;		/* parent union vnode */
	struct vnode *dvp;		/* may be null */
	struct componentname *cnp;	/* may be null */
	struct vnode *uppervp;		/* may be null */
	struct vnode *lowervp;		/* may be null */
	int docache;
{
	int error;
	struct union_node *un;
	struct union_node **pp;
	struct vnode *xlowervp = NULLVP;
	struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
	int hash;
	int markroot;
	int try;
	struct union_node *unp;
	struct vnode_fsparam vfsp;
	enum vtype vtype;

	if (uppervp == NULLVP && lowervp == NULLVP)
		panic("union: unidentifiable allocation");

	if (uppervp && lowervp && (uppervp->v_type != lowervp->v_type)) {
		xlowervp = lowervp;
		lowervp = NULLVP;
	}

	/* detect the root vnode (and aliases) */
	markroot = 0;
	if ((uppervp == um->um_uppervp) &&
	    ((lowervp == NULLVP) || lowervp == um->um_lowervp)) {
		if (lowervp == NULLVP) {
			lowervp = um->um_lowervp;
			if (lowervp != NULLVP)
				vnode_get(lowervp);
		}
		markroot = VROOT;
	}

loop:
	if (!docache) {
		un = 0;
	} else for (try = 0; try < 3; try++) {
		switch (try) {
		case 0:
			if (lowervp == NULLVP)
				continue;
			hash = UNION_HASH(uppervp, lowervp);
			break;

		case 1:
			if (uppervp == NULLVP)
				continue;
			hash = UNION_HASH(uppervp, NULLVP);
			break;

		case 2:
			if (lowervp == NULLVP)
				continue;
			hash = UNION_HASH(NULLVP, lowervp);
			break;
		}

		while (union_list_lock(hash))
			continue;

		for (un = unhead[hash].lh_first; un != 0;
					un = un->un_cache.le_next) {
			if ((un->un_lowervp == lowervp ||
			     un->un_lowervp == NULLVP) &&
			    (un->un_uppervp == uppervp ||
			     un->un_uppervp == NULLVP) &&
			    (UNIONTOV(un)->v_mount == mp)) {
				if (vnode_get(UNIONTOV(un))) {
					union_list_unlock(hash);
					goto loop;
				}
				break;
			}
		}

		union_list_unlock(hash);

		if (un)
			break;
	}

	if (un) {
		/*
		 * Obtain a lock on the union_node.
		 * uppervp is locked, though un->un_uppervp
		 * may not be. this doesn't break the locking
		 * hierarchy since in the case that un->un_uppervp
		 * is not yet locked it will be vnode_put'd and replaced
		 * with uppervp.
		 */

		if ((dvp != NULLVP) && (uppervp == dvp)) {
			/*
			 * Access ``.'', so (un) will already
			 * be locked. Since this process has
			 * the lock on (uppervp) no other
			 * process can hold the lock on (un).
			 */
#if DIAGNOSTIC
			if ((un->un_flags & UN_LOCKED) == 0)
				panic("union: . not locked");
			else if (current_proc() && un->un_pid != current_proc()->p_pid &&
				    un->un_pid > -1 && current_proc()->p_pid > -1)
				panic("union: allocvp not lock owner");
#endif
		} else {
			if (un->un_flags & UN_LOCKED) {
				vnode_put(UNIONTOV(un));
				un->un_flags |= UN_WANT;
				sleep((caddr_t) &un->un_flags, PINOD);
				goto loop;
			}
			un->un_flags |= UN_LOCKED;

#if DIAGNOSTIC
			if (current_proc())
				un->un_pid = current_proc()->p_pid;
			else
				un->un_pid = -1;
#endif
		}

		/*
		 * At this point, the union_node is locked,
		 * un->un_uppervp may not be locked, and uppervp
		 * is locked or nil.
		 */

		/*
		 * Save information about the upper layer.
		 */
		if (uppervp != un->un_uppervp) {
			union_newupper(un, uppervp);
		} else if (uppervp) {
			vnode_put(uppervp);
		}

		if (un->un_uppervp) {
			un->un_flags |= UN_ULOCK;
			un->un_flags &= ~UN_KLOCK;
		}

		/*
		 * Save information about the lower layer.
		 * This needs to keep track of pathname
		 * and directory information which union_vn_create
		 * might need.
		 */
		if (lowervp != un->un_lowervp) {
			union_newlower(un, lowervp);
			if (cnp && (lowervp != NULLVP)) {
				un->un_hash = cnp->cn_hash;
				MALLOC(un->un_path, caddr_t, cnp->cn_namelen+1,
						M_TEMP, M_WAITOK);
				bcopy(cnp->cn_nameptr, un->un_path,
						cnp->cn_namelen);
				un->un_path[cnp->cn_namelen] = '\0';
				vnode_get(dvp);
				un->un_dirvp = dvp;
			}
		} else if (lowervp) {
			vnode_put(lowervp);
		}
		*vpp = UNIONTOV(un);
		return (0);
	}

	if (docache) {
		/*
		 * otherwise lock the vp list while we call getnewvnode
		 * since that can block.
		 */
		hash = UNION_HASH(uppervp, lowervp);

		if (union_list_lock(hash))
			goto loop;
	}

	MALLOC(unp, void *, sizeof(struct union_node), M_TEMP, M_WAITOK);

	if (uppervp)
		vtype = uppervp->v_type;
	else
		vtype = lowervp->v_type;
	//bzero(&vfsp, sizeof(struct vnode_fsparam));
	vfsp.vnfs_mp = mp;
	vfsp.vnfs_vtype = vtype;
	vfsp.vnfs_str = "unionfs";
	vfsp.vnfs_dvp = dvp;
	vfsp.vnfs_fsnode = unp;
	vfsp.vnfs_cnp = cnp;
	vfsp.vnfs_vops = union_vnodeop_p;
	vfsp.vnfs_rdev = 0;
	vfsp.vnfs_filesize = 0;
	vfsp.vnfs_flags = VNFS_NOCACHE | VNFS_CANTCACHE;
	vfsp.vnfs_marksystem = 0;
	vfsp.vnfs_markroot = markroot;

	error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, vpp);
	if (error) {
		FREE(unp, M_TEMP);
		if (uppervp) {
			vnode_put(uppervp);
		}
		if (lowervp)
			vnode_put(lowervp);

		goto out;
	}

	(*vpp)->v_tag = VT_UNION;
	un = VTOUNION(*vpp);
	un->un_vnode = *vpp;
	un->un_uppervp = uppervp;
	un->un_uppersz = VNOVAL;
	un->un_lowervp = lowervp;
	un->un_lowersz = VNOVAL;
	un->un_pvp = undvp;
	if (undvp != NULLVP)
		vnode_get(undvp);
	un->un_dircache = 0;
	un->un_openl = 0;
	un->un_flags = UN_LOCKED;
	if (un->un_uppervp)
		un->un_flags |= UN_ULOCK;
#if DIAGNOSTIC
	if (current_proc())
		un->un_pid = current_proc()->p_pid;
	else
		un->un_pid = -1;
#endif
	if (cnp && (lowervp != NULLVP)) {
		un->un_hash = cnp->cn_hash;
		un->un_path = _MALLOC(cnp->cn_namelen+1, M_TEMP, M_WAITOK);
		bcopy(cnp->cn_nameptr, un->un_path, cnp->cn_namelen);
		un->un_path[cnp->cn_namelen] = '\0';
		vnode_get(dvp);
		un->un_dirvp = dvp;
	} else {
		un->un_hash = 0;
		un->un_path = 0;
		un->un_dirvp = 0;
	}

	if (docache) {
		LIST_INSERT_HEAD(&unhead[hash], un, un_cache);
		un->un_flags |= UN_CACHED;
	}

	if (xlowervp)
		vnode_put(xlowervp);

out:
	if (docache)
		union_list_unlock(hash);

	return (error);
}
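
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a typical caller -- for example a lookup routine that has already
 * resolved the component in the upper and/or lower layer -- hands both
 * layer vnodes to union_allocvp() and receives the unioned vnode back in
 * *vpp.  The function and parameter names below (parent_union_vp,
 * upper_result, lower_result) are placeholders, not identifiers from
 * this file.
 */
#if 0
static int
union_allocvp_example(struct mount *mp, struct vnode *parent_union_vp,
	struct vnode *parent_dvp, struct componentname *cnp,
	struct vnode *upper_result, struct vnode *lower_result,
	struct vnode **vpp)
{
	/*
	 * The references on upper_result/lower_result are consumed: they
	 * are either stored in the new union_node or vnode_put()'d inside
	 * union_allocvp().
	 */
	return (union_allocvp(vpp, mp, parent_union_vp, parent_dvp, cnp,
	    upper_result, lower_result, 1));
}
#endif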

int
union_freevp(vp)
	struct vnode *vp;
{
	struct union_node *un = VTOUNION(vp);

	if (un->un_flags & UN_CACHED) {
		un->un_flags &= ~UN_CACHED;
		LIST_REMOVE(un, un_cache);
	}

	if (un->un_pvp != NULLVP)
		vnode_put(un->un_pvp);
	if (un->un_uppervp != NULLVP)
		vnode_put(un->un_uppervp);
	if (un->un_lowervp != NULLVP)
		vnode_put(un->un_lowervp);
	if (un->un_dirvp != NULLVP)
		vnode_put(un->un_dirvp);
	if (un->un_path)
		_FREE(un->un_path, M_TEMP);

	FREE(vp->v_data, M_TEMP);
	vp->v_data = 0;

	return (0);
}

/*
 * copyfile. copy the vnode (fvp) to the vnode (tvp)
 * using a sequence of reads and writes. both (fvp)
 * and (tvp) are locked on entry and exit.
 */
int
union_copyfile(struct vnode *fvp, struct vnode *tvp, kauth_cred_t cred,
	struct proc *p)
{
	char *bufp;
	struct uio uio;
	struct iovec_32 iov;
	struct vfs_context context;
	int error = 0;

	/*
	 * strategy:
	 * allocate a buffer of size MAXPHYSIO.
	 * loop doing reads and writes, keeping track
	 * of the current uio offset.
	 * give up at the first sign of trouble.
	 */

	context.vc_proc = p;
	context.vc_ucred = cred;

#if 1	/* LP64todo - can't use new segment flags until the drivers are ready */
	uio.uio_segflg = UIO_SYSSPACE;
#else
	uio.uio_segflg = UIO_SYSSPACE32;
#endif
	uio.uio_offset = 0;

	bufp = _MALLOC(MAXPHYSIO, M_TEMP, M_WAITOK);

	/* ugly loop follows... */
	do {
		off_t offset = uio.uio_offset;

		uio.uio_iovs.iov32p = &iov;
		uio.uio_iovcnt = 1;
		iov.iov_base = (uintptr_t)bufp;
		iov.iov_len = MAXPHYSIO;
		uio_setresid(&uio, iov.iov_len);
		uio.uio_rw = UIO_READ;
		error = VNOP_READ(fvp, &uio, 0, &context);

		if (error == 0) {
			uio.uio_iovs.iov32p = &iov;
			uio.uio_iovcnt = 1;
			iov.iov_base = (uintptr_t)bufp;
			iov.iov_len = MAXPHYSIO - uio_resid(&uio);
			uio.uio_offset = offset;
			uio.uio_rw = UIO_WRITE;
			uio_setresid(&uio, iov.iov_len);

			if (uio_resid(&uio) == 0)
				break;

			do {
				error = VNOP_WRITE(tvp, &uio, 0, &context);
			} while ((uio_resid(&uio) > 0) && (error == 0));
		}

	} while (error == 0);

	_FREE(bufp, M_TEMP);
	return (error);
}

/*
 * (un) is assumed to be locked on entry and remains
 * locked on exit.
 */
int
union_copyup(struct union_node *un, int docopy, kauth_cred_t cred,
	struct proc *p)
{
	int error;
	struct vnode *lvp, *uvp;
	struct vfs_context context;

	error = union_vn_create(&uvp, un, p);
	if (error)
		return (error);

	context.vc_proc = p;
	context.vc_ucred = cred;

	/* at this point, uppervp is locked */
	union_newupper(un, uvp);
	un->un_flags |= UN_ULOCK;

	lvp = un->un_lowervp;

	if (docopy) {
		/*
		 * XXX - should not ignore errors
		 * from vnop_close
		 */
		error = VNOP_OPEN(lvp, FREAD, &context);
		if (error == 0) {
			error = union_copyfile(lvp, uvp, cred, p);
			(void) VNOP_CLOSE(lvp, FREAD, &context);
		}
#ifdef UNION_DIAGNOSTIC
		if (error == 0)
			uprintf("union: copied up %s\n", un->un_path);
#endif

	}
	un->un_flags &= ~UN_ULOCK;
	union_vn_close(uvp, FWRITE, cred, p);
	un->un_flags |= UN_ULOCK;

	/*
	 * Subsequent IOs will go to the top layer, so
	 * call close on the lower vnode and open on the
	 * upper vnode to ensure that the filesystem keeps
	 * its reference counts right. This doesn't do
	 * the right thing with (cred) and (FREAD) though.
	 * Ignoring error returns is not right, either.
	 */
	if (error == 0) {
		int i;

		for (i = 0; i < un->un_openl; i++) {
			(void) VNOP_CLOSE(lvp, FREAD, &context);
			(void) VNOP_OPEN(uvp, FREAD, &context);
		}
		un->un_openl = 0;
	}

	return (error);
}
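
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * copy-up is normally triggered when a union vnode that exists only in the
 * lower (read-only) layer is about to be modified.  A hypothetical
 * open-for-write path could look roughly like this; the function name and
 * its arguments are placeholders.
 */
#if 0
static int
union_copyup_example(struct union_node *un, int open_mode,
	kauth_cred_t cred, struct proc *p)
{
	int error = 0;

	/* no upper object yet and the caller wants to write: copy it up */
	if (un->un_uppervp == NULLVP && (open_mode & FWRITE))
		error = union_copyup(un, 1 /* copy the data too */, cred, p);

	return (error);
}
#endif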

static int
union_relookup(um, dvp, vpp, cnp, cn, path, pathlen)
	struct union_mount *um;
	struct vnode *dvp;
	struct vnode **vpp;
	struct componentname *cnp;
	struct componentname *cn;
	char *path;
	int pathlen;
{
	int error;

	/*
	 * A new componentname structure must be faked up because
	 * there is no way to know where the upper level cnp came
	 * from or what it is being used for. This must duplicate
	 * some of the work done by NDINIT, some of the work done
	 * by namei, some of the work done by lookup and some of
	 * the work done by vnop_lookup when given a CREATE flag.
	 * Conclusion: Horrible.
	 */
	cn->cn_namelen = pathlen;
	cn->cn_pnbuf = _MALLOC_ZONE(cn->cn_namelen+1, M_NAMEI, M_WAITOK);
	cn->cn_pnlen = cn->cn_namelen+1;
	bcopy(path, cn->cn_pnbuf, cn->cn_namelen);
	cn->cn_pnbuf[cn->cn_namelen] = '\0';

	cn->cn_nameiop = CREATE;
	cn->cn_flags = (LOCKPARENT|HASBUF|SAVENAME|SAVESTART|ISLASTCN);
#ifdef XXX_HELP_ME
	cn->cn_proc = cnp->cn_proc;
	if (um->um_op == UNMNT_ABOVE)
		cn->cn_cred = cnp->cn_cred;
	else
		cn->cn_cred = um->um_cred;
#endif
	cn->cn_context = cnp->cn_context;	/* XXX !UNMNT_ABOVE case ??? */
	cn->cn_nameptr = cn->cn_pnbuf;
	cn->cn_hash = cnp->cn_hash;
	cn->cn_consume = cnp->cn_consume;

	vnode_get(dvp);
	error = relookup(dvp, vpp, cn);
	if (!error)
		vnode_put(dvp);

	return (error);
}

/*
 * Create a shadow directory in the upper layer.
 * The new vnode is returned locked.
 *
 * (um) points to the union mount structure for access to the
 * mounting process's credentials.
 * (dvp) is the directory in which to create the shadow directory.
 * it is unlocked on entry and exit.
 * (cnp) is the componentname to be created.
 * (vpp) is the returned newly created shadow directory, which
 * is returned locked.
 */
int
union_mkshadow(um, dvp, cnp, vpp)
	struct union_mount *um;
	struct vnode *dvp;
	struct componentname *cnp;
	struct vnode **vpp;
{
	int error;
	struct vnode_attr va;
	struct componentname cn;

	error = union_relookup(um, dvp, vpp, cnp, &cn,
			cnp->cn_nameptr, cnp->cn_namelen);
	if (error)
		return (error);

	if (*vpp) {
		vnode_put(*vpp);
		*vpp = NULLVP;
		return (EEXIST);
	}

	/*
	 * policy: when creating the shadow directory in the
	 * upper layer, create it owned by the user who did
	 * the mount, group from parent directory, and mode
	 * 777 modified by umask (i.e. mostly identical to the
	 * mkdir syscall). (jsp, kb)
	 */
	VATTR_INIT(&va);
	VATTR_SET(&va, va_type, VDIR);
	VATTR_SET(&va, va_mode, um->um_cmode);

	error = vn_create(dvp, vpp, &cn, &va, 0, cnp->cn_context);
	return (error);
}
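
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * um_cmode is computed once, at mount time, from the mounting process's
 * umask, so every shadow directory created above gets "777 & ~umask".
 * UN_DIRMODE is assumed to be the rwxrwxrwx mask defined in union.h; the
 * function below is hypothetical.
 */
#if 0
static void
union_set_cmode_example(struct union_mount *um, struct proc *p)
{
	/* directory mode 0777, trimmed by the caller's umask */
	um->um_cmode = UN_DIRMODE & ~p->p_fd->fd_cmask;
}
#endif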

/*
 * Create a whiteout entry in the upper layer.
 *
 * (um) points to the union mount structure for access to the
 * mounting process's credentials.
 * (dvp) is the directory in which to create the whiteout.
 * it is locked on entry and exit.
 * (cnp) is the componentname to be created.
 */
int
union_mkwhiteout(um, dvp, cnp, path)
	struct union_mount *um;
	struct vnode *dvp;
	struct componentname *cnp;
	char *path;
{
	int error;
	struct vnode *wvp;
	struct componentname cn;

	error = union_relookup(um, dvp, &wvp, cnp, &cn, path, strlen(path));
	if (error) {
		return (error);
	}
	if (wvp) {
		vnode_put(dvp);
		vnode_put(wvp);
		return (EEXIST);
	}

	error = VNOP_WHITEOUT(dvp, &cn, CREATE, cnp->cn_context);

	vnode_put(dvp);

	return (error);
}

/*
 * union_vn_create: creates and opens a new shadow file
 * on the upper union layer. this function is similar
 * in spirit to calling vn_open but it avoids calling namei().
 * the problem with calling namei is that a) it locks too many
 * things, and b) it doesn't start at the "right" directory,
 * whereas relookup is told where to start.
 */
int
union_vn_create(vpp, un, p)
	struct vnode **vpp;
	struct union_node *un;
	struct proc *p;
{
	struct vnode *vp;
	kauth_cred_t cred = p->p_ucred;
	struct vnode_attr vat;
	struct vnode_attr *vap = &vat;
	struct vfs_context context;
	int fmode = FFLAGS(O_WRONLY|O_CREAT|O_TRUNC|O_EXCL);
	int error;
	int cmode = UN_FILEMODE & ~p->p_fd->fd_cmask;
	char *cp;
	struct componentname cn;

	*vpp = NULLVP;

	context.vc_proc = p;
	context.vc_ucred = p->p_ucred;

	/*
	 * Build a new componentname structure (for the same
	 * reasons outlined in union_mkshadow).
	 * The difference here is that the file is owned by
	 * the current user, rather than by the person who
	 * did the mount, since the current user needs to be
	 * able to write the file (that's why it is being
	 * copied in the first place).
	 */
	cn.cn_namelen = strlen(un->un_path);
	cn.cn_pnbuf = (caddr_t) _MALLOC_ZONE(cn.cn_namelen+1,
			M_NAMEI, M_WAITOK);
	cn.cn_pnlen = cn.cn_namelen+1;
	bcopy(un->un_path, cn.cn_pnbuf, cn.cn_namelen+1);
	cn.cn_nameiop = CREATE;
	cn.cn_flags = (LOCKPARENT|HASBUF|SAVENAME|SAVESTART|ISLASTCN);
	cn.cn_context = &context;
	cn.cn_nameptr = cn.cn_pnbuf;
	cn.cn_hash = un->un_hash;
	cn.cn_consume = 0;

	vnode_get(un->un_dirvp);
	if ((error = relookup(un->un_dirvp, &vp, &cn)))
		return (error);
	vnode_put(un->un_dirvp);

	if (vp) {
		vnode_put(un->un_dirvp);
		vnode_put(vp);
		return (EEXIST);
	}

	/*
	 * Good - there was no race to create the file
	 * so go ahead and create it. The permissions
	 * on the file will be 0666 modified by the
	 * current user's umask. Access to the file, while
	 * it is unioned, will require access to the top *and*
	 * bottom files. Access when not unioned will simply
	 * require access to the top-level file.
	 *
	 * TODO: confirm choice of access permissions.
	 *       decide on authorisation behaviour
	 */

	VATTR_INIT(vap);
	VATTR_SET(vap, va_type, VREG);
	VATTR_SET(vap, va_mode, cmode);

	if ((error = vn_create(un->un_dirvp, &vp, &cn, vap, 0, &context)))
		return (error);

	if ((error = VNOP_OPEN(vp, fmode, &context))) {
		vnode_put(vp);
		return (error);
	}

	vnode_lock(vp);
	if (++vp->v_writecount <= 0)
		panic("union: v_writecount");
	vnode_unlock(vp);
	*vpp = vp;
	return (0);
}

int
union_vn_close(struct vnode *vp, int fmode, kauth_cred_t cred,
	struct proc *p)
{
	struct vfs_context context;

	context.vc_proc = p;
	context.vc_ucred = cred;

	if (fmode & FWRITE) {
		vnode_lock(vp);
		--vp->v_writecount;
		vnode_unlock(vp);
	}
	return (VNOP_CLOSE(vp, fmode, &context));
}

void
union_removed_upper(un)
	struct union_node *un;
{
	struct proc *p = current_proc();	/* XXX */

	union_newupper(un, NULLVP);
	if (un->un_flags & UN_CACHED) {
		un->un_flags &= ~UN_CACHED;
		LIST_REMOVE(un, un_cache);
	}

	if (un->un_flags & UN_ULOCK) {
		un->un_flags &= ~UN_ULOCK;
	}
}

#if 0
struct vnode *
union_lowervp(vp)
	struct vnode *vp;
{
	struct union_node *un = VTOUNION(vp);

	if ((un->un_lowervp != NULLVP) &&
	    (vp->v_type == un->un_lowervp->v_type)) {
		if (vnode_get(un->un_lowervp) == 0)
			return (un->un_lowervp);
	}

	return (NULLVP);
}
#endif

/*
 * determine whether a whiteout is needed
 * during a remove/rmdir operation.
 */
int
union_dowhiteout(struct union_node *un, vfs_context_t ctx)
{
	struct vnode_attr va;

	if (un->un_lowervp != NULLVP)
		return (1);

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_flags);
	if (vnode_getattr(un->un_uppervp, &va, ctx) == 0 &&
	    (va.va_flags & OPAQUE))
		return (1);

	return (0);
}
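
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a remove path would consult union_dowhiteout() to decide whether simply
 * unlinking the upper object is enough, or whether a whiteout must also be
 * left behind so the lower object does not "reappear".  The function and
 * its path argument below are placeholders, not the filesystem's actual
 * remove implementation.
 */
#if 0
static int
union_remove_example(struct union_mount *um, struct union_node *un,
	struct vnode *upper_dvp, struct componentname *cnp, char *path,
	vfs_context_t ctx)
{
	int error = 0;

	if (union_dowhiteout(un, ctx)) {
		/* a lower copy exists (or the dir is opaque): mask it */
		error = union_mkwhiteout(um, upper_dvp, cnp, path);
	}
	/* ... the actual removal of the upper object would follow ... */

	return (error);
}
#endif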

static void
union_dircache_r(vp, vppp, cntp)
	struct vnode *vp;
	struct vnode ***vppp;
	int *cntp;
{
	struct union_node *un;

	if (vp->v_op != union_vnodeop_p) {
		if (vppp) {
			vnode_get(vp);
			*(*vppp)++ = vp;
			if (--(*cntp) == 0)
				panic("union: dircache table too small");
		} else {
			(*cntp)++;
		}

		return;
	}

	un = VTOUNION(vp);
	if (un->un_uppervp != NULLVP)
		union_dircache_r(un->un_uppervp, vppp, cntp);
	if (un->un_lowervp != NULLVP)
		union_dircache_r(un->un_lowervp, vppp, cntp);
}

struct vnode *
union_dircache(vp, p)
	struct vnode *vp;
	struct proc *p;
{
	int count;
	struct vnode *nvp;
	struct vnode **vpp;
	struct vnode **dircache;
	struct union_node *un;
	int error;

	dircache = VTOUNION(vp)->un_dircache;

	nvp = NULLVP;

	if (dircache == 0) {
		count = 0;
		union_dircache_r(vp, 0, &count);
		count++;
		dircache = (struct vnode **)
				_MALLOC(count * sizeof(struct vnode *),
					M_TEMP, M_WAITOK);
		vpp = dircache;
		union_dircache_r(vp, &vpp, &count);
		*vpp = NULLVP;
		vpp = dircache + 1;
	} else {
		vpp = dircache;
		do {
			if (*vpp++ == VTOUNION(vp)->un_uppervp)
				break;
		} while (*vpp != NULLVP);
	}

	if (*vpp == NULLVP)
		goto out;

	vnode_get(*vpp);
	error = union_allocvp(&nvp, vp->v_mount, NULLVP, NULLVP, 0, *vpp, NULLVP, 0);
	if (error)
		goto out;

	VTOUNION(vp)->un_dircache = 0;
	un = VTOUNION(nvp);
	un->un_dircache = dircache;

out:
	return (nvp);
}