/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1994 Jan-Simon Pendry
 * Copyright (c) 1994
 *      The Regents of the University of California. All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)union_subr.c        8.20 (Berkeley) 5/20/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/queue.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/ubc.h>
#include <miscfs/union/union.h>

#if DIAGNOSTIC
#include <sys/proc.h>
#endif

/* must be power of two, otherwise change UNION_HASH() */
#define NHASH 32

/* unsigned int ... */
#define UNION_HASH(u, l) \
        (((((unsigned long) (u)) + ((unsigned long) l)) >> 8) & (NHASH-1))
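
/*
 * Worked example (hypothetical pointer values, for illustration only):
 * with NHASH == 32 the mask is NHASH-1 == 0x1f, so a pair of vnode
 * addresses such as u == 0xc1234500 and l == 0xc1234a00 hashes to
 *
 *      ((0xc1234500 + 0xc1234a00) >> 8) & 0x1f == 0x0f
 *
 * i.e. bucket 15.  The shift by 8 discards the low-order address bits,
 * which carry little variation between vnodes.
 */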

static LIST_HEAD(unhead, union_node) unhead[NHASH];
static int unvplock[NHASH];

int
union_init()
{
        int i;

        for (i = 0; i < NHASH; i++)
                LIST_INIT(&unhead[i]);
        bzero((caddr_t) unvplock, sizeof(unvplock));
        return (0);
}

static int
union_list_lock(ix)
        int ix;
{

        if (unvplock[ix] & UN_LOCKED) {
                unvplock[ix] |= UN_WANT;
                sleep((caddr_t) &unvplock[ix], PINOD);
                return (1);
        }

        unvplock[ix] |= UN_LOCKED;

        return (0);
}

static void
union_list_unlock(ix)
        int ix;
{

        unvplock[ix] &= ~UN_LOCKED;

        if (unvplock[ix] & UN_WANT) {
                unvplock[ix] &= ~UN_WANT;
                wakeup((caddr_t) &unvplock[ix]);
        }
}
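
/*
 * union_list_lock() returns 1 when it had to sleep, in which case the
 * lock was not acquired and the caller must try again; this is why the
 * callers below spin with "while (union_list_lock(hash)) continue;".
 */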

void
union_updatevp(un, uppervp, lowervp)
        struct union_node *un;
        struct vnode *uppervp;
        struct vnode *lowervp;
{
        int ohash = UNION_HASH(un->un_uppervp, un->un_lowervp);
        int nhash = UNION_HASH(uppervp, lowervp);
        int docache = (lowervp != NULLVP || uppervp != NULLVP);
        int lhash, hhash, uhash;

        /*
         * Ensure locking is ordered from lower to higher
         * to avoid deadlocks.
         */
        if (nhash < ohash) {
                lhash = nhash;
                uhash = ohash;
        } else {
                lhash = ohash;
                uhash = nhash;
        }

        if (lhash != uhash)
                while (union_list_lock(lhash))
                        continue;

        while (union_list_lock(uhash))
                continue;

        if (ohash != nhash || !docache) {
                if (un->un_flags & UN_CACHED) {
                        un->un_flags &= ~UN_CACHED;
                        LIST_REMOVE(un, un_cache);
                }
        }

        if (ohash != nhash)
                union_list_unlock(ohash);

        if (un->un_lowervp != lowervp) {
                if (un->un_lowervp) {
                        vrele(un->un_lowervp);
                        if (un->un_path) {
                                _FREE(un->un_path, M_TEMP);
                                un->un_path = 0;
                        }
                        if (un->un_dirvp) {
                                vrele(un->un_dirvp);
                                un->un_dirvp = NULLVP;
                        }
                }
                un->un_lowervp = lowervp;
                un->un_lowersz = VNOVAL;
        }

        if (un->un_uppervp != uppervp) {
                if (un->un_uppervp)
                        vrele(un->un_uppervp);

                un->un_uppervp = uppervp;
                un->un_uppersz = VNOVAL;
        }

        if (docache && (ohash != nhash)) {
                LIST_INSERT_HEAD(&unhead[nhash], un, un_cache);
                un->un_flags |= UN_CACHED;
        }

        union_list_unlock(nhash);
}

void
union_newlower(un, lowervp)
        struct union_node *un;
        struct vnode *lowervp;
{

        union_updatevp(un, un->un_uppervp, lowervp);
}

void
union_newupper(un, uppervp)
        struct union_node *un;
        struct vnode *uppervp;
{

        union_updatevp(un, uppervp, un->un_lowervp);
}

/*
 * Keep track of size changes in the underlying vnodes.
 * If the size changes, then callback to the vm layer
 * giving priority to the upper layer size.
 */
void
union_newsize(vp, uppersz, lowersz)
        struct vnode *vp;
        off_t uppersz, lowersz;
{
        struct union_node *un;
        off_t sz;

        /* only interested in regular files */
        if (vp->v_type != VREG)
                return;

        un = VTOUNION(vp);
        sz = VNOVAL;

        if ((uppersz != VNOVAL) && (un->un_uppersz != uppersz)) {
                un->un_uppersz = uppersz;
                if (sz == VNOVAL)
                        sz = un->un_uppersz;
        }

        if ((lowersz != VNOVAL) && (un->un_lowersz != lowersz)) {
                un->un_lowersz = lowersz;
                if (sz == VNOVAL)
                        sz = un->un_lowersz;
        }

        if (sz != VNOVAL) {
#ifdef UNION_DIAGNOSTIC
                printf("union: %s size now %ld\n",
                        uppersz != VNOVAL ? "upper" : "lower", (long) sz);
#endif
                if (UBCISVALID(vp))
                        ubc_setsize(vp, sz); /* XXX check error */
        }
}

/*
 * allocate a union_node/vnode pair. the vnode is
 * referenced and locked. the new vnode is returned
 * via (vpp). (mp) is the mountpoint of the union filesystem,
 * (dvp) is the parent directory where the upper layer object
 * should exist (but doesn't) and (cnp) is the componentname
 * information which is partially copied to allow the upper
 * layer object to be created at a later time. (uppervp)
 * and (lowervp) reference the upper and lower layer objects
 * being mapped. either, but not both, can be nil.
 * if supplied, (uppervp) is locked.
 * the references are either maintained in the new union_node
 * object which is allocated, or they are vrele'd.
 *
 * all union_nodes are maintained on a singly-linked
 * list. new nodes are only allocated when they cannot
 * be found on this list. entries on the list are
 * removed when the vfs reclaim entry is called.
 *
 * a single lock is kept for the entire list. this is
 * needed because the getnewvnode() function can block
 * waiting for a vnode to become free, in which case there
 * may be more than one process trying to get the same
 * vnode. this lock is only taken if we are going to
 * call getnewvnode, since the kernel itself is single-threaded.
 *
 * if an entry is found on the list, then call vget() to
 * take a reference. this is done because there may be
 * zero references to it and so it needs to be removed from
 * the vnode free list.
 */
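/*
 * Usage sketch (hypothetical caller, for illustration; the variable names
 * below are assumptions, not taken from this file): a lookup routine that
 * has resolved an upper and/or lower vnode for a name might hand them off
 * roughly as
 *
 *      error = union_allocvp(&vp, mp, undvp, upperdvp, cnp,
 *          uppervp, lowervp, 1);
 *
 * after which the references it held on uppervp/lowervp belong to the
 * returned union_node (or have been vrele'd), and vp comes back locked.
 */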
int
union_allocvp(vpp, mp, undvp, dvp, cnp, uppervp, lowervp, docache)
        struct vnode **vpp;
        struct mount *mp;
        struct vnode *undvp;            /* parent union vnode */
        struct vnode *dvp;              /* may be null */
        struct componentname *cnp;      /* may be null */
        struct vnode *uppervp;          /* may be null */
        struct vnode *lowervp;          /* may be null */
        int docache;
{
        int error;
        struct union_node *un;
        struct union_node **pp;
        struct vnode *xlowervp = NULLVP;
        struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
        int hash;
        int vflag;
        int try;
        struct union_node *unp;

        if (uppervp == NULLVP && lowervp == NULLVP)
                panic("union: unidentifiable allocation");

        if (uppervp && lowervp && (uppervp->v_type != lowervp->v_type)) {
                xlowervp = lowervp;
                lowervp = NULLVP;
        }

        /* detect the root vnode (and aliases) */
        vflag = 0;
        if ((uppervp == um->um_uppervp) &&
            ((lowervp == NULLVP) || lowervp == um->um_lowervp)) {
                if (lowervp == NULLVP) {
                        lowervp = um->um_lowervp;
                        if (lowervp != NULLVP)
                                VREF(lowervp);
                }
                vflag = VROOT;
        }

loop:
        if (!docache) {
                un = 0;
        } else for (try = 0; try < 3; try++) {
                switch (try) {
                case 0:
                        if (lowervp == NULLVP)
                                continue;
                        hash = UNION_HASH(uppervp, lowervp);
                        break;

                case 1:
                        if (uppervp == NULLVP)
                                continue;
                        hash = UNION_HASH(uppervp, NULLVP);
                        break;

                case 2:
                        if (lowervp == NULLVP)
                                continue;
                        hash = UNION_HASH(NULLVP, lowervp);
                        break;
                }

                while (union_list_lock(hash))
                        continue;

                for (un = unhead[hash].lh_first; un != 0;
                                un = un->un_cache.le_next) {
                        if ((un->un_lowervp == lowervp ||
                             un->un_lowervp == NULLVP) &&
                            (un->un_uppervp == uppervp ||
                             un->un_uppervp == NULLVP) &&
                            (UNIONTOV(un)->v_mount == mp)) {
                                if (vget(UNIONTOV(un), 0,
                                    cnp ? cnp->cn_proc : NULL)) {
                                        union_list_unlock(hash);
                                        goto loop;
                                }
                                break;
                        }
                }

                union_list_unlock(hash);

                if (un)
                        break;
        }

        if (un) {
                /*
                 * Obtain a lock on the union_node.
                 * uppervp is locked, though un->un_uppervp
                 * may not be. this doesn't break the locking
                 * hierarchy since in the case that un->un_uppervp
                 * is not yet locked it will be vrele'd and replaced
                 * with uppervp.
                 */

                if ((dvp != NULLVP) && (uppervp == dvp)) {
                        /*
                         * Access ``.'', so (un) will already
                         * be locked. Since this process has
                         * the lock on (uppervp) no other
                         * process can hold the lock on (un).
                         */
#if DIAGNOSTIC
                        if ((un->un_flags & UN_LOCKED) == 0)
                                panic("union: . not locked");
                        else if (current_proc() && un->un_pid != current_proc()->p_pid &&
                                    un->un_pid > -1 && current_proc()->p_pid > -1)
                                panic("union: allocvp not lock owner");
#endif
                } else {
                        if (un->un_flags & UN_LOCKED) {
                                vrele(UNIONTOV(un));
                                un->un_flags |= UN_WANT;
                                sleep((caddr_t) &un->un_flags, PINOD);
                                goto loop;
                        }
                        un->un_flags |= UN_LOCKED;

#if DIAGNOSTIC
                        if (current_proc())
                                un->un_pid = current_proc()->p_pid;
                        else
                                un->un_pid = -1;
#endif
                }

                /*
                 * At this point, the union_node is locked,
                 * un->un_uppervp may not be locked, and uppervp
                 * is locked or nil.
                 */

                /*
                 * Save information about the upper layer.
                 */
                if (uppervp != un->un_uppervp) {
                        union_newupper(un, uppervp);
                } else if (uppervp) {
                        vrele(uppervp);
                }

                if (un->un_uppervp) {
                        un->un_flags |= UN_ULOCK;
                        un->un_flags &= ~UN_KLOCK;
                }

                /*
                 * Save information about the lower layer.
                 * This needs to keep track of pathname
                 * and directory information which union_vn_create
                 * might need.
                 */
                if (lowervp != un->un_lowervp) {
                        union_newlower(un, lowervp);
                        if (cnp && (lowervp != NULLVP)) {
                                un->un_hash = cnp->cn_hash;
                                MALLOC(un->un_path, caddr_t, cnp->cn_namelen+1,
                                                M_TEMP, M_WAITOK);
                                bcopy(cnp->cn_nameptr, un->un_path,
                                                cnp->cn_namelen);
                                un->un_path[cnp->cn_namelen] = '\0';
                                VREF(dvp);
                                un->un_dirvp = dvp;
                        }
                } else if (lowervp) {
                        vrele(lowervp);
                }
                *vpp = UNIONTOV(un);
                return (0);
        }

        if (docache) {
                /*
                 * otherwise lock the vp list while we call getnewvnode
                 * since that can block.
                 */
                hash = UNION_HASH(uppervp, lowervp);

                if (union_list_lock(hash))
                        goto loop;
        }

        MALLOC(unp, void *, sizeof(struct union_node), M_TEMP, M_WAITOK);
        error = getnewvnode(VT_UNION, mp, union_vnodeop_p, vpp);
        if (error) {
                FREE(unp, M_TEMP);
                if (uppervp) {
                        if (dvp == uppervp)
                                vrele(uppervp);
                        else
                                vput(uppervp);
                }
                if (lowervp)
                        vrele(lowervp);

                goto out;
        }

        (*vpp)->v_data = unp;
        (*vpp)->v_flag |= vflag;
        if (uppervp)
                (*vpp)->v_type = uppervp->v_type;
        else
                (*vpp)->v_type = lowervp->v_type;

        if ((*vpp)->v_type == VREG)
                ubc_info_init(*vpp);

        un = VTOUNION(*vpp);
        un->un_vnode = *vpp;
        un->un_uppervp = uppervp;
        un->un_uppersz = VNOVAL;
        un->un_lowervp = lowervp;
        un->un_lowersz = VNOVAL;
        un->un_pvp = undvp;
        if (undvp != NULLVP)
                VREF(undvp);
        un->un_dircache = 0;
        un->un_openl = 0;
        un->un_flags = UN_LOCKED;
        if (un->un_uppervp)
                un->un_flags |= UN_ULOCK;
#if DIAGNOSTIC
        if (current_proc())
                un->un_pid = current_proc()->p_pid;
        else
                un->un_pid = -1;
#endif
        if (cnp && (lowervp != NULLVP)) {
                un->un_hash = cnp->cn_hash;
                un->un_path = _MALLOC(cnp->cn_namelen+1, M_TEMP, M_WAITOK);
                bcopy(cnp->cn_nameptr, un->un_path, cnp->cn_namelen);
                un->un_path[cnp->cn_namelen] = '\0';
                VREF(dvp);
                un->un_dirvp = dvp;
        } else {
                un->un_hash = 0;
                un->un_path = 0;
                un->un_dirvp = 0;
        }

        if (docache) {
                LIST_INSERT_HEAD(&unhead[hash], un, un_cache);
                un->un_flags |= UN_CACHED;
        }

        if (xlowervp)
                vrele(xlowervp);

out:
        if (docache)
                union_list_unlock(hash);

        return (error);
}

int
union_freevp(vp)
        struct vnode *vp;
{
        struct union_node *un = VTOUNION(vp);

        if (un->un_flags & UN_CACHED) {
                un->un_flags &= ~UN_CACHED;
                LIST_REMOVE(un, un_cache);
        }

        if (un->un_pvp != NULLVP)
                vrele(un->un_pvp);
        if (un->un_uppervp != NULLVP)
                vrele(un->un_uppervp);
        if (un->un_lowervp != NULLVP)
                vrele(un->un_lowervp);
        if (un->un_dirvp != NULLVP)
                vrele(un->un_dirvp);
        if (un->un_path)
                _FREE(un->un_path, M_TEMP);

        FREE(vp->v_data, M_TEMP);
        vp->v_data = 0;

        return (0);
}

/*
 * copyfile. copy the vnode (fvp) to the vnode (tvp)
 * using a sequence of reads and writes. both (fvp)
 * and (tvp) are locked on entry and exit.
 */
int
union_copyfile(fvp, tvp, cred, p)
        struct vnode *fvp;
        struct vnode *tvp;
        struct ucred *cred;
        struct proc *p;
{
        char *buf;
        struct uio uio;
        struct iovec iov;
        int error = 0;

        /*
         * strategy:
         * allocate a buffer of size MAXPHYSIO.
         * loop doing reads and writes, keeping track
         * of the current uio offset.
         * give up at the first sign of trouble.
         */
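
        /*
         * The loop below copies MAXPHYSIO-sized chunks and terminates on
         * the first error or when a read returns no data (the uio_resid
         * check after the read), i.e. at end of file.
         */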

        uio.uio_procp = p;
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_offset = 0;

        VOP_UNLOCK(fvp, 0, p);                          /* XXX */
        VOP_LEASE(fvp, p, cred, LEASE_READ);
        vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, p);       /* XXX */
        VOP_UNLOCK(tvp, 0, p);                          /* XXX */
        VOP_LEASE(tvp, p, cred, LEASE_WRITE);
        vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, p);       /* XXX */

        buf = _MALLOC(MAXPHYSIO, M_TEMP, M_WAITOK);

        /* ugly loop follows... */
        do {
                off_t offset = uio.uio_offset;

                uio.uio_iov = &iov;
                uio.uio_iovcnt = 1;
                iov.iov_base = buf;
                iov.iov_len = MAXPHYSIO;
                uio.uio_resid = iov.iov_len;
                uio.uio_rw = UIO_READ;
                error = VOP_READ(fvp, &uio, 0, cred);

                if (error == 0) {
                        uio.uio_iov = &iov;
                        uio.uio_iovcnt = 1;
                        iov.iov_base = buf;
                        iov.iov_len = MAXPHYSIO - uio.uio_resid;
                        uio.uio_offset = offset;
                        uio.uio_rw = UIO_WRITE;
                        uio.uio_resid = iov.iov_len;

                        if (uio.uio_resid == 0)
                                break;

                        do {
                                error = VOP_WRITE(tvp, &uio, 0, cred);
                        } while ((uio.uio_resid > 0) && (error == 0));
                }

        } while (error == 0);

        _FREE(buf, M_TEMP);
        return (error);
}

/*
 * (un) is assumed to be locked on entry and remains
 * locked on exit.
 */
int
union_copyup(un, docopy, cred, p)
        struct union_node *un;
        int docopy;
        struct ucred *cred;
        struct proc *p;
{
        int error;
        struct vnode *lvp, *uvp;

        error = union_vn_create(&uvp, un, p);
        if (error)
                return (error);

        /* at this point, uppervp is locked */
        union_newupper(un, uvp);
        un->un_flags |= UN_ULOCK;

        lvp = un->un_lowervp;

        if (docopy) {
                /*
                 * XXX - should not ignore errors
                 * from VOP_CLOSE
                 */
                vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY, p);
                error = VOP_OPEN(lvp, FREAD, cred, p);
                if (error == 0) {
                        error = union_copyfile(lvp, uvp, cred, p);
                        VOP_UNLOCK(lvp, 0, p);
                        (void) VOP_CLOSE(lvp, FREAD, cred, p);
                }
#ifdef UNION_DIAGNOSTIC
                if (error == 0)
                        uprintf("union: copied up %s\n", un->un_path);
#endif

        }
        un->un_flags &= ~UN_ULOCK;
        VOP_UNLOCK(uvp, 0, p);
        union_vn_close(uvp, FWRITE, cred, p);
        vn_lock(uvp, LK_EXCLUSIVE | LK_RETRY, p);
        un->un_flags |= UN_ULOCK;

        /*
         * Subsequent IOs will go to the top layer, so
         * call close on the lower vnode and open on the
         * upper vnode to ensure that the filesystem keeps
         * its reference counts right. This doesn't do
         * the right thing with (cred) and (FREAD) though.
         * Ignoring error returns is not right, either.
         */
        if (error == 0) {
                int i;

                for (i = 0; i < un->un_openl; i++) {
                        (void) VOP_CLOSE(lvp, FREAD, cred, p);
                        (void) VOP_OPEN(uvp, FREAD, cred, p);
                }
                un->un_openl = 0;
        }

        return (error);
}

static int
union_relookup(um, dvp, vpp, cnp, cn, path, pathlen)
        struct union_mount *um;
        struct vnode *dvp;
        struct vnode **vpp;
        struct componentname *cnp;
        struct componentname *cn;
        char *path;
        int pathlen;
{
        int error;

        /*
         * A new componentname structure must be faked up because
         * there is no way to know where the upper level cnp came
         * from or what it is being used for. This must duplicate
         * some of the work done by NDINIT, some of the work done
         * by namei, some of the work done by lookup and some of
         * the work done by VOP_LOOKUP when given a CREATE flag.
         * Conclusion: Horrible.
         *
         * The pathname buffer will be FREEed by VOP_MKDIR.
         */
        cn->cn_namelen = pathlen;
        cn->cn_pnbuf = _MALLOC_ZONE(cn->cn_namelen+1, M_NAMEI, M_WAITOK);
        cn->cn_pnlen = cn->cn_namelen+1;
        bcopy(path, cn->cn_pnbuf, cn->cn_namelen);
        cn->cn_pnbuf[cn->cn_namelen] = '\0';

        cn->cn_nameiop = CREATE;
        cn->cn_flags = (LOCKPARENT|HASBUF|SAVENAME|SAVESTART|ISLASTCN);
        cn->cn_proc = cnp->cn_proc;
        if (um->um_op == UNMNT_ABOVE)
                cn->cn_cred = cnp->cn_cred;
        else
                cn->cn_cred = um->um_cred;
        cn->cn_nameptr = cn->cn_pnbuf;
        cn->cn_hash = cnp->cn_hash;
        cn->cn_consume = cnp->cn_consume;

        VREF(dvp);
        error = relookup(dvp, vpp, cn);
        if (!error)
                vrele(dvp);

        return (error);
}

/*
 * Create a shadow directory in the upper layer.
 * The new vnode is returned locked.
 *
 * (um) points to the union mount structure for access to
 * the mounting process's credentials.
 * (dvp) is the directory in which to create the shadow directory.
 * it is unlocked on entry and exit.
 * (cnp) is the componentname to be created.
 * (vpp) is the returned newly created shadow directory, which
 * is returned locked.
 */
int
union_mkshadow(um, dvp, cnp, vpp)
        struct union_mount *um;
        struct vnode *dvp;
        struct componentname *cnp;
        struct vnode **vpp;
{
        int error;
        struct vattr va;
        struct proc *p = cnp->cn_proc;
        struct componentname cn;

        error = union_relookup(um, dvp, vpp, cnp, &cn,
                        cnp->cn_nameptr, cnp->cn_namelen);
        if (error)
                return (error);

        if (*vpp) {
                VOP_ABORTOP(dvp, &cn);
                VOP_UNLOCK(dvp, 0, p);
                vrele(*vpp);
                *vpp = NULLVP;
                return (EEXIST);
        }

        /*
         * policy: when creating the shadow directory in the
         * upper layer, create it owned by the user who did
         * the mount, group from parent directory, and mode
         * 777 modified by umask (i.e., mostly identical to the
         * mkdir syscall). (jsp, kb)
         */

        VATTR_NULL(&va);
        va.va_type = VDIR;
        va.va_mode = um->um_cmode;

        /* VOP_LEASE: dvp is locked */
        VOP_LEASE(dvp, p, cn.cn_cred, LEASE_WRITE);

        error = VOP_MKDIR(dvp, vpp, &cn, &va);
        return (error);
}

/*
 * Create a whiteout entry in the upper layer.
 *
 * (um) points to the union mount structure for access to
 * the mounting process's credentials.
 * (dvp) is the directory in which to create the whiteout.
 * it is locked on entry and exit.
 * (cnp) is the componentname to be created.
 */
int
union_mkwhiteout(um, dvp, cnp, path)
        struct union_mount *um;
        struct vnode *dvp;
        struct componentname *cnp;
        char *path;
{
        int error;
        struct vattr va;
        struct proc *p = cnp->cn_proc;
        struct vnode *wvp;
        struct componentname cn;

        VOP_UNLOCK(dvp, 0, p);
        error = union_relookup(um, dvp, &wvp, cnp, &cn, path, strlen(path));
        if (error) {
                vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
                return (error);
        }

        if (wvp) {
                VOP_ABORTOP(dvp, &cn);
                vrele(dvp);
                vrele(wvp);
                return (EEXIST);
        }

        /* VOP_LEASE: dvp is locked */
        VOP_LEASE(dvp, p, p->p_ucred, LEASE_WRITE);

        error = VOP_WHITEOUT(dvp, &cn, CREATE);
        if (error)
                VOP_ABORTOP(dvp, &cn);

        vrele(dvp);

        return (error);
}

/*
 * union_vn_create: creates and opens a new shadow file
 * on the upper union layer. this function is similar
 * in spirit to calling vn_open but it avoids calling namei().
 * the problem with calling namei is that a) it locks too many
 * things, and b) it doesn't start at the "right" directory,
 * whereas relookup is told where to start.
 */
int
union_vn_create(vpp, un, p)
        struct vnode **vpp;
        struct union_node *un;
        struct proc *p;
{
        struct vnode *vp;
        struct ucred *cred = p->p_ucred;
        struct vattr vat;
        struct vattr *vap = &vat;
        int fmode = FFLAGS(O_WRONLY|O_CREAT|O_TRUNC|O_EXCL);
        int error;
        int cmode = UN_FILEMODE & ~p->p_fd->fd_cmask;
        char *cp;
        struct componentname cn;

        *vpp = NULLVP;

        /*
         * Build a new componentname structure (for the same
         * reasons outlined in union_mkshadow).
         * The difference here is that the file is owned by
         * the current user, rather than by the person who
         * did the mount, since the current user needs to be
         * able to write the file (that's why it is being
         * copied in the first place).
         */
        cn.cn_namelen = strlen(un->un_path);
        cn.cn_pnbuf = (caddr_t) _MALLOC_ZONE(cn.cn_namelen+1,
                        M_NAMEI, M_WAITOK);
        cn.cn_pnlen = cn.cn_namelen+1;
        bcopy(un->un_path, cn.cn_pnbuf, cn.cn_namelen+1);
        cn.cn_nameiop = CREATE;
        cn.cn_flags = (LOCKPARENT|HASBUF|SAVENAME|SAVESTART|ISLASTCN);
        cn.cn_proc = p;
        cn.cn_cred = p->p_ucred;
        cn.cn_nameptr = cn.cn_pnbuf;
        cn.cn_hash = un->un_hash;
        cn.cn_consume = 0;

        VREF(un->un_dirvp);
        if (error = relookup(un->un_dirvp, &vp, &cn))
                return (error);
        vrele(un->un_dirvp);

        if (vp) {
                VOP_ABORTOP(un->un_dirvp, &cn);
                if (un->un_dirvp == vp)
                        vrele(un->un_dirvp);
                else
                        vput(un->un_dirvp);
                vrele(vp);
                return (EEXIST);
        }

        /*
         * Good - there was no race to create the file
         * so go ahead and create it. The permissions
         * on the file will be 0666 modified by the
         * current user's umask. Access to the file, while
         * it is unioned, will require access to the top *and*
         * bottom files. Access when not unioned will simply
         * require access to the top-level file.
         * TODO: confirm choice of access permissions.
         */
        VATTR_NULL(vap);
        vap->va_type = VREG;
        vap->va_mode = cmode;
        VOP_LEASE(un->un_dirvp, p, cred, LEASE_WRITE);
        if (error = VOP_CREATE(un->un_dirvp, &vp, &cn, vap))
                return (error);

        if (error = VOP_OPEN(vp, fmode, cred, p)) {
                vput(vp);
                return (error);
        }

        if (++vp->v_writecount <= 0)
                panic("union: v_writecount");
        *vpp = vp;
        return (0);
}

int
union_vn_close(vp, fmode, cred, p)
        struct vnode *vp;
        int fmode;
        struct ucred *cred;
        struct proc *p;
{

        if (fmode & FWRITE)
                --vp->v_writecount;
        return (VOP_CLOSE(vp, fmode, cred, p));
}

void
union_removed_upper(un)
        struct union_node *un;
{
        struct proc *p = current_proc();        /* XXX */

        union_newupper(un, NULLVP);
        if (un->un_flags & UN_CACHED) {
                un->un_flags &= ~UN_CACHED;
                LIST_REMOVE(un, un_cache);
        }

        if (un->un_flags & UN_ULOCK) {
                un->un_flags &= ~UN_ULOCK;
                VOP_UNLOCK(un->un_uppervp, 0, p);
        }
}

#if 0
struct vnode *
union_lowervp(vp)
        struct vnode *vp;
{
        struct union_node *un = VTOUNION(vp);

        if ((un->un_lowervp != NULLVP) &&
            (vp->v_type == un->un_lowervp->v_type)) {
                if (vget(un->un_lowervp, 0, current_proc()) == 0)
                        return (un->un_lowervp);
        }

        return (NULLVP);
}
#endif

/*
 * determine whether a whiteout is needed
 * during a remove/rmdir operation.
 */
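/*
 * A whiteout is needed when a lower-layer object exists under the same
 * name (removing only the upper object would re-expose it), or when the
 * upper object carries the OPAQUE flag and so may itself be masking
 * lower-layer entries.
 */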
int
union_dowhiteout(un, cred, p)
        struct union_node *un;
        struct ucred *cred;
        struct proc *p;
{
        struct vattr va;

        if (un->un_lowervp != NULLVP)
                return (1);

        if (VOP_GETATTR(un->un_uppervp, &va, cred, p) == 0 &&
            (va.va_flags & OPAQUE))
                return (1);

        return (0);
}

static void
union_dircache_r(vp, vppp, cntp)
        struct vnode *vp;
        struct vnode ***vppp;
        int *cntp;
{
        struct union_node *un;

        if (vp->v_op != union_vnodeop_p) {
                if (vppp) {
                        VREF(vp);
                        *(*vppp)++ = vp;
                        if (--(*cntp) == 0)
                                panic("union: dircache table too small");
                } else {
                        (*cntp)++;
                }

                return;
        }

        un = VTOUNION(vp);
        if (un->un_uppervp != NULLVP)
                union_dircache_r(un->un_uppervp, vppp, cntp);
        if (un->un_lowervp != NULLVP)
                union_dircache_r(un->un_lowervp, vppp, cntp);
}

struct vnode *
union_dircache(vp, p)
        struct vnode *vp;
        struct proc *p;
{
        int cnt;
        struct vnode *nvp;
        struct vnode **vpp;
        struct vnode **dircache;
        struct union_node *un;
        int error;

        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
        dircache = VTOUNION(vp)->un_dircache;

        nvp = NULLVP;

        if (dircache == 0) {
                cnt = 0;
                union_dircache_r(vp, 0, &cnt);
                cnt++;
                dircache = (struct vnode **)
                                _MALLOC(cnt * sizeof(struct vnode *),
                                        M_TEMP, M_WAITOK);
                vpp = dircache;
                union_dircache_r(vp, &vpp, &cnt);
                *vpp = NULLVP;
                vpp = dircache + 1;
        } else {
                vpp = dircache;
                do {
                        if (*vpp++ == VTOUNION(vp)->un_uppervp)
                                break;
                } while (*vpp != NULLVP);
        }

        if (*vpp == NULLVP)
                goto out;

        vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, p);
        VREF(*vpp);
        error = union_allocvp(&nvp, vp->v_mount, NULLVP, NULLVP, 0, *vpp, NULLVP, 0);
        if (error)
                goto out;

        VTOUNION(vp)->un_dircache = 0;
        un = VTOUNION(nvp);
        un->un_dircache = dircache;

out:
        VOP_UNLOCK(vp, 0, p);
        return (nvp);
}