/*
 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_node.c	8.6 (Berkeley) 5/22/95
 * FreeBSD-Id: nfs_node.c,v 1.22 1997/10/28 14:06:20 bde Exp $
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/mount_internal.h>
#include <sys/vnode.h>
#include <sys/ubc.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_gss.h>
#include <nfs/nfsmount.h>

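/*
 * nfsnodes are hashed by a simple checksum of their file handle (see
 * nfs_hash()) into nfsnodehashtbl; the hash chains and the per-node hash
 * flags (n_hflag) are protected by nfs_node_hash_mutex.
 */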
#define NFSNOHASH(fhsum) \
	(&nfsnodehashtbl[(fhsum) & nfsnodehash])
static LIST_HEAD(nfsnodehashhead, nfsnode) *nfsnodehashtbl;
static u_long nfsnodehash;

static lck_grp_t *nfs_node_hash_lck_grp;
static lck_grp_t *nfs_node_lck_grp;
static lck_grp_t *nfs_data_lck_grp;
lck_mtx_t *nfs_node_hash_mutex;

/*
 * Initialize the lock groups and hash-table mutex used for nfsnodes.
 * (The hash table itself is allocated later by nfs_nhinit_finish().)
 */
void
nfs_nhinit(void)
{
	nfs_node_hash_lck_grp = lck_grp_alloc_init("nfs_node_hash", LCK_GRP_ATTR_NULL);
	nfs_node_hash_mutex = lck_mtx_alloc_init(nfs_node_hash_lck_grp, LCK_ATTR_NULL);
	nfs_node_lck_grp = lck_grp_alloc_init("nfs_node", LCK_GRP_ATTR_NULL);
	nfs_data_lck_grp = lck_grp_alloc_init("nfs_data", LCK_GRP_ATTR_NULL);
}

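/*
 * Allocate the nfsnode hash table the first time it is needed,
 * sized from desiredvnodes.
 */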
void
nfs_nhinit_finish(void)
{
	lck_mtx_lock(nfs_node_hash_mutex);
	if (!nfsnodehashtbl)
		nfsnodehashtbl = hashinit(desiredvnodes, M_NFSNODE, &nfsnodehash);
	lck_mtx_unlock(nfs_node_hash_mutex);
}

/*
 * Compute an entry in the NFS hash table structure
 */
u_long
nfs_hash(u_char *fhp, int fhsize)
{
	u_long fhsum;
	int i;

	fhsum = 0;
	for (i = 0; i < fhsize; i++)
		fhsum += *fhp++;
	return (fhsum);
}

/*
 * Look up a vnode/nfsnode by file handle, creating one if it isn't found.
 * Callers must check for mount points!!
 * On success, *npp points to the nfsnode, which is returned locked;
 * on failure an errno is returned instead.
 */
int
nfs_nget(
	mount_t mp,
	nfsnode_t dnp,
	struct componentname *cnp,
	u_char *fhp,
	int fhsize,
	struct nfs_vattr *nvap,
	u_int64_t *xidp,
	uint32_t auth,
	int flags,
	nfsnode_t *npp)
{
	nfsnode_t np;
	struct nfsnodehashhead *nhpp;
	vnode_t vp;
	int error, nfsvers;
	mount_t mp2;
	struct vnode_fsparam vfsp;
	uint32_t vid;

	FSDBG_TOP(263, mp, dnp, flags, npp);

	/* Check for unmount in progress */
	if (!mp || (mp->mnt_kern_flag & MNTK_FRCUNMOUNT)) {
		*npp = NULL;
		error = ENXIO;
		FSDBG_BOT(263, mp, dnp, 0xd1e, error);
		return (error);
	}
	nfsvers = VFSTONFS(mp)->nm_vers;

	nhpp = NFSNOHASH(nfs_hash(fhp, fhsize));
loop:
	lck_mtx_lock(nfs_node_hash_mutex);
	for (np = nhpp->lh_first; np != 0; np = np->n_hash.le_next) {
		mp2 = (np->n_hflag & NHINIT) ? np->n_mount : NFSTOMP(np);
		if (mp != mp2 || np->n_fhsize != fhsize ||
		    bcmp(fhp, np->n_fhp, fhsize))
			continue;
		if (nvap && (nvap->nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) &&
		    cnp && (cnp->cn_namelen > (fhsize - (int)sizeof(dnp)))) {
			/* The name was too long to fit in the file handle.  Check it against the node's name. */
			int namecmp = 0;
			const char *vname = vnode_getname(NFSTOV(np));
			if (vname) {
				if (cnp->cn_namelen != (int)strlen(vname))
					namecmp = 1;
				else
					namecmp = strncmp(vname, cnp->cn_nameptr, cnp->cn_namelen);
				vnode_putname(vname);
			}
			if (namecmp)  /* full name didn't match */
				continue;
		}
		FSDBG(263, dnp, np, np->n_flag, 0xcace0000);
		/* if the node is locked, sleep on it */
		if ((np->n_hflag & NHLOCKED) && !(flags & NG_NOCREATE)) {
			np->n_hflag |= NHLOCKWANT;
			FSDBG(263, dnp, np, np->n_flag, 0xcace2222);
			msleep(np, nfs_node_hash_mutex, PDROP | PINOD, "nfs_nget", NULL);
			FSDBG(263, dnp, np, np->n_flag, 0xcace3333);
			goto loop;
		}
		vp = NFSTOV(np);
		vid = vnode_vid(vp);
		lck_mtx_unlock(nfs_node_hash_mutex);
		if ((error = vnode_getwithvid(vp, vid))) {
			/*
			 * If vnode is being reclaimed or has already
			 * changed identity, no need to wait.
			 */
			FSDBG_BOT(263, dnp, *npp, 0xcace0d1e, error);
			return (error);
		}
		if ((error = nfs_node_lock(np))) {
			/* this only fails if the node is now unhashed */
			/* so let's see if we can find/create it again */
			FSDBG(263, dnp, *npp, 0xcaced1e2, error);
			vnode_put(vp);
			if (flags & NG_NOCREATE) {
				*npp = 0;
				FSDBG_BOT(263, dnp, *npp, 0xcaced1e0, ENOENT);
				return (ENOENT);
			}
			goto loop;
		}
		/* update attributes */
		if (nvap)
			error = nfs_loadattrcache(np, nvap, xidp, 0);
		if (error) {
			nfs_node_unlock(np);
			vnode_put(vp);
		} else {
			if (dnp && cnp && (flags & NG_MAKEENTRY))
				cache_enter(NFSTOV(dnp), vp, cnp);
			*npp = np;
		}
		FSDBG_BOT(263, dnp, *npp, 0xcace0000, error);
		return (error);
	}

	FSDBG(263, mp, dnp, npp, 0xaaaaaaaa);

	if (flags & NG_NOCREATE) {
		lck_mtx_unlock(nfs_node_hash_mutex);
		*npp = 0;
		FSDBG_BOT(263, dnp, *npp, 0x80000001, ENOENT);
		return (ENOENT);
	}

	/*
	 * allocate and initialize nfsnode and stick it in the hash
	 * before calling getnewvnode().  Anyone finding it in the
	 * hash before initialization is complete will wait for it.
	 */
	MALLOC_ZONE(np, nfsnode_t, sizeof *np, M_NFSNODE, M_WAITOK);
	if (!np) {
		lck_mtx_unlock(nfs_node_hash_mutex);
		*npp = 0;
		FSDBG_BOT(263, dnp, *npp, 0x80000001, ENOMEM);
		return (ENOMEM);
	}
	bzero(np, sizeof *np);
	np->n_hflag |= (NHINIT | NHLOCKED);
	np->n_mount = mp;
	np->n_auth = auth;
	TAILQ_INIT(&np->n_opens);
	TAILQ_INIT(&np->n_lock_owners);
	TAILQ_INIT(&np->n_locks);
	np->n_dlink.tqe_next = NFSNOLIST;
	np->n_dreturn.tqe_next = NFSNOLIST;
	np->n_monlink.le_next = NFSNOLIST;

	/* ugh... need to keep track of ".zfs" directories to workaround server bugs */
	if ((nvap->nva_type == VDIR) && cnp && (cnp->cn_namelen == 4) &&
	    (cnp->cn_nameptr[0] == '.') && (cnp->cn_nameptr[1] == 'z') &&
	    (cnp->cn_nameptr[2] == 'f') && (cnp->cn_nameptr[3] == 's'))
		np->n_flag |= NISDOTZFS;
	if (dnp && (dnp->n_flag & NISDOTZFS))
		np->n_flag |= NISDOTZFSCHILD;

	if (dnp && cnp && ((cnp->cn_namelen != 2) ||
	    (cnp->cn_nameptr[0] != '.') || (cnp->cn_nameptr[1] != '.'))) {
		vnode_t dvp = NFSTOV(dnp);
		if (!vnode_get(dvp)) {
			if (!vnode_ref(dvp))
				np->n_parent = dvp;
			vnode_put(dvp);
		}
	}

	/* setup node's file handle */
	if (fhsize > NFS_SMALLFH) {
		MALLOC_ZONE(np->n_fhp, u_char *,
			fhsize, M_NFSBIGFH, M_WAITOK);
		if (!np->n_fhp) {
			lck_mtx_unlock(nfs_node_hash_mutex);
			FREE_ZONE(np, sizeof *np, M_NFSNODE);
			*npp = 0;
			FSDBG_BOT(263, dnp, *npp, 0x80000002, ENOMEM);
			return (ENOMEM);
		}
	} else {
		np->n_fhp = &np->n_fh[0];
	}
	bcopy(fhp, np->n_fhp, fhsize);
	np->n_fhsize = fhsize;

	/* Insert the nfsnode in the hash queue for its new file handle */
	LIST_INSERT_HEAD(nhpp, np, n_hash);
	np->n_hflag |= NHHASHED;
	FSDBG(266, 0, np, np->n_flag, np->n_hflag);

	/* lock the new nfsnode */
	lck_mtx_init(&np->n_lock, nfs_node_lck_grp, LCK_ATTR_NULL);
	lck_rw_init(&np->n_datalock, nfs_data_lck_grp, LCK_ATTR_NULL);
	lck_mtx_init(&np->n_openlock, nfs_open_grp, LCK_ATTR_NULL);
	lck_mtx_lock(&np->n_lock);

	/* release lock on hash table */
	lck_mtx_unlock(nfs_node_hash_mutex);

	/* do initial loading of attributes */
	NACLINVALIDATE(np);
	NACCESSINVALIDATE(np);
	error = nfs_loadattrcache(np, nvap, xidp, 1);
	if (error) {
		FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
		nfs_node_unlock(np);
		lck_mtx_lock(nfs_node_hash_mutex);
		LIST_REMOVE(np, n_hash);
		np->n_hflag &= ~(NHHASHED|NHINIT|NHLOCKED);
		if (np->n_hflag & NHLOCKWANT) {
			np->n_hflag &= ~NHLOCKWANT;
			wakeup(np);
		}
		lck_mtx_unlock(nfs_node_hash_mutex);
		if (np->n_parent) {
			if (!vnode_get(np->n_parent)) {
				vnode_rele(np->n_parent);
				vnode_put(np->n_parent);
			}
			np->n_parent = NULL;
		}
		lck_mtx_destroy(&np->n_lock, nfs_node_lck_grp);
		lck_rw_destroy(&np->n_datalock, nfs_data_lck_grp);
		lck_mtx_destroy(&np->n_openlock, nfs_open_grp);
		if (np->n_fhsize > NFS_SMALLFH)
			FREE_ZONE(np->n_fhp, np->n_fhsize, M_NFSBIGFH);
		FREE_ZONE(np, sizeof *np, M_NFSNODE);
		*npp = 0;
		FSDBG_BOT(263, dnp, *npp, 0x80000003, error);
		return (error);
	}
	NFS_CHANGED_UPDATE(nfsvers, np, nvap);
	if (nvap->nva_type == VDIR)
		NFS_CHANGED_UPDATE_NC(nfsvers, np, nvap);

	/* now, attempt to get a new vnode */
	vfsp.vnfs_mp = mp;
	vfsp.vnfs_vtype = nvap->nva_type;
	vfsp.vnfs_str = "nfs";
	vfsp.vnfs_dvp = dnp ? NFSTOV(dnp) : NULL;
	vfsp.vnfs_fsnode = np;
	if (nfsvers == NFS_VER4) {
#if FIFO
		if (nvap->nva_type == VFIFO)
			vfsp.vnfs_vops = fifo_nfsv4nodeop_p;
		else
#endif /* FIFO */
		if (nvap->nva_type == VBLK || nvap->nva_type == VCHR)
			vfsp.vnfs_vops = spec_nfsv4nodeop_p;
		else
			vfsp.vnfs_vops = nfsv4_vnodeop_p;
	} else {
#if FIFO
		if (nvap->nva_type == VFIFO)
			vfsp.vnfs_vops = fifo_nfsv2nodeop_p;
		else
#endif /* FIFO */
		if (nvap->nva_type == VBLK || nvap->nva_type == VCHR)
			vfsp.vnfs_vops = spec_nfsv2nodeop_p;
		else
			vfsp.vnfs_vops = nfsv2_vnodeop_p;
	}
	vfsp.vnfs_markroot = (flags & NG_MARKROOT) ? 1 : 0;
	vfsp.vnfs_marksystem = 0;
	vfsp.vnfs_rdev = 0;
	vfsp.vnfs_filesize = nvap->nva_size;
	vfsp.vnfs_cnp = cnp;
	vfsp.vnfs_flags = VNFS_ADDFSREF;
	if (!dnp || !cnp || !(flags & NG_MAKEENTRY))
		vfsp.vnfs_flags |= VNFS_NOCACHE;

#if CONFIG_TRIGGERS
	if ((nfsvers >= NFS_VER4) && (nvap->nva_type == VDIR) && (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER)) {
		struct vnode_trigger_param vtp;
		bzero(&vtp, sizeof(vtp));
		bcopy(&vfsp, &vtp.vnt_params, sizeof(vfsp));
		vtp.vnt_resolve_func = nfs_mirror_mount_trigger_resolve;
		vtp.vnt_unresolve_func = nfs_mirror_mount_trigger_unresolve;
		vtp.vnt_rearm_func = nfs_mirror_mount_trigger_rearm;
		vtp.vnt_flags = VNT_AUTO_REARM;
		error = vnode_create(VNCREATE_TRIGGER, VNCREATE_TRIGGER_SIZE, &vtp, &np->n_vnode);
	} else
#endif
	{
		error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &np->n_vnode);
	}
	if (error) {
		FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
		nfs_node_unlock(np);
		lck_mtx_lock(nfs_node_hash_mutex);
		LIST_REMOVE(np, n_hash);
		np->n_hflag &= ~(NHHASHED|NHINIT|NHLOCKED);
		if (np->n_hflag & NHLOCKWANT) {
			np->n_hflag &= ~NHLOCKWANT;
			wakeup(np);
		}
		lck_mtx_unlock(nfs_node_hash_mutex);
		if (np->n_parent) {
			if (!vnode_get(np->n_parent)) {
				vnode_rele(np->n_parent);
				vnode_put(np->n_parent);
			}
			np->n_parent = NULL;
		}
		lck_mtx_destroy(&np->n_lock, nfs_node_lck_grp);
		lck_rw_destroy(&np->n_datalock, nfs_data_lck_grp);
		lck_mtx_destroy(&np->n_openlock, nfs_open_grp);
		if (np->n_fhsize > NFS_SMALLFH)
			FREE_ZONE(np->n_fhp, np->n_fhsize, M_NFSBIGFH);
		FREE_ZONE(np, sizeof *np, M_NFSNODE);
		*npp = 0;
		FSDBG_BOT(263, dnp, *npp, 0x80000004, error);
		return (error);
	}
	vp = np->n_vnode;
	vnode_settag(vp, VT_NFS);
	/* node is now initialized */

	/* check if anyone's waiting on this node */
	lck_mtx_lock(nfs_node_hash_mutex);
	np->n_hflag &= ~(NHINIT|NHLOCKED);
	if (np->n_hflag & NHLOCKWANT) {
		np->n_hflag &= ~NHLOCKWANT;
		wakeup(np);
	}
	lck_mtx_unlock(nfs_node_hash_mutex);

	*npp = np;

	FSDBG_BOT(263, dnp, vp, *npp, error);
	return (error);
}

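/*
 * nfs_vnop_inactive() is called when the last reference to the vnode is
 * released: clean up any lingering open/lock state and remove any
 * sillyrenamed file still associated with the node.
 */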
int
nfs_vnop_inactive(ap)
	struct vnop_inactive_args /* {
		struct vnodeop_desc *a_desc;
		vnode_t a_vp;
		vfs_context_t a_context;
	} */ *ap;
{
	vnode_t vp = ap->a_vp;
	vfs_context_t ctx = ap->a_context;
	nfsnode_t np = VTONFS(ap->a_vp);
	struct nfs_sillyrename *nsp;
	struct nfs_vattr nvattr;
	int unhash, attrerr, busyerror, error, inuse, busied, force;
	struct nfs_open_file *nofp;
	struct componentname cn;
	struct nfsmount *nmp = NFSTONMP(np);
	mount_t mp = vnode_mount(vp);

restart:
	force = (!mp || (mp->mnt_kern_flag & MNTK_FRCUNMOUNT));
	error = 0;
	inuse = (nfs_mount_state_in_use_start(nmp, NULL) == 0);

	/* There shouldn't be any open or lock state at this point */
	lck_mtx_lock(&np->n_openlock);
	if (np->n_openrefcnt && !force)
		NP(np, "nfs_vnop_inactive: still open: %d", np->n_openrefcnt);
	TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
		lck_mtx_lock(&nofp->nof_lock);
		if (nofp->nof_flags & NFS_OPEN_FILE_BUSY) {
			if (!force)
				NP(np, "nfs_vnop_inactive: open file busy");
			busied = 0;
		} else {
			nofp->nof_flags |= NFS_OPEN_FILE_BUSY;
			busied = 1;
		}
		lck_mtx_unlock(&nofp->nof_lock);
		if ((np->n_flag & NREVOKE) || (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
			if (busied)
				nfs_open_file_clear_busy(nofp);
			continue;
		}
		/*
		 * If we just created the file, we already had it open in
		 * anticipation of getting a subsequent open call.  If the
		 * node has gone inactive without being open, we need to
		 * clean up (close) the open done in the create.
		 */
		if ((nofp->nof_flags & NFS_OPEN_FILE_CREATE) && nofp->nof_creator && !force) {
			if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
				lck_mtx_unlock(&np->n_openlock);
				if (busied)
					nfs_open_file_clear_busy(nofp);
				if (inuse)
					nfs_mount_state_in_use_end(nmp, 0);
				if (!nfs4_reopen(nofp, NULL))
					goto restart;
			}
			nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
			lck_mtx_unlock(&np->n_openlock);
			error = nfs_close(np, nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, ctx);
			if (error) {
				NP(np, "nfs_vnop_inactive: create close error: %d", error);
				nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
			}
			if (busied)
				nfs_open_file_clear_busy(nofp);
			if (inuse)
				nfs_mount_state_in_use_end(nmp, error);
			goto restart;
		}
		if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
			/*
			 * If the file is marked as needing reopen, but this was the only
			 * open on the file, just drop the open.
			 */
			nofp->nof_flags &= ~NFS_OPEN_FILE_NEEDCLOSE;
			if ((nofp->nof_flags & NFS_OPEN_FILE_REOPEN) && (nofp->nof_opencnt == 1)) {
				nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
				nofp->nof_r--;
				nofp->nof_opencnt--;
				nofp->nof_access = 0;
			} else if (!force) {
				lck_mtx_unlock(&np->n_openlock);
				if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
					if (busied)
						nfs_open_file_clear_busy(nofp);
					if (inuse)
						nfs_mount_state_in_use_end(nmp, 0);
					if (!nfs4_reopen(nofp, NULL))
						goto restart;
				}
				error = nfs_close(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
				if (error) {
					NP(np, "nfs_vnop_inactive: need close error: %d", error);
					nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
				}
				if (busied)
					nfs_open_file_clear_busy(nofp);
				if (inuse)
					nfs_mount_state_in_use_end(nmp, error);
				goto restart;
			}
		}
		if (nofp->nof_opencnt && !force)
			NP(np, "nfs_vnop_inactive: file still open: %d", nofp->nof_opencnt);
		if (!force && (nofp->nof_access || nofp->nof_deny ||
		    nofp->nof_mmap_access || nofp->nof_mmap_deny ||
		    nofp->nof_r || nofp->nof_w || nofp->nof_rw ||
		    nofp->nof_r_dw || nofp->nof_w_dw || nofp->nof_rw_dw ||
		    nofp->nof_r_drw || nofp->nof_w_drw || nofp->nof_rw_drw ||
		    nofp->nof_d_r || nofp->nof_d_w || nofp->nof_d_rw ||
		    nofp->nof_d_r_dw || nofp->nof_d_w_dw || nofp->nof_d_rw_dw ||
		    nofp->nof_d_r_drw || nofp->nof_d_w_drw || nofp->nof_d_rw_drw)) {
			NP(np, "nfs_vnop_inactive: non-zero access: %d %d %d %d # %u.%u %u.%u %u.%u dw %u.%u %u.%u %u.%u drw %u.%u %u.%u %u.%u",
				nofp->nof_access, nofp->nof_deny,
				nofp->nof_mmap_access, nofp->nof_mmap_deny,
				nofp->nof_r, nofp->nof_d_r,
				nofp->nof_w, nofp->nof_d_w,
				nofp->nof_rw, nofp->nof_d_rw,
				nofp->nof_r_dw, nofp->nof_d_r_dw,
				nofp->nof_w_dw, nofp->nof_d_w_dw,
				nofp->nof_rw_dw, nofp->nof_d_rw_dw,
				nofp->nof_r_drw, nofp->nof_d_r_drw,
				nofp->nof_w_drw, nofp->nof_d_w_drw,
				nofp->nof_rw_drw, nofp->nof_d_rw_drw);
		}
		if (busied)
			nfs_open_file_clear_busy(nofp);
	}
	lck_mtx_unlock(&np->n_openlock);

	if (inuse && nfs_mount_state_in_use_end(nmp, error))
		goto restart;

	nfs_node_lock_force(np);

	if (vnode_vtype(vp) != VDIR) {
		nsp = np->n_sillyrename;
		np->n_sillyrename = NULL;
	} else {
		nsp = NULL;
	}

	FSDBG_TOP(264, vp, np, np->n_flag, nsp);

	if (!nsp) {
		/* no silly file to clean up... */
		/* clear all flags other than these */
		np->n_flag &= (NMODIFIED);
		nfs_node_unlock(np);
		FSDBG_BOT(264, vp, np, np->n_flag, 0);
		return (0);
	}
	nfs_node_unlock(np);

	/* Remove the silly file that was rename'd earlier */

	/* flush all the buffers */
	nfs_vinvalbuf2(vp, V_SAVE, vfs_context_thread(ctx), nsp->nsr_cred, 1);

	/* try to get the latest attributes */
	attrerr = nfs_getattr(np, &nvattr, ctx, NGA_UNCACHED);

	/* Check if we should remove it from the node hash. */
	/* Leave it if inuse or it has multiple hard links. */
	if (vnode_isinuse(vp, 0) || (!attrerr && (nvattr.nva_nlink > 1))) {
		unhash = 0;
	} else {
		unhash = 1;
		ubc_setsize(vp, 0);
	}

	/* mark this node and the directory busy while we do the remove */
	busyerror = nfs_node_set_busy2(nsp->nsr_dnp, np, vfs_context_thread(ctx));

	/* lock the node while we remove the silly file */
	lck_mtx_lock(nfs_node_hash_mutex);
	while (np->n_hflag & NHLOCKED) {
		np->n_hflag |= NHLOCKWANT;
		msleep(np, nfs_node_hash_mutex, PINOD, "nfs_inactive", NULL);
	}
	np->n_hflag |= NHLOCKED;
	lck_mtx_unlock(nfs_node_hash_mutex);

	/* purge the name cache to deter others from finding it */
	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = nsp->nsr_name;
	cn.cn_namelen = nsp->nsr_namlen;
	nfs_name_cache_purge(nsp->nsr_dnp, np, &cn, ctx);

	FSDBG(264, np, np->n_size, np->n_vattr.nva_size, 0xf00d00f1);

	/* now remove the silly file */
	nfs_removeit(nsp);

	/* clear all flags other than these */
	nfs_node_lock_force(np);
	np->n_flag &= (NMODIFIED);
	nfs_node_unlock(np);

	if (!busyerror)
		nfs_node_clear_busy2(nsp->nsr_dnp, np);

	if (unhash && vnode_isinuse(vp, 0)) {
		/* vnode now inuse after silly remove? */
		unhash = 0;
		ubc_setsize(vp, np->n_size);
	}

	lck_mtx_lock(nfs_node_hash_mutex);
	if (unhash) {
		/*
		 * remove nfsnode from hash now so we can't accidentally find it
		 * again if another object gets created with the same filehandle
		 * before this vnode gets reclaimed
		 */
		if (np->n_hflag & NHHASHED) {
			LIST_REMOVE(np, n_hash);
			np->n_hflag &= ~NHHASHED;
			FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
		}
		vnode_recycle(vp);
	}
	/* unlock the node */
	np->n_hflag &= ~NHLOCKED;
	if (np->n_hflag & NHLOCKWANT) {
		np->n_hflag &= ~NHLOCKWANT;
		wakeup(np);
	}
	lck_mtx_unlock(nfs_node_hash_mutex);

	/* cleanup sillyrename info */
	if (nsp->nsr_cred != NOCRED)
		kauth_cred_unref(&nsp->nsr_cred);
	vnode_rele(NFSTOV(nsp->nsr_dnp));
	FREE_ZONE(nsp, sizeof(*nsp), M_NFSREQ);

	FSDBG_BOT(264, vp, np, np->n_flag, 0);
	return (0);
}

/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 */
int
nfs_vnop_reclaim(ap)
	struct vnop_reclaim_args /* {
		struct vnodeop_desc *a_desc;
		vnode_t a_vp;
		vfs_context_t a_context;
	} */ *ap;
{
	vnode_t vp = ap->a_vp;
	nfsnode_t np = VTONFS(vp);
	vfs_context_t ctx = ap->a_context;
	struct nfs_open_file *nofp, *nextnofp;
	struct nfs_file_lock *nflp, *nextnflp;
	struct nfs_lock_owner *nlop, *nextnlop;
	struct nfsmount *nmp = np->n_mount ? VFSTONFS(np->n_mount) : NFSTONMP(np);
	mount_t mp = vnode_mount(vp);
	int force;

	FSDBG_TOP(265, vp, np, np->n_flag, 0);
	force = (!mp || (mp->mnt_kern_flag & MNTK_FRCUNMOUNT));

	/* There shouldn't be any open or lock state at this point */
	lck_mtx_lock(&np->n_openlock);

	if (nmp && (nmp->nm_vers >= NFS_VER4)) {
		/* need to drop a delegation */
		if (np->n_dreturn.tqe_next != NFSNOLIST) {
			/* remove this node from the delegation return list */
			lck_mtx_lock(&nmp->nm_lock);
			if (np->n_dreturn.tqe_next != NFSNOLIST) {
				TAILQ_REMOVE(&nmp->nm_dreturnq, np, n_dreturn);
				np->n_dreturn.tqe_next = NFSNOLIST;
			}
			lck_mtx_unlock(&nmp->nm_lock);
		}
		if (np->n_dlink.tqe_next != NFSNOLIST) {
			/* remove this node from the delegation list */
			lck_mtx_lock(&nmp->nm_lock);
			if (np->n_dlink.tqe_next != NFSNOLIST) {
				TAILQ_REMOVE(&nmp->nm_delegations, np, n_dlink);
				np->n_dlink.tqe_next = NFSNOLIST;
			}
			lck_mtx_unlock(&nmp->nm_lock);
		}
		if ((np->n_openflags & N_DELEG_MASK) && !force) {
			/* try to return the delegation */
			np->n_openflags &= ~N_DELEG_MASK;
			nfs4_delegreturn_rpc(nmp, np->n_fhp, np->n_fhsize, &np->n_dstateid,
				R_RECOVER, vfs_context_thread(ctx), vfs_context_ucred(ctx));
		}
		if (np->n_attrdirfh) {
			FREE(np->n_attrdirfh, M_TEMP);
			np->n_attrdirfh = NULL;
		}
	}

	/* clean up file locks */
	TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
		if (!(nflp->nfl_flags & NFS_FILE_LOCK_DEAD) && !force) {
			NP(np, "nfs_vnop_reclaim: lock 0x%llx 0x%llx 0x%x (bc %d)",
				nflp->nfl_start, nflp->nfl_end, nflp->nfl_flags, nflp->nfl_blockcnt);
		}
		if (!(nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))) {
			/* try sending an unlock RPC if it wasn't delegated */
			if (!(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED) && !force)
				nmp->nm_funcs->nf_unlock_rpc(np, nflp->nfl_owner, F_WRLCK, nflp->nfl_start, nflp->nfl_end, R_RECOVER,
					NULL, nflp->nfl_owner->nlo_open_owner->noo_cred);
			lck_mtx_lock(&nflp->nfl_owner->nlo_lock);
			TAILQ_REMOVE(&nflp->nfl_owner->nlo_locks, nflp, nfl_lolink);
			lck_mtx_unlock(&nflp->nfl_owner->nlo_lock);
		}
		TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
		nfs_file_lock_destroy(nflp);
	}
	/* clean up lock owners */
	TAILQ_FOREACH_SAFE(nlop, &np->n_lock_owners, nlo_link, nextnlop) {
		if (!TAILQ_EMPTY(&nlop->nlo_locks) && !force)
			NP(np, "nfs_vnop_reclaim: lock owner with locks");
		TAILQ_REMOVE(&np->n_lock_owners, nlop, nlo_link);
		nfs_lock_owner_destroy(nlop);
	}
	/* clean up open state */
	if (np->n_openrefcnt && !force)
		NP(np, "nfs_vnop_reclaim: still open: %d", np->n_openrefcnt);
	TAILQ_FOREACH_SAFE(nofp, &np->n_opens, nof_link, nextnofp) {
		if (nofp->nof_flags & NFS_OPEN_FILE_BUSY)
			NP(np, "nfs_vnop_reclaim: open file busy");
		if (!(np->n_flag & NREVOKE) && !(nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
			if (nofp->nof_opencnt && !force)
				NP(np, "nfs_vnop_reclaim: file still open: %d", nofp->nof_opencnt);
			if (!force && (nofp->nof_access || nofp->nof_deny ||
			    nofp->nof_mmap_access || nofp->nof_mmap_deny ||
			    nofp->nof_r || nofp->nof_w || nofp->nof_rw ||
			    nofp->nof_r_dw || nofp->nof_w_dw || nofp->nof_rw_dw ||
			    nofp->nof_r_drw || nofp->nof_w_drw || nofp->nof_rw_drw ||
			    nofp->nof_d_r || nofp->nof_d_w || nofp->nof_d_rw ||
			    nofp->nof_d_r_dw || nofp->nof_d_w_dw || nofp->nof_d_rw_dw ||
			    nofp->nof_d_r_drw || nofp->nof_d_w_drw || nofp->nof_d_rw_drw)) {
				NP(np, "nfs_vnop_reclaim: non-zero access: %d %d %d %d # %u.%u %u.%u %u.%u dw %u.%u %u.%u %u.%u drw %u.%u %u.%u %u.%u",
					nofp->nof_access, nofp->nof_deny,
					nofp->nof_mmap_access, nofp->nof_mmap_deny,
					nofp->nof_r, nofp->nof_d_r,
					nofp->nof_w, nofp->nof_d_w,
					nofp->nof_rw, nofp->nof_d_rw,
					nofp->nof_r_dw, nofp->nof_d_r_dw,
					nofp->nof_w_dw, nofp->nof_d_w_dw,
					nofp->nof_rw_dw, nofp->nof_d_rw_dw,
					nofp->nof_r_drw, nofp->nof_d_r_drw,
					nofp->nof_w_drw, nofp->nof_d_w_drw,
					nofp->nof_rw_drw, nofp->nof_d_rw_drw);
				/* try sending a close RPC if it wasn't delegated */
				if (nofp->nof_r || nofp->nof_w || nofp->nof_rw ||
				    nofp->nof_r_dw || nofp->nof_w_dw || nofp->nof_rw_dw ||
				    nofp->nof_r_drw || nofp->nof_w_drw || nofp->nof_rw_drw)
					nfs4_close_rpc(np, nofp, NULL, nofp->nof_owner->noo_cred, R_RECOVER);
			}
		}
		TAILQ_REMOVE(&np->n_opens, nofp, nof_link);
		nfs_open_file_destroy(nofp);
	}
	lck_mtx_unlock(&np->n_openlock);

	if (np->n_monlink.le_next != NFSNOLIST) {
		/* Wait for any in-progress getattr to complete, */
		/* then remove this node from the monitored node list. */
		lck_mtx_lock(&nmp->nm_lock);
		while (np->n_mflag & NMMONSCANINPROG) {
			struct timespec ts = { 1, 0 };
			np->n_mflag |= NMMONSCANWANT;
			msleep(&np->n_mflag, &nmp->nm_lock, PZERO-1, "nfswaitmonscan", &ts);
		}
		if (np->n_monlink.le_next != NFSNOLIST) {
			LIST_REMOVE(np, n_monlink);
			np->n_monlink.le_next = NFSNOLIST;
		}
		lck_mtx_unlock(&nmp->nm_lock);
	}

	lck_mtx_lock(nfs_buf_mutex);
	if (!force && (!LIST_EMPTY(&np->n_dirtyblkhd) || !LIST_EMPTY(&np->n_cleanblkhd)))
		NP(np, "nfs_reclaim: dropping %s buffers", (!LIST_EMPTY(&np->n_dirtyblkhd) ? "dirty" : "clean"));
	lck_mtx_unlock(nfs_buf_mutex);
	nfs_vinvalbuf(vp, V_IGNORE_WRITEERR, ap->a_context, 0);

	lck_mtx_lock(nfs_node_hash_mutex);

	if ((vnode_vtype(vp) != VDIR) && np->n_sillyrename) {
		if (!force)
			NP(np, "nfs_reclaim: leaving unlinked file %s", np->n_sillyrename->nsr_name);
		if (np->n_sillyrename->nsr_cred != NOCRED)
			kauth_cred_unref(&np->n_sillyrename->nsr_cred);
		vnode_rele(NFSTOV(np->n_sillyrename->nsr_dnp));
		FREE_ZONE(np->n_sillyrename, sizeof(*np->n_sillyrename), M_NFSREQ);
	}

	vnode_removefsref(vp);

	if (np->n_hflag & NHHASHED) {
		LIST_REMOVE(np, n_hash);
		np->n_hflag &= ~NHHASHED;
		FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
	}
	lck_mtx_unlock(nfs_node_hash_mutex);

	/*
	 * Free up any directory cookie structures and large file handle
	 * structures that might be associated with this nfs node.
	 */
	nfs_node_lock_force(np);
	if ((vnode_vtype(vp) == VDIR) && np->n_cookiecache)
		FREE_ZONE(np->n_cookiecache, sizeof(struct nfsdmap), M_NFSDIROFF);
	if (np->n_fhsize > NFS_SMALLFH)
		FREE_ZONE(np->n_fhp, np->n_fhsize, M_NFSBIGFH);
	if (np->n_vattr.nva_acl)
		kauth_acl_free(np->n_vattr.nva_acl);
	nfs_node_unlock(np);
	vnode_clearfsnode(vp);

	if (np->n_parent) {
		if (!vnode_get(np->n_parent)) {
			vnode_rele(np->n_parent);
			vnode_put(np->n_parent);
		}
		np->n_parent = NULL;
	}

	lck_mtx_destroy(&np->n_lock, nfs_node_lck_grp);
	lck_rw_destroy(&np->n_datalock, nfs_data_lck_grp);
	lck_mtx_destroy(&np->n_openlock, nfs_open_grp);

	FSDBG_BOT(265, vp, np, np->n_flag, 0xd1ed1e);
	FREE_ZONE(np, sizeof(struct nfsnode), M_NFSNODE);
	return (0);
}

/*
 * Acquire an NFS node lock
 * (fails with ENOENT if the node has been unhashed, unless "force" is set)
 */

int
nfs_node_lock_internal(nfsnode_t np, int force)
{
	FSDBG_TOP(268, np, force, 0, 0);
	lck_mtx_lock(&np->n_lock);
	if (!force && !(np->n_hflag & NHHASHED)) {
		FSDBG_BOT(268, np, 0xdead, 0, 0);
		lck_mtx_unlock(&np->n_lock);
		return (ENOENT);
	}
	FSDBG_BOT(268, np, force, 0, 0);
	return (0);
}

int
nfs_node_lock(nfsnode_t np)
{
	return nfs_node_lock_internal(np, 0);
}

void
nfs_node_lock_force(nfsnode_t np)
{
	nfs_node_lock_internal(np, 1);
}

/*
 * Release an NFS node lock
 */
void
nfs_node_unlock(nfsnode_t np)
{
	FSDBG(269, np, current_thread(), 0, 0);
	lck_mtx_unlock(&np->n_lock);
}

/*
 * Acquire 2 NFS node locks
 *   - locks taken in reverse address order
 *   - both or neither of the locks are taken
 *   - only one lock taken per node (dup nodes are skipped)
 */
int
nfs_node_lock2(nfsnode_t np1, nfsnode_t np2)
{
	nfsnode_t first, second;
	int error;

	first = (np1 > np2) ? np1 : np2;
	second = (np1 > np2) ? np2 : np1;
	if ((error = nfs_node_lock(first)))
		return (error);
	if (np1 == np2)
		return (error);
	if ((error = nfs_node_lock(second)))
		nfs_node_unlock(first);
	return (error);
}

void
nfs_node_unlock2(nfsnode_t np1, nfsnode_t np2)
{
	nfs_node_unlock(np1);
	if (np1 != np2)
		nfs_node_unlock(np2);
}

/*
 * Manage NFS node busy state.
 * (Similar to NFS node locks above)
 */
int
nfs_node_set_busy(nfsnode_t np, thread_t thd)
{
	struct timespec ts = { 2, 0 };
	int error;

	if ((error = nfs_node_lock(np)))
		return (error);
	while (ISSET(np->n_flag, NBUSY)) {
		SET(np->n_flag, NBUSYWANT);
		msleep(np, &np->n_lock, PZERO-1, "nfsbusywant", &ts);
		if ((error = nfs_sigintr(NFSTONMP(np), NULL, thd, 0)))
			break;
	}
	if (!error)
		SET(np->n_flag, NBUSY);
	nfs_node_unlock(np);
	return (error);
}

void
nfs_node_clear_busy(nfsnode_t np)
{
	int wanted;

	nfs_node_lock_force(np);
	wanted = ISSET(np->n_flag, NBUSYWANT);
	CLR(np->n_flag, NBUSY|NBUSYWANT);
	nfs_node_unlock(np);
	if (wanted)
		wakeup(np);
}

int
nfs_node_set_busy2(nfsnode_t np1, nfsnode_t np2, thread_t thd)
{
	nfsnode_t first, second;
	int error;

	first = (np1 > np2) ? np1 : np2;
	second = (np1 > np2) ? np2 : np1;
	if ((error = nfs_node_set_busy(first, thd)))
		return (error);
	if (np1 == np2)
		return (error);
	if ((error = nfs_node_set_busy(second, thd)))
		nfs_node_clear_busy(first);
	return (error);
}

void
nfs_node_clear_busy2(nfsnode_t np1, nfsnode_t np2)
{
	nfs_node_clear_busy(np1);
	if (np1 != np2)
		nfs_node_clear_busy(np2);
}

/* helper function to sort four nodes in reverse address order (no dupes) */
static void
nfs_node_sort4(nfsnode_t np1, nfsnode_t np2, nfsnode_t np3, nfsnode_t np4, nfsnode_t *list, int *lcntp)
{
	nfsnode_t na[2], nb[2];
	int a, b, i, lcnt;

	/* sort pairs then merge */
	na[0] = (np1 > np2) ? np1 : np2;
	na[1] = (np1 > np2) ? np2 : np1;
	nb[0] = (np3 > np4) ? np3 : np4;
	nb[1] = (np3 > np4) ? np4 : np3;
	for (a = b = i = lcnt = 0; i < 4; i++) {
		if (a >= 2)
			list[lcnt] = nb[b++];
		else if ((b >= 2) || (na[a] >= nb[b]))
			list[lcnt] = na[a++];
		else
			list[lcnt] = nb[b++];
		if ((lcnt <= 0) || (list[lcnt] != list[lcnt-1]))
			lcnt++; /* omit dups */
	}
	if (list[lcnt-1] == NULL)
		lcnt--;
	*lcntp = lcnt;
}

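/*
 * Busy up to four nodes at once (e.g. the directories and nodes involved
 * in a rename), in reverse address order with duplicates skipped, backing
 * out any nodes already busied if one of them fails.
 */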
int
nfs_node_set_busy4(nfsnode_t np1, nfsnode_t np2, nfsnode_t np3, nfsnode_t np4, thread_t thd)
{
	nfsnode_t list[4];
	int i, lcnt, error;

	nfs_node_sort4(np1, np2, np3, np4, list, &lcnt);

	/* Now we can lock using list[0 - lcnt-1] */
	for (i = 0; i < lcnt; ++i)
		if ((error = nfs_node_set_busy(list[i], thd))) {
			/* Drop any locks we acquired. */
			while (--i >= 0)
				nfs_node_clear_busy(list[i]);
			return (error);
		}
	return (0);
}

void
nfs_node_clear_busy4(nfsnode_t np1, nfsnode_t np2, nfsnode_t np3, nfsnode_t np4)
{
	nfsnode_t list[4];
	int lcnt;

	nfs_node_sort4(np1, np2, np3, np4, list, &lcnt);
	while (--lcnt >= 0)
		nfs_node_clear_busy(list[lcnt]);
}

/*
 * Acquire an NFS node data lock
 */
void
nfs_data_lock(nfsnode_t np, int locktype)
{
	nfs_data_lock_internal(np, locktype, 1);
}
void
nfs_data_lock_noupdate(nfsnode_t np, int locktype)
{
	nfs_data_lock_internal(np, locktype, 0);
}
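/*
 * Common data-lock code: for shared locks, apply any pending size update
 * (NUPDATESIZE) before taking the lock; for exclusive locks, record the
 * owning thread and apply the update after the lock is held.
 */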
void
nfs_data_lock_internal(nfsnode_t np, int locktype, int updatesize)
{
	FSDBG_TOP(270, np, locktype, np->n_datalockowner, 0);
	if (locktype == NFS_DATA_LOCK_SHARED) {
		if (updatesize && ISSET(np->n_flag, NUPDATESIZE))
			nfs_data_update_size(np, 0);
		lck_rw_lock_shared(&np->n_datalock);
	} else {
		lck_rw_lock_exclusive(&np->n_datalock);
		np->n_datalockowner = current_thread();
		if (updatesize && ISSET(np->n_flag, NUPDATESIZE))
			nfs_data_update_size(np, 1);
	}
	FSDBG_BOT(270, np, locktype, np->n_datalockowner, 0);
}

/*
 * Release an NFS node data lock
 */
void
nfs_data_unlock(nfsnode_t np)
{
	nfs_data_unlock_internal(np, 1);
}
void
nfs_data_unlock_noupdate(nfsnode_t np)
{
	nfs_data_unlock_internal(np, 0);
}
void
nfs_data_unlock_internal(nfsnode_t np, int updatesize)
{
	int mine = (np->n_datalockowner == current_thread());
	FSDBG_TOP(271, np, np->n_datalockowner, current_thread(), 0);
	if (updatesize && mine && ISSET(np->n_flag, NUPDATESIZE))
		nfs_data_update_size(np, 1);
	np->n_datalockowner = NULL;
	lck_rw_done(&np->n_datalock);
	if (updatesize && !mine && ISSET(np->n_flag, NUPDATESIZE))
		nfs_data_update_size(np, 0);
	FSDBG_BOT(271, np, np->n_datalockowner, current_thread(), 0);
}

/*
 * Update an NFS node's size: apply a pending NUPDATESIZE update by
 * copying n_newsize into n_size and resizing the UBC info.
 */
void
nfs_data_update_size(nfsnode_t np, int datalocked)
{
	int error;

	FSDBG_TOP(272, np, np->n_flag, np->n_size, np->n_newsize);
	if (!datalocked) {
		nfs_data_lock(np, NFS_DATA_LOCK_EXCLUSIVE);
		/* grabbing data lock will automatically update size */
		nfs_data_unlock(np);
		FSDBG_BOT(272, np, np->n_flag, np->n_size, np->n_newsize);
		return;
	}
	error = nfs_node_lock(np);
	if (error || !ISSET(np->n_flag, NUPDATESIZE)) {
		if (!error)
			nfs_node_unlock(np);
		FSDBG_BOT(272, np, np->n_flag, np->n_size, np->n_newsize);
		return;
	}
	CLR(np->n_flag, NUPDATESIZE);
	np->n_size = np->n_newsize;
	/* make sure we invalidate buffers the next chance we get */
	SET(np->n_flag, NNEEDINVALIDATE);
	nfs_node_unlock(np);
	ubc_setsize(NFSTOV(np), (off_t)np->n_size); /* XXX error? */
	FSDBG_BOT(272, np, np->n_flag, np->n_size, np->n_newsize);
}
