/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_node.c	8.6 (Berkeley) 5/22/95
 * FreeBSD-Id: nfs_node.c,v 1.22 1997/10/28 14:06:20 bde Exp $
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/kauth.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_gss.h>
#include <nfs/nfsmount.h>

#define NFSNOHASH(fhsum) \
	(&nfsnodehashtbl[(fhsum) & nfsnodehash])
static LIST_HEAD(nfsnodehashhead, nfsnode) * nfsnodehashtbl;
static u_long nfsnodehash;

static lck_grp_t *nfs_node_hash_lck_grp;
static lck_grp_t *nfs_node_lck_grp;
static lck_grp_t *nfs_data_lck_grp;
lck_mtx_t *nfs_node_hash_mutex;

#define NFS_NODE_DBG(...) NFS_DBG(NFS_FAC_NODE, 7, ## __VA_ARGS__)
/*
 * Initialize hash links for nfsnodes
 * and build nfsnode free list.
 */
void
nfs_nhinit(void)
{
	nfs_node_hash_lck_grp = lck_grp_alloc_init("nfs_node_hash", LCK_GRP_ATTR_NULL);
	nfs_node_hash_mutex = lck_mtx_alloc_init(nfs_node_hash_lck_grp, LCK_ATTR_NULL);
	nfs_node_lck_grp = lck_grp_alloc_init("nfs_node", LCK_GRP_ATTR_NULL);
	nfs_data_lck_grp = lck_grp_alloc_init("nfs_data", LCK_GRP_ATTR_NULL);
}

void
nfs_nhinit_finish(void)
{
	lck_mtx_lock(nfs_node_hash_mutex);
	if (!nfsnodehashtbl) {
		nfsnodehashtbl = hashinit(desiredvnodes, M_NFSNODE, &nfsnodehash);
	}
	lck_mtx_unlock(nfs_node_hash_mutex);
}
/*
 * Compute an entry in the NFS hash table structure
 */
u_long
nfs_hash(u_char *fhp, int fhsize)
{
	u_long fhsum;
	int i;

	fhsum = 0;
	for (i = 0; i < fhsize; i++) {
		fhsum += *fhp++;
	}
	return fhsum;
}
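/*
 * Illustrative sketch (not part of this file's build): how a file handle is
 * mapped to a hash chain.  nfs_hash() simply sums the handle bytes, and
 * NFSNOHASH() masks the sum with nfsnodehash (hashinit() sizes the table to a
 * power of two and returns size-1 as the mask).  The handle bytes below are
 * fabricated for illustration only.
 */
#if 0
static void
nfs_hash_example(void)
{
	u_char fh[4] = { 0x01, 0x02, 0x03, 0x04 };       /* made-up file handle */
	u_long sum = nfs_hash(fh, sizeof(fh));           /* 0x01+0x02+0x03+0x04 = 10 */
	struct nfsnodehashhead *bucket = NFSNOHASH(sum); /* &nfsnodehashtbl[10 & nfsnodehash] */

	(void)bucket;
}
#endif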
int nfs_case_insensitive(mount_t);

int
nfs_case_insensitive(mount_t mp)
{
	struct nfsmount *nmp = VFSTONFS(mp);
	int answer = 0;
	int skip = 0;

	if (nfs_mount_gone(nmp)) {
		return 0;
	}

	if (nmp->nm_vers == NFS_VER2) {
		/* V2 has no way to know */
		return 0;
	}

	lck_mtx_lock(&nmp->nm_lock);
	if (nmp->nm_vers == NFS_VER3) {
		if (!(nmp->nm_state & NFSSTA_GOTPATHCONF)) {
			/* We're holding the mount lock, so just return
			 * with the answer "case sensitive".  It is very rare
			 * for file systems not to be homogeneous w.r.t. pathconf.
			 */
			skip = 1;
		}
	} else if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_HOMOGENEOUS)) {
		/* no pathconf info cached */
		skip = 1;
	}

	if (!skip && (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_CASE_INSENSITIVE)) {
		answer = 1;
	}

	lck_mtx_unlock(&nmp->nm_lock);

	return answer;
}
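/*
 * Illustrative sketch (not part of this file's build): nfs_case_insensitive()
 * is used to choose a name comparator, as nfs_nget() does below, so that name
 * comparisons respect the server file system's case sensitivity.
 */
#if 0
static int
nfs_name_matches_example(mount_t mp, const char *a, const char *b, size_t len)
{
	int (*cmp)(const char *, const char *, size_t);

	cmp = nfs_case_insensitive(mp) ? strncasecmp : strncmp;
	return (*cmp)(a, b, len) == 0;
}
#endif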
/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 */
int
nfs_nget(
	mount_t mp,
	nfsnode_t dnp,
	struct componentname *cnp,
	u_char *fhp,
	int fhsize,
	struct nfs_vattr *nvap,
	u_int64_t *xidp,
	uint32_t auth,
	int flags,
	nfsnode_t *npp)
{
	nfsnode_t np;
	struct nfsnodehashhead *nhpp;
	vnode_t vp;
	int error, nfsvers;
	mount_t mp2;
	struct vnode_fsparam vfsp;
	uint32_t vid;

	FSDBG_TOP(263, mp, dnp, flags, npp);

	/* Check for unmount in progress */
	if (!mp || vfs_isforce(mp)) {
		*npp = NULL;
		error = ENXIO;
		FSDBG_BOT(263, mp, dnp, 0xd1e, error);
		return error;
	}
	nfsvers = VFSTONFS(mp)->nm_vers;

	nhpp = NFSNOHASH(nfs_hash(fhp, fhsize));
loop:
	lck_mtx_lock(nfs_node_hash_mutex);
	for (np = nhpp->lh_first; np != 0; np = np->n_hash.le_next) {
		mp2 = (np->n_hflag & NHINIT) ? np->n_mount : NFSTOMP(np);
		if (mp != mp2 || np->n_fhsize != fhsize ||
		    bcmp(fhp, np->n_fhp, fhsize)) {
			continue;
		}
		if (nvap && (nvap->nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) &&
		    cnp && (cnp->cn_namelen > (fhsize - (int)sizeof(dnp)))) {
			/* The name was too long to fit in the file handle.  Check it against the node's name. */
			int namecmp = 0;
			const char *vname = vnode_getname(NFSTOV(np));
			if (vname) {
				if (cnp->cn_namelen != (int)strlen(vname)) {
					namecmp = 1;
				} else {
					namecmp = strncmp(vname, cnp->cn_nameptr, cnp->cn_namelen);
				}
				vnode_putname(vname);
			}
			if (namecmp) { /* full name didn't match */
				continue;
			}
		}
		FSDBG(263, dnp, np, np->n_flag, 0xcace0000);
		/* if the node is locked, sleep on it */
		if ((np->n_hflag & NHLOCKED) && !(flags & NG_NOCREATE)) {
			np->n_hflag |= NHLOCKWANT;
			FSDBG(263, dnp, np, np->n_flag, 0xcace2222);
			msleep(np, nfs_node_hash_mutex, PDROP | PINOD, "nfs_nget", NULL);
			FSDBG(263, dnp, np, np->n_flag, 0xcace3333);
			goto loop;
		}
		vp = NFSTOV(np);
		vid = vnode_vid(vp);
		lck_mtx_unlock(nfs_node_hash_mutex);
		if ((error = vnode_getwithvid(vp, vid))) {
			/*
			 * If vnode is being reclaimed or has already
			 * changed identity, no need to wait.
			 */
			FSDBG_BOT(263, dnp, *npp, 0xcace0d1e, error);
			return error;
		}
		if ((error = nfs_node_lock(np))) {
			/* this only fails if the node is now unhashed */
			/* so let's see if we can find/create it again */
			FSDBG(263, dnp, *npp, 0xcaced1e2, error);
			vnode_put(vp);
			if (flags & NG_NOCREATE) {
				*npp = 0;
				FSDBG_BOT(263, dnp, *npp, 0xcaced1e0, ENOENT);
				return ENOENT;
			}
			goto loop;
		}
		/* update attributes */
		if (nvap) {
			error = nfs_loadattrcache(np, nvap, xidp, 0);
		}
		if (error) {
			nfs_node_unlock(np);
			vnode_put(vp);
		} else {
			if (dnp && cnp && (flags & NG_MAKEENTRY)) {
				cache_enter(NFSTOV(dnp), vp, cnp);
			}
			/*
			 * Update the vnode if the name and/or the parent has
			 * changed. We need to do this so that if getattrlist is
			 * called asking for ATTR_CMN_NAME, the "most"
			 * correct name is returned. In addition, for
			 * monitored vnodes we need to kick the vnode out of the
			 * name cache. We do this so that if there are hard
			 * links in the same directory the link will not be
			 * found and a lookup will get us here to return the
			 * name of the current link. In addition, by removing the
			 * name from the name cache the old name will not be
			 * found after a rename done on another client or the
			 * server. The principal reason to do this is that
			 * Finder is asking for notifications on a directory.
			 * The directory changes, Finder gets notified, reads
			 * the directory (which we have purged) and for each
			 * entry returned calls getattrlist with the name
			 * returned from readdir. getattrlist has to call
			 * namei/lookup to resolve the name; because it's not in
			 * the cache we end up here. We need to update the name
			 * so Finder will get the name it called us with.
			 *
			 * We had an imperfect solution with respect to case
			 * sensitivity. There is a test that is run in
			 * FileBuster that does renames from some name to
			 * another name differing only in case. It then reads
			 * the directory looking for the new name; after it
			 * finds that new name, it asks getattrlist to verify
			 * that the name is the new name. Usually that works,
			 * but renames generate fsevents and fseventsd will do a
			 * lookup on the name via lstat. Since that test renames
			 * old name to new name back and forth there is a race
			 * that an fsevent will be behind and will access the
			 * file by the old name; on a case insensitive file
			 * system that will work. The problem is if we do a case
			 * sensitive compare, we're going to change the name,
			 * and the test's getattrlist verification step is
			 * going to fail. So we will check the case sensitivity
			 * of the file system and do the appropriate compare. In
			 * the rare instance of non-homogeneous file systems
			 * w.r.t. pathconf we will use case sensitive compares.
			 * That could break if the file system is actually case
			 * insensitive.
			 *
			 * Note that V2 does not know the case, so we just
			 * assume case sensitivity.
			 *
			 * This is clearly not perfect due to races, but this is
			 * as good as it's going to get. You can defeat the
			 * handling of hard links simply by doing:
			 *
			 *	while :; do ls -l > /dev/null; done
			 *
			 * in a terminal window. Even a single ls -l can cause a
			 * race.
			 *
			 * <rant>What we really need is for the caller, which
			 * knows the name being used is valid since it got it
			 * from a readdir, to use that name and not ask for the
			 * ATTR_CMN_NAME.</rant>
			 */
			if (dnp && cnp && (vp != NFSTOV(dnp))) {
				int update_flags = (vnode_ismonitored((NFSTOV(dnp)))) ? VNODE_UPDATE_CACHE : 0;
				int (*cmp)(const char *s1, const char *s2, size_t n);

				cmp = nfs_case_insensitive(mp) ? strncasecmp : strncmp;

				if (vp->v_name && (size_t)cnp->cn_namelen != strnlen(vp->v_name, MAXPATHLEN)) {
					update_flags |= VNODE_UPDATE_NAME;
				}
				if (vp->v_name && cnp->cn_namelen && (*cmp)(cnp->cn_nameptr, vp->v_name, cnp->cn_namelen)) {
					update_flags |= VNODE_UPDATE_NAME;
				}
				if ((vp->v_name == NULL && cnp->cn_namelen != 0) || (vp->v_name != NULL && cnp->cn_namelen == 0)) {
					update_flags |= VNODE_UPDATE_NAME;
				}
				if (vnode_parent(vp) != NFSTOV(dnp)) {
					update_flags |= VNODE_UPDATE_PARENT;
				}
				if (update_flags) {
					NFS_NODE_DBG("vnode_update_identity old name %s new name %.*s update flags = %x\n",
					    vp->v_name, cnp->cn_namelen, cnp->cn_nameptr ? cnp->cn_nameptr : "", update_flags);
					vnode_update_identity(vp, NFSTOV(dnp), cnp->cn_nameptr, cnp->cn_namelen, 0, update_flags);
				}
			}
			*npp = np;
		}
		FSDBG_BOT(263, dnp, *npp, 0xcace0000, error);
		return error;
	}
	FSDBG(263, mp, dnp, npp, 0xaaaaaaaa);

	if (flags & NG_NOCREATE) {
		lck_mtx_unlock(nfs_node_hash_mutex);
		*npp = 0;
		FSDBG_BOT(263, dnp, *npp, 0x80000001, ENOENT);
		return ENOENT;
	}

	/*
	 * allocate and initialize nfsnode and stick it in the hash
	 * before calling getnewvnode().  Anyone finding it in the
	 * hash before initialization is complete will wait for it.
	 */
	MALLOC_ZONE(np, nfsnode_t, sizeof *np, M_NFSNODE, M_WAITOK);
	if (!np) {
		lck_mtx_unlock(nfs_node_hash_mutex);
		*npp = 0;
		FSDBG_BOT(263, dnp, *npp, 0x80000001, ENOMEM);
		return ENOMEM;
	}
	bzero(np, sizeof *np);
	np->n_hflag |= (NHINIT | NHLOCKED);
	np->n_mount = mp;
	np->n_auth = auth;
	TAILQ_INIT(&np->n_opens);
	TAILQ_INIT(&np->n_lock_owners);
	TAILQ_INIT(&np->n_locks);
	np->n_dlink.tqe_next = NFSNOLIST;
	np->n_dreturn.tqe_next = NFSNOLIST;
	np->n_monlink.le_next = NFSNOLIST;

	/* ugh... need to keep track of ".zfs" directories to workaround server bugs */
	if ((nvap->nva_type == VDIR) && cnp && (cnp->cn_namelen == 4) &&
	    (cnp->cn_nameptr[0] == '.') && (cnp->cn_nameptr[1] == 'z') &&
	    (cnp->cn_nameptr[2] == 'f') && (cnp->cn_nameptr[3] == 's')) {
		np->n_flag |= NISDOTZFS;
	}
	if (dnp && (dnp->n_flag & NISDOTZFS)) {
		np->n_flag |= NISDOTZFSCHILD;
	}

	if (dnp && cnp && ((cnp->cn_namelen != 2) ||
	    (cnp->cn_nameptr[0] != '.') || (cnp->cn_nameptr[1] != '.'))) {
		vnode_t dvp = NFSTOV(dnp);
		if (!vnode_get(dvp)) {
			if (!vnode_ref(dvp)) {
				np->n_parent = dvp;
			}
			vnode_put(dvp);
		}
	}

	/* setup node's file handle */
	if (fhsize > NFS_SMALLFH) {
		MALLOC_ZONE(np->n_fhp, u_char *,
		    fhsize, M_NFSBIGFH, M_WAITOK);
		if (!np->n_fhp) {
			lck_mtx_unlock(nfs_node_hash_mutex);
			FREE_ZONE(np, sizeof *np, M_NFSNODE);
			*npp = 0;
			FSDBG_BOT(263, dnp, *npp, 0x80000002, ENOMEM);
			return ENOMEM;
		}
	} else {
		np->n_fhp = &np->n_fh[0];
	}
	bcopy(fhp, np->n_fhp, fhsize);
	np->n_fhsize = fhsize;

	/* Insert the nfsnode in the hash queue for its new file handle */
	LIST_INSERT_HEAD(nhpp, np, n_hash);
	np->n_hflag |= NHHASHED;
	FSDBG(266, 0, np, np->n_flag, np->n_hflag);

	/* lock the new nfsnode */
	lck_mtx_init(&np->n_lock, nfs_node_lck_grp, LCK_ATTR_NULL);
	lck_rw_init(&np->n_datalock, nfs_data_lck_grp, LCK_ATTR_NULL);
	lck_mtx_init(&np->n_openlock, nfs_open_grp, LCK_ATTR_NULL);
	lck_mtx_lock(&np->n_lock);

	/* release lock on hash table */
	lck_mtx_unlock(nfs_node_hash_mutex);

	/* do initial loading of attributes */
	NACCESSINVALIDATE(np);
	error = nfs_loadattrcache(np, nvap, xidp, 1);
	if (error) {
		FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
		nfs_node_unlock(np);
		lck_mtx_lock(nfs_node_hash_mutex);
		LIST_REMOVE(np, n_hash);
		np->n_hflag &= ~(NHHASHED | NHINIT | NHLOCKED);
		if (np->n_hflag & NHLOCKWANT) {
			np->n_hflag &= ~NHLOCKWANT;
			wakeup(np);
		}
		lck_mtx_unlock(nfs_node_hash_mutex);
		if (np->n_parent) {
			if (!vnode_get(np->n_parent)) {
				vnode_rele(np->n_parent);
				vnode_put(np->n_parent);
			}
			np->n_parent = NULL;
		}
		lck_mtx_destroy(&np->n_lock, nfs_node_lck_grp);
		lck_rw_destroy(&np->n_datalock, nfs_data_lck_grp);
		lck_mtx_destroy(&np->n_openlock, nfs_open_grp);
		if (np->n_fhsize > NFS_SMALLFH) {
			FREE_ZONE(np->n_fhp, np->n_fhsize, M_NFSBIGFH);
		}
		FREE_ZONE(np, sizeof *np, M_NFSNODE);
		*npp = 0;
		FSDBG_BOT(263, dnp, *npp, 0x80000003, error);
		return error;
	}
	NFS_CHANGED_UPDATE(nfsvers, np, nvap);
	if (nvap->nva_type == VDIR) {
		NFS_CHANGED_UPDATE_NC(nfsvers, np, nvap);
	}

	/* now, attempt to get a new vnode */
	vfsp.vnfs_mp = mp;
	vfsp.vnfs_vtype = nvap->nva_type;
	vfsp.vnfs_str = "nfs";
	vfsp.vnfs_dvp = dnp ? NFSTOV(dnp) : NULL;
	vfsp.vnfs_fsnode = np;
#if CONFIG_NFS4
	if (nfsvers == NFS_VER4) {
#if FIFO
		if (nvap->nva_type == VFIFO) {
			vfsp.vnfs_vops = fifo_nfsv4nodeop_p;
		} else
#endif /* FIFO */
		if (nvap->nva_type == VBLK || nvap->nva_type == VCHR) {
			vfsp.vnfs_vops = spec_nfsv4nodeop_p;
		} else {
			vfsp.vnfs_vops = nfsv4_vnodeop_p;
		}
	} else
#endif /* CONFIG_NFS4 */
	{
#if FIFO
		if (nvap->nva_type == VFIFO) {
			vfsp.vnfs_vops = fifo_nfsv2nodeop_p;
		} else
#endif /* FIFO */
		if (nvap->nva_type == VBLK || nvap->nva_type == VCHR) {
			vfsp.vnfs_vops = spec_nfsv2nodeop_p;
		} else {
			vfsp.vnfs_vops = nfsv2_vnodeop_p;
		}
	}
	vfsp.vnfs_markroot = (flags & NG_MARKROOT) ? 1 : 0;
	vfsp.vnfs_marksystem = 0;
	vfsp.vnfs_rdev = 0;
	vfsp.vnfs_filesize = nvap->nva_size;
	vfsp.vnfs_cnp = cnp;
	vfsp.vnfs_flags = VNFS_ADDFSREF;
	if (!dnp || !cnp || !(flags & NG_MAKEENTRY)) {
		vfsp.vnfs_flags |= VNFS_NOCACHE;
	}

#if CONFIG_TRIGGERS
	if ((nfsvers >= NFS_VER4) &&
	    (nvap->nva_type == VDIR) && (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER) &&
	    !(flags & NG_MARKROOT)) {
		struct vnode_trigger_param vtp;
		bzero(&vtp, sizeof(vtp));
		bcopy(&vfsp, &vtp.vnt_params, sizeof(vfsp));
		vtp.vnt_resolve_func = nfs_mirror_mount_trigger_resolve;
		vtp.vnt_unresolve_func = nfs_mirror_mount_trigger_unresolve;
		vtp.vnt_rearm_func = nfs_mirror_mount_trigger_rearm;
		vtp.vnt_flags = VNT_AUTO_REARM | VNT_KERN_RESOLVE;
		error = vnode_create(VNCREATE_TRIGGER, VNCREATE_TRIGGER_SIZE, &vtp, &np->n_vnode);
	} else
#endif /* CONFIG_TRIGGERS */
	{
		error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &np->n_vnode);
	}
	if (error) {
		FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
		nfs_node_unlock(np);
		lck_mtx_lock(nfs_node_hash_mutex);
		LIST_REMOVE(np, n_hash);
		np->n_hflag &= ~(NHHASHED | NHINIT | NHLOCKED);
		if (np->n_hflag & NHLOCKWANT) {
			np->n_hflag &= ~NHLOCKWANT;
			wakeup(np);
		}
		lck_mtx_unlock(nfs_node_hash_mutex);
		if (np->n_parent) {
			if (!vnode_get(np->n_parent)) {
				vnode_rele(np->n_parent);
				vnode_put(np->n_parent);
			}
			np->n_parent = NULL;
		}
		lck_mtx_destroy(&np->n_lock, nfs_node_lck_grp);
		lck_rw_destroy(&np->n_datalock, nfs_data_lck_grp);
		lck_mtx_destroy(&np->n_openlock, nfs_open_grp);
		if (np->n_fhsize > NFS_SMALLFH) {
			FREE_ZONE(np->n_fhp, np->n_fhsize, M_NFSBIGFH);
		}
		FREE_ZONE(np, sizeof *np, M_NFSNODE);
		*npp = 0;
		FSDBG_BOT(263, dnp, *npp, 0x80000004, error);
		return error;
	}
	vp = np->n_vnode;
	vnode_settag(vp, VT_NFS);
	/* node is now initialized */

	/* check if anyone's waiting on this node */
	lck_mtx_lock(nfs_node_hash_mutex);
	np->n_hflag &= ~(NHINIT | NHLOCKED);
	if (np->n_hflag & NHLOCKWANT) {
		np->n_hflag &= ~NHLOCKWANT;
		wakeup(np);
	}
	lck_mtx_unlock(nfs_node_hash_mutex);

	*npp = np;

	FSDBG_BOT(263, dnp, vp, *npp, error);
	return error;
}
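/*
 * Illustrative sketch (not part of this file's build): a typical nfs_nget()
 * call from a lookup-style path.  The parameter list follows the
 * reconstruction above; fh/fhsize/nvattr/xid are assumed to have been filled
 * in from a server reply, auth is passed through unchanged, and NG_MAKEENTRY
 * requests a name cache entry.  On success the node is returned with its
 * n_lock held, so the caller unlocks it before continuing.
 */
#if 0
static int
nfs_nget_usage_example(mount_t mp, nfsnode_t dnp, struct componentname *cnp,
    u_char *fh, int fhsize, struct nfs_vattr *nvattr, u_int64_t *xid, uint32_t auth)
{
	nfsnode_t np = NULL;
	int error;

	error = nfs_nget(mp, dnp, cnp, fh, fhsize, nvattr, xid, auth, NG_MAKEENTRY, &np);
	if (!error) {
		nfs_node_unlock(np);
		/* ... use NFSTOV(np) ..., then release the iocount when finished */
		vnode_put(NFSTOV(np));
	}
	return error;
}
#endif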
int
nfs_vnop_inactive(
	struct vnop_inactive_args /* {
                                   *  struct vnodeop_desc *a_desc;
                                   *  vnode_t a_vp;
                                   *  vfs_context_t a_context;
                                   *  } */*ap)
{
	vnode_t vp = ap->a_vp;
	vfs_context_t ctx = ap->a_context;
	nfsnode_t np;
	struct nfs_sillyrename *nsp;
	struct nfs_vattr nvattr;
	int unhash, attrerr, busyerror, error, inuse, busied, force;
	struct nfs_open_file *nofp;
	struct componentname cn;
	struct nfsmount *nmp;
	mount_t mp;

	if (vp == NULL) {
		panic("nfs_vnop_inactive: vp == NULL");
	}
	np = VTONFS(vp);
	if (np == NULL) {
		panic("nfs_vnop_inactive: np == NULL");
	}

	nmp = NFSTONMP(np);
	mp = vnode_mount(vp);

restart:
	force = (!mp || vfs_isforce(mp));
	error = 0;
	inuse = (nfs_mount_state_in_use_start(nmp, NULL) == 0);

	/* There shouldn't be any open or lock state at this point */
	lck_mtx_lock(&np->n_openlock);
	if (np->n_openrefcnt && !force) {
		/*
		 * vnode_rele and vnode_put drop the vnode lock before
		 * calling VNOP_INACTIVE, so there is a race where the
		 * vnode could become active again. Perhaps there are
		 * other places where this can happen, so if we've got
		 * here we need to get out.
		 */
#ifdef NFS_NODE_DEBUG
		NP(np, "nfs_vnop_inactive: still open: %d", np->n_openrefcnt);
#endif
		lck_mtx_unlock(&np->n_openlock);
		if (inuse) {
			nfs_mount_state_in_use_end(nmp, 0);
		}
		return 0;
	}

	TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
		lck_mtx_lock(&nofp->nof_lock);
		if (nofp->nof_flags & NFS_OPEN_FILE_BUSY) {
			if (!force) {
				NP(np, "nfs_vnop_inactive: open file busy");
			}
			busied = 0;
		} else {
			nofp->nof_flags |= NFS_OPEN_FILE_BUSY;
			busied = 1;
		}
		lck_mtx_unlock(&nofp->nof_lock);
		if ((np->n_flag & NREVOKE) || (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
			if (busied) {
				nfs_open_file_clear_busy(nofp);
			}
			continue;
		}
		/*
		 * If we just created the file, we already had it open in
		 * anticipation of getting a subsequent open call.  If the
		 * node has gone inactive without being open, we need to
		 * clean up (close) the open done in the create.
		 */
#if CONFIG_NFS4
		if ((nofp->nof_flags & NFS_OPEN_FILE_CREATE) && nofp->nof_creator && !force) {
			if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
				lck_mtx_unlock(&np->n_openlock);
				if (busied) {
					nfs_open_file_clear_busy(nofp);
				}
				if (inuse) {
					nfs_mount_state_in_use_end(nmp, 0);
				}
				if (!nfs4_reopen(nofp, NULL)) {
					goto restart;
				}
			}
			nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
			lck_mtx_unlock(&np->n_openlock);
			error = nfs_close(np, nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, ctx);
			if (error) {
				NP(np, "nfs_vnop_inactive: create close error: %d", error);
				nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
			}
			if (busied) {
				nfs_open_file_clear_busy(nofp);
			}
			if (inuse) {
				nfs_mount_state_in_use_end(nmp, error);
			}
			goto restart;
		}
#endif
		if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
			/*
			 * If the file is marked as needing reopen, but this was the only
			 * open on the file, just drop the open.
			 */
			nofp->nof_flags &= ~NFS_OPEN_FILE_NEEDCLOSE;
			if ((nofp->nof_flags & NFS_OPEN_FILE_REOPEN) && (nofp->nof_opencnt == 1)) {
				nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
				nofp->nof_r--;
				nofp->nof_opencnt--;
				nofp->nof_access = 0;
			} else if (!force) {
				lck_mtx_unlock(&np->n_openlock);
				if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
					if (busied) {
						nfs_open_file_clear_busy(nofp);
					}
					if (inuse) {
						nfs_mount_state_in_use_end(nmp, 0);
					}
#if CONFIG_NFS4
					if (!nfs4_reopen(nofp, NULL)) {
						goto restart;
					}
#endif
				}
				error = nfs_close(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
				if (error) {
					NP(np, "nfs_vnop_inactive: need close error: %d", error);
					nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
				}
				if (busied) {
					nfs_open_file_clear_busy(nofp);
				}
				if (inuse) {
					nfs_mount_state_in_use_end(nmp, error);
				}
				goto restart;
			}
		}
		if (nofp->nof_opencnt && !force) {
			NP(np, "nfs_vnop_inactive: file still open: %d", nofp->nof_opencnt);
		}
		if (!force && (nofp->nof_access || nofp->nof_deny ||
		    nofp->nof_mmap_access || nofp->nof_mmap_deny ||
		    nofp->nof_r || nofp->nof_w || nofp->nof_rw ||
		    nofp->nof_r_dw || nofp->nof_w_dw || nofp->nof_rw_dw ||
		    nofp->nof_r_drw || nofp->nof_w_drw || nofp->nof_rw_drw ||
		    nofp->nof_d_r || nofp->nof_d_w || nofp->nof_d_rw ||
		    nofp->nof_d_r_dw || nofp->nof_d_w_dw || nofp->nof_d_rw_dw ||
		    nofp->nof_d_r_drw || nofp->nof_d_w_drw || nofp->nof_d_rw_drw)) {
			NP(np, "nfs_vnop_inactive: non-zero access: %d %d %d %d # %u.%u %u.%u %u.%u dw %u.%u %u.%u %u.%u drw %u.%u %u.%u %u.%u",
			    nofp->nof_access, nofp->nof_deny,
			    nofp->nof_mmap_access, nofp->nof_mmap_deny,
			    nofp->nof_r, nofp->nof_d_r,
			    nofp->nof_w, nofp->nof_d_w,
			    nofp->nof_rw, nofp->nof_d_rw,
			    nofp->nof_r_dw, nofp->nof_d_r_dw,
			    nofp->nof_w_dw, nofp->nof_d_w_dw,
			    nofp->nof_rw_dw, nofp->nof_d_rw_dw,
			    nofp->nof_r_drw, nofp->nof_d_r_drw,
			    nofp->nof_w_drw, nofp->nof_d_w_drw,
			    nofp->nof_rw_drw, nofp->nof_d_rw_drw);
		}
		if (busied) {
			nfs_open_file_clear_busy(nofp);
		}
	}
	lck_mtx_unlock(&np->n_openlock);

	if (inuse && nfs_mount_state_in_use_end(nmp, error)) {
		goto restart;
	}

	nfs_node_lock_force(np);

	if (vnode_vtype(vp) != VDIR) {
		nsp = np->n_sillyrename;
		np->n_sillyrename = NULL;
	} else {
		nsp = NULL;
	}

	FSDBG_TOP(264, vp, np, np->n_flag, nsp);

	if (!nsp) {
		/* no silly file to clean up... */
		/* clear all flags other than these */
		np->n_flag &= (NMODIFIED);
		nfs_node_unlock(np);
		FSDBG_BOT(264, vp, np, np->n_flag, 0);
		return 0;
	}
	nfs_node_unlock(np);

	/* Remove the silly file that was rename'd earlier */

	/* flush all the buffers */
	nfs_vinvalbuf2(vp, V_SAVE, vfs_context_thread(ctx), nsp->nsr_cred, 1);

	/* try to get the latest attributes */
	attrerr = nfs_getattr(np, &nvattr, ctx, NGA_UNCACHED);

	/* Check if we should remove it from the node hash. */
	/* Leave it if inuse or it has multiple hard links. */
	if (vnode_isinuse(vp, 0) || (!attrerr && (nvattr.nva_nlink > 1))) {
		unhash = 0;
	} else {
		unhash = 1;
		ubc_setsize(vp, 0);
	}

	/* mark this node and the directory busy while we do the remove */
	busyerror = nfs_node_set_busy2(nsp->nsr_dnp, np, vfs_context_thread(ctx));

	/* lock the node while we remove the silly file */
	lck_mtx_lock(nfs_node_hash_mutex);
	while (np->n_hflag & NHLOCKED) {
		np->n_hflag |= NHLOCKWANT;
		msleep(np, nfs_node_hash_mutex, PINOD, "nfs_inactive", NULL);
	}
	np->n_hflag |= NHLOCKED;
	lck_mtx_unlock(nfs_node_hash_mutex);

	/* purge the name cache to deter others from finding it */
	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = nsp->nsr_name;
	cn.cn_namelen = nsp->nsr_namlen;
	nfs_name_cache_purge(nsp->nsr_dnp, np, &cn, ctx);

	FSDBG(264, np, np->n_size, np->n_vattr.nva_size, 0xf00d00f1);

	/* now remove the silly file */
	nfs_removeit(nsp);

	/* clear all flags other than these */
	nfs_node_lock_force(np);
	np->n_flag &= (NMODIFIED);
	nfs_node_unlock(np);

	if (!busyerror) {
		nfs_node_clear_busy2(nsp->nsr_dnp, np);
	}

	if (unhash && vnode_isinuse(vp, 0)) {
		/* vnode now inuse after silly remove? */
		unhash = 0;
		ubc_setsize(vp, np->n_size);
	}

	lck_mtx_lock(nfs_node_hash_mutex);
	if (unhash) {
		/*
		 * remove nfsnode from hash now so we can't accidentally find it
		 * again if another object gets created with the same filehandle
		 * before this vnode gets reclaimed
		 */
		if (np->n_hflag & NHHASHED) {
			LIST_REMOVE(np, n_hash);
			np->n_hflag &= ~NHHASHED;
			FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
		}
		vnode_recycle(vp);
	}
	/* unlock the node */
	np->n_hflag &= ~NHLOCKED;
	if (np->n_hflag & NHLOCKWANT) {
		np->n_hflag &= ~NHLOCKWANT;
		wakeup(np);
	}
	lck_mtx_unlock(nfs_node_hash_mutex);

	/* cleanup sillyrename info */
	if (nsp->nsr_cred != NOCRED) {
		kauth_cred_unref(&nsp->nsr_cred);
	}
	vnode_rele(NFSTOV(nsp->nsr_dnp));
	FREE_ZONE(nsp, sizeof(*nsp), M_NFSREQ);
	FSDBG_BOT(264, vp, np, np->n_flag, 0);
	return 0;
}
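/*
 * Illustrative sketch (not part of this file's build): the silly-rename
 * cleanup above is what ultimately removes the placeholder created when an
 * NFS file is unlinked while still open.  Roughly, from user space:
 */
#if 0 /* user-space pseudo-example */
static void
nfs_sillyrename_example(void)
{
	int fd = open("/net/server/export/file", O_RDWR); /* keep the file open */
	unlink("/net/server/export/file");  /* the client renames it to a ".nfs..." placeholder instead */
	/* ... keep using fd ... */
	close(fd);  /* when the vnode goes inactive, nfs_vnop_inactive() removes the placeholder */
}
#endif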
/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 */
int
nfs_vnop_reclaim(
	struct vnop_reclaim_args /* {
                                  *  struct vnodeop_desc *a_desc;
                                  *  vnode_t a_vp;
                                  *  vfs_context_t a_context;
                                  *  } */*ap)
{
	vnode_t vp = ap->a_vp;
	nfsnode_t np = VTONFS(vp);
	vfs_context_t ctx = ap->a_context;
	struct nfs_open_file *nofp, *nextnofp;
	struct nfs_file_lock *nflp, *nextnflp;
	struct nfs_lock_owner *nlop, *nextnlop;
	struct nfsmount *nmp = np->n_mount ? VFSTONFS(np->n_mount) : NFSTONMP(np);
	mount_t mp = vnode_mount(vp);
	int force;

	FSDBG_TOP(265, vp, np, np->n_flag, 0);
	force = (!mp || vfs_isforce(mp) || nfs_mount_gone(nmp));

	/* There shouldn't be any open or lock state at this point */
	lck_mtx_lock(&np->n_openlock);

#if CONFIG_NFS4
	if (nmp && (nmp->nm_vers >= NFS_VER4)) {
		/* need to drop a delegation */
		if (np->n_dreturn.tqe_next != NFSNOLIST) {
			/* remove this node from the delegation return list */
			lck_mtx_lock(&nmp->nm_lock);
			if (np->n_dreturn.tqe_next != NFSNOLIST) {
				TAILQ_REMOVE(&nmp->nm_dreturnq, np, n_dreturn);
				np->n_dreturn.tqe_next = NFSNOLIST;
			}
			lck_mtx_unlock(&nmp->nm_lock);
		}
		if (np->n_dlink.tqe_next != NFSNOLIST) {
			/* remove this node from the delegation list */
			lck_mtx_lock(&nmp->nm_lock);
			if (np->n_dlink.tqe_next != NFSNOLIST) {
				TAILQ_REMOVE(&nmp->nm_delegations, np, n_dlink);
				np->n_dlink.tqe_next = NFSNOLIST;
			}
			lck_mtx_unlock(&nmp->nm_lock);
		}
		if ((np->n_openflags & N_DELEG_MASK) && !force) {
			/* try to return the delegation */
			np->n_openflags &= ~N_DELEG_MASK;
			nfs4_delegreturn_rpc(nmp, np->n_fhp, np->n_fhsize, &np->n_dstateid,
			    R_RECOVER, vfs_context_thread(ctx), vfs_context_ucred(ctx));
		}
		if (np->n_attrdirfh) {
			FREE(np->n_attrdirfh, M_TEMP);
			np->n_attrdirfh = NULL;
		}
	}
#endif

	/* clean up file locks */
	TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
		if (!(nflp->nfl_flags & NFS_FILE_LOCK_DEAD) && !force) {
			NP(np, "nfs_vnop_reclaim: lock 0x%llx 0x%llx 0x%x (bc %d)",
			    nflp->nfl_start, nflp->nfl_end, nflp->nfl_flags, nflp->nfl_blockcnt);
		}
		if (!(nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD))) {
			/* try sending an unlock RPC if it wasn't delegated */
			if (!(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED) && !force) {
				nmp->nm_funcs->nf_unlock_rpc(np, nflp->nfl_owner, F_WRLCK, nflp->nfl_start, nflp->nfl_end, R_RECOVER,
				    NULL, nflp->nfl_owner->nlo_open_owner->noo_cred);
			}
			lck_mtx_lock(&nflp->nfl_owner->nlo_lock);
			TAILQ_REMOVE(&nflp->nfl_owner->nlo_locks, nflp, nfl_lolink);
			lck_mtx_unlock(&nflp->nfl_owner->nlo_lock);
		}
		TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
		nfs_file_lock_destroy(nflp);
	}
	/* clean up lock owners */
	TAILQ_FOREACH_SAFE(nlop, &np->n_lock_owners, nlo_link, nextnlop) {
		if (!TAILQ_EMPTY(&nlop->nlo_locks) && !force) {
			NP(np, "nfs_vnop_reclaim: lock owner with locks");
		}
		TAILQ_REMOVE(&np->n_lock_owners, nlop, nlo_link);
		nfs_lock_owner_destroy(nlop);
	}
	/* clean up open state */
	if (np->n_openrefcnt && !force) {
		NP(np, "nfs_vnop_reclaim: still open: %d", np->n_openrefcnt);
	}
	TAILQ_FOREACH_SAFE(nofp, &np->n_opens, nof_link, nextnofp) {
		if (nofp->nof_flags & NFS_OPEN_FILE_BUSY) {
			NP(np, "nfs_vnop_reclaim: open file busy");
		}
		if (!(np->n_flag & NREVOKE) && !(nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
			if (nofp->nof_opencnt && !force) {
				NP(np, "nfs_vnop_reclaim: file still open: %d", nofp->nof_opencnt);
			}
			if (!force && (nofp->nof_access || nofp->nof_deny ||
			    nofp->nof_mmap_access || nofp->nof_mmap_deny ||
			    nofp->nof_r || nofp->nof_w || nofp->nof_rw ||
			    nofp->nof_r_dw || nofp->nof_w_dw || nofp->nof_rw_dw ||
			    nofp->nof_r_drw || nofp->nof_w_drw || nofp->nof_rw_drw ||
			    nofp->nof_d_r || nofp->nof_d_w || nofp->nof_d_rw ||
			    nofp->nof_d_r_dw || nofp->nof_d_w_dw || nofp->nof_d_rw_dw ||
			    nofp->nof_d_r_drw || nofp->nof_d_w_drw || nofp->nof_d_rw_drw)) {
				NP(np, "nfs_vnop_reclaim: non-zero access: %d %d %d %d # %u.%u %u.%u %u.%u dw %u.%u %u.%u %u.%u drw %u.%u %u.%u %u.%u",
				    nofp->nof_access, nofp->nof_deny,
				    nofp->nof_mmap_access, nofp->nof_mmap_deny,
				    nofp->nof_r, nofp->nof_d_r,
				    nofp->nof_w, nofp->nof_d_w,
				    nofp->nof_rw, nofp->nof_d_rw,
				    nofp->nof_r_dw, nofp->nof_d_r_dw,
				    nofp->nof_w_dw, nofp->nof_d_w_dw,
				    nofp->nof_rw_dw, nofp->nof_d_rw_dw,
				    nofp->nof_r_drw, nofp->nof_d_r_drw,
				    nofp->nof_w_drw, nofp->nof_d_w_drw,
				    nofp->nof_rw_drw, nofp->nof_d_rw_drw);
#if CONFIG_NFS4
				/* try sending a close RPC if it wasn't delegated */
				if (nofp->nof_r || nofp->nof_w || nofp->nof_rw ||
				    nofp->nof_r_dw || nofp->nof_w_dw || nofp->nof_rw_dw ||
				    nofp->nof_r_drw || nofp->nof_w_drw || nofp->nof_rw_drw) {
					nfs4_close_rpc(np, nofp, NULL, nofp->nof_owner->noo_cred, R_RECOVER);
				}
#endif
			}
		}
		TAILQ_REMOVE(&np->n_opens, nofp, nof_link);
		nfs_open_file_destroy(nofp);
	}
	lck_mtx_unlock(&np->n_openlock);

	if (np->n_monlink.le_next != NFSNOLIST) {
		/* Wait for any in-progress getattr to complete, */
		/* then remove this node from the monitored node list. */
		lck_mtx_lock(&nmp->nm_lock);
		while (np->n_mflag & NMMONSCANINPROG) {
			struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
			np->n_mflag |= NMMONSCANWANT;
			msleep(&np->n_mflag, &nmp->nm_lock, PZERO - 1, "nfswaitmonscan", &ts);
		}
		if (np->n_monlink.le_next != NFSNOLIST) {
			LIST_REMOVE(np, n_monlink);
			np->n_monlink.le_next = NFSNOLIST;
		}
		lck_mtx_unlock(&nmp->nm_lock);
	}

	lck_mtx_lock(nfs_buf_mutex);
	if (!force && (!LIST_EMPTY(&np->n_dirtyblkhd) || !LIST_EMPTY(&np->n_cleanblkhd))) {
		NP(np, "nfs_reclaim: dropping %s buffers", (!LIST_EMPTY(&np->n_dirtyblkhd) ? "dirty" : "clean"));
	}
	lck_mtx_unlock(nfs_buf_mutex);
	nfs_vinvalbuf(vp, V_IGNORE_WRITEERR, ap->a_context, 0);

	lck_mtx_lock(nfs_node_hash_mutex);

	if ((vnode_vtype(vp) != VDIR) && np->n_sillyrename) {
		if (!force) {
			NP(np, "nfs_reclaim: leaving unlinked file %s", np->n_sillyrename->nsr_name);
		}
		if (np->n_sillyrename->nsr_cred != NOCRED) {
			kauth_cred_unref(&np->n_sillyrename->nsr_cred);
		}
		vnode_rele(NFSTOV(np->n_sillyrename->nsr_dnp));
		FREE_ZONE(np->n_sillyrename, sizeof(*np->n_sillyrename), M_NFSREQ);
	}

	vnode_removefsref(vp);

	if (np->n_hflag & NHHASHED) {
		LIST_REMOVE(np, n_hash);
		np->n_hflag &= ~NHHASHED;
		FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
	}
	lck_mtx_unlock(nfs_node_hash_mutex);

	/*
	 * Free up any directory cookie structures and large file handle
	 * structures that might be associated with this nfs node.
	 */
	nfs_node_lock_force(np);
	if ((vnode_vtype(vp) == VDIR) && np->n_cookiecache) {
		FREE_ZONE(np->n_cookiecache, sizeof(struct nfsdmap), M_NFSDIROFF);
	}
	if (np->n_fhsize > NFS_SMALLFH) {
		FREE_ZONE(np->n_fhp, np->n_fhsize, M_NFSBIGFH);
	}
	if (np->n_vattr.nva_acl) {
		kauth_acl_free(np->n_vattr.nva_acl);
	}
	nfs_node_unlock(np);
	vnode_clearfsnode(vp);

	if (np->n_parent) {
		if (!vnode_get(np->n_parent)) {
			vnode_rele(np->n_parent);
			vnode_put(np->n_parent);
		}
		np->n_parent = NULL;
	}

	lck_mtx_destroy(&np->n_lock, nfs_node_lck_grp);
	lck_rw_destroy(&np->n_datalock, nfs_data_lck_grp);
	lck_mtx_destroy(&np->n_openlock, nfs_open_grp);

	FSDBG_BOT(265, vp, np, np->n_flag, 0xd1ed1e);
	FREE_ZONE(np, sizeof(struct nfsnode), M_NFSNODE);
	return 0;
}
/*
 * Acquire an NFS node lock
 */
int
nfs_node_lock_internal(nfsnode_t np, int force)
{
	FSDBG_TOP(268, np, force, 0, 0);
	lck_mtx_lock(&np->n_lock);
	if (!force && !(np->n_hflag & NHHASHED)) {
		FSDBG_BOT(268, np, 0xdead, 0, 0);
		lck_mtx_unlock(&np->n_lock);
		return ENOENT;
	}
	FSDBG_BOT(268, np, force, 0, 0);
	return 0;
}

int
nfs_node_lock(nfsnode_t np)
{
	return nfs_node_lock_internal(np, 0);
}

void
nfs_node_lock_force(nfsnode_t np)
{
	nfs_node_lock_internal(np, 1);
}

/*
 * Release an NFS node lock
 */
void
nfs_node_unlock(nfsnode_t np)
{
	FSDBG(269, np, current_thread(), 0, 0);
	lck_mtx_unlock(&np->n_lock);
}
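/*
 * Illustrative sketch (not part of this file's build): typical use of the
 * node lock to guard n_flag updates.  nfs_node_lock() fails with ENOENT once
 * the node has been unhashed, so callers either check the error or use
 * nfs_node_lock_force() when they must proceed anyway.
 */
#if 0
static void
nfs_node_lock_example(nfsnode_t np)
{
	if (nfs_node_lock(np) == 0) {
		np->n_flag |= NMODIFIED;	/* example flag update made under n_lock */
		nfs_node_unlock(np);
	}
}
#endif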
/*
 * Acquire 2 NFS node locks
 *	- locks taken in reverse address order
 *	- both or neither of the locks are taken
 *	- only one lock taken per node (dup nodes are skipped)
 */
int
nfs_node_lock2(nfsnode_t np1, nfsnode_t np2)
{
	nfsnode_t first, second;
	int error;

	first = (np1 > np2) ? np1 : np2;
	second = (np1 > np2) ? np2 : np1;
	if ((error = nfs_node_lock(first))) {
		return error;
	}
	if (np1 == np2) {
		return error;
	}
	if ((error = nfs_node_lock(second))) {
		nfs_node_unlock(first);
	}
	return error;
}

void
nfs_node_unlock2(nfsnode_t np1, nfsnode_t np2)
{
	nfs_node_unlock(np1);
	if (np1 != np2) {
		nfs_node_unlock(np2);
	}
}
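/*
 * Illustrative sketch (not part of this file's build): locking a directory
 * and a child node together.  Taking both locks through nfs_node_lock2()
 * (which orders them by address) avoids deadlocking against another thread
 * locking the same pair in the opposite order.
 */
#if 0
static int
nfs_node_lock_pair_example(nfsnode_t dnp, nfsnode_t np)
{
	int error;

	if ((error = nfs_node_lock2(dnp, np))) {
		return error;
	}
	/* ... examine or update both nodes ... */
	nfs_node_unlock2(dnp, np);
	return 0;
}
#endif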
/*
 * Manage NFS node busy state.
 * (Similar to NFS node locks above)
 */
int
nfs_node_set_busy(nfsnode_t np, thread_t thd)
{
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
	int error;

	if ((error = nfs_node_lock(np))) {
		return error;
	}
	while (ISSET(np->n_flag, NBUSY)) {
		SET(np->n_flag, NBUSYWANT);
		msleep(np, &np->n_lock, PZERO - 1, "nfsbusywant", &ts);
		if ((error = nfs_sigintr(NFSTONMP(np), NULL, thd, 0))) {
			break;
		}
	}
	if (!error) {
		SET(np->n_flag, NBUSY);
	}
	nfs_node_unlock(np);
	return error;
}

void
nfs_node_clear_busy(nfsnode_t np)
{
	int wanted;

	nfs_node_lock_force(np);
	wanted = ISSET(np->n_flag, NBUSYWANT);
	CLR(np->n_flag, NBUSY | NBUSYWANT);
	nfs_node_unlock(np);
	if (wanted) {
		wakeup(np);
	}
}
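/*
 * Illustrative sketch (not part of this file's build): the busy state is the
 * long-term, sleepable form of serialization used around operations (such as
 * RPCs) that cannot be performed while holding n_lock itself.
 */
#if 0
static int
nfs_node_busy_example(nfsnode_t np, thread_t thd)
{
	int error;

	if ((error = nfs_node_set_busy(np, thd))) {
		return error;	/* interrupted while waiting for the node */
	}
	/* ... perform work that must not race with other users of np ... */
	nfs_node_clear_busy(np);
	return 0;
}
#endif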
int
nfs_node_set_busy2(nfsnode_t np1, nfsnode_t np2, thread_t thd)
{
	nfsnode_t first, second;
	int error;

	first = (np1 > np2) ? np1 : np2;
	second = (np1 > np2) ? np2 : np1;
	if ((error = nfs_node_set_busy(first, thd))) {
		return error;
	}
	if (np1 == np2) {
		return error;
	}
	if ((error = nfs_node_set_busy(second, thd))) {
		nfs_node_clear_busy(first);
	}
	return error;
}

void
nfs_node_clear_busy2(nfsnode_t np1, nfsnode_t np2)
{
	nfs_node_clear_busy(np1);
	if (np1 != np2) {
		nfs_node_clear_busy(np2);
	}
}
/* helper function to sort four nodes in reverse address order (no dupes) */
void
nfs_node_sort4(nfsnode_t np1, nfsnode_t np2, nfsnode_t np3, nfsnode_t np4, nfsnode_t *list, int *lcntp)
{
	nfsnode_t na[2], nb[2];
	int a, b, i, lcnt;

	/* sort pairs then merge */
	na[0] = (np1 > np2) ? np1 : np2;
	na[1] = (np1 > np2) ? np2 : np1;
	nb[0] = (np3 > np4) ? np3 : np4;
	nb[1] = (np3 > np4) ? np4 : np3;
	for (a = b = i = lcnt = 0; i < 4; i++) {
		if (a >= 2) {
			list[lcnt] = nb[b++];
		} else if ((b >= 2) || (na[a] >= nb[b])) {
			list[lcnt] = na[a++];
		} else {
			list[lcnt] = nb[b++];
		}
		if ((lcnt <= 0) || (list[lcnt] != list[lcnt - 1])) {
			lcnt++; /* omit dups */
		}
	}
	if (list[lcnt - 1] == NULL) {
		lcnt--;
	}
	*lcntp = lcnt;
}
int
nfs_node_set_busy4(nfsnode_t np1, nfsnode_t np2, nfsnode_t np3, nfsnode_t np4, thread_t thd)
{
	nfsnode_t list[4];
	int i, lcnt, error;

	nfs_node_sort4(np1, np2, np3, np4, list, &lcnt);

	/* Now we can lock using list[0 - lcnt-1] */
	for (i = 0; i < lcnt; ++i) {
		if ((error = nfs_node_set_busy(list[i], thd))) {
			/* Drop any locks we acquired. */
			while (--i >= 0) {
				nfs_node_clear_busy(list[i]);
			}
			return error;
		}
	}
	return 0;
}

void
nfs_node_clear_busy4(nfsnode_t np1, nfsnode_t np2, nfsnode_t np3, nfsnode_t np4)
{
	nfsnode_t list[4];
	int lcnt;

	nfs_node_sort4(np1, np2, np3, np4, list, &lcnt);
	while (--lcnt >= 0) {
		nfs_node_clear_busy(list[lcnt]);
	}
}
/*
 * Acquire an NFS node data lock
 */
void
nfs_data_lock(nfsnode_t np, int locktype)
{
	nfs_data_lock_internal(np, locktype, 1);
}

void
nfs_data_lock_noupdate(nfsnode_t np, int locktype)
{
	nfs_data_lock_internal(np, locktype, 0);
}

void
nfs_data_lock_internal(nfsnode_t np, int locktype, int updatesize)
{
	FSDBG_TOP(270, np, locktype, np->n_datalockowner, 0);
	if (locktype == NFS_DATA_LOCK_SHARED) {
		if (updatesize && ISSET(np->n_flag, NUPDATESIZE)) {
			nfs_data_update_size(np, 0);
		}
		lck_rw_lock_shared(&np->n_datalock);
	} else {
		lck_rw_lock_exclusive(&np->n_datalock);
		np->n_datalockowner = current_thread();
		if (updatesize && ISSET(np->n_flag, NUPDATESIZE)) {
			nfs_data_update_size(np, 1);
		}
	}
	FSDBG_BOT(270, np, locktype, np->n_datalockowner, 0);
}
/*
 * Release an NFS node data lock
 */
void
nfs_data_unlock(nfsnode_t np)
{
	nfs_data_unlock_internal(np, 1);
}

void
nfs_data_unlock_noupdate(nfsnode_t np)
{
	nfs_data_unlock_internal(np, 0);
}

void
nfs_data_unlock_internal(nfsnode_t np, int updatesize)
{
	int mine = (np->n_datalockowner == current_thread());

	FSDBG_TOP(271, np, np->n_datalockowner, current_thread(), 0);
	if (updatesize && mine && ISSET(np->n_flag, NUPDATESIZE)) {
		nfs_data_update_size(np, 1);
	}
	np->n_datalockowner = NULL;
	lck_rw_done(&np->n_datalock);
	if (updatesize && !mine && ISSET(np->n_flag, NUPDATESIZE)) {
		nfs_data_update_size(np, 0);
	}
	FSDBG_BOT(271, np, np->n_datalockowner, current_thread(), 0);
}
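/*
 * Illustrative sketch (not part of this file's build): the data lock is taken
 * shared by paths that only read cached data/size and exclusive by paths that
 * change them.  Taking it via the updating variants also folds in any pending
 * NUPDATESIZE size change, as nfs_data_lock_internal() above shows.
 */
#if 0
static void
nfs_data_lock_example(nfsnode_t np)
{
	/* reader: many threads may hold the data lock shared at once */
	nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
	/* ... read np->n_size / cached data ... */
	nfs_data_unlock(np);

	/* writer/truncate: exclusive access to the node's data state */
	nfs_data_lock(np, NFS_DATA_LOCK_EXCLUSIVE);
	/* ... change the size, invalidate buffers, etc. ... */
	nfs_data_unlock(np);
}
#endif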
/*
 * update an NFS node's size
 */
void
nfs_data_update_size(nfsnode_t np, int datalocked)
{
	int error;

	FSDBG_TOP(272, np, np->n_flag, np->n_size, np->n_newsize);
	if (!datalocked) {
		nfs_data_lock(np, NFS_DATA_LOCK_EXCLUSIVE);
		/* grabbing data lock will automatically update size */
		nfs_data_unlock(np);
		FSDBG_BOT(272, np, np->n_flag, np->n_size, np->n_newsize);
		return;
	}
	error = nfs_node_lock(np);
	if (error || !ISSET(np->n_flag, NUPDATESIZE)) {
		if (!error) {
			nfs_node_unlock(np);
		}
		FSDBG_BOT(272, np, np->n_flag, np->n_size, np->n_newsize);
		return;
	}
	CLR(np->n_flag, NUPDATESIZE);
	np->n_size = np->n_newsize;
	/* make sure we invalidate buffers the next chance we get */
	SET(np->n_flag, NNEEDINVALIDATE);
	nfs_node_unlock(np);
	ubc_setsize(NFSTOV(np), (off_t)np->n_size); /* XXX error? */
	FSDBG_BOT(272, np, np->n_flag, np->n_size, np->n_newsize);
}
int
nfs_mount_is_dirty(mount_t mp)
{
	u_long i;
	nfsnode_t np;
#ifdef NFS_NODE_DEBUG
	struct timeval now, then, diff;
	u_long ncnt = 0;

	microuptime(&now);
#endif
	lck_mtx_lock(nfs_node_hash_mutex);
	for (i = 0; i <= nfsnodehash; i++) {
		LIST_FOREACH(np, &nfsnodehashtbl[i], n_hash) {
#ifdef NFS_NODE_DEBUG
			ncnt++;
#endif
			if (np->n_mount == mp && !LIST_EMPTY(&np->n_dirtyblkhd)) {
				goto out;
			}
		}
	}
out:
	lck_mtx_unlock(nfs_node_hash_mutex);
#ifdef NFS_NODE_DEBUG
	microuptime(&then);
	timersub(&then, &now, &diff);

	NFS_DBG(NFS_FAC_SOCK, 7, "mount_is_dirty for %s took %lld mics for %ld slots and %ld nodes return %d\n",
	    vfs_statfs(mp)->f_mntfromname, (uint64_t)diff.tv_sec * 1000000LL + diff.tv_usec, i, ncnt, (i <= nfsnodehash));
#endif
	return i <= nfsnodehash;
}