/*
 * Copyright (c) 2006-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * vnode op calls for NFS version 4
 */
32 #include <sys/param.h>
33 #include <sys/kernel.h>
34 #include <sys/systm.h>
35 #include <sys/resourcevar.h>
36 #include <sys/proc_internal.h>
37 #include <sys/kauth.h>
38 #include <sys/mount_internal.h>
39 #include <sys/malloc.h>
40 #include <sys/kpi_mbuf.h>
42 #include <sys/vnode_internal.h>
43 #include <sys/dirent.h>
44 #include <sys/fcntl.h>
45 #include <sys/lockf.h>
46 #include <sys/ubc_internal.h>
48 #include <sys/signalvar.h>
51 #include <vfs/vfs_support.h>
56 #include <kern/clock.h>
57 #include <libkern/OSAtomic.h>
59 #include <miscfs/fifofs/fifo.h>
60 #include <miscfs/specfs/specdev.h>
62 #include <nfs/rpcv2.h>
63 #include <nfs/nfsproto.h>
65 #include <nfs/nfsnode.h>
66 #include <nfs/nfs_gss.h>
67 #include <nfs/nfsmount.h>
68 #include <nfs/nfs_lock.h>
69 #include <nfs/xdr_subs.h>
70 #include <nfs/nfsm_subs.h>
73 #include <netinet/in.h>
74 #include <netinet/in_var.h>
75 #include <vm/vm_kern.h>
77 #include <kern/task.h>
78 #include <kern/sched_prim.h>
81 nfs4_access_rpc(nfsnode_t np
, u_int32_t
*mode
, vfs_context_t ctx
)
83 int error
= 0, lockerror
= ENOENT
, status
, numops
, slot
;
85 struct nfsm_chain nmreq
, nmrep
;
87 uint32_t access
= 0, supported
= 0, missing
;
88 struct nfsmount
*nmp
= NFSTONMP(np
);
89 int nfsvers
= nmp
->nm_vers
;
92 nfsm_chain_null(&nmreq
);
93 nfsm_chain_null(&nmrep
);
95 // PUTFH, ACCESS, GETATTR
97 nfsm_chain_build_alloc_init(error
, &nmreq
, 17 * NFSX_UNSIGNED
);
98 nfsm_chain_add_compound_header(error
, &nmreq
, "access", numops
);
100 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
101 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
103 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_ACCESS
);
104 nfsm_chain_add_32(error
, &nmreq
, *mode
);
106 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
107 nfsm_chain_add_bitmap_masked(error
, &nmreq
, nfs_getattr_bitmap
,
108 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
109 nfsm_chain_build_done(error
, &nmreq
);
110 nfsm_assert(error
, (numops
== 0), EPROTO
);
112 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &nmrep
, &xid
, &status
);
114 if ((lockerror
= nfs_node_lock(np
)))
116 nfsm_chain_skip_tag(error
, &nmrep
);
117 nfsm_chain_get_32(error
, &nmrep
, numops
);
118 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
119 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_ACCESS
);
120 nfsm_chain_get_32(error
, &nmrep
, supported
);
121 nfsm_chain_get_32(error
, &nmrep
, access
);
123 if ((missing
= (*mode
& ~supported
))) {
124 /* missing support for something(s) we wanted */
125 if (missing
& NFS_ACCESS_DELETE
) {
127 * If the server doesn't report DELETE (possible
128 * on UNIX systems), we'll assume that it is OK
129 * and just let any subsequent delete action fail
130 * if it really isn't deletable.
132 access
|= NFS_ACCESS_DELETE
;
135 /* Some servers report DELETE support but erroneously give a denied answer. */
136 if ((*mode
& NFS_ACCESS_DELETE
) && nfs_access_delete
&& !(access
& NFS_ACCESS_DELETE
))
137 access
|= NFS_ACCESS_DELETE
;
138 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
139 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, NULL
, &xid
);
142 uid
= kauth_cred_getuid(vfs_context_ucred(ctx
));
143 slot
= nfs_node_mode_slot(np
, uid
, 1);
144 np
->n_modeuid
[slot
] = uid
;
146 np
->n_modestamp
[slot
] = now
.tv_sec
;
147 np
->n_mode
[slot
] = access
;
149 /* pass back the mode returned with this request */
150 *mode
= np
->n_mode
[slot
];
154 nfsm_chain_cleanup(&nmreq
);
155 nfsm_chain_cleanup(&nmrep
);
166 struct nfs_vattr
*nvap
,
169 struct nfsmount
*nmp
= mp
? VFSTONFS(mp
) : NFSTONMP(np
);
170 int error
= 0, status
, nfsvers
, numops
;
171 struct nfsm_chain nmreq
, nmrep
;
175 nfsvers
= nmp
->nm_vers
;
177 nfsm_chain_null(&nmreq
);
178 nfsm_chain_null(&nmrep
);
182 nfsm_chain_build_alloc_init(error
, &nmreq
, 15 * NFSX_UNSIGNED
);
183 nfsm_chain_add_compound_header(error
, &nmreq
, "getattr", numops
);
185 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
186 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, fhp
, fhsize
);
188 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
189 nfsm_chain_add_bitmap_masked(error
, &nmreq
, nfs_getattr_bitmap
,
190 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
191 nfsm_chain_build_done(error
, &nmreq
);
192 nfsm_assert(error
, (numops
== 0), EPROTO
);
194 error
= nfs_request(np
, mp
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &nmrep
, xidp
, &status
);
196 nfsm_chain_skip_tag(error
, &nmrep
);
197 nfsm_chain_get_32(error
, &nmrep
, numops
);
198 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
199 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
201 NFS_CLEAR_ATTRIBUTES(nvap
->nva_bitmap
);
202 error
= nfs4_parsefattr(&nmrep
, NULL
, nvap
, NULL
, NULL
);
204 nfsm_chain_cleanup(&nmreq
);
205 nfsm_chain_cleanup(&nmrep
);
210 nfs4_readlink_rpc(nfsnode_t np
, char *buf
, uint32_t *buflenp
, vfs_context_t ctx
)
212 struct nfsmount
*nmp
;
213 int error
= 0, lockerror
= ENOENT
, status
, numops
;
216 struct nfsm_chain nmreq
, nmrep
;
221 nfsm_chain_null(&nmreq
);
222 nfsm_chain_null(&nmrep
);
224 // PUTFH, GETATTR, READLINK
226 nfsm_chain_build_alloc_init(error
, &nmreq
, 16 * NFSX_UNSIGNED
);
227 nfsm_chain_add_compound_header(error
, &nmreq
, "readlink", numops
);
229 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
230 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
232 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
233 nfsm_chain_add_bitmap_masked(error
, &nmreq
, nfs_getattr_bitmap
,
234 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
236 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READLINK
);
237 nfsm_chain_build_done(error
, &nmreq
);
238 nfsm_assert(error
, (numops
== 0), EPROTO
);
240 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &nmrep
, &xid
, &status
);
242 if ((lockerror
= nfs_node_lock(np
)))
244 nfsm_chain_skip_tag(error
, &nmrep
);
245 nfsm_chain_get_32(error
, &nmrep
, numops
);
246 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
247 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
248 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, NULL
, &xid
);
249 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READLINK
);
250 nfsm_chain_get_32(error
, &nmrep
, len
);
252 if (len
>= *buflenp
) {
253 if (np
->n_size
&& (np
->n_size
< *buflenp
))
258 nfsm_chain_get_opaque(error
, &nmrep
, len
, buf
);
264 nfsm_chain_cleanup(&nmreq
);
265 nfsm_chain_cleanup(&nmrep
);
276 struct nfsreq_cbinfo
*cb
,
277 struct nfsreq
**reqp
)
279 struct nfsmount
*nmp
;
280 int error
= 0, nfsvers
, numops
;
282 struct nfsm_chain nmreq
;
287 nfsvers
= nmp
->nm_vers
;
289 nfsm_chain_null(&nmreq
);
291 // PUTFH, READ, GETATTR
293 nfsm_chain_build_alloc_init(error
, &nmreq
, 22 * NFSX_UNSIGNED
);
294 nfsm_chain_add_compound_header(error
, &nmreq
, "read", numops
);
296 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
297 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
299 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READ
);
300 nfs_get_stateid(np
, thd
, cred
, &stateid
);
301 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
302 nfsm_chain_add_64(error
, &nmreq
, offset
);
303 nfsm_chain_add_32(error
, &nmreq
, len
);
305 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
306 nfsm_chain_add_bitmap_masked(error
, &nmreq
, nfs_getattr_bitmap
,
307 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
308 nfsm_chain_build_done(error
, &nmreq
);
309 nfsm_assert(error
, (numops
== 0), EPROTO
);
311 error
= nfs_request_async(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, cb
, reqp
);
313 nfsm_chain_cleanup(&nmreq
);
318 nfs4_read_rpc_async_finish(
325 struct nfsmount
*nmp
;
326 int error
= 0, lockerror
, nfsvers
, numops
, status
, eof
= 0;
329 struct nfsm_chain nmrep
;
333 nfs_request_async_cancel(req
);
336 nfsvers
= nmp
->nm_vers
;
338 nfsm_chain_null(&nmrep
);
340 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
341 if (error
== EINPROGRESS
) /* async request restarted */
344 if ((lockerror
= nfs_node_lock(np
)))
346 nfsm_chain_skip_tag(error
, &nmrep
);
347 nfsm_chain_get_32(error
, &nmrep
, numops
);
348 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
349 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READ
);
350 nfsm_chain_get_32(error
, &nmrep
, eof
);
351 nfsm_chain_get_32(error
, &nmrep
, retlen
);
353 *lenp
= MIN(retlen
, *lenp
);
354 error
= nfsm_chain_get_uio(&nmrep
, *lenp
, uio
);
356 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
357 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, NULL
, &xid
);
365 nfsm_chain_cleanup(&nmrep
);
370 nfs4_write_rpc_async(
377 struct nfsreq_cbinfo
*cb
,
378 struct nfsreq
**reqp
)
380 struct nfsmount
*nmp
;
381 int error
= 0, nfsvers
, numops
;
383 struct nfsm_chain nmreq
;
388 nfsvers
= nmp
->nm_vers
;
390 nfsm_chain_null(&nmreq
);
392 // PUTFH, WRITE, GETATTR
394 nfsm_chain_build_alloc_init(error
, &nmreq
, 25 * NFSX_UNSIGNED
+ len
);
395 nfsm_chain_add_compound_header(error
, &nmreq
, "write", numops
);
397 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
398 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
400 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_WRITE
);
401 nfs_get_stateid(np
, thd
, cred
, &stateid
);
402 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
403 nfsm_chain_add_64(error
, &nmreq
, uio_offset(uio
));
404 nfsm_chain_add_32(error
, &nmreq
, iomode
);
405 nfsm_chain_add_32(error
, &nmreq
, len
);
407 error
= nfsm_chain_add_uio(&nmreq
, uio
, len
);
409 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
410 nfsm_chain_add_bitmap_masked(error
, &nmreq
, nfs_getattr_bitmap
,
411 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
412 nfsm_chain_build_done(error
, &nmreq
);
413 nfsm_assert(error
, (numops
== 0), EPROTO
);
416 error
= nfs_request_async(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, cb
, reqp
);
418 nfsm_chain_cleanup(&nmreq
);
423 nfs4_write_rpc_async_finish(
430 struct nfsmount
*nmp
;
431 int error
= 0, lockerror
= ENOENT
, nfsvers
, numops
, status
;
432 int committed
= NFS_WRITE_FILESYNC
;
434 u_int64_t xid
, wverf
;
436 struct nfsm_chain nmrep
;
440 nfs_request_async_cancel(req
);
443 nfsvers
= nmp
->nm_vers
;
445 nfsm_chain_null(&nmrep
);
447 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
448 if (error
== EINPROGRESS
) /* async request restarted */
453 if (!error
&& (lockerror
= nfs_node_lock(np
)))
455 nfsm_chain_skip_tag(error
, &nmrep
);
456 nfsm_chain_get_32(error
, &nmrep
, numops
);
457 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
458 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_WRITE
);
459 nfsm_chain_get_32(error
, &nmrep
, rlen
);
464 nfsm_chain_get_32(error
, &nmrep
, committed
);
465 nfsm_chain_get_64(error
, &nmrep
, wverf
);
469 lck_mtx_lock(&nmp
->nm_lock
);
470 if (!(nmp
->nm_state
& NFSSTA_HASWRITEVERF
)) {
471 nmp
->nm_verf
= wverf
;
472 nmp
->nm_state
|= NFSSTA_HASWRITEVERF
;
473 } else if (nmp
->nm_verf
!= wverf
) {
474 nmp
->nm_verf
= wverf
;
476 lck_mtx_unlock(&nmp
->nm_lock
);
477 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
478 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, NULL
, &xid
);
482 nfsm_chain_cleanup(&nmrep
);
483 if ((committed
!= NFS_WRITE_FILESYNC
) && nfs_allow_async
&&
484 ((mp
= NFSTOMP(np
))) && (vfs_flags(mp
) & MNT_ASYNC
))
485 committed
= NFS_WRITE_FILESYNC
;
486 *iomodep
= committed
;
498 int error
= 0, lockerror
= ENOENT
, remove_error
= 0, status
;
499 struct nfsmount
*nmp
;
502 struct nfsm_chain nmreq
, nmrep
;
507 nfsvers
= nmp
->nm_vers
;
509 nfsm_chain_null(&nmreq
);
510 nfsm_chain_null(&nmrep
);
512 // PUTFH, REMOVE, GETATTR
514 nfsm_chain_build_alloc_init(error
, &nmreq
, 17 * NFSX_UNSIGNED
+ namelen
);
515 nfsm_chain_add_compound_header(error
, &nmreq
, "remove", numops
);
517 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
518 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
520 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_REMOVE
);
521 nfsm_chain_add_string(error
, &nmreq
, name
, namelen
);
523 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
524 nfsm_chain_add_bitmap_masked(error
, &nmreq
, nfs_getattr_bitmap
,
525 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
526 nfsm_chain_build_done(error
, &nmreq
);
527 nfsm_assert(error
, (numops
== 0), EPROTO
);
530 error
= nfs_request2(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, 0, &nmrep
, &xid
, &status
);
532 if ((lockerror
= nfs_node_lock(dnp
)))
534 nfsm_chain_skip_tag(error
, &nmrep
);
535 nfsm_chain_get_32(error
, &nmrep
, numops
);
536 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
537 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_REMOVE
);
538 remove_error
= error
;
539 nfsm_chain_check_change_info(error
, &nmrep
, dnp
);
540 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
541 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, NULL
, &xid
);
542 if (error
&& !lockerror
)
543 NATTRINVALIDATE(dnp
);
545 nfsm_chain_cleanup(&nmreq
);
546 nfsm_chain_cleanup(&nmrep
);
549 dnp
->n_flag
|= NMODIFIED
;
550 nfs_node_unlock(dnp
);
552 if (error
== NFSERR_GRACE
) {
553 tsleep(&nmp
->nm_state
, (PZERO
-1), "nfsgrace", 2*hz
);
557 return (remove_error
);
570 int error
= 0, lockerror
= ENOENT
, status
, nfsvers
, numops
;
571 struct nfsmount
*nmp
;
572 u_int64_t xid
, savedxid
;
573 struct nfsm_chain nmreq
, nmrep
;
575 nmp
= NFSTONMP(fdnp
);
578 nfsvers
= nmp
->nm_vers
;
580 nfsm_chain_null(&nmreq
);
581 nfsm_chain_null(&nmrep
);
583 // PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM)
585 nfsm_chain_build_alloc_init(error
, &nmreq
, 30 * NFSX_UNSIGNED
+ fnamelen
+ tnamelen
);
586 nfsm_chain_add_compound_header(error
, &nmreq
, "rename", numops
);
588 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
589 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, fdnp
->n_fhp
, fdnp
->n_fhsize
);
591 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
593 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
594 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, tdnp
->n_fhp
, tdnp
->n_fhsize
);
596 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RENAME
);
597 nfsm_chain_add_string(error
, &nmreq
, fnameptr
, fnamelen
);
598 nfsm_chain_add_string(error
, &nmreq
, tnameptr
, tnamelen
);
600 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
601 nfsm_chain_add_bitmap_masked(error
, &nmreq
, nfs_getattr_bitmap
,
602 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
604 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
606 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
607 nfsm_chain_add_bitmap_masked(error
, &nmreq
, nfs_getattr_bitmap
,
608 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
609 nfsm_chain_build_done(error
, &nmreq
);
610 nfsm_assert(error
, (numops
== 0), EPROTO
);
613 error
= nfs_request(fdnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &nmrep
, &xid
, &status
);
615 if ((lockerror
= nfs_node_lock2(fdnp
, tdnp
)))
617 nfsm_chain_skip_tag(error
, &nmrep
);
618 nfsm_chain_get_32(error
, &nmrep
, numops
);
619 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
620 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
621 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
622 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RENAME
);
623 nfsm_chain_check_change_info(error
, &nmrep
, fdnp
);
624 nfsm_chain_check_change_info(error
, &nmrep
, tdnp
);
625 /* directory attributes: if we don't get them, make sure to invalidate */
626 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
628 nfsm_chain_loadattr(error
, &nmrep
, tdnp
, nfsvers
, NULL
, &xid
);
629 if (error
&& !lockerror
)
630 NATTRINVALIDATE(tdnp
);
631 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
632 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
634 nfsm_chain_loadattr(error
, &nmrep
, fdnp
, nfsvers
, NULL
, &xid
);
635 if (error
&& !lockerror
)
636 NATTRINVALIDATE(fdnp
);
638 nfsm_chain_cleanup(&nmreq
);
639 nfsm_chain_cleanup(&nmrep
);
641 fdnp
->n_flag
|= NMODIFIED
;
642 tdnp
->n_flag
|= NMODIFIED
;
643 nfs_node_unlock2(fdnp
, tdnp
);
645 /* Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. */
652 * NFS V4 readdir RPC.
655 nfs4_readdir_rpc(nfsnode_t dnp
, struct nfsbuf
*bp
, vfs_context_t ctx
)
657 struct nfsmount
*nmp
;
658 int error
= 0, lockerror
, nfsvers
, rdirplus
, bigcookies
, numops
;
659 int i
, status
, more_entries
= 1, eof
, bp_dropped
= 0;
660 uint32_t nmreaddirsize
, nmrsize
;
661 uint32_t namlen
, skiplen
, fhlen
, xlen
, attrlen
, reclen
, space_free
, space_needed
;
662 uint64_t cookie
, lastcookie
, xid
, savedxid
;
663 struct nfsm_chain nmreq
, nmrep
, nmrepsave
;
665 struct nfs_vattr nvattr
, *nvattrp
;
666 struct nfs_dir_buf_header
*ndbhp
;
668 char *padstart
, padlen
;
670 uint32_t entry_attrs
[NFS_ATTR_BITMAP_LEN
];
676 nfsvers
= nmp
->nm_vers
;
677 nmreaddirsize
= nmp
->nm_readdirsize
;
678 nmrsize
= nmp
->nm_rsize
;
679 bigcookies
= nmp
->nm_state
& NFSSTA_BIGCOOKIES
;
680 rdirplus
= ((nfsvers
> NFS_VER2
) && (nmp
->nm_flag
& NFSMNT_RDIRPLUS
)) ? 1 : 0;
683 * Set up attribute request for entries.
684 * For READDIRPLUS functionality, get everything.
685 * Otherwise, just get what we need for struct direntry.
689 for (i
=0; i
< NFS_ATTR_BITMAP_LEN
; i
++)
691 nfs_getattr_bitmap
[i
] &
692 nmp
->nm_fsattr
.nfsa_supp_attr
[i
];
693 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_FILEHANDLE
);
696 NFS_CLEAR_ATTRIBUTES(entry_attrs
);
697 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_TYPE
);
698 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_FILEID
);
700 /* XXX NFS_BITMAP_SET(entry_attrs, NFS_FATTR_MOUNTED_ON_FILEID); */
701 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_RDATTR_ERROR
);
703 /* lock to protect access to cookie verifier */
704 if ((lockerror
= nfs_node_lock(dnp
)))
707 /* determine cookie to use, and move dp to the right offset */
708 ndbhp
= (struct nfs_dir_buf_header
*)bp
->nb_data
;
709 dp
= NFS_DIR_BUF_FIRST_DIRENTRY(bp
);
710 if (ndbhp
->ndbh_count
) {
711 for (i
=0; i
< ndbhp
->ndbh_count
-1; i
++)
712 dp
= NFS_DIRENTRY_NEXT(dp
);
713 cookie
= dp
->d_seekoff
;
714 dp
= NFS_DIRENTRY_NEXT(dp
);
716 cookie
= bp
->nb_lblkno
;
717 /* increment with every buffer read */
718 OSAddAtomic(1, &nfsstats
.readdir_bios
);
723 * The NFS client is responsible for the "." and ".." entries in the
724 * directory. So, we put them at the start of the first buffer.
726 if ((bp
->nb_lblkno
== 0) && (ndbhp
->ndbh_count
== 0)) {
728 fhlen
= rdirplus
? fh
.fh_len
+ 1 : 0;
729 xlen
= rdirplus
? (fhlen
+ sizeof(time_t)) : 0;
732 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
734 bzero(&dp
->d_name
[namlen
+1], xlen
);
735 dp
->d_namlen
= namlen
;
736 strlcpy(dp
->d_name
, ".", namlen
+1);
737 dp
->d_fileno
= dnp
->n_vattr
.nva_fileid
;
739 dp
->d_reclen
= reclen
;
741 padstart
= dp
->d_name
+ dp
->d_namlen
+ 1 + xlen
;
742 dp
= NFS_DIRENTRY_NEXT(dp
);
743 padlen
= (char*)dp
- padstart
;
745 bzero(padstart
, padlen
);
746 if (rdirplus
) /* zero out attributes */
747 bzero(NFS_DIR_BUF_NVATTR(bp
, 0), sizeof(struct nfs_vattr
));
751 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
753 bzero(&dp
->d_name
[namlen
+1], xlen
);
754 dp
->d_namlen
= namlen
;
755 strlcpy(dp
->d_name
, "..", namlen
+1);
757 dp
->d_fileno
= VTONFS(dnp
->n_parent
)->n_vattr
.nva_fileid
;
759 dp
->d_fileno
= dnp
->n_vattr
.nva_fileid
;
761 dp
->d_reclen
= reclen
;
763 padstart
= dp
->d_name
+ dp
->d_namlen
+ 1 + xlen
;
764 dp
= NFS_DIRENTRY_NEXT(dp
);
765 padlen
= (char*)dp
- padstart
;
767 bzero(padstart
, padlen
);
768 if (rdirplus
) /* zero out attributes */
769 bzero(NFS_DIR_BUF_NVATTR(bp
, 1), sizeof(struct nfs_vattr
));
771 ndbhp
->ndbh_entry_end
= (char*)dp
- bp
->nb_data
;
772 ndbhp
->ndbh_count
= 2;
776 * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
777 * the buffer is full (or we hit EOF). Then put the remainder of the
778 * results in the next buffer(s).
780 nfsm_chain_null(&nmreq
);
781 nfsm_chain_null(&nmrep
);
782 while (nfs_dir_buf_freespace(bp
, rdirplus
) && !(ndbhp
->ndbh_flags
& NDB_FULL
)) {
784 // PUTFH, GETATTR, READDIR
786 nfsm_chain_build_alloc_init(error
, &nmreq
, 26 * NFSX_UNSIGNED
);
787 nfsm_chain_add_compound_header(error
, &nmreq
, tag
, numops
);
789 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
790 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
792 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
793 nfsm_chain_add_bitmap_masked(error
, &nmreq
, nfs_getattr_bitmap
,
794 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
796 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READDIR
);
797 nfsm_chain_add_64(error
, &nmreq
, (cookie
<= 2) ? 0 : cookie
);
798 nfsm_chain_add_64(error
, &nmreq
, dnp
->n_cookieverf
);
799 nfsm_chain_add_32(error
, &nmreq
, nmreaddirsize
);
800 nfsm_chain_add_32(error
, &nmreq
, nmrsize
);
801 nfsm_chain_add_bitmap(error
, &nmreq
, entry_attrs
, NFS_ATTR_BITMAP_LEN
);
802 nfsm_chain_build_done(error
, &nmreq
);
803 nfsm_assert(error
, (numops
== 0), EPROTO
);
804 nfs_node_unlock(dnp
);
806 error
= nfs_request(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &nmrep
, &xid
, &status
);
808 if ((lockerror
= nfs_node_lock(dnp
)))
812 nfsm_chain_skip_tag(error
, &nmrep
);
813 nfsm_chain_get_32(error
, &nmrep
, numops
);
814 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
815 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
816 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, NULL
, &xid
);
817 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READDIR
);
818 nfsm_chain_get_64(error
, &nmrep
, dnp
->n_cookieverf
);
819 nfsm_chain_get_32(error
, &nmrep
, more_entries
);
822 nfs_node_unlock(dnp
);
830 /* loop through the entries packing them into the buffer */
831 while (more_entries
) {
832 /* Entry: COOKIE, NAME, FATTR */
833 nfsm_chain_get_64(error
, &nmrep
, cookie
);
834 nfsm_chain_get_32(error
, &nmrep
, namlen
);
836 if (!bigcookies
&& (cookie
>> 32) && (nmp
== NFSTONMP(dnp
))) {
837 /* we've got a big cookie, make sure flag is set */
838 lck_mtx_lock(&nmp
->nm_lock
);
839 nmp
->nm_state
|= NFSSTA_BIGCOOKIES
;
840 lck_mtx_unlock(&nmp
->nm_lock
);
843 /* just truncate names that don't fit in direntry.d_name */
848 if (namlen
> (sizeof(dp
->d_name
)-1)) {
849 skiplen
= namlen
- sizeof(dp
->d_name
) + 1;
850 namlen
= sizeof(dp
->d_name
) - 1;
854 /* guess that fh size will be same as parent */
855 fhlen
= rdirplus
? (1 + dnp
->n_fhsize
) : 0;
856 xlen
= rdirplus
? (fhlen
+ sizeof(time_t)) : 0;
857 attrlen
= rdirplus
? sizeof(struct nfs_vattr
) : 0;
858 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
859 space_needed
= reclen
+ attrlen
;
860 space_free
= nfs_dir_buf_freespace(bp
, rdirplus
);
861 if (space_needed
> space_free
) {
863 * We still have entries to pack, but we've
864 * run out of room in the current buffer.
865 * So we need to move to the next buffer.
866 * The block# for the next buffer is the
867 * last cookie in the current buffer.
870 ndbhp
->ndbh_flags
|= NDB_FULL
;
871 nfs_buf_release(bp
, 0);
874 error
= nfs_buf_get(dnp
, lastcookie
, NFS_DIRBLKSIZ
, vfs_context_thread(ctx
), NBLK_READ
, &bp
);
876 /* initialize buffer */
877 ndbhp
= (struct nfs_dir_buf_header
*)bp
->nb_data
;
878 ndbhp
->ndbh_flags
= 0;
879 ndbhp
->ndbh_count
= 0;
880 ndbhp
->ndbh_entry_end
= sizeof(*ndbhp
);
881 ndbhp
->ndbh_ncgen
= dnp
->n_ncgen
;
882 space_free
= nfs_dir_buf_freespace(bp
, rdirplus
);
883 dp
= NFS_DIR_BUF_FIRST_DIRENTRY(bp
);
884 /* increment with every buffer read */
885 OSAddAtomic(1, &nfsstats
.readdir_bios
);
888 dp
->d_fileno
= cookie
; /* placeholder */
889 dp
->d_seekoff
= cookie
;
890 dp
->d_namlen
= namlen
;
891 dp
->d_reclen
= reclen
;
892 dp
->d_type
= DT_UNKNOWN
;
893 nfsm_chain_get_opaque(error
, &nmrep
, namlen
, dp
->d_name
);
895 dp
->d_name
[namlen
] = '\0';
897 nfsm_chain_adv(error
, &nmrep
,
898 nfsm_rndup(namlen
+ skiplen
) - nfsm_rndup(namlen
));
900 nvattrp
= rdirplus
? NFS_DIR_BUF_NVATTR(bp
, ndbhp
->ndbh_count
) : &nvattr
;
901 NFS_CLEAR_ATTRIBUTES(nvattrp
->nva_bitmap
);
902 error
= nfs4_parsefattr(&nmrep
, NULL
, nvattrp
, &fh
, NULL
);
903 if (error
&& NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_RDATTR_ERROR
)) {
904 /* OK, we didn't get attributes, whatever... */
905 if (rdirplus
) /* mark the attributes invalid */
906 bzero(nvattrp
, sizeof(struct nfs_vattr
));
908 NFS_CLEAR_ATTRIBUTES(nvattrp
->nva_bitmap
);
911 /* check for more entries after this one */
912 nfsm_chain_get_32(error
, &nmrep
, more_entries
);
915 /* Skip any "." and ".." entries returned from server. */
916 if ((dp
->d_name
[0] == '.') && ((namlen
== 1) || ((namlen
== 2) && (dp
->d_name
[1] == '.')))) {
921 if (NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_TYPE
))
922 dp
->d_type
= IFTODT(VTTOIF(nvattrp
->nva_type
));
923 if (NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_FILEID
))
924 dp
->d_fileno
= nvattrp
->nva_fileid
;
926 /* fileid is already in d_fileno, so stash xid in attrs */
927 nvattrp
->nva_fileid
= savedxid
;
928 if (NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
929 fhlen
= fh
.fh_len
+ 1;
930 xlen
= fhlen
+ sizeof(time_t);
931 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
932 space_needed
= reclen
+ attrlen
;
933 if (space_needed
> space_free
) {
934 /* didn't actually have the room... move on to next buffer */
938 /* pack the file handle into the record */
939 dp
->d_name
[dp
->d_namlen
+1] = fh
.fh_len
;
940 bcopy(fh
.fh_data
, &dp
->d_name
[dp
->d_namlen
+2], fh
.fh_len
);
942 /* mark the file handle invalid */
944 fhlen
= fh
.fh_len
+ 1;
945 xlen
= fhlen
+ sizeof(time_t);
946 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
947 bzero(&dp
->d_name
[dp
->d_namlen
+1], fhlen
);
949 *(time_t*)(&dp
->d_name
[dp
->d_namlen
+1+fhlen
]) = now
.tv_sec
;
950 dp
->d_reclen
= reclen
;
952 padstart
= dp
->d_name
+ dp
->d_namlen
+ 1 + xlen
;
956 /* advance to next direntry in buffer */
957 dp
= NFS_DIRENTRY_NEXT(dp
);
958 ndbhp
->ndbh_entry_end
= (char*)dp
- bp
->nb_data
;
959 /* zero out the pad bytes */
960 padlen
= (char*)dp
- padstart
;
962 bzero(padstart
, padlen
);
964 /* Finally, get the eof boolean */
965 nfsm_chain_get_32(error
, &nmrep
, eof
);
968 ndbhp
->ndbh_flags
|= (NDB_FULL
|NDB_EOF
);
969 nfs_node_lock_force(dnp
);
970 dnp
->n_eofcookie
= lastcookie
;
971 nfs_node_unlock(dnp
);
976 nfs_buf_release(bp
, 0);
980 if ((lockerror
= nfs_node_lock(dnp
)))
983 nfsm_chain_cleanup(&nmrep
);
984 nfsm_chain_null(&nmreq
);
987 if (bp_dropped
&& bp
)
988 nfs_buf_release(bp
, 0);
990 nfs_node_unlock(dnp
);
991 nfsm_chain_cleanup(&nmreq
);
992 nfsm_chain_cleanup(&nmrep
);
993 return (bp_dropped
? NFSERR_DIRBUFDROPPED
: error
);
997 nfs4_lookup_rpc_async(
1002 struct nfsreq
**reqp
)
1004 int error
= 0, isdotdot
= 0, getattrs
= 1, nfsvers
, numops
;
1005 struct nfsm_chain nmreq
;
1006 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
1007 struct nfsmount
*nmp
;
1009 nmp
= NFSTONMP(dnp
);
1012 nfsvers
= nmp
->nm_vers
;
1014 if ((name
[0] == '.') && (name
[1] == '.') && (namelen
== 2))
1017 nfsm_chain_null(&nmreq
);
1019 // PUTFH, GETATTR, LOOKUP(P), GETATTR (FH)
1020 numops
= getattrs
? 4 : 3;
1021 nfsm_chain_build_alloc_init(error
, &nmreq
, 20 * NFSX_UNSIGNED
+ namelen
);
1022 nfsm_chain_add_compound_header(error
, &nmreq
, "lookup", numops
);
1024 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1025 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
1027 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1028 nfsm_chain_add_bitmap_masked(error
, &nmreq
, nfs_getattr_bitmap
,
1029 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
1032 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOOKUPP
);
1034 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOOKUP
);
1035 nfsm_chain_add_string(error
, &nmreq
, name
, namelen
);
1039 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1040 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
1041 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
1042 nfsm_chain_add_bitmap_masked(error
, &nmreq
, bitmap
,
1043 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
1045 nfsm_chain_build_done(error
, &nmreq
);
1046 nfsm_assert(error
, (numops
== 0), EPROTO
);
1048 error
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
1049 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), NULL
, reqp
);
1051 nfsm_chain_cleanup(&nmreq
);
1056 nfs4_lookup_rpc_async_finish(
1058 __unused vfs_context_t ctx
,
1062 struct nfs_vattr
*nvap
)
1064 int error
= 0, lockerror
= ENOENT
, status
, nfsvers
, numops
;
1067 struct nfsmount
*nmp
;
1068 struct nfsm_chain nmrep
;
1070 nmp
= NFSTONMP(dnp
);
1071 nfsvers
= nmp
->nm_vers
;
1073 nfsm_chain_null(&nmrep
);
1075 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
1077 if ((lockerror
= nfs_node_lock(dnp
)))
1079 nfsm_chain_skip_tag(error
, &nmrep
);
1080 nfsm_chain_get_32(error
, &nmrep
, numops
);
1081 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1082 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1085 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, NULL
, &xid
);
1087 // nfsm_chain_op_check(error, &nmrep, (isdotdot ? NFS_OP_LOOKUPP : NFS_OP_LOOKUP));
1088 nfsm_chain_get_32(error
, &nmrep
, val
);
1089 nfsm_assert(error
, (val
== NFS_OP_LOOKUPP
) || (val
== NFS_OP_LOOKUP
), EBADRPC
);
1090 nfsm_chain_get_32(error
, &nmrep
, val
);
1091 nfsm_assert(error
, (val
== NFS_OK
), val
);
1093 nfsmout_if(error
|| !fhp
|| !nvap
);
1094 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1096 NFS_CLEAR_ATTRIBUTES(nvap
->nva_bitmap
);
1097 error
= nfs4_parsefattr(&nmrep
, NULL
, nvap
, fhp
, NULL
);
1098 if (!NFS_BITMAP_ISSET(nvap
->nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
1104 nfs_node_unlock(dnp
);
1105 nfsm_chain_cleanup(&nmrep
);
1116 struct nfsmount
*nmp
;
1117 int error
= 0, lockerror
, status
, nfsvers
, numops
;
1118 u_int64_t xid
, wverf
;
1120 struct nfsm_chain nmreq
, nmrep
;
1123 FSDBG(521, np
, offset
, count
, nmp
? nmp
->nm_state
: 0);
1126 if (!(nmp
->nm_state
& NFSSTA_HASWRITEVERF
))
1128 nfsvers
= nmp
->nm_vers
;
1130 if (count
> UINT32_MAX
)
1135 nfsm_chain_null(&nmreq
);
1136 nfsm_chain_null(&nmrep
);
1138 // PUTFH, COMMIT, GETATTR
1140 nfsm_chain_build_alloc_init(error
, &nmreq
, 19 * NFSX_UNSIGNED
);
1141 nfsm_chain_add_compound_header(error
, &nmreq
, "commit", numops
);
1143 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1144 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
1146 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_COMMIT
);
1147 nfsm_chain_add_64(error
, &nmreq
, offset
);
1148 nfsm_chain_add_32(error
, &nmreq
, count32
);
1150 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1151 nfsm_chain_add_bitmap_masked(error
, &nmreq
, nfs_getattr_bitmap
,
1152 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
1153 nfsm_chain_build_done(error
, &nmreq
);
1154 nfsm_assert(error
, (numops
== 0), EPROTO
);
1156 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
1157 current_thread(), cred
, 0, &nmrep
, &xid
, &status
);
1159 if ((lockerror
= nfs_node_lock(np
)))
1161 nfsm_chain_skip_tag(error
, &nmrep
);
1162 nfsm_chain_get_32(error
, &nmrep
, numops
);
1163 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1164 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_COMMIT
);
1165 nfsm_chain_get_64(error
, &nmrep
, wverf
);
1166 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1167 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, NULL
, &xid
);
1169 nfs_node_unlock(np
);
1171 lck_mtx_lock(&nmp
->nm_lock
);
1172 if (nmp
->nm_verf
!= wverf
) {
1173 nmp
->nm_verf
= wverf
;
1174 error
= NFSERR_STALEWRITEVERF
;
1176 lck_mtx_unlock(&nmp
->nm_lock
);
1178 nfsm_chain_cleanup(&nmreq
);
1179 nfsm_chain_cleanup(&nmrep
);
1186 struct nfs_fsattr
*nfsap
,
1190 int error
= 0, lockerror
, status
, nfsvers
, numops
;
1191 struct nfsm_chain nmreq
, nmrep
;
1192 struct nfsmount
*nmp
= NFSTONMP(np
);
1193 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
1194 struct nfs_vattr nvattr
;
1198 nfsvers
= nmp
->nm_vers
;
1200 nfsm_chain_null(&nmreq
);
1201 nfsm_chain_null(&nmrep
);
1203 /* NFSv4: fetch "pathconf" info for this node */
1206 nfsm_chain_build_alloc_init(error
, &nmreq
, 16 * NFSX_UNSIGNED
);
1207 nfsm_chain_add_compound_header(error
, &nmreq
, "pathconf", numops
);
1209 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1210 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
1212 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1213 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
1214 NFS_BITMAP_SET(bitmap
, NFS_FATTR_MAXLINK
);
1215 NFS_BITMAP_SET(bitmap
, NFS_FATTR_MAXNAME
);
1216 NFS_BITMAP_SET(bitmap
, NFS_FATTR_NO_TRUNC
);
1217 NFS_BITMAP_SET(bitmap
, NFS_FATTR_CHOWN_RESTRICTED
);
1218 NFS_BITMAP_SET(bitmap
, NFS_FATTR_CASE_INSENSITIVE
);
1219 NFS_BITMAP_SET(bitmap
, NFS_FATTR_CASE_PRESERVING
);
1220 nfsm_chain_add_bitmap_masked(error
, &nmreq
, bitmap
,
1221 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
1222 nfsm_chain_build_done(error
, &nmreq
);
1223 nfsm_assert(error
, (numops
== 0), EPROTO
);
1225 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &nmrep
, &xid
, &status
);
1227 nfsm_chain_skip_tag(error
, &nmrep
);
1228 nfsm_chain_get_32(error
, &nmrep
, numops
);
1229 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1230 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1232 NFS_CLEAR_ATTRIBUTES(nvattr
.nva_bitmap
);
1233 error
= nfs4_parsefattr(&nmrep
, nfsap
, &nvattr
, NULL
, NULL
);
1235 if ((lockerror
= nfs_node_lock(np
)))
1238 nfs_loadattrcache(np
, &nvattr
, &xid
, 0);
1240 nfs_node_unlock(np
);
1242 nfsm_chain_cleanup(&nmreq
);
1243 nfsm_chain_cleanup(&nmrep
);
1249 struct vnop_getattr_args
/* {
1250 struct vnodeop_desc *a_desc;
1252 struct vnode_attr *a_vap;
1253 vfs_context_t a_context;
1256 struct vnode_attr
*vap
= ap
->a_vap
;
1257 struct nfs_vattr nva
;
1260 error
= nfs_getattr(VTONFS(ap
->a_vp
), &nva
, ap
->a_context
, NGA_CACHED
);
1264 /* copy what we have in nva to *a_vap */
1265 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_RAWDEV
)) {
1266 dev_t rdev
= makedev(nva
.nva_rawdev
.specdata1
, nva
.nva_rawdev
.specdata2
);
1267 VATTR_RETURN(vap
, va_rdev
, rdev
);
1269 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_NUMLINKS
))
1270 VATTR_RETURN(vap
, va_nlink
, nva
.nva_nlink
);
1271 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_SIZE
))
1272 VATTR_RETURN(vap
, va_data_size
, nva
.nva_size
);
1273 // VATTR_RETURN(vap, va_data_alloc, ???);
1274 // VATTR_RETURN(vap, va_total_size, ???);
1275 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_SPACE_USED
))
1276 VATTR_RETURN(vap
, va_total_alloc
, nva
.nva_bytes
);
1277 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_OWNER
))
1278 VATTR_RETURN(vap
, va_uid
, nva
.nva_uid
);
1279 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_OWNER_GROUP
))
1280 VATTR_RETURN(vap
, va_gid
, nva
.nva_gid
);
1281 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_MODE
))
1282 VATTR_RETURN(vap
, va_mode
, nva
.nva_mode
);
1283 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_ARCHIVE
) ||
1284 NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_HIDDEN
)) {
1286 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_ARCHIVE
))
1287 flags
|= SF_ARCHIVED
;
1288 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_HIDDEN
))
1290 VATTR_RETURN(vap
, va_flags
, flags
);
1292 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_CREATE
)) {
1293 vap
->va_create_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_CREATE
];
1294 vap
->va_create_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_CREATE
];
1295 VATTR_SET_SUPPORTED(vap
, va_create_time
);
1297 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_ACCESS
)) {
1298 vap
->va_access_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_ACCESS
];
1299 vap
->va_access_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_ACCESS
];
1300 VATTR_SET_SUPPORTED(vap
, va_access_time
);
1302 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_MODIFY
)) {
1303 vap
->va_modify_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_MODIFY
];
1304 vap
->va_modify_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_MODIFY
];
1305 VATTR_SET_SUPPORTED(vap
, va_modify_time
);
1307 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_METADATA
)) {
1308 vap
->va_change_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_CHANGE
];
1309 vap
->va_change_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_CHANGE
];
1310 VATTR_SET_SUPPORTED(vap
, va_change_time
);
1312 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_BACKUP
)) {
1313 vap
->va_backup_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_BACKUP
];
1314 vap
->va_backup_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_BACKUP
];
1315 VATTR_SET_SUPPORTED(vap
, va_backup_time
);
1317 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_FILEID
))
1318 VATTR_RETURN(vap
, va_fileid
, nva
.nva_fileid
);
1319 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TYPE
))
1320 VATTR_RETURN(vap
, va_type
, nva
.nva_type
);
1321 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_CHANGE
))
1322 VATTR_RETURN(vap
, va_filerev
, nva
.nva_change
);
1324 // other attrs we might support someday:
1325 // VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? */);
1326 // struct kauth_acl *va_acl; /* access control list */
1327 // guid_t va_uuuid; /* file owner UUID */
1328 // guid_t va_guuid; /* file group UUID */
1336 struct vnode_attr
*vap
,
1339 struct nfsmount
*nmp
= NFSTONMP(np
);
1340 int error
= 0, lockerror
= ENOENT
, status
, nfsvers
, numops
;
1341 u_int64_t xid
, nextxid
;
1342 struct nfsm_chain nmreq
, nmrep
;
1343 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
1344 nfs_stateid stateid
;
1348 nfsvers
= nmp
->nm_vers
;
1350 if (VATTR_IS_ACTIVE(vap
, va_flags
) && (vap
->va_flags
& ~(SF_ARCHIVED
|UF_HIDDEN
))) {
1351 /* we don't support setting unsupported flags (duh!) */
1352 if (vap
->va_active
& ~VNODE_ATTR_va_flags
)
1353 return (EINVAL
); /* return EINVAL if other attributes also set */
1355 return (ENOTSUP
); /* return ENOTSUP for chflags(2) */
1358 nfsm_chain_null(&nmreq
);
1359 nfsm_chain_null(&nmrep
);
1361 // PUTFH, SETATTR, GETATTR
1363 nfsm_chain_build_alloc_init(error
, &nmreq
, 40 * NFSX_UNSIGNED
);
1364 nfsm_chain_add_compound_header(error
, &nmreq
, "setattr", numops
);
1366 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1367 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
1369 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SETATTR
);
1370 if (VATTR_IS_ACTIVE(vap
, va_data_size
))
1371 nfs_get_stateid(np
, vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &stateid
);
1373 stateid
.seqid
= stateid
.other
[0] = stateid
.other
[1] = stateid
.other
[2] = 0;
1374 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
1375 nfsm_chain_add_fattr4(error
, &nmreq
, vap
, nmp
);
1377 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1378 nfsm_chain_add_bitmap_masked(error
, &nmreq
, nfs_getattr_bitmap
,
1379 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
1380 nfsm_chain_build_done(error
, &nmreq
);
1381 nfsm_assert(error
, (numops
== 0), EPROTO
);
1383 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &nmrep
, &xid
, &status
);
1385 if ((lockerror
= nfs_node_lock(np
)))
1387 nfsm_chain_skip_tag(error
, &nmrep
);
1388 nfsm_chain_get_32(error
, &nmrep
, numops
);
1389 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1390 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SETATTR
);
1391 bmlen
= NFS_ATTR_BITMAP_LEN
;
1392 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
1394 nfs_vattr_set_supported(bitmap
, vap
);
1395 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1396 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, NULL
, &xid
);
1398 NATTRINVALIDATE(np
);
1400 * We just changed the attributes and we want to make sure that we
1401 * see the latest attributes. Get the next XID. If it's not the
1402 * next XID after the SETATTR XID, then it's possible that another
1403 * RPC was in flight at the same time and it might put stale attributes
1404 * in the cache. In that case, we invalidate the attributes and set
1405 * the attribute cache XID to guarantee that newer attributes will
1409 nfs_get_xid(&nextxid
);
1410 if (nextxid
!= (xid
+ 1)) {
1411 np
->n_xid
= nextxid
;
1412 NATTRINVALIDATE(np
);
1416 nfs_node_unlock(np
);
1417 nfsm_chain_cleanup(&nmreq
);
1418 nfsm_chain_cleanup(&nmrep
);
1423 * Wait for any pending recovery to complete.
1426 nfs_mount_state_wait_for_recovery(struct nfsmount
*nmp
)
1428 struct timespec ts
= { 1, 0 };
1429 int error
= 0, slpflag
= (nmp
->nm_flag
& NFSMNT_INT
) ? PCATCH
: 0;
1431 lck_mtx_lock(&nmp
->nm_lock
);
1432 while (nmp
->nm_state
& NFSSTA_RECOVER
) {
1433 if ((error
= nfs_sigintr(nmp
, NULL
, current_thread(), 1)))
1435 nfs_mount_sock_thread_wake(nmp
);
1436 msleep(&nmp
->nm_state
, &nmp
->nm_lock
, slpflag
|(PZERO
-1), "nfsrecoverwait", &ts
);
1438 lck_mtx_unlock(&nmp
->nm_lock
);
1444 * We're about to use/manipulate NFS mount's open/lock state.
1445 * Wait for any pending state recovery to complete, then
1446 * mark the state as being in use (which will hold off
1447 * the recovery thread until we're done).
1450 nfs_mount_state_in_use_start(struct nfsmount
*nmp
)
1452 struct timespec ts
= { 1, 0 };
1453 int error
= 0, slpflag
= (nmp
->nm_flag
& NFSMNT_INT
) ? PCATCH
: 0;
1457 lck_mtx_lock(&nmp
->nm_lock
);
1458 while (nmp
->nm_state
& NFSSTA_RECOVER
) {
1459 if ((error
= nfs_sigintr(nmp
, NULL
, current_thread(), 1)))
1461 nfs_mount_sock_thread_wake(nmp
);
1462 msleep(&nmp
->nm_state
, &nmp
->nm_lock
, slpflag
|(PZERO
-1), "nfsrecoverwait", &ts
);
1465 nmp
->nm_stateinuse
++;
1466 lck_mtx_unlock(&nmp
->nm_lock
);
1472 * We're done using/manipulating the NFS mount's open/lock
1473 * state. If the given error indicates that recovery should
1474 * be performed, we'll initiate recovery.
1477 nfs_mount_state_in_use_end(struct nfsmount
*nmp
, int error
)
1479 int restart
= nfs_mount_state_error_should_restart(error
);
1483 lck_mtx_lock(&nmp
->nm_lock
);
1484 if (restart
&& (error
!= NFSERR_OLD_STATEID
) && (error
!= NFSERR_GRACE
)) {
1485 if (!(nmp
->nm_state
& NFSSTA_RECOVER
)) {
1486 printf("nfs_mount_state_in_use_end: error %d, initiating recovery\n", error
);
1487 nmp
->nm_state
|= NFSSTA_RECOVER
;
1488 nfs_mount_sock_thread_wake(nmp
);
1491 if (nmp
->nm_stateinuse
> 0)
1492 nmp
->nm_stateinuse
--;
1494 panic("NFS mount state in use count underrun");
1495 if (!nmp
->nm_stateinuse
&& (nmp
->nm_state
& NFSSTA_RECOVER
))
1496 wakeup(&nmp
->nm_stateinuse
);
1497 lck_mtx_unlock(&nmp
->nm_lock
);
1498 if (error
== NFSERR_GRACE
)
1499 tsleep(&nmp
->nm_state
, (PZERO
-1), "nfsgrace", 2*hz
);
1505 * Does the error mean we should restart/redo a state-related operation?
1508 nfs_mount_state_error_should_restart(int error
)
1511 case NFSERR_STALE_STATEID
:
1512 case NFSERR_STALE_CLIENTID
:
1513 case NFSERR_ADMIN_REVOKED
:
1514 case NFSERR_EXPIRED
:
1515 case NFSERR_OLD_STATEID
:
1516 case NFSERR_BAD_STATEID
:
1524 * In some cases we may want to limit how many times we restart a
1525 * state-related operation - e.g. we're repeatedly getting NFSERR_GRACE.
1526 * Base the limit on the lease (as long as it's not too short).
1529 nfs_mount_state_max_restarts(struct nfsmount
*nmp
)
1531 return (MAX(nmp
->nm_fsattr
.nfsa_lease
, 60));
1536 * Mark an NFS node's open state as busy.
1539 nfs_open_state_set_busy(nfsnode_t np
, vfs_context_t ctx
)
1541 struct nfsmount
*nmp
;
1542 thread_t thd
= vfs_context_thread(ctx
);
1543 struct timespec ts
= {2, 0};
1544 int error
= 0, slpflag
;
1549 slpflag
= (nmp
->nm_flag
& NFSMNT_INT
) ? PCATCH
: 0;
1551 lck_mtx_lock(&np
->n_openlock
);
1552 while (np
->n_openflags
& N_OPENBUSY
) {
1553 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0)))
1555 np
->n_openflags
|= N_OPENWANT
;
1556 msleep(&np
->n_openflags
, &np
->n_openlock
, slpflag
, "nfs_open_state_set_busy", &ts
);
1559 np
->n_openflags
|= N_OPENBUSY
;
1560 lck_mtx_unlock(&np
->n_openlock
);
1566 * Clear an NFS node's open state busy flag and wake up
1567 * anyone wanting it.
1570 nfs_open_state_clear_busy(nfsnode_t np
)
1574 lck_mtx_lock(&np
->n_openlock
);
1575 if (!(np
->n_openflags
& N_OPENBUSY
))
1576 panic("nfs_open_state_clear_busy");
1577 wanted
= (np
->n_openflags
& N_OPENWANT
);
1578 np
->n_openflags
&= ~(N_OPENBUSY
|N_OPENWANT
);
1579 lck_mtx_unlock(&np
->n_openlock
);
1581 wakeup(&np
->n_openflags
);
1585 * Search a mount's open owner list for the owner for this credential.
1586 * If not found and "alloc" is set, then allocate a new one.
1588 struct nfs_open_owner
*
1589 nfs_open_owner_find(struct nfsmount
*nmp
, kauth_cred_t cred
, int alloc
)
1591 uid_t uid
= kauth_cred_getuid(cred
);
1592 struct nfs_open_owner
*noop
, *newnoop
= NULL
;
1595 lck_mtx_lock(&nmp
->nm_lock
);
1596 TAILQ_FOREACH(noop
, &nmp
->nm_open_owners
, noo_link
) {
1597 if (kauth_cred_getuid(noop
->noo_cred
) == uid
)
1601 if (!noop
&& !newnoop
&& alloc
) {
1602 lck_mtx_unlock(&nmp
->nm_lock
);
1603 MALLOC(newnoop
, struct nfs_open_owner
*, sizeof(struct nfs_open_owner
), M_TEMP
, M_WAITOK
);
1606 bzero(newnoop
, sizeof(*newnoop
));
1607 lck_mtx_init(&newnoop
->noo_lock
, nfs_open_grp
, LCK_ATTR_NULL
);
1608 newnoop
->noo_mount
= nmp
;
1609 kauth_cred_ref(cred
);
1610 newnoop
->noo_cred
= cred
;
1611 newnoop
->noo_name
= OSAddAtomic(1, &nfs_open_owner_seqnum
);
1612 TAILQ_INIT(&newnoop
->noo_opens
);
1615 if (!noop
&& newnoop
) {
1616 newnoop
->noo_flags
|= NFS_OPEN_OWNER_LINK
;
1617 TAILQ_INSERT_HEAD(&nmp
->nm_open_owners
, newnoop
, noo_link
);
1620 lck_mtx_unlock(&nmp
->nm_lock
);
1622 if (newnoop
&& (noop
!= newnoop
))
1623 nfs_open_owner_destroy(newnoop
);
1626 nfs_open_owner_ref(noop
);
1632 * destroy an open owner that's no longer needed
1635 nfs_open_owner_destroy(struct nfs_open_owner
*noop
)
1638 kauth_cred_unref(&noop
->noo_cred
);
1639 lck_mtx_destroy(&noop
->noo_lock
, nfs_open_grp
);
1644 * acquire a reference count on an open owner
1647 nfs_open_owner_ref(struct nfs_open_owner
*noop
)
1649 lck_mtx_lock(&noop
->noo_lock
);
1651 lck_mtx_unlock(&noop
->noo_lock
);
1655 * drop a reference count on an open owner and destroy it if
1656 * it is no longer referenced and no longer on the mount's list.
1659 nfs_open_owner_rele(struct nfs_open_owner
*noop
)
1661 lck_mtx_lock(&noop
->noo_lock
);
1662 if (noop
->noo_refcnt
< 1)
1663 panic("nfs_open_owner_rele: no refcnt");
1665 if (!noop
->noo_refcnt
&& (noop
->noo_flags
& NFS_OPEN_OWNER_BUSY
))
1666 panic("nfs_open_owner_rele: busy");
1667 /* XXX we may potentially want to clean up idle/unused open owner structures */
1668 if (noop
->noo_refcnt
|| (noop
->noo_flags
& NFS_OPEN_OWNER_LINK
)) {
1669 lck_mtx_unlock(&noop
->noo_lock
);
1672 /* owner is no longer referenced or linked to mount, so destroy it */
1673 lck_mtx_unlock(&noop
->noo_lock
);
1674 nfs_open_owner_destroy(noop
);
1678 * Mark an open owner as busy because we are about to
1679 * start an operation that uses and updates open owner state.
1682 nfs_open_owner_set_busy(struct nfs_open_owner
*noop
, thread_t thd
)
1684 struct nfsmount
*nmp
;
1685 struct timespec ts
= {2, 0};
1686 int error
= 0, slpflag
;
1688 nmp
= noop
->noo_mount
;
1691 slpflag
= (nmp
->nm_flag
& NFSMNT_INT
) ? PCATCH
: 0;
1693 lck_mtx_lock(&noop
->noo_lock
);
1694 while (noop
->noo_flags
& NFS_OPEN_OWNER_BUSY
) {
1695 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0)))
1697 noop
->noo_flags
|= NFS_OPEN_OWNER_WANT
;
1698 msleep(noop
, &noop
->noo_lock
, slpflag
, "nfs_open_owner_set_busy", &ts
);
1701 noop
->noo_flags
|= NFS_OPEN_OWNER_BUSY
;
1702 lck_mtx_unlock(&noop
->noo_lock
);
1708 * Clear the busy flag on an open owner and wake up anyone waiting
1712 nfs_open_owner_clear_busy(struct nfs_open_owner
*noop
)
1716 lck_mtx_lock(&noop
->noo_lock
);
1717 if (!(noop
->noo_flags
& NFS_OPEN_OWNER_BUSY
))
1718 panic("nfs_open_owner_clear_busy");
1719 wanted
= (noop
->noo_flags
& NFS_OPEN_OWNER_WANT
);
1720 noop
->noo_flags
&= ~(NFS_OPEN_OWNER_BUSY
|NFS_OPEN_OWNER_WANT
);
1721 lck_mtx_unlock(&noop
->noo_lock
);
1727 * Given an open/lock owner and an error code, increment the
1728 * sequence ID if appropriate.
1731 nfs_owner_seqid_increment(struct nfs_open_owner
*noop
, struct nfs_lock_owner
*nlop
, int error
)
1734 case NFSERR_STALE_CLIENTID
:
1735 case NFSERR_STALE_STATEID
:
1736 case NFSERR_OLD_STATEID
:
1737 case NFSERR_BAD_STATEID
:
1738 case NFSERR_BAD_SEQID
:
1740 case NFSERR_RESOURCE
:
1741 case NFSERR_NOFILEHANDLE
:
1742 /* do not increment the open seqid on these errors */
1752 * Search a node's open file list for any conflicts with this request.
1753 * Also find this open owner's open file structure.
1754 * If not found and "alloc" is set, then allocate one.
1759 struct nfs_open_owner
*noop
,
1760 struct nfs_open_file
**nofpp
,
1761 uint32_t accessMode
,
1765 struct nfs_open_file
*nofp
= NULL
, *nofp2
, *newnofp
= NULL
;
1770 lck_mtx_lock(&np
->n_openlock
);
1771 TAILQ_FOREACH(nofp2
, &np
->n_opens
, nof_link
) {
1772 if (nofp2
->nof_owner
== noop
) {
1777 if ((accessMode
& nofp2
->nof_deny
) || (denyMode
& nofp2
->nof_access
)) {
1778 /* This request conflicts with an existing open on this client. */
1779 lck_mtx_unlock(&np
->n_openlock
);
1786 * If this open owner doesn't have an open
1787 * file structure yet, we create one for it.
1789 if (!nofp
&& !newnofp
&& alloc
) {
1790 lck_mtx_unlock(&np
->n_openlock
);
1792 MALLOC(newnofp
, struct nfs_open_file
*, sizeof(struct nfs_open_file
), M_TEMP
, M_WAITOK
);
1797 bzero(newnofp
, sizeof(*newnofp
));
1798 lck_mtx_init(&newnofp
->nof_lock
, nfs_open_grp
, LCK_ATTR_NULL
);
1799 newnofp
->nof_owner
= noop
;
1800 nfs_open_owner_ref(noop
);
1801 newnofp
->nof_np
= np
;
1802 lck_mtx_lock(&noop
->noo_lock
);
1803 TAILQ_INSERT_HEAD(&noop
->noo_opens
, newnofp
, nof_oolink
);
1804 lck_mtx_unlock(&noop
->noo_lock
);
1808 if (!nofp
&& newnofp
) {
1810 TAILQ_INSERT_HEAD(&np
->n_opens
, newnofp
, nof_link
);
1814 lck_mtx_unlock(&np
->n_openlock
);
1816 if (newnofp
&& (nofp
!= newnofp
))
1817 nfs_open_file_destroy(newnofp
);
1820 return (nofp
? 0 : ESRCH
);
1824 * Destroy an open file structure.
1827 nfs_open_file_destroy(struct nfs_open_file
*nofp
)
1829 lck_mtx_lock(&nofp
->nof_owner
->noo_lock
);
1830 TAILQ_REMOVE(&nofp
->nof_owner
->noo_opens
, nofp
, nof_oolink
);
1831 lck_mtx_unlock(&nofp
->nof_owner
->noo_lock
);
1832 nfs_open_owner_rele(nofp
->nof_owner
);
1833 lck_mtx_destroy(&nofp
->nof_lock
, nfs_open_grp
);
1838 * Mark an open file as busy because we are about to
1839 * start an operation that uses and updates open file state.
1842 nfs_open_file_set_busy(struct nfs_open_file
*nofp
, thread_t thd
)
1844 struct nfsmount
*nmp
;
1845 struct timespec ts
= {2, 0};
1846 int error
= 0, slpflag
;
1848 nmp
= nofp
->nof_owner
->noo_mount
;
1851 slpflag
= (nmp
->nm_flag
& NFSMNT_INT
) ? PCATCH
: 0;
1853 lck_mtx_lock(&nofp
->nof_lock
);
1854 while (nofp
->nof_flags
& NFS_OPEN_FILE_BUSY
) {
1855 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0)))
1857 nofp
->nof_flags
|= NFS_OPEN_FILE_WANT
;
1858 msleep(nofp
, &nofp
->nof_lock
, slpflag
, "nfs_open_file_set_busy", &ts
);
1861 nofp
->nof_flags
|= NFS_OPEN_FILE_BUSY
;
1862 lck_mtx_unlock(&nofp
->nof_lock
);
1868 * Clear the busy flag on an open file and wake up anyone waiting
1872 nfs_open_file_clear_busy(struct nfs_open_file
*nofp
)
1876 lck_mtx_lock(&nofp
->nof_lock
);
1877 if (!(nofp
->nof_flags
& NFS_OPEN_FILE_BUSY
))
1878 panic("nfs_open_file_clear_busy");
1879 wanted
= (nofp
->nof_flags
& NFS_OPEN_FILE_WANT
);
1880 nofp
->nof_flags
&= ~(NFS_OPEN_FILE_BUSY
|NFS_OPEN_FILE_WANT
);
1881 lck_mtx_unlock(&nofp
->nof_lock
);
1887 * Get the current (delegation, lock, open, default) stateid for this node.
1888 * If node has a delegation, use that stateid.
1889 * If pid has a lock, use the lockowner's stateid.
1890 * Or use the open file's stateid.
1891 * If no open file, use a default stateid of all ones.
1894 nfs_get_stateid(nfsnode_t np
, thread_t thd
, kauth_cred_t cred
, nfs_stateid
*sid
)
1896 struct nfsmount
*nmp
= NFSTONMP(np
);
1897 proc_t p
= thd
? get_bsdthreadtask_info(thd
) : current_thread(); // XXX async I/O requests don't have a thread
1898 struct nfs_open_owner
*noop
= NULL
;
1899 struct nfs_open_file
*nofp
= NULL
;
1900 struct nfs_lock_owner
*nlop
= NULL
;
1901 nfs_stateid
*s
= NULL
;
1903 if (np
->n_openflags
& N_DELEG_MASK
)
1904 s
= &np
->n_dstateid
;
1906 nlop
= nfs_lock_owner_find(np
, p
, 0);
1907 if (nlop
&& !TAILQ_EMPTY(&nlop
->nlo_locks
)) {
1908 /* we hold locks, use lock stateid */
1909 s
= &nlop
->nlo_stateid
;
1910 } else if (((noop
= nfs_open_owner_find(nmp
, cred
, 0))) &&
1911 (nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 0) == 0) &&
1912 !(nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) &&
1914 /* we (should) have the file open, use open stateid */
1915 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)
1916 nfs4_reopen(nofp
, thd
);
1917 if (!(nofp
->nof_flags
& NFS_OPEN_FILE_LOST
))
1918 s
= &nofp
->nof_stateid
;
1922 sid
->seqid
= s
->seqid
;
1923 sid
->other
[0] = s
->other
[0];
1924 sid
->other
[1] = s
->other
[1];
1925 sid
->other
[2] = s
->other
[2];
1927 const char *vname
= vnode_getname(NFSTOV(np
));
1928 printf("nfs_get_stateid: no stateid for %s\n", vname
? vname
: "???");
1929 vnode_putname(vname
);
1930 sid
->seqid
= sid
->other
[0] = sid
->other
[1] = sid
->other
[2] = 0xffffffff;
1933 nfs_lock_owner_rele(nlop
);
1935 nfs_open_owner_rele(noop
);
1939 * We always send the open RPC even if this open's mode is a subset of all
1940 * the existing opens. This makes sure that we will always be able to do a
1941 * downgrade to any of the open modes.
1943 * Note: local conflicts should have already been checked. (nfs_open_file_find)
1948 struct nfs_open_file
*nofp
,
1949 uint32_t accessMode
,
1953 vnode_t vp
= NFSTOV(np
);
1955 struct componentname cn
;
1956 const char *vname
= NULL
;
1958 char smallname
[128];
1959 char *filename
= NULL
;
1960 int error
= 0, readtoo
= 0;
1962 dvp
= vnode_getparent(vp
);
1963 vname
= vnode_getname(vp
);
1964 if (!dvp
|| !vname
) {
1968 filename
= &smallname
[0];
1969 namelen
= snprintf(filename
, sizeof(smallname
), "%s", vname
);
1970 if (namelen
>= sizeof(smallname
)) {
1971 namelen
++; /* snprintf result doesn't include '\0' */
1972 MALLOC(filename
, char *, namelen
, M_TEMP
, M_WAITOK
);
1977 snprintf(filename
, namelen
, "%s", vname
);
1979 bzero(&cn
, sizeof(cn
));
1980 cn
.cn_nameptr
= filename
;
1981 cn
.cn_namelen
= namelen
;
1983 if (!(accessMode
& NFS_OPEN_SHARE_ACCESS_READ
)) {
1985 * Try to open it for read access too,
1986 * so the buffer cache can read data.
1989 accessMode
|= NFS_OPEN_SHARE_ACCESS_READ
;
1992 error
= nfs4_open_rpc(nofp
, ctx
, &cn
, NULL
, dvp
, &vp
, NFS_OPEN_NOCREATE
, accessMode
, denyMode
);
1994 if (!nfs_mount_state_error_should_restart(error
) && readtoo
) {
1995 /* try again without the extra read access */
1996 accessMode
&= ~NFS_OPEN_SHARE_ACCESS_READ
;
2002 nofp
->nof_access
|= accessMode
;
2003 nofp
->nof_deny
|= denyMode
;
2005 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2006 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
)
2008 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
)
2010 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
)
2012 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2013 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
)
2015 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
)
2017 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
)
2019 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2020 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
)
2022 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
)
2024 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
)
2027 nofp
->nof_opencnt
++;
2029 if (filename
&& (filename
!= &smallname
[0]))
2030 FREE(filename
, M_TEMP
);
2032 vnode_putname(vname
);
2041 struct vnop_open_args
/* {
2042 struct vnodeop_desc *a_desc;
2045 vfs_context_t a_context;
2048 vfs_context_t ctx
= ap
->a_context
;
2049 vnode_t vp
= ap
->a_vp
;
2050 nfsnode_t np
= VTONFS(vp
);
2051 struct nfsmount
*nmp
;
2052 int error
, accessMode
, denyMode
, opened
= 0;
2053 struct nfs_open_owner
*noop
= NULL
;
2054 struct nfs_open_file
*nofp
= NULL
;
2056 if (!(ap
->a_mode
& (FREAD
|FWRITE
)))
2063 /* First, call the common code */
2064 if ((error
= nfs3_vnop_open(ap
)))
2067 if (!vnode_isreg(vp
)) {
2068 /* Just mark that it was opened */
2069 lck_mtx_lock(&np
->n_openlock
);
2071 lck_mtx_unlock(&np
->n_openlock
);
2075 /* mode contains some combination of: FREAD, FWRITE, O_SHLOCK, O_EXLOCK */
2077 if (ap
->a_mode
& FREAD
)
2078 accessMode
|= NFS_OPEN_SHARE_ACCESS_READ
;
2079 if (ap
->a_mode
& FWRITE
)
2080 accessMode
|= NFS_OPEN_SHARE_ACCESS_WRITE
;
2081 if (ap
->a_mode
& O_EXLOCK
)
2082 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
2083 else if (ap
->a_mode
& O_SHLOCK
)
2084 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
2086 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
2088 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1);
2093 error
= nfs_mount_state_in_use_start(nmp
);
2095 nfs_open_owner_rele(noop
);
2099 error
= nfs_open_file_find(np
, noop
, &nofp
, accessMode
, denyMode
, 1);
2100 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
2101 const char *vname
= vnode_getname(NFSTOV(np
));
2102 printf("nfs_vnop_open: LOST %s\n", vname
);
2103 vnode_putname(vname
);
2106 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
2107 nfs_mount_state_in_use_end(nmp
, 0);
2108 nfs4_reopen(nofp
, vfs_context_thread(ctx
));
2113 error
= nfs_open_file_set_busy(nofp
, vfs_context_thread(ctx
));
2120 * If we just created the file and the modes match, then we simply use
2121 * the open performed in the create. Otherwise, send the request.
2123 if ((nofp
->nof_flags
& NFS_OPEN_FILE_CREATE
) &&
2124 (nofp
->nof_creator
== current_thread()) &&
2125 (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) &&
2126 (denyMode
== NFS_OPEN_SHARE_DENY_NONE
)) {
2127 nofp
->nof_flags
&= ~NFS_OPEN_FILE_CREATE
;
2128 nofp
->nof_creator
= NULL
;
2131 error
= nfs4_open(np
, nofp
, accessMode
, denyMode
, ctx
);
2132 if ((error
== EACCES
) && (nofp
->nof_flags
& NFS_OPEN_FILE_CREATE
) &&
2133 (nofp
->nof_creator
== current_thread())) {
2135 * Ugh. This can happen if we just created the file with read-only
2136 * perms and we're trying to open it for real with different modes
2137 * (e.g. write-only or with a deny mode) and the server decides to
2138 * not allow the second open because of the read-only perms.
2139 * The best we can do is to just use the create's open.
2140 * We may have access we don't need or we may not have a requested
2141 * deny mode. We may log complaints later, but we'll try to avoid it.
2143 if (denyMode
!= NFS_OPEN_SHARE_DENY_NONE
) {
2144 const char *vname
= vnode_getname(NFSTOV(np
));
2145 printf("nfs4_vnop_open: deny mode foregone on create, %s\n", vname
);
2146 vnode_putname(vname
);
2148 nofp
->nof_creator
= NULL
;
2155 * If we had just created the file, we already had it open.
2156 * If the actual open mode is less than what we grabbed at
2157 * create time, then we'll downgrade the open here.
2159 if ((nofp
->nof_flags
& NFS_OPEN_FILE_CREATE
) &&
2160 (nofp
->nof_creator
== current_thread())) {
2161 error
= nfs4_close(np
, nofp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_NONE
, ctx
);
2163 const char *vname
= vnode_getname(NFSTOV(np
));
2164 printf("nfs_vnop_open: create close error %d, %s\n", error
, vname
);
2165 vnode_putname(vname
);
2167 if (!nfs_mount_state_error_should_restart(error
)) {
2169 nofp
->nof_flags
&= ~NFS_OPEN_FILE_CREATE
;
2176 nfs_open_file_clear_busy(nofp
);
2177 if (nfs_mount_state_in_use_end(nmp
, error
)) {
2182 nfs_open_owner_rele(noop
);
2184 const char *vname
= vnode_getname(NFSTOV(np
));
2185 printf("nfs_vnop_open: error %d, %s\n", error
, vname
);
2186 vnode_putname(vname
);
2194 struct nfs_open_file
*nofp
,
2195 uint32_t accessMode
,
2199 struct nfs_lock_owner
*nlop
;
2200 int error
= 0, changed
= 0, closed
= 0;
2201 uint32_t newAccessMode
, newDenyMode
;
2203 /* warn if modes don't match current state */
2204 if (((accessMode
& nofp
->nof_access
) != accessMode
) || ((denyMode
& nofp
->nof_deny
) != denyMode
)) {
2205 const char *vname
= vnode_getname(NFSTOV(np
));
2206 printf("nfs4_close: mode mismatch %d %d, current %d %d, %s\n",
2207 accessMode
, denyMode
, nofp
->nof_access
, nofp
->nof_deny
, vname
);
2208 vnode_putname(vname
);
2212 * If we're closing a write-only open, we may not have a write-only count
2213 * if we also grabbed read access. So, check the read-write count.
2215 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2216 if ((accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) &&
2217 (nofp
->nof_w
== 0) && nofp
->nof_rw
)
2218 accessMode
= NFS_OPEN_SHARE_ACCESS_BOTH
;
2219 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2220 if ((accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) &&
2221 (nofp
->nof_w_dw
== 0) && nofp
->nof_rw_dw
)
2222 accessMode
= NFS_OPEN_SHARE_ACCESS_BOTH
;
2223 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2224 if ((accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) &&
2225 (nofp
->nof_w_drw
== 0) && nofp
->nof_rw_drw
)
2226 accessMode
= NFS_OPEN_SHARE_ACCESS_BOTH
;
2230 * Calculate new modes: a mode bit gets removed when there's only
2231 * one count in all the corresponding counts
2233 newAccessMode
= nofp
->nof_access
;
2234 newDenyMode
= nofp
->nof_deny
;
2235 if ((accessMode
& NFS_OPEN_SHARE_ACCESS_READ
) &&
2236 (newAccessMode
& NFS_OPEN_SHARE_ACCESS_READ
) &&
2237 ((nofp
->nof_r
+ nofp
->nof_rw
+ nofp
->nof_r_dw
+
2238 nofp
->nof_rw_dw
+ nofp
->nof_r_drw
+ nofp
->nof_rw_dw
) == 1)) {
2239 newAccessMode
&= ~NFS_OPEN_SHARE_ACCESS_READ
;
2242 if ((accessMode
& NFS_OPEN_SHARE_ACCESS_WRITE
) &&
2243 (newAccessMode
& NFS_OPEN_SHARE_ACCESS_WRITE
) &&
2244 ((nofp
->nof_w
+ nofp
->nof_rw
+ nofp
->nof_w_dw
+
2245 nofp
->nof_rw_dw
+ nofp
->nof_w_drw
+ nofp
->nof_rw_dw
) == 1)) {
2246 newAccessMode
&= ~NFS_OPEN_SHARE_ACCESS_WRITE
;
2249 if ((denyMode
& NFS_OPEN_SHARE_DENY_READ
) &&
2250 (newDenyMode
& NFS_OPEN_SHARE_DENY_READ
) &&
2251 ((nofp
->nof_r_drw
+ nofp
->nof_w_drw
+ nofp
->nof_rw_drw
) == 1)) {
2252 newDenyMode
&= ~NFS_OPEN_SHARE_DENY_READ
;
2255 if ((denyMode
& NFS_OPEN_SHARE_DENY_WRITE
) &&
2256 (newDenyMode
& NFS_OPEN_SHARE_DENY_WRITE
) &&
2257 ((nofp
->nof_r_drw
+ nofp
->nof_w_drw
+ nofp
->nof_rw_drw
+
2258 nofp
->nof_r_dw
+ nofp
->nof_w_dw
+ nofp
->nof_rw_dw
) == 1)) {
2259 newDenyMode
&= ~NFS_OPEN_SHARE_DENY_WRITE
;
2264 if ((newAccessMode
== 0) || (nofp
->nof_opencnt
== 1)) {
2266 * No more access after this close, so clean up and close it.
2269 if (!(nofp
->nof_flags
& NFS_OPEN_FILE_LOST
))
2270 error
= nfs4_close_rpc(np
, nofp
, vfs_context_thread(ctx
), vfs_context_ucred(ctx
), 0);
2271 if (error
== NFSERR_LOCKS_HELD
) {
2273 * Hmm... the server says we have locks we need to release first
2274 * Find the lock owner and try to unlock everything.
2276 nlop
= nfs_lock_owner_find(np
, vfs_context_proc(ctx
), 0);
2278 nfs4_unlock_rpc(np
, nlop
, F_WRLCK
, 0, UINT64_MAX
, ctx
);
2279 nfs_lock_owner_rele(nlop
);
2281 error
= nfs4_close_rpc(np
, nofp
, vfs_context_thread(ctx
), vfs_context_ucred(ctx
), 0);
2283 } else if (changed
) {
2285 * File is still open but with less access, so downgrade the open.
2287 if (!(nofp
->nof_flags
& NFS_OPEN_FILE_LOST
))
2288 error
= nfs4_open_downgrade_rpc(np
, nofp
, ctx
);
2292 const char *vname
= vnode_getname(NFSTOV(np
));
2293 printf("nfs4_close: error %d, %s\n", error
, vname
);
2294 vnode_putname(vname
);
2298 /* Decrement the corresponding open access/deny mode counter. */
2299 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2300 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2301 if (nofp
->nof_r
== 0)
2302 printf("nfs4_close: open(R) count underrun\n");
2305 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2306 if (nofp
->nof_w
== 0)
2307 printf("nfs4_close: open(W) count underrun\n");
2310 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2311 if (nofp
->nof_rw
== 0)
2312 printf("nfs4_close: open(RW) count underrun\n");
2316 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2317 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2318 if (nofp
->nof_r_dw
== 0)
2319 printf("nfs4_close: open(R,DW) count underrun\n");
2322 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2323 if (nofp
->nof_w_dw
== 0)
2324 printf("nfs4_close: open(W,DW) count underrun\n");
2327 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2328 if (nofp
->nof_rw_dw
== 0)
2329 printf("nfs4_close: open(RW,DW) count underrun\n");
2333 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2334 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2335 if (nofp
->nof_r_drw
== 0)
2336 printf("nfs4_close: open(R,DRW) count underrun\n");
2339 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2340 if (nofp
->nof_w_drw
== 0)
2341 printf("nfs4_close: open(W,DRW) count underrun\n");
2344 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2345 if (nofp
->nof_rw_drw
== 0)
2346 printf("nfs4_close: open(RW,DRW) count underrun\n");
2351 /* update the modes */
2352 nofp
->nof_access
= newAccessMode
;
2353 nofp
->nof_deny
= newDenyMode
;
2355 if (nofp
->nof_r
|| nofp
->nof_w
||
2356 (nofp
->nof_rw
&& !((nofp
->nof_flags
& NFS_OPEN_FILE_CREATE
) && !nofp
->nof_creator
&& (nofp
->nof_rw
== 1))) ||
2357 nofp
->nof_r_dw
|| nofp
->nof_w_dw
|| nofp
->nof_rw_dw
||
2358 nofp
->nof_r_drw
|| nofp
->nof_w_drw
|| nofp
->nof_rw_drw
)
2359 printf("nfs4_close: unexpected count: %u %u %u dw %u %u %u drw %u %u %u flags 0x%x\n",
2360 nofp
->nof_r
, nofp
->nof_w
, nofp
->nof_rw
,
2361 nofp
->nof_r_dw
, nofp
->nof_w_dw
, nofp
->nof_rw_dw
,
2362 nofp
->nof_r_drw
, nofp
->nof_w_drw
, nofp
->nof_rw_drw
,
2364 /* clear out all open info, just to be safe */
2365 nofp
->nof_access
= nofp
->nof_deny
= 0;
2366 nofp
->nof_mmap_access
= nofp
->nof_mmap_deny
= 0;
2367 nofp
->nof_r
= nofp
->nof_w
= nofp
->nof_rw
= 0;
2368 nofp
->nof_r_dw
= nofp
->nof_w_dw
= nofp
->nof_rw_dw
= 0;
2369 nofp
->nof_r_drw
= nofp
->nof_w_drw
= nofp
->nof_rw_drw
= 0;
2370 nofp
->nof_flags
&= ~NFS_OPEN_FILE_CREATE
;
2371 /* XXX we may potentially want to clean up idle/unused open file structures */
2373 nofp
->nof_opencnt
--;
2374 if (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) {
2376 if (!nofp
->nof_opencnt
)
2377 nofp
->nof_flags
&= ~NFS_OPEN_FILE_LOST
;
2378 const char *vname
= vnode_getname(NFSTOV(np
));
2379 printf("nfs_close: LOST%s, %s\n", !(nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) ? " (last)" : "", vname
);
2380 vnode_putname(vname
);
2387 struct vnop_close_args
/* {
2388 struct vnodeop_desc *a_desc;
2391 vfs_context_t a_context;
2394 vfs_context_t ctx
= ap
->a_context
;
2395 vnode_t vp
= ap
->a_vp
;
2396 int fflag
= ap
->a_fflag
;
2397 int error
, common_error
, accessMode
, denyMode
;
2398 nfsnode_t np
= VTONFS(vp
);
2399 struct nfsmount
*nmp
;
2400 struct nfs_open_owner
*noop
= NULL
;
2401 struct nfs_open_file
*nofp
= NULL
;
2407 /* First, call the common code */
2408 common_error
= nfs3_vnop_close(ap
);
2410 if (!vnode_isreg(vp
)) {
2411 /* Just mark that it was closed */
2412 lck_mtx_lock(&np
->n_openlock
);
2414 lck_mtx_unlock(&np
->n_openlock
);
2415 return (common_error
);
2418 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 0);
2420 printf("nfs4_vnop_close: can't get open owner!\n");
2425 error
= nfs_mount_state_in_use_start(nmp
);
2427 nfs_open_owner_rele(noop
);
2431 error
= nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 0);
2432 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
2433 nfs_mount_state_in_use_end(nmp
, 0);
2434 nfs4_reopen(nofp
, vfs_context_thread(ctx
));
2439 const char *vname
= vnode_getname(NFSTOV(np
));
2440 printf("nfs4_vnop_close: no open file for owner %d, %s\n", error
, vname
);
2441 vnode_putname(vname
);
2445 error
= nfs_open_file_set_busy(nofp
, vfs_context_thread(ctx
));
2451 /* fflag contains some combination of: FREAD, FWRITE, FHASLOCK */
2454 accessMode
|= NFS_OPEN_SHARE_ACCESS_READ
;
2456 accessMode
|= NFS_OPEN_SHARE_ACCESS_WRITE
;
2457 // XXX It would be nice if we still had the O_EXLOCK/O_SHLOCK flags that were on the open
2458 // if (fflag & O_EXLOCK)
2459 // denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2460 // else if (fflag & O_SHLOCK)
2461 // denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2463 // denyMode = NFS_OPEN_SHARE_DENY_NONE;
2464 if (fflag
& FHASLOCK
) {
2465 /* XXX assume FHASLOCK is for the deny mode and not flock */
2466 /* FHASLOCK flock will be unlocked in the close path, but the flag is not cleared. */
2467 if (nofp
->nof_deny
& NFS_OPEN_SHARE_DENY_READ
)
2468 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
2469 else if (nofp
->nof_deny
& NFS_OPEN_SHARE_DENY_WRITE
)
2470 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
2472 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
2474 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
2482 error
= nfs4_close(np
, nofp
, accessMode
, denyMode
, ctx
);
2484 const char *vname
= vnode_getname(NFSTOV(np
));
2485 printf("nfs_vnop_close: close error %d, %s\n", error
, vname
);
2486 vnode_putname(vname
);
2491 nfs_open_file_clear_busy(nofp
);
2492 if (nfs_mount_state_in_use_end(nmp
, error
)) {
2497 nfs_open_owner_rele(noop
);
2499 const char *vname
= vnode_getname(NFSTOV(np
));
2500 printf("nfs_vnop_close: error %d, %s\n", error
, vname
);
2501 vnode_putname(vname
);
2504 error
= common_error
;
2510 struct vnop_mmap_args
/* {
2511 struct vnodeop_desc *a_desc;
2514 vfs_context_t a_context;
2517 vfs_context_t ctx
= ap
->a_context
;
2518 vnode_t vp
= ap
->a_vp
;
2519 nfsnode_t np
= VTONFS(vp
);
2520 int error
= 0, accessMode
, denyMode
;
2521 struct nfsmount
*nmp
;
2522 struct nfs_open_owner
*noop
= NULL
;
2523 struct nfs_open_file
*nofp
= NULL
;
2529 if (!vnode_isreg(vp
) || !(ap
->a_fflags
& (PROT_READ
|PROT_WRITE
)))
2533 * fflags contains some combination of: PROT_READ, PROT_WRITE
2534 * Since it's not possible to mmap() without having the file open for reading,
2535 * read access is always there (regardless if PROT_READ is not set).
2537 accessMode
= NFS_OPEN_SHARE_ACCESS_READ
;
2538 if (ap
->a_fflags
& PROT_WRITE
)
2539 accessMode
|= NFS_OPEN_SHARE_ACCESS_WRITE
;
2540 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
2542 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 0);
2544 printf("nfs4_vnop_mmap: no open owner\n");
2549 error
= nfs_mount_state_in_use_start(nmp
);
2551 nfs_open_owner_rele(noop
);
2555 error
= nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 1);
2556 if (error
|| (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
))) {
2557 printf("nfs4_vnop_mmap: no open file for owner %d\n", error
);
2560 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
2561 nfs_mount_state_in_use_end(nmp
, 0);
2562 nfs4_reopen(nofp
, vfs_context_thread(ctx
));
2567 error
= nfs_open_file_set_busy(nofp
, vfs_context_thread(ctx
));
2574 * The open reference for mmap must mirror an existing open because
2575 * we may need to reclaim it after the file is closed.
2576 * So grab another open count matching the accessMode passed in.
2577 * If we already had an mmap open, prefer read/write without deny mode.
2578 * This means we may have to drop the current mmap open first.
2581 /* determine deny mode for open */
2582 if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2584 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
2585 else if (nofp
->nof_rw_dw
)
2586 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
2587 else if (nofp
->nof_rw_drw
)
2588 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
2591 } else { /* NFS_OPEN_SHARE_ACCESS_READ */
2593 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
2594 else if (nofp
->nof_r_dw
)
2595 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
2596 else if (nofp
->nof_r_drw
)
2597 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
2601 if (error
) /* mmap mode without proper open mode */
2605 * If the existing mmap access is more than the new access OR the
2606 * existing access is the same and the existing deny mode is less,
2607 * then we'll stick with the existing mmap open mode.
2609 if ((nofp
->nof_mmap_access
> accessMode
) ||
2610 ((nofp
->nof_mmap_access
== accessMode
) && (nofp
->nof_mmap_deny
<= denyMode
)))
2613 /* update mmap open mode */
2614 if (nofp
->nof_mmap_access
) {
2615 error
= nfs4_close(np
, nofp
, nofp
->nof_mmap_access
, nofp
->nof_mmap_deny
, ctx
);
2617 if (!nfs_mount_state_error_should_restart(error
))
2618 printf("nfs_vnop_mmap: close of previous mmap mode failed: %d\n", error
);
2619 const char *vname
= vnode_getname(NFSTOV(np
));
2620 printf("nfs_vnop_mmap: update, close error %d, %s\n", error
, vname
);
2621 vnode_putname(vname
);
2624 nofp
->nof_mmap_access
= nofp
->nof_mmap_deny
= 0;
2627 if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2628 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
)
2630 else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
)
2632 else /* NFS_OPEN_SHARE_DENY_BOTH */
2634 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2635 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
)
2637 else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
)
2639 else /* NFS_OPEN_SHARE_DENY_BOTH */
2642 nofp
->nof_mmap_access
= accessMode
;
2643 nofp
->nof_mmap_deny
= denyMode
;
2644 nofp
->nof_opencnt
++;
2648 nfs_open_file_clear_busy(nofp
);
2649 if (nfs_mount_state_in_use_end(nmp
, error
)) {
2654 nfs_open_owner_rele(noop
);
2661 struct vnop_mnomap_args
/* {
2662 struct vnodeop_desc *a_desc;
2664 vfs_context_t a_context;
2667 vfs_context_t ctx
= ap
->a_context
;
2668 vnode_t vp
= ap
->a_vp
;
2669 nfsnode_t np
= VTONFS(vp
);
2670 struct nfsmount
*nmp
;
2671 struct nfs_open_file
*nofp
= NULL
;
2678 /* walk all open files and close all mmap opens */
2680 error
= nfs_mount_state_in_use_start(nmp
);
2683 lck_mtx_lock(&np
->n_openlock
);
2684 TAILQ_FOREACH(nofp
, &np
->n_opens
, nof_link
) {
2685 if (!nofp
->nof_mmap_access
)
2687 lck_mtx_unlock(&np
->n_openlock
);
2688 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
2689 nfs_mount_state_in_use_end(nmp
, 0);
2690 nfs4_reopen(nofp
, vfs_context_thread(ctx
));
2693 error
= nfs_open_file_set_busy(nofp
, vfs_context_thread(ctx
));
2695 lck_mtx_lock(&np
->n_openlock
);
2698 if (nofp
->nof_mmap_access
) {
2699 error
= nfs4_close(np
, nofp
, nofp
->nof_mmap_access
, nofp
->nof_mmap_deny
, ctx
);
2700 if (!nfs_mount_state_error_should_restart(error
)) {
2701 if (error
) /* not a state-operation-restarting error, so just clear the access */
2702 printf("nfs_vnop_mnomap: close of mmap mode failed: %d\n", error
);
2703 nofp
->nof_mmap_access
= nofp
->nof_mmap_deny
= 0;
2706 const char *vname
= vnode_getname(NFSTOV(np
));
2707 printf("nfs_vnop_mnomap: error %d, %s\n", error
, vname
);
2708 vnode_putname(vname
);
2711 nfs_open_file_clear_busy(nofp
);
2712 nfs_mount_state_in_use_end(nmp
, error
);
2715 lck_mtx_unlock(&np
->n_openlock
);
2716 nfs_mount_state_in_use_end(nmp
, error
);
2721 * Search a node's lock owner list for the owner for this process.
2722 * If not found and "alloc" is set, then allocate a new one.
2724 struct nfs_lock_owner
*
2725 nfs_lock_owner_find(nfsnode_t np
, proc_t p
, int alloc
)
2727 pid_t pid
= proc_pid(p
);
2728 struct nfs_lock_owner
*nlop
, *newnlop
= NULL
;
2731 lck_mtx_lock(&np
->n_openlock
);
2732 TAILQ_FOREACH(nlop
, &np
->n_lock_owners
, nlo_link
) {
2733 if (nlop
->nlo_pid
!= pid
)
2735 if (timevalcmp(&nlop
->nlo_pid_start
, &p
->p_start
, ==))
2737 /* stale lock owner... reuse it if we can */
2738 if (nlop
->nlo_refcnt
) {
2739 TAILQ_REMOVE(&np
->n_lock_owners
, nlop
, nlo_link
);
2740 nlop
->nlo_flags
&= ~NFS_LOCK_OWNER_LINK
;
2741 lck_mtx_unlock(&np
->n_openlock
);
2744 nlop
->nlo_pid_start
= p
->p_start
;
2745 nlop
->nlo_seqid
= 0;
2746 nlop
->nlo_stategenid
= 0;
2750 if (!nlop
&& !newnlop
&& alloc
) {
2751 lck_mtx_unlock(&np
->n_openlock
);
2752 MALLOC(newnlop
, struct nfs_lock_owner
*, sizeof(struct nfs_lock_owner
), M_TEMP
, M_WAITOK
);
2755 bzero(newnlop
, sizeof(*newnlop
));
2756 lck_mtx_init(&newnlop
->nlo_lock
, nfs_open_grp
, LCK_ATTR_NULL
);
2757 newnlop
->nlo_pid
= pid
;
2758 newnlop
->nlo_pid_start
= p
->p_start
;
2759 newnlop
->nlo_name
= OSAddAtomic(1, &nfs_lock_owner_seqnum
);
2760 TAILQ_INIT(&newnlop
->nlo_locks
);
2763 if (!nlop
&& newnlop
) {
2764 newnlop
->nlo_flags
|= NFS_LOCK_OWNER_LINK
;
2765 TAILQ_INSERT_HEAD(&np
->n_lock_owners
, newnlop
, nlo_link
);
2768 lck_mtx_unlock(&np
->n_openlock
);
2770 if (newnlop
&& (nlop
!= newnlop
))
2771 nfs_lock_owner_destroy(newnlop
);
2774 nfs_lock_owner_ref(nlop
);
2780 * destroy a lock owner that's no longer needed
2783 nfs_lock_owner_destroy(struct nfs_lock_owner
*nlop
)
2785 if (nlop
->nlo_open_owner
) {
2786 nfs_open_owner_rele(nlop
->nlo_open_owner
);
2787 nlop
->nlo_open_owner
= NULL
;
2789 lck_mtx_destroy(&nlop
->nlo_lock
, nfs_open_grp
);
2794 * acquire a reference count on a lock owner
2797 nfs_lock_owner_ref(struct nfs_lock_owner
*nlop
)
2799 lck_mtx_lock(&nlop
->nlo_lock
);
2801 lck_mtx_unlock(&nlop
->nlo_lock
);
2805 * drop a reference count on a lock owner and destroy it if
2806 * it is no longer referenced and no longer on the mount's list.
2809 nfs_lock_owner_rele(struct nfs_lock_owner
*nlop
)
2811 lck_mtx_lock(&nlop
->nlo_lock
);
2812 if (nlop
->nlo_refcnt
< 1)
2813 panic("nfs_lock_owner_rele: no refcnt");
2815 if (!nlop
->nlo_refcnt
&& (nlop
->nlo_flags
& NFS_LOCK_OWNER_BUSY
))
2816 panic("nfs_lock_owner_rele: busy");
2817 /* XXX we may potentially want to clean up idle/unused lock owner structures */
2818 if (nlop
->nlo_refcnt
|| (nlop
->nlo_flags
& NFS_LOCK_OWNER_LINK
)) {
2819 lck_mtx_unlock(&nlop
->nlo_lock
);
2822 /* owner is no longer referenced or linked to mount, so destroy it */
2823 lck_mtx_unlock(&nlop
->nlo_lock
);
2824 nfs_lock_owner_destroy(nlop
);
2828 * Mark a lock owner as busy because we are about to
2829 * start an operation that uses and updates lock owner state.
2832 nfs_lock_owner_set_busy(struct nfs_lock_owner
*nlop
, thread_t thd
)
2834 struct nfsmount
*nmp
;
2835 struct timespec ts
= {2, 0};
2836 int error
= 0, slpflag
;
2838 nmp
= nlop
->nlo_open_owner
->noo_mount
;
2841 slpflag
= (nmp
->nm_flag
& NFSMNT_INT
) ? PCATCH
: 0;
2843 lck_mtx_lock(&nlop
->nlo_lock
);
2844 while (nlop
->nlo_flags
& NFS_LOCK_OWNER_BUSY
) {
2845 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0)))
2847 nlop
->nlo_flags
|= NFS_LOCK_OWNER_WANT
;
2848 msleep(nlop
, &nlop
->nlo_lock
, slpflag
, "nfs_lock_owner_set_busy", &ts
);
2851 nlop
->nlo_flags
|= NFS_LOCK_OWNER_BUSY
;
2852 lck_mtx_unlock(&nlop
->nlo_lock
);
2858 * Clear the busy flag on a lock owner and wake up anyone waiting
2862 nfs_lock_owner_clear_busy(struct nfs_lock_owner
*nlop
)
2866 lck_mtx_lock(&nlop
->nlo_lock
);
2867 if (!(nlop
->nlo_flags
& NFS_LOCK_OWNER_BUSY
))
2868 panic("nfs_lock_owner_clear_busy");
2869 wanted
= (nlop
->nlo_flags
& NFS_LOCK_OWNER_WANT
);
2870 nlop
->nlo_flags
&= ~(NFS_LOCK_OWNER_BUSY
|NFS_LOCK_OWNER_WANT
);
2871 lck_mtx_unlock(&nlop
->nlo_lock
);
2877 * Insert a held lock into a lock owner's sorted list.
2878 * (flock locks are always inserted at the head the list)
2881 nfs_lock_owner_insert_held_lock(struct nfs_lock_owner
*nlop
, struct nfs_file_lock
*newnflp
)
2883 struct nfs_file_lock
*nflp
;
2885 /* insert new lock in lock owner's held lock list */
2886 lck_mtx_lock(&nlop
->nlo_lock
);
2887 if ((newnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_FLOCK
) {
2888 TAILQ_INSERT_HEAD(&nlop
->nlo_locks
, newnflp
, nfl_lolink
);
2890 TAILQ_FOREACH(nflp
, &nlop
->nlo_locks
, nfl_lolink
) {
2891 if (newnflp
->nfl_start
< nflp
->nfl_start
)
2895 TAILQ_INSERT_BEFORE(nflp
, newnflp
, nfl_lolink
);
2897 TAILQ_INSERT_TAIL(&nlop
->nlo_locks
, newnflp
, nfl_lolink
);
2899 lck_mtx_unlock(&nlop
->nlo_lock
);
2903 * Get a file lock structure for this lock owner.
2905 struct nfs_file_lock
*
2906 nfs_file_lock_alloc(struct nfs_lock_owner
*nlop
)
2908 struct nfs_file_lock
*nflp
= NULL
;
2910 lck_mtx_lock(&nlop
->nlo_lock
);
2911 if (!nlop
->nlo_alock
.nfl_owner
) {
2912 nflp
= &nlop
->nlo_alock
;
2913 nflp
->nfl_owner
= nlop
;
2915 lck_mtx_unlock(&nlop
->nlo_lock
);
2917 MALLOC(nflp
, struct nfs_file_lock
*, sizeof(struct nfs_file_lock
), M_TEMP
, M_WAITOK
);
2920 bzero(nflp
, sizeof(*nflp
));
2921 nflp
->nfl_flags
|= NFS_FILE_LOCK_ALLOC
;
2922 nflp
->nfl_owner
= nlop
;
2924 nfs_lock_owner_ref(nlop
);
2929 * destroy the given NFS file lock structure
2932 nfs_file_lock_destroy(struct nfs_file_lock
*nflp
)
2934 struct nfs_lock_owner
*nlop
= nflp
->nfl_owner
;
2936 if (nflp
->nfl_flags
& NFS_FILE_LOCK_ALLOC
) {
2937 nflp
->nfl_owner
= NULL
;
2940 lck_mtx_lock(&nlop
->nlo_lock
);
2941 bzero(nflp
, sizeof(nflp
));
2942 lck_mtx_unlock(&nlop
->nlo_lock
);
2944 nfs_lock_owner_rele(nlop
);
2948 * Check if one file lock conflicts with another.
2949 * (nflp1 is the new lock. nflp2 is the existing lock.)
2952 nfs_file_lock_conflict(struct nfs_file_lock
*nflp1
, struct nfs_file_lock
*nflp2
, int *willsplit
)
2954 /* no conflict if lock is dead */
2955 if ((nflp1
->nfl_flags
& NFS_FILE_LOCK_DEAD
) || (nflp2
->nfl_flags
& NFS_FILE_LOCK_DEAD
))
2957 /* no conflict if it's ours - unless the lock style doesn't match */
2958 if ((nflp1
->nfl_owner
== nflp2
->nfl_owner
) &&
2959 ((nflp1
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == (nflp2
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
))) {
2960 if (willsplit
&& (nflp1
->nfl_type
!= nflp2
->nfl_type
) &&
2961 (nflp1
->nfl_start
> nflp2
->nfl_start
) &&
2962 (nflp1
->nfl_end
< nflp2
->nfl_end
))
2966 /* no conflict if ranges don't overlap */
2967 if ((nflp1
->nfl_start
> nflp2
->nfl_end
) || (nflp1
->nfl_end
< nflp2
->nfl_start
))
2969 /* no conflict if neither lock is exclusive */
2970 if ((nflp1
->nfl_type
!= F_WRLCK
) && (nflp2
->nfl_type
!= F_WRLCK
))
2977 * Send an NFSv4 LOCK RPC to the server.
2982 struct nfs_open_file
*nofp
,
2983 struct nfs_file_lock
*nflp
,
2988 struct nfs_lock_owner
*nlop
= nflp
->nfl_owner
;
2989 struct nfsmount
*nmp
;
2990 struct nfsm_chain nmreq
, nmrep
;
2993 int error
= 0, lockerror
= ENOENT
, newlocker
, numops
, status
;
2999 newlocker
= (nlop
->nlo_stategenid
!= nmp
->nm_stategenid
);
3000 locktype
= (nflp
->nfl_flags
& NFS_FILE_LOCK_WAIT
) ?
3001 ((nflp
->nfl_type
== F_WRLCK
) ?
3002 NFS_LOCK_TYPE_WRITEW
:
3003 NFS_LOCK_TYPE_READW
) :
3004 ((nflp
->nfl_type
== F_WRLCK
) ?
3005 NFS_LOCK_TYPE_WRITE
:
3006 NFS_LOCK_TYPE_READ
);
3008 error
= nfs_open_file_set_busy(nofp
, thd
);
3011 error
= nfs_open_owner_set_busy(nofp
->nof_owner
, thd
);
3013 nfs_open_file_clear_busy(nofp
);
3016 if (!nlop
->nlo_open_owner
) {
3017 nfs_open_owner_ref(nofp
->nof_owner
);
3018 nlop
->nlo_open_owner
= nofp
->nof_owner
;
3021 error
= nfs_lock_owner_set_busy(nlop
, thd
);
3024 nfs_open_owner_clear_busy(nofp
->nof_owner
);
3025 nfs_open_file_clear_busy(nofp
);
3030 nfsm_chain_null(&nmreq
);
3031 nfsm_chain_null(&nmrep
);
3033 // PUTFH, GETATTR, LOCK
3035 nfsm_chain_build_alloc_init(error
, &nmreq
, 33 * NFSX_UNSIGNED
);
3036 nfsm_chain_add_compound_header(error
, &nmreq
, "lock", numops
);
3038 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
3039 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
3041 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
3042 nfsm_chain_add_bitmap_masked(error
, &nmreq
, nfs_getattr_bitmap
,
3043 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
3045 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOCK
);
3046 nfsm_chain_add_32(error
, &nmreq
, locktype
);
3047 nfsm_chain_add_32(error
, &nmreq
, reclaim
);
3048 nfsm_chain_add_64(error
, &nmreq
, nflp
->nfl_start
);
3049 nfsm_chain_add_64(error
, &nmreq
, NFS_LOCK_LENGTH(nflp
->nfl_start
, nflp
->nfl_end
));
3050 nfsm_chain_add_32(error
, &nmreq
, newlocker
);
3052 nfsm_chain_add_32(error
, &nmreq
, nofp
->nof_owner
->noo_seqid
);
3053 nfsm_chain_add_stateid(error
, &nmreq
, &nofp
->nof_stateid
);
3054 nfsm_chain_add_32(error
, &nmreq
, nlop
->nlo_seqid
);
3055 nfsm_chain_add_lock_owner4(error
, &nmreq
, nmp
, nlop
);
3057 nfsm_chain_add_stateid(error
, &nmreq
, &nlop
->nlo_stateid
);
3058 nfsm_chain_add_32(error
, &nmreq
, nlop
->nlo_seqid
);
3060 nfsm_chain_build_done(error
, &nmreq
);
3061 nfsm_assert(error
, (numops
== 0), EPROTO
);
3064 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, (reclaim
? R_RECOVER
: 0), &nmrep
, &xid
, &status
);
3066 if ((lockerror
= nfs_node_lock(np
)))
3068 nfsm_chain_skip_tag(error
, &nmrep
);
3069 nfsm_chain_get_32(error
, &nmrep
, numops
);
3070 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
3072 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
3073 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, NULL
, &xid
);
3075 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOCK
);
3076 nfs_owner_seqid_increment(newlocker
? nofp
->nof_owner
: NULL
, nlop
, error
);
3077 nfsm_chain_get_stateid(error
, &nmrep
, &nlop
->nlo_stateid
);
3079 /* Update the lock owner's stategenid once it appears the server has state for it. */
3080 /* We determine this by noting the request was successful (we got a stateid). */
3081 if (newlocker
&& !error
)
3082 nlop
->nlo_stategenid
= nmp
->nm_stategenid
;
3085 nfs_node_unlock(np
);
3086 nfs_lock_owner_clear_busy(nlop
);
3088 nfs_open_owner_clear_busy(nofp
->nof_owner
);
3089 nfs_open_file_clear_busy(nofp
);
3091 nfsm_chain_cleanup(&nmreq
);
3092 nfsm_chain_cleanup(&nmrep
);
3097 * Send an NFSv4 LOCKU RPC to the server.
3102 struct nfs_lock_owner
*nlop
,
3108 struct nfsmount
*nmp
;
3109 struct nfsm_chain nmreq
, nmrep
;
3111 int error
= 0, lockerror
= ENOENT
, numops
, status
;
3117 error
= nfs_lock_owner_set_busy(nlop
, vfs_context_thread(ctx
));
3121 nfsm_chain_null(&nmreq
);
3122 nfsm_chain_null(&nmrep
);
3124 // PUTFH, GETATTR, LOCKU
3126 nfsm_chain_build_alloc_init(error
, &nmreq
, 26 * NFSX_UNSIGNED
);
3127 nfsm_chain_add_compound_header(error
, &nmreq
, "unlock", numops
);
3129 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
3130 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
3132 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
3133 nfsm_chain_add_bitmap_masked(error
, &nmreq
, nfs_getattr_bitmap
,
3134 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
3136 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOCKU
);
3137 nfsm_chain_add_32(error
, &nmreq
, (type
== F_WRLCK
) ? NFS_LOCK_TYPE_WRITE
: NFS_LOCK_TYPE_READ
);
3138 nfsm_chain_add_32(error
, &nmreq
, nlop
->nlo_seqid
);
3139 nfsm_chain_add_stateid(error
, &nmreq
, &nlop
->nlo_stateid
);
3140 nfsm_chain_add_64(error
, &nmreq
, start
);
3141 nfsm_chain_add_64(error
, &nmreq
, NFS_LOCK_LENGTH(start
, end
));
3142 nfsm_chain_build_done(error
, &nmreq
);
3143 nfsm_assert(error
, (numops
== 0), EPROTO
);
3146 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &nmrep
, &xid
, &status
);
3148 if ((lockerror
= nfs_node_lock(np
)))
3150 nfsm_chain_skip_tag(error
, &nmrep
);
3151 nfsm_chain_get_32(error
, &nmrep
, numops
);
3152 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
3154 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
3155 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, NULL
, &xid
);
3157 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOCKU
);
3158 nfs_owner_seqid_increment(NULL
, nlop
, error
);
3159 nfsm_chain_get_stateid(error
, &nmrep
, &nlop
->nlo_stateid
);
3162 nfs_node_unlock(np
);
3163 nfs_lock_owner_clear_busy(nlop
);
3164 nfsm_chain_cleanup(&nmreq
);
3165 nfsm_chain_cleanup(&nmrep
);
3170 * Check for any conflicts with the given lock.
3172 * Checking for a lock doesn't require the file to be opened.
3173 * So we skip all the open owner, open file, lock owner work
3174 * and just check for a conflicting lock.
3179 struct nfs_lock_owner
*nlop
,
3185 struct nfsmount
*nmp
;
3186 struct nfs_file_lock
*nflp
;
3187 struct nfsm_chain nmreq
, nmrep
;
3188 uint64_t xid
, val64
= 0;
3190 int error
= 0, lockerror
= ENOENT
, numops
, status
;
3196 lck_mtx_lock(&np
->n_openlock
);
3197 /* scan currently held locks for conflict */
3198 TAILQ_FOREACH(nflp
, &np
->n_locks
, nfl_link
) {
3199 if (nflp
->nfl_flags
& NFS_FILE_LOCK_BLOCKED
)
3201 if ((start
<= nflp
->nfl_end
) && (end
>= nflp
->nfl_start
) &&
3202 ((fl
->l_type
== F_WRLCK
) || (nflp
->nfl_type
== F_WRLCK
)))
3206 /* found a conflicting lock */
3207 fl
->l_type
= nflp
->nfl_type
;
3208 fl
->l_pid
= (nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_FLOCK
) ? -1 : nflp
->nfl_owner
->nlo_pid
;
3209 fl
->l_start
= nflp
->nfl_start
;
3210 fl
->l_len
= NFS_FLOCK_LENGTH(nflp
->nfl_start
, nflp
->nfl_end
);
3211 fl
->l_whence
= SEEK_SET
;
3213 lck_mtx_unlock(&np
->n_openlock
);
3217 /* no conflict found locally, so ask the server */
3219 nfsm_chain_null(&nmreq
);
3220 nfsm_chain_null(&nmrep
);
3222 // PUTFH, GETATTR, LOCKT
3224 nfsm_chain_build_alloc_init(error
, &nmreq
, 26 * NFSX_UNSIGNED
);
3225 nfsm_chain_add_compound_header(error
, &nmreq
, "locktest", numops
);
3227 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
3228 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
3230 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
3231 nfsm_chain_add_bitmap_masked(error
, &nmreq
, nfs_getattr_bitmap
,
3232 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
3234 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOCKT
);
3235 nfsm_chain_add_32(error
, &nmreq
, (fl
->l_type
== F_WRLCK
) ? NFS_LOCK_TYPE_WRITE
: NFS_LOCK_TYPE_READ
);
3236 nfsm_chain_add_64(error
, &nmreq
, start
);
3237 nfsm_chain_add_64(error
, &nmreq
, NFS_LOCK_LENGTH(start
, end
));
3238 nfsm_chain_add_lock_owner4(error
, &nmreq
, nmp
, nlop
);
3239 nfsm_chain_build_done(error
, &nmreq
);
3240 nfsm_assert(error
, (numops
== 0), EPROTO
);
3243 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &nmrep
, &xid
, &status
);
3245 if ((lockerror
= nfs_node_lock(np
)))
3247 nfsm_chain_skip_tag(error
, &nmrep
);
3248 nfsm_chain_get_32(error
, &nmrep
, numops
);
3249 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
3251 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
3252 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, NULL
, &xid
);
3254 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOCKT
);
3255 if (error
== NFSERR_DENIED
) {
3257 nfsm_chain_get_64(error
, &nmrep
, fl
->l_start
);
3258 nfsm_chain_get_64(error
, &nmrep
, val64
);
3259 fl
->l_len
= (val64
== UINT64_MAX
) ? 0 : val64
;
3260 nfsm_chain_get_32(error
, &nmrep
, val
);
3261 fl
->l_type
= (val
== NFS_LOCK_TYPE_WRITE
) ? F_WRLCK
: F_RDLCK
;
3263 fl
->l_whence
= SEEK_SET
;
3264 } else if (!error
) {
3265 fl
->l_type
= F_UNLCK
;
3269 nfs_node_unlock(np
);
3270 nfsm_chain_cleanup(&nmreq
);
3271 nfsm_chain_cleanup(&nmrep
);
3276 * Acquire a file lock for the given range.
3278 * Add the lock (request) to the lock queue.
3279 * Scan the lock queue for any conflicting locks.
3280 * If a conflict is found, block or return an error.
3281 * Once end of queue is reached, send request to the server.
3282 * If the server grants the lock, scan the lock queue and
3283 * update any existing locks. Then (optionally) scan the
3284 * queue again to coalesce any locks adjacent to the new one.
3289 struct nfs_open_file
*nofp
,
3290 struct nfs_lock_owner
*nlop
,
3298 struct nfsmount
*nmp
;
3299 struct nfs_file_lock
*newnflp
, *nflp
, *nflp2
= NULL
, *nextnflp
, *flocknflp
= NULL
;
3300 struct nfs_file_lock
*coalnflp
;
3301 int error
= 0, error2
, willsplit
= 0, delay
, slpflag
, busy
= 0, inuse
= 0, restart
, inqueue
= 0;
3302 struct timespec ts
= {1, 0};
3307 slpflag
= (nmp
->nm_flag
& NFSMNT_INT
) ? PCATCH
: 0;
3309 /* allocate a new lock */
3310 newnflp
= nfs_file_lock_alloc(nlop
);
3313 newnflp
->nfl_start
= start
;
3314 newnflp
->nfl_end
= end
;
3315 newnflp
->nfl_type
= type
;
3317 newnflp
->nfl_flags
|= NFS_FILE_LOCK_WAIT
;
3318 newnflp
->nfl_flags
|= style
;
3319 newnflp
->nfl_flags
|= NFS_FILE_LOCK_BLOCKED
;
3321 if ((style
== NFS_FILE_LOCK_STYLE_FLOCK
) && (type
== F_WRLCK
)) {
3323 * For exclusive flock-style locks, if we block waiting for the
3324 * lock, we need to first release any currently held shared
3325 * flock-style lock. So, the first thing we do is check if we
3326 * have a shared flock-style lock.
3328 nflp
= TAILQ_FIRST(&nlop
->nlo_locks
);
3329 if (nflp
&& ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != NFS_FILE_LOCK_STYLE_FLOCK
))
3331 if (nflp
&& (nflp
->nfl_type
!= F_RDLCK
))
3338 error
= nfs_mount_state_in_use_start(nmp
);
3342 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
3343 nfs_mount_state_in_use_end(nmp
, 0);
3345 nfs4_reopen(nofp
, vfs_context_thread(ctx
));
3349 lck_mtx_lock(&np
->n_openlock
);
3351 /* insert new lock at beginning of list */
3352 TAILQ_INSERT_HEAD(&np
->n_locks
, newnflp
, nfl_link
);
3356 /* scan current list of locks (held and pending) for conflicts */
3357 for (nflp
= TAILQ_NEXT(newnflp
, nfl_link
); nflp
; nflp
= TAILQ_NEXT(nflp
, nfl_link
)) {
3358 if (!nfs_file_lock_conflict(newnflp
, nflp
, &willsplit
))
3361 if (!(newnflp
->nfl_flags
& NFS_FILE_LOCK_WAIT
)) {
3365 /* Block until this lock is no longer held. */
3366 if (nflp
->nfl_blockcnt
== UINT_MAX
) {
3370 nflp
->nfl_blockcnt
++;
3373 /* release any currently held shared lock before sleeping */
3374 lck_mtx_unlock(&np
->n_openlock
);
3375 nfs_mount_state_in_use_end(nmp
, 0);
3377 error
= nfs4_unlock(np
, nofp
, nlop
, 0, UINT64_MAX
, NFS_FILE_LOCK_STYLE_FLOCK
, ctx
);
3380 error
= nfs_mount_state_in_use_start(nmp
);
3382 lck_mtx_lock(&np
->n_openlock
);
3386 lck_mtx_lock(&np
->n_openlock
);
3387 /* no need to block/sleep if the conflict is gone */
3388 if (!nfs_file_lock_conflict(newnflp
, nflp
, NULL
))
3391 msleep(nflp
, &np
->n_openlock
, slpflag
, "nfs4_setlock_blocked", &ts
);
3392 error
= nfs_sigintr(NFSTONMP(np
), NULL
, vfs_context_thread(ctx
), 0);
3393 if (!error
&& (nmp
->nm_state
& NFSSTA_RECOVER
)) {
3394 /* looks like we have a recover pending... restart */
3396 lck_mtx_unlock(&np
->n_openlock
);
3397 nfs_mount_state_in_use_end(nmp
, 0);
3399 lck_mtx_lock(&np
->n_openlock
);
3402 } while (!error
&& nfs_file_lock_conflict(newnflp
, nflp
, NULL
));
3403 nflp
->nfl_blockcnt
--;
3404 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
) && !nflp
->nfl_blockcnt
) {
3405 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
3406 nfs_file_lock_destroy(nflp
);
3408 if (error
|| restart
)
3411 lck_mtx_unlock(&np
->n_openlock
);
3419 * It looks like this operation is splitting a lock.
3420 * We allocate a new lock now so we don't have to worry
3421 * about the allocation failing after we've updated some state.
3423 nflp2
= nfs_file_lock_alloc(nlop
);
3430 /* once scan for local conflicts is clear, send request to server */
3431 if ((error
= nfs_open_state_set_busy(np
, ctx
)))
3436 error
= nfs4_lock_rpc(np
, nofp
, newnflp
, 0, vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
3437 if (!error
|| ((error
!= NFSERR_DENIED
) && (error
!= NFSERR_GRACE
)))
3439 /* request was denied due to either conflict or grace period */
3440 if ((error
!= NFSERR_GRACE
) && !(newnflp
->nfl_flags
& NFS_FILE_LOCK_WAIT
)) {
3445 /* release any currently held shared lock before sleeping */
3446 nfs_open_state_clear_busy(np
);
3448 nfs_mount_state_in_use_end(nmp
, 0);
3450 error2
= nfs4_unlock(np
, nofp
, nlop
, 0, UINT64_MAX
, NFS_FILE_LOCK_STYLE_FLOCK
, ctx
);
3453 error2
= nfs_mount_state_in_use_start(nmp
);
3456 error2
= nfs_open_state_set_busy(np
, ctx
);
3464 /* wait a little bit and send the request again */
3465 if (error
== NFSERR_GRACE
)
3469 tsleep(newnflp
, slpflag
, "nfs4_setlock_delay", delay
* (hz
/2));
3470 error
= nfs_sigintr(NFSTONMP(np
), NULL
, vfs_context_thread(ctx
), 0);
3471 if (!error
&& (nmp
->nm_state
& NFSSTA_RECOVER
)) {
3472 /* looks like we have a recover pending... restart */
3473 nfs_open_state_clear_busy(np
);
3475 nfs_mount_state_in_use_end(nmp
, 0);
3482 if (nfs_mount_state_error_should_restart(error
)) {
3483 /* looks like we need to restart this operation */
3485 nfs_open_state_clear_busy(np
);
3489 nfs_mount_state_in_use_end(nmp
, error
);
3494 lck_mtx_lock(&np
->n_openlock
);
3495 newnflp
->nfl_flags
&= ~NFS_FILE_LOCK_BLOCKED
;
3497 newnflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
3498 if (newnflp
->nfl_blockcnt
) {
3499 /* wake up anyone blocked on this lock */
3502 /* remove newnflp from lock list and destroy */
3503 TAILQ_REMOVE(&np
->n_locks
, newnflp
, nfl_link
);
3504 nfs_file_lock_destroy(newnflp
);
3506 lck_mtx_unlock(&np
->n_openlock
);
3508 nfs_open_state_clear_busy(np
);
3510 nfs_mount_state_in_use_end(nmp
, error
);
3512 nfs_file_lock_destroy(nflp2
);
3516 /* server granted the lock */
3519 * Scan for locks to update.
3521 * Locks completely covered are killed.
3522 * At most two locks may need to be clipped.
3523 * It's possible that a single lock may need to be split.
3525 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
3526 if (nflp
== newnflp
)
3528 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
|NFS_FILE_LOCK_DEAD
))
3530 if (nflp
->nfl_owner
!= nlop
)
3532 if ((newnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != (nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
))
3534 if ((newnflp
->nfl_start
> nflp
->nfl_end
) || (newnflp
->nfl_end
< nflp
->nfl_start
))
3536 /* here's one to update */
3537 if ((newnflp
->nfl_start
<= nflp
->nfl_start
) && (newnflp
->nfl_end
>= nflp
->nfl_end
)) {
3538 /* The entire lock is being replaced. */
3539 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
3540 lck_mtx_lock(&nlop
->nlo_lock
);
3541 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
3542 lck_mtx_unlock(&nlop
->nlo_lock
);
3543 /* lock will be destroyed below, if no waiters */
3544 } else if ((newnflp
->nfl_start
> nflp
->nfl_start
) && (newnflp
->nfl_end
< nflp
->nfl_end
)) {
3545 /* We're replacing a range in the middle of a lock. */
3546 /* The current lock will be split into two locks. */
3547 /* Update locks and insert new lock after current lock. */
3548 nflp2
->nfl_flags
|= (nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
);
3549 nflp2
->nfl_type
= nflp
->nfl_type
;
3550 nflp2
->nfl_start
= newnflp
->nfl_end
+ 1;
3551 nflp2
->nfl_end
= nflp
->nfl_end
;
3552 nflp
->nfl_end
= newnflp
->nfl_start
- 1;
3553 TAILQ_INSERT_AFTER(&np
->n_locks
, nflp
, nflp2
, nfl_link
);
3554 nfs_lock_owner_insert_held_lock(nlop
, nflp2
);
3557 } else if (newnflp
->nfl_start
> nflp
->nfl_start
) {
3558 /* We're replacing the end of a lock. */
3559 nflp
->nfl_end
= newnflp
->nfl_start
- 1;
3560 } else if (newnflp
->nfl_end
< nflp
->nfl_end
) {
3561 /* We're replacing the start of a lock. */
3562 nflp
->nfl_start
= newnflp
->nfl_end
+ 1;
3564 if (nflp
->nfl_blockcnt
) {
3565 /* wake up anyone blocked on this lock */
3567 } else if (nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
) {
3568 /* remove nflp from lock list and destroy */
3569 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
3570 nfs_file_lock_destroy(nflp
);
3574 nfs_lock_owner_insert_held_lock(nlop
, newnflp
);
3577 * POSIX locks should be coalesced when possible.
3579 if ((style
== NFS_FILE_LOCK_STYLE_POSIX
) && (nofp
->nof_flags
& NFS_OPEN_FILE_POSIXLOCK
)) {
3581 * Walk through the lock queue and check each of our held locks with
3582 * the previous and next locks in the lock owner's "held lock list".
3583 * If the two locks can be coalesced, we merge the current lock into
3584 * the other (previous or next) lock. Merging this way makes sure that
3585 * lock ranges are always merged forward in the lock queue. This is
3586 * important because anyone blocked on the lock being "merged away"
3587 * will still need to block on that range and it will simply continue
3588 * checking locks that are further down the list.
3590 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
3591 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
|NFS_FILE_LOCK_DEAD
))
3593 if (nflp
->nfl_owner
!= nlop
)
3595 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != NFS_FILE_LOCK_STYLE_POSIX
)
3597 if (((coalnflp
= TAILQ_PREV(nflp
, nfs_file_lock_queue
, nfl_lolink
))) &&
3598 ((coalnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
) &&
3599 (coalnflp
->nfl_type
== nflp
->nfl_type
) &&
3600 (coalnflp
->nfl_end
== (nflp
->nfl_start
- 1))) {
3601 coalnflp
->nfl_end
= nflp
->nfl_end
;
3602 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
3603 lck_mtx_lock(&nlop
->nlo_lock
);
3604 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
3605 lck_mtx_unlock(&nlop
->nlo_lock
);
3606 } else if (((coalnflp
= TAILQ_NEXT(nflp
, nfl_lolink
))) &&
3607 ((coalnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
) &&
3608 (coalnflp
->nfl_type
== nflp
->nfl_type
) &&
3609 (coalnflp
->nfl_start
== (nflp
->nfl_end
+ 1))) {
3610 coalnflp
->nfl_start
= nflp
->nfl_start
;
3611 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
3612 lck_mtx_lock(&nlop
->nlo_lock
);
3613 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
3614 lck_mtx_unlock(&nlop
->nlo_lock
);
3616 if (!(nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
))
3618 if (nflp
->nfl_blockcnt
) {
3619 /* wake up anyone blocked on this lock */
3622 /* remove nflp from lock list and destroy */
3623 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
3624 nfs_file_lock_destroy(nflp
);
3629 lck_mtx_unlock(&np
->n_openlock
);
3630 nfs_open_state_clear_busy(np
);
3631 nfs_mount_state_in_use_end(nmp
, error
);
3634 nfs_file_lock_destroy(nflp2
);
3641 struct nfs_open_file
*nofp
,
3642 struct nfs_lock_owner
*nlop
,
3648 struct nfsmount
*nmp
;
3649 struct nfs_file_lock
*nflp
, *nextnflp
, *newnflp
= NULL
;
3650 int error
= 0, willsplit
= 0, send_unlock_rpcs
= 1;
3657 if ((error
= nfs_mount_state_in_use_start(nmp
)))
3659 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
3660 nfs_mount_state_in_use_end(nmp
, 0);
3661 nfs4_reopen(nofp
, vfs_context_thread(ctx
));
3664 if ((error
= nfs_open_state_set_busy(np
, ctx
))) {
3665 nfs_mount_state_in_use_end(nmp
, error
);
3669 lck_mtx_lock(&np
->n_openlock
);
3670 if ((start
> 0) && (end
< UINT64_MAX
) && !willsplit
) {
3672 * We may need to allocate a new lock if an existing lock gets split.
3673 * So, we first scan the list to check for a split, and if there's
3674 * going to be one, we'll allocate one now.
3676 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
3677 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
|NFS_FILE_LOCK_DEAD
))
3679 if (nflp
->nfl_owner
!= nlop
)
3681 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != style
)
3683 if ((start
> nflp
->nfl_end
) || (end
< nflp
->nfl_start
))
3685 if ((start
> nflp
->nfl_start
) && (end
< nflp
->nfl_end
)) {
3691 lck_mtx_unlock(&np
->n_openlock
);
3692 nfs_open_state_clear_busy(np
);
3693 nfs_mount_state_in_use_end(nmp
, 0);
3694 newnflp
= nfs_file_lock_alloc(nlop
);
3702 * Free all of our locks in the given range.
3704 * Note that this process requires sending requests to the server.
3705 * Because of this, we will release the n_openlock while performing
3706 * the unlock RPCs. The N_OPENBUSY state keeps the state of *held*
3707 * locks from changing underneath us. However, other entries in the
3708 * list may be removed. So we need to be careful walking the list.
3712 * Don't unlock ranges that are held by other-style locks.
3713 * If style is posix, don't send any unlock rpcs if flock is held.
3714 * If we unlock an flock, don't send unlock rpcs for any posix-style
3715 * ranges held - instead send unlocks for the ranges not held.
3717 if ((style
== NFS_FILE_LOCK_STYLE_POSIX
) &&
3718 ((nflp
= TAILQ_FIRST(&nlop
->nlo_locks
))) &&
3719 ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_FLOCK
))
3720 send_unlock_rpcs
= 0;
3721 if ((style
== NFS_FILE_LOCK_STYLE_FLOCK
) &&
3722 ((nflp
= TAILQ_FIRST(&nlop
->nlo_locks
))) &&
3723 ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_FLOCK
) &&
3724 ((nflp
= TAILQ_NEXT(nflp
, nfl_lolink
))) &&
3725 ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
)) {
3727 int type
= TAILQ_FIRST(&nlop
->nlo_locks
)->nfl_type
;
3729 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
) {
3730 /* unlock the range preceding this lock */
3731 lck_mtx_unlock(&np
->n_openlock
);
3732 error
= nfs4_unlock_rpc(np
, nlop
, type
, s
, nflp
->nfl_start
-1, ctx
);
3733 if (nfs_mount_state_error_should_restart(error
)) {
3734 nfs_open_state_clear_busy(np
);
3735 nfs_mount_state_in_use_end(nmp
, error
);
3738 lck_mtx_lock(&np
->n_openlock
);
3741 s
= nflp
->nfl_end
+1;
3743 nflp
= TAILQ_NEXT(nflp
, nfl_lolink
);
3745 lck_mtx_unlock(&np
->n_openlock
);
3746 error
= nfs4_unlock_rpc(np
, nlop
, type
, s
, end
, ctx
);
3747 if (nfs_mount_state_error_should_restart(error
)) {
3748 nfs_open_state_clear_busy(np
);
3749 nfs_mount_state_in_use_end(nmp
, error
);
3752 lck_mtx_lock(&np
->n_openlock
);
3755 send_unlock_rpcs
= 0;
3758 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
3759 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
|NFS_FILE_LOCK_DEAD
))
3761 if (nflp
->nfl_owner
!= nlop
)
3763 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != style
)
3765 if ((start
> nflp
->nfl_end
) || (end
< nflp
->nfl_start
))
3767 /* here's one to unlock */
3768 if ((start
<= nflp
->nfl_start
) && (end
>= nflp
->nfl_end
)) {
3769 /* The entire lock is being unlocked. */
3770 if (send_unlock_rpcs
) {
3771 lck_mtx_unlock(&np
->n_openlock
);
3772 error
= nfs4_unlock_rpc(np
, nlop
, nflp
->nfl_type
, nflp
->nfl_start
, nflp
->nfl_end
, ctx
);
3773 if (nfs_mount_state_error_should_restart(error
)) {
3774 nfs_open_state_clear_busy(np
);
3775 nfs_mount_state_in_use_end(nmp
, error
);
3778 lck_mtx_lock(&np
->n_openlock
);
3780 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
3783 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
3784 lck_mtx_lock(&nlop
->nlo_lock
);
3785 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
3786 lck_mtx_unlock(&nlop
->nlo_lock
);
3787 /* lock will be destroyed below, if no waiters */
3788 } else if ((start
> nflp
->nfl_start
) && (end
< nflp
->nfl_end
)) {
3789 /* We're unlocking a range in the middle of a lock. */
3790 /* The current lock will be split into two locks. */
3791 if (send_unlock_rpcs
) {
3792 lck_mtx_unlock(&np
->n_openlock
);
3793 error
= nfs4_unlock_rpc(np
, nlop
, nflp
->nfl_type
, start
, end
, ctx
);
3794 if (nfs_mount_state_error_should_restart(error
)) {
3795 nfs_open_state_clear_busy(np
);
3796 nfs_mount_state_in_use_end(nmp
, error
);
3799 lck_mtx_lock(&np
->n_openlock
);
3803 /* update locks and insert new lock after current lock */
3804 newnflp
->nfl_flags
|= (nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
);
3805 newnflp
->nfl_type
= nflp
->nfl_type
;
3806 newnflp
->nfl_start
= end
+ 1;
3807 newnflp
->nfl_end
= nflp
->nfl_end
;
3808 nflp
->nfl_end
= start
- 1;
3809 TAILQ_INSERT_AFTER(&np
->n_locks
, nflp
, newnflp
, nfl_link
);
3810 nfs_lock_owner_insert_held_lock(nlop
, newnflp
);
3813 } else if (start
> nflp
->nfl_start
) {
3814 /* We're unlocking the end of a lock. */
3815 if (send_unlock_rpcs
) {
3816 lck_mtx_unlock(&np
->n_openlock
);
3817 error
= nfs4_unlock_rpc(np
, nlop
, nflp
->nfl_type
, start
, nflp
->nfl_end
, ctx
);
3818 if (nfs_mount_state_error_should_restart(error
)) {
3819 nfs_open_state_clear_busy(np
);
3820 nfs_mount_state_in_use_end(nmp
, error
);
3823 lck_mtx_lock(&np
->n_openlock
);
3825 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
3828 nflp
->nfl_end
= start
- 1;
3829 } else if (end
< nflp
->nfl_end
) {
3830 /* We're unlocking the start of a lock. */
3831 if (send_unlock_rpcs
) {
3832 lck_mtx_unlock(&np
->n_openlock
);
3833 error
= nfs4_unlock_rpc(np
, nlop
, nflp
->nfl_type
, nflp
->nfl_start
, end
, ctx
);
3834 if (nfs_mount_state_error_should_restart(error
)) {
3835 nfs_open_state_clear_busy(np
);
3836 nfs_mount_state_in_use_end(nmp
, error
);
3839 lck_mtx_lock(&np
->n_openlock
);
3841 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
3844 nflp
->nfl_start
= end
+ 1;
3846 if (nflp
->nfl_blockcnt
) {
3847 /* wake up anyone blocked on this lock */
3849 } else if (nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
) {
3850 /* remove nflp from lock list and destroy */
3851 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
3852 nfs_file_lock_destroy(nflp
);
3856 lck_mtx_unlock(&np
->n_openlock
);
3857 nfs_open_state_clear_busy(np
);
3858 nfs_mount_state_in_use_end(nmp
, 0);
3861 nfs_file_lock_destroy(newnflp
);
3866 * NFSv4 advisory file locking
3870 struct vnop_advlock_args
/* {
3871 struct vnodeop_desc *a_desc;
3877 vfs_context_t a_context;
3880 vnode_t vp
= ap
->a_vp
;
3881 nfsnode_t np
= VTONFS(ap
->a_vp
);
3882 struct flock
*fl
= ap
->a_fl
;
3884 int flags
= ap
->a_flags
;
3885 vfs_context_t ctx
= ap
->a_context
;
3886 struct nfsmount
*nmp
;
3887 struct nfs_vattr nvattr
;
3888 struct nfs_open_owner
*noop
= NULL
;
3889 struct nfs_open_file
*nofp
= NULL
;
3890 struct nfs_lock_owner
*nlop
= NULL
;
3892 uint64_t start
, end
;
3893 int error
= 0, modified
, style
;
3894 #define OFF_MAX QUAD_MAX
3896 nmp
= VTONMP(ap
->a_vp
);
3900 switch (fl
->l_whence
) {
3904 * Caller is responsible for adding any necessary offset
3905 * to fl->l_start when SEEK_CUR is used.
3907 lstart
= fl
->l_start
;
3910 /* need to flush, and refetch attributes to make */
3911 /* sure we have the correct end of file offset */
3912 if ((error
= nfs_node_lock(np
)))
3914 modified
= (np
->n_flag
& NMODIFIED
);
3915 nfs_node_unlock(np
);
3916 if (modified
&& ((error
= nfs_vinvalbuf(vp
, V_SAVE
, ctx
, 1))))
3918 if ((error
= nfs_getattr(np
, &nvattr
, ctx
, NGA_UNCACHED
)))
3920 nfs_data_lock(np
, NFS_DATA_LOCK_SHARED
);
3921 if ((np
->n_size
> OFF_MAX
) ||
3922 ((fl
->l_start
> 0) && (np
->n_size
> (u_quad_t
)(OFF_MAX
- fl
->l_start
))))
3924 lstart
= np
->n_size
+ fl
->l_start
;
3925 nfs_data_unlock(np
);
3935 if (fl
->l_len
== 0) {
3937 } else if (fl
->l_len
> 0) {
3938 if ((fl
->l_len
- 1) > (OFF_MAX
- lstart
))
3940 end
= start
- 1 + fl
->l_len
;
3941 } else { /* l_len is negative */
3942 if ((lstart
+ fl
->l_len
) < 0)
3950 style
= (flags
& F_FLOCK
) ? NFS_FILE_LOCK_STYLE_FLOCK
: NFS_FILE_LOCK_STYLE_POSIX
;
3951 if ((style
== NFS_FILE_LOCK_STYLE_FLOCK
) && ((start
!= 0) || (end
!= UINT64_MAX
)))
3954 /* find the lock owner, alloc if not unlock */
3955 nlop
= nfs_lock_owner_find(np
, vfs_context_proc(ctx
), (op
!= F_UNLCK
));
3957 error
= (op
== F_UNLCK
) ? 0 : ENOMEM
;
3959 printf("nfs4_vnop_advlock: no lock owner %d\n", error
);
3963 if (op
== F_GETLK
) {
3964 error
= nfs4_getlock(np
, nlop
, fl
, start
, end
, ctx
);
3966 /* find the open owner */
3967 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 0);
3969 printf("nfs4_vnop_advlock: no open owner\n");
3973 /* find the open file */
3975 error
= nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 0);
3978 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
3979 printf("nfs_vnop_advlock: LOST\n");
3982 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
3983 nfs4_reopen(nofp
, vfs_context_thread(ctx
));
3988 printf("nfs4_vnop_advlock: no open file %d\n", error
);
3991 if (op
== F_UNLCK
) {
3992 error
= nfs4_unlock(np
, nofp
, nlop
, start
, end
, style
, ctx
);
3993 } else if ((op
== F_SETLK
) || (op
== F_SETLKW
)) {
3994 if ((op
== F_SETLK
) && (flags
& F_WAIT
))
3996 error
= nfs4_setlock(np
, nofp
, nlop
, op
, start
, end
, style
, fl
->l_type
, ctx
);
3998 /* not getlk, unlock or lock? */
4005 nfs_lock_owner_rele(nlop
);
4007 nfs_open_owner_rele(noop
);
4012 * Check if an open owner holds any locks on a file.
4015 nfs4_check_for_locks(struct nfs_open_owner
*noop
, struct nfs_open_file
*nofp
)
4017 struct nfs_lock_owner
*nlop
;
4019 TAILQ_FOREACH(nlop
, &nofp
->nof_np
->n_lock_owners
, nlo_link
) {
4020 if (nlop
->nlo_open_owner
!= noop
)
4022 if (!TAILQ_EMPTY(&nlop
->nlo_locks
))
4025 return (nlop
? 1 : 0);
4029 * Reopen simple (no deny, no locks) open state that was lost.
4032 nfs4_reopen(struct nfs_open_file
*nofp
, thread_t thd
)
4034 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
4035 struct nfsmount
*nmp
= NFSTONMP(nofp
->nof_np
);
4036 vnode_t vp
= NFSTOV(nofp
->nof_np
);
4038 struct componentname cn
;
4039 const char *vname
= NULL
;
4041 char smallname
[128];
4042 char *filename
= NULL
;
4043 int error
= 0, done
= 0, slpflag
= (nmp
->nm_flag
& NFSMNT_INT
) ? PCATCH
: 0;
4044 struct timespec ts
= { 1, 0 };
4046 lck_mtx_lock(&nofp
->nof_lock
);
4047 while (nofp
->nof_flags
& NFS_OPEN_FILE_REOPENING
) {
4048 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0)))
4050 msleep(&nofp
->nof_flags
, &nofp
->nof_lock
, slpflag
|(PZERO
-1), "nfsreopenwait", &ts
);
4052 if (!(nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
4053 lck_mtx_unlock(&nofp
->nof_lock
);
4056 nofp
->nof_flags
|= NFS_OPEN_FILE_REOPENING
;
4057 lck_mtx_unlock(&nofp
->nof_lock
);
4059 dvp
= vnode_getparent(vp
);
4060 vname
= vnode_getname(vp
);
4061 if (!dvp
|| !vname
) {
4065 filename
= &smallname
[0];
4066 namelen
= snprintf(filename
, sizeof(smallname
), "%s", vname
);
4067 if (namelen
>= sizeof(smallname
)) {
4068 namelen
++; /* snprintf result doesn't include '\0' */
4069 MALLOC(filename
, char *, namelen
, M_TEMP
, M_WAITOK
);
4074 snprintf(filename
, namelen
, "%s", vname
);
4076 bzero(&cn
, sizeof(cn
));
4077 cn
.cn_nameptr
= filename
;
4078 cn
.cn_namelen
= namelen
;
4082 if ((error
= nfs_mount_state_in_use_start(nmp
)))
4086 error
= nfs4_open_reopen_rpc(nofp
, thd
, noop
->noo_cred
, &cn
, dvp
, &vp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_NONE
);
4087 if (!error
&& nofp
->nof_w
)
4088 error
= nfs4_open_reopen_rpc(nofp
, thd
, noop
->noo_cred
, &cn
, dvp
, &vp
, NFS_OPEN_SHARE_ACCESS_WRITE
, NFS_OPEN_SHARE_DENY_NONE
);
4089 if (!error
&& nofp
->nof_r
)
4090 error
= nfs4_open_reopen_rpc(nofp
, thd
, noop
->noo_cred
, &cn
, dvp
, &vp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
);
4092 if (nfs_mount_state_in_use_end(nmp
, error
)) {
4093 if (error
== NFSERR_GRACE
)
4100 lck_mtx_lock(&nofp
->nof_lock
);
4101 nofp
->nof_flags
&= ~NFS_OPEN_FILE_REOPENING
;
4103 nofp
->nof_flags
|= NFS_OPEN_FILE_LOST
;
4105 nofp
->nof_flags
&= ~NFS_OPEN_FILE_REOPEN
;
4107 printf("nfs4_reopen: failed, error %d, lost %d\n", error
, (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) ? 1 : 0);
4108 lck_mtx_unlock(&nofp
->nof_lock
);
4109 if (filename
&& (filename
!= &smallname
[0]))
4110 FREE(filename
, M_TEMP
);
4112 vnode_putname(vname
);
4118 * Send a normal OPEN RPC to open/create a file.
4122 struct nfs_open_file
*nofp
,
4124 struct componentname
*cnp
,
4125 struct vnode_attr
*vap
,
4132 return (nfs4_open_rpc_internal(nofp
, ctx
, vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
4133 cnp
, vap
, dvp
, vpp
, create
, share_access
, share_deny
));
4137 * Send an OPEN RPC to reopen a file.
4140 nfs4_open_reopen_rpc(
4141 struct nfs_open_file
*nofp
,
4144 struct componentname
*cnp
,
4150 return (nfs4_open_rpc_internal(nofp
, NULL
, thd
, cred
, cnp
, NULL
, dvp
, vpp
, 0, share_access
, share_deny
));
4154 * common OPEN RPC code
4156 * If create is set, ctx must be passed in.
4159 nfs4_open_rpc_internal(
4160 struct nfs_open_file
*nofp
,
4164 struct componentname
*cnp
,
4165 struct vnode_attr
*vap
,
4172 struct nfsmount
*nmp
;
4173 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
4174 struct nfs_vattr nvattr
, dnvattr
;
4175 int error
= 0, open_error
= EIO
, lockerror
= ENOENT
, busyerror
= ENOENT
, status
;
4176 int nfsvers
, numops
, exclusive
= 0, gotuid
, gotgid
;
4177 u_int64_t xid
, savedxid
= 0;
4178 nfsnode_t dnp
= VTONFS(dvp
);
4179 nfsnode_t np
, newnp
= NULL
;
4180 vnode_t newvp
= NULL
;
4181 struct nfsm_chain nmreq
, nmrep
;
4182 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
4183 uint32_t rflags
, delegation
= 0, recall
= 0, val
;
4184 struct nfs_stateid stateid
, dstateid
, *sid
;
4186 struct nfsreq
*req
= NULL
;
4187 struct nfs_dulookup dul
;
4195 nfsvers
= nmp
->nm_vers
;
4197 np
= *vpp
? VTONFS(*vpp
) : NULL
;
4198 if (create
&& vap
) {
4199 exclusive
= (vap
->va_vaflags
& VA_EXCLUSIVE
);
4200 nfs_avoid_needless_id_setting_on_create(dnp
, vap
, ctx
);
4201 gotuid
= VATTR_IS_ACTIVE(vap
, va_uid
);
4202 gotgid
= VATTR_IS_ACTIVE(vap
, va_gid
);
4204 exclusive
= gotuid
= gotgid
= 0;
4207 sid
= &nofp
->nof_stateid
;
4209 stateid
.seqid
= stateid
.other
[0] = stateid
.other
[1] = stateid
.other
[2] = 0;
4213 if ((error
= nfs_open_owner_set_busy(noop
, thd
)))
4218 nfsm_chain_null(&nmreq
);
4219 nfsm_chain_null(&nmrep
);
4221 // PUTFH, SAVEFH, OPEN(CREATE?), GETATTR(FH), RESTOREFH, GETATTR
4223 nfsm_chain_build_alloc_init(error
, &nmreq
, 53 * NFSX_UNSIGNED
+ cnp
->cn_namelen
);
4224 nfsm_chain_add_compound_header(error
, &nmreq
, create
? "create" : "open", numops
);
4226 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
4227 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
4229 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
4231 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
4232 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
4233 nfsm_chain_add_32(error
, &nmreq
, share_access
);
4234 nfsm_chain_add_32(error
, &nmreq
, share_deny
);
4236 // open owner: clientid + uid
4237 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
); // open_owner4.clientid
4238 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
4239 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
)); // open_owner4.owner
4242 nfsm_chain_add_32(error
, &nmreq
, create
);
4245 static uint32_t create_verf
; // XXX need a better verifier
4247 nfsm_chain_add_32(error
, &nmreq
, NFS_CREATE_EXCLUSIVE
);
4248 /* insert 64 bit verifier */
4249 nfsm_chain_add_32(error
, &nmreq
, create_verf
);
4250 nfsm_chain_add_32(error
, &nmreq
, create_verf
);
4252 nfsm_chain_add_32(error
, &nmreq
, NFS_CREATE_UNCHECKED
);
4253 nfsm_chain_add_fattr4(error
, &nmreq
, vap
, nmp
);
4258 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_NULL
);
4259 nfsm_chain_add_string(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
);
4261 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
4262 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
4263 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
4264 nfsm_chain_add_bitmap_masked(error
, &nmreq
, bitmap
,
4265 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
4267 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
4269 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
4270 nfsm_chain_add_bitmap_masked(error
, &nmreq
, nfs_getattr_bitmap
,
4271 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
4272 nfsm_chain_build_done(error
, &nmreq
);
4273 nfsm_assert(error
, (numops
== 0), EPROTO
);
4275 error
= busyerror
= nfs_node_set_busy(dnp
, thd
);
4279 nfs_dulookup_init(&dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
);
4281 error
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, NULL
, &req
);
4284 nfs_dulookup_start(&dul
, dnp
, ctx
);
4285 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
4290 nfs_dulookup_finish(&dul
, dnp
, ctx
);
4292 if ((lockerror
= nfs_node_lock(dnp
)))
4294 nfsm_chain_skip_tag(error
, &nmrep
);
4295 nfsm_chain_get_32(error
, &nmrep
, numops
);
4296 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
4297 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
4299 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
4300 nfs_owner_seqid_increment(noop
, NULL
, error
);
4301 nfsm_chain_get_stateid(error
, &nmrep
, sid
);
4302 nfsm_chain_check_change_info(error
, &nmrep
, dnp
);
4303 nfsm_chain_get_32(error
, &nmrep
, rflags
);
4304 bmlen
= NFS_ATTR_BITMAP_LEN
;
4305 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
4306 nfsm_chain_get_32(error
, &nmrep
, delegation
);
4308 switch (delegation
) {
4309 case NFS_OPEN_DELEGATE_NONE
:
4311 case NFS_OPEN_DELEGATE_READ
:
4312 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
4313 nfsm_chain_get_32(error
, &nmrep
, recall
);
4315 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
4316 nfsm_chain_get_32(error
, &nmrep
, val
); /* string length */
4317 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(val
));
4319 case NFS_OPEN_DELEGATE_WRITE
:
4320 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
4321 nfsm_chain_get_32(error
, &nmrep
, recall
);
4323 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
4325 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
4326 nfsm_chain_get_32(error
, &nmrep
, val
); /* string length */
4327 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(val
));
4333 /* At this point if we have no error, the object was created/opened. */
4334 /* if we don't get attributes, then we should lookitup. */
4337 if (create
&& !exclusive
)
4338 nfs_vattr_set_supported(bitmap
, vap
);
4339 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
4341 NFS_CLEAR_ATTRIBUTES(nvattr
.nva_bitmap
);
4342 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
);
4344 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
4345 printf("nfs: open/create didn't return filehandle?\n");
4349 if (!create
&& np
&& !NFS_CMPFH(np
, fh
.fh_data
, fh
.fh_len
)) {
4350 // XXX for the open case, what if fh doesn't match the vnode we think we're opening?
4351 printf("nfs4_open_rpc: warning: file handle mismatch\n");
4353 /* directory attributes: if we don't get them, make sure to invalidate */
4354 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
4355 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
4356 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, NULL
, &xid
);
4358 NATTRINVALIDATE(dnp
);
4361 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
)
4362 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
4364 if (rflags
& NFS_OPEN_RESULT_CONFIRM
) {
4365 nfs_node_unlock(dnp
);
4367 nfsm_chain_cleanup(&nmreq
);
4368 nfsm_chain_cleanup(&nmrep
);
4369 // PUTFH, OPEN_CONFIRM, GETATTR
4371 nfsm_chain_build_alloc_init(error
, &nmreq
, 23 * NFSX_UNSIGNED
);
4372 nfsm_chain_add_compound_header(error
, &nmreq
, "open_confirm", numops
);
4374 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
4375 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, fh
.fh_data
, fh
.fh_len
);
4377 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN_CONFIRM
);
4378 nfsm_chain_add_stateid(error
, &nmreq
, sid
);
4379 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
4381 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
4382 nfsm_chain_add_bitmap_masked(error
, &nmreq
, nfs_getattr_bitmap
,
4383 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
4384 nfsm_chain_build_done(error
, &nmreq
);
4385 nfsm_assert(error
, (numops
== 0), EPROTO
);
4387 error
= nfs_request2(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, 0, &nmrep
, &xid
, &status
);
4389 nfsm_chain_skip_tag(error
, &nmrep
);
4390 nfsm_chain_get_32(error
, &nmrep
, numops
);
4391 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
4393 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN_CONFIRM
);
4394 nfs_owner_seqid_increment(noop
, NULL
, error
);
4395 nfsm_chain_get_stateid(error
, &nmrep
, sid
);
4396 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
4398 NFS_CLEAR_ATTRIBUTES(nvattr
.nva_bitmap
);
4399 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, NULL
, NULL
);
4402 if ((lockerror
= nfs_node_lock(dnp
)))
4407 nfsm_chain_cleanup(&nmreq
);
4408 nfsm_chain_cleanup(&nmrep
);
4410 if (!lockerror
&& create
) {
4411 if (!open_error
&& (dnp
->n_flag
& NNEGNCENTRIES
)) {
4412 dnp
->n_flag
&= ~NNEGNCENTRIES
;
4413 cache_purge_negatives(dvp
);
4415 dnp
->n_flag
|= NMODIFIED
;
4416 nfs_node_unlock(dnp
);
4418 nfs_getattr(dnp
, &dnvattr
, ctx
, NGA_CACHED
);
4421 nfs_node_unlock(dnp
);
4422 if (!error
&& create
&& fh
.fh_len
) {
4423 /* create the vnode with the filehandle and attributes */
4425 error
= nfs_nget(NFSTOMP(dnp
), dnp
, cnp
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, NG_MAKEENTRY
, &newnp
);
4427 newvp
= NFSTOV(newnp
);
4430 nfs_node_clear_busy(dnp
);
4431 if ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
)) {
4434 if (!error
&& np
&& !recall
) {
4435 /* stuff the delegation state in the node */
4436 lck_mtx_lock(&np
->n_openlock
);
4437 np
->n_openflags
&= ~N_DELEG_MASK
;
4438 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
4439 np
->n_dstateid
= dstateid
;
4440 lck_mtx_unlock(&np
->n_openlock
);
4443 nfs4_delegreturn_rpc(nmp
, fh
.fh_data
, fh
.fh_len
, &dstateid
, thd
, cred
);
4445 lck_mtx_lock(&np
->n_openlock
);
4446 np
->n_openflags
&= ~N_DELEG_MASK
;
4447 lck_mtx_unlock(&np
->n_openlock
);
4452 if (exclusive
&& (error
== NFSERR_NOTSUPP
)) {
4457 nfs_node_unlock(newnp
);
4460 } else if (create
) {
4461 nfs_node_unlock(newnp
);
4463 error
= nfs4_setattr_rpc(newnp
, vap
, ctx
);
4464 if (error
&& (gotuid
|| gotgid
)) {
4465 /* it's possible the server didn't like our attempt to set IDs. */
4466 /* so, let's try it again without those */
4467 VATTR_CLEAR_ACTIVE(vap
, va_uid
);
4468 VATTR_CLEAR_ACTIVE(vap
, va_gid
);
4469 error
= nfs4_setattr_rpc(newnp
, vap
, ctx
);
4477 nfs_open_owner_clear_busy(noop
);
4482 * Send an OPEN RPC to reclaim an open file.
4485 nfs4_open_reclaim_rpc(
4486 struct nfs_open_file
*nofp
,
4490 struct nfsmount
*nmp
;
4491 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
4492 struct nfs_vattr nvattr
;
4493 int error
= 0, lockerror
= ENOENT
, status
;
4494 int nfsvers
, numops
;
4496 nfsnode_t np
= nofp
->nof_np
;
4497 struct nfsm_chain nmreq
, nmrep
;
4498 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
4499 uint32_t rflags
= 0, delegation
, recall
= 0, val
;
4501 struct nfs_stateid dstateid
;
4506 nfsvers
= nmp
->nm_vers
;
4508 if ((error
= nfs_open_owner_set_busy(noop
, current_thread())))
4511 delegation
= NFS_OPEN_DELEGATE_NONE
;
4513 nfsm_chain_null(&nmreq
);
4514 nfsm_chain_null(&nmrep
);
4516 // PUTFH, OPEN, GETATTR(FH)
4518 nfsm_chain_build_alloc_init(error
, &nmreq
, 48 * NFSX_UNSIGNED
);
4519 nfsm_chain_add_compound_header(error
, &nmreq
, "open_reclaim", numops
);
4521 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
4522 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
4524 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
4525 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
4526 nfsm_chain_add_32(error
, &nmreq
, share_access
);
4527 nfsm_chain_add_32(error
, &nmreq
, share_deny
);
4528 // open owner: clientid + uid
4529 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
); // open_owner4.clientid
4530 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
4531 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
)); // open_owner4.owner
4533 nfsm_chain_add_32(error
, &nmreq
, NFS_OPEN_NOCREATE
);
4535 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_PREVIOUS
);
4536 delegation
= (np
->n_openflags
& N_DELEG_READ
) ? NFS_OPEN_DELEGATE_READ
:
4537 (np
->n_openflags
& N_DELEG_WRITE
) ? NFS_OPEN_DELEGATE_WRITE
:
4538 NFS_OPEN_DELEGATE_NONE
;
4539 nfsm_chain_add_32(error
, &nmreq
, delegation
);
4540 delegation
= NFS_OPEN_DELEGATE_NONE
;
4542 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
4543 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
4544 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
4545 nfsm_chain_add_bitmap_masked(error
, &nmreq
, bitmap
,
4546 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
4547 nfsm_chain_build_done(error
, &nmreq
);
4548 nfsm_assert(error
, (numops
== 0), EPROTO
);
4551 error
= nfs_request2(np
, nmp
->nm_mountp
, &nmreq
, NFSPROC4_COMPOUND
, current_thread(), noop
->noo_cred
, R_RECOVER
, &nmrep
, &xid
, &status
);
4553 if ((lockerror
= nfs_node_lock(np
)))
4555 nfsm_chain_skip_tag(error
, &nmrep
);
4556 nfsm_chain_get_32(error
, &nmrep
, numops
);
4557 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
4559 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
4560 nfs_owner_seqid_increment(noop
, NULL
, error
);
4561 nfsm_chain_get_stateid(error
, &nmrep
, &nofp
->nof_stateid
);
4562 nfsm_chain_check_change_info(error
, &nmrep
, np
);
4563 nfsm_chain_get_32(error
, &nmrep
, rflags
);
4564 bmlen
= NFS_ATTR_BITMAP_LEN
;
4565 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
4566 nfsm_chain_get_32(error
, &nmrep
, delegation
);
4568 switch (delegation
) {
4569 case NFS_OPEN_DELEGATE_NONE
:
4571 case NFS_OPEN_DELEGATE_READ
:
4572 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
4573 nfsm_chain_get_32(error
, &nmrep
, recall
);
4575 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
4576 nfsm_chain_get_32(error
, &nmrep
, val
); /* string length */
4577 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(val
));
4579 /* stuff the delegation state in the node */
4580 lck_mtx_lock(&np
->n_openlock
);
4581 np
->n_openflags
&= ~N_DELEG_MASK
;
4582 np
->n_openflags
|= N_DELEG_READ
;
4583 np
->n_dstateid
= dstateid
;
4584 lck_mtx_unlock(&np
->n_openlock
);
4587 case NFS_OPEN_DELEGATE_WRITE
:
4588 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
4589 nfsm_chain_get_32(error
, &nmrep
, recall
);
4591 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
4593 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
4594 nfsm_chain_get_32(error
, &nmrep
, val
); /* string length */
4595 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(val
));
4597 /* stuff the delegation state in the node */
4598 lck_mtx_lock(&np
->n_openlock
);
4599 np
->n_openflags
&= ~N_DELEG_MASK
;
4600 np
->n_openflags
|= N_DELEG_WRITE
;
4601 np
->n_dstateid
= dstateid
;
4602 lck_mtx_unlock(&np
->n_openlock
);
4610 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
4611 NFS_CLEAR_ATTRIBUTES(nvattr
.nva_bitmap
);
4612 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
);
4614 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
4615 printf("nfs: open reclaim didn't return filehandle?\n");
4619 if (!NFS_CMPFH(np
, fh
.fh_data
, fh
.fh_len
)) {
4620 // XXX what if fh doesn't match the vnode we think we're re-opening?
4621 printf("nfs4_open_reclaim_rpc: warning: file handle mismatch\n");
4623 error
= nfs_loadattrcache(np
, &nvattr
, &xid
, 1);
4625 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
)
4626 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
4628 nfsm_chain_cleanup(&nmreq
);
4629 nfsm_chain_cleanup(&nmrep
);
4631 nfs_node_unlock(np
);
4632 nfs_open_owner_clear_busy(noop
);
4633 if ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
)) {
4635 nfs4_delegreturn_rpc(nmp
, fh
.fh_data
, fh
.fh_len
, &dstateid
, current_thread(), noop
->noo_cred
);
4636 lck_mtx_lock(&np
->n_openlock
);
4637 np
->n_openflags
&= ~N_DELEG_MASK
;
4638 lck_mtx_unlock(&np
->n_openlock
);
/*
 * nfs4_open_downgrade_rpc
 *
 * Send an NFSv4 OPEN_DOWNGRADE for the given open file, reducing the
 * share access/deny to the values currently recorded in the nofp
 * (nof_access / nof_deny).  Builds a PUTFH + OPEN_DOWNGRADE + GETATTR
 * compound, advances the open owner's seqid, records the new open
 * stateid returned by the server, and refreshes the node's cached
 * attributes from the trailing GETATTR.
 *
 * NOTE(review): this excerpt is incomplete — the return type, the
 * remaining parameters (presumably nfsnode_t np and vfs_context_t ctx,
 * both referenced below), braces, and the error-handling branches
 * (nmp NULL check, "goto nfsmout"/return paths) are missing.  Confirm
 * against the full source before relying on control flow here.
 */
nfs4_open_downgrade_rpc(
	struct nfs_open_file *nofp,
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfsmount *nmp;
	int error, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;

	nfsvers = nmp->nm_vers;

	/* serialize open-state RPCs for this open owner */
	if ((error = nfs_open_owner_set_busy(noop, vfs_context_thread(ctx))))

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, OPEN_DOWNGRADE, GETATTR
	nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "open_downgrd", numops);
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_DOWNGRADE);
	/* current open stateid + owner seqid, then the downgraded share bits */
	nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	nfsm_chain_add_32(error, &nmreq, nofp->nof_access);
	nfsm_chain_add_32(error, &nmreq, nofp->nof_deny);
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);

	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))

	/* parse the reply: each op_check verifies the expected op's status */
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_DOWNGRADE);
	/* the open owner's seqid is updated with the op's error status */
	nfs_owner_seqid_increment(noop, NULL, error);
	nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, NULL, &xid);

	nfs_node_unlock(np);
	nfs_open_owner_clear_busy(noop);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
/*
 * Close RPC (name line missing from this excerpt; the compound tag is
 * "close", so presumably nfs4_close_rpc — confirm against full source).
 *
 * Sends an NFSv4 CLOSE for the given open file: builds a
 * PUTFH + CLOSE + GETATTR compound using the open owner's seqid and the
 * file's current open stateid, advances the seqid, records the stateid
 * returned by CLOSE, and refreshes the node's attributes.
 *
 * NOTE(review): excerpt is incomplete — the function header, thd/cred/
 * flag parameter declarations, braces, and error-handling branches are
 * missing; thd/cred/flag are passed through to nfs_request2() below.
 */
	struct nfs_open_file *nofp,
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfsmount *nmp;
	int error, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;

	nfsvers = nmp->nm_vers;

	/* serialize open-state RPCs for this open owner */
	if ((error = nfs_open_owner_set_busy(noop, thd)))

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, CLOSE, GETATTR
	nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "close", numops);
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	nfsm_chain_add_32(error, &nmreq, NFS_OP_CLOSE);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);

	/* 'flag' (e.g. R_RECOVER) is forwarded to the request layer */
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, flag, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_CLOSE);
	/* the open owner's seqid is updated with the op's error status */
	nfs_owner_seqid_increment(noop, NULL, error);
	nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, NULL, &xid);

	nfs_node_unlock(np);
	nfs_open_owner_clear_busy(noop);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
/*
 * nfs4_delegreturn_rpc
 *
 * Return a delegation to the server: build and send a
 * PUTFH + DELEGRETURN compound for the given file handle (fhp/fhlen)
 * and delegation stateid (sid), then verify both ops in the reply.
 * The request is issued via nfs_request2() with R_RECOVER, using the
 * caller-supplied thread and credential.
 *
 * NOTE(review): excerpt is incomplete — the return type, opening brace,
 * the xid declaration, "goto nfsmout" error branches, the nfsmout:
 * label, and the final return are missing from this extraction.
 */
nfs4_delegreturn_rpc(struct nfsmount *nmp, u_char *fhp, int fhlen, struct nfs_stateid *sid, thread_t thd, kauth_cred_t cred)
	int error = 0, status, numops;
	struct nfsm_chain nmreq, nmrep;

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, DELEGRETURN
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "delegreturn", numops);
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
	nfsm_chain_add_32(error, &nmreq, NFS_OP_DELEGRETURN);
	nfsm_chain_add_stateid(error, &nmreq, sid);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);

	error = nfs_request2(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, thd, cred, R_RECOVER, &nmrep, &xid, &status);

	/* verify both ops completed */
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_DELEGRETURN);

	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
/*
 * Just call nfs_bioread() to do the work.
 *
 * Note: the exec code paths have a tendency to call VNOP_READ (and VNOP_MMAP)
 * without first calling VNOP_OPEN, so we make sure the file is open here.
 *
 * NOTE(review): excerpt is incomplete — the function header, parts of
 * the vnop_read_args comment, braces, and most error/return branches
 * are missing from this extraction.
 */
	struct vnop_read_args /* {
		struct vnodeop_desc *a_desc;
		vfs_context_t a_context;

	vnode_t vp = ap->a_vp;
	vfs_context_t ctx = ap->a_context;
	struct nfsmount *nmp;
	struct nfs_open_owner *noop;
	struct nfs_open_file *nofp;

	/* only regular files are readable through this path */
	if (vnode_vtype(ap->a_vp) != VREG)

	/* find (or create) the open owner for this credential */
	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);

	error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
		printf("nfs_vnop_read: LOST\n");
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
		/* open state needs to be reestablished after recovery */
		nfs4_reopen(nofp, vfs_context_thread(ctx));
		nfs_open_owner_rele(noop);
	if (!nofp->nof_access) {
		/* we don't have the file open, so open it for read access */
		error = nfs_mount_state_in_use_start(nmp);
			nfs_open_owner_rele(noop);
		error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
		error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
			/* remember the implicit open so it gets closed later */
			nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
		nfs_open_file_clear_busy(nofp);
		if (nfs_mount_state_in_use_end(nmp, error)) {
	nfs_open_owner_rele(noop);
	/* the actual read is handled by the common buffered-I/O path */
	return (nfs_bioread(VTONFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_context));
/*
 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
 * Files are created using the NFSv4 OPEN RPC.  So we must open the
 * file to create it and then close it.
 *
 * NOTE(review): excerpt is incomplete — the function header, parts of
 * the vnop_create_args comment, braces, and error/return branches are
 * missing from this extraction.
 */
	struct vnop_create_args /* {
		struct vnodeop_desc *a_desc;
		struct componentname *a_cnp;
		struct vnode_attr *a_vap;
		vfs_context_t a_context;

	vfs_context_t ctx = ap->a_context;
	struct componentname *cnp = ap->a_cnp;
	struct vnode_attr *vap = ap->a_vap;
	vnode_t dvp = ap->a_dvp;
	vnode_t *vpp = ap->a_vpp;
	struct nfsmount *nmp;
	struct nfs_open_owner *noop = NULL;
	struct nfs_open_file *nofp = NULL;

	/* drop uid/gid attributes the server would assign anyway */
	nfs_avoid_needless_id_setting_on_create(VTONFS(dvp), vap, ctx);

	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);

	error = nfs_mount_state_in_use_start(nmp);
		nfs_open_owner_rele(noop);

	/* no node yet: look up an open file slot keyed by the owner alone */
	error = nfs_open_file_find(NULL, noop, &nofp, 0, 0, 1);
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
		printf("nfs_vnop_create: LOST\n");
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
		nfs_mount_state_in_use_end(nmp, 0);
		nfs4_reopen(nofp, vfs_context_thread(ctx));

	error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));

	/* provisionally account for the open the OPEN/CREATE will establish */
	nofp->nof_opencnt++;
	nofp->nof_access = NFS_OPEN_SHARE_ACCESS_BOTH;
	nofp->nof_deny = NFS_OPEN_SHARE_DENY_NONE;

	error = nfs4_open_rpc(nofp, ctx, cnp, vap, dvp, vpp, NFS_OPEN_CREATE,
		NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE);
	if (!error && !*vpp) {
		printf("nfs4_open_rpc returned without a node?\n");
		/* Hmmm... with no node, we have no filehandle and can't close it */
		/* roll back the provisional open accounting */
		nofp->nof_access = 0;
		nofp->nof_opencnt--;
		nofp->nof_np = np = VTONFS(*vpp);
		/* insert nofp onto np's open list */
		TAILQ_INSERT_HEAD(&np->n_opens, nofp, nof_link);
			/* mark it created-by-us so the creator thread can reuse it */
			nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
			nofp->nof_creator = current_thread();

	nfs_open_file_clear_busy(nofp);
	if (nfs_mount_state_in_use_end(nmp, error)) {
	nfs_open_owner_rele(noop);
/*
 * nfs_avoid_needless_id_setting_on_create
 *
 * Trim owner attributes from the creation attribute list when the
 * server would assign the same values anyway:
 *  - clear va_uid when it matches the calling credential's uid
 *  - clear va_gid when it matches the parent directory's gid or the
 *    calling credential's gid
 *
 * NOTE(review): the return type, opening brace, and closing braces are
 * missing from this extraction.
 */
nfs_avoid_needless_id_setting_on_create(nfsnode_t dnp, struct vnode_attr *vap, vfs_context_t ctx)
	/*
	 * Don't bother setting UID if it's the same as the credential performing the create.
	 * Don't bother setting GID if it's the same as the directory or credential.
	 */
	if (VATTR_IS_ACTIVE(vap, va_uid)) {
		if (kauth_cred_getuid(vfs_context_ucred(ctx)) == vap->va_uid)
			VATTR_CLEAR_ACTIVE(vap, va_uid);
	if (VATTR_IS_ACTIVE(vap, va_gid)) {
		if ((vap->va_gid == dnp->n_vattr.nva_gid) ||
		    (kauth_cred_getgid(vfs_context_ucred(ctx)) == vap->va_gid))
			VATTR_CLEAR_ACTIVE(vap, va_gid);
/*
 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
 *
 * Common worker for mknod/mkdir/symlink: builds and sends a
 * PUTFH + SAVEFH + CREATE + GETATTR(FH) + RESTOREFH + GETATTR compound
 * for the given object type, parses the new object's filehandle and
 * attributes out of the reply, updates the parent directory's cached
 * state, and instantiates the new vnode via nfs_nget().
 *
 * NOTE(review): excerpt is incomplete — the function header (ctx, dnp,
 * type, tag, link and npp parameters are referenced below), braces,
 * numops bookkeeping, and most error/goto branches are missing.
 */
	struct componentname *cnp,
	struct vnode_attr *vap,
	struct nfsmount *nmp;
	struct nfs_vattr nvattr, dnvattr;
	int error = 0, create_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
	int nfsvers, numops;
	u_int64_t xid, savedxid = 0;
	nfsnode_t np = NULL;
	vnode_t newvp = NULL;
	struct nfsm_chain nmreq, nmrep;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
	struct nfsreq *req = NULL;
	struct nfs_dulookup dul;

	nmp = NFSTONMP(dnp);
	nfsvers = nmp->nm_vers;

	/* device numbers only matter for NFBLK/NFCHR creates */
	sd.specdata1 = sd.specdata2 = 0;
	if (!VATTR_IS_ACTIVE(vap, va_rdev))
		sd.specdata1 = major(vap->va_rdev);
		sd.specdata2 = minor(vap->va_rdev);

	nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);

	error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
	nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, SAVEFH, CREATE, GETATTR(FH), RESTOREFH, GETATTR
	nfsm_chain_build_alloc_init(error, &nmreq, 66 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, tag, numops);
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
	nfsm_chain_add_32(error, &nmreq, NFS_OP_CREATE);
	nfsm_chain_add_32(error, &nmreq, type);
	if (type == NFLNK) {
		/* symlinks carry the link target */
		nfsm_chain_add_string(error, &nmreq, link, strlen(link));
	} else if ((type == NFBLK) || (type == NFCHR)) {
		/* device nodes carry major/minor */
		nfsm_chain_add_32(error, &nmreq, sd.specdata1);
		nfsm_chain_add_32(error, &nmreq, sd.specdata2);
	nfsm_chain_add_string(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen);
	nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	/* ask for the new object's filehandle along with its attributes */
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
	nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);

	/* send async so the directory-update lookup can run concurrently */
	error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
		vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, &req);
		nfs_dulookup_start(&dul, dnp, ctx);
		error = nfs_request_async_finish(req, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp)))
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_CREATE);
	nfsm_chain_check_change_info(error, &nmrep, dnp);
	bmlen = NFS_ATTR_BITMAP_LEN;
	nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
	/* At this point if we have no error, the object was created. */
	/* if we don't get attributes, then we should lookitup. */
	create_error = error;
	nfs_vattr_set_supported(bitmap, vap);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	NFS_CLEAR_ATTRIBUTES(nvattr.nva_bitmap);
	error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL);
	if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
		printf("nfs: create/%s didn't return filehandle?\n", tag);
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, NULL, &xid);
		NATTRINVALIDATE(dnp);

	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	/* a successful create invalidates any cached negative name entries */
	if (!create_error && (dnp->n_flag & NNEGNCENTRIES)) {
		dnp->n_flag &= ~NNEGNCENTRIES;
		cache_purge_negatives(NFSTOV(dnp));
	dnp->n_flag |= NMODIFIED;
	nfs_node_unlock(dnp);
	/* nfs_getattr() will check changed and purge caches */
	nfs_getattr(dnp, &dnvattr, ctx, NGA_CACHED);

	if (!error && fh.fh_len) {
		/* create the vnode with the filehandle and attributes */
		error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, NG_MAKEENTRY, &np);

	nfs_dulookup_finish(&dul, dnp, ctx);

	/*
	 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
	 * if we can succeed in looking up the object.
	 */
	if ((create_error == EEXIST) || (!create_error && !newvp)) {
		error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
			/* looked-up object must still be the expected type */
			if (vnode_vtype(newvp) != VLNK)
	nfs_node_clear_busy(dnp);
		nfs_node_unlock(np);
		nfs_node_unlock(np);
/*
 * VNOP_MKNOD for NFSv4: create a special file by delegating to
 * nfs4_create_rpc() with the NFS object type derived from va_type,
 * returning the new vnode in *a_vpp.
 *
 * NOTE(review): excerpt is incomplete — the function header, parts of
 * the args comment, the switch cases, braces, and error branches are
 * missing from this extraction.
 */
	struct vnop_mknod_args /* {
		struct vnodeop_desc *a_desc;
		struct componentname *a_cnp;
		struct vnode_attr *a_vap;
		vfs_context_t a_context;

	nfsnode_t np = NULL;
	struct nfsmount *nmp;

	nmp = VTONMP(ap->a_dvp);

	/* mknod requires an explicit va_type */
	if (!VATTR_IS_ACTIVE(ap->a_vap, va_type))

	switch (ap->a_vap->va_type) {

	error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
		vtonfs_type(ap->a_vap->va_type, nmp->nm_vers), NULL, &np);
		*ap->a_vpp = NFSTOV(np);
/*
 * VNOP_MKDIR for NFSv4: create a directory via nfs4_create_rpc(),
 * returning the new vnode in *a_vpp.
 *
 * NOTE(review): excerpt is incomplete — the function header, the
 * NFDIR type/tag arguments on the nfs4_create_rpc() call, braces, and
 * error branches are missing from this extraction.
 */
	struct vnop_mkdir_args /* {
		struct vnodeop_desc *a_desc;
		struct componentname *a_cnp;
		struct vnode_attr *a_vap;
		vfs_context_t a_context;

	nfsnode_t np = NULL;

	error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
		*ap->a_vpp = NFSTOV(np);
/*
 * VNOP_SYMLINK for NFSv4: create a symbolic link via nfs4_create_rpc()
 * with type NFLNK and the caller-supplied target string, returning the
 * new vnode in *a_vpp.
 *
 * NOTE(review): excerpt is incomplete — the function header, braces,
 * and error branches are missing from this extraction.
 */
	struct vnop_symlink_args /* {
		struct vnodeop_desc *a_desc;
		struct componentname *a_cnp;
		struct vnode_attr *a_vap;
		vfs_context_t a_context;

	nfsnode_t np = NULL;

	error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
		NFLNK, ap->a_target, &np);
		*ap->a_vpp = NFSTOV(np);
/*
 * VNOP_LINK for NFSv4: create a hard link to vp named cnp in directory
 * tdvp.  Flushes dirty pages first, then sends a
 * PUTFH(SOURCE) + SAVEFH + PUTFH(DIR) + LINK + GETATTR(DIR) +
 * RESTOREFH + GETATTR compound and refreshes the cached attributes of
 * both the directory and the linked node from the reply.
 *
 * NOTE(review): excerpt is incomplete — the function header, parts of
 * the args comment, braces, numops bookkeeping, and error/goto
 * branches are missing from this extraction.
 */
	struct vnop_link_args /* {
		struct vnodeop_desc *a_desc;
		struct componentname *a_cnp;
		vfs_context_t a_context;

	vfs_context_t ctx = ap->a_context;
	vnode_t vp = ap->a_vp;
	vnode_t tdvp = ap->a_tdvp;
	struct componentname *cnp = ap->a_cnp;
	int error = 0, lockerror = ENOENT, status;
	struct nfsmount *nmp;
	nfsnode_t np = VTONFS(vp);
	nfsnode_t tdnp = VTONFS(tdvp);
	int nfsvers, numops;
	u_int64_t xid, savedxid;
	struct nfsm_chain nmreq, nmrep;

	/* cross-mount links are not possible */
	if (vnode_mount(vp) != vnode_mount(tdvp))

	nfsvers = nmp->nm_vers;

	/*
	 * Push all writes to the server, so that the attribute cache
	 * doesn't get "out of sync" with the server.
	 * XXX There should be a better way!
	 */
	nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);

	if ((error = nfs_node_set_busy2(tdnp, np, vfs_context_thread(ctx))))

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH(SOURCE), SAVEFH, PUTFH(DIR), LINK, GETATTR(DIR), RESTOREFH, GETATTR
	nfsm_chain_build_alloc_init(error, &nmreq, 29 * NFSX_UNSIGNED + cnp->cn_namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "link", numops);
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
	nfsm_chain_add_32(error, &nmreq, NFS_OP_LINK);
	nfsm_chain_add_string(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen);
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);

	error = nfs_request(tdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock2(tdnp, np))) {

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_LINK);
	nfsm_chain_check_change_info(error, &nmrep, tdnp);
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, NULL, &xid);
		NATTRINVALIDATE(tdnp);
	/* link attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, NULL, &xid);
		NATTRINVALIDATE(np);

	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	tdnp->n_flag |= NMODIFIED;
	/* Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. */
	if (error == EEXIST)
	if (!error && (tdnp->n_flag & NNEGNCENTRIES)) {
		tdnp->n_flag &= ~NNEGNCENTRIES;
		cache_purge_negatives(tdvp);

	nfs_node_unlock2(tdnp, np);
	nfs_node_clear_busy2(tdnp, np);
5399 struct vnop_rmdir_args
/* {
5400 struct vnodeop_desc *a_desc;
5403 struct componentname *a_cnp;
5404 vfs_context_t a_context;
5407 vfs_context_t ctx
= ap
->a_context
;
5408 vnode_t vp
= ap
->a_vp
;
5409 vnode_t dvp
= ap
->a_dvp
;
5410 struct componentname
*cnp
= ap
->a_cnp
;
5412 nfsnode_t np
= VTONFS(vp
);
5413 nfsnode_t dnp
= VTONFS(dvp
);
5414 struct nfs_vattr dnvattr
;
5415 struct nfs_dulookup dul
;
5417 if (vnode_vtype(vp
) != VDIR
)
5420 if ((error
= nfs_node_set_busy2(dnp
, np
, vfs_context_thread(ctx
))))
5423 nfs_dulookup_init(&dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
);
5424 nfs_dulookup_start(&dul
, dnp
, ctx
);
5426 error
= nfs4_remove_rpc(dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
,
5427 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
5429 nfs_name_cache_purge(dnp
, np
, cnp
, ctx
);
5430 /* nfs_getattr() will check changed and purge caches */
5431 nfs_getattr(dnp
, &dnvattr
, ctx
, NGA_CACHED
);
5432 nfs_dulookup_finish(&dul
, dnp
, ctx
);
5433 nfs_node_clear_busy2(dnp
, np
);
5436 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
5438 if (error
== ENOENT
)
5442 * remove nfsnode from hash now so we can't accidentally find it
5443 * again if another object gets created with the same filehandle
5444 * before this vnode gets reclaimed
5446 lck_mtx_lock(nfs_node_hash_mutex
);
5447 if (np
->n_hflag
& NHHASHED
) {
5448 LIST_REMOVE(np
, n_hash
);
5449 np
->n_hflag
&= ~NHHASHED
;
5450 FSDBG(266, 0, np
, np
->n_flag
, 0xb1eb1e);
5452 lck_mtx_unlock(nfs_node_hash_mutex
);