/*
 * Copyright (c) 2006-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29 #include <nfs/nfs_conf.h>
/*
 * vnode op calls for NFS version 4
 */
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/systm.h>
38 #include <sys/resourcevar.h>
39 #include <sys/proc_internal.h>
40 #include <sys/kauth.h>
41 #include <sys/mount_internal.h>
42 #include <sys/malloc.h>
43 #include <sys/kpi_mbuf.h>
45 #include <sys/vnode_internal.h>
46 #include <sys/dirent.h>
47 #include <sys/fcntl.h>
48 #include <sys/lockf.h>
49 #include <sys/ubc_internal.h>
51 #include <sys/signalvar.h>
52 #include <sys/uio_internal.h>
53 #include <sys/xattr.h>
54 #include <sys/paths.h>
56 #include <vfs/vfs_support.h>
61 #include <kern/clock.h>
62 #include <libkern/OSAtomic.h>
64 #include <miscfs/fifofs/fifo.h>
65 #include <miscfs/specfs/specdev.h>
67 #include <nfs/rpcv2.h>
68 #include <nfs/nfsproto.h>
70 #include <nfs/nfsnode.h>
71 #include <nfs/nfs_gss.h>
72 #include <nfs/nfsmount.h>
73 #include <nfs/nfs_lock.h>
74 #include <nfs/xdr_subs.h>
75 #include <nfs/nfsm_subs.h>
78 #include <netinet/in.h>
79 #include <netinet/in_var.h>
80 #include <vm/vm_kern.h>
82 #include <kern/task.h>
83 #include <kern/sched_prim.h>
87 nfs4_access_rpc(nfsnode_t np
, u_int32_t
*access
, int rpcflags
, vfs_context_t ctx
)
89 int error
= 0, lockerror
= ENOENT
, status
, numops
, slot
;
91 struct nfsm_chain nmreq
, nmrep
;
93 uint32_t access_result
= 0, supported
= 0, missing
;
94 struct nfsmount
*nmp
= NFSTONMP(np
);
95 int nfsvers
= nmp
->nm_vers
;
97 struct nfsreq_secinfo_args si
;
99 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
103 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
104 nfsm_chain_null(&nmreq
);
105 nfsm_chain_null(&nmrep
);
107 // PUTFH, ACCESS, GETATTR
109 nfsm_chain_build_alloc_init(error
, &nmreq
, 17 * NFSX_UNSIGNED
);
110 nfsm_chain_add_compound_header(error
, &nmreq
, "access", nmp
->nm_minor_vers
, numops
);
112 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
113 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
115 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_ACCESS
);
116 nfsm_chain_add_32(error
, &nmreq
, *access
);
118 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
119 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
120 nfsm_chain_build_done(error
, &nmreq
);
121 nfsm_assert(error
, (numops
== 0), EPROTO
);
123 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
124 vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
125 &si
, rpcflags
, &nmrep
, &xid
, &status
);
127 if ((lockerror
= nfs_node_lock(np
))) {
130 nfsm_chain_skip_tag(error
, &nmrep
);
131 nfsm_chain_get_32(error
, &nmrep
, numops
);
132 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
133 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_ACCESS
);
134 nfsm_chain_get_32(error
, &nmrep
, supported
);
135 nfsm_chain_get_32(error
, &nmrep
, access_result
);
137 if ((missing
= (*access
& ~supported
))) {
138 /* missing support for something(s) we wanted */
139 if (missing
& NFS_ACCESS_DELETE
) {
141 * If the server doesn't report DELETE (possible
142 * on UNIX systems), we'll assume that it is OK
143 * and just let any subsequent delete action fail
144 * if it really isn't deletable.
146 access_result
|= NFS_ACCESS_DELETE
;
149 /* ".zfs" subdirectories may erroneously give a denied answer for modify/delete */
150 if (nfs_access_dotzfs
) {
151 vnode_t dvp
= NULLVP
;
152 if (np
->n_flag
& NISDOTZFSCHILD
) { /* may be able to create/delete snapshot dirs */
153 access_result
|= (NFS_ACCESS_MODIFY
| NFS_ACCESS_EXTEND
| NFS_ACCESS_DELETE
);
154 } else if (((dvp
= vnode_getparent(NFSTOV(np
))) != NULLVP
) && (VTONFS(dvp
)->n_flag
& NISDOTZFSCHILD
)) {
155 access_result
|= NFS_ACCESS_DELETE
; /* may be able to delete snapshot dirs */
161 /* Some servers report DELETE support but erroneously give a denied answer. */
162 if (nfs_access_delete
&& (*access
& NFS_ACCESS_DELETE
) && !(access_result
& NFS_ACCESS_DELETE
)) {
163 access_result
|= NFS_ACCESS_DELETE
;
165 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
166 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
169 if (nfs_mount_gone(nmp
)) {
174 if (auth_is_kerberized(np
->n_auth
) || auth_is_kerberized(nmp
->nm_auth
)) {
175 uid
= nfs_cred_getasid2uid(vfs_context_ucred(ctx
));
177 uid
= kauth_cred_getuid(vfs_context_ucred(ctx
));
179 slot
= nfs_node_access_slot(np
, uid
, 1);
180 np
->n_accessuid
[slot
] = uid
;
182 np
->n_accessstamp
[slot
] = now
.tv_sec
;
183 np
->n_access
[slot
] = access_result
;
185 /* pass back the access returned with this request */
186 *access
= np
->n_access
[slot
];
191 nfsm_chain_cleanup(&nmreq
);
192 nfsm_chain_cleanup(&nmrep
);
204 struct nfs_vattr
*nvap
,
207 struct nfsmount
*nmp
= mp
? VFSTONFS(mp
) : NFSTONMP(np
);
208 int error
= 0, status
, nfsvers
, numops
, rpcflags
= 0, acls
;
209 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
210 struct nfsm_chain nmreq
, nmrep
;
211 struct nfsreq_secinfo_args si
;
213 if (nfs_mount_gone(nmp
)) {
216 nfsvers
= nmp
->nm_vers
;
217 acls
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_ACL
);
219 if (np
&& (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)) {
220 nfs4_default_attrs_for_referral_trigger(VTONFS(np
->n_parent
), NULL
, 0, nvap
, NULL
);
224 if (flags
& NGA_MONITOR
) { /* vnode monitor requests should be soft */
225 rpcflags
= R_RECOVER
;
228 if (flags
& NGA_SOFT
) { /* Return ETIMEDOUT if server not responding */
232 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
233 nfsm_chain_null(&nmreq
);
234 nfsm_chain_null(&nmrep
);
238 nfsm_chain_build_alloc_init(error
, &nmreq
, 15 * NFSX_UNSIGNED
);
239 nfsm_chain_add_compound_header(error
, &nmreq
, "getattr", nmp
->nm_minor_vers
, numops
);
241 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
242 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, fhp
, fhsize
);
244 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
245 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
246 if ((flags
& NGA_ACL
) && acls
) {
247 NFS_BITMAP_SET(bitmap
, NFS_FATTR_ACL
);
249 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
250 nfsm_chain_build_done(error
, &nmreq
);
251 nfsm_assert(error
, (numops
== 0), EPROTO
);
253 error
= nfs_request2(np
, mp
, &nmreq
, NFSPROC4_COMPOUND
,
254 vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
255 NULL
, rpcflags
, &nmrep
, xidp
, &status
);
257 nfsm_chain_skip_tag(error
, &nmrep
);
258 nfsm_chain_get_32(error
, &nmrep
, numops
);
259 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
260 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
262 error
= nfs4_parsefattr(&nmrep
, NULL
, nvap
, NULL
, NULL
, NULL
);
264 if ((flags
& NGA_ACL
) && acls
&& !NFS_BITMAP_ISSET(nvap
->nva_bitmap
, NFS_FATTR_ACL
)) {
265 /* we asked for the ACL but didn't get one... assume there isn't one */
266 NFS_BITMAP_SET(nvap
->nva_bitmap
, NFS_FATTR_ACL
);
267 nvap
->nva_acl
= NULL
;
270 nfsm_chain_cleanup(&nmreq
);
271 nfsm_chain_cleanup(&nmrep
);
276 nfs4_readlink_rpc(nfsnode_t np
, char *buf
, size_t *buflenp
, vfs_context_t ctx
)
278 struct nfsmount
*nmp
;
279 int error
= 0, lockerror
= ENOENT
, status
, numops
;
282 struct nfsm_chain nmreq
, nmrep
;
283 struct nfsreq_secinfo_args si
;
286 if (nfs_mount_gone(nmp
)) {
289 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
292 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
293 nfsm_chain_null(&nmreq
);
294 nfsm_chain_null(&nmrep
);
296 // PUTFH, GETATTR, READLINK
298 nfsm_chain_build_alloc_init(error
, &nmreq
, 16 * NFSX_UNSIGNED
);
299 nfsm_chain_add_compound_header(error
, &nmreq
, "readlink", nmp
->nm_minor_vers
, numops
);
301 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
302 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
304 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
305 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
307 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READLINK
);
308 nfsm_chain_build_done(error
, &nmreq
);
309 nfsm_assert(error
, (numops
== 0), EPROTO
);
311 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
313 if ((lockerror
= nfs_node_lock(np
))) {
316 nfsm_chain_skip_tag(error
, &nmrep
);
317 nfsm_chain_get_32(error
, &nmrep
, numops
);
318 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
319 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
320 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, &xid
);
321 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READLINK
);
322 nfsm_chain_get_32(error
, &nmrep
, len
);
324 if (len
>= *buflenp
) {
325 if (np
->n_size
&& (np
->n_size
< *buflenp
)) {
331 nfsm_chain_get_opaque(error
, &nmrep
, len
, buf
);
339 nfsm_chain_cleanup(&nmreq
);
340 nfsm_chain_cleanup(&nmrep
);
351 struct nfsreq_cbinfo
*cb
,
352 struct nfsreq
**reqp
)
354 struct nfsmount
*nmp
;
355 int error
= 0, nfsvers
, numops
;
357 struct nfsm_chain nmreq
;
358 struct nfsreq_secinfo_args si
;
361 if (nfs_mount_gone(nmp
)) {
364 nfsvers
= nmp
->nm_vers
;
365 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
369 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
370 nfsm_chain_null(&nmreq
);
374 nfsm_chain_build_alloc_init(error
, &nmreq
, 22 * NFSX_UNSIGNED
);
375 nfsm_chain_add_compound_header(error
, &nmreq
, "read", nmp
->nm_minor_vers
, numops
);
377 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
378 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
380 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READ
);
381 nfs_get_stateid(np
, thd
, cred
, &stateid
);
382 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
383 nfsm_chain_add_64(error
, &nmreq
, offset
);
384 nfsm_chain_add_32(error
, &nmreq
, len
);
385 nfsm_chain_build_done(error
, &nmreq
);
386 nfsm_assert(error
, (numops
== 0), EPROTO
);
388 error
= nfs_request_async(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, 0, cb
, reqp
);
390 nfsm_chain_cleanup(&nmreq
);
395 nfs4_read_rpc_async_finish(
402 struct nfsmount
*nmp
;
403 int error
= 0, lockerror
, nfsvers
, numops
, status
, eof
= 0;
406 struct nfsm_chain nmrep
;
409 if (nfs_mount_gone(nmp
)) {
410 nfs_request_async_cancel(req
);
413 nfsvers
= nmp
->nm_vers
;
415 nfsm_chain_null(&nmrep
);
417 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
418 if (error
== EINPROGRESS
) { /* async request restarted */
422 if ((lockerror
= nfs_node_lock(np
))) {
425 nfsm_chain_skip_tag(error
, &nmrep
);
426 nfsm_chain_get_32(error
, &nmrep
, numops
);
427 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
428 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READ
);
429 nfsm_chain_get_32(error
, &nmrep
, eof
);
430 nfsm_chain_get_32(error
, &nmrep
, retlen
);
432 *lenp
= MIN(retlen
, *lenp
);
433 error
= nfsm_chain_get_uio(&nmrep
, *lenp
, uio
);
439 if (!eof
&& !retlen
) {
444 nfsm_chain_cleanup(&nmrep
);
445 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) {
446 microuptime(&np
->n_lastio
);
452 nfs4_write_rpc_async(
459 struct nfsreq_cbinfo
*cb
,
460 struct nfsreq
**reqp
)
462 struct nfsmount
*nmp
;
464 int error
= 0, nfsvers
, numops
;
466 struct nfsm_chain nmreq
;
467 struct nfsreq_secinfo_args si
;
470 if (nfs_mount_gone(nmp
)) {
473 nfsvers
= nmp
->nm_vers
;
474 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
478 /* for async mounts, don't bother sending sync write requests */
479 if ((iomode
!= NFS_WRITE_UNSTABLE
) && nfs_allow_async
&&
480 ((mp
= NFSTOMP(np
))) && (vfs_flags(mp
) & MNT_ASYNC
)) {
481 iomode
= NFS_WRITE_UNSTABLE
;
484 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
485 nfsm_chain_null(&nmreq
);
487 // PUTFH, WRITE, GETATTR
489 nfsm_chain_build_alloc_init(error
, &nmreq
, 25 * NFSX_UNSIGNED
+ len
);
490 nfsm_chain_add_compound_header(error
, &nmreq
, "write", nmp
->nm_minor_vers
, numops
);
492 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
493 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
495 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_WRITE
);
496 nfs_get_stateid(np
, thd
, cred
, &stateid
);
497 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
498 nfsm_chain_add_64(error
, &nmreq
, uio_offset(uio
));
499 nfsm_chain_add_32(error
, &nmreq
, iomode
);
500 nfsm_chain_add_32(error
, &nmreq
, len
);
502 error
= nfsm_chain_add_uio(&nmreq
, uio
, len
);
505 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
506 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs4_getattr_write_bitmap
, nmp
, np
);
507 nfsm_chain_build_done(error
, &nmreq
);
508 nfsm_assert(error
, (numops
== 0), EPROTO
);
511 error
= nfs_request_async(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, 0, cb
, reqp
);
513 nfsm_chain_cleanup(&nmreq
);
518 nfs4_write_rpc_async_finish(
525 struct nfsmount
*nmp
;
526 int error
= 0, lockerror
= ENOENT
, nfsvers
, numops
, status
;
527 int committed
= NFS_WRITE_FILESYNC
;
529 u_int64_t xid
, wverf
;
531 struct nfsm_chain nmrep
;
534 if (nfs_mount_gone(nmp
)) {
535 nfs_request_async_cancel(req
);
538 nfsvers
= nmp
->nm_vers
;
540 nfsm_chain_null(&nmrep
);
542 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
543 if (error
== EINPROGRESS
) { /* async request restarted */
547 if (nfs_mount_gone(nmp
)) {
550 if (!error
&& (lockerror
= nfs_node_lock(np
))) {
553 nfsm_chain_skip_tag(error
, &nmrep
);
554 nfsm_chain_get_32(error
, &nmrep
, numops
);
555 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
556 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_WRITE
);
557 nfsm_chain_get_32(error
, &nmrep
, rlen
);
563 nfsm_chain_get_32(error
, &nmrep
, committed
);
564 nfsm_chain_get_64(error
, &nmrep
, wverf
);
569 lck_mtx_lock(&nmp
->nm_lock
);
570 if (!(nmp
->nm_state
& NFSSTA_HASWRITEVERF
)) {
571 nmp
->nm_verf
= wverf
;
572 nmp
->nm_state
|= NFSSTA_HASWRITEVERF
;
573 } else if (nmp
->nm_verf
!= wverf
) {
574 nmp
->nm_verf
= wverf
;
576 lck_mtx_unlock(&nmp
->nm_lock
);
577 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
580 * NFSv4 WRITE RPCs contain partial GETATTR requests - only type, change, size, metadatatime and modifytime are requested.
581 * In such cases, we do not update the time stamp - but the requested attributes.
583 np
->n_vattr
.nva_flags
|= NFS_FFLAG_PARTIAL_WRITE
;
584 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
585 np
->n_vattr
.nva_flags
&= ~NFS_FFLAG_PARTIAL_WRITE
;
591 nfsm_chain_cleanup(&nmrep
);
592 if ((committed
!= NFS_WRITE_FILESYNC
) && nfs_allow_async
&&
593 ((mp
= NFSTOMP(np
))) && (vfs_flags(mp
) & MNT_ASYNC
)) {
594 committed
= NFS_WRITE_FILESYNC
;
596 *iomodep
= committed
;
597 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) {
598 microuptime(&np
->n_lastio
);
611 int error
= 0, lockerror
= ENOENT
, remove_error
= 0, status
;
612 struct nfsmount
*nmp
;
615 struct nfsm_chain nmreq
, nmrep
;
616 struct nfsreq_secinfo_args si
;
619 if (nfs_mount_gone(nmp
)) {
622 nfsvers
= nmp
->nm_vers
;
623 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
626 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
628 nfsm_chain_null(&nmreq
);
629 nfsm_chain_null(&nmrep
);
631 // PUTFH, REMOVE, GETATTR
633 nfsm_chain_build_alloc_init(error
, &nmreq
, 17 * NFSX_UNSIGNED
+ namelen
);
634 nfsm_chain_add_compound_header(error
, &nmreq
, "remove", nmp
->nm_minor_vers
, numops
);
636 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
637 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
639 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_REMOVE
);
640 nfsm_chain_add_name(error
, &nmreq
, name
, namelen
, nmp
);
642 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
643 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
644 nfsm_chain_build_done(error
, &nmreq
);
645 nfsm_assert(error
, (numops
== 0), EPROTO
);
648 error
= nfs_request2(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, 0, &nmrep
, &xid
, &status
);
650 if ((lockerror
= nfs_node_lock(dnp
))) {
653 nfsm_chain_skip_tag(error
, &nmrep
);
654 nfsm_chain_get_32(error
, &nmrep
, numops
);
655 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
656 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_REMOVE
);
657 remove_error
= error
;
658 nfsm_chain_check_change_info(error
, &nmrep
, dnp
);
659 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
660 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
661 if (error
&& !lockerror
) {
662 NATTRINVALIDATE(dnp
);
665 nfsm_chain_cleanup(&nmreq
);
666 nfsm_chain_cleanup(&nmrep
);
669 dnp
->n_flag
|= NMODIFIED
;
670 nfs_node_unlock(dnp
);
672 if (error
== NFSERR_GRACE
) {
673 tsleep(&nmp
->nm_state
, (PZERO
- 1), "nfsgrace", 2 * hz
);
690 int error
= 0, lockerror
= ENOENT
, status
, nfsvers
, numops
;
691 struct nfsmount
*nmp
;
692 u_int64_t xid
, savedxid
;
693 struct nfsm_chain nmreq
, nmrep
;
694 struct nfsreq_secinfo_args si
;
696 nmp
= NFSTONMP(fdnp
);
697 if (nfs_mount_gone(nmp
)) {
700 nfsvers
= nmp
->nm_vers
;
701 if (fdnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
704 if (tdnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
708 NFSREQ_SECINFO_SET(&si
, fdnp
, NULL
, 0, NULL
, 0);
709 nfsm_chain_null(&nmreq
);
710 nfsm_chain_null(&nmrep
);
712 // PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM)
714 nfsm_chain_build_alloc_init(error
, &nmreq
, 30 * NFSX_UNSIGNED
+ fnamelen
+ tnamelen
);
715 nfsm_chain_add_compound_header(error
, &nmreq
, "rename", nmp
->nm_minor_vers
, numops
);
717 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
718 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, fdnp
->n_fhp
, fdnp
->n_fhsize
);
720 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
722 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
723 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, tdnp
->n_fhp
, tdnp
->n_fhsize
);
725 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RENAME
);
726 nfsm_chain_add_name(error
, &nmreq
, fnameptr
, fnamelen
, nmp
);
727 nfsm_chain_add_name(error
, &nmreq
, tnameptr
, tnamelen
, nmp
);
729 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
730 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, tdnp
);
732 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
734 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
735 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, fdnp
);
736 nfsm_chain_build_done(error
, &nmreq
);
737 nfsm_assert(error
, (numops
== 0), EPROTO
);
740 error
= nfs_request(fdnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
742 if ((lockerror
= nfs_node_lock2(fdnp
, tdnp
))) {
745 nfsm_chain_skip_tag(error
, &nmrep
);
746 nfsm_chain_get_32(error
, &nmrep
, numops
);
747 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
748 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
749 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
750 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RENAME
);
751 nfsm_chain_check_change_info(error
, &nmrep
, fdnp
);
752 nfsm_chain_check_change_info(error
, &nmrep
, tdnp
);
753 /* directory attributes: if we don't get them, make sure to invalidate */
754 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
756 nfsm_chain_loadattr(error
, &nmrep
, tdnp
, nfsvers
, &xid
);
757 if (error
&& !lockerror
) {
758 NATTRINVALIDATE(tdnp
);
760 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
761 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
763 nfsm_chain_loadattr(error
, &nmrep
, fdnp
, nfsvers
, &xid
);
764 if (error
&& !lockerror
) {
765 NATTRINVALIDATE(fdnp
);
768 nfsm_chain_cleanup(&nmreq
);
769 nfsm_chain_cleanup(&nmrep
);
771 fdnp
->n_flag
|= NMODIFIED
;
772 tdnp
->n_flag
|= NMODIFIED
;
773 nfs_node_unlock2(fdnp
, tdnp
);
779 * NFS V4 readdir RPC.
782 nfs4_readdir_rpc(nfsnode_t dnp
, struct nfsbuf
*bp
, vfs_context_t ctx
)
784 struct nfsmount
*nmp
;
785 int error
= 0, lockerror
, nfsvers
, namedattr
, rdirplus
, bigcookies
, numops
;
786 int i
, status
, more_entries
= 1, eof
, bp_dropped
= 0;
787 uint16_t namlen
, reclen
;
788 uint32_t nmreaddirsize
, nmrsize
;
789 uint32_t namlen32
, skiplen
, fhlen
, xlen
, attrlen
;
790 uint64_t padlen
, cookie
, lastcookie
, xid
, savedxid
, space_free
, space_needed
;
791 struct nfsm_chain nmreq
, nmrep
, nmrepsave
;
793 struct nfs_vattr
*nvattr
, *nvattrp
;
794 struct nfs_dir_buf_header
*ndbhp
;
798 uint32_t entry_attrs
[NFS_ATTR_BITMAP_LEN
];
800 struct nfsreq_secinfo_args si
;
803 if (nfs_mount_gone(nmp
)) {
806 nfsvers
= nmp
->nm_vers
;
807 nmreaddirsize
= nmp
->nm_readdirsize
;
808 nmrsize
= nmp
->nm_rsize
;
809 bigcookies
= nmp
->nm_state
& NFSSTA_BIGCOOKIES
;
810 namedattr
= (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) ? 1 : 0;
811 rdirplus
= (NMFLAG(nmp
, RDIRPLUS
) || namedattr
) ? 1 : 0;
812 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
815 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
818 * Set up attribute request for entries.
819 * For READDIRPLUS functionality, get everything.
820 * Otherwise, just get what we need for struct direntry.
824 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, entry_attrs
);
825 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_FILEHANDLE
);
828 NFS_CLEAR_ATTRIBUTES(entry_attrs
);
829 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_TYPE
);
830 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_FILEID
);
831 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_MOUNTED_ON_FILEID
);
833 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_RDATTR_ERROR
);
835 /* lock to protect access to cookie verifier */
836 if ((lockerror
= nfs_node_lock(dnp
))) {
840 fh
= zalloc(nfs_fhandle_zone
);
841 MALLOC(nvattr
, struct nfs_vattr
*, sizeof(*nvattr
), M_TEMP
, M_WAITOK
);
843 /* determine cookie to use, and move dp to the right offset */
844 ndbhp
= (struct nfs_dir_buf_header
*)bp
->nb_data
;
845 dp
= NFS_DIR_BUF_FIRST_DIRENTRY(bp
);
846 if (ndbhp
->ndbh_count
) {
847 for (i
= 0; i
< ndbhp
->ndbh_count
- 1; i
++) {
848 dp
= NFS_DIRENTRY_NEXT(dp
);
850 cookie
= dp
->d_seekoff
;
851 dp
= NFS_DIRENTRY_NEXT(dp
);
853 cookie
= bp
->nb_lblkno
;
854 /* increment with every buffer read */
855 OSAddAtomic64(1, &nfsstats
.readdir_bios
);
860 * The NFS client is responsible for the "." and ".." entries in the
861 * directory. So, we put them at the start of the first buffer.
862 * Don't bother for attribute directories.
864 if (((bp
->nb_lblkno
== 0) && (ndbhp
->ndbh_count
== 0)) &&
865 !(dnp
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
867 fhlen
= rdirplus
? fh
->fh_len
+ 1 : 0;
868 xlen
= rdirplus
? (fhlen
+ sizeof(time_t)) : 0;
871 reclen
= NFS_DIRENTRY_LEN_16(namlen
+ xlen
);
873 bzero(&dp
->d_name
[namlen
+ 1], xlen
);
875 dp
->d_namlen
= namlen
;
876 strlcpy(dp
->d_name
, ".", namlen
+ 1);
877 dp
->d_fileno
= dnp
->n_vattr
.nva_fileid
;
879 dp
->d_reclen
= reclen
;
881 padstart
= dp
->d_name
+ dp
->d_namlen
+ 1 + xlen
;
882 dp
= NFS_DIRENTRY_NEXT(dp
);
883 padlen
= (char*)dp
- padstart
;
885 bzero(padstart
, padlen
);
887 if (rdirplus
) { /* zero out attributes */
888 bzero(NFS_DIR_BUF_NVATTR(bp
, 0), sizeof(struct nfs_vattr
));
893 reclen
= NFS_DIRENTRY_LEN_16(namlen
+ xlen
);
895 bzero(&dp
->d_name
[namlen
+ 1], xlen
);
897 dp
->d_namlen
= namlen
;
898 strlcpy(dp
->d_name
, "..", namlen
+ 1);
900 dp
->d_fileno
= VTONFS(dnp
->n_parent
)->n_vattr
.nva_fileid
;
902 dp
->d_fileno
= dnp
->n_vattr
.nva_fileid
;
905 dp
->d_reclen
= reclen
;
907 padstart
= dp
->d_name
+ dp
->d_namlen
+ 1 + xlen
;
908 dp
= NFS_DIRENTRY_NEXT(dp
);
909 padlen
= (char*)dp
- padstart
;
911 bzero(padstart
, padlen
);
913 if (rdirplus
) { /* zero out attributes */
914 bzero(NFS_DIR_BUF_NVATTR(bp
, 1), sizeof(struct nfs_vattr
));
917 ndbhp
->ndbh_entry_end
= (char*)dp
- bp
->nb_data
;
918 ndbhp
->ndbh_count
= 2;
922 * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
923 * the buffer is full (or we hit EOF). Then put the remainder of the
924 * results in the next buffer(s).
926 nfsm_chain_null(&nmreq
);
927 nfsm_chain_null(&nmrep
);
928 while (nfs_dir_buf_freespace(bp
, rdirplus
) && !(ndbhp
->ndbh_flags
& NDB_FULL
)) {
929 // PUTFH, GETATTR, READDIR
931 nfsm_chain_build_alloc_init(error
, &nmreq
, 26 * NFSX_UNSIGNED
);
932 nfsm_chain_add_compound_header(error
, &nmreq
, tag
, nmp
->nm_minor_vers
, numops
);
934 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
935 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
937 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
938 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
940 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READDIR
);
941 nfsm_chain_add_64(error
, &nmreq
, (cookie
<= 2) ? 0 : cookie
);
942 nfsm_chain_add_64(error
, &nmreq
, dnp
->n_cookieverf
);
943 nfsm_chain_add_32(error
, &nmreq
, nmreaddirsize
);
944 nfsm_chain_add_32(error
, &nmreq
, nmrsize
);
945 nfsm_chain_add_bitmap_supported(error
, &nmreq
, entry_attrs
, nmp
, dnp
);
946 nfsm_chain_build_done(error
, &nmreq
);
947 nfsm_assert(error
, (numops
== 0), EPROTO
);
948 nfs_node_unlock(dnp
);
950 error
= nfs_request(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
952 if ((lockerror
= nfs_node_lock(dnp
))) {
957 nfsm_chain_skip_tag(error
, &nmrep
);
958 nfsm_chain_get_32(error
, &nmrep
, numops
);
959 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
960 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
961 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
962 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READDIR
);
963 nfsm_chain_get_64(error
, &nmrep
, dnp
->n_cookieverf
);
964 nfsm_chain_get_32(error
, &nmrep
, more_entries
);
967 nfs_node_unlock(dnp
);
974 if (lastcookie
== 0) {
975 dnp
->n_rdirplusstamp_sof
= now
.tv_sec
;
976 dnp
->n_rdirplusstamp_eof
= 0;
980 /* loop through the entries packing them into the buffer */
981 while (more_entries
) {
982 /* Entry: COOKIE, NAME, FATTR */
983 nfsm_chain_get_64(error
, &nmrep
, cookie
);
984 nfsm_chain_get_32(error
, &nmrep
, namlen32
);
985 if (namlen32
> UINT16_MAX
) {
989 namlen
= (uint16_t)namlen32
;
991 if (!bigcookies
&& (cookie
>> 32) && (nmp
== NFSTONMP(dnp
))) {
992 /* we've got a big cookie, make sure flag is set */
993 lck_mtx_lock(&nmp
->nm_lock
);
994 nmp
->nm_state
|= NFSSTA_BIGCOOKIES
;
995 lck_mtx_unlock(&nmp
->nm_lock
);
998 /* just truncate names that don't fit in direntry.d_name */
1003 if (namlen
> (sizeof(dp
->d_name
) - 1)) {
1004 skiplen
= namlen
- sizeof(dp
->d_name
) + 1;
1005 namlen
= sizeof(dp
->d_name
) - 1;
1009 /* guess that fh size will be same as parent */
1010 fhlen
= rdirplus
? (1 + dnp
->n_fhsize
) : 0;
1011 xlen
= rdirplus
? (fhlen
+ sizeof(time_t)) : 0;
1012 attrlen
= rdirplus
? sizeof(struct nfs_vattr
) : 0;
1013 reclen
= NFS_DIRENTRY_LEN_16(namlen
+ xlen
);
1014 space_needed
= reclen
+ attrlen
;
1015 space_free
= nfs_dir_buf_freespace(bp
, rdirplus
);
1016 if (space_needed
> space_free
) {
1018 * We still have entries to pack, but we've
1019 * run out of room in the current buffer.
1020 * So we need to move to the next buffer.
1021 * The block# for the next buffer is the
1022 * last cookie in the current buffer.
1025 ndbhp
->ndbh_flags
|= NDB_FULL
;
1026 nfs_buf_release(bp
, 0);
1029 error
= nfs_buf_get(dnp
, lastcookie
, NFS_DIRBLKSIZ
, vfs_context_thread(ctx
), NBLK_READ
, &bp
);
1031 /* initialize buffer */
1032 ndbhp
= (struct nfs_dir_buf_header
*)bp
->nb_data
;
1033 ndbhp
->ndbh_flags
= 0;
1034 ndbhp
->ndbh_count
= 0;
1035 ndbhp
->ndbh_entry_end
= sizeof(*ndbhp
);
1036 ndbhp
->ndbh_ncgen
= dnp
->n_ncgen
;
1037 space_free
= nfs_dir_buf_freespace(bp
, rdirplus
);
1038 dp
= NFS_DIR_BUF_FIRST_DIRENTRY(bp
);
1039 /* increment with every buffer read */
1040 OSAddAtomic64(1, &nfsstats
.readdir_bios
);
1043 dp
->d_fileno
= cookie
; /* placeholder */
1044 dp
->d_seekoff
= cookie
;
1045 dp
->d_namlen
= namlen
;
1046 dp
->d_reclen
= reclen
;
1047 dp
->d_type
= DT_UNKNOWN
;
1048 nfsm_chain_get_opaque(error
, &nmrep
, namlen
, dp
->d_name
);
1050 dp
->d_name
[namlen
] = '\0';
1052 nfsm_chain_adv(error
, &nmrep
,
1053 nfsm_rndup(namlen
+ skiplen
) - nfsm_rndup(namlen
));
1056 nvattrp
= rdirplus
? NFS_DIR_BUF_NVATTR(bp
, ndbhp
->ndbh_count
) : nvattr
;
1057 error
= nfs4_parsefattr(&nmrep
, NULL
, nvattrp
, fh
, NULL
, NULL
);
1058 if (!error
&& NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_ACL
)) {
1059 /* we do NOT want ACLs returned to us here */
1060 NFS_BITMAP_CLR(nvattrp
->nva_bitmap
, NFS_FATTR_ACL
);
1061 if (nvattrp
->nva_acl
) {
1062 kauth_acl_free(nvattrp
->nva_acl
);
1063 nvattrp
->nva_acl
= NULL
;
1066 if (error
&& NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_RDATTR_ERROR
)) {
1067 /* OK, we may not have gotten all of the attributes but we will use what we can. */
1068 if ((error
== NFSERR_MOVED
) || (error
== NFSERR_INVAL
)) {
1069 /* set this up to look like a referral trigger */
1070 nfs4_default_attrs_for_referral_trigger(dnp
, dp
->d_name
, namlen
, nvattrp
, fh
);
1074 /* check for more entries after this one */
1075 nfsm_chain_get_32(error
, &nmrep
, more_entries
);
1078 /* Skip any "." and ".." entries returned from server. */
1079 /* Also skip any bothersome named attribute entries. */
1080 if (((dp
->d_name
[0] == '.') && ((namlen
== 1) || ((namlen
== 2) && (dp
->d_name
[1] == '.')))) ||
1081 (namedattr
&& (namlen
== 11) && (!strcmp(dp
->d_name
, "SUNWattr_ro") || !strcmp(dp
->d_name
, "SUNWattr_rw")))) {
1082 lastcookie
= cookie
;
1086 if (NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_TYPE
)) {
1087 dp
->d_type
= IFTODT(VTTOIF(nvattrp
->nva_type
));
1089 if (NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_FILEID
)) {
1090 dp
->d_fileno
= nvattrp
->nva_fileid
;
1093 /* fileid is already in d_fileno, so stash xid in attrs */
1094 nvattrp
->nva_fileid
= savedxid
;
1095 nvattrp
->nva_flags
|= NFS_FFLAG_FILEID_CONTAINS_XID
;
1096 if (NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
1097 fhlen
= fh
->fh_len
+ 1;
1098 xlen
= fhlen
+ sizeof(time_t);
1099 reclen
= NFS_DIRENTRY_LEN_16(namlen
+ xlen
);
1100 space_needed
= reclen
+ attrlen
;
1101 if (space_needed
> space_free
) {
1102 /* didn't actually have the room... move on to next buffer */
1106 /* pack the file handle into the record */
1107 dp
->d_name
[dp
->d_namlen
+ 1] = (unsigned char)fh
->fh_len
; /* No truncation because fh_len's value is checked during nfs4_parsefattr() */
1108 bcopy(fh
->fh_data
, &dp
->d_name
[dp
->d_namlen
+ 2], fh
->fh_len
);
1110 /* mark the file handle invalid */
1112 fhlen
= fh
->fh_len
+ 1;
1113 xlen
= fhlen
+ sizeof(time_t);
1114 reclen
= NFS_DIRENTRY_LEN_16(namlen
+ xlen
);
1115 bzero(&dp
->d_name
[dp
->d_namlen
+ 1], fhlen
);
1117 *(time_t*)(&dp
->d_name
[dp
->d_namlen
+ 1 + fhlen
]) = now
.tv_sec
;
1118 dp
->d_reclen
= reclen
;
1119 nfs_rdirplus_update_node_attrs(dnp
, dp
, fh
, nvattrp
, &savedxid
);
1121 padstart
= dp
->d_name
+ dp
->d_namlen
+ 1 + xlen
;
1122 ndbhp
->ndbh_count
++;
1123 lastcookie
= cookie
;
1125 /* advance to next direntry in buffer */
1126 dp
= NFS_DIRENTRY_NEXT(dp
);
1127 ndbhp
->ndbh_entry_end
= (char*)dp
- bp
->nb_data
;
1128 /* zero out the pad bytes */
1129 padlen
= (char*)dp
- padstart
;
1131 bzero(padstart
, padlen
);
1134 /* Finally, get the eof boolean */
1135 nfsm_chain_get_32(error
, &nmrep
, eof
);
1138 ndbhp
->ndbh_flags
|= (NDB_FULL
| NDB_EOF
);
1139 nfs_node_lock_force(dnp
);
1140 dnp
->n_eofcookie
= lastcookie
;
1142 dnp
->n_rdirplusstamp_eof
= now
.tv_sec
;
1144 nfs_node_unlock(dnp
);
1149 nfs_buf_release(bp
, 0);
1153 if ((lockerror
= nfs_node_lock(dnp
))) {
1157 nfsm_chain_cleanup(&nmrep
);
1158 nfsm_chain_null(&nmreq
);
1161 if (bp_dropped
&& bp
) {
1162 nfs_buf_release(bp
, 0);
1165 nfs_node_unlock(dnp
);
1167 nfsm_chain_cleanup(&nmreq
);
1168 nfsm_chain_cleanup(&nmrep
);
1169 NFS_ZFREE(nfs_fhandle_zone
, fh
);
1170 FREE(nvattr
, M_TEMP
);
1171 return bp_dropped
? NFSERR_DIRBUFDROPPED
: error
;
1175 nfs4_lookup_rpc_async(
1180 struct nfsreq
**reqp
)
1182 int error
= 0, isdotdot
= 0, nfsvers
, numops
;
1183 struct nfsm_chain nmreq
;
1184 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
1185 struct nfsmount
*nmp
;
1186 struct nfsreq_secinfo_args si
;
1188 nmp
= NFSTONMP(dnp
);
1189 if (nfs_mount_gone(nmp
)) {
1192 nfsvers
= nmp
->nm_vers
;
1193 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
1197 if ((name
[0] == '.') && (name
[1] == '.') && (namelen
== 2)) {
1199 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
1201 NFSREQ_SECINFO_SET(&si
, dnp
, dnp
->n_fhp
, dnp
->n_fhsize
, name
, namelen
);
1204 nfsm_chain_null(&nmreq
);
1206 // PUTFH, GETATTR, LOOKUP(P), GETFH, GETATTR (FH)
1208 nfsm_chain_build_alloc_init(error
, &nmreq
, 20 * NFSX_UNSIGNED
+ namelen
);
1209 nfsm_chain_add_compound_header(error
, &nmreq
, "lookup", nmp
->nm_minor_vers
, numops
);
1211 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1212 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
1214 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1215 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
1218 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOOKUPP
);
1220 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOOKUP
);
1221 nfsm_chain_add_name(error
, &nmreq
, name
, namelen
, nmp
);
1224 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETFH
);
1226 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1227 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
1228 /* some ".zfs" directories can't handle being asked for some attributes */
1229 if ((dnp
->n_flag
& NISDOTZFS
) && !isdotdot
) {
1230 NFS_BITMAP_CLR(bitmap
, NFS_FATTR_NAMED_ATTR
);
1232 if ((dnp
->n_flag
& NISDOTZFSCHILD
) && isdotdot
) {
1233 NFS_BITMAP_CLR(bitmap
, NFS_FATTR_NAMED_ATTR
);
1235 if (((namelen
== 4) && (name
[0] == '.') && (name
[1] == 'z') && (name
[2] == 'f') && (name
[3] == 's'))) {
1236 NFS_BITMAP_CLR(bitmap
, NFS_FATTR_NAMED_ATTR
);
1238 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, NULL
);
1239 nfsm_chain_build_done(error
, &nmreq
);
1240 nfsm_assert(error
, (numops
== 0), EPROTO
);
1242 error
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
1243 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &si
, 0, NULL
, reqp
);
1245 nfsm_chain_cleanup(&nmreq
);
1251 nfs4_lookup_rpc_async_finish(
1259 struct nfs_vattr
*nvap
)
1261 int error
= 0, lockerror
= ENOENT
, status
, nfsvers
, numops
, isdotdot
= 0;
1262 uint32_t op
= NFS_OP_LOOKUP
;
1264 struct nfsmount
*nmp
;
1265 struct nfsm_chain nmrep
;
1267 nmp
= NFSTONMP(dnp
);
1271 nfsvers
= nmp
->nm_vers
;
1272 if ((name
[0] == '.') && (name
[1] == '.') && (namelen
== 2)) {
1276 nfsm_chain_null(&nmrep
);
1278 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
1280 if ((lockerror
= nfs_node_lock(dnp
))) {
1283 nfsm_chain_skip_tag(error
, &nmrep
);
1284 nfsm_chain_get_32(error
, &nmrep
, numops
);
1285 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1286 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1290 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
1292 nfsm_chain_op_check(error
, &nmrep
, (isdotdot
? NFS_OP_LOOKUPP
: NFS_OP_LOOKUP
));
1293 nfsmout_if(error
|| !fhp
|| !nvap
);
1294 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETFH
);
1295 nfsm_chain_get_32(error
, &nmrep
, fhp
->fh_len
);
1296 if (error
== 0 && fhp
->fh_len
> sizeof(fhp
->fh_data
)) {
1300 nfsm_chain_get_opaque(error
, &nmrep
, fhp
->fh_len
, fhp
->fh_data
);
1301 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1302 if ((error
== NFSERR_MOVED
) || (error
== NFSERR_INVAL
)) {
1303 /* set this up to look like a referral trigger */
1304 nfs4_default_attrs_for_referral_trigger(dnp
, name
, namelen
, nvap
, fhp
);
1308 error
= nfs4_parsefattr(&nmrep
, NULL
, nvap
, NULL
, NULL
, NULL
);
1312 nfs_node_unlock(dnp
);
1314 nfsm_chain_cleanup(&nmrep
);
1315 if (!error
&& (op
== NFS_OP_LOOKUP
) && (nmp
->nm_state
& NFSSTA_NEEDSECINFO
)) {
1316 /* We still need to get SECINFO to set default for mount. */
1317 /* Do so for the first LOOKUP that returns successfully. */
1320 sec
.count
= NX_MAX_SEC_FLAVORS
;
1321 error
= nfs4_secinfo_rpc(nmp
, &req
->r_secinfo
, vfs_context_ucred(ctx
), sec
.flavors
, &sec
.count
);
1322 /* [sigh] some implementations return "illegal" error for unsupported ops */
1323 if (error
== NFSERR_OP_ILLEGAL
) {
1327 /* set our default security flavor to the first in the list */
1328 lck_mtx_lock(&nmp
->nm_lock
);
1330 nmp
->nm_auth
= sec
.flavors
[0];
1332 nmp
->nm_state
&= ~NFSSTA_NEEDSECINFO
;
1333 lck_mtx_unlock(&nmp
->nm_lock
);
1347 struct nfsmount
*nmp
;
1348 int error
= 0, lockerror
, status
, nfsvers
, numops
;
1349 u_int64_t xid
, newwverf
;
1351 struct nfsm_chain nmreq
, nmrep
;
1352 struct nfsreq_secinfo_args si
;
1355 FSDBG(521, np
, offset
, count
, nmp
? nmp
->nm_state
: 0);
1356 if (nfs_mount_gone(nmp
)) {
1359 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
1362 if (!(nmp
->nm_state
& NFSSTA_HASWRITEVERF
)) {
1365 nfsvers
= nmp
->nm_vers
;
1366 count32
= count
> UINT32_MAX
? 0 : (uint32_t)count
;
1368 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
1369 nfsm_chain_null(&nmreq
);
1370 nfsm_chain_null(&nmrep
);
1372 // PUTFH, COMMIT, GETATTR
1374 nfsm_chain_build_alloc_init(error
, &nmreq
, 19 * NFSX_UNSIGNED
);
1375 nfsm_chain_add_compound_header(error
, &nmreq
, "commit", nmp
->nm_minor_vers
, numops
);
1377 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1378 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
1380 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_COMMIT
);
1381 nfsm_chain_add_64(error
, &nmreq
, offset
);
1382 nfsm_chain_add_32(error
, &nmreq
, count32
);
1384 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1385 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
1386 nfsm_chain_build_done(error
, &nmreq
);
1387 nfsm_assert(error
, (numops
== 0), EPROTO
);
1389 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
1390 current_thread(), cred
, &si
, 0, &nmrep
, &xid
, &status
);
1392 if ((lockerror
= nfs_node_lock(np
))) {
1395 nfsm_chain_skip_tag(error
, &nmrep
);
1396 nfsm_chain_get_32(error
, &nmrep
, numops
);
1397 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1398 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_COMMIT
);
1399 nfsm_chain_get_64(error
, &nmrep
, newwverf
);
1400 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1401 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
1403 nfs_node_unlock(np
);
1406 lck_mtx_lock(&nmp
->nm_lock
);
1407 if (nmp
->nm_verf
!= newwverf
) {
1408 nmp
->nm_verf
= newwverf
;
1410 if (wverf
!= newwverf
) {
1411 error
= NFSERR_STALEWRITEVERF
;
1413 lck_mtx_unlock(&nmp
->nm_lock
);
1415 nfsm_chain_cleanup(&nmreq
);
1416 nfsm_chain_cleanup(&nmrep
);
1423 struct nfs_fsattr
*nfsap
,
1427 int error
= 0, lockerror
, status
, nfsvers
, numops
;
1428 struct nfsm_chain nmreq
, nmrep
;
1429 struct nfsmount
*nmp
= NFSTONMP(np
);
1430 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
1431 struct nfs_vattr
*nvattr
;
1432 struct nfsreq_secinfo_args si
;
1434 if (nfs_mount_gone(nmp
)) {
1437 nfsvers
= nmp
->nm_vers
;
1438 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
1442 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
1443 MALLOC(nvattr
, struct nfs_vattr
*, sizeof(*nvattr
), M_TEMP
, M_WAITOK
);
1444 NVATTR_INIT(nvattr
);
1445 nfsm_chain_null(&nmreq
);
1446 nfsm_chain_null(&nmrep
);
1448 /* NFSv4: fetch "pathconf" info for this node */
1451 nfsm_chain_build_alloc_init(error
, &nmreq
, 16 * NFSX_UNSIGNED
);
1452 nfsm_chain_add_compound_header(error
, &nmreq
, "pathconf", nmp
->nm_minor_vers
, numops
);
1454 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1455 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
1457 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1458 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
1459 NFS_BITMAP_SET(bitmap
, NFS_FATTR_MAXLINK
);
1460 NFS_BITMAP_SET(bitmap
, NFS_FATTR_MAXNAME
);
1461 NFS_BITMAP_SET(bitmap
, NFS_FATTR_NO_TRUNC
);
1462 NFS_BITMAP_SET(bitmap
, NFS_FATTR_CHOWN_RESTRICTED
);
1463 NFS_BITMAP_SET(bitmap
, NFS_FATTR_CASE_INSENSITIVE
);
1464 NFS_BITMAP_SET(bitmap
, NFS_FATTR_CASE_PRESERVING
);
1465 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
1466 nfsm_chain_build_done(error
, &nmreq
);
1467 nfsm_assert(error
, (numops
== 0), EPROTO
);
1469 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
1471 nfsm_chain_skip_tag(error
, &nmrep
);
1472 nfsm_chain_get_32(error
, &nmrep
, numops
);
1473 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1474 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1476 error
= nfs4_parsefattr(&nmrep
, nfsap
, nvattr
, NULL
, NULL
, NULL
);
1478 if ((lockerror
= nfs_node_lock(np
))) {
1482 nfs_loadattrcache(np
, nvattr
, &xid
, 0);
1485 nfs_node_unlock(np
);
1488 NVATTR_CLEANUP(nvattr
);
1489 FREE(nvattr
, M_TEMP
);
1490 nfsm_chain_cleanup(&nmreq
);
1491 nfsm_chain_cleanup(&nmrep
);
1497 struct vnop_getattr_args
/* {
1498 * struct vnodeop_desc *a_desc;
1500 * struct vnode_attr *a_vap;
1501 * vfs_context_t a_context;
1504 struct vnode_attr
*vap
= ap
->a_vap
;
1505 struct nfsmount
*nmp
;
1506 struct nfs_vattr
*nva
;
1507 int error
, acls
, ngaflags
;
1509 nmp
= VTONMP(ap
->a_vp
);
1510 if (nfs_mount_gone(nmp
)) {
1513 acls
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_ACL
);
1515 ngaflags
= NGA_CACHED
;
1516 if (VATTR_IS_ACTIVE(vap
, va_acl
) && acls
) {
1517 ngaflags
|= NGA_ACL
;
1519 MALLOC(nva
, struct nfs_vattr
*, sizeof(*nva
), M_TEMP
, M_WAITOK
);
1520 error
= nfs_getattr(VTONFS(ap
->a_vp
), nva
, ap
->a_context
, ngaflags
);
1525 /* copy what we have in nva to *a_vap */
1526 if (VATTR_IS_ACTIVE(vap
, va_rdev
) && NFS_BITMAP_ISSET(nva
->nva_bitmap
, NFS_FATTR_RAWDEV
)) {
1527 dev_t rdev
= makedev(nva
->nva_rawdev
.specdata1
, nva
->nva_rawdev
.specdata2
);
1528 VATTR_RETURN(vap
, va_rdev
, rdev
);
1530 if (VATTR_IS_ACTIVE(vap
, va_nlink
) && NFS_BITMAP_ISSET(nva
->nva_bitmap
, NFS_FATTR_NUMLINKS
)) {
1531 VATTR_RETURN(vap
, va_nlink
, nva
->nva_nlink
);
1533 if (VATTR_IS_ACTIVE(vap
, va_data_size
) && NFS_BITMAP_ISSET(nva
->nva_bitmap
, NFS_FATTR_SIZE
)) {
1534 VATTR_RETURN(vap
, va_data_size
, nva
->nva_size
);
1536 // VATTR_RETURN(vap, va_data_alloc, ???);
1537 // VATTR_RETURN(vap, va_total_size, ???);
1538 if (VATTR_IS_ACTIVE(vap
, va_total_alloc
) && NFS_BITMAP_ISSET(nva
->nva_bitmap
, NFS_FATTR_SPACE_USED
)) {
1539 VATTR_RETURN(vap
, va_total_alloc
, nva
->nva_bytes
);
1541 if (VATTR_IS_ACTIVE(vap
, va_uid
) && NFS_BITMAP_ISSET(nva
->nva_bitmap
, NFS_FATTR_OWNER
)) {
1542 VATTR_RETURN(vap
, va_uid
, nva
->nva_uid
);
1544 if (VATTR_IS_ACTIVE(vap
, va_uuuid
) && NFS_BITMAP_ISSET(nva
->nva_bitmap
, NFS_FATTR_OWNER
)) {
1545 VATTR_RETURN(vap
, va_uuuid
, nva
->nva_uuuid
);
1547 if (VATTR_IS_ACTIVE(vap
, va_gid
) && NFS_BITMAP_ISSET(nva
->nva_bitmap
, NFS_FATTR_OWNER_GROUP
)) {
1548 VATTR_RETURN(vap
, va_gid
, nva
->nva_gid
);
1550 if (VATTR_IS_ACTIVE(vap
, va_guuid
) && NFS_BITMAP_ISSET(nva
->nva_bitmap
, NFS_FATTR_OWNER_GROUP
)) {
1551 VATTR_RETURN(vap
, va_guuid
, nva
->nva_guuid
);
1553 if (VATTR_IS_ACTIVE(vap
, va_mode
)) {
1554 if (NMFLAG(nmp
, ACLONLY
) || !NFS_BITMAP_ISSET(nva
->nva_bitmap
, NFS_FATTR_MODE
)) {
1555 VATTR_RETURN(vap
, va_mode
, ACCESSPERMS
);
1557 VATTR_RETURN(vap
, va_mode
, nva
->nva_mode
);
1560 if (VATTR_IS_ACTIVE(vap
, va_flags
) &&
1561 (NFS_BITMAP_ISSET(nva
->nva_bitmap
, NFS_FATTR_ARCHIVE
) ||
1562 NFS_BITMAP_ISSET(nva
->nva_bitmap
, NFS_FATTR_HIDDEN
) ||
1563 (nva
->nva_flags
& NFS_FFLAG_TRIGGER
))) {
1565 if (NFS_BITMAP_ISSET(nva
->nva_bitmap
, NFS_FATTR_ARCHIVE
) &&
1566 (nva
->nva_flags
& NFS_FFLAG_ARCHIVED
)) {
1567 flags
|= SF_ARCHIVED
;
1569 if (NFS_BITMAP_ISSET(nva
->nva_bitmap
, NFS_FATTR_HIDDEN
) &&
1570 (nva
->nva_flags
& NFS_FFLAG_HIDDEN
)) {
1573 VATTR_RETURN(vap
, va_flags
, flags
);
1575 if (VATTR_IS_ACTIVE(vap
, va_create_time
) && NFS_BITMAP_ISSET(nva
->nva_bitmap
, NFS_FATTR_TIME_CREATE
)) {
1576 vap
->va_create_time
.tv_sec
= nva
->nva_timesec
[NFSTIME_CREATE
];
1577 vap
->va_create_time
.tv_nsec
= nva
->nva_timensec
[NFSTIME_CREATE
];
1578 VATTR_SET_SUPPORTED(vap
, va_create_time
);
1580 if (VATTR_IS_ACTIVE(vap
, va_access_time
) && NFS_BITMAP_ISSET(nva
->nva_bitmap
, NFS_FATTR_TIME_ACCESS
)) {
1581 vap
->va_access_time
.tv_sec
= nva
->nva_timesec
[NFSTIME_ACCESS
];
1582 vap
->va_access_time
.tv_nsec
= nva
->nva_timensec
[NFSTIME_ACCESS
];
1583 VATTR_SET_SUPPORTED(vap
, va_access_time
);
1585 if (VATTR_IS_ACTIVE(vap
, va_modify_time
) && NFS_BITMAP_ISSET(nva
->nva_bitmap
, NFS_FATTR_TIME_MODIFY
)) {
1586 vap
->va_modify_time
.tv_sec
= nva
->nva_timesec
[NFSTIME_MODIFY
];
1587 vap
->va_modify_time
.tv_nsec
= nva
->nva_timensec
[NFSTIME_MODIFY
];
1588 VATTR_SET_SUPPORTED(vap
, va_modify_time
);
1590 if (VATTR_IS_ACTIVE(vap
, va_change_time
) && NFS_BITMAP_ISSET(nva
->nva_bitmap
, NFS_FATTR_TIME_METADATA
)) {
1591 vap
->va_change_time
.tv_sec
= nva
->nva_timesec
[NFSTIME_CHANGE
];
1592 vap
->va_change_time
.tv_nsec
= nva
->nva_timensec
[NFSTIME_CHANGE
];
1593 VATTR_SET_SUPPORTED(vap
, va_change_time
);
1595 if (VATTR_IS_ACTIVE(vap
, va_backup_time
) && NFS_BITMAP_ISSET(nva
->nva_bitmap
, NFS_FATTR_TIME_BACKUP
)) {
1596 vap
->va_backup_time
.tv_sec
= nva
->nva_timesec
[NFSTIME_BACKUP
];
1597 vap
->va_backup_time
.tv_nsec
= nva
->nva_timensec
[NFSTIME_BACKUP
];
1598 VATTR_SET_SUPPORTED(vap
, va_backup_time
);
1600 if (VATTR_IS_ACTIVE(vap
, va_fileid
) && NFS_BITMAP_ISSET(nva
->nva_bitmap
, NFS_FATTR_FILEID
)) {
1601 VATTR_RETURN(vap
, va_fileid
, nva
->nva_fileid
);
1603 if (VATTR_IS_ACTIVE(vap
, va_type
) && NFS_BITMAP_ISSET(nva
->nva_bitmap
, NFS_FATTR_TYPE
)) {
1604 VATTR_RETURN(vap
, va_type
, nva
->nva_type
);
1606 if (VATTR_IS_ACTIVE(vap
, va_filerev
) && NFS_BITMAP_ISSET(nva
->nva_bitmap
, NFS_FATTR_CHANGE
)) {
1607 VATTR_RETURN(vap
, va_filerev
, nva
->nva_change
);
1610 if (VATTR_IS_ACTIVE(vap
, va_acl
) && acls
) {
1611 VATTR_RETURN(vap
, va_acl
, nva
->nva_acl
);
1612 nva
->nva_acl
= NULL
;
1615 // other attrs we might support someday:
1616 // VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? */);
1618 NVATTR_CLEANUP(nva
);
1627 struct vnode_attr
*vap
,
1630 struct nfsmount
*nmp
= NFSTONMP(np
);
1631 int error
= 0, setattr_error
= 0, lockerror
= ENOENT
, status
, nfsvers
, numops
;
1632 u_int64_t xid
, nextxid
;
1633 struct nfsm_chain nmreq
, nmrep
;
1634 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
1635 uint32_t getbitmap
[NFS_ATTR_BITMAP_LEN
];
1636 uint32_t setbitmap
[NFS_ATTR_BITMAP_LEN
];
1637 nfs_stateid stateid
;
1638 struct nfsreq_secinfo_args si
;
1640 if (nfs_mount_gone(nmp
)) {
1643 nfsvers
= nmp
->nm_vers
;
1644 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
1648 if (VATTR_IS_ACTIVE(vap
, va_flags
) && (vap
->va_flags
& ~(SF_ARCHIVED
| UF_HIDDEN
))) {
1649 /* we don't support setting unsupported flags (duh!) */
1650 if (vap
->va_active
& ~VNODE_ATTR_va_flags
) {
1651 return EINVAL
; /* return EINVAL if other attributes also set */
1653 return ENOTSUP
; /* return ENOTSUP for chflags(2) */
1657 /* don't bother requesting some changes if they don't look like they are changing */
1658 if (VATTR_IS_ACTIVE(vap
, va_uid
) && (vap
->va_uid
== np
->n_vattr
.nva_uid
)) {
1659 VATTR_CLEAR_ACTIVE(vap
, va_uid
);
1661 if (VATTR_IS_ACTIVE(vap
, va_gid
) && (vap
->va_gid
== np
->n_vattr
.nva_gid
)) {
1662 VATTR_CLEAR_ACTIVE(vap
, va_gid
);
1664 if (VATTR_IS_ACTIVE(vap
, va_uuuid
) && kauth_guid_equal(&vap
->va_uuuid
, &np
->n_vattr
.nva_uuuid
)) {
1665 VATTR_CLEAR_ACTIVE(vap
, va_uuuid
);
1667 if (VATTR_IS_ACTIVE(vap
, va_guuid
) && kauth_guid_equal(&vap
->va_guuid
, &np
->n_vattr
.nva_guuid
)) {
1668 VATTR_CLEAR_ACTIVE(vap
, va_guuid
);
1672 /* do nothing if no attributes will be sent */
1673 nfs_vattr_set_bitmap(nmp
, bitmap
, vap
);
1674 if (!bitmap
[0] && !bitmap
[1]) {
1678 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
1679 nfsm_chain_null(&nmreq
);
1680 nfsm_chain_null(&nmrep
);
1683 * Prepare GETATTR bitmap: if we are setting the ACL or mode, we
1684 * need to invalidate any cached ACL. And if we had an ACL cached,
1685 * we might as well also fetch the new value.
1687 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, getbitmap
);
1688 if (NFS_BITMAP_ISSET(bitmap
, NFS_FATTR_ACL
) ||
1689 NFS_BITMAP_ISSET(bitmap
, NFS_FATTR_MODE
)) {
1690 if (NACLVALID(np
)) {
1691 NFS_BITMAP_SET(getbitmap
, NFS_FATTR_ACL
);
1696 // PUTFH, SETATTR, GETATTR
1698 nfsm_chain_build_alloc_init(error
, &nmreq
, 40 * NFSX_UNSIGNED
);
1699 nfsm_chain_add_compound_header(error
, &nmreq
, "setattr", nmp
->nm_minor_vers
, numops
);
1701 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1702 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
1704 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SETATTR
);
1705 if (VATTR_IS_ACTIVE(vap
, va_data_size
)) {
1706 nfs_get_stateid(np
, vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &stateid
);
1708 stateid
.seqid
= stateid
.other
[0] = stateid
.other
[1] = stateid
.other
[2] = 0;
1710 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
1711 nfsm_chain_add_fattr4(error
, &nmreq
, vap
, nmp
);
1713 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1714 nfsm_chain_add_bitmap_supported(error
, &nmreq
, getbitmap
, nmp
, np
);
1715 nfsm_chain_build_done(error
, &nmreq
);
1716 nfsm_assert(error
, (numops
== 0), EPROTO
);
1718 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
1720 if ((lockerror
= nfs_node_lock(np
))) {
1723 nfsm_chain_skip_tag(error
, &nmrep
);
1724 nfsm_chain_get_32(error
, &nmrep
, numops
);
1725 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1727 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SETATTR
);
1728 nfsmout_if(error
== EBADRPC
);
1729 setattr_error
= error
;
1731 bmlen
= NFS_ATTR_BITMAP_LEN
;
1732 nfsm_chain_get_bitmap(error
, &nmrep
, setbitmap
, bmlen
);
1734 if (VATTR_IS_ACTIVE(vap
, va_data_size
) && (np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
1735 microuptime(&np
->n_lastio
);
1737 nfs_vattr_set_supported(setbitmap
, vap
);
1738 error
= setattr_error
;
1740 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1741 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
1743 NATTRINVALIDATE(np
);
1746 * We just changed the attributes and we want to make sure that we
1747 * see the latest attributes. Get the next XID. If it's not the
1748 * next XID after the SETATTR XID, then it's possible that another
1749 * RPC was in flight at the same time and it might put stale attributes
1750 * in the cache. In that case, we invalidate the attributes and set
1751 * the attribute cache XID to guarantee that newer attributes will
1755 nfs_get_xid(&nextxid
);
1756 if (nextxid
!= (xid
+ 1)) {
1757 np
->n_xid
= nextxid
;
1758 NATTRINVALIDATE(np
);
1762 nfs_node_unlock(np
);
1764 nfsm_chain_cleanup(&nmreq
);
1765 nfsm_chain_cleanup(&nmrep
);
1766 if ((setattr_error
== EINVAL
) && VATTR_IS_ACTIVE(vap
, va_acl
) && VATTR_IS_ACTIVE(vap
, va_mode
) && !NMFLAG(nmp
, ACLONLY
)) {
1768 * Some server's may not like ACL/mode combos that get sent.
1769 * If it looks like that's what the server choked on, try setting
1770 * just the ACL and not the mode (unless it looks like everything
1771 * but mode was already successfully set).
1773 if (((bitmap
[0] & setbitmap
[0]) != bitmap
[0]) ||
1774 ((bitmap
[1] & (setbitmap
[1] | NFS_FATTR_MODE
)) != bitmap
[1])) {
1775 VATTR_CLEAR_ACTIVE(vap
, va_mode
);
1782 #endif /* CONFIG_NFS4 */
1785 * Wait for any pending recovery to complete.
1788 nfs_mount_state_wait_for_recovery(struct nfsmount
*nmp
)
1790 struct timespec ts
= { .tv_sec
= 1, .tv_nsec
= 0 };
1791 int error
= 0, slpflag
= NMFLAG(nmp
, INTR
) ? PCATCH
: 0;
1793 lck_mtx_lock(&nmp
->nm_lock
);
1794 while (nmp
->nm_state
& NFSSTA_RECOVER
) {
1795 if ((error
= nfs_sigintr(nmp
, NULL
, current_thread(), 1))) {
1798 nfs_mount_sock_thread_wake(nmp
);
1799 msleep(&nmp
->nm_state
, &nmp
->nm_lock
, slpflag
| (PZERO
- 1), "nfsrecoverwait", &ts
);
1802 lck_mtx_unlock(&nmp
->nm_lock
);
1808 * We're about to use/manipulate NFS mount's open/lock state.
1809 * Wait for any pending state recovery to complete, then
1810 * mark the state as being in use (which will hold off
1811 * the recovery thread until we're done).
1814 nfs_mount_state_in_use_start(struct nfsmount
*nmp
, thread_t thd
)
1816 struct timespec ts
= { .tv_sec
= 1, .tv_nsec
= 0 };
1817 int error
= 0, slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
1819 if (nfs_mount_gone(nmp
)) {
1822 lck_mtx_lock(&nmp
->nm_lock
);
1823 if (nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
)) {
1824 lck_mtx_unlock(&nmp
->nm_lock
);
1827 while (nmp
->nm_state
& NFSSTA_RECOVER
) {
1828 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 1))) {
1831 nfs_mount_sock_thread_wake(nmp
);
1832 msleep(&nmp
->nm_state
, &nmp
->nm_lock
, slpflag
| (PZERO
- 1), "nfsrecoverwait", &ts
);
1836 nmp
->nm_stateinuse
++;
1838 lck_mtx_unlock(&nmp
->nm_lock
);
1844 * We're done using/manipulating the NFS mount's open/lock
1845 * state. If the given error indicates that recovery should
1846 * be performed, we'll initiate recovery.
1849 nfs_mount_state_in_use_end(struct nfsmount
*nmp
, int error
)
1851 int restart
= nfs_mount_state_error_should_restart(error
);
1853 if (nfs_mount_gone(nmp
)) {
1856 lck_mtx_lock(&nmp
->nm_lock
);
1857 if (restart
&& (error
!= NFSERR_OLD_STATEID
) && (error
!= NFSERR_GRACE
)) {
1858 printf("nfs_mount_state_in_use_end: error %d, initiating recovery for %s, 0x%x\n",
1859 error
, vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nmp
->nm_stategenid
);
1860 nfs_need_recover(nmp
, error
);
1862 if (nmp
->nm_stateinuse
> 0) {
1863 nmp
->nm_stateinuse
--;
1865 panic("NFS mount state in use count underrun");
1867 if (!nmp
->nm_stateinuse
&& (nmp
->nm_state
& NFSSTA_RECOVER
)) {
1868 wakeup(&nmp
->nm_stateinuse
);
1870 lck_mtx_unlock(&nmp
->nm_lock
);
1871 if (error
== NFSERR_GRACE
) {
1872 tsleep(&nmp
->nm_state
, (PZERO
- 1), "nfsgrace", 2 * hz
);
1879 * Does the error mean we should restart/redo a state-related operation?
1882 nfs_mount_state_error_should_restart(int error
)
1885 case NFSERR_STALE_STATEID
:
1886 case NFSERR_STALE_CLIENTID
:
1887 case NFSERR_ADMIN_REVOKED
:
1888 case NFSERR_EXPIRED
:
1889 case NFSERR_OLD_STATEID
:
1890 case NFSERR_BAD_STATEID
:
1898 * In some cases we may want to limit how many times we restart a
1899 * state-related operation - e.g. we're repeatedly getting NFSERR_GRACE.
1900 * Base the limit on the lease (as long as it's not too short).
1903 nfs_mount_state_max_restarts(struct nfsmount
*nmp
)
1905 return MAX(nmp
->nm_fsattr
.nfsa_lease
, 60);
1909 * Does the error mean we probably lost a delegation?
1912 nfs_mount_state_error_delegation_lost(int error
)
1915 case NFSERR_STALE_STATEID
:
1916 case NFSERR_ADMIN_REVOKED
:
1917 case NFSERR_EXPIRED
:
1918 case NFSERR_OLD_STATEID
:
1919 case NFSERR_BAD_STATEID
:
1920 case NFSERR_GRACE
: /* ugh! (stupid) RFC 3530 specifically disallows CLAIM_DELEGATE_CUR during grace period? */
1928 * Mark an NFS node's open state as busy.
1931 nfs_open_state_set_busy(nfsnode_t np
, thread_t thd
)
1933 struct nfsmount
*nmp
;
1934 struct timespec ts
= { .tv_sec
= 2, .tv_nsec
= 0 };
1935 int error
= 0, slpflag
;
1938 if (nfs_mount_gone(nmp
)) {
1941 slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
1943 lck_mtx_lock(&np
->n_openlock
);
1944 while (np
->n_openflags
& N_OPENBUSY
) {
1945 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0))) {
1948 np
->n_openflags
|= N_OPENWANT
;
1949 msleep(&np
->n_openflags
, &np
->n_openlock
, slpflag
, "nfs_open_state_set_busy", &ts
);
1953 np
->n_openflags
|= N_OPENBUSY
;
1955 lck_mtx_unlock(&np
->n_openlock
);
1961 * Clear an NFS node's open state busy flag and wake up
1962 * anyone wanting it.
1965 nfs_open_state_clear_busy(nfsnode_t np
)
1969 lck_mtx_lock(&np
->n_openlock
);
1970 if (!(np
->n_openflags
& N_OPENBUSY
)) {
1971 panic("nfs_open_state_clear_busy");
1973 wanted
= (np
->n_openflags
& N_OPENWANT
);
1974 np
->n_openflags
&= ~(N_OPENBUSY
| N_OPENWANT
);
1975 lck_mtx_unlock(&np
->n_openlock
);
1977 wakeup(&np
->n_openflags
);
1982 * Search a mount's open owner list for the owner for this credential.
1983 * If not found and "alloc" is set, then allocate a new one.
1985 struct nfs_open_owner
*
1986 nfs_open_owner_find(struct nfsmount
*nmp
, kauth_cred_t cred
, int alloc
)
1988 uid_t uid
= kauth_cred_getuid(cred
);
1989 struct nfs_open_owner
*noop
, *newnoop
= NULL
;
1992 lck_mtx_lock(&nmp
->nm_lock
);
1993 TAILQ_FOREACH(noop
, &nmp
->nm_open_owners
, noo_link
) {
1994 if (kauth_cred_getuid(noop
->noo_cred
) == uid
) {
1999 if (!noop
&& !newnoop
&& alloc
) {
2000 lck_mtx_unlock(&nmp
->nm_lock
);
2001 MALLOC(newnoop
, struct nfs_open_owner
*, sizeof(struct nfs_open_owner
), M_TEMP
, M_WAITOK
);
2005 bzero(newnoop
, sizeof(*newnoop
));
2006 lck_mtx_init(&newnoop
->noo_lock
, &nfs_open_grp
, LCK_ATTR_NULL
);
2007 newnoop
->noo_mount
= nmp
;
2008 kauth_cred_ref(cred
);
2009 newnoop
->noo_cred
= cred
;
2010 newnoop
->noo_name
= OSAddAtomic(1, &nfs_open_owner_seqnum
);
2011 TAILQ_INIT(&newnoop
->noo_opens
);
2014 if (!noop
&& newnoop
) {
2015 newnoop
->noo_flags
|= NFS_OPEN_OWNER_LINK
;
2016 os_ref_init(&newnoop
->noo_refcnt
, NULL
);
2017 TAILQ_INSERT_HEAD(&nmp
->nm_open_owners
, newnoop
, noo_link
);
2020 lck_mtx_unlock(&nmp
->nm_lock
);
2022 if (newnoop
&& (noop
!= newnoop
)) {
2023 nfs_open_owner_destroy(newnoop
);
2027 nfs_open_owner_ref(noop
);
2034 * destroy an open owner that's no longer needed
2037 nfs_open_owner_destroy(struct nfs_open_owner
*noop
)
2039 if (noop
->noo_cred
) {
2040 kauth_cred_unref(&noop
->noo_cred
);
2042 lck_mtx_destroy(&noop
->noo_lock
, &nfs_open_grp
);
2047 * acquire a reference count on an open owner
2050 nfs_open_owner_ref(struct nfs_open_owner
*noop
)
2052 lck_mtx_lock(&noop
->noo_lock
);
2053 os_ref_retain_locked(&noop
->noo_refcnt
);
2054 lck_mtx_unlock(&noop
->noo_lock
);
2058 * drop a reference count on an open owner and destroy it if
2059 * it is no longer referenced and no longer on the mount's list.
2062 nfs_open_owner_rele(struct nfs_open_owner
*noop
)
2064 os_ref_count_t newcount
;
2066 lck_mtx_lock(&noop
->noo_lock
);
2067 if (os_ref_get_count(&noop
->noo_refcnt
) < 1) {
2068 panic("nfs_open_owner_rele: no refcnt");
2070 newcount
= os_ref_release_locked(&noop
->noo_refcnt
);
2071 if (!newcount
&& (noop
->noo_flags
& NFS_OPEN_OWNER_BUSY
)) {
2072 panic("nfs_open_owner_rele: busy");
2074 /* XXX we may potentially want to clean up idle/unused open owner structures */
2075 if (newcount
|| (noop
->noo_flags
& NFS_OPEN_OWNER_LINK
)) {
2076 lck_mtx_unlock(&noop
->noo_lock
);
2079 /* owner is no longer referenced or linked to mount, so destroy it */
2080 lck_mtx_unlock(&noop
->noo_lock
);
2081 nfs_open_owner_destroy(noop
);
2085 * Mark an open owner as busy because we are about to
2086 * start an operation that uses and updates open owner state.
2089 nfs_open_owner_set_busy(struct nfs_open_owner
*noop
, thread_t thd
)
2091 struct nfsmount
*nmp
;
2092 struct timespec ts
= { .tv_sec
= 2, .tv_nsec
= 0 };
2093 int error
= 0, slpflag
;
2095 nmp
= noop
->noo_mount
;
2096 if (nfs_mount_gone(nmp
)) {
2099 slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
2101 lck_mtx_lock(&noop
->noo_lock
);
2102 while (noop
->noo_flags
& NFS_OPEN_OWNER_BUSY
) {
2103 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0))) {
2106 noop
->noo_flags
|= NFS_OPEN_OWNER_WANT
;
2107 msleep(noop
, &noop
->noo_lock
, slpflag
, "nfs_open_owner_set_busy", &ts
);
2111 noop
->noo_flags
|= NFS_OPEN_OWNER_BUSY
;
2113 lck_mtx_unlock(&noop
->noo_lock
);
2119 * Clear the busy flag on an open owner and wake up anyone waiting
2123 nfs_open_owner_clear_busy(struct nfs_open_owner
*noop
)
2127 lck_mtx_lock(&noop
->noo_lock
);
2128 if (!(noop
->noo_flags
& NFS_OPEN_OWNER_BUSY
)) {
2129 panic("nfs_open_owner_clear_busy");
2131 wanted
= (noop
->noo_flags
& NFS_OPEN_OWNER_WANT
);
2132 noop
->noo_flags
&= ~(NFS_OPEN_OWNER_BUSY
| NFS_OPEN_OWNER_WANT
);
2133 lck_mtx_unlock(&noop
->noo_lock
);
2140 * Given an open/lock owner and an error code, increment the
2141 * sequence ID if appropriate.
2144 nfs_owner_seqid_increment(struct nfs_open_owner
*noop
, struct nfs_lock_owner
*nlop
, int error
)
2147 case NFSERR_STALE_CLIENTID
:
2148 case NFSERR_STALE_STATEID
:
2149 case NFSERR_OLD_STATEID
:
2150 case NFSERR_BAD_STATEID
:
2151 case NFSERR_BAD_SEQID
:
2153 case NFSERR_RESOURCE
:
2154 case NFSERR_NOFILEHANDLE
:
2155 /* do not increment the open seqid on these errors */
2167 * Search a node's open file list for any conflicts with this request.
2168 * Also find this open owner's open file structure.
2169 * If not found and "alloc" is set, then allocate one.
2174 struct nfs_open_owner
*noop
,
2175 struct nfs_open_file
**nofpp
,
2176 uint32_t accessMode
,
2181 return nfs_open_file_find_internal(np
, noop
, nofpp
, accessMode
, denyMode
, alloc
);
2185 * Internally, allow using a provisional nodeless nofp (passed in via *nofpp)
2186 * if an existing one is not found. This is used in "create" scenarios to
2187 * officially add the provisional nofp to the node once the node is created.
2190 nfs_open_file_find_internal(
2192 struct nfs_open_owner
*noop
,
2193 struct nfs_open_file
**nofpp
,
2194 uint32_t accessMode
,
2198 struct nfs_open_file
*nofp
= NULL
, *nofp2
, *newnofp
= NULL
;
2204 lck_mtx_lock(&np
->n_openlock
);
2205 TAILQ_FOREACH(nofp2
, &np
->n_opens
, nof_link
) {
2206 if (nofp2
->nof_owner
== noop
) {
2212 if ((accessMode
& nofp2
->nof_deny
) || (denyMode
& nofp2
->nof_access
)) {
2213 /* This request conflicts with an existing open on this client. */
2214 lck_mtx_unlock(&np
->n_openlock
);
2220 * If this open owner doesn't have an open
2221 * file structure yet, we create one for it.
2223 if (!nofp
&& !*nofpp
&& !newnofp
&& alloc
) {
2224 lck_mtx_unlock(&np
->n_openlock
);
2226 MALLOC(newnofp
, struct nfs_open_file
*, sizeof(struct nfs_open_file
), M_TEMP
, M_WAITOK
);
2230 bzero(newnofp
, sizeof(*newnofp
));
2231 lck_mtx_init(&newnofp
->nof_lock
, &nfs_open_grp
, LCK_ATTR_NULL
);
2232 newnofp
->nof_owner
= noop
;
2233 nfs_open_owner_ref(noop
);
2234 newnofp
->nof_np
= np
;
2235 lck_mtx_lock(&noop
->noo_lock
);
2236 TAILQ_INSERT_HEAD(&noop
->noo_opens
, newnofp
, nof_oolink
);
2237 lck_mtx_unlock(&noop
->noo_lock
);
2244 (*nofpp
)->nof_np
= np
;
2250 TAILQ_INSERT_HEAD(&np
->n_opens
, nofp
, nof_link
);
2254 lck_mtx_unlock(&np
->n_openlock
);
2257 if (alloc
&& newnofp
&& (nofp
!= newnofp
)) {
2258 nfs_open_file_destroy(newnofp
);
2262 return nofp
? 0 : ESRCH
;
2266 * Destroy an open file structure.
2269 nfs_open_file_destroy(struct nfs_open_file
*nofp
)
2271 lck_mtx_lock(&nofp
->nof_owner
->noo_lock
);
2272 TAILQ_REMOVE(&nofp
->nof_owner
->noo_opens
, nofp
, nof_oolink
);
2273 lck_mtx_unlock(&nofp
->nof_owner
->noo_lock
);
2274 nfs_open_owner_rele(nofp
->nof_owner
);
2275 lck_mtx_destroy(&nofp
->nof_lock
, &nfs_open_grp
);
2280 * Mark an open file as busy because we are about to
2281 * start an operation that uses and updates open file state.
2284 nfs_open_file_set_busy(struct nfs_open_file
*nofp
, thread_t thd
)
2286 struct nfsmount
*nmp
;
2287 struct timespec ts
= { .tv_sec
= 2, .tv_nsec
= 0 };
2288 int error
= 0, slpflag
;
2290 nmp
= nofp
->nof_owner
->noo_mount
;
2291 if (nfs_mount_gone(nmp
)) {
2294 slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
2296 lck_mtx_lock(&nofp
->nof_lock
);
2297 while (nofp
->nof_flags
& NFS_OPEN_FILE_BUSY
) {
2298 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0))) {
2301 nofp
->nof_flags
|= NFS_OPEN_FILE_WANT
;
2302 msleep(nofp
, &nofp
->nof_lock
, slpflag
, "nfs_open_file_set_busy", &ts
);
2306 nofp
->nof_flags
|= NFS_OPEN_FILE_BUSY
;
2308 lck_mtx_unlock(&nofp
->nof_lock
);
2314 * Clear the busy flag on an open file and wake up anyone waiting
2318 nfs_open_file_clear_busy(struct nfs_open_file
*nofp
)
2322 lck_mtx_lock(&nofp
->nof_lock
);
2323 if (!(nofp
->nof_flags
& NFS_OPEN_FILE_BUSY
)) {
2324 panic("nfs_open_file_clear_busy");
2326 wanted
= (nofp
->nof_flags
& NFS_OPEN_FILE_WANT
);
2327 nofp
->nof_flags
&= ~(NFS_OPEN_FILE_BUSY
| NFS_OPEN_FILE_WANT
);
2328 lck_mtx_unlock(&nofp
->nof_lock
);
2335 * Add the open state for the given access/deny modes to this open file.
2338 nfs_open_file_add_open(struct nfs_open_file
*nofp
, uint32_t accessMode
, uint32_t denyMode
, int delegated
)
2340 lck_mtx_lock(&nofp
->nof_lock
);
2341 nofp
->nof_access
|= accessMode
;
2342 nofp
->nof_deny
|= denyMode
;
2345 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2346 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2348 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2350 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2353 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2354 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2356 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2358 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2359 nofp
->nof_d_rw_dw
++;
2361 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2362 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2363 nofp
->nof_d_r_drw
++;
2364 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2365 nofp
->nof_d_w_drw
++;
2366 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2367 nofp
->nof_d_rw_drw
++;
2371 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2372 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2374 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2376 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2379 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2380 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2382 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2384 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2387 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2388 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2390 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2392 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2398 nofp
->nof_opencnt
++;
2399 lck_mtx_unlock(&nofp
->nof_lock
);
2403 * Find which particular open combo will be closed and report what
2404 * the new modes will be and whether the open was delegated.
2407 nfs_open_file_remove_open_find(
2408 struct nfs_open_file
*nofp
,
2409 uint32_t accessMode
,
2411 uint8_t *newAccessMode
,
2412 uint8_t *newDenyMode
,
2416 * Calculate new modes: a mode bit gets removed when there's only
2417 * one count in all the corresponding counts
2419 *newAccessMode
= nofp
->nof_access
;
2420 *newDenyMode
= nofp
->nof_deny
;
2422 if ((accessMode
& NFS_OPEN_SHARE_ACCESS_READ
) &&
2423 (nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_READ
) &&
2424 ((nofp
->nof_r
+ nofp
->nof_d_r
+
2425 nofp
->nof_rw
+ nofp
->nof_d_rw
+
2426 nofp
->nof_r_dw
+ nofp
->nof_d_r_dw
+
2427 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
+
2428 nofp
->nof_r_drw
+ nofp
->nof_d_r_drw
+
2429 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
) == 1)) {
2430 *newAccessMode
&= ~NFS_OPEN_SHARE_ACCESS_READ
;
2432 if ((accessMode
& NFS_OPEN_SHARE_ACCESS_WRITE
) &&
2433 (nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_WRITE
) &&
2434 ((nofp
->nof_w
+ nofp
->nof_d_w
+
2435 nofp
->nof_rw
+ nofp
->nof_d_rw
+
2436 nofp
->nof_w_dw
+ nofp
->nof_d_w_dw
+
2437 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
+
2438 nofp
->nof_w_drw
+ nofp
->nof_d_w_drw
+
2439 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
) == 1)) {
2440 *newAccessMode
&= ~NFS_OPEN_SHARE_ACCESS_WRITE
;
2442 if ((denyMode
& NFS_OPEN_SHARE_DENY_READ
) &&
2443 (nofp
->nof_deny
& NFS_OPEN_SHARE_DENY_READ
) &&
2444 ((nofp
->nof_r_drw
+ nofp
->nof_d_r_drw
+
2445 nofp
->nof_w_drw
+ nofp
->nof_d_w_drw
+
2446 nofp
->nof_rw_drw
+ nofp
->nof_d_rw_drw
) == 1)) {
2447 *newDenyMode
&= ~NFS_OPEN_SHARE_DENY_READ
;
2449 if ((denyMode
& NFS_OPEN_SHARE_DENY_WRITE
) &&
2450 (nofp
->nof_deny
& NFS_OPEN_SHARE_DENY_WRITE
) &&
2451 ((nofp
->nof_r_drw
+ nofp
->nof_d_r_drw
+
2452 nofp
->nof_w_drw
+ nofp
->nof_d_w_drw
+
2453 nofp
->nof_rw_drw
+ nofp
->nof_d_rw_drw
+
2454 nofp
->nof_r_dw
+ nofp
->nof_d_r_dw
+
2455 nofp
->nof_w_dw
+ nofp
->nof_d_w_dw
+
2456 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
) == 1)) {
2457 *newDenyMode
&= ~NFS_OPEN_SHARE_DENY_WRITE
;
2460 /* Find the corresponding open access/deny mode counter. */
2461 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2462 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2463 *delegated
= (nofp
->nof_d_r
!= 0);
2464 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2465 *delegated
= (nofp
->nof_d_w
!= 0);
2466 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2467 *delegated
= (nofp
->nof_d_rw
!= 0);
2471 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2472 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2473 *delegated
= (nofp
->nof_d_r_dw
!= 0);
2474 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2475 *delegated
= (nofp
->nof_d_w_dw
!= 0);
2476 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2477 *delegated
= (nofp
->nof_d_rw_dw
!= 0);
2481 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2482 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2483 *delegated
= (nofp
->nof_d_r_drw
!= 0);
2484 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2485 *delegated
= (nofp
->nof_d_w_drw
!= 0);
2486 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2487 *delegated
= (nofp
->nof_d_rw_drw
!= 0);
2495 * Remove the open state for the given access/deny modes to this open file.
2498 nfs_open_file_remove_open(struct nfs_open_file
*nofp
, uint32_t accessMode
, uint32_t denyMode
)
2500 uint8_t newAccessMode
, newDenyMode
;
2503 lck_mtx_lock(&nofp
->nof_lock
);
2504 nfs_open_file_remove_open_find(nofp
, accessMode
, denyMode
, &newAccessMode
, &newDenyMode
, &delegated
);
2506 /* Decrement the corresponding open access/deny mode counter. */
2507 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2508 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2510 if (nofp
->nof_d_r
== 0) {
2511 NP(nofp
->nof_np
, "nfs: open(R) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2516 if (nofp
->nof_r
== 0) {
2517 NP(nofp
->nof_np
, "nfs: open(R) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2522 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2524 if (nofp
->nof_d_w
== 0) {
2525 NP(nofp
->nof_np
, "nfs: open(W) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2530 if (nofp
->nof_w
== 0) {
2531 NP(nofp
->nof_np
, "nfs: open(W) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2536 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2538 if (nofp
->nof_d_rw
== 0) {
2539 NP(nofp
->nof_np
, "nfs: open(RW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2544 if (nofp
->nof_rw
== 0) {
2545 NP(nofp
->nof_np
, "nfs: open(RW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2551 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2552 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2554 if (nofp
->nof_d_r_dw
== 0) {
2555 NP(nofp
->nof_np
, "nfs: open(R,DW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2560 if (nofp
->nof_r_dw
== 0) {
2561 NP(nofp
->nof_np
, "nfs: open(R,DW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2566 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2568 if (nofp
->nof_d_w_dw
== 0) {
2569 NP(nofp
->nof_np
, "nfs: open(W,DW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2574 if (nofp
->nof_w_dw
== 0) {
2575 NP(nofp
->nof_np
, "nfs: open(W,DW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2580 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2582 if (nofp
->nof_d_rw_dw
== 0) {
2583 NP(nofp
->nof_np
, "nfs: open(RW,DW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2585 nofp
->nof_d_rw_dw
--;
2588 if (nofp
->nof_rw_dw
== 0) {
2589 NP(nofp
->nof_np
, "nfs: open(RW,DW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2595 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2596 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2598 if (nofp
->nof_d_r_drw
== 0) {
2599 NP(nofp
->nof_np
, "nfs: open(R,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2601 nofp
->nof_d_r_drw
--;
2604 if (nofp
->nof_r_drw
== 0) {
2605 NP(nofp
->nof_np
, "nfs: open(R,DRW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2610 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2612 if (nofp
->nof_d_w_drw
== 0) {
2613 NP(nofp
->nof_np
, "nfs: open(W,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2615 nofp
->nof_d_w_drw
--;
2618 if (nofp
->nof_w_drw
== 0) {
2619 NP(nofp
->nof_np
, "nfs: open(W,DRW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2624 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2626 if (nofp
->nof_d_rw_drw
== 0) {
2627 NP(nofp
->nof_np
, "nfs: open(RW,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2629 nofp
->nof_d_rw_drw
--;
2632 if (nofp
->nof_rw_drw
== 0) {
2633 NP(nofp
->nof_np
, "nfs: open(RW,DRW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2641 /* update the modes */
2642 nofp
->nof_access
= newAccessMode
;
2643 nofp
->nof_deny
= newDenyMode
;
2644 nofp
->nof_opencnt
--;
2645 lck_mtx_unlock(&nofp
->nof_lock
);
2650 * Get the current (delegation, lock, open, default) stateid for this node.
2651 * If node has a delegation, use that stateid.
2652 * If pid has a lock, use the lockowner's stateid.
2653 * Or use the open file's stateid.
2654 * If no open file, use a default stateid of all ones.
2657 nfs_get_stateid(nfsnode_t np
, thread_t thd
, kauth_cred_t cred
, nfs_stateid
*sid
)
2659 struct nfsmount
*nmp
= NFSTONMP(np
);
2660 proc_t p
= thd
? get_bsdthreadtask_info(thd
) : current_proc(); // XXX async I/O requests don't have a thread
2661 struct nfs_open_owner
*noop
= NULL
;
2662 struct nfs_open_file
*nofp
= NULL
;
2663 struct nfs_lock_owner
*nlop
= NULL
;
2664 nfs_stateid
*s
= NULL
;
2666 if (np
->n_openflags
& N_DELEG_MASK
) {
2667 s
= &np
->n_dstateid
;
2670 nlop
= nfs_lock_owner_find(np
, p
, 0);
2672 if (nlop
&& !TAILQ_EMPTY(&nlop
->nlo_locks
)) {
2673 /* we hold locks, use lock stateid */
2674 s
= &nlop
->nlo_stateid
;
2675 } else if (((noop
= nfs_open_owner_find(nmp
, cred
, 0))) &&
2676 (nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 0) == 0) &&
2677 !(nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) &&
2679 /* we (should) have the file open, use open stateid */
2680 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
2681 nfs4_reopen(nofp
, thd
);
2683 if (!(nofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
2684 s
= &nofp
->nof_stateid
;
2690 sid
->seqid
= s
->seqid
;
2691 sid
->other
[0] = s
->other
[0];
2692 sid
->other
[1] = s
->other
[1];
2693 sid
->other
[2] = s
->other
[2];
2695 /* named attributes may not have a stateid for reads, so don't complain for them */
2696 if (!(np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
2697 NP(np
, "nfs_get_stateid: no stateid");
2699 sid
->seqid
= sid
->other
[0] = sid
->other
[1] = sid
->other
[2] = 0xffffffff;
2702 nfs_lock_owner_rele(nlop
);
2705 nfs_open_owner_rele(noop
);
2711 * When we have a delegation, we may be able to perform the OPEN locally.
2712 * Perform the OPEN by checking the delegation ACE and/or checking via ACCESS.
2715 nfs4_open_delegated(
2717 struct nfs_open_file
*nofp
,
2718 uint32_t accessMode
,
2722 int error
= 0, ismember
, readtoo
= 0, authorized
= 0;
2724 struct kauth_acl_eval eval
;
2725 kauth_cred_t cred
= vfs_context_ucred(ctx
);
2727 if (!(accessMode
& NFS_OPEN_SHARE_ACCESS_READ
)) {
2729 * Try to open it for read access too,
2730 * so the buffer cache can read data.
2733 accessMode
|= NFS_OPEN_SHARE_ACCESS_READ
;
2738 if (accessMode
& NFS_OPEN_SHARE_ACCESS_READ
) {
2739 action
|= KAUTH_VNODE_READ_DATA
;
2741 if (accessMode
& NFS_OPEN_SHARE_ACCESS_WRITE
) {
2742 action
|= KAUTH_VNODE_WRITE_DATA
;
2745 /* evaluate ACE (if we have one) */
2746 if (np
->n_dace
.ace_flags
) {
2747 eval
.ae_requested
= action
;
2748 eval
.ae_acl
= &np
->n_dace
;
2750 eval
.ae_options
= 0;
2751 if (np
->n_vattr
.nva_uid
== kauth_cred_getuid(cred
)) {
2752 eval
.ae_options
|= KAUTH_AEVAL_IS_OWNER
;
2754 error
= kauth_cred_ismember_gid(cred
, np
->n_vattr
.nva_gid
, &ismember
);
2755 if (!error
&& ismember
) {
2756 eval
.ae_options
|= KAUTH_AEVAL_IN_GROUP
;
2759 eval
.ae_exp_gall
= KAUTH_VNODE_GENERIC_ALL_BITS
;
2760 eval
.ae_exp_gread
= KAUTH_VNODE_GENERIC_READ_BITS
;
2761 eval
.ae_exp_gwrite
= KAUTH_VNODE_GENERIC_WRITE_BITS
;
2762 eval
.ae_exp_gexec
= KAUTH_VNODE_GENERIC_EXECUTE_BITS
;
2764 error
= kauth_acl_evaluate(cred
, &eval
);
2766 if (!error
&& (eval
.ae_result
== KAUTH_RESULT_ALLOW
)) {
2772 /* need to ask the server via ACCESS */
2773 struct vnop_access_args naa
;
2774 naa
.a_desc
= &vnop_access_desc
;
2775 naa
.a_vp
= NFSTOV(np
);
2776 naa
.a_action
= action
;
2777 naa
.a_context
= ctx
;
2778 if (!(error
= nfs_vnop_access(&naa
))) {
2785 /* try again without the extra read access */
2786 accessMode
&= ~NFS_OPEN_SHARE_ACCESS_READ
;
2790 return error
? error
: EACCES
;
2793 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, 1);
2800 * Open a file with the given access/deny modes.
2802 * If we have a delegation, we may be able to handle the open locally.
2803 * Otherwise, we will always send the open RPC even if this open's mode is
2804 * a subset of all the existing opens. This makes sure that we will always
2805 * be able to do a downgrade to any of the open modes.
2807 * Note: local conflicts should have already been checked in nfs_open_file_find().
2812 struct nfs_open_file
*nofp
,
2813 uint32_t accessMode
,
2817 vnode_t vp
= NFSTOV(np
);
2819 struct componentname cn
;
2820 const char *vname
= NULL
;
2822 char smallname
[128];
2823 char *filename
= NULL
;
2824 int error
= 0, readtoo
= 0;
2827 * We can handle the OPEN ourselves if we have a delegation,
2828 * unless it's a read delegation and the open is asking for
2829 * either write access or deny read. We also don't bother to
2830 * use the delegation if it's being returned.
2832 if (np
->n_openflags
& N_DELEG_MASK
) {
2833 if ((error
= nfs_open_state_set_busy(np
, vfs_context_thread(ctx
)))) {
2836 if ((np
->n_openflags
& N_DELEG_MASK
) && !(np
->n_openflags
& N_DELEG_RETURN
) &&
2837 (((np
->n_openflags
& N_DELEG_MASK
) == N_DELEG_WRITE
) ||
2838 (!(accessMode
& NFS_OPEN_SHARE_ACCESS_WRITE
) && !(denyMode
& NFS_OPEN_SHARE_DENY_READ
)))) {
2839 error
= nfs4_open_delegated(np
, nofp
, accessMode
, denyMode
, ctx
);
2840 nfs_open_state_clear_busy(np
);
2843 nfs_open_state_clear_busy(np
);
2847 * [sigh] We can't trust VFS to get the parent right for named
2848 * attribute nodes. (It likes to reparent the nodes after we've
2849 * created them.) Luckily we can probably get the right parent
2850 * from the n_parent we have stashed away.
2852 if ((np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) &&
2853 (((dvp
= np
->n_parent
)) && (error
= vnode_get(dvp
)))) {
2857 dvp
= vnode_getparent(vp
);
2859 vname
= vnode_getname(vp
);
2860 if (!dvp
|| !vname
) {
2866 filename
= &smallname
[0];
2867 namelen
= snprintf(filename
, sizeof(smallname
), "%s", vname
);
2868 if (namelen
>= sizeof(smallname
)) {
2869 MALLOC(filename
, char *, namelen
+ 1, M_TEMP
, M_WAITOK
);
2874 snprintf(filename
, namelen
+ 1, "%s", vname
);
2876 bzero(&cn
, sizeof(cn
));
2877 cn
.cn_nameptr
= filename
;
2878 cn
.cn_namelen
= namelen
;
2880 if (!(accessMode
& NFS_OPEN_SHARE_ACCESS_READ
)) {
2882 * Try to open it for read access too,
2883 * so the buffer cache can read data.
2886 accessMode
|= NFS_OPEN_SHARE_ACCESS_READ
;
2889 error
= nfs4_open_rpc(nofp
, ctx
, &cn
, NULL
, dvp
, &vp
, NFS_OPEN_NOCREATE
, accessMode
, denyMode
);
2891 if (!nfs_mount_state_error_should_restart(error
) &&
2892 (error
!= EINTR
) && (error
!= ERESTART
) && readtoo
) {
2893 /* try again without the extra read access */
2894 accessMode
&= ~NFS_OPEN_SHARE_ACCESS_READ
;
2900 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, 0);
2902 if (filename
&& (filename
!= &smallname
[0])) {
2903 FREE(filename
, M_TEMP
);
2906 vnode_putname(vname
);
2908 if (dvp
!= NULLVP
) {
2913 #endif /* CONFIG_NFS4 */
2917 struct vnop_mmap_args
/* {
2918 * struct vnodeop_desc *a_desc;
2921 * vfs_context_t a_context;
2924 vfs_context_t ctx
= ap
->a_context
;
2925 vnode_t vp
= ap
->a_vp
;
2926 nfsnode_t np
= VTONFS(vp
);
2927 int error
= 0, delegated
= 0;
2928 uint8_t accessMode
, denyMode
;
2929 struct nfsmount
*nmp
;
2930 struct nfs_open_owner
*noop
= NULL
;
2931 struct nfs_open_file
*nofp
= NULL
;
2934 if (nfs_mount_gone(nmp
)) {
2938 if (!vnode_isreg(vp
) || !(ap
->a_fflags
& (PROT_READ
| PROT_WRITE
))) {
2941 if (np
->n_flag
& NREVOKE
) {
2946 * fflags contains some combination of: PROT_READ, PROT_WRITE
2947 * Since it's not possible to mmap() without having the file open for reading,
2948 * read access is always there (regardless if PROT_READ is not set).
2950 accessMode
= NFS_OPEN_SHARE_ACCESS_READ
;
2951 if (ap
->a_fflags
& PROT_WRITE
) {
2952 accessMode
|= NFS_OPEN_SHARE_ACCESS_WRITE
;
2954 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
2956 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1);
2962 error
= nfs_mount_state_in_use_start(nmp
, NULL
);
2964 nfs_open_owner_rele(noop
);
2967 if (np
->n_flag
& NREVOKE
) {
2969 nfs_mount_state_in_use_end(nmp
, 0);
2970 nfs_open_owner_rele(noop
);
2974 error
= nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 1);
2975 if (error
|| (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
))) {
2976 NP(np
, "nfs_vnop_mmap: no open file for owner, error %d, %d", error
, kauth_cred_getuid(noop
->noo_cred
));
2980 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
2981 error
= nfs4_reopen(nofp
, NULL
);
2984 nfs_mount_state_in_use_end(nmp
, 0);
2990 error
= nfs_open_file_set_busy(nofp
, NULL
);
2998 * The open reference for mmap must mirror an existing open because
2999 * we may need to reclaim it after the file is closed.
3000 * So grab another open count matching the accessMode passed in.
3001 * If we already had an mmap open, prefer read/write without deny mode.
3002 * This means we may have to drop the current mmap open first.
3004 * N.B. We should have an open for the mmap, because, mmap was
3005 * called on an open descriptor, or we've created an open for read
3006 * from reading the first page for execve. However, if we piggy
3007 * backed on an existing NFS_OPEN_SHARE_ACCESS_READ/NFS_OPEN_SHARE_DENY_NONE
3008 * that open may have closed.
3011 if (!(nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_READ
)) {
3012 if (nofp
->nof_flags
& NFS_OPEN_FILE_NEEDCLOSE
) {
3013 /* We shouldn't get here. We've already open the file for execve */
3014 NP(np
, "nfs_vnop_mmap: File already needs close access: 0x%x, cred: %d thread: %lld",
3015 nofp
->nof_access
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
), thread_tid(vfs_context_thread(ctx
)));
3018 * mmapings for execve are just for read. Get out with EPERM if the accessMode is not ACCESS_READ
3019 * or the access would be denied. Other accesses should have an open descriptor for the mapping.
3021 if (accessMode
!= NFS_OPEN_SHARE_ACCESS_READ
|| (accessMode
& nofp
->nof_deny
)) {
3022 /* not asking for just read access -> fail */
3026 /* we don't have the file open, so open it for read access */
3027 if (nmp
->nm_vers
< NFS_VER4
) {
3028 /* NFS v2/v3 opens are always allowed - so just add it. */
3029 nfs_open_file_add_open(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, 0);
3034 error
= nfs4_open(np
, nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, ctx
);
3038 nofp
->nof_flags
|= NFS_OPEN_FILE_NEEDCLOSE
;
3045 /* determine deny mode for open */
3046 if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
3047 if (nofp
->nof_d_rw
|| nofp
->nof_d_rw_dw
|| nofp
->nof_d_rw_drw
) {
3049 if (nofp
->nof_d_rw
) {
3050 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
3051 } else if (nofp
->nof_d_rw_dw
) {
3052 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
3053 } else if (nofp
->nof_d_rw_drw
) {
3054 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
3056 } else if (nofp
->nof_rw
|| nofp
->nof_rw_dw
|| nofp
->nof_rw_drw
) {
3059 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
3060 } else if (nofp
->nof_rw_dw
) {
3061 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
3062 } else if (nofp
->nof_rw_drw
) {
3063 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
3068 } else { /* NFS_OPEN_SHARE_ACCESS_READ */
3069 if (nofp
->nof_d_r
|| nofp
->nof_d_r_dw
|| nofp
->nof_d_r_drw
) {
3071 if (nofp
->nof_d_r
) {
3072 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
3073 } else if (nofp
->nof_d_r_dw
) {
3074 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
3075 } else if (nofp
->nof_d_r_drw
) {
3076 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
3078 } else if (nofp
->nof_r
|| nofp
->nof_r_dw
|| nofp
->nof_r_drw
) {
3081 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
3082 } else if (nofp
->nof_r_dw
) {
3083 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
3084 } else if (nofp
->nof_r_drw
) {
3085 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
3087 } else if (nofp
->nof_d_rw
|| nofp
->nof_d_rw_dw
|| nofp
->nof_d_rw_drw
) {
3089 * This clause and the one below is to co-opt a read write access
3090 * for a read only mmaping. We probably got here in that an
3091 * existing rw open for an executable file already exists.
3094 accessMode
= NFS_OPEN_SHARE_ACCESS_BOTH
;
3095 if (nofp
->nof_d_rw
) {
3096 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
3097 } else if (nofp
->nof_d_rw_dw
) {
3098 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
3099 } else if (nofp
->nof_d_rw_drw
) {
3100 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
3102 } else if (nofp
->nof_rw
|| nofp
->nof_rw_dw
|| nofp
->nof_rw_drw
) {
3104 accessMode
= NFS_OPEN_SHARE_ACCESS_BOTH
;
3106 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
3107 } else if (nofp
->nof_rw_dw
) {
3108 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
3109 } else if (nofp
->nof_rw_drw
) {
3110 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
3116 if (error
) { /* mmap mode without proper open mode */
3121 * If the existing mmap access is more than the new access OR the
3122 * existing access is the same and the existing deny mode is less,
3123 * then we'll stick with the existing mmap open mode.
3125 if ((nofp
->nof_mmap_access
> accessMode
) ||
3126 ((nofp
->nof_mmap_access
== accessMode
) && (nofp
->nof_mmap_deny
<= denyMode
))) {
3130 /* update mmap open mode */
3131 if (nofp
->nof_mmap_access
) {
3132 error
= nfs_close(np
, nofp
, nofp
->nof_mmap_access
, nofp
->nof_mmap_deny
, ctx
);
3134 if (!nfs_mount_state_error_should_restart(error
)) {
3135 NP(np
, "nfs_vnop_mmap: close of previous mmap mode failed: %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
3137 NP(np
, "nfs_vnop_mmap: update, close error %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
3140 nofp
->nof_mmap_access
= nofp
->nof_mmap_deny
= 0;
3143 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, delegated
);
3144 nofp
->nof_mmap_access
= accessMode
;
3145 nofp
->nof_mmap_deny
= denyMode
;
3149 nfs_open_file_clear_busy(nofp
);
3151 if (nfs_mount_state_in_use_end(nmp
, error
)) {
3156 nfs_open_owner_rele(noop
);
3161 nfs_node_lock_force(np
);
3162 if ((np
->n_flag
& NISMAPPED
) == 0) {
3163 np
->n_flag
|= NISMAPPED
;
3166 nfs_node_unlock(np
);
3168 lck_mtx_lock(&nmp
->nm_lock
);
3169 nmp
->nm_state
&= ~NFSSTA_SQUISHY
;
3170 nmp
->nm_curdeadtimeout
= nmp
->nm_deadtimeout
;
3171 if (nmp
->nm_curdeadtimeout
<= 0) {
3172 nmp
->nm_deadto_start
= 0;
3175 lck_mtx_unlock(&nmp
->nm_lock
);
3183 nfs_vnop_mmap_check(
3184 struct vnop_mmap_check_args
/* {
3185 * struct vnodeop_desc *a_desc;
3188 * vfs_context_t a_context;
3191 vfs_context_t ctx
= ap
->a_context
;
3192 vnode_t vp
= ap
->a_vp
;
3193 struct nfsmount
*nmp
= VTONMP(vp
);
3194 struct vnop_access_args naa
;
3197 if (nfs_mount_gone(nmp
)) {
3201 if (vnode_isreg(vp
)) {
3203 * We only need to ensure that a page-in will be
3204 * possible with these credentials. Everything
3205 * else has been checked at other layers.
3207 naa
.a_desc
= &vnop_access_desc
;
3209 naa
.a_action
= KAUTH_VNODE_READ_DATA
;
3210 naa
.a_context
= ctx
;
3212 /* compute actual success/failure based on accessibility */
3213 error
= nfs_vnop_access(&naa
);
3221 struct vnop_mnomap_args
/* {
3222 * struct vnodeop_desc *a_desc;
3224 * vfs_context_t a_context;
3227 vfs_context_t ctx
= ap
->a_context
;
3228 vnode_t vp
= ap
->a_vp
;
3229 nfsnode_t np
= VTONFS(vp
);
3230 struct nfsmount
*nmp
;
3231 struct nfs_open_file
*nofp
= NULL
;
3234 int is_mapped_flag
= 0;
3237 if (nfs_mount_gone(nmp
)) {
3241 nfs_node_lock_force(np
);
3242 if (np
->n_flag
& NISMAPPED
) {
3244 np
->n_flag
&= ~NISMAPPED
;
3246 nfs_node_unlock(np
);
3247 if (is_mapped_flag
) {
3248 lck_mtx_lock(&nmp
->nm_lock
);
3249 if (nmp
->nm_mappers
) {
3252 NP(np
, "nfs_vnop_mnomap: removing mmap reference from mount, but mount has no files mmapped");
3254 lck_mtx_unlock(&nmp
->nm_lock
);
3257 /* flush buffers/ubc before we drop the open (in case it's our last open) */
3258 nfs_flush(np
, MNT_WAIT
, vfs_context_thread(ctx
), V_IGNORE_WRITEERR
);
3259 if (UBCINFOEXISTS(vp
) && (size
= ubc_getsize(vp
))) {
3260 ubc_msync(vp
, 0, size
, NULL
, UBC_PUSHALL
| UBC_SYNC
);
3263 /* walk all open files and close all mmap opens */
3265 error
= nfs_mount_state_in_use_start(nmp
, NULL
);
3269 lck_mtx_lock(&np
->n_openlock
);
3270 TAILQ_FOREACH(nofp
, &np
->n_opens
, nof_link
) {
3271 if (!nofp
->nof_mmap_access
) {
3274 lck_mtx_unlock(&np
->n_openlock
);
3276 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
3277 error
= nfs4_reopen(nofp
, NULL
);
3279 nfs_mount_state_in_use_end(nmp
, 0);
3285 error
= nfs_open_file_set_busy(nofp
, NULL
);
3288 lck_mtx_lock(&np
->n_openlock
);
3291 if (nofp
->nof_mmap_access
) {
3292 error
= nfs_close(np
, nofp
, nofp
->nof_mmap_access
, nofp
->nof_mmap_deny
, ctx
);
3293 if (!nfs_mount_state_error_should_restart(error
)) {
3294 if (error
) { /* not a state-operation-restarting error, so just clear the access */
3295 NP(np
, "nfs_vnop_mnomap: close of mmap mode failed: %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
3297 nofp
->nof_mmap_access
= nofp
->nof_mmap_deny
= 0;
3300 NP(np
, "nfs_vnop_mnomap: error %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
3303 nfs_open_file_clear_busy(nofp
);
3304 nfs_mount_state_in_use_end(nmp
, error
);
3307 lck_mtx_unlock(&np
->n_openlock
);
3308 nfs_mount_state_in_use_end(nmp
, error
);
3313 * Search a node's lock owner list for the owner for this process.
3314 * If not found and "alloc" is set, then allocate a new one.
3316 struct nfs_lock_owner
*
3317 nfs_lock_owner_find(nfsnode_t np
, proc_t p
, int alloc
)
3319 pid_t pid
= proc_pid(p
);
3320 struct nfs_lock_owner
*nlop
, *newnlop
= NULL
;
3323 lck_mtx_lock(&np
->n_openlock
);
3324 TAILQ_FOREACH(nlop
, &np
->n_lock_owners
, nlo_link
) {
3325 os_ref_count_t newcount
;
3327 if (nlop
->nlo_pid
!= pid
) {
3330 if (timevalcmp(&nlop
->nlo_pid_start
, &p
->p_start
, ==)) {
3333 /* stale lock owner... reuse it if we can */
3334 if (os_ref_get_count(&nlop
->nlo_refcnt
)) {
3335 TAILQ_REMOVE(&np
->n_lock_owners
, nlop
, nlo_link
);
3336 nlop
->nlo_flags
&= ~NFS_LOCK_OWNER_LINK
;
3337 newcount
= os_ref_release_locked(&nlop
->nlo_refcnt
);
3338 lck_mtx_unlock(&np
->n_openlock
);
3341 nlop
->nlo_pid_start
= p
->p_start
;
3342 nlop
->nlo_seqid
= 0;
3343 nlop
->nlo_stategenid
= 0;
3347 if (!nlop
&& !newnlop
&& alloc
) {
3348 lck_mtx_unlock(&np
->n_openlock
);
3349 MALLOC(newnlop
, struct nfs_lock_owner
*, sizeof(struct nfs_lock_owner
), M_TEMP
, M_WAITOK
);
3353 bzero(newnlop
, sizeof(*newnlop
));
3354 lck_mtx_init(&newnlop
->nlo_lock
, &nfs_open_grp
, LCK_ATTR_NULL
);
3355 newnlop
->nlo_pid
= pid
;
3356 newnlop
->nlo_pid_start
= p
->p_start
;
3357 newnlop
->nlo_name
= OSAddAtomic(1, &nfs_lock_owner_seqnum
);
3358 TAILQ_INIT(&newnlop
->nlo_locks
);
3361 if (!nlop
&& newnlop
) {
3362 newnlop
->nlo_flags
|= NFS_LOCK_OWNER_LINK
;
3363 os_ref_init(&newnlop
->nlo_refcnt
, NULL
);
3364 TAILQ_INSERT_HEAD(&np
->n_lock_owners
, newnlop
, nlo_link
);
3367 lck_mtx_unlock(&np
->n_openlock
);
3369 if (newnlop
&& (nlop
!= newnlop
)) {
3370 nfs_lock_owner_destroy(newnlop
);
3374 nfs_lock_owner_ref(nlop
);
3381 * destroy a lock owner that's no longer needed
3384 nfs_lock_owner_destroy(struct nfs_lock_owner
*nlop
)
3386 if (nlop
->nlo_open_owner
) {
3387 nfs_open_owner_rele(nlop
->nlo_open_owner
);
3388 nlop
->nlo_open_owner
= NULL
;
3390 lck_mtx_destroy(&nlop
->nlo_lock
, &nfs_open_grp
);
3395 * acquire a reference count on a lock owner
3398 nfs_lock_owner_ref(struct nfs_lock_owner
*nlop
)
3400 lck_mtx_lock(&nlop
->nlo_lock
);
3401 os_ref_retain_locked(&nlop
->nlo_refcnt
);
3402 lck_mtx_unlock(&nlop
->nlo_lock
);
3406 * drop a reference count on a lock owner and destroy it if
3407 * it is no longer referenced and no longer on the mount's list.
3410 nfs_lock_owner_rele(struct nfs_lock_owner
*nlop
)
3412 os_ref_count_t newcount
;
3414 lck_mtx_lock(&nlop
->nlo_lock
);
3415 if (os_ref_get_count(&nlop
->nlo_refcnt
) < 1) {
3416 panic("nfs_lock_owner_rele: no refcnt");
3418 newcount
= os_ref_release_locked(&nlop
->nlo_refcnt
);
3419 if (!newcount
&& (nlop
->nlo_flags
& NFS_LOCK_OWNER_BUSY
)) {
3420 panic("nfs_lock_owner_rele: busy");
3422 /* XXX we may potentially want to clean up idle/unused lock owner structures */
3423 if (newcount
|| (nlop
->nlo_flags
& NFS_LOCK_OWNER_LINK
)) {
3424 lck_mtx_unlock(&nlop
->nlo_lock
);
3427 /* owner is no longer referenced or linked to mount, so destroy it */
3428 lck_mtx_unlock(&nlop
->nlo_lock
);
3429 nfs_lock_owner_destroy(nlop
);
3433 * Mark a lock owner as busy because we are about to
3434 * start an operation that uses and updates lock owner state.
3437 nfs_lock_owner_set_busy(struct nfs_lock_owner
*nlop
, thread_t thd
)
3439 struct nfsmount
*nmp
;
3440 struct timespec ts
= { .tv_sec
= 2, .tv_nsec
= 0 };
3441 int error
= 0, slpflag
;
3443 nmp
= nlop
->nlo_open_owner
->noo_mount
;
3444 if (nfs_mount_gone(nmp
)) {
3447 slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
3449 lck_mtx_lock(&nlop
->nlo_lock
);
3450 while (nlop
->nlo_flags
& NFS_LOCK_OWNER_BUSY
) {
3451 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0))) {
3454 nlop
->nlo_flags
|= NFS_LOCK_OWNER_WANT
;
3455 msleep(nlop
, &nlop
->nlo_lock
, slpflag
, "nfs_lock_owner_set_busy", &ts
);
3459 nlop
->nlo_flags
|= NFS_LOCK_OWNER_BUSY
;
3461 lck_mtx_unlock(&nlop
->nlo_lock
);
3467 * Clear the busy flag on a lock owner and wake up anyone waiting
3471 nfs_lock_owner_clear_busy(struct nfs_lock_owner
*nlop
)
3475 lck_mtx_lock(&nlop
->nlo_lock
);
3476 if (!(nlop
->nlo_flags
& NFS_LOCK_OWNER_BUSY
)) {
3477 panic("nfs_lock_owner_clear_busy");
3479 wanted
= (nlop
->nlo_flags
& NFS_LOCK_OWNER_WANT
);
3480 nlop
->nlo_flags
&= ~(NFS_LOCK_OWNER_BUSY
| NFS_LOCK_OWNER_WANT
);
3481 lck_mtx_unlock(&nlop
->nlo_lock
);
3488 * Insert a held lock into a lock owner's sorted list.
3489 * (flock locks are always inserted at the head the list)
3492 nfs_lock_owner_insert_held_lock(struct nfs_lock_owner
*nlop
, struct nfs_file_lock
*newnflp
)
3494 struct nfs_file_lock
*nflp
;
3496 /* insert new lock in lock owner's held lock list */
3497 lck_mtx_lock(&nlop
->nlo_lock
);
3498 if ((newnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_FLOCK
) {
3499 TAILQ_INSERT_HEAD(&nlop
->nlo_locks
, newnflp
, nfl_lolink
);
3501 TAILQ_FOREACH(nflp
, &nlop
->nlo_locks
, nfl_lolink
) {
3502 if (newnflp
->nfl_start
< nflp
->nfl_start
) {
3507 TAILQ_INSERT_BEFORE(nflp
, newnflp
, nfl_lolink
);
3509 TAILQ_INSERT_TAIL(&nlop
->nlo_locks
, newnflp
, nfl_lolink
);
3512 lck_mtx_unlock(&nlop
->nlo_lock
);
3516 * Get a file lock structure for this lock owner.
3518 struct nfs_file_lock
*
3519 nfs_file_lock_alloc(struct nfs_lock_owner
*nlop
)
3521 struct nfs_file_lock
*nflp
= NULL
;
3523 lck_mtx_lock(&nlop
->nlo_lock
);
3524 if (!nlop
->nlo_alock
.nfl_owner
) {
3525 nflp
= &nlop
->nlo_alock
;
3526 nflp
->nfl_owner
= nlop
;
3528 lck_mtx_unlock(&nlop
->nlo_lock
);
3530 MALLOC(nflp
, struct nfs_file_lock
*, sizeof(struct nfs_file_lock
), M_TEMP
, M_WAITOK
);
3534 bzero(nflp
, sizeof(*nflp
));
3535 nflp
->nfl_flags
|= NFS_FILE_LOCK_ALLOC
;
3536 nflp
->nfl_owner
= nlop
;
3538 nfs_lock_owner_ref(nlop
);
3543 * destroy the given NFS file lock structure
3546 nfs_file_lock_destroy(struct nfs_file_lock
*nflp
)
3548 struct nfs_lock_owner
*nlop
= nflp
->nfl_owner
;
3550 if (nflp
->nfl_flags
& NFS_FILE_LOCK_ALLOC
) {
3551 nflp
->nfl_owner
= NULL
;
3554 lck_mtx_lock(&nlop
->nlo_lock
);
3555 bzero(nflp
, sizeof(*nflp
));
3556 lck_mtx_unlock(&nlop
->nlo_lock
);
3558 nfs_lock_owner_rele(nlop
);
3562 * Check if one file lock conflicts with another.
3563 * (nflp1 is the new lock. nflp2 is the existing lock.)
3566 nfs_file_lock_conflict(struct nfs_file_lock
*nflp1
, struct nfs_file_lock
*nflp2
, int *willsplit
)
3568 /* no conflict if lock is dead */
3569 if ((nflp1
->nfl_flags
& NFS_FILE_LOCK_DEAD
) || (nflp2
->nfl_flags
& NFS_FILE_LOCK_DEAD
)) {
3572 /* no conflict if it's ours - unless the lock style doesn't match */
3573 if ((nflp1
->nfl_owner
== nflp2
->nfl_owner
) &&
3574 ((nflp1
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == (nflp2
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
))) {
3575 if (willsplit
&& (nflp1
->nfl_type
!= nflp2
->nfl_type
) &&
3576 (nflp1
->nfl_start
> nflp2
->nfl_start
) &&
3577 (nflp1
->nfl_end
< nflp2
->nfl_end
)) {
3582 /* no conflict if ranges don't overlap */
3583 if ((nflp1
->nfl_start
> nflp2
->nfl_end
) || (nflp1
->nfl_end
< nflp2
->nfl_start
)) {
3586 /* no conflict if neither lock is exclusive */
3587 if ((nflp1
->nfl_type
!= F_WRLCK
) && (nflp2
->nfl_type
!= F_WRLCK
)) {
3596 * Send an NFSv4 LOCK RPC to the server.
3601 struct nfs_open_file
*nofp
,
3602 struct nfs_file_lock
*nflp
,
3608 struct nfs_lock_owner
*nlop
= nflp
->nfl_owner
;
3609 struct nfsmount
*nmp
;
3610 struct nfsm_chain nmreq
, nmrep
;
3613 int error
= 0, lockerror
= ENOENT
, newlocker
, numops
, status
;
3614 struct nfsreq_secinfo_args si
;
3617 if (nfs_mount_gone(nmp
)) {
3620 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
3624 newlocker
= (nlop
->nlo_stategenid
!= nmp
->nm_stategenid
);
3625 locktype
= (nflp
->nfl_flags
& NFS_FILE_LOCK_WAIT
) ?
3626 ((nflp
->nfl_type
== F_WRLCK
) ?
3627 NFS_LOCK_TYPE_WRITEW
:
3628 NFS_LOCK_TYPE_READW
) :
3629 ((nflp
->nfl_type
== F_WRLCK
) ?
3630 NFS_LOCK_TYPE_WRITE
:
3631 NFS_LOCK_TYPE_READ
);
3633 error
= nfs_open_file_set_busy(nofp
, thd
);
3637 error
= nfs_open_owner_set_busy(nofp
->nof_owner
, thd
);
3639 nfs_open_file_clear_busy(nofp
);
3642 if (!nlop
->nlo_open_owner
) {
3643 nfs_open_owner_ref(nofp
->nof_owner
);
3644 nlop
->nlo_open_owner
= nofp
->nof_owner
;
3647 error
= nfs_lock_owner_set_busy(nlop
, thd
);
3650 nfs_open_owner_clear_busy(nofp
->nof_owner
);
3651 nfs_open_file_clear_busy(nofp
);
3656 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
3657 nfsm_chain_null(&nmreq
);
3658 nfsm_chain_null(&nmrep
);
3660 // PUTFH, GETATTR, LOCK
3662 nfsm_chain_build_alloc_init(error
, &nmreq
, 33 * NFSX_UNSIGNED
);
3663 nfsm_chain_add_compound_header(error
, &nmreq
, "lock", nmp
->nm_minor_vers
, numops
);
3665 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
3666 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
3668 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
3669 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
3671 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOCK
);
3672 nfsm_chain_add_32(error
, &nmreq
, locktype
);
3673 nfsm_chain_add_32(error
, &nmreq
, reclaim
);
3674 nfsm_chain_add_64(error
, &nmreq
, nflp
->nfl_start
);
3675 nfsm_chain_add_64(error
, &nmreq
, NFS_LOCK_LENGTH(nflp
->nfl_start
, nflp
->nfl_end
));
3676 nfsm_chain_add_32(error
, &nmreq
, newlocker
);
3678 nfsm_chain_add_32(error
, &nmreq
, nofp
->nof_owner
->noo_seqid
);
3679 nfsm_chain_add_stateid(error
, &nmreq
, &nofp
->nof_stateid
);
3680 nfsm_chain_add_32(error
, &nmreq
, nlop
->nlo_seqid
);
3681 nfsm_chain_add_lock_owner4(error
, &nmreq
, nmp
, nlop
);
3683 nfsm_chain_add_stateid(error
, &nmreq
, &nlop
->nlo_stateid
);
3684 nfsm_chain_add_32(error
, &nmreq
, nlop
->nlo_seqid
);
3686 nfsm_chain_build_done(error
, &nmreq
);
3687 nfsm_assert(error
, (numops
== 0), EPROTO
);
3690 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, flags
| R_NOINTR
, &nmrep
, &xid
, &status
);
3692 if ((lockerror
= nfs_node_lock(np
))) {
3695 nfsm_chain_skip_tag(error
, &nmrep
);
3696 nfsm_chain_get_32(error
, &nmrep
, numops
);
3697 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
3699 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
3700 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, &xid
);
3702 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOCK
);
3703 nfs_owner_seqid_increment(newlocker
? nofp
->nof_owner
: NULL
, nlop
, error
);
3704 nfsm_chain_get_stateid(error
, &nmrep
, &nlop
->nlo_stateid
);
3706 /* Update the lock owner's stategenid once it appears the server has state for it. */
3707 /* We determine this by noting the request was successful (we got a stateid). */
3708 if (newlocker
&& !error
) {
3709 nlop
->nlo_stategenid
= nmp
->nm_stategenid
;
3713 nfs_node_unlock(np
);
3715 nfs_lock_owner_clear_busy(nlop
);
3717 nfs_open_owner_clear_busy(nofp
->nof_owner
);
3718 nfs_open_file_clear_busy(nofp
);
3720 nfsm_chain_cleanup(&nmreq
);
3721 nfsm_chain_cleanup(&nmrep
);
3726 * Send an NFSv4 LOCKU RPC to the server.
3731 struct nfs_lock_owner
*nlop
,
3739 struct nfsmount
*nmp
;
3740 struct nfsm_chain nmreq
, nmrep
;
3742 int error
= 0, lockerror
= ENOENT
, numops
, status
;
3743 struct nfsreq_secinfo_args si
;
3746 if (nfs_mount_gone(nmp
)) {
3749 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
3753 error
= nfs_lock_owner_set_busy(nlop
, NULL
);
3758 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
3759 nfsm_chain_null(&nmreq
);
3760 nfsm_chain_null(&nmrep
);
3762 // PUTFH, GETATTR, LOCKU
3764 nfsm_chain_build_alloc_init(error
, &nmreq
, 26 * NFSX_UNSIGNED
);
3765 nfsm_chain_add_compound_header(error
, &nmreq
, "unlock", nmp
->nm_minor_vers
, numops
);
3767 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
3768 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
3770 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
3771 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
3773 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOCKU
);
3774 nfsm_chain_add_32(error
, &nmreq
, (type
== F_WRLCK
) ? NFS_LOCK_TYPE_WRITE
: NFS_LOCK_TYPE_READ
);
3775 nfsm_chain_add_32(error
, &nmreq
, nlop
->nlo_seqid
);
3776 nfsm_chain_add_stateid(error
, &nmreq
, &nlop
->nlo_stateid
);
3777 nfsm_chain_add_64(error
, &nmreq
, start
);
3778 nfsm_chain_add_64(error
, &nmreq
, NFS_LOCK_LENGTH(start
, end
));
3779 nfsm_chain_build_done(error
, &nmreq
);
3780 nfsm_assert(error
, (numops
== 0), EPROTO
);
3783 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, flags
| R_NOINTR
, &nmrep
, &xid
, &status
);
3785 if ((lockerror
= nfs_node_lock(np
))) {
3788 nfsm_chain_skip_tag(error
, &nmrep
);
3789 nfsm_chain_get_32(error
, &nmrep
, numops
);
3790 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
3792 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
3793 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, &xid
);
3795 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOCKU
);
3796 nfs_owner_seqid_increment(NULL
, nlop
, error
);
3797 nfsm_chain_get_stateid(error
, &nmrep
, &nlop
->nlo_stateid
);
3800 nfs_node_unlock(np
);
3802 nfs_lock_owner_clear_busy(nlop
);
3803 nfsm_chain_cleanup(&nmreq
);
3804 nfsm_chain_cleanup(&nmrep
);
3809 * Send an NFSv4 LOCKT RPC to the server.
3814 struct nfs_lock_owner
*nlop
,
3820 struct nfsmount
*nmp
;
3821 struct nfsm_chain nmreq
, nmrep
;
3822 uint64_t xid
, val64
= 0;
3824 int error
= 0, lockerror
, numops
, status
;
3825 struct nfsreq_secinfo_args si
;
3828 if (nfs_mount_gone(nmp
)) {
3831 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
3836 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
3837 nfsm_chain_null(&nmreq
);
3838 nfsm_chain_null(&nmrep
);
3840 // PUTFH, GETATTR, LOCKT
3842 nfsm_chain_build_alloc_init(error
, &nmreq
, 26 * NFSX_UNSIGNED
);
3843 nfsm_chain_add_compound_header(error
, &nmreq
, "locktest", nmp
->nm_minor_vers
, numops
);
3845 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
3846 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
3848 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
3849 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
3851 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOCKT
);
3852 nfsm_chain_add_32(error
, &nmreq
, (fl
->l_type
== F_WRLCK
) ? NFS_LOCK_TYPE_WRITE
: NFS_LOCK_TYPE_READ
);
3853 nfsm_chain_add_64(error
, &nmreq
, start
);
3854 nfsm_chain_add_64(error
, &nmreq
, NFS_LOCK_LENGTH(start
, end
));
3855 nfsm_chain_add_lock_owner4(error
, &nmreq
, nmp
, nlop
);
3856 nfsm_chain_build_done(error
, &nmreq
);
3857 nfsm_assert(error
, (numops
== 0), EPROTO
);
3860 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
3862 if ((lockerror
= nfs_node_lock(np
))) {
3865 nfsm_chain_skip_tag(error
, &nmrep
);
3866 nfsm_chain_get_32(error
, &nmrep
, numops
);
3867 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
3869 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
3870 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, &xid
);
3872 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOCKT
);
3873 if (error
== NFSERR_DENIED
) {
3875 nfsm_chain_get_64(error
, &nmrep
, fl
->l_start
);
3876 nfsm_chain_get_64(error
, &nmrep
, val64
);
3877 fl
->l_len
= (val64
== UINT64_MAX
) ? 0 : val64
;
3878 nfsm_chain_get_32(error
, &nmrep
, val
);
3879 fl
->l_type
= (val
== NFS_LOCK_TYPE_WRITE
) ? F_WRLCK
: F_RDLCK
;
3881 fl
->l_whence
= SEEK_SET
;
3882 } else if (!error
) {
3883 fl
->l_type
= F_UNLCK
;
3887 nfs_node_unlock(np
);
3889 nfsm_chain_cleanup(&nmreq
);
3890 nfsm_chain_cleanup(&nmrep
);
3893 #endif /* CONFIG_NFS4 */
3896 * Check for any conflicts with the given lock.
3898 * Checking for a lock doesn't require the file to be opened.
3899 * So we skip all the open owner, open file, lock owner work
3900 * and just check for a conflicting lock.
3903 nfs_advlock_getlock(
3905 struct nfs_lock_owner
*nlop
,
3911 struct nfsmount
*nmp
;
3912 struct nfs_file_lock
*nflp
;
3913 int error
= 0, answered
= 0;
3916 if (nfs_mount_gone(nmp
)) {
3921 if ((error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
)))) {
3925 lck_mtx_lock(&np
->n_openlock
);
3926 /* scan currently held locks for conflict */
3927 TAILQ_FOREACH(nflp
, &np
->n_locks
, nfl_link
) {
3928 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
| NFS_FILE_LOCK_DEAD
)) {
3931 if ((start
<= nflp
->nfl_end
) && (end
>= nflp
->nfl_start
) &&
3932 ((fl
->l_type
== F_WRLCK
) || (nflp
->nfl_type
== F_WRLCK
))) {
3937 /* found a conflicting lock */
3938 fl
->l_type
= nflp
->nfl_type
;
3939 fl
->l_pid
= (nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_FLOCK
) ? -1 : nflp
->nfl_owner
->nlo_pid
;
3940 fl
->l_start
= nflp
->nfl_start
;
3941 fl
->l_len
= NFS_FLOCK_LENGTH(nflp
->nfl_start
, nflp
->nfl_end
);
3942 fl
->l_whence
= SEEK_SET
;
3944 } else if ((np
->n_openflags
& N_DELEG_WRITE
) && !(np
->n_openflags
& N_DELEG_RETURN
)) {
3946 * If we have a write delegation, we know there can't be other
3947 * locks on the server. So the answer is no conflicting lock found.
3949 fl
->l_type
= F_UNLCK
;
3952 lck_mtx_unlock(&np
->n_openlock
);
3954 nfs_mount_state_in_use_end(nmp
, 0);
3958 /* no conflict found locally, so ask the server */
3959 error
= nmp
->nm_funcs
->nf_getlock_rpc(np
, nlop
, fl
, start
, end
, ctx
);
3961 if (nfs_mount_state_in_use_end(nmp
, error
)) {
3968 * Acquire a file lock for the given range.
3970 * Add the lock (request) to the lock queue.
3971 * Scan the lock queue for any conflicting locks.
3972 * If a conflict is found, block or return an error.
3973 * Once end of queue is reached, send request to the server.
3974 * If the server grants the lock, scan the lock queue and
3975 * update any existing locks. Then (optionally) scan the
3976 * queue again to coalesce any locks adjacent to the new one.
3979 nfs_advlock_setlock(
3981 struct nfs_open_file
*nofp
,
3982 struct nfs_lock_owner
*nlop
,
3990 struct nfsmount
*nmp
;
3991 struct nfs_file_lock
*newnflp
, *nflp
, *nflp2
= NULL
, *nextnflp
, *flocknflp
= NULL
;
3992 struct nfs_file_lock
*coalnflp
;
3993 int error
= 0, error2
, willsplit
= 0, delay
, slpflag
, busy
= 0, inuse
= 0, restart
, inqueue
= 0;
3994 struct timespec ts
= { .tv_sec
= 1, .tv_nsec
= 0 };
3997 if (nfs_mount_gone(nmp
)) {
4000 slpflag
= NMFLAG(nmp
, INTR
) ? PCATCH
: 0;
4002 if ((type
!= F_RDLCK
) && (type
!= F_WRLCK
)) {
4006 /* allocate a new lock */
4007 newnflp
= nfs_file_lock_alloc(nlop
);
4011 newnflp
->nfl_start
= start
;
4012 newnflp
->nfl_end
= end
;
4013 newnflp
->nfl_type
= type
;
4014 if (op
== F_SETLKW
) {
4015 newnflp
->nfl_flags
|= NFS_FILE_LOCK_WAIT
;
4017 newnflp
->nfl_flags
|= style
;
4018 newnflp
->nfl_flags
|= NFS_FILE_LOCK_BLOCKED
;
4020 if ((style
== NFS_FILE_LOCK_STYLE_FLOCK
) && (type
== F_WRLCK
)) {
4022 * For exclusive flock-style locks, if we block waiting for the
4023 * lock, we need to first release any currently held shared
4024 * flock-style lock. So, the first thing we do is check if we
4025 * have a shared flock-style lock.
4027 nflp
= TAILQ_FIRST(&nlop
->nlo_locks
);
4028 if (nflp
&& ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != NFS_FILE_LOCK_STYLE_FLOCK
)) {
4031 if (nflp
&& (nflp
->nfl_type
!= F_RDLCK
)) {
4039 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
4044 if (np
->n_flag
& NREVOKE
) {
4046 nfs_mount_state_in_use_end(nmp
, 0);
4051 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
4052 nfs_mount_state_in_use_end(nmp
, 0);
4054 error
= nfs4_reopen(nofp
, vfs_context_thread(ctx
));
4062 lck_mtx_lock(&np
->n_openlock
);
4064 /* insert new lock at beginning of list */
4065 TAILQ_INSERT_HEAD(&np
->n_locks
, newnflp
, nfl_link
);
4069 /* scan current list of locks (held and pending) for conflicts */
4070 for (nflp
= TAILQ_NEXT(newnflp
, nfl_link
); nflp
; nflp
= nextnflp
) {
4071 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
4072 if (!nfs_file_lock_conflict(newnflp
, nflp
, &willsplit
)) {
4076 if (!(newnflp
->nfl_flags
& NFS_FILE_LOCK_WAIT
)) {
4080 /* Block until this lock is no longer held. */
4081 if (nflp
->nfl_blockcnt
== UINT_MAX
) {
4085 nflp
->nfl_blockcnt
++;
4088 /* release any currently held shared lock before sleeping */
4089 lck_mtx_unlock(&np
->n_openlock
);
4090 nfs_mount_state_in_use_end(nmp
, 0);
4092 error
= nfs_advlock_unlock(np
, nofp
, nlop
, 0, UINT64_MAX
, NFS_FILE_LOCK_STYLE_FLOCK
, ctx
);
4095 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
4098 lck_mtx_lock(&np
->n_openlock
);
4102 lck_mtx_lock(&np
->n_openlock
);
4103 /* no need to block/sleep if the conflict is gone */
4104 if (!nfs_file_lock_conflict(newnflp
, nflp
, NULL
)) {
4108 msleep(nflp
, &np
->n_openlock
, slpflag
, "nfs_advlock_setlock_blocked", &ts
);
4110 error
= nfs_sigintr(NFSTONMP(np
), NULL
, vfs_context_thread(ctx
), 0);
4111 if (!error
&& (nmp
->nm_state
& NFSSTA_RECOVER
)) {
4112 /* looks like we have a recover pending... restart */
4114 lck_mtx_unlock(&np
->n_openlock
);
4115 nfs_mount_state_in_use_end(nmp
, 0);
4117 lck_mtx_lock(&np
->n_openlock
);
4120 if (!error
&& (np
->n_flag
& NREVOKE
)) {
4123 } while (!error
&& nfs_file_lock_conflict(newnflp
, nflp
, NULL
));
4124 nflp
->nfl_blockcnt
--;
4125 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
) && !nflp
->nfl_blockcnt
) {
4126 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
4127 nfs_file_lock_destroy(nflp
);
4129 if (error
|| restart
) {
4132 /* We have released n_openlock and we can't trust that nextnflp is still valid. */
4133 /* So, start this lock-scanning loop over from where it started. */
4134 nextnflp
= TAILQ_NEXT(newnflp
, nfl_link
);
4136 lck_mtx_unlock(&np
->n_openlock
);
4146 * It looks like this operation is splitting a lock.
4147 * We allocate a new lock now so we don't have to worry
4148 * about the allocation failing after we've updated some state.
4150 nflp2
= nfs_file_lock_alloc(nlop
);
4157 /* once scan for local conflicts is clear, send request to server */
4158 if ((error
= nfs_open_state_set_busy(np
, vfs_context_thread(ctx
)))) {
4165 /* do we have a delegation? (that we're not returning?) */
4166 if ((np
->n_openflags
& N_DELEG_MASK
) && !(np
->n_openflags
& N_DELEG_RETURN
)) {
4167 if (np
->n_openflags
& N_DELEG_WRITE
) {
4168 /* with a write delegation, just take the lock delegated */
4169 newnflp
->nfl_flags
|= NFS_FILE_LOCK_DELEGATED
;
4171 /* make sure the lock owner knows its open owner */
4172 if (!nlop
->nlo_open_owner
) {
4173 nfs_open_owner_ref(nofp
->nof_owner
);
4174 nlop
->nlo_open_owner
= nofp
->nof_owner
;
4179 * If we don't have any non-delegated opens but we do have
4180 * delegated opens, then we need to first claim the delegated
4181 * opens so that the lock request on the server can be associated
4182 * with an open it knows about.
4184 if ((!nofp
->nof_rw_drw
&& !nofp
->nof_w_drw
&& !nofp
->nof_r_drw
&&
4185 !nofp
->nof_rw_dw
&& !nofp
->nof_w_dw
&& !nofp
->nof_r_dw
&&
4186 !nofp
->nof_rw
&& !nofp
->nof_w
&& !nofp
->nof_r
) &&
4187 (nofp
->nof_d_rw_drw
|| nofp
->nof_d_w_drw
|| nofp
->nof_d_r_drw
||
4188 nofp
->nof_d_rw_dw
|| nofp
->nof_d_w_dw
|| nofp
->nof_d_r_dw
||
4189 nofp
->nof_d_rw
|| nofp
->nof_d_w
|| nofp
->nof_d_r
)) {
4190 error
= nfs4_claim_delegated_state_for_open_file(nofp
, 0);
4198 if (np
->n_flag
& NREVOKE
) {
4203 nfs_open_state_clear_busy(np
);
4206 error
= nmp
->nm_funcs
->nf_setlock_rpc(np
, nofp
, newnflp
, 0, 0, vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4207 if (!busy
&& !nfs_open_state_set_busy(np
, vfs_context_thread(ctx
))) {
4211 if (!error
|| ((error
!= NFSERR_DENIED
) && (error
!= NFSERR_GRACE
))) {
4214 /* request was denied due to either conflict or grace period */
4215 if ((error
== NFSERR_DENIED
) && !(newnflp
->nfl_flags
& NFS_FILE_LOCK_WAIT
)) {
4220 /* release any currently held shared lock before sleeping */
4221 nfs_open_state_clear_busy(np
);
4224 nfs_mount_state_in_use_end(nmp
, 0);
4227 error2
= nfs_advlock_unlock(np
, nofp
, nlop
, 0, UINT64_MAX
, NFS_FILE_LOCK_STYLE_FLOCK
, ctx
);
4230 error2
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
4234 error2
= nfs_open_state_set_busy(np
, vfs_context_thread(ctx
));
4243 * Wait a little bit and send the request again.
4244 * Except for retries of blocked v2/v3 request where we've already waited a bit.
4246 if ((nmp
->nm_vers
>= NFS_VER4
) || (error
== NFSERR_GRACE
)) {
4247 if (error
== NFSERR_GRACE
) {
4253 tsleep(newnflp
, slpflag
, "nfs_advlock_setlock_delay", delay
* (hz
/ 2));
4256 error
= nfs_sigintr(NFSTONMP(np
), NULL
, vfs_context_thread(ctx
), 0);
4257 if (!error
&& (nmp
->nm_state
& NFSSTA_RECOVER
)) {
4258 /* looks like we have a recover pending... restart */
4259 nfs_open_state_clear_busy(np
);
4262 nfs_mount_state_in_use_end(nmp
, 0);
4267 if (!error
&& (np
->n_flag
& NREVOKE
)) {
4273 if (nfs_mount_state_error_should_restart(error
)) {
4274 /* looks like we need to restart this operation */
4276 nfs_open_state_clear_busy(np
);
4280 nfs_mount_state_in_use_end(nmp
, error
);
4285 lck_mtx_lock(&np
->n_openlock
);
4286 newnflp
->nfl_flags
&= ~NFS_FILE_LOCK_BLOCKED
;
4288 newnflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
4289 if (newnflp
->nfl_blockcnt
) {
4290 /* wake up anyone blocked on this lock */
4293 /* remove newnflp from lock list and destroy */
4295 TAILQ_REMOVE(&np
->n_locks
, newnflp
, nfl_link
);
4297 nfs_file_lock_destroy(newnflp
);
4299 lck_mtx_unlock(&np
->n_openlock
);
4301 nfs_open_state_clear_busy(np
);
4304 nfs_mount_state_in_use_end(nmp
, error
);
4307 nfs_file_lock_destroy(nflp2
);
4312 /* server granted the lock */
4315 * Scan for locks to update.
4317 * Locks completely covered are killed.
4318 * At most two locks may need to be clipped.
4319 * It's possible that a single lock may need to be split.
4321 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
4322 if (nflp
== newnflp
) {
4325 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
| NFS_FILE_LOCK_DEAD
)) {
4328 if (nflp
->nfl_owner
!= nlop
) {
4331 if ((newnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != (nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
)) {
4334 if ((newnflp
->nfl_start
> nflp
->nfl_end
) || (newnflp
->nfl_end
< nflp
->nfl_start
)) {
4337 /* here's one to update */
4338 if ((newnflp
->nfl_start
<= nflp
->nfl_start
) && (newnflp
->nfl_end
>= nflp
->nfl_end
)) {
4339 /* The entire lock is being replaced. */
4340 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
4341 lck_mtx_lock(&nlop
->nlo_lock
);
4342 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
4343 lck_mtx_unlock(&nlop
->nlo_lock
);
4344 /* lock will be destroyed below, if no waiters */
4345 } else if ((newnflp
->nfl_start
> nflp
->nfl_start
) && (newnflp
->nfl_end
< nflp
->nfl_end
)) {
4346 /* We're replacing a range in the middle of a lock. */
4347 /* The current lock will be split into two locks. */
4348 /* Update locks and insert new lock after current lock. */
4349 nflp2
->nfl_flags
|= (nflp
->nfl_flags
& (NFS_FILE_LOCK_STYLE_MASK
| NFS_FILE_LOCK_DELEGATED
));
4350 nflp2
->nfl_type
= nflp
->nfl_type
;
4351 nflp2
->nfl_start
= newnflp
->nfl_end
+ 1;
4352 nflp2
->nfl_end
= nflp
->nfl_end
;
4353 nflp
->nfl_end
= newnflp
->nfl_start
- 1;
4354 TAILQ_INSERT_AFTER(&np
->n_locks
, nflp
, nflp2
, nfl_link
);
4355 nfs_lock_owner_insert_held_lock(nlop
, nflp2
);
4358 } else if (newnflp
->nfl_start
> nflp
->nfl_start
) {
4359 /* We're replacing the end of a lock. */
4360 nflp
->nfl_end
= newnflp
->nfl_start
- 1;
4361 } else if (newnflp
->nfl_end
< nflp
->nfl_end
) {
4362 /* We're replacing the start of a lock. */
4363 nflp
->nfl_start
= newnflp
->nfl_end
+ 1;
4365 if (nflp
->nfl_blockcnt
) {
4366 /* wake up anyone blocked on this lock */
4368 } else if (nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
) {
4369 /* remove nflp from lock list and destroy */
4370 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
4371 nfs_file_lock_destroy(nflp
);
4375 nfs_lock_owner_insert_held_lock(nlop
, newnflp
);
4378 * POSIX locks should be coalesced when possible.
4380 if ((style
== NFS_FILE_LOCK_STYLE_POSIX
) && (nofp
->nof_flags
& NFS_OPEN_FILE_POSIXLOCK
)) {
4382 * Walk through the lock queue and check each of our held locks with
4383 * the previous and next locks in the lock owner's "held lock list".
4384 * If the two locks can be coalesced, we merge the current lock into
4385 * the other (previous or next) lock. Merging this way makes sure that
4386 * lock ranges are always merged forward in the lock queue. This is
4387 * important because anyone blocked on the lock being "merged away"
4388 * will still need to block on that range and it will simply continue
4389 * checking locks that are further down the list.
4391 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
4392 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
| NFS_FILE_LOCK_DEAD
)) {
4395 if (nflp
->nfl_owner
!= nlop
) {
4398 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != NFS_FILE_LOCK_STYLE_POSIX
) {
4401 if (((coalnflp
= TAILQ_PREV(nflp
, nfs_file_lock_queue
, nfl_lolink
))) &&
4402 ((coalnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
) &&
4403 (coalnflp
->nfl_type
== nflp
->nfl_type
) &&
4404 (coalnflp
->nfl_end
== (nflp
->nfl_start
- 1))) {
4405 coalnflp
->nfl_end
= nflp
->nfl_end
;
4406 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
4407 lck_mtx_lock(&nlop
->nlo_lock
);
4408 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
4409 lck_mtx_unlock(&nlop
->nlo_lock
);
4410 } else if (((coalnflp
= TAILQ_NEXT(nflp
, nfl_lolink
))) &&
4411 ((coalnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
) &&
4412 (coalnflp
->nfl_type
== nflp
->nfl_type
) &&
4413 (coalnflp
->nfl_start
== (nflp
->nfl_end
+ 1))) {
4414 coalnflp
->nfl_start
= nflp
->nfl_start
;
4415 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
4416 lck_mtx_lock(&nlop
->nlo_lock
);
4417 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
4418 lck_mtx_unlock(&nlop
->nlo_lock
);
4420 if (!(nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
)) {
4423 if (nflp
->nfl_blockcnt
) {
4424 /* wake up anyone blocked on this lock */
4427 /* remove nflp from lock list and destroy */
4428 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
4429 nfs_file_lock_destroy(nflp
);
4434 lck_mtx_unlock(&np
->n_openlock
);
4435 nfs_open_state_clear_busy(np
);
4438 nfs_mount_state_in_use_end(nmp
, error
);
4441 nfs_file_lock_destroy(nflp2
);
4447 * Release all (same style) locks within the given range.
4452 struct nfs_open_file
*nofp
4457 struct nfs_lock_owner
*nlop
,
4463 struct nfsmount
*nmp
;
4464 struct nfs_file_lock
*nflp
, *nextnflp
, *newnflp
= NULL
;
4465 int error
= 0, willsplit
= 0, send_unlock_rpcs
= 1;
4468 if (nfs_mount_gone(nmp
)) {
4473 if ((error
= nfs_mount_state_in_use_start(nmp
, NULL
))) {
4477 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
4478 nfs_mount_state_in_use_end(nmp
, 0);
4479 error
= nfs4_reopen(nofp
, NULL
);
4486 if ((error
= nfs_open_state_set_busy(np
, NULL
))) {
4487 nfs_mount_state_in_use_end(nmp
, error
);
4491 lck_mtx_lock(&np
->n_openlock
);
4492 if ((start
> 0) && (end
< UINT64_MAX
) && !willsplit
) {
4494 * We may need to allocate a new lock if an existing lock gets split.
4495 * So, we first scan the list to check for a split, and if there's
4496 * going to be one, we'll allocate one now.
4498 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
4499 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
| NFS_FILE_LOCK_DEAD
)) {
4502 if (nflp
->nfl_owner
!= nlop
) {
4505 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != style
) {
4508 if ((start
> nflp
->nfl_end
) || (end
< nflp
->nfl_start
)) {
4511 if ((start
> nflp
->nfl_start
) && (end
< nflp
->nfl_end
)) {
4517 lck_mtx_unlock(&np
->n_openlock
);
4518 nfs_open_state_clear_busy(np
);
4519 nfs_mount_state_in_use_end(nmp
, 0);
4520 newnflp
= nfs_file_lock_alloc(nlop
);
4529 * Free all of our locks in the given range.
4531 * Note that this process requires sending requests to the server.
4532 * Because of this, we will release the n_openlock while performing
4533 * the unlock RPCs. The N_OPENBUSY state keeps the state of *held*
4534 * locks from changing underneath us. However, other entries in the
4535 * list may be removed. So we need to be careful walking the list.
4539 * Don't unlock ranges that are held by other-style locks.
4540 * If style is posix, don't send any unlock rpcs if flock is held.
4541 * If we unlock an flock, don't send unlock rpcs for any posix-style
4542 * ranges held - instead send unlocks for the ranges not held.
4544 if ((style
== NFS_FILE_LOCK_STYLE_POSIX
) &&
4545 ((nflp
= TAILQ_FIRST(&nlop
->nlo_locks
))) &&
4546 ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_FLOCK
)) {
4547 send_unlock_rpcs
= 0;
4549 if ((style
== NFS_FILE_LOCK_STYLE_FLOCK
) &&
4550 ((nflp
= TAILQ_FIRST(&nlop
->nlo_locks
))) &&
4551 ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_FLOCK
) &&
4552 ((nflp
= TAILQ_NEXT(nflp
, nfl_lolink
))) &&
4553 ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
)) {
4555 int type
= TAILQ_FIRST(&nlop
->nlo_locks
)->nfl_type
;
4556 int delegated
= (TAILQ_FIRST(&nlop
->nlo_locks
)->nfl_flags
& NFS_FILE_LOCK_DELEGATED
);
4557 while (!delegated
&& nflp
) {
4558 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
) {
4559 /* unlock the range preceding this lock */
4560 lck_mtx_unlock(&np
->n_openlock
);
4561 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, type
, s
, nflp
->nfl_start
- 1, 0,
4562 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4563 if (nfs_mount_state_error_should_restart(error
)) {
4564 nfs_open_state_clear_busy(np
);
4565 nfs_mount_state_in_use_end(nmp
, error
);
4568 lck_mtx_lock(&np
->n_openlock
);
4572 s
= nflp
->nfl_end
+ 1;
4574 nflp
= TAILQ_NEXT(nflp
, nfl_lolink
);
4577 lck_mtx_unlock(&np
->n_openlock
);
4578 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, type
, s
, end
, 0,
4579 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4580 if (nfs_mount_state_error_should_restart(error
)) {
4581 nfs_open_state_clear_busy(np
);
4582 nfs_mount_state_in_use_end(nmp
, error
);
4585 lck_mtx_lock(&np
->n_openlock
);
4590 send_unlock_rpcs
= 0;
4593 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
4594 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
| NFS_FILE_LOCK_DEAD
)) {
4597 if (nflp
->nfl_owner
!= nlop
) {
4600 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != style
) {
4603 if ((start
> nflp
->nfl_end
) || (end
< nflp
->nfl_start
)) {
4606 /* here's one to unlock */
4607 if ((start
<= nflp
->nfl_start
) && (end
>= nflp
->nfl_end
)) {
4608 /* The entire lock is being unlocked. */
4609 if (send_unlock_rpcs
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
4610 lck_mtx_unlock(&np
->n_openlock
);
4611 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, nflp
->nfl_type
, nflp
->nfl_start
, nflp
->nfl_end
, 0,
4612 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4613 if (nfs_mount_state_error_should_restart(error
)) {
4614 nfs_open_state_clear_busy(np
);
4615 nfs_mount_state_in_use_end(nmp
, error
);
4618 lck_mtx_lock(&np
->n_openlock
);
4620 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
4624 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
4625 lck_mtx_lock(&nlop
->nlo_lock
);
4626 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
4627 lck_mtx_unlock(&nlop
->nlo_lock
);
4628 /* lock will be destroyed below, if no waiters */
4629 } else if ((start
> nflp
->nfl_start
) && (end
< nflp
->nfl_end
)) {
4630 /* We're unlocking a range in the middle of a lock. */
4631 /* The current lock will be split into two locks. */
4632 if (send_unlock_rpcs
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
4633 lck_mtx_unlock(&np
->n_openlock
);
4634 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, nflp
->nfl_type
, start
, end
, 0,
4635 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4636 if (nfs_mount_state_error_should_restart(error
)) {
4637 nfs_open_state_clear_busy(np
);
4638 nfs_mount_state_in_use_end(nmp
, error
);
4641 lck_mtx_lock(&np
->n_openlock
);
4646 /* update locks and insert new lock after current lock */
4647 newnflp
->nfl_flags
|= (nflp
->nfl_flags
& (NFS_FILE_LOCK_STYLE_MASK
| NFS_FILE_LOCK_DELEGATED
));
4648 newnflp
->nfl_type
= nflp
->nfl_type
;
4649 newnflp
->nfl_start
= end
+ 1;
4650 newnflp
->nfl_end
= nflp
->nfl_end
;
4651 nflp
->nfl_end
= start
- 1;
4652 TAILQ_INSERT_AFTER(&np
->n_locks
, nflp
, newnflp
, nfl_link
);
4653 nfs_lock_owner_insert_held_lock(nlop
, newnflp
);
4656 } else if (start
> nflp
->nfl_start
) {
4657 /* We're unlocking the end of a lock. */
4658 if (send_unlock_rpcs
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
4659 lck_mtx_unlock(&np
->n_openlock
);
4660 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, nflp
->nfl_type
, start
, nflp
->nfl_end
, 0,
4661 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4662 if (nfs_mount_state_error_should_restart(error
)) {
4663 nfs_open_state_clear_busy(np
);
4664 nfs_mount_state_in_use_end(nmp
, error
);
4667 lck_mtx_lock(&np
->n_openlock
);
4669 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
4673 nflp
->nfl_end
= start
- 1;
4674 } else if (end
< nflp
->nfl_end
) {
4675 /* We're unlocking the start of a lock. */
4676 if (send_unlock_rpcs
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
4677 lck_mtx_unlock(&np
->n_openlock
);
4678 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, nflp
->nfl_type
, nflp
->nfl_start
, end
, 0,
4679 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4680 if (nfs_mount_state_error_should_restart(error
)) {
4681 nfs_open_state_clear_busy(np
);
4682 nfs_mount_state_in_use_end(nmp
, error
);
4685 lck_mtx_lock(&np
->n_openlock
);
4687 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
4691 nflp
->nfl_start
= end
+ 1;
4693 if (nflp
->nfl_blockcnt
) {
4694 /* wake up anyone blocked on this lock */
4696 } else if (nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
) {
4697 /* remove nflp from lock list and destroy */
4698 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
4699 nfs_file_lock_destroy(nflp
);
4703 lck_mtx_unlock(&np
->n_openlock
);
4704 nfs_open_state_clear_busy(np
);
4705 nfs_mount_state_in_use_end(nmp
, 0);
4708 nfs_file_lock_destroy(newnflp
);
4714 * NFSv4 advisory file locking
4718 struct vnop_advlock_args
/* {
4719 * struct vnodeop_desc *a_desc;
4723 * struct flock *a_fl;
4725 * vfs_context_t a_context;
4728 vnode_t vp
= ap
->a_vp
;
4729 nfsnode_t np
= VTONFS(ap
->a_vp
);
4730 struct flock
*fl
= ap
->a_fl
;
4732 int flags
= ap
->a_flags
;
4733 vfs_context_t ctx
= ap
->a_context
;
4734 struct nfsmount
*nmp
;
4735 struct nfs_open_owner
*noop
= NULL
;
4736 struct nfs_open_file
*nofp
= NULL
;
4737 struct nfs_lock_owner
*nlop
= NULL
;
4739 uint64_t start
, end
;
4740 int error
= 0, modified
, style
;
4742 #define OFF_MAX QUAD_MAX
4744 nmp
= VTONMP(ap
->a_vp
);
4745 if (nfs_mount_gone(nmp
)) {
4748 lck_mtx_lock(&nmp
->nm_lock
);
4749 if ((nmp
->nm_vers
<= NFS_VER3
) && (nmp
->nm_lockmode
== NFS_LOCK_MODE_DISABLED
)) {
4750 lck_mtx_unlock(&nmp
->nm_lock
);
4753 lck_mtx_unlock(&nmp
->nm_lock
);
4755 if (np
->n_flag
& NREVOKE
) {
4758 vtype
= vnode_vtype(ap
->a_vp
);
4759 if (vtype
== VDIR
) { /* ignore lock requests on directories */
4762 if (vtype
!= VREG
) { /* anything other than regular files is invalid */
4766 /* Convert the flock structure into a start and end. */
4767 switch (fl
->l_whence
) {
4771 * Caller is responsible for adding any necessary offset
4772 * to fl->l_start when SEEK_CUR is used.
4774 lstart
= fl
->l_start
;
4777 /* need to flush, and refetch attributes to make */
4778 /* sure we have the correct end of file offset */
4779 if ((error
= nfs_node_lock(np
))) {
4782 modified
= (np
->n_flag
& NMODIFIED
);
4783 nfs_node_unlock(np
);
4784 if (modified
&& ((error
= nfs_vinvalbuf(vp
, V_SAVE
, ctx
, 1)))) {
4787 if ((error
= nfs_getattr(np
, NULL
, ctx
, NGA_UNCACHED
))) {
4790 nfs_data_lock(np
, NFS_DATA_LOCK_SHARED
);
4791 if ((np
->n_size
> OFF_MAX
) ||
4792 ((fl
->l_start
> 0) && (np
->n_size
> (u_quad_t
)(OFF_MAX
- fl
->l_start
)))) {
4795 lstart
= np
->n_size
+ fl
->l_start
;
4796 nfs_data_unlock(np
);
4808 if (fl
->l_len
== 0) {
4810 } else if (fl
->l_len
> 0) {
4811 if ((fl
->l_len
- 1) > (OFF_MAX
- lstart
)) {
4814 end
= start
- 1 + fl
->l_len
;
4815 } else { /* l_len is negative */
4816 if ((lstart
+ fl
->l_len
) < 0) {
4822 if ((nmp
->nm_vers
== NFS_VER2
) && ((start
> INT32_MAX
) || (fl
->l_len
&& (end
> INT32_MAX
)))) {
4826 style
= (flags
& F_FLOCK
) ? NFS_FILE_LOCK_STYLE_FLOCK
: NFS_FILE_LOCK_STYLE_POSIX
;
4827 if ((style
== NFS_FILE_LOCK_STYLE_FLOCK
) && ((start
!= 0) || (end
!= UINT64_MAX
))) {
4831 /* find the lock owner, alloc if not unlock */
4832 nlop
= nfs_lock_owner_find(np
, vfs_context_proc(ctx
), (op
!= F_UNLCK
));
4834 error
= (op
== F_UNLCK
) ? 0 : ENOMEM
;
4836 NP(np
, "nfs_vnop_advlock: no lock owner, error %d", error
);
4841 if (op
== F_GETLK
) {
4842 error
= nfs_advlock_getlock(np
, nlop
, fl
, start
, end
, ctx
);
4844 /* find the open owner */
4845 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 0);
4847 NP(np
, "nfs_vnop_advlock: no open owner %d", kauth_cred_getuid(vfs_context_ucred(ctx
)));
4851 /* find the open file */
4855 error
= nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 0);
4859 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
4860 NP(np
, "nfs_vnop_advlock: LOST %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
4864 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
4865 error
= nfs4_reopen(nofp
, ((op
== F_UNLCK
) ? NULL
: vfs_context_thread(ctx
)));
4873 NP(np
, "nfs_vnop_advlock: no open file %d, %d", error
, kauth_cred_getuid(noop
->noo_cred
));
4876 if (op
== F_UNLCK
) {
4877 error
= nfs_advlock_unlock(np
, nofp
, nlop
, start
, end
, style
, ctx
);
4878 } else if ((op
== F_SETLK
) || (op
== F_SETLKW
)) {
4879 if ((op
== F_SETLK
) && (flags
& F_WAIT
)) {
4882 error
= nfs_advlock_setlock(np
, nofp
, nlop
, op
, start
, end
, style
, fl
->l_type
, ctx
);
4884 /* not getlk, unlock or lock? */
4891 nfs_lock_owner_rele(nlop
);
4894 nfs_open_owner_rele(noop
);
4900 * Check if an open owner holds any locks on a file.
4903 nfs_check_for_locks(struct nfs_open_owner
*noop
, struct nfs_open_file
*nofp
)
4905 struct nfs_lock_owner
*nlop
;
4907 TAILQ_FOREACH(nlop
, &nofp
->nof_np
->n_lock_owners
, nlo_link
) {
4908 if (nlop
->nlo_open_owner
!= noop
) {
4911 if (!TAILQ_EMPTY(&nlop
->nlo_locks
)) {
4915 return nlop
? 1 : 0;
4920 * Reopen simple (no deny, no locks) open state that was lost.
4923 nfs4_reopen(struct nfs_open_file
*nofp
, thread_t thd
)
4925 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
4926 struct nfsmount
*nmp
= NFSTONMP(nofp
->nof_np
);
4927 nfsnode_t np
= nofp
->nof_np
;
4928 vnode_t vp
= NFSTOV(np
);
4930 struct componentname cn
;
4931 const char *vname
= NULL
;
4932 const char *name
= NULL
;
4934 char smallname
[128];
4935 char *filename
= NULL
;
4936 int error
= 0, done
= 0, slpflag
= NMFLAG(nmp
, INTR
) ? PCATCH
: 0;
4937 struct timespec ts
= { .tv_sec
= 1, .tv_nsec
= 0 };
4939 lck_mtx_lock(&nofp
->nof_lock
);
4940 while (nofp
->nof_flags
& NFS_OPEN_FILE_REOPENING
) {
4941 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0))) {
4944 msleep(&nofp
->nof_flags
, &nofp
->nof_lock
, slpflag
| (PZERO
- 1), "nfsreopenwait", &ts
);
4947 if (error
|| !(nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
4948 lck_mtx_unlock(&nofp
->nof_lock
);
4951 nofp
->nof_flags
|= NFS_OPEN_FILE_REOPENING
;
4952 lck_mtx_unlock(&nofp
->nof_lock
);
4954 nfs_node_lock_force(np
);
4955 if ((vnode_vtype(vp
) != VDIR
) && np
->n_sillyrename
) {
4957 * The node's been sillyrenamed, so we need to use
4958 * the sillyrename directory/name to do the open.
4960 struct nfs_sillyrename
*nsp
= np
->n_sillyrename
;
4961 dvp
= NFSTOV(nsp
->nsr_dnp
);
4962 if ((error
= vnode_get(dvp
))) {
4964 nfs_node_unlock(np
);
4967 name
= nsp
->nsr_name
;
4970 * [sigh] We can't trust VFS to get the parent right for named
4971 * attribute nodes. (It likes to reparent the nodes after we've
4972 * created them.) Luckily we can probably get the right parent
4973 * from the n_parent we have stashed away.
4975 if ((np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) &&
4976 (((dvp
= np
->n_parent
)) && (error
= vnode_get(dvp
)))) {
4980 dvp
= vnode_getparent(vp
);
4982 vname
= vnode_getname(vp
);
4983 if (!dvp
|| !vname
) {
4987 nfs_node_unlock(np
);
4992 filename
= &smallname
[0];
4993 namelen
= snprintf(filename
, sizeof(smallname
), "%s", name
);
4994 if (namelen
>= sizeof(smallname
)) {
4995 MALLOC(filename
, char *, namelen
+ 1, M_TEMP
, M_WAITOK
);
5000 snprintf(filename
, namelen
+ 1, "%s", name
);
5002 nfs_node_unlock(np
);
5003 bzero(&cn
, sizeof(cn
));
5004 cn
.cn_nameptr
= filename
;
5005 cn
.cn_namelen
= namelen
;
5009 if ((error
= nfs_mount_state_in_use_start(nmp
, thd
))) {
5014 error
= nfs4_open_reopen_rpc(nofp
, thd
, noop
->noo_cred
, &cn
, dvp
, &vp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_NONE
);
5016 if (!error
&& nofp
->nof_w
) {
5017 error
= nfs4_open_reopen_rpc(nofp
, thd
, noop
->noo_cred
, &cn
, dvp
, &vp
, NFS_OPEN_SHARE_ACCESS_WRITE
, NFS_OPEN_SHARE_DENY_NONE
);
5019 if (!error
&& nofp
->nof_r
) {
5020 error
= nfs4_open_reopen_rpc(nofp
, thd
, noop
->noo_cred
, &cn
, dvp
, &vp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
);
5023 if (nfs_mount_state_in_use_end(nmp
, error
)) {
5024 if (error
== NFSERR_GRACE
) {
5027 printf("nfs4_reopen: RPC failed, error %d, lost %d, %s\n", error
,
5028 (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) ? 1 : 0, name
? name
: "???");
5034 if (error
&& (error
!= EINTR
) && (error
!= ERESTART
)) {
5035 nfs_revoke_open_state_for_node(np
);
5037 lck_mtx_lock(&nofp
->nof_lock
);
5038 nofp
->nof_flags
&= ~NFS_OPEN_FILE_REOPENING
;
5040 nofp
->nof_flags
&= ~NFS_OPEN_FILE_REOPEN
;
5042 printf("nfs4_reopen: failed, error %d, lost %d, %s\n", error
,
5043 (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) ? 1 : 0, name
? name
: "???");
5045 lck_mtx_unlock(&nofp
->nof_lock
);
5046 if (filename
&& (filename
!= &smallname
[0])) {
5047 FREE(filename
, M_TEMP
);
5050 vnode_putname(vname
);
5052 if (dvp
!= NULLVP
) {
5059 * Send a normal OPEN RPC to open/create a file.
5063 struct nfs_open_file
*nofp
,
5065 struct componentname
*cnp
,
5066 struct vnode_attr
*vap
,
5073 return nfs4_open_rpc_internal(nofp
, ctx
, vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
5074 cnp
, vap
, dvp
, vpp
, create
, share_access
, share_deny
);
5078 * Send an OPEN RPC to reopen a file.
5081 nfs4_open_reopen_rpc(
5082 struct nfs_open_file
*nofp
,
5085 struct componentname
*cnp
,
5091 return nfs4_open_rpc_internal(nofp
, NULL
, thd
, cred
, cnp
, NULL
, dvp
, vpp
, NFS_OPEN_NOCREATE
, share_access
, share_deny
);
5095 * Send an OPEN_CONFIRM RPC to confirm an OPEN.
5098 nfs4_open_confirm_rpc(
5099 struct nfsmount
*nmp
,
5103 struct nfs_open_owner
*noop
,
5107 struct nfs_vattr
*nvap
,
5110 struct nfsm_chain nmreq
, nmrep
;
5111 int error
= 0, status
, numops
;
5112 struct nfsreq_secinfo_args si
;
5114 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
5115 nfsm_chain_null(&nmreq
);
5116 nfsm_chain_null(&nmrep
);
5118 // PUTFH, OPEN_CONFIRM, GETATTR
5120 nfsm_chain_build_alloc_init(error
, &nmreq
, 23 * NFSX_UNSIGNED
);
5121 nfsm_chain_add_compound_header(error
, &nmreq
, "open_confirm", nmp
->nm_minor_vers
, numops
);
5123 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5124 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, fhp
, fhlen
);
5126 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN_CONFIRM
);
5127 nfsm_chain_add_stateid(error
, &nmreq
, sid
);
5128 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5130 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5131 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
5132 nfsm_chain_build_done(error
, &nmreq
);
5133 nfsm_assert(error
, (numops
== 0), EPROTO
);
5135 error
= nfs_request2(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, R_NOINTR
, &nmrep
, xidp
, &status
);
5137 nfsm_chain_skip_tag(error
, &nmrep
);
5138 nfsm_chain_get_32(error
, &nmrep
, numops
);
5139 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
5141 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN_CONFIRM
);
5142 nfs_owner_seqid_increment(noop
, NULL
, error
);
5143 nfsm_chain_get_stateid(error
, &nmrep
, sid
);
5144 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5146 error
= nfs4_parsefattr(&nmrep
, NULL
, nvap
, NULL
, NULL
, NULL
);
5148 nfsm_chain_cleanup(&nmreq
);
5149 nfsm_chain_cleanup(&nmrep
);
5154 * common OPEN RPC code
5156 * If create is set, ctx must be passed in.
5157 * Returns a node on success if no node passed in.
5160 nfs4_open_rpc_internal(
5161 struct nfs_open_file
*nofp
,
5165 struct componentname
*cnp
,
5166 struct vnode_attr
*vap
,
5173 struct nfsmount
*nmp
;
5174 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
5175 struct nfs_vattr
*nvattr
;
5176 int error
= 0, open_error
= EIO
, lockerror
= ENOENT
, busyerror
= ENOENT
, status
;
5177 int nfsvers
, namedattrs
, numops
, exclusive
= 0, gotuid
, gotgid
;
5178 u_int64_t xid
, savedxid
= 0;
5179 nfsnode_t dnp
= VTONFS(dvp
);
5180 nfsnode_t np
, newnp
= NULL
;
5181 vnode_t newvp
= NULL
;
5182 struct nfsm_chain nmreq
, nmrep
;
5183 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
5184 uint32_t rflags
, delegation
, recall
;
5185 struct nfs_stateid stateid
, dstateid
, *sid
;
5188 struct nfs_dulookup
*dul
;
5190 uint32_t ace_type
, ace_flags
, ace_mask
, len
, slen
;
5191 struct kauth_ace ace
;
5192 struct nfsreq_secinfo_args si
;
5194 if (create
&& !ctx
) {
5199 if (nfs_mount_gone(nmp
)) {
5202 nfsvers
= nmp
->nm_vers
;
5203 namedattrs
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
);
5204 bzero(&dstateid
, sizeof(dstateid
));
5205 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
5209 np
= *vpp
? VTONFS(*vpp
) : NULL
;
5210 if (create
&& vap
) {
5211 exclusive
= (vap
->va_vaflags
& VA_EXCLUSIVE
);
5212 nfs_avoid_needless_id_setting_on_create(dnp
, vap
, ctx
);
5213 gotuid
= VATTR_IS_ACTIVE(vap
, va_uid
);
5214 gotgid
= VATTR_IS_ACTIVE(vap
, va_gid
);
5215 if (exclusive
&& (!VATTR_IS_ACTIVE(vap
, va_access_time
) || !VATTR_IS_ACTIVE(vap
, va_modify_time
))) {
5216 vap
->va_vaflags
|= VA_UTIMES_NULL
;
5219 exclusive
= gotuid
= gotgid
= 0;
5222 sid
= &nofp
->nof_stateid
;
5224 stateid
.seqid
= stateid
.other
[0] = stateid
.other
[1] = stateid
.other
[2] = 0;
5228 if ((error
= nfs_open_owner_set_busy(noop
, thd
))) {
5232 fh
= zalloc(nfs_fhandle_zone
);
5233 req
= zalloc(nfs_req_zone
);
5234 MALLOC(dul
, struct nfs_dulookup
*, sizeof(*dul
), M_TEMP
, M_WAITOK
);
5235 MALLOC(nvattr
, struct nfs_vattr
*, sizeof(*nvattr
), M_TEMP
, M_WAITOK
);
5238 rflags
= delegation
= recall
= 0;
5241 slen
= sizeof(sbuf
);
5242 NVATTR_INIT(nvattr
);
5243 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, cnp
->cn_nameptr
, cnp
->cn_namelen
);
5245 nfsm_chain_null(&nmreq
);
5246 nfsm_chain_null(&nmrep
);
5248 // PUTFH, SAVEFH, OPEN(CREATE?), GETATTR(FH), RESTOREFH, GETATTR
5250 nfsm_chain_build_alloc_init(error
, &nmreq
, 53 * NFSX_UNSIGNED
+ cnp
->cn_namelen
);
5251 nfsm_chain_add_compound_header(error
, &nmreq
, create
? "create" : "open", nmp
->nm_minor_vers
, numops
);
5253 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5254 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
5256 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
5258 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
5259 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5260 nfsm_chain_add_32(error
, &nmreq
, share_access
);
5261 nfsm_chain_add_32(error
, &nmreq
, share_deny
);
5262 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
);
5263 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
5264 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
));
5265 nfsm_chain_add_32(error
, &nmreq
, create
);
5268 static uint32_t create_verf
; // XXX need a better verifier
5270 nfsm_chain_add_32(error
, &nmreq
, NFS_CREATE_EXCLUSIVE
);
5271 /* insert 64 bit verifier */
5272 nfsm_chain_add_32(error
, &nmreq
, create_verf
);
5273 nfsm_chain_add_32(error
, &nmreq
, create_verf
);
5275 nfsm_chain_add_32(error
, &nmreq
, NFS_CREATE_UNCHECKED
);
5276 nfsm_chain_add_fattr4(error
, &nmreq
, vap
, nmp
);
5279 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_NULL
);
5280 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
5282 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5283 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
5284 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
5285 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
5287 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
5289 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5290 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
5291 nfsm_chain_build_done(error
, &nmreq
);
5292 nfsm_assert(error
, (numops
== 0), EPROTO
);
5294 error
= busyerror
= nfs_node_set_busy(dnp
, thd
);
5298 if (create
&& !namedattrs
) {
5299 nfs_dulookup_init(dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
);
5302 error
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, R_NOINTR
, NULL
, &req
);
5304 if (create
&& !namedattrs
) {
5305 nfs_dulookup_start(dul
, dnp
, ctx
);
5307 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
5311 if (create
&& !namedattrs
) {
5312 nfs_dulookup_finish(dul
, dnp
, ctx
);
5315 if ((lockerror
= nfs_node_lock(dnp
))) {
5318 nfsm_chain_skip_tag(error
, &nmrep
);
5319 nfsm_chain_get_32(error
, &nmrep
, numops
);
5320 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
5321 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
5323 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
5324 nfs_owner_seqid_increment(noop
, NULL
, error
);
5325 nfsm_chain_get_stateid(error
, &nmrep
, sid
);
5326 nfsm_chain_check_change_info(error
, &nmrep
, dnp
);
5327 nfsm_chain_get_32(error
, &nmrep
, rflags
);
5328 bmlen
= NFS_ATTR_BITMAP_LEN
;
5329 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
5330 nfsm_chain_get_32(error
, &nmrep
, delegation
);
5332 switch (delegation
) {
5333 case NFS_OPEN_DELEGATE_NONE
:
5335 case NFS_OPEN_DELEGATE_READ
:
5336 case NFS_OPEN_DELEGATE_WRITE
:
5337 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
5338 nfsm_chain_get_32(error
, &nmrep
, recall
);
5339 if (delegation
== NFS_OPEN_DELEGATE_WRITE
) { // space (skip) XXX
5340 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
5342 /* if we have any trouble accepting the ACE, just invalidate it */
5343 ace_type
= ace_flags
= ace_mask
= len
= 0;
5344 nfsm_chain_get_32(error
, &nmrep
, ace_type
);
5345 nfsm_chain_get_32(error
, &nmrep
, ace_flags
);
5346 nfsm_chain_get_32(error
, &nmrep
, ace_mask
);
5347 nfsm_chain_get_32(error
, &nmrep
, len
);
5348 ace
.ace_flags
= nfs4_ace_nfstype_to_vfstype(ace_type
, &error
);
5349 ace
.ace_flags
|= nfs4_ace_nfsflags_to_vfsflags(ace_flags
);
5350 ace
.ace_rights
= nfs4_ace_nfsmask_to_vfsrights(ace_mask
);
5351 if (!error
&& (len
>= slen
)) {
5352 MALLOC(s
, char*, len
+ 1, M_TEMP
, M_WAITOK
);
5360 nfsm_chain_get_opaque(error
, &nmrep
, len
, s
);
5362 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(len
));
5366 if (nfs4_id2guid(s
, &ace
.ace_applicable
, (ace_flags
& NFS_ACE_IDENTIFIER_GROUP
))) {
5373 if (s
&& (s
!= sbuf
)) {
5382 /* At this point if we have no error, the object was created/opened. */
5385 if (create
&& vap
&& !exclusive
) {
5386 nfs_vattr_set_supported(bitmap
, vap
);
5388 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5390 error
= nfs4_parsefattr(&nmrep
, NULL
, nvattr
, fh
, NULL
, NULL
);
5392 if (!NFS_BITMAP_ISSET(nvattr
->nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
5393 printf("nfs: open/create didn't return filehandle? %s\n", cnp
->cn_nameptr
);
5397 if (!create
&& np
&& !NFS_CMPFH(np
, fh
->fh_data
, fh
->fh_len
)) {
5398 // XXX for the open case, what if fh doesn't match the vnode we think we're opening?
5399 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5400 if (!(np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
5401 NP(np
, "nfs4_open_rpc: warning: file handle mismatch");
5404 /* directory attributes: if we don't get them, make sure to invalidate */
5405 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
5406 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5407 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
5409 NATTRINVALIDATE(dnp
);
5413 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
) {
5414 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
5417 if (rflags
& NFS_OPEN_RESULT_CONFIRM
) {
5418 nfs_node_unlock(dnp
);
5420 NVATTR_CLEANUP(nvattr
);
5421 error
= nfs4_open_confirm_rpc(nmp
, dnp
, fh
->fh_data
, fh
->fh_len
, noop
, sid
, thd
, cred
, nvattr
, &xid
);
5424 if ((lockerror
= nfs_node_lock(dnp
))) {
5430 nfsm_chain_cleanup(&nmreq
);
5431 nfsm_chain_cleanup(&nmrep
);
5433 if (!lockerror
&& create
) {
5434 if (!open_error
&& (dnp
->n_flag
& NNEGNCENTRIES
)) {
5435 dnp
->n_flag
&= ~NNEGNCENTRIES
;
5436 cache_purge_negatives(dvp
);
5438 dnp
->n_flag
|= NMODIFIED
;
5439 nfs_node_unlock(dnp
);
5441 nfs_getattr(dnp
, NULL
, ctx
, NGA_CACHED
);
5444 nfs_node_unlock(dnp
);
5446 if (!error
&& !np
&& fh
->fh_len
) {
5447 /* create the vnode with the filehandle and attributes */
5449 error
= nfs_nget(NFSTOMP(dnp
), dnp
, cnp
, fh
->fh_data
, fh
->fh_len
, nvattr
, &xid
, req
->r_auth
, NG_MAKEENTRY
, &newnp
);
5451 newvp
= NFSTOV(newnp
);
5454 NVATTR_CLEANUP(nvattr
);
5456 nfs_node_clear_busy(dnp
);
5458 if ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
)) {
5462 if (!error
&& np
&& !recall
) {
5463 /* stuff the delegation state in the node */
5464 lck_mtx_lock(&np
->n_openlock
);
5465 np
->n_openflags
&= ~N_DELEG_MASK
;
5466 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
5467 np
->n_dstateid
= dstateid
;
5469 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5470 lck_mtx_lock(&nmp
->nm_lock
);
5471 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5472 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, np
, n_dlink
);
5474 lck_mtx_unlock(&nmp
->nm_lock
);
5476 lck_mtx_unlock(&np
->n_openlock
);
5478 /* give the delegation back */
5480 if (NFS_CMPFH(np
, fh
->fh_data
, fh
->fh_len
)) {
5481 /* update delegation state and return it */
5482 lck_mtx_lock(&np
->n_openlock
);
5483 np
->n_openflags
&= ~N_DELEG_MASK
;
5484 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
5485 np
->n_dstateid
= dstateid
;
5487 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5488 lck_mtx_lock(&nmp
->nm_lock
);
5489 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5490 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, np
, n_dlink
);
5492 lck_mtx_unlock(&nmp
->nm_lock
);
5494 lck_mtx_unlock(&np
->n_openlock
);
5495 /* don't need to send a separate delegreturn for fh */
5498 /* return np's current delegation */
5499 nfs4_delegation_return(np
, 0, thd
, cred
);
5501 if (fh
->fh_len
) { /* return fh's delegation if it wasn't for np */
5502 nfs4_delegreturn_rpc(nmp
, fh
->fh_data
, fh
->fh_len
, &dstateid
, 0, thd
, cred
);
5507 if (exclusive
&& (error
== NFSERR_NOTSUPP
)) {
5512 nfs_node_unlock(newnp
);
5515 } else if (create
) {
5516 nfs_node_unlock(newnp
);
5518 error
= nfs4_setattr_rpc(newnp
, vap
, ctx
);
5519 if (error
&& (gotuid
|| gotgid
)) {
5520 /* it's possible the server didn't like our attempt to set IDs. */
5521 /* so, let's try it again without those */
5522 VATTR_CLEAR_ACTIVE(vap
, va_uid
);
5523 VATTR_CLEAR_ACTIVE(vap
, va_gid
);
5524 error
= nfs4_setattr_rpc(newnp
, vap
, ctx
);
5533 nfs_open_owner_clear_busy(noop
);
5534 NFS_ZFREE(nfs_fhandle_zone
, fh
);
5535 NFS_ZFREE(nfs_req_zone
, req
);
5537 FREE(nvattr
, M_TEMP
);
5543 * Send an OPEN RPC to claim a delegated open for a file
5546 nfs4_claim_delegated_open_rpc(
5547 struct nfs_open_file
*nofp
,
5552 struct nfsmount
*nmp
;
5553 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
5554 struct nfs_vattr
*nvattr
;
5555 int error
= 0, lockerror
= ENOENT
, status
;
5556 int nfsvers
, numops
;
5558 nfsnode_t np
= nofp
->nof_np
;
5559 struct nfsm_chain nmreq
, nmrep
;
5560 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
5561 uint32_t rflags
= 0, delegation
, recall
= 0;
5563 struct nfs_stateid dstateid
;
5564 char sbuf
[64], *s
= sbuf
;
5565 uint32_t ace_type
, ace_flags
, ace_mask
, len
, slen
= sizeof(sbuf
);
5566 struct kauth_ace ace
;
5568 const char *vname
= NULL
;
5569 const char *name
= NULL
;
5571 char smallname
[128];
5572 char *filename
= NULL
;
5573 struct nfsreq_secinfo_args si
;
5576 if (nfs_mount_gone(nmp
)) {
5579 fh
= zalloc(nfs_fhandle_zone
);
5580 MALLOC(nvattr
, struct nfs_vattr
*, sizeof(*nvattr
), M_TEMP
, M_WAITOK
);
5581 nfsvers
= nmp
->nm_vers
;
5583 nfs_node_lock_force(np
);
5584 if ((vnode_vtype(NFSTOV(np
)) != VDIR
) && np
->n_sillyrename
) {
5586 * The node's been sillyrenamed, so we need to use
5587 * the sillyrename directory/name to do the open.
5589 struct nfs_sillyrename
*nsp
= np
->n_sillyrename
;
5590 dvp
= NFSTOV(nsp
->nsr_dnp
);
5591 if ((error
= vnode_get(dvp
))) {
5593 nfs_node_unlock(np
);
5596 name
= nsp
->nsr_name
;
5599 * [sigh] We can't trust VFS to get the parent right for named
5600 * attribute nodes. (It likes to reparent the nodes after we've
5601 * created them.) Luckily we can probably get the right parent
5602 * from the n_parent we have stashed away.
5604 if ((np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) &&
5605 (((dvp
= np
->n_parent
)) && (error
= vnode_get(dvp
)))) {
5609 dvp
= vnode_getparent(NFSTOV(np
));
5611 vname
= vnode_getname(NFSTOV(np
));
5612 if (!dvp
|| !vname
) {
5616 nfs_node_unlock(np
);
5621 filename
= &smallname
[0];
5622 namelen
= snprintf(filename
, sizeof(smallname
), "%s", name
);
5623 if (namelen
>= sizeof(smallname
)) {
5624 MALLOC(filename
, char *, namelen
+ 1, M_TEMP
, M_WAITOK
);
5627 nfs_node_unlock(np
);
5630 snprintf(filename
, namelen
+ 1, "%s", name
);
5632 nfs_node_unlock(np
);
5634 if ((error
= nfs_open_owner_set_busy(noop
, NULL
))) {
5637 NVATTR_INIT(nvattr
);
5638 delegation
= NFS_OPEN_DELEGATE_NONE
;
5639 dstateid
= np
->n_dstateid
;
5640 NFSREQ_SECINFO_SET(&si
, VTONFS(dvp
), NULL
, 0, filename
, namelen
);
5642 nfsm_chain_null(&nmreq
);
5643 nfsm_chain_null(&nmrep
);
5645 // PUTFH, OPEN, GETATTR(FH)
5647 nfsm_chain_build_alloc_init(error
, &nmreq
, 48 * NFSX_UNSIGNED
);
5648 nfsm_chain_add_compound_header(error
, &nmreq
, "open_claim_d", nmp
->nm_minor_vers
, numops
);
5650 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5651 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, VTONFS(dvp
)->n_fhp
, VTONFS(dvp
)->n_fhsize
);
5653 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
5654 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5655 nfsm_chain_add_32(error
, &nmreq
, share_access
);
5656 nfsm_chain_add_32(error
, &nmreq
, share_deny
);
5657 // open owner: clientid + uid
5658 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
); // open_owner4.clientid
5659 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
5660 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
)); // open_owner4.owner
5662 nfsm_chain_add_32(error
, &nmreq
, NFS_OPEN_NOCREATE
);
5664 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_DELEGATE_CUR
);
5665 nfsm_chain_add_stateid(error
, &nmreq
, &np
->n_dstateid
);
5666 nfsm_chain_add_name(error
, &nmreq
, filename
, namelen
, nmp
);
5668 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5669 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
5670 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
5671 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
5672 nfsm_chain_build_done(error
, &nmreq
);
5673 nfsm_assert(error
, (numops
== 0), EPROTO
);
5676 error
= nfs_request2(np
, nmp
->nm_mountp
, &nmreq
, NFSPROC4_COMPOUND
, current_thread(),
5677 noop
->noo_cred
, &si
, flags
| R_NOINTR
, &nmrep
, &xid
, &status
);
5679 if ((lockerror
= nfs_node_lock(np
))) {
5682 nfsm_chain_skip_tag(error
, &nmrep
);
5683 nfsm_chain_get_32(error
, &nmrep
, numops
);
5684 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
5686 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
5687 nfs_owner_seqid_increment(noop
, NULL
, error
);
5688 nfsm_chain_get_stateid(error
, &nmrep
, &nofp
->nof_stateid
);
5689 nfsm_chain_check_change_info(error
, &nmrep
, np
);
5690 nfsm_chain_get_32(error
, &nmrep
, rflags
);
5691 bmlen
= NFS_ATTR_BITMAP_LEN
;
5692 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
5693 nfsm_chain_get_32(error
, &nmrep
, delegation
);
5695 switch (delegation
) {
5696 case NFS_OPEN_DELEGATE_NONE
:
5697 // if (!(np->n_openflags & N_DELEG_RETURN)) /* don't warn if delegation is being returned */
5698 // printf("nfs: open delegated claim didn't return a delegation %s\n", filename ? filename : "???");
5700 case NFS_OPEN_DELEGATE_READ
:
5701 case NFS_OPEN_DELEGATE_WRITE
:
5702 if ((((np
->n_openflags
& N_DELEG_MASK
) == N_DELEG_READ
) &&
5703 (delegation
== NFS_OPEN_DELEGATE_WRITE
)) ||
5704 (((np
->n_openflags
& N_DELEG_MASK
) == N_DELEG_WRITE
) &&
5705 (delegation
== NFS_OPEN_DELEGATE_READ
))) {
5706 printf("nfs: open delegated claim returned a different delegation type! have %s got %s %s\n",
5707 ((np
->n_openflags
& N_DELEG_MASK
) == N_DELEG_WRITE
) ? "W" : "R",
5708 (delegation
== NFS_OPEN_DELEGATE_WRITE
) ? "W" : "R", filename
? filename
: "???");
5710 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
5711 nfsm_chain_get_32(error
, &nmrep
, recall
);
5712 if (delegation
== NFS_OPEN_DELEGATE_WRITE
) { // space (skip) XXX
5713 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
5715 /* if we have any trouble accepting the ACE, just invalidate it */
5716 ace_type
= ace_flags
= ace_mask
= len
= 0;
5717 nfsm_chain_get_32(error
, &nmrep
, ace_type
);
5718 nfsm_chain_get_32(error
, &nmrep
, ace_flags
);
5719 nfsm_chain_get_32(error
, &nmrep
, ace_mask
);
5720 nfsm_chain_get_32(error
, &nmrep
, len
);
5721 ace
.ace_flags
= nfs4_ace_nfstype_to_vfstype(ace_type
, &error
);
5722 ace
.ace_flags
|= nfs4_ace_nfsflags_to_vfsflags(ace_flags
);
5723 ace
.ace_rights
= nfs4_ace_nfsmask_to_vfsrights(ace_mask
);
5724 if (!error
&& (len
>= slen
)) {
5725 MALLOC(s
, char*, len
+ 1, M_TEMP
, M_WAITOK
);
5733 nfsm_chain_get_opaque(error
, &nmrep
, len
, s
);
5735 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(len
));
5739 if (nfs4_id2guid(s
, &ace
.ace_applicable
, (ace_flags
& NFS_ACE_IDENTIFIER_GROUP
))) {
5746 if (s
&& (s
!= sbuf
)) {
5750 /* stuff the latest delegation state in the node */
5751 lck_mtx_lock(&np
->n_openlock
);
5752 np
->n_openflags
&= ~N_DELEG_MASK
;
5753 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
5754 np
->n_dstateid
= dstateid
;
5756 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5757 lck_mtx_lock(&nmp
->nm_lock
);
5758 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5759 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, np
, n_dlink
);
5761 lck_mtx_unlock(&nmp
->nm_lock
);
5763 lck_mtx_unlock(&np
->n_openlock
);
5772 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5773 error
= nfs4_parsefattr(&nmrep
, NULL
, nvattr
, fh
, NULL
, NULL
);
5775 if (!NFS_BITMAP_ISSET(nvattr
->nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
5776 printf("nfs: open reclaim didn't return filehandle? %s\n", filename
? filename
: "???");
5780 if (!NFS_CMPFH(np
, fh
->fh_data
, fh
->fh_len
)) {
5781 // XXX what if fh doesn't match the vnode we think we're re-opening?
5782 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5783 if (!(np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
5784 printf("nfs4_claim_delegated_open_rpc: warning: file handle mismatch %s\n", filename
? filename
: "???");
5787 error
= nfs_loadattrcache(np
, nvattr
, &xid
, 1);
5789 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
) {
5790 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
5793 NVATTR_CLEANUP(nvattr
);
5794 FREE(nvattr
, M_TEMP
);
5795 NFS_ZFREE(nfs_fhandle_zone
, fh
);
5796 nfsm_chain_cleanup(&nmreq
);
5797 nfsm_chain_cleanup(&nmrep
);
5799 nfs_node_unlock(np
);
5801 nfs_open_owner_clear_busy(noop
);
5802 if ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
)) {
5805 * We're making a delegated claim.
5806 * Don't return the delegation here in case we have more to claim.
5807 * Just make sure it's queued up to be returned.
5809 nfs4_delegation_return_enqueue(np
);
5814 // printf("nfs: open claim delegated (%d, %d) succeeded for %s\n", share_access, share_deny, filename ? filename : "???");
5815 if (filename
&& (filename
!= &smallname
[0])) {
5816 FREE(filename
, M_TEMP
);
5819 vnode_putname(vname
);
5821 if (dvp
!= NULLVP
) {
5828 * Send an OPEN RPC to reclaim an open file.
5831 nfs4_open_reclaim_rpc(
5832 struct nfs_open_file
*nofp
,
5836 struct nfsmount
*nmp
;
5837 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
5838 struct nfs_vattr
*nvattr
;
5839 int error
= 0, lockerror
= ENOENT
, status
;
5840 int nfsvers
, numops
;
5842 nfsnode_t np
= nofp
->nof_np
;
5843 struct nfsm_chain nmreq
, nmrep
;
5844 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
5845 uint32_t rflags
= 0, delegation
, recall
= 0;
5847 struct nfs_stateid dstateid
;
5848 char sbuf
[64], *s
= sbuf
;
5849 uint32_t ace_type
, ace_flags
, ace_mask
, len
, slen
= sizeof(sbuf
);
5850 struct kauth_ace ace
;
5851 struct nfsreq_secinfo_args si
;
5854 if (nfs_mount_gone(nmp
)) {
5857 nfsvers
= nmp
->nm_vers
;
5859 if ((error
= nfs_open_owner_set_busy(noop
, NULL
))) {
5863 fh
= zalloc(nfs_fhandle_zone
);
5864 MALLOC(nvattr
, struct nfs_vattr
*, sizeof(*nvattr
), M_TEMP
, M_WAITOK
);
5865 NVATTR_INIT(nvattr
);
5866 delegation
= NFS_OPEN_DELEGATE_NONE
;
5867 dstateid
= np
->n_dstateid
;
5868 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
5870 nfsm_chain_null(&nmreq
);
5871 nfsm_chain_null(&nmrep
);
5873 // PUTFH, OPEN, GETATTR(FH)
5875 nfsm_chain_build_alloc_init(error
, &nmreq
, 48 * NFSX_UNSIGNED
);
5876 nfsm_chain_add_compound_header(error
, &nmreq
, "open_reclaim", nmp
->nm_minor_vers
, numops
);
5878 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5879 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
5881 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
5882 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5883 nfsm_chain_add_32(error
, &nmreq
, share_access
);
5884 nfsm_chain_add_32(error
, &nmreq
, share_deny
);
5885 // open owner: clientid + uid
5886 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
); // open_owner4.clientid
5887 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
5888 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
)); // open_owner4.owner
5890 nfsm_chain_add_32(error
, &nmreq
, NFS_OPEN_NOCREATE
);
5892 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_PREVIOUS
);
5893 delegation
= (np
->n_openflags
& N_DELEG_READ
) ? NFS_OPEN_DELEGATE_READ
:
5894 (np
->n_openflags
& N_DELEG_WRITE
) ? NFS_OPEN_DELEGATE_WRITE
:
5895 NFS_OPEN_DELEGATE_NONE
;
5896 nfsm_chain_add_32(error
, &nmreq
, delegation
);
5897 delegation
= NFS_OPEN_DELEGATE_NONE
;
5899 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5900 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
5901 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
5902 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
5903 nfsm_chain_build_done(error
, &nmreq
);
5904 nfsm_assert(error
, (numops
== 0), EPROTO
);
5907 error
= nfs_request2(np
, nmp
->nm_mountp
, &nmreq
, NFSPROC4_COMPOUND
, current_thread(),
5908 noop
->noo_cred
, &si
, R_RECOVER
| R_NOINTR
, &nmrep
, &xid
, &status
);
5910 if ((lockerror
= nfs_node_lock(np
))) {
5913 nfsm_chain_skip_tag(error
, &nmrep
);
5914 nfsm_chain_get_32(error
, &nmrep
, numops
);
5915 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
5917 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
5918 nfs_owner_seqid_increment(noop
, NULL
, error
);
5919 nfsm_chain_get_stateid(error
, &nmrep
, &nofp
->nof_stateid
);
5920 nfsm_chain_check_change_info(error
, &nmrep
, np
);
5921 nfsm_chain_get_32(error
, &nmrep
, rflags
);
5922 bmlen
= NFS_ATTR_BITMAP_LEN
;
5923 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
5924 nfsm_chain_get_32(error
, &nmrep
, delegation
);
5926 switch (delegation
) {
5927 case NFS_OPEN_DELEGATE_NONE
:
5928 if (np
->n_openflags
& N_DELEG_MASK
) {
5930 * Hey! We were supposed to get our delegation back even
5931 * if it was getting immediately recalled. Bad server!
5933 * Just try to return the existing delegation.
5935 // NP(np, "nfs: open reclaim didn't return delegation?");
5936 delegation
= (np
->n_openflags
& N_DELEG_WRITE
) ? NFS_OPEN_DELEGATE_WRITE
: NFS_OPEN_DELEGATE_READ
;
5940 case NFS_OPEN_DELEGATE_READ
:
5941 case NFS_OPEN_DELEGATE_WRITE
:
5942 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
5943 nfsm_chain_get_32(error
, &nmrep
, recall
);
5944 if (delegation
== NFS_OPEN_DELEGATE_WRITE
) { // space (skip) XXX
5945 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
5947 /* if we have any trouble accepting the ACE, just invalidate it */
5948 ace_type
= ace_flags
= ace_mask
= len
= 0;
5949 nfsm_chain_get_32(error
, &nmrep
, ace_type
);
5950 nfsm_chain_get_32(error
, &nmrep
, ace_flags
);
5951 nfsm_chain_get_32(error
, &nmrep
, ace_mask
);
5952 nfsm_chain_get_32(error
, &nmrep
, len
);
5953 ace
.ace_flags
= nfs4_ace_nfstype_to_vfstype(ace_type
, &error
);
5954 ace
.ace_flags
|= nfs4_ace_nfsflags_to_vfsflags(ace_flags
);
5955 ace
.ace_rights
= nfs4_ace_nfsmask_to_vfsrights(ace_mask
);
5956 if (!error
&& (len
>= slen
)) {
5957 MALLOC(s
, char*, len
+ 1, M_TEMP
, M_WAITOK
);
5965 nfsm_chain_get_opaque(error
, &nmrep
, len
, s
);
5967 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(len
));
5971 if (nfs4_id2guid(s
, &ace
.ace_applicable
, (ace_flags
& NFS_ACE_IDENTIFIER_GROUP
))) {
5978 if (s
&& (s
!= sbuf
)) {
5982 /* stuff the delegation state in the node */
5983 lck_mtx_lock(&np
->n_openlock
);
5984 np
->n_openflags
&= ~N_DELEG_MASK
;
5985 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
5986 np
->n_dstateid
= dstateid
;
5988 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5989 lck_mtx_lock(&nmp
->nm_lock
);
5990 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5991 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, np
, n_dlink
);
5993 lck_mtx_unlock(&nmp
->nm_lock
);
5995 lck_mtx_unlock(&np
->n_openlock
);
6004 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
6005 error
= nfs4_parsefattr(&nmrep
, NULL
, nvattr
, fh
, NULL
, NULL
);
6007 if (!NFS_BITMAP_ISSET(nvattr
->nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
6008 NP(np
, "nfs: open reclaim didn't return filehandle?");
6012 if (!NFS_CMPFH(np
, fh
->fh_data
, fh
->fh_len
)) {
6013 // XXX what if fh doesn't match the vnode we think we're re-opening?
6014 // That should be pretty hard in this case, given that we are doing
6015 // the open reclaim using the file handle (and not a dir/name pair).
6016 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
6017 if (!(np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
6018 NP(np
, "nfs4_open_reclaim_rpc: warning: file handle mismatch");
6021 error
= nfs_loadattrcache(np
, nvattr
, &xid
, 1);
6023 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
) {
6024 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
6028 // NP(np, "nfs: open reclaim (%d, %d) succeeded", share_access, share_deny);
6029 NVATTR_CLEANUP(nvattr
);
6030 FREE(nvattr
, M_TEMP
);
6031 NFS_ZFREE(nfs_fhandle_zone
, fh
);
6032 nfsm_chain_cleanup(&nmreq
);
6033 nfsm_chain_cleanup(&nmrep
);
6035 nfs_node_unlock(np
);
6037 nfs_open_owner_clear_busy(noop
);
6038 if ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
)) {
6040 nfs4_delegation_return_enqueue(np
);
6047 nfs4_open_downgrade_rpc(
6049 struct nfs_open_file
*nofp
,
6052 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
6053 struct nfsmount
*nmp
;
6054 int error
, lockerror
= ENOENT
, status
, nfsvers
, numops
;
6055 struct nfsm_chain nmreq
, nmrep
;
6057 struct nfsreq_secinfo_args si
;
6060 if (nfs_mount_gone(nmp
)) {
6063 nfsvers
= nmp
->nm_vers
;
6065 if ((error
= nfs_open_owner_set_busy(noop
, NULL
))) {
6069 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
6070 nfsm_chain_null(&nmreq
);
6071 nfsm_chain_null(&nmrep
);
6073 // PUTFH, OPEN_DOWNGRADE, GETATTR
6075 nfsm_chain_build_alloc_init(error
, &nmreq
, 23 * NFSX_UNSIGNED
);
6076 nfsm_chain_add_compound_header(error
, &nmreq
, "open_downgrd", nmp
->nm_minor_vers
, numops
);
6078 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
6079 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
6081 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN_DOWNGRADE
);
6082 nfsm_chain_add_stateid(error
, &nmreq
, &nofp
->nof_stateid
);
6083 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
6084 nfsm_chain_add_32(error
, &nmreq
, nofp
->nof_access
);
6085 nfsm_chain_add_32(error
, &nmreq
, nofp
->nof_deny
);
6087 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
6088 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
6089 nfsm_chain_build_done(error
, &nmreq
);
6090 nfsm_assert(error
, (numops
== 0), EPROTO
);
6092 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
6093 vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
6094 &si
, R_NOINTR
, &nmrep
, &xid
, &status
);
6096 if ((lockerror
= nfs_node_lock(np
))) {
6099 nfsm_chain_skip_tag(error
, &nmrep
);
6100 nfsm_chain_get_32(error
, &nmrep
, numops
);
6101 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
6103 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN_DOWNGRADE
);
6104 nfs_owner_seqid_increment(noop
, NULL
, error
);
6105 nfsm_chain_get_stateid(error
, &nmrep
, &nofp
->nof_stateid
);
6106 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
6107 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
6110 nfs_node_unlock(np
);
6112 nfs_open_owner_clear_busy(noop
);
6113 nfsm_chain_cleanup(&nmreq
);
6114 nfsm_chain_cleanup(&nmrep
);
6121 struct nfs_open_file
*nofp
,
6126 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
6127 struct nfsmount
*nmp
;
6128 int error
, lockerror
= ENOENT
, status
, nfsvers
, numops
;
6129 struct nfsm_chain nmreq
, nmrep
;
6131 struct nfsreq_secinfo_args si
;
6134 if (nfs_mount_gone(nmp
)) {
6137 nfsvers
= nmp
->nm_vers
;
6139 if ((error
= nfs_open_owner_set_busy(noop
, NULL
))) {
6143 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
6144 nfsm_chain_null(&nmreq
);
6145 nfsm_chain_null(&nmrep
);
6147 // PUTFH, CLOSE, GETATTR
6149 nfsm_chain_build_alloc_init(error
, &nmreq
, 23 * NFSX_UNSIGNED
);
6150 nfsm_chain_add_compound_header(error
, &nmreq
, "close", nmp
->nm_minor_vers
, numops
);
6152 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
6153 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
6155 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_CLOSE
);
6156 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
6157 nfsm_chain_add_stateid(error
, &nmreq
, &nofp
->nof_stateid
);
6159 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
6160 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
6161 nfsm_chain_build_done(error
, &nmreq
);
6162 nfsm_assert(error
, (numops
== 0), EPROTO
);
6164 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, flags
| R_NOINTR
, &nmrep
, &xid
, &status
);
6166 if ((lockerror
= nfs_node_lock(np
))) {
6169 nfsm_chain_skip_tag(error
, &nmrep
);
6170 nfsm_chain_get_32(error
, &nmrep
, numops
);
6171 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
6173 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_CLOSE
);
6174 nfs_owner_seqid_increment(noop
, NULL
, error
);
6175 nfsm_chain_get_stateid(error
, &nmrep
, &nofp
->nof_stateid
);
6176 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
6177 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
6180 nfs_node_unlock(np
);
6182 nfs_open_owner_clear_busy(noop
);
6183 nfsm_chain_cleanup(&nmreq
);
6184 nfsm_chain_cleanup(&nmrep
);
6190 * Claim the delegated open combinations this open file holds.
6193 nfs4_claim_delegated_state_for_open_file(struct nfs_open_file
*nofp
, int flags
)
6195 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
6196 struct nfs_lock_owner
*nlop
;
6197 struct nfs_file_lock
*nflp
, *nextnflp
;
6198 struct nfsmount
*nmp
;
6199 int error
= 0, reopen
= 0;
6201 if (nofp
->nof_d_rw_drw
) {
6202 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_BOTH
, flags
);
6204 lck_mtx_lock(&nofp
->nof_lock
);
6205 nofp
->nof_rw_drw
+= nofp
->nof_d_rw_drw
;
6206 nofp
->nof_d_rw_drw
= 0;
6207 lck_mtx_unlock(&nofp
->nof_lock
);
6210 if (!error
&& nofp
->nof_d_w_drw
) {
6211 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_WRITE
, NFS_OPEN_SHARE_DENY_BOTH
, flags
);
6213 lck_mtx_lock(&nofp
->nof_lock
);
6214 nofp
->nof_w_drw
+= nofp
->nof_d_w_drw
;
6215 nofp
->nof_d_w_drw
= 0;
6216 lck_mtx_unlock(&nofp
->nof_lock
);
6219 if (!error
&& nofp
->nof_d_r_drw
) {
6220 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_BOTH
, flags
);
6222 lck_mtx_lock(&nofp
->nof_lock
);
6223 nofp
->nof_r_drw
+= nofp
->nof_d_r_drw
;
6224 nofp
->nof_d_r_drw
= 0;
6225 lck_mtx_unlock(&nofp
->nof_lock
);
6228 if (!error
&& nofp
->nof_d_rw_dw
) {
6229 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_WRITE
, flags
);
6231 lck_mtx_lock(&nofp
->nof_lock
);
6232 nofp
->nof_rw_dw
+= nofp
->nof_d_rw_dw
;
6233 nofp
->nof_d_rw_dw
= 0;
6234 lck_mtx_unlock(&nofp
->nof_lock
);
6237 if (!error
&& nofp
->nof_d_w_dw
) {
6238 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_WRITE
, NFS_OPEN_SHARE_DENY_WRITE
, flags
);
6240 lck_mtx_lock(&nofp
->nof_lock
);
6241 nofp
->nof_w_dw
+= nofp
->nof_d_w_dw
;
6242 nofp
->nof_d_w_dw
= 0;
6243 lck_mtx_unlock(&nofp
->nof_lock
);
6246 if (!error
&& nofp
->nof_d_r_dw
) {
6247 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_WRITE
, flags
);
6249 lck_mtx_lock(&nofp
->nof_lock
);
6250 nofp
->nof_r_dw
+= nofp
->nof_d_r_dw
;
6251 nofp
->nof_d_r_dw
= 0;
6252 lck_mtx_unlock(&nofp
->nof_lock
);
6255 /* non-deny-mode opens may be reopened if no locks are held */
6256 if (!error
&& nofp
->nof_d_rw
) {
6257 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_NONE
, flags
);
6258 /* for some errors, we should just try reopening the file */
6259 if (nfs_mount_state_error_delegation_lost(error
)) {
6262 if (!error
|| reopen
) {
6263 lck_mtx_lock(&nofp
->nof_lock
);
6264 nofp
->nof_rw
+= nofp
->nof_d_rw
;
6266 lck_mtx_unlock(&nofp
->nof_lock
);
6269 /* if we've already set reopen, we should move these other two opens from delegated to not delegated */
6270 if ((!error
|| reopen
) && nofp
->nof_d_w
) {
6272 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_WRITE
, NFS_OPEN_SHARE_DENY_NONE
, flags
);
6273 /* for some errors, we should just try reopening the file */
6274 if (nfs_mount_state_error_delegation_lost(error
)) {
6278 if (!error
|| reopen
) {
6279 lck_mtx_lock(&nofp
->nof_lock
);
6280 nofp
->nof_w
+= nofp
->nof_d_w
;
6282 lck_mtx_unlock(&nofp
->nof_lock
);
6285 if ((!error
|| reopen
) && nofp
->nof_d_r
) {
6287 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, flags
);
6288 /* for some errors, we should just try reopening the file */
6289 if (nfs_mount_state_error_delegation_lost(error
)) {
6293 if (!error
|| reopen
) {
6294 lck_mtx_lock(&nofp
->nof_lock
);
6295 nofp
->nof_r
+= nofp
->nof_d_r
;
6297 lck_mtx_unlock(&nofp
->nof_lock
);
6303 * Any problems with the delegation probably indicates that we
6304 * should review/return all of our current delegation state.
6306 if ((nmp
= NFSTONMP(nofp
->nof_np
))) {
6307 nfs4_delegation_return_enqueue(nofp
->nof_np
);
6308 lck_mtx_lock(&nmp
->nm_lock
);
6309 nfs_need_recover(nmp
, NFSERR_EXPIRED
);
6310 lck_mtx_unlock(&nmp
->nm_lock
);
6312 if (reopen
&& (nfs_check_for_locks(noop
, nofp
) == 0)) {
6313 /* just reopen the file on next access */
6314 NP(nofp
->nof_np
, "nfs4_claim_delegated_state_for_open_file: %d, need reopen, %d",
6315 reopen
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
6316 lck_mtx_lock(&nofp
->nof_lock
);
6317 nofp
->nof_flags
|= NFS_OPEN_FILE_REOPEN
;
6318 lck_mtx_unlock(&nofp
->nof_lock
);
6322 NP(nofp
->nof_np
, "nfs4_claim_delegated_state_for_open_file: %d, locks prevent reopen, %d",
6323 reopen
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
6327 if (!error
&& ((nmp
= NFSTONMP(nofp
->nof_np
)))) {
6328 /* claim delegated locks */
6329 TAILQ_FOREACH(nlop
, &nofp
->nof_np
->n_lock_owners
, nlo_link
) {
6330 if (nlop
->nlo_open_owner
!= noop
) {
6333 TAILQ_FOREACH_SAFE(nflp
, &nlop
->nlo_locks
, nfl_lolink
, nextnflp
) {
6334 /* skip dead & blocked lock requests (shouldn't be any in the held lock list) */
6335 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_DEAD
| NFS_FILE_LOCK_BLOCKED
)) {
6338 /* skip non-delegated locks */
6339 if (!(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
6342 error
= nmp
->nm_funcs
->nf_setlock_rpc(nofp
->nof_np
, nofp
, nflp
, 0, flags
, current_thread(), noop
->noo_cred
);
6344 NP(nofp
->nof_np
, "nfs: delegated lock claim (0x%llx, 0x%llx) failed %d, %d",
6345 nflp
->nfl_start
, nflp
->nfl_end
, error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
6349 // NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) succeeded, %d",
6350 // nflp->nfl_start, nflp->nfl_end, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6359 if (!error
) { /* all state claimed successfully! */
6363 /* restart if it looks like a problem more than just losing the delegation */
6364 if (!nfs_mount_state_error_delegation_lost(error
) &&
6365 ((error
== ETIMEDOUT
) || nfs_mount_state_error_should_restart(error
))) {
6366 NP(nofp
->nof_np
, "nfs delegated lock claim error %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
6367 if ((error
== ETIMEDOUT
) && ((nmp
= NFSTONMP(nofp
->nof_np
)))) {
6368 nfs_need_reconnect(nmp
);
6373 /* delegated state lost (once held but now not claimable) */
6374 NP(nofp
->nof_np
, "nfs delegated state claim error %d, state lost, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
6377 * Any problems with the delegation probably indicates that we
6378 * should review/return all of our current delegation state.
6380 if ((nmp
= NFSTONMP(nofp
->nof_np
))) {
6381 nfs4_delegation_return_enqueue(nofp
->nof_np
);
6382 lck_mtx_lock(&nmp
->nm_lock
);
6383 nfs_need_recover(nmp
, NFSERR_EXPIRED
);
6384 lck_mtx_unlock(&nmp
->nm_lock
);
6387 /* revoke all open file state */
6388 nfs_revoke_open_state_for_node(nofp
->nof_np
);
6392 #endif /* CONFIG_NFS4*/
6395 * Release all open state for the given node.
6398 nfs_release_open_state_for_node(nfsnode_t np
, int force
)
6400 struct nfsmount
*nmp
= NFSTONMP(np
);
6401 struct nfs_open_file
*nofp
;
6402 struct nfs_file_lock
*nflp
, *nextnflp
;
6404 /* drop held locks */
6405 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
6406 /* skip dead & blocked lock requests */
6407 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_DEAD
| NFS_FILE_LOCK_BLOCKED
)) {
6410 /* send an unlock if not a delegated lock */
6411 if (!force
&& nmp
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
6412 nmp
->nm_funcs
->nf_unlock_rpc(np
, nflp
->nfl_owner
, F_WRLCK
, nflp
->nfl_start
, nflp
->nfl_end
, R_RECOVER
,
6413 NULL
, nflp
->nfl_owner
->nlo_open_owner
->noo_cred
);
6415 /* kill/remove the lock */
6416 lck_mtx_lock(&np
->n_openlock
);
6417 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
6418 lck_mtx_lock(&nflp
->nfl_owner
->nlo_lock
);
6419 TAILQ_REMOVE(&nflp
->nfl_owner
->nlo_locks
, nflp
, nfl_lolink
);
6420 lck_mtx_unlock(&nflp
->nfl_owner
->nlo_lock
);
6421 if (nflp
->nfl_blockcnt
) {
6422 /* wake up anyone blocked on this lock */
6425 /* remove nflp from lock list and destroy */
6426 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
6427 nfs_file_lock_destroy(nflp
);
6429 lck_mtx_unlock(&np
->n_openlock
);
6432 lck_mtx_lock(&np
->n_openlock
);
6434 /* drop all opens */
6435 TAILQ_FOREACH(nofp
, &np
->n_opens
, nof_link
) {
6436 if (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) {
6439 /* mark open state as lost */
6440 lck_mtx_lock(&nofp
->nof_lock
);
6441 nofp
->nof_flags
&= ~NFS_OPEN_FILE_REOPEN
;
6442 nofp
->nof_flags
|= NFS_OPEN_FILE_LOST
;
6444 lck_mtx_unlock(&nofp
->nof_lock
);
6446 if (!force
&& nmp
&& (nmp
->nm_vers
>= NFS_VER4
)) {
6447 nfs4_close_rpc(np
, nofp
, NULL
, nofp
->nof_owner
->noo_cred
, R_RECOVER
);
6452 lck_mtx_unlock(&np
->n_openlock
);
6456 * State for a node has been lost, drop it, and revoke the node.
6457 * Attempt to return any state if possible in case the server
6458 * might somehow think we hold it.
6461 nfs_revoke_open_state_for_node(nfsnode_t np
)
6463 struct nfsmount
*nmp
;
6465 /* mark node as needing to be revoked */
6466 nfs_node_lock_force(np
);
6467 if (np
->n_flag
& NREVOKE
) { /* already revoked? */
6468 NP(np
, "nfs_revoke_open_state_for_node(): already revoked");
6469 nfs_node_unlock(np
);
6472 np
->n_flag
|= NREVOKE
;
6473 nfs_node_unlock(np
);
6475 nfs_release_open_state_for_node(np
, 0);
6476 NP(np
, "nfs: state lost for %p 0x%x", np
, np
->n_flag
);
6478 /* mark mount as needing a revoke scan and have the socket thread do it. */
6479 if ((nmp
= NFSTONMP(np
))) {
6480 lck_mtx_lock(&nmp
->nm_lock
);
6481 nmp
->nm_state
|= NFSSTA_REVOKE
;
6482 nfs_mount_sock_thread_wake(nmp
);
6483 lck_mtx_unlock(&nmp
->nm_lock
);
6489 * Claim the delegated open combinations that each of this node's open files hold.
6492 nfs4_claim_delegated_state_for_node(nfsnode_t np
, int flags
)
6494 struct nfs_open_file
*nofp
;
6497 lck_mtx_lock(&np
->n_openlock
);
6499 /* walk the open file list looking for opens with delegated state to claim */
6501 TAILQ_FOREACH(nofp
, &np
->n_opens
, nof_link
) {
6502 if (!nofp
->nof_d_rw_drw
&& !nofp
->nof_d_w_drw
&& !nofp
->nof_d_r_drw
&&
6503 !nofp
->nof_d_rw_dw
&& !nofp
->nof_d_w_dw
&& !nofp
->nof_d_r_dw
&&
6504 !nofp
->nof_d_rw
&& !nofp
->nof_d_w
&& !nofp
->nof_d_r
) {
6507 lck_mtx_unlock(&np
->n_openlock
);
6508 error
= nfs4_claim_delegated_state_for_open_file(nofp
, flags
);
6509 lck_mtx_lock(&np
->n_openlock
);
6516 lck_mtx_unlock(&np
->n_openlock
);
6522 * Mark a node as needed to have its delegation returned.
6523 * Queue it up on the delegation return queue.
6524 * Make sure the thread is running.
6527 nfs4_delegation_return_enqueue(nfsnode_t np
)
6529 struct nfsmount
*nmp
;
6532 if (nfs_mount_gone(nmp
)) {
6536 lck_mtx_lock(&np
->n_openlock
);
6537 np
->n_openflags
|= N_DELEG_RETURN
;
6538 lck_mtx_unlock(&np
->n_openlock
);
6540 lck_mtx_lock(&nmp
->nm_lock
);
6541 if (np
->n_dreturn
.tqe_next
== NFSNOLIST
) {
6542 TAILQ_INSERT_TAIL(&nmp
->nm_dreturnq
, np
, n_dreturn
);
6544 nfs_mount_sock_thread_wake(nmp
);
6545 lck_mtx_unlock(&nmp
->nm_lock
);
6549 * return any delegation we may have for the given node
6552 nfs4_delegation_return(nfsnode_t np
, int flags
, thread_t thd
, kauth_cred_t cred
)
6554 struct nfsmount
*nmp
;
6556 nfs_stateid dstateid
;
6560 if (nfs_mount_gone(nmp
)) {
6564 fh
= zalloc(nfs_fhandle_zone
);
6566 /* first, make sure the node's marked for delegation return */
6567 lck_mtx_lock(&np
->n_openlock
);
6568 np
->n_openflags
|= (N_DELEG_RETURN
| N_DELEG_RETURNING
);
6569 lck_mtx_unlock(&np
->n_openlock
);
6571 /* make sure nobody else is using the delegation state */
6572 if ((error
= nfs_open_state_set_busy(np
, NULL
))) {
6576 /* claim any delegated state */
6577 if ((error
= nfs4_claim_delegated_state_for_node(np
, flags
))) {
6581 /* return the delegation */
6582 lck_mtx_lock(&np
->n_openlock
);
6583 dstateid
= np
->n_dstateid
;
6584 fh
->fh_len
= np
->n_fhsize
;
6585 bcopy(np
->n_fhp
, fh
->fh_data
, fh
->fh_len
);
6586 lck_mtx_unlock(&np
->n_openlock
);
6587 error
= nfs4_delegreturn_rpc(NFSTONMP(np
), fh
->fh_data
, fh
->fh_len
, &dstateid
, flags
, thd
, cred
);
6588 /* assume delegation is gone for all errors except ETIMEDOUT, NFSERR_*MOVED */
6589 if ((error
!= ETIMEDOUT
) && (error
!= NFSERR_MOVED
) && (error
!= NFSERR_LEASE_MOVED
)) {
6590 lck_mtx_lock(&np
->n_openlock
);
6591 np
->n_openflags
&= ~N_DELEG_MASK
;
6592 lck_mtx_lock(&nmp
->nm_lock
);
6593 if (np
->n_dlink
.tqe_next
!= NFSNOLIST
) {
6594 TAILQ_REMOVE(&nmp
->nm_delegations
, np
, n_dlink
);
6595 np
->n_dlink
.tqe_next
= NFSNOLIST
;
6597 lck_mtx_unlock(&nmp
->nm_lock
);
6598 lck_mtx_unlock(&np
->n_openlock
);
6602 /* make sure it's no longer on the return queue and clear the return flags */
6603 lck_mtx_lock(&nmp
->nm_lock
);
6604 if (np
->n_dreturn
.tqe_next
!= NFSNOLIST
) {
6605 TAILQ_REMOVE(&nmp
->nm_dreturnq
, np
, n_dreturn
);
6606 np
->n_dreturn
.tqe_next
= NFSNOLIST
;
6608 lck_mtx_unlock(&nmp
->nm_lock
);
6609 lck_mtx_lock(&np
->n_openlock
);
6610 np
->n_openflags
&= ~(N_DELEG_RETURN
| N_DELEG_RETURNING
);
6611 lck_mtx_unlock(&np
->n_openlock
);
6614 NP(np
, "nfs4_delegation_return, error %d", error
);
6615 if (error
== ETIMEDOUT
) {
6616 nfs_need_reconnect(nmp
);
6618 if (nfs_mount_state_error_should_restart(error
)) {
6619 /* make sure recovery happens */
6620 lck_mtx_lock(&nmp
->nm_lock
);
6621 nfs_need_recover(nmp
, nfs_mount_state_error_delegation_lost(error
) ? NFSERR_EXPIRED
: 0);
6622 lck_mtx_unlock(&nmp
->nm_lock
);
6626 nfs_open_state_clear_busy(np
);
6627 NFS_ZFREE(nfs_fhandle_zone
, fh
);
6632 * RPC to return a delegation for a file handle
6635 nfs4_delegreturn_rpc(struct nfsmount
*nmp
, u_char
*fhp
, int fhlen
, struct nfs_stateid
*sid
, int flags
, thread_t thd
, kauth_cred_t cred
)
6637 int error
= 0, status
, numops
;
6639 struct nfsm_chain nmreq
, nmrep
;
6640 struct nfsreq_secinfo_args si
;
6642 NFSREQ_SECINFO_SET(&si
, NULL
, fhp
, fhlen
, NULL
, 0);
6643 nfsm_chain_null(&nmreq
);
6644 nfsm_chain_null(&nmrep
);
6646 // PUTFH, DELEGRETURN
6648 nfsm_chain_build_alloc_init(error
, &nmreq
, 16 * NFSX_UNSIGNED
);
6649 nfsm_chain_add_compound_header(error
, &nmreq
, "delegreturn", nmp
->nm_minor_vers
, numops
);
6651 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
6652 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, fhp
, fhlen
);
6654 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_DELEGRETURN
);
6655 nfsm_chain_add_stateid(error
, &nmreq
, sid
);
6656 nfsm_chain_build_done(error
, &nmreq
);
6657 nfsm_assert(error
, (numops
== 0), EPROTO
);
6659 error
= nfs_request2(NULL
, nmp
->nm_mountp
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, flags
, &nmrep
, &xid
, &status
);
6660 nfsm_chain_skip_tag(error
, &nmrep
);
6661 nfsm_chain_get_32(error
, &nmrep
, numops
);
6662 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
6663 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_DELEGRETURN
);
6665 nfsm_chain_cleanup(&nmreq
);
6666 nfsm_chain_cleanup(&nmrep
);
6669 #endif /* CONFIG_NFS4 */
6673 * Just call nfs_bioread() to do the work.
6675 * Note: the exec code paths have a tendency to call VNOP_READ (and VNOP_MMAP)
6676 * without first calling VNOP_OPEN, so we make sure the file is open here.
6680 struct vnop_read_args
/* {
6681 * struct vnodeop_desc *a_desc;
6683 * struct uio *a_uio;
6685 * vfs_context_t a_context;
6688 vnode_t vp
= ap
->a_vp
;
6689 vfs_context_t ctx
= ap
->a_context
;
6691 struct nfsmount
*nmp
;
6692 struct nfs_open_owner
*noop
;
6693 struct nfs_open_file
*nofp
;
6696 if (vnode_vtype(ap
->a_vp
) != VREG
) {
6697 return (vnode_vtype(vp
) == VDIR
) ? EISDIR
: EPERM
;
6702 if (nfs_mount_gone(nmp
)) {
6705 if (np
->n_flag
& NREVOKE
) {
6709 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1);
6714 error
= nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 1);
6715 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
6716 NP(np
, "nfs_vnop_read: LOST %d", kauth_cred_getuid(noop
->noo_cred
));
6720 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
6721 error
= nfs4_reopen(nofp
, vfs_context_thread(ctx
));
6729 nfs_open_owner_rele(noop
);
6733 * Since the read path is a hot path, if we already have
6734 * read access, lets go and try and do the read, without
6735 * busying the mount and open file node for this open owner.
6737 * N.B. This is inherently racy w.r.t. an execve using
6738 * an already open file, in that the read at the end of
6739 * this routine will be racing with a potential close.
6740 * The code below ultimately has the same problem. In practice
6741 * this does not seem to be an issue.
6743 if (nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_READ
) {
6744 nfs_open_owner_rele(noop
);
6747 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
6749 nfs_open_owner_rele(noop
);
6753 * If we don't have a file already open with the access we need (read) then
6754 * we need to open one. Otherwise we just co-opt an open. We might not already
6755 * have access because we're trying to read the first page of the
6758 error
= nfs_open_file_set_busy(nofp
, vfs_context_thread(ctx
));
6760 nfs_mount_state_in_use_end(nmp
, 0);
6761 nfs_open_owner_rele(noop
);
6764 if (!(nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_READ
)) {
6765 /* we don't have the file open, so open it for read access if we're not denied */
6766 if (nofp
->nof_flags
& NFS_OPEN_FILE_NEEDCLOSE
) {
6767 NP(np
, "nfs_vnop_read: File already needs close access: 0x%x, cred: %d thread: %lld",
6768 nofp
->nof_access
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
), thread_tid(vfs_context_thread(ctx
)));
6770 if (nofp
->nof_deny
& NFS_OPEN_SHARE_DENY_READ
) {
6771 nfs_open_file_clear_busy(nofp
);
6772 nfs_mount_state_in_use_end(nmp
, 0);
6773 nfs_open_owner_rele(noop
);
6776 if (np
->n_flag
& NREVOKE
) {
6778 nfs_open_file_clear_busy(nofp
);
6779 nfs_mount_state_in_use_end(nmp
, 0);
6780 nfs_open_owner_rele(noop
);
6783 if (nmp
->nm_vers
< NFS_VER4
) {
6784 /* NFS v2/v3 opens are always allowed - so just add it. */
6785 nfs_open_file_add_open(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, 0);
6789 error
= nfs4_open(np
, nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, ctx
);
6793 nofp
->nof_flags
|= NFS_OPEN_FILE_NEEDCLOSE
;
6797 nfs_open_file_clear_busy(nofp
);
6799 if (nfs_mount_state_in_use_end(nmp
, error
)) {
6803 nfs_open_owner_rele(noop
);
6808 return nfs_bioread(VTONFS(ap
->a_vp
), ap
->a_uio
, ap
->a_ioflag
, ap
->a_context
);
6813 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6814 * Files are created using the NFSv4 OPEN RPC. So we must open the
6815 * file to create it and then close it.
6819 struct vnop_create_args
/* {
6820 * struct vnodeop_desc *a_desc;
6823 * struct componentname *a_cnp;
6824 * struct vnode_attr *a_vap;
6825 * vfs_context_t a_context;
6828 vfs_context_t ctx
= ap
->a_context
;
6829 struct componentname
*cnp
= ap
->a_cnp
;
6830 struct vnode_attr
*vap
= ap
->a_vap
;
6831 vnode_t dvp
= ap
->a_dvp
;
6832 vnode_t
*vpp
= ap
->a_vpp
;
6833 struct nfsmount
*nmp
;
6835 int error
= 0, busyerror
= 0, accessMode
, denyMode
;
6836 struct nfs_open_owner
*noop
= NULL
;
6837 struct nfs_open_file
*newnofp
= NULL
, *nofp
= NULL
;
6840 if (nfs_mount_gone(nmp
)) {
6845 nfs_avoid_needless_id_setting_on_create(VTONFS(dvp
), vap
, ctx
);
6848 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1);
6854 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
6856 nfs_open_owner_rele(noop
);
6860 /* grab a provisional, nodeless open file */
6861 error
= nfs_open_file_find(NULL
, noop
, &newnofp
, 0, 0, 1);
6862 if (!error
&& (newnofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
6863 printf("nfs_vnop_create: LOST\n");
6866 if (!error
&& (newnofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
6867 /* This shouldn't happen given that this is a new, nodeless nofp */
6868 error
= nfs4_reopen(newnofp
, vfs_context_thread(ctx
));
6869 nfs_open_file_destroy(newnofp
);
6872 nfs_mount_state_in_use_end(nmp
, 0);
6877 error
= nfs_open_file_set_busy(newnofp
, vfs_context_thread(ctx
));
6881 nfs_open_file_destroy(newnofp
);
6888 * We're just trying to create the file.
6889 * We'll create/open it RW, and set NFS_OPEN_FILE_CREATE.
6891 accessMode
= NFS_OPEN_SHARE_ACCESS_BOTH
;
6892 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
6894 /* Do the open/create */
6895 error
= nfs4_open_rpc(newnofp
, ctx
, cnp
, vap
, dvp
, vpp
, NFS_OPEN_CREATE
, accessMode
, denyMode
);
6896 if ((error
== EACCES
) && vap
&& !(vap
->va_vaflags
& VA_EXCLUSIVE
) &&
6897 VATTR_IS_ACTIVE(vap
, va_mode
) && !(vap
->va_mode
& S_IWUSR
)) {
6899 * Hmm... it looks like we may have a situation where the request was
6900 * retransmitted because we didn't get the first response which successfully
6901 * created/opened the file and then the second time we were denied the open
6902 * because the mode the file was created with doesn't allow write access.
6904 * We'll try to work around this by temporarily updating the mode and
6905 * retrying the open.
6907 struct vnode_attr vattr
;
6909 /* first make sure it's there */
6910 int error2
= nfs_lookitup(VTONFS(dvp
), cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
, &np
);
6911 if (!error2
&& np
) {
6912 nfs_node_unlock(np
);
6914 if (vnode_vtype(NFSTOV(np
)) == VREG
) {
6916 VATTR_SET(&vattr
, va_mode
, (vap
->va_mode
| S_IWUSR
));
6917 if (!nfs4_setattr_rpc(np
, &vattr
, ctx
)) {
6918 error2
= nfs4_open_rpc(newnofp
, ctx
, cnp
, NULL
, dvp
, vpp
, NFS_OPEN_NOCREATE
, accessMode
, denyMode
);
6920 VATTR_SET(&vattr
, va_mode
, vap
->va_mode
);
6921 nfs4_setattr_rpc(np
, &vattr
, ctx
);
6933 if (!error
&& !*vpp
) {
6934 printf("nfs4_open_rpc returned without a node?\n");
6935 /* Hmmm... with no node, we have no filehandle and can't close it */
6939 /* need to cleanup our temporary nofp */
6940 nfs_open_file_clear_busy(newnofp
);
6941 nfs_open_file_destroy(newnofp
);
6945 /* After we have a node, add our open file struct to the node */
6947 nfs_open_file_add_open(newnofp
, accessMode
, denyMode
, 0);
6949 error
= nfs_open_file_find_internal(np
, noop
, &nofp
, 0, 0, 0);
6951 /* This shouldn't happen, because we passed in a new nofp to use. */
6952 printf("nfs_open_file_find_internal failed! %d\n", error
);
6954 } else if (nofp
!= newnofp
) {
6956 * Hmm... an open file struct already exists.
6957 * Mark the existing one busy and merge our open into it.
6958 * Then destroy the one we created.
6959 * Note: there's no chance of an open confict because the
6960 * open has already been granted.
6962 busyerror
= nfs_open_file_set_busy(nofp
, NULL
);
6963 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, 0);
6964 nofp
->nof_stateid
= newnofp
->nof_stateid
;
6965 if (newnofp
->nof_flags
& NFS_OPEN_FILE_POSIXLOCK
) {
6966 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
6968 nfs_open_file_clear_busy(newnofp
);
6969 nfs_open_file_destroy(newnofp
);
6972 /* mark the node as holding a create-initiated open */
6973 nofp
->nof_flags
|= NFS_OPEN_FILE_CREATE
;
6974 nofp
->nof_creator
= current_thread();
6976 if (nofp
&& !busyerror
) {
6977 nfs_open_file_clear_busy(nofp
);
6979 if (nfs_mount_state_in_use_end(nmp
, error
)) {
6980 nofp
= newnofp
= NULL
;
6985 nfs_open_owner_rele(noop
);
6991 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6997 struct componentname
*cnp
,
6998 struct vnode_attr
*vap
,
7003 struct nfsmount
*nmp
;
7004 struct nfs_vattr
*nvattr
;
7005 int error
= 0, create_error
= EIO
, lockerror
= ENOENT
, busyerror
= ENOENT
, status
;
7006 int nfsvers
, namedattrs
, numops
;
7007 u_int64_t xid
= 0, savedxid
= 0;
7008 nfsnode_t np
= NULL
;
7009 vnode_t newvp
= NULL
;
7010 struct nfsm_chain nmreq
, nmrep
;
7011 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
7016 struct nfs_dulookup
*dul
;
7017 struct nfsreq_secinfo_args si
;
7019 nmp
= NFSTONMP(dnp
);
7020 if (nfs_mount_gone(nmp
)) {
7023 nfsvers
= nmp
->nm_vers
;
7024 namedattrs
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
);
7025 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
7029 sd
.specdata1
= sd
.specdata2
= 0;
7038 if (!VATTR_IS_ACTIVE(vap
, va_rdev
)) {
7041 sd
.specdata1
= major(vap
->va_rdev
);
7042 sd
.specdata2
= minor(vap
->va_rdev
);
7055 fh
= zalloc(nfs_fhandle_zone
);
7056 req
= zalloc(nfs_req_zone
);
7057 MALLOC(dul
, struct nfs_dulookup
*, sizeof(*dul
), M_TEMP
, M_WAITOK
);
7058 MALLOC(nvattr
, struct nfs_vattr
*, sizeof(*nvattr
), M_TEMP
, M_WAITOK
);
7059 nfs_avoid_needless_id_setting_on_create(dnp
, vap
, ctx
);
7061 error
= busyerror
= nfs_node_set_busy(dnp
, vfs_context_thread(ctx
));
7063 nfs_dulookup_init(dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
);
7066 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
7067 NVATTR_INIT(nvattr
);
7068 nfsm_chain_null(&nmreq
);
7069 nfsm_chain_null(&nmrep
);
7071 // PUTFH, SAVEFH, CREATE, GETATTR(FH), RESTOREFH, GETATTR
7073 nfsm_chain_build_alloc_init(error
, &nmreq
, 66 * NFSX_UNSIGNED
);
7074 nfsm_chain_add_compound_header(error
, &nmreq
, tag
, nmp
->nm_minor_vers
, numops
);
7076 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7077 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
7079 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
7081 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_CREATE
);
7082 nfsm_chain_add_32(error
, &nmreq
, type
);
7083 if (type
== NFLNK
) {
7084 nfsm_chain_add_name(error
, &nmreq
, link
, strlen(link
), nmp
);
7085 } else if ((type
== NFBLK
) || (type
== NFCHR
)) {
7086 nfsm_chain_add_32(error
, &nmreq
, sd
.specdata1
);
7087 nfsm_chain_add_32(error
, &nmreq
, sd
.specdata2
);
7089 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
7090 nfsm_chain_add_fattr4(error
, &nmreq
, vap
, nmp
);
7092 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7093 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
7094 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
7095 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, NULL
);
7097 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
7099 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7100 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
7101 nfsm_chain_build_done(error
, &nmreq
);
7102 nfsm_assert(error
, (numops
== 0), EPROTO
);
7105 error
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
7106 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &si
, 0, NULL
, &req
);
7109 nfs_dulookup_start(dul
, dnp
, ctx
);
7111 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
7114 if ((lockerror
= nfs_node_lock(dnp
))) {
7117 nfsm_chain_skip_tag(error
, &nmrep
);
7118 nfsm_chain_get_32(error
, &nmrep
, numops
);
7119 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
7120 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
7122 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_CREATE
);
7123 nfsm_chain_check_change_info(error
, &nmrep
, dnp
);
7124 bmlen
= NFS_ATTR_BITMAP_LEN
;
7125 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
7126 /* At this point if we have no error, the object was created. */
7127 /* if we don't get attributes, then we should lookitup. */
7128 create_error
= error
;
7130 nfs_vattr_set_supported(bitmap
, vap
);
7131 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7133 error
= nfs4_parsefattr(&nmrep
, NULL
, nvattr
, fh
, NULL
, NULL
);
7135 if (!NFS_BITMAP_ISSET(nvattr
->nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
7136 printf("nfs: create/%s didn't return filehandle? %s\n", tag
, cnp
->cn_nameptr
);
7140 /* directory attributes: if we don't get them, make sure to invalidate */
7141 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
7142 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7144 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
7146 NATTRINVALIDATE(dnp
);
7150 nfsm_chain_cleanup(&nmreq
);
7151 nfsm_chain_cleanup(&nmrep
);
7154 if (!create_error
&& (dnp
->n_flag
& NNEGNCENTRIES
)) {
7155 dnp
->n_flag
&= ~NNEGNCENTRIES
;
7156 cache_purge_negatives(NFSTOV(dnp
));
7158 dnp
->n_flag
|= NMODIFIED
;
7159 nfs_node_unlock(dnp
);
7160 /* nfs_getattr() will check changed and purge caches */
7161 nfs_getattr(dnp
, NULL
, ctx
, NGA_CACHED
);
7164 if (!error
&& fh
->fh_len
) {
7165 /* create the vnode with the filehandle and attributes */
7167 error
= nfs_nget(NFSTOMP(dnp
), dnp
, cnp
, fh
->fh_data
, fh
->fh_len
, nvattr
, &xid
, req
->r_auth
, NG_MAKEENTRY
, &np
);
7174 nfs_dulookup_finish(dul
, dnp
, ctx
);
7177 NVATTR_CLEANUP(nvattr
);
7178 NFS_ZFREE(nfs_fhandle_zone
, fh
);
7179 NFS_ZFREE(nfs_req_zone
, req
);
7181 FREE(nvattr
, M_TEMP
);
7184 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
7185 * if we can succeed in looking up the object.
7187 if ((create_error
== EEXIST
) || (!create_error
&& !newvp
)) {
7188 error
= nfs_lookitup(dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
, &np
);
7191 if (vnode_vtype(newvp
) != nfstov_type(type
, nfsvers
)) {
7197 nfs_node_clear_busy(dnp
);
7201 nfs_node_unlock(np
);
7205 nfs_node_unlock(np
);
// NOTE(review): lossy extraction fragment of nfs4_vnop_mknod(); the function
// signature, braces, and several interior lines (error returns, switch cases)
// are missing. The embedded numbers are original source line numbers left over
// from the extraction — they are not part of the code.
7213 struct vnop_mknod_args
/* {
7214 * struct vnodeop_desc *a_desc;
7217 * struct componentname *a_cnp;
7218 * struct vnode_attr *a_vap;
7219 * vfs_context_t a_context;
// np receives the newly created node from nfs4_create_rpc()
7222 nfsnode_t np
= NULL
;
7223 struct nfsmount
*nmp
;
7226 nmp
= VTONMP(ap
->a_dvp
);
// fail fast if the mount has gone away (missing body presumably returns an
// error here -- TODO confirm against the full source)
7227 if (nfs_mount_gone(nmp
)) {
// mknod requires the caller to supply a file type in the attributes
7231 if (!VATTR_IS_ACTIVE(ap
->a_vap
, va_type
)) {
// dispatch on the requested special-file type (cases not visible here)
7234 switch (ap
->a_vap
->va_type
) {
// issue the NFSv4 CREATE compound via the shared helper, translating the
// VFS type to the on-the-wire NFS type for this protocol version
7244 error
= nfs4_create_rpc(ap
->a_context
, VTONFS(ap
->a_dvp
), ap
->a_cnp
, ap
->a_vap
,
7245 vtonfs_type(ap
->a_vap
->va_type
, nmp
->nm_vers
), NULL
, &np
);
// on success, hand the new vnode back to the caller
7247 *ap
->a_vpp
= NFSTOV(np
);
// NOTE(review): lossy extraction fragment of nfs4_vnop_mkdir(); the function
// signature and interior lines are missing. Embedded numbers are original
// source line numbers from the extraction, not code.
7254 struct vnop_mkdir_args
/* {
7255 * struct vnodeop_desc *a_desc;
7258 * struct componentname *a_cnp;
7259 * struct vnode_attr *a_vap;
7260 * vfs_context_t a_context;
// np receives the newly created directory node
7263 nfsnode_t np
= NULL
;
// create the directory via the common NFSv4 CREATE RPC helper
// (the type argument, presumably NFDIR, is on a missing line -- TODO confirm)
7266 error
= nfs4_create_rpc(ap
->a_context
, VTONFS(ap
->a_dvp
), ap
->a_cnp
, ap
->a_vap
,
// on success, return the new vnode to the caller
7269 *ap
->a_vpp
= NFSTOV(np
);
// NOTE(review): lossy extraction fragment of nfs4_vnop_symlink(); the function
// signature and interior lines are missing. Embedded numbers are original
// source line numbers from the extraction, not code.
7276 struct vnop_symlink_args
/* {
7277 * struct vnodeop_desc *a_desc;
7280 * struct componentname *a_cnp;
7281 * struct vnode_attr *a_vap;
7283 * vfs_context_t a_context;
// np receives the newly created symlink node
7286 nfsnode_t np
= NULL
;
// create the symlink via the common NFSv4 CREATE RPC helper:
// type NFLNK, with the link target text passed through as 'link'
7289 error
= nfs4_create_rpc(ap
->a_context
, VTONFS(ap
->a_dvp
), ap
->a_cnp
, ap
->a_vap
,
7290 NFLNK
, ap
->a_target
, &np
);
// on success, return the new vnode to the caller
7292 *ap
->a_vpp
= NFSTOV(np
);
7299 struct vnop_link_args
/* {
7300 * struct vnodeop_desc *a_desc;
7303 * struct componentname *a_cnp;
7304 * vfs_context_t a_context;
7307 vfs_context_t ctx
= ap
->a_context
;
7308 vnode_t vp
= ap
->a_vp
;
7309 vnode_t tdvp
= ap
->a_tdvp
;
7310 struct componentname
*cnp
= ap
->a_cnp
;
7311 int error
= 0, lockerror
= ENOENT
, status
;
7312 struct nfsmount
*nmp
;
7313 nfsnode_t np
= VTONFS(vp
);
7314 nfsnode_t tdnp
= VTONFS(tdvp
);
7315 int nfsvers
, numops
;
7316 u_int64_t xid
, savedxid
;
7317 struct nfsm_chain nmreq
, nmrep
;
7318 struct nfsreq_secinfo_args si
;
7320 if (vnode_mount(vp
) != vnode_mount(tdvp
)) {
7325 if (nfs_mount_gone(nmp
)) {
7328 nfsvers
= nmp
->nm_vers
;
7329 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
7332 if (tdnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
7337 * Push all writes to the server, so that the attribute cache
7338 * doesn't get "out of sync" with the server.
7339 * XXX There should be a better way!
7341 nfs_flush(np
, MNT_WAIT
, vfs_context_thread(ctx
), V_IGNORE_WRITEERR
);
7343 if ((error
= nfs_node_set_busy2(tdnp
, np
, vfs_context_thread(ctx
)))) {
7347 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
7348 nfsm_chain_null(&nmreq
);
7349 nfsm_chain_null(&nmrep
);
7351 // PUTFH(SOURCE), SAVEFH, PUTFH(DIR), LINK, GETATTR(DIR), RESTOREFH, GETATTR
7353 nfsm_chain_build_alloc_init(error
, &nmreq
, 29 * NFSX_UNSIGNED
+ cnp
->cn_namelen
);
7354 nfsm_chain_add_compound_header(error
, &nmreq
, "link", nmp
->nm_minor_vers
, numops
);
7356 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7357 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
7359 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
7361 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7362 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, tdnp
->n_fhp
, tdnp
->n_fhsize
);
7364 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LINK
);
7365 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
7367 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7368 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, tdnp
);
7370 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
7372 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7373 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
7374 nfsm_chain_build_done(error
, &nmreq
);
7375 nfsm_assert(error
, (numops
== 0), EPROTO
);
7377 error
= nfs_request(tdnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
7379 if ((lockerror
= nfs_node_lock2(tdnp
, np
))) {
7383 nfsm_chain_skip_tag(error
, &nmrep
);
7384 nfsm_chain_get_32(error
, &nmrep
, numops
);
7385 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
7386 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
7387 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
7388 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LINK
);
7389 nfsm_chain_check_change_info(error
, &nmrep
, tdnp
);
7390 /* directory attributes: if we don't get them, make sure to invalidate */
7391 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7393 nfsm_chain_loadattr(error
, &nmrep
, tdnp
, nfsvers
, &xid
);
7395 NATTRINVALIDATE(tdnp
);
7397 /* link attributes: if we don't get them, make sure to invalidate */
7398 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
7399 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7401 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
7403 NATTRINVALIDATE(np
);
7406 nfsm_chain_cleanup(&nmreq
);
7407 nfsm_chain_cleanup(&nmrep
);
7409 tdnp
->n_flag
|= NMODIFIED
;
7411 /* Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. */
7412 if (error
== EEXIST
) {
7415 if (!error
&& (tdnp
->n_flag
& NNEGNCENTRIES
)) {
7416 tdnp
->n_flag
&= ~NNEGNCENTRIES
;
7417 cache_purge_negatives(tdvp
);
7420 nfs_node_unlock2(tdnp
, np
);
7422 nfs_node_clear_busy2(tdnp
, np
);
// NOTE(review): lossy extraction fragment of nfs4_vnop_rmdir(); the function
// signature, braces, and several interior lines (error returns, the
// namedattrs conditionals around the dulookup calls, FREE(dul)) are missing.
// Embedded numbers are original source line numbers from the extraction.
7428 struct vnop_rmdir_args
/* {
7429 * struct vnodeop_desc *a_desc;
7432 * struct componentname *a_cnp;
7433 * vfs_context_t a_context;
7436 vfs_context_t ctx
= ap
->a_context
;
7437 vnode_t vp
= ap
->a_vp
;
7438 vnode_t dvp
= ap
->a_dvp
;
7439 struct componentname
*cnp
= ap
->a_cnp
;
7440 struct nfsmount
*nmp
;
7441 int error
= 0, namedattrs
;
7442 nfsnode_t np
= VTONFS(vp
);
7443 nfsnode_t dnp
= VTONFS(dvp
);
7444 struct nfs_dulookup
*dul
;
// only directories may be removed with rmdir
7446 if (vnode_vtype(vp
) != VDIR
) {
7450 nmp
= NFSTONMP(dnp
);
// fail fast if the mount has gone away
7451 if (nfs_mount_gone(nmp
)) {
// remember whether the server supports named attributes; presumably used to
// gate the dulookup calls below (gating lines not visible -- TODO confirm)
7454 namedattrs
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
);
// busy both the parent directory and the target node for the operation
7456 if ((error
= nfs_node_set_busy2(dnp
, np
, vfs_context_thread(ctx
)))) {
7460 MALLOC(dul
, struct nfs_dulookup
*, sizeof(*dul
), M_TEMP
, M_WAITOK
);
// kick off the "dulookup" (delayed/parallel lookup) for the sibling entry
7462 nfs_dulookup_init(dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
);
7463 nfs_dulookup_start(dul
, dnp
, ctx
);
// perform the actual REMOVE RPC against the server
7466 error
= nfs4_remove_rpc(dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
,
7467 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
// drop name-cache entries for the removed directory
7469 nfs_name_cache_purge(dnp
, np
, cnp
, ctx
);
7470 /* nfs_getattr() will check changed and purge caches */
7471 nfs_getattr(dnp
, NULL
, ctx
, NGA_CACHED
);
7473 nfs_dulookup_finish(dul
, dnp
, ctx
);
7475 nfs_node_clear_busy2(dnp
, np
);
7478 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
7480 if (error
== ENOENT
) {
7485 * remove nfsnode from hash now so we can't accidentally find it
7486 * again if another object gets created with the same filehandle
7487 * before this vnode gets reclaimed
7489 lck_mtx_lock(&nfs_node_hash_mutex
);
7490 if (np
->n_hflag
& NHHASHED
) {
7491 LIST_REMOVE(np
, n_hash
);
7492 np
->n_hflag
&= ~NHHASHED
;
7493 FSDBG(266, 0, np
, np
->n_flag
, 0xb1eb1e);
7495 lck_mtx_unlock(&nfs_node_hash_mutex
);
7502 * NFSv4 Named Attributes
7504 * Both the extended attributes interface and the named streams interface
7505 * are backed by NFSv4 named attributes. The implementations for both use
7506 * a common set of routines in an attempt to reduce code duplication, to
7507 * increase efficiency, to increase caching of both names and data, and to
7508 * confine the complexity.
7510 * Each NFS node caches its named attribute directory's file handle.
7511 * The directory nodes for the named attribute directories are handled
7512 * exactly like regular directories (with a couple minor exceptions).
7513 * Named attribute nodes are also treated as much like regular files as
7516 * Most of the heavy lifting is done by nfs4_named_attr_get().
7520 * Get the given node's attribute directory node.
7521 * If !fetch, then only return a cached node.
7522 * Otherwise, we will attempt to fetch the node from the server.
7523 * (Note: the node should be marked busy.)
7526 nfs4_named_attr_dir_get(nfsnode_t np
, int fetch
, vfs_context_t ctx
)
7528 nfsnode_t adnp
= NULL
;
7529 struct nfsmount
*nmp
;
7530 int error
= 0, status
, numops
;
7531 struct nfsm_chain nmreq
, nmrep
;
7533 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
7535 struct nfs_vattr
*nvattr
;
7536 struct componentname cn
;
7538 struct nfsreq_secinfo_args si
;
7541 if (nfs_mount_gone(nmp
)) {
7544 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
7548 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
7549 fh
= zalloc(nfs_fhandle_zone
);
7550 req
= zalloc(nfs_req_zone
);
7551 MALLOC(nvattr
, struct nfs_vattr
*, sizeof(*nvattr
), M_TEMP
, M_WAITOK
);
7552 NVATTR_INIT(nvattr
);
7553 nfsm_chain_null(&nmreq
);
7554 nfsm_chain_null(&nmrep
);
7556 bzero(&cn
, sizeof(cn
));
7557 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER
, const, char *); /* "/..namedfork/" */
7558 cn
.cn_namelen
= NFS_STRLEN_INT(_PATH_FORKSPECIFIER
);
7559 cn
.cn_nameiop
= LOOKUP
;
7561 if (np
->n_attrdirfh
) {
7562 // XXX can't set parent correctly (to np) yet
7563 error
= nfs_nget(nmp
->nm_mountp
, NULL
, &cn
, np
->n_attrdirfh
+ 1, *np
->n_attrdirfh
,
7564 NULL
, NULL
, RPCAUTH_UNKNOWN
, NG_NOCREATE
, &adnp
);
7574 // PUTFH, OPENATTR, GETATTR
7576 nfsm_chain_build_alloc_init(error
, &nmreq
, 22 * NFSX_UNSIGNED
);
7577 nfsm_chain_add_compound_header(error
, &nmreq
, "openattr", nmp
->nm_minor_vers
, numops
);
7579 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7580 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, np
->n_fhp
, np
->n_fhsize
);
7582 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPENATTR
);
7583 nfsm_chain_add_32(error
, &nmreq
, 0);
7585 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7586 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
7587 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
7588 nfsm_chain_add_bitmap_masked(error
, &nmreq
, bitmap
,
7589 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
7590 nfsm_chain_build_done(error
, &nmreq
);
7591 nfsm_assert(error
, (numops
== 0), EPROTO
);
7593 error
= nfs_request_async(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
7594 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &si
, 0, NULL
, &req
);
7596 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
7599 nfsm_chain_skip_tag(error
, &nmrep
);
7600 nfsm_chain_get_32(error
, &nmrep
, numops
);
7601 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
7602 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPENATTR
);
7603 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7605 error
= nfs4_parsefattr(&nmrep
, NULL
, nvattr
, fh
, NULL
, NULL
);
7607 if (!NFS_BITMAP_ISSET(nvattr
->nva_bitmap
, NFS_FATTR_FILEHANDLE
) || !fh
->fh_len
) {
7611 if (!np
->n_attrdirfh
|| (*np
->n_attrdirfh
!= fh
->fh_len
)) {
7612 /* (re)allocate attrdir fh buffer */
7613 if (np
->n_attrdirfh
) {
7614 FREE(np
->n_attrdirfh
, M_TEMP
);
7616 MALLOC(np
->n_attrdirfh
, u_char
*, fh
->fh_len
+ 1, M_TEMP
, M_WAITOK
);
7618 if (!np
->n_attrdirfh
) {
7622 /* cache the attrdir fh in the node */
7623 *np
->n_attrdirfh
= (unsigned char)fh
->fh_len
; /* No truncation because fh_len's value is checked during nfs4_parsefattr() */
7624 bcopy(fh
->fh_data
, np
->n_attrdirfh
+ 1, fh
->fh_len
);
7625 /* create node for attrdir */
7626 // XXX can't set parent correctly (to np) yet
7627 error
= nfs_nget(NFSTOMP(np
), NULL
, &cn
, fh
->fh_data
, fh
->fh_len
, nvattr
, &xid
, req
->r_auth
, 0, &adnp
);
7629 NVATTR_CLEANUP(nvattr
);
7630 NFS_ZFREE(nfs_fhandle_zone
, fh
);
7631 NFS_ZFREE(nfs_req_zone
, req
);
7632 FREE(nvattr
, M_TEMP
);
7633 nfsm_chain_cleanup(&nmreq
);
7634 nfsm_chain_cleanup(&nmrep
);
7637 /* sanity check that this node is an attribute directory */
7638 if (adnp
->n_vattr
.nva_type
!= VDIR
) {
7641 if (!(adnp
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
7644 nfs_node_unlock(adnp
);
7646 vnode_put(NFSTOV(adnp
));
7649 return error
? NULL
: adnp
;
7653 * Get the given node's named attribute node for the name given.
7655 * In an effort to increase the performance of named attribute access, we try
7656 * to reduce server requests by doing the following:
7658 * - cache the node's named attribute directory file handle in the node
7659 * - maintain a directory vnode for the attribute directory
7660 * - use name cache entries (positive and negative) to speed up lookups
7661 * - optionally open the named attribute (with the given accessMode) in the same RPC
7662 * - combine attribute directory retrieval with the lookup/open RPC
7663 * - optionally prefetch the named attribute's first block of data in the same RPC
7665 * Also, in an attempt to reduce the number of copies/variations of this code,
7666 * parts of the RPC building/processing code are conditionalized on what is
7667 * needed for any particular request (openattr, lookup vs. open, read).
7669 * Note that because we may not have the attribute directory node when we start
7670 * the lookup/open, we lock both the node and the attribute directory node.
7673 #define NFS_GET_NAMED_ATTR_CREATE 0x1
7674 #define NFS_GET_NAMED_ATTR_CREATE_GUARDED 0x2
7675 #define NFS_GET_NAMED_ATTR_TRUNCATE 0x4
7676 #define NFS_GET_NAMED_ATTR_PREFETCH 0x8
7679 nfs4_named_attr_get(
7681 struct componentname
*cnp
,
7682 uint32_t accessMode
,
7686 struct nfs_open_file
**nofpp
)
7688 struct nfsmount
*nmp
;
7689 int error
= 0, open_error
= EIO
;
7690 int inuse
= 0, adlockerror
= ENOENT
, busyerror
= ENOENT
, adbusyerror
= ENOENT
, nofpbusyerror
= ENOENT
;
7691 int create
, guarded
, prefetch
, truncate
, noopbusy
= 0;
7692 int open
, status
, numops
, hadattrdir
, negnamecache
;
7693 struct nfs_vattr
*nvattr
;
7694 struct vnode_attr vattr
;
7695 nfsnode_t adnp
= NULL
, anp
= NULL
;
7697 u_int64_t xid
= 0, savedxid
= 0;
7698 struct nfsm_chain nmreq
, nmrep
;
7699 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
7700 uint32_t denyMode
= 0, rflags
, delegation
, recall
, eof
, rlen
, retlen
;
7701 nfs_stateid stateid
, dstateid
;
7703 struct nfs_open_owner
*noop
= NULL
;
7704 struct nfs_open_file
*newnofp
= NULL
, *nofp
= NULL
;
7705 struct vnop_access_args naa
;
7710 uint32_t ace_type
, ace_flags
, ace_mask
, len
, slen
;
7711 struct kauth_ace ace
;
7713 struct nfsreq_secinfo_args si
;
7716 rflags
= delegation
= recall
= eof
= rlen
= retlen
= 0;
7719 slen
= sizeof(sbuf
);
7722 if (nfs_mount_gone(nmp
)) {
7725 fh
= zalloc(nfs_fhandle_zone
);
7726 req
= zalloc(nfs_req_zone
);
7727 MALLOC(nvattr
, struct nfs_vattr
*, sizeof(*nvattr
), M_TEMP
, M_WAITOK
);
7728 NVATTR_INIT(nvattr
);
7730 bzero(&dstateid
, sizeof(dstateid
));
7731 negnamecache
= !NMFLAG(nmp
, NONEGNAMECACHE
);
7732 thd
= vfs_context_thread(ctx
);
7733 cred
= vfs_context_ucred(ctx
);
7734 create
= (flags
& NFS_GET_NAMED_ATTR_CREATE
) ? NFS_OPEN_CREATE
: NFS_OPEN_NOCREATE
;
7735 guarded
= (flags
& NFS_GET_NAMED_ATTR_CREATE_GUARDED
) ? NFS_CREATE_GUARDED
: NFS_CREATE_UNCHECKED
;
7736 truncate
= (flags
& NFS_GET_NAMED_ATTR_TRUNCATE
);
7737 prefetch
= (flags
& NFS_GET_NAMED_ATTR_PREFETCH
);
7740 error
= nfs_getattr(np
, nvattr
, ctx
, NGA_CACHED
);
7744 if (NFS_BITMAP_ISSET(nvattr
->nva_bitmap
, NFS_FATTR_NAMED_ATTR
) &&
7745 !(nvattr
->nva_flags
& NFS_FFLAG_HAS_NAMED_ATTRS
)) {
7749 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_NONE
) {
7750 /* shouldn't happen... but just be safe */
7751 printf("nfs4_named_attr_get: create with no access %s\n", cnp
->cn_nameptr
);
7752 accessMode
= NFS_OPEN_SHARE_ACCESS_READ
;
7754 open
= (accessMode
!= NFS_OPEN_SHARE_ACCESS_NONE
);
7757 * We're trying to open the file.
7758 * We'll create/open it with the given access mode,
7759 * and set NFS_OPEN_FILE_CREATE.
7761 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
7762 if (prefetch
&& guarded
) {
7763 prefetch
= 0; /* no sense prefetching data that can't be there */
7765 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1);
7772 if ((error
= busyerror
= nfs_node_set_busy(np
, vfs_context_thread(ctx
)))) {
7776 adnp
= nfs4_named_attr_dir_get(np
, 0, ctx
);
7777 hadattrdir
= (adnp
!= NULL
);
7780 /* use the special state ID because we don't have a real one to send */
7781 stateid
.seqid
= stateid
.other
[0] = stateid
.other
[1] = stateid
.other
[2] = 0;
7782 rlen
= MIN(nmp
->nm_rsize
, nmp
->nm_biosize
);
7784 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
7785 nfsm_chain_null(&nmreq
);
7786 nfsm_chain_null(&nmrep
);
7789 if ((error
= adbusyerror
= nfs_node_set_busy(adnp
, vfs_context_thread(ctx
)))) {
7792 /* nfs_getattr() will check changed and purge caches */
7793 error
= nfs_getattr(adnp
, NULL
, ctx
, NGA_CACHED
);
7795 error
= cache_lookup(NFSTOV(adnp
), &avp
, cnp
);
7798 /* negative cache entry */
7802 /* try dir buf cache lookup */
7803 error
= nfs_dir_buf_cache_lookup(adnp
, &anp
, cnp
, ctx
, 0, NULL
);
7804 if (!error
&& anp
) {
7805 /* dir buf cache hit */
7809 if (error
!= -1) { /* cache miss */
7814 /* cache hit, not really an error */
7815 OSAddAtomic64(1, &nfsstats
.lookupcache_hits
);
7817 *anpp
= anp
= VTONFS(avp
);
7820 nfs_node_clear_busy(adnp
);
7821 adbusyerror
= ENOENT
;
7823 /* check for directory access */
7824 naa
.a_desc
= &vnop_access_desc
;
7825 naa
.a_vp
= NFSTOV(adnp
);
7826 naa
.a_action
= KAUTH_VNODE_SEARCH
;
7827 naa
.a_context
= ctx
;
7829 /* compute actual success/failure based on accessibility */
7830 error
= nfs_vnop_access(&naa
);
7833 /* we either found it, or hit an error */
7834 if (!error
&& guarded
) {
7835 /* found cached entry but told not to use it */
7837 vnode_put(NFSTOV(anp
));
7840 /* we're done if error or we don't need to open */
7841 if (error
|| !open
) {
7844 /* no error and we need to open... */
7850 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
7852 nfs_open_owner_rele(noop
);
7858 /* grab an open file - possibly provisional/nodeless if cache_lookup() failed */
7859 error
= nfs_open_file_find(anp
, noop
, &newnofp
, 0, 0, 1);
7860 if (!error
&& (newnofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
7861 printf("nfs4_named_attr_get: LOST %d %s\n", kauth_cred_getuid(noop
->noo_cred
), cnp
->cn_nameptr
);
7864 if (!error
&& (newnofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
7865 error
= nfs4_reopen(newnofp
, vfs_context_thread(ctx
));
7866 nfs_open_file_destroy(newnofp
);
7869 nfs_mount_state_in_use_end(nmp
, 0);
7875 error
= nfs_open_file_set_busy(newnofp
, vfs_context_thread(ctx
));
7879 nfs_open_file_destroy(newnofp
);
7886 * We already have the node. So we just need to open
7887 * it - which we may be able to do with a delegation.
7889 open_error
= error
= nfs4_open(anp
, newnofp
, accessMode
, denyMode
, ctx
);
7891 /* open succeeded, so our open file is no longer temporary */
7904 * We either don't have the attrdir or we didn't find the attribute
7905 * in the name cache, so we need to talk to the server.
7907 * If we don't have the attrdir, we'll need to ask the server for that too.
7908 * If the caller is requesting that the attribute be created, we need to
7909 * make sure the attrdir is created.
7910 * The caller may also request that the first block of an existing attribute
7911 * be retrieved at the same time.
7915 /* need to mark the open owner busy during the RPC */
7916 if ((error
= nfs_open_owner_set_busy(noop
, thd
))) {
7923 * We'd like to get updated post-open/lookup attributes for the
7924 * directory and we may also want to prefetch some data via READ.
7925 * We'd like the READ results to be last so that we can leave the
7926 * data in the mbufs until the end.
7928 * At a minimum we're sending: PUTFH, LOOKUP/OPEN, GETATTR, PUTFH, GETATTR
7932 numops
+= 3; // also sending: OPENATTR, GETATTR, OPENATTR
7935 numops
+= 4; // also sending: SAVEFH, RESTOREFH, NVERIFY, READ
7937 nfsm_chain_build_alloc_init(error
, &nmreq
, 64 * NFSX_UNSIGNED
+ cnp
->cn_namelen
);
7938 nfsm_chain_add_compound_header(error
, &nmreq
, "getnamedattr", nmp
->nm_minor_vers
, numops
);
7941 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7942 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, adnp
->n_fhp
, adnp
->n_fhsize
);
7945 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7946 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, np
->n_fhp
, np
->n_fhsize
);
7948 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPENATTR
);
7949 nfsm_chain_add_32(error
, &nmreq
, create
? 1 : 0);
7951 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7952 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
7953 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
7954 nfsm_chain_add_bitmap_masked(error
, &nmreq
, bitmap
,
7955 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
7959 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
7960 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
7961 nfsm_chain_add_32(error
, &nmreq
, accessMode
);
7962 nfsm_chain_add_32(error
, &nmreq
, denyMode
);
7963 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
);
7964 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
7965 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
));
7966 nfsm_chain_add_32(error
, &nmreq
, create
);
7968 nfsm_chain_add_32(error
, &nmreq
, guarded
);
7971 VATTR_SET(&vattr
, va_data_size
, 0);
7973 nfsm_chain_add_fattr4(error
, &nmreq
, &vattr
, nmp
);
7975 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_NULL
);
7976 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
7979 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOOKUP
);
7980 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
7983 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7984 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
7985 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
7986 nfsm_chain_add_bitmap_masked(error
, &nmreq
, bitmap
,
7987 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
7990 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
7994 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7995 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, adnp
->n_fhp
, adnp
->n_fhsize
);
7998 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7999 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, np
->n_fhp
, np
->n_fhsize
);
8001 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPENATTR
);
8002 nfsm_chain_add_32(error
, &nmreq
, 0);
8005 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
8006 nfsm_chain_add_bitmap_masked(error
, &nmreq
, nfs_getattr_bitmap
,
8007 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
8010 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
8012 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_NVERIFY
);
8014 VATTR_SET(&vattr
, va_data_size
, 0);
8015 nfsm_chain_add_fattr4(error
, &nmreq
, &vattr
, nmp
);
8017 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READ
);
8018 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
8019 nfsm_chain_add_64(error
, &nmreq
, 0);
8020 nfsm_chain_add_32(error
, &nmreq
, rlen
);
8022 nfsm_chain_build_done(error
, &nmreq
);
8023 nfsm_assert(error
, (numops
== 0), EPROTO
);
8025 error
= nfs_request_async(hadattrdir
? adnp
: np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
8026 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &si
, open
? R_NOINTR
: 0, NULL
, &req
);
8028 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
8031 if (hadattrdir
&& ((adlockerror
= nfs_node_lock(adnp
)))) {
8032 error
= adlockerror
;
8035 nfsm_chain_skip_tag(error
, &nmrep
);
8036 nfsm_chain_get_32(error
, &nmrep
, numops
);
8037 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
8039 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPENATTR
);
8040 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
8042 error
= nfs4_parsefattr(&nmrep
, NULL
, nvattr
, fh
, NULL
, NULL
);
8044 if (NFS_BITMAP_ISSET(nvattr
->nva_bitmap
, NFS_FATTR_FILEHANDLE
) && fh
->fh_len
) {
8045 if (!np
->n_attrdirfh
|| (*np
->n_attrdirfh
!= fh
->fh_len
)) {
8046 /* (re)allocate attrdir fh buffer */
8047 if (np
->n_attrdirfh
) {
8048 FREE(np
->n_attrdirfh
, M_TEMP
);
8050 MALLOC(np
->n_attrdirfh
, u_char
*, fh
->fh_len
+ 1, M_TEMP
, M_WAITOK
);
8052 if (np
->n_attrdirfh
) {
8053 /* remember the attrdir fh in the node */
8054 *np
->n_attrdirfh
= (unsigned char)fh
->fh_len
; /* No truncation because fh_len's value is checked during nfs4_parsefattr() */
8055 bcopy(fh
->fh_data
, np
->n_attrdirfh
+ 1, fh
->fh_len
);
8056 /* create busied node for attrdir */
8057 struct componentname cn
;
8058 bzero(&cn
, sizeof(cn
));
8059 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER
, const, char *); /* "/..namedfork/" */
8060 cn
.cn_namelen
= NFS_STRLEN_INT(_PATH_FORKSPECIFIER
);
8061 cn
.cn_nameiop
= LOOKUP
;
8062 // XXX can't set parent correctly (to np) yet
8063 error
= nfs_nget(NFSTOMP(np
), NULL
, &cn
, fh
->fh_data
, fh
->fh_len
, nvattr
, &xid
, req
->r_auth
, 0, &adnp
);
8066 /* set the node busy */
8067 SET(adnp
->n_flag
, NBUSY
);
8070 /* if no adnp, oh well... */
8074 NVATTR_CLEANUP(nvattr
);
8078 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
8079 nfs_owner_seqid_increment(noop
, NULL
, error
);
8080 nfsm_chain_get_stateid(error
, &nmrep
, &newnofp
->nof_stateid
);
8081 nfsm_chain_check_change_info(error
, &nmrep
, adnp
);
8082 nfsm_chain_get_32(error
, &nmrep
, rflags
);
8083 bmlen
= NFS_ATTR_BITMAP_LEN
;
8084 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
8085 nfsm_chain_get_32(error
, &nmrep
, delegation
);
8087 switch (delegation
) {
8088 case NFS_OPEN_DELEGATE_NONE
:
8090 case NFS_OPEN_DELEGATE_READ
:
8091 case NFS_OPEN_DELEGATE_WRITE
:
8092 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
8093 nfsm_chain_get_32(error
, &nmrep
, recall
);
8094 if (delegation
== NFS_OPEN_DELEGATE_WRITE
) { // space (skip) XXX
8095 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
8097 /* if we have any trouble accepting the ACE, just invalidate it */
8098 ace_type
= ace_flags
= ace_mask
= len
= 0;
8099 nfsm_chain_get_32(error
, &nmrep
, ace_type
);
8100 nfsm_chain_get_32(error
, &nmrep
, ace_flags
);
8101 nfsm_chain_get_32(error
, &nmrep
, ace_mask
);
8102 nfsm_chain_get_32(error
, &nmrep
, len
);
8103 ace
.ace_flags
= nfs4_ace_nfstype_to_vfstype(ace_type
, &error
);
8104 ace
.ace_flags
|= nfs4_ace_nfsflags_to_vfsflags(ace_flags
);
8105 ace
.ace_rights
= nfs4_ace_nfsmask_to_vfsrights(ace_mask
);
8106 if (!error
&& (len
>= slen
)) {
8107 MALLOC(s
, char*, len
+ 1, M_TEMP
, M_WAITOK
);
8115 nfsm_chain_get_opaque(error
, &nmrep
, len
, s
);
8117 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(len
));
8121 if (nfs4_id2guid(s
, &ace
.ace_applicable
, (ace_flags
& NFS_ACE_IDENTIFIER_GROUP
))) {
8128 if (s
&& (s
!= sbuf
)) {
8137 /* At this point if we have no error, the object was created/opened. */
8140 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOOKUP
);
8142 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
8144 error
= nfs4_parsefattr(&nmrep
, NULL
, nvattr
, fh
, NULL
, NULL
);
8146 if (!NFS_BITMAP_ISSET(nvattr
->nva_bitmap
, NFS_FATTR_FILEHANDLE
) || !fh
->fh_len
) {
8151 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
8153 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
8155 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPENATTR
);
8157 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
8160 nfsm_chain_loadattr(error
, &nmrep
, adnp
, nmp
->nm_vers
, &xid
);
8164 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
) {
8165 newnofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
8167 if (rflags
& NFS_OPEN_RESULT_CONFIRM
) {
8169 nfs_node_unlock(adnp
);
8170 adlockerror
= ENOENT
;
8172 NVATTR_CLEANUP(nvattr
);
8173 error
= nfs4_open_confirm_rpc(nmp
, adnp
? adnp
: np
, fh
->fh_data
, fh
->fh_len
, noop
, &newnofp
->nof_stateid
, thd
, cred
, nvattr
, &xid
);
8176 if ((adlockerror
= nfs_node_lock(adnp
))) {
8177 error
= adlockerror
;
8183 if (open
&& adnp
&& !adlockerror
) {
8184 if (!open_error
&& (adnp
->n_flag
& NNEGNCENTRIES
)) {
8185 adnp
->n_flag
&= ~NNEGNCENTRIES
;
8186 cache_purge_negatives(NFSTOV(adnp
));
8188 adnp
->n_flag
|= NMODIFIED
;
8189 nfs_node_unlock(adnp
);
8190 adlockerror
= ENOENT
;
8191 nfs_getattr(adnp
, NULL
, ctx
, NGA_CACHED
);
8193 if (adnp
&& !adlockerror
&& (error
== ENOENT
) &&
8194 (cnp
->cn_flags
& MAKEENTRY
) && (cnp
->cn_nameiop
!= CREATE
) && negnamecache
) {
8195 /* add a negative entry in the name cache */
8196 cache_enter(NFSTOV(adnp
), NULL
, cnp
);
8197 adnp
->n_flag
|= NNEGNCENTRIES
;
8199 if (adnp
&& !adlockerror
) {
8200 nfs_node_unlock(adnp
);
8201 adlockerror
= ENOENT
;
8203 if (!error
&& !anp
&& fh
->fh_len
) {
8204 /* create the vnode with the filehandle and attributes */
8206 error
= nfs_nget(NFSTOMP(np
), adnp
, cnp
, fh
->fh_data
, fh
->fh_len
, nvattr
, &xid
, req
->r_auth
, NG_MAKEENTRY
, &anp
);
8209 nfs_node_unlock(anp
);
8211 if (!error
&& open
) {
8212 nfs_open_file_add_open(newnofp
, accessMode
, denyMode
, 0);
8213 /* After we have a node, add our open file struct to the node */
8215 error
= nfs_open_file_find_internal(anp
, noop
, &nofp
, 0, 0, 0);
8217 /* This shouldn't happen, because we passed in a new nofp to use. */
8218 printf("nfs_open_file_find_internal failed! %d\n", error
);
8220 } else if (nofp
!= newnofp
) {
8222 * Hmm... an open file struct already exists.
8223 * Mark the existing one busy and merge our open into it.
8224 * Then destroy the one we created.
8225 * Note: there's no chance of an open confict because the
8226 * open has already been granted.
8228 nofpbusyerror
= nfs_open_file_set_busy(nofp
, NULL
);
8229 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, 0);
8230 nofp
->nof_stateid
= newnofp
->nof_stateid
;
8231 if (newnofp
->nof_flags
& NFS_OPEN_FILE_POSIXLOCK
) {
8232 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
8234 nfs_open_file_clear_busy(newnofp
);
8235 nfs_open_file_destroy(newnofp
);
8241 /* mark the node as holding a create-initiated open */
8242 nofp
->nof_flags
|= NFS_OPEN_FILE_CREATE
;
8243 nofp
->nof_creator
= current_thread();
8250 NVATTR_CLEANUP(nvattr
);
8251 if (open
&& ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
))) {
8252 if (!error
&& anp
&& !recall
) {
8253 /* stuff the delegation state in the node */
8254 lck_mtx_lock(&anp
->n_openlock
);
8255 anp
->n_openflags
&= ~N_DELEG_MASK
;
8256 anp
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
8257 anp
->n_dstateid
= dstateid
;
8259 if (anp
->n_dlink
.tqe_next
== NFSNOLIST
) {
8260 lck_mtx_lock(&nmp
->nm_lock
);
8261 if (anp
->n_dlink
.tqe_next
== NFSNOLIST
) {
8262 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, anp
, n_dlink
);
8264 lck_mtx_unlock(&nmp
->nm_lock
);
8266 lck_mtx_unlock(&anp
->n_openlock
);
8268 /* give the delegation back */
8270 if (NFS_CMPFH(anp
, fh
->fh_data
, fh
->fh_len
)) {
8271 /* update delegation state and return it */
8272 lck_mtx_lock(&anp
->n_openlock
);
8273 anp
->n_openflags
&= ~N_DELEG_MASK
;
8274 anp
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
8275 anp
->n_dstateid
= dstateid
;
8277 if (anp
->n_dlink
.tqe_next
== NFSNOLIST
) {
8278 lck_mtx_lock(&nmp
->nm_lock
);
8279 if (anp
->n_dlink
.tqe_next
== NFSNOLIST
) {
8280 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, anp
, n_dlink
);
8282 lck_mtx_unlock(&nmp
->nm_lock
);
8284 lck_mtx_unlock(&anp
->n_openlock
);
8285 /* don't need to send a separate delegreturn for fh */
8288 /* return anp's current delegation */
8289 nfs4_delegation_return(anp
, 0, thd
, cred
);
8291 if (fh
->fh_len
) { /* return fh's delegation if it wasn't for anp */
8292 nfs4_delegreturn_rpc(nmp
, fh
->fh_data
, fh
->fh_len
, &dstateid
, 0, thd
, cred
);
8298 /* need to cleanup our temporary nofp */
8299 nfs_open_file_clear_busy(newnofp
);
8300 nfs_open_file_destroy(newnofp
);
8302 } else if (nofp
&& !nofpbusyerror
) {
8303 nfs_open_file_clear_busy(nofp
);
8304 nofpbusyerror
= ENOENT
;
8306 if (inuse
&& nfs_mount_state_in_use_end(nmp
, error
)) {
8308 nofp
= newnofp
= NULL
;
8309 rflags
= delegation
= recall
= eof
= rlen
= retlen
= 0;
8312 slen
= sizeof(sbuf
);
8313 nfsm_chain_cleanup(&nmreq
);
8314 nfsm_chain_cleanup(&nmrep
);
8316 vnode_put(NFSTOV(anp
));
8319 hadattrdir
= (adnp
!= NULL
);
8321 nfs_open_owner_clear_busy(noop
);
8329 nfs_open_owner_clear_busy(noop
);
8332 nfs_open_owner_rele(noop
);
8335 if (!error
&& prefetch
&& nmrep
.nmc_mhead
) {
8336 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
8337 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_NVERIFY
);
8338 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READ
);
8339 nfsm_chain_get_32(error
, &nmrep
, eof
);
8340 nfsm_chain_get_32(error
, &nmrep
, retlen
);
8341 if (!error
&& anp
) {
8343 * There can be one problem with doing the prefetch.
8344 * Because we don't have the node before we start the RPC, we
8345 * can't have the buffer busy while the READ is performed.
8346 * So there is a chance that other I/O occured on the same
8347 * range of data while we were performing this RPC. If that
8348 * happens, then it's possible the data we have in the READ
8349 * response is no longer up to date.
8350 * Once we have the node and the buffer, we need to make sure
8351 * that there's no chance we could be putting stale data in
8353 * So, we check if the range read is dirty or if any I/O may
8354 * have occured on it while we were performing our RPC.
8356 struct nfsbuf
*bp
= NULL
;
8358 nfsbufpgs pagemask
, pagemaskand
;
8360 retlen
= MIN(retlen
, rlen
);
8362 /* check if node needs size update or invalidation */
8363 if (ISSET(anp
->n_flag
, NUPDATESIZE
)) {
8364 nfs_data_update_size(anp
, 0);
8366 if (!(error
= nfs_node_lock(anp
))) {
8367 if (anp
->n_flag
& NNEEDINVALIDATE
) {
8368 anp
->n_flag
&= ~NNEEDINVALIDATE
;
8369 nfs_node_unlock(anp
);
8370 error
= nfs_vinvalbuf(NFSTOV(anp
), V_SAVE
| V_IGNORE_WRITEERR
, ctx
, 1);
8371 if (!error
) { /* lets play it safe and just drop the data */
8375 nfs_node_unlock(anp
);
8379 /* calculate page mask for the range of data read */
8380 lastpg
= (retlen
- 1) / PAGE_SIZE
;
8381 nfs_buf_pgs_get_page_mask(&pagemask
, lastpg
+ 1);
8384 error
= nfs_buf_get(anp
, 0, nmp
->nm_biosize
, thd
, NBLK_READ
| NBLK_NOWAIT
, &bp
);
8386 /* don't save the data if dirty or potential I/O conflict */
8387 nfs_buf_pgs_bit_and(&bp
->nb_dirty
, &pagemask
, &pagemaskand
);
8388 if (!error
&& bp
&& !bp
->nb_dirtyoff
&& !nfs_buf_pgs_is_set(&pagemaskand
) &&
8389 timevalcmp(&anp
->n_lastio
, &now
, <)) {
8390 OSAddAtomic64(1, &nfsstats
.read_bios
);
8391 CLR(bp
->nb_flags
, (NB_DONE
| NB_ASYNC
));
8392 SET(bp
->nb_flags
, NB_READ
);
8394 nfsm_chain_get_opaque(error
, &nmrep
, retlen
, bp
->nb_data
);
8396 bp
->nb_error
= error
;
8397 SET(bp
->nb_flags
, NB_ERROR
);
8400 bp
->nb_endio
= rlen
;
8401 if ((retlen
> 0) && (bp
->nb_endio
< (int)retlen
)) {
8402 bp
->nb_endio
= retlen
;
8404 if (eof
|| (retlen
== 0)) {
8405 /* zero out the remaining data (up to EOF) */
8406 off_t rpcrem
, eofrem
, rem
;
8407 rpcrem
= (rlen
- retlen
);
8408 eofrem
= anp
->n_size
- (NBOFF(bp
) + retlen
);
8409 rem
= (rpcrem
< eofrem
) ? rpcrem
: eofrem
;
8411 bzero(bp
->nb_data
+ retlen
, rem
);
8413 } else if ((retlen
< rlen
) && !ISSET(bp
->nb_flags
, NB_ERROR
)) {
8414 /* ugh... short read ... just invalidate for now... */
8415 SET(bp
->nb_flags
, NB_INVAL
);
8418 nfs_buf_read_finish(bp
);
8419 microuptime(&anp
->n_lastio
);
8422 nfs_buf_release(bp
, 1);
8425 error
= 0; /* ignore any transient error in processing the prefetch */
8427 if (adnp
&& !adbusyerror
) {
8428 nfs_node_clear_busy(adnp
);
8429 adbusyerror
= ENOENT
;
8432 nfs_node_clear_busy(np
);
8436 vnode_put(NFSTOV(adnp
));
8439 nfs_mount_state_in_use_end(nmp
, error
);
8441 if (error
&& *anpp
) {
8442 vnode_put(NFSTOV(*anpp
));
8445 nfsm_chain_cleanup(&nmreq
);
8446 nfsm_chain_cleanup(&nmrep
);
8448 NFS_ZFREE(nfs_fhandle_zone
, fh
);
8449 NFS_ZFREE(nfs_req_zone
, req
);
8450 FREE(nvattr
, M_TEMP
);
8455 * Remove a named attribute.
/*
 * nfs4_named_attr_remove
 *
 * Remove the named attribute "name" from NFS node "np".  Looks the
 * attribute up with DELETE intent, fetches the node's named-attribute
 * directory, and issues the remove (nfs_vnop_remove) against it.
 *
 * NOTE(review): this text is a garbled extraction of the original source.
 * Logical lines are split across physical lines, the leading integers
 * (e.g. "8458") are original-file line numbers fused into the text, and
 * several original lines (returns, braces, error paths) were dropped.
 * Code text is preserved byte-identical; comments annotate intent only.
 */
8458 nfs4_named_attr_remove(nfsnode_t np
, nfsnode_t anp
, const char *name
, vfs_context_t ctx
)
/* locals: attribute-directory node, mount, lookup name, remove args */
8460 nfsnode_t adnp
= NULL
;
8461 struct nfsmount
*nmp
;
8462 struct componentname cn
;
8463 struct vnop_remove_args vra
;
8464 int error
, putanp
= 0;
/* bail out if the mount has gone away (body of branch dropped by extraction) */
8467 if (nfs_mount_gone(nmp
)) {
/* build a DELETE-intent componentname for the attribute name */
8471 bzero(&cn
, sizeof(cn
));
8472 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(name
, const, char *);
8473 cn
.cn_namelen
= NFS_STRLEN_INT(name
);
8474 cn
.cn_nameiop
= DELETE
;
/* look up the attribute node; no open access is requested */
8478 error
= nfs4_named_attr_get(np
, &cn
, NFS_OPEN_SHARE_ACCESS_NONE
,
8479 0, ctx
, &anp
, NULL
);
/* no node back, or ENOATTR: nothing to remove (branch body dropped) */
8480 if ((!error
&& !anp
) || (error
== ENOATTR
)) {
8485 vnode_put(NFSTOV(anp
));
/* busy the node while fetching the named attribute directory */
8493 if ((error
= nfs_node_set_busy(np
, vfs_context_thread(ctx
)))) {
8496 adnp
= nfs4_named_attr_dir_get(np
, 1, ctx
);
8497 nfs_node_clear_busy(np
);
/* issue the remove against the attribute directory */
8503 vra
.a_desc
= &vnop_remove_desc
;
8504 vra
.a_dvp
= NFSTOV(adnp
);
8505 vra
.a_vp
= NFSTOV(anp
);
8508 vra
.a_context
= ctx
;
8509 error
= nfs_vnop_remove(&vra
);
/* drop the references taken above */
8512 vnode_put(NFSTOV(adnp
));
8515 vnode_put(NFSTOV(anp
));
8522 struct vnop_getxattr_args
/* {
8523 * struct vnodeop_desc *a_desc;
8525 * const char * a_name;
8529 * vfs_context_t a_context;
8532 vfs_context_t ctx
= ap
->a_context
;
8533 struct nfsmount
*nmp
;
8534 struct nfs_vattr
*nvattr
;
8535 struct componentname cn
;
8537 int error
= 0, isrsrcfork
;
8539 nmp
= VTONMP(ap
->a_vp
);
8540 if (nfs_mount_gone(nmp
)) {
8544 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
)) {
8548 MALLOC(nvattr
, struct nfs_vattr
*, sizeof(*nvattr
), M_TEMP
, M_WAITOK
);
8549 error
= nfs_getattr(VTONFS(ap
->a_vp
), nvattr
, ctx
, NGA_CACHED
);
8553 if (NFS_BITMAP_ISSET(nvattr
->nva_bitmap
, NFS_FATTR_NAMED_ATTR
) &&
8554 !(nvattr
->nva_flags
& NFS_FFLAG_HAS_NAMED_ATTRS
)) {
8559 bzero(&cn
, sizeof(cn
));
8560 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(ap
->a_name
, const, char *);
8561 cn
.cn_namelen
= NFS_STRLEN_INT(ap
->a_name
);
8562 cn
.cn_nameiop
= LOOKUP
;
8563 cn
.cn_flags
= MAKEENTRY
;
8565 /* we'll normally try to prefetch data for xattrs... the resource fork is really a stream */
8566 isrsrcfork
= (bcmp(ap
->a_name
, XATTR_RESOURCEFORK_NAME
, sizeof(XATTR_RESOURCEFORK_NAME
)) == 0);
8568 error
= nfs4_named_attr_get(VTONFS(ap
->a_vp
), &cn
, NFS_OPEN_SHARE_ACCESS_NONE
,
8569 !isrsrcfork
? NFS_GET_NAMED_ATTR_PREFETCH
: 0, ctx
, &anp
, NULL
);
8570 if ((!error
&& !anp
) || (error
== ENOENT
)) {
8575 error
= nfs_bioread(anp
, ap
->a_uio
, 0, ctx
);
8577 *ap
->a_size
= anp
->n_size
;
8581 vnode_put(NFSTOV(anp
));
8584 FREE(nvattr
, M_TEMP
);
8590 struct vnop_setxattr_args
/* {
8591 * struct vnodeop_desc *a_desc;
8593 * const char * a_name;
8596 * vfs_context_t a_context;
8599 vfs_context_t ctx
= ap
->a_context
;
8600 int options
= ap
->a_options
;
8601 uio_t uio
= ap
->a_uio
;
8602 const char *name
= ap
->a_name
;
8603 struct nfsmount
*nmp
;
8604 struct componentname cn
;
8605 nfsnode_t anp
= NULL
;
8606 int error
= 0, closeerror
= 0, flags
, isrsrcfork
, isfinderinfo
, empty
= 0, i
;
8607 #define FINDERINFOSIZE 32
8608 uint8_t finfo
[FINDERINFOSIZE
];
8610 struct nfs_open_file
*nofp
= NULL
;
8611 char uio_buf
[UIO_SIZEOF(1)];
8613 struct vnop_write_args vwa
;
8615 nmp
= VTONMP(ap
->a_vp
);
8616 if (nfs_mount_gone(nmp
)) {
8620 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
)) {
8624 if ((options
& XATTR_CREATE
) && (options
& XATTR_REPLACE
)) {
8628 /* XXX limitation based on need to back up uio on short write */
8629 if (uio_iovcnt(uio
) > 1) {
8630 printf("nfs4_vnop_setxattr: iovcnt > 1\n");
8634 bzero(&cn
, sizeof(cn
));
8635 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(name
, const, char *);
8636 cn
.cn_namelen
= NFS_STRLEN_INT(name
);
8637 cn
.cn_nameiop
= CREATE
;
8638 cn
.cn_flags
= MAKEENTRY
;
8640 isfinderinfo
= (bcmp(name
, XATTR_FINDERINFO_NAME
, sizeof(XATTR_FINDERINFO_NAME
)) == 0);
8641 isrsrcfork
= isfinderinfo
? 0 : (bcmp(name
, XATTR_RESOURCEFORK_NAME
, sizeof(XATTR_RESOURCEFORK_NAME
)) == 0);
8643 uio_setoffset(uio
, 0);
8646 if (uio_resid(uio
) != sizeof(finfo
)) {
8649 error
= uiomove((char*)&finfo
, sizeof(finfo
), uio
);
8653 /* setting a FinderInfo of all zeroes means remove the FinderInfo */
8655 for (i
= 0, finfop
= (uint32_t*)&finfo
; i
< (int)(sizeof(finfo
) / sizeof(uint32_t)); i
++) {
8661 if (empty
&& !(options
& (XATTR_CREATE
| XATTR_REPLACE
))) {
8662 error
= nfs4_named_attr_remove(VTONFS(ap
->a_vp
), anp
, name
, ctx
);
8663 if (error
== ENOENT
) {
8668 /* first, let's see if we get a create/replace error */
8672 * create/open the xattr
8674 * We need to make sure not to create it if XATTR_REPLACE.
8675 * For all xattrs except the resource fork, we also want to
8676 * truncate the xattr to remove any current data. We'll do
8677 * that by setting the size to 0 on create/open.
8680 if (!(options
& XATTR_REPLACE
)) {
8681 flags
|= NFS_GET_NAMED_ATTR_CREATE
;
8683 if (options
& XATTR_CREATE
) {
8684 flags
|= NFS_GET_NAMED_ATTR_CREATE_GUARDED
;
8687 flags
|= NFS_GET_NAMED_ATTR_TRUNCATE
;
8690 error
= nfs4_named_attr_get(VTONFS(ap
->a_vp
), &cn
, NFS_OPEN_SHARE_ACCESS_BOTH
,
8691 flags
, ctx
, &anp
, &nofp
);
8692 if (!error
&& !anp
) {
8698 /* grab the open state from the get/create/open */
8699 if (nofp
&& !(error
= nfs_open_file_set_busy(nofp
, NULL
))) {
8700 nofp
->nof_flags
&= ~NFS_OPEN_FILE_CREATE
;
8701 nofp
->nof_creator
= NULL
;
8702 nfs_open_file_clear_busy(nofp
);
8705 /* Setting an empty FinderInfo really means remove it, skip to the close/remove */
8706 if (isfinderinfo
&& empty
) {
8711 * Write the data out and flush.
8713 * For FinderInfo, we've already copied the data to finfo, so do I/O from there.
8715 vwa
.a_desc
= &vnop_write_desc
;
8716 vwa
.a_vp
= NFSTOV(anp
);
8719 vwa
.a_context
= ctx
;
8721 auio
= uio_createwithbuffer(1, 0, UIO_SYSSPACE
, UIO_WRITE
, &uio_buf
, sizeof(uio_buf
));
8722 uio_addiov(auio
, (uintptr_t)&finfo
, sizeof(finfo
));
8724 } else if (uio_resid(uio
) > 0) {
8728 error
= nfs_vnop_write(&vwa
);
8730 error
= nfs_flush(anp
, MNT_WAIT
, vfs_context_thread(ctx
), 0);
8734 /* Close the xattr. */
8736 int busyerror
= nfs_open_file_set_busy(nofp
, NULL
);
8737 closeerror
= nfs_close(anp
, nofp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_NONE
, ctx
);
8739 nfs_open_file_clear_busy(nofp
);
8742 if (!error
&& isfinderinfo
&& empty
) { /* Setting an empty FinderInfo really means remove it */
8743 error
= nfs4_named_attr_remove(VTONFS(ap
->a_vp
), anp
, name
, ctx
);
8744 if (error
== ENOENT
) {
8753 vnode_put(NFSTOV(anp
));
8755 if (error
== ENOENT
) {
/*
 * nfs4_vnop_removexattr
 *
 * VNOP: remove extended attribute ap->a_name from vnode ap->a_vp by
 * delegating to nfs4_named_attr_remove().  Requires the mount to support
 * NFSv4 named attributes (NFS_FSFLAG_NAMED_ATTR).
 *
 * NOTE(review): garbled extraction — split lines, fused original line
 * numbers, dropped lines.  Code text preserved byte-identical.
 */
8762 nfs4_vnop_removexattr(
8763 struct vnop_removexattr_args
/* {
8764 * struct vnodeop_desc *a_desc;
8766 * const char * a_name;
8768 * vfs_context_t a_context;
8771 struct nfsmount
*nmp
= VTONMP(ap
->a_vp
);
/* mount gone? (branch body dropped by extraction) */
8774 if (nfs_mount_gone(nmp
)) {
/* named attributes must be supported on this mount */
8777 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
)) {
8781 error
= nfs4_named_attr_remove(VTONFS(ap
->a_vp
), NULL
, ap
->a_name
, ap
->a_context
);
/* NOTE(review): presumably remaps ENOENT for the xattr API; the branch
 * body was dropped by the extraction — confirm against the original. */
8782 if (error
== ENOENT
) {
/*
 * nfs4_vnop_listxattr
 *
 * VNOP: list extended attribute names of ap->a_vp.  Reads the node's
 * named-attribute directory through the directory buffer cache
 * (nfs_buf_get/nfs_buf_readdir) and either counts name bytes into
 * *ap->a_size or copies NUL-terminated names into ap->a_uio.
 *
 * NOTE(review): garbled extraction — split lines, fused original line
 * numbers, dropped lines (braces/returns).  Code text preserved
 * byte-identical; comments annotate intent only.
 */
8789 nfs4_vnop_listxattr(
8790 struct vnop_listxattr_args
/* {
8791 * struct vnodeop_desc *a_desc;
8796 * vfs_context_t a_context;
/* locals: context, node, uio, attr-dir node, mount, attrs, readdir state */
8799 vfs_context_t ctx
= ap
->a_context
;
8800 nfsnode_t np
= VTONFS(ap
->a_vp
);
8801 uio_t uio
= ap
->a_uio
;
8802 nfsnode_t adnp
= NULL
;
8803 struct nfsmount
*nmp
;
8805 struct nfs_vattr
*nvattr
;
8806 uint64_t cookie
, nextcookie
, lbn
= 0;
8807 struct nfsbuf
*bp
= NULL
;
8808 struct nfs_dir_buf_header
*ndbhp
;
8809 struct direntry
*dp
;
/* mount must be alive and support named attributes */
8811 nmp
= VTONMP(ap
->a_vp
);
8812 if (nfs_mount_gone(nmp
)) {
8816 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
)) {
/* cached attrs say the file has no named attributes: cheap out */
8820 MALLOC(nvattr
, struct nfs_vattr
*, sizeof(*nvattr
), M_TEMP
, M_WAITOK
);
8821 error
= nfs_getattr(np
, nvattr
, ctx
, NGA_CACHED
);
8825 if (NFS_BITMAP_ISSET(nvattr
->nva_bitmap
, NFS_FATTR_NAMED_ATTR
) &&
8826 !(nvattr
->nva_flags
& NFS_FFLAG_HAS_NAMED_ATTRS
)) {
/* fetch the named attribute directory while the node is busied */
8831 if ((error
= nfs_node_set_busy(np
, vfs_context_thread(ctx
)))) {
8834 adnp
= nfs4_named_attr_dir_get(np
, 1, ctx
);
8835 nfs_node_clear_busy(np
);
/* invalidate the attr dir's buffers if flagged as needing it */
8840 if ((error
= nfs_node_lock(adnp
))) {
8844 if (adnp
->n_flag
& NNEEDINVALIDATE
) {
8845 adnp
->n_flag
&= ~NNEEDINVALIDATE
;
8847 nfs_node_unlock(adnp
);
8848 error
= nfs_vinvalbuf(NFSTOV(adnp
), 0, ctx
, 1);
8850 error
= nfs_node_lock(adnp
);
8858 * check for need to invalidate when (re)starting at beginning
8860 if (adnp
->n_flag
& NMODIFIED
) {
8862 nfs_node_unlock(adnp
);
8863 if ((error
= nfs_vinvalbuf(NFSTOV(adnp
), 0, ctx
, 1))) {
8867 nfs_node_unlock(adnp
);
8869 /* nfs_getattr() will check changed and purge caches */
8870 if ((error
= nfs_getattr(adnp
, nvattr
, ctx
, NGA_UNCACHED
))) {
/* nothing to do if the caller's buffer is already full */
8874 if (uio
&& (uio_resid(uio
) == 0)) {
/* walk the directory buffers, one NFS_DIRBLKSIZ block at a time */
8879 nextcookie
= lbn
= 0;
8881 while (!error
&& !done
) {
8882 OSAddAtomic64(1, &nfsstats
.biocache_readdirs
);
8883 cookie
= nextcookie
;
8885 error
= nfs_buf_get(adnp
, lbn
, NFS_DIRBLKSIZ
, vfs_context_thread(ctx
), NBLK_READ
, &bp
);
8889 ndbhp
= (struct nfs_dir_buf_header
*)bp
->nb_data
;
/* (re)fill the buffer from the server if not cached/full */
8890 if (!ISSET(bp
->nb_flags
, NB_CACHE
) || !ISSET(ndbhp
->ndbh_flags
, NDB_FULL
)) {
8891 if (!ISSET(bp
->nb_flags
, NB_CACHE
)) { /* initialize the buffer */
8892 ndbhp
->ndbh_flags
= 0;
8893 ndbhp
->ndbh_count
= 0;
8894 ndbhp
->ndbh_entry_end
= sizeof(*ndbhp
);
8895 ndbhp
->ndbh_ncgen
= adnp
->n_ncgen
;
8897 error
= nfs_buf_readdir(bp
, ctx
);
8898 if (error
== NFSERR_DIRBUFDROPPED
) {
8902 nfs_buf_release(bp
, 1);
/* on hard errors, toss the attr dir's cached buffers */
8904 if (error
&& (error
!= ENXIO
) && (error
!= ETIMEDOUT
) && (error
!= EINTR
) && (error
!= ERESTART
)) {
8905 if (!nfs_node_lock(adnp
)) {
8907 nfs_node_unlock(adnp
);
8909 nfs_vinvalbuf(NFSTOV(adnp
), 0, ctx
, 1);
8910 if (error
== NFSERR_BAD_COOKIE
) {
8919 /* go through all the entries copying/counting */
8920 dp
= NFS_DIR_BUF_FIRST_DIRENTRY(bp
);
8921 for (i
= 0; i
< ndbhp
->ndbh_count
; i
++) {
/* skip names the xattr layer reserves (xattr_protected) */
8922 if (!xattr_protected(dp
->d_name
)) {
/* size-only query: just total up name length + NUL */
8924 *ap
->a_size
+= dp
->d_namlen
+ 1;
8925 } else if (uio_resid(uio
) < (dp
->d_namlen
+ 1)) {
/* copy the NUL-terminated name out to the caller */
8928 error
= uiomove(dp
->d_name
, dp
->d_namlen
+ 1, uio
);
8929 if (error
&& (error
!= EFAULT
)) {
8934 nextcookie
= dp
->d_seekoff
;
8935 dp
= NFS_DIRENTRY_NEXT(dp
);
8938 if (i
== ndbhp
->ndbh_count
) {
8939 /* hit end of buffer, move to next buffer */
8941 /* if we also hit EOF, we're done */
8942 if (ISSET(ndbhp
->ndbh_flags
, NDB_EOF
)) {
/* sanity check: a stuck cookie would loop forever */
8946 if (!error
&& !done
&& (nextcookie
== cookie
)) {
8947 printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie
, i
, ndbhp
->ndbh_count
);
8950 nfs_buf_release(bp
, 1);
/* cleanup: drop attr dir ref and free the attr scratch buffer */
8954 vnode_put(NFSTOV(adnp
));
8957 FREE(nvattr
, M_TEMP
);
/*
 * nfs4_vnop_getnamedstream
 *
 * VNOP: look up the named stream ap->a_name of vnode ap->a_vp and, on
 * success, return its vnode through *ap->a_svpp.  Implemented as an
 * NFSv4 named-attribute lookup (nfs4_named_attr_get, LOOKUP intent,
 * no open access).
 *
 * NOTE(review): garbled extraction — split lines, fused original line
 * numbers, dropped lines.  Code text preserved byte-identical.
 */
8963 nfs4_vnop_getnamedstream(
8964 struct vnop_getnamedstream_args
/* {
8965 * struct vnodeop_desc *a_desc;
8968 * const char *a_name;
8969 * enum nsoperation a_operation;
8971 * vfs_context_t a_context;
8974 vfs_context_t ctx
= ap
->a_context
;
8975 struct nfsmount
*nmp
;
8976 struct nfs_vattr
*nvattr
;
8977 struct componentname cn
;
/* mount must be alive and support named attributes */
8981 nmp
= VTONMP(ap
->a_vp
);
8982 if (nfs_mount_gone(nmp
)) {
8986 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
)) {
/* cached attrs say the file has no named attributes: nothing to get */
8990 MALLOC(nvattr
, struct nfs_vattr
*, sizeof(*nvattr
), M_TEMP
, M_WAITOK
);
8991 error
= nfs_getattr(VTONFS(ap
->a_vp
), nvattr
, ctx
, NGA_CACHED
);
8995 if (NFS_BITMAP_ISSET(nvattr
->nva_bitmap
, NFS_FATTR_NAMED_ATTR
) &&
8996 !(nvattr
->nva_flags
& NFS_FFLAG_HAS_NAMED_ATTRS
)) {
/* LOOKUP-intent componentname for the stream's attribute name */
9001 bzero(&cn
, sizeof(cn
));
9002 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(ap
->a_name
, const, char *);
9003 cn
.cn_namelen
= NFS_STRLEN_INT(ap
->a_name
);
9004 cn
.cn_nameiop
= LOOKUP
;
9005 cn
.cn_flags
= MAKEENTRY
;
/* look up the attribute node; absence/ENOENT means no such stream */
9007 error
= nfs4_named_attr_get(VTONFS(ap
->a_vp
), &cn
, NFS_OPEN_SHARE_ACCESS_NONE
,
9008 0, ctx
, &anp
, NULL
);
9009 if ((!error
&& !anp
) || (error
== ENOENT
)) {
/* hand the stream vnode back to the caller */
9012 if (!error
&& anp
) {
9013 *ap
->a_svpp
= NFSTOV(anp
);
9015 vnode_put(NFSTOV(anp
));
9018 FREE(nvattr
, M_TEMP
);
/*
 * nfs4_vnop_makenamedstream
 *
 * VNOP: create the named stream ap->a_name on vnode ap->a_vp and return
 * its vnode through *ap->a_svpp.  Implemented as an NFSv4 named-attribute
 * get with CREATE intent (NFS_GET_NAMED_ATTR_CREATE, read/write access).
 *
 * NOTE(review): garbled extraction — split lines, fused original line
 * numbers, dropped lines.  Code text preserved byte-identical.
 */
9023 nfs4_vnop_makenamedstream(
9024 struct vnop_makenamedstream_args
/* {
9025 * struct vnodeop_desc *a_desc;
9028 * const char *a_name;
9030 * vfs_context_t a_context;
9033 vfs_context_t ctx
= ap
->a_context
;
9034 struct nfsmount
*nmp
;
9035 struct componentname cn
;
/* mount must be alive and support named attributes */
9039 nmp
= VTONMP(ap
->a_vp
);
9040 if (nfs_mount_gone(nmp
)) {
9044 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
)) {
/* CREATE-intent componentname for the stream's attribute name */
9048 bzero(&cn
, sizeof(cn
));
9049 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(ap
->a_name
, const, char *);
9050 cn
.cn_namelen
= NFS_STRLEN_INT(ap
->a_name
);
9051 cn
.cn_nameiop
= CREATE
;
9052 cn
.cn_flags
= MAKEENTRY
;
/* create/open the attribute node with read+write share access */
9054 error
= nfs4_named_attr_get(VTONFS(ap
->a_vp
), &cn
, NFS_OPEN_SHARE_ACCESS_BOTH
,
9055 NFS_GET_NAMED_ATTR_CREATE
, ctx
, &anp
, NULL
);
9056 if ((!error
&& !anp
) || (error
== ENOENT
)) {
/* hand the new stream vnode back to the caller */
9059 if (!error
&& anp
) {
9060 *ap
->a_svpp
= NFSTOV(anp
);
9062 vnode_put(NFSTOV(anp
));
/*
 * nfs4_vnop_removenamedstream
 *
 * VNOP: remove the named stream ap->a_name (vnode ap->a_svp) from
 * vnode ap->a_vp by delegating to nfs4_named_attr_remove().
 *
 * NOTE(review): garbled extraction — split lines, fused original line
 * numbers, dropped lines (e.g. branch bodies, closing brace).  Code text
 * preserved byte-identical.
 */
9068 nfs4_vnop_removenamedstream(
9069 struct vnop_removenamedstream_args
/* {
9070 * struct vnodeop_desc *a_desc;
9073 * const char *a_name;
9075 * vfs_context_t a_context;
9078 struct nfsmount
*nmp
= VTONMP(ap
->a_vp
);
/* NULL-safe conversion of the file and stream vnodes to NFS nodes */
9079 nfsnode_t np
= ap
->a_vp
? VTONFS(ap
->a_vp
) : NULL
;
9080 nfsnode_t anp
= ap
->a_svp
? VTONFS(ap
->a_svp
) : NULL
;
/* mount gone? (branch body dropped by extraction) */
9082 if (nfs_mount_gone(nmp
)) {
9087 * Given that a_svp is a named stream, checking for
9088 * named attribute support is kinda pointless.
9090 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
)) {
/* do the actual removal via the shared named-attribute helper */
9094 return nfs4_named_attr_remove(np
, anp
, ap
->a_name
, ap
->a_context
);
9098 #endif /* CONFIG_NFS4 */
9100 #endif /* CONFIG_NFS_CLIENT */