2 * Copyright (c) 2006-2015 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
30 * vnode op calls for NFS version 4
32 #include <sys/param.h>
33 #include <sys/kernel.h>
34 #include <sys/systm.h>
35 #include <sys/resourcevar.h>
36 #include <sys/proc_internal.h>
37 #include <sys/kauth.h>
38 #include <sys/mount_internal.h>
39 #include <sys/malloc.h>
40 #include <sys/kpi_mbuf.h>
42 #include <sys/vnode_internal.h>
43 #include <sys/dirent.h>
44 #include <sys/fcntl.h>
45 #include <sys/lockf.h>
46 #include <sys/ubc_internal.h>
48 #include <sys/signalvar.h>
49 #include <sys/uio_internal.h>
50 #include <sys/xattr.h>
51 #include <sys/paths.h>
53 #include <vfs/vfs_support.h>
58 #include <kern/clock.h>
59 #include <libkern/OSAtomic.h>
61 #include <miscfs/fifofs/fifo.h>
62 #include <miscfs/specfs/specdev.h>
64 #include <nfs/rpcv2.h>
65 #include <nfs/nfsproto.h>
67 #include <nfs/nfsnode.h>
68 #include <nfs/nfs_gss.h>
69 #include <nfs/nfsmount.h>
70 #include <nfs/nfs_lock.h>
71 #include <nfs/xdr_subs.h>
72 #include <nfs/nfsm_subs.h>
75 #include <netinet/in.h>
76 #include <netinet/in_var.h>
77 #include <vm/vm_kern.h>
79 #include <kern/task.h>
80 #include <kern/sched_prim.h>
83 nfs4_access_rpc(nfsnode_t np
, u_int32_t
*access
, int rpcflags
, vfs_context_t ctx
)
85 int error
= 0, lockerror
= ENOENT
, status
, numops
, slot
;
87 struct nfsm_chain nmreq
, nmrep
;
89 uint32_t access_result
= 0, supported
= 0, missing
;
90 struct nfsmount
*nmp
= NFSTONMP(np
);
91 int nfsvers
= nmp
->nm_vers
;
93 struct nfsreq_secinfo_args si
;
95 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
98 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
99 nfsm_chain_null(&nmreq
);
100 nfsm_chain_null(&nmrep
);
102 // PUTFH, ACCESS, GETATTR
104 nfsm_chain_build_alloc_init(error
, &nmreq
, 17 * NFSX_UNSIGNED
);
105 nfsm_chain_add_compound_header(error
, &nmreq
, "access", nmp
->nm_minor_vers
, numops
);
107 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
108 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
110 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_ACCESS
);
111 nfsm_chain_add_32(error
, &nmreq
, *access
);
113 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
114 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
115 nfsm_chain_build_done(error
, &nmreq
);
116 nfsm_assert(error
, (numops
== 0), EPROTO
);
118 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
119 vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
120 &si
, rpcflags
, &nmrep
, &xid
, &status
);
122 if ((lockerror
= nfs_node_lock(np
)))
124 nfsm_chain_skip_tag(error
, &nmrep
);
125 nfsm_chain_get_32(error
, &nmrep
, numops
);
126 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
127 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_ACCESS
);
128 nfsm_chain_get_32(error
, &nmrep
, supported
);
129 nfsm_chain_get_32(error
, &nmrep
, access_result
);
131 if ((missing
= (*access
& ~supported
))) {
132 /* missing support for something(s) we wanted */
133 if (missing
& NFS_ACCESS_DELETE
) {
135 * If the server doesn't report DELETE (possible
136 * on UNIX systems), we'll assume that it is OK
137 * and just let any subsequent delete action fail
138 * if it really isn't deletable.
140 access_result
|= NFS_ACCESS_DELETE
;
143 /* ".zfs" subdirectories may erroneously give a denied answer for modify/delete */
144 if (nfs_access_dotzfs
) {
145 vnode_t dvp
= NULLVP
;
146 if (np
->n_flag
& NISDOTZFSCHILD
) /* may be able to create/delete snapshot dirs */
147 access_result
|= (NFS_ACCESS_MODIFY
|NFS_ACCESS_EXTEND
|NFS_ACCESS_DELETE
);
148 else if (((dvp
= vnode_getparent(NFSTOV(np
))) != NULLVP
) && (VTONFS(dvp
)->n_flag
& NISDOTZFSCHILD
))
149 access_result
|= NFS_ACCESS_DELETE
; /* may be able to delete snapshot dirs */
153 /* Some servers report DELETE support but erroneously give a denied answer. */
154 if (nfs_access_delete
&& (*access
& NFS_ACCESS_DELETE
) && !(access_result
& NFS_ACCESS_DELETE
))
155 access_result
|= NFS_ACCESS_DELETE
;
156 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
157 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
160 uid
= kauth_cred_getuid(vfs_context_ucred(ctx
));
161 slot
= nfs_node_access_slot(np
, uid
, 1);
162 np
->n_accessuid
[slot
] = uid
;
164 np
->n_accessstamp
[slot
] = now
.tv_sec
;
165 np
->n_access
[slot
] = access_result
;
167 /* pass back the access returned with this request */
168 *access
= np
->n_access
[slot
];
172 nfsm_chain_cleanup(&nmreq
);
173 nfsm_chain_cleanup(&nmrep
);
185 struct nfs_vattr
*nvap
,
188 struct nfsmount
*nmp
= mp
? VFSTONFS(mp
) : NFSTONMP(np
);
189 int error
= 0, status
, nfsvers
, numops
, rpcflags
= 0, acls
;
190 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
191 struct nfsm_chain nmreq
, nmrep
;
192 struct nfsreq_secinfo_args si
;
194 if (nfs_mount_gone(nmp
))
196 nfsvers
= nmp
->nm_vers
;
197 acls
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_ACL
);
199 if (np
&& (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)) {
200 nfs4_default_attrs_for_referral_trigger(VTONFS(np
->n_parent
), NULL
, 0, nvap
, NULL
);
204 if (flags
& NGA_MONITOR
) /* vnode monitor requests should be soft */
205 rpcflags
= R_RECOVER
;
207 if (flags
& NGA_SOFT
) /* Return ETIMEDOUT if server not responding */
210 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
211 nfsm_chain_null(&nmreq
);
212 nfsm_chain_null(&nmrep
);
216 nfsm_chain_build_alloc_init(error
, &nmreq
, 15 * NFSX_UNSIGNED
);
217 nfsm_chain_add_compound_header(error
, &nmreq
, "getattr", nmp
->nm_minor_vers
, numops
);
219 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
220 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, fhp
, fhsize
);
222 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
223 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
224 if ((flags
& NGA_ACL
) && acls
)
225 NFS_BITMAP_SET(bitmap
, NFS_FATTR_ACL
);
226 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
227 nfsm_chain_build_done(error
, &nmreq
);
228 nfsm_assert(error
, (numops
== 0), EPROTO
);
230 error
= nfs_request2(np
, mp
, &nmreq
, NFSPROC4_COMPOUND
,
231 vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
232 NULL
, rpcflags
, &nmrep
, xidp
, &status
);
234 nfsm_chain_skip_tag(error
, &nmrep
);
235 nfsm_chain_get_32(error
, &nmrep
, numops
);
236 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
237 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
239 error
= nfs4_parsefattr(&nmrep
, NULL
, nvap
, NULL
, NULL
, NULL
);
241 if ((flags
& NGA_ACL
) && acls
&& !NFS_BITMAP_ISSET(nvap
->nva_bitmap
, NFS_FATTR_ACL
)) {
242 /* we asked for the ACL but didn't get one... assume there isn't one */
243 NFS_BITMAP_SET(nvap
->nva_bitmap
, NFS_FATTR_ACL
);
244 nvap
->nva_acl
= NULL
;
247 nfsm_chain_cleanup(&nmreq
);
248 nfsm_chain_cleanup(&nmrep
);
253 nfs4_readlink_rpc(nfsnode_t np
, char *buf
, uint32_t *buflenp
, vfs_context_t ctx
)
255 struct nfsmount
*nmp
;
256 int error
= 0, lockerror
= ENOENT
, status
, numops
;
259 struct nfsm_chain nmreq
, nmrep
;
260 struct nfsreq_secinfo_args si
;
263 if (nfs_mount_gone(nmp
))
265 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
267 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
268 nfsm_chain_null(&nmreq
);
269 nfsm_chain_null(&nmrep
);
271 // PUTFH, GETATTR, READLINK
273 nfsm_chain_build_alloc_init(error
, &nmreq
, 16 * NFSX_UNSIGNED
);
274 nfsm_chain_add_compound_header(error
, &nmreq
, "readlink", nmp
->nm_minor_vers
, numops
);
276 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
277 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
279 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
280 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
282 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READLINK
);
283 nfsm_chain_build_done(error
, &nmreq
);
284 nfsm_assert(error
, (numops
== 0), EPROTO
);
286 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
288 if ((lockerror
= nfs_node_lock(np
)))
290 nfsm_chain_skip_tag(error
, &nmrep
);
291 nfsm_chain_get_32(error
, &nmrep
, numops
);
292 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
293 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
294 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, &xid
);
295 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READLINK
);
296 nfsm_chain_get_32(error
, &nmrep
, len
);
298 if (len
>= *buflenp
) {
299 if (np
->n_size
&& (np
->n_size
< *buflenp
))
304 nfsm_chain_get_opaque(error
, &nmrep
, len
, buf
);
310 nfsm_chain_cleanup(&nmreq
);
311 nfsm_chain_cleanup(&nmrep
);
322 struct nfsreq_cbinfo
*cb
,
323 struct nfsreq
**reqp
)
325 struct nfsmount
*nmp
;
326 int error
= 0, nfsvers
, numops
;
328 struct nfsm_chain nmreq
;
329 struct nfsreq_secinfo_args si
;
332 if (nfs_mount_gone(nmp
))
334 nfsvers
= nmp
->nm_vers
;
335 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
338 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
339 nfsm_chain_null(&nmreq
);
341 // PUTFH, READ, GETATTR
343 nfsm_chain_build_alloc_init(error
, &nmreq
, 22 * NFSX_UNSIGNED
);
344 nfsm_chain_add_compound_header(error
, &nmreq
, "read", nmp
->nm_minor_vers
, numops
);
346 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
347 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
349 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READ
);
350 nfs_get_stateid(np
, thd
, cred
, &stateid
);
351 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
352 nfsm_chain_add_64(error
, &nmreq
, offset
);
353 nfsm_chain_add_32(error
, &nmreq
, len
);
355 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
356 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
357 nfsm_chain_build_done(error
, &nmreq
);
358 nfsm_assert(error
, (numops
== 0), EPROTO
);
360 error
= nfs_request_async(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, 0, cb
, reqp
);
362 nfsm_chain_cleanup(&nmreq
);
367 nfs4_read_rpc_async_finish(
374 struct nfsmount
*nmp
;
375 int error
= 0, lockerror
, nfsvers
, numops
, status
, eof
= 0;
378 struct nfsm_chain nmrep
;
381 if (nfs_mount_gone(nmp
)) {
382 nfs_request_async_cancel(req
);
385 nfsvers
= nmp
->nm_vers
;
387 nfsm_chain_null(&nmrep
);
389 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
390 if (error
== EINPROGRESS
) /* async request restarted */
393 if ((lockerror
= nfs_node_lock(np
)))
395 nfsm_chain_skip_tag(error
, &nmrep
);
396 nfsm_chain_get_32(error
, &nmrep
, numops
);
397 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
398 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READ
);
399 nfsm_chain_get_32(error
, &nmrep
, eof
);
400 nfsm_chain_get_32(error
, &nmrep
, retlen
);
402 *lenp
= MIN(retlen
, *lenp
);
403 error
= nfsm_chain_get_uio(&nmrep
, *lenp
, uio
);
405 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
406 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
414 nfsm_chain_cleanup(&nmrep
);
415 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)
416 microuptime(&np
->n_lastio
);
421 nfs4_write_rpc_async(
428 struct nfsreq_cbinfo
*cb
,
429 struct nfsreq
**reqp
)
431 struct nfsmount
*nmp
;
433 int error
= 0, nfsvers
, numops
;
435 struct nfsm_chain nmreq
;
436 struct nfsreq_secinfo_args si
;
439 if (nfs_mount_gone(nmp
))
441 nfsvers
= nmp
->nm_vers
;
442 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
445 /* for async mounts, don't bother sending sync write requests */
446 if ((iomode
!= NFS_WRITE_UNSTABLE
) && nfs_allow_async
&&
447 ((mp
= NFSTOMP(np
))) && (vfs_flags(mp
) & MNT_ASYNC
))
448 iomode
= NFS_WRITE_UNSTABLE
;
450 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
451 nfsm_chain_null(&nmreq
);
453 // PUTFH, WRITE, GETATTR
455 nfsm_chain_build_alloc_init(error
, &nmreq
, 25 * NFSX_UNSIGNED
+ len
);
456 nfsm_chain_add_compound_header(error
, &nmreq
, "write", nmp
->nm_minor_vers
, numops
);
458 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
459 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
461 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_WRITE
);
462 nfs_get_stateid(np
, thd
, cred
, &stateid
);
463 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
464 nfsm_chain_add_64(error
, &nmreq
, uio_offset(uio
));
465 nfsm_chain_add_32(error
, &nmreq
, iomode
);
466 nfsm_chain_add_32(error
, &nmreq
, len
);
468 error
= nfsm_chain_add_uio(&nmreq
, uio
, len
);
470 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
471 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
472 nfsm_chain_build_done(error
, &nmreq
);
473 nfsm_assert(error
, (numops
== 0), EPROTO
);
476 error
= nfs_request_async(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, 0, cb
, reqp
);
478 nfsm_chain_cleanup(&nmreq
);
483 nfs4_write_rpc_async_finish(
490 struct nfsmount
*nmp
;
491 int error
= 0, lockerror
= ENOENT
, nfsvers
, numops
, status
;
492 int committed
= NFS_WRITE_FILESYNC
;
494 u_int64_t xid
, wverf
;
496 struct nfsm_chain nmrep
;
499 if (nfs_mount_gone(nmp
)) {
500 nfs_request_async_cancel(req
);
503 nfsvers
= nmp
->nm_vers
;
505 nfsm_chain_null(&nmrep
);
507 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
508 if (error
== EINPROGRESS
) /* async request restarted */
511 if (nfs_mount_gone(nmp
))
513 if (!error
&& (lockerror
= nfs_node_lock(np
)))
515 nfsm_chain_skip_tag(error
, &nmrep
);
516 nfsm_chain_get_32(error
, &nmrep
, numops
);
517 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
518 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_WRITE
);
519 nfsm_chain_get_32(error
, &nmrep
, rlen
);
524 nfsm_chain_get_32(error
, &nmrep
, committed
);
525 nfsm_chain_get_64(error
, &nmrep
, wverf
);
529 lck_mtx_lock(&nmp
->nm_lock
);
530 if (!(nmp
->nm_state
& NFSSTA_HASWRITEVERF
)) {
531 nmp
->nm_verf
= wverf
;
532 nmp
->nm_state
|= NFSSTA_HASWRITEVERF
;
533 } else if (nmp
->nm_verf
!= wverf
) {
534 nmp
->nm_verf
= wverf
;
536 lck_mtx_unlock(&nmp
->nm_lock
);
537 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
538 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
542 nfsm_chain_cleanup(&nmrep
);
543 if ((committed
!= NFS_WRITE_FILESYNC
) && nfs_allow_async
&&
544 ((mp
= NFSTOMP(np
))) && (vfs_flags(mp
) & MNT_ASYNC
))
545 committed
= NFS_WRITE_FILESYNC
;
546 *iomodep
= committed
;
547 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)
548 microuptime(&np
->n_lastio
);
560 int error
= 0, lockerror
= ENOENT
, remove_error
= 0, status
;
561 struct nfsmount
*nmp
;
564 struct nfsm_chain nmreq
, nmrep
;
565 struct nfsreq_secinfo_args si
;
568 if (nfs_mount_gone(nmp
))
570 nfsvers
= nmp
->nm_vers
;
571 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
573 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
575 nfsm_chain_null(&nmreq
);
576 nfsm_chain_null(&nmrep
);
578 // PUTFH, REMOVE, GETATTR
580 nfsm_chain_build_alloc_init(error
, &nmreq
, 17 * NFSX_UNSIGNED
+ namelen
);
581 nfsm_chain_add_compound_header(error
, &nmreq
, "remove", nmp
->nm_minor_vers
, numops
);
583 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
584 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
586 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_REMOVE
);
587 nfsm_chain_add_name(error
, &nmreq
, name
, namelen
, nmp
);
589 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
590 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
591 nfsm_chain_build_done(error
, &nmreq
);
592 nfsm_assert(error
, (numops
== 0), EPROTO
);
595 error
= nfs_request2(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, 0, &nmrep
, &xid
, &status
);
597 if ((lockerror
= nfs_node_lock(dnp
)))
599 nfsm_chain_skip_tag(error
, &nmrep
);
600 nfsm_chain_get_32(error
, &nmrep
, numops
);
601 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
602 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_REMOVE
);
603 remove_error
= error
;
604 nfsm_chain_check_change_info(error
, &nmrep
, dnp
);
605 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
606 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
607 if (error
&& !lockerror
)
608 NATTRINVALIDATE(dnp
);
610 nfsm_chain_cleanup(&nmreq
);
611 nfsm_chain_cleanup(&nmrep
);
614 dnp
->n_flag
|= NMODIFIED
;
615 nfs_node_unlock(dnp
);
617 if (error
== NFSERR_GRACE
) {
618 tsleep(&nmp
->nm_state
, (PZERO
-1), "nfsgrace", 2*hz
);
622 return (remove_error
);
635 int error
= 0, lockerror
= ENOENT
, status
, nfsvers
, numops
;
636 struct nfsmount
*nmp
;
637 u_int64_t xid
, savedxid
;
638 struct nfsm_chain nmreq
, nmrep
;
639 struct nfsreq_secinfo_args si
;
641 nmp
= NFSTONMP(fdnp
);
642 if (nfs_mount_gone(nmp
))
644 nfsvers
= nmp
->nm_vers
;
645 if (fdnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
647 if (tdnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
650 NFSREQ_SECINFO_SET(&si
, fdnp
, NULL
, 0, NULL
, 0);
651 nfsm_chain_null(&nmreq
);
652 nfsm_chain_null(&nmrep
);
654 // PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM)
656 nfsm_chain_build_alloc_init(error
, &nmreq
, 30 * NFSX_UNSIGNED
+ fnamelen
+ tnamelen
);
657 nfsm_chain_add_compound_header(error
, &nmreq
, "rename", nmp
->nm_minor_vers
, numops
);
659 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
660 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, fdnp
->n_fhp
, fdnp
->n_fhsize
);
662 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
664 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
665 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, tdnp
->n_fhp
, tdnp
->n_fhsize
);
667 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RENAME
);
668 nfsm_chain_add_name(error
, &nmreq
, fnameptr
, fnamelen
, nmp
);
669 nfsm_chain_add_name(error
, &nmreq
, tnameptr
, tnamelen
, nmp
);
671 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
672 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, tdnp
);
674 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
676 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
677 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, fdnp
);
678 nfsm_chain_build_done(error
, &nmreq
);
679 nfsm_assert(error
, (numops
== 0), EPROTO
);
682 error
= nfs_request(fdnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
684 if ((lockerror
= nfs_node_lock2(fdnp
, tdnp
)))
686 nfsm_chain_skip_tag(error
, &nmrep
);
687 nfsm_chain_get_32(error
, &nmrep
, numops
);
688 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
689 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
690 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
691 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RENAME
);
692 nfsm_chain_check_change_info(error
, &nmrep
, fdnp
);
693 nfsm_chain_check_change_info(error
, &nmrep
, tdnp
);
694 /* directory attributes: if we don't get them, make sure to invalidate */
695 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
697 nfsm_chain_loadattr(error
, &nmrep
, tdnp
, nfsvers
, &xid
);
698 if (error
&& !lockerror
)
699 NATTRINVALIDATE(tdnp
);
700 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
701 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
703 nfsm_chain_loadattr(error
, &nmrep
, fdnp
, nfsvers
, &xid
);
704 if (error
&& !lockerror
)
705 NATTRINVALIDATE(fdnp
);
707 nfsm_chain_cleanup(&nmreq
);
708 nfsm_chain_cleanup(&nmrep
);
710 fdnp
->n_flag
|= NMODIFIED
;
711 tdnp
->n_flag
|= NMODIFIED
;
712 nfs_node_unlock2(fdnp
, tdnp
);
718 * NFS V4 readdir RPC.
721 nfs4_readdir_rpc(nfsnode_t dnp
, struct nfsbuf
*bp
, vfs_context_t ctx
)
723 struct nfsmount
*nmp
;
724 int error
= 0, lockerror
, nfsvers
, namedattr
, rdirplus
, bigcookies
, numops
;
725 int i
, status
, more_entries
= 1, eof
, bp_dropped
= 0;
726 uint32_t nmreaddirsize
, nmrsize
;
727 uint32_t namlen
, skiplen
, fhlen
, xlen
, attrlen
, reclen
, space_free
, space_needed
;
728 uint64_t cookie
, lastcookie
, xid
, savedxid
;
729 struct nfsm_chain nmreq
, nmrep
, nmrepsave
;
731 struct nfs_vattr nvattr
, *nvattrp
;
732 struct nfs_dir_buf_header
*ndbhp
;
734 char *padstart
, padlen
;
736 uint32_t entry_attrs
[NFS_ATTR_BITMAP_LEN
];
738 struct nfsreq_secinfo_args si
;
741 if (nfs_mount_gone(nmp
))
743 nfsvers
= nmp
->nm_vers
;
744 nmreaddirsize
= nmp
->nm_readdirsize
;
745 nmrsize
= nmp
->nm_rsize
;
746 bigcookies
= nmp
->nm_state
& NFSSTA_BIGCOOKIES
;
747 namedattr
= (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) ? 1 : 0;
748 rdirplus
= (NMFLAG(nmp
, RDIRPLUS
) || namedattr
) ? 1 : 0;
749 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
751 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
754 * Set up attribute request for entries.
755 * For READDIRPLUS functionality, get everything.
756 * Otherwise, just get what we need for struct direntry.
760 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, entry_attrs
);
761 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_FILEHANDLE
);
764 NFS_CLEAR_ATTRIBUTES(entry_attrs
);
765 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_TYPE
);
766 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_FILEID
);
767 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_MOUNTED_ON_FILEID
);
769 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_RDATTR_ERROR
);
771 /* lock to protect access to cookie verifier */
772 if ((lockerror
= nfs_node_lock(dnp
)))
775 /* determine cookie to use, and move dp to the right offset */
776 ndbhp
= (struct nfs_dir_buf_header
*)bp
->nb_data
;
777 dp
= NFS_DIR_BUF_FIRST_DIRENTRY(bp
);
778 if (ndbhp
->ndbh_count
) {
779 for (i
=0; i
< ndbhp
->ndbh_count
-1; i
++)
780 dp
= NFS_DIRENTRY_NEXT(dp
);
781 cookie
= dp
->d_seekoff
;
782 dp
= NFS_DIRENTRY_NEXT(dp
);
784 cookie
= bp
->nb_lblkno
;
785 /* increment with every buffer read */
786 OSAddAtomic64(1, &nfsstats
.readdir_bios
);
791 * The NFS client is responsible for the "." and ".." entries in the
792 * directory. So, we put them at the start of the first buffer.
793 * Don't bother for attribute directories.
795 if (((bp
->nb_lblkno
== 0) && (ndbhp
->ndbh_count
== 0)) &&
796 !(dnp
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
798 fhlen
= rdirplus
? fh
.fh_len
+ 1 : 0;
799 xlen
= rdirplus
? (fhlen
+ sizeof(time_t)) : 0;
802 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
804 bzero(&dp
->d_name
[namlen
+1], xlen
);
805 dp
->d_namlen
= namlen
;
806 strlcpy(dp
->d_name
, ".", namlen
+1);
807 dp
->d_fileno
= dnp
->n_vattr
.nva_fileid
;
809 dp
->d_reclen
= reclen
;
811 padstart
= dp
->d_name
+ dp
->d_namlen
+ 1 + xlen
;
812 dp
= NFS_DIRENTRY_NEXT(dp
);
813 padlen
= (char*)dp
- padstart
;
815 bzero(padstart
, padlen
);
816 if (rdirplus
) /* zero out attributes */
817 bzero(NFS_DIR_BUF_NVATTR(bp
, 0), sizeof(struct nfs_vattr
));
821 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
823 bzero(&dp
->d_name
[namlen
+1], xlen
);
824 dp
->d_namlen
= namlen
;
825 strlcpy(dp
->d_name
, "..", namlen
+1);
827 dp
->d_fileno
= VTONFS(dnp
->n_parent
)->n_vattr
.nva_fileid
;
829 dp
->d_fileno
= dnp
->n_vattr
.nva_fileid
;
831 dp
->d_reclen
= reclen
;
833 padstart
= dp
->d_name
+ dp
->d_namlen
+ 1 + xlen
;
834 dp
= NFS_DIRENTRY_NEXT(dp
);
835 padlen
= (char*)dp
- padstart
;
837 bzero(padstart
, padlen
);
838 if (rdirplus
) /* zero out attributes */
839 bzero(NFS_DIR_BUF_NVATTR(bp
, 1), sizeof(struct nfs_vattr
));
841 ndbhp
->ndbh_entry_end
= (char*)dp
- bp
->nb_data
;
842 ndbhp
->ndbh_count
= 2;
846 * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
847 * the buffer is full (or we hit EOF). Then put the remainder of the
848 * results in the next buffer(s).
850 nfsm_chain_null(&nmreq
);
851 nfsm_chain_null(&nmrep
);
852 while (nfs_dir_buf_freespace(bp
, rdirplus
) && !(ndbhp
->ndbh_flags
& NDB_FULL
)) {
854 // PUTFH, GETATTR, READDIR
856 nfsm_chain_build_alloc_init(error
, &nmreq
, 26 * NFSX_UNSIGNED
);
857 nfsm_chain_add_compound_header(error
, &nmreq
, tag
, nmp
->nm_minor_vers
, numops
);
859 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
860 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
862 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
863 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
865 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READDIR
);
866 nfsm_chain_add_64(error
, &nmreq
, (cookie
<= 2) ? 0 : cookie
);
867 nfsm_chain_add_64(error
, &nmreq
, dnp
->n_cookieverf
);
868 nfsm_chain_add_32(error
, &nmreq
, nmreaddirsize
);
869 nfsm_chain_add_32(error
, &nmreq
, nmrsize
);
870 nfsm_chain_add_bitmap_supported(error
, &nmreq
, entry_attrs
, nmp
, dnp
);
871 nfsm_chain_build_done(error
, &nmreq
);
872 nfsm_assert(error
, (numops
== 0), EPROTO
);
873 nfs_node_unlock(dnp
);
875 error
= nfs_request(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
877 if ((lockerror
= nfs_node_lock(dnp
)))
881 nfsm_chain_skip_tag(error
, &nmrep
);
882 nfsm_chain_get_32(error
, &nmrep
, numops
);
883 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
884 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
885 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
886 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READDIR
);
887 nfsm_chain_get_64(error
, &nmrep
, dnp
->n_cookieverf
);
888 nfsm_chain_get_32(error
, &nmrep
, more_entries
);
891 nfs_node_unlock(dnp
);
899 /* loop through the entries packing them into the buffer */
900 while (more_entries
) {
901 /* Entry: COOKIE, NAME, FATTR */
902 nfsm_chain_get_64(error
, &nmrep
, cookie
);
903 nfsm_chain_get_32(error
, &nmrep
, namlen
);
905 if (!bigcookies
&& (cookie
>> 32) && (nmp
== NFSTONMP(dnp
))) {
906 /* we've got a big cookie, make sure flag is set */
907 lck_mtx_lock(&nmp
->nm_lock
);
908 nmp
->nm_state
|= NFSSTA_BIGCOOKIES
;
909 lck_mtx_unlock(&nmp
->nm_lock
);
912 /* just truncate names that don't fit in direntry.d_name */
917 if (namlen
> (sizeof(dp
->d_name
)-1)) {
918 skiplen
= namlen
- sizeof(dp
->d_name
) + 1;
919 namlen
= sizeof(dp
->d_name
) - 1;
923 /* guess that fh size will be same as parent */
924 fhlen
= rdirplus
? (1 + dnp
->n_fhsize
) : 0;
925 xlen
= rdirplus
? (fhlen
+ sizeof(time_t)) : 0;
926 attrlen
= rdirplus
? sizeof(struct nfs_vattr
) : 0;
927 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
928 space_needed
= reclen
+ attrlen
;
929 space_free
= nfs_dir_buf_freespace(bp
, rdirplus
);
930 if (space_needed
> space_free
) {
932 * We still have entries to pack, but we've
933 * run out of room in the current buffer.
934 * So we need to move to the next buffer.
935 * The block# for the next buffer is the
936 * last cookie in the current buffer.
939 ndbhp
->ndbh_flags
|= NDB_FULL
;
940 nfs_buf_release(bp
, 0);
943 error
= nfs_buf_get(dnp
, lastcookie
, NFS_DIRBLKSIZ
, vfs_context_thread(ctx
), NBLK_READ
, &bp
);
945 /* initialize buffer */
946 ndbhp
= (struct nfs_dir_buf_header
*)bp
->nb_data
;
947 ndbhp
->ndbh_flags
= 0;
948 ndbhp
->ndbh_count
= 0;
949 ndbhp
->ndbh_entry_end
= sizeof(*ndbhp
);
950 ndbhp
->ndbh_ncgen
= dnp
->n_ncgen
;
951 space_free
= nfs_dir_buf_freespace(bp
, rdirplus
);
952 dp
= NFS_DIR_BUF_FIRST_DIRENTRY(bp
);
953 /* increment with every buffer read */
954 OSAddAtomic64(1, &nfsstats
.readdir_bios
);
957 dp
->d_fileno
= cookie
; /* placeholder */
958 dp
->d_seekoff
= cookie
;
959 dp
->d_namlen
= namlen
;
960 dp
->d_reclen
= reclen
;
961 dp
->d_type
= DT_UNKNOWN
;
962 nfsm_chain_get_opaque(error
, &nmrep
, namlen
, dp
->d_name
);
964 dp
->d_name
[namlen
] = '\0';
966 nfsm_chain_adv(error
, &nmrep
,
967 nfsm_rndup(namlen
+ skiplen
) - nfsm_rndup(namlen
));
969 nvattrp
= rdirplus
? NFS_DIR_BUF_NVATTR(bp
, ndbhp
->ndbh_count
) : &nvattr
;
970 error
= nfs4_parsefattr(&nmrep
, NULL
, nvattrp
, &fh
, NULL
, NULL
);
971 if (!error
&& NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_ACL
)) {
972 /* we do NOT want ACLs returned to us here */
973 NFS_BITMAP_CLR(nvattrp
->nva_bitmap
, NFS_FATTR_ACL
);
974 if (nvattrp
->nva_acl
) {
975 kauth_acl_free(nvattrp
->nva_acl
);
976 nvattrp
->nva_acl
= NULL
;
979 if (error
&& NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_RDATTR_ERROR
)) {
980 /* OK, we may not have gotten all of the attributes but we will use what we can. */
981 if ((error
== NFSERR_MOVED
) || (error
== NFSERR_INVAL
)) {
982 /* set this up to look like a referral trigger */
983 nfs4_default_attrs_for_referral_trigger(dnp
, dp
->d_name
, namlen
, nvattrp
, &fh
);
987 /* check for more entries after this one */
988 nfsm_chain_get_32(error
, &nmrep
, more_entries
);
991 /* Skip any "." and ".." entries returned from server. */
992 /* Also skip any bothersome named attribute entries. */
993 if (((dp
->d_name
[0] == '.') && ((namlen
== 1) || ((namlen
== 2) && (dp
->d_name
[1] == '.')))) ||
994 (namedattr
&& (namlen
== 11) && (!strcmp(dp
->d_name
, "SUNWattr_ro") || !strcmp(dp
->d_name
, "SUNWattr_rw")))) {
999 if (NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_TYPE
))
1000 dp
->d_type
= IFTODT(VTTOIF(nvattrp
->nva_type
));
1001 if (NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_FILEID
))
1002 dp
->d_fileno
= nvattrp
->nva_fileid
;
1004 /* fileid is already in d_fileno, so stash xid in attrs */
1005 nvattrp
->nva_fileid
= savedxid
;
1006 if (NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
1007 fhlen
= fh
.fh_len
+ 1;
1008 xlen
= fhlen
+ sizeof(time_t);
1009 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
1010 space_needed
= reclen
+ attrlen
;
1011 if (space_needed
> space_free
) {
1012 /* didn't actually have the room... move on to next buffer */
1016 /* pack the file handle into the record */
1017 dp
->d_name
[dp
->d_namlen
+1] = fh
.fh_len
;
1018 bcopy(fh
.fh_data
, &dp
->d_name
[dp
->d_namlen
+2], fh
.fh_len
);
1020 /* mark the file handle invalid */
1022 fhlen
= fh
.fh_len
+ 1;
1023 xlen
= fhlen
+ sizeof(time_t);
1024 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
1025 bzero(&dp
->d_name
[dp
->d_namlen
+1], fhlen
);
1027 *(time_t*)(&dp
->d_name
[dp
->d_namlen
+1+fhlen
]) = now
.tv_sec
;
1028 dp
->d_reclen
= reclen
;
1030 padstart
= dp
->d_name
+ dp
->d_namlen
+ 1 + xlen
;
1031 ndbhp
->ndbh_count
++;
1032 lastcookie
= cookie
;
1034 /* advance to next direntry in buffer */
1035 dp
= NFS_DIRENTRY_NEXT(dp
);
1036 ndbhp
->ndbh_entry_end
= (char*)dp
- bp
->nb_data
;
1037 /* zero out the pad bytes */
1038 padlen
= (char*)dp
- padstart
;
1040 bzero(padstart
, padlen
);
1042 /* Finally, get the eof boolean */
1043 nfsm_chain_get_32(error
, &nmrep
, eof
);
1046 ndbhp
->ndbh_flags
|= (NDB_FULL
|NDB_EOF
);
1047 nfs_node_lock_force(dnp
);
1048 dnp
->n_eofcookie
= lastcookie
;
1049 nfs_node_unlock(dnp
);
1054 nfs_buf_release(bp
, 0);
1058 if ((lockerror
= nfs_node_lock(dnp
)))
1061 nfsm_chain_cleanup(&nmrep
);
1062 nfsm_chain_null(&nmreq
);
1065 if (bp_dropped
&& bp
)
1066 nfs_buf_release(bp
, 0);
1068 nfs_node_unlock(dnp
);
1069 nfsm_chain_cleanup(&nmreq
);
1070 nfsm_chain_cleanup(&nmrep
);
1071 return (bp_dropped
? NFSERR_DIRBUFDROPPED
: error
);
1075 nfs4_lookup_rpc_async(
1080 struct nfsreq
**reqp
)
1082 int error
= 0, isdotdot
= 0, nfsvers
, numops
;
1083 struct nfsm_chain nmreq
;
1084 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
1085 struct nfsmount
*nmp
;
1086 struct nfsreq_secinfo_args si
;
1088 nmp
= NFSTONMP(dnp
);
1089 if (nfs_mount_gone(nmp
))
1091 nfsvers
= nmp
->nm_vers
;
1092 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
1095 if ((name
[0] == '.') && (name
[1] == '.') && (namelen
== 2)) {
1097 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
1099 NFSREQ_SECINFO_SET(&si
, dnp
, dnp
->n_fhp
, dnp
->n_fhsize
, name
, namelen
);
1102 nfsm_chain_null(&nmreq
);
1104 // PUTFH, GETATTR, LOOKUP(P), GETFH, GETATTR (FH)
1106 nfsm_chain_build_alloc_init(error
, &nmreq
, 20 * NFSX_UNSIGNED
+ namelen
);
1107 nfsm_chain_add_compound_header(error
, &nmreq
, "lookup", nmp
->nm_minor_vers
, numops
);
1109 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1110 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
1112 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1113 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
1116 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOOKUPP
);
1118 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOOKUP
);
1119 nfsm_chain_add_name(error
, &nmreq
, name
, namelen
, nmp
);
1122 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETFH
);
1124 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1125 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
1126 /* some ".zfs" directories can't handle being asked for some attributes */
1127 if ((dnp
->n_flag
& NISDOTZFS
) && !isdotdot
)
1128 NFS_BITMAP_CLR(bitmap
, NFS_FATTR_NAMED_ATTR
);
1129 if ((dnp
->n_flag
& NISDOTZFSCHILD
) && isdotdot
)
1130 NFS_BITMAP_CLR(bitmap
, NFS_FATTR_NAMED_ATTR
);
1131 if (((namelen
== 4) && (name
[0] == '.') && (name
[1] == 'z') && (name
[2] == 'f') && (name
[3] == 's')))
1132 NFS_BITMAP_CLR(bitmap
, NFS_FATTR_NAMED_ATTR
);
1133 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, NULL
);
1134 nfsm_chain_build_done(error
, &nmreq
);
1135 nfsm_assert(error
, (numops
== 0), EPROTO
);
1137 error
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
1138 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &si
, 0, NULL
, reqp
);
1140 nfsm_chain_cleanup(&nmreq
);
1146 nfs4_lookup_rpc_async_finish(
1154 struct nfs_vattr
*nvap
)
1156 int error
= 0, lockerror
= ENOENT
, status
, nfsvers
, numops
, isdotdot
= 0;
1157 uint32_t op
= NFS_OP_LOOKUP
;
1159 struct nfsmount
*nmp
;
1160 struct nfsm_chain nmrep
;
1162 nmp
= NFSTONMP(dnp
);
1165 nfsvers
= nmp
->nm_vers
;
1166 if ((name
[0] == '.') && (name
[1] == '.') && (namelen
== 2))
1169 nfsm_chain_null(&nmrep
);
1171 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
1173 if ((lockerror
= nfs_node_lock(dnp
)))
1175 nfsm_chain_skip_tag(error
, &nmrep
);
1176 nfsm_chain_get_32(error
, &nmrep
, numops
);
1177 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1178 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1181 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
1183 nfsm_chain_op_check(error
, &nmrep
, (isdotdot
? NFS_OP_LOOKUPP
: NFS_OP_LOOKUP
));
1184 nfsmout_if(error
|| !fhp
|| !nvap
);
1185 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETFH
);
1186 nfsm_chain_get_32(error
, &nmrep
, fhp
->fh_len
);
1187 nfsm_chain_get_opaque(error
, &nmrep
, fhp
->fh_len
, fhp
->fh_data
);
1188 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1189 if ((error
== NFSERR_MOVED
) || (error
== NFSERR_INVAL
)) {
1190 /* set this up to look like a referral trigger */
1191 nfs4_default_attrs_for_referral_trigger(dnp
, name
, namelen
, nvap
, fhp
);
1195 error
= nfs4_parsefattr(&nmrep
, NULL
, nvap
, NULL
, NULL
, NULL
);
1199 nfs_node_unlock(dnp
);
1200 nfsm_chain_cleanup(&nmrep
);
1201 if (!error
&& (op
== NFS_OP_LOOKUP
) && (nmp
->nm_state
& NFSSTA_NEEDSECINFO
)) {
1202 /* We still need to get SECINFO to set default for mount. */
1203 /* Do so for the first LOOKUP that returns successfully. */
1206 sec
.count
= NX_MAX_SEC_FLAVORS
;
1207 error
= nfs4_secinfo_rpc(nmp
, &req
->r_secinfo
, vfs_context_ucred(ctx
), sec
.flavors
, &sec
.count
);
1208 /* [sigh] some implementations return "illegal" error for unsupported ops */
1209 if (error
== NFSERR_OP_ILLEGAL
)
1212 /* set our default security flavor to the first in the list */
1213 lck_mtx_lock(&nmp
->nm_lock
);
1215 nmp
->nm_auth
= sec
.flavors
[0];
1216 nmp
->nm_state
&= ~NFSSTA_NEEDSECINFO
;
1217 lck_mtx_unlock(&nmp
->nm_lock
);
1231 struct nfsmount
*nmp
;
1232 int error
= 0, lockerror
, status
, nfsvers
, numops
;
1233 u_int64_t xid
, newwverf
;
1235 struct nfsm_chain nmreq
, nmrep
;
1236 struct nfsreq_secinfo_args si
;
1239 FSDBG(521, np
, offset
, count
, nmp
? nmp
->nm_state
: 0);
1240 if (nfs_mount_gone(nmp
))
1242 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
1244 if (!(nmp
->nm_state
& NFSSTA_HASWRITEVERF
))
1246 nfsvers
= nmp
->nm_vers
;
1248 if (count
> UINT32_MAX
)
1253 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
1254 nfsm_chain_null(&nmreq
);
1255 nfsm_chain_null(&nmrep
);
1257 // PUTFH, COMMIT, GETATTR
1259 nfsm_chain_build_alloc_init(error
, &nmreq
, 19 * NFSX_UNSIGNED
);
1260 nfsm_chain_add_compound_header(error
, &nmreq
, "commit", nmp
->nm_minor_vers
, numops
);
1262 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1263 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
1265 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_COMMIT
);
1266 nfsm_chain_add_64(error
, &nmreq
, offset
);
1267 nfsm_chain_add_32(error
, &nmreq
, count32
);
1269 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1270 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
1271 nfsm_chain_build_done(error
, &nmreq
);
1272 nfsm_assert(error
, (numops
== 0), EPROTO
);
1274 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
1275 current_thread(), cred
, &si
, 0, &nmrep
, &xid
, &status
);
1277 if ((lockerror
= nfs_node_lock(np
)))
1279 nfsm_chain_skip_tag(error
, &nmrep
);
1280 nfsm_chain_get_32(error
, &nmrep
, numops
);
1281 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1282 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_COMMIT
);
1283 nfsm_chain_get_64(error
, &nmrep
, newwverf
);
1284 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1285 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
1287 nfs_node_unlock(np
);
1289 lck_mtx_lock(&nmp
->nm_lock
);
1290 if (nmp
->nm_verf
!= newwverf
)
1291 nmp
->nm_verf
= newwverf
;
1292 if (wverf
!= newwverf
)
1293 error
= NFSERR_STALEWRITEVERF
;
1294 lck_mtx_unlock(&nmp
->nm_lock
);
1296 nfsm_chain_cleanup(&nmreq
);
1297 nfsm_chain_cleanup(&nmrep
);
1304 struct nfs_fsattr
*nfsap
,
1308 int error
= 0, lockerror
, status
, nfsvers
, numops
;
1309 struct nfsm_chain nmreq
, nmrep
;
1310 struct nfsmount
*nmp
= NFSTONMP(np
);
1311 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
1312 struct nfs_vattr nvattr
;
1313 struct nfsreq_secinfo_args si
;
1315 if (nfs_mount_gone(nmp
))
1317 nfsvers
= nmp
->nm_vers
;
1318 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
1321 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
1322 NVATTR_INIT(&nvattr
);
1323 nfsm_chain_null(&nmreq
);
1324 nfsm_chain_null(&nmrep
);
1326 /* NFSv4: fetch "pathconf" info for this node */
1329 nfsm_chain_build_alloc_init(error
, &nmreq
, 16 * NFSX_UNSIGNED
);
1330 nfsm_chain_add_compound_header(error
, &nmreq
, "pathconf", nmp
->nm_minor_vers
, numops
);
1332 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1333 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
1335 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1336 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
1337 NFS_BITMAP_SET(bitmap
, NFS_FATTR_MAXLINK
);
1338 NFS_BITMAP_SET(bitmap
, NFS_FATTR_MAXNAME
);
1339 NFS_BITMAP_SET(bitmap
, NFS_FATTR_NO_TRUNC
);
1340 NFS_BITMAP_SET(bitmap
, NFS_FATTR_CHOWN_RESTRICTED
);
1341 NFS_BITMAP_SET(bitmap
, NFS_FATTR_CASE_INSENSITIVE
);
1342 NFS_BITMAP_SET(bitmap
, NFS_FATTR_CASE_PRESERVING
);
1343 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
1344 nfsm_chain_build_done(error
, &nmreq
);
1345 nfsm_assert(error
, (numops
== 0), EPROTO
);
1347 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
1349 nfsm_chain_skip_tag(error
, &nmrep
);
1350 nfsm_chain_get_32(error
, &nmrep
, numops
);
1351 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1352 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1354 error
= nfs4_parsefattr(&nmrep
, nfsap
, &nvattr
, NULL
, NULL
, NULL
);
1356 if ((lockerror
= nfs_node_lock(np
)))
1359 nfs_loadattrcache(np
, &nvattr
, &xid
, 0);
1361 nfs_node_unlock(np
);
1363 NVATTR_CLEANUP(&nvattr
);
1364 nfsm_chain_cleanup(&nmreq
);
1365 nfsm_chain_cleanup(&nmrep
);
1371 struct vnop_getattr_args
/* {
1372 struct vnodeop_desc *a_desc;
1374 struct vnode_attr *a_vap;
1375 vfs_context_t a_context;
1378 struct vnode_attr
*vap
= ap
->a_vap
;
1379 struct nfsmount
*nmp
;
1380 struct nfs_vattr nva
;
1381 int error
, acls
, ngaflags
;
1383 nmp
= VTONMP(ap
->a_vp
);
1384 if (nfs_mount_gone(nmp
))
1386 acls
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_ACL
);
1388 ngaflags
= NGA_CACHED
;
1389 if (VATTR_IS_ACTIVE(vap
, va_acl
) && acls
)
1390 ngaflags
|= NGA_ACL
;
1391 error
= nfs_getattr(VTONFS(ap
->a_vp
), &nva
, ap
->a_context
, ngaflags
);
1395 /* copy what we have in nva to *a_vap */
1396 if (VATTR_IS_ACTIVE(vap
, va_rdev
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_RAWDEV
)) {
1397 dev_t rdev
= makedev(nva
.nva_rawdev
.specdata1
, nva
.nva_rawdev
.specdata2
);
1398 VATTR_RETURN(vap
, va_rdev
, rdev
);
1400 if (VATTR_IS_ACTIVE(vap
, va_nlink
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_NUMLINKS
))
1401 VATTR_RETURN(vap
, va_nlink
, nva
.nva_nlink
);
1402 if (VATTR_IS_ACTIVE(vap
, va_data_size
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_SIZE
))
1403 VATTR_RETURN(vap
, va_data_size
, nva
.nva_size
);
1404 // VATTR_RETURN(vap, va_data_alloc, ???);
1405 // VATTR_RETURN(vap, va_total_size, ???);
1406 if (VATTR_IS_ACTIVE(vap
, va_total_alloc
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_SPACE_USED
))
1407 VATTR_RETURN(vap
, va_total_alloc
, nva
.nva_bytes
);
1408 if (VATTR_IS_ACTIVE(vap
, va_uid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_OWNER
))
1409 VATTR_RETURN(vap
, va_uid
, nva
.nva_uid
);
1410 if (VATTR_IS_ACTIVE(vap
, va_uuuid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_OWNER
))
1411 VATTR_RETURN(vap
, va_uuuid
, nva
.nva_uuuid
);
1412 if (VATTR_IS_ACTIVE(vap
, va_gid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_OWNER_GROUP
))
1413 VATTR_RETURN(vap
, va_gid
, nva
.nva_gid
);
1414 if (VATTR_IS_ACTIVE(vap
, va_guuid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_OWNER_GROUP
))
1415 VATTR_RETURN(vap
, va_guuid
, nva
.nva_guuid
);
1416 if (VATTR_IS_ACTIVE(vap
, va_mode
)) {
1417 if (NMFLAG(nmp
, ACLONLY
) || !NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_MODE
))
1418 VATTR_RETURN(vap
, va_mode
, 0777);
1420 VATTR_RETURN(vap
, va_mode
, nva
.nva_mode
);
1422 if (VATTR_IS_ACTIVE(vap
, va_flags
) &&
1423 (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_ARCHIVE
) ||
1424 NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_HIDDEN
) ||
1425 (nva
.nva_flags
& NFS_FFLAG_TRIGGER
))) {
1427 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_ARCHIVE
) &&
1428 (nva
.nva_flags
& NFS_FFLAG_ARCHIVED
))
1429 flags
|= SF_ARCHIVED
;
1430 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_HIDDEN
) &&
1431 (nva
.nva_flags
& NFS_FFLAG_HIDDEN
))
1433 VATTR_RETURN(vap
, va_flags
, flags
);
1435 if (VATTR_IS_ACTIVE(vap
, va_create_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_CREATE
)) {
1436 vap
->va_create_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_CREATE
];
1437 vap
->va_create_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_CREATE
];
1438 VATTR_SET_SUPPORTED(vap
, va_create_time
);
1440 if (VATTR_IS_ACTIVE(vap
, va_access_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_ACCESS
)) {
1441 vap
->va_access_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_ACCESS
];
1442 vap
->va_access_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_ACCESS
];
1443 VATTR_SET_SUPPORTED(vap
, va_access_time
);
1445 if (VATTR_IS_ACTIVE(vap
, va_modify_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_MODIFY
)) {
1446 vap
->va_modify_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_MODIFY
];
1447 vap
->va_modify_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_MODIFY
];
1448 VATTR_SET_SUPPORTED(vap
, va_modify_time
);
1450 if (VATTR_IS_ACTIVE(vap
, va_change_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_METADATA
)) {
1451 vap
->va_change_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_CHANGE
];
1452 vap
->va_change_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_CHANGE
];
1453 VATTR_SET_SUPPORTED(vap
, va_change_time
);
1455 if (VATTR_IS_ACTIVE(vap
, va_backup_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_BACKUP
)) {
1456 vap
->va_backup_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_BACKUP
];
1457 vap
->va_backup_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_BACKUP
];
1458 VATTR_SET_SUPPORTED(vap
, va_backup_time
);
1460 if (VATTR_IS_ACTIVE(vap
, va_fileid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_FILEID
))
1461 VATTR_RETURN(vap
, va_fileid
, nva
.nva_fileid
);
1462 if (VATTR_IS_ACTIVE(vap
, va_type
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TYPE
))
1463 VATTR_RETURN(vap
, va_type
, nva
.nva_type
);
1464 if (VATTR_IS_ACTIVE(vap
, va_filerev
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_CHANGE
))
1465 VATTR_RETURN(vap
, va_filerev
, nva
.nva_change
);
1467 if (VATTR_IS_ACTIVE(vap
, va_acl
) && acls
) {
1468 VATTR_RETURN(vap
, va_acl
, nva
.nva_acl
);
1472 // other attrs we might support someday:
1473 // VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? */);
1475 NVATTR_CLEANUP(&nva
);
1482 struct vnode_attr
*vap
,
1485 struct nfsmount
*nmp
= NFSTONMP(np
);
1486 int error
= 0, setattr_error
= 0, lockerror
= ENOENT
, status
, nfsvers
, numops
;
1487 u_int64_t xid
, nextxid
;
1488 struct nfsm_chain nmreq
, nmrep
;
1489 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
1490 uint32_t getbitmap
[NFS_ATTR_BITMAP_LEN
];
1491 uint32_t setbitmap
[NFS_ATTR_BITMAP_LEN
];
1492 nfs_stateid stateid
;
1493 struct nfsreq_secinfo_args si
;
1495 if (nfs_mount_gone(nmp
))
1497 nfsvers
= nmp
->nm_vers
;
1498 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
1501 if (VATTR_IS_ACTIVE(vap
, va_flags
) && (vap
->va_flags
& ~(SF_ARCHIVED
|UF_HIDDEN
))) {
1502 /* we don't support setting unsupported flags (duh!) */
1503 if (vap
->va_active
& ~VNODE_ATTR_va_flags
)
1504 return (EINVAL
); /* return EINVAL if other attributes also set */
1506 return (ENOTSUP
); /* return ENOTSUP for chflags(2) */
1509 /* don't bother requesting some changes if they don't look like they are changing */
1510 if (VATTR_IS_ACTIVE(vap
, va_uid
) && (vap
->va_uid
== np
->n_vattr
.nva_uid
))
1511 VATTR_CLEAR_ACTIVE(vap
, va_uid
);
1512 if (VATTR_IS_ACTIVE(vap
, va_gid
) && (vap
->va_gid
== np
->n_vattr
.nva_gid
))
1513 VATTR_CLEAR_ACTIVE(vap
, va_gid
);
1514 if (VATTR_IS_ACTIVE(vap
, va_uuuid
) && kauth_guid_equal(&vap
->va_uuuid
, &np
->n_vattr
.nva_uuuid
))
1515 VATTR_CLEAR_ACTIVE(vap
, va_uuuid
);
1516 if (VATTR_IS_ACTIVE(vap
, va_guuid
) && kauth_guid_equal(&vap
->va_guuid
, &np
->n_vattr
.nva_guuid
))
1517 VATTR_CLEAR_ACTIVE(vap
, va_guuid
);
1520 /* do nothing if no attributes will be sent */
1521 nfs_vattr_set_bitmap(nmp
, bitmap
, vap
);
1522 if (!bitmap
[0] && !bitmap
[1])
1525 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
1526 nfsm_chain_null(&nmreq
);
1527 nfsm_chain_null(&nmrep
);
1530 * Prepare GETATTR bitmap: if we are setting the ACL or mode, we
1531 * need to invalidate any cached ACL. And if we had an ACL cached,
1532 * we might as well also fetch the new value.
1534 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, getbitmap
);
1535 if (NFS_BITMAP_ISSET(bitmap
, NFS_FATTR_ACL
) ||
1536 NFS_BITMAP_ISSET(bitmap
, NFS_FATTR_MODE
)) {
1538 NFS_BITMAP_SET(getbitmap
, NFS_FATTR_ACL
);
1542 // PUTFH, SETATTR, GETATTR
1544 nfsm_chain_build_alloc_init(error
, &nmreq
, 40 * NFSX_UNSIGNED
);
1545 nfsm_chain_add_compound_header(error
, &nmreq
, "setattr", nmp
->nm_minor_vers
, numops
);
1547 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1548 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
1550 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SETATTR
);
1551 if (VATTR_IS_ACTIVE(vap
, va_data_size
))
1552 nfs_get_stateid(np
, vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &stateid
);
1554 stateid
.seqid
= stateid
.other
[0] = stateid
.other
[1] = stateid
.other
[2] = 0;
1555 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
1556 nfsm_chain_add_fattr4(error
, &nmreq
, vap
, nmp
);
1558 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1559 nfsm_chain_add_bitmap_supported(error
, &nmreq
, getbitmap
, nmp
, np
);
1560 nfsm_chain_build_done(error
, &nmreq
);
1561 nfsm_assert(error
, (numops
== 0), EPROTO
);
1563 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
1565 if ((lockerror
= nfs_node_lock(np
)))
1567 nfsm_chain_skip_tag(error
, &nmrep
);
1568 nfsm_chain_get_32(error
, &nmrep
, numops
);
1569 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1571 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SETATTR
);
1572 nfsmout_if(error
== EBADRPC
);
1573 setattr_error
= error
;
1575 bmlen
= NFS_ATTR_BITMAP_LEN
;
1576 nfsm_chain_get_bitmap(error
, &nmrep
, setbitmap
, bmlen
);
1578 if (VATTR_IS_ACTIVE(vap
, va_data_size
) && (np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
))
1579 microuptime(&np
->n_lastio
);
1580 nfs_vattr_set_supported(setbitmap
, vap
);
1581 error
= setattr_error
;
1583 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1584 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
1586 NATTRINVALIDATE(np
);
1588 * We just changed the attributes and we want to make sure that we
1589 * see the latest attributes. Get the next XID. If it's not the
1590 * next XID after the SETATTR XID, then it's possible that another
1591 * RPC was in flight at the same time and it might put stale attributes
1592 * in the cache. In that case, we invalidate the attributes and set
1593 * the attribute cache XID to guarantee that newer attributes will
1597 nfs_get_xid(&nextxid
);
1598 if (nextxid
!= (xid
+ 1)) {
1599 np
->n_xid
= nextxid
;
1600 NATTRINVALIDATE(np
);
1604 nfs_node_unlock(np
);
1605 nfsm_chain_cleanup(&nmreq
);
1606 nfsm_chain_cleanup(&nmrep
);
1607 if ((setattr_error
== EINVAL
) && VATTR_IS_ACTIVE(vap
, va_acl
) && VATTR_IS_ACTIVE(vap
, va_mode
) && !NMFLAG(nmp
, ACLONLY
)) {
1609 * Some server's may not like ACL/mode combos that get sent.
1610 * If it looks like that's what the server choked on, try setting
1611 * just the ACL and not the mode (unless it looks like everything
1612 * but mode was already successfully set).
1614 if (((bitmap
[0] & setbitmap
[0]) != bitmap
[0]) ||
1615 ((bitmap
[1] & (setbitmap
[1]|NFS_FATTR_MODE
)) != bitmap
[1])) {
1616 VATTR_CLEAR_ACTIVE(vap
, va_mode
);
1625 * Wait for any pending recovery to complete.
1628 nfs_mount_state_wait_for_recovery(struct nfsmount
*nmp
)
1630 struct timespec ts
= { 1, 0 };
1631 int error
= 0, slpflag
= NMFLAG(nmp
, INTR
) ? PCATCH
: 0;
1633 lck_mtx_lock(&nmp
->nm_lock
);
1634 while (nmp
->nm_state
& NFSSTA_RECOVER
) {
1635 if ((error
= nfs_sigintr(nmp
, NULL
, current_thread(), 1)))
1637 nfs_mount_sock_thread_wake(nmp
);
1638 msleep(&nmp
->nm_state
, &nmp
->nm_lock
, slpflag
|(PZERO
-1), "nfsrecoverwait", &ts
);
1641 lck_mtx_unlock(&nmp
->nm_lock
);
1647 * We're about to use/manipulate NFS mount's open/lock state.
1648 * Wait for any pending state recovery to complete, then
1649 * mark the state as being in use (which will hold off
1650 * the recovery thread until we're done).
1653 nfs_mount_state_in_use_start(struct nfsmount
*nmp
, thread_t thd
)
1655 struct timespec ts
= { 1, 0 };
1656 int error
= 0, slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
1658 if (nfs_mount_gone(nmp
))
1660 lck_mtx_lock(&nmp
->nm_lock
);
1661 if (nmp
->nm_state
& (NFSSTA_FORCE
|NFSSTA_DEAD
)) {
1662 lck_mtx_unlock(&nmp
->nm_lock
);
1665 while (nmp
->nm_state
& NFSSTA_RECOVER
) {
1666 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 1)))
1668 nfs_mount_sock_thread_wake(nmp
);
1669 msleep(&nmp
->nm_state
, &nmp
->nm_lock
, slpflag
|(PZERO
-1), "nfsrecoverwait", &ts
);
1673 nmp
->nm_stateinuse
++;
1674 lck_mtx_unlock(&nmp
->nm_lock
);
1680 * We're done using/manipulating the NFS mount's open/lock
1681 * state. If the given error indicates that recovery should
1682 * be performed, we'll initiate recovery.
1685 nfs_mount_state_in_use_end(struct nfsmount
*nmp
, int error
)
1687 int restart
= nfs_mount_state_error_should_restart(error
);
1689 if (nfs_mount_gone(nmp
))
1691 lck_mtx_lock(&nmp
->nm_lock
);
1692 if (restart
&& (error
!= NFSERR_OLD_STATEID
) && (error
!= NFSERR_GRACE
)) {
1693 printf("nfs_mount_state_in_use_end: error %d, initiating recovery for %s, 0x%x\n",
1694 error
, vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nmp
->nm_stategenid
);
1695 nfs_need_recover(nmp
, error
);
1697 if (nmp
->nm_stateinuse
> 0)
1698 nmp
->nm_stateinuse
--;
1700 panic("NFS mount state in use count underrun");
1701 if (!nmp
->nm_stateinuse
&& (nmp
->nm_state
& NFSSTA_RECOVER
))
1702 wakeup(&nmp
->nm_stateinuse
);
1703 lck_mtx_unlock(&nmp
->nm_lock
);
1704 if (error
== NFSERR_GRACE
)
1705 tsleep(&nmp
->nm_state
, (PZERO
-1), "nfsgrace", 2*hz
);
1711 * Does the error mean we should restart/redo a state-related operation?
1714 nfs_mount_state_error_should_restart(int error
)
1717 case NFSERR_STALE_STATEID
:
1718 case NFSERR_STALE_CLIENTID
:
1719 case NFSERR_ADMIN_REVOKED
:
1720 case NFSERR_EXPIRED
:
1721 case NFSERR_OLD_STATEID
:
1722 case NFSERR_BAD_STATEID
:
1730 * In some cases we may want to limit how many times we restart a
1731 * state-related operation - e.g. we're repeatedly getting NFSERR_GRACE.
1732 * Base the limit on the lease (as long as it's not too short).
1735 nfs_mount_state_max_restarts(struct nfsmount
*nmp
)
1737 return (MAX(nmp
->nm_fsattr
.nfsa_lease
, 60));
1741 * Does the error mean we probably lost a delegation?
1744 nfs_mount_state_error_delegation_lost(int error
)
1747 case NFSERR_STALE_STATEID
:
1748 case NFSERR_ADMIN_REVOKED
:
1749 case NFSERR_EXPIRED
:
1750 case NFSERR_OLD_STATEID
:
1751 case NFSERR_BAD_STATEID
:
1752 case NFSERR_GRACE
: /* ugh! (stupid) RFC 3530 specifically disallows CLAIM_DELEGATE_CUR during grace period? */
1760 * Mark an NFS node's open state as busy.
1763 nfs_open_state_set_busy(nfsnode_t np
, thread_t thd
)
1765 struct nfsmount
*nmp
;
1766 struct timespec ts
= {2, 0};
1767 int error
= 0, slpflag
;
1770 if (nfs_mount_gone(nmp
))
1772 slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
1774 lck_mtx_lock(&np
->n_openlock
);
1775 while (np
->n_openflags
& N_OPENBUSY
) {
1776 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0)))
1778 np
->n_openflags
|= N_OPENWANT
;
1779 msleep(&np
->n_openflags
, &np
->n_openlock
, slpflag
, "nfs_open_state_set_busy", &ts
);
1783 np
->n_openflags
|= N_OPENBUSY
;
1784 lck_mtx_unlock(&np
->n_openlock
);
1790 * Clear an NFS node's open state busy flag and wake up
1791 * anyone wanting it.
1794 nfs_open_state_clear_busy(nfsnode_t np
)
1798 lck_mtx_lock(&np
->n_openlock
);
1799 if (!(np
->n_openflags
& N_OPENBUSY
))
1800 panic("nfs_open_state_clear_busy");
1801 wanted
= (np
->n_openflags
& N_OPENWANT
);
1802 np
->n_openflags
&= ~(N_OPENBUSY
|N_OPENWANT
);
1803 lck_mtx_unlock(&np
->n_openlock
);
1805 wakeup(&np
->n_openflags
);
1809 * Search a mount's open owner list for the owner for this credential.
1810 * If not found and "alloc" is set, then allocate a new one.
1812 struct nfs_open_owner
*
1813 nfs_open_owner_find(struct nfsmount
*nmp
, kauth_cred_t cred
, int alloc
)
1815 uid_t uid
= kauth_cred_getuid(cred
);
1816 struct nfs_open_owner
*noop
, *newnoop
= NULL
;
1819 lck_mtx_lock(&nmp
->nm_lock
);
1820 TAILQ_FOREACH(noop
, &nmp
->nm_open_owners
, noo_link
) {
1821 if (kauth_cred_getuid(noop
->noo_cred
) == uid
)
1825 if (!noop
&& !newnoop
&& alloc
) {
1826 lck_mtx_unlock(&nmp
->nm_lock
);
1827 MALLOC(newnoop
, struct nfs_open_owner
*, sizeof(struct nfs_open_owner
), M_TEMP
, M_WAITOK
);
1830 bzero(newnoop
, sizeof(*newnoop
));
1831 lck_mtx_init(&newnoop
->noo_lock
, nfs_open_grp
, LCK_ATTR_NULL
);
1832 newnoop
->noo_mount
= nmp
;
1833 kauth_cred_ref(cred
);
1834 newnoop
->noo_cred
= cred
;
1835 newnoop
->noo_name
= OSAddAtomic(1, &nfs_open_owner_seqnum
);
1836 TAILQ_INIT(&newnoop
->noo_opens
);
1839 if (!noop
&& newnoop
) {
1840 newnoop
->noo_flags
|= NFS_OPEN_OWNER_LINK
;
1841 TAILQ_INSERT_HEAD(&nmp
->nm_open_owners
, newnoop
, noo_link
);
1844 lck_mtx_unlock(&nmp
->nm_lock
);
1846 if (newnoop
&& (noop
!= newnoop
))
1847 nfs_open_owner_destroy(newnoop
);
1850 nfs_open_owner_ref(noop
);
1856 * destroy an open owner that's no longer needed
1859 nfs_open_owner_destroy(struct nfs_open_owner
*noop
)
1862 kauth_cred_unref(&noop
->noo_cred
);
1863 lck_mtx_destroy(&noop
->noo_lock
, nfs_open_grp
);
1868 * acquire a reference count on an open owner
1871 nfs_open_owner_ref(struct nfs_open_owner
*noop
)
1873 lck_mtx_lock(&noop
->noo_lock
);
1875 lck_mtx_unlock(&noop
->noo_lock
);
1879 * drop a reference count on an open owner and destroy it if
1880 * it is no longer referenced and no longer on the mount's list.
1883 nfs_open_owner_rele(struct nfs_open_owner
*noop
)
1885 lck_mtx_lock(&noop
->noo_lock
);
1886 if (noop
->noo_refcnt
< 1)
1887 panic("nfs_open_owner_rele: no refcnt");
1889 if (!noop
->noo_refcnt
&& (noop
->noo_flags
& NFS_OPEN_OWNER_BUSY
))
1890 panic("nfs_open_owner_rele: busy");
1891 /* XXX we may potentially want to clean up idle/unused open owner structures */
1892 if (noop
->noo_refcnt
|| (noop
->noo_flags
& NFS_OPEN_OWNER_LINK
)) {
1893 lck_mtx_unlock(&noop
->noo_lock
);
1896 /* owner is no longer referenced or linked to mount, so destroy it */
1897 lck_mtx_unlock(&noop
->noo_lock
);
1898 nfs_open_owner_destroy(noop
);
1902 * Mark an open owner as busy because we are about to
1903 * start an operation that uses and updates open owner state.
1906 nfs_open_owner_set_busy(struct nfs_open_owner
*noop
, thread_t thd
)
1908 struct nfsmount
*nmp
;
1909 struct timespec ts
= {2, 0};
1910 int error
= 0, slpflag
;
1912 nmp
= noop
->noo_mount
;
1913 if (nfs_mount_gone(nmp
))
1915 slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
1917 lck_mtx_lock(&noop
->noo_lock
);
1918 while (noop
->noo_flags
& NFS_OPEN_OWNER_BUSY
) {
1919 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0)))
1921 noop
->noo_flags
|= NFS_OPEN_OWNER_WANT
;
1922 msleep(noop
, &noop
->noo_lock
, slpflag
, "nfs_open_owner_set_busy", &ts
);
1926 noop
->noo_flags
|= NFS_OPEN_OWNER_BUSY
;
1927 lck_mtx_unlock(&noop
->noo_lock
);
1933 * Clear the busy flag on an open owner and wake up anyone waiting
1937 nfs_open_owner_clear_busy(struct nfs_open_owner
*noop
)
1941 lck_mtx_lock(&noop
->noo_lock
);
1942 if (!(noop
->noo_flags
& NFS_OPEN_OWNER_BUSY
))
1943 panic("nfs_open_owner_clear_busy");
1944 wanted
= (noop
->noo_flags
& NFS_OPEN_OWNER_WANT
);
1945 noop
->noo_flags
&= ~(NFS_OPEN_OWNER_BUSY
|NFS_OPEN_OWNER_WANT
);
1946 lck_mtx_unlock(&noop
->noo_lock
);
1952 * Given an open/lock owner and an error code, increment the
1953 * sequence ID if appropriate.
1956 nfs_owner_seqid_increment(struct nfs_open_owner
*noop
, struct nfs_lock_owner
*nlop
, int error
)
1959 case NFSERR_STALE_CLIENTID
:
1960 case NFSERR_STALE_STATEID
:
1961 case NFSERR_OLD_STATEID
:
1962 case NFSERR_BAD_STATEID
:
1963 case NFSERR_BAD_SEQID
:
1965 case NFSERR_RESOURCE
:
1966 case NFSERR_NOFILEHANDLE
:
1967 /* do not increment the open seqid on these errors */
1977 * Search a node's open file list for any conflicts with this request.
1978 * Also find this open owner's open file structure.
1979 * If not found and "alloc" is set, then allocate one.
1984 struct nfs_open_owner
*noop
,
1985 struct nfs_open_file
**nofpp
,
1986 uint32_t accessMode
,
1991 return nfs_open_file_find_internal(np
, noop
, nofpp
, accessMode
, denyMode
, alloc
);
1995 * Internally, allow using a provisional nodeless nofp (passed in via *nofpp)
1996 * if an existing one is not found. This is used in "create" scenarios to
1997 * officially add the provisional nofp to the node once the node is created.
2000 nfs_open_file_find_internal(
2002 struct nfs_open_owner
*noop
,
2003 struct nfs_open_file
**nofpp
,
2004 uint32_t accessMode
,
2008 struct nfs_open_file
*nofp
= NULL
, *nofp2
, *newnofp
= NULL
;
2013 lck_mtx_lock(&np
->n_openlock
);
2014 TAILQ_FOREACH(nofp2
, &np
->n_opens
, nof_link
) {
2015 if (nofp2
->nof_owner
== noop
) {
2020 if ((accessMode
& nofp2
->nof_deny
) || (denyMode
& nofp2
->nof_access
)) {
2021 /* This request conflicts with an existing open on this client. */
2022 lck_mtx_unlock(&np
->n_openlock
);
2028 * If this open owner doesn't have an open
2029 * file structure yet, we create one for it.
2031 if (!nofp
&& !*nofpp
&& !newnofp
&& alloc
) {
2032 lck_mtx_unlock(&np
->n_openlock
);
2034 MALLOC(newnofp
, struct nfs_open_file
*, sizeof(struct nfs_open_file
), M_TEMP
, M_WAITOK
);
2037 bzero(newnofp
, sizeof(*newnofp
));
2038 lck_mtx_init(&newnofp
->nof_lock
, nfs_open_grp
, LCK_ATTR_NULL
);
2039 newnofp
->nof_owner
= noop
;
2040 nfs_open_owner_ref(noop
);
2041 newnofp
->nof_np
= np
;
2042 lck_mtx_lock(&noop
->noo_lock
);
2043 TAILQ_INSERT_HEAD(&noop
->noo_opens
, newnofp
, nof_oolink
);
2044 lck_mtx_unlock(&noop
->noo_lock
);
2050 (*nofpp
)->nof_np
= np
;
2056 TAILQ_INSERT_HEAD(&np
->n_opens
, nofp
, nof_link
);
2059 lck_mtx_unlock(&np
->n_openlock
);
2061 if (alloc
&& newnofp
&& (nofp
!= newnofp
))
2062 nfs_open_file_destroy(newnofp
);
2065 return (nofp
? 0 : ESRCH
);
2069 * Destroy an open file structure.
2072 nfs_open_file_destroy(struct nfs_open_file
*nofp
)
2074 lck_mtx_lock(&nofp
->nof_owner
->noo_lock
);
2075 TAILQ_REMOVE(&nofp
->nof_owner
->noo_opens
, nofp
, nof_oolink
);
2076 lck_mtx_unlock(&nofp
->nof_owner
->noo_lock
);
2077 nfs_open_owner_rele(nofp
->nof_owner
);
2078 lck_mtx_destroy(&nofp
->nof_lock
, nfs_open_grp
);
2083 * Mark an open file as busy because we are about to
2084 * start an operation that uses and updates open file state.
2087 nfs_open_file_set_busy(struct nfs_open_file
*nofp
, thread_t thd
)
2089 struct nfsmount
*nmp
;
2090 struct timespec ts
= {2, 0};
2091 int error
= 0, slpflag
;
2093 nmp
= nofp
->nof_owner
->noo_mount
;
2094 if (nfs_mount_gone(nmp
))
2096 slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
2098 lck_mtx_lock(&nofp
->nof_lock
);
2099 while (nofp
->nof_flags
& NFS_OPEN_FILE_BUSY
) {
2100 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0)))
2102 nofp
->nof_flags
|= NFS_OPEN_FILE_WANT
;
2103 msleep(nofp
, &nofp
->nof_lock
, slpflag
, "nfs_open_file_set_busy", &ts
);
2107 nofp
->nof_flags
|= NFS_OPEN_FILE_BUSY
;
2108 lck_mtx_unlock(&nofp
->nof_lock
);
2114 * Clear the busy flag on an open file and wake up anyone waiting
2118 nfs_open_file_clear_busy(struct nfs_open_file
*nofp
)
2122 lck_mtx_lock(&nofp
->nof_lock
);
2123 if (!(nofp
->nof_flags
& NFS_OPEN_FILE_BUSY
))
2124 panic("nfs_open_file_clear_busy");
2125 wanted
= (nofp
->nof_flags
& NFS_OPEN_FILE_WANT
);
2126 nofp
->nof_flags
&= ~(NFS_OPEN_FILE_BUSY
|NFS_OPEN_FILE_WANT
);
2127 lck_mtx_unlock(&nofp
->nof_lock
);
2133 * Add the open state for the given access/deny modes to this open file.
2136 nfs_open_file_add_open(struct nfs_open_file
*nofp
, uint32_t accessMode
, uint32_t denyMode
, int delegated
)
2138 lck_mtx_lock(&nofp
->nof_lock
);
2139 nofp
->nof_access
|= accessMode
;
2140 nofp
->nof_deny
|= denyMode
;
2143 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2144 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
)
2146 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
)
2148 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
)
2150 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2151 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
)
2153 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
)
2155 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
)
2156 nofp
->nof_d_rw_dw
++;
2157 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2158 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
)
2159 nofp
->nof_d_r_drw
++;
2160 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
)
2161 nofp
->nof_d_w_drw
++;
2162 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
)
2163 nofp
->nof_d_rw_drw
++;
2166 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2167 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
)
2169 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
)
2171 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
)
2173 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2174 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
)
2176 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
)
2178 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
)
2180 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2181 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
)
2183 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
)
2185 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
)
2190 nofp
->nof_opencnt
++;
2191 lck_mtx_unlock(&nofp
->nof_lock
);
2195 * Find which particular open combo will be closed and report what
2196 * the new modes will be and whether the open was delegated.
2199 nfs_open_file_remove_open_find(
2200 struct nfs_open_file
*nofp
,
2201 uint32_t accessMode
,
2203 uint32_t *newAccessMode
,
2204 uint32_t *newDenyMode
,
2208 * Calculate new modes: a mode bit gets removed when there's only
2209 * one count in all the corresponding counts
2211 *newAccessMode
= nofp
->nof_access
;
2212 *newDenyMode
= nofp
->nof_deny
;
2214 if ((accessMode
& NFS_OPEN_SHARE_ACCESS_READ
) &&
2215 (nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_READ
) &&
2216 ((nofp
->nof_r
+ nofp
->nof_d_r
+
2217 nofp
->nof_rw
+ nofp
->nof_d_rw
+
2218 nofp
->nof_r_dw
+ nofp
->nof_d_r_dw
+
2219 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
+
2220 nofp
->nof_r_drw
+ nofp
->nof_d_r_drw
+
2221 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
) == 1))
2222 *newAccessMode
&= ~NFS_OPEN_SHARE_ACCESS_READ
;
2223 if ((accessMode
& NFS_OPEN_SHARE_ACCESS_WRITE
) &&
2224 (nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_WRITE
) &&
2225 ((nofp
->nof_w
+ nofp
->nof_d_w
+
2226 nofp
->nof_rw
+ nofp
->nof_d_rw
+
2227 nofp
->nof_w_dw
+ nofp
->nof_d_w_dw
+
2228 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
+
2229 nofp
->nof_w_drw
+ nofp
->nof_d_w_drw
+
2230 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
) == 1))
2231 *newAccessMode
&= ~NFS_OPEN_SHARE_ACCESS_WRITE
;
2232 if ((denyMode
& NFS_OPEN_SHARE_DENY_READ
) &&
2233 (nofp
->nof_deny
& NFS_OPEN_SHARE_DENY_READ
) &&
2234 ((nofp
->nof_r_drw
+ nofp
->nof_d_r_drw
+
2235 nofp
->nof_w_drw
+ nofp
->nof_d_w_drw
+
2236 nofp
->nof_rw_drw
+ nofp
->nof_d_rw_drw
) == 1))
2237 *newDenyMode
&= ~NFS_OPEN_SHARE_DENY_READ
;
2238 if ((denyMode
& NFS_OPEN_SHARE_DENY_WRITE
) &&
2239 (nofp
->nof_deny
& NFS_OPEN_SHARE_DENY_WRITE
) &&
2240 ((nofp
->nof_r_drw
+ nofp
->nof_d_r_drw
+
2241 nofp
->nof_w_drw
+ nofp
->nof_d_w_drw
+
2242 nofp
->nof_rw_drw
+ nofp
->nof_d_rw_drw
+
2243 nofp
->nof_r_dw
+ nofp
->nof_d_r_dw
+
2244 nofp
->nof_w_dw
+ nofp
->nof_d_w_dw
+
2245 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
) == 1))
2246 *newDenyMode
&= ~NFS_OPEN_SHARE_DENY_WRITE
;
2248 /* Find the corresponding open access/deny mode counter. */
2249 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2250 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
)
2251 *delegated
= (nofp
->nof_d_r
!= 0);
2252 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
)
2253 *delegated
= (nofp
->nof_d_w
!= 0);
2254 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
)
2255 *delegated
= (nofp
->nof_d_rw
!= 0);
2258 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2259 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
)
2260 *delegated
= (nofp
->nof_d_r_dw
!= 0);
2261 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
)
2262 *delegated
= (nofp
->nof_d_w_dw
!= 0);
2263 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
)
2264 *delegated
= (nofp
->nof_d_rw_dw
!= 0);
2267 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2268 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
)
2269 *delegated
= (nofp
->nof_d_r_drw
!= 0);
2270 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
)
2271 *delegated
= (nofp
->nof_d_w_drw
!= 0);
2272 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
)
2273 *delegated
= (nofp
->nof_d_rw_drw
!= 0);
2280 * Remove the open state for the given access/deny modes to this open file.
2283 nfs_open_file_remove_open(struct nfs_open_file
*nofp
, uint32_t accessMode
, uint32_t denyMode
)
2285 uint32_t newAccessMode
, newDenyMode
;
2288 lck_mtx_lock(&nofp
->nof_lock
);
2289 nfs_open_file_remove_open_find(nofp
, accessMode
, denyMode
, &newAccessMode
, &newDenyMode
, &delegated
);
2291 /* Decrement the corresponding open access/deny mode counter. */
2292 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2293 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2295 if (nofp
->nof_d_r
== 0)
2296 NP(nofp
->nof_np
, "nfs: open(R) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2300 if (nofp
->nof_r
== 0)
2301 NP(nofp
->nof_np
, "nfs: open(R) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2305 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2307 if (nofp
->nof_d_w
== 0)
2308 NP(nofp
->nof_np
, "nfs: open(W) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2312 if (nofp
->nof_w
== 0)
2313 NP(nofp
->nof_np
, "nfs: open(W) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2317 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2319 if (nofp
->nof_d_rw
== 0)
2320 NP(nofp
->nof_np
, "nfs: open(RW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2324 if (nofp
->nof_rw
== 0)
2325 NP(nofp
->nof_np
, "nfs: open(RW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2330 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2331 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2333 if (nofp
->nof_d_r_dw
== 0)
2334 NP(nofp
->nof_np
, "nfs: open(R,DW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2338 if (nofp
->nof_r_dw
== 0)
2339 NP(nofp
->nof_np
, "nfs: open(R,DW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2343 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2345 if (nofp
->nof_d_w_dw
== 0)
2346 NP(nofp
->nof_np
, "nfs: open(W,DW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2350 if (nofp
->nof_w_dw
== 0)
2351 NP(nofp
->nof_np
, "nfs: open(W,DW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2355 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2357 if (nofp
->nof_d_rw_dw
== 0)
2358 NP(nofp
->nof_np
, "nfs: open(RW,DW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2360 nofp
->nof_d_rw_dw
--;
2362 if (nofp
->nof_rw_dw
== 0)
2363 NP(nofp
->nof_np
, "nfs: open(RW,DW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2368 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2369 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2371 if (nofp
->nof_d_r_drw
== 0)
2372 NP(nofp
->nof_np
, "nfs: open(R,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2374 nofp
->nof_d_r_drw
--;
2376 if (nofp
->nof_r_drw
== 0)
2377 NP(nofp
->nof_np
, "nfs: open(R,DRW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2381 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2383 if (nofp
->nof_d_w_drw
== 0)
2384 NP(nofp
->nof_np
, "nfs: open(W,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2386 nofp
->nof_d_w_drw
--;
2388 if (nofp
->nof_w_drw
== 0)
2389 NP(nofp
->nof_np
, "nfs: open(W,DRW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2393 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2395 if (nofp
->nof_d_rw_drw
== 0)
2396 NP(nofp
->nof_np
, "nfs: open(RW,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2398 nofp
->nof_d_rw_drw
--;
2400 if (nofp
->nof_rw_drw
== 0)
2401 NP(nofp
->nof_np
, "nfs: open(RW,DRW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2408 /* update the modes */
2409 nofp
->nof_access
= newAccessMode
;
2410 nofp
->nof_deny
= newDenyMode
;
2411 nofp
->nof_opencnt
--;
2412 lck_mtx_unlock(&nofp
->nof_lock
);
2417 * Get the current (delegation, lock, open, default) stateid for this node.
2418 * If node has a delegation, use that stateid.
2419 * If pid has a lock, use the lockowner's stateid.
2420 * Or use the open file's stateid.
2421 * If no open file, use a default stateid of all ones.
2424 nfs_get_stateid(nfsnode_t np
, thread_t thd
, kauth_cred_t cred
, nfs_stateid
*sid
)
2426 struct nfsmount
*nmp
= NFSTONMP(np
);
2427 proc_t p
= thd
? get_bsdthreadtask_info(thd
) : current_proc(); // XXX async I/O requests don't have a thread
2428 struct nfs_open_owner
*noop
= NULL
;
2429 struct nfs_open_file
*nofp
= NULL
;
2430 struct nfs_lock_owner
*nlop
= NULL
;
2431 nfs_stateid
*s
= NULL
;
2433 if (np
->n_openflags
& N_DELEG_MASK
) {
2434 s
= &np
->n_dstateid
;
2437 nlop
= nfs_lock_owner_find(np
, p
, 0);
2438 if (nlop
&& !TAILQ_EMPTY(&nlop
->nlo_locks
)) {
2439 /* we hold locks, use lock stateid */
2440 s
= &nlop
->nlo_stateid
;
2441 } else if (((noop
= nfs_open_owner_find(nmp
, cred
, 0))) &&
2442 (nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 0) == 0) &&
2443 !(nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) &&
2445 /* we (should) have the file open, use open stateid */
2446 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)
2447 nfs4_reopen(nofp
, thd
);
2448 if (!(nofp
->nof_flags
& NFS_OPEN_FILE_LOST
))
2449 s
= &nofp
->nof_stateid
;
2454 sid
->seqid
= s
->seqid
;
2455 sid
->other
[0] = s
->other
[0];
2456 sid
->other
[1] = s
->other
[1];
2457 sid
->other
[2] = s
->other
[2];
2459 /* named attributes may not have a stateid for reads, so don't complain for them */
2460 if (!(np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
))
2461 NP(np
, "nfs_get_stateid: no stateid");
2462 sid
->seqid
= sid
->other
[0] = sid
->other
[1] = sid
->other
[2] = 0xffffffff;
2465 nfs_lock_owner_rele(nlop
);
2467 nfs_open_owner_rele(noop
);
2472 * When we have a delegation, we may be able to perform the OPEN locally.
2473 * Perform the OPEN by checking the delegation ACE and/or checking via ACCESS.
2476 nfs4_open_delegated(
2478 struct nfs_open_file
*nofp
,
2479 uint32_t accessMode
,
2483 int error
= 0, ismember
, readtoo
= 0, authorized
= 0;
2485 struct kauth_acl_eval eval
;
2486 kauth_cred_t cred
= vfs_context_ucred(ctx
);
2488 if (!(accessMode
& NFS_OPEN_SHARE_ACCESS_READ
)) {
2490 * Try to open it for read access too,
2491 * so the buffer cache can read data.
2494 accessMode
|= NFS_OPEN_SHARE_ACCESS_READ
;
2499 if (accessMode
& NFS_OPEN_SHARE_ACCESS_READ
)
2500 action
|= KAUTH_VNODE_READ_DATA
;
2501 if (accessMode
& NFS_OPEN_SHARE_ACCESS_WRITE
)
2502 action
|= KAUTH_VNODE_WRITE_DATA
;
2504 /* evaluate ACE (if we have one) */
2505 if (np
->n_dace
.ace_flags
) {
2506 eval
.ae_requested
= action
;
2507 eval
.ae_acl
= &np
->n_dace
;
2509 eval
.ae_options
= 0;
2510 if (np
->n_vattr
.nva_uid
== kauth_cred_getuid(cred
))
2511 eval
.ae_options
|= KAUTH_AEVAL_IS_OWNER
;
2512 error
= kauth_cred_ismember_gid(cred
, np
->n_vattr
.nva_gid
, &ismember
);
2513 if (!error
&& ismember
)
2514 eval
.ae_options
|= KAUTH_AEVAL_IN_GROUP
;
2516 eval
.ae_exp_gall
= KAUTH_VNODE_GENERIC_ALL_BITS
;
2517 eval
.ae_exp_gread
= KAUTH_VNODE_GENERIC_READ_BITS
;
2518 eval
.ae_exp_gwrite
= KAUTH_VNODE_GENERIC_WRITE_BITS
;
2519 eval
.ae_exp_gexec
= KAUTH_VNODE_GENERIC_EXECUTE_BITS
;
2521 error
= kauth_acl_evaluate(cred
, &eval
);
2523 if (!error
&& (eval
.ae_result
== KAUTH_RESULT_ALLOW
))
2528 /* need to ask the server via ACCESS */
2529 struct vnop_access_args naa
;
2530 naa
.a_desc
= &vnop_access_desc
;
2531 naa
.a_vp
= NFSTOV(np
);
2532 naa
.a_action
= action
;
2533 naa
.a_context
= ctx
;
2534 if (!(error
= nfs_vnop_access(&naa
)))
2540 /* try again without the extra read access */
2541 accessMode
&= ~NFS_OPEN_SHARE_ACCESS_READ
;
2545 return (error
? error
: EACCES
);
2548 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, 1);
2555 * Open a file with the given access/deny modes.
2557 * If we have a delegation, we may be able to handle the open locally.
2558 * Otherwise, we will always send the open RPC even if this open's mode is
2559 * a subset of all the existing opens. This makes sure that we will always
2560 * be able to do a downgrade to any of the open modes.
2562 * Note: local conflicts should have already been checked in nfs_open_file_find().
2567 struct nfs_open_file
*nofp
,
2568 uint32_t accessMode
,
2572 vnode_t vp
= NFSTOV(np
);
2574 struct componentname cn
;
2575 const char *vname
= NULL
;
2577 char smallname
[128];
2578 char *filename
= NULL
;
2579 int error
= 0, readtoo
= 0;
2582 * We can handle the OPEN ourselves if we have a delegation,
2583 * unless it's a read delegation and the open is asking for
2584 * either write access or deny read. We also don't bother to
2585 * use the delegation if it's being returned.
2587 if (np
->n_openflags
& N_DELEG_MASK
) {
2588 if ((error
= nfs_open_state_set_busy(np
, vfs_context_thread(ctx
))))
2590 if ((np
->n_openflags
& N_DELEG_MASK
) && !(np
->n_openflags
& N_DELEG_RETURN
) &&
2591 (((np
->n_openflags
& N_DELEG_MASK
) == N_DELEG_WRITE
) ||
2592 (!(accessMode
& NFS_OPEN_SHARE_ACCESS_WRITE
) && !(denyMode
& NFS_OPEN_SHARE_DENY_READ
)))) {
2593 error
= nfs4_open_delegated(np
, nofp
, accessMode
, denyMode
, ctx
);
2594 nfs_open_state_clear_busy(np
);
2597 nfs_open_state_clear_busy(np
);
2601 * [sigh] We can't trust VFS to get the parent right for named
2602 * attribute nodes. (It likes to reparent the nodes after we've
2603 * created them.) Luckily we can probably get the right parent
2604 * from the n_parent we have stashed away.
2606 if ((np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) &&
2607 (((dvp
= np
->n_parent
)) && (error
= vnode_get(dvp
))))
2610 dvp
= vnode_getparent(vp
);
2611 vname
= vnode_getname(vp
);
2612 if (!dvp
|| !vname
) {
2617 filename
= &smallname
[0];
2618 namelen
= snprintf(filename
, sizeof(smallname
), "%s", vname
);
2619 if (namelen
>= sizeof(smallname
)) {
2620 MALLOC(filename
, char *, namelen
+1, M_TEMP
, M_WAITOK
);
2625 snprintf(filename
, namelen
+1, "%s", vname
);
2627 bzero(&cn
, sizeof(cn
));
2628 cn
.cn_nameptr
= filename
;
2629 cn
.cn_namelen
= namelen
;
2631 if (!(accessMode
& NFS_OPEN_SHARE_ACCESS_READ
)) {
2633 * Try to open it for read access too,
2634 * so the buffer cache can read data.
2637 accessMode
|= NFS_OPEN_SHARE_ACCESS_READ
;
2640 error
= nfs4_open_rpc(nofp
, ctx
, &cn
, NULL
, dvp
, &vp
, NFS_OPEN_NOCREATE
, accessMode
, denyMode
);
2642 if (!nfs_mount_state_error_should_restart(error
) &&
2643 (error
!= EINTR
) && (error
!= ERESTART
) && readtoo
) {
2644 /* try again without the extra read access */
2645 accessMode
&= ~NFS_OPEN_SHARE_ACCESS_READ
;
2651 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, 0);
2653 if (filename
&& (filename
!= &smallname
[0]))
2654 FREE(filename
, M_TEMP
);
2656 vnode_putname(vname
);
2664 struct vnop_mmap_args
/* {
2665 struct vnodeop_desc *a_desc;
2668 vfs_context_t a_context;
2671 vfs_context_t ctx
= ap
->a_context
;
2672 vnode_t vp
= ap
->a_vp
;
2673 nfsnode_t np
= VTONFS(vp
);
2674 int error
= 0, accessMode
, denyMode
, delegated
;
2675 struct nfsmount
*nmp
;
2676 struct nfs_open_owner
*noop
= NULL
;
2677 struct nfs_open_file
*nofp
= NULL
;
2680 if (nfs_mount_gone(nmp
))
2683 if (!vnode_isreg(vp
) || !(ap
->a_fflags
& (PROT_READ
|PROT_WRITE
)))
2685 if (np
->n_flag
& NREVOKE
)
2689 * fflags contains some combination of: PROT_READ, PROT_WRITE
2690 * Since it's not possible to mmap() without having the file open for reading,
2691 * read access is always there (regardless if PROT_READ is not set).
2693 accessMode
= NFS_OPEN_SHARE_ACCESS_READ
;
2694 if (ap
->a_fflags
& PROT_WRITE
)
2695 accessMode
|= NFS_OPEN_SHARE_ACCESS_WRITE
;
2696 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
2698 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1);
2703 error
= nfs_mount_state_in_use_start(nmp
, NULL
);
2705 nfs_open_owner_rele(noop
);
2708 if (np
->n_flag
& NREVOKE
) {
2710 nfs_mount_state_in_use_end(nmp
, 0);
2711 nfs_open_owner_rele(noop
);
2715 error
= nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 1);
2716 if (error
|| (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
))) {
2717 NP(np
, "nfs_vnop_mmap: no open file for owner, error %d, %d", error
, kauth_cred_getuid(noop
->noo_cred
));
2720 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
2721 nfs_mount_state_in_use_end(nmp
, 0);
2722 error
= nfs4_reopen(nofp
, NULL
);
2728 error
= nfs_open_file_set_busy(nofp
, NULL
);
2735 * The open reference for mmap must mirror an existing open because
2736 * we may need to reclaim it after the file is closed.
2737 * So grab another open count matching the accessMode passed in.
2738 * If we already had an mmap open, prefer read/write without deny mode.
2739 * This means we may have to drop the current mmap open first.
2741 * N.B. We should have an open for the mmap, because, mmap was
2742 * called on an open descriptor, or we've created an open for read
2743 * from reading the first page for execve. However, if we piggy
2744 * backed on an existing NFS_OPEN_SHARE_ACCESS_READ/NFS_OPEN_SHARE_DENY_NONE
2745 * that open may have closed.
2748 if (!(nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_READ
)) {
2749 if (nofp
->nof_flags
& NFS_OPEN_FILE_NEEDCLOSE
) {
2750 /* We shouldn't get here. We've already open the file for execve */
2751 NP(np
, "nfs_vnop_mmap: File already needs close access: 0x%x, cred: %d thread: %lld",
2752 nofp
->nof_access
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
), thread_tid(vfs_context_thread(ctx
)));
2755 * mmapings for execve are just for read. Get out with EPERM if the accessMode is not ACCESS_READ
2756 * or the access would be denied. Other accesses should have an open descriptor for the mapping.
2758 if (accessMode
!= NFS_OPEN_SHARE_ACCESS_READ
|| (accessMode
& nofp
->nof_deny
)) {
2759 /* not asking for just read access -> fail */
2763 /* we don't have the file open, so open it for read access */
2764 if (nmp
->nm_vers
< NFS_VER4
) {
2765 /* NFS v2/v3 opens are always allowed - so just add it. */
2766 nfs_open_file_add_open(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, 0);
2769 error
= nfs4_open(np
, nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, ctx
);
2772 nofp
->nof_flags
|= NFS_OPEN_FILE_NEEDCLOSE
;
2777 /* determine deny mode for open */
2778 if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2779 if (nofp
->nof_d_rw
|| nofp
->nof_d_rw_dw
|| nofp
->nof_d_rw_drw
) {
2782 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
2783 else if (nofp
->nof_d_rw_dw
)
2784 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
2785 else if (nofp
->nof_d_rw_drw
)
2786 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
2787 } else if (nofp
->nof_rw
|| nofp
->nof_rw_dw
|| nofp
->nof_rw_drw
) {
2790 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
2791 else if (nofp
->nof_rw_dw
)
2792 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
2793 else if (nofp
->nof_rw_drw
)
2794 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
2798 } else { /* NFS_OPEN_SHARE_ACCESS_READ */
2799 if (nofp
->nof_d_r
|| nofp
->nof_d_r_dw
|| nofp
->nof_d_r_drw
) {
2802 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
2803 else if (nofp
->nof_d_r_dw
)
2804 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
2805 else if (nofp
->nof_d_r_drw
)
2806 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
2807 } else if (nofp
->nof_r
|| nofp
->nof_r_dw
|| nofp
->nof_r_drw
) {
2810 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
2811 else if (nofp
->nof_r_dw
)
2812 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
2813 else if (nofp
->nof_r_drw
)
2814 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
2815 } else if (nofp
->nof_d_rw
|| nofp
->nof_d_rw_dw
|| nofp
->nof_d_rw_drw
) {
2817 * This clause and the one below is to co-opt a read write access
2818 * for a read only mmaping. We probably got here in that an
2819 * existing rw open for an executable file already exists.
2822 accessMode
= NFS_OPEN_SHARE_ACCESS_BOTH
;
2824 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
2825 else if (nofp
->nof_d_rw_dw
)
2826 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
2827 else if (nofp
->nof_d_rw_drw
)
2828 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
2829 } else if (nofp
->nof_rw
|| nofp
->nof_rw_dw
|| nofp
->nof_rw_drw
) {
2831 accessMode
= NFS_OPEN_SHARE_ACCESS_BOTH
;
2833 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
2834 else if (nofp
->nof_rw_dw
)
2835 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
2836 else if (nofp
->nof_rw_drw
)
2837 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
2842 if (error
) /* mmap mode without proper open mode */
2846 * If the existing mmap access is more than the new access OR the
2847 * existing access is the same and the existing deny mode is less,
2848 * then we'll stick with the existing mmap open mode.
2850 if ((nofp
->nof_mmap_access
> accessMode
) ||
2851 ((nofp
->nof_mmap_access
== accessMode
) && (nofp
->nof_mmap_deny
<= denyMode
)))
2854 /* update mmap open mode */
2855 if (nofp
->nof_mmap_access
) {
2856 error
= nfs_close(np
, nofp
, nofp
->nof_mmap_access
, nofp
->nof_mmap_deny
, ctx
);
2858 if (!nfs_mount_state_error_should_restart(error
))
2859 NP(np
, "nfs_vnop_mmap: close of previous mmap mode failed: %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2860 NP(np
, "nfs_vnop_mmap: update, close error %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2863 nofp
->nof_mmap_access
= nofp
->nof_mmap_deny
= 0;
2866 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, delegated
);
2867 nofp
->nof_mmap_access
= accessMode
;
2868 nofp
->nof_mmap_deny
= denyMode
;
2872 nfs_open_file_clear_busy(nofp
);
2873 if (nfs_mount_state_in_use_end(nmp
, error
)) {
2878 nfs_open_owner_rele(noop
);
2882 nfs_node_lock_force(np
);
2883 if ((np
->n_flag
& NISMAPPED
) == 0) {
2884 np
->n_flag
|= NISMAPPED
;
2887 nfs_node_unlock(np
);
2889 lck_mtx_lock(&nmp
->nm_lock
);
2890 nmp
->nm_state
&= ~NFSSTA_SQUISHY
;
2891 nmp
->nm_curdeadtimeout
= nmp
->nm_deadtimeout
;
2892 if (nmp
->nm_curdeadtimeout
<= 0)
2893 nmp
->nm_deadto_start
= 0;
2895 lck_mtx_unlock(&nmp
->nm_lock
);
2905 struct vnop_mnomap_args
/* {
2906 struct vnodeop_desc *a_desc;
2908 vfs_context_t a_context;
2911 vfs_context_t ctx
= ap
->a_context
;
2912 vnode_t vp
= ap
->a_vp
;
2913 nfsnode_t np
= VTONFS(vp
);
2914 struct nfsmount
*nmp
;
2915 struct nfs_open_file
*nofp
= NULL
;
2918 int is_mapped_flag
= 0;
2921 if (nfs_mount_gone(nmp
))
2924 nfs_node_lock_force(np
);
2925 if (np
->n_flag
& NISMAPPED
) {
2927 np
->n_flag
&= ~NISMAPPED
;
2929 nfs_node_unlock(np
);
2930 if (is_mapped_flag
) {
2931 lck_mtx_lock(&nmp
->nm_lock
);
2932 if (nmp
->nm_mappers
)
2935 NP(np
, "nfs_vnop_mnomap: removing mmap reference from mount, but mount has no files mmapped");
2936 lck_mtx_unlock(&nmp
->nm_lock
);
2939 /* flush buffers/ubc before we drop the open (in case it's our last open) */
2940 nfs_flush(np
, MNT_WAIT
, vfs_context_thread(ctx
), V_IGNORE_WRITEERR
);
2941 if (UBCINFOEXISTS(vp
) && (size
= ubc_getsize(vp
)))
2942 ubc_msync(vp
, 0, size
, NULL
, UBC_PUSHALL
| UBC_SYNC
);
2944 /* walk all open files and close all mmap opens */
2946 error
= nfs_mount_state_in_use_start(nmp
, NULL
);
2949 lck_mtx_lock(&np
->n_openlock
);
2950 TAILQ_FOREACH(nofp
, &np
->n_opens
, nof_link
) {
2951 if (!nofp
->nof_mmap_access
)
2953 lck_mtx_unlock(&np
->n_openlock
);
2954 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
2955 nfs_mount_state_in_use_end(nmp
, 0);
2956 error
= nfs4_reopen(nofp
, NULL
);
2961 error
= nfs_open_file_set_busy(nofp
, NULL
);
2963 lck_mtx_lock(&np
->n_openlock
);
2966 if (nofp
->nof_mmap_access
) {
2967 error
= nfs_close(np
, nofp
, nofp
->nof_mmap_access
, nofp
->nof_mmap_deny
, ctx
);
2968 if (!nfs_mount_state_error_should_restart(error
)) {
2969 if (error
) /* not a state-operation-restarting error, so just clear the access */
2970 NP(np
, "nfs_vnop_mnomap: close of mmap mode failed: %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2971 nofp
->nof_mmap_access
= nofp
->nof_mmap_deny
= 0;
2974 NP(np
, "nfs_vnop_mnomap: error %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2976 nfs_open_file_clear_busy(nofp
);
2977 nfs_mount_state_in_use_end(nmp
, error
);
2980 lck_mtx_unlock(&np
->n_openlock
);
2981 nfs_mount_state_in_use_end(nmp
, error
);
2986 * Search a node's lock owner list for the owner for this process.
2987 * If not found and "alloc" is set, then allocate a new one.
2989 struct nfs_lock_owner
*
2990 nfs_lock_owner_find(nfsnode_t np
, proc_t p
, int alloc
)
2992 pid_t pid
= proc_pid(p
);
2993 struct nfs_lock_owner
*nlop
, *newnlop
= NULL
;
2996 lck_mtx_lock(&np
->n_openlock
);
2997 TAILQ_FOREACH(nlop
, &np
->n_lock_owners
, nlo_link
) {
2998 if (nlop
->nlo_pid
!= pid
)
3000 if (timevalcmp(&nlop
->nlo_pid_start
, &p
->p_start
, ==))
3002 /* stale lock owner... reuse it if we can */
3003 if (nlop
->nlo_refcnt
) {
3004 TAILQ_REMOVE(&np
->n_lock_owners
, nlop
, nlo_link
);
3005 nlop
->nlo_flags
&= ~NFS_LOCK_OWNER_LINK
;
3006 lck_mtx_unlock(&np
->n_openlock
);
3009 nlop
->nlo_pid_start
= p
->p_start
;
3010 nlop
->nlo_seqid
= 0;
3011 nlop
->nlo_stategenid
= 0;
3015 if (!nlop
&& !newnlop
&& alloc
) {
3016 lck_mtx_unlock(&np
->n_openlock
);
3017 MALLOC(newnlop
, struct nfs_lock_owner
*, sizeof(struct nfs_lock_owner
), M_TEMP
, M_WAITOK
);
3020 bzero(newnlop
, sizeof(*newnlop
));
3021 lck_mtx_init(&newnlop
->nlo_lock
, nfs_open_grp
, LCK_ATTR_NULL
);
3022 newnlop
->nlo_pid
= pid
;
3023 newnlop
->nlo_pid_start
= p
->p_start
;
3024 newnlop
->nlo_name
= OSAddAtomic(1, &nfs_lock_owner_seqnum
);
3025 TAILQ_INIT(&newnlop
->nlo_locks
);
3028 if (!nlop
&& newnlop
) {
3029 newnlop
->nlo_flags
|= NFS_LOCK_OWNER_LINK
;
3030 TAILQ_INSERT_HEAD(&np
->n_lock_owners
, newnlop
, nlo_link
);
3033 lck_mtx_unlock(&np
->n_openlock
);
3035 if (newnlop
&& (nlop
!= newnlop
))
3036 nfs_lock_owner_destroy(newnlop
);
3039 nfs_lock_owner_ref(nlop
);
3045 * destroy a lock owner that's no longer needed
3048 nfs_lock_owner_destroy(struct nfs_lock_owner
*nlop
)
3050 if (nlop
->nlo_open_owner
) {
3051 nfs_open_owner_rele(nlop
->nlo_open_owner
);
3052 nlop
->nlo_open_owner
= NULL
;
3054 lck_mtx_destroy(&nlop
->nlo_lock
, nfs_open_grp
);
3059 * acquire a reference count on a lock owner
3062 nfs_lock_owner_ref(struct nfs_lock_owner
*nlop
)
3064 lck_mtx_lock(&nlop
->nlo_lock
);
3066 lck_mtx_unlock(&nlop
->nlo_lock
);
3070 * drop a reference count on a lock owner and destroy it if
3071 * it is no longer referenced and no longer on the mount's list.
3074 nfs_lock_owner_rele(struct nfs_lock_owner
*nlop
)
3076 lck_mtx_lock(&nlop
->nlo_lock
);
3077 if (nlop
->nlo_refcnt
< 1)
3078 panic("nfs_lock_owner_rele: no refcnt");
3080 if (!nlop
->nlo_refcnt
&& (nlop
->nlo_flags
& NFS_LOCK_OWNER_BUSY
))
3081 panic("nfs_lock_owner_rele: busy");
3082 /* XXX we may potentially want to clean up idle/unused lock owner structures */
3083 if (nlop
->nlo_refcnt
|| (nlop
->nlo_flags
& NFS_LOCK_OWNER_LINK
)) {
3084 lck_mtx_unlock(&nlop
->nlo_lock
);
3087 /* owner is no longer referenced or linked to mount, so destroy it */
3088 lck_mtx_unlock(&nlop
->nlo_lock
);
3089 nfs_lock_owner_destroy(nlop
);
3093 * Mark a lock owner as busy because we are about to
3094 * start an operation that uses and updates lock owner state.
3097 nfs_lock_owner_set_busy(struct nfs_lock_owner
*nlop
, thread_t thd
)
3099 struct nfsmount
*nmp
;
3100 struct timespec ts
= {2, 0};
3101 int error
= 0, slpflag
;
3103 nmp
= nlop
->nlo_open_owner
->noo_mount
;
3104 if (nfs_mount_gone(nmp
))
3106 slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
3108 lck_mtx_lock(&nlop
->nlo_lock
);
3109 while (nlop
->nlo_flags
& NFS_LOCK_OWNER_BUSY
) {
3110 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0)))
3112 nlop
->nlo_flags
|= NFS_LOCK_OWNER_WANT
;
3113 msleep(nlop
, &nlop
->nlo_lock
, slpflag
, "nfs_lock_owner_set_busy", &ts
);
3117 nlop
->nlo_flags
|= NFS_LOCK_OWNER_BUSY
;
3118 lck_mtx_unlock(&nlop
->nlo_lock
);
3124 * Clear the busy flag on a lock owner and wake up anyone waiting
3128 nfs_lock_owner_clear_busy(struct nfs_lock_owner
*nlop
)
3132 lck_mtx_lock(&nlop
->nlo_lock
);
3133 if (!(nlop
->nlo_flags
& NFS_LOCK_OWNER_BUSY
))
3134 panic("nfs_lock_owner_clear_busy");
3135 wanted
= (nlop
->nlo_flags
& NFS_LOCK_OWNER_WANT
);
3136 nlop
->nlo_flags
&= ~(NFS_LOCK_OWNER_BUSY
|NFS_LOCK_OWNER_WANT
);
3137 lck_mtx_unlock(&nlop
->nlo_lock
);
3143 * Insert a held lock into a lock owner's sorted list.
3144 * (flock locks are always inserted at the head the list)
3147 nfs_lock_owner_insert_held_lock(struct nfs_lock_owner
*nlop
, struct nfs_file_lock
*newnflp
)
3149 struct nfs_file_lock
*nflp
;
3151 /* insert new lock in lock owner's held lock list */
3152 lck_mtx_lock(&nlop
->nlo_lock
);
3153 if ((newnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_FLOCK
) {
3154 TAILQ_INSERT_HEAD(&nlop
->nlo_locks
, newnflp
, nfl_lolink
);
3156 TAILQ_FOREACH(nflp
, &nlop
->nlo_locks
, nfl_lolink
) {
3157 if (newnflp
->nfl_start
< nflp
->nfl_start
)
3161 TAILQ_INSERT_BEFORE(nflp
, newnflp
, nfl_lolink
);
3163 TAILQ_INSERT_TAIL(&nlop
->nlo_locks
, newnflp
, nfl_lolink
);
3165 lck_mtx_unlock(&nlop
->nlo_lock
);
3169 * Get a file lock structure for this lock owner.
3171 struct nfs_file_lock
*
3172 nfs_file_lock_alloc(struct nfs_lock_owner
*nlop
)
3174 struct nfs_file_lock
*nflp
= NULL
;
3176 lck_mtx_lock(&nlop
->nlo_lock
);
3177 if (!nlop
->nlo_alock
.nfl_owner
) {
3178 nflp
= &nlop
->nlo_alock
;
3179 nflp
->nfl_owner
= nlop
;
3181 lck_mtx_unlock(&nlop
->nlo_lock
);
3183 MALLOC(nflp
, struct nfs_file_lock
*, sizeof(struct nfs_file_lock
), M_TEMP
, M_WAITOK
);
3186 bzero(nflp
, sizeof(*nflp
));
3187 nflp
->nfl_flags
|= NFS_FILE_LOCK_ALLOC
;
3188 nflp
->nfl_owner
= nlop
;
3190 nfs_lock_owner_ref(nlop
);
3195 * destroy the given NFS file lock structure
3198 nfs_file_lock_destroy(struct nfs_file_lock
*nflp
)
3200 struct nfs_lock_owner
*nlop
= nflp
->nfl_owner
;
3202 if (nflp
->nfl_flags
& NFS_FILE_LOCK_ALLOC
) {
3203 nflp
->nfl_owner
= NULL
;
3206 lck_mtx_lock(&nlop
->nlo_lock
);
3207 bzero(nflp
, sizeof(*nflp
));
3208 lck_mtx_unlock(&nlop
->nlo_lock
);
3210 nfs_lock_owner_rele(nlop
);
3214 * Check if one file lock conflicts with another.
3215 * (nflp1 is the new lock. nflp2 is the existing lock.)
3218 nfs_file_lock_conflict(struct nfs_file_lock
*nflp1
, struct nfs_file_lock
*nflp2
, int *willsplit
)
3220 /* no conflict if lock is dead */
3221 if ((nflp1
->nfl_flags
& NFS_FILE_LOCK_DEAD
) || (nflp2
->nfl_flags
& NFS_FILE_LOCK_DEAD
))
3223 /* no conflict if it's ours - unless the lock style doesn't match */
3224 if ((nflp1
->nfl_owner
== nflp2
->nfl_owner
) &&
3225 ((nflp1
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == (nflp2
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
))) {
3226 if (willsplit
&& (nflp1
->nfl_type
!= nflp2
->nfl_type
) &&
3227 (nflp1
->nfl_start
> nflp2
->nfl_start
) &&
3228 (nflp1
->nfl_end
< nflp2
->nfl_end
))
3232 /* no conflict if ranges don't overlap */
3233 if ((nflp1
->nfl_start
> nflp2
->nfl_end
) || (nflp1
->nfl_end
< nflp2
->nfl_start
))
3235 /* no conflict if neither lock is exclusive */
3236 if ((nflp1
->nfl_type
!= F_WRLCK
) && (nflp2
->nfl_type
!= F_WRLCK
))
3243 * Send an NFSv4 LOCK RPC to the server.
3248 struct nfs_open_file
*nofp
,
3249 struct nfs_file_lock
*nflp
,
3255 struct nfs_lock_owner
*nlop
= nflp
->nfl_owner
;
3256 struct nfsmount
*nmp
;
3257 struct nfsm_chain nmreq
, nmrep
;
3260 int error
= 0, lockerror
= ENOENT
, newlocker
, numops
, status
;
3261 struct nfsreq_secinfo_args si
;
3264 if (nfs_mount_gone(nmp
))
3266 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
3269 newlocker
= (nlop
->nlo_stategenid
!= nmp
->nm_stategenid
);
3270 locktype
= (nflp
->nfl_flags
& NFS_FILE_LOCK_WAIT
) ?
3271 ((nflp
->nfl_type
== F_WRLCK
) ?
3272 NFS_LOCK_TYPE_WRITEW
:
3273 NFS_LOCK_TYPE_READW
) :
3274 ((nflp
->nfl_type
== F_WRLCK
) ?
3275 NFS_LOCK_TYPE_WRITE
:
3276 NFS_LOCK_TYPE_READ
);
3278 error
= nfs_open_file_set_busy(nofp
, thd
);
3281 error
= nfs_open_owner_set_busy(nofp
->nof_owner
, thd
);
3283 nfs_open_file_clear_busy(nofp
);
3286 if (!nlop
->nlo_open_owner
) {
3287 nfs_open_owner_ref(nofp
->nof_owner
);
3288 nlop
->nlo_open_owner
= nofp
->nof_owner
;
3291 error
= nfs_lock_owner_set_busy(nlop
, thd
);
3294 nfs_open_owner_clear_busy(nofp
->nof_owner
);
3295 nfs_open_file_clear_busy(nofp
);
3300 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
3301 nfsm_chain_null(&nmreq
);
3302 nfsm_chain_null(&nmrep
);
3304 // PUTFH, GETATTR, LOCK
3306 nfsm_chain_build_alloc_init(error
, &nmreq
, 33 * NFSX_UNSIGNED
);
3307 nfsm_chain_add_compound_header(error
, &nmreq
, "lock", nmp
->nm_minor_vers
, numops
);
3309 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
3310 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
3312 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
3313 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
3315 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOCK
);
3316 nfsm_chain_add_32(error
, &nmreq
, locktype
);
3317 nfsm_chain_add_32(error
, &nmreq
, reclaim
);
3318 nfsm_chain_add_64(error
, &nmreq
, nflp
->nfl_start
);
3319 nfsm_chain_add_64(error
, &nmreq
, NFS_LOCK_LENGTH(nflp
->nfl_start
, nflp
->nfl_end
));
3320 nfsm_chain_add_32(error
, &nmreq
, newlocker
);
3322 nfsm_chain_add_32(error
, &nmreq
, nofp
->nof_owner
->noo_seqid
);
3323 nfsm_chain_add_stateid(error
, &nmreq
, &nofp
->nof_stateid
);
3324 nfsm_chain_add_32(error
, &nmreq
, nlop
->nlo_seqid
);
3325 nfsm_chain_add_lock_owner4(error
, &nmreq
, nmp
, nlop
);
3327 nfsm_chain_add_stateid(error
, &nmreq
, &nlop
->nlo_stateid
);
3328 nfsm_chain_add_32(error
, &nmreq
, nlop
->nlo_seqid
);
3330 nfsm_chain_build_done(error
, &nmreq
);
3331 nfsm_assert(error
, (numops
== 0), EPROTO
);
3334 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, flags
|R_NOINTR
, &nmrep
, &xid
, &status
);
3336 if ((lockerror
= nfs_node_lock(np
)))
3338 nfsm_chain_skip_tag(error
, &nmrep
);
3339 nfsm_chain_get_32(error
, &nmrep
, numops
);
3340 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
3342 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
3343 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, &xid
);
3345 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOCK
);
3346 nfs_owner_seqid_increment(newlocker
? nofp
->nof_owner
: NULL
, nlop
, error
);
3347 nfsm_chain_get_stateid(error
, &nmrep
, &nlop
->nlo_stateid
);
3349 /* Update the lock owner's stategenid once it appears the server has state for it. */
3350 /* We determine this by noting the request was successful (we got a stateid). */
3351 if (newlocker
&& !error
)
3352 nlop
->nlo_stategenid
= nmp
->nm_stategenid
;
3355 nfs_node_unlock(np
);
3356 nfs_lock_owner_clear_busy(nlop
);
3358 nfs_open_owner_clear_busy(nofp
->nof_owner
);
3359 nfs_open_file_clear_busy(nofp
);
3361 nfsm_chain_cleanup(&nmreq
);
3362 nfsm_chain_cleanup(&nmrep
);
3367 * Send an NFSv4 LOCKU RPC to the server.
3372 struct nfs_lock_owner
*nlop
,
3380 struct nfsmount
*nmp
;
3381 struct nfsm_chain nmreq
, nmrep
;
3383 int error
= 0, lockerror
= ENOENT
, numops
, status
;
3384 struct nfsreq_secinfo_args si
;
3387 if (nfs_mount_gone(nmp
))
3389 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
3392 error
= nfs_lock_owner_set_busy(nlop
, NULL
);
3396 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
3397 nfsm_chain_null(&nmreq
);
3398 nfsm_chain_null(&nmrep
);
3400 // PUTFH, GETATTR, LOCKU
3402 nfsm_chain_build_alloc_init(error
, &nmreq
, 26 * NFSX_UNSIGNED
);
3403 nfsm_chain_add_compound_header(error
, &nmreq
, "unlock", nmp
->nm_minor_vers
, numops
);
3405 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
3406 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
3408 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
3409 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
3411 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOCKU
);
3412 nfsm_chain_add_32(error
, &nmreq
, (type
== F_WRLCK
) ? NFS_LOCK_TYPE_WRITE
: NFS_LOCK_TYPE_READ
);
3413 nfsm_chain_add_32(error
, &nmreq
, nlop
->nlo_seqid
);
3414 nfsm_chain_add_stateid(error
, &nmreq
, &nlop
->nlo_stateid
);
3415 nfsm_chain_add_64(error
, &nmreq
, start
);
3416 nfsm_chain_add_64(error
, &nmreq
, NFS_LOCK_LENGTH(start
, end
));
3417 nfsm_chain_build_done(error
, &nmreq
);
3418 nfsm_assert(error
, (numops
== 0), EPROTO
);
3421 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, flags
|R_NOINTR
, &nmrep
, &xid
, &status
);
3423 if ((lockerror
= nfs_node_lock(np
)))
3425 nfsm_chain_skip_tag(error
, &nmrep
);
3426 nfsm_chain_get_32(error
, &nmrep
, numops
);
3427 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
3429 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
3430 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, &xid
);
3432 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOCKU
);
3433 nfs_owner_seqid_increment(NULL
, nlop
, error
);
3434 nfsm_chain_get_stateid(error
, &nmrep
, &nlop
->nlo_stateid
);
3437 nfs_node_unlock(np
);
3438 nfs_lock_owner_clear_busy(nlop
);
3439 nfsm_chain_cleanup(&nmreq
);
3440 nfsm_chain_cleanup(&nmrep
);
3445 * Send an NFSv4 LOCKT RPC to the server.
3450 struct nfs_lock_owner
*nlop
,
3456 struct nfsmount
*nmp
;
3457 struct nfsm_chain nmreq
, nmrep
;
3458 uint64_t xid
, val64
= 0;
3460 int error
= 0, lockerror
, numops
, status
;
3461 struct nfsreq_secinfo_args si
;
3464 if (nfs_mount_gone(nmp
))
3466 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
3470 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
3471 nfsm_chain_null(&nmreq
);
3472 nfsm_chain_null(&nmrep
);
3474 // PUTFH, GETATTR, LOCKT
3476 nfsm_chain_build_alloc_init(error
, &nmreq
, 26 * NFSX_UNSIGNED
);
3477 nfsm_chain_add_compound_header(error
, &nmreq
, "locktest", nmp
->nm_minor_vers
, numops
);
3479 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
3480 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
3482 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
3483 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
3485 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOCKT
);
3486 nfsm_chain_add_32(error
, &nmreq
, (fl
->l_type
== F_WRLCK
) ? NFS_LOCK_TYPE_WRITE
: NFS_LOCK_TYPE_READ
);
3487 nfsm_chain_add_64(error
, &nmreq
, start
);
3488 nfsm_chain_add_64(error
, &nmreq
, NFS_LOCK_LENGTH(start
, end
));
3489 nfsm_chain_add_lock_owner4(error
, &nmreq
, nmp
, nlop
);
3490 nfsm_chain_build_done(error
, &nmreq
);
3491 nfsm_assert(error
, (numops
== 0), EPROTO
);
3494 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
3496 if ((lockerror
= nfs_node_lock(np
)))
3498 nfsm_chain_skip_tag(error
, &nmrep
);
3499 nfsm_chain_get_32(error
, &nmrep
, numops
);
3500 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
3502 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
3503 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, &xid
);
3505 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOCKT
);
3506 if (error
== NFSERR_DENIED
) {
3508 nfsm_chain_get_64(error
, &nmrep
, fl
->l_start
);
3509 nfsm_chain_get_64(error
, &nmrep
, val64
);
3510 fl
->l_len
= (val64
== UINT64_MAX
) ? 0 : val64
;
3511 nfsm_chain_get_32(error
, &nmrep
, val
);
3512 fl
->l_type
= (val
== NFS_LOCK_TYPE_WRITE
) ? F_WRLCK
: F_RDLCK
;
3514 fl
->l_whence
= SEEK_SET
;
3515 } else if (!error
) {
3516 fl
->l_type
= F_UNLCK
;
3520 nfs_node_unlock(np
);
3521 nfsm_chain_cleanup(&nmreq
);
3522 nfsm_chain_cleanup(&nmrep
);
3528 * Check for any conflicts with the given lock.
3530 * Checking for a lock doesn't require the file to be opened.
3531 * So we skip all the open owner, open file, lock owner work
3532 * and just check for a conflicting lock.
3535 nfs_advlock_getlock(
3537 struct nfs_lock_owner
*nlop
,
3543 struct nfsmount
*nmp
;
3544 struct nfs_file_lock
*nflp
;
3545 int error
= 0, answered
= 0;
3548 if (nfs_mount_gone(nmp
))
3552 if ((error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
))))
3555 lck_mtx_lock(&np
->n_openlock
);
3556 /* scan currently held locks for conflict */
3557 TAILQ_FOREACH(nflp
, &np
->n_locks
, nfl_link
) {
3558 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
|NFS_FILE_LOCK_DEAD
))
3560 if ((start
<= nflp
->nfl_end
) && (end
>= nflp
->nfl_start
) &&
3561 ((fl
->l_type
== F_WRLCK
) || (nflp
->nfl_type
== F_WRLCK
)))
3565 /* found a conflicting lock */
3566 fl
->l_type
= nflp
->nfl_type
;
3567 fl
->l_pid
= (nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_FLOCK
) ? -1 : nflp
->nfl_owner
->nlo_pid
;
3568 fl
->l_start
= nflp
->nfl_start
;
3569 fl
->l_len
= NFS_FLOCK_LENGTH(nflp
->nfl_start
, nflp
->nfl_end
);
3570 fl
->l_whence
= SEEK_SET
;
3572 } else if ((np
->n_openflags
& N_DELEG_WRITE
) && !(np
->n_openflags
& N_DELEG_RETURN
)) {
3574 * If we have a write delegation, we know there can't be other
3575 * locks on the server. So the answer is no conflicting lock found.
3577 fl
->l_type
= F_UNLCK
;
3580 lck_mtx_unlock(&np
->n_openlock
);
3582 nfs_mount_state_in_use_end(nmp
, 0);
3586 /* no conflict found locally, so ask the server */
3587 error
= nmp
->nm_funcs
->nf_getlock_rpc(np
, nlop
, fl
, start
, end
, ctx
);
3589 if (nfs_mount_state_in_use_end(nmp
, error
))
3595 * Acquire a file lock for the given range.
3597 * Add the lock (request) to the lock queue.
3598 * Scan the lock queue for any conflicting locks.
3599 * If a conflict is found, block or return an error.
3600 * Once end of queue is reached, send request to the server.
3601 * If the server grants the lock, scan the lock queue and
3602 * update any existing locks. Then (optionally) scan the
3603 * queue again to coalesce any locks adjacent to the new one.
3606 nfs_advlock_setlock(
3608 struct nfs_open_file
*nofp
,
3609 struct nfs_lock_owner
*nlop
,
3617 struct nfsmount
*nmp
;
3618 struct nfs_file_lock
*newnflp
, *nflp
, *nflp2
= NULL
, *nextnflp
, *flocknflp
= NULL
;
3619 struct nfs_file_lock
*coalnflp
;
3620 int error
= 0, error2
, willsplit
= 0, delay
, slpflag
, busy
= 0, inuse
= 0, restart
, inqueue
= 0;
3621 struct timespec ts
= {1, 0};
3624 if (nfs_mount_gone(nmp
))
3626 slpflag
= NMFLAG(nmp
, INTR
) ? PCATCH
: 0;
3628 if ((type
!= F_RDLCK
) && (type
!= F_WRLCK
))
3631 /* allocate a new lock */
3632 newnflp
= nfs_file_lock_alloc(nlop
);
3635 newnflp
->nfl_start
= start
;
3636 newnflp
->nfl_end
= end
;
3637 newnflp
->nfl_type
= type
;
3639 newnflp
->nfl_flags
|= NFS_FILE_LOCK_WAIT
;
3640 newnflp
->nfl_flags
|= style
;
3641 newnflp
->nfl_flags
|= NFS_FILE_LOCK_BLOCKED
;
3643 if ((style
== NFS_FILE_LOCK_STYLE_FLOCK
) && (type
== F_WRLCK
)) {
3645 * For exclusive flock-style locks, if we block waiting for the
3646 * lock, we need to first release any currently held shared
3647 * flock-style lock. So, the first thing we do is check if we
3648 * have a shared flock-style lock.
3650 nflp
= TAILQ_FIRST(&nlop
->nlo_locks
);
3651 if (nflp
&& ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != NFS_FILE_LOCK_STYLE_FLOCK
))
3653 if (nflp
&& (nflp
->nfl_type
!= F_RDLCK
))
3660 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
3664 if (np
->n_flag
& NREVOKE
) {
3666 nfs_mount_state_in_use_end(nmp
, 0);
3670 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
3671 nfs_mount_state_in_use_end(nmp
, 0);
3673 error
= nfs4_reopen(nofp
, vfs_context_thread(ctx
));
3679 lck_mtx_lock(&np
->n_openlock
);
3681 /* insert new lock at beginning of list */
3682 TAILQ_INSERT_HEAD(&np
->n_locks
, newnflp
, nfl_link
);
3686 /* scan current list of locks (held and pending) for conflicts */
3687 for (nflp
= TAILQ_NEXT(newnflp
, nfl_link
); nflp
; nflp
= nextnflp
) {
3688 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
3689 if (!nfs_file_lock_conflict(newnflp
, nflp
, &willsplit
))
3692 if (!(newnflp
->nfl_flags
& NFS_FILE_LOCK_WAIT
)) {
3696 /* Block until this lock is no longer held. */
3697 if (nflp
->nfl_blockcnt
== UINT_MAX
) {
3701 nflp
->nfl_blockcnt
++;
3704 /* release any currently held shared lock before sleeping */
3705 lck_mtx_unlock(&np
->n_openlock
);
3706 nfs_mount_state_in_use_end(nmp
, 0);
3708 error
= nfs_advlock_unlock(np
, nofp
, nlop
, 0, UINT64_MAX
, NFS_FILE_LOCK_STYLE_FLOCK
, ctx
);
3711 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
3713 lck_mtx_lock(&np
->n_openlock
);
3717 lck_mtx_lock(&np
->n_openlock
);
3718 /* no need to block/sleep if the conflict is gone */
3719 if (!nfs_file_lock_conflict(newnflp
, nflp
, NULL
))
3722 msleep(nflp
, &np
->n_openlock
, slpflag
, "nfs_advlock_setlock_blocked", &ts
);
3724 error
= nfs_sigintr(NFSTONMP(np
), NULL
, vfs_context_thread(ctx
), 0);
3725 if (!error
&& (nmp
->nm_state
& NFSSTA_RECOVER
)) {
3726 /* looks like we have a recover pending... restart */
3728 lck_mtx_unlock(&np
->n_openlock
);
3729 nfs_mount_state_in_use_end(nmp
, 0);
3731 lck_mtx_lock(&np
->n_openlock
);
3734 if (!error
&& (np
->n_flag
& NREVOKE
))
3736 } while (!error
&& nfs_file_lock_conflict(newnflp
, nflp
, NULL
));
3737 nflp
->nfl_blockcnt
--;
3738 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
) && !nflp
->nfl_blockcnt
) {
3739 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
3740 nfs_file_lock_destroy(nflp
);
3742 if (error
|| restart
)
3744 /* We have released n_openlock and we can't trust that nextnflp is still valid. */
3745 /* So, start this lock-scanning loop over from where it started. */
3746 nextnflp
= TAILQ_NEXT(newnflp
, nfl_link
);
3748 lck_mtx_unlock(&np
->n_openlock
);
3756 * It looks like this operation is splitting a lock.
3757 * We allocate a new lock now so we don't have to worry
3758 * about the allocation failing after we've updated some state.
3760 nflp2
= nfs_file_lock_alloc(nlop
);
3767 /* once scan for local conflicts is clear, send request to server */
3768 if ((error
= nfs_open_state_set_busy(np
, vfs_context_thread(ctx
))))
3773 /* do we have a delegation? (that we're not returning?) */
3774 if ((np
->n_openflags
& N_DELEG_MASK
) && !(np
->n_openflags
& N_DELEG_RETURN
)) {
3775 if (np
->n_openflags
& N_DELEG_WRITE
) {
3776 /* with a write delegation, just take the lock delegated */
3777 newnflp
->nfl_flags
|= NFS_FILE_LOCK_DELEGATED
;
3779 /* make sure the lock owner knows its open owner */
3780 if (!nlop
->nlo_open_owner
) {
3781 nfs_open_owner_ref(nofp
->nof_owner
);
3782 nlop
->nlo_open_owner
= nofp
->nof_owner
;
3787 * If we don't have any non-delegated opens but we do have
3788 * delegated opens, then we need to first claim the delegated
3789 * opens so that the lock request on the server can be associated
3790 * with an open it knows about.
3792 if ((!nofp
->nof_rw_drw
&& !nofp
->nof_w_drw
&& !nofp
->nof_r_drw
&&
3793 !nofp
->nof_rw_dw
&& !nofp
->nof_w_dw
&& !nofp
->nof_r_dw
&&
3794 !nofp
->nof_rw
&& !nofp
->nof_w
&& !nofp
->nof_r
) &&
3795 (nofp
->nof_d_rw_drw
|| nofp
->nof_d_w_drw
|| nofp
->nof_d_r_drw
||
3796 nofp
->nof_d_rw_dw
|| nofp
->nof_d_w_dw
|| nofp
->nof_d_r_dw
||
3797 nofp
->nof_d_rw
|| nofp
->nof_d_w
|| nofp
->nof_d_r
)) {
3798 error
= nfs4_claim_delegated_state_for_open_file(nofp
, 0);
3804 if (np
->n_flag
& NREVOKE
)
3807 error
= nmp
->nm_funcs
->nf_setlock_rpc(np
, nofp
, newnflp
, 0, 0, vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
3808 if (!error
|| ((error
!= NFSERR_DENIED
) && (error
!= NFSERR_GRACE
)))
3810 /* request was denied due to either conflict or grace period */
3811 if ((error
== NFSERR_DENIED
) && !(newnflp
->nfl_flags
& NFS_FILE_LOCK_WAIT
)) {
3816 /* release any currently held shared lock before sleeping */
3817 nfs_open_state_clear_busy(np
);
3819 nfs_mount_state_in_use_end(nmp
, 0);
3821 error2
= nfs_advlock_unlock(np
, nofp
, nlop
, 0, UINT64_MAX
, NFS_FILE_LOCK_STYLE_FLOCK
, ctx
);
3824 error2
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
3827 error2
= nfs_open_state_set_busy(np
, vfs_context_thread(ctx
));
3836 * Wait a little bit and send the request again.
3837 * Except for retries of blocked v2/v3 request where we've already waited a bit.
3839 if ((nmp
->nm_vers
>= NFS_VER4
) || (error
== NFSERR_GRACE
)) {
3840 if (error
== NFSERR_GRACE
)
3844 tsleep(newnflp
, slpflag
, "nfs_advlock_setlock_delay", delay
* (hz
/2));
3847 error
= nfs_sigintr(NFSTONMP(np
), NULL
, vfs_context_thread(ctx
), 0);
3848 if (!error
&& (nmp
->nm_state
& NFSSTA_RECOVER
)) {
3849 /* looks like we have a recover pending... restart */
3850 nfs_open_state_clear_busy(np
);
3852 nfs_mount_state_in_use_end(nmp
, 0);
3856 if (!error
&& (np
->n_flag
& NREVOKE
))
3861 if (nfs_mount_state_error_should_restart(error
)) {
3862 /* looks like we need to restart this operation */
3864 nfs_open_state_clear_busy(np
);
3868 nfs_mount_state_in_use_end(nmp
, error
);
3873 lck_mtx_lock(&np
->n_openlock
);
3874 newnflp
->nfl_flags
&= ~NFS_FILE_LOCK_BLOCKED
;
3876 newnflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
3877 if (newnflp
->nfl_blockcnt
) {
3878 /* wake up anyone blocked on this lock */
3881 /* remove newnflp from lock list and destroy */
3883 TAILQ_REMOVE(&np
->n_locks
, newnflp
, nfl_link
);
3884 nfs_file_lock_destroy(newnflp
);
3886 lck_mtx_unlock(&np
->n_openlock
);
3888 nfs_open_state_clear_busy(np
);
3890 nfs_mount_state_in_use_end(nmp
, error
);
3892 nfs_file_lock_destroy(nflp2
);
3896 /* server granted the lock */
3899 * Scan for locks to update.
3901 * Locks completely covered are killed.
3902 * At most two locks may need to be clipped.
3903 * It's possible that a single lock may need to be split.
3905 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
3906 if (nflp
== newnflp
)
3908 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
|NFS_FILE_LOCK_DEAD
))
3910 if (nflp
->nfl_owner
!= nlop
)
3912 if ((newnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != (nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
))
3914 if ((newnflp
->nfl_start
> nflp
->nfl_end
) || (newnflp
->nfl_end
< nflp
->nfl_start
))
3916 /* here's one to update */
3917 if ((newnflp
->nfl_start
<= nflp
->nfl_start
) && (newnflp
->nfl_end
>= nflp
->nfl_end
)) {
3918 /* The entire lock is being replaced. */
3919 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
3920 lck_mtx_lock(&nlop
->nlo_lock
);
3921 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
3922 lck_mtx_unlock(&nlop
->nlo_lock
);
3923 /* lock will be destroyed below, if no waiters */
3924 } else if ((newnflp
->nfl_start
> nflp
->nfl_start
) && (newnflp
->nfl_end
< nflp
->nfl_end
)) {
3925 /* We're replacing a range in the middle of a lock. */
3926 /* The current lock will be split into two locks. */
3927 /* Update locks and insert new lock after current lock. */
3928 nflp2
->nfl_flags
|= (nflp
->nfl_flags
& (NFS_FILE_LOCK_STYLE_MASK
|NFS_FILE_LOCK_DELEGATED
));
3929 nflp2
->nfl_type
= nflp
->nfl_type
;
3930 nflp2
->nfl_start
= newnflp
->nfl_end
+ 1;
3931 nflp2
->nfl_end
= nflp
->nfl_end
;
3932 nflp
->nfl_end
= newnflp
->nfl_start
- 1;
3933 TAILQ_INSERT_AFTER(&np
->n_locks
, nflp
, nflp2
, nfl_link
);
3934 nfs_lock_owner_insert_held_lock(nlop
, nflp2
);
3937 } else if (newnflp
->nfl_start
> nflp
->nfl_start
) {
3938 /* We're replacing the end of a lock. */
3939 nflp
->nfl_end
= newnflp
->nfl_start
- 1;
3940 } else if (newnflp
->nfl_end
< nflp
->nfl_end
) {
3941 /* We're replacing the start of a lock. */
3942 nflp
->nfl_start
= newnflp
->nfl_end
+ 1;
3944 if (nflp
->nfl_blockcnt
) {
3945 /* wake up anyone blocked on this lock */
3947 } else if (nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
) {
3948 /* remove nflp from lock list and destroy */
3949 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
3950 nfs_file_lock_destroy(nflp
);
3954 nfs_lock_owner_insert_held_lock(nlop
, newnflp
);
3957 * POSIX locks should be coalesced when possible.
3959 if ((style
== NFS_FILE_LOCK_STYLE_POSIX
) && (nofp
->nof_flags
& NFS_OPEN_FILE_POSIXLOCK
)) {
3961 * Walk through the lock queue and check each of our held locks with
3962 * the previous and next locks in the lock owner's "held lock list".
3963 * If the two locks can be coalesced, we merge the current lock into
3964 * the other (previous or next) lock. Merging this way makes sure that
3965 * lock ranges are always merged forward in the lock queue. This is
3966 * important because anyone blocked on the lock being "merged away"
3967 * will still need to block on that range and it will simply continue
3968 * checking locks that are further down the list.
3970 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
3971 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
|NFS_FILE_LOCK_DEAD
))
3973 if (nflp
->nfl_owner
!= nlop
)
3975 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != NFS_FILE_LOCK_STYLE_POSIX
)
3977 if (((coalnflp
= TAILQ_PREV(nflp
, nfs_file_lock_queue
, nfl_lolink
))) &&
3978 ((coalnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
) &&
3979 (coalnflp
->nfl_type
== nflp
->nfl_type
) &&
3980 (coalnflp
->nfl_end
== (nflp
->nfl_start
- 1))) {
3981 coalnflp
->nfl_end
= nflp
->nfl_end
;
3982 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
3983 lck_mtx_lock(&nlop
->nlo_lock
);
3984 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
3985 lck_mtx_unlock(&nlop
->nlo_lock
);
3986 } else if (((coalnflp
= TAILQ_NEXT(nflp
, nfl_lolink
))) &&
3987 ((coalnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
) &&
3988 (coalnflp
->nfl_type
== nflp
->nfl_type
) &&
3989 (coalnflp
->nfl_start
== (nflp
->nfl_end
+ 1))) {
3990 coalnflp
->nfl_start
= nflp
->nfl_start
;
3991 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
3992 lck_mtx_lock(&nlop
->nlo_lock
);
3993 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
3994 lck_mtx_unlock(&nlop
->nlo_lock
);
3996 if (!(nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
))
3998 if (nflp
->nfl_blockcnt
) {
3999 /* wake up anyone blocked on this lock */
4002 /* remove nflp from lock list and destroy */
4003 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
4004 nfs_file_lock_destroy(nflp
);
4009 lck_mtx_unlock(&np
->n_openlock
);
4010 nfs_open_state_clear_busy(np
);
4011 nfs_mount_state_in_use_end(nmp
, error
);
4014 nfs_file_lock_destroy(nflp2
);
4019 * Release all (same style) locks within the given range.
4024 struct nfs_open_file
*nofp
,
4025 struct nfs_lock_owner
*nlop
,
4031 struct nfsmount
*nmp
;
4032 struct nfs_file_lock
*nflp
, *nextnflp
, *newnflp
= NULL
;
4033 int error
= 0, willsplit
= 0, send_unlock_rpcs
= 1;
4036 if (nfs_mount_gone(nmp
))
4040 if ((error
= nfs_mount_state_in_use_start(nmp
, NULL
)))
4042 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
4043 nfs_mount_state_in_use_end(nmp
, 0);
4044 error
= nfs4_reopen(nofp
, NULL
);
4049 if ((error
= nfs_open_state_set_busy(np
, NULL
))) {
4050 nfs_mount_state_in_use_end(nmp
, error
);
4054 lck_mtx_lock(&np
->n_openlock
);
4055 if ((start
> 0) && (end
< UINT64_MAX
) && !willsplit
) {
4057 * We may need to allocate a new lock if an existing lock gets split.
4058 * So, we first scan the list to check for a split, and if there's
4059 * going to be one, we'll allocate one now.
4061 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
4062 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
|NFS_FILE_LOCK_DEAD
))
4064 if (nflp
->nfl_owner
!= nlop
)
4066 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != style
)
4068 if ((start
> nflp
->nfl_end
) || (end
< nflp
->nfl_start
))
4070 if ((start
> nflp
->nfl_start
) && (end
< nflp
->nfl_end
)) {
4076 lck_mtx_unlock(&np
->n_openlock
);
4077 nfs_open_state_clear_busy(np
);
4078 nfs_mount_state_in_use_end(nmp
, 0);
4079 newnflp
= nfs_file_lock_alloc(nlop
);
4087 * Free all of our locks in the given range.
4089 * Note that this process requires sending requests to the server.
4090 * Because of this, we will release the n_openlock while performing
4091 * the unlock RPCs. The N_OPENBUSY state keeps the state of *held*
4092 * locks from changing underneath us. However, other entries in the
4093 * list may be removed. So we need to be careful walking the list.
4097 * Don't unlock ranges that are held by other-style locks.
4098 * If style is posix, don't send any unlock rpcs if flock is held.
4099 * If we unlock an flock, don't send unlock rpcs for any posix-style
4100 * ranges held - instead send unlocks for the ranges not held.
4102 if ((style
== NFS_FILE_LOCK_STYLE_POSIX
) &&
4103 ((nflp
= TAILQ_FIRST(&nlop
->nlo_locks
))) &&
4104 ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_FLOCK
))
4105 send_unlock_rpcs
= 0;
4106 if ((style
== NFS_FILE_LOCK_STYLE_FLOCK
) &&
4107 ((nflp
= TAILQ_FIRST(&nlop
->nlo_locks
))) &&
4108 ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_FLOCK
) &&
4109 ((nflp
= TAILQ_NEXT(nflp
, nfl_lolink
))) &&
4110 ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
)) {
4112 int type
= TAILQ_FIRST(&nlop
->nlo_locks
)->nfl_type
;
4113 int delegated
= (TAILQ_FIRST(&nlop
->nlo_locks
)->nfl_flags
& NFS_FILE_LOCK_DELEGATED
);
4114 while (!delegated
&& nflp
) {
4115 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
) {
4116 /* unlock the range preceding this lock */
4117 lck_mtx_unlock(&np
->n_openlock
);
4118 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, type
, s
, nflp
->nfl_start
-1, 0,
4119 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4120 if (nfs_mount_state_error_should_restart(error
)) {
4121 nfs_open_state_clear_busy(np
);
4122 nfs_mount_state_in_use_end(nmp
, error
);
4125 lck_mtx_lock(&np
->n_openlock
);
4128 s
= nflp
->nfl_end
+1;
4130 nflp
= TAILQ_NEXT(nflp
, nfl_lolink
);
4133 lck_mtx_unlock(&np
->n_openlock
);
4134 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, type
, s
, end
, 0,
4135 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4136 if (nfs_mount_state_error_should_restart(error
)) {
4137 nfs_open_state_clear_busy(np
);
4138 nfs_mount_state_in_use_end(nmp
, error
);
4141 lck_mtx_lock(&np
->n_openlock
);
4145 send_unlock_rpcs
= 0;
4148 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
4149 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
|NFS_FILE_LOCK_DEAD
))
4151 if (nflp
->nfl_owner
!= nlop
)
4153 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != style
)
4155 if ((start
> nflp
->nfl_end
) || (end
< nflp
->nfl_start
))
4157 /* here's one to unlock */
4158 if ((start
<= nflp
->nfl_start
) && (end
>= nflp
->nfl_end
)) {
4159 /* The entire lock is being unlocked. */
4160 if (send_unlock_rpcs
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
4161 lck_mtx_unlock(&np
->n_openlock
);
4162 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, nflp
->nfl_type
, nflp
->nfl_start
, nflp
->nfl_end
, 0,
4163 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4164 if (nfs_mount_state_error_should_restart(error
)) {
4165 nfs_open_state_clear_busy(np
);
4166 nfs_mount_state_in_use_end(nmp
, error
);
4169 lck_mtx_lock(&np
->n_openlock
);
4171 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
4174 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
4175 lck_mtx_lock(&nlop
->nlo_lock
);
4176 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
4177 lck_mtx_unlock(&nlop
->nlo_lock
);
4178 /* lock will be destroyed below, if no waiters */
4179 } else if ((start
> nflp
->nfl_start
) && (end
< nflp
->nfl_end
)) {
4180 /* We're unlocking a range in the middle of a lock. */
4181 /* The current lock will be split into two locks. */
4182 if (send_unlock_rpcs
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
4183 lck_mtx_unlock(&np
->n_openlock
);
4184 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, nflp
->nfl_type
, start
, end
, 0,
4185 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4186 if (nfs_mount_state_error_should_restart(error
)) {
4187 nfs_open_state_clear_busy(np
);
4188 nfs_mount_state_in_use_end(nmp
, error
);
4191 lck_mtx_lock(&np
->n_openlock
);
4195 /* update locks and insert new lock after current lock */
4196 newnflp
->nfl_flags
|= (nflp
->nfl_flags
& (NFS_FILE_LOCK_STYLE_MASK
|NFS_FILE_LOCK_DELEGATED
));
4197 newnflp
->nfl_type
= nflp
->nfl_type
;
4198 newnflp
->nfl_start
= end
+ 1;
4199 newnflp
->nfl_end
= nflp
->nfl_end
;
4200 nflp
->nfl_end
= start
- 1;
4201 TAILQ_INSERT_AFTER(&np
->n_locks
, nflp
, newnflp
, nfl_link
);
4202 nfs_lock_owner_insert_held_lock(nlop
, newnflp
);
4205 } else if (start
> nflp
->nfl_start
) {
4206 /* We're unlocking the end of a lock. */
4207 if (send_unlock_rpcs
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
4208 lck_mtx_unlock(&np
->n_openlock
);
4209 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, nflp
->nfl_type
, start
, nflp
->nfl_end
, 0,
4210 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4211 if (nfs_mount_state_error_should_restart(error
)) {
4212 nfs_open_state_clear_busy(np
);
4213 nfs_mount_state_in_use_end(nmp
, error
);
4216 lck_mtx_lock(&np
->n_openlock
);
4218 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
4221 nflp
->nfl_end
= start
- 1;
4222 } else if (end
< nflp
->nfl_end
) {
4223 /* We're unlocking the start of a lock. */
4224 if (send_unlock_rpcs
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
4225 lck_mtx_unlock(&np
->n_openlock
);
4226 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, nflp
->nfl_type
, nflp
->nfl_start
, end
, 0,
4227 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4228 if (nfs_mount_state_error_should_restart(error
)) {
4229 nfs_open_state_clear_busy(np
);
4230 nfs_mount_state_in_use_end(nmp
, error
);
4233 lck_mtx_lock(&np
->n_openlock
);
4235 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
4238 nflp
->nfl_start
= end
+ 1;
4240 if (nflp
->nfl_blockcnt
) {
4241 /* wake up anyone blocked on this lock */
4243 } else if (nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
) {
4244 /* remove nflp from lock list and destroy */
4245 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
4246 nfs_file_lock_destroy(nflp
);
4250 lck_mtx_unlock(&np
->n_openlock
);
4251 nfs_open_state_clear_busy(np
);
4252 nfs_mount_state_in_use_end(nmp
, 0);
4255 nfs_file_lock_destroy(newnflp
);
4260 * NFSv4 advisory file locking
4264 struct vnop_advlock_args
/* {
4265 struct vnodeop_desc *a_desc;
4271 vfs_context_t a_context;
4274 vnode_t vp
= ap
->a_vp
;
4275 nfsnode_t np
= VTONFS(ap
->a_vp
);
4276 struct flock
*fl
= ap
->a_fl
;
4278 int flags
= ap
->a_flags
;
4279 vfs_context_t ctx
= ap
->a_context
;
4280 struct nfsmount
*nmp
;
4281 struct nfs_open_owner
*noop
= NULL
;
4282 struct nfs_open_file
*nofp
= NULL
;
4283 struct nfs_lock_owner
*nlop
= NULL
;
4285 uint64_t start
, end
;
4286 int error
= 0, modified
, style
;
4288 #define OFF_MAX QUAD_MAX
4290 nmp
= VTONMP(ap
->a_vp
);
4291 if (nfs_mount_gone(nmp
))
4293 lck_mtx_lock(&nmp
->nm_lock
);
4294 if ((nmp
->nm_vers
<= NFS_VER3
) && (nmp
->nm_lockmode
== NFS_LOCK_MODE_DISABLED
)) {
4295 lck_mtx_unlock(&nmp
->nm_lock
);
4298 lck_mtx_unlock(&nmp
->nm_lock
);
4300 if (np
->n_flag
& NREVOKE
)
4302 vtype
= vnode_vtype(ap
->a_vp
);
4303 if (vtype
== VDIR
) /* ignore lock requests on directories */
4305 if (vtype
!= VREG
) /* anything other than regular files is invalid */
4308 /* Convert the flock structure into a start and end. */
4309 switch (fl
->l_whence
) {
4313 * Caller is responsible for adding any necessary offset
4314 * to fl->l_start when SEEK_CUR is used.
4316 lstart
= fl
->l_start
;
4319 /* need to flush, and refetch attributes to make */
4320 /* sure we have the correct end of file offset */
4321 if ((error
= nfs_node_lock(np
)))
4323 modified
= (np
->n_flag
& NMODIFIED
);
4324 nfs_node_unlock(np
);
4325 if (modified
&& ((error
= nfs_vinvalbuf(vp
, V_SAVE
, ctx
, 1))))
4327 if ((error
= nfs_getattr(np
, NULL
, ctx
, NGA_UNCACHED
)))
4329 nfs_data_lock(np
, NFS_DATA_LOCK_SHARED
);
4330 if ((np
->n_size
> OFF_MAX
) ||
4331 ((fl
->l_start
> 0) && (np
->n_size
> (u_quad_t
)(OFF_MAX
- fl
->l_start
))))
4333 lstart
= np
->n_size
+ fl
->l_start
;
4334 nfs_data_unlock(np
);
4344 if (fl
->l_len
== 0) {
4346 } else if (fl
->l_len
> 0) {
4347 if ((fl
->l_len
- 1) > (OFF_MAX
- lstart
))
4349 end
= start
- 1 + fl
->l_len
;
4350 } else { /* l_len is negative */
4351 if ((lstart
+ fl
->l_len
) < 0)
4356 if ((nmp
->nm_vers
== NFS_VER2
) && ((start
> INT32_MAX
) || (fl
->l_len
&& (end
> INT32_MAX
))))
4359 style
= (flags
& F_FLOCK
) ? NFS_FILE_LOCK_STYLE_FLOCK
: NFS_FILE_LOCK_STYLE_POSIX
;
4360 if ((style
== NFS_FILE_LOCK_STYLE_FLOCK
) && ((start
!= 0) || (end
!= UINT64_MAX
)))
4363 /* find the lock owner, alloc if not unlock */
4364 nlop
= nfs_lock_owner_find(np
, vfs_context_proc(ctx
), (op
!= F_UNLCK
));
4366 error
= (op
== F_UNLCK
) ? 0 : ENOMEM
;
4368 NP(np
, "nfs_vnop_advlock: no lock owner, error %d", error
);
4372 if (op
== F_GETLK
) {
4373 error
= nfs_advlock_getlock(np
, nlop
, fl
, start
, end
, ctx
);
4375 /* find the open owner */
4376 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 0);
4378 NP(np
, "nfs_vnop_advlock: no open owner %d", kauth_cred_getuid(vfs_context_ucred(ctx
)));
4382 /* find the open file */
4384 error
= nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 0);
4387 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
4388 NP(np
, "nfs_vnop_advlock: LOST %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
4391 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
4392 error
= nfs4_reopen(nofp
, ((op
== F_UNLCK
) ? NULL
: vfs_context_thread(ctx
)));
4398 NP(np
, "nfs_vnop_advlock: no open file %d, %d", error
, kauth_cred_getuid(noop
->noo_cred
));
4401 if (op
== F_UNLCK
) {
4402 error
= nfs_advlock_unlock(np
, nofp
, nlop
, start
, end
, style
, ctx
);
4403 } else if ((op
== F_SETLK
) || (op
== F_SETLKW
)) {
4404 if ((op
== F_SETLK
) && (flags
& F_WAIT
))
4406 error
= nfs_advlock_setlock(np
, nofp
, nlop
, op
, start
, end
, style
, fl
->l_type
, ctx
);
4408 /* not getlk, unlock or lock? */
4415 nfs_lock_owner_rele(nlop
);
4417 nfs_open_owner_rele(noop
);
4422 * Check if an open owner holds any locks on a file.
4425 nfs_check_for_locks(struct nfs_open_owner
*noop
, struct nfs_open_file
*nofp
)
4427 struct nfs_lock_owner
*nlop
;
4429 TAILQ_FOREACH(nlop
, &nofp
->nof_np
->n_lock_owners
, nlo_link
) {
4430 if (nlop
->nlo_open_owner
!= noop
)
4432 if (!TAILQ_EMPTY(&nlop
->nlo_locks
))
4435 return (nlop
? 1 : 0);
4439 * Reopen simple (no deny, no locks) open state that was lost.
4442 nfs4_reopen(struct nfs_open_file
*nofp
, thread_t thd
)
4444 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
4445 struct nfsmount
*nmp
= NFSTONMP(nofp
->nof_np
);
4446 nfsnode_t np
= nofp
->nof_np
;
4447 vnode_t vp
= NFSTOV(np
);
4449 struct componentname cn
;
4450 const char *vname
= NULL
;
4451 const char *name
= NULL
;
4453 char smallname
[128];
4454 char *filename
= NULL
;
4455 int error
= 0, done
= 0, slpflag
= NMFLAG(nmp
, INTR
) ? PCATCH
: 0;
4456 struct timespec ts
= { 1, 0 };
4458 lck_mtx_lock(&nofp
->nof_lock
);
4459 while (nofp
->nof_flags
& NFS_OPEN_FILE_REOPENING
) {
4460 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0)))
4462 msleep(&nofp
->nof_flags
, &nofp
->nof_lock
, slpflag
|(PZERO
-1), "nfsreopenwait", &ts
);
4465 if (error
|| !(nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
4466 lck_mtx_unlock(&nofp
->nof_lock
);
4469 nofp
->nof_flags
|= NFS_OPEN_FILE_REOPENING
;
4470 lck_mtx_unlock(&nofp
->nof_lock
);
4472 nfs_node_lock_force(np
);
4473 if ((vnode_vtype(vp
) != VDIR
) && np
->n_sillyrename
) {
4475 * The node's been sillyrenamed, so we need to use
4476 * the sillyrename directory/name to do the open.
4478 struct nfs_sillyrename
*nsp
= np
->n_sillyrename
;
4479 dvp
= NFSTOV(nsp
->nsr_dnp
);
4480 if ((error
= vnode_get(dvp
))) {
4481 nfs_node_unlock(np
);
4484 name
= nsp
->nsr_name
;
4487 * [sigh] We can't trust VFS to get the parent right for named
4488 * attribute nodes. (It likes to reparent the nodes after we've
4489 * created them.) Luckily we can probably get the right parent
4490 * from the n_parent we have stashed away.
4492 if ((np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) &&
4493 (((dvp
= np
->n_parent
)) && (error
= vnode_get(dvp
))))
4496 dvp
= vnode_getparent(vp
);
4497 vname
= vnode_getname(vp
);
4498 if (!dvp
|| !vname
) {
4501 nfs_node_unlock(np
);
4506 filename
= &smallname
[0];
4507 namelen
= snprintf(filename
, sizeof(smallname
), "%s", name
);
4508 if (namelen
>= sizeof(smallname
)) {
4509 MALLOC(filename
, char *, namelen
+1, M_TEMP
, M_WAITOK
);
4514 snprintf(filename
, namelen
+1, "%s", name
);
4516 nfs_node_unlock(np
);
4517 bzero(&cn
, sizeof(cn
));
4518 cn
.cn_nameptr
= filename
;
4519 cn
.cn_namelen
= namelen
;
4523 if ((error
= nfs_mount_state_in_use_start(nmp
, thd
)))
4527 error
= nfs4_open_reopen_rpc(nofp
, thd
, noop
->noo_cred
, &cn
, dvp
, &vp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_NONE
);
4528 if (!error
&& nofp
->nof_w
)
4529 error
= nfs4_open_reopen_rpc(nofp
, thd
, noop
->noo_cred
, &cn
, dvp
, &vp
, NFS_OPEN_SHARE_ACCESS_WRITE
, NFS_OPEN_SHARE_DENY_NONE
);
4530 if (!error
&& nofp
->nof_r
)
4531 error
= nfs4_open_reopen_rpc(nofp
, thd
, noop
->noo_cred
, &cn
, dvp
, &vp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
);
4533 if (nfs_mount_state_in_use_end(nmp
, error
)) {
4534 if (error
== NFSERR_GRACE
)
4536 printf("nfs4_reopen: RPC failed, error %d, lost %d, %s\n", error
,
4537 (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) ? 1 : 0, name
? name
: "???");
4543 if (error
&& (error
!= EINTR
) && (error
!= ERESTART
))
4544 nfs_revoke_open_state_for_node(np
);
4545 lck_mtx_lock(&nofp
->nof_lock
);
4546 nofp
->nof_flags
&= ~NFS_OPEN_FILE_REOPENING
;
4548 nofp
->nof_flags
&= ~NFS_OPEN_FILE_REOPEN
;
4550 printf("nfs4_reopen: failed, error %d, lost %d, %s\n", error
,
4551 (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) ? 1 : 0, name
? name
: "???");
4552 lck_mtx_unlock(&nofp
->nof_lock
);
4553 if (filename
&& (filename
!= &smallname
[0]))
4554 FREE(filename
, M_TEMP
);
4556 vnode_putname(vname
);
4563 * Send a normal OPEN RPC to open/create a file.
4567 struct nfs_open_file
*nofp
,
4569 struct componentname
*cnp
,
4570 struct vnode_attr
*vap
,
4577 return (nfs4_open_rpc_internal(nofp
, ctx
, vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
4578 cnp
, vap
, dvp
, vpp
, create
, share_access
, share_deny
));
4582 * Send an OPEN RPC to reopen a file.
4585 nfs4_open_reopen_rpc(
4586 struct nfs_open_file
*nofp
,
4589 struct componentname
*cnp
,
4595 return (nfs4_open_rpc_internal(nofp
, NULL
, thd
, cred
, cnp
, NULL
, dvp
, vpp
, NFS_OPEN_NOCREATE
, share_access
, share_deny
));
4599 * Send an OPEN_CONFIRM RPC to confirm an OPEN.
4602 nfs4_open_confirm_rpc(
4603 struct nfsmount
*nmp
,
4607 struct nfs_open_owner
*noop
,
4611 struct nfs_vattr
*nvap
,
4614 struct nfsm_chain nmreq
, nmrep
;
4615 int error
= 0, status
, numops
;
4616 struct nfsreq_secinfo_args si
;
4618 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
4619 nfsm_chain_null(&nmreq
);
4620 nfsm_chain_null(&nmrep
);
4622 // PUTFH, OPEN_CONFIRM, GETATTR
4624 nfsm_chain_build_alloc_init(error
, &nmreq
, 23 * NFSX_UNSIGNED
);
4625 nfsm_chain_add_compound_header(error
, &nmreq
, "open_confirm", nmp
->nm_minor_vers
, numops
);
4627 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
4628 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, fhp
, fhlen
);
4630 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN_CONFIRM
);
4631 nfsm_chain_add_stateid(error
, &nmreq
, sid
);
4632 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
4634 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
4635 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
4636 nfsm_chain_build_done(error
, &nmreq
);
4637 nfsm_assert(error
, (numops
== 0), EPROTO
);
4639 error
= nfs_request2(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, R_NOINTR
, &nmrep
, xidp
, &status
);
4641 nfsm_chain_skip_tag(error
, &nmrep
);
4642 nfsm_chain_get_32(error
, &nmrep
, numops
);
4643 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
4645 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN_CONFIRM
);
4646 nfs_owner_seqid_increment(noop
, NULL
, error
);
4647 nfsm_chain_get_stateid(error
, &nmrep
, sid
);
4648 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
4650 error
= nfs4_parsefattr(&nmrep
, NULL
, nvap
, NULL
, NULL
, NULL
);
4652 nfsm_chain_cleanup(&nmreq
);
4653 nfsm_chain_cleanup(&nmrep
);
4658 * common OPEN RPC code
4660 * If create is set, ctx must be passed in.
4661 * Returns a node on success if no node passed in.
4664 nfs4_open_rpc_internal(
4665 struct nfs_open_file
*nofp
,
4669 struct componentname
*cnp
,
4670 struct vnode_attr
*vap
,
4677 struct nfsmount
*nmp
;
4678 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
4679 struct nfs_vattr nvattr
;
4680 int error
= 0, open_error
= EIO
, lockerror
= ENOENT
, busyerror
= ENOENT
, status
;
4681 int nfsvers
, namedattrs
, numops
, exclusive
= 0, gotuid
, gotgid
;
4682 u_int64_t xid
, savedxid
= 0;
4683 nfsnode_t dnp
= VTONFS(dvp
);
4684 nfsnode_t np
, newnp
= NULL
;
4685 vnode_t newvp
= NULL
;
4686 struct nfsm_chain nmreq
, nmrep
;
4687 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
4688 uint32_t rflags
, delegation
, recall
;
4689 struct nfs_stateid stateid
, dstateid
, *sid
;
4691 struct nfsreq rq
, *req
= &rq
;
4692 struct nfs_dulookup dul
;
4694 uint32_t ace_type
, ace_flags
, ace_mask
, len
, slen
;
4695 struct kauth_ace ace
;
4696 struct nfsreq_secinfo_args si
;
4702 if (nfs_mount_gone(nmp
))
4704 nfsvers
= nmp
->nm_vers
;
4705 namedattrs
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
);
4706 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
4709 np
= *vpp
? VTONFS(*vpp
) : NULL
;
4710 if (create
&& vap
) {
4711 exclusive
= (vap
->va_vaflags
& VA_EXCLUSIVE
);
4712 nfs_avoid_needless_id_setting_on_create(dnp
, vap
, ctx
);
4713 gotuid
= VATTR_IS_ACTIVE(vap
, va_uid
);
4714 gotgid
= VATTR_IS_ACTIVE(vap
, va_gid
);
4715 if (exclusive
&& (!VATTR_IS_ACTIVE(vap
, va_access_time
) || !VATTR_IS_ACTIVE(vap
, va_modify_time
)))
4716 vap
->va_vaflags
|= VA_UTIMES_NULL
;
4718 exclusive
= gotuid
= gotgid
= 0;
4721 sid
= &nofp
->nof_stateid
;
4723 stateid
.seqid
= stateid
.other
[0] = stateid
.other
[1] = stateid
.other
[2] = 0;
4727 if ((error
= nfs_open_owner_set_busy(noop
, thd
)))
4730 rflags
= delegation
= recall
= 0;
4733 slen
= sizeof(sbuf
);
4734 NVATTR_INIT(&nvattr
);
4735 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, cnp
->cn_nameptr
, cnp
->cn_namelen
);
4737 nfsm_chain_null(&nmreq
);
4738 nfsm_chain_null(&nmrep
);
4740 // PUTFH, SAVEFH, OPEN(CREATE?), GETATTR(FH), RESTOREFH, GETATTR
4742 nfsm_chain_build_alloc_init(error
, &nmreq
, 53 * NFSX_UNSIGNED
+ cnp
->cn_namelen
);
4743 nfsm_chain_add_compound_header(error
, &nmreq
, create
? "create" : "open", nmp
->nm_minor_vers
, numops
);
4745 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
4746 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
4748 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
4750 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
4751 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
4752 nfsm_chain_add_32(error
, &nmreq
, share_access
);
4753 nfsm_chain_add_32(error
, &nmreq
, share_deny
);
4754 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
);
4755 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
4756 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
));
4757 nfsm_chain_add_32(error
, &nmreq
, create
);
4760 static uint32_t create_verf
; // XXX need a better verifier
4762 nfsm_chain_add_32(error
, &nmreq
, NFS_CREATE_EXCLUSIVE
);
4763 /* insert 64 bit verifier */
4764 nfsm_chain_add_32(error
, &nmreq
, create_verf
);
4765 nfsm_chain_add_32(error
, &nmreq
, create_verf
);
4767 nfsm_chain_add_32(error
, &nmreq
, NFS_CREATE_UNCHECKED
);
4768 nfsm_chain_add_fattr4(error
, &nmreq
, vap
, nmp
);
4771 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_NULL
);
4772 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
4774 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
4775 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
4776 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
4777 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
4779 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
4781 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
4782 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
4783 nfsm_chain_build_done(error
, &nmreq
);
4784 nfsm_assert(error
, (numops
== 0), EPROTO
);
4786 error
= busyerror
= nfs_node_set_busy(dnp
, thd
);
4789 if (create
&& !namedattrs
)
4790 nfs_dulookup_init(&dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
);
4792 error
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, R_NOINTR
, NULL
, &req
);
4794 if (create
&& !namedattrs
)
4795 nfs_dulookup_start(&dul
, dnp
, ctx
);
4796 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
4800 if (create
&& !namedattrs
)
4801 nfs_dulookup_finish(&dul
, dnp
, ctx
);
4803 if ((lockerror
= nfs_node_lock(dnp
)))
4805 nfsm_chain_skip_tag(error
, &nmrep
);
4806 nfsm_chain_get_32(error
, &nmrep
, numops
);
4807 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
4808 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
4810 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
4811 nfs_owner_seqid_increment(noop
, NULL
, error
);
4812 nfsm_chain_get_stateid(error
, &nmrep
, sid
);
4813 nfsm_chain_check_change_info(error
, &nmrep
, dnp
);
4814 nfsm_chain_get_32(error
, &nmrep
, rflags
);
4815 bmlen
= NFS_ATTR_BITMAP_LEN
;
4816 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
4817 nfsm_chain_get_32(error
, &nmrep
, delegation
);
4819 switch (delegation
) {
4820 case NFS_OPEN_DELEGATE_NONE
:
4822 case NFS_OPEN_DELEGATE_READ
:
4823 case NFS_OPEN_DELEGATE_WRITE
:
4824 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
4825 nfsm_chain_get_32(error
, &nmrep
, recall
);
4826 if (delegation
== NFS_OPEN_DELEGATE_WRITE
) // space (skip) XXX
4827 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
4828 /* if we have any trouble accepting the ACE, just invalidate it */
4829 ace_type
= ace_flags
= ace_mask
= len
= 0;
4830 nfsm_chain_get_32(error
, &nmrep
, ace_type
);
4831 nfsm_chain_get_32(error
, &nmrep
, ace_flags
);
4832 nfsm_chain_get_32(error
, &nmrep
, ace_mask
);
4833 nfsm_chain_get_32(error
, &nmrep
, len
);
4834 ace
.ace_flags
= nfs4_ace_nfstype_to_vfstype(ace_type
, &error
);
4835 ace
.ace_flags
|= nfs4_ace_nfsflags_to_vfsflags(ace_flags
);
4836 ace
.ace_rights
= nfs4_ace_nfsmask_to_vfsrights(ace_mask
);
4837 if (!error
&& (len
>= slen
)) {
4838 MALLOC(s
, char*, len
+1, M_TEMP
, M_WAITOK
);
4845 nfsm_chain_get_opaque(error
, &nmrep
, len
, s
);
4847 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(len
));
4850 if (nfs4_id2guid(s
, &ace
.ace_applicable
, (ace_flags
& NFS_ACE_IDENTIFIER_GROUP
)))
4855 if (s
&& (s
!= sbuf
))
4862 /* At this point if we have no error, the object was created/opened. */
4865 if (create
&& vap
&& !exclusive
)
4866 nfs_vattr_set_supported(bitmap
, vap
);
4867 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
4869 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
4871 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
4872 printf("nfs: open/create didn't return filehandle? %s\n", cnp
->cn_nameptr
);
4876 if (!create
&& np
&& !NFS_CMPFH(np
, fh
.fh_data
, fh
.fh_len
)) {
4877 // XXX for the open case, what if fh doesn't match the vnode we think we're opening?
4878 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
4879 if (!(np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
))
4880 NP(np
, "nfs4_open_rpc: warning: file handle mismatch");
4882 /* directory attributes: if we don't get them, make sure to invalidate */
4883 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
4884 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
4885 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
4887 NATTRINVALIDATE(dnp
);
4890 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
)
4891 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
4893 if (rflags
& NFS_OPEN_RESULT_CONFIRM
) {
4894 nfs_node_unlock(dnp
);
4896 NVATTR_CLEANUP(&nvattr
);
4897 error
= nfs4_open_confirm_rpc(nmp
, dnp
, fh
.fh_data
, fh
.fh_len
, noop
, sid
, thd
, cred
, &nvattr
, &xid
);
4900 if ((lockerror
= nfs_node_lock(dnp
)))
4905 nfsm_chain_cleanup(&nmreq
);
4906 nfsm_chain_cleanup(&nmrep
);
4908 if (!lockerror
&& create
) {
4909 if (!open_error
&& (dnp
->n_flag
& NNEGNCENTRIES
)) {
4910 dnp
->n_flag
&= ~NNEGNCENTRIES
;
4911 cache_purge_negatives(dvp
);
4913 dnp
->n_flag
|= NMODIFIED
;
4914 nfs_node_unlock(dnp
);
4916 nfs_getattr(dnp
, NULL
, ctx
, NGA_CACHED
);
4919 nfs_node_unlock(dnp
);
4920 if (!error
&& !np
&& fh
.fh_len
) {
4921 /* create the vnode with the filehandle and attributes */
4923 error
= nfs_nget(NFSTOMP(dnp
), dnp
, cnp
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, NG_MAKEENTRY
, &newnp
);
4925 newvp
= NFSTOV(newnp
);
4927 NVATTR_CLEANUP(&nvattr
);
4929 nfs_node_clear_busy(dnp
);
4930 if ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
)) {
4933 if (!error
&& np
&& !recall
) {
4934 /* stuff the delegation state in the node */
4935 lck_mtx_lock(&np
->n_openlock
);
4936 np
->n_openflags
&= ~N_DELEG_MASK
;
4937 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
4938 np
->n_dstateid
= dstateid
;
4940 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
4941 lck_mtx_lock(&nmp
->nm_lock
);
4942 if (np
->n_dlink
.tqe_next
== NFSNOLIST
)
4943 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, np
, n_dlink
);
4944 lck_mtx_unlock(&nmp
->nm_lock
);
4946 lck_mtx_unlock(&np
->n_openlock
);
4948 /* give the delegation back */
4950 if (NFS_CMPFH(np
, fh
.fh_data
, fh
.fh_len
)) {
4951 /* update delegation state and return it */
4952 lck_mtx_lock(&np
->n_openlock
);
4953 np
->n_openflags
&= ~N_DELEG_MASK
;
4954 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
4955 np
->n_dstateid
= dstateid
;
4957 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
4958 lck_mtx_lock(&nmp
->nm_lock
);
4959 if (np
->n_dlink
.tqe_next
== NFSNOLIST
)
4960 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, np
, n_dlink
);
4961 lck_mtx_unlock(&nmp
->nm_lock
);
4963 lck_mtx_unlock(&np
->n_openlock
);
4964 /* don't need to send a separate delegreturn for fh */
4967 /* return np's current delegation */
4968 nfs4_delegation_return(np
, 0, thd
, cred
);
4970 if (fh
.fh_len
) /* return fh's delegation if it wasn't for np */
4971 nfs4_delegreturn_rpc(nmp
, fh
.fh_data
, fh
.fh_len
, &dstateid
, 0, thd
, cred
);
4975 if (exclusive
&& (error
== NFSERR_NOTSUPP
)) {
4980 nfs_node_unlock(newnp
);
4983 } else if (create
) {
4984 nfs_node_unlock(newnp
);
4986 error
= nfs4_setattr_rpc(newnp
, vap
, ctx
);
4987 if (error
&& (gotuid
|| gotgid
)) {
4988 /* it's possible the server didn't like our attempt to set IDs. */
4989 /* so, let's try it again without those */
4990 VATTR_CLEAR_ACTIVE(vap
, va_uid
);
4991 VATTR_CLEAR_ACTIVE(vap
, va_gid
);
4992 error
= nfs4_setattr_rpc(newnp
, vap
, ctx
);
5000 nfs_open_owner_clear_busy(noop
);
5006 * Send an OPEN RPC to claim a delegated open for a file
5009 nfs4_claim_delegated_open_rpc(
5010 struct nfs_open_file
*nofp
,
5015 struct nfsmount
*nmp
;
5016 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
5017 struct nfs_vattr nvattr
;
5018 int error
= 0, lockerror
= ENOENT
, status
;
5019 int nfsvers
, numops
;
5021 nfsnode_t np
= nofp
->nof_np
;
5022 struct nfsm_chain nmreq
, nmrep
;
5023 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
5024 uint32_t rflags
= 0, delegation
, recall
= 0;
5026 struct nfs_stateid dstateid
;
5027 char sbuf
[64], *s
= sbuf
;
5028 uint32_t ace_type
, ace_flags
, ace_mask
, len
, slen
= sizeof(sbuf
);
5029 struct kauth_ace ace
;
5031 const char *vname
= NULL
;
5032 const char *name
= NULL
;
5034 char smallname
[128];
5035 char *filename
= NULL
;
5036 struct nfsreq_secinfo_args si
;
5039 if (nfs_mount_gone(nmp
))
5041 nfsvers
= nmp
->nm_vers
;
5043 nfs_node_lock_force(np
);
5044 if ((vnode_vtype(NFSTOV(np
)) != VDIR
) && np
->n_sillyrename
) {
5046 * The node's been sillyrenamed, so we need to use
5047 * the sillyrename directory/name to do the open.
5049 struct nfs_sillyrename
*nsp
= np
->n_sillyrename
;
5050 dvp
= NFSTOV(nsp
->nsr_dnp
);
5051 if ((error
= vnode_get(dvp
))) {
5052 nfs_node_unlock(np
);
5055 name
= nsp
->nsr_name
;
5058 * [sigh] We can't trust VFS to get the parent right for named
5059 * attribute nodes. (It likes to reparent the nodes after we've
5060 * created them.) Luckily we can probably get the right parent
5061 * from the n_parent we have stashed away.
5063 if ((np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) &&
5064 (((dvp
= np
->n_parent
)) && (error
= vnode_get(dvp
))))
5067 dvp
= vnode_getparent(NFSTOV(np
));
5068 vname
= vnode_getname(NFSTOV(np
));
5069 if (!dvp
|| !vname
) {
5072 nfs_node_unlock(np
);
5077 filename
= &smallname
[0];
5078 namelen
= snprintf(filename
, sizeof(smallname
), "%s", name
);
5079 if (namelen
>= sizeof(smallname
)) {
5080 MALLOC(filename
, char *, namelen
+1, M_TEMP
, M_WAITOK
);
5083 nfs_node_unlock(np
);
5086 snprintf(filename
, namelen
+1, "%s", name
);
5088 nfs_node_unlock(np
);
5090 if ((error
= nfs_open_owner_set_busy(noop
, NULL
)))
5092 NVATTR_INIT(&nvattr
);
5093 delegation
= NFS_OPEN_DELEGATE_NONE
;
5094 dstateid
= np
->n_dstateid
;
5095 NFSREQ_SECINFO_SET(&si
, VTONFS(dvp
), NULL
, 0, filename
, namelen
);
5097 nfsm_chain_null(&nmreq
);
5098 nfsm_chain_null(&nmrep
);
5100 // PUTFH, OPEN, GETATTR(FH)
5102 nfsm_chain_build_alloc_init(error
, &nmreq
, 48 * NFSX_UNSIGNED
);
5103 nfsm_chain_add_compound_header(error
, &nmreq
, "open_claim_d", nmp
->nm_minor_vers
, numops
);
5105 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5106 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, VTONFS(dvp
)->n_fhp
, VTONFS(dvp
)->n_fhsize
);
5108 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
5109 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5110 nfsm_chain_add_32(error
, &nmreq
, share_access
);
5111 nfsm_chain_add_32(error
, &nmreq
, share_deny
);
5112 // open owner: clientid + uid
5113 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
); // open_owner4.clientid
5114 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
5115 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
)); // open_owner4.owner
5117 nfsm_chain_add_32(error
, &nmreq
, NFS_OPEN_NOCREATE
);
5119 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_DELEGATE_CUR
);
5120 nfsm_chain_add_stateid(error
, &nmreq
, &np
->n_dstateid
);
5121 nfsm_chain_add_name(error
, &nmreq
, filename
, namelen
, nmp
);
5123 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5124 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
5125 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
5126 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
5127 nfsm_chain_build_done(error
, &nmreq
);
5128 nfsm_assert(error
, (numops
== 0), EPROTO
);
5131 error
= nfs_request2(np
, nmp
->nm_mountp
, &nmreq
, NFSPROC4_COMPOUND
, current_thread(),
5132 noop
->noo_cred
, &si
, flags
|R_NOINTR
, &nmrep
, &xid
, &status
);
5134 if ((lockerror
= nfs_node_lock(np
)))
5136 nfsm_chain_skip_tag(error
, &nmrep
);
5137 nfsm_chain_get_32(error
, &nmrep
, numops
);
5138 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
5140 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
5141 nfs_owner_seqid_increment(noop
, NULL
, error
);
5142 nfsm_chain_get_stateid(error
, &nmrep
, &nofp
->nof_stateid
);
5143 nfsm_chain_check_change_info(error
, &nmrep
, np
);
5144 nfsm_chain_get_32(error
, &nmrep
, rflags
);
5145 bmlen
= NFS_ATTR_BITMAP_LEN
;
5146 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
5147 nfsm_chain_get_32(error
, &nmrep
, delegation
);
5149 switch (delegation
) {
5150 case NFS_OPEN_DELEGATE_NONE
:
5151 // if (!(np->n_openflags & N_DELEG_RETURN)) /* don't warn if delegation is being returned */
5152 // printf("nfs: open delegated claim didn't return a delegation %s\n", filename ? filename : "???");
5154 case NFS_OPEN_DELEGATE_READ
:
5155 case NFS_OPEN_DELEGATE_WRITE
:
5156 if ((((np
->n_openflags
& N_DELEG_MASK
) == N_DELEG_READ
) &&
5157 (delegation
== NFS_OPEN_DELEGATE_WRITE
)) ||
5158 (((np
->n_openflags
& N_DELEG_MASK
) == N_DELEG_WRITE
) &&
5159 (delegation
== NFS_OPEN_DELEGATE_READ
)))
5160 printf("nfs: open delegated claim returned a different delegation type! have %s got %s %s\n",
5161 ((np
->n_openflags
& N_DELEG_MASK
) == N_DELEG_WRITE
) ? "W" : "R",
5162 (delegation
== NFS_OPEN_DELEGATE_WRITE
) ? "W" : "R", filename
? filename
: "???");
5163 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
5164 nfsm_chain_get_32(error
, &nmrep
, recall
);
5165 if (delegation
== NFS_OPEN_DELEGATE_WRITE
) // space (skip) XXX
5166 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
5167 /* if we have any trouble accepting the ACE, just invalidate it */
5168 ace_type
= ace_flags
= ace_mask
= len
= 0;
5169 nfsm_chain_get_32(error
, &nmrep
, ace_type
);
5170 nfsm_chain_get_32(error
, &nmrep
, ace_flags
);
5171 nfsm_chain_get_32(error
, &nmrep
, ace_mask
);
5172 nfsm_chain_get_32(error
, &nmrep
, len
);
5173 ace
.ace_flags
= nfs4_ace_nfstype_to_vfstype(ace_type
, &error
);
5174 ace
.ace_flags
|= nfs4_ace_nfsflags_to_vfsflags(ace_flags
);
5175 ace
.ace_rights
= nfs4_ace_nfsmask_to_vfsrights(ace_mask
);
5176 if (!error
&& (len
>= slen
)) {
5177 MALLOC(s
, char*, len
+1, M_TEMP
, M_WAITOK
);
5184 nfsm_chain_get_opaque(error
, &nmrep
, len
, s
);
5186 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(len
));
5189 if (nfs4_id2guid(s
, &ace
.ace_applicable
, (ace_flags
& NFS_ACE_IDENTIFIER_GROUP
)))
5194 if (s
&& (s
!= sbuf
))
5197 /* stuff the latest delegation state in the node */
5198 lck_mtx_lock(&np
->n_openlock
);
5199 np
->n_openflags
&= ~N_DELEG_MASK
;
5200 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
5201 np
->n_dstateid
= dstateid
;
5203 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5204 lck_mtx_lock(&nmp
->nm_lock
);
5205 if (np
->n_dlink
.tqe_next
== NFSNOLIST
)
5206 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, np
, n_dlink
);
5207 lck_mtx_unlock(&nmp
->nm_lock
);
5209 lck_mtx_unlock(&np
->n_openlock
);
5217 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5218 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
5220 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
5221 printf("nfs: open reclaim didn't return filehandle? %s\n", filename
? filename
: "???");
5225 if (!NFS_CMPFH(np
, fh
.fh_data
, fh
.fh_len
)) {
5226 // XXX what if fh doesn't match the vnode we think we're re-opening?
5227 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5228 if (!(np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
))
5229 printf("nfs4_claim_delegated_open_rpc: warning: file handle mismatch %s\n", filename
? filename
: "???");
5231 error
= nfs_loadattrcache(np
, &nvattr
, &xid
, 1);
5233 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
)
5234 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
5236 NVATTR_CLEANUP(&nvattr
);
5237 nfsm_chain_cleanup(&nmreq
);
5238 nfsm_chain_cleanup(&nmrep
);
5240 nfs_node_unlock(np
);
5241 nfs_open_owner_clear_busy(noop
);
5242 if ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
)) {
5245 * We're making a delegated claim.
5246 * Don't return the delegation here in case we have more to claim.
5247 * Just make sure it's queued up to be returned.
5249 nfs4_delegation_return_enqueue(np
);
5254 // printf("nfs: open claim delegated (%d, %d) succeeded for %s\n", share_access, share_deny, filename ? filename : "???");
5255 if (filename
&& (filename
!= &smallname
[0]))
5256 FREE(filename
, M_TEMP
);
5258 vnode_putname(vname
);
5265 * Send an OPEN RPC to reclaim an open file.
5268 nfs4_open_reclaim_rpc(
5269 struct nfs_open_file
*nofp
,
5273 struct nfsmount
*nmp
;
5274 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
5275 struct nfs_vattr nvattr
;
5276 int error
= 0, lockerror
= ENOENT
, status
;
5277 int nfsvers
, numops
;
5279 nfsnode_t np
= nofp
->nof_np
;
5280 struct nfsm_chain nmreq
, nmrep
;
5281 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
5282 uint32_t rflags
= 0, delegation
, recall
= 0;
5284 struct nfs_stateid dstateid
;
5285 char sbuf
[64], *s
= sbuf
;
5286 uint32_t ace_type
, ace_flags
, ace_mask
, len
, slen
= sizeof(sbuf
);
5287 struct kauth_ace ace
;
5288 struct nfsreq_secinfo_args si
;
5291 if (nfs_mount_gone(nmp
))
5293 nfsvers
= nmp
->nm_vers
;
5295 if ((error
= nfs_open_owner_set_busy(noop
, NULL
)))
5298 NVATTR_INIT(&nvattr
);
5299 delegation
= NFS_OPEN_DELEGATE_NONE
;
5300 dstateid
= np
->n_dstateid
;
5301 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
5303 nfsm_chain_null(&nmreq
);
5304 nfsm_chain_null(&nmrep
);
5306 // PUTFH, OPEN, GETATTR(FH)
5308 nfsm_chain_build_alloc_init(error
, &nmreq
, 48 * NFSX_UNSIGNED
);
5309 nfsm_chain_add_compound_header(error
, &nmreq
, "open_reclaim", nmp
->nm_minor_vers
, numops
);
5311 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5312 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
5314 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
5315 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5316 nfsm_chain_add_32(error
, &nmreq
, share_access
);
5317 nfsm_chain_add_32(error
, &nmreq
, share_deny
);
5318 // open owner: clientid + uid
5319 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
); // open_owner4.clientid
5320 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
5321 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
)); // open_owner4.owner
5323 nfsm_chain_add_32(error
, &nmreq
, NFS_OPEN_NOCREATE
);
5325 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_PREVIOUS
);
5326 delegation
= (np
->n_openflags
& N_DELEG_READ
) ? NFS_OPEN_DELEGATE_READ
:
5327 (np
->n_openflags
& N_DELEG_WRITE
) ? NFS_OPEN_DELEGATE_WRITE
:
5328 NFS_OPEN_DELEGATE_NONE
;
5329 nfsm_chain_add_32(error
, &nmreq
, delegation
);
5330 delegation
= NFS_OPEN_DELEGATE_NONE
;
5332 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5333 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
5334 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
5335 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
5336 nfsm_chain_build_done(error
, &nmreq
);
5337 nfsm_assert(error
, (numops
== 0), EPROTO
);
5340 error
= nfs_request2(np
, nmp
->nm_mountp
, &nmreq
, NFSPROC4_COMPOUND
, current_thread(),
5341 noop
->noo_cred
, &si
, R_RECOVER
|R_NOINTR
, &nmrep
, &xid
, &status
);
5343 if ((lockerror
= nfs_node_lock(np
)))
5345 nfsm_chain_skip_tag(error
, &nmrep
);
5346 nfsm_chain_get_32(error
, &nmrep
, numops
);
5347 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
5349 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
5350 nfs_owner_seqid_increment(noop
, NULL
, error
);
5351 nfsm_chain_get_stateid(error
, &nmrep
, &nofp
->nof_stateid
);
5352 nfsm_chain_check_change_info(error
, &nmrep
, np
);
5353 nfsm_chain_get_32(error
, &nmrep
, rflags
);
5354 bmlen
= NFS_ATTR_BITMAP_LEN
;
5355 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
5356 nfsm_chain_get_32(error
, &nmrep
, delegation
);
5358 switch (delegation
) {
5359 case NFS_OPEN_DELEGATE_NONE
:
5360 if (np
->n_openflags
& N_DELEG_MASK
) {
5362 * Hey! We were supposed to get our delegation back even
5363 * if it was getting immediately recalled. Bad server!
5365 * Just try to return the existing delegation.
5367 // NP(np, "nfs: open reclaim didn't return delegation?");
5368 delegation
= (np
->n_openflags
& N_DELEG_WRITE
) ? NFS_OPEN_DELEGATE_WRITE
: NFS_OPEN_DELEGATE_READ
;
5372 case NFS_OPEN_DELEGATE_READ
:
5373 case NFS_OPEN_DELEGATE_WRITE
:
5374 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
5375 nfsm_chain_get_32(error
, &nmrep
, recall
);
5376 if (delegation
== NFS_OPEN_DELEGATE_WRITE
) // space (skip) XXX
5377 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
5378 /* if we have any trouble accepting the ACE, just invalidate it */
5379 ace_type
= ace_flags
= ace_mask
= len
= 0;
5380 nfsm_chain_get_32(error
, &nmrep
, ace_type
);
5381 nfsm_chain_get_32(error
, &nmrep
, ace_flags
);
5382 nfsm_chain_get_32(error
, &nmrep
, ace_mask
);
5383 nfsm_chain_get_32(error
, &nmrep
, len
);
5384 ace
.ace_flags
= nfs4_ace_nfstype_to_vfstype(ace_type
, &error
);
5385 ace
.ace_flags
|= nfs4_ace_nfsflags_to_vfsflags(ace_flags
);
5386 ace
.ace_rights
= nfs4_ace_nfsmask_to_vfsrights(ace_mask
);
5387 if (!error
&& (len
>= slen
)) {
5388 MALLOC(s
, char*, len
+1, M_TEMP
, M_WAITOK
);
5395 nfsm_chain_get_opaque(error
, &nmrep
, len
, s
);
5397 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(len
));
5400 if (nfs4_id2guid(s
, &ace
.ace_applicable
, (ace_flags
& NFS_ACE_IDENTIFIER_GROUP
)))
5405 if (s
&& (s
!= sbuf
))
5408 /* stuff the delegation state in the node */
5409 lck_mtx_lock(&np
->n_openlock
);
5410 np
->n_openflags
&= ~N_DELEG_MASK
;
5411 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
5412 np
->n_dstateid
= dstateid
;
5414 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5415 lck_mtx_lock(&nmp
->nm_lock
);
5416 if (np
->n_dlink
.tqe_next
== NFSNOLIST
)
5417 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, np
, n_dlink
);
5418 lck_mtx_unlock(&nmp
->nm_lock
);
5420 lck_mtx_unlock(&np
->n_openlock
);
5428 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5429 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
5431 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
5432 NP(np
, "nfs: open reclaim didn't return filehandle?");
5436 if (!NFS_CMPFH(np
, fh
.fh_data
, fh
.fh_len
)) {
5437 // XXX what if fh doesn't match the vnode we think we're re-opening?
5438 // That should be pretty hard in this case, given that we are doing
5439 // the open reclaim using the file handle (and not a dir/name pair).
5440 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5441 if (!(np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
))
5442 NP(np
, "nfs4_open_reclaim_rpc: warning: file handle mismatch");
5444 error
= nfs_loadattrcache(np
, &nvattr
, &xid
, 1);
5446 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
)
5447 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
5450 // NP(np, "nfs: open reclaim (%d, %d) succeeded", share_access, share_deny);
5451 NVATTR_CLEANUP(&nvattr
);
5452 nfsm_chain_cleanup(&nmreq
);
5453 nfsm_chain_cleanup(&nmrep
);
5455 nfs_node_unlock(np
);
5456 nfs_open_owner_clear_busy(noop
);
5457 if ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
)) {
5459 nfs4_delegation_return_enqueue(np
);
5465 nfs4_open_downgrade_rpc(
5467 struct nfs_open_file
*nofp
,
5470 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
5471 struct nfsmount
*nmp
;
5472 int error
, lockerror
= ENOENT
, status
, nfsvers
, numops
;
5473 struct nfsm_chain nmreq
, nmrep
;
5475 struct nfsreq_secinfo_args si
;
5478 if (nfs_mount_gone(nmp
))
5480 nfsvers
= nmp
->nm_vers
;
5482 if ((error
= nfs_open_owner_set_busy(noop
, NULL
)))
5485 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
5486 nfsm_chain_null(&nmreq
);
5487 nfsm_chain_null(&nmrep
);
5489 // PUTFH, OPEN_DOWNGRADE, GETATTR
5491 nfsm_chain_build_alloc_init(error
, &nmreq
, 23 * NFSX_UNSIGNED
);
5492 nfsm_chain_add_compound_header(error
, &nmreq
, "open_downgrd", nmp
->nm_minor_vers
, numops
);
5494 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5495 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
5497 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN_DOWNGRADE
);
5498 nfsm_chain_add_stateid(error
, &nmreq
, &nofp
->nof_stateid
);
5499 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5500 nfsm_chain_add_32(error
, &nmreq
, nofp
->nof_access
);
5501 nfsm_chain_add_32(error
, &nmreq
, nofp
->nof_deny
);
5503 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5504 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
5505 nfsm_chain_build_done(error
, &nmreq
);
5506 nfsm_assert(error
, (numops
== 0), EPROTO
);
5508 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
5509 vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
5510 &si
, R_NOINTR
, &nmrep
, &xid
, &status
);
5512 if ((lockerror
= nfs_node_lock(np
)))
5514 nfsm_chain_skip_tag(error
, &nmrep
);
5515 nfsm_chain_get_32(error
, &nmrep
, numops
);
5516 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
5518 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN_DOWNGRADE
);
5519 nfs_owner_seqid_increment(noop
, NULL
, error
);
5520 nfsm_chain_get_stateid(error
, &nmrep
, &nofp
->nof_stateid
);
5521 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5522 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
5525 nfs_node_unlock(np
);
5526 nfs_open_owner_clear_busy(noop
);
5527 nfsm_chain_cleanup(&nmreq
);
5528 nfsm_chain_cleanup(&nmrep
);
/*
 * Close RPC (compound tag "close": PUTFH, CLOSE, GETATTR).
 * Releases this open file's NFSv4 open state on the server using the
 * open owner's seqid and the current open stateid.
 *
 * NOTE(review): the function's opening line(s) (name/return type and the
 * np/thd/cred/flags parameters referenced below) were dropped by the
 * extraction — presumably this is nfs4_close_rpc; confirm against the
 * original file.  Code left byte-identical; comments only.
 */
5535 struct nfs_open_file
*nofp
,
5540 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
5541 struct nfsmount
*nmp
;
/* lockerror = ENOENT marks "node not locked yet" for the cleanup path */
5542 int error
, lockerror
= ENOENT
, status
, nfsvers
, numops
;
5543 struct nfsm_chain nmreq
, nmrep
;
5545 struct nfsreq_secinfo_args si
;
5548 if (nfs_mount_gone(nmp
))
5550 nfsvers
= nmp
->nm_vers
;
/* serialize on the open owner before using its seqid */
5552 if ((error
= nfs_open_owner_set_busy(noop
, NULL
)))
5555 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
5556 nfsm_chain_null(&nmreq
);
5557 nfsm_chain_null(&nmrep
);
5559 // PUTFH, CLOSE, GETATTR
5561 nfsm_chain_build_alloc_init(error
, &nmreq
, 23 * NFSX_UNSIGNED
);
5562 nfsm_chain_add_compound_header(error
, &nmreq
, "close", nmp
->nm_minor_vers
, numops
);
5564 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5565 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
5567 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_CLOSE
);
/* CLOSE takes the owner seqid followed by the open stateid */
5568 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5569 nfsm_chain_add_stateid(error
, &nmreq
, &nofp
->nof_stateid
);
5571 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5572 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
5573 nfsm_chain_build_done(error
, &nmreq
);
5574 nfsm_assert(error
, (numops
== 0), EPROTO
);
/* caller-supplied thd/cred/flags; R_NOINTR keeps the close from being interrupted */
5576 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, flags
|R_NOINTR
, &nmrep
, &xid
, &status
);
5578 if ((lockerror
= nfs_node_lock(np
)))
5580 nfsm_chain_skip_tag(error
, &nmrep
);
5581 nfsm_chain_get_32(error
, &nmrep
, numops
);
5582 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
5584 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_CLOSE
);
/* seqid advances regardless of the op result */
5585 nfs_owner_seqid_increment(noop
, NULL
, error
);
5586 nfsm_chain_get_stateid(error
, &nmrep
, &nofp
->nof_stateid
);
5587 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5588 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
5591 nfs_node_unlock(np
);
5592 nfs_open_owner_clear_busy(noop
);
5593 nfsm_chain_cleanup(&nmreq
);
5594 nfsm_chain_cleanup(&nmrep
);
/*
 * nfs4_claim_delegated_state_for_open_file
 *
 * Convert every delegated open combination held by this open file into
 * real server-side open state (via nfs4_claim_delegated_open_rpc), then
 * claim any delegated byte-range locks.  Each combination is one
 * (access, deny) pair; the nof_d_* counters are folded into the
 * corresponding non-delegated nof_* counters as each claim succeeds.
 * Deny-mode opens must be claimed; deny-NONE opens may instead be
 * marked for reopen if the delegation was lost and no locks are held.
 *
 * NOTE(review): extraction dropped many lines here (goto targets,
 * "break"/"continue" statements, returns, closing braces).  Code left
 * byte-identical; comments only.
 */
5600 * Claim the delegated open combinations this open file holds.
5603 nfs4_claim_delegated_state_for_open_file(struct nfs_open_file
*nofp
, int flags
)
5605 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
5606 struct nfs_lock_owner
*nlop
;
5607 struct nfs_file_lock
*nflp
, *nextnflp
;
5608 struct nfsmount
*nmp
;
5609 int error
= 0, reopen
= 0;
/* claim RW/deny-both opens */
5611 if (nofp
->nof_d_rw_drw
) {
5612 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_BOTH
, flags
);
/* on success, move the delegated count over to the claimed count */
5614 lck_mtx_lock(&nofp
->nof_lock
);
5615 nofp
->nof_rw_drw
+= nofp
->nof_d_rw_drw
;
5616 nofp
->nof_d_rw_drw
= 0;
5617 lck_mtx_unlock(&nofp
->nof_lock
);
/* claim W/deny-both opens */
5620 if (!error
&& nofp
->nof_d_w_drw
) {
5621 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_WRITE
, NFS_OPEN_SHARE_DENY_BOTH
, flags
);
5623 lck_mtx_lock(&nofp
->nof_lock
);
5624 nofp
->nof_w_drw
+= nofp
->nof_d_w_drw
;
5625 nofp
->nof_d_w_drw
= 0;
5626 lck_mtx_unlock(&nofp
->nof_lock
);
/* claim R/deny-both opens */
5629 if (!error
&& nofp
->nof_d_r_drw
) {
5630 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_BOTH
, flags
);
5632 lck_mtx_lock(&nofp
->nof_lock
);
5633 nofp
->nof_r_drw
+= nofp
->nof_d_r_drw
;
5634 nofp
->nof_d_r_drw
= 0;
5635 lck_mtx_unlock(&nofp
->nof_lock
);
/* claim RW/deny-write opens */
5638 if (!error
&& nofp
->nof_d_rw_dw
) {
5639 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_WRITE
, flags
);
5641 lck_mtx_lock(&nofp
->nof_lock
);
5642 nofp
->nof_rw_dw
+= nofp
->nof_d_rw_dw
;
5643 nofp
->nof_d_rw_dw
= 0;
5644 lck_mtx_unlock(&nofp
->nof_lock
);
/* claim W/deny-write opens */
5647 if (!error
&& nofp
->nof_d_w_dw
) {
5648 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_WRITE
, NFS_OPEN_SHARE_DENY_WRITE
, flags
);
5650 lck_mtx_lock(&nofp
->nof_lock
);
5651 nofp
->nof_w_dw
+= nofp
->nof_d_w_dw
;
5652 nofp
->nof_d_w_dw
= 0;
5653 lck_mtx_unlock(&nofp
->nof_lock
);
/* claim R/deny-write opens */
5656 if (!error
&& nofp
->nof_d_r_dw
) {
5657 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_WRITE
, flags
);
5659 lck_mtx_lock(&nofp
->nof_lock
);
5660 nofp
->nof_r_dw
+= nofp
->nof_d_r_dw
;
5661 nofp
->nof_d_r_dw
= 0;
5662 lck_mtx_unlock(&nofp
->nof_lock
);
5665 /* non-deny-mode opens may be reopened if no locks are held */
5666 if (!error
&& nofp
->nof_d_rw
) {
5667 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_NONE
, flags
);
5668 /* for some errors, we should just try reopening the file */
/* (the statement setting "reopen" for lost-delegation errors was elided here) */
5669 if (nfs_mount_state_error_delegation_lost(error
))
5671 if (!error
|| reopen
) {
5672 lck_mtx_lock(&nofp
->nof_lock
);
5673 nofp
->nof_rw
+= nofp
->nof_d_rw
;
5675 lck_mtx_unlock(&nofp
->nof_lock
);
5678 /* if we've already set reopen, we should move these other two opens from delegated to not delegated */
5679 if ((!error
|| reopen
) && nofp
->nof_d_w
) {
5681 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_WRITE
, NFS_OPEN_SHARE_DENY_NONE
, flags
);
5682 /* for some errors, we should just try reopening the file */
5683 if (nfs_mount_state_error_delegation_lost(error
))
5686 if (!error
|| reopen
) {
5687 lck_mtx_lock(&nofp
->nof_lock
);
5688 nofp
->nof_w
+= nofp
->nof_d_w
;
5690 lck_mtx_unlock(&nofp
->nof_lock
);
5693 if ((!error
|| reopen
) && nofp
->nof_d_r
) {
5695 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, flags
);
5696 /* for some errors, we should just try reopening the file */
5697 if (nfs_mount_state_error_delegation_lost(error
))
5700 if (!error
|| reopen
) {
5701 lck_mtx_lock(&nofp
->nof_lock
);
5702 nofp
->nof_r
+= nofp
->nof_d_r
;
5704 lck_mtx_unlock(&nofp
->nof_lock
);
5710 * Any problems with the delegation probably indicates that we
5711 * should review/return all of our current delegation state.
/* queue the delegation for return and trigger recovery on the mount */
5713 if ((nmp
= NFSTONMP(nofp
->nof_np
))) {
5714 nfs4_delegation_return_enqueue(nofp
->nof_np
);
5715 lck_mtx_lock(&nmp
->nm_lock
);
5716 nfs_need_recover(nmp
, NFSERR_EXPIRED
);
5717 lck_mtx_unlock(&nmp
->nm_lock
);
5719 if (reopen
&& (nfs_check_for_locks(noop
, nofp
) == 0)) {
5720 /* just reopen the file on next access */
5721 NP(nofp
->nof_np
, "nfs4_claim_delegated_state_for_open_file: %d, need reopen, %d",
5722 reopen
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
5723 lck_mtx_lock(&nofp
->nof_lock
);
5724 nofp
->nof_flags
|= NFS_OPEN_FILE_REOPEN
;
5725 lck_mtx_unlock(&nofp
->nof_lock
);
/* else-branch: locks are held, so a lazy reopen is not possible */
5729 NP(nofp
->nof_np
, "nfs4_claim_delegated_state_for_open_file: %d, locks prevent reopen, %d",
5730 reopen
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
/* all opens claimed — now claim delegated byte-range locks for our lock owners */
5733 if (!error
&& ((nmp
= NFSTONMP(nofp
->nof_np
)))) {
5734 /* claim delegated locks */
5735 TAILQ_FOREACH(nlop
, &nofp
->nof_np
->n_lock_owners
, nlo_link
) {
/* only lock owners belonging to this open owner */
5736 if (nlop
->nlo_open_owner
!= noop
)
5738 TAILQ_FOREACH_SAFE(nflp
, &nlop
->nlo_locks
, nfl_lolink
, nextnflp
) {
5739 /* skip dead & blocked lock requests (shouldn't be any in the held lock list) */
5740 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_DEAD
|NFS_FILE_LOCK_BLOCKED
))
5742 /* skip non-delegated locks */
5743 if (!(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
))
/* re-establish the lock on the server via the version-specific setlock RPC */
5745 error
= nmp
->nm_funcs
->nf_setlock_rpc(nofp
->nof_np
, nofp
, nflp
, 0, flags
, current_thread(), noop
->noo_cred
);
5747 NP(nofp
->nof_np
, "nfs: delegated lock claim (0x%llx, 0x%llx) failed %d, %d",
5748 nflp
->nfl_start
, nflp
->nfl_end
, error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
5752 // NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) succeeded, %d",
5753 // nflp->nfl_start, nflp->nfl_end, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5761 if (!error
) /* all state claimed successfully! */
5764 /* restart if it looks like a problem more than just losing the delegation */
5765 if (!nfs_mount_state_error_delegation_lost(error
) &&
5766 ((error
== ETIMEDOUT
) || nfs_mount_state_error_should_restart(error
))) {
5767 NP(nofp
->nof_np
, "nfs delegated lock claim error %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
/* a timeout means the transport is wedged — force a reconnect */
5768 if ((error
== ETIMEDOUT
) && ((nmp
= NFSTONMP(nofp
->nof_np
))))
5769 nfs_need_reconnect(nmp
);
5773 /* delegated state lost (once held but now not claimable) */
5774 NP(nofp
->nof_np
, "nfs delegated state claim error %d, state lost, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
5777 * Any problems with the delegation probably indicates that we
5778 * should review/return all of our current delegation state.
5780 if ((nmp
= NFSTONMP(nofp
->nof_np
))) {
5781 nfs4_delegation_return_enqueue(nofp
->nof_np
);
5782 lck_mtx_lock(&nmp
->nm_lock
);
5783 nfs_need_recover(nmp
, NFSERR_EXPIRED
);
5784 lck_mtx_unlock(&nmp
->nm_lock
);
5787 /* revoke all open file state */
5788 nfs_revoke_open_state_for_node(nofp
->nof_np
);
/*
 * nfs_release_open_state_for_node
 *
 * Drop every held byte-range lock and mark every open on the node as
 * lost.  With force == 0 (and a live mount), an unlock RPC is sent for
 * each non-delegated lock and (for NFSv4) a close RPC for each open;
 * with force != 0 the state is discarded locally without RPCs.
 *
 * NOTE(review): damaged extraction — "continue" statements, wakeup()
 * call for blocked waiters, closing braces etc. were elided.  Code
 * left byte-identical; comments only.
 */
5794 * Release all open state for the given node.
5797 nfs_release_open_state_for_node(nfsnode_t np
, int force
)
5799 struct nfsmount
*nmp
= NFSTONMP(np
);
5800 struct nfs_open_file
*nofp
;
5801 struct nfs_file_lock
*nflp
, *nextnflp
;
5803 /* drop held locks */
5804 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
5805 /* skip dead & blocked lock requests */
5806 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_DEAD
|NFS_FILE_LOCK_BLOCKED
))
5808 /* send an unlock if not a delegated lock */
5809 if (!force
&& nmp
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
))
/* best-effort unlock under R_RECOVER; return value intentionally ignored */
5810 nmp
->nm_funcs
->nf_unlock_rpc(np
, nflp
->nfl_owner
, F_WRLCK
, nflp
->nfl_start
, nflp
->nfl_end
, R_RECOVER
,
5811 NULL
, nflp
->nfl_owner
->nlo_open_owner
->noo_cred
);
5812 /* kill/remove the lock */
5813 lck_mtx_lock(&np
->n_openlock
);
5814 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
/* take the lock owner's lock to unlink from its per-owner list */
5815 lck_mtx_lock(&nflp
->nfl_owner
->nlo_lock
);
5816 TAILQ_REMOVE(&nflp
->nfl_owner
->nlo_locks
, nflp
, nfl_lolink
);
5817 lck_mtx_unlock(&nflp
->nfl_owner
->nlo_lock
);
/* if waiters are blocked on this lock, leave it for them to clean up (wakeup elided) */
5818 if (nflp
->nfl_blockcnt
) {
5819 /* wake up anyone blocked on this lock */
5822 /* remove nflp from lock list and destroy */
5823 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
5824 nfs_file_lock_destroy(nflp
);
5826 lck_mtx_unlock(&np
->n_openlock
);
5829 lck_mtx_lock(&np
->n_openlock
);
5831 /* drop all opens */
5832 TAILQ_FOREACH(nofp
, &np
->n_opens
, nof_link
) {
/* already marked lost — nothing to do (continue elided) */
5833 if (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
)
5835 /* mark open state as lost */
5836 lck_mtx_lock(&nofp
->nof_lock
);
5837 nofp
->nof_flags
&= ~NFS_OPEN_FILE_REOPEN
;
5838 nofp
->nof_flags
|= NFS_OPEN_FILE_LOST
;
5840 lck_mtx_unlock(&nofp
->nof_lock
);
/* NFSv4 only: try to tell the server the open is gone (best effort, R_RECOVER) */
5841 if (!force
&& nmp
&& (nmp
->nm_vers
>= NFS_VER4
))
5842 nfs4_close_rpc(np
, nofp
, NULL
, nofp
->nof_owner
->noo_cred
, R_RECOVER
);
5845 lck_mtx_unlock(&np
->n_openlock
);
/*
 * nfs_revoke_open_state_for_node
 *
 * Mark the node revoked (NREVOKE), release all of its open/lock state,
 * and flag the mount so the socket thread performs a revoke scan.
 * Idempotent: returns early if NREVOKE is already set.
 *
 * NOTE(review): damaged extraction — the early "return" after the
 * already-revoked check and the closing brace were elided.  Code left
 * byte-identical; comments only.
 */
5849 * State for a node has been lost, drop it, and revoke the node.
5850 * Attempt to return any state if possible in case the server
5851 * might somehow think we hold it.
5854 nfs_revoke_open_state_for_node(nfsnode_t np
)
5856 struct nfsmount
*nmp
;
5858 /* mark node as needing to be revoked */
5859 nfs_node_lock_force(np
);
5860 if (np
->n_flag
& NREVOKE
) /* already revoked? */
5862 NP(np
, "nfs_revoke_open_state_for_node(): already revoked");
5863 nfs_node_unlock(np
);
/* (early return for the already-revoked case elided by extraction) */
5866 np
->n_flag
|= NREVOKE
;
5867 nfs_node_unlock(np
);
/* force == 0: still try to return state to the server where possible */
5869 nfs_release_open_state_for_node(np
, 0);
5870 NP(np
, "nfs: state lost for %p 0x%x", np
, np
->n_flag
);
5872 /* mark mount as needing a revoke scan and have the socket thread do it. */
5873 if ((nmp
= NFSTONMP(np
))) {
5874 lck_mtx_lock(&nmp
->nm_lock
);
5875 nmp
->nm_state
|= NFSSTA_REVOKE
;
5876 nfs_mount_sock_thread_wake(nmp
);
5877 lck_mtx_unlock(&nmp
->nm_lock
);
/*
 * nfs4_claim_delegated_state_for_node
 *
 * Walk the node's open-file list and, for each open file holding any
 * delegated open state (any nof_d_* counter nonzero), claim that state
 * via nfs4_claim_delegated_state_for_open_file().  The n_openlock is
 * dropped around each claim RPC and retaken afterward.
 *
 * NOTE(review): damaged extraction — the "continue" for files with no
 * delegated state, the error-break, return, and closing brace were
 * elided.  Code left byte-identical; comments only.
 */
5882 * Claim the delegated open combinations that each of this node's open files hold.
5885 nfs4_claim_delegated_state_for_node(nfsnode_t np
, int flags
)
5887 struct nfs_open_file
*nofp
;
5890 lck_mtx_lock(&np
->n_openlock
);
5892 /* walk the open file list looking for opens with delegated state to claim */
5894 TAILQ_FOREACH(nofp
, &np
->n_opens
, nof_link
) {
/* skip files with no delegated opens in any access/deny combination */
5895 if (!nofp
->nof_d_rw_drw
&& !nofp
->nof_d_w_drw
&& !nofp
->nof_d_r_drw
&&
5896 !nofp
->nof_d_rw_dw
&& !nofp
->nof_d_w_dw
&& !nofp
->nof_d_r_dw
&&
5897 !nofp
->nof_d_rw
&& !nofp
->nof_d_w
&& !nofp
->nof_d_r
)
/* drop the list lock across the RPC; list may change while unlocked */
5899 lck_mtx_unlock(&np
->n_openlock
);
5900 error
= nfs4_claim_delegated_state_for_open_file(nofp
, flags
);
5901 lck_mtx_lock(&np
->n_openlock
);
5907 lck_mtx_unlock(&np
->n_openlock
);
/*
 * nfs4_delegation_return_enqueue
 *
 * Flag the node for delegation return (N_DELEG_RETURN), add it to the
 * mount's delegation-return queue if not already queued, and wake the
 * socket thread to process the queue.
 *
 * NOTE(review): damaged extraction — the nmp assignment before the
 * nfs_mount_gone() check and the early return were elided.  Code left
 * byte-identical; comments only.
 */
5913 * Mark a node as needed to have its delegation returned.
5914 * Queue it up on the delegation return queue.
5915 * Make sure the thread is running.
5918 nfs4_delegation_return_enqueue(nfsnode_t np
)
5920 struct nfsmount
*nmp
;
5923 if (nfs_mount_gone(nmp
))
/* mark the node under its open lock */
5926 lck_mtx_lock(&np
->n_openlock
);
5927 np
->n_openflags
|= N_DELEG_RETURN
;
5928 lck_mtx_unlock(&np
->n_openlock
);
/* NFSNOLIST sentinel means "not currently on the return queue" */
5930 lck_mtx_lock(&nmp
->nm_lock
);
5931 if (np
->n_dreturn
.tqe_next
== NFSNOLIST
)
5932 TAILQ_INSERT_TAIL(&nmp
->nm_dreturnq
, np
, n_dreturn
);
/* the socket thread services nm_dreturnq */
5933 nfs_mount_sock_thread_wake(nmp
);
5934 lck_mtx_unlock(&nmp
->nm_lock
);
/*
 * nfs4_delegation_return
 *
 * Return this node's delegation to the server: mark the node returning,
 * claim all delegated open/lock state first, then send DELEGRETURN with
 * the saved delegation stateid.  On completion the node is removed from
 * the mount's delegation list and return queue and the return flags are
 * cleared.  ETIMEDOUT / NFSERR_MOVED / NFSERR_LEASE_MOVED leave the
 * delegation state intact (the return may be retried).
 *
 * NOTE(review): damaged extraction — nmp assignment, early returns,
 * "out:" error label, and closing brace were elided.  Code left
 * byte-identical; comments only.
 */
5938 * return any delegation we may have for the given node
5941 nfs4_delegation_return(nfsnode_t np
, int flags
, thread_t thd
, kauth_cred_t cred
)
5943 struct nfsmount
*nmp
;
5945 nfs_stateid dstateid
;
5949 if (nfs_mount_gone(nmp
))
5952 /* first, make sure the node's marked for delegation return */
5953 lck_mtx_lock(&np
->n_openlock
);
5954 np
->n_openflags
|= (N_DELEG_RETURN
|N_DELEG_RETURNING
);
5955 lck_mtx_unlock(&np
->n_openlock
);
5957 /* make sure nobody else is using the delegation state */
5958 if ((error
= nfs_open_state_set_busy(np
, NULL
)))
5961 /* claim any delegated state */
5962 if ((error
= nfs4_claim_delegated_state_for_node(np
, flags
)))
5965 /* return the delegation */
/* snapshot stateid and filehandle under the open lock, then do the RPC unlocked */
5966 lck_mtx_lock(&np
->n_openlock
);
5967 dstateid
= np
->n_dstateid
;
5968 fh
.fh_len
= np
->n_fhsize
;
5969 bcopy(np
->n_fhp
, &fh
.fh_data
, fh
.fh_len
);
5970 lck_mtx_unlock(&np
->n_openlock
);
5971 error
= nfs4_delegreturn_rpc(NFSTONMP(np
), fh
.fh_data
, fh
.fh_len
, &dstateid
, flags
, thd
, cred
);
5972 /* assume delegation is gone for all errors except ETIMEDOUT, NFSERR_*MOVED */
5973 if ((error
!= ETIMEDOUT
) && (error
!= NFSERR_MOVED
) && (error
!= NFSERR_LEASE_MOVED
)) {
5974 lck_mtx_lock(&np
->n_openlock
);
5975 np
->n_openflags
&= ~N_DELEG_MASK
;
/* unlink from the mount's delegation list (NFSNOLIST = not on list) */
5976 lck_mtx_lock(&nmp
->nm_lock
);
5977 if (np
->n_dlink
.tqe_next
!= NFSNOLIST
) {
5978 TAILQ_REMOVE(&nmp
->nm_delegations
, np
, n_dlink
);
5979 np
->n_dlink
.tqe_next
= NFSNOLIST
;
5981 lck_mtx_unlock(&nmp
->nm_lock
);
5982 lck_mtx_unlock(&np
->n_openlock
);
5986 /* make sure it's no longer on the return queue and clear the return flags */
5987 lck_mtx_lock(&nmp
->nm_lock
);
5988 if (np
->n_dreturn
.tqe_next
!= NFSNOLIST
) {
5989 TAILQ_REMOVE(&nmp
->nm_dreturnq
, np
, n_dreturn
);
5990 np
->n_dreturn
.tqe_next
= NFSNOLIST
;
5992 lck_mtx_unlock(&nmp
->nm_lock
);
5993 lck_mtx_lock(&np
->n_openlock
);
5994 np
->n_openflags
&= ~(N_DELEG_RETURN
|N_DELEG_RETURNING
);
5995 lck_mtx_unlock(&np
->n_openlock
);
/* error path: log, reconnect on timeout, and kick recovery if needed */
5998 NP(np
, "nfs4_delegation_return, error %d", error
);
5999 if (error
== ETIMEDOUT
)
6000 nfs_need_reconnect(nmp
);
6001 if (nfs_mount_state_error_should_restart(error
)) {
6002 /* make sure recovery happens */
6003 lck_mtx_lock(&nmp
->nm_lock
);
6004 nfs_need_recover(nmp
, nfs_mount_state_error_delegation_lost(error
) ? NFSERR_EXPIRED
: 0);
6005 lck_mtx_unlock(&nmp
->nm_lock
);
6009 nfs_open_state_clear_busy(np
);
/*
 * nfs4_delegreturn_rpc
 *
 * Send a DELEGRETURN compound (PUTFH, DELEGRETURN) for the given file
 * handle and delegation stateid.  The compound is keyed by the raw
 * filehandle (fhp/fhlen) rather than an nfsnode, so it can be used
 * during recovery.
 *
 * NOTE(review): damaged extraction — mount-gone check, xid declaration,
 * "nfsmout:" label, return, and closing brace were elided.  Code left
 * byte-identical; comments only.
 */
6015 * RPC to return a delegation for a file handle
6018 nfs4_delegreturn_rpc(struct nfsmount
*nmp
, u_char
*fhp
, int fhlen
, struct nfs_stateid
*sid
, int flags
, thread_t thd
, kauth_cred_t cred
)
6020 int error
= 0, status
, numops
;
6022 struct nfsm_chain nmreq
, nmrep
;
6023 struct nfsreq_secinfo_args si
;
6025 NFSREQ_SECINFO_SET(&si
, NULL
, fhp
, fhlen
, NULL
, 0);
6026 nfsm_chain_null(&nmreq
);
6027 nfsm_chain_null(&nmrep
);
6029 // PUTFH, DELEGRETURN
6031 nfsm_chain_build_alloc_init(error
, &nmreq
, 16 * NFSX_UNSIGNED
);
6032 nfsm_chain_add_compound_header(error
, &nmreq
, "delegreturn", nmp
->nm_minor_vers
, numops
);
6034 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
6035 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, fhp
, fhlen
);
6037 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_DELEGRETURN
);
/* the delegation stateid being surrendered */
6038 nfsm_chain_add_stateid(error
, &nmreq
, sid
);
6039 nfsm_chain_build_done(error
, &nmreq
);
6040 nfsm_assert(error
, (numops
== 0), EPROTO
);
6042 error
= nfs_request2(NULL
, nmp
->nm_mountp
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, flags
, &nmrep
, &xid
, &status
);
/* parse reply in request op order */
6043 nfsm_chain_skip_tag(error
, &nmrep
);
6044 nfsm_chain_get_32(error
, &nmrep
, numops
);
6045 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
6046 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_DELEGRETURN
);
6048 nfsm_chain_cleanup(&nmreq
);
6049 nfsm_chain_cleanup(&nmrep
);
/*
 * NFSv4 VNOP_READ entry point.
 *
 * Ensures the file is open for read (exec paths can call VNOP_READ
 * without VNOP_OPEN), opening it on demand for v4, then delegates the
 * actual read to nfs_bioread().  A fast path skips the busy/open dance
 * when the open file already has read access.
 *
 * NOTE(review): damaged extraction — the function name line, error
 * returns, "restart"/"out" labels, closing braces etc. were elided.
 * Code left byte-identical; comments only.
 */
6056 * Just call nfs_bioread() to do the work.
6058 * Note: the exec code paths have a tendency to call VNOP_READ (and VNOP_MMAP)
6059 * without first calling VNOP_OPEN, so we make sure the file is open here.
6063 struct vnop_read_args
/* {
6064 struct vnodeop_desc *a_desc;
6068 vfs_context_t a_context;
6071 vnode_t vp
= ap
->a_vp
;
6072 vfs_context_t ctx
= ap
->a_context
;
6074 struct nfsmount
*nmp
;
6075 struct nfs_open_owner
*noop
;
6076 struct nfs_open_file
*nofp
;
/* reads only make sense on regular files */
6079 if (vnode_vtype(ap
->a_vp
) != VREG
)
6080 return (vnode_vtype(vp
) == VDIR
) ? EISDIR
: EPERM
;
6084 if (nfs_mount_gone(nmp
))
/* a revoked node can no longer be read */
6086 if (np
->n_flag
& NREVOKE
)
/* find (or create) the open owner for this credential */
6089 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1);
6093 error
= nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 1);
6094 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
6095 NP(np
, "nfs_vnop_read: LOST %d", kauth_cred_getuid(noop
->noo_cred
));
/* a file marked for reopen must be reopened before use */
6098 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
6099 error
= nfs4_reopen(nofp
, vfs_context_thread(ctx
));
6105 nfs_open_owner_rele(noop
);
6109 * Since the read path is a hot path, if we already have
6110 * read access, lets go and try and do the read, without
6111 * busying the mount and open file node for this open owner.
6113 * N.B. This is inherently racy w.r.t. an execve using
6114 * an already open file, in that the read at the end of
6115 * this routine will be racing with a potential close.
6116 * The code below ultimately has the same problem. In practice
6117 * this does not seem to be an issue.
6119 if (nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_READ
) {
6120 nfs_open_owner_rele(noop
);
/* slow path: take the mount's in-use reference before modifying open state */
6123 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
6125 nfs_open_owner_rele(noop
);
6129 * If we don't have a file already open with the access we need (read) then
6130 * we need to open one. Otherwise we just co-opt an open. We might not already
6131 * have access because we're trying to read the first page of the
6134 error
= nfs_open_file_set_busy(nofp
, vfs_context_thread(ctx
));
6136 nfs_mount_state_in_use_end(nmp
, 0);
6137 nfs_open_owner_rele(noop
);
6140 if (!(nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_READ
)) {
6141 /* we don't have the file open, so open it for read access if we're not denied */
6142 if (nofp
->nof_flags
& NFS_OPEN_FILE_NEEDCLOSE
) {
6143 NP(np
, "nfs_vnop_read: File already needs close access: 0x%x, cred: %d thread: %lld",
6144 nofp
->nof_access
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
), thread_tid(vfs_context_thread(ctx
)));
/* deny-read on our own open means this read cannot proceed */
6146 if (nofp
->nof_deny
& NFS_OPEN_SHARE_DENY_READ
) {
6147 nfs_open_file_clear_busy(nofp
);
6148 nfs_mount_state_in_use_end(nmp
, 0);
6149 nfs_open_owner_rele(noop
);
/* re-check revocation after blocking in the busy/in-use calls */
6152 if (np
->n_flag
& NREVOKE
) {
6154 nfs_open_file_clear_busy(nofp
);
6155 nfs_mount_state_in_use_end(nmp
, 0);
6156 nfs_open_owner_rele(noop
);
6159 if (nmp
->nm_vers
< NFS_VER4
) {
6160 /* NFS v2/v3 opens are always allowed - so just add it. */
6161 nfs_open_file_add_open(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, 0);
/* NFSv4 requires a real OPEN on the server */
6163 error
= nfs4_open(np
, nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, ctx
);
/* mark that this implicit open must be closed later */
6166 nofp
->nof_flags
|= NFS_OPEN_FILE_NEEDCLOSE
;
6169 nfs_open_file_clear_busy(nofp
);
6170 if (nfs_mount_state_in_use_end(nmp
, error
)) {
6174 nfs_open_owner_rele(noop
);
/* hand the actual data transfer to the buffer-cache read path */
6178 return (nfs_bioread(VTONFS(ap
->a_vp
), ap
->a_uio
, ap
->a_ioflag
, ap
->a_context
));
/*
 * NFSv4 VNOP_CREATE entry point (regular files).
 *
 * NFSv4 creates regular files via the OPEN RPC, so this builds a
 * provisional nodeless open file, performs an open/create RW with
 * deny-none, then merges the resulting open into the node's open-file
 * list and marks it NFS_OPEN_FILE_CREATE so a later close can balance
 * it.  Includes a workaround for retransmitted creates that were denied
 * on retry because the new file's mode lacks write permission.
 *
 * NOTE(review): damaged extraction — the function name line, several
 * error returns/labels, and closing braces were elided.  Code left
 * byte-identical; comments only.
 */
6182 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6183 * Files are created using the NFSv4 OPEN RPC. So we must open the
6184 * file to create it and then close it.
6188 struct vnop_create_args
/* {
6189 struct vnodeop_desc *a_desc;
6192 struct componentname *a_cnp;
6193 struct vnode_attr *a_vap;
6194 vfs_context_t a_context;
6197 vfs_context_t ctx
= ap
->a_context
;
6198 struct componentname
*cnp
= ap
->a_cnp
;
6199 struct vnode_attr
*vap
= ap
->a_vap
;
6200 vnode_t dvp
= ap
->a_dvp
;
6201 vnode_t
*vpp
= ap
->a_vpp
;
6202 struct nfsmount
*nmp
;
6204 int error
= 0, busyerror
= 0, accessMode
, denyMode
;
6205 struct nfs_open_owner
*noop
= NULL
;
6206 struct nfs_open_file
*newnofp
= NULL
, *nofp
= NULL
;
6209 if (nfs_mount_gone(nmp
))
/* skip setting uid/gid the server would assign anyway */
6213 nfs_avoid_needless_id_setting_on_create(VTONFS(dvp
), vap
, ctx
);
6215 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1);
6220 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
6222 nfs_open_owner_rele(noop
);
6226 /* grab a provisional, nodeless open file */
6227 error
= nfs_open_file_find(NULL
, noop
, &newnofp
, 0, 0, 1);
6228 if (!error
&& (newnofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
6229 printf("nfs_vnop_create: LOST\n");
6232 if (!error
&& (newnofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
6233 /* This shouldn't happen given that this is a new, nodeless nofp */
6234 nfs_mount_state_in_use_end(nmp
, 0);
6235 error
= nfs4_reopen(newnofp
, vfs_context_thread(ctx
));
6236 nfs_open_file_destroy(newnofp
);
6242 error
= nfs_open_file_set_busy(newnofp
, vfs_context_thread(ctx
));
6245 nfs_open_file_destroy(newnofp
);
6251 * We're just trying to create the file.
6252 * We'll create/open it RW, and set NFS_OPEN_FILE_CREATE.
6254 accessMode
= NFS_OPEN_SHARE_ACCESS_BOTH
;
6255 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
6257 /* Do the open/create */
6258 error
= nfs4_open_rpc(newnofp
, ctx
, cnp
, vap
, dvp
, vpp
, NFS_OPEN_CREATE
, accessMode
, denyMode
);
/* retransmission workaround: EACCES on non-exclusive create of a mode without owner-write */
6259 if ((error
== EACCES
) && vap
&& !(vap
->va_vaflags
& VA_EXCLUSIVE
) &&
6260 VATTR_IS_ACTIVE(vap
, va_mode
) && !(vap
->va_mode
& S_IWUSR
)) {
6262 * Hmm... it looks like we may have a situation where the request was
6263 * retransmitted because we didn't get the first response which successfully
6264 * created/opened the file and then the second time we were denied the open
6265 * because the mode the file was created with doesn't allow write access.
6267 * We'll try to work around this by temporarily updating the mode and
6268 * retrying the open.
6270 struct vnode_attr vattr
;
6272 /* first make sure it's there */
6273 int error2
= nfs_lookitup(VTONFS(dvp
), cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
, &np
);
6274 if (!error2
&& np
) {
6275 nfs_node_unlock(np
);
6277 if (vnode_vtype(NFSTOV(np
)) == VREG
) {
/* temporarily grant owner write, retry the open, then restore the mode */
6279 VATTR_SET(&vattr
, va_mode
, (vap
->va_mode
| S_IWUSR
));
6280 if (!nfs4_setattr_rpc(np
, &vattr
, ctx
)) {
6281 error2
= nfs4_open_rpc(newnofp
, ctx
, cnp
, NULL
, dvp
, vpp
, NFS_OPEN_NOCREATE
, accessMode
, denyMode
);
6283 VATTR_SET(&vattr
, va_mode
, vap
->va_mode
);
6284 nfs4_setattr_rpc(np
, &vattr
, ctx
);
6295 if (!error
&& !*vpp
) {
6296 printf("nfs4_open_rpc returned without a node?\n");
6297 /* Hmmm... with no node, we have no filehandle and can't close it */
6301 /* need to cleanup our temporary nofp */
6302 nfs_open_file_clear_busy(newnofp
);
6303 nfs_open_file_destroy(newnofp
);
6307 /* After we have a node, add our open file struct to the node */
6309 nfs_open_file_add_open(newnofp
, accessMode
, denyMode
, 0);
6311 error
= nfs_open_file_find_internal(np
, noop
, &nofp
, 0, 0, 0);
6313 /* This shouldn't happen, because we passed in a new nofp to use. */
6314 printf("nfs_open_file_find_internal failed! %d\n", error
);
6316 } else if (nofp
!= newnofp
) {
6318 * Hmm... an open file struct already exists.
6319 * Mark the existing one busy and merge our open into it.
6320 * Then destroy the one we created.
6321 * Note: there's no chance of an open confict because the
6322 * open has already been granted.
6324 busyerror
= nfs_open_file_set_busy(nofp
, NULL
);
6325 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, 0);
/* the server's stateid belongs to the merged open now */
6326 nofp
->nof_stateid
= newnofp
->nof_stateid
;
6327 if (newnofp
->nof_flags
& NFS_OPEN_FILE_POSIXLOCK
)
6328 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
6329 nfs_open_file_clear_busy(newnofp
);
6330 nfs_open_file_destroy(newnofp
);
6333 /* mark the node as holding a create-initiated open */
6334 nofp
->nof_flags
|= NFS_OPEN_FILE_CREATE
;
6335 nofp
->nof_creator
= current_thread();
6337 if (nofp
&& !busyerror
)
6338 nfs_open_file_clear_busy(nofp
);
6339 if (nfs_mount_state_in_use_end(nmp
, error
)) {
/* state was recovered — presumably a restart follows in elided code */
6340 nofp
= newnofp
= NULL
;
6345 nfs_open_owner_rele(noop
);
6350 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6356 struct componentname
*cnp
,
6357 struct vnode_attr
*vap
,
6362 struct nfsmount
*nmp
;
6363 struct nfs_vattr nvattr
;
6364 int error
= 0, create_error
= EIO
, lockerror
= ENOENT
, busyerror
= ENOENT
, status
;
6365 int nfsvers
, namedattrs
, numops
;
6366 u_int64_t xid
, savedxid
= 0;
6367 nfsnode_t np
= NULL
;
6368 vnode_t newvp
= NULL
;
6369 struct nfsm_chain nmreq
, nmrep
;
6370 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
6374 struct nfsreq rq
, *req
= &rq
;
6375 struct nfs_dulookup dul
;
6376 struct nfsreq_secinfo_args si
;
6378 nmp
= NFSTONMP(dnp
);
6379 if (nfs_mount_gone(nmp
))
6381 nfsvers
= nmp
->nm_vers
;
6382 namedattrs
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
);
6383 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
6386 sd
.specdata1
= sd
.specdata2
= 0;
6395 if (!VATTR_IS_ACTIVE(vap
, va_rdev
))
6397 sd
.specdata1
= major(vap
->va_rdev
);
6398 sd
.specdata2
= minor(vap
->va_rdev
);
6411 nfs_avoid_needless_id_setting_on_create(dnp
, vap
, ctx
);
6413 error
= busyerror
= nfs_node_set_busy(dnp
, vfs_context_thread(ctx
));
6415 nfs_dulookup_init(&dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
);
6417 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
6418 NVATTR_INIT(&nvattr
);
6419 nfsm_chain_null(&nmreq
);
6420 nfsm_chain_null(&nmrep
);
6422 // PUTFH, SAVEFH, CREATE, GETATTR(FH), RESTOREFH, GETATTR
6424 nfsm_chain_build_alloc_init(error
, &nmreq
, 66 * NFSX_UNSIGNED
);
6425 nfsm_chain_add_compound_header(error
, &nmreq
, tag
, nmp
->nm_minor_vers
, numops
);
6427 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
6428 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
6430 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
6432 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_CREATE
);
6433 nfsm_chain_add_32(error
, &nmreq
, type
);
6434 if (type
== NFLNK
) {
6435 nfsm_chain_add_name(error
, &nmreq
, link
, strlen(link
), nmp
);
6436 } else if ((type
== NFBLK
) || (type
== NFCHR
)) {
6437 nfsm_chain_add_32(error
, &nmreq
, sd
.specdata1
);
6438 nfsm_chain_add_32(error
, &nmreq
, sd
.specdata2
);
6440 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
6441 nfsm_chain_add_fattr4(error
, &nmreq
, vap
, nmp
);
6443 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
6444 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
6445 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
6446 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, NULL
);
6448 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
6450 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
6451 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
6452 nfsm_chain_build_done(error
, &nmreq
);
6453 nfsm_assert(error
, (numops
== 0), EPROTO
);
6456 error
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
6457 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &si
, 0, NULL
, &req
);
6460 nfs_dulookup_start(&dul
, dnp
, ctx
);
6461 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
6464 if ((lockerror
= nfs_node_lock(dnp
)))
6466 nfsm_chain_skip_tag(error
, &nmrep
);
6467 nfsm_chain_get_32(error
, &nmrep
, numops
);
6468 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
6469 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
6471 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_CREATE
);
6472 nfsm_chain_check_change_info(error
, &nmrep
, dnp
);
6473 bmlen
= NFS_ATTR_BITMAP_LEN
;
6474 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
6475 /* At this point if we have no error, the object was created. */
6476 /* if we don't get attributes, then we should lookitup. */
6477 create_error
= error
;
6479 nfs_vattr_set_supported(bitmap
, vap
);
6480 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
6482 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
6484 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
6485 printf("nfs: create/%s didn't return filehandle? %s\n", tag
, cnp
->cn_nameptr
);
6489 /* directory attributes: if we don't get them, make sure to invalidate */
6490 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
6491 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
6493 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
6495 NATTRINVALIDATE(dnp
);
6498 nfsm_chain_cleanup(&nmreq
);
6499 nfsm_chain_cleanup(&nmrep
);
6502 if (!create_error
&& (dnp
->n_flag
& NNEGNCENTRIES
)) {
6503 dnp
->n_flag
&= ~NNEGNCENTRIES
;
6504 cache_purge_negatives(NFSTOV(dnp
));
6506 dnp
->n_flag
|= NMODIFIED
;
6507 nfs_node_unlock(dnp
);
6508 /* nfs_getattr() will check changed and purge caches */
6509 nfs_getattr(dnp
, NULL
, ctx
, NGA_CACHED
);
6512 if (!error
&& fh
.fh_len
) {
6513 /* create the vnode with the filehandle and attributes */
6515 error
= nfs_nget(NFSTOMP(dnp
), dnp
, cnp
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, NG_MAKEENTRY
, &np
);
6519 NVATTR_CLEANUP(&nvattr
);
6522 nfs_dulookup_finish(&dul
, dnp
, ctx
);
6525 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
6526 * if we can succeed in looking up the object.
6528 if ((create_error
== EEXIST
) || (!create_error
&& !newvp
)) {
6529 error
= nfs_lookitup(dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
, &np
);
6532 if (vnode_vtype(newvp
) != nfstov_type(type
, nfsvers
))
6537 nfs_node_clear_busy(dnp
);
6540 nfs_node_unlock(np
);
6544 nfs_node_unlock(np
);
// NOTE(review): damaged extraction -- each original source line is split across
// several physical lines and many lines (the function signature, error returns,
// closing braces, switch cases) are missing; gaps in the embedded original line
// numbers (e.g. 6567-6568, 6572-6580) mark the dropped text.
// nfs4_vnop_mknod fragment: VNOP mknod handler.  Validates the mount and the
// requested va_type, then delegates creation to nfs4_create_rpc() with the NFS
// file type obtained from vtonfs_type(), returning the new vnode via *a_vpp.
6552 struct vnop_mknod_args
/* {
6553 struct vnodeop_desc *a_desc;
6556 struct componentname *a_cnp;
6557 struct vnode_attr *a_vap;
6558 vfs_context_t a_context;
6561 nfsnode_t np
= NULL
;
6562 struct nfsmount
*nmp
;
// Resolve the mount from the parent directory vnode; bail if it is gone
// (the error return for nfs_mount_gone() was lost in extraction).
6565 nmp
= VTONMP(ap
->a_dvp
);
6566 if (nfs_mount_gone(nmp
))
// A device node cannot be created without a requested vnode type.
6569 if (!VATTR_IS_ACTIVE(ap
->a_vap
, va_type
))
// switch over the requested type -- the per-type cases (lines 6572-6580)
// were dropped by the extraction.
6571 switch (ap
->a_vap
->va_type
) {
// Perform the actual CREATE via the common NFSv4 create RPC helper.
6581 error
= nfs4_create_rpc(ap
->a_context
, VTONFS(ap
->a_dvp
), ap
->a_cnp
, ap
->a_vap
,
6582 vtonfs_type(ap
->a_vap
->va_type
, nmp
->nm_vers
), NULL
, &np
);
// Hand the newly created node back to the caller (presumably only when
// !error && np -- the guarding condition was lost in extraction).
6584 *ap
->a_vpp
= NFSTOV(np
);
// NOTE(review): damaged extraction -- lines are split and several originals
// (signature, NFDIR type argument at line ~6603, return paths) are missing.
// nfs4_vnop_mkdir fragment: VNOP mkdir handler; thin wrapper that calls
// nfs4_create_rpc() for the directory case and returns the new vnode in *a_vpp.
6590 struct vnop_mkdir_args
/* {
6591 struct vnodeop_desc *a_desc;
6594 struct componentname *a_cnp;
6595 struct vnode_attr *a_vap;
6596 vfs_context_t a_context;
6599 nfsnode_t np
= NULL
;
// Delegate to the shared NFSv4 create helper (the trailing type argument,
// NFDIR in the original, fell in an elided line).
6602 error
= nfs4_create_rpc(ap
->a_context
, VTONFS(ap
->a_dvp
), ap
->a_cnp
, ap
->a_vap
,
// Return the created directory node (guarding condition lost in extraction).
6605 *ap
->a_vpp
= NFSTOV(np
);
// NOTE(review): damaged extraction -- lines split, signature and returns missing.
// nfs4_vnop_symlink fragment: VNOP symlink handler; calls nfs4_create_rpc()
// with type NFLNK and the link target string (a_target), returning the new
// symlink vnode in *a_vpp.
6611 struct vnop_symlink_args
/* {
6612 struct vnodeop_desc *a_desc;
6615 struct componentname *a_cnp;
6616 struct vnode_attr *a_vap;
6618 vfs_context_t a_context;
6621 nfsnode_t np
= NULL
;
// Shared create path: NFLNK + a_target makes this a symlink CREATE.
6624 error
= nfs4_create_rpc(ap
->a_context
, VTONFS(ap
->a_dvp
), ap
->a_cnp
, ap
->a_vap
,
6625 NFLNK
, ap
->a_target
, &np
);
// Return the created node (guarding condition lost in extraction).
6627 *ap
->a_vpp
= NFSTOV(np
);
// NOTE(review): damaged extraction -- each original line is split across
// several physical lines and many originals (signature, error returns, some
// braces) are missing; gaps in the embedded line numbers mark dropped text.
// nfs4_vnop_link fragment: VNOP link handler.  Builds and sends a single
// NFSv4 COMPOUND -- PUTFH(source), SAVEFH, PUTFH(target dir), LINK,
// GETATTR(dir), RESTOREFH, GETATTR(source) -- then parses the reply,
// refreshing or invalidating the cached attributes of both nodes.
6633 struct vnop_link_args
/* {
6634 struct vnodeop_desc *a_desc;
6637 struct componentname *a_cnp;
6638 vfs_context_t a_context;
6641 vfs_context_t ctx
= ap
->a_context
;
6642 vnode_t vp
= ap
->a_vp
;
6643 vnode_t tdvp
= ap
->a_tdvp
;
6644 struct componentname
*cnp
= ap
->a_cnp
;
6645 int error
= 0, lockerror
= ENOENT
, status
;
6646 struct nfsmount
*nmp
;
6647 nfsnode_t np
= VTONFS(vp
);
6648 nfsnode_t tdnp
= VTONFS(tdvp
);
6649 int nfsvers
, numops
;
6650 u_int64_t xid
, savedxid
;
6651 struct nfsm_chain nmreq
, nmrep
;
6652 struct nfsreq_secinfo_args si
;
// Cross-mount hard links are impossible (error return lost in extraction).
6654 if (vnode_mount(vp
) != vnode_mount(tdvp
))
6658 if (nfs_mount_gone(nmp
))
6660 nfsvers
= nmp
->nm_vers
;
// Referral trigger nodes cannot be linked (returns lost in extraction).
6661 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
6663 if (tdnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
6667 * Push all writes to the server, so that the attribute cache
6668 * doesn't get "out of sync" with the server.
6669 * XXX There should be a better way!
6671 nfs_flush(np
, MNT_WAIT
, vfs_context_thread(ctx
), V_IGNORE_WRITEERR
);
// Mark both nodes busy for the duration of the operation.
6673 if ((error
= nfs_node_set_busy2(tdnp
, np
, vfs_context_thread(ctx
))))
6676 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
6677 nfsm_chain_null(&nmreq
);
6678 nfsm_chain_null(&nmrep
);
// --- build the COMPOUND request ---
6680 // PUTFH(SOURCE), SAVEFH, PUTFH(DIR), LINK, GETATTR(DIR), RESTOREFH, GETATTR
6682 nfsm_chain_build_alloc_init(error
, &nmreq
, 29 * NFSX_UNSIGNED
+ cnp
->cn_namelen
);
6683 nfsm_chain_add_compound_header(error
, &nmreq
, "link", nmp
->nm_minor_vers
, numops
);
6685 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
6686 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
6688 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
6690 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
6691 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, tdnp
->n_fhp
, tdnp
->n_fhsize
);
6693 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LINK
);
6694 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
6696 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
6697 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, tdnp
);
6699 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
6701 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
6702 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
6703 nfsm_chain_build_done(error
, &nmreq
);
// numops must have been decremented to zero by the adds above.
6704 nfsm_assert(error
, (numops
== 0), EPROTO
);
// --- send the request and parse the reply ---
6706 error
= nfs_request(tdnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
6708 if ((lockerror
= nfs_node_lock2(tdnp
, np
))) {
6712 nfsm_chain_skip_tag(error
, &nmrep
);
6713 nfsm_chain_get_32(error
, &nmrep
, numops
);
6714 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
6715 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
6716 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
6717 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LINK
);
6718 nfsm_chain_check_change_info(error
, &nmrep
, tdnp
);
6719 /* directory attributes: if we don't get them, make sure to invalidate */
6720 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
6722 nfsm_chain_loadattr(error
, &nmrep
, tdnp
, nfsvers
, &xid
);
// Attribute load failed (guard lost in extraction): drop cached attrs.
6724 NATTRINVALIDATE(tdnp
);
6725 /* link attributes: if we don't get them, make sure to invalidate */
6726 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
6727 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
6729 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
6731 NATTRINVALIDATE(np
);
// --- cleanup and cache maintenance ---
6733 nfsm_chain_cleanup(&nmreq
);
6734 nfsm_chain_cleanup(&nmrep
);
// The target directory now has a new entry.
6736 tdnp
->n_flag
|= NMODIFIED
;
6737 /* Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. */
6738 if (error
== EEXIST
)
// On success, any cached negative name entries in the dir are now stale.
6740 if (!error
&& (tdnp
->n_flag
& NNEGNCENTRIES
)) {
6741 tdnp
->n_flag
&= ~NNEGNCENTRIES
;
6742 cache_purge_negatives(tdvp
);
// Release locks/busy state taken above.
6745 nfs_node_unlock2(tdnp
, np
);
6746 nfs_node_clear_busy2(tdnp
, np
);
// NOTE(review): damaged extraction -- each original line is split across
// several physical lines and many originals (signature, error returns, some
// braces, the !namedattrs guards around the dulookup calls) are missing.
// nfs4_vnop_rmdir fragment: VNOP rmdir handler.  Removes the directory via
// nfs4_remove_rpc(), purges name-cache state, and unhashes the nfsnode so a
// recycled server filehandle cannot alias the dead vnode.
6752 struct vnop_rmdir_args
/* {
6753 struct vnodeop_desc *a_desc;
6756 struct componentname *a_cnp;
6757 vfs_context_t a_context;
6760 vfs_context_t ctx
= ap
->a_context
;
6761 vnode_t vp
= ap
->a_vp
;
6762 vnode_t dvp
= ap
->a_dvp
;
6763 struct componentname
*cnp
= ap
->a_cnp
;
6764 struct nfsmount
*nmp
;
6765 int error
= 0, namedattrs
;
6766 nfsnode_t np
= VTONFS(vp
);
6767 nfsnode_t dnp
= VTONFS(dvp
);
6768 struct nfs_dulookup dul
;
// Only directories may be rmdir'd (error return lost in extraction).
6770 if (vnode_vtype(vp
) != VDIR
)
6773 nmp
= NFSTONMP(dnp
);
6774 if (nfs_mount_gone(nmp
))
// Remember whether the FS supports named attributes; in the original this
// gates the dulookup (directory update lookahead) calls below.
6776 namedattrs
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
);
// Mark both parent and child busy for the operation.
6778 if ((error
= nfs_node_set_busy2(dnp
, np
, vfs_context_thread(ctx
))))
6782 nfs_dulookup_init(&dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
);
6783 nfs_dulookup_start(&dul
, dnp
, ctx
);
// Issue the actual REMOVE RPC against the parent directory.
6786 error
= nfs4_remove_rpc(dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
,
6787 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
// Drop the (dnp, name) -> np entry from the name cache.
6789 nfs_name_cache_purge(dnp
, np
, cnp
, ctx
);
6790 /* nfs_getattr() will check changed and purge caches */
6791 nfs_getattr(dnp
, NULL
, ctx
, NGA_CACHED
);
6793 nfs_dulookup_finish(&dul
, dnp
, ctx
);
6794 nfs_node_clear_busy2(dnp
, np
);
6797 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
6799 if (error
== ENOENT
)
6803 * remove nfsnode from hash now so we can't accidentally find it
6804 * again if another object gets created with the same filehandle
6805 * before this vnode gets reclaimed
6807 lck_mtx_lock(nfs_node_hash_mutex
);
6808 if (np
->n_hflag
& NHHASHED
) {
6809 LIST_REMOVE(np
, n_hash
);
6810 np
->n_hflag
&= ~NHHASHED
;
6811 FSDBG(266, 0, np
, np
->n_flag
, 0xb1eb1e);
6813 lck_mtx_unlock(nfs_node_hash_mutex
);
6819 * NFSv4 Named Attributes
6821 * Both the extended attributes interface and the named streams interface
6822 * are backed by NFSv4 named attributes. The implementations for both use
6823 * a common set of routines in an attempt to reduce code duplication, to
6824 * increase efficiency, to increase caching of both names and data, and to
6825 * confine the complexity.
6827 * Each NFS node caches its named attribute directory's file handle.
6828 * The directory nodes for the named attribute directories are handled
6829 * exactly like regular directories (with a couple minor exceptions).
6830 * Named attribute nodes are also treated as much like regular files as
6833 * Most of the heavy lifting is done by nfs4_named_attr_get().
6837 * Get the given node's attribute directory node.
6838 * If !fetch, then only return a cached node.
6839 * Otherwise, we will attempt to fetch the node from the server.
6840 * (Note: the node should be marked busy.)
// NOTE(review): damaged extraction -- each original line is split across
// several physical lines and many originals (return type line, the !fetch
// early-return, error-branch bodies, closing brace) are missing.
// nfs4_named_attr_dir_get fragment: returns the named-attribute directory
// node for np.  Tries the filehandle cached in np->n_attrdirfh first; if
// fetch is allowed it sends a PUTFH/OPENATTR/GETATTR COMPOUND to obtain the
// attrdir filehandle+attributes, caches the fh in the node, and creates the
// attrdir nfsnode.  Returns the attrdir node or NULL on error.
6843 nfs4_named_attr_dir_get(nfsnode_t np
, int fetch
, vfs_context_t ctx
)
6845 nfsnode_t adnp
= NULL
;
6846 struct nfsmount
*nmp
;
6847 int error
= 0, status
, numops
;
6848 struct nfsm_chain nmreq
, nmrep
;
6850 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
6852 struct nfs_vattr nvattr
;
6853 struct componentname cn
;
6854 struct nfsreq rq
, *req
= &rq
;
6855 struct nfsreq_secinfo_args si
;
6858 if (nfs_mount_gone(nmp
))
// Referral trigger nodes have no attrdir (return lost in extraction).
6860 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
6863 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
6864 NVATTR_INIT(&nvattr
);
6865 nfsm_chain_null(&nmreq
);
6866 nfsm_chain_null(&nmrep
);
// Synthesize a componentname for the attrdir node's name-cache identity.
6868 bzero(&cn
, sizeof(cn
));
6869 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER
, const, char *); /* "/..namedfork/" */
6870 cn
.cn_namelen
= strlen(_PATH_FORKSPECIFIER
);
6871 cn
.cn_nameiop
= LOOKUP
;
// Fast path: an attrdir filehandle is already cached in the node
// (n_attrdirfh stores length in byte 0, fh data starting at byte 1).
6873 if (np
->n_attrdirfh
) {
6874 // XXX can't set parent correctly (to np) yet
6875 error
= nfs_nget(nmp
->nm_mountp
, NULL
, &cn
, np
->n_attrdirfh
+1, *np
->n_attrdirfh
,
6876 NULL
, NULL
, RPCAUTH_UNKNOWN
, NG_NOCREATE
, &adnp
);
// --- slow path: ask the server for the attrdir (reached when fetch) ---
6885 // PUTFH, OPENATTR, GETATTR
6887 nfsm_chain_build_alloc_init(error
, &nmreq
, 22 * NFSX_UNSIGNED
);
6888 nfsm_chain_add_compound_header(error
, &nmreq
, "openattr", nmp
->nm_minor_vers
, numops
);
6890 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
6891 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, np
->n_fhp
, np
->n_fhsize
);
6893 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPENATTR
);
// OPENATTR createdir argument = 0: do not create the attrdir here.
6894 nfsm_chain_add_32(error
, &nmreq
, 0);
6896 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
// Request the standard attribute set plus the filehandle of the attrdir.
6897 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
6898 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
6899 nfsm_chain_add_bitmap_masked(error
, &nmreq
, bitmap
,
6900 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
6901 nfsm_chain_build_done(error
, &nmreq
);
6902 nfsm_assert(error
, (numops
== 0), EPROTO
);
6904 error
= nfs_request_async(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
6905 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &si
, 0, NULL
, &req
);
6907 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
// --- parse the reply ---
6909 nfsm_chain_skip_tag(error
, &nmrep
);
6910 nfsm_chain_get_32(error
, &nmrep
, numops
);
6911 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
6912 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPENATTR
);
6913 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
6915 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
// Without the attrdir filehandle we cannot proceed (branch body lost).
6917 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
) || !fh
.fh_len
) {
// (Re)size the cached fh buffer if absent or if the fh length changed.
6921 if (!np
->n_attrdirfh
|| (*np
->n_attrdirfh
!= fh
.fh_len
)) {
6922 /* (re)allocate attrdir fh buffer */
6923 if (np
->n_attrdirfh
)
6924 FREE(np
->n_attrdirfh
, M_TEMP
);
6925 MALLOC(np
->n_attrdirfh
, u_char
*, fh
.fh_len
+1, M_TEMP
, M_WAITOK
);
// Allocation failure branch (body lost in extraction).
6927 if (!np
->n_attrdirfh
) {
6931 /* cache the attrdir fh in the node */
6932 *np
->n_attrdirfh
= fh
.fh_len
;
6933 bcopy(fh
.fh_data
, np
->n_attrdirfh
+1, fh
.fh_len
);
6934 /* create node for attrdir */
6935 // XXX can't set parent correctly (to np) yet
6936 error
= nfs_nget(NFSTOMP(np
), NULL
, &cn
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, 0, &adnp
);
6938 NVATTR_CLEANUP(&nvattr
);
6939 nfsm_chain_cleanup(&nmreq
);
6940 nfsm_chain_cleanup(&nmrep
);
6943 /* sanity check that this node is an attribute directory */
6944 if (adnp
->n_vattr
.nva_type
!= VDIR
)
6946 if (!(adnp
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
))
6948 nfs_node_unlock(adnp
);
// On error with a node in hand (guard lost), drop the reference.
6950 vnode_put(NFSTOV(adnp
));
6952 return (error
? NULL
: adnp
);
6956 * Get the given node's named attribute node for the name given.
6958 * In an effort to increase the performance of named attribute access, we try
6959 * to reduce server requests by doing the following:
6961 * - cache the node's named attribute directory file handle in the node
6962 * - maintain a directory vnode for the attribute directory
6963 * - use name cache entries (positive and negative) to speed up lookups
6964 * - optionally open the named attribute (with the given accessMode) in the same RPC
6965 * - combine attribute directory retrieval with the lookup/open RPC
6966 * - optionally prefetch the named attribute's first block of data in the same RPC
6968 * Also, in an attempt to reduce the number of copies/variations of this code,
6969 * parts of the RPC building/processing code are conditionalized on what is
6970 * needed for any particular request (openattr, lookup vs. open, read).
6972 * Note that because we may not have the attribute directory node when we start
6973 * the lookup/open, we lock both the node and the attribute directory node.
6976 #define NFS_GET_NAMED_ATTR_CREATE 0x1
6977 #define NFS_GET_NAMED_ATTR_CREATE_GUARDED 0x2
6978 #define NFS_GET_NAMED_ATTR_TRUNCATE 0x4
6979 #define NFS_GET_NAMED_ATTR_PREFETCH 0x8
6982 nfs4_named_attr_get(
6984 struct componentname
*cnp
,
6985 uint32_t accessMode
,
6989 struct nfs_open_file
**nofpp
)
6991 struct nfsmount
*nmp
;
6992 int error
= 0, open_error
= EIO
;
6993 int inuse
= 0, adlockerror
= ENOENT
, busyerror
= ENOENT
, adbusyerror
= ENOENT
, nofpbusyerror
= ENOENT
;
6994 int create
, guarded
, prefetch
, truncate
, noopbusy
= 0;
6995 int open
, status
, numops
, hadattrdir
, negnamecache
;
6996 struct nfs_vattr nvattr
;
6997 struct vnode_attr vattr
;
6998 nfsnode_t adnp
= NULL
, anp
= NULL
;
7000 u_int64_t xid
, savedxid
= 0;
7001 struct nfsm_chain nmreq
, nmrep
;
7002 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
7003 uint32_t denyMode
, rflags
, delegation
, recall
, eof
, rlen
, retlen
;
7004 nfs_stateid stateid
, dstateid
;
7006 struct nfs_open_owner
*noop
= NULL
;
7007 struct nfs_open_file
*newnofp
= NULL
, *nofp
= NULL
;
7008 struct vnop_access_args naa
;
7013 uint32_t ace_type
, ace_flags
, ace_mask
, len
, slen
;
7014 struct kauth_ace ace
;
7015 struct nfsreq rq
, *req
= &rq
;
7016 struct nfsreq_secinfo_args si
;
7020 rflags
= delegation
= recall
= eof
= rlen
= retlen
= 0;
7023 slen
= sizeof(sbuf
);
7026 if (nfs_mount_gone(nmp
))
7028 NVATTR_INIT(&nvattr
);
7029 negnamecache
= !NMFLAG(nmp
, NONEGNAMECACHE
);
7030 thd
= vfs_context_thread(ctx
);
7031 cred
= vfs_context_ucred(ctx
);
7032 create
= (flags
& NFS_GET_NAMED_ATTR_CREATE
) ? NFS_OPEN_CREATE
: NFS_OPEN_NOCREATE
;
7033 guarded
= (flags
& NFS_GET_NAMED_ATTR_CREATE_GUARDED
) ? NFS_CREATE_GUARDED
: NFS_CREATE_UNCHECKED
;
7034 truncate
= (flags
& NFS_GET_NAMED_ATTR_TRUNCATE
);
7035 prefetch
= (flags
& NFS_GET_NAMED_ATTR_PREFETCH
);
7038 error
= nfs_getattr(np
, &nvattr
, ctx
, NGA_CACHED
);
7041 if (NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_NAMED_ATTR
) &&
7042 !(nvattr
.nva_flags
& NFS_FFLAG_HAS_NAMED_ATTRS
))
7044 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_NONE
) {
7045 /* shouldn't happen... but just be safe */
7046 printf("nfs4_named_attr_get: create with no access %s\n", cnp
->cn_nameptr
);
7047 accessMode
= NFS_OPEN_SHARE_ACCESS_READ
;
7049 open
= (accessMode
!= NFS_OPEN_SHARE_ACCESS_NONE
);
7052 * We're trying to open the file.
7053 * We'll create/open it with the given access mode,
7054 * and set NFS_OPEN_FILE_CREATE.
7056 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
7057 if (prefetch
&& guarded
)
7058 prefetch
= 0; /* no sense prefetching data that can't be there */
7060 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1);
7065 if ((error
= busyerror
= nfs_node_set_busy(np
, vfs_context_thread(ctx
))))
7068 adnp
= nfs4_named_attr_dir_get(np
, 0, ctx
);
7069 hadattrdir
= (adnp
!= NULL
);
7072 /* use the special state ID because we don't have a real one to send */
7073 stateid
.seqid
= stateid
.other
[0] = stateid
.other
[1] = stateid
.other
[2] = 0;
7074 rlen
= MIN(nmp
->nm_rsize
, nmp
->nm_biosize
);
7076 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
7077 nfsm_chain_null(&nmreq
);
7078 nfsm_chain_null(&nmrep
);
7081 if ((error
= adbusyerror
= nfs_node_set_busy(adnp
, vfs_context_thread(ctx
))))
7083 /* nfs_getattr() will check changed and purge caches */
7084 error
= nfs_getattr(adnp
, NULL
, ctx
, NGA_CACHED
);
7086 error
= cache_lookup(NFSTOV(adnp
), &avp
, cnp
);
7089 /* negative cache entry */
7093 /* try dir buf cache lookup */
7094 error
= nfs_dir_buf_cache_lookup(adnp
, &anp
, cnp
, ctx
, 0);
7095 if (!error
&& anp
) {
7096 /* dir buf cache hit */
7100 if (error
!= -1) /* cache miss */
7104 /* cache hit, not really an error */
7105 OSAddAtomic64(1, &nfsstats
.lookupcache_hits
);
7107 *anpp
= anp
= VTONFS(avp
);
7109 nfs_node_clear_busy(adnp
);
7110 adbusyerror
= ENOENT
;
7112 /* check for directory access */
7113 naa
.a_desc
= &vnop_access_desc
;
7114 naa
.a_vp
= NFSTOV(adnp
);
7115 naa
.a_action
= KAUTH_VNODE_SEARCH
;
7116 naa
.a_context
= ctx
;
7118 /* compute actual success/failure based on accessibility */
7119 error
= nfs_vnop_access(&naa
);
7122 /* we either found it, or hit an error */
7123 if (!error
&& guarded
) {
7124 /* found cached entry but told not to use it */
7126 vnode_put(NFSTOV(anp
));
7129 /* we're done if error or we don't need to open */
7132 /* no error and we need to open... */
7138 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
7140 nfs_open_owner_rele(noop
);
7146 /* grab an open file - possibly provisional/nodeless if cache_lookup() failed */
7147 error
= nfs_open_file_find(anp
, noop
, &newnofp
, 0, 0, 1);
7148 if (!error
&& (newnofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
7149 printf("nfs4_named_attr_get: LOST %d %s\n", kauth_cred_getuid(noop
->noo_cred
), cnp
->cn_nameptr
);
7152 if (!error
&& (newnofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
7153 nfs_mount_state_in_use_end(nmp
, 0);
7154 error
= nfs4_reopen(newnofp
, vfs_context_thread(ctx
));
7155 nfs_open_file_destroy(newnofp
);
7161 error
= nfs_open_file_set_busy(newnofp
, vfs_context_thread(ctx
));
7164 nfs_open_file_destroy(newnofp
);
7170 * We already have the node. So we just need to open
7171 * it - which we may be able to do with a delegation.
7173 open_error
= error
= nfs4_open(anp
, newnofp
, accessMode
, denyMode
, ctx
);
7175 /* open succeeded, so our open file is no longer temporary */
7187 * We either don't have the attrdir or we didn't find the attribute
7188 * in the name cache, so we need to talk to the server.
7190 * If we don't have the attrdir, we'll need to ask the server for that too.
7191 * If the caller is requesting that the attribute be created, we need to
7192 * make sure the attrdir is created.
7193 * The caller may also request that the first block of an existing attribute
7194 * be retrieved at the same time.
7198 /* need to mark the open owner busy during the RPC */
7199 if ((error
= nfs_open_owner_set_busy(noop
, thd
)))
7205 * We'd like to get updated post-open/lookup attributes for the
7206 * directory and we may also want to prefetch some data via READ.
7207 * We'd like the READ results to be last so that we can leave the
7208 * data in the mbufs until the end.
7210 * At a minimum we're sending: PUTFH, LOOKUP/OPEN, GETATTR, PUTFH, GETATTR
7214 numops
+= 3; // also sending: OPENATTR, GETATTR, OPENATTR
7216 numops
+= 4; // also sending: SAVEFH, RESTOREFH, NVERIFY, READ
7217 nfsm_chain_build_alloc_init(error
, &nmreq
, 64 * NFSX_UNSIGNED
+ cnp
->cn_namelen
);
7218 nfsm_chain_add_compound_header(error
, &nmreq
, "getnamedattr", nmp
->nm_minor_vers
, numops
);
7221 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7222 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, adnp
->n_fhp
, adnp
->n_fhsize
);
7225 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7226 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, np
->n_fhp
, np
->n_fhsize
);
7228 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPENATTR
);
7229 nfsm_chain_add_32(error
, &nmreq
, create
? 1 : 0);
7231 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7232 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
7233 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
7234 nfsm_chain_add_bitmap_masked(error
, &nmreq
, bitmap
,
7235 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
7239 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
7240 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
7241 nfsm_chain_add_32(error
, &nmreq
, accessMode
);
7242 nfsm_chain_add_32(error
, &nmreq
, denyMode
);
7243 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
);
7244 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
7245 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
));
7246 nfsm_chain_add_32(error
, &nmreq
, create
);
7248 nfsm_chain_add_32(error
, &nmreq
, guarded
);
7251 VATTR_SET(&vattr
, va_data_size
, 0);
7252 nfsm_chain_add_fattr4(error
, &nmreq
, &vattr
, nmp
);
7254 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_NULL
);
7255 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
7258 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOOKUP
);
7259 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
7262 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7263 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
7264 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
7265 nfsm_chain_add_bitmap_masked(error
, &nmreq
, bitmap
,
7266 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
7269 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
7273 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7274 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, adnp
->n_fhp
, adnp
->n_fhsize
);
7277 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7278 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, np
->n_fhp
, np
->n_fhsize
);
7280 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPENATTR
);
7281 nfsm_chain_add_32(error
, &nmreq
, 0);
7284 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7285 nfsm_chain_add_bitmap_masked(error
, &nmreq
, nfs_getattr_bitmap
,
7286 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
7289 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
7291 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_NVERIFY
);
7293 VATTR_SET(&vattr
, va_data_size
, 0);
7294 nfsm_chain_add_fattr4(error
, &nmreq
, &vattr
, nmp
);
7296 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READ
);
7297 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
7298 nfsm_chain_add_64(error
, &nmreq
, 0);
7299 nfsm_chain_add_32(error
, &nmreq
, rlen
);
7301 nfsm_chain_build_done(error
, &nmreq
);
7302 nfsm_assert(error
, (numops
== 0), EPROTO
);
7304 error
= nfs_request_async(hadattrdir
? adnp
: np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
7305 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &si
, open
? R_NOINTR
: 0, NULL
, &req
);
7307 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
7309 if (hadattrdir
&& ((adlockerror
= nfs_node_lock(adnp
))))
7310 error
= adlockerror
;
7312 nfsm_chain_skip_tag(error
, &nmrep
);
7313 nfsm_chain_get_32(error
, &nmrep
, numops
);
7314 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
7316 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPENATTR
);
7317 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7319 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
7321 if (NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
) && fh
.fh_len
) {
7322 if (!np
->n_attrdirfh
|| (*np
->n_attrdirfh
!= fh
.fh_len
)) {
7323 /* (re)allocate attrdir fh buffer */
7324 if (np
->n_attrdirfh
)
7325 FREE(np
->n_attrdirfh
, M_TEMP
);
7326 MALLOC(np
->n_attrdirfh
, u_char
*, fh
.fh_len
+1, M_TEMP
, M_WAITOK
);
7328 if (np
->n_attrdirfh
) {
7329 /* remember the attrdir fh in the node */
7330 *np
->n_attrdirfh
= fh
.fh_len
;
7331 bcopy(fh
.fh_data
, np
->n_attrdirfh
+1, fh
.fh_len
);
7332 /* create busied node for attrdir */
7333 struct componentname cn
;
7334 bzero(&cn
, sizeof(cn
));
7335 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER
, const, char *); /* "/..namedfork/" */
7336 cn
.cn_namelen
= strlen(_PATH_FORKSPECIFIER
);
7337 cn
.cn_nameiop
= LOOKUP
;
7338 // XXX can't set parent correctly (to np) yet
7339 error
= nfs_nget(NFSTOMP(np
), NULL
, &cn
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, 0, &adnp
);
7342 /* set the node busy */
7343 SET(adnp
->n_flag
, NBUSY
);
7346 /* if no adnp, oh well... */
7350 NVATTR_CLEANUP(&nvattr
);
7354 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
7355 nfs_owner_seqid_increment(noop
, NULL
, error
);
7356 nfsm_chain_get_stateid(error
, &nmrep
, &newnofp
->nof_stateid
);
7357 nfsm_chain_check_change_info(error
, &nmrep
, adnp
);
7358 nfsm_chain_get_32(error
, &nmrep
, rflags
);
7359 bmlen
= NFS_ATTR_BITMAP_LEN
;
7360 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
7361 nfsm_chain_get_32(error
, &nmrep
, delegation
);
7363 switch (delegation
) {
7364 case NFS_OPEN_DELEGATE_NONE
:
7366 case NFS_OPEN_DELEGATE_READ
:
7367 case NFS_OPEN_DELEGATE_WRITE
:
7368 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
7369 nfsm_chain_get_32(error
, &nmrep
, recall
);
7370 if (delegation
== NFS_OPEN_DELEGATE_WRITE
) // space (skip) XXX
7371 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
7372 /* if we have any trouble accepting the ACE, just invalidate it */
7373 ace_type
= ace_flags
= ace_mask
= len
= 0;
7374 nfsm_chain_get_32(error
, &nmrep
, ace_type
);
7375 nfsm_chain_get_32(error
, &nmrep
, ace_flags
);
7376 nfsm_chain_get_32(error
, &nmrep
, ace_mask
);
7377 nfsm_chain_get_32(error
, &nmrep
, len
);
7378 ace
.ace_flags
= nfs4_ace_nfstype_to_vfstype(ace_type
, &error
);
7379 ace
.ace_flags
|= nfs4_ace_nfsflags_to_vfsflags(ace_flags
);
7380 ace
.ace_rights
= nfs4_ace_nfsmask_to_vfsrights(ace_mask
);
7381 if (!error
&& (len
>= slen
)) {
7382 MALLOC(s
, char*, len
+1, M_TEMP
, M_WAITOK
);
7389 nfsm_chain_get_opaque(error
, &nmrep
, len
, s
);
7391 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(len
));
7394 if (nfs4_id2guid(s
, &ace
.ace_applicable
, (ace_flags
& NFS_ACE_IDENTIFIER_GROUP
)))
7399 if (s
&& (s
!= sbuf
))
7406 /* At this point if we have no error, the object was created/opened. */
7409 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOOKUP
);
7411 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7413 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
7415 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
) || !fh
.fh_len
) {
7420 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
7421 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
7423 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPENATTR
);
7424 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7427 nfsm_chain_loadattr(error
, &nmrep
, adnp
, nmp
->nm_vers
, &xid
);
7431 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
)
7432 newnofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
7433 if (rflags
& NFS_OPEN_RESULT_CONFIRM
) {
7435 nfs_node_unlock(adnp
);
7436 adlockerror
= ENOENT
;
7438 NVATTR_CLEANUP(&nvattr
);
7439 error
= nfs4_open_confirm_rpc(nmp
, adnp
? adnp
: np
, fh
.fh_data
, fh
.fh_len
, noop
, &newnofp
->nof_stateid
, thd
, cred
, &nvattr
, &xid
);
7442 if ((adlockerror
= nfs_node_lock(adnp
)))
7443 error
= adlockerror
;
7448 if (open
&& adnp
&& !adlockerror
) {
7449 if (!open_error
&& (adnp
->n_flag
& NNEGNCENTRIES
)) {
7450 adnp
->n_flag
&= ~NNEGNCENTRIES
;
7451 cache_purge_negatives(NFSTOV(adnp
));
7453 adnp
->n_flag
|= NMODIFIED
;
7454 nfs_node_unlock(adnp
);
7455 adlockerror
= ENOENT
;
7456 nfs_getattr(adnp
, NULL
, ctx
, NGA_CACHED
);
7458 if (adnp
&& !adlockerror
&& (error
== ENOENT
) &&
7459 (cnp
->cn_flags
& MAKEENTRY
) && (cnp
->cn_nameiop
!= CREATE
) && negnamecache
) {
7460 /* add a negative entry in the name cache */
7461 cache_enter(NFSTOV(adnp
), NULL
, cnp
);
7462 adnp
->n_flag
|= NNEGNCENTRIES
;
7464 if (adnp
&& !adlockerror
) {
7465 nfs_node_unlock(adnp
);
7466 adlockerror
= ENOENT
;
7468 if (!error
&& !anp
&& fh
.fh_len
) {
7469 /* create the vnode with the filehandle and attributes */
7471 error
= nfs_nget(NFSTOMP(np
), adnp
, cnp
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, NG_MAKEENTRY
, &anp
);
7474 nfs_node_unlock(anp
);
7476 if (!error
&& open
) {
7477 nfs_open_file_add_open(newnofp
, accessMode
, denyMode
, 0);
7478 /* After we have a node, add our open file struct to the node */
7480 error
= nfs_open_file_find_internal(anp
, noop
, &nofp
, 0, 0, 0);
7482 /* This shouldn't happen, because we passed in a new nofp to use. */
7483 printf("nfs_open_file_find_internal failed! %d\n", error
);
7485 } else if (nofp
!= newnofp
) {
7487 * Hmm... an open file struct already exists.
7488 * Mark the existing one busy and merge our open into it.
7489 * Then destroy the one we created.
7490 * Note: there's no chance of an open confict because the
7491 * open has already been granted.
7493 nofpbusyerror
= nfs_open_file_set_busy(nofp
, NULL
);
7494 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, 0);
7495 nofp
->nof_stateid
= newnofp
->nof_stateid
;
7496 if (newnofp
->nof_flags
& NFS_OPEN_FILE_POSIXLOCK
)
7497 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
7498 nfs_open_file_clear_busy(newnofp
);
7499 nfs_open_file_destroy(newnofp
);
7505 /* mark the node as holding a create-initiated open */
7506 nofp
->nof_flags
|= NFS_OPEN_FILE_CREATE
;
7507 nofp
->nof_creator
= current_thread();
7513 NVATTR_CLEANUP(&nvattr
);
7514 if (open
&& ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
))) {
7515 if (!error
&& anp
&& !recall
) {
7516 /* stuff the delegation state in the node */
7517 lck_mtx_lock(&anp
->n_openlock
);
7518 anp
->n_openflags
&= ~N_DELEG_MASK
;
7519 anp
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
7520 anp
->n_dstateid
= dstateid
;
7522 if (anp
->n_dlink
.tqe_next
== NFSNOLIST
) {
7523 lck_mtx_lock(&nmp
->nm_lock
);
7524 if (anp
->n_dlink
.tqe_next
== NFSNOLIST
)
7525 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, anp
, n_dlink
);
7526 lck_mtx_unlock(&nmp
->nm_lock
);
7528 lck_mtx_unlock(&anp
->n_openlock
);
7530 /* give the delegation back */
7532 if (NFS_CMPFH(anp
, fh
.fh_data
, fh
.fh_len
)) {
7533 /* update delegation state and return it */
7534 lck_mtx_lock(&anp
->n_openlock
);
7535 anp
->n_openflags
&= ~N_DELEG_MASK
;
7536 anp
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
7537 anp
->n_dstateid
= dstateid
;
7539 if (anp
->n_dlink
.tqe_next
== NFSNOLIST
) {
7540 lck_mtx_lock(&nmp
->nm_lock
);
7541 if (anp
->n_dlink
.tqe_next
== NFSNOLIST
)
7542 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, anp
, n_dlink
);
7543 lck_mtx_unlock(&nmp
->nm_lock
);
7545 lck_mtx_unlock(&anp
->n_openlock
);
7546 /* don't need to send a separate delegreturn for fh */
7549 /* return anp's current delegation */
7550 nfs4_delegation_return(anp
, 0, thd
, cred
);
7552 if (fh
.fh_len
) /* return fh's delegation if it wasn't for anp */
7553 nfs4_delegreturn_rpc(nmp
, fh
.fh_data
, fh
.fh_len
, &dstateid
, 0, thd
, cred
);
7558 /* need to cleanup our temporary nofp */
7559 nfs_open_file_clear_busy(newnofp
);
7560 nfs_open_file_destroy(newnofp
);
7562 } else if (nofp
&& !nofpbusyerror
) {
7563 nfs_open_file_clear_busy(nofp
);
7564 nofpbusyerror
= ENOENT
;
7566 if (inuse
&& nfs_mount_state_in_use_end(nmp
, error
)) {
7568 nofp
= newnofp
= NULL
;
7569 rflags
= delegation
= recall
= eof
= rlen
= retlen
= 0;
7572 slen
= sizeof(sbuf
);
7573 nfsm_chain_cleanup(&nmreq
);
7574 nfsm_chain_cleanup(&nmrep
);
7576 vnode_put(NFSTOV(anp
));
7579 hadattrdir
= (adnp
!= NULL
);
7581 nfs_open_owner_clear_busy(noop
);
7588 nfs_open_owner_clear_busy(noop
);
7591 nfs_open_owner_rele(noop
);
7594 if (!error
&& prefetch
&& nmrep
.nmc_mhead
) {
7595 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
7596 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_NVERIFY
);
7597 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READ
);
7598 nfsm_chain_get_32(error
, &nmrep
, eof
);
7599 nfsm_chain_get_32(error
, &nmrep
, retlen
);
7600 if (!error
&& anp
) {
7602 * There can be one problem with doing the prefetch.
7603 * Because we don't have the node before we start the RPC, we
7604 * can't have the buffer busy while the READ is performed.
7605 * So there is a chance that other I/O occured on the same
7606 * range of data while we were performing this RPC. If that
7607 * happens, then it's possible the data we have in the READ
7608 * response is no longer up to date.
7609 * Once we have the node and the buffer, we need to make sure
7610 * that there's no chance we could be putting stale data in
7612 * So, we check if the range read is dirty or if any I/O may
7613 * have occured on it while we were performing our RPC.
7615 struct nfsbuf
*bp
= NULL
;
7619 retlen
= MIN(retlen
, rlen
);
7621 /* check if node needs size update or invalidation */
7622 if (ISSET(anp
->n_flag
, NUPDATESIZE
))
7623 nfs_data_update_size(anp
, 0);
7624 if (!(error
= nfs_node_lock(anp
))) {
7625 if (anp
->n_flag
& NNEEDINVALIDATE
) {
7626 anp
->n_flag
&= ~NNEEDINVALIDATE
;
7627 nfs_node_unlock(anp
);
7628 error
= nfs_vinvalbuf(NFSTOV(anp
), V_SAVE
|V_IGNORE_WRITEERR
, ctx
, 1);
7629 if (!error
) /* lets play it safe and just drop the data */
7632 nfs_node_unlock(anp
);
7636 /* calculate page mask for the range of data read */
7637 lastpg
= (trunc_page_32(retlen
) - 1) / PAGE_SIZE
;
7638 pagemask
= ((1 << (lastpg
+ 1)) - 1);
7641 error
= nfs_buf_get(anp
, 0, nmp
->nm_biosize
, thd
, NBLK_READ
|NBLK_NOWAIT
, &bp
);
7642 /* don't save the data if dirty or potential I/O conflict */
7643 if (!error
&& bp
&& !bp
->nb_dirtyoff
&& !(bp
->nb_dirty
& pagemask
) &&
7644 timevalcmp(&anp
->n_lastio
, &now
, <)) {
7645 OSAddAtomic64(1, &nfsstats
.read_bios
);
7646 CLR(bp
->nb_flags
, (NB_DONE
|NB_ASYNC
));
7647 SET(bp
->nb_flags
, NB_READ
);
7649 nfsm_chain_get_opaque(error
, &nmrep
, retlen
, bp
->nb_data
);
7651 bp
->nb_error
= error
;
7652 SET(bp
->nb_flags
, NB_ERROR
);
7655 bp
->nb_endio
= rlen
;
7656 if ((retlen
> 0) && (bp
->nb_endio
< (int)retlen
))
7657 bp
->nb_endio
= retlen
;
7658 if (eof
|| (retlen
== 0)) {
7659 /* zero out the remaining data (up to EOF) */
7660 off_t rpcrem
, eofrem
, rem
;
7661 rpcrem
= (rlen
- retlen
);
7662 eofrem
= anp
->n_size
- (NBOFF(bp
) + retlen
);
7663 rem
= (rpcrem
< eofrem
) ? rpcrem
: eofrem
;
7665 bzero(bp
->nb_data
+ retlen
, rem
);
7666 } else if ((retlen
< rlen
) && !ISSET(bp
->nb_flags
, NB_ERROR
)) {
7667 /* ugh... short read ... just invalidate for now... */
7668 SET(bp
->nb_flags
, NB_INVAL
);
7671 nfs_buf_read_finish(bp
);
7672 microuptime(&anp
->n_lastio
);
7675 nfs_buf_release(bp
, 1);
7677 error
= 0; /* ignore any transient error in processing the prefetch */
7679 if (adnp
&& !adbusyerror
) {
7680 nfs_node_clear_busy(adnp
);
7681 adbusyerror
= ENOENT
;
7684 nfs_node_clear_busy(np
);
7688 vnode_put(NFSTOV(adnp
));
7689 if (error
&& *anpp
) {
7690 vnode_put(NFSTOV(*anpp
));
7693 nfsm_chain_cleanup(&nmreq
);
7694 nfsm_chain_cleanup(&nmrep
);
7699 * Remove a named attribute.
7702 nfs4_named_attr_remove(nfsnode_t np
, nfsnode_t anp
, const char *name
, vfs_context_t ctx
)
7704 nfsnode_t adnp
= NULL
;
7705 struct nfsmount
*nmp
;
7706 struct componentname cn
;
7707 struct vnop_remove_args vra
;
7708 int error
, putanp
= 0;
7711 if (nfs_mount_gone(nmp
))
7714 bzero(&cn
, sizeof(cn
));
7715 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(name
, const, char *);
7716 cn
.cn_namelen
= strlen(name
);
7717 cn
.cn_nameiop
= DELETE
;
7721 error
= nfs4_named_attr_get(np
, &cn
, NFS_OPEN_SHARE_ACCESS_NONE
,
7722 0, ctx
, &anp
, NULL
);
7723 if ((!error
&& !anp
) || (error
== ENOATTR
))
7727 vnode_put(NFSTOV(anp
));
7735 if ((error
= nfs_node_set_busy(np
, vfs_context_thread(ctx
))))
7737 adnp
= nfs4_named_attr_dir_get(np
, 1, ctx
);
7738 nfs_node_clear_busy(np
);
7744 vra
.a_desc
= &vnop_remove_desc
;
7745 vra
.a_dvp
= NFSTOV(adnp
);
7746 vra
.a_vp
= NFSTOV(anp
);
7749 vra
.a_context
= ctx
;
7750 error
= nfs_vnop_remove(&vra
);
7753 vnode_put(NFSTOV(adnp
));
7755 vnode_put(NFSTOV(anp
));
7761 struct vnop_getxattr_args
/* {
7762 struct vnodeop_desc *a_desc;
7764 const char * a_name;
7768 vfs_context_t a_context;
7771 vfs_context_t ctx
= ap
->a_context
;
7772 struct nfsmount
*nmp
;
7773 struct nfs_vattr nvattr
;
7774 struct componentname cn
;
7776 int error
= 0, isrsrcfork
;
7778 nmp
= VTONMP(ap
->a_vp
);
7779 if (nfs_mount_gone(nmp
))
7782 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
))
7784 error
= nfs_getattr(VTONFS(ap
->a_vp
), &nvattr
, ctx
, NGA_CACHED
);
7787 if (NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_NAMED_ATTR
) &&
7788 !(nvattr
.nva_flags
& NFS_FFLAG_HAS_NAMED_ATTRS
))
7791 bzero(&cn
, sizeof(cn
));
7792 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(ap
->a_name
, const, char *);
7793 cn
.cn_namelen
= strlen(ap
->a_name
);
7794 cn
.cn_nameiop
= LOOKUP
;
7795 cn
.cn_flags
= MAKEENTRY
;
7797 /* we'll normally try to prefetch data for xattrs... the resource fork is really a stream */
7798 isrsrcfork
= (bcmp(ap
->a_name
, XATTR_RESOURCEFORK_NAME
, sizeof(XATTR_RESOURCEFORK_NAME
)) == 0);
7800 error
= nfs4_named_attr_get(VTONFS(ap
->a_vp
), &cn
, NFS_OPEN_SHARE_ACCESS_NONE
,
7801 !isrsrcfork
? NFS_GET_NAMED_ATTR_PREFETCH
: 0, ctx
, &anp
, NULL
);
7802 if ((!error
&& !anp
) || (error
== ENOENT
))
7806 error
= nfs_bioread(anp
, ap
->a_uio
, 0, ctx
);
7808 *ap
->a_size
= anp
->n_size
;
7811 vnode_put(NFSTOV(anp
));
7817 struct vnop_setxattr_args
/* {
7818 struct vnodeop_desc *a_desc;
7820 const char * a_name;
7823 vfs_context_t a_context;
7826 vfs_context_t ctx
= ap
->a_context
;
7827 int options
= ap
->a_options
;
7828 uio_t uio
= ap
->a_uio
;
7829 const char *name
= ap
->a_name
;
7830 struct nfsmount
*nmp
;
7831 struct componentname cn
;
7832 nfsnode_t anp
= NULL
;
7833 int error
= 0, closeerror
= 0, flags
, isrsrcfork
, isfinderinfo
, empty
= 0, i
;
7834 #define FINDERINFOSIZE 32
7835 uint8_t finfo
[FINDERINFOSIZE
];
7837 struct nfs_open_file
*nofp
= NULL
;
7838 char uio_buf
[ UIO_SIZEOF(1) ];
7840 struct vnop_write_args vwa
;
7842 nmp
= VTONMP(ap
->a_vp
);
7843 if (nfs_mount_gone(nmp
))
7846 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
))
7849 if ((options
& XATTR_CREATE
) && (options
& XATTR_REPLACE
))
7852 /* XXX limitation based on need to back up uio on short write */
7853 if (uio_iovcnt(uio
) > 1) {
7854 printf("nfs4_vnop_setxattr: iovcnt > 1\n");
7858 bzero(&cn
, sizeof(cn
));
7859 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(name
, const, char *);
7860 cn
.cn_namelen
= strlen(name
);
7861 cn
.cn_nameiop
= CREATE
;
7862 cn
.cn_flags
= MAKEENTRY
;
7864 isfinderinfo
= (bcmp(name
, XATTR_FINDERINFO_NAME
, sizeof(XATTR_FINDERINFO_NAME
)) == 0);
7865 isrsrcfork
= isfinderinfo
? 0 : (bcmp(name
, XATTR_RESOURCEFORK_NAME
, sizeof(XATTR_RESOURCEFORK_NAME
)) == 0);
7867 uio_setoffset(uio
, 0);
7869 if (uio_resid(uio
) != sizeof(finfo
))
7871 error
= uiomove((char*)&finfo
, sizeof(finfo
), uio
);
7874 /* setting a FinderInfo of all zeroes means remove the FinderInfo */
7876 for (i
=0, finfop
=(uint32_t*)&finfo
; i
< (int)(sizeof(finfo
)/sizeof(uint32_t)); i
++)
7881 if (empty
&& !(options
& (XATTR_CREATE
|XATTR_REPLACE
))) {
7882 error
= nfs4_named_attr_remove(VTONFS(ap
->a_vp
), anp
, name
, ctx
);
7883 if (error
== ENOENT
)
7887 /* first, let's see if we get a create/replace error */
7891 * create/open the xattr
7893 * We need to make sure not to create it if XATTR_REPLACE.
7894 * For all xattrs except the resource fork, we also want to
7895 * truncate the xattr to remove any current data. We'll do
7896 * that by setting the size to 0 on create/open.
7899 if (!(options
& XATTR_REPLACE
))
7900 flags
|= NFS_GET_NAMED_ATTR_CREATE
;
7901 if (options
& XATTR_CREATE
)
7902 flags
|= NFS_GET_NAMED_ATTR_CREATE_GUARDED
;
7904 flags
|= NFS_GET_NAMED_ATTR_TRUNCATE
;
7906 error
= nfs4_named_attr_get(VTONFS(ap
->a_vp
), &cn
, NFS_OPEN_SHARE_ACCESS_BOTH
,
7907 flags
, ctx
, &anp
, &nofp
);
7912 /* grab the open state from the get/create/open */
7913 if (nofp
&& !(error
= nfs_open_file_set_busy(nofp
, NULL
))) {
7914 nofp
->nof_flags
&= ~NFS_OPEN_FILE_CREATE
;
7915 nofp
->nof_creator
= NULL
;
7916 nfs_open_file_clear_busy(nofp
);
7919 /* Setting an empty FinderInfo really means remove it, skip to the close/remove */
7920 if (isfinderinfo
&& empty
)
7924 * Write the data out and flush.
7926 * For FinderInfo, we've already copied the data to finfo, so do I/O from there.
7928 vwa
.a_desc
= &vnop_write_desc
;
7929 vwa
.a_vp
= NFSTOV(anp
);
7932 vwa
.a_context
= ctx
;
7934 auio
= uio_createwithbuffer(1, 0, UIO_SYSSPACE
, UIO_WRITE
, &uio_buf
, sizeof(uio_buf
));
7935 uio_addiov(auio
, (uintptr_t)&finfo
, sizeof(finfo
));
7937 } else if (uio_resid(uio
) > 0) {
7941 error
= nfs_vnop_write(&vwa
);
7943 error
= nfs_flush(anp
, MNT_WAIT
, vfs_context_thread(ctx
), 0);
7946 /* Close the xattr. */
7948 int busyerror
= nfs_open_file_set_busy(nofp
, NULL
);
7949 closeerror
= nfs_close(anp
, nofp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_NONE
, ctx
);
7951 nfs_open_file_clear_busy(nofp
);
7953 if (!error
&& isfinderinfo
&& empty
) { /* Setting an empty FinderInfo really means remove it */
7954 error
= nfs4_named_attr_remove(VTONFS(ap
->a_vp
), anp
, name
, ctx
);
7955 if (error
== ENOENT
)
7962 vnode_put(NFSTOV(anp
));
7963 if (error
== ENOENT
)
7969 nfs4_vnop_removexattr(
7970 struct vnop_removexattr_args
/* {
7971 struct vnodeop_desc *a_desc;
7973 const char * a_name;
7975 vfs_context_t a_context;
7978 struct nfsmount
*nmp
= VTONMP(ap
->a_vp
);
7981 if (nfs_mount_gone(nmp
))
7983 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
))
7986 error
= nfs4_named_attr_remove(VTONFS(ap
->a_vp
), NULL
, ap
->a_name
, ap
->a_context
);
7987 if (error
== ENOENT
)
7993 nfs4_vnop_listxattr(
7994 struct vnop_listxattr_args
/* {
7995 struct vnodeop_desc *a_desc;
8000 vfs_context_t a_context;
8003 vfs_context_t ctx
= ap
->a_context
;
8004 nfsnode_t np
= VTONFS(ap
->a_vp
);
8005 uio_t uio
= ap
->a_uio
;
8006 nfsnode_t adnp
= NULL
;
8007 struct nfsmount
*nmp
;
8009 struct nfs_vattr nvattr
;
8010 uint64_t cookie
, nextcookie
, lbn
= 0;
8011 struct nfsbuf
*bp
= NULL
;
8012 struct nfs_dir_buf_header
*ndbhp
;
8013 struct direntry
*dp
;
8015 nmp
= VTONMP(ap
->a_vp
);
8016 if (nfs_mount_gone(nmp
))
8019 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
))
8022 error
= nfs_getattr(np
, &nvattr
, ctx
, NGA_CACHED
);
8025 if (NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_NAMED_ATTR
) &&
8026 !(nvattr
.nva_flags
& NFS_FFLAG_HAS_NAMED_ATTRS
))
8029 if ((error
= nfs_node_set_busy(np
, vfs_context_thread(ctx
))))
8031 adnp
= nfs4_named_attr_dir_get(np
, 1, ctx
);
8032 nfs_node_clear_busy(np
);
8036 if ((error
= nfs_node_lock(adnp
)))
8039 if (adnp
->n_flag
& NNEEDINVALIDATE
) {
8040 adnp
->n_flag
&= ~NNEEDINVALIDATE
;
8042 nfs_node_unlock(adnp
);
8043 error
= nfs_vinvalbuf(NFSTOV(adnp
), 0, ctx
, 1);
8045 error
= nfs_node_lock(adnp
);
8051 * check for need to invalidate when (re)starting at beginning
8053 if (adnp
->n_flag
& NMODIFIED
) {
8055 nfs_node_unlock(adnp
);
8056 if ((error
= nfs_vinvalbuf(NFSTOV(adnp
), 0, ctx
, 1)))
8059 nfs_node_unlock(adnp
);
8061 /* nfs_getattr() will check changed and purge caches */
8062 if ((error
= nfs_getattr(adnp
, &nvattr
, ctx
, NGA_UNCACHED
)))
8065 if (uio
&& (uio_resid(uio
) == 0))
8069 nextcookie
= lbn
= 0;
8071 while (!error
&& !done
) {
8072 OSAddAtomic64(1, &nfsstats
.biocache_readdirs
);
8073 cookie
= nextcookie
;
8075 error
= nfs_buf_get(adnp
, lbn
, NFS_DIRBLKSIZ
, vfs_context_thread(ctx
), NBLK_READ
, &bp
);
8078 ndbhp
= (struct nfs_dir_buf_header
*)bp
->nb_data
;
8079 if (!ISSET(bp
->nb_flags
, NB_CACHE
) || !ISSET(ndbhp
->ndbh_flags
, NDB_FULL
)) {
8080 if (!ISSET(bp
->nb_flags
, NB_CACHE
)) { /* initialize the buffer */
8081 ndbhp
->ndbh_flags
= 0;
8082 ndbhp
->ndbh_count
= 0;
8083 ndbhp
->ndbh_entry_end
= sizeof(*ndbhp
);
8084 ndbhp
->ndbh_ncgen
= adnp
->n_ncgen
;
8086 error
= nfs_buf_readdir(bp
, ctx
);
8087 if (error
== NFSERR_DIRBUFDROPPED
)
8090 nfs_buf_release(bp
, 1);
8091 if (error
&& (error
!= ENXIO
) && (error
!= ETIMEDOUT
) && (error
!= EINTR
) && (error
!= ERESTART
)) {
8092 if (!nfs_node_lock(adnp
)) {
8094 nfs_node_unlock(adnp
);
8096 nfs_vinvalbuf(NFSTOV(adnp
), 0, ctx
, 1);
8097 if (error
== NFSERR_BAD_COOKIE
)
8104 /* go through all the entries copying/counting */
8105 dp
= NFS_DIR_BUF_FIRST_DIRENTRY(bp
);
8106 for (i
=0; i
< ndbhp
->ndbh_count
; i
++) {
8107 if (!xattr_protected(dp
->d_name
)) {
8109 *ap
->a_size
+= dp
->d_namlen
+ 1;
8110 } else if (uio_resid(uio
) < (dp
->d_namlen
+ 1)) {
8113 error
= uiomove(dp
->d_name
, dp
->d_namlen
+1, uio
);
8114 if (error
&& (error
!= EFAULT
))
8118 nextcookie
= dp
->d_seekoff
;
8119 dp
= NFS_DIRENTRY_NEXT(dp
);
8122 if (i
== ndbhp
->ndbh_count
) {
8123 /* hit end of buffer, move to next buffer */
8125 /* if we also hit EOF, we're done */
8126 if (ISSET(ndbhp
->ndbh_flags
, NDB_EOF
))
8129 if (!error
&& !done
&& (nextcookie
== cookie
)) {
8130 printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie
, i
, ndbhp
->ndbh_count
);
8133 nfs_buf_release(bp
, 1);
8137 vnode_put(NFSTOV(adnp
));
8143 nfs4_vnop_getnamedstream(
8144 struct vnop_getnamedstream_args
/* {
8145 struct vnodeop_desc *a_desc;
8149 enum nsoperation a_operation;
8151 vfs_context_t a_context;
8154 vfs_context_t ctx
= ap
->a_context
;
8155 struct nfsmount
*nmp
;
8156 struct nfs_vattr nvattr
;
8157 struct componentname cn
;
8161 nmp
= VTONMP(ap
->a_vp
);
8162 if (nfs_mount_gone(nmp
))
8165 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
))
8167 error
= nfs_getattr(VTONFS(ap
->a_vp
), &nvattr
, ctx
, NGA_CACHED
);
8170 if (NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_NAMED_ATTR
) &&
8171 !(nvattr
.nva_flags
& NFS_FFLAG_HAS_NAMED_ATTRS
))
8174 bzero(&cn
, sizeof(cn
));
8175 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(ap
->a_name
, const, char *);
8176 cn
.cn_namelen
= strlen(ap
->a_name
);
8177 cn
.cn_nameiop
= LOOKUP
;
8178 cn
.cn_flags
= MAKEENTRY
;
8180 error
= nfs4_named_attr_get(VTONFS(ap
->a_vp
), &cn
, NFS_OPEN_SHARE_ACCESS_NONE
,
8181 0, ctx
, &anp
, NULL
);
8182 if ((!error
&& !anp
) || (error
== ENOENT
))
8185 *ap
->a_svpp
= NFSTOV(anp
);
8187 vnode_put(NFSTOV(anp
));
8192 nfs4_vnop_makenamedstream(
8193 struct vnop_makenamedstream_args
/* {
8194 struct vnodeop_desc *a_desc;
8199 vfs_context_t a_context;
8202 vfs_context_t ctx
= ap
->a_context
;
8203 struct nfsmount
*nmp
;
8204 struct componentname cn
;
8208 nmp
= VTONMP(ap
->a_vp
);
8209 if (nfs_mount_gone(nmp
))
8212 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
))
8215 bzero(&cn
, sizeof(cn
));
8216 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(ap
->a_name
, const, char *);
8217 cn
.cn_namelen
= strlen(ap
->a_name
);
8218 cn
.cn_nameiop
= CREATE
;
8219 cn
.cn_flags
= MAKEENTRY
;
8221 error
= nfs4_named_attr_get(VTONFS(ap
->a_vp
), &cn
, NFS_OPEN_SHARE_ACCESS_BOTH
,
8222 NFS_GET_NAMED_ATTR_CREATE
, ctx
, &anp
, NULL
);
8223 if ((!error
&& !anp
) || (error
== ENOENT
))
8226 *ap
->a_svpp
= NFSTOV(anp
);
8228 vnode_put(NFSTOV(anp
));
8233 nfs4_vnop_removenamedstream(
8234 struct vnop_removenamedstream_args
/* {
8235 struct vnodeop_desc *a_desc;
8240 vfs_context_t a_context;
8243 struct nfsmount
*nmp
= VTONMP(ap
->a_vp
);
8244 nfsnode_t np
= ap
->a_vp
? VTONFS(ap
->a_vp
) : NULL
;
8245 nfsnode_t anp
= ap
->a_svp
? VTONFS(ap
->a_svp
) : NULL
;
8247 if (nfs_mount_gone(nmp
))
8251 * Given that a_svp is a named stream, checking for
8252 * named attribute support is kinda pointless.
8254 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
))
8257 return (nfs4_named_attr_remove(np
, anp
, ap
->a_name
, ap
->a_context
));