2 * Copyright (c) 2006-2019 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
30 * vnode op calls for NFS version 4
32 #include <sys/param.h>
33 #include <sys/kernel.h>
34 #include <sys/systm.h>
35 #include <sys/resourcevar.h>
36 #include <sys/proc_internal.h>
37 #include <sys/kauth.h>
38 #include <sys/mount_internal.h>
39 #include <sys/malloc.h>
40 #include <sys/kpi_mbuf.h>
42 #include <sys/vnode_internal.h>
43 #include <sys/dirent.h>
44 #include <sys/fcntl.h>
45 #include <sys/lockf.h>
46 #include <sys/ubc_internal.h>
48 #include <sys/signalvar.h>
49 #include <sys/uio_internal.h>
50 #include <sys/xattr.h>
51 #include <sys/paths.h>
53 #include <vfs/vfs_support.h>
58 #include <kern/clock.h>
59 #include <libkern/OSAtomic.h>
61 #include <miscfs/fifofs/fifo.h>
62 #include <miscfs/specfs/specdev.h>
64 #include <nfs/rpcv2.h>
65 #include <nfs/nfsproto.h>
67 #include <nfs/nfsnode.h>
68 #include <nfs/nfs_gss.h>
69 #include <nfs/nfsmount.h>
70 #include <nfs/nfs_lock.h>
71 #include <nfs/xdr_subs.h>
72 #include <nfs/nfsm_subs.h>
75 #include <netinet/in.h>
76 #include <netinet/in_var.h>
77 #include <vm/vm_kern.h>
79 #include <kern/task.h>
80 #include <kern/sched_prim.h>
84 nfs4_access_rpc(nfsnode_t np
, u_int32_t
*access
, int rpcflags
, vfs_context_t ctx
)
86 int error
= 0, lockerror
= ENOENT
, status
, numops
, slot
;
88 struct nfsm_chain nmreq
, nmrep
;
90 uint32_t access_result
= 0, supported
= 0, missing
;
91 struct nfsmount
*nmp
= NFSTONMP(np
);
92 int nfsvers
= nmp
->nm_vers
;
94 struct nfsreq_secinfo_args si
;
96 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
100 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
101 nfsm_chain_null(&nmreq
);
102 nfsm_chain_null(&nmrep
);
104 // PUTFH, ACCESS, GETATTR
106 nfsm_chain_build_alloc_init(error
, &nmreq
, 17 * NFSX_UNSIGNED
);
107 nfsm_chain_add_compound_header(error
, &nmreq
, "access", nmp
->nm_minor_vers
, numops
);
109 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
110 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
112 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_ACCESS
);
113 nfsm_chain_add_32(error
, &nmreq
, *access
);
115 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
116 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
117 nfsm_chain_build_done(error
, &nmreq
);
118 nfsm_assert(error
, (numops
== 0), EPROTO
);
120 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
121 vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
122 &si
, rpcflags
, &nmrep
, &xid
, &status
);
124 if ((lockerror
= nfs_node_lock(np
))) {
127 nfsm_chain_skip_tag(error
, &nmrep
);
128 nfsm_chain_get_32(error
, &nmrep
, numops
);
129 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
130 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_ACCESS
);
131 nfsm_chain_get_32(error
, &nmrep
, supported
);
132 nfsm_chain_get_32(error
, &nmrep
, access_result
);
134 if ((missing
= (*access
& ~supported
))) {
135 /* missing support for something(s) we wanted */
136 if (missing
& NFS_ACCESS_DELETE
) {
138 * If the server doesn't report DELETE (possible
139 * on UNIX systems), we'll assume that it is OK
140 * and just let any subsequent delete action fail
141 * if it really isn't deletable.
143 access_result
|= NFS_ACCESS_DELETE
;
146 /* ".zfs" subdirectories may erroneously give a denied answer for modify/delete */
147 if (nfs_access_dotzfs
) {
148 vnode_t dvp
= NULLVP
;
149 if (np
->n_flag
& NISDOTZFSCHILD
) { /* may be able to create/delete snapshot dirs */
150 access_result
|= (NFS_ACCESS_MODIFY
| NFS_ACCESS_EXTEND
| NFS_ACCESS_DELETE
);
151 } else if (((dvp
= vnode_getparent(NFSTOV(np
))) != NULLVP
) && (VTONFS(dvp
)->n_flag
& NISDOTZFSCHILD
)) {
152 access_result
|= NFS_ACCESS_DELETE
; /* may be able to delete snapshot dirs */
158 /* Some servers report DELETE support but erroneously give a denied answer. */
159 if (nfs_access_delete
&& (*access
& NFS_ACCESS_DELETE
) && !(access_result
& NFS_ACCESS_DELETE
)) {
160 access_result
|= NFS_ACCESS_DELETE
;
162 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
163 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
166 if (nfs_mount_gone(nmp
)) {
171 if (auth_is_kerberized(np
->n_auth
) || auth_is_kerberized(nmp
->nm_auth
)) {
172 uid
= nfs_cred_getasid2uid(vfs_context_ucred(ctx
));
174 uid
= kauth_cred_getuid(vfs_context_ucred(ctx
));
176 slot
= nfs_node_access_slot(np
, uid
, 1);
177 np
->n_accessuid
[slot
] = uid
;
179 np
->n_accessstamp
[slot
] = now
.tv_sec
;
180 np
->n_access
[slot
] = access_result
;
182 /* pass back the access returned with this request */
183 *access
= np
->n_access
[slot
];
188 nfsm_chain_cleanup(&nmreq
);
189 nfsm_chain_cleanup(&nmrep
);
201 struct nfs_vattr
*nvap
,
204 struct nfsmount
*nmp
= mp
? VFSTONFS(mp
) : NFSTONMP(np
);
205 int error
= 0, status
, nfsvers
, numops
, rpcflags
= 0, acls
;
206 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
207 struct nfsm_chain nmreq
, nmrep
;
208 struct nfsreq_secinfo_args si
;
210 if (nfs_mount_gone(nmp
)) {
213 nfsvers
= nmp
->nm_vers
;
214 acls
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_ACL
);
216 if (np
&& (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)) {
217 nfs4_default_attrs_for_referral_trigger(VTONFS(np
->n_parent
), NULL
, 0, nvap
, NULL
);
221 if (flags
& NGA_MONITOR
) { /* vnode monitor requests should be soft */
222 rpcflags
= R_RECOVER
;
225 if (flags
& NGA_SOFT
) { /* Return ETIMEDOUT if server not responding */
229 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
230 nfsm_chain_null(&nmreq
);
231 nfsm_chain_null(&nmrep
);
235 nfsm_chain_build_alloc_init(error
, &nmreq
, 15 * NFSX_UNSIGNED
);
236 nfsm_chain_add_compound_header(error
, &nmreq
, "getattr", nmp
->nm_minor_vers
, numops
);
238 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
239 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, fhp
, fhsize
);
241 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
242 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
243 if ((flags
& NGA_ACL
) && acls
) {
244 NFS_BITMAP_SET(bitmap
, NFS_FATTR_ACL
);
246 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
247 nfsm_chain_build_done(error
, &nmreq
);
248 nfsm_assert(error
, (numops
== 0), EPROTO
);
250 error
= nfs_request2(np
, mp
, &nmreq
, NFSPROC4_COMPOUND
,
251 vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
252 NULL
, rpcflags
, &nmrep
, xidp
, &status
);
254 nfsm_chain_skip_tag(error
, &nmrep
);
255 nfsm_chain_get_32(error
, &nmrep
, numops
);
256 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
257 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
259 error
= nfs4_parsefattr(&nmrep
, NULL
, nvap
, NULL
, NULL
, NULL
);
261 if ((flags
& NGA_ACL
) && acls
&& !NFS_BITMAP_ISSET(nvap
->nva_bitmap
, NFS_FATTR_ACL
)) {
262 /* we asked for the ACL but didn't get one... assume there isn't one */
263 NFS_BITMAP_SET(nvap
->nva_bitmap
, NFS_FATTR_ACL
);
264 nvap
->nva_acl
= NULL
;
267 nfsm_chain_cleanup(&nmreq
);
268 nfsm_chain_cleanup(&nmrep
);
273 nfs4_readlink_rpc(nfsnode_t np
, char *buf
, uint32_t *buflenp
, vfs_context_t ctx
)
275 struct nfsmount
*nmp
;
276 int error
= 0, lockerror
= ENOENT
, status
, numops
;
279 struct nfsm_chain nmreq
, nmrep
;
280 struct nfsreq_secinfo_args si
;
283 if (nfs_mount_gone(nmp
)) {
286 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
289 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
290 nfsm_chain_null(&nmreq
);
291 nfsm_chain_null(&nmrep
);
293 // PUTFH, GETATTR, READLINK
295 nfsm_chain_build_alloc_init(error
, &nmreq
, 16 * NFSX_UNSIGNED
);
296 nfsm_chain_add_compound_header(error
, &nmreq
, "readlink", nmp
->nm_minor_vers
, numops
);
298 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
299 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
301 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
302 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
304 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READLINK
);
305 nfsm_chain_build_done(error
, &nmreq
);
306 nfsm_assert(error
, (numops
== 0), EPROTO
);
308 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
310 if ((lockerror
= nfs_node_lock(np
))) {
313 nfsm_chain_skip_tag(error
, &nmrep
);
314 nfsm_chain_get_32(error
, &nmrep
, numops
);
315 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
316 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
317 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, &xid
);
318 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READLINK
);
319 nfsm_chain_get_32(error
, &nmrep
, len
);
321 if (len
>= *buflenp
) {
322 if (np
->n_size
&& (np
->n_size
< *buflenp
)) {
328 nfsm_chain_get_opaque(error
, &nmrep
, len
, buf
);
336 nfsm_chain_cleanup(&nmreq
);
337 nfsm_chain_cleanup(&nmrep
);
348 struct nfsreq_cbinfo
*cb
,
349 struct nfsreq
**reqp
)
351 struct nfsmount
*nmp
;
352 int error
= 0, nfsvers
, numops
;
354 struct nfsm_chain nmreq
;
355 struct nfsreq_secinfo_args si
;
358 if (nfs_mount_gone(nmp
)) {
361 nfsvers
= nmp
->nm_vers
;
362 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
366 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
367 nfsm_chain_null(&nmreq
);
369 // PUTFH, READ, GETATTR
371 nfsm_chain_build_alloc_init(error
, &nmreq
, 22 * NFSX_UNSIGNED
);
372 nfsm_chain_add_compound_header(error
, &nmreq
, "read", nmp
->nm_minor_vers
, numops
);
374 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
375 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
377 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READ
);
378 nfs_get_stateid(np
, thd
, cred
, &stateid
);
379 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
380 nfsm_chain_add_64(error
, &nmreq
, offset
);
381 nfsm_chain_add_32(error
, &nmreq
, len
);
383 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
384 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
385 nfsm_chain_build_done(error
, &nmreq
);
386 nfsm_assert(error
, (numops
== 0), EPROTO
);
388 error
= nfs_request_async(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, 0, cb
, reqp
);
390 nfsm_chain_cleanup(&nmreq
);
395 nfs4_read_rpc_async_finish(
402 struct nfsmount
*nmp
;
403 int error
= 0, lockerror
, nfsvers
, numops
, status
, eof
= 0;
406 struct nfsm_chain nmrep
;
409 if (nfs_mount_gone(nmp
)) {
410 nfs_request_async_cancel(req
);
413 nfsvers
= nmp
->nm_vers
;
415 nfsm_chain_null(&nmrep
);
417 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
418 if (error
== EINPROGRESS
) { /* async request restarted */
422 if ((lockerror
= nfs_node_lock(np
))) {
425 nfsm_chain_skip_tag(error
, &nmrep
);
426 nfsm_chain_get_32(error
, &nmrep
, numops
);
427 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
428 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READ
);
429 nfsm_chain_get_32(error
, &nmrep
, eof
);
430 nfsm_chain_get_32(error
, &nmrep
, retlen
);
432 *lenp
= MIN(retlen
, *lenp
);
433 error
= nfsm_chain_get_uio(&nmrep
, *lenp
, uio
);
435 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
436 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
441 if (!eof
&& !retlen
) {
446 nfsm_chain_cleanup(&nmrep
);
447 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) {
448 microuptime(&np
->n_lastio
);
454 nfs4_write_rpc_async(
461 struct nfsreq_cbinfo
*cb
,
462 struct nfsreq
**reqp
)
464 struct nfsmount
*nmp
;
466 int error
= 0, nfsvers
, numops
;
468 struct nfsm_chain nmreq
;
469 struct nfsreq_secinfo_args si
;
472 if (nfs_mount_gone(nmp
)) {
475 nfsvers
= nmp
->nm_vers
;
476 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
480 /* for async mounts, don't bother sending sync write requests */
481 if ((iomode
!= NFS_WRITE_UNSTABLE
) && nfs_allow_async
&&
482 ((mp
= NFSTOMP(np
))) && (vfs_flags(mp
) & MNT_ASYNC
)) {
483 iomode
= NFS_WRITE_UNSTABLE
;
486 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
487 nfsm_chain_null(&nmreq
);
489 // PUTFH, WRITE, GETATTR
491 nfsm_chain_build_alloc_init(error
, &nmreq
, 25 * NFSX_UNSIGNED
+ len
);
492 nfsm_chain_add_compound_header(error
, &nmreq
, "write", nmp
->nm_minor_vers
, numops
);
494 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
495 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
497 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_WRITE
);
498 nfs_get_stateid(np
, thd
, cred
, &stateid
);
499 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
500 nfsm_chain_add_64(error
, &nmreq
, uio_offset(uio
));
501 nfsm_chain_add_32(error
, &nmreq
, iomode
);
502 nfsm_chain_add_32(error
, &nmreq
, len
);
504 error
= nfsm_chain_add_uio(&nmreq
, uio
, len
);
507 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
508 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
509 nfsm_chain_build_done(error
, &nmreq
);
510 nfsm_assert(error
, (numops
== 0), EPROTO
);
513 error
= nfs_request_async(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, 0, cb
, reqp
);
515 nfsm_chain_cleanup(&nmreq
);
520 nfs4_write_rpc_async_finish(
527 struct nfsmount
*nmp
;
528 int error
= 0, lockerror
= ENOENT
, nfsvers
, numops
, status
;
529 int committed
= NFS_WRITE_FILESYNC
;
531 u_int64_t xid
, wverf
;
533 struct nfsm_chain nmrep
;
536 if (nfs_mount_gone(nmp
)) {
537 nfs_request_async_cancel(req
);
540 nfsvers
= nmp
->nm_vers
;
542 nfsm_chain_null(&nmrep
);
544 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
545 if (error
== EINPROGRESS
) { /* async request restarted */
549 if (nfs_mount_gone(nmp
)) {
552 if (!error
&& (lockerror
= nfs_node_lock(np
))) {
555 nfsm_chain_skip_tag(error
, &nmrep
);
556 nfsm_chain_get_32(error
, &nmrep
, numops
);
557 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
558 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_WRITE
);
559 nfsm_chain_get_32(error
, &nmrep
, rlen
);
565 nfsm_chain_get_32(error
, &nmrep
, committed
);
566 nfsm_chain_get_64(error
, &nmrep
, wverf
);
571 lck_mtx_lock(&nmp
->nm_lock
);
572 if (!(nmp
->nm_state
& NFSSTA_HASWRITEVERF
)) {
573 nmp
->nm_verf
= wverf
;
574 nmp
->nm_state
|= NFSSTA_HASWRITEVERF
;
575 } else if (nmp
->nm_verf
!= wverf
) {
576 nmp
->nm_verf
= wverf
;
578 lck_mtx_unlock(&nmp
->nm_lock
);
579 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
580 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
585 nfsm_chain_cleanup(&nmrep
);
586 if ((committed
!= NFS_WRITE_FILESYNC
) && nfs_allow_async
&&
587 ((mp
= NFSTOMP(np
))) && (vfs_flags(mp
) & MNT_ASYNC
)) {
588 committed
= NFS_WRITE_FILESYNC
;
590 *iomodep
= committed
;
591 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) {
592 microuptime(&np
->n_lastio
);
605 int error
= 0, lockerror
= ENOENT
, remove_error
= 0, status
;
606 struct nfsmount
*nmp
;
609 struct nfsm_chain nmreq
, nmrep
;
610 struct nfsreq_secinfo_args si
;
613 if (nfs_mount_gone(nmp
)) {
616 nfsvers
= nmp
->nm_vers
;
617 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
620 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
622 nfsm_chain_null(&nmreq
);
623 nfsm_chain_null(&nmrep
);
625 // PUTFH, REMOVE, GETATTR
627 nfsm_chain_build_alloc_init(error
, &nmreq
, 17 * NFSX_UNSIGNED
+ namelen
);
628 nfsm_chain_add_compound_header(error
, &nmreq
, "remove", nmp
->nm_minor_vers
, numops
);
630 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
631 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
633 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_REMOVE
);
634 nfsm_chain_add_name(error
, &nmreq
, name
, namelen
, nmp
);
636 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
637 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
638 nfsm_chain_build_done(error
, &nmreq
);
639 nfsm_assert(error
, (numops
== 0), EPROTO
);
642 error
= nfs_request2(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, 0, &nmrep
, &xid
, &status
);
644 if ((lockerror
= nfs_node_lock(dnp
))) {
647 nfsm_chain_skip_tag(error
, &nmrep
);
648 nfsm_chain_get_32(error
, &nmrep
, numops
);
649 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
650 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_REMOVE
);
651 remove_error
= error
;
652 nfsm_chain_check_change_info(error
, &nmrep
, dnp
);
653 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
654 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
655 if (error
&& !lockerror
) {
656 NATTRINVALIDATE(dnp
);
659 nfsm_chain_cleanup(&nmreq
);
660 nfsm_chain_cleanup(&nmrep
);
663 dnp
->n_flag
|= NMODIFIED
;
664 nfs_node_unlock(dnp
);
666 if (error
== NFSERR_GRACE
) {
667 tsleep(&nmp
->nm_state
, (PZERO
- 1), "nfsgrace", 2 * hz
);
684 int error
= 0, lockerror
= ENOENT
, status
, nfsvers
, numops
;
685 struct nfsmount
*nmp
;
686 u_int64_t xid
, savedxid
;
687 struct nfsm_chain nmreq
, nmrep
;
688 struct nfsreq_secinfo_args si
;
690 nmp
= NFSTONMP(fdnp
);
691 if (nfs_mount_gone(nmp
)) {
694 nfsvers
= nmp
->nm_vers
;
695 if (fdnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
698 if (tdnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
702 NFSREQ_SECINFO_SET(&si
, fdnp
, NULL
, 0, NULL
, 0);
703 nfsm_chain_null(&nmreq
);
704 nfsm_chain_null(&nmrep
);
706 // PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM)
708 nfsm_chain_build_alloc_init(error
, &nmreq
, 30 * NFSX_UNSIGNED
+ fnamelen
+ tnamelen
);
709 nfsm_chain_add_compound_header(error
, &nmreq
, "rename", nmp
->nm_minor_vers
, numops
);
711 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
712 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, fdnp
->n_fhp
, fdnp
->n_fhsize
);
714 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
716 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
717 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, tdnp
->n_fhp
, tdnp
->n_fhsize
);
719 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RENAME
);
720 nfsm_chain_add_name(error
, &nmreq
, fnameptr
, fnamelen
, nmp
);
721 nfsm_chain_add_name(error
, &nmreq
, tnameptr
, tnamelen
, nmp
);
723 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
724 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, tdnp
);
726 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
728 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
729 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, fdnp
);
730 nfsm_chain_build_done(error
, &nmreq
);
731 nfsm_assert(error
, (numops
== 0), EPROTO
);
734 error
= nfs_request(fdnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
736 if ((lockerror
= nfs_node_lock2(fdnp
, tdnp
))) {
739 nfsm_chain_skip_tag(error
, &nmrep
);
740 nfsm_chain_get_32(error
, &nmrep
, numops
);
741 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
742 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
743 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
744 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RENAME
);
745 nfsm_chain_check_change_info(error
, &nmrep
, fdnp
);
746 nfsm_chain_check_change_info(error
, &nmrep
, tdnp
);
747 /* directory attributes: if we don't get them, make sure to invalidate */
748 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
750 nfsm_chain_loadattr(error
, &nmrep
, tdnp
, nfsvers
, &xid
);
751 if (error
&& !lockerror
) {
752 NATTRINVALIDATE(tdnp
);
754 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
755 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
757 nfsm_chain_loadattr(error
, &nmrep
, fdnp
, nfsvers
, &xid
);
758 if (error
&& !lockerror
) {
759 NATTRINVALIDATE(fdnp
);
762 nfsm_chain_cleanup(&nmreq
);
763 nfsm_chain_cleanup(&nmrep
);
765 fdnp
->n_flag
|= NMODIFIED
;
766 tdnp
->n_flag
|= NMODIFIED
;
767 nfs_node_unlock2(fdnp
, tdnp
);
773 * NFS V4 readdir RPC.
776 nfs4_readdir_rpc(nfsnode_t dnp
, struct nfsbuf
*bp
, vfs_context_t ctx
)
778 struct nfsmount
*nmp
;
779 int error
= 0, lockerror
, nfsvers
, namedattr
, rdirplus
, bigcookies
, numops
;
780 int i
, status
, more_entries
= 1, eof
, bp_dropped
= 0;
781 uint32_t nmreaddirsize
, nmrsize
;
782 uint32_t namlen
, skiplen
, fhlen
, xlen
, attrlen
, reclen
, space_free
, space_needed
;
783 uint64_t cookie
, lastcookie
, xid
, savedxid
;
784 struct nfsm_chain nmreq
, nmrep
, nmrepsave
;
786 struct nfs_vattr nvattr
, *nvattrp
;
787 struct nfs_dir_buf_header
*ndbhp
;
789 char *padstart
, padlen
;
791 uint32_t entry_attrs
[NFS_ATTR_BITMAP_LEN
];
793 struct nfsreq_secinfo_args si
;
796 if (nfs_mount_gone(nmp
)) {
799 nfsvers
= nmp
->nm_vers
;
800 nmreaddirsize
= nmp
->nm_readdirsize
;
801 nmrsize
= nmp
->nm_rsize
;
802 bigcookies
= nmp
->nm_state
& NFSSTA_BIGCOOKIES
;
803 namedattr
= (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) ? 1 : 0;
804 rdirplus
= (NMFLAG(nmp
, RDIRPLUS
) || namedattr
) ? 1 : 0;
805 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
808 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
811 * Set up attribute request for entries.
812 * For READDIRPLUS functionality, get everything.
813 * Otherwise, just get what we need for struct direntry.
817 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, entry_attrs
);
818 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_FILEHANDLE
);
821 NFS_CLEAR_ATTRIBUTES(entry_attrs
);
822 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_TYPE
);
823 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_FILEID
);
824 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_MOUNTED_ON_FILEID
);
826 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_RDATTR_ERROR
);
828 /* lock to protect access to cookie verifier */
829 if ((lockerror
= nfs_node_lock(dnp
))) {
833 /* determine cookie to use, and move dp to the right offset */
834 ndbhp
= (struct nfs_dir_buf_header
*)bp
->nb_data
;
835 dp
= NFS_DIR_BUF_FIRST_DIRENTRY(bp
);
836 if (ndbhp
->ndbh_count
) {
837 for (i
= 0; i
< ndbhp
->ndbh_count
- 1; i
++) {
838 dp
= NFS_DIRENTRY_NEXT(dp
);
840 cookie
= dp
->d_seekoff
;
841 dp
= NFS_DIRENTRY_NEXT(dp
);
843 cookie
= bp
->nb_lblkno
;
844 /* increment with every buffer read */
845 OSAddAtomic64(1, &nfsstats
.readdir_bios
);
850 * The NFS client is responsible for the "." and ".." entries in the
851 * directory. So, we put them at the start of the first buffer.
852 * Don't bother for attribute directories.
854 if (((bp
->nb_lblkno
== 0) && (ndbhp
->ndbh_count
== 0)) &&
855 !(dnp
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
857 fhlen
= rdirplus
? fh
.fh_len
+ 1 : 0;
858 xlen
= rdirplus
? (fhlen
+ sizeof(time_t)) : 0;
861 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
863 bzero(&dp
->d_name
[namlen
+ 1], xlen
);
865 dp
->d_namlen
= namlen
;
866 strlcpy(dp
->d_name
, ".", namlen
+ 1);
867 dp
->d_fileno
= dnp
->n_vattr
.nva_fileid
;
869 dp
->d_reclen
= reclen
;
871 padstart
= dp
->d_name
+ dp
->d_namlen
+ 1 + xlen
;
872 dp
= NFS_DIRENTRY_NEXT(dp
);
873 padlen
= (char*)dp
- padstart
;
875 bzero(padstart
, padlen
);
877 if (rdirplus
) { /* zero out attributes */
878 bzero(NFS_DIR_BUF_NVATTR(bp
, 0), sizeof(struct nfs_vattr
));
883 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
885 bzero(&dp
->d_name
[namlen
+ 1], xlen
);
887 dp
->d_namlen
= namlen
;
888 strlcpy(dp
->d_name
, "..", namlen
+ 1);
890 dp
->d_fileno
= VTONFS(dnp
->n_parent
)->n_vattr
.nva_fileid
;
892 dp
->d_fileno
= dnp
->n_vattr
.nva_fileid
;
895 dp
->d_reclen
= reclen
;
897 padstart
= dp
->d_name
+ dp
->d_namlen
+ 1 + xlen
;
898 dp
= NFS_DIRENTRY_NEXT(dp
);
899 padlen
= (char*)dp
- padstart
;
901 bzero(padstart
, padlen
);
903 if (rdirplus
) { /* zero out attributes */
904 bzero(NFS_DIR_BUF_NVATTR(bp
, 1), sizeof(struct nfs_vattr
));
907 ndbhp
->ndbh_entry_end
= (char*)dp
- bp
->nb_data
;
908 ndbhp
->ndbh_count
= 2;
912 * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
913 * the buffer is full (or we hit EOF). Then put the remainder of the
914 * results in the next buffer(s).
916 nfsm_chain_null(&nmreq
);
917 nfsm_chain_null(&nmrep
);
918 while (nfs_dir_buf_freespace(bp
, rdirplus
) && !(ndbhp
->ndbh_flags
& NDB_FULL
)) {
919 // PUTFH, GETATTR, READDIR
921 nfsm_chain_build_alloc_init(error
, &nmreq
, 26 * NFSX_UNSIGNED
);
922 nfsm_chain_add_compound_header(error
, &nmreq
, tag
, nmp
->nm_minor_vers
, numops
);
924 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
925 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
927 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
928 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
930 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READDIR
);
931 nfsm_chain_add_64(error
, &nmreq
, (cookie
<= 2) ? 0 : cookie
);
932 nfsm_chain_add_64(error
, &nmreq
, dnp
->n_cookieverf
);
933 nfsm_chain_add_32(error
, &nmreq
, nmreaddirsize
);
934 nfsm_chain_add_32(error
, &nmreq
, nmrsize
);
935 nfsm_chain_add_bitmap_supported(error
, &nmreq
, entry_attrs
, nmp
, dnp
);
936 nfsm_chain_build_done(error
, &nmreq
);
937 nfsm_assert(error
, (numops
== 0), EPROTO
);
938 nfs_node_unlock(dnp
);
940 error
= nfs_request(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
942 if ((lockerror
= nfs_node_lock(dnp
))) {
947 nfsm_chain_skip_tag(error
, &nmrep
);
948 nfsm_chain_get_32(error
, &nmrep
, numops
);
949 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
950 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
951 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
952 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READDIR
);
953 nfsm_chain_get_64(error
, &nmrep
, dnp
->n_cookieverf
);
954 nfsm_chain_get_32(error
, &nmrep
, more_entries
);
957 nfs_node_unlock(dnp
);
966 /* loop through the entries packing them into the buffer */
967 while (more_entries
) {
968 /* Entry: COOKIE, NAME, FATTR */
969 nfsm_chain_get_64(error
, &nmrep
, cookie
);
970 nfsm_chain_get_32(error
, &nmrep
, namlen
);
972 if (!bigcookies
&& (cookie
>> 32) && (nmp
== NFSTONMP(dnp
))) {
973 /* we've got a big cookie, make sure flag is set */
974 lck_mtx_lock(&nmp
->nm_lock
);
975 nmp
->nm_state
|= NFSSTA_BIGCOOKIES
;
976 lck_mtx_unlock(&nmp
->nm_lock
);
979 /* just truncate names that don't fit in direntry.d_name */
984 if (namlen
> (sizeof(dp
->d_name
) - 1)) {
985 skiplen
= namlen
- sizeof(dp
->d_name
) + 1;
986 namlen
= sizeof(dp
->d_name
) - 1;
990 /* guess that fh size will be same as parent */
991 fhlen
= rdirplus
? (1 + dnp
->n_fhsize
) : 0;
992 xlen
= rdirplus
? (fhlen
+ sizeof(time_t)) : 0;
993 attrlen
= rdirplus
? sizeof(struct nfs_vattr
) : 0;
994 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
995 space_needed
= reclen
+ attrlen
;
996 space_free
= nfs_dir_buf_freespace(bp
, rdirplus
);
997 if (space_needed
> space_free
) {
999 * We still have entries to pack, but we've
1000 * run out of room in the current buffer.
1001 * So we need to move to the next buffer.
1002 * The block# for the next buffer is the
1003 * last cookie in the current buffer.
1006 ndbhp
->ndbh_flags
|= NDB_FULL
;
1007 nfs_buf_release(bp
, 0);
1010 error
= nfs_buf_get(dnp
, lastcookie
, NFS_DIRBLKSIZ
, vfs_context_thread(ctx
), NBLK_READ
, &bp
);
1012 /* initialize buffer */
1013 ndbhp
= (struct nfs_dir_buf_header
*)bp
->nb_data
;
1014 ndbhp
->ndbh_flags
= 0;
1015 ndbhp
->ndbh_count
= 0;
1016 ndbhp
->ndbh_entry_end
= sizeof(*ndbhp
);
1017 ndbhp
->ndbh_ncgen
= dnp
->n_ncgen
;
1018 space_free
= nfs_dir_buf_freespace(bp
, rdirplus
);
1019 dp
= NFS_DIR_BUF_FIRST_DIRENTRY(bp
);
1020 /* increment with every buffer read */
1021 OSAddAtomic64(1, &nfsstats
.readdir_bios
);
1024 dp
->d_fileno
= cookie
; /* placeholder */
1025 dp
->d_seekoff
= cookie
;
1026 dp
->d_namlen
= namlen
;
1027 dp
->d_reclen
= reclen
;
1028 dp
->d_type
= DT_UNKNOWN
;
1029 nfsm_chain_get_opaque(error
, &nmrep
, namlen
, dp
->d_name
);
1031 dp
->d_name
[namlen
] = '\0';
1033 nfsm_chain_adv(error
, &nmrep
,
1034 nfsm_rndup(namlen
+ skiplen
) - nfsm_rndup(namlen
));
1037 nvattrp
= rdirplus
? NFS_DIR_BUF_NVATTR(bp
, ndbhp
->ndbh_count
) : &nvattr
;
1038 error
= nfs4_parsefattr(&nmrep
, NULL
, nvattrp
, &fh
, NULL
, NULL
);
1039 if (!error
&& NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_ACL
)) {
1040 /* we do NOT want ACLs returned to us here */
1041 NFS_BITMAP_CLR(nvattrp
->nva_bitmap
, NFS_FATTR_ACL
);
1042 if (nvattrp
->nva_acl
) {
1043 kauth_acl_free(nvattrp
->nva_acl
);
1044 nvattrp
->nva_acl
= NULL
;
1047 if (error
&& NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_RDATTR_ERROR
)) {
1048 /* OK, we may not have gotten all of the attributes but we will use what we can. */
1049 if ((error
== NFSERR_MOVED
) || (error
== NFSERR_INVAL
)) {
1050 /* set this up to look like a referral trigger */
1051 nfs4_default_attrs_for_referral_trigger(dnp
, dp
->d_name
, namlen
, nvattrp
, &fh
);
1055 /* check for more entries after this one */
1056 nfsm_chain_get_32(error
, &nmrep
, more_entries
);
1059 /* Skip any "." and ".." entries returned from server. */
1060 /* Also skip any bothersome named attribute entries. */
1061 if (((dp
->d_name
[0] == '.') && ((namlen
== 1) || ((namlen
== 2) && (dp
->d_name
[1] == '.')))) ||
1062 (namedattr
&& (namlen
== 11) && (!strcmp(dp
->d_name
, "SUNWattr_ro") || !strcmp(dp
->d_name
, "SUNWattr_rw")))) {
1063 lastcookie
= cookie
;
1067 if (NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_TYPE
)) {
1068 dp
->d_type
= IFTODT(VTTOIF(nvattrp
->nva_type
));
1070 if (NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_FILEID
)) {
1071 dp
->d_fileno
= nvattrp
->nva_fileid
;
1074 /* fileid is already in d_fileno, so stash xid in attrs */
1075 nvattrp
->nva_fileid
= savedxid
;
1076 if (NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
1077 fhlen
= fh
.fh_len
+ 1;
1078 xlen
= fhlen
+ sizeof(time_t);
1079 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
1080 space_needed
= reclen
+ attrlen
;
1081 if (space_needed
> space_free
) {
1082 /* didn't actually have the room... move on to next buffer */
1086 /* pack the file handle into the record */
1087 dp
->d_name
[dp
->d_namlen
+ 1] = fh
.fh_len
;
1088 bcopy(fh
.fh_data
, &dp
->d_name
[dp
->d_namlen
+ 2], fh
.fh_len
);
1090 /* mark the file handle invalid */
1092 fhlen
= fh
.fh_len
+ 1;
1093 xlen
= fhlen
+ sizeof(time_t);
1094 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
1095 bzero(&dp
->d_name
[dp
->d_namlen
+ 1], fhlen
);
1097 *(time_t*)(&dp
->d_name
[dp
->d_namlen
+ 1 + fhlen
]) = now
.tv_sec
;
1098 dp
->d_reclen
= reclen
;
1100 padstart
= dp
->d_name
+ dp
->d_namlen
+ 1 + xlen
;
1101 ndbhp
->ndbh_count
++;
1102 lastcookie
= cookie
;
1104 /* advance to next direntry in buffer */
1105 dp
= NFS_DIRENTRY_NEXT(dp
);
1106 ndbhp
->ndbh_entry_end
= (char*)dp
- bp
->nb_data
;
1107 /* zero out the pad bytes */
1108 padlen
= (char*)dp
- padstart
;
1110 bzero(padstart
, padlen
);
1113 /* Finally, get the eof boolean */
1114 nfsm_chain_get_32(error
, &nmrep
, eof
);
1117 ndbhp
->ndbh_flags
|= (NDB_FULL
| NDB_EOF
);
1118 nfs_node_lock_force(dnp
);
1119 dnp
->n_eofcookie
= lastcookie
;
1120 nfs_node_unlock(dnp
);
1125 nfs_buf_release(bp
, 0);
1129 if ((lockerror
= nfs_node_lock(dnp
))) {
1133 nfsm_chain_cleanup(&nmrep
);
1134 nfsm_chain_null(&nmreq
);
1137 if (bp_dropped
&& bp
) {
1138 nfs_buf_release(bp
, 0);
1141 nfs_node_unlock(dnp
);
1143 nfsm_chain_cleanup(&nmreq
);
1144 nfsm_chain_cleanup(&nmrep
);
1145 return bp_dropped
? NFSERR_DIRBUFDROPPED
: error
;
1149 nfs4_lookup_rpc_async(
1154 struct nfsreq
**reqp
)
1156 int error
= 0, isdotdot
= 0, nfsvers
, numops
;
1157 struct nfsm_chain nmreq
;
1158 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
1159 struct nfsmount
*nmp
;
1160 struct nfsreq_secinfo_args si
;
1162 nmp
= NFSTONMP(dnp
);
1163 if (nfs_mount_gone(nmp
)) {
1166 nfsvers
= nmp
->nm_vers
;
1167 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
1171 if ((name
[0] == '.') && (name
[1] == '.') && (namelen
== 2)) {
1173 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
1175 NFSREQ_SECINFO_SET(&si
, dnp
, dnp
->n_fhp
, dnp
->n_fhsize
, name
, namelen
);
1178 nfsm_chain_null(&nmreq
);
1180 // PUTFH, GETATTR, LOOKUP(P), GETFH, GETATTR (FH)
1182 nfsm_chain_build_alloc_init(error
, &nmreq
, 20 * NFSX_UNSIGNED
+ namelen
);
1183 nfsm_chain_add_compound_header(error
, &nmreq
, "lookup", nmp
->nm_minor_vers
, numops
);
1185 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1186 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
1188 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1189 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
1192 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOOKUPP
);
1194 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOOKUP
);
1195 nfsm_chain_add_name(error
, &nmreq
, name
, namelen
, nmp
);
1198 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETFH
);
1200 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1201 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
1202 /* some ".zfs" directories can't handle being asked for some attributes */
1203 if ((dnp
->n_flag
& NISDOTZFS
) && !isdotdot
) {
1204 NFS_BITMAP_CLR(bitmap
, NFS_FATTR_NAMED_ATTR
);
1206 if ((dnp
->n_flag
& NISDOTZFSCHILD
) && isdotdot
) {
1207 NFS_BITMAP_CLR(bitmap
, NFS_FATTR_NAMED_ATTR
);
1209 if (((namelen
== 4) && (name
[0] == '.') && (name
[1] == 'z') && (name
[2] == 'f') && (name
[3] == 's'))) {
1210 NFS_BITMAP_CLR(bitmap
, NFS_FATTR_NAMED_ATTR
);
1212 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, NULL
);
1213 nfsm_chain_build_done(error
, &nmreq
);
1214 nfsm_assert(error
, (numops
== 0), EPROTO
);
1216 error
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
1217 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &si
, 0, NULL
, reqp
);
1219 nfsm_chain_cleanup(&nmreq
);
1225 nfs4_lookup_rpc_async_finish(
1233 struct nfs_vattr
*nvap
)
1235 int error
= 0, lockerror
= ENOENT
, status
, nfsvers
, numops
, isdotdot
= 0;
1236 uint32_t op
= NFS_OP_LOOKUP
;
1238 struct nfsmount
*nmp
;
1239 struct nfsm_chain nmrep
;
1241 nmp
= NFSTONMP(dnp
);
1245 nfsvers
= nmp
->nm_vers
;
1246 if ((name
[0] == '.') && (name
[1] == '.') && (namelen
== 2)) {
1250 nfsm_chain_null(&nmrep
);
1252 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
1254 if ((lockerror
= nfs_node_lock(dnp
))) {
1257 nfsm_chain_skip_tag(error
, &nmrep
);
1258 nfsm_chain_get_32(error
, &nmrep
, numops
);
1259 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1260 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1264 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
1266 nfsm_chain_op_check(error
, &nmrep
, (isdotdot
? NFS_OP_LOOKUPP
: NFS_OP_LOOKUP
));
1267 nfsmout_if(error
|| !fhp
|| !nvap
);
1268 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETFH
);
1269 nfsm_chain_get_32(error
, &nmrep
, fhp
->fh_len
);
1270 if (error
== 0 && fhp
->fh_len
> sizeof(fhp
->fh_data
)) {
1274 nfsm_chain_get_opaque(error
, &nmrep
, fhp
->fh_len
, fhp
->fh_data
);
1275 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1276 if ((error
== NFSERR_MOVED
) || (error
== NFSERR_INVAL
)) {
1277 /* set this up to look like a referral trigger */
1278 nfs4_default_attrs_for_referral_trigger(dnp
, name
, namelen
, nvap
, fhp
);
1282 error
= nfs4_parsefattr(&nmrep
, NULL
, nvap
, NULL
, NULL
, NULL
);
1286 nfs_node_unlock(dnp
);
1288 nfsm_chain_cleanup(&nmrep
);
1289 if (!error
&& (op
== NFS_OP_LOOKUP
) && (nmp
->nm_state
& NFSSTA_NEEDSECINFO
)) {
1290 /* We still need to get SECINFO to set default for mount. */
1291 /* Do so for the first LOOKUP that returns successfully. */
1294 sec
.count
= NX_MAX_SEC_FLAVORS
;
1295 error
= nfs4_secinfo_rpc(nmp
, &req
->r_secinfo
, vfs_context_ucred(ctx
), sec
.flavors
, &sec
.count
);
1296 /* [sigh] some implementations return "illegal" error for unsupported ops */
1297 if (error
== NFSERR_OP_ILLEGAL
) {
1301 /* set our default security flavor to the first in the list */
1302 lck_mtx_lock(&nmp
->nm_lock
);
1304 nmp
->nm_auth
= sec
.flavors
[0];
1306 nmp
->nm_state
&= ~NFSSTA_NEEDSECINFO
;
1307 lck_mtx_unlock(&nmp
->nm_lock
);
1321 struct nfsmount
*nmp
;
1322 int error
= 0, lockerror
, status
, nfsvers
, numops
;
1323 u_int64_t xid
, newwverf
;
1325 struct nfsm_chain nmreq
, nmrep
;
1326 struct nfsreq_secinfo_args si
;
1329 FSDBG(521, np
, offset
, count
, nmp
? nmp
->nm_state
: 0);
1330 if (nfs_mount_gone(nmp
)) {
1333 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
1336 if (!(nmp
->nm_state
& NFSSTA_HASWRITEVERF
)) {
1339 nfsvers
= nmp
->nm_vers
;
1341 if (count
> UINT32_MAX
) {
1347 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
1348 nfsm_chain_null(&nmreq
);
1349 nfsm_chain_null(&nmrep
);
1351 // PUTFH, COMMIT, GETATTR
1353 nfsm_chain_build_alloc_init(error
, &nmreq
, 19 * NFSX_UNSIGNED
);
1354 nfsm_chain_add_compound_header(error
, &nmreq
, "commit", nmp
->nm_minor_vers
, numops
);
1356 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1357 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
1359 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_COMMIT
);
1360 nfsm_chain_add_64(error
, &nmreq
, offset
);
1361 nfsm_chain_add_32(error
, &nmreq
, count32
);
1363 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1364 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
1365 nfsm_chain_build_done(error
, &nmreq
);
1366 nfsm_assert(error
, (numops
== 0), EPROTO
);
1368 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
1369 current_thread(), cred
, &si
, 0, &nmrep
, &xid
, &status
);
1371 if ((lockerror
= nfs_node_lock(np
))) {
1374 nfsm_chain_skip_tag(error
, &nmrep
);
1375 nfsm_chain_get_32(error
, &nmrep
, numops
);
1376 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1377 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_COMMIT
);
1378 nfsm_chain_get_64(error
, &nmrep
, newwverf
);
1379 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1380 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
1382 nfs_node_unlock(np
);
1385 lck_mtx_lock(&nmp
->nm_lock
);
1386 if (nmp
->nm_verf
!= newwverf
) {
1387 nmp
->nm_verf
= newwverf
;
1389 if (wverf
!= newwverf
) {
1390 error
= NFSERR_STALEWRITEVERF
;
1392 lck_mtx_unlock(&nmp
->nm_lock
);
1394 nfsm_chain_cleanup(&nmreq
);
1395 nfsm_chain_cleanup(&nmrep
);
1402 struct nfs_fsattr
*nfsap
,
1406 int error
= 0, lockerror
, status
, nfsvers
, numops
;
1407 struct nfsm_chain nmreq
, nmrep
;
1408 struct nfsmount
*nmp
= NFSTONMP(np
);
1409 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
1410 struct nfs_vattr nvattr
;
1411 struct nfsreq_secinfo_args si
;
1413 if (nfs_mount_gone(nmp
)) {
1416 nfsvers
= nmp
->nm_vers
;
1417 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
1421 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
1422 NVATTR_INIT(&nvattr
);
1423 nfsm_chain_null(&nmreq
);
1424 nfsm_chain_null(&nmrep
);
1426 /* NFSv4: fetch "pathconf" info for this node */
1429 nfsm_chain_build_alloc_init(error
, &nmreq
, 16 * NFSX_UNSIGNED
);
1430 nfsm_chain_add_compound_header(error
, &nmreq
, "pathconf", nmp
->nm_minor_vers
, numops
);
1432 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1433 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
1435 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1436 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
1437 NFS_BITMAP_SET(bitmap
, NFS_FATTR_MAXLINK
);
1438 NFS_BITMAP_SET(bitmap
, NFS_FATTR_MAXNAME
);
1439 NFS_BITMAP_SET(bitmap
, NFS_FATTR_NO_TRUNC
);
1440 NFS_BITMAP_SET(bitmap
, NFS_FATTR_CHOWN_RESTRICTED
);
1441 NFS_BITMAP_SET(bitmap
, NFS_FATTR_CASE_INSENSITIVE
);
1442 NFS_BITMAP_SET(bitmap
, NFS_FATTR_CASE_PRESERVING
);
1443 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
1444 nfsm_chain_build_done(error
, &nmreq
);
1445 nfsm_assert(error
, (numops
== 0), EPROTO
);
1447 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
1449 nfsm_chain_skip_tag(error
, &nmrep
);
1450 nfsm_chain_get_32(error
, &nmrep
, numops
);
1451 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1452 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1454 error
= nfs4_parsefattr(&nmrep
, nfsap
, &nvattr
, NULL
, NULL
, NULL
);
1456 if ((lockerror
= nfs_node_lock(np
))) {
1460 nfs_loadattrcache(np
, &nvattr
, &xid
, 0);
1463 nfs_node_unlock(np
);
1466 NVATTR_CLEANUP(&nvattr
);
1467 nfsm_chain_cleanup(&nmreq
);
1468 nfsm_chain_cleanup(&nmrep
);
1474 struct vnop_getattr_args
/* {
1475 * struct vnodeop_desc *a_desc;
1477 * struct vnode_attr *a_vap;
1478 * vfs_context_t a_context;
1481 struct vnode_attr
*vap
= ap
->a_vap
;
1482 struct nfsmount
*nmp
;
1483 struct nfs_vattr nva
;
1484 int error
, acls
, ngaflags
;
1486 nmp
= VTONMP(ap
->a_vp
);
1487 if (nfs_mount_gone(nmp
)) {
1490 acls
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_ACL
);
1492 ngaflags
= NGA_CACHED
;
1493 if (VATTR_IS_ACTIVE(vap
, va_acl
) && acls
) {
1494 ngaflags
|= NGA_ACL
;
1496 error
= nfs_getattr(VTONFS(ap
->a_vp
), &nva
, ap
->a_context
, ngaflags
);
1501 /* copy what we have in nva to *a_vap */
1502 if (VATTR_IS_ACTIVE(vap
, va_rdev
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_RAWDEV
)) {
1503 dev_t rdev
= makedev(nva
.nva_rawdev
.specdata1
, nva
.nva_rawdev
.specdata2
);
1504 VATTR_RETURN(vap
, va_rdev
, rdev
);
1506 if (VATTR_IS_ACTIVE(vap
, va_nlink
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_NUMLINKS
)) {
1507 VATTR_RETURN(vap
, va_nlink
, nva
.nva_nlink
);
1509 if (VATTR_IS_ACTIVE(vap
, va_data_size
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_SIZE
)) {
1510 VATTR_RETURN(vap
, va_data_size
, nva
.nva_size
);
1512 // VATTR_RETURN(vap, va_data_alloc, ???);
1513 // VATTR_RETURN(vap, va_total_size, ???);
1514 if (VATTR_IS_ACTIVE(vap
, va_total_alloc
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_SPACE_USED
)) {
1515 VATTR_RETURN(vap
, va_total_alloc
, nva
.nva_bytes
);
1517 if (VATTR_IS_ACTIVE(vap
, va_uid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_OWNER
)) {
1518 VATTR_RETURN(vap
, va_uid
, nva
.nva_uid
);
1520 if (VATTR_IS_ACTIVE(vap
, va_uuuid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_OWNER
)) {
1521 VATTR_RETURN(vap
, va_uuuid
, nva
.nva_uuuid
);
1523 if (VATTR_IS_ACTIVE(vap
, va_gid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_OWNER_GROUP
)) {
1524 VATTR_RETURN(vap
, va_gid
, nva
.nva_gid
);
1526 if (VATTR_IS_ACTIVE(vap
, va_guuid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_OWNER_GROUP
)) {
1527 VATTR_RETURN(vap
, va_guuid
, nva
.nva_guuid
);
1529 if (VATTR_IS_ACTIVE(vap
, va_mode
)) {
1530 if (NMFLAG(nmp
, ACLONLY
) || !NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_MODE
)) {
1531 VATTR_RETURN(vap
, va_mode
, 0777);
1533 VATTR_RETURN(vap
, va_mode
, nva
.nva_mode
);
1536 if (VATTR_IS_ACTIVE(vap
, va_flags
) &&
1537 (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_ARCHIVE
) ||
1538 NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_HIDDEN
) ||
1539 (nva
.nva_flags
& NFS_FFLAG_TRIGGER
))) {
1541 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_ARCHIVE
) &&
1542 (nva
.nva_flags
& NFS_FFLAG_ARCHIVED
)) {
1543 flags
|= SF_ARCHIVED
;
1545 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_HIDDEN
) &&
1546 (nva
.nva_flags
& NFS_FFLAG_HIDDEN
)) {
1549 VATTR_RETURN(vap
, va_flags
, flags
);
1551 if (VATTR_IS_ACTIVE(vap
, va_create_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_CREATE
)) {
1552 vap
->va_create_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_CREATE
];
1553 vap
->va_create_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_CREATE
];
1554 VATTR_SET_SUPPORTED(vap
, va_create_time
);
1556 if (VATTR_IS_ACTIVE(vap
, va_access_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_ACCESS
)) {
1557 vap
->va_access_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_ACCESS
];
1558 vap
->va_access_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_ACCESS
];
1559 VATTR_SET_SUPPORTED(vap
, va_access_time
);
1561 if (VATTR_IS_ACTIVE(vap
, va_modify_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_MODIFY
)) {
1562 vap
->va_modify_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_MODIFY
];
1563 vap
->va_modify_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_MODIFY
];
1564 VATTR_SET_SUPPORTED(vap
, va_modify_time
);
1566 if (VATTR_IS_ACTIVE(vap
, va_change_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_METADATA
)) {
1567 vap
->va_change_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_CHANGE
];
1568 vap
->va_change_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_CHANGE
];
1569 VATTR_SET_SUPPORTED(vap
, va_change_time
);
1571 if (VATTR_IS_ACTIVE(vap
, va_backup_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_BACKUP
)) {
1572 vap
->va_backup_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_BACKUP
];
1573 vap
->va_backup_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_BACKUP
];
1574 VATTR_SET_SUPPORTED(vap
, va_backup_time
);
1576 if (VATTR_IS_ACTIVE(vap
, va_fileid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_FILEID
)) {
1577 VATTR_RETURN(vap
, va_fileid
, nva
.nva_fileid
);
1579 if (VATTR_IS_ACTIVE(vap
, va_type
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TYPE
)) {
1580 VATTR_RETURN(vap
, va_type
, nva
.nva_type
);
1582 if (VATTR_IS_ACTIVE(vap
, va_filerev
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_CHANGE
)) {
1583 VATTR_RETURN(vap
, va_filerev
, nva
.nva_change
);
1586 if (VATTR_IS_ACTIVE(vap
, va_acl
) && acls
) {
1587 VATTR_RETURN(vap
, va_acl
, nva
.nva_acl
);
1591 // other attrs we might support someday:
1592 // VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? */);
1594 NVATTR_CLEANUP(&nva
);
1601 struct vnode_attr
*vap
,
1604 struct nfsmount
*nmp
= NFSTONMP(np
);
1605 int error
= 0, setattr_error
= 0, lockerror
= ENOENT
, status
, nfsvers
, numops
;
1606 u_int64_t xid
, nextxid
;
1607 struct nfsm_chain nmreq
, nmrep
;
1608 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
1609 uint32_t getbitmap
[NFS_ATTR_BITMAP_LEN
];
1610 uint32_t setbitmap
[NFS_ATTR_BITMAP_LEN
];
1611 nfs_stateid stateid
;
1612 struct nfsreq_secinfo_args si
;
1614 if (nfs_mount_gone(nmp
)) {
1617 nfsvers
= nmp
->nm_vers
;
1618 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
1622 if (VATTR_IS_ACTIVE(vap
, va_flags
) && (vap
->va_flags
& ~(SF_ARCHIVED
| UF_HIDDEN
))) {
1623 /* we don't support setting unsupported flags (duh!) */
1624 if (vap
->va_active
& ~VNODE_ATTR_va_flags
) {
1625 return EINVAL
; /* return EINVAL if other attributes also set */
1627 return ENOTSUP
; /* return ENOTSUP for chflags(2) */
1631 /* don't bother requesting some changes if they don't look like they are changing */
1632 if (VATTR_IS_ACTIVE(vap
, va_uid
) && (vap
->va_uid
== np
->n_vattr
.nva_uid
)) {
1633 VATTR_CLEAR_ACTIVE(vap
, va_uid
);
1635 if (VATTR_IS_ACTIVE(vap
, va_gid
) && (vap
->va_gid
== np
->n_vattr
.nva_gid
)) {
1636 VATTR_CLEAR_ACTIVE(vap
, va_gid
);
1638 if (VATTR_IS_ACTIVE(vap
, va_uuuid
) && kauth_guid_equal(&vap
->va_uuuid
, &np
->n_vattr
.nva_uuuid
)) {
1639 VATTR_CLEAR_ACTIVE(vap
, va_uuuid
);
1641 if (VATTR_IS_ACTIVE(vap
, va_guuid
) && kauth_guid_equal(&vap
->va_guuid
, &np
->n_vattr
.nva_guuid
)) {
1642 VATTR_CLEAR_ACTIVE(vap
, va_guuid
);
1646 /* do nothing if no attributes will be sent */
1647 nfs_vattr_set_bitmap(nmp
, bitmap
, vap
);
1648 if (!bitmap
[0] && !bitmap
[1]) {
1652 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
1653 nfsm_chain_null(&nmreq
);
1654 nfsm_chain_null(&nmrep
);
1657 * Prepare GETATTR bitmap: if we are setting the ACL or mode, we
1658 * need to invalidate any cached ACL. And if we had an ACL cached,
1659 * we might as well also fetch the new value.
1661 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, getbitmap
);
1662 if (NFS_BITMAP_ISSET(bitmap
, NFS_FATTR_ACL
) ||
1663 NFS_BITMAP_ISSET(bitmap
, NFS_FATTR_MODE
)) {
1664 if (NACLVALID(np
)) {
1665 NFS_BITMAP_SET(getbitmap
, NFS_FATTR_ACL
);
1670 // PUTFH, SETATTR, GETATTR
1672 nfsm_chain_build_alloc_init(error
, &nmreq
, 40 * NFSX_UNSIGNED
);
1673 nfsm_chain_add_compound_header(error
, &nmreq
, "setattr", nmp
->nm_minor_vers
, numops
);
1675 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1676 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
1678 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SETATTR
);
1679 if (VATTR_IS_ACTIVE(vap
, va_data_size
)) {
1680 nfs_get_stateid(np
, vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &stateid
);
1682 stateid
.seqid
= stateid
.other
[0] = stateid
.other
[1] = stateid
.other
[2] = 0;
1684 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
1685 nfsm_chain_add_fattr4(error
, &nmreq
, vap
, nmp
);
1687 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1688 nfsm_chain_add_bitmap_supported(error
, &nmreq
, getbitmap
, nmp
, np
);
1689 nfsm_chain_build_done(error
, &nmreq
);
1690 nfsm_assert(error
, (numops
== 0), EPROTO
);
1692 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
1694 if ((lockerror
= nfs_node_lock(np
))) {
1697 nfsm_chain_skip_tag(error
, &nmrep
);
1698 nfsm_chain_get_32(error
, &nmrep
, numops
);
1699 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1701 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SETATTR
);
1702 nfsmout_if(error
== EBADRPC
);
1703 setattr_error
= error
;
1705 bmlen
= NFS_ATTR_BITMAP_LEN
;
1706 nfsm_chain_get_bitmap(error
, &nmrep
, setbitmap
, bmlen
);
1708 if (VATTR_IS_ACTIVE(vap
, va_data_size
) && (np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
1709 microuptime(&np
->n_lastio
);
1711 nfs_vattr_set_supported(setbitmap
, vap
);
1712 error
= setattr_error
;
1714 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1715 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
1717 NATTRINVALIDATE(np
);
1720 * We just changed the attributes and we want to make sure that we
1721 * see the latest attributes. Get the next XID. If it's not the
1722 * next XID after the SETATTR XID, then it's possible that another
1723 * RPC was in flight at the same time and it might put stale attributes
1724 * in the cache. In that case, we invalidate the attributes and set
1725 * the attribute cache XID to guarantee that newer attributes will
1729 nfs_get_xid(&nextxid
);
1730 if (nextxid
!= (xid
+ 1)) {
1731 np
->n_xid
= nextxid
;
1732 NATTRINVALIDATE(np
);
1736 nfs_node_unlock(np
);
1738 nfsm_chain_cleanup(&nmreq
);
1739 nfsm_chain_cleanup(&nmrep
);
1740 if ((setattr_error
== EINVAL
) && VATTR_IS_ACTIVE(vap
, va_acl
) && VATTR_IS_ACTIVE(vap
, va_mode
) && !NMFLAG(nmp
, ACLONLY
)) {
1742 * Some server's may not like ACL/mode combos that get sent.
1743 * If it looks like that's what the server choked on, try setting
1744 * just the ACL and not the mode (unless it looks like everything
1745 * but mode was already successfully set).
1747 if (((bitmap
[0] & setbitmap
[0]) != bitmap
[0]) ||
1748 ((bitmap
[1] & (setbitmap
[1] | NFS_FATTR_MODE
)) != bitmap
[1])) {
1749 VATTR_CLEAR_ACTIVE(vap
, va_mode
);
1756 #endif /* CONFIG_NFS4 */
1759 * Wait for any pending recovery to complete.
1762 nfs_mount_state_wait_for_recovery(struct nfsmount
*nmp
)
1764 struct timespec ts
= { .tv_sec
= 1, .tv_nsec
= 0 };
1765 int error
= 0, slpflag
= NMFLAG(nmp
, INTR
) ? PCATCH
: 0;
1767 lck_mtx_lock(&nmp
->nm_lock
);
1768 while (nmp
->nm_state
& NFSSTA_RECOVER
) {
1769 if ((error
= nfs_sigintr(nmp
, NULL
, current_thread(), 1))) {
1772 nfs_mount_sock_thread_wake(nmp
);
1773 msleep(&nmp
->nm_state
, &nmp
->nm_lock
, slpflag
| (PZERO
- 1), "nfsrecoverwait", &ts
);
1776 lck_mtx_unlock(&nmp
->nm_lock
);
1782 * We're about to use/manipulate NFS mount's open/lock state.
1783 * Wait for any pending state recovery to complete, then
1784 * mark the state as being in use (which will hold off
1785 * the recovery thread until we're done).
1788 nfs_mount_state_in_use_start(struct nfsmount
*nmp
, thread_t thd
)
1790 struct timespec ts
= { .tv_sec
= 1, .tv_nsec
= 0 };
1791 int error
= 0, slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
1793 if (nfs_mount_gone(nmp
)) {
1796 lck_mtx_lock(&nmp
->nm_lock
);
1797 if (nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
)) {
1798 lck_mtx_unlock(&nmp
->nm_lock
);
1801 while (nmp
->nm_state
& NFSSTA_RECOVER
) {
1802 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 1))) {
1805 nfs_mount_sock_thread_wake(nmp
);
1806 msleep(&nmp
->nm_state
, &nmp
->nm_lock
, slpflag
| (PZERO
- 1), "nfsrecoverwait", &ts
);
1810 nmp
->nm_stateinuse
++;
1812 lck_mtx_unlock(&nmp
->nm_lock
);
1818 * We're done using/manipulating the NFS mount's open/lock
1819 * state. If the given error indicates that recovery should
1820 * be performed, we'll initiate recovery.
1823 nfs_mount_state_in_use_end(struct nfsmount
*nmp
, int error
)
1825 int restart
= nfs_mount_state_error_should_restart(error
);
1827 if (nfs_mount_gone(nmp
)) {
1830 lck_mtx_lock(&nmp
->nm_lock
);
1831 if (restart
&& (error
!= NFSERR_OLD_STATEID
) && (error
!= NFSERR_GRACE
)) {
1832 printf("nfs_mount_state_in_use_end: error %d, initiating recovery for %s, 0x%x\n",
1833 error
, vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nmp
->nm_stategenid
);
1834 nfs_need_recover(nmp
, error
);
1836 if (nmp
->nm_stateinuse
> 0) {
1837 nmp
->nm_stateinuse
--;
1839 panic("NFS mount state in use count underrun");
1841 if (!nmp
->nm_stateinuse
&& (nmp
->nm_state
& NFSSTA_RECOVER
)) {
1842 wakeup(&nmp
->nm_stateinuse
);
1844 lck_mtx_unlock(&nmp
->nm_lock
);
1845 if (error
== NFSERR_GRACE
) {
1846 tsleep(&nmp
->nm_state
, (PZERO
- 1), "nfsgrace", 2 * hz
);
1853 * Does the error mean we should restart/redo a state-related operation?
1856 nfs_mount_state_error_should_restart(int error
)
1859 case NFSERR_STALE_STATEID
:
1860 case NFSERR_STALE_CLIENTID
:
1861 case NFSERR_ADMIN_REVOKED
:
1862 case NFSERR_EXPIRED
:
1863 case NFSERR_OLD_STATEID
:
1864 case NFSERR_BAD_STATEID
:
1872 * In some cases we may want to limit how many times we restart a
1873 * state-related operation - e.g. we're repeatedly getting NFSERR_GRACE.
1874 * Base the limit on the lease (as long as it's not too short).
1877 nfs_mount_state_max_restarts(struct nfsmount
*nmp
)
1879 return MAX(nmp
->nm_fsattr
.nfsa_lease
, 60);
1883 * Does the error mean we probably lost a delegation?
1886 nfs_mount_state_error_delegation_lost(int error
)
1889 case NFSERR_STALE_STATEID
:
1890 case NFSERR_ADMIN_REVOKED
:
1891 case NFSERR_EXPIRED
:
1892 case NFSERR_OLD_STATEID
:
1893 case NFSERR_BAD_STATEID
:
1894 case NFSERR_GRACE
: /* ugh! (stupid) RFC 3530 specifically disallows CLAIM_DELEGATE_CUR during grace period? */
1902 * Mark an NFS node's open state as busy.
1905 nfs_open_state_set_busy(nfsnode_t np
, thread_t thd
)
1907 struct nfsmount
*nmp
;
1908 struct timespec ts
= { .tv_sec
= 2, .tv_nsec
= 0 };
1909 int error
= 0, slpflag
;
1912 if (nfs_mount_gone(nmp
)) {
1915 slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
1917 lck_mtx_lock(&np
->n_openlock
);
1918 while (np
->n_openflags
& N_OPENBUSY
) {
1919 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0))) {
1922 np
->n_openflags
|= N_OPENWANT
;
1923 msleep(&np
->n_openflags
, &np
->n_openlock
, slpflag
, "nfs_open_state_set_busy", &ts
);
1927 np
->n_openflags
|= N_OPENBUSY
;
1929 lck_mtx_unlock(&np
->n_openlock
);
1935 * Clear an NFS node's open state busy flag and wake up
1936 * anyone wanting it.
1939 nfs_open_state_clear_busy(nfsnode_t np
)
1943 lck_mtx_lock(&np
->n_openlock
);
1944 if (!(np
->n_openflags
& N_OPENBUSY
)) {
1945 panic("nfs_open_state_clear_busy");
1947 wanted
= (np
->n_openflags
& N_OPENWANT
);
1948 np
->n_openflags
&= ~(N_OPENBUSY
| N_OPENWANT
);
1949 lck_mtx_unlock(&np
->n_openlock
);
1951 wakeup(&np
->n_openflags
);
1956 * Search a mount's open owner list for the owner for this credential.
1957 * If not found and "alloc" is set, then allocate a new one.
1959 struct nfs_open_owner
*
1960 nfs_open_owner_find(struct nfsmount
*nmp
, kauth_cred_t cred
, int alloc
)
1962 uid_t uid
= kauth_cred_getuid(cred
);
1963 struct nfs_open_owner
*noop
, *newnoop
= NULL
;
1966 lck_mtx_lock(&nmp
->nm_lock
);
1967 TAILQ_FOREACH(noop
, &nmp
->nm_open_owners
, noo_link
) {
1968 if (kauth_cred_getuid(noop
->noo_cred
) == uid
) {
1973 if (!noop
&& !newnoop
&& alloc
) {
1974 lck_mtx_unlock(&nmp
->nm_lock
);
1975 MALLOC(newnoop
, struct nfs_open_owner
*, sizeof(struct nfs_open_owner
), M_TEMP
, M_WAITOK
);
1979 bzero(newnoop
, sizeof(*newnoop
));
1980 lck_mtx_init(&newnoop
->noo_lock
, nfs_open_grp
, LCK_ATTR_NULL
);
1981 newnoop
->noo_mount
= nmp
;
1982 kauth_cred_ref(cred
);
1983 newnoop
->noo_cred
= cred
;
1984 newnoop
->noo_name
= OSAddAtomic(1, &nfs_open_owner_seqnum
);
1985 TAILQ_INIT(&newnoop
->noo_opens
);
1988 if (!noop
&& newnoop
) {
1989 newnoop
->noo_flags
|= NFS_OPEN_OWNER_LINK
;
1990 os_ref_init(&newnoop
->noo_refcnt
, NULL
);
1991 TAILQ_INSERT_HEAD(&nmp
->nm_open_owners
, newnoop
, noo_link
);
1994 lck_mtx_unlock(&nmp
->nm_lock
);
1996 if (newnoop
&& (noop
!= newnoop
)) {
1997 nfs_open_owner_destroy(newnoop
);
2001 nfs_open_owner_ref(noop
);
2008 * destroy an open owner that's no longer needed
2011 nfs_open_owner_destroy(struct nfs_open_owner
*noop
)
2013 if (noop
->noo_cred
) {
2014 kauth_cred_unref(&noop
->noo_cred
);
2016 lck_mtx_destroy(&noop
->noo_lock
, nfs_open_grp
);
2021 * acquire a reference count on an open owner
2024 nfs_open_owner_ref(struct nfs_open_owner
*noop
)
2026 lck_mtx_lock(&noop
->noo_lock
);
2027 os_ref_retain_locked(&noop
->noo_refcnt
);
2028 lck_mtx_unlock(&noop
->noo_lock
);
2032 * drop a reference count on an open owner and destroy it if
2033 * it is no longer referenced and no longer on the mount's list.
2036 nfs_open_owner_rele(struct nfs_open_owner
*noop
)
2038 os_ref_count_t newcount
;
2040 lck_mtx_lock(&noop
->noo_lock
);
2041 if (os_ref_get_count(&noop
->noo_refcnt
) < 1) {
2042 panic("nfs_open_owner_rele: no refcnt");
2044 newcount
= os_ref_release_locked(&noop
->noo_refcnt
);
2045 if (!newcount
&& (noop
->noo_flags
& NFS_OPEN_OWNER_BUSY
)) {
2046 panic("nfs_open_owner_rele: busy");
2048 /* XXX we may potentially want to clean up idle/unused open owner structures */
2049 if (newcount
|| (noop
->noo_flags
& NFS_OPEN_OWNER_LINK
)) {
2050 lck_mtx_unlock(&noop
->noo_lock
);
2053 /* owner is no longer referenced or linked to mount, so destroy it */
2054 lck_mtx_unlock(&noop
->noo_lock
);
2055 nfs_open_owner_destroy(noop
);
2059 * Mark an open owner as busy because we are about to
2060 * start an operation that uses and updates open owner state.
2063 nfs_open_owner_set_busy(struct nfs_open_owner
*noop
, thread_t thd
)
2065 struct nfsmount
*nmp
;
2066 struct timespec ts
= { .tv_sec
= 2, .tv_nsec
= 0 };
2067 int error
= 0, slpflag
;
2069 nmp
= noop
->noo_mount
;
2070 if (nfs_mount_gone(nmp
)) {
2073 slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
2075 lck_mtx_lock(&noop
->noo_lock
);
2076 while (noop
->noo_flags
& NFS_OPEN_OWNER_BUSY
) {
2077 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0))) {
2080 noop
->noo_flags
|= NFS_OPEN_OWNER_WANT
;
2081 msleep(noop
, &noop
->noo_lock
, slpflag
, "nfs_open_owner_set_busy", &ts
);
2085 noop
->noo_flags
|= NFS_OPEN_OWNER_BUSY
;
2087 lck_mtx_unlock(&noop
->noo_lock
);
2093 * Clear the busy flag on an open owner and wake up anyone waiting
2097 nfs_open_owner_clear_busy(struct nfs_open_owner
*noop
)
2101 lck_mtx_lock(&noop
->noo_lock
);
2102 if (!(noop
->noo_flags
& NFS_OPEN_OWNER_BUSY
)) {
2103 panic("nfs_open_owner_clear_busy");
2105 wanted
= (noop
->noo_flags
& NFS_OPEN_OWNER_WANT
);
2106 noop
->noo_flags
&= ~(NFS_OPEN_OWNER_BUSY
| NFS_OPEN_OWNER_WANT
);
2107 lck_mtx_unlock(&noop
->noo_lock
);
2114 * Given an open/lock owner and an error code, increment the
2115 * sequence ID if appropriate.
2118 nfs_owner_seqid_increment(struct nfs_open_owner
*noop
, struct nfs_lock_owner
*nlop
, int error
)
2121 case NFSERR_STALE_CLIENTID
:
2122 case NFSERR_STALE_STATEID
:
2123 case NFSERR_OLD_STATEID
:
2124 case NFSERR_BAD_STATEID
:
2125 case NFSERR_BAD_SEQID
:
2127 case NFSERR_RESOURCE
:
2128 case NFSERR_NOFILEHANDLE
:
2129 /* do not increment the open seqid on these errors */
2141 * Search a node's open file list for any conflicts with this request.
2142 * Also find this open owner's open file structure.
2143 * If not found and "alloc" is set, then allocate one.
2148 struct nfs_open_owner
*noop
,
2149 struct nfs_open_file
**nofpp
,
2150 uint32_t accessMode
,
2155 return nfs_open_file_find_internal(np
, noop
, nofpp
, accessMode
, denyMode
, alloc
);
2159 * Internally, allow using a provisional nodeless nofp (passed in via *nofpp)
2160 * if an existing one is not found. This is used in "create" scenarios to
2161 * officially add the provisional nofp to the node once the node is created.
2164 nfs_open_file_find_internal(
2166 struct nfs_open_owner
*noop
,
2167 struct nfs_open_file
**nofpp
,
2168 uint32_t accessMode
,
2172 struct nfs_open_file
*nofp
= NULL
, *nofp2
, *newnofp
= NULL
;
2178 lck_mtx_lock(&np
->n_openlock
);
2179 TAILQ_FOREACH(nofp2
, &np
->n_opens
, nof_link
) {
2180 if (nofp2
->nof_owner
== noop
) {
2186 if ((accessMode
& nofp2
->nof_deny
) || (denyMode
& nofp2
->nof_access
)) {
2187 /* This request conflicts with an existing open on this client. */
2188 lck_mtx_unlock(&np
->n_openlock
);
2194 * If this open owner doesn't have an open
2195 * file structure yet, we create one for it.
2197 if (!nofp
&& !*nofpp
&& !newnofp
&& alloc
) {
2198 lck_mtx_unlock(&np
->n_openlock
);
2200 MALLOC(newnofp
, struct nfs_open_file
*, sizeof(struct nfs_open_file
), M_TEMP
, M_WAITOK
);
2204 bzero(newnofp
, sizeof(*newnofp
));
2205 lck_mtx_init(&newnofp
->nof_lock
, nfs_open_grp
, LCK_ATTR_NULL
);
2206 newnofp
->nof_owner
= noop
;
2207 nfs_open_owner_ref(noop
);
2208 newnofp
->nof_np
= np
;
2209 lck_mtx_lock(&noop
->noo_lock
);
2210 TAILQ_INSERT_HEAD(&noop
->noo_opens
, newnofp
, nof_oolink
);
2211 lck_mtx_unlock(&noop
->noo_lock
);
2218 (*nofpp
)->nof_np
= np
;
2224 TAILQ_INSERT_HEAD(&np
->n_opens
, nofp
, nof_link
);
2228 lck_mtx_unlock(&np
->n_openlock
);
2231 if (alloc
&& newnofp
&& (nofp
!= newnofp
)) {
2232 nfs_open_file_destroy(newnofp
);
2236 return nofp
? 0 : ESRCH
;
2240 * Destroy an open file structure.
2243 nfs_open_file_destroy(struct nfs_open_file
*nofp
)
2245 lck_mtx_lock(&nofp
->nof_owner
->noo_lock
);
2246 TAILQ_REMOVE(&nofp
->nof_owner
->noo_opens
, nofp
, nof_oolink
);
2247 lck_mtx_unlock(&nofp
->nof_owner
->noo_lock
);
2248 nfs_open_owner_rele(nofp
->nof_owner
);
2249 lck_mtx_destroy(&nofp
->nof_lock
, nfs_open_grp
);
2254 * Mark an open file as busy because we are about to
2255 * start an operation that uses and updates open file state.
2258 nfs_open_file_set_busy(struct nfs_open_file
*nofp
, thread_t thd
)
2260 struct nfsmount
*nmp
;
2261 struct timespec ts
= { .tv_sec
= 2, .tv_nsec
= 0 };
2262 int error
= 0, slpflag
;
2264 nmp
= nofp
->nof_owner
->noo_mount
;
2265 if (nfs_mount_gone(nmp
)) {
2268 slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
2270 lck_mtx_lock(&nofp
->nof_lock
);
2271 while (nofp
->nof_flags
& NFS_OPEN_FILE_BUSY
) {
2272 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0))) {
2275 nofp
->nof_flags
|= NFS_OPEN_FILE_WANT
;
2276 msleep(nofp
, &nofp
->nof_lock
, slpflag
, "nfs_open_file_set_busy", &ts
);
2280 nofp
->nof_flags
|= NFS_OPEN_FILE_BUSY
;
2282 lck_mtx_unlock(&nofp
->nof_lock
);
2288 * Clear the busy flag on an open file and wake up anyone waiting
2292 nfs_open_file_clear_busy(struct nfs_open_file
*nofp
)
2296 lck_mtx_lock(&nofp
->nof_lock
);
2297 if (!(nofp
->nof_flags
& NFS_OPEN_FILE_BUSY
)) {
2298 panic("nfs_open_file_clear_busy");
2300 wanted
= (nofp
->nof_flags
& NFS_OPEN_FILE_WANT
);
2301 nofp
->nof_flags
&= ~(NFS_OPEN_FILE_BUSY
| NFS_OPEN_FILE_WANT
);
2302 lck_mtx_unlock(&nofp
->nof_lock
);
2309 * Add the open state for the given access/deny modes to this open file.
2312 nfs_open_file_add_open(struct nfs_open_file
*nofp
, uint32_t accessMode
, uint32_t denyMode
, int delegated
)
2314 lck_mtx_lock(&nofp
->nof_lock
);
2315 nofp
->nof_access
|= accessMode
;
2316 nofp
->nof_deny
|= denyMode
;
2319 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2320 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2322 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2324 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2327 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2328 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2330 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2332 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2333 nofp
->nof_d_rw_dw
++;
2335 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2336 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2337 nofp
->nof_d_r_drw
++;
2338 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2339 nofp
->nof_d_w_drw
++;
2340 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2341 nofp
->nof_d_rw_drw
++;
2345 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2346 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2348 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2350 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2353 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2354 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2356 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2358 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2361 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2362 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2364 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2366 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2372 nofp
->nof_opencnt
++;
2373 lck_mtx_unlock(&nofp
->nof_lock
);
2377 * Find which particular open combo will be closed and report what
2378 * the new modes will be and whether the open was delegated.
2381 nfs_open_file_remove_open_find(
2382 struct nfs_open_file
*nofp
,
2383 uint32_t accessMode
,
2385 uint32_t *newAccessMode
,
2386 uint32_t *newDenyMode
,
2390 * Calculate new modes: a mode bit gets removed when there's only
2391 * one count in all the corresponding counts
2393 *newAccessMode
= nofp
->nof_access
;
2394 *newDenyMode
= nofp
->nof_deny
;
2396 if ((accessMode
& NFS_OPEN_SHARE_ACCESS_READ
) &&
2397 (nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_READ
) &&
2398 ((nofp
->nof_r
+ nofp
->nof_d_r
+
2399 nofp
->nof_rw
+ nofp
->nof_d_rw
+
2400 nofp
->nof_r_dw
+ nofp
->nof_d_r_dw
+
2401 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
+
2402 nofp
->nof_r_drw
+ nofp
->nof_d_r_drw
+
2403 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
) == 1)) {
2404 *newAccessMode
&= ~NFS_OPEN_SHARE_ACCESS_READ
;
2406 if ((accessMode
& NFS_OPEN_SHARE_ACCESS_WRITE
) &&
2407 (nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_WRITE
) &&
2408 ((nofp
->nof_w
+ nofp
->nof_d_w
+
2409 nofp
->nof_rw
+ nofp
->nof_d_rw
+
2410 nofp
->nof_w_dw
+ nofp
->nof_d_w_dw
+
2411 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
+
2412 nofp
->nof_w_drw
+ nofp
->nof_d_w_drw
+
2413 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
) == 1)) {
2414 *newAccessMode
&= ~NFS_OPEN_SHARE_ACCESS_WRITE
;
2416 if ((denyMode
& NFS_OPEN_SHARE_DENY_READ
) &&
2417 (nofp
->nof_deny
& NFS_OPEN_SHARE_DENY_READ
) &&
2418 ((nofp
->nof_r_drw
+ nofp
->nof_d_r_drw
+
2419 nofp
->nof_w_drw
+ nofp
->nof_d_w_drw
+
2420 nofp
->nof_rw_drw
+ nofp
->nof_d_rw_drw
) == 1)) {
2421 *newDenyMode
&= ~NFS_OPEN_SHARE_DENY_READ
;
2423 if ((denyMode
& NFS_OPEN_SHARE_DENY_WRITE
) &&
2424 (nofp
->nof_deny
& NFS_OPEN_SHARE_DENY_WRITE
) &&
2425 ((nofp
->nof_r_drw
+ nofp
->nof_d_r_drw
+
2426 nofp
->nof_w_drw
+ nofp
->nof_d_w_drw
+
2427 nofp
->nof_rw_drw
+ nofp
->nof_d_rw_drw
+
2428 nofp
->nof_r_dw
+ nofp
->nof_d_r_dw
+
2429 nofp
->nof_w_dw
+ nofp
->nof_d_w_dw
+
2430 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
) == 1)) {
2431 *newDenyMode
&= ~NFS_OPEN_SHARE_DENY_WRITE
;
2434 /* Find the corresponding open access/deny mode counter. */
2435 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2436 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2437 *delegated
= (nofp
->nof_d_r
!= 0);
2438 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2439 *delegated
= (nofp
->nof_d_w
!= 0);
2440 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2441 *delegated
= (nofp
->nof_d_rw
!= 0);
2445 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2446 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2447 *delegated
= (nofp
->nof_d_r_dw
!= 0);
2448 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2449 *delegated
= (nofp
->nof_d_w_dw
!= 0);
2450 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2451 *delegated
= (nofp
->nof_d_rw_dw
!= 0);
2455 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2456 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2457 *delegated
= (nofp
->nof_d_r_drw
!= 0);
2458 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2459 *delegated
= (nofp
->nof_d_w_drw
!= 0);
2460 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2461 *delegated
= (nofp
->nof_d_rw_drw
!= 0);
2469 * Remove the open state for the given access/deny modes to this open file.
2472 nfs_open_file_remove_open(struct nfs_open_file
*nofp
, uint32_t accessMode
, uint32_t denyMode
)
2474 uint32_t newAccessMode
, newDenyMode
;
2477 lck_mtx_lock(&nofp
->nof_lock
);
2478 nfs_open_file_remove_open_find(nofp
, accessMode
, denyMode
, &newAccessMode
, &newDenyMode
, &delegated
);
2480 /* Decrement the corresponding open access/deny mode counter. */
2481 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2482 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2484 if (nofp
->nof_d_r
== 0) {
2485 NP(nofp
->nof_np
, "nfs: open(R) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2490 if (nofp
->nof_r
== 0) {
2491 NP(nofp
->nof_np
, "nfs: open(R) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2496 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2498 if (nofp
->nof_d_w
== 0) {
2499 NP(nofp
->nof_np
, "nfs: open(W) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2504 if (nofp
->nof_w
== 0) {
2505 NP(nofp
->nof_np
, "nfs: open(W) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2510 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2512 if (nofp
->nof_d_rw
== 0) {
2513 NP(nofp
->nof_np
, "nfs: open(RW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2518 if (nofp
->nof_rw
== 0) {
2519 NP(nofp
->nof_np
, "nfs: open(RW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2525 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2526 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2528 if (nofp
->nof_d_r_dw
== 0) {
2529 NP(nofp
->nof_np
, "nfs: open(R,DW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2534 if (nofp
->nof_r_dw
== 0) {
2535 NP(nofp
->nof_np
, "nfs: open(R,DW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2540 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2542 if (nofp
->nof_d_w_dw
== 0) {
2543 NP(nofp
->nof_np
, "nfs: open(W,DW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2548 if (nofp
->nof_w_dw
== 0) {
2549 NP(nofp
->nof_np
, "nfs: open(W,DW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2554 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2556 if (nofp
->nof_d_rw_dw
== 0) {
2557 NP(nofp
->nof_np
, "nfs: open(RW,DW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2559 nofp
->nof_d_rw_dw
--;
2562 if (nofp
->nof_rw_dw
== 0) {
2563 NP(nofp
->nof_np
, "nfs: open(RW,DW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2569 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2570 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2572 if (nofp
->nof_d_r_drw
== 0) {
2573 NP(nofp
->nof_np
, "nfs: open(R,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2575 nofp
->nof_d_r_drw
--;
2578 if (nofp
->nof_r_drw
== 0) {
2579 NP(nofp
->nof_np
, "nfs: open(R,DRW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2584 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2586 if (nofp
->nof_d_w_drw
== 0) {
2587 NP(nofp
->nof_np
, "nfs: open(W,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2589 nofp
->nof_d_w_drw
--;
2592 if (nofp
->nof_w_drw
== 0) {
2593 NP(nofp
->nof_np
, "nfs: open(W,DRW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2598 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2600 if (nofp
->nof_d_rw_drw
== 0) {
2601 NP(nofp
->nof_np
, "nfs: open(RW,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2603 nofp
->nof_d_rw_drw
--;
2606 if (nofp
->nof_rw_drw
== 0) {
2607 NP(nofp
->nof_np
, "nfs: open(RW,DRW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2615 /* update the modes */
2616 nofp
->nof_access
= newAccessMode
;
2617 nofp
->nof_deny
= newDenyMode
;
2618 nofp
->nof_opencnt
--;
2619 lck_mtx_unlock(&nofp
->nof_lock
);
2624 * Get the current (delegation, lock, open, default) stateid for this node.
2625 * If node has a delegation, use that stateid.
2626 * If pid has a lock, use the lockowner's stateid.
2627 * Or use the open file's stateid.
2628 * If no open file, use a default stateid of all ones.
2631 nfs_get_stateid(nfsnode_t np
, thread_t thd
, kauth_cred_t cred
, nfs_stateid
*sid
)
2633 struct nfsmount
*nmp
= NFSTONMP(np
);
2634 proc_t p
= thd
? get_bsdthreadtask_info(thd
) : current_proc(); // XXX async I/O requests don't have a thread
2635 struct nfs_open_owner
*noop
= NULL
;
2636 struct nfs_open_file
*nofp
= NULL
;
2637 struct nfs_lock_owner
*nlop
= NULL
;
2638 nfs_stateid
*s
= NULL
;
2640 if (np
->n_openflags
& N_DELEG_MASK
) {
2641 s
= &np
->n_dstateid
;
2644 nlop
= nfs_lock_owner_find(np
, p
, 0);
2646 if (nlop
&& !TAILQ_EMPTY(&nlop
->nlo_locks
)) {
2647 /* we hold locks, use lock stateid */
2648 s
= &nlop
->nlo_stateid
;
2649 } else if (((noop
= nfs_open_owner_find(nmp
, cred
, 0))) &&
2650 (nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 0) == 0) &&
2651 !(nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) &&
2653 /* we (should) have the file open, use open stateid */
2654 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
2655 nfs4_reopen(nofp
, thd
);
2657 if (!(nofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
2658 s
= &nofp
->nof_stateid
;
2664 sid
->seqid
= s
->seqid
;
2665 sid
->other
[0] = s
->other
[0];
2666 sid
->other
[1] = s
->other
[1];
2667 sid
->other
[2] = s
->other
[2];
2669 /* named attributes may not have a stateid for reads, so don't complain for them */
2670 if (!(np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
2671 NP(np
, "nfs_get_stateid: no stateid");
2673 sid
->seqid
= sid
->other
[0] = sid
->other
[1] = sid
->other
[2] = 0xffffffff;
2676 nfs_lock_owner_rele(nlop
);
2679 nfs_open_owner_rele(noop
);
2685 * When we have a delegation, we may be able to perform the OPEN locally.
2686 * Perform the OPEN by checking the delegation ACE and/or checking via ACCESS.
2689 nfs4_open_delegated(
2691 struct nfs_open_file
*nofp
,
2692 uint32_t accessMode
,
2696 int error
= 0, ismember
, readtoo
= 0, authorized
= 0;
2698 struct kauth_acl_eval eval
;
2699 kauth_cred_t cred
= vfs_context_ucred(ctx
);
2701 if (!(accessMode
& NFS_OPEN_SHARE_ACCESS_READ
)) {
2703 * Try to open it for read access too,
2704 * so the buffer cache can read data.
2707 accessMode
|= NFS_OPEN_SHARE_ACCESS_READ
;
2712 if (accessMode
& NFS_OPEN_SHARE_ACCESS_READ
) {
2713 action
|= KAUTH_VNODE_READ_DATA
;
2715 if (accessMode
& NFS_OPEN_SHARE_ACCESS_WRITE
) {
2716 action
|= KAUTH_VNODE_WRITE_DATA
;
2719 /* evaluate ACE (if we have one) */
2720 if (np
->n_dace
.ace_flags
) {
2721 eval
.ae_requested
= action
;
2722 eval
.ae_acl
= &np
->n_dace
;
2724 eval
.ae_options
= 0;
2725 if (np
->n_vattr
.nva_uid
== kauth_cred_getuid(cred
)) {
2726 eval
.ae_options
|= KAUTH_AEVAL_IS_OWNER
;
2728 error
= kauth_cred_ismember_gid(cred
, np
->n_vattr
.nva_gid
, &ismember
);
2729 if (!error
&& ismember
) {
2730 eval
.ae_options
|= KAUTH_AEVAL_IN_GROUP
;
2733 eval
.ae_exp_gall
= KAUTH_VNODE_GENERIC_ALL_BITS
;
2734 eval
.ae_exp_gread
= KAUTH_VNODE_GENERIC_READ_BITS
;
2735 eval
.ae_exp_gwrite
= KAUTH_VNODE_GENERIC_WRITE_BITS
;
2736 eval
.ae_exp_gexec
= KAUTH_VNODE_GENERIC_EXECUTE_BITS
;
2738 error
= kauth_acl_evaluate(cred
, &eval
);
2740 if (!error
&& (eval
.ae_result
== KAUTH_RESULT_ALLOW
)) {
2746 /* need to ask the server via ACCESS */
2747 struct vnop_access_args naa
;
2748 naa
.a_desc
= &vnop_access_desc
;
2749 naa
.a_vp
= NFSTOV(np
);
2750 naa
.a_action
= action
;
2751 naa
.a_context
= ctx
;
2752 if (!(error
= nfs_vnop_access(&naa
))) {
2759 /* try again without the extra read access */
2760 accessMode
&= ~NFS_OPEN_SHARE_ACCESS_READ
;
2764 return error
? error
: EACCES
;
2767 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, 1);
2774 * Open a file with the given access/deny modes.
2776 * If we have a delegation, we may be able to handle the open locally.
2777 * Otherwise, we will always send the open RPC even if this open's mode is
2778 * a subset of all the existing opens. This makes sure that we will always
2779 * be able to do a downgrade to any of the open modes.
2781 * Note: local conflicts should have already been checked in nfs_open_file_find().
2786 struct nfs_open_file
*nofp
,
2787 uint32_t accessMode
,
2791 vnode_t vp
= NFSTOV(np
);
2793 struct componentname cn
;
2794 const char *vname
= NULL
;
2796 char smallname
[128];
2797 char *filename
= NULL
;
2798 int error
= 0, readtoo
= 0;
2801 * We can handle the OPEN ourselves if we have a delegation,
2802 * unless it's a read delegation and the open is asking for
2803 * either write access or deny read. We also don't bother to
2804 * use the delegation if it's being returned.
2806 if (np
->n_openflags
& N_DELEG_MASK
) {
2807 if ((error
= nfs_open_state_set_busy(np
, vfs_context_thread(ctx
)))) {
2810 if ((np
->n_openflags
& N_DELEG_MASK
) && !(np
->n_openflags
& N_DELEG_RETURN
) &&
2811 (((np
->n_openflags
& N_DELEG_MASK
) == N_DELEG_WRITE
) ||
2812 (!(accessMode
& NFS_OPEN_SHARE_ACCESS_WRITE
) && !(denyMode
& NFS_OPEN_SHARE_DENY_READ
)))) {
2813 error
= nfs4_open_delegated(np
, nofp
, accessMode
, denyMode
, ctx
);
2814 nfs_open_state_clear_busy(np
);
2817 nfs_open_state_clear_busy(np
);
2821 * [sigh] We can't trust VFS to get the parent right for named
2822 * attribute nodes. (It likes to reparent the nodes after we've
2823 * created them.) Luckily we can probably get the right parent
2824 * from the n_parent we have stashed away.
2826 if ((np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) &&
2827 (((dvp
= np
->n_parent
)) && (error
= vnode_get(dvp
)))) {
2831 dvp
= vnode_getparent(vp
);
2833 vname
= vnode_getname(vp
);
2834 if (!dvp
|| !vname
) {
2840 filename
= &smallname
[0];
2841 namelen
= snprintf(filename
, sizeof(smallname
), "%s", vname
);
2842 if (namelen
>= sizeof(smallname
)) {
2843 MALLOC(filename
, char *, namelen
+ 1, M_TEMP
, M_WAITOK
);
2848 snprintf(filename
, namelen
+ 1, "%s", vname
);
2850 bzero(&cn
, sizeof(cn
));
2851 cn
.cn_nameptr
= filename
;
2852 cn
.cn_namelen
= namelen
;
2854 if (!(accessMode
& NFS_OPEN_SHARE_ACCESS_READ
)) {
2856 * Try to open it for read access too,
2857 * so the buffer cache can read data.
2860 accessMode
|= NFS_OPEN_SHARE_ACCESS_READ
;
2863 error
= nfs4_open_rpc(nofp
, ctx
, &cn
, NULL
, dvp
, &vp
, NFS_OPEN_NOCREATE
, accessMode
, denyMode
);
2865 if (!nfs_mount_state_error_should_restart(error
) &&
2866 (error
!= EINTR
) && (error
!= ERESTART
) && readtoo
) {
2867 /* try again without the extra read access */
2868 accessMode
&= ~NFS_OPEN_SHARE_ACCESS_READ
;
2874 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, 0);
2876 if (filename
&& (filename
!= &smallname
[0])) {
2877 FREE(filename
, M_TEMP
);
2880 vnode_putname(vname
);
2882 if (dvp
!= NULLVP
) {
2887 #endif /* CONFIG_NFS4 */
2891 struct vnop_mmap_args
/* {
2892 * struct vnodeop_desc *a_desc;
2895 * vfs_context_t a_context;
2898 vfs_context_t ctx
= ap
->a_context
;
2899 vnode_t vp
= ap
->a_vp
;
2900 nfsnode_t np
= VTONFS(vp
);
2901 int error
= 0, accessMode
, denyMode
, delegated
;
2902 struct nfsmount
*nmp
;
2903 struct nfs_open_owner
*noop
= NULL
;
2904 struct nfs_open_file
*nofp
= NULL
;
2907 if (nfs_mount_gone(nmp
)) {
2911 if (!vnode_isreg(vp
) || !(ap
->a_fflags
& (PROT_READ
| PROT_WRITE
))) {
2914 if (np
->n_flag
& NREVOKE
) {
2919 * fflags contains some combination of: PROT_READ, PROT_WRITE
2920 * Since it's not possible to mmap() without having the file open for reading,
2921 * read access is always there (regardless if PROT_READ is not set).
2923 accessMode
= NFS_OPEN_SHARE_ACCESS_READ
;
2924 if (ap
->a_fflags
& PROT_WRITE
) {
2925 accessMode
|= NFS_OPEN_SHARE_ACCESS_WRITE
;
2927 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
2929 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1);
2935 error
= nfs_mount_state_in_use_start(nmp
, NULL
);
2937 nfs_open_owner_rele(noop
);
2940 if (np
->n_flag
& NREVOKE
) {
2942 nfs_mount_state_in_use_end(nmp
, 0);
2943 nfs_open_owner_rele(noop
);
2947 error
= nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 1);
2948 if (error
|| (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
))) {
2949 NP(np
, "nfs_vnop_mmap: no open file for owner, error %d, %d", error
, kauth_cred_getuid(noop
->noo_cred
));
2953 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
2954 nfs_mount_state_in_use_end(nmp
, 0);
2955 error
= nfs4_reopen(nofp
, NULL
);
2963 error
= nfs_open_file_set_busy(nofp
, NULL
);
2971 * The open reference for mmap must mirror an existing open because
2972 * we may need to reclaim it after the file is closed.
2973 * So grab another open count matching the accessMode passed in.
2974 * If we already had an mmap open, prefer read/write without deny mode.
2975 * This means we may have to drop the current mmap open first.
2977 * N.B. We should have an open for the mmap, because, mmap was
2978 * called on an open descriptor, or we've created an open for read
2979 * from reading the first page for execve. However, if we piggy
2980 * backed on an existing NFS_OPEN_SHARE_ACCESS_READ/NFS_OPEN_SHARE_DENY_NONE
2981 * that open may have closed.
2984 if (!(nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_READ
)) {
2985 if (nofp
->nof_flags
& NFS_OPEN_FILE_NEEDCLOSE
) {
2986 /* We shouldn't get here. We've already open the file for execve */
2987 NP(np
, "nfs_vnop_mmap: File already needs close access: 0x%x, cred: %d thread: %lld",
2988 nofp
->nof_access
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
), thread_tid(vfs_context_thread(ctx
)));
2991 * mmapings for execve are just for read. Get out with EPERM if the accessMode is not ACCESS_READ
2992 * or the access would be denied. Other accesses should have an open descriptor for the mapping.
2994 if (accessMode
!= NFS_OPEN_SHARE_ACCESS_READ
|| (accessMode
& nofp
->nof_deny
)) {
2995 /* not asking for just read access -> fail */
2999 /* we don't have the file open, so open it for read access */
3000 if (nmp
->nm_vers
< NFS_VER4
) {
3001 /* NFS v2/v3 opens are always allowed - so just add it. */
3002 nfs_open_file_add_open(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, 0);
3007 error
= nfs4_open(np
, nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, ctx
);
3011 nofp
->nof_flags
|= NFS_OPEN_FILE_NEEDCLOSE
;
3018 /* determine deny mode for open */
3019 if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
3020 if (nofp
->nof_d_rw
|| nofp
->nof_d_rw_dw
|| nofp
->nof_d_rw_drw
) {
3022 if (nofp
->nof_d_rw
) {
3023 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
3024 } else if (nofp
->nof_d_rw_dw
) {
3025 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
3026 } else if (nofp
->nof_d_rw_drw
) {
3027 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
3029 } else if (nofp
->nof_rw
|| nofp
->nof_rw_dw
|| nofp
->nof_rw_drw
) {
3032 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
3033 } else if (nofp
->nof_rw_dw
) {
3034 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
3035 } else if (nofp
->nof_rw_drw
) {
3036 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
3041 } else { /* NFS_OPEN_SHARE_ACCESS_READ */
3042 if (nofp
->nof_d_r
|| nofp
->nof_d_r_dw
|| nofp
->nof_d_r_drw
) {
3044 if (nofp
->nof_d_r
) {
3045 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
3046 } else if (nofp
->nof_d_r_dw
) {
3047 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
3048 } else if (nofp
->nof_d_r_drw
) {
3049 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
3051 } else if (nofp
->nof_r
|| nofp
->nof_r_dw
|| nofp
->nof_r_drw
) {
3054 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
3055 } else if (nofp
->nof_r_dw
) {
3056 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
3057 } else if (nofp
->nof_r_drw
) {
3058 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
3060 } else if (nofp
->nof_d_rw
|| nofp
->nof_d_rw_dw
|| nofp
->nof_d_rw_drw
) {
3062 * This clause and the one below is to co-opt a read write access
3063 * for a read only mmaping. We probably got here in that an
3064 * existing rw open for an executable file already exists.
3067 accessMode
= NFS_OPEN_SHARE_ACCESS_BOTH
;
3068 if (nofp
->nof_d_rw
) {
3069 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
3070 } else if (nofp
->nof_d_rw_dw
) {
3071 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
3072 } else if (nofp
->nof_d_rw_drw
) {
3073 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
3075 } else if (nofp
->nof_rw
|| nofp
->nof_rw_dw
|| nofp
->nof_rw_drw
) {
3077 accessMode
= NFS_OPEN_SHARE_ACCESS_BOTH
;
3079 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
3080 } else if (nofp
->nof_rw_dw
) {
3081 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
3082 } else if (nofp
->nof_rw_drw
) {
3083 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
3089 if (error
) { /* mmap mode without proper open mode */
3094 * If the existing mmap access is more than the new access OR the
3095 * existing access is the same and the existing deny mode is less,
3096 * then we'll stick with the existing mmap open mode.
3098 if ((nofp
->nof_mmap_access
> accessMode
) ||
3099 ((nofp
->nof_mmap_access
== accessMode
) && (nofp
->nof_mmap_deny
<= denyMode
))) {
3103 /* update mmap open mode */
3104 if (nofp
->nof_mmap_access
) {
3105 error
= nfs_close(np
, nofp
, nofp
->nof_mmap_access
, nofp
->nof_mmap_deny
, ctx
);
3107 if (!nfs_mount_state_error_should_restart(error
)) {
3108 NP(np
, "nfs_vnop_mmap: close of previous mmap mode failed: %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
3110 NP(np
, "nfs_vnop_mmap: update, close error %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
3113 nofp
->nof_mmap_access
= nofp
->nof_mmap_deny
= 0;
3116 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, delegated
);
3117 nofp
->nof_mmap_access
= accessMode
;
3118 nofp
->nof_mmap_deny
= denyMode
;
3122 nfs_open_file_clear_busy(nofp
);
3124 if (nfs_mount_state_in_use_end(nmp
, error
)) {
3129 nfs_open_owner_rele(noop
);
3134 nfs_node_lock_force(np
);
3135 if ((np
->n_flag
& NISMAPPED
) == 0) {
3136 np
->n_flag
|= NISMAPPED
;
3139 nfs_node_unlock(np
);
3141 lck_mtx_lock(&nmp
->nm_lock
);
3142 nmp
->nm_state
&= ~NFSSTA_SQUISHY
;
3143 nmp
->nm_curdeadtimeout
= nmp
->nm_deadtimeout
;
3144 if (nmp
->nm_curdeadtimeout
<= 0) {
3145 nmp
->nm_deadto_start
= 0;
3148 lck_mtx_unlock(&nmp
->nm_lock
);
3158 struct vnop_mnomap_args
/* {
3159 * struct vnodeop_desc *a_desc;
3161 * vfs_context_t a_context;
3164 vfs_context_t ctx
= ap
->a_context
;
3165 vnode_t vp
= ap
->a_vp
;
3166 nfsnode_t np
= VTONFS(vp
);
3167 struct nfsmount
*nmp
;
3168 struct nfs_open_file
*nofp
= NULL
;
3171 int is_mapped_flag
= 0;
3174 if (nfs_mount_gone(nmp
)) {
3178 nfs_node_lock_force(np
);
3179 if (np
->n_flag
& NISMAPPED
) {
3181 np
->n_flag
&= ~NISMAPPED
;
3183 nfs_node_unlock(np
);
3184 if (is_mapped_flag
) {
3185 lck_mtx_lock(&nmp
->nm_lock
);
3186 if (nmp
->nm_mappers
) {
3189 NP(np
, "nfs_vnop_mnomap: removing mmap reference from mount, but mount has no files mmapped");
3191 lck_mtx_unlock(&nmp
->nm_lock
);
3194 /* flush buffers/ubc before we drop the open (in case it's our last open) */
3195 nfs_flush(np
, MNT_WAIT
, vfs_context_thread(ctx
), V_IGNORE_WRITEERR
);
3196 if (UBCINFOEXISTS(vp
) && (size
= ubc_getsize(vp
))) {
3197 ubc_msync(vp
, 0, size
, NULL
, UBC_PUSHALL
| UBC_SYNC
);
3200 /* walk all open files and close all mmap opens */
3202 error
= nfs_mount_state_in_use_start(nmp
, NULL
);
3206 lck_mtx_lock(&np
->n_openlock
);
3207 TAILQ_FOREACH(nofp
, &np
->n_opens
, nof_link
) {
3208 if (!nofp
->nof_mmap_access
) {
3211 lck_mtx_unlock(&np
->n_openlock
);
3213 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
3214 nfs_mount_state_in_use_end(nmp
, 0);
3215 error
= nfs4_reopen(nofp
, NULL
);
3222 error
= nfs_open_file_set_busy(nofp
, NULL
);
3225 lck_mtx_lock(&np
->n_openlock
);
3228 if (nofp
->nof_mmap_access
) {
3229 error
= nfs_close(np
, nofp
, nofp
->nof_mmap_access
, nofp
->nof_mmap_deny
, ctx
);
3230 if (!nfs_mount_state_error_should_restart(error
)) {
3231 if (error
) { /* not a state-operation-restarting error, so just clear the access */
3232 NP(np
, "nfs_vnop_mnomap: close of mmap mode failed: %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
3234 nofp
->nof_mmap_access
= nofp
->nof_mmap_deny
= 0;
3237 NP(np
, "nfs_vnop_mnomap: error %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
3240 nfs_open_file_clear_busy(nofp
);
3241 nfs_mount_state_in_use_end(nmp
, error
);
3244 lck_mtx_unlock(&np
->n_openlock
);
3245 nfs_mount_state_in_use_end(nmp
, error
);
3250 * Search a node's lock owner list for the owner for this process.
3251 * If not found and "alloc" is set, then allocate a new one.
3253 struct nfs_lock_owner
*
3254 nfs_lock_owner_find(nfsnode_t np
, proc_t p
, int alloc
)
3256 pid_t pid
= proc_pid(p
);
3257 struct nfs_lock_owner
*nlop
, *newnlop
= NULL
;
3260 lck_mtx_lock(&np
->n_openlock
);
3261 TAILQ_FOREACH(nlop
, &np
->n_lock_owners
, nlo_link
) {
3262 os_ref_count_t newcount
;
3264 if (nlop
->nlo_pid
!= pid
) {
3267 if (timevalcmp(&nlop
->nlo_pid_start
, &p
->p_start
, ==)) {
3270 /* stale lock owner... reuse it if we can */
3271 if (os_ref_get_count(&nlop
->nlo_refcnt
)) {
3272 TAILQ_REMOVE(&np
->n_lock_owners
, nlop
, nlo_link
);
3273 nlop
->nlo_flags
&= ~NFS_LOCK_OWNER_LINK
;
3274 newcount
= os_ref_release_locked(&nlop
->nlo_refcnt
);
3275 lck_mtx_unlock(&np
->n_openlock
);
3278 nlop
->nlo_pid_start
= p
->p_start
;
3279 nlop
->nlo_seqid
= 0;
3280 nlop
->nlo_stategenid
= 0;
3284 if (!nlop
&& !newnlop
&& alloc
) {
3285 lck_mtx_unlock(&np
->n_openlock
);
3286 MALLOC(newnlop
, struct nfs_lock_owner
*, sizeof(struct nfs_lock_owner
), M_TEMP
, M_WAITOK
);
3290 bzero(newnlop
, sizeof(*newnlop
));
3291 lck_mtx_init(&newnlop
->nlo_lock
, nfs_open_grp
, LCK_ATTR_NULL
);
3292 newnlop
->nlo_pid
= pid
;
3293 newnlop
->nlo_pid_start
= p
->p_start
;
3294 newnlop
->nlo_name
= OSAddAtomic(1, &nfs_lock_owner_seqnum
);
3295 TAILQ_INIT(&newnlop
->nlo_locks
);
3298 if (!nlop
&& newnlop
) {
3299 newnlop
->nlo_flags
|= NFS_LOCK_OWNER_LINK
;
3300 os_ref_init(&newnlop
->nlo_refcnt
, NULL
);
3301 TAILQ_INSERT_HEAD(&np
->n_lock_owners
, newnlop
, nlo_link
);
3304 lck_mtx_unlock(&np
->n_openlock
);
3306 if (newnlop
&& (nlop
!= newnlop
)) {
3307 nfs_lock_owner_destroy(newnlop
);
3311 nfs_lock_owner_ref(nlop
);
3318 * destroy a lock owner that's no longer needed
3321 nfs_lock_owner_destroy(struct nfs_lock_owner
*nlop
)
3323 if (nlop
->nlo_open_owner
) {
3324 nfs_open_owner_rele(nlop
->nlo_open_owner
);
3325 nlop
->nlo_open_owner
= NULL
;
3327 lck_mtx_destroy(&nlop
->nlo_lock
, nfs_open_grp
);
3332 * acquire a reference count on a lock owner
3335 nfs_lock_owner_ref(struct nfs_lock_owner
*nlop
)
3337 lck_mtx_lock(&nlop
->nlo_lock
);
3338 os_ref_retain_locked(&nlop
->nlo_refcnt
);
3339 lck_mtx_unlock(&nlop
->nlo_lock
);
3343 * drop a reference count on a lock owner and destroy it if
3344 * it is no longer referenced and no longer on the mount's list.
3347 nfs_lock_owner_rele(struct nfs_lock_owner
*nlop
)
3349 os_ref_count_t newcount
;
3351 lck_mtx_lock(&nlop
->nlo_lock
);
3352 if (os_ref_get_count(&nlop
->nlo_refcnt
) < 1) {
3353 panic("nfs_lock_owner_rele: no refcnt");
3355 newcount
= os_ref_release_locked(&nlop
->nlo_refcnt
);
3356 if (!newcount
&& (nlop
->nlo_flags
& NFS_LOCK_OWNER_BUSY
)) {
3357 panic("nfs_lock_owner_rele: busy");
3359 /* XXX we may potentially want to clean up idle/unused lock owner structures */
3360 if (newcount
|| (nlop
->nlo_flags
& NFS_LOCK_OWNER_LINK
)) {
3361 lck_mtx_unlock(&nlop
->nlo_lock
);
3364 /* owner is no longer referenced or linked to mount, so destroy it */
3365 lck_mtx_unlock(&nlop
->nlo_lock
);
3366 nfs_lock_owner_destroy(nlop
);
3370 * Mark a lock owner as busy because we are about to
3371 * start an operation that uses and updates lock owner state.
3374 nfs_lock_owner_set_busy(struct nfs_lock_owner
*nlop
, thread_t thd
)
3376 struct nfsmount
*nmp
;
3377 struct timespec ts
= { .tv_sec
= 2, .tv_nsec
= 0 };
3378 int error
= 0, slpflag
;
3380 nmp
= nlop
->nlo_open_owner
->noo_mount
;
3381 if (nfs_mount_gone(nmp
)) {
3384 slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
3386 lck_mtx_lock(&nlop
->nlo_lock
);
3387 while (nlop
->nlo_flags
& NFS_LOCK_OWNER_BUSY
) {
3388 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0))) {
3391 nlop
->nlo_flags
|= NFS_LOCK_OWNER_WANT
;
3392 msleep(nlop
, &nlop
->nlo_lock
, slpflag
, "nfs_lock_owner_set_busy", &ts
);
3396 nlop
->nlo_flags
|= NFS_LOCK_OWNER_BUSY
;
3398 lck_mtx_unlock(&nlop
->nlo_lock
);
3404 * Clear the busy flag on a lock owner and wake up anyone waiting
3408 nfs_lock_owner_clear_busy(struct nfs_lock_owner
*nlop
)
3412 lck_mtx_lock(&nlop
->nlo_lock
);
3413 if (!(nlop
->nlo_flags
& NFS_LOCK_OWNER_BUSY
)) {
3414 panic("nfs_lock_owner_clear_busy");
3416 wanted
= (nlop
->nlo_flags
& NFS_LOCK_OWNER_WANT
);
3417 nlop
->nlo_flags
&= ~(NFS_LOCK_OWNER_BUSY
| NFS_LOCK_OWNER_WANT
);
3418 lck_mtx_unlock(&nlop
->nlo_lock
);
3425 * Insert a held lock into a lock owner's sorted list.
3426 * (flock locks are always inserted at the head the list)
3429 nfs_lock_owner_insert_held_lock(struct nfs_lock_owner
*nlop
, struct nfs_file_lock
*newnflp
)
3431 struct nfs_file_lock
*nflp
;
3433 /* insert new lock in lock owner's held lock list */
3434 lck_mtx_lock(&nlop
->nlo_lock
);
3435 if ((newnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_FLOCK
) {
3436 TAILQ_INSERT_HEAD(&nlop
->nlo_locks
, newnflp
, nfl_lolink
);
3438 TAILQ_FOREACH(nflp
, &nlop
->nlo_locks
, nfl_lolink
) {
3439 if (newnflp
->nfl_start
< nflp
->nfl_start
) {
3444 TAILQ_INSERT_BEFORE(nflp
, newnflp
, nfl_lolink
);
3446 TAILQ_INSERT_TAIL(&nlop
->nlo_locks
, newnflp
, nfl_lolink
);
3449 lck_mtx_unlock(&nlop
->nlo_lock
);
3453 * Get a file lock structure for this lock owner.
3455 struct nfs_file_lock
*
3456 nfs_file_lock_alloc(struct nfs_lock_owner
*nlop
)
3458 struct nfs_file_lock
*nflp
= NULL
;
3460 lck_mtx_lock(&nlop
->nlo_lock
);
3461 if (!nlop
->nlo_alock
.nfl_owner
) {
3462 nflp
= &nlop
->nlo_alock
;
3463 nflp
->nfl_owner
= nlop
;
3465 lck_mtx_unlock(&nlop
->nlo_lock
);
3467 MALLOC(nflp
, struct nfs_file_lock
*, sizeof(struct nfs_file_lock
), M_TEMP
, M_WAITOK
);
3471 bzero(nflp
, sizeof(*nflp
));
3472 nflp
->nfl_flags
|= NFS_FILE_LOCK_ALLOC
;
3473 nflp
->nfl_owner
= nlop
;
3475 nfs_lock_owner_ref(nlop
);
3480 * destroy the given NFS file lock structure
3483 nfs_file_lock_destroy(struct nfs_file_lock
*nflp
)
3485 struct nfs_lock_owner
*nlop
= nflp
->nfl_owner
;
3487 if (nflp
->nfl_flags
& NFS_FILE_LOCK_ALLOC
) {
3488 nflp
->nfl_owner
= NULL
;
3491 lck_mtx_lock(&nlop
->nlo_lock
);
3492 bzero(nflp
, sizeof(*nflp
));
3493 lck_mtx_unlock(&nlop
->nlo_lock
);
3495 nfs_lock_owner_rele(nlop
);
3499 * Check if one file lock conflicts with another.
3500 * (nflp1 is the new lock. nflp2 is the existing lock.)
3503 nfs_file_lock_conflict(struct nfs_file_lock
*nflp1
, struct nfs_file_lock
*nflp2
, int *willsplit
)
3505 /* no conflict if lock is dead */
3506 if ((nflp1
->nfl_flags
& NFS_FILE_LOCK_DEAD
) || (nflp2
->nfl_flags
& NFS_FILE_LOCK_DEAD
)) {
3509 /* no conflict if it's ours - unless the lock style doesn't match */
3510 if ((nflp1
->nfl_owner
== nflp2
->nfl_owner
) &&
3511 ((nflp1
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == (nflp2
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
))) {
3512 if (willsplit
&& (nflp1
->nfl_type
!= nflp2
->nfl_type
) &&
3513 (nflp1
->nfl_start
> nflp2
->nfl_start
) &&
3514 (nflp1
->nfl_end
< nflp2
->nfl_end
)) {
3519 /* no conflict if ranges don't overlap */
3520 if ((nflp1
->nfl_start
> nflp2
->nfl_end
) || (nflp1
->nfl_end
< nflp2
->nfl_start
)) {
3523 /* no conflict if neither lock is exclusive */
3524 if ((nflp1
->nfl_type
!= F_WRLCK
) && (nflp2
->nfl_type
!= F_WRLCK
)) {
3533 * Send an NFSv4 LOCK RPC to the server.
3538 struct nfs_open_file
*nofp
,
3539 struct nfs_file_lock
*nflp
,
3545 struct nfs_lock_owner
*nlop
= nflp
->nfl_owner
;
3546 struct nfsmount
*nmp
;
3547 struct nfsm_chain nmreq
, nmrep
;
3550 int error
= 0, lockerror
= ENOENT
, newlocker
, numops
, status
;
3551 struct nfsreq_secinfo_args si
;
3554 if (nfs_mount_gone(nmp
)) {
3557 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
3561 newlocker
= (nlop
->nlo_stategenid
!= nmp
->nm_stategenid
);
3562 locktype
= (nflp
->nfl_flags
& NFS_FILE_LOCK_WAIT
) ?
3563 ((nflp
->nfl_type
== F_WRLCK
) ?
3564 NFS_LOCK_TYPE_WRITEW
:
3565 NFS_LOCK_TYPE_READW
) :
3566 ((nflp
->nfl_type
== F_WRLCK
) ?
3567 NFS_LOCK_TYPE_WRITE
:
3568 NFS_LOCK_TYPE_READ
);
3570 error
= nfs_open_file_set_busy(nofp
, thd
);
3574 error
= nfs_open_owner_set_busy(nofp
->nof_owner
, thd
);
3576 nfs_open_file_clear_busy(nofp
);
3579 if (!nlop
->nlo_open_owner
) {
3580 nfs_open_owner_ref(nofp
->nof_owner
);
3581 nlop
->nlo_open_owner
= nofp
->nof_owner
;
3584 error
= nfs_lock_owner_set_busy(nlop
, thd
);
3587 nfs_open_owner_clear_busy(nofp
->nof_owner
);
3588 nfs_open_file_clear_busy(nofp
);
3593 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
3594 nfsm_chain_null(&nmreq
);
3595 nfsm_chain_null(&nmrep
);
3597 // PUTFH, GETATTR, LOCK
3599 nfsm_chain_build_alloc_init(error
, &nmreq
, 33 * NFSX_UNSIGNED
);
3600 nfsm_chain_add_compound_header(error
, &nmreq
, "lock", nmp
->nm_minor_vers
, numops
);
3602 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
3603 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
3605 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
3606 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
3608 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOCK
);
3609 nfsm_chain_add_32(error
, &nmreq
, locktype
);
3610 nfsm_chain_add_32(error
, &nmreq
, reclaim
);
3611 nfsm_chain_add_64(error
, &nmreq
, nflp
->nfl_start
);
3612 nfsm_chain_add_64(error
, &nmreq
, NFS_LOCK_LENGTH(nflp
->nfl_start
, nflp
->nfl_end
));
3613 nfsm_chain_add_32(error
, &nmreq
, newlocker
);
3615 nfsm_chain_add_32(error
, &nmreq
, nofp
->nof_owner
->noo_seqid
);
3616 nfsm_chain_add_stateid(error
, &nmreq
, &nofp
->nof_stateid
);
3617 nfsm_chain_add_32(error
, &nmreq
, nlop
->nlo_seqid
);
3618 nfsm_chain_add_lock_owner4(error
, &nmreq
, nmp
, nlop
);
3620 nfsm_chain_add_stateid(error
, &nmreq
, &nlop
->nlo_stateid
);
3621 nfsm_chain_add_32(error
, &nmreq
, nlop
->nlo_seqid
);
3623 nfsm_chain_build_done(error
, &nmreq
);
3624 nfsm_assert(error
, (numops
== 0), EPROTO
);
3627 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, flags
| R_NOINTR
, &nmrep
, &xid
, &status
);
3629 if ((lockerror
= nfs_node_lock(np
))) {
3632 nfsm_chain_skip_tag(error
, &nmrep
);
3633 nfsm_chain_get_32(error
, &nmrep
, numops
);
3634 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
3636 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
3637 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, &xid
);
3639 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOCK
);
3640 nfs_owner_seqid_increment(newlocker
? nofp
->nof_owner
: NULL
, nlop
, error
);
3641 nfsm_chain_get_stateid(error
, &nmrep
, &nlop
->nlo_stateid
);
3643 /* Update the lock owner's stategenid once it appears the server has state for it. */
3644 /* We determine this by noting the request was successful (we got a stateid). */
3645 if (newlocker
&& !error
) {
3646 nlop
->nlo_stategenid
= nmp
->nm_stategenid
;
3650 nfs_node_unlock(np
);
3652 nfs_lock_owner_clear_busy(nlop
);
3654 nfs_open_owner_clear_busy(nofp
->nof_owner
);
3655 nfs_open_file_clear_busy(nofp
);
3657 nfsm_chain_cleanup(&nmreq
);
3658 nfsm_chain_cleanup(&nmrep
);
3663 * Send an NFSv4 LOCKU RPC to the server.
3668 struct nfs_lock_owner
*nlop
,
3676 struct nfsmount
*nmp
;
3677 struct nfsm_chain nmreq
, nmrep
;
3679 int error
= 0, lockerror
= ENOENT
, numops
, status
;
3680 struct nfsreq_secinfo_args si
;
3683 if (nfs_mount_gone(nmp
)) {
3686 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
3690 error
= nfs_lock_owner_set_busy(nlop
, NULL
);
3695 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
3696 nfsm_chain_null(&nmreq
);
3697 nfsm_chain_null(&nmrep
);
3699 // PUTFH, GETATTR, LOCKU
3701 nfsm_chain_build_alloc_init(error
, &nmreq
, 26 * NFSX_UNSIGNED
);
3702 nfsm_chain_add_compound_header(error
, &nmreq
, "unlock", nmp
->nm_minor_vers
, numops
);
3704 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
3705 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
3707 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
3708 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
3710 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOCKU
);
3711 nfsm_chain_add_32(error
, &nmreq
, (type
== F_WRLCK
) ? NFS_LOCK_TYPE_WRITE
: NFS_LOCK_TYPE_READ
);
3712 nfsm_chain_add_32(error
, &nmreq
, nlop
->nlo_seqid
);
3713 nfsm_chain_add_stateid(error
, &nmreq
, &nlop
->nlo_stateid
);
3714 nfsm_chain_add_64(error
, &nmreq
, start
);
3715 nfsm_chain_add_64(error
, &nmreq
, NFS_LOCK_LENGTH(start
, end
));
3716 nfsm_chain_build_done(error
, &nmreq
);
3717 nfsm_assert(error
, (numops
== 0), EPROTO
);
3720 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, flags
| R_NOINTR
, &nmrep
, &xid
, &status
);
3722 if ((lockerror
= nfs_node_lock(np
))) {
3725 nfsm_chain_skip_tag(error
, &nmrep
);
3726 nfsm_chain_get_32(error
, &nmrep
, numops
);
3727 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
3729 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
3730 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, &xid
);
3732 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOCKU
);
3733 nfs_owner_seqid_increment(NULL
, nlop
, error
);
3734 nfsm_chain_get_stateid(error
, &nmrep
, &nlop
->nlo_stateid
);
3737 nfs_node_unlock(np
);
3739 nfs_lock_owner_clear_busy(nlop
);
3740 nfsm_chain_cleanup(&nmreq
);
3741 nfsm_chain_cleanup(&nmrep
);
3746 * Send an NFSv4 LOCKT RPC to the server.
3751 struct nfs_lock_owner
*nlop
,
3757 struct nfsmount
*nmp
;
3758 struct nfsm_chain nmreq
, nmrep
;
3759 uint64_t xid
, val64
= 0;
3761 int error
= 0, lockerror
, numops
, status
;
3762 struct nfsreq_secinfo_args si
;
3765 if (nfs_mount_gone(nmp
)) {
3768 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
3773 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
3774 nfsm_chain_null(&nmreq
);
3775 nfsm_chain_null(&nmrep
);
3777 // PUTFH, GETATTR, LOCKT
3779 nfsm_chain_build_alloc_init(error
, &nmreq
, 26 * NFSX_UNSIGNED
);
3780 nfsm_chain_add_compound_header(error
, &nmreq
, "locktest", nmp
->nm_minor_vers
, numops
);
3782 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
3783 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
3785 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
3786 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
3788 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOCKT
);
3789 nfsm_chain_add_32(error
, &nmreq
, (fl
->l_type
== F_WRLCK
) ? NFS_LOCK_TYPE_WRITE
: NFS_LOCK_TYPE_READ
);
3790 nfsm_chain_add_64(error
, &nmreq
, start
);
3791 nfsm_chain_add_64(error
, &nmreq
, NFS_LOCK_LENGTH(start
, end
));
3792 nfsm_chain_add_lock_owner4(error
, &nmreq
, nmp
, nlop
);
3793 nfsm_chain_build_done(error
, &nmreq
);
3794 nfsm_assert(error
, (numops
== 0), EPROTO
);
3797 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
3799 if ((lockerror
= nfs_node_lock(np
))) {
3802 nfsm_chain_skip_tag(error
, &nmrep
);
3803 nfsm_chain_get_32(error
, &nmrep
, numops
);
3804 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
3806 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
3807 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, &xid
);
3809 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOCKT
);
3810 if (error
== NFSERR_DENIED
) {
3812 nfsm_chain_get_64(error
, &nmrep
, fl
->l_start
);
3813 nfsm_chain_get_64(error
, &nmrep
, val64
);
3814 fl
->l_len
= (val64
== UINT64_MAX
) ? 0 : val64
;
3815 nfsm_chain_get_32(error
, &nmrep
, val
);
3816 fl
->l_type
= (val
== NFS_LOCK_TYPE_WRITE
) ? F_WRLCK
: F_RDLCK
;
3818 fl
->l_whence
= SEEK_SET
;
3819 } else if (!error
) {
3820 fl
->l_type
= F_UNLCK
;
3824 nfs_node_unlock(np
);
3826 nfsm_chain_cleanup(&nmreq
);
3827 nfsm_chain_cleanup(&nmrep
);
3830 #endif /* CONFIG_NFS4 */
3833 * Check for any conflicts with the given lock.
3835 * Checking for a lock doesn't require the file to be opened.
3836 * So we skip all the open owner, open file, lock owner work
3837 * and just check for a conflicting lock.
3840 nfs_advlock_getlock(
3842 struct nfs_lock_owner
*nlop
,
3848 struct nfsmount
*nmp
;
3849 struct nfs_file_lock
*nflp
;
3850 int error
= 0, answered
= 0;
3853 if (nfs_mount_gone(nmp
)) {
3858 if ((error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
)))) {
3862 lck_mtx_lock(&np
->n_openlock
);
3863 /* scan currently held locks for conflict */
3864 TAILQ_FOREACH(nflp
, &np
->n_locks
, nfl_link
) {
3865 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
| NFS_FILE_LOCK_DEAD
)) {
3868 if ((start
<= nflp
->nfl_end
) && (end
>= nflp
->nfl_start
) &&
3869 ((fl
->l_type
== F_WRLCK
) || (nflp
->nfl_type
== F_WRLCK
))) {
3874 /* found a conflicting lock */
3875 fl
->l_type
= nflp
->nfl_type
;
3876 fl
->l_pid
= (nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_FLOCK
) ? -1 : nflp
->nfl_owner
->nlo_pid
;
3877 fl
->l_start
= nflp
->nfl_start
;
3878 fl
->l_len
= NFS_FLOCK_LENGTH(nflp
->nfl_start
, nflp
->nfl_end
);
3879 fl
->l_whence
= SEEK_SET
;
3881 } else if ((np
->n_openflags
& N_DELEG_WRITE
) && !(np
->n_openflags
& N_DELEG_RETURN
)) {
3883 * If we have a write delegation, we know there can't be other
3884 * locks on the server. So the answer is no conflicting lock found.
3886 fl
->l_type
= F_UNLCK
;
3889 lck_mtx_unlock(&np
->n_openlock
);
3891 nfs_mount_state_in_use_end(nmp
, 0);
3895 /* no conflict found locally, so ask the server */
3896 error
= nmp
->nm_funcs
->nf_getlock_rpc(np
, nlop
, fl
, start
, end
, ctx
);
3898 if (nfs_mount_state_in_use_end(nmp
, error
)) {
3905 * Acquire a file lock for the given range.
3907 * Add the lock (request) to the lock queue.
3908 * Scan the lock queue for any conflicting locks.
3909 * If a conflict is found, block or return an error.
3910 * Once end of queue is reached, send request to the server.
3911 * If the server grants the lock, scan the lock queue and
3912 * update any existing locks. Then (optionally) scan the
3913 * queue again to coalesce any locks adjacent to the new one.
3916 nfs_advlock_setlock(
3918 struct nfs_open_file
*nofp
,
3919 struct nfs_lock_owner
*nlop
,
3927 struct nfsmount
*nmp
;
3928 struct nfs_file_lock
*newnflp
, *nflp
, *nflp2
= NULL
, *nextnflp
, *flocknflp
= NULL
;
3929 struct nfs_file_lock
*coalnflp
;
3930 int error
= 0, error2
, willsplit
= 0, delay
, slpflag
, busy
= 0, inuse
= 0, restart
, inqueue
= 0;
3931 struct timespec ts
= { .tv_sec
= 1, .tv_nsec
= 0 };
3934 if (nfs_mount_gone(nmp
)) {
3937 slpflag
= NMFLAG(nmp
, INTR
) ? PCATCH
: 0;
3939 if ((type
!= F_RDLCK
) && (type
!= F_WRLCK
)) {
3943 /* allocate a new lock */
3944 newnflp
= nfs_file_lock_alloc(nlop
);
3948 newnflp
->nfl_start
= start
;
3949 newnflp
->nfl_end
= end
;
3950 newnflp
->nfl_type
= type
;
3951 if (op
== F_SETLKW
) {
3952 newnflp
->nfl_flags
|= NFS_FILE_LOCK_WAIT
;
3954 newnflp
->nfl_flags
|= style
;
3955 newnflp
->nfl_flags
|= NFS_FILE_LOCK_BLOCKED
;
3957 if ((style
== NFS_FILE_LOCK_STYLE_FLOCK
) && (type
== F_WRLCK
)) {
3959 * For exclusive flock-style locks, if we block waiting for the
3960 * lock, we need to first release any currently held shared
3961 * flock-style lock. So, the first thing we do is check if we
3962 * have a shared flock-style lock.
3964 nflp
= TAILQ_FIRST(&nlop
->nlo_locks
);
3965 if (nflp
&& ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != NFS_FILE_LOCK_STYLE_FLOCK
)) {
3968 if (nflp
&& (nflp
->nfl_type
!= F_RDLCK
)) {
3976 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
3981 if (np
->n_flag
& NREVOKE
) {
3983 nfs_mount_state_in_use_end(nmp
, 0);
3988 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
3989 nfs_mount_state_in_use_end(nmp
, 0);
3991 error
= nfs4_reopen(nofp
, vfs_context_thread(ctx
));
3999 lck_mtx_lock(&np
->n_openlock
);
4001 /* insert new lock at beginning of list */
4002 TAILQ_INSERT_HEAD(&np
->n_locks
, newnflp
, nfl_link
);
4006 /* scan current list of locks (held and pending) for conflicts */
4007 for (nflp
= TAILQ_NEXT(newnflp
, nfl_link
); nflp
; nflp
= nextnflp
) {
4008 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
4009 if (!nfs_file_lock_conflict(newnflp
, nflp
, &willsplit
)) {
4013 if (!(newnflp
->nfl_flags
& NFS_FILE_LOCK_WAIT
)) {
4017 /* Block until this lock is no longer held. */
4018 if (nflp
->nfl_blockcnt
== UINT_MAX
) {
4022 nflp
->nfl_blockcnt
++;
4025 /* release any currently held shared lock before sleeping */
4026 lck_mtx_unlock(&np
->n_openlock
);
4027 nfs_mount_state_in_use_end(nmp
, 0);
4029 error
= nfs_advlock_unlock(np
, nofp
, nlop
, 0, UINT64_MAX
, NFS_FILE_LOCK_STYLE_FLOCK
, ctx
);
4032 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
4035 lck_mtx_lock(&np
->n_openlock
);
4039 lck_mtx_lock(&np
->n_openlock
);
4040 /* no need to block/sleep if the conflict is gone */
4041 if (!nfs_file_lock_conflict(newnflp
, nflp
, NULL
)) {
4045 msleep(nflp
, &np
->n_openlock
, slpflag
, "nfs_advlock_setlock_blocked", &ts
);
4047 error
= nfs_sigintr(NFSTONMP(np
), NULL
, vfs_context_thread(ctx
), 0);
4048 if (!error
&& (nmp
->nm_state
& NFSSTA_RECOVER
)) {
4049 /* looks like we have a recover pending... restart */
4051 lck_mtx_unlock(&np
->n_openlock
);
4052 nfs_mount_state_in_use_end(nmp
, 0);
4054 lck_mtx_lock(&np
->n_openlock
);
4057 if (!error
&& (np
->n_flag
& NREVOKE
)) {
4060 } while (!error
&& nfs_file_lock_conflict(newnflp
, nflp
, NULL
));
4061 nflp
->nfl_blockcnt
--;
4062 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
) && !nflp
->nfl_blockcnt
) {
4063 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
4064 nfs_file_lock_destroy(nflp
);
4066 if (error
|| restart
) {
4069 /* We have released n_openlock and we can't trust that nextnflp is still valid. */
4070 /* So, start this lock-scanning loop over from where it started. */
4071 nextnflp
= TAILQ_NEXT(newnflp
, nfl_link
);
4073 lck_mtx_unlock(&np
->n_openlock
);
4083 * It looks like this operation is splitting a lock.
4084 * We allocate a new lock now so we don't have to worry
4085 * about the allocation failing after we've updated some state.
4087 nflp2
= nfs_file_lock_alloc(nlop
);
4094 /* once scan for local conflicts is clear, send request to server */
4095 if ((error
= nfs_open_state_set_busy(np
, vfs_context_thread(ctx
)))) {
4102 /* do we have a delegation? (that we're not returning?) */
4103 if ((np
->n_openflags
& N_DELEG_MASK
) && !(np
->n_openflags
& N_DELEG_RETURN
)) {
4104 if (np
->n_openflags
& N_DELEG_WRITE
) {
4105 /* with a write delegation, just take the lock delegated */
4106 newnflp
->nfl_flags
|= NFS_FILE_LOCK_DELEGATED
;
4108 /* make sure the lock owner knows its open owner */
4109 if (!nlop
->nlo_open_owner
) {
4110 nfs_open_owner_ref(nofp
->nof_owner
);
4111 nlop
->nlo_open_owner
= nofp
->nof_owner
;
4116 * If we don't have any non-delegated opens but we do have
4117 * delegated opens, then we need to first claim the delegated
4118 * opens so that the lock request on the server can be associated
4119 * with an open it knows about.
4121 if ((!nofp
->nof_rw_drw
&& !nofp
->nof_w_drw
&& !nofp
->nof_r_drw
&&
4122 !nofp
->nof_rw_dw
&& !nofp
->nof_w_dw
&& !nofp
->nof_r_dw
&&
4123 !nofp
->nof_rw
&& !nofp
->nof_w
&& !nofp
->nof_r
) &&
4124 (nofp
->nof_d_rw_drw
|| nofp
->nof_d_w_drw
|| nofp
->nof_d_r_drw
||
4125 nofp
->nof_d_rw_dw
|| nofp
->nof_d_w_dw
|| nofp
->nof_d_r_dw
||
4126 nofp
->nof_d_rw
|| nofp
->nof_d_w
|| nofp
->nof_d_r
)) {
4127 error
= nfs4_claim_delegated_state_for_open_file(nofp
, 0);
4135 if (np
->n_flag
& NREVOKE
) {
4139 error
= nmp
->nm_funcs
->nf_setlock_rpc(np
, nofp
, newnflp
, 0, 0, vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4141 if (!error
|| ((error
!= NFSERR_DENIED
) && (error
!= NFSERR_GRACE
))) {
4144 /* request was denied due to either conflict or grace period */
4145 if ((error
== NFSERR_DENIED
) && !(newnflp
->nfl_flags
& NFS_FILE_LOCK_WAIT
)) {
4150 /* release any currently held shared lock before sleeping */
4151 nfs_open_state_clear_busy(np
);
4153 nfs_mount_state_in_use_end(nmp
, 0);
4155 error2
= nfs_advlock_unlock(np
, nofp
, nlop
, 0, UINT64_MAX
, NFS_FILE_LOCK_STYLE_FLOCK
, ctx
);
4158 error2
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
4162 error2
= nfs_open_state_set_busy(np
, vfs_context_thread(ctx
));
4171 * Wait a little bit and send the request again.
4172 * Except for retries of blocked v2/v3 request where we've already waited a bit.
4174 if ((nmp
->nm_vers
>= NFS_VER4
) || (error
== NFSERR_GRACE
)) {
4175 if (error
== NFSERR_GRACE
) {
4181 tsleep(newnflp
, slpflag
, "nfs_advlock_setlock_delay", delay
* (hz
/ 2));
4184 error
= nfs_sigintr(NFSTONMP(np
), NULL
, vfs_context_thread(ctx
), 0);
4185 if (!error
&& (nmp
->nm_state
& NFSSTA_RECOVER
)) {
4186 /* looks like we have a recover pending... restart */
4187 nfs_open_state_clear_busy(np
);
4189 nfs_mount_state_in_use_end(nmp
, 0);
4193 if (!error
&& (np
->n_flag
& NREVOKE
)) {
4199 if (nfs_mount_state_error_should_restart(error
)) {
4200 /* looks like we need to restart this operation */
4202 nfs_open_state_clear_busy(np
);
4206 nfs_mount_state_in_use_end(nmp
, error
);
4211 lck_mtx_lock(&np
->n_openlock
);
4212 newnflp
->nfl_flags
&= ~NFS_FILE_LOCK_BLOCKED
;
4214 newnflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
4215 if (newnflp
->nfl_blockcnt
) {
4216 /* wake up anyone blocked on this lock */
4219 /* remove newnflp from lock list and destroy */
4221 TAILQ_REMOVE(&np
->n_locks
, newnflp
, nfl_link
);
4223 nfs_file_lock_destroy(newnflp
);
4225 lck_mtx_unlock(&np
->n_openlock
);
4227 nfs_open_state_clear_busy(np
);
4230 nfs_mount_state_in_use_end(nmp
, error
);
4233 nfs_file_lock_destroy(nflp2
);
4238 /* server granted the lock */
4241 * Scan for locks to update.
4243 * Locks completely covered are killed.
4244 * At most two locks may need to be clipped.
4245 * It's possible that a single lock may need to be split.
4247 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
4248 if (nflp
== newnflp
) {
4251 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
| NFS_FILE_LOCK_DEAD
)) {
4254 if (nflp
->nfl_owner
!= nlop
) {
4257 if ((newnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != (nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
)) {
4260 if ((newnflp
->nfl_start
> nflp
->nfl_end
) || (newnflp
->nfl_end
< nflp
->nfl_start
)) {
4263 /* here's one to update */
4264 if ((newnflp
->nfl_start
<= nflp
->nfl_start
) && (newnflp
->nfl_end
>= nflp
->nfl_end
)) {
4265 /* The entire lock is being replaced. */
4266 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
4267 lck_mtx_lock(&nlop
->nlo_lock
);
4268 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
4269 lck_mtx_unlock(&nlop
->nlo_lock
);
4270 /* lock will be destroyed below, if no waiters */
4271 } else if ((newnflp
->nfl_start
> nflp
->nfl_start
) && (newnflp
->nfl_end
< nflp
->nfl_end
)) {
4272 /* We're replacing a range in the middle of a lock. */
4273 /* The current lock will be split into two locks. */
4274 /* Update locks and insert new lock after current lock. */
4275 nflp2
->nfl_flags
|= (nflp
->nfl_flags
& (NFS_FILE_LOCK_STYLE_MASK
| NFS_FILE_LOCK_DELEGATED
));
4276 nflp2
->nfl_type
= nflp
->nfl_type
;
4277 nflp2
->nfl_start
= newnflp
->nfl_end
+ 1;
4278 nflp2
->nfl_end
= nflp
->nfl_end
;
4279 nflp
->nfl_end
= newnflp
->nfl_start
- 1;
4280 TAILQ_INSERT_AFTER(&np
->n_locks
, nflp
, nflp2
, nfl_link
);
4281 nfs_lock_owner_insert_held_lock(nlop
, nflp2
);
4284 } else if (newnflp
->nfl_start
> nflp
->nfl_start
) {
4285 /* We're replacing the end of a lock. */
4286 nflp
->nfl_end
= newnflp
->nfl_start
- 1;
4287 } else if (newnflp
->nfl_end
< nflp
->nfl_end
) {
4288 /* We're replacing the start of a lock. */
4289 nflp
->nfl_start
= newnflp
->nfl_end
+ 1;
4291 if (nflp
->nfl_blockcnt
) {
4292 /* wake up anyone blocked on this lock */
4294 } else if (nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
) {
4295 /* remove nflp from lock list and destroy */
4296 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
4297 nfs_file_lock_destroy(nflp
);
4301 nfs_lock_owner_insert_held_lock(nlop
, newnflp
);
4304 * POSIX locks should be coalesced when possible.
4306 if ((style
== NFS_FILE_LOCK_STYLE_POSIX
) && (nofp
->nof_flags
& NFS_OPEN_FILE_POSIXLOCK
)) {
4308 * Walk through the lock queue and check each of our held locks with
4309 * the previous and next locks in the lock owner's "held lock list".
4310 * If the two locks can be coalesced, we merge the current lock into
4311 * the other (previous or next) lock. Merging this way makes sure that
4312 * lock ranges are always merged forward in the lock queue. This is
4313 * important because anyone blocked on the lock being "merged away"
4314 * will still need to block on that range and it will simply continue
4315 * checking locks that are further down the list.
4317 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
4318 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
| NFS_FILE_LOCK_DEAD
)) {
4321 if (nflp
->nfl_owner
!= nlop
) {
4324 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != NFS_FILE_LOCK_STYLE_POSIX
) {
4327 if (((coalnflp
= TAILQ_PREV(nflp
, nfs_file_lock_queue
, nfl_lolink
))) &&
4328 ((coalnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
) &&
4329 (coalnflp
->nfl_type
== nflp
->nfl_type
) &&
4330 (coalnflp
->nfl_end
== (nflp
->nfl_start
- 1))) {
4331 coalnflp
->nfl_end
= nflp
->nfl_end
;
4332 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
4333 lck_mtx_lock(&nlop
->nlo_lock
);
4334 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
4335 lck_mtx_unlock(&nlop
->nlo_lock
);
4336 } else if (((coalnflp
= TAILQ_NEXT(nflp
, nfl_lolink
))) &&
4337 ((coalnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
) &&
4338 (coalnflp
->nfl_type
== nflp
->nfl_type
) &&
4339 (coalnflp
->nfl_start
== (nflp
->nfl_end
+ 1))) {
4340 coalnflp
->nfl_start
= nflp
->nfl_start
;
4341 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
4342 lck_mtx_lock(&nlop
->nlo_lock
);
4343 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
4344 lck_mtx_unlock(&nlop
->nlo_lock
);
4346 if (!(nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
)) {
4349 if (nflp
->nfl_blockcnt
) {
4350 /* wake up anyone blocked on this lock */
4353 /* remove nflp from lock list and destroy */
4354 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
4355 nfs_file_lock_destroy(nflp
);
4360 lck_mtx_unlock(&np
->n_openlock
);
4361 nfs_open_state_clear_busy(np
);
4362 nfs_mount_state_in_use_end(nmp
, error
);
4365 nfs_file_lock_destroy(nflp2
);
4371 * Release all (same style) locks within the given range.
4376 struct nfs_open_file
*nofp
4381 struct nfs_lock_owner
*nlop
,
4387 struct nfsmount
*nmp
;
4388 struct nfs_file_lock
*nflp
, *nextnflp
, *newnflp
= NULL
;
4389 int error
= 0, willsplit
= 0, send_unlock_rpcs
= 1;
4392 if (nfs_mount_gone(nmp
)) {
4397 if ((error
= nfs_mount_state_in_use_start(nmp
, NULL
))) {
4401 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
4402 nfs_mount_state_in_use_end(nmp
, 0);
4403 error
= nfs4_reopen(nofp
, NULL
);
4410 if ((error
= nfs_open_state_set_busy(np
, NULL
))) {
4411 nfs_mount_state_in_use_end(nmp
, error
);
4415 lck_mtx_lock(&np
->n_openlock
);
4416 if ((start
> 0) && (end
< UINT64_MAX
) && !willsplit
) {
4418 * We may need to allocate a new lock if an existing lock gets split.
4419 * So, we first scan the list to check for a split, and if there's
4420 * going to be one, we'll allocate one now.
4422 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
4423 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
| NFS_FILE_LOCK_DEAD
)) {
4426 if (nflp
->nfl_owner
!= nlop
) {
4429 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != style
) {
4432 if ((start
> nflp
->nfl_end
) || (end
< nflp
->nfl_start
)) {
4435 if ((start
> nflp
->nfl_start
) && (end
< nflp
->nfl_end
)) {
4441 lck_mtx_unlock(&np
->n_openlock
);
4442 nfs_open_state_clear_busy(np
);
4443 nfs_mount_state_in_use_end(nmp
, 0);
4444 newnflp
= nfs_file_lock_alloc(nlop
);
4453 * Free all of our locks in the given range.
4455 * Note that this process requires sending requests to the server.
4456 * Because of this, we will release the n_openlock while performing
4457 * the unlock RPCs. The N_OPENBUSY state keeps the state of *held*
4458 * locks from changing underneath us. However, other entries in the
4459 * list may be removed. So we need to be careful walking the list.
4463 * Don't unlock ranges that are held by other-style locks.
4464 * If style is posix, don't send any unlock rpcs if flock is held.
4465 * If we unlock an flock, don't send unlock rpcs for any posix-style
4466 * ranges held - instead send unlocks for the ranges not held.
4468 if ((style
== NFS_FILE_LOCK_STYLE_POSIX
) &&
4469 ((nflp
= TAILQ_FIRST(&nlop
->nlo_locks
))) &&
4470 ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_FLOCK
)) {
4471 send_unlock_rpcs
= 0;
4473 if ((style
== NFS_FILE_LOCK_STYLE_FLOCK
) &&
4474 ((nflp
= TAILQ_FIRST(&nlop
->nlo_locks
))) &&
4475 ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_FLOCK
) &&
4476 ((nflp
= TAILQ_NEXT(nflp
, nfl_lolink
))) &&
4477 ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
)) {
4479 int type
= TAILQ_FIRST(&nlop
->nlo_locks
)->nfl_type
;
4480 int delegated
= (TAILQ_FIRST(&nlop
->nlo_locks
)->nfl_flags
& NFS_FILE_LOCK_DELEGATED
);
4481 while (!delegated
&& nflp
) {
4482 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
) {
4483 /* unlock the range preceding this lock */
4484 lck_mtx_unlock(&np
->n_openlock
);
4485 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, type
, s
, nflp
->nfl_start
- 1, 0,
4486 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4487 if (nfs_mount_state_error_should_restart(error
)) {
4488 nfs_open_state_clear_busy(np
);
4489 nfs_mount_state_in_use_end(nmp
, error
);
4492 lck_mtx_lock(&np
->n_openlock
);
4496 s
= nflp
->nfl_end
+ 1;
4498 nflp
= TAILQ_NEXT(nflp
, nfl_lolink
);
4501 lck_mtx_unlock(&np
->n_openlock
);
4502 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, type
, s
, end
, 0,
4503 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4504 if (nfs_mount_state_error_should_restart(error
)) {
4505 nfs_open_state_clear_busy(np
);
4506 nfs_mount_state_in_use_end(nmp
, error
);
4509 lck_mtx_lock(&np
->n_openlock
);
4514 send_unlock_rpcs
= 0;
4517 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
4518 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
| NFS_FILE_LOCK_DEAD
)) {
4521 if (nflp
->nfl_owner
!= nlop
) {
4524 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != style
) {
4527 if ((start
> nflp
->nfl_end
) || (end
< nflp
->nfl_start
)) {
4530 /* here's one to unlock */
4531 if ((start
<= nflp
->nfl_start
) && (end
>= nflp
->nfl_end
)) {
4532 /* The entire lock is being unlocked. */
4533 if (send_unlock_rpcs
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
4534 lck_mtx_unlock(&np
->n_openlock
);
4535 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, nflp
->nfl_type
, nflp
->nfl_start
, nflp
->nfl_end
, 0,
4536 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4537 if (nfs_mount_state_error_should_restart(error
)) {
4538 nfs_open_state_clear_busy(np
);
4539 nfs_mount_state_in_use_end(nmp
, error
);
4542 lck_mtx_lock(&np
->n_openlock
);
4544 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
4548 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
4549 lck_mtx_lock(&nlop
->nlo_lock
);
4550 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
4551 lck_mtx_unlock(&nlop
->nlo_lock
);
4552 /* lock will be destroyed below, if no waiters */
4553 } else if ((start
> nflp
->nfl_start
) && (end
< nflp
->nfl_end
)) {
4554 /* We're unlocking a range in the middle of a lock. */
4555 /* The current lock will be split into two locks. */
4556 if (send_unlock_rpcs
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
4557 lck_mtx_unlock(&np
->n_openlock
);
4558 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, nflp
->nfl_type
, start
, end
, 0,
4559 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4560 if (nfs_mount_state_error_should_restart(error
)) {
4561 nfs_open_state_clear_busy(np
);
4562 nfs_mount_state_in_use_end(nmp
, error
);
4565 lck_mtx_lock(&np
->n_openlock
);
4570 /* update locks and insert new lock after current lock */
4571 newnflp
->nfl_flags
|= (nflp
->nfl_flags
& (NFS_FILE_LOCK_STYLE_MASK
| NFS_FILE_LOCK_DELEGATED
));
4572 newnflp
->nfl_type
= nflp
->nfl_type
;
4573 newnflp
->nfl_start
= end
+ 1;
4574 newnflp
->nfl_end
= nflp
->nfl_end
;
4575 nflp
->nfl_end
= start
- 1;
4576 TAILQ_INSERT_AFTER(&np
->n_locks
, nflp
, newnflp
, nfl_link
);
4577 nfs_lock_owner_insert_held_lock(nlop
, newnflp
);
4580 } else if (start
> nflp
->nfl_start
) {
4581 /* We're unlocking the end of a lock. */
4582 if (send_unlock_rpcs
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
4583 lck_mtx_unlock(&np
->n_openlock
);
4584 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, nflp
->nfl_type
, start
, nflp
->nfl_end
, 0,
4585 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4586 if (nfs_mount_state_error_should_restart(error
)) {
4587 nfs_open_state_clear_busy(np
);
4588 nfs_mount_state_in_use_end(nmp
, error
);
4591 lck_mtx_lock(&np
->n_openlock
);
4593 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
4597 nflp
->nfl_end
= start
- 1;
4598 } else if (end
< nflp
->nfl_end
) {
4599 /* We're unlocking the start of a lock. */
4600 if (send_unlock_rpcs
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
4601 lck_mtx_unlock(&np
->n_openlock
);
4602 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, nflp
->nfl_type
, nflp
->nfl_start
, end
, 0,
4603 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4604 if (nfs_mount_state_error_should_restart(error
)) {
4605 nfs_open_state_clear_busy(np
);
4606 nfs_mount_state_in_use_end(nmp
, error
);
4609 lck_mtx_lock(&np
->n_openlock
);
4611 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
4615 nflp
->nfl_start
= end
+ 1;
4617 if (nflp
->nfl_blockcnt
) {
4618 /* wake up anyone blocked on this lock */
4620 } else if (nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
) {
4621 /* remove nflp from lock list and destroy */
4622 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
4623 nfs_file_lock_destroy(nflp
);
4627 lck_mtx_unlock(&np
->n_openlock
);
4628 nfs_open_state_clear_busy(np
);
4629 nfs_mount_state_in_use_end(nmp
, 0);
4632 nfs_file_lock_destroy(newnflp
);
4638 * NFSv4 advisory file locking
4642 struct vnop_advlock_args
/* {
4643 * struct vnodeop_desc *a_desc;
4647 * struct flock *a_fl;
4649 * vfs_context_t a_context;
4652 vnode_t vp
= ap
->a_vp
;
4653 nfsnode_t np
= VTONFS(ap
->a_vp
);
4654 struct flock
*fl
= ap
->a_fl
;
4656 int flags
= ap
->a_flags
;
4657 vfs_context_t ctx
= ap
->a_context
;
4658 struct nfsmount
*nmp
;
4659 struct nfs_open_owner
*noop
= NULL
;
4660 struct nfs_open_file
*nofp
= NULL
;
4661 struct nfs_lock_owner
*nlop
= NULL
;
4663 uint64_t start
, end
;
4664 int error
= 0, modified
, style
;
4666 #define OFF_MAX QUAD_MAX
4668 nmp
= VTONMP(ap
->a_vp
);
4669 if (nfs_mount_gone(nmp
)) {
4672 lck_mtx_lock(&nmp
->nm_lock
);
4673 if ((nmp
->nm_vers
<= NFS_VER3
) && (nmp
->nm_lockmode
== NFS_LOCK_MODE_DISABLED
)) {
4674 lck_mtx_unlock(&nmp
->nm_lock
);
4677 lck_mtx_unlock(&nmp
->nm_lock
);
4679 if (np
->n_flag
& NREVOKE
) {
4682 vtype
= vnode_vtype(ap
->a_vp
);
4683 if (vtype
== VDIR
) { /* ignore lock requests on directories */
4686 if (vtype
!= VREG
) { /* anything other than regular files is invalid */
4690 /* Convert the flock structure into a start and end. */
4691 switch (fl
->l_whence
) {
4695 * Caller is responsible for adding any necessary offset
4696 * to fl->l_start when SEEK_CUR is used.
4698 lstart
= fl
->l_start
;
4701 /* need to flush, and refetch attributes to make */
4702 /* sure we have the correct end of file offset */
4703 if ((error
= nfs_node_lock(np
))) {
4706 modified
= (np
->n_flag
& NMODIFIED
);
4707 nfs_node_unlock(np
);
4708 if (modified
&& ((error
= nfs_vinvalbuf(vp
, V_SAVE
, ctx
, 1)))) {
4711 if ((error
= nfs_getattr(np
, NULL
, ctx
, NGA_UNCACHED
))) {
4714 nfs_data_lock(np
, NFS_DATA_LOCK_SHARED
);
4715 if ((np
->n_size
> OFF_MAX
) ||
4716 ((fl
->l_start
> 0) && (np
->n_size
> (u_quad_t
)(OFF_MAX
- fl
->l_start
)))) {
4719 lstart
= np
->n_size
+ fl
->l_start
;
4720 nfs_data_unlock(np
);
4732 if (fl
->l_len
== 0) {
4734 } else if (fl
->l_len
> 0) {
4735 if ((fl
->l_len
- 1) > (OFF_MAX
- lstart
)) {
4738 end
= start
- 1 + fl
->l_len
;
4739 } else { /* l_len is negative */
4740 if ((lstart
+ fl
->l_len
) < 0) {
4746 if ((nmp
->nm_vers
== NFS_VER2
) && ((start
> INT32_MAX
) || (fl
->l_len
&& (end
> INT32_MAX
)))) {
4750 style
= (flags
& F_FLOCK
) ? NFS_FILE_LOCK_STYLE_FLOCK
: NFS_FILE_LOCK_STYLE_POSIX
;
4751 if ((style
== NFS_FILE_LOCK_STYLE_FLOCK
) && ((start
!= 0) || (end
!= UINT64_MAX
))) {
4755 /* find the lock owner, alloc if not unlock */
4756 nlop
= nfs_lock_owner_find(np
, vfs_context_proc(ctx
), (op
!= F_UNLCK
));
4758 error
= (op
== F_UNLCK
) ? 0 : ENOMEM
;
4760 NP(np
, "nfs_vnop_advlock: no lock owner, error %d", error
);
4765 if (op
== F_GETLK
) {
4766 error
= nfs_advlock_getlock(np
, nlop
, fl
, start
, end
, ctx
);
4768 /* find the open owner */
4769 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 0);
4771 NP(np
, "nfs_vnop_advlock: no open owner %d", kauth_cred_getuid(vfs_context_ucred(ctx
)));
4775 /* find the open file */
4779 error
= nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 0);
4783 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
4784 NP(np
, "nfs_vnop_advlock: LOST %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
4788 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
4789 error
= nfs4_reopen(nofp
, ((op
== F_UNLCK
) ? NULL
: vfs_context_thread(ctx
)));
4797 NP(np
, "nfs_vnop_advlock: no open file %d, %d", error
, kauth_cred_getuid(noop
->noo_cred
));
4800 if (op
== F_UNLCK
) {
4801 error
= nfs_advlock_unlock(np
, nofp
, nlop
, start
, end
, style
, ctx
);
4802 } else if ((op
== F_SETLK
) || (op
== F_SETLKW
)) {
4803 if ((op
== F_SETLK
) && (flags
& F_WAIT
)) {
4806 error
= nfs_advlock_setlock(np
, nofp
, nlop
, op
, start
, end
, style
, fl
->l_type
, ctx
);
4808 /* not getlk, unlock or lock? */
4815 nfs_lock_owner_rele(nlop
);
4818 nfs_open_owner_rele(noop
);
4824 * Check if an open owner holds any locks on a file.
4827 nfs_check_for_locks(struct nfs_open_owner
*noop
, struct nfs_open_file
*nofp
)
4829 struct nfs_lock_owner
*nlop
;
4831 TAILQ_FOREACH(nlop
, &nofp
->nof_np
->n_lock_owners
, nlo_link
) {
4832 if (nlop
->nlo_open_owner
!= noop
) {
4835 if (!TAILQ_EMPTY(&nlop
->nlo_locks
)) {
4839 return nlop
? 1 : 0;
4844 * Reopen simple (no deny, no locks) open state that was lost.
4847 nfs4_reopen(struct nfs_open_file
*nofp
, thread_t thd
)
4849 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
4850 struct nfsmount
*nmp
= NFSTONMP(nofp
->nof_np
);
4851 nfsnode_t np
= nofp
->nof_np
;
4852 vnode_t vp
= NFSTOV(np
);
4854 struct componentname cn
;
4855 const char *vname
= NULL
;
4856 const char *name
= NULL
;
4858 char smallname
[128];
4859 char *filename
= NULL
;
4860 int error
= 0, done
= 0, slpflag
= NMFLAG(nmp
, INTR
) ? PCATCH
: 0;
4861 struct timespec ts
= { .tv_sec
= 1, .tv_nsec
= 0 };
4863 lck_mtx_lock(&nofp
->nof_lock
);
4864 while (nofp
->nof_flags
& NFS_OPEN_FILE_REOPENING
) {
4865 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0))) {
4868 msleep(&nofp
->nof_flags
, &nofp
->nof_lock
, slpflag
| (PZERO
- 1), "nfsreopenwait", &ts
);
4871 if (error
|| !(nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
4872 lck_mtx_unlock(&nofp
->nof_lock
);
4875 nofp
->nof_flags
|= NFS_OPEN_FILE_REOPENING
;
4876 lck_mtx_unlock(&nofp
->nof_lock
);
4878 nfs_node_lock_force(np
);
4879 if ((vnode_vtype(vp
) != VDIR
) && np
->n_sillyrename
) {
4881 * The node's been sillyrenamed, so we need to use
4882 * the sillyrename directory/name to do the open.
4884 struct nfs_sillyrename
*nsp
= np
->n_sillyrename
;
4885 dvp
= NFSTOV(nsp
->nsr_dnp
);
4886 if ((error
= vnode_get(dvp
))) {
4888 nfs_node_unlock(np
);
4891 name
= nsp
->nsr_name
;
4894 * [sigh] We can't trust VFS to get the parent right for named
4895 * attribute nodes. (It likes to reparent the nodes after we've
4896 * created them.) Luckily we can probably get the right parent
4897 * from the n_parent we have stashed away.
4899 if ((np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) &&
4900 (((dvp
= np
->n_parent
)) && (error
= vnode_get(dvp
)))) {
4904 dvp
= vnode_getparent(vp
);
4906 vname
= vnode_getname(vp
);
4907 if (!dvp
|| !vname
) {
4911 nfs_node_unlock(np
);
4916 filename
= &smallname
[0];
4917 namelen
= snprintf(filename
, sizeof(smallname
), "%s", name
);
4918 if (namelen
>= sizeof(smallname
)) {
4919 MALLOC(filename
, char *, namelen
+ 1, M_TEMP
, M_WAITOK
);
4924 snprintf(filename
, namelen
+ 1, "%s", name
);
4926 nfs_node_unlock(np
);
4927 bzero(&cn
, sizeof(cn
));
4928 cn
.cn_nameptr
= filename
;
4929 cn
.cn_namelen
= namelen
;
4933 if ((error
= nfs_mount_state_in_use_start(nmp
, thd
))) {
4938 error
= nfs4_open_reopen_rpc(nofp
, thd
, noop
->noo_cred
, &cn
, dvp
, &vp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_NONE
);
4940 if (!error
&& nofp
->nof_w
) {
4941 error
= nfs4_open_reopen_rpc(nofp
, thd
, noop
->noo_cred
, &cn
, dvp
, &vp
, NFS_OPEN_SHARE_ACCESS_WRITE
, NFS_OPEN_SHARE_DENY_NONE
);
4943 if (!error
&& nofp
->nof_r
) {
4944 error
= nfs4_open_reopen_rpc(nofp
, thd
, noop
->noo_cred
, &cn
, dvp
, &vp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
);
4947 if (nfs_mount_state_in_use_end(nmp
, error
)) {
4948 if (error
== NFSERR_GRACE
) {
4951 printf("nfs4_reopen: RPC failed, error %d, lost %d, %s\n", error
,
4952 (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) ? 1 : 0, name
? name
: "???");
4958 if (error
&& (error
!= EINTR
) && (error
!= ERESTART
)) {
4959 nfs_revoke_open_state_for_node(np
);
4961 lck_mtx_lock(&nofp
->nof_lock
);
4962 nofp
->nof_flags
&= ~NFS_OPEN_FILE_REOPENING
;
4964 nofp
->nof_flags
&= ~NFS_OPEN_FILE_REOPEN
;
4966 printf("nfs4_reopen: failed, error %d, lost %d, %s\n", error
,
4967 (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) ? 1 : 0, name
? name
: "???");
4969 lck_mtx_unlock(&nofp
->nof_lock
);
4970 if (filename
&& (filename
!= &smallname
[0])) {
4971 FREE(filename
, M_TEMP
);
4974 vnode_putname(vname
);
4976 if (dvp
!= NULLVP
) {
4983 * Send a normal OPEN RPC to open/create a file.
4987 struct nfs_open_file
*nofp
,
4989 struct componentname
*cnp
,
4990 struct vnode_attr
*vap
,
4997 return nfs4_open_rpc_internal(nofp
, ctx
, vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
4998 cnp
, vap
, dvp
, vpp
, create
, share_access
, share_deny
);
5002 * Send an OPEN RPC to reopen a file.
5005 nfs4_open_reopen_rpc(
5006 struct nfs_open_file
*nofp
,
5009 struct componentname
*cnp
,
5015 return nfs4_open_rpc_internal(nofp
, NULL
, thd
, cred
, cnp
, NULL
, dvp
, vpp
, NFS_OPEN_NOCREATE
, share_access
, share_deny
);
5019 * Send an OPEN_CONFIRM RPC to confirm an OPEN.
5022 nfs4_open_confirm_rpc(
5023 struct nfsmount
*nmp
,
5027 struct nfs_open_owner
*noop
,
5031 struct nfs_vattr
*nvap
,
5034 struct nfsm_chain nmreq
, nmrep
;
5035 int error
= 0, status
, numops
;
5036 struct nfsreq_secinfo_args si
;
5038 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
5039 nfsm_chain_null(&nmreq
);
5040 nfsm_chain_null(&nmrep
);
5042 // PUTFH, OPEN_CONFIRM, GETATTR
5044 nfsm_chain_build_alloc_init(error
, &nmreq
, 23 * NFSX_UNSIGNED
);
5045 nfsm_chain_add_compound_header(error
, &nmreq
, "open_confirm", nmp
->nm_minor_vers
, numops
);
5047 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5048 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, fhp
, fhlen
);
5050 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN_CONFIRM
);
5051 nfsm_chain_add_stateid(error
, &nmreq
, sid
);
5052 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5054 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5055 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
5056 nfsm_chain_build_done(error
, &nmreq
);
5057 nfsm_assert(error
, (numops
== 0), EPROTO
);
5059 error
= nfs_request2(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, R_NOINTR
, &nmrep
, xidp
, &status
);
5061 nfsm_chain_skip_tag(error
, &nmrep
);
5062 nfsm_chain_get_32(error
, &nmrep
, numops
);
5063 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
5065 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN_CONFIRM
);
5066 nfs_owner_seqid_increment(noop
, NULL
, error
);
5067 nfsm_chain_get_stateid(error
, &nmrep
, sid
);
5068 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5070 error
= nfs4_parsefattr(&nmrep
, NULL
, nvap
, NULL
, NULL
, NULL
);
5072 nfsm_chain_cleanup(&nmreq
);
5073 nfsm_chain_cleanup(&nmrep
);
5078 * common OPEN RPC code
5080 * If create is set, ctx must be passed in.
5081 * Returns a node on success if no node passed in.
5084 nfs4_open_rpc_internal(
5085 struct nfs_open_file
*nofp
,
5089 struct componentname
*cnp
,
5090 struct vnode_attr
*vap
,
5097 struct nfsmount
*nmp
;
5098 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
5099 struct nfs_vattr nvattr
;
5100 int error
= 0, open_error
= EIO
, lockerror
= ENOENT
, busyerror
= ENOENT
, status
;
5101 int nfsvers
, namedattrs
, numops
, exclusive
= 0, gotuid
, gotgid
;
5102 u_int64_t xid
, savedxid
= 0;
5103 nfsnode_t dnp
= VTONFS(dvp
);
5104 nfsnode_t np
, newnp
= NULL
;
5105 vnode_t newvp
= NULL
;
5106 struct nfsm_chain nmreq
, nmrep
;
5107 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
5108 uint32_t rflags
, delegation
, recall
;
5109 struct nfs_stateid stateid
, dstateid
, *sid
;
5111 struct nfsreq rq
, *req
= &rq
;
5112 struct nfs_dulookup dul
;
5114 uint32_t ace_type
, ace_flags
, ace_mask
, len
, slen
;
5115 struct kauth_ace ace
;
5116 struct nfsreq_secinfo_args si
;
5118 if (create
&& !ctx
) {
5123 if (nfs_mount_gone(nmp
)) {
5126 nfsvers
= nmp
->nm_vers
;
5127 namedattrs
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
);
5128 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
5132 np
= *vpp
? VTONFS(*vpp
) : NULL
;
5133 if (create
&& vap
) {
5134 exclusive
= (vap
->va_vaflags
& VA_EXCLUSIVE
);
5135 nfs_avoid_needless_id_setting_on_create(dnp
, vap
, ctx
);
5136 gotuid
= VATTR_IS_ACTIVE(vap
, va_uid
);
5137 gotgid
= VATTR_IS_ACTIVE(vap
, va_gid
);
5138 if (exclusive
&& (!VATTR_IS_ACTIVE(vap
, va_access_time
) || !VATTR_IS_ACTIVE(vap
, va_modify_time
))) {
5139 vap
->va_vaflags
|= VA_UTIMES_NULL
;
5142 exclusive
= gotuid
= gotgid
= 0;
5145 sid
= &nofp
->nof_stateid
;
5147 stateid
.seqid
= stateid
.other
[0] = stateid
.other
[1] = stateid
.other
[2] = 0;
5151 if ((error
= nfs_open_owner_set_busy(noop
, thd
))) {
5155 rflags
= delegation
= recall
= 0;
5158 slen
= sizeof(sbuf
);
5159 NVATTR_INIT(&nvattr
);
5160 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, cnp
->cn_nameptr
, cnp
->cn_namelen
);
5162 nfsm_chain_null(&nmreq
);
5163 nfsm_chain_null(&nmrep
);
5165 // PUTFH, SAVEFH, OPEN(CREATE?), GETATTR(FH), RESTOREFH, GETATTR
5167 nfsm_chain_build_alloc_init(error
, &nmreq
, 53 * NFSX_UNSIGNED
+ cnp
->cn_namelen
);
5168 nfsm_chain_add_compound_header(error
, &nmreq
, create
? "create" : "open", nmp
->nm_minor_vers
, numops
);
5170 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5171 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
5173 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
5175 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
5176 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5177 nfsm_chain_add_32(error
, &nmreq
, share_access
);
5178 nfsm_chain_add_32(error
, &nmreq
, share_deny
);
5179 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
);
5180 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
5181 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
));
5182 nfsm_chain_add_32(error
, &nmreq
, create
);
5185 static uint32_t create_verf
; // XXX need a better verifier
5187 nfsm_chain_add_32(error
, &nmreq
, NFS_CREATE_EXCLUSIVE
);
5188 /* insert 64 bit verifier */
5189 nfsm_chain_add_32(error
, &nmreq
, create_verf
);
5190 nfsm_chain_add_32(error
, &nmreq
, create_verf
);
5192 nfsm_chain_add_32(error
, &nmreq
, NFS_CREATE_UNCHECKED
);
5193 nfsm_chain_add_fattr4(error
, &nmreq
, vap
, nmp
);
5196 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_NULL
);
5197 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
5199 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5200 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
5201 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
5202 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
5204 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
5206 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5207 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
5208 nfsm_chain_build_done(error
, &nmreq
);
5209 nfsm_assert(error
, (numops
== 0), EPROTO
);
5211 error
= busyerror
= nfs_node_set_busy(dnp
, thd
);
5215 if (create
&& !namedattrs
) {
5216 nfs_dulookup_init(&dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
);
5219 error
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, R_NOINTR
, NULL
, &req
);
5221 if (create
&& !namedattrs
) {
5222 nfs_dulookup_start(&dul
, dnp
, ctx
);
5224 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
5228 if (create
&& !namedattrs
) {
5229 nfs_dulookup_finish(&dul
, dnp
, ctx
);
5232 if ((lockerror
= nfs_node_lock(dnp
))) {
5235 nfsm_chain_skip_tag(error
, &nmrep
);
5236 nfsm_chain_get_32(error
, &nmrep
, numops
);
5237 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
5238 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
5240 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
5241 nfs_owner_seqid_increment(noop
, NULL
, error
);
5242 nfsm_chain_get_stateid(error
, &nmrep
, sid
);
5243 nfsm_chain_check_change_info(error
, &nmrep
, dnp
);
5244 nfsm_chain_get_32(error
, &nmrep
, rflags
);
5245 bmlen
= NFS_ATTR_BITMAP_LEN
;
5246 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
5247 nfsm_chain_get_32(error
, &nmrep
, delegation
);
5249 switch (delegation
) {
5250 case NFS_OPEN_DELEGATE_NONE
:
5252 case NFS_OPEN_DELEGATE_READ
:
5253 case NFS_OPEN_DELEGATE_WRITE
:
5254 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
5255 nfsm_chain_get_32(error
, &nmrep
, recall
);
5256 if (delegation
== NFS_OPEN_DELEGATE_WRITE
) { // space (skip) XXX
5257 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
5259 /* if we have any trouble accepting the ACE, just invalidate it */
5260 ace_type
= ace_flags
= ace_mask
= len
= 0;
5261 nfsm_chain_get_32(error
, &nmrep
, ace_type
);
5262 nfsm_chain_get_32(error
, &nmrep
, ace_flags
);
5263 nfsm_chain_get_32(error
, &nmrep
, ace_mask
);
5264 nfsm_chain_get_32(error
, &nmrep
, len
);
5265 ace
.ace_flags
= nfs4_ace_nfstype_to_vfstype(ace_type
, &error
);
5266 ace
.ace_flags
|= nfs4_ace_nfsflags_to_vfsflags(ace_flags
);
5267 ace
.ace_rights
= nfs4_ace_nfsmask_to_vfsrights(ace_mask
);
5268 if (!error
&& (len
>= slen
)) {
5269 MALLOC(s
, char*, len
+ 1, M_TEMP
, M_WAITOK
);
5277 nfsm_chain_get_opaque(error
, &nmrep
, len
, s
);
5279 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(len
));
5283 if (nfs4_id2guid(s
, &ace
.ace_applicable
, (ace_flags
& NFS_ACE_IDENTIFIER_GROUP
))) {
5290 if (s
&& (s
!= sbuf
)) {
5299 /* At this point if we have no error, the object was created/opened. */
5302 if (create
&& vap
&& !exclusive
) {
5303 nfs_vattr_set_supported(bitmap
, vap
);
5305 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5307 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
5309 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
5310 printf("nfs: open/create didn't return filehandle? %s\n", cnp
->cn_nameptr
);
5314 if (!create
&& np
&& !NFS_CMPFH(np
, fh
.fh_data
, fh
.fh_len
)) {
5315 // XXX for the open case, what if fh doesn't match the vnode we think we're opening?
5316 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5317 if (!(np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
5318 NP(np
, "nfs4_open_rpc: warning: file handle mismatch");
5321 /* directory attributes: if we don't get them, make sure to invalidate */
5322 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
5323 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5324 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
5326 NATTRINVALIDATE(dnp
);
5330 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
) {
5331 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
5334 if (rflags
& NFS_OPEN_RESULT_CONFIRM
) {
5335 nfs_node_unlock(dnp
);
5337 NVATTR_CLEANUP(&nvattr
);
5338 error
= nfs4_open_confirm_rpc(nmp
, dnp
, fh
.fh_data
, fh
.fh_len
, noop
, sid
, thd
, cred
, &nvattr
, &xid
);
5341 if ((lockerror
= nfs_node_lock(dnp
))) {
5347 nfsm_chain_cleanup(&nmreq
);
5348 nfsm_chain_cleanup(&nmrep
);
5350 if (!lockerror
&& create
) {
5351 if (!open_error
&& (dnp
->n_flag
& NNEGNCENTRIES
)) {
5352 dnp
->n_flag
&= ~NNEGNCENTRIES
;
5353 cache_purge_negatives(dvp
);
5355 dnp
->n_flag
|= NMODIFIED
;
5356 nfs_node_unlock(dnp
);
5358 nfs_getattr(dnp
, NULL
, ctx
, NGA_CACHED
);
5361 nfs_node_unlock(dnp
);
5363 if (!error
&& !np
&& fh
.fh_len
) {
5364 /* create the vnode with the filehandle and attributes */
5366 error
= nfs_nget(NFSTOMP(dnp
), dnp
, cnp
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, NG_MAKEENTRY
, &newnp
);
5368 newvp
= NFSTOV(newnp
);
5371 NVATTR_CLEANUP(&nvattr
);
5373 nfs_node_clear_busy(dnp
);
5375 if ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
)) {
5379 if (!error
&& np
&& !recall
) {
5380 /* stuff the delegation state in the node */
5381 lck_mtx_lock(&np
->n_openlock
);
5382 np
->n_openflags
&= ~N_DELEG_MASK
;
5383 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
5384 np
->n_dstateid
= dstateid
;
5386 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5387 lck_mtx_lock(&nmp
->nm_lock
);
5388 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5389 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, np
, n_dlink
);
5391 lck_mtx_unlock(&nmp
->nm_lock
);
5393 lck_mtx_unlock(&np
->n_openlock
);
5395 /* give the delegation back */
5397 if (NFS_CMPFH(np
, fh
.fh_data
, fh
.fh_len
)) {
5398 /* update delegation state and return it */
5399 lck_mtx_lock(&np
->n_openlock
);
5400 np
->n_openflags
&= ~N_DELEG_MASK
;
5401 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
5402 np
->n_dstateid
= dstateid
;
5404 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5405 lck_mtx_lock(&nmp
->nm_lock
);
5406 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5407 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, np
, n_dlink
);
5409 lck_mtx_unlock(&nmp
->nm_lock
);
5411 lck_mtx_unlock(&np
->n_openlock
);
5412 /* don't need to send a separate delegreturn for fh */
5415 /* return np's current delegation */
5416 nfs4_delegation_return(np
, 0, thd
, cred
);
5418 if (fh
.fh_len
) { /* return fh's delegation if it wasn't for np */
5419 nfs4_delegreturn_rpc(nmp
, fh
.fh_data
, fh
.fh_len
, &dstateid
, 0, thd
, cred
);
5424 if (exclusive
&& (error
== NFSERR_NOTSUPP
)) {
5429 nfs_node_unlock(newnp
);
5432 } else if (create
) {
5433 nfs_node_unlock(newnp
);
5435 error
= nfs4_setattr_rpc(newnp
, vap
, ctx
);
5436 if (error
&& (gotuid
|| gotgid
)) {
5437 /* it's possible the server didn't like our attempt to set IDs. */
5438 /* so, let's try it again without those */
5439 VATTR_CLEAR_ACTIVE(vap
, va_uid
);
5440 VATTR_CLEAR_ACTIVE(vap
, va_gid
);
5441 error
= nfs4_setattr_rpc(newnp
, vap
, ctx
);
5450 nfs_open_owner_clear_busy(noop
);
5456 * Send an OPEN RPC to claim a delegated open for a file
5459 nfs4_claim_delegated_open_rpc(
5460 struct nfs_open_file
*nofp
,
5465 struct nfsmount
*nmp
;
5466 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
5467 struct nfs_vattr nvattr
;
5468 int error
= 0, lockerror
= ENOENT
, status
;
5469 int nfsvers
, numops
;
5471 nfsnode_t np
= nofp
->nof_np
;
5472 struct nfsm_chain nmreq
, nmrep
;
5473 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
5474 uint32_t rflags
= 0, delegation
, recall
= 0;
5476 struct nfs_stateid dstateid
;
5477 char sbuf
[64], *s
= sbuf
;
5478 uint32_t ace_type
, ace_flags
, ace_mask
, len
, slen
= sizeof(sbuf
);
5479 struct kauth_ace ace
;
5481 const char *vname
= NULL
;
5482 const char *name
= NULL
;
5484 char smallname
[128];
5485 char *filename
= NULL
;
5486 struct nfsreq_secinfo_args si
;
5489 if (nfs_mount_gone(nmp
)) {
5492 nfsvers
= nmp
->nm_vers
;
5494 nfs_node_lock_force(np
);
5495 if ((vnode_vtype(NFSTOV(np
)) != VDIR
) && np
->n_sillyrename
) {
5497 * The node's been sillyrenamed, so we need to use
5498 * the sillyrename directory/name to do the open.
5500 struct nfs_sillyrename
*nsp
= np
->n_sillyrename
;
5501 dvp
= NFSTOV(nsp
->nsr_dnp
);
5502 if ((error
= vnode_get(dvp
))) {
5504 nfs_node_unlock(np
);
5507 name
= nsp
->nsr_name
;
5510 * [sigh] We can't trust VFS to get the parent right for named
5511 * attribute nodes. (It likes to reparent the nodes after we've
5512 * created them.) Luckily we can probably get the right parent
5513 * from the n_parent we have stashed away.
5515 if ((np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) &&
5516 (((dvp
= np
->n_parent
)) && (error
= vnode_get(dvp
)))) {
5520 dvp
= vnode_getparent(NFSTOV(np
));
5522 vname
= vnode_getname(NFSTOV(np
));
5523 if (!dvp
|| !vname
) {
5527 nfs_node_unlock(np
);
5532 filename
= &smallname
[0];
5533 namelen
= snprintf(filename
, sizeof(smallname
), "%s", name
);
5534 if (namelen
>= sizeof(smallname
)) {
5535 MALLOC(filename
, char *, namelen
+ 1, M_TEMP
, M_WAITOK
);
5538 nfs_node_unlock(np
);
5541 snprintf(filename
, namelen
+ 1, "%s", name
);
5543 nfs_node_unlock(np
);
5545 if ((error
= nfs_open_owner_set_busy(noop
, NULL
))) {
5548 NVATTR_INIT(&nvattr
);
5549 delegation
= NFS_OPEN_DELEGATE_NONE
;
5550 dstateid
= np
->n_dstateid
;
5551 NFSREQ_SECINFO_SET(&si
, VTONFS(dvp
), NULL
, 0, filename
, namelen
);
5553 nfsm_chain_null(&nmreq
);
5554 nfsm_chain_null(&nmrep
);
5556 // PUTFH, OPEN, GETATTR(FH)
5558 nfsm_chain_build_alloc_init(error
, &nmreq
, 48 * NFSX_UNSIGNED
);
5559 nfsm_chain_add_compound_header(error
, &nmreq
, "open_claim_d", nmp
->nm_minor_vers
, numops
);
5561 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5562 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, VTONFS(dvp
)->n_fhp
, VTONFS(dvp
)->n_fhsize
);
5564 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
5565 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5566 nfsm_chain_add_32(error
, &nmreq
, share_access
);
5567 nfsm_chain_add_32(error
, &nmreq
, share_deny
);
5568 // open owner: clientid + uid
5569 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
); // open_owner4.clientid
5570 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
5571 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
)); // open_owner4.owner
5573 nfsm_chain_add_32(error
, &nmreq
, NFS_OPEN_NOCREATE
);
5575 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_DELEGATE_CUR
);
5576 nfsm_chain_add_stateid(error
, &nmreq
, &np
->n_dstateid
);
5577 nfsm_chain_add_name(error
, &nmreq
, filename
, namelen
, nmp
);
5579 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5580 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
5581 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
5582 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
5583 nfsm_chain_build_done(error
, &nmreq
);
5584 nfsm_assert(error
, (numops
== 0), EPROTO
);
5587 error
= nfs_request2(np
, nmp
->nm_mountp
, &nmreq
, NFSPROC4_COMPOUND
, current_thread(),
5588 noop
->noo_cred
, &si
, flags
| R_NOINTR
, &nmrep
, &xid
, &status
);
5590 if ((lockerror
= nfs_node_lock(np
))) {
5593 nfsm_chain_skip_tag(error
, &nmrep
);
5594 nfsm_chain_get_32(error
, &nmrep
, numops
);
5595 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
5597 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
5598 nfs_owner_seqid_increment(noop
, NULL
, error
);
5599 nfsm_chain_get_stateid(error
, &nmrep
, &nofp
->nof_stateid
);
5600 nfsm_chain_check_change_info(error
, &nmrep
, np
);
5601 nfsm_chain_get_32(error
, &nmrep
, rflags
);
5602 bmlen
= NFS_ATTR_BITMAP_LEN
;
5603 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
5604 nfsm_chain_get_32(error
, &nmrep
, delegation
);
5606 switch (delegation
) {
5607 case NFS_OPEN_DELEGATE_NONE
:
5608 // if (!(np->n_openflags & N_DELEG_RETURN)) /* don't warn if delegation is being returned */
5609 // printf("nfs: open delegated claim didn't return a delegation %s\n", filename ? filename : "???");
5611 case NFS_OPEN_DELEGATE_READ
:
5612 case NFS_OPEN_DELEGATE_WRITE
:
5613 if ((((np
->n_openflags
& N_DELEG_MASK
) == N_DELEG_READ
) &&
5614 (delegation
== NFS_OPEN_DELEGATE_WRITE
)) ||
5615 (((np
->n_openflags
& N_DELEG_MASK
) == N_DELEG_WRITE
) &&
5616 (delegation
== NFS_OPEN_DELEGATE_READ
))) {
5617 printf("nfs: open delegated claim returned a different delegation type! have %s got %s %s\n",
5618 ((np
->n_openflags
& N_DELEG_MASK
) == N_DELEG_WRITE
) ? "W" : "R",
5619 (delegation
== NFS_OPEN_DELEGATE_WRITE
) ? "W" : "R", filename
? filename
: "???");
5621 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
5622 nfsm_chain_get_32(error
, &nmrep
, recall
);
5623 if (delegation
== NFS_OPEN_DELEGATE_WRITE
) { // space (skip) XXX
5624 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
5626 /* if we have any trouble accepting the ACE, just invalidate it */
5627 ace_type
= ace_flags
= ace_mask
= len
= 0;
5628 nfsm_chain_get_32(error
, &nmrep
, ace_type
);
5629 nfsm_chain_get_32(error
, &nmrep
, ace_flags
);
5630 nfsm_chain_get_32(error
, &nmrep
, ace_mask
);
5631 nfsm_chain_get_32(error
, &nmrep
, len
);
5632 ace
.ace_flags
= nfs4_ace_nfstype_to_vfstype(ace_type
, &error
);
5633 ace
.ace_flags
|= nfs4_ace_nfsflags_to_vfsflags(ace_flags
);
5634 ace
.ace_rights
= nfs4_ace_nfsmask_to_vfsrights(ace_mask
);
5635 if (!error
&& (len
>= slen
)) {
5636 MALLOC(s
, char*, len
+ 1, M_TEMP
, M_WAITOK
);
5644 nfsm_chain_get_opaque(error
, &nmrep
, len
, s
);
5646 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(len
));
5650 if (nfs4_id2guid(s
, &ace
.ace_applicable
, (ace_flags
& NFS_ACE_IDENTIFIER_GROUP
))) {
5657 if (s
&& (s
!= sbuf
)) {
5661 /* stuff the latest delegation state in the node */
5662 lck_mtx_lock(&np
->n_openlock
);
5663 np
->n_openflags
&= ~N_DELEG_MASK
;
5664 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
5665 np
->n_dstateid
= dstateid
;
5667 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5668 lck_mtx_lock(&nmp
->nm_lock
);
5669 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5670 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, np
, n_dlink
);
5672 lck_mtx_unlock(&nmp
->nm_lock
);
5674 lck_mtx_unlock(&np
->n_openlock
);
5683 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5684 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
5686 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
5687 printf("nfs: open reclaim didn't return filehandle? %s\n", filename
? filename
: "???");
5691 if (!NFS_CMPFH(np
, fh
.fh_data
, fh
.fh_len
)) {
5692 // XXX what if fh doesn't match the vnode we think we're re-opening?
5693 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5694 if (!(np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
5695 printf("nfs4_claim_delegated_open_rpc: warning: file handle mismatch %s\n", filename
? filename
: "???");
5698 error
= nfs_loadattrcache(np
, &nvattr
, &xid
, 1);
5700 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
) {
5701 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
5704 NVATTR_CLEANUP(&nvattr
);
5705 nfsm_chain_cleanup(&nmreq
);
5706 nfsm_chain_cleanup(&nmrep
);
5708 nfs_node_unlock(np
);
5710 nfs_open_owner_clear_busy(noop
);
5711 if ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
)) {
5714 * We're making a delegated claim.
5715 * Don't return the delegation here in case we have more to claim.
5716 * Just make sure it's queued up to be returned.
5718 nfs4_delegation_return_enqueue(np
);
5723 // printf("nfs: open claim delegated (%d, %d) succeeded for %s\n", share_access, share_deny, filename ? filename : "???");
5724 if (filename
&& (filename
!= &smallname
[0])) {
5725 FREE(filename
, M_TEMP
);
5728 vnode_putname(vname
);
5730 if (dvp
!= NULLVP
) {
5737 * Send an OPEN RPC to reclaim an open file.
5740 nfs4_open_reclaim_rpc(
5741 struct nfs_open_file
*nofp
,
5745 struct nfsmount
*nmp
;
5746 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
5747 struct nfs_vattr nvattr
;
5748 int error
= 0, lockerror
= ENOENT
, status
;
5749 int nfsvers
, numops
;
5751 nfsnode_t np
= nofp
->nof_np
;
5752 struct nfsm_chain nmreq
, nmrep
;
5753 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
5754 uint32_t rflags
= 0, delegation
, recall
= 0;
5756 struct nfs_stateid dstateid
;
5757 char sbuf
[64], *s
= sbuf
;
5758 uint32_t ace_type
, ace_flags
, ace_mask
, len
, slen
= sizeof(sbuf
);
5759 struct kauth_ace ace
;
5760 struct nfsreq_secinfo_args si
;
5763 if (nfs_mount_gone(nmp
)) {
5766 nfsvers
= nmp
->nm_vers
;
5768 if ((error
= nfs_open_owner_set_busy(noop
, NULL
))) {
5772 NVATTR_INIT(&nvattr
);
5773 delegation
= NFS_OPEN_DELEGATE_NONE
;
5774 dstateid
= np
->n_dstateid
;
5775 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
5777 nfsm_chain_null(&nmreq
);
5778 nfsm_chain_null(&nmrep
);
5780 // PUTFH, OPEN, GETATTR(FH)
5782 nfsm_chain_build_alloc_init(error
, &nmreq
, 48 * NFSX_UNSIGNED
);
5783 nfsm_chain_add_compound_header(error
, &nmreq
, "open_reclaim", nmp
->nm_minor_vers
, numops
);
5785 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5786 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
5788 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
5789 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5790 nfsm_chain_add_32(error
, &nmreq
, share_access
);
5791 nfsm_chain_add_32(error
, &nmreq
, share_deny
);
5792 // open owner: clientid + uid
5793 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
); // open_owner4.clientid
5794 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
5795 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
)); // open_owner4.owner
5797 nfsm_chain_add_32(error
, &nmreq
, NFS_OPEN_NOCREATE
);
5799 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_PREVIOUS
);
5800 delegation
= (np
->n_openflags
& N_DELEG_READ
) ? NFS_OPEN_DELEGATE_READ
:
5801 (np
->n_openflags
& N_DELEG_WRITE
) ? NFS_OPEN_DELEGATE_WRITE
:
5802 NFS_OPEN_DELEGATE_NONE
;
5803 nfsm_chain_add_32(error
, &nmreq
, delegation
);
5804 delegation
= NFS_OPEN_DELEGATE_NONE
;
5806 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5807 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
5808 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
5809 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
5810 nfsm_chain_build_done(error
, &nmreq
);
5811 nfsm_assert(error
, (numops
== 0), EPROTO
);
5814 error
= nfs_request2(np
, nmp
->nm_mountp
, &nmreq
, NFSPROC4_COMPOUND
, current_thread(),
5815 noop
->noo_cred
, &si
, R_RECOVER
| R_NOINTR
, &nmrep
, &xid
, &status
);
5817 if ((lockerror
= nfs_node_lock(np
))) {
5820 nfsm_chain_skip_tag(error
, &nmrep
);
5821 nfsm_chain_get_32(error
, &nmrep
, numops
);
5822 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
5824 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
5825 nfs_owner_seqid_increment(noop
, NULL
, error
);
5826 nfsm_chain_get_stateid(error
, &nmrep
, &nofp
->nof_stateid
);
5827 nfsm_chain_check_change_info(error
, &nmrep
, np
);
5828 nfsm_chain_get_32(error
, &nmrep
, rflags
);
5829 bmlen
= NFS_ATTR_BITMAP_LEN
;
5830 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
5831 nfsm_chain_get_32(error
, &nmrep
, delegation
);
5833 switch (delegation
) {
5834 case NFS_OPEN_DELEGATE_NONE
:
5835 if (np
->n_openflags
& N_DELEG_MASK
) {
5837 * Hey! We were supposed to get our delegation back even
5838 * if it was getting immediately recalled. Bad server!
5840 * Just try to return the existing delegation.
5842 // NP(np, "nfs: open reclaim didn't return delegation?");
5843 delegation
= (np
->n_openflags
& N_DELEG_WRITE
) ? NFS_OPEN_DELEGATE_WRITE
: NFS_OPEN_DELEGATE_READ
;
5847 case NFS_OPEN_DELEGATE_READ
:
5848 case NFS_OPEN_DELEGATE_WRITE
:
5849 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
5850 nfsm_chain_get_32(error
, &nmrep
, recall
);
5851 if (delegation
== NFS_OPEN_DELEGATE_WRITE
) { // space (skip) XXX
5852 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
5854 /* if we have any trouble accepting the ACE, just invalidate it */
5855 ace_type
= ace_flags
= ace_mask
= len
= 0;
5856 nfsm_chain_get_32(error
, &nmrep
, ace_type
);
5857 nfsm_chain_get_32(error
, &nmrep
, ace_flags
);
5858 nfsm_chain_get_32(error
, &nmrep
, ace_mask
);
5859 nfsm_chain_get_32(error
, &nmrep
, len
);
5860 ace
.ace_flags
= nfs4_ace_nfstype_to_vfstype(ace_type
, &error
);
5861 ace
.ace_flags
|= nfs4_ace_nfsflags_to_vfsflags(ace_flags
);
5862 ace
.ace_rights
= nfs4_ace_nfsmask_to_vfsrights(ace_mask
);
5863 if (!error
&& (len
>= slen
)) {
5864 MALLOC(s
, char*, len
+ 1, M_TEMP
, M_WAITOK
);
5872 nfsm_chain_get_opaque(error
, &nmrep
, len
, s
);
5874 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(len
));
5878 if (nfs4_id2guid(s
, &ace
.ace_applicable
, (ace_flags
& NFS_ACE_IDENTIFIER_GROUP
))) {
5885 if (s
&& (s
!= sbuf
)) {
5889 /* stuff the delegation state in the node */
5890 lck_mtx_lock(&np
->n_openlock
);
5891 np
->n_openflags
&= ~N_DELEG_MASK
;
5892 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
5893 np
->n_dstateid
= dstateid
;
5895 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5896 lck_mtx_lock(&nmp
->nm_lock
);
5897 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5898 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, np
, n_dlink
);
5900 lck_mtx_unlock(&nmp
->nm_lock
);
5902 lck_mtx_unlock(&np
->n_openlock
);
5911 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5912 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
5914 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
5915 NP(np
, "nfs: open reclaim didn't return filehandle?");
5919 if (!NFS_CMPFH(np
, fh
.fh_data
, fh
.fh_len
)) {
5920 // XXX what if fh doesn't match the vnode we think we're re-opening?
5921 // That should be pretty hard in this case, given that we are doing
5922 // the open reclaim using the file handle (and not a dir/name pair).
5923 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5924 if (!(np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
5925 NP(np
, "nfs4_open_reclaim_rpc: warning: file handle mismatch");
5928 error
= nfs_loadattrcache(np
, &nvattr
, &xid
, 1);
5930 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
) {
5931 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
5935 // NP(np, "nfs: open reclaim (%d, %d) succeeded", share_access, share_deny);
5936 NVATTR_CLEANUP(&nvattr
);
5937 nfsm_chain_cleanup(&nmreq
);
5938 nfsm_chain_cleanup(&nmrep
);
5940 nfs_node_unlock(np
);
5942 nfs_open_owner_clear_busy(noop
);
5943 if ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
)) {
5945 nfs4_delegation_return_enqueue(np
);
5952 nfs4_open_downgrade_rpc(
5954 struct nfs_open_file
*nofp
,
5957 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
5958 struct nfsmount
*nmp
;
5959 int error
, lockerror
= ENOENT
, status
, nfsvers
, numops
;
5960 struct nfsm_chain nmreq
, nmrep
;
5962 struct nfsreq_secinfo_args si
;
5965 if (nfs_mount_gone(nmp
)) {
5968 nfsvers
= nmp
->nm_vers
;
5970 if ((error
= nfs_open_owner_set_busy(noop
, NULL
))) {
5974 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
5975 nfsm_chain_null(&nmreq
);
5976 nfsm_chain_null(&nmrep
);
5978 // PUTFH, OPEN_DOWNGRADE, GETATTR
5980 nfsm_chain_build_alloc_init(error
, &nmreq
, 23 * NFSX_UNSIGNED
);
5981 nfsm_chain_add_compound_header(error
, &nmreq
, "open_downgrd", nmp
->nm_minor_vers
, numops
);
5983 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5984 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
5986 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN_DOWNGRADE
);
5987 nfsm_chain_add_stateid(error
, &nmreq
, &nofp
->nof_stateid
);
5988 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5989 nfsm_chain_add_32(error
, &nmreq
, nofp
->nof_access
);
5990 nfsm_chain_add_32(error
, &nmreq
, nofp
->nof_deny
);
5992 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5993 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
5994 nfsm_chain_build_done(error
, &nmreq
);
5995 nfsm_assert(error
, (numops
== 0), EPROTO
);
5997 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
5998 vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
5999 &si
, R_NOINTR
, &nmrep
, &xid
, &status
);
6001 if ((lockerror
= nfs_node_lock(np
))) {
6004 nfsm_chain_skip_tag(error
, &nmrep
);
6005 nfsm_chain_get_32(error
, &nmrep
, numops
);
6006 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
6008 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN_DOWNGRADE
);
6009 nfs_owner_seqid_increment(noop
, NULL
, error
);
6010 nfsm_chain_get_stateid(error
, &nmrep
, &nofp
->nof_stateid
);
6011 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
6012 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
6015 nfs_node_unlock(np
);
6017 nfs_open_owner_clear_busy(noop
);
6018 nfsm_chain_cleanup(&nmreq
);
6019 nfsm_chain_cleanup(&nmrep
);
6026 struct nfs_open_file
*nofp
,
6031 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
6032 struct nfsmount
*nmp
;
6033 int error
, lockerror
= ENOENT
, status
, nfsvers
, numops
;
6034 struct nfsm_chain nmreq
, nmrep
;
6036 struct nfsreq_secinfo_args si
;
6039 if (nfs_mount_gone(nmp
)) {
6042 nfsvers
= nmp
->nm_vers
;
6044 if ((error
= nfs_open_owner_set_busy(noop
, NULL
))) {
6048 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
6049 nfsm_chain_null(&nmreq
);
6050 nfsm_chain_null(&nmrep
);
6052 // PUTFH, CLOSE, GETATTR
6054 nfsm_chain_build_alloc_init(error
, &nmreq
, 23 * NFSX_UNSIGNED
);
6055 nfsm_chain_add_compound_header(error
, &nmreq
, "close", nmp
->nm_minor_vers
, numops
);
6057 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
6058 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
6060 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_CLOSE
);
6061 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
6062 nfsm_chain_add_stateid(error
, &nmreq
, &nofp
->nof_stateid
);
6064 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
6065 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
6066 nfsm_chain_build_done(error
, &nmreq
);
6067 nfsm_assert(error
, (numops
== 0), EPROTO
);
6069 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, flags
| R_NOINTR
, &nmrep
, &xid
, &status
);
6071 if ((lockerror
= nfs_node_lock(np
))) {
6074 nfsm_chain_skip_tag(error
, &nmrep
);
6075 nfsm_chain_get_32(error
, &nmrep
, numops
);
6076 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
6078 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_CLOSE
);
6079 nfs_owner_seqid_increment(noop
, NULL
, error
);
6080 nfsm_chain_get_stateid(error
, &nmrep
, &nofp
->nof_stateid
);
6081 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
6082 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
6085 nfs_node_unlock(np
);
6087 nfs_open_owner_clear_busy(noop
);
6088 nfsm_chain_cleanup(&nmreq
);
6089 nfsm_chain_cleanup(&nmrep
);
6095 * Claim the delegated open combinations this open file holds.
6098 nfs4_claim_delegated_state_for_open_file(struct nfs_open_file
*nofp
, int flags
)
6100 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
6101 struct nfs_lock_owner
*nlop
;
6102 struct nfs_file_lock
*nflp
, *nextnflp
;
6103 struct nfsmount
*nmp
;
6104 int error
= 0, reopen
= 0;
6106 if (nofp
->nof_d_rw_drw
) {
6107 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_BOTH
, flags
);
6109 lck_mtx_lock(&nofp
->nof_lock
);
6110 nofp
->nof_rw_drw
+= nofp
->nof_d_rw_drw
;
6111 nofp
->nof_d_rw_drw
= 0;
6112 lck_mtx_unlock(&nofp
->nof_lock
);
6115 if (!error
&& nofp
->nof_d_w_drw
) {
6116 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_WRITE
, NFS_OPEN_SHARE_DENY_BOTH
, flags
);
6118 lck_mtx_lock(&nofp
->nof_lock
);
6119 nofp
->nof_w_drw
+= nofp
->nof_d_w_drw
;
6120 nofp
->nof_d_w_drw
= 0;
6121 lck_mtx_unlock(&nofp
->nof_lock
);
6124 if (!error
&& nofp
->nof_d_r_drw
) {
6125 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_BOTH
, flags
);
6127 lck_mtx_lock(&nofp
->nof_lock
);
6128 nofp
->nof_r_drw
+= nofp
->nof_d_r_drw
;
6129 nofp
->nof_d_r_drw
= 0;
6130 lck_mtx_unlock(&nofp
->nof_lock
);
6133 if (!error
&& nofp
->nof_d_rw_dw
) {
6134 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_WRITE
, flags
);
6136 lck_mtx_lock(&nofp
->nof_lock
);
6137 nofp
->nof_rw_dw
+= nofp
->nof_d_rw_dw
;
6138 nofp
->nof_d_rw_dw
= 0;
6139 lck_mtx_unlock(&nofp
->nof_lock
);
6142 if (!error
&& nofp
->nof_d_w_dw
) {
6143 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_WRITE
, NFS_OPEN_SHARE_DENY_WRITE
, flags
);
6145 lck_mtx_lock(&nofp
->nof_lock
);
6146 nofp
->nof_w_dw
+= nofp
->nof_d_w_dw
;
6147 nofp
->nof_d_w_dw
= 0;
6148 lck_mtx_unlock(&nofp
->nof_lock
);
6151 if (!error
&& nofp
->nof_d_r_dw
) {
6152 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_WRITE
, flags
);
6154 lck_mtx_lock(&nofp
->nof_lock
);
6155 nofp
->nof_r_dw
+= nofp
->nof_d_r_dw
;
6156 nofp
->nof_d_r_dw
= 0;
6157 lck_mtx_unlock(&nofp
->nof_lock
);
6160 /* non-deny-mode opens may be reopened if no locks are held */
6161 if (!error
&& nofp
->nof_d_rw
) {
6162 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_NONE
, flags
);
6163 /* for some errors, we should just try reopening the file */
6164 if (nfs_mount_state_error_delegation_lost(error
)) {
6167 if (!error
|| reopen
) {
6168 lck_mtx_lock(&nofp
->nof_lock
);
6169 nofp
->nof_rw
+= nofp
->nof_d_rw
;
6171 lck_mtx_unlock(&nofp
->nof_lock
);
6174 /* if we've already set reopen, we should move these other two opens from delegated to not delegated */
6175 if ((!error
|| reopen
) && nofp
->nof_d_w
) {
6177 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_WRITE
, NFS_OPEN_SHARE_DENY_NONE
, flags
);
6178 /* for some errors, we should just try reopening the file */
6179 if (nfs_mount_state_error_delegation_lost(error
)) {
6183 if (!error
|| reopen
) {
6184 lck_mtx_lock(&nofp
->nof_lock
);
6185 nofp
->nof_w
+= nofp
->nof_d_w
;
6187 lck_mtx_unlock(&nofp
->nof_lock
);
6190 if ((!error
|| reopen
) && nofp
->nof_d_r
) {
6192 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, flags
);
6193 /* for some errors, we should just try reopening the file */
6194 if (nfs_mount_state_error_delegation_lost(error
)) {
6198 if (!error
|| reopen
) {
6199 lck_mtx_lock(&nofp
->nof_lock
);
6200 nofp
->nof_r
+= nofp
->nof_d_r
;
6202 lck_mtx_unlock(&nofp
->nof_lock
);
6208 * Any problems with the delegation probably indicates that we
6209 * should review/return all of our current delegation state.
6211 if ((nmp
= NFSTONMP(nofp
->nof_np
))) {
6212 nfs4_delegation_return_enqueue(nofp
->nof_np
);
6213 lck_mtx_lock(&nmp
->nm_lock
);
6214 nfs_need_recover(nmp
, NFSERR_EXPIRED
);
6215 lck_mtx_unlock(&nmp
->nm_lock
);
6217 if (reopen
&& (nfs_check_for_locks(noop
, nofp
) == 0)) {
6218 /* just reopen the file on next access */
6219 NP(nofp
->nof_np
, "nfs4_claim_delegated_state_for_open_file: %d, need reopen, %d",
6220 reopen
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
6221 lck_mtx_lock(&nofp
->nof_lock
);
6222 nofp
->nof_flags
|= NFS_OPEN_FILE_REOPEN
;
6223 lck_mtx_unlock(&nofp
->nof_lock
);
6227 NP(nofp
->nof_np
, "nfs4_claim_delegated_state_for_open_file: %d, locks prevent reopen, %d",
6228 reopen
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
6232 if (!error
&& ((nmp
= NFSTONMP(nofp
->nof_np
)))) {
6233 /* claim delegated locks */
6234 TAILQ_FOREACH(nlop
, &nofp
->nof_np
->n_lock_owners
, nlo_link
) {
6235 if (nlop
->nlo_open_owner
!= noop
) {
6238 TAILQ_FOREACH_SAFE(nflp
, &nlop
->nlo_locks
, nfl_lolink
, nextnflp
) {
6239 /* skip dead & blocked lock requests (shouldn't be any in the held lock list) */
6240 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_DEAD
| NFS_FILE_LOCK_BLOCKED
)) {
6243 /* skip non-delegated locks */
6244 if (!(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
6247 error
= nmp
->nm_funcs
->nf_setlock_rpc(nofp
->nof_np
, nofp
, nflp
, 0, flags
, current_thread(), noop
->noo_cred
);
6249 NP(nofp
->nof_np
, "nfs: delegated lock claim (0x%llx, 0x%llx) failed %d, %d",
6250 nflp
->nfl_start
, nflp
->nfl_end
, error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
6254 // NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) succeeded, %d",
6255 // nflp->nfl_start, nflp->nfl_end, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6264 if (!error
) { /* all state claimed successfully! */
6268 /* restart if it looks like a problem more than just losing the delegation */
6269 if (!nfs_mount_state_error_delegation_lost(error
) &&
6270 ((error
== ETIMEDOUT
) || nfs_mount_state_error_should_restart(error
))) {
6271 NP(nofp
->nof_np
, "nfs delegated lock claim error %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
6272 if ((error
== ETIMEDOUT
) && ((nmp
= NFSTONMP(nofp
->nof_np
)))) {
6273 nfs_need_reconnect(nmp
);
6278 /* delegated state lost (once held but now not claimable) */
6279 NP(nofp
->nof_np
, "nfs delegated state claim error %d, state lost, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
6282 * Any problems with the delegation probably indicates that we
6283 * should review/return all of our current delegation state.
6285 if ((nmp
= NFSTONMP(nofp
->nof_np
))) {
6286 nfs4_delegation_return_enqueue(nofp
->nof_np
);
6287 lck_mtx_lock(&nmp
->nm_lock
);
6288 nfs_need_recover(nmp
, NFSERR_EXPIRED
);
6289 lck_mtx_unlock(&nmp
->nm_lock
);
6292 /* revoke all open file state */
6293 nfs_revoke_open_state_for_node(nofp
->nof_np
);
6297 #endif /* CONFIG_NFS4*/
6300 * Release all open state for the given node.
6303 nfs_release_open_state_for_node(nfsnode_t np
, int force
)
6305 struct nfsmount
*nmp
= NFSTONMP(np
);
6306 struct nfs_open_file
*nofp
;
6307 struct nfs_file_lock
*nflp
, *nextnflp
;
6309 /* drop held locks */
6310 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
6311 /* skip dead & blocked lock requests */
6312 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_DEAD
| NFS_FILE_LOCK_BLOCKED
)) {
6315 /* send an unlock if not a delegated lock */
6316 if (!force
&& nmp
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
6317 nmp
->nm_funcs
->nf_unlock_rpc(np
, nflp
->nfl_owner
, F_WRLCK
, nflp
->nfl_start
, nflp
->nfl_end
, R_RECOVER
,
6318 NULL
, nflp
->nfl_owner
->nlo_open_owner
->noo_cred
);
6320 /* kill/remove the lock */
6321 lck_mtx_lock(&np
->n_openlock
);
6322 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
6323 lck_mtx_lock(&nflp
->nfl_owner
->nlo_lock
);
6324 TAILQ_REMOVE(&nflp
->nfl_owner
->nlo_locks
, nflp
, nfl_lolink
);
6325 lck_mtx_unlock(&nflp
->nfl_owner
->nlo_lock
);
6326 if (nflp
->nfl_blockcnt
) {
6327 /* wake up anyone blocked on this lock */
6330 /* remove nflp from lock list and destroy */
6331 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
6332 nfs_file_lock_destroy(nflp
);
6334 lck_mtx_unlock(&np
->n_openlock
);
6337 lck_mtx_lock(&np
->n_openlock
);
6339 /* drop all opens */
6340 TAILQ_FOREACH(nofp
, &np
->n_opens
, nof_link
) {
6341 if (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) {
6344 /* mark open state as lost */
6345 lck_mtx_lock(&nofp
->nof_lock
);
6346 nofp
->nof_flags
&= ~NFS_OPEN_FILE_REOPEN
;
6347 nofp
->nof_flags
|= NFS_OPEN_FILE_LOST
;
6349 lck_mtx_unlock(&nofp
->nof_lock
);
6351 if (!force
&& nmp
&& (nmp
->nm_vers
>= NFS_VER4
)) {
6352 nfs4_close_rpc(np
, nofp
, NULL
, nofp
->nof_owner
->noo_cred
, R_RECOVER
);
6357 lck_mtx_unlock(&np
->n_openlock
);
6361 * State for a node has been lost, drop it, and revoke the node.
6362 * Attempt to return any state if possible in case the server
6363 * might somehow think we hold it.
6366 nfs_revoke_open_state_for_node(nfsnode_t np
)
6368 struct nfsmount
*nmp
;
6370 /* mark node as needing to be revoked */
6371 nfs_node_lock_force(np
);
6372 if (np
->n_flag
& NREVOKE
) { /* already revoked? */
6373 NP(np
, "nfs_revoke_open_state_for_node(): already revoked");
6374 nfs_node_unlock(np
);
6377 np
->n_flag
|= NREVOKE
;
6378 nfs_node_unlock(np
);
6380 nfs_release_open_state_for_node(np
, 0);
6381 NP(np
, "nfs: state lost for %p 0x%x", np
, np
->n_flag
);
6383 /* mark mount as needing a revoke scan and have the socket thread do it. */
6384 if ((nmp
= NFSTONMP(np
))) {
6385 lck_mtx_lock(&nmp
->nm_lock
);
6386 nmp
->nm_state
|= NFSSTA_REVOKE
;
6387 nfs_mount_sock_thread_wake(nmp
);
6388 lck_mtx_unlock(&nmp
->nm_lock
);
6394 * Claim the delegated open combinations that each of this node's open files hold.
6397 nfs4_claim_delegated_state_for_node(nfsnode_t np
, int flags
)
6399 struct nfs_open_file
*nofp
;
6402 lck_mtx_lock(&np
->n_openlock
);
6404 /* walk the open file list looking for opens with delegated state to claim */
6406 TAILQ_FOREACH(nofp
, &np
->n_opens
, nof_link
) {
6407 if (!nofp
->nof_d_rw_drw
&& !nofp
->nof_d_w_drw
&& !nofp
->nof_d_r_drw
&&
6408 !nofp
->nof_d_rw_dw
&& !nofp
->nof_d_w_dw
&& !nofp
->nof_d_r_dw
&&
6409 !nofp
->nof_d_rw
&& !nofp
->nof_d_w
&& !nofp
->nof_d_r
) {
6412 lck_mtx_unlock(&np
->n_openlock
);
6413 error
= nfs4_claim_delegated_state_for_open_file(nofp
, flags
);
6414 lck_mtx_lock(&np
->n_openlock
);
6421 lck_mtx_unlock(&np
->n_openlock
);
6427 * Mark a node as needed to have its delegation returned.
6428 * Queue it up on the delegation return queue.
6429 * Make sure the thread is running.
6432 nfs4_delegation_return_enqueue(nfsnode_t np
)
6434 struct nfsmount
*nmp
;
6437 if (nfs_mount_gone(nmp
)) {
6441 lck_mtx_lock(&np
->n_openlock
);
6442 np
->n_openflags
|= N_DELEG_RETURN
;
6443 lck_mtx_unlock(&np
->n_openlock
);
6445 lck_mtx_lock(&nmp
->nm_lock
);
6446 if (np
->n_dreturn
.tqe_next
== NFSNOLIST
) {
6447 TAILQ_INSERT_TAIL(&nmp
->nm_dreturnq
, np
, n_dreturn
);
6449 nfs_mount_sock_thread_wake(nmp
);
6450 lck_mtx_unlock(&nmp
->nm_lock
);
6454 * return any delegation we may have for the given node
6457 nfs4_delegation_return(nfsnode_t np
, int flags
, thread_t thd
, kauth_cred_t cred
)
6459 struct nfsmount
*nmp
;
6461 nfs_stateid dstateid
;
6465 if (nfs_mount_gone(nmp
)) {
6469 /* first, make sure the node's marked for delegation return */
6470 lck_mtx_lock(&np
->n_openlock
);
6471 np
->n_openflags
|= (N_DELEG_RETURN
| N_DELEG_RETURNING
);
6472 lck_mtx_unlock(&np
->n_openlock
);
6474 /* make sure nobody else is using the delegation state */
6475 if ((error
= nfs_open_state_set_busy(np
, NULL
))) {
6479 /* claim any delegated state */
6480 if ((error
= nfs4_claim_delegated_state_for_node(np
, flags
))) {
6484 /* return the delegation */
6485 lck_mtx_lock(&np
->n_openlock
);
6486 dstateid
= np
->n_dstateid
;
6487 fh
.fh_len
= np
->n_fhsize
;
6488 bcopy(np
->n_fhp
, &fh
.fh_data
, fh
.fh_len
);
6489 lck_mtx_unlock(&np
->n_openlock
);
6490 error
= nfs4_delegreturn_rpc(NFSTONMP(np
), fh
.fh_data
, fh
.fh_len
, &dstateid
, flags
, thd
, cred
);
6491 /* assume delegation is gone for all errors except ETIMEDOUT, NFSERR_*MOVED */
6492 if ((error
!= ETIMEDOUT
) && (error
!= NFSERR_MOVED
) && (error
!= NFSERR_LEASE_MOVED
)) {
6493 lck_mtx_lock(&np
->n_openlock
);
6494 np
->n_openflags
&= ~N_DELEG_MASK
;
6495 lck_mtx_lock(&nmp
->nm_lock
);
6496 if (np
->n_dlink
.tqe_next
!= NFSNOLIST
) {
6497 TAILQ_REMOVE(&nmp
->nm_delegations
, np
, n_dlink
);
6498 np
->n_dlink
.tqe_next
= NFSNOLIST
;
6500 lck_mtx_unlock(&nmp
->nm_lock
);
6501 lck_mtx_unlock(&np
->n_openlock
);
6505 /* make sure it's no longer on the return queue and clear the return flags */
6506 lck_mtx_lock(&nmp
->nm_lock
);
6507 if (np
->n_dreturn
.tqe_next
!= NFSNOLIST
) {
6508 TAILQ_REMOVE(&nmp
->nm_dreturnq
, np
, n_dreturn
);
6509 np
->n_dreturn
.tqe_next
= NFSNOLIST
;
6511 lck_mtx_unlock(&nmp
->nm_lock
);
6512 lck_mtx_lock(&np
->n_openlock
);
6513 np
->n_openflags
&= ~(N_DELEG_RETURN
| N_DELEG_RETURNING
);
6514 lck_mtx_unlock(&np
->n_openlock
);
6517 NP(np
, "nfs4_delegation_return, error %d", error
);
6518 if (error
== ETIMEDOUT
) {
6519 nfs_need_reconnect(nmp
);
6521 if (nfs_mount_state_error_should_restart(error
)) {
6522 /* make sure recovery happens */
6523 lck_mtx_lock(&nmp
->nm_lock
);
6524 nfs_need_recover(nmp
, nfs_mount_state_error_delegation_lost(error
) ? NFSERR_EXPIRED
: 0);
6525 lck_mtx_unlock(&nmp
->nm_lock
);
6529 nfs_open_state_clear_busy(np
);
6535 * RPC to return a delegation for a file handle
6538 nfs4_delegreturn_rpc(struct nfsmount
*nmp
, u_char
*fhp
, int fhlen
, struct nfs_stateid
*sid
, int flags
, thread_t thd
, kauth_cred_t cred
)
6540 int error
= 0, status
, numops
;
6542 struct nfsm_chain nmreq
, nmrep
;
6543 struct nfsreq_secinfo_args si
;
6545 NFSREQ_SECINFO_SET(&si
, NULL
, fhp
, fhlen
, NULL
, 0);
6546 nfsm_chain_null(&nmreq
);
6547 nfsm_chain_null(&nmrep
);
6549 // PUTFH, DELEGRETURN
6551 nfsm_chain_build_alloc_init(error
, &nmreq
, 16 * NFSX_UNSIGNED
);
6552 nfsm_chain_add_compound_header(error
, &nmreq
, "delegreturn", nmp
->nm_minor_vers
, numops
);
6554 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
6555 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, fhp
, fhlen
);
6557 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_DELEGRETURN
);
6558 nfsm_chain_add_stateid(error
, &nmreq
, sid
);
6559 nfsm_chain_build_done(error
, &nmreq
);
6560 nfsm_assert(error
, (numops
== 0), EPROTO
);
6562 error
= nfs_request2(NULL
, nmp
->nm_mountp
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, flags
, &nmrep
, &xid
, &status
);
6563 nfsm_chain_skip_tag(error
, &nmrep
);
6564 nfsm_chain_get_32(error
, &nmrep
, numops
);
6565 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
6566 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_DELEGRETURN
);
6568 nfsm_chain_cleanup(&nmreq
);
6569 nfsm_chain_cleanup(&nmrep
);
6572 #endif /* CONFIG_NFS4 */
6576 * Just call nfs_bioread() to do the work.
6578 * Note: the exec code paths have a tendency to call VNOP_READ (and VNOP_MMAP)
6579 * without first calling VNOP_OPEN, so we make sure the file is open here.
6583 struct vnop_read_args
/* {
6584 * struct vnodeop_desc *a_desc;
6586 * struct uio *a_uio;
6588 * vfs_context_t a_context;
6591 vnode_t vp
= ap
->a_vp
;
6592 vfs_context_t ctx
= ap
->a_context
;
6594 struct nfsmount
*nmp
;
6595 struct nfs_open_owner
*noop
;
6596 struct nfs_open_file
*nofp
;
6599 if (vnode_vtype(ap
->a_vp
) != VREG
) {
6600 return (vnode_vtype(vp
) == VDIR
) ? EISDIR
: EPERM
;
6605 if (nfs_mount_gone(nmp
)) {
6608 if (np
->n_flag
& NREVOKE
) {
6612 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1);
6617 error
= nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 1);
6618 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
6619 NP(np
, "nfs_vnop_read: LOST %d", kauth_cred_getuid(noop
->noo_cred
));
6623 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
6624 error
= nfs4_reopen(nofp
, vfs_context_thread(ctx
));
6632 nfs_open_owner_rele(noop
);
6636 * Since the read path is a hot path, if we already have
6637 * read access, lets go and try and do the read, without
6638 * busying the mount and open file node for this open owner.
6640 * N.B. This is inherently racy w.r.t. an execve using
6641 * an already open file, in that the read at the end of
6642 * this routine will be racing with a potential close.
6643 * The code below ultimately has the same problem. In practice
6644 * this does not seem to be an issue.
6646 if (nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_READ
) {
6647 nfs_open_owner_rele(noop
);
6650 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
6652 nfs_open_owner_rele(noop
);
6656 * If we don't have a file already open with the access we need (read) then
6657 * we need to open one. Otherwise we just co-opt an open. We might not already
6658 * have access because we're trying to read the first page of the
6661 error
= nfs_open_file_set_busy(nofp
, vfs_context_thread(ctx
));
6663 nfs_mount_state_in_use_end(nmp
, 0);
6664 nfs_open_owner_rele(noop
);
6667 if (!(nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_READ
)) {
6668 /* we don't have the file open, so open it for read access if we're not denied */
6669 if (nofp
->nof_flags
& NFS_OPEN_FILE_NEEDCLOSE
) {
6670 NP(np
, "nfs_vnop_read: File already needs close access: 0x%x, cred: %d thread: %lld",
6671 nofp
->nof_access
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
), thread_tid(vfs_context_thread(ctx
)));
6673 if (nofp
->nof_deny
& NFS_OPEN_SHARE_DENY_READ
) {
6674 nfs_open_file_clear_busy(nofp
);
6675 nfs_mount_state_in_use_end(nmp
, 0);
6676 nfs_open_owner_rele(noop
);
6679 if (np
->n_flag
& NREVOKE
) {
6681 nfs_open_file_clear_busy(nofp
);
6682 nfs_mount_state_in_use_end(nmp
, 0);
6683 nfs_open_owner_rele(noop
);
6686 if (nmp
->nm_vers
< NFS_VER4
) {
6687 /* NFS v2/v3 opens are always allowed - so just add it. */
6688 nfs_open_file_add_open(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, 0);
6692 error
= nfs4_open(np
, nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, ctx
);
6696 nofp
->nof_flags
|= NFS_OPEN_FILE_NEEDCLOSE
;
6700 nfs_open_file_clear_busy(nofp
);
6702 if (nfs_mount_state_in_use_end(nmp
, error
)) {
6706 nfs_open_owner_rele(noop
);
6711 return nfs_bioread(VTONFS(ap
->a_vp
), ap
->a_uio
, ap
->a_ioflag
, ap
->a_context
);
6716 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6717 * Files are created using the NFSv4 OPEN RPC. So we must open the
6718 * file to create it and then close it.
6722 struct vnop_create_args
/* {
6723 * struct vnodeop_desc *a_desc;
6726 * struct componentname *a_cnp;
6727 * struct vnode_attr *a_vap;
6728 * vfs_context_t a_context;
6731 vfs_context_t ctx
= ap
->a_context
;
6732 struct componentname
*cnp
= ap
->a_cnp
;
6733 struct vnode_attr
*vap
= ap
->a_vap
;
6734 vnode_t dvp
= ap
->a_dvp
;
6735 vnode_t
*vpp
= ap
->a_vpp
;
6736 struct nfsmount
*nmp
;
6738 int error
= 0, busyerror
= 0, accessMode
, denyMode
;
6739 struct nfs_open_owner
*noop
= NULL
;
6740 struct nfs_open_file
*newnofp
= NULL
, *nofp
= NULL
;
6743 if (nfs_mount_gone(nmp
)) {
6748 nfs_avoid_needless_id_setting_on_create(VTONFS(dvp
), vap
, ctx
);
6751 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1);
6757 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
6759 nfs_open_owner_rele(noop
);
6763 /* grab a provisional, nodeless open file */
6764 error
= nfs_open_file_find(NULL
, noop
, &newnofp
, 0, 0, 1);
6765 if (!error
&& (newnofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
6766 printf("nfs_vnop_create: LOST\n");
6769 if (!error
&& (newnofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
6770 /* This shouldn't happen given that this is a new, nodeless nofp */
6771 nfs_mount_state_in_use_end(nmp
, 0);
6772 error
= nfs4_reopen(newnofp
, vfs_context_thread(ctx
));
6773 nfs_open_file_destroy(newnofp
);
6780 error
= nfs_open_file_set_busy(newnofp
, vfs_context_thread(ctx
));
6784 nfs_open_file_destroy(newnofp
);
6791 * We're just trying to create the file.
6792 * We'll create/open it RW, and set NFS_OPEN_FILE_CREATE.
6794 accessMode
= NFS_OPEN_SHARE_ACCESS_BOTH
;
6795 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
6797 /* Do the open/create */
6798 error
= nfs4_open_rpc(newnofp
, ctx
, cnp
, vap
, dvp
, vpp
, NFS_OPEN_CREATE
, accessMode
, denyMode
);
6799 if ((error
== EACCES
) && vap
&& !(vap
->va_vaflags
& VA_EXCLUSIVE
) &&
6800 VATTR_IS_ACTIVE(vap
, va_mode
) && !(vap
->va_mode
& S_IWUSR
)) {
6802 * Hmm... it looks like we may have a situation where the request was
6803 * retransmitted because we didn't get the first response which successfully
6804 * created/opened the file and then the second time we were denied the open
6805 * because the mode the file was created with doesn't allow write access.
6807 * We'll try to work around this by temporarily updating the mode and
6808 * retrying the open.
6810 struct vnode_attr vattr
;
6812 /* first make sure it's there */
6813 int error2
= nfs_lookitup(VTONFS(dvp
), cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
, &np
);
6814 if (!error2
&& np
) {
6815 nfs_node_unlock(np
);
6817 if (vnode_vtype(NFSTOV(np
)) == VREG
) {
6819 VATTR_SET(&vattr
, va_mode
, (vap
->va_mode
| S_IWUSR
));
6820 if (!nfs4_setattr_rpc(np
, &vattr
, ctx
)) {
6821 error2
= nfs4_open_rpc(newnofp
, ctx
, cnp
, NULL
, dvp
, vpp
, NFS_OPEN_NOCREATE
, accessMode
, denyMode
);
6823 VATTR_SET(&vattr
, va_mode
, vap
->va_mode
);
6824 nfs4_setattr_rpc(np
, &vattr
, ctx
);
6836 if (!error
&& !*vpp
) {
6837 printf("nfs4_open_rpc returned without a node?\n");
6838 /* Hmmm... with no node, we have no filehandle and can't close it */
6842 /* need to cleanup our temporary nofp */
6843 nfs_open_file_clear_busy(newnofp
);
6844 nfs_open_file_destroy(newnofp
);
6848 /* After we have a node, add our open file struct to the node */
6850 nfs_open_file_add_open(newnofp
, accessMode
, denyMode
, 0);
6852 error
= nfs_open_file_find_internal(np
, noop
, &nofp
, 0, 0, 0);
6854 /* This shouldn't happen, because we passed in a new nofp to use. */
6855 printf("nfs_open_file_find_internal failed! %d\n", error
);
6857 } else if (nofp
!= newnofp
) {
6859 * Hmm... an open file struct already exists.
6860 * Mark the existing one busy and merge our open into it.
6861 * Then destroy the one we created.
6862 * Note: there's no chance of an open confict because the
6863 * open has already been granted.
6865 busyerror
= nfs_open_file_set_busy(nofp
, NULL
);
6866 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, 0);
6867 nofp
->nof_stateid
= newnofp
->nof_stateid
;
6868 if (newnofp
->nof_flags
& NFS_OPEN_FILE_POSIXLOCK
) {
6869 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
6871 nfs_open_file_clear_busy(newnofp
);
6872 nfs_open_file_destroy(newnofp
);
6875 /* mark the node as holding a create-initiated open */
6876 nofp
->nof_flags
|= NFS_OPEN_FILE_CREATE
;
6877 nofp
->nof_creator
= current_thread();
6879 if (nofp
&& !busyerror
) {
6880 nfs_open_file_clear_busy(nofp
);
6882 if (nfs_mount_state_in_use_end(nmp
, error
)) {
6883 nofp
= newnofp
= NULL
;
6888 nfs_open_owner_rele(noop
);
6894 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6900 struct componentname
*cnp
,
6901 struct vnode_attr
*vap
,
6906 struct nfsmount
*nmp
;
6907 struct nfs_vattr nvattr
;
6908 int error
= 0, create_error
= EIO
, lockerror
= ENOENT
, busyerror
= ENOENT
, status
;
6909 int nfsvers
, namedattrs
, numops
;
6910 u_int64_t xid
, savedxid
= 0;
6911 nfsnode_t np
= NULL
;
6912 vnode_t newvp
= NULL
;
6913 struct nfsm_chain nmreq
, nmrep
;
6914 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
6918 struct nfsreq rq
, *req
= &rq
;
6919 struct nfs_dulookup dul
;
6920 struct nfsreq_secinfo_args si
;
6922 nmp
= NFSTONMP(dnp
);
6923 if (nfs_mount_gone(nmp
)) {
6926 nfsvers
= nmp
->nm_vers
;
6927 namedattrs
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
);
6928 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
6932 sd
.specdata1
= sd
.specdata2
= 0;
6941 if (!VATTR_IS_ACTIVE(vap
, va_rdev
)) {
6944 sd
.specdata1
= major(vap
->va_rdev
);
6945 sd
.specdata2
= minor(vap
->va_rdev
);
6958 nfs_avoid_needless_id_setting_on_create(dnp
, vap
, ctx
);
6960 error
= busyerror
= nfs_node_set_busy(dnp
, vfs_context_thread(ctx
));
6962 nfs_dulookup_init(&dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
);
6965 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
6966 NVATTR_INIT(&nvattr
);
6967 nfsm_chain_null(&nmreq
);
6968 nfsm_chain_null(&nmrep
);
6970 // PUTFH, SAVEFH, CREATE, GETATTR(FH), RESTOREFH, GETATTR
6972 nfsm_chain_build_alloc_init(error
, &nmreq
, 66 * NFSX_UNSIGNED
);
6973 nfsm_chain_add_compound_header(error
, &nmreq
, tag
, nmp
->nm_minor_vers
, numops
);
6975 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
6976 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
6978 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
6980 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_CREATE
);
6981 nfsm_chain_add_32(error
, &nmreq
, type
);
6982 if (type
== NFLNK
) {
6983 nfsm_chain_add_name(error
, &nmreq
, link
, strlen(link
), nmp
);
6984 } else if ((type
== NFBLK
) || (type
== NFCHR
)) {
6985 nfsm_chain_add_32(error
, &nmreq
, sd
.specdata1
);
6986 nfsm_chain_add_32(error
, &nmreq
, sd
.specdata2
);
6988 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
6989 nfsm_chain_add_fattr4(error
, &nmreq
, vap
, nmp
);
6991 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
6992 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
6993 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
6994 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, NULL
);
6996 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
6998 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
6999 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
7000 nfsm_chain_build_done(error
, &nmreq
);
7001 nfsm_assert(error
, (numops
== 0), EPROTO
);
7004 error
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
7005 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &si
, 0, NULL
, &req
);
7008 nfs_dulookup_start(&dul
, dnp
, ctx
);
7010 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
7013 if ((lockerror
= nfs_node_lock(dnp
))) {
7016 nfsm_chain_skip_tag(error
, &nmrep
);
7017 nfsm_chain_get_32(error
, &nmrep
, numops
);
7018 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
7019 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
7021 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_CREATE
);
7022 nfsm_chain_check_change_info(error
, &nmrep
, dnp
);
7023 bmlen
= NFS_ATTR_BITMAP_LEN
;
7024 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
7025 /* At this point if we have no error, the object was created. */
7026 /* if we don't get attributes, then we should lookitup. */
7027 create_error
= error
;
7029 nfs_vattr_set_supported(bitmap
, vap
);
7030 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7032 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
7034 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
7035 printf("nfs: create/%s didn't return filehandle? %s\n", tag
, cnp
->cn_nameptr
);
7039 /* directory attributes: if we don't get them, make sure to invalidate */
7040 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
7041 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7043 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
7045 NATTRINVALIDATE(dnp
);
7049 nfsm_chain_cleanup(&nmreq
);
7050 nfsm_chain_cleanup(&nmrep
);
7053 if (!create_error
&& (dnp
->n_flag
& NNEGNCENTRIES
)) {
7054 dnp
->n_flag
&= ~NNEGNCENTRIES
;
7055 cache_purge_negatives(NFSTOV(dnp
));
7057 dnp
->n_flag
|= NMODIFIED
;
7058 nfs_node_unlock(dnp
);
7059 /* nfs_getattr() will check changed and purge caches */
7060 nfs_getattr(dnp
, NULL
, ctx
, NGA_CACHED
);
7063 if (!error
&& fh
.fh_len
) {
7064 /* create the vnode with the filehandle and attributes */
7066 error
= nfs_nget(NFSTOMP(dnp
), dnp
, cnp
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, NG_MAKEENTRY
, &np
);
7071 NVATTR_CLEANUP(&nvattr
);
7074 nfs_dulookup_finish(&dul
, dnp
, ctx
);
7078 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
7079 * if we can succeed in looking up the object.
7081 if ((create_error
== EEXIST
) || (!create_error
&& !newvp
)) {
7082 error
= nfs_lookitup(dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
, &np
);
7085 if (vnode_vtype(newvp
) != nfstov_type(type
, nfsvers
)) {
7091 nfs_node_clear_busy(dnp
);
7095 nfs_node_unlock(np
);
7099 nfs_node_unlock(np
);
7107 struct vnop_mknod_args
/* {
7108 * struct vnodeop_desc *a_desc;
7111 * struct componentname *a_cnp;
7112 * struct vnode_attr *a_vap;
7113 * vfs_context_t a_context;
7116 nfsnode_t np
= NULL
;
7117 struct nfsmount
*nmp
;
7120 nmp
= VTONMP(ap
->a_dvp
);
7121 if (nfs_mount_gone(nmp
)) {
7125 if (!VATTR_IS_ACTIVE(ap
->a_vap
, va_type
)) {
7128 switch (ap
->a_vap
->va_type
) {
7138 error
= nfs4_create_rpc(ap
->a_context
, VTONFS(ap
->a_dvp
), ap
->a_cnp
, ap
->a_vap
,
7139 vtonfs_type(ap
->a_vap
->va_type
, nmp
->nm_vers
), NULL
, &np
);
7141 *ap
->a_vpp
= NFSTOV(np
);
7148 struct vnop_mkdir_args
/* {
7149 * struct vnodeop_desc *a_desc;
7152 * struct componentname *a_cnp;
7153 * struct vnode_attr *a_vap;
7154 * vfs_context_t a_context;
7157 nfsnode_t np
= NULL
;
7160 error
= nfs4_create_rpc(ap
->a_context
, VTONFS(ap
->a_dvp
), ap
->a_cnp
, ap
->a_vap
,
7163 *ap
->a_vpp
= NFSTOV(np
);
7170 struct vnop_symlink_args
/* {
7171 * struct vnodeop_desc *a_desc;
7174 * struct componentname *a_cnp;
7175 * struct vnode_attr *a_vap;
7177 * vfs_context_t a_context;
7180 nfsnode_t np
= NULL
;
7183 error
= nfs4_create_rpc(ap
->a_context
, VTONFS(ap
->a_dvp
), ap
->a_cnp
, ap
->a_vap
,
7184 NFLNK
, ap
->a_target
, &np
);
7186 *ap
->a_vpp
= NFSTOV(np
);
7193 struct vnop_link_args
/* {
7194 * struct vnodeop_desc *a_desc;
7197 * struct componentname *a_cnp;
7198 * vfs_context_t a_context;
7201 vfs_context_t ctx
= ap
->a_context
;
7202 vnode_t vp
= ap
->a_vp
;
7203 vnode_t tdvp
= ap
->a_tdvp
;
7204 struct componentname
*cnp
= ap
->a_cnp
;
7205 int error
= 0, lockerror
= ENOENT
, status
;
7206 struct nfsmount
*nmp
;
7207 nfsnode_t np
= VTONFS(vp
);
7208 nfsnode_t tdnp
= VTONFS(tdvp
);
7209 int nfsvers
, numops
;
7210 u_int64_t xid
, savedxid
;
7211 struct nfsm_chain nmreq
, nmrep
;
7212 struct nfsreq_secinfo_args si
;
7214 if (vnode_mount(vp
) != vnode_mount(tdvp
)) {
7219 if (nfs_mount_gone(nmp
)) {
7222 nfsvers
= nmp
->nm_vers
;
7223 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
7226 if (tdnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
7231 * Push all writes to the server, so that the attribute cache
7232 * doesn't get "out of sync" with the server.
7233 * XXX There should be a better way!
7235 nfs_flush(np
, MNT_WAIT
, vfs_context_thread(ctx
), V_IGNORE_WRITEERR
);
7237 if ((error
= nfs_node_set_busy2(tdnp
, np
, vfs_context_thread(ctx
)))) {
7241 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
7242 nfsm_chain_null(&nmreq
);
7243 nfsm_chain_null(&nmrep
);
7245 // PUTFH(SOURCE), SAVEFH, PUTFH(DIR), LINK, GETATTR(DIR), RESTOREFH, GETATTR
7247 nfsm_chain_build_alloc_init(error
, &nmreq
, 29 * NFSX_UNSIGNED
+ cnp
->cn_namelen
);
7248 nfsm_chain_add_compound_header(error
, &nmreq
, "link", nmp
->nm_minor_vers
, numops
);
7250 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7251 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
7253 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
7255 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7256 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, tdnp
->n_fhp
, tdnp
->n_fhsize
);
7258 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LINK
);
7259 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
7261 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7262 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, tdnp
);
7264 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
7266 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7267 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
7268 nfsm_chain_build_done(error
, &nmreq
);
7269 nfsm_assert(error
, (numops
== 0), EPROTO
);
7271 error
= nfs_request(tdnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
7273 if ((lockerror
= nfs_node_lock2(tdnp
, np
))) {
7277 nfsm_chain_skip_tag(error
, &nmrep
);
7278 nfsm_chain_get_32(error
, &nmrep
, numops
);
7279 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
7280 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
7281 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
7282 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LINK
);
7283 nfsm_chain_check_change_info(error
, &nmrep
, tdnp
);
7284 /* directory attributes: if we don't get them, make sure to invalidate */
7285 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7287 nfsm_chain_loadattr(error
, &nmrep
, tdnp
, nfsvers
, &xid
);
7289 NATTRINVALIDATE(tdnp
);
7291 /* link attributes: if we don't get them, make sure to invalidate */
7292 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
7293 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7295 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
7297 NATTRINVALIDATE(np
);
7300 nfsm_chain_cleanup(&nmreq
);
7301 nfsm_chain_cleanup(&nmrep
);
7303 tdnp
->n_flag
|= NMODIFIED
;
7305 /* Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. */
7306 if (error
== EEXIST
) {
7309 if (!error
&& (tdnp
->n_flag
& NNEGNCENTRIES
)) {
7310 tdnp
->n_flag
&= ~NNEGNCENTRIES
;
7311 cache_purge_negatives(tdvp
);
7314 nfs_node_unlock2(tdnp
, np
);
7316 nfs_node_clear_busy2(tdnp
, np
);
7322 struct vnop_rmdir_args
/* {
7323 * struct vnodeop_desc *a_desc;
7326 * struct componentname *a_cnp;
7327 * vfs_context_t a_context;
7330 vfs_context_t ctx
= ap
->a_context
;
7331 vnode_t vp
= ap
->a_vp
;
7332 vnode_t dvp
= ap
->a_dvp
;
7333 struct componentname
*cnp
= ap
->a_cnp
;
7334 struct nfsmount
*nmp
;
7335 int error
= 0, namedattrs
;
7336 nfsnode_t np
= VTONFS(vp
);
7337 nfsnode_t dnp
= VTONFS(dvp
);
7338 struct nfs_dulookup dul
;
7340 if (vnode_vtype(vp
) != VDIR
) {
7344 nmp
= NFSTONMP(dnp
);
7345 if (nfs_mount_gone(nmp
)) {
7348 namedattrs
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
);
7350 if ((error
= nfs_node_set_busy2(dnp
, np
, vfs_context_thread(ctx
)))) {
7355 nfs_dulookup_init(&dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
);
7356 nfs_dulookup_start(&dul
, dnp
, ctx
);
7359 error
= nfs4_remove_rpc(dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
,
7360 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
7362 nfs_name_cache_purge(dnp
, np
, cnp
, ctx
);
7363 /* nfs_getattr() will check changed and purge caches */
7364 nfs_getattr(dnp
, NULL
, ctx
, NGA_CACHED
);
7366 nfs_dulookup_finish(&dul
, dnp
, ctx
);
7368 nfs_node_clear_busy2(dnp
, np
);
7371 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
7373 if (error
== ENOENT
) {
7378 * remove nfsnode from hash now so we can't accidentally find it
7379 * again if another object gets created with the same filehandle
7380 * before this vnode gets reclaimed
7382 lck_mtx_lock(nfs_node_hash_mutex
);
7383 if (np
->n_hflag
& NHHASHED
) {
7384 LIST_REMOVE(np
, n_hash
);
7385 np
->n_hflag
&= ~NHHASHED
;
7386 FSDBG(266, 0, np
, np
->n_flag
, 0xb1eb1e);
7388 lck_mtx_unlock(nfs_node_hash_mutex
);
7394 * NFSv4 Named Attributes
7396 * Both the extended attributes interface and the named streams interface
7397 * are backed by NFSv4 named attributes. The implementations for both use
7398 * a common set of routines in an attempt to reduce code duplication, to
7399 * increase efficiency, to increase caching of both names and data, and to
7400 * confine the complexity.
7402 * Each NFS node caches its named attribute directory's file handle.
7403 * The directory nodes for the named attribute directories are handled
7404 * exactly like regular directories (with a couple minor exceptions).
7405 * Named attribute nodes are also treated as much like regular files as
7408 * Most of the heavy lifting is done by nfs4_named_attr_get().
7412 * Get the given node's attribute directory node.
7413 * If !fetch, then only return a cached node.
7414 * Otherwise, we will attempt to fetch the node from the server.
7415 * (Note: the node should be marked busy.)
7418 nfs4_named_attr_dir_get(nfsnode_t np
, int fetch
, vfs_context_t ctx
)
7420 nfsnode_t adnp
= NULL
;
7421 struct nfsmount
*nmp
;
7422 int error
= 0, status
, numops
;
7423 struct nfsm_chain nmreq
, nmrep
;
7425 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
7427 struct nfs_vattr nvattr
;
7428 struct componentname cn
;
7429 struct nfsreq rq
, *req
= &rq
;
7430 struct nfsreq_secinfo_args si
;
7433 if (nfs_mount_gone(nmp
)) {
7436 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
7440 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
7441 NVATTR_INIT(&nvattr
);
7442 nfsm_chain_null(&nmreq
);
7443 nfsm_chain_null(&nmrep
);
7445 bzero(&cn
, sizeof(cn
));
7446 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER
, const, char *); /* "/..namedfork/" */
7447 cn
.cn_namelen
= strlen(_PATH_FORKSPECIFIER
);
7448 cn
.cn_nameiop
= LOOKUP
;
7450 if (np
->n_attrdirfh
) {
7451 // XXX can't set parent correctly (to np) yet
7452 error
= nfs_nget(nmp
->nm_mountp
, NULL
, &cn
, np
->n_attrdirfh
+ 1, *np
->n_attrdirfh
,
7453 NULL
, NULL
, RPCAUTH_UNKNOWN
, NG_NOCREATE
, &adnp
);
7463 // PUTFH, OPENATTR, GETATTR
7465 nfsm_chain_build_alloc_init(error
, &nmreq
, 22 * NFSX_UNSIGNED
);
7466 nfsm_chain_add_compound_header(error
, &nmreq
, "openattr", nmp
->nm_minor_vers
, numops
);
7468 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7469 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, np
->n_fhp
, np
->n_fhsize
);
7471 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPENATTR
);
7472 nfsm_chain_add_32(error
, &nmreq
, 0);
7474 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7475 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
7476 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
7477 nfsm_chain_add_bitmap_masked(error
, &nmreq
, bitmap
,
7478 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
7479 nfsm_chain_build_done(error
, &nmreq
);
7480 nfsm_assert(error
, (numops
== 0), EPROTO
);
7482 error
= nfs_request_async(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
7483 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &si
, 0, NULL
, &req
);
7485 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
7488 nfsm_chain_skip_tag(error
, &nmrep
);
7489 nfsm_chain_get_32(error
, &nmrep
, numops
);
7490 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
7491 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPENATTR
);
7492 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7494 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
7496 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
) || !fh
.fh_len
) {
7500 if (!np
->n_attrdirfh
|| (*np
->n_attrdirfh
!= fh
.fh_len
)) {
7501 /* (re)allocate attrdir fh buffer */
7502 if (np
->n_attrdirfh
) {
7503 FREE(np
->n_attrdirfh
, M_TEMP
);
7505 MALLOC(np
->n_attrdirfh
, u_char
*, fh
.fh_len
+ 1, M_TEMP
, M_WAITOK
);
7507 if (!np
->n_attrdirfh
) {
7511 /* cache the attrdir fh in the node */
7512 *np
->n_attrdirfh
= fh
.fh_len
;
7513 bcopy(fh
.fh_data
, np
->n_attrdirfh
+ 1, fh
.fh_len
);
7514 /* create node for attrdir */
7515 // XXX can't set parent correctly (to np) yet
7516 error
= nfs_nget(NFSTOMP(np
), NULL
, &cn
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, 0, &adnp
);
7518 NVATTR_CLEANUP(&nvattr
);
7519 nfsm_chain_cleanup(&nmreq
);
7520 nfsm_chain_cleanup(&nmrep
);
7523 /* sanity check that this node is an attribute directory */
7524 if (adnp
->n_vattr
.nva_type
!= VDIR
) {
7527 if (!(adnp
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
7530 nfs_node_unlock(adnp
);
7532 vnode_put(NFSTOV(adnp
));
7535 return error
? NULL
: adnp
;
7539 * Get the given node's named attribute node for the name given.
7541 * In an effort to increase the performance of named attribute access, we try
7542 * to reduce server requests by doing the following:
7544 * - cache the node's named attribute directory file handle in the node
7545 * - maintain a directory vnode for the attribute directory
7546 * - use name cache entries (positive and negative) to speed up lookups
7547 * - optionally open the named attribute (with the given accessMode) in the same RPC
7548 * - combine attribute directory retrieval with the lookup/open RPC
7549 * - optionally prefetch the named attribute's first block of data in the same RPC
7551 * Also, in an attempt to reduce the number of copies/variations of this code,
7552 * parts of the RPC building/processing code are conditionalized on what is
7553 * needed for any particular request (openattr, lookup vs. open, read).
7555 * Note that because we may not have the attribute directory node when we start
7556 * the lookup/open, we lock both the node and the attribute directory node.
7559 #define NFS_GET_NAMED_ATTR_CREATE 0x1
7560 #define NFS_GET_NAMED_ATTR_CREATE_GUARDED 0x2
7561 #define NFS_GET_NAMED_ATTR_TRUNCATE 0x4
7562 #define NFS_GET_NAMED_ATTR_PREFETCH 0x8
7565 nfs4_named_attr_get(
7567 struct componentname
*cnp
,
7568 uint32_t accessMode
,
7572 struct nfs_open_file
**nofpp
)
7574 struct nfsmount
*nmp
;
7575 int error
= 0, open_error
= EIO
;
7576 int inuse
= 0, adlockerror
= ENOENT
, busyerror
= ENOENT
, adbusyerror
= ENOENT
, nofpbusyerror
= ENOENT
;
7577 int create
, guarded
, prefetch
, truncate
, noopbusy
= 0;
7578 int open
, status
, numops
, hadattrdir
, negnamecache
;
7579 struct nfs_vattr nvattr
;
7580 struct vnode_attr vattr
;
7581 nfsnode_t adnp
= NULL
, anp
= NULL
;
7583 u_int64_t xid
, savedxid
= 0;
7584 struct nfsm_chain nmreq
, nmrep
;
7585 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
7586 uint32_t denyMode
, rflags
, delegation
, recall
, eof
, rlen
, retlen
;
7587 nfs_stateid stateid
, dstateid
;
7589 struct nfs_open_owner
*noop
= NULL
;
7590 struct nfs_open_file
*newnofp
= NULL
, *nofp
= NULL
;
7591 struct vnop_access_args naa
;
7596 uint32_t ace_type
, ace_flags
, ace_mask
, len
, slen
;
7597 struct kauth_ace ace
;
7598 struct nfsreq rq
, *req
= &rq
;
7599 struct nfsreq_secinfo_args si
;
7603 rflags
= delegation
= recall
= eof
= rlen
= retlen
= 0;
7606 slen
= sizeof(sbuf
);
7609 if (nfs_mount_gone(nmp
)) {
7612 NVATTR_INIT(&nvattr
);
7613 negnamecache
= !NMFLAG(nmp
, NONEGNAMECACHE
);
7614 thd
= vfs_context_thread(ctx
);
7615 cred
= vfs_context_ucred(ctx
);
7616 create
= (flags
& NFS_GET_NAMED_ATTR_CREATE
) ? NFS_OPEN_CREATE
: NFS_OPEN_NOCREATE
;
7617 guarded
= (flags
& NFS_GET_NAMED_ATTR_CREATE_GUARDED
) ? NFS_CREATE_GUARDED
: NFS_CREATE_UNCHECKED
;
7618 truncate
= (flags
& NFS_GET_NAMED_ATTR_TRUNCATE
);
7619 prefetch
= (flags
& NFS_GET_NAMED_ATTR_PREFETCH
);
7622 error
= nfs_getattr(np
, &nvattr
, ctx
, NGA_CACHED
);
7626 if (NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_NAMED_ATTR
) &&
7627 !(nvattr
.nva_flags
& NFS_FFLAG_HAS_NAMED_ATTRS
)) {
7630 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_NONE
) {
7631 /* shouldn't happen... but just be safe */
7632 printf("nfs4_named_attr_get: create with no access %s\n", cnp
->cn_nameptr
);
7633 accessMode
= NFS_OPEN_SHARE_ACCESS_READ
;
7635 open
= (accessMode
!= NFS_OPEN_SHARE_ACCESS_NONE
);
7638 * We're trying to open the file.
7639 * We'll create/open it with the given access mode,
7640 * and set NFS_OPEN_FILE_CREATE.
7642 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
7643 if (prefetch
&& guarded
) {
7644 prefetch
= 0; /* no sense prefetching data that can't be there */
7646 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1);
7652 if ((error
= busyerror
= nfs_node_set_busy(np
, vfs_context_thread(ctx
)))) {
7656 adnp
= nfs4_named_attr_dir_get(np
, 0, ctx
);
7657 hadattrdir
= (adnp
!= NULL
);
7660 /* use the special state ID because we don't have a real one to send */
7661 stateid
.seqid
= stateid
.other
[0] = stateid
.other
[1] = stateid
.other
[2] = 0;
7662 rlen
= MIN(nmp
->nm_rsize
, nmp
->nm_biosize
);
7664 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
7665 nfsm_chain_null(&nmreq
);
7666 nfsm_chain_null(&nmrep
);
7669 if ((error
= adbusyerror
= nfs_node_set_busy(adnp
, vfs_context_thread(ctx
)))) {
7672 /* nfs_getattr() will check changed and purge caches */
7673 error
= nfs_getattr(adnp
, NULL
, ctx
, NGA_CACHED
);
7675 error
= cache_lookup(NFSTOV(adnp
), &avp
, cnp
);
7678 /* negative cache entry */
7682 /* try dir buf cache lookup */
7683 error
= nfs_dir_buf_cache_lookup(adnp
, &anp
, cnp
, ctx
, 0);
7684 if (!error
&& anp
) {
7685 /* dir buf cache hit */
7689 if (error
!= -1) { /* cache miss */
7694 /* cache hit, not really an error */
7695 OSAddAtomic64(1, &nfsstats
.lookupcache_hits
);
7697 *anpp
= anp
= VTONFS(avp
);
7700 nfs_node_clear_busy(adnp
);
7701 adbusyerror
= ENOENT
;
7703 /* check for directory access */
7704 naa
.a_desc
= &vnop_access_desc
;
7705 naa
.a_vp
= NFSTOV(adnp
);
7706 naa
.a_action
= KAUTH_VNODE_SEARCH
;
7707 naa
.a_context
= ctx
;
7709 /* compute actual success/failure based on accessibility */
7710 error
= nfs_vnop_access(&naa
);
7713 /* we either found it, or hit an error */
7714 if (!error
&& guarded
) {
7715 /* found cached entry but told not to use it */
7717 vnode_put(NFSTOV(anp
));
7720 /* we're done if error or we don't need to open */
7721 if (error
|| !open
) {
7724 /* no error and we need to open... */
7730 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
7732 nfs_open_owner_rele(noop
);
7738 /* grab an open file - possibly provisional/nodeless if cache_lookup() failed */
7739 error
= nfs_open_file_find(anp
, noop
, &newnofp
, 0, 0, 1);
7740 if (!error
&& (newnofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
7741 printf("nfs4_named_attr_get: LOST %d %s\n", kauth_cred_getuid(noop
->noo_cred
), cnp
->cn_nameptr
);
7744 if (!error
&& (newnofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
7745 nfs_mount_state_in_use_end(nmp
, 0);
7746 error
= nfs4_reopen(newnofp
, vfs_context_thread(ctx
));
7747 nfs_open_file_destroy(newnofp
);
7754 error
= nfs_open_file_set_busy(newnofp
, vfs_context_thread(ctx
));
7758 nfs_open_file_destroy(newnofp
);
7765 * We already have the node. So we just need to open
7766 * it - which we may be able to do with a delegation.
7768 open_error
= error
= nfs4_open(anp
, newnofp
, accessMode
, denyMode
, ctx
);
7770 /* open succeeded, so our open file is no longer temporary */
7783 * We either don't have the attrdir or we didn't find the attribute
7784 * in the name cache, so we need to talk to the server.
7786 * If we don't have the attrdir, we'll need to ask the server for that too.
7787 * If the caller is requesting that the attribute be created, we need to
7788 * make sure the attrdir is created.
7789 * The caller may also request that the first block of an existing attribute
7790 * be retrieved at the same time.
7794 /* need to mark the open owner busy during the RPC */
7795 if ((error
= nfs_open_owner_set_busy(noop
, thd
))) {
7802 * We'd like to get updated post-open/lookup attributes for the
7803 * directory and we may also want to prefetch some data via READ.
7804 * We'd like the READ results to be last so that we can leave the
7805 * data in the mbufs until the end.
7807 * At a minimum we're sending: PUTFH, LOOKUP/OPEN, GETATTR, PUTFH, GETATTR
7811 numops
+= 3; // also sending: OPENATTR, GETATTR, OPENATTR
7814 numops
+= 4; // also sending: SAVEFH, RESTOREFH, NVERIFY, READ
7816 nfsm_chain_build_alloc_init(error
, &nmreq
, 64 * NFSX_UNSIGNED
+ cnp
->cn_namelen
);
7817 nfsm_chain_add_compound_header(error
, &nmreq
, "getnamedattr", nmp
->nm_minor_vers
, numops
);
7820 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7821 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, adnp
->n_fhp
, adnp
->n_fhsize
);
7824 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7825 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, np
->n_fhp
, np
->n_fhsize
);
7827 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPENATTR
);
7828 nfsm_chain_add_32(error
, &nmreq
, create
? 1 : 0);
7830 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7831 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
7832 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
7833 nfsm_chain_add_bitmap_masked(error
, &nmreq
, bitmap
,
7834 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
7838 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
7839 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
7840 nfsm_chain_add_32(error
, &nmreq
, accessMode
);
7841 nfsm_chain_add_32(error
, &nmreq
, denyMode
);
7842 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
);
7843 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
7844 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
));
7845 nfsm_chain_add_32(error
, &nmreq
, create
);
7847 nfsm_chain_add_32(error
, &nmreq
, guarded
);
7850 VATTR_SET(&vattr
, va_data_size
, 0);
7852 nfsm_chain_add_fattr4(error
, &nmreq
, &vattr
, nmp
);
7854 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_NULL
);
7855 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
7858 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOOKUP
);
7859 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
7862 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7863 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
7864 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
7865 nfsm_chain_add_bitmap_masked(error
, &nmreq
, bitmap
,
7866 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
7869 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
7873 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7874 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, adnp
->n_fhp
, adnp
->n_fhsize
);
7877 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7878 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, np
->n_fhp
, np
->n_fhsize
);
7880 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPENATTR
);
7881 nfsm_chain_add_32(error
, &nmreq
, 0);
7884 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7885 nfsm_chain_add_bitmap_masked(error
, &nmreq
, nfs_getattr_bitmap
,
7886 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
7889 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
7891 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_NVERIFY
);
7893 VATTR_SET(&vattr
, va_data_size
, 0);
7894 nfsm_chain_add_fattr4(error
, &nmreq
, &vattr
, nmp
);
7896 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READ
);
7897 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
7898 nfsm_chain_add_64(error
, &nmreq
, 0);
7899 nfsm_chain_add_32(error
, &nmreq
, rlen
);
7901 nfsm_chain_build_done(error
, &nmreq
);
7902 nfsm_assert(error
, (numops
== 0), EPROTO
);
7904 error
= nfs_request_async(hadattrdir
? adnp
: np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
7905 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &si
, open
? R_NOINTR
: 0, NULL
, &req
);
7907 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
7910 if (hadattrdir
&& ((adlockerror
= nfs_node_lock(adnp
)))) {
7911 error
= adlockerror
;
7914 nfsm_chain_skip_tag(error
, &nmrep
);
7915 nfsm_chain_get_32(error
, &nmrep
, numops
);
7916 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
7918 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPENATTR
);
7919 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7921 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
7923 if (NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
) && fh
.fh_len
) {
7924 if (!np
->n_attrdirfh
|| (*np
->n_attrdirfh
!= fh
.fh_len
)) {
7925 /* (re)allocate attrdir fh buffer */
7926 if (np
->n_attrdirfh
) {
7927 FREE(np
->n_attrdirfh
, M_TEMP
);
7929 MALLOC(np
->n_attrdirfh
, u_char
*, fh
.fh_len
+ 1, M_TEMP
, M_WAITOK
);
7931 if (np
->n_attrdirfh
) {
7932 /* remember the attrdir fh in the node */
7933 *np
->n_attrdirfh
= fh
.fh_len
;
7934 bcopy(fh
.fh_data
, np
->n_attrdirfh
+ 1, fh
.fh_len
);
7935 /* create busied node for attrdir */
7936 struct componentname cn
;
7937 bzero(&cn
, sizeof(cn
));
7938 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER
, const, char *); /* "/..namedfork/" */
7939 cn
.cn_namelen
= strlen(_PATH_FORKSPECIFIER
);
7940 cn
.cn_nameiop
= LOOKUP
;
7941 // XXX can't set parent correctly (to np) yet
7942 error
= nfs_nget(NFSTOMP(np
), NULL
, &cn
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, 0, &adnp
);
7945 /* set the node busy */
7946 SET(adnp
->n_flag
, NBUSY
);
7949 /* if no adnp, oh well... */
7953 NVATTR_CLEANUP(&nvattr
);
7957 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
7958 nfs_owner_seqid_increment(noop
, NULL
, error
);
7959 nfsm_chain_get_stateid(error
, &nmrep
, &newnofp
->nof_stateid
);
7960 nfsm_chain_check_change_info(error
, &nmrep
, adnp
);
7961 nfsm_chain_get_32(error
, &nmrep
, rflags
);
7962 bmlen
= NFS_ATTR_BITMAP_LEN
;
7963 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
7964 nfsm_chain_get_32(error
, &nmrep
, delegation
);
7966 switch (delegation
) {
7967 case NFS_OPEN_DELEGATE_NONE
:
7969 case NFS_OPEN_DELEGATE_READ
:
7970 case NFS_OPEN_DELEGATE_WRITE
:
7971 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
7972 nfsm_chain_get_32(error
, &nmrep
, recall
);
7973 if (delegation
== NFS_OPEN_DELEGATE_WRITE
) { // space (skip) XXX
7974 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
7976 /* if we have any trouble accepting the ACE, just invalidate it */
7977 ace_type
= ace_flags
= ace_mask
= len
= 0;
7978 nfsm_chain_get_32(error
, &nmrep
, ace_type
);
7979 nfsm_chain_get_32(error
, &nmrep
, ace_flags
);
7980 nfsm_chain_get_32(error
, &nmrep
, ace_mask
);
7981 nfsm_chain_get_32(error
, &nmrep
, len
);
7982 ace
.ace_flags
= nfs4_ace_nfstype_to_vfstype(ace_type
, &error
);
7983 ace
.ace_flags
|= nfs4_ace_nfsflags_to_vfsflags(ace_flags
);
7984 ace
.ace_rights
= nfs4_ace_nfsmask_to_vfsrights(ace_mask
);
7985 if (!error
&& (len
>= slen
)) {
7986 MALLOC(s
, char*, len
+ 1, M_TEMP
, M_WAITOK
);
7994 nfsm_chain_get_opaque(error
, &nmrep
, len
, s
);
7996 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(len
));
8000 if (nfs4_id2guid(s
, &ace
.ace_applicable
, (ace_flags
& NFS_ACE_IDENTIFIER_GROUP
))) {
8007 if (s
&& (s
!= sbuf
)) {
8016 /* At this point if we have no error, the object was created/opened. */
8019 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOOKUP
);
8021 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
8023 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
8025 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
) || !fh
.fh_len
) {
8030 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
8032 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
8034 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPENATTR
);
8036 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
8039 nfsm_chain_loadattr(error
, &nmrep
, adnp
, nmp
->nm_vers
, &xid
);
8043 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
) {
8044 newnofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
8046 if (rflags
& NFS_OPEN_RESULT_CONFIRM
) {
8048 nfs_node_unlock(adnp
);
8049 adlockerror
= ENOENT
;
8051 NVATTR_CLEANUP(&nvattr
);
8052 error
= nfs4_open_confirm_rpc(nmp
, adnp
? adnp
: np
, fh
.fh_data
, fh
.fh_len
, noop
, &newnofp
->nof_stateid
, thd
, cred
, &nvattr
, &xid
);
8055 if ((adlockerror
= nfs_node_lock(adnp
))) {
8056 error
= adlockerror
;
8062 if (open
&& adnp
&& !adlockerror
) {
8063 if (!open_error
&& (adnp
->n_flag
& NNEGNCENTRIES
)) {
8064 adnp
->n_flag
&= ~NNEGNCENTRIES
;
8065 cache_purge_negatives(NFSTOV(adnp
));
8067 adnp
->n_flag
|= NMODIFIED
;
8068 nfs_node_unlock(adnp
);
8069 adlockerror
= ENOENT
;
8070 nfs_getattr(adnp
, NULL
, ctx
, NGA_CACHED
);
8072 if (adnp
&& !adlockerror
&& (error
== ENOENT
) &&
8073 (cnp
->cn_flags
& MAKEENTRY
) && (cnp
->cn_nameiop
!= CREATE
) && negnamecache
) {
8074 /* add a negative entry in the name cache */
8075 cache_enter(NFSTOV(adnp
), NULL
, cnp
);
8076 adnp
->n_flag
|= NNEGNCENTRIES
;
8078 if (adnp
&& !adlockerror
) {
8079 nfs_node_unlock(adnp
);
8080 adlockerror
= ENOENT
;
8082 if (!error
&& !anp
&& fh
.fh_len
) {
8083 /* create the vnode with the filehandle and attributes */
8085 error
= nfs_nget(NFSTOMP(np
), adnp
, cnp
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, NG_MAKEENTRY
, &anp
);
8088 nfs_node_unlock(anp
);
8090 if (!error
&& open
) {
8091 nfs_open_file_add_open(newnofp
, accessMode
, denyMode
, 0);
8092 /* After we have a node, add our open file struct to the node */
8094 error
= nfs_open_file_find_internal(anp
, noop
, &nofp
, 0, 0, 0);
8096 /* This shouldn't happen, because we passed in a new nofp to use. */
8097 printf("nfs_open_file_find_internal failed! %d\n", error
);
8099 } else if (nofp
!= newnofp
) {
8101 * Hmm... an open file struct already exists.
8102 * Mark the existing one busy and merge our open into it.
8103 * Then destroy the one we created.
8104 * Note: there's no chance of an open confict because the
8105 * open has already been granted.
8107 nofpbusyerror
= nfs_open_file_set_busy(nofp
, NULL
);
8108 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, 0);
8109 nofp
->nof_stateid
= newnofp
->nof_stateid
;
8110 if (newnofp
->nof_flags
& NFS_OPEN_FILE_POSIXLOCK
) {
8111 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
8113 nfs_open_file_clear_busy(newnofp
);
8114 nfs_open_file_destroy(newnofp
);
8120 /* mark the node as holding a create-initiated open */
8121 nofp
->nof_flags
|= NFS_OPEN_FILE_CREATE
;
8122 nofp
->nof_creator
= current_thread();
8129 NVATTR_CLEANUP(&nvattr
);
8130 if (open
&& ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
))) {
8131 if (!error
&& anp
&& !recall
) {
8132 /* stuff the delegation state in the node */
8133 lck_mtx_lock(&anp
->n_openlock
);
8134 anp
->n_openflags
&= ~N_DELEG_MASK
;
8135 anp
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
8136 anp
->n_dstateid
= dstateid
;
8138 if (anp
->n_dlink
.tqe_next
== NFSNOLIST
) {
8139 lck_mtx_lock(&nmp
->nm_lock
);
8140 if (anp
->n_dlink
.tqe_next
== NFSNOLIST
) {
8141 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, anp
, n_dlink
);
8143 lck_mtx_unlock(&nmp
->nm_lock
);
8145 lck_mtx_unlock(&anp
->n_openlock
);
8147 /* give the delegation back */
8149 if (NFS_CMPFH(anp
, fh
.fh_data
, fh
.fh_len
)) {
8150 /* update delegation state and return it */
8151 lck_mtx_lock(&anp
->n_openlock
);
8152 anp
->n_openflags
&= ~N_DELEG_MASK
;
8153 anp
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
8154 anp
->n_dstateid
= dstateid
;
8156 if (anp
->n_dlink
.tqe_next
== NFSNOLIST
) {
8157 lck_mtx_lock(&nmp
->nm_lock
);
8158 if (anp
->n_dlink
.tqe_next
== NFSNOLIST
) {
8159 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, anp
, n_dlink
);
8161 lck_mtx_unlock(&nmp
->nm_lock
);
8163 lck_mtx_unlock(&anp
->n_openlock
);
8164 /* don't need to send a separate delegreturn for fh */
8167 /* return anp's current delegation */
8168 nfs4_delegation_return(anp
, 0, thd
, cred
);
8170 if (fh
.fh_len
) { /* return fh's delegation if it wasn't for anp */
8171 nfs4_delegreturn_rpc(nmp
, fh
.fh_data
, fh
.fh_len
, &dstateid
, 0, thd
, cred
);
8177 /* need to cleanup our temporary nofp */
8178 nfs_open_file_clear_busy(newnofp
);
8179 nfs_open_file_destroy(newnofp
);
8181 } else if (nofp
&& !nofpbusyerror
) {
8182 nfs_open_file_clear_busy(nofp
);
8183 nofpbusyerror
= ENOENT
;
8185 if (inuse
&& nfs_mount_state_in_use_end(nmp
, error
)) {
8187 nofp
= newnofp
= NULL
;
8188 rflags
= delegation
= recall
= eof
= rlen
= retlen
= 0;
8191 slen
= sizeof(sbuf
);
8192 nfsm_chain_cleanup(&nmreq
);
8193 nfsm_chain_cleanup(&nmrep
);
8195 vnode_put(NFSTOV(anp
));
8198 hadattrdir
= (adnp
!= NULL
);
8200 nfs_open_owner_clear_busy(noop
);
8207 nfs_open_owner_clear_busy(noop
);
8210 nfs_open_owner_rele(noop
);
8213 if (!error
&& prefetch
&& nmrep
.nmc_mhead
) {
8214 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
8215 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_NVERIFY
);
8216 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READ
);
8217 nfsm_chain_get_32(error
, &nmrep
, eof
);
8218 nfsm_chain_get_32(error
, &nmrep
, retlen
);
8219 if (!error
&& anp
) {
8221 * There can be one problem with doing the prefetch.
8222 * Because we don't have the node before we start the RPC, we
8223 * can't have the buffer busy while the READ is performed.
8224 * So there is a chance that other I/O occured on the same
8225 * range of data while we were performing this RPC. If that
8226 * happens, then it's possible the data we have in the READ
8227 * response is no longer up to date.
8228 * Once we have the node and the buffer, we need to make sure
8229 * that there's no chance we could be putting stale data in
8231 * So, we check if the range read is dirty or if any I/O may
8232 * have occured on it while we were performing our RPC.
8234 struct nfsbuf
*bp
= NULL
;
8238 retlen
= MIN(retlen
, rlen
);
8240 /* check if node needs size update or invalidation */
8241 if (ISSET(anp
->n_flag
, NUPDATESIZE
)) {
8242 nfs_data_update_size(anp
, 0);
8244 if (!(error
= nfs_node_lock(anp
))) {
8245 if (anp
->n_flag
& NNEEDINVALIDATE
) {
8246 anp
->n_flag
&= ~NNEEDINVALIDATE
;
8247 nfs_node_unlock(anp
);
8248 error
= nfs_vinvalbuf(NFSTOV(anp
), V_SAVE
| V_IGNORE_WRITEERR
, ctx
, 1);
8249 if (!error
) { /* lets play it safe and just drop the data */
8253 nfs_node_unlock(anp
);
8257 /* calculate page mask for the range of data read */
8258 lastpg
= (trunc_page_32(retlen
) - 1) / PAGE_SIZE
;
8259 pagemask
= ((1 << (lastpg
+ 1)) - 1);
8262 error
= nfs_buf_get(anp
, 0, nmp
->nm_biosize
, thd
, NBLK_READ
| NBLK_NOWAIT
, &bp
);
8264 /* don't save the data if dirty or potential I/O conflict */
8265 if (!error
&& bp
&& !bp
->nb_dirtyoff
&& !(bp
->nb_dirty
& pagemask
) &&
8266 timevalcmp(&anp
->n_lastio
, &now
, <)) {
8267 OSAddAtomic64(1, &nfsstats
.read_bios
);
8268 CLR(bp
->nb_flags
, (NB_DONE
| NB_ASYNC
));
8269 SET(bp
->nb_flags
, NB_READ
);
8271 nfsm_chain_get_opaque(error
, &nmrep
, retlen
, bp
->nb_data
);
8273 bp
->nb_error
= error
;
8274 SET(bp
->nb_flags
, NB_ERROR
);
8277 bp
->nb_endio
= rlen
;
8278 if ((retlen
> 0) && (bp
->nb_endio
< (int)retlen
)) {
8279 bp
->nb_endio
= retlen
;
8281 if (eof
|| (retlen
== 0)) {
8282 /* zero out the remaining data (up to EOF) */
8283 off_t rpcrem
, eofrem
, rem
;
8284 rpcrem
= (rlen
- retlen
);
8285 eofrem
= anp
->n_size
- (NBOFF(bp
) + retlen
);
8286 rem
= (rpcrem
< eofrem
) ? rpcrem
: eofrem
;
8288 bzero(bp
->nb_data
+ retlen
, rem
);
8290 } else if ((retlen
< rlen
) && !ISSET(bp
->nb_flags
, NB_ERROR
)) {
8291 /* ugh... short read ... just invalidate for now... */
8292 SET(bp
->nb_flags
, NB_INVAL
);
8295 nfs_buf_read_finish(bp
);
8296 microuptime(&anp
->n_lastio
);
8299 nfs_buf_release(bp
, 1);
8302 error
= 0; /* ignore any transient error in processing the prefetch */
8304 if (adnp
&& !adbusyerror
) {
8305 nfs_node_clear_busy(adnp
);
8306 adbusyerror
= ENOENT
;
8309 nfs_node_clear_busy(np
);
8313 vnode_put(NFSTOV(adnp
));
8315 if (error
&& *anpp
) {
8316 vnode_put(NFSTOV(*anpp
));
8319 nfsm_chain_cleanup(&nmreq
);
8320 nfsm_chain_cleanup(&nmrep
);
/*
 * Remove a named attribute.
 *
 * If the caller didn't pass in the attribute node (anp), look it up with
 * nfs4_named_attr_get(); then get the named-attribute directory for np and
 * remove the attribute node from it via nfs_vnop_remove().
 *
 * NOTE(review): this extract is lossy — the return type line, several
 * guard-clause bodies (error returns), and the function epilogue/closing
 * brace from the original are not visible below.
 */
nfs4_named_attr_remove(nfsnode_t np, nfsnode_t anp, const char *name, vfs_context_t ctx)
	nfsnode_t adnp = NULL;
	struct nfsmount *nmp;
	struct componentname cn;
	struct vnop_remove_args vra;
	int error, putanp = 0;

	/* NOTE(review): assignment of nmp (presumably NFSTONMP(np)) is not visible in this extract — confirm */
	if (nfs_mount_gone(nmp)) {

	/* set up a componentname describing the attribute to delete */
	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
	cn.cn_namelen = strlen(name);
	cn.cn_nameiop = DELETE;

	/* look up the attribute node (no open access requested) */
	error = nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_NONE,
	    0, ctx, &anp, NULL);
	/* nothing to remove if the attribute doesn't exist */
	if ((!error && !anp) || (error == ENOATTR)) {
	vnode_put(NFSTOV(anp));

	/* get the named-attribute directory for np (busy the node around the fetch) */
	if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
	adnp = nfs4_named_attr_dir_get(np, 1, ctx);
	nfs_node_clear_busy(np);

	/* remove the attribute node from the attribute directory */
	vra.a_desc = &vnop_remove_desc;
	vra.a_dvp = NFSTOV(adnp);
	vra.a_vp = NFSTOV(anp);
	vra.a_context = ctx;
	error = nfs_vnop_remove(&vra);

	/* release the references we hold */
	vnode_put(NFSTOV(adnp));
	vnode_put(NFSTOV(anp));
/*
 * NFSv4 getxattr: read a named attribute's data via a named-attribute
 * lookup/open and nfs_bioread().
 *
 * NOTE(review): the function header line (return type / name — presumably
 * nfs4_vnop_getxattr) and several guard-clause bodies were dropped from
 * this extract.
 */
struct vnop_getxattr_args /* {
                           *  struct vnodeop_desc *a_desc;
                           *  const char * a_name;
                           *  vfs_context_t a_context;
                           *  } */
	vfs_context_t ctx = ap->a_context;
	struct nfsmount *nmp;
	struct nfs_vattr nvattr;
	struct componentname cn;
	int error = 0, isrsrcfork;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
	/* the mount must support NFSv4 named attributes */
	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
	/* fetch (cached) attributes so we can consult the named-attr flags */
	error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED);
	/* if the node reports it has no named attributes, skip the lookup */
	if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
	    !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {

	/* componentname for the named-attribute lookup */
	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
	cn.cn_namelen = strlen(ap->a_name);
	cn.cn_nameiop = LOOKUP;
	cn.cn_flags = MAKEENTRY;

	/* we'll normally try to prefetch data for xattrs... the resource fork is really a stream */
	isrsrcfork = (bcmp(ap->a_name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);

	error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
	    !isrsrcfork ? NFS_GET_NAMED_ATTR_PREFETCH : 0, ctx, &anp, NULL);
	if ((!error && !anp) || (error == ENOENT)) {
	/* read the attribute data into the caller's uio */
	error = nfs_bioread(anp, ap->a_uio, 0, ctx);
	/* presumably the no-uio (size query) branch — report the attribute size; confirm against full source */
	*ap->a_size = anp->n_size;
	vnode_put(NFSTOV(anp));
/*
 * NFSv4 setxattr: create/open the named attribute, write the data, flush,
 * then close.  FinderInfo gets special treatment: the payload is copied to a
 * local buffer first, and an all-zeroes FinderInfo means "remove it".
 *
 * NOTE(review): the function header line (return type / name — presumably
 * nfs4_vnop_setxattr) and many guard bodies, returns, and closing braces
 * were dropped from this extract.
 */
struct vnop_setxattr_args /* {
                           *  struct vnodeop_desc *a_desc;
                           *  const char * a_name;
                           *  vfs_context_t a_context;
                           *  } */
	vfs_context_t ctx = ap->a_context;
	int options = ap->a_options;
	uio_t uio = ap->a_uio;
	const char *name = ap->a_name;
	struct nfsmount *nmp;
	struct componentname cn;
	nfsnode_t anp = NULL;
	int error = 0, closeerror = 0, flags, isrsrcfork, isfinderinfo, empty = 0, i;
#define FINDERINFOSIZE 32
	uint8_t finfo[FINDERINFOSIZE];
	struct nfs_open_file *nofp = NULL;
	char uio_buf[UIO_SIZEOF(1)];
	struct vnop_write_args vwa;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
	/* the mount must support NFSv4 named attributes */
	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
	/* XATTR_CREATE and XATTR_REPLACE are mutually exclusive */
	if ((options & XATTR_CREATE) && (options & XATTR_REPLACE)) {

	/* XXX limitation based on need to back up uio on short write */
	if (uio_iovcnt(uio) > 1) {
		printf("nfs4_vnop_setxattr: iovcnt > 1\n");

	/* componentname describing the attribute to create */
	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
	cn.cn_namelen = strlen(name);
	cn.cn_nameiop = CREATE;
	cn.cn_flags = MAKEENTRY;

	/* classify the attribute: FinderInfo vs resource fork vs ordinary xattr */
	isfinderinfo = (bcmp(name, XATTR_FINDERINFO_NAME, sizeof(XATTR_FINDERINFO_NAME)) == 0);
	isrsrcfork = isfinderinfo ? 0 : (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
	uio_setoffset(uio, 0);
	/* FinderInfo payload must be exactly FINDERINFOSIZE bytes */
	if (uio_resid(uio) != sizeof(finfo)) {
	/* copy FinderInfo into the local buffer so we can inspect/write it */
	error = uiomove((char*)&finfo, sizeof(finfo), uio);
	/* setting a FinderInfo of all zeroes means remove the FinderInfo */
	for (i = 0, finfop = (uint32_t*)&finfo; i < (int)(sizeof(finfo) / sizeof(uint32_t)); i++) {
	if (empty && !(options & (XATTR_CREATE | XATTR_REPLACE))) {
		error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
		if (error == ENOENT) {
	/* first, let's see if we get a create/replace error */

	/*
	 * create/open the xattr
	 *
	 * We need to make sure not to create it if XATTR_REPLACE.
	 * For all xattrs except the resource fork, we also want to
	 * truncate the xattr to remove any current data. We'll do
	 * that by setting the size to 0 on create/open.
	 */
	if (!(options & XATTR_REPLACE)) {
		flags |= NFS_GET_NAMED_ATTR_CREATE;
	if (options & XATTR_CREATE) {
		flags |= NFS_GET_NAMED_ATTR_CREATE_GUARDED;
	flags |= NFS_GET_NAMED_ATTR_TRUNCATE;

	error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
	    flags, ctx, &anp, &nofp);
	if (!error && !anp) {

	/* grab the open state from the get/create/open */
	if (nofp && !(error = nfs_open_file_set_busy(nofp, NULL))) {
		nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
		nofp->nof_creator = NULL;
		nfs_open_file_clear_busy(nofp);

	/* Setting an empty FinderInfo really means remove it, skip to the close/remove */
	if (isfinderinfo && empty) {

	/*
	 * Write the data out and flush.
	 *
	 * For FinderInfo, we've already copied the data to finfo, so do I/O from there.
	 */
	vwa.a_desc = &vnop_write_desc;
	vwa.a_vp = NFSTOV(anp);
	vwa.a_context = ctx;
	/* build a single-iovec uio over the local FinderInfo buffer */
	auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE, &uio_buf, sizeof(uio_buf));
	uio_addiov(auio, (uintptr_t)&finfo, sizeof(finfo));
	} else if (uio_resid(uio) > 0) {
	error = nfs_vnop_write(&vwa);
	/* flush the written data to the server before closing */
	error = nfs_flush(anp, MNT_WAIT, vfs_context_thread(ctx), 0);

	/* Close the xattr. */
	int busyerror = nfs_open_file_set_busy(nofp, NULL);
	closeerror = nfs_close(anp, nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, ctx);
	nfs_open_file_clear_busy(nofp);

	if (!error && isfinderinfo && empty) { /* Setting an empty FinderInfo really means remove it */
		error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
		if (error == ENOENT) {
	vnode_put(NFSTOV(anp));
	/* NOTE(review): body of this final ENOENT check not visible — presumably maps to an xattr error code; confirm */
	if (error == ENOENT) {
/*
 * NFSv4 removexattr: remove a named attribute by name via
 * nfs4_named_attr_remove().
 *
 * NOTE(review): guard bodies (error returns), the ENOENT-mapping body, and
 * the closing brace were dropped from this extract.
 */
nfs4_vnop_removexattr(
	struct vnop_removexattr_args /* {
                                      *  struct vnodeop_desc *a_desc;
                                      *  const char * a_name;
                                      *  vfs_context_t a_context;
                                      *  } */
	struct nfsmount *nmp = VTONMP(ap->a_vp);

	if (nfs_mount_gone(nmp)) {
	/* the mount must support NFSv4 named attributes */
	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {

	/* no attribute node passed — nfs4_named_attr_remove() will look it up */
	error = nfs4_named_attr_remove(VTONFS(ap->a_vp), NULL, ap->a_name, ap->a_context);
	/* NOTE(review): presumably maps ENOENT to ENOATTR for the xattr API — body not visible; confirm */
	if (error == ENOENT) {
/*
 * NFSv4 listxattr: enumerate the named-attribute directory with the NFS
 * readdir buffer machinery, copying out (or just counting) each entry name
 * that isn't a protected xattr.
 *
 * NOTE(review): guard bodies, returns, several closing braces, and parts of
 * the loop control were dropped from this extract.
 */
nfs4_vnop_listxattr(
	struct vnop_listxattr_args /* {
                                    *  struct vnodeop_desc *a_desc;
                                    *  vfs_context_t a_context;
                                    *  } */
	vfs_context_t ctx = ap->a_context;
	nfsnode_t np = VTONFS(ap->a_vp);
	uio_t uio = ap->a_uio;
	nfsnode_t adnp = NULL;
	struct nfsmount *nmp;
	struct nfs_vattr nvattr;
	uint64_t cookie, nextcookie, lbn = 0;
	struct nfsbuf *bp = NULL;
	struct nfs_dir_buf_header *ndbhp;
	struct direntry *dp;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
	/* the mount must support NFSv4 named attributes */
	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {

	error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED);
	/* if the node reports no named attributes, the list is empty */
	if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
	    !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {

	/* get the named-attribute directory (busy np around the fetch) */
	if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
	adnp = nfs4_named_attr_dir_get(np, 1, ctx);
	nfs_node_clear_busy(np);

	if ((error = nfs_node_lock(adnp))) {

	/* invalidate the attrdir's buffers first if that's been requested */
	if (adnp->n_flag & NNEEDINVALIDATE) {
		adnp->n_flag &= ~NNEEDINVALIDATE;
		nfs_node_unlock(adnp);
		error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
		error = nfs_node_lock(adnp);

	/*
	 * check for need to invalidate when (re)starting at beginning
	 */
	if (adnp->n_flag & NMODIFIED) {
		nfs_node_unlock(adnp);
		if ((error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1))) {
	nfs_node_unlock(adnp);
	/* nfs_getattr() will check changed and purge caches */
	if ((error = nfs_getattr(adnp, &nvattr, ctx, NGA_UNCACHED))) {

	/* caller's buffer already exhausted — nothing more to copy */
	if (uio && (uio_resid(uio) == 0)) {

	nextcookie = lbn = 0;
	/* walk the directory buffers until done or error */
	while (!error && !done) {
		OSAddAtomic64(1, &nfsstats.biocache_readdirs);
		cookie = nextcookie;
		error = nfs_buf_get(adnp, lbn, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
		ndbhp = (struct nfs_dir_buf_header *)bp->nb_data;
		if (!ISSET(bp->nb_flags, NB_CACHE) || !ISSET(ndbhp->ndbh_flags, NDB_FULL)) {
			if (!ISSET(bp->nb_flags, NB_CACHE)) { /* initialize the buffer */
				ndbhp->ndbh_flags = 0;
				ndbhp->ndbh_count = 0;
				ndbhp->ndbh_entry_end = sizeof(*ndbhp);
				ndbhp->ndbh_ncgen = adnp->n_ncgen;
			error = nfs_buf_readdir(bp, ctx);
			/* buffer was dropped mid-read; retry */
			if (error == NFSERR_DIRBUFDROPPED) {
				nfs_buf_release(bp, 1);
			/* real (non-transient) error: dump the directory caches */
			if (error && (error != ENXIO) && (error != ETIMEDOUT) && (error != EINTR) && (error != ERESTART)) {
				if (!nfs_node_lock(adnp)) {
					nfs_node_unlock(adnp);
				nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
				if (error == NFSERR_BAD_COOKIE) {

		/* go through all the entries copying/counting */
		dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
		for (i = 0; i < ndbhp->ndbh_count; i++) {
			if (!xattr_protected(dp->d_name)) {
				/* size-only pass: account for name + NUL */
				*ap->a_size += dp->d_namlen + 1;
			} else if (uio_resid(uio) < (dp->d_namlen + 1)) {
			error = uiomove(dp->d_name, dp->d_namlen + 1, uio);
			if (error && (error != EFAULT)) {
			nextcookie = dp->d_seekoff;
			dp = NFS_DIRENTRY_NEXT(dp);

		if (i == ndbhp->ndbh_count) {
			/* hit end of buffer, move to next buffer */
			/* if we also hit EOF, we're done */
			if (ISSET(ndbhp->ndbh_flags, NDB_EOF)) {
		/* defensive check against a server that never advances the cookie */
		if (!error && !done && (nextcookie == cookie)) {
			printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie, i, ndbhp->ndbh_count);
		nfs_buf_release(bp, 1);
	vnode_put(NFSTOV(adnp));
/*
 * NFSv4 getnamedstream: look up a named stream (named attribute) and, if
 * found, return its vnode in *ap->a_svpp.
 *
 * NOTE(review): guard bodies, returns, and the closing brace were dropped
 * from this extract.
 */
nfs4_vnop_getnamedstream(
	struct vnop_getnamedstream_args /* {
                                         *  struct vnodeop_desc *a_desc;
                                         *  const char *a_name;
                                         *  enum nsoperation a_operation;
                                         *  vfs_context_t a_context;
                                         *  } */
	vfs_context_t ctx = ap->a_context;
	struct nfsmount *nmp;
	struct nfs_vattr nvattr;
	struct componentname cn;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
	/* the mount must support NFSv4 named attributes */
	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {

	error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED);
	/* if the node reports no named attributes, there is no stream to get */
	if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
	    !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {

	/* componentname for the named-stream lookup */
	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
	cn.cn_namelen = strlen(ap->a_name);
	cn.cn_nameiop = LOOKUP;
	cn.cn_flags = MAKEENTRY;

	error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
	    0, ctx, &anp, NULL);
	if ((!error && !anp) || (error == ENOENT)) {
	if (!error && anp) {
		/* hand the stream's vnode back to the caller (reference transfers) */
		*ap->a_svpp = NFSTOV(anp);
	vnode_put(NFSTOV(anp));
/*
 * NFSv4 makenamedstream: create (or open existing) a named stream as a
 * named attribute, returning its vnode in *ap->a_svpp.
 *
 * NOTE(review): guard bodies, returns, and the closing brace were dropped
 * from this extract.
 */
nfs4_vnop_makenamedstream(
	struct vnop_makenamedstream_args /* {
                                          *  struct vnodeop_desc *a_desc;
                                          *  const char *a_name;
                                          *  vfs_context_t a_context;
                                          *  } */
	vfs_context_t ctx = ap->a_context;
	struct nfsmount *nmp;
	struct componentname cn;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
	/* the mount must support NFSv4 named attributes */
	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {

	/* componentname for the named-stream create */
	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
	cn.cn_namelen = strlen(ap->a_name);
	cn.cn_nameiop = CREATE;
	cn.cn_flags = MAKEENTRY;

	/* open with both read and write access, creating if necessary */
	error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
	    NFS_GET_NAMED_ATTR_CREATE, ctx, &anp, NULL);
	if ((!error && !anp) || (error == ENOENT)) {
	if (!error && anp) {
		/* hand the new stream's vnode back to the caller (reference transfers) */
		*ap->a_svpp = NFSTOV(anp);
	vnode_put(NFSTOV(anp));
/*
 * NFSv4 removenamedstream: delete a named stream by delegating to
 * nfs4_named_attr_remove() with the stream node already in hand.
 *
 * NOTE(review): guard bodies (error returns) and the closing brace were
 * dropped from this extract.
 */
nfs4_vnop_removenamedstream(
	struct vnop_removenamedstream_args /* {
                                            *  struct vnodeop_desc *a_desc;
                                            *  const char *a_name;
                                            *  vfs_context_t a_context;
                                            *  } */
	struct nfsmount *nmp = VTONMP(ap->a_vp);
	nfsnode_t np = ap->a_vp ? VTONFS(ap->a_vp) : NULL;
	nfsnode_t anp = ap->a_svp ? VTONFS(ap->a_svp) : NULL;

	if (nfs_mount_gone(nmp)) {
	/*
	 * Given that a_svp is a named stream, checking for
	 * named attribute support is kinda pointless.
	 */
	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {

	return nfs4_named_attr_remove(np, anp, ap->a_name, ap->a_context);
8954 #endif /* CONFIG_NFS4 */