2 * Copyright (c) 2006-2017 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
30 * vnode op calls for NFS version 4
32 #include <sys/param.h>
33 #include <sys/kernel.h>
34 #include <sys/systm.h>
35 #include <sys/resourcevar.h>
36 #include <sys/proc_internal.h>
37 #include <sys/kauth.h>
38 #include <sys/mount_internal.h>
39 #include <sys/malloc.h>
40 #include <sys/kpi_mbuf.h>
42 #include <sys/vnode_internal.h>
43 #include <sys/dirent.h>
44 #include <sys/fcntl.h>
45 #include <sys/lockf.h>
46 #include <sys/ubc_internal.h>
48 #include <sys/signalvar.h>
49 #include <sys/uio_internal.h>
50 #include <sys/xattr.h>
51 #include <sys/paths.h>
53 #include <vfs/vfs_support.h>
58 #include <kern/clock.h>
59 #include <libkern/OSAtomic.h>
61 #include <miscfs/fifofs/fifo.h>
62 #include <miscfs/specfs/specdev.h>
64 #include <nfs/rpcv2.h>
65 #include <nfs/nfsproto.h>
67 #include <nfs/nfsnode.h>
68 #include <nfs/nfs_gss.h>
69 #include <nfs/nfsmount.h>
70 #include <nfs/nfs_lock.h>
71 #include <nfs/xdr_subs.h>
72 #include <nfs/nfsm_subs.h>
75 #include <netinet/in.h>
76 #include <netinet/in_var.h>
77 #include <vm/vm_kern.h>
79 #include <kern/task.h>
80 #include <kern/sched_prim.h>
/*
 * NOTE(review): this file is a lossy extraction — the embedded source-line
 * numbers skip (e.g. 95 -> 98), so statements below are fragments and some
 * lines (returns, braces, declarations of xid/uid/now) are missing entirely.
 * Comments here describe only what the visible fragments establish.
 *
 * nfs4_access_rpc: sends a PUTFH + ACCESS + GETATTR compound for node "np",
 * requesting the access bits in *access; on reply it compensates for servers
 * that under-report DELETE support, caches the result in the node's
 * per-uid access cache slots, and passes the cached access back via *access.
 */
83 nfs4_access_rpc(nfsnode_t np
, u_int32_t
*access
, int rpcflags
, vfs_context_t ctx
)
85 int error
= 0, lockerror
= ENOENT
, status
, numops
, slot
;
87 struct nfsm_chain nmreq
, nmrep
;
89 uint32_t access_result
= 0, supported
= 0, missing
;
90 struct nfsmount
*nmp
= NFSTONMP(np
);
91 int nfsvers
= nmp
->nm_vers
;
93 struct nfsreq_secinfo_args si
;
/* referral triggers are handled elsewhere — body of this if was lost in extraction */
95 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
98 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
99 nfsm_chain_null(&nmreq
);
100 nfsm_chain_null(&nmrep
);
/* build the request: PUTFH(np) + ACCESS(*access) + GETATTR(standard bitmap) */
102 // PUTFH, ACCESS, GETATTR
104 nfsm_chain_build_alloc_init(error
, &nmreq
, 17 * NFSX_UNSIGNED
);
105 nfsm_chain_add_compound_header(error
, &nmreq
, "access", nmp
->nm_minor_vers
, numops
);
107 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
108 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
110 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_ACCESS
);
111 nfsm_chain_add_32(error
, &nmreq
, *access
);
113 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
114 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
115 nfsm_chain_build_done(error
, &nmreq
);
116 nfsm_assert(error
, (numops
== 0), EPROTO
);
/* send the compound; xid is declared in a line lost from this extraction */
118 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
119 vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
120 &si
, rpcflags
, &nmrep
, &xid
, &status
);
122 if ((lockerror
= nfs_node_lock(np
)))
/* parse reply: tag, op count, then each op result in request order */
124 nfsm_chain_skip_tag(error
, &nmrep
);
125 nfsm_chain_get_32(error
, &nmrep
, numops
);
126 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
127 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_ACCESS
);
128 nfsm_chain_get_32(error
, &nmrep
, supported
);
129 nfsm_chain_get_32(error
, &nmrep
, access_result
);
131 if ((missing
= (*access
& ~supported
))) {
132 /* missing support for something(s) we wanted */
133 if (missing
& NFS_ACCESS_DELETE
) {
135 * If the server doesn't report DELETE (possible
136 * on UNIX systems), we'll assume that it is OK
137 * and just let any subsequent delete action fail
138 * if it really isn't deletable.
140 access_result
|= NFS_ACCESS_DELETE
;
143 /* ".zfs" subdirectories may erroneously give a denied answer for modify/delete */
144 if (nfs_access_dotzfs
) {
145 vnode_t dvp
= NULLVP
;
146 if (np
->n_flag
& NISDOTZFSCHILD
) /* may be able to create/delete snapshot dirs */
147 access_result
|= (NFS_ACCESS_MODIFY
|NFS_ACCESS_EXTEND
|NFS_ACCESS_DELETE
);
148 else if (((dvp
= vnode_getparent(NFSTOV(np
))) != NULLVP
) && (VTONFS(dvp
)->n_flag
& NISDOTZFSCHILD
))
149 access_result
|= NFS_ACCESS_DELETE
; /* may be able to delete snapshot dirs */
153 /* Some servers report DELETE support but erroneously give a denied answer. */
154 if (nfs_access_delete
&& (*access
& NFS_ACCESS_DELETE
) && !(access_result
& NFS_ACCESS_DELETE
))
155 access_result
|= NFS_ACCESS_DELETE
;
156 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
157 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
160 if (nfs_mount_gone(nmp
)) {
/* pick the uid used to index the access cache; kerberized mounts key on audit session id */
165 if (auth_is_kerberized(np
->n_auth
) || auth_is_kerberized(nmp
->nm_auth
)) {
166 uid
= nfs_cred_getasid2uid(vfs_context_ucred(ctx
));
168 uid
= kauth_cred_getuid(vfs_context_ucred(ctx
));
/* cache result: record uid, timestamp ("now" — declaration lost), and bits in slot */
170 slot
= nfs_node_access_slot(np
, uid
, 1);
171 np
->n_accessuid
[slot
] = uid
;
173 np
->n_accessstamp
[slot
] = now
.tv_sec
;
174 np
->n_access
[slot
] = access_result
;
176 /* pass back the access returned with this request */
177 *access
= np
->n_access
[slot
];
181 nfsm_chain_cleanup(&nmreq
);
182 nfsm_chain_cleanup(&nmrep
);
/*
 * NOTE(review): the function signature preceding these parameters was lost in
 * extraction; the compound tag below is "getattr", so this is presumably the
 * NFSv4 getattr RPC — TODO confirm against the original file.
 *
 * Visible flow: sends PUTFH(fhp) + GETATTR for either a mount (mp) or a node
 * (np); optionally requests the ACL when the caller asked (NGA_ACL) and the
 * filesystem advertises ACL support; parses the returned fattr into *nvap.
 */
194 struct nfs_vattr
*nvap
,
197 struct nfsmount
*nmp
= mp
? VFSTONFS(mp
) : NFSTONMP(np
);
198 int error
= 0, status
, nfsvers
, numops
, rpcflags
= 0, acls
;
199 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
200 struct nfsm_chain nmreq
, nmrep
;
201 struct nfsreq_secinfo_args si
;
203 if (nfs_mount_gone(nmp
))
205 nfsvers
= nmp
->nm_vers
;
/* acls != 0 when the server filesystem advertises ACL support */
206 acls
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_ACL
);
/* referral trigger nodes get synthesized attrs; no RPC needed for them */
208 if (np
&& (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)) {
209 nfs4_default_attrs_for_referral_trigger(VTONFS(np
->n_parent
), NULL
, 0, nvap
, NULL
);
213 if (flags
& NGA_MONITOR
) /* vnode monitor requests should be soft */
214 rpcflags
= R_RECOVER
;
216 if (flags
& NGA_SOFT
) /* Return ETIMEDOUT if server not responding */
219 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
220 nfsm_chain_null(&nmreq
);
221 nfsm_chain_null(&nmrep
);
/* build PUTFH + GETATTR; add ACL to the bitmap only if requested and supported */
225 nfsm_chain_build_alloc_init(error
, &nmreq
, 15 * NFSX_UNSIGNED
);
226 nfsm_chain_add_compound_header(error
, &nmreq
, "getattr", nmp
->nm_minor_vers
, numops
);
228 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
229 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, fhp
, fhsize
);
231 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
232 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
233 if ((flags
& NGA_ACL
) && acls
)
234 NFS_BITMAP_SET(bitmap
, NFS_FATTR_ACL
);
235 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
236 nfsm_chain_build_done(error
, &nmreq
);
237 nfsm_assert(error
, (numops
== 0), EPROTO
);
239 error
= nfs_request2(np
, mp
, &nmreq
, NFSPROC4_COMPOUND
,
240 vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
241 NULL
, rpcflags
, &nmrep
, xidp
, &status
);
/* parse reply and decode the fattr into the caller's nvap */
243 nfsm_chain_skip_tag(error
, &nmrep
);
244 nfsm_chain_get_32(error
, &nmrep
, numops
);
245 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
246 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
248 error
= nfs4_parsefattr(&nmrep
, NULL
, nvap
, NULL
, NULL
, NULL
);
250 if ((flags
& NGA_ACL
) && acls
&& !NFS_BITMAP_ISSET(nvap
->nva_bitmap
, NFS_FATTR_ACL
)) {
251 /* we asked for the ACL but didn't get one... assume there isn't one */
252 NFS_BITMAP_SET(nvap
->nva_bitmap
, NFS_FATTR_ACL
);
253 nvap
->nva_acl
= NULL
;
256 nfsm_chain_cleanup(&nmreq
);
257 nfsm_chain_cleanup(&nmrep
);
/*
 * nfs4_readlink_rpc: reads the target of symlink "np" into buf (capacity
 * *buflenp) via a PUTFH + GETATTR + READLINK compound.
 *
 * NOTE(review): interior lines are missing from this extraction (return
 * statements, the truncation handling after the len >= *buflenp check),
 * so the error paths cannot be fully read from what is visible here.
 */
262 nfs4_readlink_rpc(nfsnode_t np
, char *buf
, uint32_t *buflenp
, vfs_context_t ctx
)
264 struct nfsmount
*nmp
;
265 int error
= 0, lockerror
= ENOENT
, status
, numops
;
268 struct nfsm_chain nmreq
, nmrep
;
269 struct nfsreq_secinfo_args si
;
272 if (nfs_mount_gone(nmp
))
274 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
276 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
277 nfsm_chain_null(&nmreq
);
278 nfsm_chain_null(&nmrep
);
280 // PUTFH, GETATTR, READLINK
282 nfsm_chain_build_alloc_init(error
, &nmreq
, 16 * NFSX_UNSIGNED
);
283 nfsm_chain_add_compound_header(error
, &nmreq
, "readlink", nmp
->nm_minor_vers
, numops
);
285 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
286 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
288 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
289 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
291 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READLINK
);
292 nfsm_chain_build_done(error
, &nmreq
);
293 nfsm_assert(error
, (numops
== 0), EPROTO
);
295 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
297 if ((lockerror
= nfs_node_lock(np
)))
/* reply: verify ops in order, refresh cached attrs, then pull the link text */
299 nfsm_chain_skip_tag(error
, &nmrep
);
300 nfsm_chain_get_32(error
, &nmrep
, numops
);
301 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
302 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
303 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, &xid
);
304 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READLINK
);
305 nfsm_chain_get_32(error
, &nmrep
, len
);
/* link longer than caller's buffer: clamping logic lost in extraction */
307 if (len
>= *buflenp
) {
308 if (np
->n_size
&& (np
->n_size
< *buflenp
))
313 nfsm_chain_get_opaque(error
, &nmrep
, len
, buf
);
319 nfsm_chain_cleanup(&nmreq
);
320 nfsm_chain_cleanup(&nmrep
);
/*
 * NOTE(review): this function's name and leading parameters were lost in
 * extraction; the compound tag is "read" and it issues the request with
 * nfs_request_async(), so this is presumably the async read-RPC starter
 * (paired with nfs4_read_rpc_async_finish below) — TODO confirm.
 *
 * Visible flow: builds PUTFH + READ(stateid, offset, len) + GETATTR and
 * submits it asynchronously; *reqp receives the in-flight request handle.
 */
331 struct nfsreq_cbinfo
*cb
,
332 struct nfsreq
**reqp
)
334 struct nfsmount
*nmp
;
335 int error
= 0, nfsvers
, numops
;
337 struct nfsm_chain nmreq
;
338 struct nfsreq_secinfo_args si
;
341 if (nfs_mount_gone(nmp
))
343 nfsvers
= nmp
->nm_vers
;
344 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
347 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
348 nfsm_chain_null(&nmreq
);
350 // PUTFH, READ, GETATTR
352 nfsm_chain_build_alloc_init(error
, &nmreq
, 22 * NFSX_UNSIGNED
);
353 nfsm_chain_add_compound_header(error
, &nmreq
, "read", nmp
->nm_minor_vers
, numops
);
355 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
356 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
358 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READ
);
/* stateid for this open/lock owner ("stateid" declaration lost in extraction) */
359 nfs_get_stateid(np
, thd
, cred
, &stateid
);
360 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
361 nfsm_chain_add_64(error
, &nmreq
, offset
);
362 nfsm_chain_add_32(error
, &nmreq
, len
);
364 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
365 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
366 nfsm_chain_build_done(error
, &nmreq
);
367 nfsm_assert(error
, (numops
== 0), EPROTO
);
369 error
= nfs_request_async(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, 0, cb
, reqp
);
371 nfsm_chain_cleanup(&nmreq
);
/*
 * nfs4_read_rpc_async_finish: completes an async READ started above.
 * Waits for the reply, parses PUTFH/READ results (eof flag and returned
 * length), copies up to *lenp bytes into the caller's uio, then refreshes
 * cached attributes from the trailing GETATTR.
 *
 * NOTE(review): parameters and several interior lines (returns, eof/lenp
 * output plumbing) were lost in extraction.
 */
376 nfs4_read_rpc_async_finish(
383 struct nfsmount
*nmp
;
384 int error
= 0, lockerror
, nfsvers
, numops
, status
, eof
= 0;
387 struct nfsm_chain nmrep
;
/* mount vanished: cancel the outstanding async request rather than finish it */
390 if (nfs_mount_gone(nmp
)) {
391 nfs_request_async_cancel(req
);
394 nfsvers
= nmp
->nm_vers
;
396 nfsm_chain_null(&nmrep
);
398 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
399 if (error
== EINPROGRESS
) /* async request restarted */
402 if ((lockerror
= nfs_node_lock(np
)))
404 nfsm_chain_skip_tag(error
, &nmrep
);
405 nfsm_chain_get_32(error
, &nmrep
, numops
);
406 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
407 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READ
);
408 nfsm_chain_get_32(error
, &nmrep
, eof
);
409 nfsm_chain_get_32(error
, &nmrep
, retlen
);
/* never hand the caller more than it asked for */
411 *lenp
= MIN(retlen
, *lenp
);
412 error
= nfsm_chain_get_uio(&nmrep
, *lenp
, uio
);
414 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
415 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
423 nfsm_chain_cleanup(&nmrep
);
/* attribute-fork nodes track last-I/O time ("now"-style stamp via microuptime) */
424 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)
425 microuptime(&np
->n_lastio
);
/*
 * nfs4_write_rpc_async: starts an async NFSv4 WRITE.
 * Builds PUTFH + WRITE(stateid, offset, iomode, len, data-from-uio) + GETATTR
 * and submits it via nfs_request_async(); *reqp receives the request handle.
 * For MNT_ASYNC mounts (when nfs_allow_async is set) sync writes are demoted
 * to UNSTABLE since the caller isn't waiting for stability anyway.
 *
 * NOTE(review): parameters and some interior lines lost in extraction.
 */
430 nfs4_write_rpc_async(
437 struct nfsreq_cbinfo
*cb
,
438 struct nfsreq
**reqp
)
440 struct nfsmount
*nmp
;
442 int error
= 0, nfsvers
, numops
;
444 struct nfsm_chain nmreq
;
445 struct nfsreq_secinfo_args si
;
448 if (nfs_mount_gone(nmp
))
450 nfsvers
= nmp
->nm_vers
;
451 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
454 /* for async mounts, don't bother sending sync write requests */
455 if ((iomode
!= NFS_WRITE_UNSTABLE
) && nfs_allow_async
&&
456 ((mp
= NFSTOMP(np
))) && (vfs_flags(mp
) & MNT_ASYNC
))
457 iomode
= NFS_WRITE_UNSTABLE
;
459 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
460 nfsm_chain_null(&nmreq
);
462 // PUTFH, WRITE, GETATTR
464 nfsm_chain_build_alloc_init(error
, &nmreq
, 25 * NFSX_UNSIGNED
+ len
);
465 nfsm_chain_add_compound_header(error
, &nmreq
, "write", nmp
->nm_minor_vers
, numops
);
467 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
468 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
470 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_WRITE
);
471 nfs_get_stateid(np
, thd
, cred
, &stateid
);
472 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
473 nfsm_chain_add_64(error
, &nmreq
, uio_offset(uio
));
474 nfsm_chain_add_32(error
, &nmreq
, iomode
);
475 nfsm_chain_add_32(error
, &nmreq
, len
);
/* copy the write payload out of the caller's uio into the request */
477 error
= nfsm_chain_add_uio(&nmreq
, uio
, len
);
479 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
480 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
481 nfsm_chain_build_done(error
, &nmreq
);
482 nfsm_assert(error
, (numops
== 0), EPROTO
);
485 error
= nfs_request_async(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, 0, cb
, reqp
);
487 nfsm_chain_cleanup(&nmreq
);
/*
 * nfs4_write_rpc_async_finish: completes an async WRITE.
 * Parses the committed level and write verifier from the reply, records
 * (or notices a change of) the server's write verifier under nm_lock —
 * a changed verifier means the server rebooted and uncommitted data may
 * need to be rewritten — then refreshes cached attributes and reports the
 * final commit level via *iomodep.
 *
 * NOTE(review): parameters and several interior lines lost in extraction.
 */
492 nfs4_write_rpc_async_finish(
499 struct nfsmount
*nmp
;
500 int error
= 0, lockerror
= ENOENT
, nfsvers
, numops
, status
;
501 int committed
= NFS_WRITE_FILESYNC
;
503 u_int64_t xid
, wverf
;
505 struct nfsm_chain nmrep
;
508 if (nfs_mount_gone(nmp
)) {
509 nfs_request_async_cancel(req
);
512 nfsvers
= nmp
->nm_vers
;
514 nfsm_chain_null(&nmrep
);
516 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
517 if (error
== EINPROGRESS
) /* async request restarted */
520 if (nfs_mount_gone(nmp
))
522 if (!error
&& (lockerror
= nfs_node_lock(np
)))
524 nfsm_chain_skip_tag(error
, &nmrep
);
525 nfsm_chain_get_32(error
, &nmrep
, numops
);
526 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
527 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_WRITE
);
528 nfsm_chain_get_32(error
, &nmrep
, rlen
);
533 nfsm_chain_get_32(error
, &nmrep
, committed
);
534 nfsm_chain_get_64(error
, &nmrep
, wverf
);
/* track the server's write verifier; a mismatch implies a server restart */
538 lck_mtx_lock(&nmp
->nm_lock
);
539 if (!(nmp
->nm_state
& NFSSTA_HASWRITEVERF
)) {
540 nmp
->nm_verf
= wverf
;
541 nmp
->nm_state
|= NFSSTA_HASWRITEVERF
;
542 } else if (nmp
->nm_verf
!= wverf
) {
543 nmp
->nm_verf
= wverf
;
545 lck_mtx_unlock(&nmp
->nm_lock
);
546 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
547 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
551 nfsm_chain_cleanup(&nmrep
);
/* async mounts don't care about stability: report FILESYNC to the caller */
552 if ((committed
!= NFS_WRITE_FILESYNC
) && nfs_allow_async
&&
553 ((mp
= NFSTOMP(np
))) && (vfs_flags(mp
) & MNT_ASYNC
))
554 committed
= NFS_WRITE_FILESYNC
;
555 *iomodep
= committed
;
556 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)
557 microuptime(&np
->n_lastio
);
/*
 * NOTE(review): the function signature was lost in extraction; the compound
 * tag is "remove", so this is presumably the NFSv4 remove RPC (directory
 * node dnp, entry name/namelen) — TODO confirm.
 *
 * Visible flow: PUTFH(dnp) + REMOVE(name) + GETATTR(dnp); the REMOVE op's
 * own status is preserved separately in remove_error so a later parse
 * failure doesn't mask it, and NFSERR_GRACE triggers a short sleep
 * (presumably before a retry — the retry itself is not visible here).
 */
569 int error
= 0, lockerror
= ENOENT
, remove_error
= 0, status
;
570 struct nfsmount
*nmp
;
573 struct nfsm_chain nmreq
, nmrep
;
574 struct nfsreq_secinfo_args si
;
577 if (nfs_mount_gone(nmp
))
579 nfsvers
= nmp
->nm_vers
;
580 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
582 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
584 nfsm_chain_null(&nmreq
);
585 nfsm_chain_null(&nmrep
);
587 // PUTFH, REMOVE, GETATTR
589 nfsm_chain_build_alloc_init(error
, &nmreq
, 17 * NFSX_UNSIGNED
+ namelen
);
590 nfsm_chain_add_compound_header(error
, &nmreq
, "remove", nmp
->nm_minor_vers
, numops
);
592 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
593 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
595 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_REMOVE
);
596 nfsm_chain_add_name(error
, &nmreq
, name
, namelen
, nmp
);
598 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
599 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
600 nfsm_chain_build_done(error
, &nmreq
);
601 nfsm_assert(error
, (numops
== 0), EPROTO
);
604 error
= nfs_request2(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, 0, &nmrep
, &xid
, &status
);
606 if ((lockerror
= nfs_node_lock(dnp
)))
608 nfsm_chain_skip_tag(error
, &nmrep
);
609 nfsm_chain_get_32(error
, &nmrep
, numops
);
610 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
611 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_REMOVE
);
/* snapshot the REMOVE op's result before later ops can overwrite "error" */
612 remove_error
= error
;
613 nfsm_chain_check_change_info(error
, &nmrep
, dnp
);
614 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
615 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
/* failed to refresh dir attrs: invalidate the cache so they're refetched */
616 if (error
&& !lockerror
)
617 NATTRINVALIDATE(dnp
);
619 nfsm_chain_cleanup(&nmreq
);
620 nfsm_chain_cleanup(&nmrep
);
623 dnp
->n_flag
|= NMODIFIED
;
624 nfs_node_unlock(dnp
);
/* server in its grace period: back off briefly (2*hz ticks) */
626 if (error
== NFSERR_GRACE
) {
627 tsleep(&nmp
->nm_state
, (PZERO
-1), "nfsgrace", 2*hz
);
631 return (remove_error
);
/*
 * NOTE(review): the function signature was lost in extraction; the compound
 * tag is "rename", so this is presumably the NFSv4 rename RPC (from-dir
 * fdnp / fnameptr, to-dir tdnp / tnameptr) — TODO confirm.
 *
 * Visible flow: PUTFH(from), SAVEFH, PUTFH(to), RENAME, GETATTR(to),
 * RESTOREFH, GETATTR(from).  Directory attributes that fail to load are
 * invalidated; both directories are marked NMODIFIED on the way out.
 */
644 int error
= 0, lockerror
= ENOENT
, status
, nfsvers
, numops
;
645 struct nfsmount
*nmp
;
646 u_int64_t xid
, savedxid
;
647 struct nfsm_chain nmreq
, nmrep
;
648 struct nfsreq_secinfo_args si
;
650 nmp
= NFSTONMP(fdnp
);
651 if (nfs_mount_gone(nmp
))
653 nfsvers
= nmp
->nm_vers
;
654 if (fdnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
656 if (tdnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
659 NFSREQ_SECINFO_SET(&si
, fdnp
, NULL
, 0, NULL
, 0);
660 nfsm_chain_null(&nmreq
);
661 nfsm_chain_null(&nmrep
);
663 // PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM)
665 nfsm_chain_build_alloc_init(error
, &nmreq
, 30 * NFSX_UNSIGNED
+ fnamelen
+ tnamelen
);
666 nfsm_chain_add_compound_header(error
, &nmreq
, "rename", nmp
->nm_minor_vers
, numops
);
668 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
669 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, fdnp
->n_fhp
, fdnp
->n_fhsize
);
671 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
673 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
674 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, tdnp
->n_fhp
, tdnp
->n_fhsize
);
676 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RENAME
);
677 nfsm_chain_add_name(error
, &nmreq
, fnameptr
, fnamelen
, nmp
);
678 nfsm_chain_add_name(error
, &nmreq
, tnameptr
, tnamelen
, nmp
);
680 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
681 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, tdnp
);
683 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
685 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
686 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, fdnp
);
687 nfsm_chain_build_done(error
, &nmreq
);
688 nfsm_assert(error
, (numops
== 0), EPROTO
);
691 error
= nfs_request(fdnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
/* lock both directories together to update their cached state consistently */
693 if ((lockerror
= nfs_node_lock2(fdnp
, tdnp
)))
695 nfsm_chain_skip_tag(error
, &nmrep
);
696 nfsm_chain_get_32(error
, &nmrep
, numops
);
697 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
698 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
699 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
700 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RENAME
);
701 nfsm_chain_check_change_info(error
, &nmrep
, fdnp
);
702 nfsm_chain_check_change_info(error
, &nmrep
, tdnp
);
703 /* directory attributes: if we don't get them, make sure to invalidate */
704 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
706 nfsm_chain_loadattr(error
, &nmrep
, tdnp
, nfsvers
, &xid
);
707 if (error
&& !lockerror
)
708 NATTRINVALIDATE(tdnp
);
709 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
710 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
712 nfsm_chain_loadattr(error
, &nmrep
, fdnp
, nfsvers
, &xid
);
713 if (error
&& !lockerror
)
714 NATTRINVALIDATE(fdnp
);
716 nfsm_chain_cleanup(&nmreq
);
717 nfsm_chain_cleanup(&nmrep
);
719 fdnp
->n_flag
|= NMODIFIED
;
720 tdnp
->n_flag
|= NMODIFIED
;
721 nfs_node_unlock2(fdnp
, tdnp
);
/*
 * NOTE(review): lossy extraction — the embedded line numbers skip, so many
 * statements (returns, braces, declarations of dp/fh/tag/now) are missing.
 * Comments below describe only what the visible fragments establish.
 */
727 * NFS V4 readdir RPC.
730 nfs4_readdir_rpc(nfsnode_t dnp
, struct nfsbuf
*bp
, vfs_context_t ctx
)
732 struct nfsmount
*nmp
;
733 int error
= 0, lockerror
, nfsvers
, namedattr
, rdirplus
, bigcookies
, numops
;
734 int i
, status
, more_entries
= 1, eof
, bp_dropped
= 0;
735 uint32_t nmreaddirsize
, nmrsize
;
736 uint32_t namlen
, skiplen
, fhlen
, xlen
, attrlen
, reclen
, space_free
, space_needed
;
737 uint64_t cookie
, lastcookie
, xid
, savedxid
;
738 struct nfsm_chain nmreq
, nmrep
, nmrepsave
;
740 struct nfs_vattr nvattr
, *nvattrp
;
741 struct nfs_dir_buf_header
*ndbhp
;
743 char *padstart
, padlen
;
745 uint32_t entry_attrs
[NFS_ATTR_BITMAP_LEN
];
747 struct nfsreq_secinfo_args si
;
750 if (nfs_mount_gone(nmp
))
752 nfsvers
= nmp
->nm_vers
;
753 nmreaddirsize
= nmp
->nm_readdirsize
;
754 nmrsize
= nmp
->nm_rsize
;
755 bigcookies
= nmp
->nm_state
& NFSSTA_BIGCOOKIES
;
/* named-attribute directories always get the "plus" (attribute) treatment */
756 namedattr
= (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) ? 1 : 0;
757 rdirplus
= (NMFLAG(nmp
, RDIRPLUS
) || namedattr
) ? 1 : 0;
758 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
760 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
763 * Set up attribute request for entries.
764 * For READDIRPLUS functionality, get everything.
765 * Otherwise, just get what we need for struct direntry.
769 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, entry_attrs
);
770 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_FILEHANDLE
);
773 NFS_CLEAR_ATTRIBUTES(entry_attrs
);
774 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_TYPE
);
775 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_FILEID
);
776 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_MOUNTED_ON_FILEID
);
778 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_RDATTR_ERROR
);
780 /* lock to protect access to cookie verifier */
781 if ((lockerror
= nfs_node_lock(dnp
)))
784 /* determine cookie to use, and move dp to the right offset */
785 ndbhp
= (struct nfs_dir_buf_header
*)bp
->nb_data
;
786 dp
= NFS_DIR_BUF_FIRST_DIRENTRY(bp
);
787 if (ndbhp
->ndbh_count
) {
/* buffer already holds entries: resume from the last entry's cookie */
788 for (i
=0; i
< ndbhp
->ndbh_count
-1; i
++)
789 dp
= NFS_DIRENTRY_NEXT(dp
);
790 cookie
= dp
->d_seekoff
;
791 dp
= NFS_DIRENTRY_NEXT(dp
);
/* fresh buffer: its logical block number doubles as the starting cookie */
793 cookie
= bp
->nb_lblkno
;
794 /* increment with every buffer read */
795 OSAddAtomic64(1, &nfsstats
.readdir_bios
);
800 * The NFS client is responsible for the "." and ".." entries in the
801 * directory. So, we put them at the start of the first buffer.
802 * Don't bother for attribute directories.
804 if (((bp
->nb_lblkno
== 0) && (ndbhp
->ndbh_count
== 0)) &&
805 !(dnp
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
/* synthesize the "." entry (fhlen slot + time_t stamp appended for rdirplus) */
807 fhlen
= rdirplus
? fh
.fh_len
+ 1 : 0;
808 xlen
= rdirplus
? (fhlen
+ sizeof(time_t)) : 0;
811 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
813 bzero(&dp
->d_name
[namlen
+1], xlen
);
814 dp
->d_namlen
= namlen
;
815 strlcpy(dp
->d_name
, ".", namlen
+1);
816 dp
->d_fileno
= dnp
->n_vattr
.nva_fileid
;
818 dp
->d_reclen
= reclen
;
820 padstart
= dp
->d_name
+ dp
->d_namlen
+ 1 + xlen
;
821 dp
= NFS_DIRENTRY_NEXT(dp
);
822 padlen
= (char*)dp
- padstart
;
824 bzero(padstart
, padlen
);
825 if (rdirplus
) /* zero out attributes */
826 bzero(NFS_DIR_BUF_NVATTR(bp
, 0), sizeof(struct nfs_vattr
));
/* synthesize the ".." entry; use the parent's fileid when one is attached */
830 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
832 bzero(&dp
->d_name
[namlen
+1], xlen
);
833 dp
->d_namlen
= namlen
;
834 strlcpy(dp
->d_name
, "..", namlen
+1);
836 dp
->d_fileno
= VTONFS(dnp
->n_parent
)->n_vattr
.nva_fileid
;
838 dp
->d_fileno
= dnp
->n_vattr
.nva_fileid
;
840 dp
->d_reclen
= reclen
;
842 padstart
= dp
->d_name
+ dp
->d_namlen
+ 1 + xlen
;
843 dp
= NFS_DIRENTRY_NEXT(dp
);
844 padlen
= (char*)dp
- padstart
;
846 bzero(padstart
, padlen
);
847 if (rdirplus
) /* zero out attributes */
848 bzero(NFS_DIR_BUF_NVATTR(bp
, 1), sizeof(struct nfs_vattr
));
850 ndbhp
->ndbh_entry_end
= (char*)dp
- bp
->nb_data
;
851 ndbhp
->ndbh_count
= 2;
855 * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
856 * the buffer is full (or we hit EOF). Then put the remainder of the
857 * results in the next buffer(s).
859 nfsm_chain_null(&nmreq
);
860 nfsm_chain_null(&nmrep
);
861 while (nfs_dir_buf_freespace(bp
, rdirplus
) && !(ndbhp
->ndbh_flags
& NDB_FULL
)) {
863 // PUTFH, GETATTR, READDIR
865 nfsm_chain_build_alloc_init(error
, &nmreq
, 26 * NFSX_UNSIGNED
);
866 nfsm_chain_add_compound_header(error
, &nmreq
, tag
, nmp
->nm_minor_vers
, numops
);
868 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
869 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
871 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
872 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
874 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READDIR
);
/* cookies 0..2 map to the synthesized ./.. entries, so start the server at 0 */
875 nfsm_chain_add_64(error
, &nmreq
, (cookie
<= 2) ? 0 : cookie
);
876 nfsm_chain_add_64(error
, &nmreq
, dnp
->n_cookieverf
);
877 nfsm_chain_add_32(error
, &nmreq
, nmreaddirsize
);
878 nfsm_chain_add_32(error
, &nmreq
, nmrsize
);
879 nfsm_chain_add_bitmap_supported(error
, &nmreq
, entry_attrs
, nmp
, dnp
);
880 nfsm_chain_build_done(error
, &nmreq
);
881 nfsm_assert(error
, (numops
== 0), EPROTO
);
/* drop the node lock across the RPC */
882 nfs_node_unlock(dnp
);
884 error
= nfs_request(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
886 if ((lockerror
= nfs_node_lock(dnp
)))
890 nfsm_chain_skip_tag(error
, &nmrep
);
891 nfsm_chain_get_32(error
, &nmrep
, numops
);
892 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
893 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
894 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
895 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READDIR
);
896 nfsm_chain_get_64(error
, &nmrep
, dnp
->n_cookieverf
);
897 nfsm_chain_get_32(error
, &nmrep
, more_entries
);
900 nfs_node_unlock(dnp
);
908 /* loop through the entries packing them into the buffer */
909 while (more_entries
) {
910 /* Entry: COOKIE, NAME, FATTR */
911 nfsm_chain_get_64(error
, &nmrep
, cookie
);
912 nfsm_chain_get_32(error
, &nmrep
, namlen
);
914 if (!bigcookies
&& (cookie
>> 32) && (nmp
== NFSTONMP(dnp
))) {
915 /* we've got a big cookie, make sure flag is set */
916 lck_mtx_lock(&nmp
->nm_lock
);
917 nmp
->nm_state
|= NFSSTA_BIGCOOKIES
;
918 lck_mtx_unlock(&nmp
->nm_lock
);
921 /* just truncate names that don't fit in direntry.d_name */
926 if (namlen
> (sizeof(dp
->d_name
)-1)) {
927 skiplen
= namlen
- sizeof(dp
->d_name
) + 1;
928 namlen
= sizeof(dp
->d_name
) - 1;
932 /* guess that fh size will be same as parent */
933 fhlen
= rdirplus
? (1 + dnp
->n_fhsize
) : 0;
934 xlen
= rdirplus
? (fhlen
+ sizeof(time_t)) : 0;
935 attrlen
= rdirplus
? sizeof(struct nfs_vattr
) : 0;
936 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
937 space_needed
= reclen
+ attrlen
;
938 space_free
= nfs_dir_buf_freespace(bp
, rdirplus
);
939 if (space_needed
> space_free
) {
941 * We still have entries to pack, but we've
942 * run out of room in the current buffer.
943 * So we need to move to the next buffer.
944 * The block# for the next buffer is the
945 * last cookie in the current buffer.
948 ndbhp
->ndbh_flags
|= NDB_FULL
;
949 nfs_buf_release(bp
, 0);
952 error
= nfs_buf_get(dnp
, lastcookie
, NFS_DIRBLKSIZ
, vfs_context_thread(ctx
), NBLK_READ
, &bp
);
954 /* initialize buffer */
955 ndbhp
= (struct nfs_dir_buf_header
*)bp
->nb_data
;
956 ndbhp
->ndbh_flags
= 0;
957 ndbhp
->ndbh_count
= 0;
958 ndbhp
->ndbh_entry_end
= sizeof(*ndbhp
);
959 ndbhp
->ndbh_ncgen
= dnp
->n_ncgen
;
960 space_free
= nfs_dir_buf_freespace(bp
, rdirplus
);
961 dp
= NFS_DIR_BUF_FIRST_DIRENTRY(bp
);
962 /* increment with every buffer read */
963 OSAddAtomic64(1, &nfsstats
.readdir_bios
);
966 dp
->d_fileno
= cookie
; /* placeholder */
967 dp
->d_seekoff
= cookie
;
968 dp
->d_namlen
= namlen
;
969 dp
->d_reclen
= reclen
;
970 dp
->d_type
= DT_UNKNOWN
;
971 nfsm_chain_get_opaque(error
, &nmrep
, namlen
, dp
->d_name
);
973 dp
->d_name
[namlen
] = '\0';
/* skip the portion of a too-long name that was truncated above */
975 nfsm_chain_adv(error
, &nmrep
,
976 nfsm_rndup(namlen
+ skiplen
) - nfsm_rndup(namlen
));
/* rdirplus stores attrs in the buffer's attr area, else a scratch nvattr */
978 nvattrp
= rdirplus
? NFS_DIR_BUF_NVATTR(bp
, ndbhp
->ndbh_count
) : &nvattr
;
979 error
= nfs4_parsefattr(&nmrep
, NULL
, nvattrp
, &fh
, NULL
, NULL
);
980 if (!error
&& NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_ACL
)) {
981 /* we do NOT want ACLs returned to us here */
982 NFS_BITMAP_CLR(nvattrp
->nva_bitmap
, NFS_FATTR_ACL
);
983 if (nvattrp
->nva_acl
) {
984 kauth_acl_free(nvattrp
->nva_acl
);
985 nvattrp
->nva_acl
= NULL
;
988 if (error
&& NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_RDATTR_ERROR
)) {
989 /* OK, we may not have gotten all of the attributes but we will use what we can. */
990 if ((error
== NFSERR_MOVED
) || (error
== NFSERR_INVAL
)) {
991 /* set this up to look like a referral trigger */
992 nfs4_default_attrs_for_referral_trigger(dnp
, dp
->d_name
, namlen
, nvattrp
, &fh
);
996 /* check for more entries after this one */
997 nfsm_chain_get_32(error
, &nmrep
, more_entries
);
1000 /* Skip any "." and ".." entries returned from server. */
1001 /* Also skip any bothersome named attribute entries. */
1002 if (((dp
->d_name
[0] == '.') && ((namlen
== 1) || ((namlen
== 2) && (dp
->d_name
[1] == '.')))) ||
1003 (namedattr
&& (namlen
== 11) && (!strcmp(dp
->d_name
, "SUNWattr_ro") || !strcmp(dp
->d_name
, "SUNWattr_rw")))) {
1004 lastcookie
= cookie
;
1008 if (NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_TYPE
))
1009 dp
->d_type
= IFTODT(VTTOIF(nvattrp
->nva_type
));
1010 if (NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_FILEID
))
1011 dp
->d_fileno
= nvattrp
->nva_fileid
;
1013 /* fileid is already in d_fileno, so stash xid in attrs */
1014 nvattrp
->nva_fileid
= savedxid
;
1015 if (NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
/* recompute record size with the real fh length (we only guessed before) */
1016 fhlen
= fh
.fh_len
+ 1;
1017 xlen
= fhlen
+ sizeof(time_t);
1018 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
1019 space_needed
= reclen
+ attrlen
;
1020 if (space_needed
> space_free
) {
1021 /* didn't actually have the room... move on to next buffer */
1025 /* pack the file handle into the record */
1026 dp
->d_name
[dp
->d_namlen
+1] = fh
.fh_len
;
1027 bcopy(fh
.fh_data
, &dp
->d_name
[dp
->d_namlen
+2], fh
.fh_len
);
1029 /* mark the file handle invalid */
1031 fhlen
= fh
.fh_len
+ 1;
1032 xlen
= fhlen
+ sizeof(time_t);
1033 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
1034 bzero(&dp
->d_name
[dp
->d_namlen
+1], fhlen
);
/* stamp the entry with the current time ("now" declaration lost in extraction) */
1036 *(time_t*)(&dp
->d_name
[dp
->d_namlen
+1+fhlen
]) = now
.tv_sec
;
1037 dp
->d_reclen
= reclen
;
1039 padstart
= dp
->d_name
+ dp
->d_namlen
+ 1 + xlen
;
1040 ndbhp
->ndbh_count
++;
1041 lastcookie
= cookie
;
1043 /* advance to next direntry in buffer */
1044 dp
= NFS_DIRENTRY_NEXT(dp
);
1045 ndbhp
->ndbh_entry_end
= (char*)dp
- bp
->nb_data
;
1046 /* zero out the pad bytes */
1047 padlen
= (char*)dp
- padstart
;
1049 bzero(padstart
, padlen
);
1051 /* Finally, get the eof boolean */
1052 nfsm_chain_get_32(error
, &nmrep
, eof
);
/* EOF: remember the final cookie on the node so later reads know the end */
1055 ndbhp
->ndbh_flags
|= (NDB_FULL
|NDB_EOF
);
1056 nfs_node_lock_force(dnp
);
1057 dnp
->n_eofcookie
= lastcookie
;
1058 nfs_node_unlock(dnp
);
1063 nfs_buf_release(bp
, 0);
1067 if ((lockerror
= nfs_node_lock(dnp
)))
1070 nfsm_chain_cleanup(&nmrep
);
1071 nfsm_chain_null(&nmreq
);
1074 if (bp_dropped
&& bp
)
1075 nfs_buf_release(bp
, 0);
1077 nfs_node_unlock(dnp
);
1078 nfsm_chain_cleanup(&nmreq
);
1079 nfsm_chain_cleanup(&nmrep
);
1080 return (bp_dropped
? NFSERR_DIRBUFDROPPED
: error
);
1084 nfs4_lookup_rpc_async(
1089 struct nfsreq
**reqp
)
1091 int error
= 0, isdotdot
= 0, nfsvers
, numops
;
1092 struct nfsm_chain nmreq
;
1093 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
1094 struct nfsmount
*nmp
;
1095 struct nfsreq_secinfo_args si
;
1097 nmp
= NFSTONMP(dnp
);
1098 if (nfs_mount_gone(nmp
))
1100 nfsvers
= nmp
->nm_vers
;
1101 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
1104 if ((name
[0] == '.') && (name
[1] == '.') && (namelen
== 2)) {
1106 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
1108 NFSREQ_SECINFO_SET(&si
, dnp
, dnp
->n_fhp
, dnp
->n_fhsize
, name
, namelen
);
1111 nfsm_chain_null(&nmreq
);
1113 // PUTFH, GETATTR, LOOKUP(P), GETFH, GETATTR (FH)
1115 nfsm_chain_build_alloc_init(error
, &nmreq
, 20 * NFSX_UNSIGNED
+ namelen
);
1116 nfsm_chain_add_compound_header(error
, &nmreq
, "lookup", nmp
->nm_minor_vers
, numops
);
1118 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1119 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
1121 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1122 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
1125 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOOKUPP
);
1127 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOOKUP
);
1128 nfsm_chain_add_name(error
, &nmreq
, name
, namelen
, nmp
);
1131 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETFH
);
1133 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1134 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
1135 /* some ".zfs" directories can't handle being asked for some attributes */
1136 if ((dnp
->n_flag
& NISDOTZFS
) && !isdotdot
)
1137 NFS_BITMAP_CLR(bitmap
, NFS_FATTR_NAMED_ATTR
);
1138 if ((dnp
->n_flag
& NISDOTZFSCHILD
) && isdotdot
)
1139 NFS_BITMAP_CLR(bitmap
, NFS_FATTR_NAMED_ATTR
);
1140 if (((namelen
== 4) && (name
[0] == '.') && (name
[1] == 'z') && (name
[2] == 'f') && (name
[3] == 's')))
1141 NFS_BITMAP_CLR(bitmap
, NFS_FATTR_NAMED_ATTR
);
1142 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, NULL
);
1143 nfsm_chain_build_done(error
, &nmreq
);
1144 nfsm_assert(error
, (numops
== 0), EPROTO
);
1146 error
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
1147 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &si
, 0, NULL
, reqp
);
1149 nfsm_chain_cleanup(&nmreq
);
1155 nfs4_lookup_rpc_async_finish(
1163 struct nfs_vattr
*nvap
)
1165 int error
= 0, lockerror
= ENOENT
, status
, nfsvers
, numops
, isdotdot
= 0;
1166 uint32_t op
= NFS_OP_LOOKUP
;
1168 struct nfsmount
*nmp
;
1169 struct nfsm_chain nmrep
;
1171 nmp
= NFSTONMP(dnp
);
1174 nfsvers
= nmp
->nm_vers
;
1175 if ((name
[0] == '.') && (name
[1] == '.') && (namelen
== 2))
1178 nfsm_chain_null(&nmrep
);
1180 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
1182 if ((lockerror
= nfs_node_lock(dnp
)))
1184 nfsm_chain_skip_tag(error
, &nmrep
);
1185 nfsm_chain_get_32(error
, &nmrep
, numops
);
1186 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1187 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1190 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
1192 nfsm_chain_op_check(error
, &nmrep
, (isdotdot
? NFS_OP_LOOKUPP
: NFS_OP_LOOKUP
));
1193 nfsmout_if(error
|| !fhp
|| !nvap
);
1194 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETFH
);
1195 nfsm_chain_get_32(error
, &nmrep
, fhp
->fh_len
);
1196 if (error
== 0 && fhp
->fh_len
> sizeof(fhp
->fh_data
))
1199 nfsm_chain_get_opaque(error
, &nmrep
, fhp
->fh_len
, fhp
->fh_data
);
1200 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1201 if ((error
== NFSERR_MOVED
) || (error
== NFSERR_INVAL
)) {
1202 /* set this up to look like a referral trigger */
1203 nfs4_default_attrs_for_referral_trigger(dnp
, name
, namelen
, nvap
, fhp
);
1207 error
= nfs4_parsefattr(&nmrep
, NULL
, nvap
, NULL
, NULL
, NULL
);
1211 nfs_node_unlock(dnp
);
1212 nfsm_chain_cleanup(&nmrep
);
1213 if (!error
&& (op
== NFS_OP_LOOKUP
) && (nmp
->nm_state
& NFSSTA_NEEDSECINFO
)) {
1214 /* We still need to get SECINFO to set default for mount. */
1215 /* Do so for the first LOOKUP that returns successfully. */
1218 sec
.count
= NX_MAX_SEC_FLAVORS
;
1219 error
= nfs4_secinfo_rpc(nmp
, &req
->r_secinfo
, vfs_context_ucred(ctx
), sec
.flavors
, &sec
.count
);
1220 /* [sigh] some implementations return "illegal" error for unsupported ops */
1221 if (error
== NFSERR_OP_ILLEGAL
)
1224 /* set our default security flavor to the first in the list */
1225 lck_mtx_lock(&nmp
->nm_lock
);
1227 nmp
->nm_auth
= sec
.flavors
[0];
1228 nmp
->nm_state
&= ~NFSSTA_NEEDSECINFO
;
1229 lck_mtx_unlock(&nmp
->nm_lock
);
1243 struct nfsmount
*nmp
;
1244 int error
= 0, lockerror
, status
, nfsvers
, numops
;
1245 u_int64_t xid
, newwverf
;
1247 struct nfsm_chain nmreq
, nmrep
;
1248 struct nfsreq_secinfo_args si
;
1251 FSDBG(521, np
, offset
, count
, nmp
? nmp
->nm_state
: 0);
1252 if (nfs_mount_gone(nmp
))
1254 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
1256 if (!(nmp
->nm_state
& NFSSTA_HASWRITEVERF
))
1258 nfsvers
= nmp
->nm_vers
;
1260 if (count
> UINT32_MAX
)
1265 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
1266 nfsm_chain_null(&nmreq
);
1267 nfsm_chain_null(&nmrep
);
1269 // PUTFH, COMMIT, GETATTR
1271 nfsm_chain_build_alloc_init(error
, &nmreq
, 19 * NFSX_UNSIGNED
);
1272 nfsm_chain_add_compound_header(error
, &nmreq
, "commit", nmp
->nm_minor_vers
, numops
);
1274 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1275 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
1277 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_COMMIT
);
1278 nfsm_chain_add_64(error
, &nmreq
, offset
);
1279 nfsm_chain_add_32(error
, &nmreq
, count32
);
1281 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1282 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
1283 nfsm_chain_build_done(error
, &nmreq
);
1284 nfsm_assert(error
, (numops
== 0), EPROTO
);
1286 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
1287 current_thread(), cred
, &si
, 0, &nmrep
, &xid
, &status
);
1289 if ((lockerror
= nfs_node_lock(np
)))
1291 nfsm_chain_skip_tag(error
, &nmrep
);
1292 nfsm_chain_get_32(error
, &nmrep
, numops
);
1293 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1294 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_COMMIT
);
1295 nfsm_chain_get_64(error
, &nmrep
, newwverf
);
1296 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1297 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
1299 nfs_node_unlock(np
);
1301 lck_mtx_lock(&nmp
->nm_lock
);
1302 if (nmp
->nm_verf
!= newwverf
)
1303 nmp
->nm_verf
= newwverf
;
1304 if (wverf
!= newwverf
)
1305 error
= NFSERR_STALEWRITEVERF
;
1306 lck_mtx_unlock(&nmp
->nm_lock
);
1308 nfsm_chain_cleanup(&nmreq
);
1309 nfsm_chain_cleanup(&nmrep
);
1316 struct nfs_fsattr
*nfsap
,
1320 int error
= 0, lockerror
, status
, nfsvers
, numops
;
1321 struct nfsm_chain nmreq
, nmrep
;
1322 struct nfsmount
*nmp
= NFSTONMP(np
);
1323 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
1324 struct nfs_vattr nvattr
;
1325 struct nfsreq_secinfo_args si
;
1327 if (nfs_mount_gone(nmp
))
1329 nfsvers
= nmp
->nm_vers
;
1330 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
1333 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
1334 NVATTR_INIT(&nvattr
);
1335 nfsm_chain_null(&nmreq
);
1336 nfsm_chain_null(&nmrep
);
1338 /* NFSv4: fetch "pathconf" info for this node */
1341 nfsm_chain_build_alloc_init(error
, &nmreq
, 16 * NFSX_UNSIGNED
);
1342 nfsm_chain_add_compound_header(error
, &nmreq
, "pathconf", nmp
->nm_minor_vers
, numops
);
1344 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1345 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
1347 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1348 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
1349 NFS_BITMAP_SET(bitmap
, NFS_FATTR_MAXLINK
);
1350 NFS_BITMAP_SET(bitmap
, NFS_FATTR_MAXNAME
);
1351 NFS_BITMAP_SET(bitmap
, NFS_FATTR_NO_TRUNC
);
1352 NFS_BITMAP_SET(bitmap
, NFS_FATTR_CHOWN_RESTRICTED
);
1353 NFS_BITMAP_SET(bitmap
, NFS_FATTR_CASE_INSENSITIVE
);
1354 NFS_BITMAP_SET(bitmap
, NFS_FATTR_CASE_PRESERVING
);
1355 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
1356 nfsm_chain_build_done(error
, &nmreq
);
1357 nfsm_assert(error
, (numops
== 0), EPROTO
);
1359 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
1361 nfsm_chain_skip_tag(error
, &nmrep
);
1362 nfsm_chain_get_32(error
, &nmrep
, numops
);
1363 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1364 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1366 error
= nfs4_parsefattr(&nmrep
, nfsap
, &nvattr
, NULL
, NULL
, NULL
);
1368 if ((lockerror
= nfs_node_lock(np
)))
1371 nfs_loadattrcache(np
, &nvattr
, &xid
, 0);
1373 nfs_node_unlock(np
);
1375 NVATTR_CLEANUP(&nvattr
);
1376 nfsm_chain_cleanup(&nmreq
);
1377 nfsm_chain_cleanup(&nmrep
);
1383 struct vnop_getattr_args
/* {
1384 struct vnodeop_desc *a_desc;
1386 struct vnode_attr *a_vap;
1387 vfs_context_t a_context;
1390 struct vnode_attr
*vap
= ap
->a_vap
;
1391 struct nfsmount
*nmp
;
1392 struct nfs_vattr nva
;
1393 int error
, acls
, ngaflags
;
1395 nmp
= VTONMP(ap
->a_vp
);
1396 if (nfs_mount_gone(nmp
))
1398 acls
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_ACL
);
1400 ngaflags
= NGA_CACHED
;
1401 if (VATTR_IS_ACTIVE(vap
, va_acl
) && acls
)
1402 ngaflags
|= NGA_ACL
;
1403 error
= nfs_getattr(VTONFS(ap
->a_vp
), &nva
, ap
->a_context
, ngaflags
);
1407 vap
->va_flags
|= VA_64BITOBJIDS
;
1409 /* copy what we have in nva to *a_vap */
1410 if (VATTR_IS_ACTIVE(vap
, va_rdev
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_RAWDEV
)) {
1411 dev_t rdev
= makedev(nva
.nva_rawdev
.specdata1
, nva
.nva_rawdev
.specdata2
);
1412 VATTR_RETURN(vap
, va_rdev
, rdev
);
1414 if (VATTR_IS_ACTIVE(vap
, va_nlink
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_NUMLINKS
))
1415 VATTR_RETURN(vap
, va_nlink
, nva
.nva_nlink
);
1416 if (VATTR_IS_ACTIVE(vap
, va_data_size
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_SIZE
))
1417 VATTR_RETURN(vap
, va_data_size
, nva
.nva_size
);
1418 // VATTR_RETURN(vap, va_data_alloc, ???);
1419 // VATTR_RETURN(vap, va_total_size, ???);
1420 if (VATTR_IS_ACTIVE(vap
, va_total_alloc
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_SPACE_USED
))
1421 VATTR_RETURN(vap
, va_total_alloc
, nva
.nva_bytes
);
1422 if (VATTR_IS_ACTIVE(vap
, va_uid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_OWNER
))
1423 VATTR_RETURN(vap
, va_uid
, nva
.nva_uid
);
1424 if (VATTR_IS_ACTIVE(vap
, va_uuuid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_OWNER
))
1425 VATTR_RETURN(vap
, va_uuuid
, nva
.nva_uuuid
);
1426 if (VATTR_IS_ACTIVE(vap
, va_gid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_OWNER_GROUP
))
1427 VATTR_RETURN(vap
, va_gid
, nva
.nva_gid
);
1428 if (VATTR_IS_ACTIVE(vap
, va_guuid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_OWNER_GROUP
))
1429 VATTR_RETURN(vap
, va_guuid
, nva
.nva_guuid
);
1430 if (VATTR_IS_ACTIVE(vap
, va_mode
)) {
1431 if (NMFLAG(nmp
, ACLONLY
) || !NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_MODE
))
1432 VATTR_RETURN(vap
, va_mode
, 0777);
1434 VATTR_RETURN(vap
, va_mode
, nva
.nva_mode
);
1436 if (VATTR_IS_ACTIVE(vap
, va_flags
) &&
1437 (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_ARCHIVE
) ||
1438 NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_HIDDEN
) ||
1439 (nva
.nva_flags
& NFS_FFLAG_TRIGGER
))) {
1441 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_ARCHIVE
) &&
1442 (nva
.nva_flags
& NFS_FFLAG_ARCHIVED
))
1443 flags
|= SF_ARCHIVED
;
1444 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_HIDDEN
) &&
1445 (nva
.nva_flags
& NFS_FFLAG_HIDDEN
))
1447 VATTR_RETURN(vap
, va_flags
, flags
);
1449 if (VATTR_IS_ACTIVE(vap
, va_create_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_CREATE
)) {
1450 vap
->va_create_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_CREATE
];
1451 vap
->va_create_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_CREATE
];
1452 VATTR_SET_SUPPORTED(vap
, va_create_time
);
1454 if (VATTR_IS_ACTIVE(vap
, va_access_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_ACCESS
)) {
1455 vap
->va_access_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_ACCESS
];
1456 vap
->va_access_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_ACCESS
];
1457 VATTR_SET_SUPPORTED(vap
, va_access_time
);
1459 if (VATTR_IS_ACTIVE(vap
, va_modify_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_MODIFY
)) {
1460 vap
->va_modify_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_MODIFY
];
1461 vap
->va_modify_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_MODIFY
];
1462 VATTR_SET_SUPPORTED(vap
, va_modify_time
);
1464 if (VATTR_IS_ACTIVE(vap
, va_change_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_METADATA
)) {
1465 vap
->va_change_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_CHANGE
];
1466 vap
->va_change_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_CHANGE
];
1467 VATTR_SET_SUPPORTED(vap
, va_change_time
);
1469 if (VATTR_IS_ACTIVE(vap
, va_backup_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_BACKUP
)) {
1470 vap
->va_backup_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_BACKUP
];
1471 vap
->va_backup_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_BACKUP
];
1472 VATTR_SET_SUPPORTED(vap
, va_backup_time
);
1474 if (VATTR_IS_ACTIVE(vap
, va_fileid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_FILEID
))
1475 VATTR_RETURN(vap
, va_fileid
, nva
.nva_fileid
);
1476 if (VATTR_IS_ACTIVE(vap
, va_type
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TYPE
))
1477 VATTR_RETURN(vap
, va_type
, nva
.nva_type
);
1478 if (VATTR_IS_ACTIVE(vap
, va_filerev
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_CHANGE
))
1479 VATTR_RETURN(vap
, va_filerev
, nva
.nva_change
);
1481 if (VATTR_IS_ACTIVE(vap
, va_acl
) && acls
) {
1482 VATTR_RETURN(vap
, va_acl
, nva
.nva_acl
);
1486 // other attrs we might support someday:
1487 // VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? */);
1489 NVATTR_CLEANUP(&nva
);
1496 struct vnode_attr
*vap
,
1499 struct nfsmount
*nmp
= NFSTONMP(np
);
1500 int error
= 0, setattr_error
= 0, lockerror
= ENOENT
, status
, nfsvers
, numops
;
1501 u_int64_t xid
, nextxid
;
1502 struct nfsm_chain nmreq
, nmrep
;
1503 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
1504 uint32_t getbitmap
[NFS_ATTR_BITMAP_LEN
];
1505 uint32_t setbitmap
[NFS_ATTR_BITMAP_LEN
];
1506 nfs_stateid stateid
;
1507 struct nfsreq_secinfo_args si
;
1509 if (nfs_mount_gone(nmp
))
1511 nfsvers
= nmp
->nm_vers
;
1512 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
1515 if (VATTR_IS_ACTIVE(vap
, va_flags
) && (vap
->va_flags
& ~(SF_ARCHIVED
|UF_HIDDEN
))) {
1516 /* we don't support setting unsupported flags (duh!) */
1517 if (vap
->va_active
& ~VNODE_ATTR_va_flags
)
1518 return (EINVAL
); /* return EINVAL if other attributes also set */
1520 return (ENOTSUP
); /* return ENOTSUP for chflags(2) */
1523 /* don't bother requesting some changes if they don't look like they are changing */
1524 if (VATTR_IS_ACTIVE(vap
, va_uid
) && (vap
->va_uid
== np
->n_vattr
.nva_uid
))
1525 VATTR_CLEAR_ACTIVE(vap
, va_uid
);
1526 if (VATTR_IS_ACTIVE(vap
, va_gid
) && (vap
->va_gid
== np
->n_vattr
.nva_gid
))
1527 VATTR_CLEAR_ACTIVE(vap
, va_gid
);
1528 if (VATTR_IS_ACTIVE(vap
, va_uuuid
) && kauth_guid_equal(&vap
->va_uuuid
, &np
->n_vattr
.nva_uuuid
))
1529 VATTR_CLEAR_ACTIVE(vap
, va_uuuid
);
1530 if (VATTR_IS_ACTIVE(vap
, va_guuid
) && kauth_guid_equal(&vap
->va_guuid
, &np
->n_vattr
.nva_guuid
))
1531 VATTR_CLEAR_ACTIVE(vap
, va_guuid
);
1534 /* do nothing if no attributes will be sent */
1535 nfs_vattr_set_bitmap(nmp
, bitmap
, vap
);
1536 if (!bitmap
[0] && !bitmap
[1])
1539 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
1540 nfsm_chain_null(&nmreq
);
1541 nfsm_chain_null(&nmrep
);
1544 * Prepare GETATTR bitmap: if we are setting the ACL or mode, we
1545 * need to invalidate any cached ACL. And if we had an ACL cached,
1546 * we might as well also fetch the new value.
1548 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, getbitmap
);
1549 if (NFS_BITMAP_ISSET(bitmap
, NFS_FATTR_ACL
) ||
1550 NFS_BITMAP_ISSET(bitmap
, NFS_FATTR_MODE
)) {
1552 NFS_BITMAP_SET(getbitmap
, NFS_FATTR_ACL
);
1556 // PUTFH, SETATTR, GETATTR
1558 nfsm_chain_build_alloc_init(error
, &nmreq
, 40 * NFSX_UNSIGNED
);
1559 nfsm_chain_add_compound_header(error
, &nmreq
, "setattr", nmp
->nm_minor_vers
, numops
);
1561 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1562 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
1564 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SETATTR
);
1565 if (VATTR_IS_ACTIVE(vap
, va_data_size
))
1566 nfs_get_stateid(np
, vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &stateid
);
1568 stateid
.seqid
= stateid
.other
[0] = stateid
.other
[1] = stateid
.other
[2] = 0;
1569 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
1570 nfsm_chain_add_fattr4(error
, &nmreq
, vap
, nmp
);
1572 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1573 nfsm_chain_add_bitmap_supported(error
, &nmreq
, getbitmap
, nmp
, np
);
1574 nfsm_chain_build_done(error
, &nmreq
);
1575 nfsm_assert(error
, (numops
== 0), EPROTO
);
1577 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
1579 if ((lockerror
= nfs_node_lock(np
)))
1581 nfsm_chain_skip_tag(error
, &nmrep
);
1582 nfsm_chain_get_32(error
, &nmrep
, numops
);
1583 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1585 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SETATTR
);
1586 nfsmout_if(error
== EBADRPC
);
1587 setattr_error
= error
;
1589 bmlen
= NFS_ATTR_BITMAP_LEN
;
1590 nfsm_chain_get_bitmap(error
, &nmrep
, setbitmap
, bmlen
);
1592 if (VATTR_IS_ACTIVE(vap
, va_data_size
) && (np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
))
1593 microuptime(&np
->n_lastio
);
1594 nfs_vattr_set_supported(setbitmap
, vap
);
1595 error
= setattr_error
;
1597 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1598 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
1600 NATTRINVALIDATE(np
);
1602 * We just changed the attributes and we want to make sure that we
1603 * see the latest attributes. Get the next XID. If it's not the
1604 * next XID after the SETATTR XID, then it's possible that another
1605 * RPC was in flight at the same time and it might put stale attributes
1606 * in the cache. In that case, we invalidate the attributes and set
1607 * the attribute cache XID to guarantee that newer attributes will
1611 nfs_get_xid(&nextxid
);
1612 if (nextxid
!= (xid
+ 1)) {
1613 np
->n_xid
= nextxid
;
1614 NATTRINVALIDATE(np
);
1618 nfs_node_unlock(np
);
1619 nfsm_chain_cleanup(&nmreq
);
1620 nfsm_chain_cleanup(&nmrep
);
1621 if ((setattr_error
== EINVAL
) && VATTR_IS_ACTIVE(vap
, va_acl
) && VATTR_IS_ACTIVE(vap
, va_mode
) && !NMFLAG(nmp
, ACLONLY
)) {
1623 * Some server's may not like ACL/mode combos that get sent.
1624 * If it looks like that's what the server choked on, try setting
1625 * just the ACL and not the mode (unless it looks like everything
1626 * but mode was already successfully set).
1628 if (((bitmap
[0] & setbitmap
[0]) != bitmap
[0]) ||
1629 ((bitmap
[1] & (setbitmap
[1]|NFS_FATTR_MODE
)) != bitmap
[1])) {
1630 VATTR_CLEAR_ACTIVE(vap
, va_mode
);
1639 * Wait for any pending recovery to complete.
1642 nfs_mount_state_wait_for_recovery(struct nfsmount
*nmp
)
1644 struct timespec ts
= { 1, 0 };
1645 int error
= 0, slpflag
= NMFLAG(nmp
, INTR
) ? PCATCH
: 0;
1647 lck_mtx_lock(&nmp
->nm_lock
);
1648 while (nmp
->nm_state
& NFSSTA_RECOVER
) {
1649 if ((error
= nfs_sigintr(nmp
, NULL
, current_thread(), 1)))
1651 nfs_mount_sock_thread_wake(nmp
);
1652 msleep(&nmp
->nm_state
, &nmp
->nm_lock
, slpflag
|(PZERO
-1), "nfsrecoverwait", &ts
);
1655 lck_mtx_unlock(&nmp
->nm_lock
);
1661 * We're about to use/manipulate NFS mount's open/lock state.
1662 * Wait for any pending state recovery to complete, then
1663 * mark the state as being in use (which will hold off
1664 * the recovery thread until we're done).
1667 nfs_mount_state_in_use_start(struct nfsmount
*nmp
, thread_t thd
)
1669 struct timespec ts
= { 1, 0 };
1670 int error
= 0, slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
1672 if (nfs_mount_gone(nmp
))
1674 lck_mtx_lock(&nmp
->nm_lock
);
1675 if (nmp
->nm_state
& (NFSSTA_FORCE
|NFSSTA_DEAD
)) {
1676 lck_mtx_unlock(&nmp
->nm_lock
);
1679 while (nmp
->nm_state
& NFSSTA_RECOVER
) {
1680 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 1)))
1682 nfs_mount_sock_thread_wake(nmp
);
1683 msleep(&nmp
->nm_state
, &nmp
->nm_lock
, slpflag
|(PZERO
-1), "nfsrecoverwait", &ts
);
1687 nmp
->nm_stateinuse
++;
1688 lck_mtx_unlock(&nmp
->nm_lock
);
1694 * We're done using/manipulating the NFS mount's open/lock
1695 * state. If the given error indicates that recovery should
1696 * be performed, we'll initiate recovery.
1699 nfs_mount_state_in_use_end(struct nfsmount
*nmp
, int error
)
1701 int restart
= nfs_mount_state_error_should_restart(error
);
1703 if (nfs_mount_gone(nmp
))
1705 lck_mtx_lock(&nmp
->nm_lock
);
1706 if (restart
&& (error
!= NFSERR_OLD_STATEID
) && (error
!= NFSERR_GRACE
)) {
1707 printf("nfs_mount_state_in_use_end: error %d, initiating recovery for %s, 0x%x\n",
1708 error
, vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nmp
->nm_stategenid
);
1709 nfs_need_recover(nmp
, error
);
1711 if (nmp
->nm_stateinuse
> 0)
1712 nmp
->nm_stateinuse
--;
1714 panic("NFS mount state in use count underrun");
1715 if (!nmp
->nm_stateinuse
&& (nmp
->nm_state
& NFSSTA_RECOVER
))
1716 wakeup(&nmp
->nm_stateinuse
);
1717 lck_mtx_unlock(&nmp
->nm_lock
);
1718 if (error
== NFSERR_GRACE
)
1719 tsleep(&nmp
->nm_state
, (PZERO
-1), "nfsgrace", 2*hz
);
1725 * Does the error mean we should restart/redo a state-related operation?
1728 nfs_mount_state_error_should_restart(int error
)
1731 case NFSERR_STALE_STATEID
:
1732 case NFSERR_STALE_CLIENTID
:
1733 case NFSERR_ADMIN_REVOKED
:
1734 case NFSERR_EXPIRED
:
1735 case NFSERR_OLD_STATEID
:
1736 case NFSERR_BAD_STATEID
:
1744 * In some cases we may want to limit how many times we restart a
1745 * state-related operation - e.g. we're repeatedly getting NFSERR_GRACE.
1746 * Base the limit on the lease (as long as it's not too short).
1749 nfs_mount_state_max_restarts(struct nfsmount
*nmp
)
1751 return (MAX(nmp
->nm_fsattr
.nfsa_lease
, 60));
1755 * Does the error mean we probably lost a delegation?
1758 nfs_mount_state_error_delegation_lost(int error
)
1761 case NFSERR_STALE_STATEID
:
1762 case NFSERR_ADMIN_REVOKED
:
1763 case NFSERR_EXPIRED
:
1764 case NFSERR_OLD_STATEID
:
1765 case NFSERR_BAD_STATEID
:
1766 case NFSERR_GRACE
: /* ugh! (stupid) RFC 3530 specifically disallows CLAIM_DELEGATE_CUR during grace period? */
1774 * Mark an NFS node's open state as busy.
1777 nfs_open_state_set_busy(nfsnode_t np
, thread_t thd
)
1779 struct nfsmount
*nmp
;
1780 struct timespec ts
= {2, 0};
1781 int error
= 0, slpflag
;
1784 if (nfs_mount_gone(nmp
))
1786 slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
1788 lck_mtx_lock(&np
->n_openlock
);
1789 while (np
->n_openflags
& N_OPENBUSY
) {
1790 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0)))
1792 np
->n_openflags
|= N_OPENWANT
;
1793 msleep(&np
->n_openflags
, &np
->n_openlock
, slpflag
, "nfs_open_state_set_busy", &ts
);
1797 np
->n_openflags
|= N_OPENBUSY
;
1798 lck_mtx_unlock(&np
->n_openlock
);
1804 * Clear an NFS node's open state busy flag and wake up
1805 * anyone wanting it.
1808 nfs_open_state_clear_busy(nfsnode_t np
)
1812 lck_mtx_lock(&np
->n_openlock
);
1813 if (!(np
->n_openflags
& N_OPENBUSY
))
1814 panic("nfs_open_state_clear_busy");
1815 wanted
= (np
->n_openflags
& N_OPENWANT
);
1816 np
->n_openflags
&= ~(N_OPENBUSY
|N_OPENWANT
);
1817 lck_mtx_unlock(&np
->n_openlock
);
1819 wakeup(&np
->n_openflags
);
1823 * Search a mount's open owner list for the owner for this credential.
1824 * If not found and "alloc" is set, then allocate a new one.
1826 struct nfs_open_owner
*
1827 nfs_open_owner_find(struct nfsmount
*nmp
, kauth_cred_t cred
, int alloc
)
1829 uid_t uid
= kauth_cred_getuid(cred
);
1830 struct nfs_open_owner
*noop
, *newnoop
= NULL
;
1833 lck_mtx_lock(&nmp
->nm_lock
);
1834 TAILQ_FOREACH(noop
, &nmp
->nm_open_owners
, noo_link
) {
1835 if (kauth_cred_getuid(noop
->noo_cred
) == uid
)
1839 if (!noop
&& !newnoop
&& alloc
) {
1840 lck_mtx_unlock(&nmp
->nm_lock
);
1841 MALLOC(newnoop
, struct nfs_open_owner
*, sizeof(struct nfs_open_owner
), M_TEMP
, M_WAITOK
);
1844 bzero(newnoop
, sizeof(*newnoop
));
1845 lck_mtx_init(&newnoop
->noo_lock
, nfs_open_grp
, LCK_ATTR_NULL
);
1846 newnoop
->noo_mount
= nmp
;
1847 kauth_cred_ref(cred
);
1848 newnoop
->noo_cred
= cred
;
1849 newnoop
->noo_name
= OSAddAtomic(1, &nfs_open_owner_seqnum
);
1850 TAILQ_INIT(&newnoop
->noo_opens
);
1853 if (!noop
&& newnoop
) {
1854 newnoop
->noo_flags
|= NFS_OPEN_OWNER_LINK
;
1855 TAILQ_INSERT_HEAD(&nmp
->nm_open_owners
, newnoop
, noo_link
);
1858 lck_mtx_unlock(&nmp
->nm_lock
);
1860 if (newnoop
&& (noop
!= newnoop
))
1861 nfs_open_owner_destroy(newnoop
);
1864 nfs_open_owner_ref(noop
);
1870 * destroy an open owner that's no longer needed
1873 nfs_open_owner_destroy(struct nfs_open_owner
*noop
)
1876 kauth_cred_unref(&noop
->noo_cred
);
1877 lck_mtx_destroy(&noop
->noo_lock
, nfs_open_grp
);
1882 * acquire a reference count on an open owner
1885 nfs_open_owner_ref(struct nfs_open_owner
*noop
)
1887 lck_mtx_lock(&noop
->noo_lock
);
1889 lck_mtx_unlock(&noop
->noo_lock
);
1893 * drop a reference count on an open owner and destroy it if
1894 * it is no longer referenced and no longer on the mount's list.
1897 nfs_open_owner_rele(struct nfs_open_owner
*noop
)
1899 lck_mtx_lock(&noop
->noo_lock
);
1900 if (noop
->noo_refcnt
< 1)
1901 panic("nfs_open_owner_rele: no refcnt");
1903 if (!noop
->noo_refcnt
&& (noop
->noo_flags
& NFS_OPEN_OWNER_BUSY
))
1904 panic("nfs_open_owner_rele: busy");
1905 /* XXX we may potentially want to clean up idle/unused open owner structures */
1906 if (noop
->noo_refcnt
|| (noop
->noo_flags
& NFS_OPEN_OWNER_LINK
)) {
1907 lck_mtx_unlock(&noop
->noo_lock
);
1910 /* owner is no longer referenced or linked to mount, so destroy it */
1911 lck_mtx_unlock(&noop
->noo_lock
);
1912 nfs_open_owner_destroy(noop
);
1916 * Mark an open owner as busy because we are about to
1917 * start an operation that uses and updates open owner state.
1920 nfs_open_owner_set_busy(struct nfs_open_owner
*noop
, thread_t thd
)
1922 struct nfsmount
*nmp
;
1923 struct timespec ts
= {2, 0};
1924 int error
= 0, slpflag
;
1926 nmp
= noop
->noo_mount
;
1927 if (nfs_mount_gone(nmp
))
1929 slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
1931 lck_mtx_lock(&noop
->noo_lock
);
1932 while (noop
->noo_flags
& NFS_OPEN_OWNER_BUSY
) {
1933 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0)))
1935 noop
->noo_flags
|= NFS_OPEN_OWNER_WANT
;
1936 msleep(noop
, &noop
->noo_lock
, slpflag
, "nfs_open_owner_set_busy", &ts
);
1940 noop
->noo_flags
|= NFS_OPEN_OWNER_BUSY
;
1941 lck_mtx_unlock(&noop
->noo_lock
);
1947 * Clear the busy flag on an open owner and wake up anyone waiting
1951 nfs_open_owner_clear_busy(struct nfs_open_owner
*noop
)
1955 lck_mtx_lock(&noop
->noo_lock
);
1956 if (!(noop
->noo_flags
& NFS_OPEN_OWNER_BUSY
))
1957 panic("nfs_open_owner_clear_busy");
1958 wanted
= (noop
->noo_flags
& NFS_OPEN_OWNER_WANT
);
1959 noop
->noo_flags
&= ~(NFS_OPEN_OWNER_BUSY
|NFS_OPEN_OWNER_WANT
);
1960 lck_mtx_unlock(&noop
->noo_lock
);
1966 * Given an open/lock owner and an error code, increment the
1967 * sequence ID if appropriate.
1970 nfs_owner_seqid_increment(struct nfs_open_owner
*noop
, struct nfs_lock_owner
*nlop
, int error
)
1973 case NFSERR_STALE_CLIENTID
:
1974 case NFSERR_STALE_STATEID
:
1975 case NFSERR_OLD_STATEID
:
1976 case NFSERR_BAD_STATEID
:
1977 case NFSERR_BAD_SEQID
:
1979 case NFSERR_RESOURCE
:
1980 case NFSERR_NOFILEHANDLE
:
1981 /* do not increment the open seqid on these errors */
1991 * Search a node's open file list for any conflicts with this request.
1992 * Also find this open owner's open file structure.
1993 * If not found and "alloc" is set, then allocate one.
1998 struct nfs_open_owner
*noop
,
1999 struct nfs_open_file
**nofpp
,
2000 uint32_t accessMode
,
2005 return nfs_open_file_find_internal(np
, noop
, nofpp
, accessMode
, denyMode
, alloc
);
2009 * Internally, allow using a provisional nodeless nofp (passed in via *nofpp)
2010 * if an existing one is not found. This is used in "create" scenarios to
2011 * officially add the provisional nofp to the node once the node is created.
2014 nfs_open_file_find_internal(
2016 struct nfs_open_owner
*noop
,
2017 struct nfs_open_file
**nofpp
,
2018 uint32_t accessMode
,
2022 struct nfs_open_file
*nofp
= NULL
, *nofp2
, *newnofp
= NULL
;
2027 lck_mtx_lock(&np
->n_openlock
);
2028 TAILQ_FOREACH(nofp2
, &np
->n_opens
, nof_link
) {
2029 if (nofp2
->nof_owner
== noop
) {
2034 if ((accessMode
& nofp2
->nof_deny
) || (denyMode
& nofp2
->nof_access
)) {
2035 /* This request conflicts with an existing open on this client. */
2036 lck_mtx_unlock(&np
->n_openlock
);
2042 * If this open owner doesn't have an open
2043 * file structure yet, we create one for it.
2045 if (!nofp
&& !*nofpp
&& !newnofp
&& alloc
) {
2046 lck_mtx_unlock(&np
->n_openlock
);
2048 MALLOC(newnofp
, struct nfs_open_file
*, sizeof(struct nfs_open_file
), M_TEMP
, M_WAITOK
);
2051 bzero(newnofp
, sizeof(*newnofp
));
2052 lck_mtx_init(&newnofp
->nof_lock
, nfs_open_grp
, LCK_ATTR_NULL
);
2053 newnofp
->nof_owner
= noop
;
2054 nfs_open_owner_ref(noop
);
2055 newnofp
->nof_np
= np
;
2056 lck_mtx_lock(&noop
->noo_lock
);
2057 TAILQ_INSERT_HEAD(&noop
->noo_opens
, newnofp
, nof_oolink
);
2058 lck_mtx_unlock(&noop
->noo_lock
);
2064 (*nofpp
)->nof_np
= np
;
2070 TAILQ_INSERT_HEAD(&np
->n_opens
, nofp
, nof_link
);
2073 lck_mtx_unlock(&np
->n_openlock
);
2075 if (alloc
&& newnofp
&& (nofp
!= newnofp
))
2076 nfs_open_file_destroy(newnofp
);
2079 return (nofp
? 0 : ESRCH
);
2083 * Destroy an open file structure.
2086 nfs_open_file_destroy(struct nfs_open_file
*nofp
)
2088 lck_mtx_lock(&nofp
->nof_owner
->noo_lock
);
2089 TAILQ_REMOVE(&nofp
->nof_owner
->noo_opens
, nofp
, nof_oolink
);
2090 lck_mtx_unlock(&nofp
->nof_owner
->noo_lock
);
2091 nfs_open_owner_rele(nofp
->nof_owner
);
2092 lck_mtx_destroy(&nofp
->nof_lock
, nfs_open_grp
);
2097 * Mark an open file as busy because we are about to
2098 * start an operation that uses and updates open file state.
2101 nfs_open_file_set_busy(struct nfs_open_file
*nofp
, thread_t thd
)
2103 struct nfsmount
*nmp
;
2104 struct timespec ts
= {2, 0};
2105 int error
= 0, slpflag
;
2107 nmp
= nofp
->nof_owner
->noo_mount
;
2108 if (nfs_mount_gone(nmp
))
2110 slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
2112 lck_mtx_lock(&nofp
->nof_lock
);
2113 while (nofp
->nof_flags
& NFS_OPEN_FILE_BUSY
) {
2114 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0)))
2116 nofp
->nof_flags
|= NFS_OPEN_FILE_WANT
;
2117 msleep(nofp
, &nofp
->nof_lock
, slpflag
, "nfs_open_file_set_busy", &ts
);
2121 nofp
->nof_flags
|= NFS_OPEN_FILE_BUSY
;
2122 lck_mtx_unlock(&nofp
->nof_lock
);
2128 * Clear the busy flag on an open file and wake up anyone waiting
2132 nfs_open_file_clear_busy(struct nfs_open_file
*nofp
)
2136 lck_mtx_lock(&nofp
->nof_lock
);
2137 if (!(nofp
->nof_flags
& NFS_OPEN_FILE_BUSY
))
2138 panic("nfs_open_file_clear_busy");
2139 wanted
= (nofp
->nof_flags
& NFS_OPEN_FILE_WANT
);
2140 nofp
->nof_flags
&= ~(NFS_OPEN_FILE_BUSY
|NFS_OPEN_FILE_WANT
);
2141 lck_mtx_unlock(&nofp
->nof_lock
);
/*
 * nfs_open_file_add_open(nofp, accessMode, denyMode, delegated):
 * Record one more open of this file with the given share access/deny
 * combination.  The access/deny mode bits are OR'ed into nof_access /
 * nof_deny, the (access x deny) counter matching the combination is
 * incremented (delegated opens use the nof_d_* counters, plain opens
 * the nof_* counters), and the total open count is bumped — all under
 * the open file's lock.
 * NOTE(review): extraction dropped most of the increment statements in
 * the branches below; only a few (e.g. nof_d_rw_dw++, nof_d_*_drw++)
 * survive.  The visible structure is two parallel if/else ladders —
 * presumably one for delegated and one for non-delegated opens —
 * selected by `delegated`; confirm against the unmangled file.
 */
2147 * Add the open state for the given access/deny modes to this open file.
2150 nfs_open_file_add_open(struct nfs_open_file
*nofp
, uint32_t accessMode
, uint32_t denyMode
, int delegated
)
2152 lck_mtx_lock(&nofp
->nof_lock
);
2153 nofp
->nof_access
|= accessMode
;
2154 nofp
->nof_deny
|= denyMode
;
/* first ladder: bump the delegated (nof_d_*) counter for this combo */
2157 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2158 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
)
2160 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
)
2162 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
)
2164 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2165 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
)
2167 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
)
2169 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
)
2170 nofp
->nof_d_rw_dw
++;
2171 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2172 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
)
2173 nofp
->nof_d_r_drw
++;
2174 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
)
2175 nofp
->nof_d_w_drw
++;
2176 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
)
2177 nofp
->nof_d_rw_drw
++;
/* second ladder: bump the non-delegated (nof_*) counter for this combo */
2180 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2181 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
)
2183 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
)
2185 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
)
2187 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2188 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
)
2190 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
)
2192 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
)
2194 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2195 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
)
2197 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
)
2199 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
)
2204 nofp
->nof_opencnt
++;
2205 lck_mtx_unlock(&nofp
->nof_lock
);
2209 * Find which particular open combo will be closed and report what
2210 * the new modes will be and whether the open was delegated.
2213 nfs_open_file_remove_open_find(
2214 struct nfs_open_file
*nofp
,
2215 uint32_t accessMode
,
2217 uint32_t *newAccessMode
,
2218 uint32_t *newDenyMode
,
2222 * Calculate new modes: a mode bit gets removed when there's only
2223 * one count in all the corresponding counts
2225 *newAccessMode
= nofp
->nof_access
;
2226 *newDenyMode
= nofp
->nof_deny
;
2228 if ((accessMode
& NFS_OPEN_SHARE_ACCESS_READ
) &&
2229 (nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_READ
) &&
2230 ((nofp
->nof_r
+ nofp
->nof_d_r
+
2231 nofp
->nof_rw
+ nofp
->nof_d_rw
+
2232 nofp
->nof_r_dw
+ nofp
->nof_d_r_dw
+
2233 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
+
2234 nofp
->nof_r_drw
+ nofp
->nof_d_r_drw
+
2235 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
) == 1))
2236 *newAccessMode
&= ~NFS_OPEN_SHARE_ACCESS_READ
;
2237 if ((accessMode
& NFS_OPEN_SHARE_ACCESS_WRITE
) &&
2238 (nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_WRITE
) &&
2239 ((nofp
->nof_w
+ nofp
->nof_d_w
+
2240 nofp
->nof_rw
+ nofp
->nof_d_rw
+
2241 nofp
->nof_w_dw
+ nofp
->nof_d_w_dw
+
2242 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
+
2243 nofp
->nof_w_drw
+ nofp
->nof_d_w_drw
+
2244 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
) == 1))
2245 *newAccessMode
&= ~NFS_OPEN_SHARE_ACCESS_WRITE
;
2246 if ((denyMode
& NFS_OPEN_SHARE_DENY_READ
) &&
2247 (nofp
->nof_deny
& NFS_OPEN_SHARE_DENY_READ
) &&
2248 ((nofp
->nof_r_drw
+ nofp
->nof_d_r_drw
+
2249 nofp
->nof_w_drw
+ nofp
->nof_d_w_drw
+
2250 nofp
->nof_rw_drw
+ nofp
->nof_d_rw_drw
) == 1))
2251 *newDenyMode
&= ~NFS_OPEN_SHARE_DENY_READ
;
2252 if ((denyMode
& NFS_OPEN_SHARE_DENY_WRITE
) &&
2253 (nofp
->nof_deny
& NFS_OPEN_SHARE_DENY_WRITE
) &&
2254 ((nofp
->nof_r_drw
+ nofp
->nof_d_r_drw
+
2255 nofp
->nof_w_drw
+ nofp
->nof_d_w_drw
+
2256 nofp
->nof_rw_drw
+ nofp
->nof_d_rw_drw
+
2257 nofp
->nof_r_dw
+ nofp
->nof_d_r_dw
+
2258 nofp
->nof_w_dw
+ nofp
->nof_d_w_dw
+
2259 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
) == 1))
2260 *newDenyMode
&= ~NFS_OPEN_SHARE_DENY_WRITE
;
2262 /* Find the corresponding open access/deny mode counter. */
2263 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2264 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
)
2265 *delegated
= (nofp
->nof_d_r
!= 0);
2266 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
)
2267 *delegated
= (nofp
->nof_d_w
!= 0);
2268 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
)
2269 *delegated
= (nofp
->nof_d_rw
!= 0);
2272 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2273 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
)
2274 *delegated
= (nofp
->nof_d_r_dw
!= 0);
2275 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
)
2276 *delegated
= (nofp
->nof_d_w_dw
!= 0);
2277 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
)
2278 *delegated
= (nofp
->nof_d_rw_dw
!= 0);
2281 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2282 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
)
2283 *delegated
= (nofp
->nof_d_r_drw
!= 0);
2284 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
)
2285 *delegated
= (nofp
->nof_d_w_drw
!= 0);
2286 else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
)
2287 *delegated
= (nofp
->nof_d_rw_drw
!= 0);
/*
 * nfs_open_file_remove_open(nofp, accessMode, denyMode):
 * Remove one open with the given access/deny combination: compute the
 * resulting modes via nfs_open_file_remove_open_find(), decrement the
 * matching per-combination counter (delegated counters are checked/
 * decremented first when the open was delegated, else the plain ones),
 * then store the new access/deny modes and drop the total open count —
 * all under the open file's lock.  Counter underruns are logged via NP()
 * with the owner's uid rather than panicking.
 * NOTE(review): extraction dropped lines in this block — the `int
 * delegated;` declaration, the `if (delegated) ... else ...` selectors,
 * and most `else nofp->nof_X--;` decrement lines are not visible here;
 * only the underrun checks and a few decrements survive.
 */
2294 * Remove the open state for the given access/deny modes to this open file.
2297 nfs_open_file_remove_open(struct nfs_open_file
*nofp
, uint32_t accessMode
, uint32_t denyMode
)
2299 uint32_t newAccessMode
, newDenyMode
;
2302 lck_mtx_lock(&nofp
->nof_lock
);
2303 nfs_open_file_remove_open_find(nofp
, accessMode
, denyMode
, &newAccessMode
, &newDenyMode
, &delegated
);
2305 /* Decrement the corresponding open access/deny mode counter. */
2306 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2307 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2309 if (nofp
->nof_d_r
== 0)
2310 NP(nofp
->nof_np
, "nfs: open(R) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2314 if (nofp
->nof_r
== 0)
2315 NP(nofp
->nof_np
, "nfs: open(R) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2319 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2321 if (nofp
->nof_d_w
== 0)
2322 NP(nofp
->nof_np
, "nfs: open(W) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2326 if (nofp
->nof_w
== 0)
2327 NP(nofp
->nof_np
, "nfs: open(W) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2331 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2333 if (nofp
->nof_d_rw
== 0)
2334 NP(nofp
->nof_np
, "nfs: open(RW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2338 if (nofp
->nof_rw
== 0)
2339 NP(nofp
->nof_np
, "nfs: open(RW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2344 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2345 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2347 if (nofp
->nof_d_r_dw
== 0)
2348 NP(nofp
->nof_np
, "nfs: open(R,DW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2352 if (nofp
->nof_r_dw
== 0)
2353 NP(nofp
->nof_np
, "nfs: open(R,DW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2357 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2359 if (nofp
->nof_d_w_dw
== 0)
2360 NP(nofp
->nof_np
, "nfs: open(W,DW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2364 if (nofp
->nof_w_dw
== 0)
2365 NP(nofp
->nof_np
, "nfs: open(W,DW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2369 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2371 if (nofp
->nof_d_rw_dw
== 0)
2372 NP(nofp
->nof_np
, "nfs: open(RW,DW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2374 nofp
->nof_d_rw_dw
--;
2376 if (nofp
->nof_rw_dw
== 0)
2377 NP(nofp
->nof_np
, "nfs: open(RW,DW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2382 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2383 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2385 if (nofp
->nof_d_r_drw
== 0)
2386 NP(nofp
->nof_np
, "nfs: open(R,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2388 nofp
->nof_d_r_drw
--;
2390 if (nofp
->nof_r_drw
== 0)
2391 NP(nofp
->nof_np
, "nfs: open(R,DRW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2395 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2397 if (nofp
->nof_d_w_drw
== 0)
2398 NP(nofp
->nof_np
, "nfs: open(W,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2400 nofp
->nof_d_w_drw
--;
2402 if (nofp
->nof_w_drw
== 0)
2403 NP(nofp
->nof_np
, "nfs: open(W,DRW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2407 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2409 if (nofp
->nof_d_rw_drw
== 0)
2410 NP(nofp
->nof_np
, "nfs: open(RW,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2412 nofp
->nof_d_rw_drw
--;
2414 if (nofp
->nof_rw_drw
== 0)
2415 NP(nofp
->nof_np
, "nfs: open(RW,DRW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2422 /* update the modes */
2423 nofp
->nof_access
= newAccessMode
;
2424 nofp
->nof_deny
= newDenyMode
;
2425 nofp
->nof_opencnt
--;
2426 lck_mtx_unlock(&nofp
->nof_lock
);
/*
 * nfs_get_stateid(np, thd, cred, sid):
 * Copy into *sid the most specific NFSv4 stateid available for this node:
 * delegation stateid > this process's lock-owner stateid (if it holds
 * locks) > this credential's open-file stateid > the all-ones "special"
 * stateid when no open exists.  Drops any lock-owner/open-owner
 * references it took along the way.
 * NOTE(review): extraction dropped lines here (braces, the `else`
 * joining the delegation branch to the lock/open lookup, the condition
 * tail at original line 2458, and the `if (s) ... else ...` around the
 * copy-out are not visible).
 */
2431 * Get the current (delegation, lock, open, default) stateid for this node.
2432 * If node has a delegation, use that stateid.
2433 * If pid has a lock, use the lockowner's stateid.
2434 * Or use the open file's stateid.
2435 * If no open file, use a default stateid of all ones.
2438 nfs_get_stateid(nfsnode_t np
, thread_t thd
, kauth_cred_t cred
, nfs_stateid
*sid
)
2440 struct nfsmount
*nmp
= NFSTONMP(np
);
2441 proc_t p
= thd
? get_bsdthreadtask_info(thd
) : current_proc(); // XXX async I/O requests don't have a thread
2442 struct nfs_open_owner
*noop
= NULL
;
2443 struct nfs_open_file
*nofp
= NULL
;
2444 struct nfs_lock_owner
*nlop
= NULL
;
2445 nfs_stateid
*s
= NULL
;
/* 1) a delegation stateid trumps everything */
2447 if (np
->n_openflags
& N_DELEG_MASK
) {
2448 s
= &np
->n_dstateid
;
/* 2) if this process holds byte-range locks, use the lock stateid */
2451 nlop
= nfs_lock_owner_find(np
, p
, 0);
2452 if (nlop
&& !TAILQ_EMPTY(&nlop
->nlo_locks
)) {
2453 /* we hold locks, use lock stateid */
2454 s
= &nlop
->nlo_stateid
;
2455 } else if (((noop
= nfs_open_owner_find(nmp
, cred
, 0))) &&
2456 (nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 0) == 0) &&
2457 !(nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) &&
2459 /* we (should) have the file open, use open stateid */
2460 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)
2461 nfs4_reopen(nofp
, thd
);
2462 if (!(nofp
->nof_flags
& NFS_OPEN_FILE_LOST
))
2463 s
= &nofp
->nof_stateid
;
/* copy the chosen stateid out to the caller */
2468 sid
->seqid
= s
->seqid
;
2469 sid
->other
[0] = s
->other
[0];
2470 sid
->other
[1] = s
->other
[1];
2471 sid
->other
[2] = s
->other
[2];
2473 /* named attributes may not have a stateid for reads, so don't complain for them */
2474 if (!(np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
))
2475 NP(np
, "nfs_get_stateid: no stateid");
/* fall back to the all-ones special stateid */
2476 sid
->seqid
= sid
->other
[0] = sid
->other
[1] = sid
->other
[2] = 0xffffffff;
2479 nfs_lock_owner_rele(nlop
);
2481 nfs_open_owner_rele(noop
);
/*
 * nfs4_open_delegated(np, nofp, accessMode, denyMode, ctx):
 * Try to satisfy an OPEN locally while holding a delegation.  Builds the
 * requested kauth action from the access mode (opportunistically adding
 * READ so the buffer cache can work), authorizes it against the cached
 * delegation ACE if present, and otherwise falls back to asking the
 * server via an ACCESS request.  On success the open is recorded as a
 * delegated open via nfs_open_file_add_open(..., 1); on failure returns
 * the error or EACCES.
 * NOTE(review): extraction dropped lines here (the np/denyMode/ctx
 * parameters, `int action = 0;`, `readtoo = 1;`, the `authorized = 1`
 * assignments, the retry `goto`, and braces are not visible).
 */
2486 * When we have a delegation, we may be able to perform the OPEN locally.
2487 * Perform the OPEN by checking the delegation ACE and/or checking via ACCESS.
2490 nfs4_open_delegated(
2492 struct nfs_open_file
*nofp
,
2493 uint32_t accessMode
,
2497 int error
= 0, ismember
, readtoo
= 0, authorized
= 0;
2499 struct kauth_acl_eval eval
;
2500 kauth_cred_t cred
= vfs_context_ucred(ctx
);
2502 if (!(accessMode
& NFS_OPEN_SHARE_ACCESS_READ
)) {
2504 * Try to open it for read access too,
2505 * so the buffer cache can read data.
2508 accessMode
|= NFS_OPEN_SHARE_ACCESS_READ
;
/* translate share access bits into kauth actions */
2513 if (accessMode
& NFS_OPEN_SHARE_ACCESS_READ
)
2514 action
|= KAUTH_VNODE_READ_DATA
;
2515 if (accessMode
& NFS_OPEN_SHARE_ACCESS_WRITE
)
2516 action
|= KAUTH_VNODE_WRITE_DATA
;
2518 /* evaluate ACE (if we have one) */
2519 if (np
->n_dace
.ace_flags
) {
2520 eval
.ae_requested
= action
;
2521 eval
.ae_acl
= &np
->n_dace
;
2523 eval
.ae_options
= 0;
2524 if (np
->n_vattr
.nva_uid
== kauth_cred_getuid(cred
))
2525 eval
.ae_options
|= KAUTH_AEVAL_IS_OWNER
;
2526 error
= kauth_cred_ismember_gid(cred
, np
->n_vattr
.nva_gid
, &ismember
);
2527 if (!error
&& ismember
)
2528 eval
.ae_options
|= KAUTH_AEVAL_IN_GROUP
;
2530 eval
.ae_exp_gall
= KAUTH_VNODE_GENERIC_ALL_BITS
;
2531 eval
.ae_exp_gread
= KAUTH_VNODE_GENERIC_READ_BITS
;
2532 eval
.ae_exp_gwrite
= KAUTH_VNODE_GENERIC_WRITE_BITS
;
2533 eval
.ae_exp_gexec
= KAUTH_VNODE_GENERIC_EXECUTE_BITS
;
2535 error
= kauth_acl_evaluate(cred
, &eval
);
2537 if (!error
&& (eval
.ae_result
== KAUTH_RESULT_ALLOW
))
2542 /* need to ask the server via ACCESS */
2543 struct vnop_access_args naa
;
2544 naa
.a_desc
= &vnop_access_desc
;
2545 naa
.a_vp
= NFSTOV(np
);
2546 naa
.a_action
= action
;
2547 naa
.a_context
= ctx
;
2548 if (!(error
= nfs_vnop_access(&naa
)))
2554 /* try again without the extra read access */
2555 accessMode
&= ~NFS_OPEN_SHARE_ACCESS_READ
;
2559 return (error
? error
: EACCES
);
/* record this as a delegated open (last arg 1) */
2562 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, 1);
/*
 * nfs4_open(np, nofp, accessMode, denyMode, ctx):
 * Open a file with the given access/deny modes.  First tries to handle
 * the open locally under a usable delegation (write delegation, or a
 * read delegation when the open wants neither write access nor deny-
 * read); otherwise resolves the node's parent and name (special-casing
 * named-attribute nodes whose parent VFS may have reparented), builds a
 * componentname, and issues the OPEN RPC — opportunistically adding
 * READ access for the buffer cache and retrying without it on failure.
 * On success the open is recorded as non-delegated.
 * NOTE(review): extraction dropped lines here (the function name line
 * with the np parameter, denyMode/ctx parameters, `vnode_t dvp`,
 * `int namelen`, early returns/gotos, and several braces are not
 * visible in this chunk).
 */
2569 * Open a file with the given access/deny modes.
2571 * If we have a delegation, we may be able to handle the open locally.
2572 * Otherwise, we will always send the open RPC even if this open's mode is
2573 * a subset of all the existing opens. This makes sure that we will always
2574 * be able to do a downgrade to any of the open modes.
2576 * Note: local conflicts should have already been checked in nfs_open_file_find().
2581 struct nfs_open_file
*nofp
,
2582 uint32_t accessMode
,
2586 vnode_t vp
= NFSTOV(np
);
2588 struct componentname cn
;
2589 const char *vname
= NULL
;
2591 char smallname
[128];
2592 char *filename
= NULL
;
2593 int error
= 0, readtoo
= 0;
2596 * We can handle the OPEN ourselves if we have a delegation,
2597 * unless it's a read delegation and the open is asking for
2598 * either write access or deny read. We also don't bother to
2599 * use the delegation if it's being returned.
2601 if (np
->n_openflags
& N_DELEG_MASK
) {
2602 if ((error
= nfs_open_state_set_busy(np
, vfs_context_thread(ctx
))))
2604 if ((np
->n_openflags
& N_DELEG_MASK
) && !(np
->n_openflags
& N_DELEG_RETURN
) &&
2605 (((np
->n_openflags
& N_DELEG_MASK
) == N_DELEG_WRITE
) ||
2606 (!(accessMode
& NFS_OPEN_SHARE_ACCESS_WRITE
) && !(denyMode
& NFS_OPEN_SHARE_DENY_READ
)))) {
2607 error
= nfs4_open_delegated(np
, nofp
, accessMode
, denyMode
, ctx
);
2608 nfs_open_state_clear_busy(np
);
2611 nfs_open_state_clear_busy(np
);
2615 * [sigh] We can't trust VFS to get the parent right for named
2616 * attribute nodes. (It likes to reparent the nodes after we've
2617 * created them.) Luckily we can probably get the right parent
2618 * from the n_parent we have stashed away.
2620 if ((np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) &&
2621 (((dvp
= np
->n_parent
)) && (error
= vnode_get(dvp
))))
2624 dvp
= vnode_getparent(vp
);
2625 vname
= vnode_getname(vp
);
2626 if (!dvp
|| !vname
) {
/* build the name, spilling to a heap buffer if it won't fit */
2631 filename
= &smallname
[0];
2632 namelen
= snprintf(filename
, sizeof(smallname
), "%s", vname
);
2633 if (namelen
>= sizeof(smallname
)) {
2634 MALLOC(filename
, char *, namelen
+1, M_TEMP
, M_WAITOK
);
2639 snprintf(filename
, namelen
+1, "%s", vname
);
2641 bzero(&cn
, sizeof(cn
));
2642 cn
.cn_nameptr
= filename
;
2643 cn
.cn_namelen
= namelen
;
2645 if (!(accessMode
& NFS_OPEN_SHARE_ACCESS_READ
)) {
2647 * Try to open it for read access too,
2648 * so the buffer cache can read data.
2651 accessMode
|= NFS_OPEN_SHARE_ACCESS_READ
;
2654 error
= nfs4_open_rpc(nofp
, ctx
, &cn
, NULL
, dvp
, &vp
, NFS_OPEN_NOCREATE
, accessMode
, denyMode
);
2656 if (!nfs_mount_state_error_should_restart(error
) &&
2657 (error
!= EINTR
) && (error
!= ERESTART
) && readtoo
) {
2658 /* try again without the extra read access */
2659 accessMode
&= ~NFS_OPEN_SHARE_ACCESS_READ
;
/* record as a non-delegated open (last arg 0) */
2665 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, 0);
2667 if (filename
&& (filename
!= &smallname
[0]))
2668 FREE(filename
, M_TEMP
);
2670 vnode_putname(vname
);
/*
 * nfs_vnop_mmap:
 * VNOP for mmap().  Computes the share access the mapping needs (READ is
 * always implied; WRITE when PROT_WRITE is requested, deny none), finds
 * the open file for this credential's open owner, reopens/opens for read
 * if necessary (NFSv2/3 opens are recorded locally; NFSv4 issues an OPEN
 * and marks NEEDCLOSE), then mirrors an existing open for the mapping:
 * it picks a matching (access, deny) combination from the delegated or
 * plain counters — possibly upgrading a read-only mapping to an existing
 * read/write open — and, if the new mmap mode beats the recorded one,
 * closes the previous mmap open and records the new one.  Finally it
 * marks the node mapped and resets the mount's "squishy"/dead-timeout
 * state.
 * NOTE(review): extraction dropped many lines in this block (the
 * function header, local `nmp` assignment, early `return` statements,
 * `goto out` paths, restart loop, mount mapper accounting, and various
 * braces/`if (nofp->nof_d_rw)`-style selector heads are not visible).
 */
2678 struct vnop_mmap_args
/* {
2679 struct vnodeop_desc *a_desc;
2682 vfs_context_t a_context;
2685 vfs_context_t ctx
= ap
->a_context
;
2686 vnode_t vp
= ap
->a_vp
;
2687 nfsnode_t np
= VTONFS(vp
);
2688 int error
= 0, accessMode
, denyMode
, delegated
;
2689 struct nfsmount
*nmp
;
2690 struct nfs_open_owner
*noop
= NULL
;
2691 struct nfs_open_file
*nofp
= NULL
;
2694 if (nfs_mount_gone(nmp
))
/* only regular files with a readable/writable mapping are interesting */
2697 if (!vnode_isreg(vp
) || !(ap
->a_fflags
& (PROT_READ
|PROT_WRITE
)))
2699 if (np
->n_flag
& NREVOKE
)
2703 * fflags contains some combination of: PROT_READ, PROT_WRITE
2704 * Since it's not possible to mmap() without having the file open for reading,
2705 * read access is always there (regardless if PROT_READ is not set).
2707 accessMode
= NFS_OPEN_SHARE_ACCESS_READ
;
2708 if (ap
->a_fflags
& PROT_WRITE
)
2709 accessMode
|= NFS_OPEN_SHARE_ACCESS_WRITE
;
2710 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
2712 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1);
2717 error
= nfs_mount_state_in_use_start(nmp
, NULL
);
2719 nfs_open_owner_rele(noop
);
2722 if (np
->n_flag
& NREVOKE
) {
2724 nfs_mount_state_in_use_end(nmp
, 0);
2725 nfs_open_owner_rele(noop
);
2729 error
= nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 1);
2730 if (error
|| (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
))) {
2731 NP(np
, "nfs_vnop_mmap: no open file for owner, error %d, %d", error
, kauth_cred_getuid(noop
->noo_cred
));
2734 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
2735 nfs_mount_state_in_use_end(nmp
, 0);
2736 error
= nfs4_reopen(nofp
, NULL
);
2742 error
= nfs_open_file_set_busy(nofp
, NULL
);
2749 * The open reference for mmap must mirror an existing open because
2750 * we may need to reclaim it after the file is closed.
2751 * So grab another open count matching the accessMode passed in.
2752 * If we already had an mmap open, prefer read/write without deny mode.
2753 * This means we may have to drop the current mmap open first.
2755 * N.B. We should have an open for the mmap, because, mmap was
2756 * called on an open descriptor, or we've created an open for read
2757 * from reading the first page for execve. However, if we piggy
2758 * backed on an existing NFS_OPEN_SHARE_ACCESS_READ/NFS_OPEN_SHARE_DENY_NONE
2759 * that open may have closed.
2762 if (!(nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_READ
)) {
2763 if (nofp
->nof_flags
& NFS_OPEN_FILE_NEEDCLOSE
) {
2764 /* We shouldn't get here. We've already open the file for execve */
2765 NP(np
, "nfs_vnop_mmap: File already needs close access: 0x%x, cred: %d thread: %lld",
2766 nofp
->nof_access
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
), thread_tid(vfs_context_thread(ctx
)));
2769 * mmapings for execve are just for read. Get out with EPERM if the accessMode is not ACCESS_READ
2770 * or the access would be denied. Other accesses should have an open descriptor for the mapping.
2772 if (accessMode
!= NFS_OPEN_SHARE_ACCESS_READ
|| (accessMode
& nofp
->nof_deny
)) {
2773 /* not asking for just read access -> fail */
2777 /* we don't have the file open, so open it for read access */
2778 if (nmp
->nm_vers
< NFS_VER4
) {
2779 /* NFS v2/v3 opens are always allowed - so just add it. */
2780 nfs_open_file_add_open(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, 0);
2783 error
= nfs4_open(np
, nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, ctx
);
2786 nofp
->nof_flags
|= NFS_OPEN_FILE_NEEDCLOSE
;
2791 /* determine deny mode for open */
2792 if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2793 if (nofp
->nof_d_rw
|| nofp
->nof_d_rw_dw
|| nofp
->nof_d_rw_drw
) {
2796 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
2797 else if (nofp
->nof_d_rw_dw
)
2798 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
2799 else if (nofp
->nof_d_rw_drw
)
2800 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
2801 } else if (nofp
->nof_rw
|| nofp
->nof_rw_dw
|| nofp
->nof_rw_drw
) {
2804 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
2805 else if (nofp
->nof_rw_dw
)
2806 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
2807 else if (nofp
->nof_rw_drw
)
2808 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
2812 } else { /* NFS_OPEN_SHARE_ACCESS_READ */
2813 if (nofp
->nof_d_r
|| nofp
->nof_d_r_dw
|| nofp
->nof_d_r_drw
) {
2816 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
2817 else if (nofp
->nof_d_r_dw
)
2818 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
2819 else if (nofp
->nof_d_r_drw
)
2820 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
2821 } else if (nofp
->nof_r
|| nofp
->nof_r_dw
|| nofp
->nof_r_drw
) {
2824 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
2825 else if (nofp
->nof_r_dw
)
2826 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
2827 else if (nofp
->nof_r_drw
)
2828 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
2829 } else if (nofp
->nof_d_rw
|| nofp
->nof_d_rw_dw
|| nofp
->nof_d_rw_drw
) {
2831 * This clause and the one below is to co-opt a read write access
2832 * for a read only mmaping. We probably got here in that an
2833 * existing rw open for an executable file already exists.
2836 accessMode
= NFS_OPEN_SHARE_ACCESS_BOTH
;
2838 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
2839 else if (nofp
->nof_d_rw_dw
)
2840 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
2841 else if (nofp
->nof_d_rw_drw
)
2842 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
2843 } else if (nofp
->nof_rw
|| nofp
->nof_rw_dw
|| nofp
->nof_rw_drw
) {
2845 accessMode
= NFS_OPEN_SHARE_ACCESS_BOTH
;
2847 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
2848 else if (nofp
->nof_rw_dw
)
2849 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
2850 else if (nofp
->nof_rw_drw
)
2851 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
2856 if (error
) /* mmap mode without proper open mode */
2860 * If the existing mmap access is more than the new access OR the
2861 * existing access is the same and the existing deny mode is less,
2862 * then we'll stick with the existing mmap open mode.
2864 if ((nofp
->nof_mmap_access
> accessMode
) ||
2865 ((nofp
->nof_mmap_access
== accessMode
) && (nofp
->nof_mmap_deny
<= denyMode
)))
2868 /* update mmap open mode */
2869 if (nofp
->nof_mmap_access
) {
2870 error
= nfs_close(np
, nofp
, nofp
->nof_mmap_access
, nofp
->nof_mmap_deny
, ctx
);
2872 if (!nfs_mount_state_error_should_restart(error
))
2873 NP(np
, "nfs_vnop_mmap: close of previous mmap mode failed: %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2874 NP(np
, "nfs_vnop_mmap: update, close error %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2877 nofp
->nof_mmap_access
= nofp
->nof_mmap_deny
= 0;
/* record the new mmap open mode */
2880 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, delegated
);
2881 nofp
->nof_mmap_access
= accessMode
;
2882 nofp
->nof_mmap_deny
= denyMode
;
2886 nfs_open_file_clear_busy(nofp
);
2887 if (nfs_mount_state_in_use_end(nmp
, error
)) {
2892 nfs_open_owner_rele(noop
);
/* mark the node as mapped (once) and revive the mount's dead timers */
2896 nfs_node_lock_force(np
);
2897 if ((np
->n_flag
& NISMAPPED
) == 0) {
2898 np
->n_flag
|= NISMAPPED
;
2901 nfs_node_unlock(np
);
2903 lck_mtx_lock(&nmp
->nm_lock
);
2904 nmp
->nm_state
&= ~NFSSTA_SQUISHY
;
2905 nmp
->nm_curdeadtimeout
= nmp
->nm_deadtimeout
;
2906 if (nmp
->nm_curdeadtimeout
<= 0)
2907 nmp
->nm_deadto_start
= 0;
2909 lck_mtx_unlock(&nmp
->nm_lock
);
/*
 * nfs_vnop_mnomap:
 * VNOP for the last munmap of a file.  Clears the node's NISMAPPED flag
 * (adjusting the mount's mapper accounting, warning on underflow),
 * flushes dirty buffers and pushes/syncs the UBC range so data is on the
 * server before the open is dropped, then walks the node's open files
 * and closes every recorded mmap open (handling REOPEN-marked files by
 * restarting mount-state use around nfs4_reopen).
 * NOTE(review): extraction dropped lines in this block (function header,
 * `off_t size`, the restart `loop:` label and gotos, `continue`
 * statements in the TAILQ walk, and several braces are not visible).
 */
2919 struct vnop_mnomap_args
/* {
2920 struct vnodeop_desc *a_desc;
2922 vfs_context_t a_context;
2925 vfs_context_t ctx
= ap
->a_context
;
2926 vnode_t vp
= ap
->a_vp
;
2927 nfsnode_t np
= VTONFS(vp
);
2928 struct nfsmount
*nmp
;
2929 struct nfs_open_file
*nofp
= NULL
;
2932 int is_mapped_flag
= 0;
2935 if (nfs_mount_gone(nmp
))
2938 nfs_node_lock_force(np
);
2939 if (np
->n_flag
& NISMAPPED
) {
2941 np
->n_flag
&= ~NISMAPPED
;
2943 nfs_node_unlock(np
);
2944 if (is_mapped_flag
) {
2945 lck_mtx_lock(&nmp
->nm_lock
);
2946 if (nmp
->nm_mappers
)
2949 NP(np
, "nfs_vnop_mnomap: removing mmap reference from mount, but mount has no files mmapped");
2950 lck_mtx_unlock(&nmp
->nm_lock
);
2953 /* flush buffers/ubc before we drop the open (in case it's our last open) */
2954 nfs_flush(np
, MNT_WAIT
, vfs_context_thread(ctx
), V_IGNORE_WRITEERR
);
2955 if (UBCINFOEXISTS(vp
) && (size
= ubc_getsize(vp
)))
2956 ubc_msync(vp
, 0, size
, NULL
, UBC_PUSHALL
| UBC_SYNC
);
2958 /* walk all open files and close all mmap opens */
2960 error
= nfs_mount_state_in_use_start(nmp
, NULL
);
2963 lck_mtx_lock(&np
->n_openlock
);
2964 TAILQ_FOREACH(nofp
, &np
->n_opens
, nof_link
) {
2965 if (!nofp
->nof_mmap_access
)
/* drop the node lock while doing per-file (possibly blocking) work */
2967 lck_mtx_unlock(&np
->n_openlock
);
2968 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
2969 nfs_mount_state_in_use_end(nmp
, 0);
2970 error
= nfs4_reopen(nofp
, NULL
);
2975 error
= nfs_open_file_set_busy(nofp
, NULL
);
2977 lck_mtx_lock(&np
->n_openlock
);
2980 if (nofp
->nof_mmap_access
) {
2981 error
= nfs_close(np
, nofp
, nofp
->nof_mmap_access
, nofp
->nof_mmap_deny
, ctx
);
2982 if (!nfs_mount_state_error_should_restart(error
)) {
2983 if (error
) /* not a state-operation-restarting error, so just clear the access */
2984 NP(np
, "nfs_vnop_mnomap: close of mmap mode failed: %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2985 nofp
->nof_mmap_access
= nofp
->nof_mmap_deny
= 0;
2988 NP(np
, "nfs_vnop_mnomap: error %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2990 nfs_open_file_clear_busy(nofp
);
2991 nfs_mount_state_in_use_end(nmp
, error
);
2994 lck_mtx_unlock(&np
->n_openlock
);
2995 nfs_mount_state_in_use_end(nmp
, error
);
/*
 * nfs_lock_owner_find(np, p, alloc):
 * Look up the lock owner for process p on node np, keyed by pid and the
 * process start time (a pid match with a different start time is a
 * stale owner from a reused pid — it is reused in place if unreferenced,
 * otherwise unlinked).  If none exists and `alloc` is set, allocate and
 * initialize a new owner (dropping/retaking n_openlock around MALLOC,
 * then rechecking the list for a racing insert) and link it in.
 * Returns the owner with a reference taken, or NULL.
 * NOTE(review): extraction dropped lines here (the `break`/`continue`
 * in the list walk, the `goto tryagain` after reusing/unlinking a stale
 * owner, and several braces are not visible in this chunk).
 */
3000 * Search a node's lock owner list for the owner for this process.
3001 * If not found and "alloc" is set, then allocate a new one.
3003 struct nfs_lock_owner
*
3004 nfs_lock_owner_find(nfsnode_t np
, proc_t p
, int alloc
)
3006 pid_t pid
= proc_pid(p
);
3007 struct nfs_lock_owner
*nlop
, *newnlop
= NULL
;
3010 lck_mtx_lock(&np
->n_openlock
);
3011 TAILQ_FOREACH(nlop
, &np
->n_lock_owners
, nlo_link
) {
3012 if (nlop
->nlo_pid
!= pid
)
/* same pid AND same process start time => this is our owner */
3014 if (timevalcmp(&nlop
->nlo_pid_start
, &p
->p_start
, ==))
3016 /* stale lock owner... reuse it if we can */
3017 if (nlop
->nlo_refcnt
) {
3018 TAILQ_REMOVE(&np
->n_lock_owners
, nlop
, nlo_link
);
3019 nlop
->nlo_flags
&= ~NFS_LOCK_OWNER_LINK
;
3020 lck_mtx_unlock(&np
->n_openlock
);
/* reset the stale owner for the new incarnation of this pid */
3023 nlop
->nlo_pid_start
= p
->p_start
;
3024 nlop
->nlo_seqid
= 0;
3025 nlop
->nlo_stategenid
= 0;
3029 if (!nlop
&& !newnlop
&& alloc
) {
3030 lck_mtx_unlock(&np
->n_openlock
);
3031 MALLOC(newnlop
, struct nfs_lock_owner
*, sizeof(struct nfs_lock_owner
), M_TEMP
, M_WAITOK
);
3034 bzero(newnlop
, sizeof(*newnlop
));
3035 lck_mtx_init(&newnlop
->nlo_lock
, nfs_open_grp
, LCK_ATTR_NULL
);
3036 newnlop
->nlo_pid
= pid
;
3037 newnlop
->nlo_pid_start
= p
->p_start
;
3038 newnlop
->nlo_name
= OSAddAtomic(1, &nfs_lock_owner_seqnum
);
3039 TAILQ_INIT(&newnlop
->nlo_locks
);
3042 if (!nlop
&& newnlop
) {
3043 newnlop
->nlo_flags
|= NFS_LOCK_OWNER_LINK
;
3044 TAILQ_INSERT_HEAD(&np
->n_lock_owners
, newnlop
, nlo_link
);
3047 lck_mtx_unlock(&np
->n_openlock
);
/* lost the race: discard the allocation we didn't use */
3049 if (newnlop
&& (nlop
!= newnlop
))
3050 nfs_lock_owner_destroy(newnlop
);
3053 nfs_lock_owner_ref(nlop
);
/*
 * nfs_lock_owner_destroy(nlop):
 * Final teardown of a lock owner: drop the reference it held on its
 * open owner (if any) and destroy its mutex.
 * NOTE(review): extraction dropped lines here (return type, braces, and
 * the final free of the structure are not visible in this chunk).
 */
3059 * destroy a lock owner that's no longer needed
3062 nfs_lock_owner_destroy(struct nfs_lock_owner
*nlop
)
3064 if (nlop
->nlo_open_owner
) {
3065 nfs_open_owner_rele(nlop
->nlo_open_owner
);
3066 nlop
->nlo_open_owner
= NULL
;
3068 lck_mtx_destroy(&nlop
->nlo_lock
, nfs_open_grp
);
/*
 * nfs_lock_owner_ref(nlop):
 * Take a reference on a lock owner under its lock.
 * NOTE(review): the refcount increment line itself was dropped by the
 * extraction; only the lock/unlock pair is visible here.
 */
3073 * acquire a reference count on a lock owner
3076 nfs_lock_owner_ref(struct nfs_lock_owner
*nlop
)
3078 lck_mtx_lock(&nlop
->nlo_lock
);
3080 lck_mtx_unlock(&nlop
->nlo_lock
);
/*
 * nfs_lock_owner_rele(nlop):
 * Drop a reference on a lock owner.  Panics on refcount underflow, and
 * on dropping the last reference while the owner is marked BUSY.  If the
 * owner is still referenced or still linked to the node's list it is
 * kept; otherwise it is destroyed.
 * NOTE(review): extraction dropped lines here (the decrement
 * `nlop->nlo_refcnt--;` and the `return` after the early unlock are not
 * visible in this chunk).
 */
3084 * drop a reference count on a lock owner and destroy it if
3085 * it is no longer referenced and no longer on the mount's list.
3088 nfs_lock_owner_rele(struct nfs_lock_owner
*nlop
)
3090 lck_mtx_lock(&nlop
->nlo_lock
);
3091 if (nlop
->nlo_refcnt
< 1)
3092 panic("nfs_lock_owner_rele: no refcnt");
3094 if (!nlop
->nlo_refcnt
&& (nlop
->nlo_flags
& NFS_LOCK_OWNER_BUSY
))
3095 panic("nfs_lock_owner_rele: busy");
3096 /* XXX we may potentially want to clean up idle/unused lock owner structures */
3097 if (nlop
->nlo_refcnt
|| (nlop
->nlo_flags
& NFS_LOCK_OWNER_LINK
)) {
3098 lck_mtx_unlock(&nlop
->nlo_lock
);
3101 /* owner is no longer referenced or linked to mount, so destroy it */
3102 lck_mtx_unlock(&nlop
->nlo_lock
);
3103 nfs_lock_owner_destroy(nlop
);
/*
 * nfs_lock_owner_set_busy(nlop, thd):
 * Mark the lock owner busy, sleeping (interruptibly when the mount has
 * INTR and a thread is given) until any current holder clears the flag.
 * Mirrors nfs_open_file_set_busy() for lock-owner state.
 * NOTE(review): extraction dropped lines here (the early return for a
 * gone mount, loop braces, and the final return are not visible).
 */
3107 * Mark a lock owner as busy because we are about to
3108 * start an operation that uses and updates lock owner state.
3111 nfs_lock_owner_set_busy(struct nfs_lock_owner
*nlop
, thread_t thd
)
3113 struct nfsmount
*nmp
;
/* 2-second periodic wakeup so interruption is rechecked */
3114 struct timespec ts
= {2, 0};
3115 int error
= 0, slpflag
;
3117 nmp
= nlop
->nlo_open_owner
->noo_mount
;
3118 if (nfs_mount_gone(nmp
))
3120 slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
3122 lck_mtx_lock(&nlop
->nlo_lock
);
3123 while (nlop
->nlo_flags
& NFS_LOCK_OWNER_BUSY
) {
3124 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0)))
3126 nlop
->nlo_flags
|= NFS_LOCK_OWNER_WANT
;
3127 msleep(nlop
, &nlop
->nlo_lock
, slpflag
, "nfs_lock_owner_set_busy", &ts
);
3131 nlop
->nlo_flags
|= NFS_LOCK_OWNER_BUSY
;
3132 lck_mtx_unlock(&nlop
->nlo_lock
);
/*
 * nfs_lock_owner_clear_busy(nlop):
 * Clear the BUSY flag set by nfs_lock_owner_set_busy() and wake any
 * advertised waiters.  Panics if called without BUSY held.
 * NOTE(review): extraction dropped lines here (the `int wanted;`
 * declaration and the wakeup() call are not visible in this chunk).
 */
3138 * Clear the busy flag on a lock owner and wake up anyone waiting
3142 nfs_lock_owner_clear_busy(struct nfs_lock_owner
*nlop
)
3146 lck_mtx_lock(&nlop
->nlo_lock
);
3147 if (!(nlop
->nlo_flags
& NFS_LOCK_OWNER_BUSY
))
3148 panic("nfs_lock_owner_clear_busy");
3149 wanted
= (nlop
->nlo_flags
& NFS_LOCK_OWNER_WANT
);
3150 nlop
->nlo_flags
&= ~(NFS_LOCK_OWNER_BUSY
|NFS_LOCK_OWNER_WANT
);
3151 lck_mtx_unlock(&nlop
->nlo_lock
);
/*
 * nfs_lock_owner_insert_held_lock(nlop, newnflp):
 * Add a newly-granted lock to the owner's held-lock list: flock-style
 * locks go at the head; POSIX locks are inserted in ascending nfl_start
 * order (before the first existing lock with a larger start, else at
 * the tail).  All under the owner's lock.
 * NOTE(review): extraction dropped braces and a `break`/`else` joining
 * the TAILQ_INSERT_BEFORE and TAILQ_INSERT_TAIL paths.
 */
3157 * Insert a held lock into a lock owner's sorted list.
3158 * (flock locks are always inserted at the head the list)
3161 nfs_lock_owner_insert_held_lock(struct nfs_lock_owner
*nlop
, struct nfs_file_lock
*newnflp
)
3163 struct nfs_file_lock
*nflp
;
3165 /* insert new lock in lock owner's held lock list */
3166 lck_mtx_lock(&nlop
->nlo_lock
);
3167 if ((newnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_FLOCK
) {
3168 TAILQ_INSERT_HEAD(&nlop
->nlo_locks
, newnflp
, nfl_lolink
);
3170 TAILQ_FOREACH(nflp
, &nlop
->nlo_locks
, nfl_lolink
) {
3171 if (newnflp
->nfl_start
< nflp
->nfl_start
)
3175 TAILQ_INSERT_BEFORE(nflp
, newnflp
, nfl_lolink
);
3177 TAILQ_INSERT_TAIL(&nlop
->nlo_locks
, newnflp
, nfl_lolink
);
3179 lck_mtx_unlock(&nlop
->nlo_lock
);
/*
 * nfs_file_lock_alloc(nlop):
 * Return a file lock structure for this lock owner.  The owner embeds
 * one preallocated lock (nlo_alock) which is handed out first (claimed
 * by setting nfl_owner under the owner's lock); otherwise a fresh one
 * is MALLOC'd, zeroed, and flagged NFS_FILE_LOCK_ALLOC so destroy knows
 * to free it.  A reference on the owner is taken for the lock.
 * NOTE(review): extraction dropped lines here (the `else`/NULL-check
 * around MALLOC, braces, and the final `return (nflp);` are not
 * visible in this chunk).
 */
3183 * Get a file lock structure for this lock owner.
3185 struct nfs_file_lock
*
3186 nfs_file_lock_alloc(struct nfs_lock_owner
*nlop
)
3188 struct nfs_file_lock
*nflp
= NULL
;
3190 lck_mtx_lock(&nlop
->nlo_lock
);
3191 if (!nlop
->nlo_alock
.nfl_owner
) {
3192 nflp
= &nlop
->nlo_alock
;
3193 nflp
->nfl_owner
= nlop
;
3195 lck_mtx_unlock(&nlop
->nlo_lock
);
3197 MALLOC(nflp
, struct nfs_file_lock
*, sizeof(struct nfs_file_lock
), M_TEMP
, M_WAITOK
);
3200 bzero(nflp
, sizeof(*nflp
));
3201 nflp
->nfl_flags
|= NFS_FILE_LOCK_ALLOC
;
3202 nflp
->nfl_owner
= nlop
;
3204 nfs_lock_owner_ref(nlop
);
/*
 * nfs_file_lock_destroy(nflp):
 * Release a file lock structure.  Heap-allocated locks (flagged
 * NFS_FILE_LOCK_ALLOC) have their owner pointer cleared and are freed;
 * the owner's embedded nlo_alock is instead zeroed under the owner's
 * lock so it can be handed out again.  Either way the reference the
 * lock held on its owner is dropped.
 * NOTE(review): extraction dropped lines here (the FREE of a heap-
 * allocated lock and the `} else {` joining the two paths are not
 * visible in this chunk).
 */
3209 * destroy the given NFS file lock structure
3212 nfs_file_lock_destroy(struct nfs_file_lock
*nflp
)
3214 struct nfs_lock_owner
*nlop
= nflp
->nfl_owner
;
3216 if (nflp
->nfl_flags
& NFS_FILE_LOCK_ALLOC
) {
3217 nflp
->nfl_owner
= NULL
;
3220 lck_mtx_lock(&nlop
->nlo_lock
);
3221 bzero(nflp
, sizeof(*nflp
));
3222 lck_mtx_unlock(&nlop
->nlo_lock
);
3224 nfs_lock_owner_rele(nlop
);
/*
 * nfs_file_lock_conflict(nflp1, nflp2, willsplit):
 * Decide whether new lock nflp1 conflicts with existing lock nflp2.
 * Non-conflicting cases: either lock is DEAD; both locks belong to the
 * same owner with the same lock style (in which case *willsplit is set
 * when replacing the middle of nflp2 with a different lock type would
 * split it into two pieces); the byte ranges don't overlap; or neither
 * lock is exclusive (F_WRLCK).  Otherwise the locks conflict.
 * NOTE(review): extraction dropped the `return (0/1)` statements and
 * the `*willsplit = 1;` assignment; only the conditions are visible.
 */
3228 * Check if one file lock conflicts with another.
3229 * (nflp1 is the new lock. nflp2 is the existing lock.)
3232 nfs_file_lock_conflict(struct nfs_file_lock
*nflp1
, struct nfs_file_lock
*nflp2
, int *willsplit
)
3234 /* no conflict if lock is dead */
3235 if ((nflp1
->nfl_flags
& NFS_FILE_LOCK_DEAD
) || (nflp2
->nfl_flags
& NFS_FILE_LOCK_DEAD
))
3237 /* no conflict if it's ours - unless the lock style doesn't match */
3238 if ((nflp1
->nfl_owner
== nflp2
->nfl_owner
) &&
3239 ((nflp1
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == (nflp2
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
))) {
3240 if (willsplit
&& (nflp1
->nfl_type
!= nflp2
->nfl_type
) &&
3241 (nflp1
->nfl_start
> nflp2
->nfl_start
) &&
3242 (nflp1
->nfl_end
< nflp2
->nfl_end
))
3246 /* no conflict if ranges don't overlap */
3247 if ((nflp1
->nfl_start
> nflp2
->nfl_end
) || (nflp1
->nfl_end
< nflp2
->nfl_start
))
3249 /* no conflict if neither lock is exclusive */
3250 if ((nflp1
->nfl_type
!= F_WRLCK
) && (nflp2
->nfl_type
!= F_WRLCK
))
3257 * Send an NFSv4 LOCK RPC to the server.
3262 struct nfs_open_file
*nofp
,
3263 struct nfs_file_lock
*nflp
,
3269 struct nfs_lock_owner
*nlop
= nflp
->nfl_owner
;
3270 struct nfsmount
*nmp
;
3271 struct nfsm_chain nmreq
, nmrep
;
3274 int error
= 0, lockerror
= ENOENT
, newlocker
, numops
, status
;
3275 struct nfsreq_secinfo_args si
;
3278 if (nfs_mount_gone(nmp
))
3280 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
3283 newlocker
= (nlop
->nlo_stategenid
!= nmp
->nm_stategenid
);
3284 locktype
= (nflp
->nfl_flags
& NFS_FILE_LOCK_WAIT
) ?
3285 ((nflp
->nfl_type
== F_WRLCK
) ?
3286 NFS_LOCK_TYPE_WRITEW
:
3287 NFS_LOCK_TYPE_READW
) :
3288 ((nflp
->nfl_type
== F_WRLCK
) ?
3289 NFS_LOCK_TYPE_WRITE
:
3290 NFS_LOCK_TYPE_READ
);
3292 error
= nfs_open_file_set_busy(nofp
, thd
);
3295 error
= nfs_open_owner_set_busy(nofp
->nof_owner
, thd
);
3297 nfs_open_file_clear_busy(nofp
);
3300 if (!nlop
->nlo_open_owner
) {
3301 nfs_open_owner_ref(nofp
->nof_owner
);
3302 nlop
->nlo_open_owner
= nofp
->nof_owner
;
3305 error
= nfs_lock_owner_set_busy(nlop
, thd
);
3308 nfs_open_owner_clear_busy(nofp
->nof_owner
);
3309 nfs_open_file_clear_busy(nofp
);
3314 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
3315 nfsm_chain_null(&nmreq
);
3316 nfsm_chain_null(&nmrep
);
3318 // PUTFH, GETATTR, LOCK
3320 nfsm_chain_build_alloc_init(error
, &nmreq
, 33 * NFSX_UNSIGNED
);
3321 nfsm_chain_add_compound_header(error
, &nmreq
, "lock", nmp
->nm_minor_vers
, numops
);
3323 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
3324 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
3326 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
3327 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
3329 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOCK
);
3330 nfsm_chain_add_32(error
, &nmreq
, locktype
);
3331 nfsm_chain_add_32(error
, &nmreq
, reclaim
);
3332 nfsm_chain_add_64(error
, &nmreq
, nflp
->nfl_start
);
3333 nfsm_chain_add_64(error
, &nmreq
, NFS_LOCK_LENGTH(nflp
->nfl_start
, nflp
->nfl_end
));
3334 nfsm_chain_add_32(error
, &nmreq
, newlocker
);
3336 nfsm_chain_add_32(error
, &nmreq
, nofp
->nof_owner
->noo_seqid
);
3337 nfsm_chain_add_stateid(error
, &nmreq
, &nofp
->nof_stateid
);
3338 nfsm_chain_add_32(error
, &nmreq
, nlop
->nlo_seqid
);
3339 nfsm_chain_add_lock_owner4(error
, &nmreq
, nmp
, nlop
);
3341 nfsm_chain_add_stateid(error
, &nmreq
, &nlop
->nlo_stateid
);
3342 nfsm_chain_add_32(error
, &nmreq
, nlop
->nlo_seqid
);
3344 nfsm_chain_build_done(error
, &nmreq
);
3345 nfsm_assert(error
, (numops
== 0), EPROTO
);
3348 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, flags
|R_NOINTR
, &nmrep
, &xid
, &status
);
3350 if ((lockerror
= nfs_node_lock(np
)))
3352 nfsm_chain_skip_tag(error
, &nmrep
);
3353 nfsm_chain_get_32(error
, &nmrep
, numops
);
3354 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
3356 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
3357 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, &xid
);
3359 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOCK
);
3360 nfs_owner_seqid_increment(newlocker
? nofp
->nof_owner
: NULL
, nlop
, error
);
3361 nfsm_chain_get_stateid(error
, &nmrep
, &nlop
->nlo_stateid
);
3363 /* Update the lock owner's stategenid once it appears the server has state for it. */
3364 /* We determine this by noting the request was successful (we got a stateid). */
3365 if (newlocker
&& !error
)
3366 nlop
->nlo_stategenid
= nmp
->nm_stategenid
;
3369 nfs_node_unlock(np
);
3370 nfs_lock_owner_clear_busy(nlop
);
3372 nfs_open_owner_clear_busy(nofp
->nof_owner
);
3373 nfs_open_file_clear_busy(nofp
);
3375 nfsm_chain_cleanup(&nmreq
);
3376 nfsm_chain_cleanup(&nmrep
);
3381 * Send an NFSv4 LOCKU RPC to the server.
3386 struct nfs_lock_owner
*nlop
,
3394 struct nfsmount
*nmp
;
3395 struct nfsm_chain nmreq
, nmrep
;
3397 int error
= 0, lockerror
= ENOENT
, numops
, status
;
3398 struct nfsreq_secinfo_args si
;
3401 if (nfs_mount_gone(nmp
))
3403 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
3406 error
= nfs_lock_owner_set_busy(nlop
, NULL
);
3410 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
3411 nfsm_chain_null(&nmreq
);
3412 nfsm_chain_null(&nmrep
);
3414 // PUTFH, GETATTR, LOCKU
3416 nfsm_chain_build_alloc_init(error
, &nmreq
, 26 * NFSX_UNSIGNED
);
3417 nfsm_chain_add_compound_header(error
, &nmreq
, "unlock", nmp
->nm_minor_vers
, numops
);
3419 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
3420 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
3422 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
3423 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
3425 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOCKU
);
3426 nfsm_chain_add_32(error
, &nmreq
, (type
== F_WRLCK
) ? NFS_LOCK_TYPE_WRITE
: NFS_LOCK_TYPE_READ
);
3427 nfsm_chain_add_32(error
, &nmreq
, nlop
->nlo_seqid
);
3428 nfsm_chain_add_stateid(error
, &nmreq
, &nlop
->nlo_stateid
);
3429 nfsm_chain_add_64(error
, &nmreq
, start
);
3430 nfsm_chain_add_64(error
, &nmreq
, NFS_LOCK_LENGTH(start
, end
));
3431 nfsm_chain_build_done(error
, &nmreq
);
3432 nfsm_assert(error
, (numops
== 0), EPROTO
);
3435 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, flags
|R_NOINTR
, &nmrep
, &xid
, &status
);
3437 if ((lockerror
= nfs_node_lock(np
)))
3439 nfsm_chain_skip_tag(error
, &nmrep
);
3440 nfsm_chain_get_32(error
, &nmrep
, numops
);
3441 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
3443 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
3444 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, &xid
);
3446 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOCKU
);
3447 nfs_owner_seqid_increment(NULL
, nlop
, error
);
3448 nfsm_chain_get_stateid(error
, &nmrep
, &nlop
->nlo_stateid
);
3451 nfs_node_unlock(np
);
3452 nfs_lock_owner_clear_busy(nlop
);
3453 nfsm_chain_cleanup(&nmreq
);
3454 nfsm_chain_cleanup(&nmrep
);
3459 * Send an NFSv4 LOCKT RPC to the server.
3464 struct nfs_lock_owner
*nlop
,
3470 struct nfsmount
*nmp
;
3471 struct nfsm_chain nmreq
, nmrep
;
3472 uint64_t xid
, val64
= 0;
3474 int error
= 0, lockerror
, numops
, status
;
3475 struct nfsreq_secinfo_args si
;
3478 if (nfs_mount_gone(nmp
))
3480 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
3484 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
3485 nfsm_chain_null(&nmreq
);
3486 nfsm_chain_null(&nmrep
);
3488 // PUTFH, GETATTR, LOCKT
3490 nfsm_chain_build_alloc_init(error
, &nmreq
, 26 * NFSX_UNSIGNED
);
3491 nfsm_chain_add_compound_header(error
, &nmreq
, "locktest", nmp
->nm_minor_vers
, numops
);
3493 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
3494 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
3496 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
3497 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
3499 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOCKT
);
3500 nfsm_chain_add_32(error
, &nmreq
, (fl
->l_type
== F_WRLCK
) ? NFS_LOCK_TYPE_WRITE
: NFS_LOCK_TYPE_READ
);
3501 nfsm_chain_add_64(error
, &nmreq
, start
);
3502 nfsm_chain_add_64(error
, &nmreq
, NFS_LOCK_LENGTH(start
, end
));
3503 nfsm_chain_add_lock_owner4(error
, &nmreq
, nmp
, nlop
);
3504 nfsm_chain_build_done(error
, &nmreq
);
3505 nfsm_assert(error
, (numops
== 0), EPROTO
);
3508 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
3510 if ((lockerror
= nfs_node_lock(np
)))
3512 nfsm_chain_skip_tag(error
, &nmrep
);
3513 nfsm_chain_get_32(error
, &nmrep
, numops
);
3514 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
3516 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
3517 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, &xid
);
3519 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOCKT
);
3520 if (error
== NFSERR_DENIED
) {
3522 nfsm_chain_get_64(error
, &nmrep
, fl
->l_start
);
3523 nfsm_chain_get_64(error
, &nmrep
, val64
);
3524 fl
->l_len
= (val64
== UINT64_MAX
) ? 0 : val64
;
3525 nfsm_chain_get_32(error
, &nmrep
, val
);
3526 fl
->l_type
= (val
== NFS_LOCK_TYPE_WRITE
) ? F_WRLCK
: F_RDLCK
;
3528 fl
->l_whence
= SEEK_SET
;
3529 } else if (!error
) {
3530 fl
->l_type
= F_UNLCK
;
3534 nfs_node_unlock(np
);
3535 nfsm_chain_cleanup(&nmreq
);
3536 nfsm_chain_cleanup(&nmrep
);
3542 * Check for any conflicts with the given lock.
3544 * Checking for a lock doesn't require the file to be opened.
3545 * So we skip all the open owner, open file, lock owner work
3546 * and just check for a conflicting lock.
3549 nfs_advlock_getlock(
3551 struct nfs_lock_owner
*nlop
,
3557 struct nfsmount
*nmp
;
3558 struct nfs_file_lock
*nflp
;
3559 int error
= 0, answered
= 0;
3562 if (nfs_mount_gone(nmp
))
3566 if ((error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
))))
3569 lck_mtx_lock(&np
->n_openlock
);
3570 /* scan currently held locks for conflict */
3571 TAILQ_FOREACH(nflp
, &np
->n_locks
, nfl_link
) {
3572 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
|NFS_FILE_LOCK_DEAD
))
3574 if ((start
<= nflp
->nfl_end
) && (end
>= nflp
->nfl_start
) &&
3575 ((fl
->l_type
== F_WRLCK
) || (nflp
->nfl_type
== F_WRLCK
)))
3579 /* found a conflicting lock */
3580 fl
->l_type
= nflp
->nfl_type
;
3581 fl
->l_pid
= (nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_FLOCK
) ? -1 : nflp
->nfl_owner
->nlo_pid
;
3582 fl
->l_start
= nflp
->nfl_start
;
3583 fl
->l_len
= NFS_FLOCK_LENGTH(nflp
->nfl_start
, nflp
->nfl_end
);
3584 fl
->l_whence
= SEEK_SET
;
3586 } else if ((np
->n_openflags
& N_DELEG_WRITE
) && !(np
->n_openflags
& N_DELEG_RETURN
)) {
3588 * If we have a write delegation, we know there can't be other
3589 * locks on the server. So the answer is no conflicting lock found.
3591 fl
->l_type
= F_UNLCK
;
3594 lck_mtx_unlock(&np
->n_openlock
);
3596 nfs_mount_state_in_use_end(nmp
, 0);
3600 /* no conflict found locally, so ask the server */
3601 error
= nmp
->nm_funcs
->nf_getlock_rpc(np
, nlop
, fl
, start
, end
, ctx
);
3603 if (nfs_mount_state_in_use_end(nmp
, error
))
3609 * Acquire a file lock for the given range.
3611 * Add the lock (request) to the lock queue.
3612 * Scan the lock queue for any conflicting locks.
3613 * If a conflict is found, block or return an error.
3614 * Once end of queue is reached, send request to the server.
3615 * If the server grants the lock, scan the lock queue and
3616 * update any existing locks. Then (optionally) scan the
3617 * queue again to coalesce any locks adjacent to the new one.
3620 nfs_advlock_setlock(
3622 struct nfs_open_file
*nofp
,
3623 struct nfs_lock_owner
*nlop
,
3631 struct nfsmount
*nmp
;
3632 struct nfs_file_lock
*newnflp
, *nflp
, *nflp2
= NULL
, *nextnflp
, *flocknflp
= NULL
;
3633 struct nfs_file_lock
*coalnflp
;
3634 int error
= 0, error2
, willsplit
= 0, delay
, slpflag
, busy
= 0, inuse
= 0, restart
, inqueue
= 0;
3635 struct timespec ts
= {1, 0};
3638 if (nfs_mount_gone(nmp
))
3640 slpflag
= NMFLAG(nmp
, INTR
) ? PCATCH
: 0;
3642 if ((type
!= F_RDLCK
) && (type
!= F_WRLCK
))
3645 /* allocate a new lock */
3646 newnflp
= nfs_file_lock_alloc(nlop
);
3649 newnflp
->nfl_start
= start
;
3650 newnflp
->nfl_end
= end
;
3651 newnflp
->nfl_type
= type
;
3653 newnflp
->nfl_flags
|= NFS_FILE_LOCK_WAIT
;
3654 newnflp
->nfl_flags
|= style
;
3655 newnflp
->nfl_flags
|= NFS_FILE_LOCK_BLOCKED
;
3657 if ((style
== NFS_FILE_LOCK_STYLE_FLOCK
) && (type
== F_WRLCK
)) {
3659 * For exclusive flock-style locks, if we block waiting for the
3660 * lock, we need to first release any currently held shared
3661 * flock-style lock. So, the first thing we do is check if we
3662 * have a shared flock-style lock.
3664 nflp
= TAILQ_FIRST(&nlop
->nlo_locks
);
3665 if (nflp
&& ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != NFS_FILE_LOCK_STYLE_FLOCK
))
3667 if (nflp
&& (nflp
->nfl_type
!= F_RDLCK
))
3674 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
3678 if (np
->n_flag
& NREVOKE
) {
3680 nfs_mount_state_in_use_end(nmp
, 0);
3684 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
3685 nfs_mount_state_in_use_end(nmp
, 0);
3687 error
= nfs4_reopen(nofp
, vfs_context_thread(ctx
));
3693 lck_mtx_lock(&np
->n_openlock
);
3695 /* insert new lock at beginning of list */
3696 TAILQ_INSERT_HEAD(&np
->n_locks
, newnflp
, nfl_link
);
3700 /* scan current list of locks (held and pending) for conflicts */
3701 for (nflp
= TAILQ_NEXT(newnflp
, nfl_link
); nflp
; nflp
= nextnflp
) {
3702 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
3703 if (!nfs_file_lock_conflict(newnflp
, nflp
, &willsplit
))
3706 if (!(newnflp
->nfl_flags
& NFS_FILE_LOCK_WAIT
)) {
3710 /* Block until this lock is no longer held. */
3711 if (nflp
->nfl_blockcnt
== UINT_MAX
) {
3715 nflp
->nfl_blockcnt
++;
3718 /* release any currently held shared lock before sleeping */
3719 lck_mtx_unlock(&np
->n_openlock
);
3720 nfs_mount_state_in_use_end(nmp
, 0);
3722 error
= nfs_advlock_unlock(np
, nofp
, nlop
, 0, UINT64_MAX
, NFS_FILE_LOCK_STYLE_FLOCK
, ctx
);
3725 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
3727 lck_mtx_lock(&np
->n_openlock
);
3731 lck_mtx_lock(&np
->n_openlock
);
3732 /* no need to block/sleep if the conflict is gone */
3733 if (!nfs_file_lock_conflict(newnflp
, nflp
, NULL
))
3736 msleep(nflp
, &np
->n_openlock
, slpflag
, "nfs_advlock_setlock_blocked", &ts
);
3738 error
= nfs_sigintr(NFSTONMP(np
), NULL
, vfs_context_thread(ctx
), 0);
3739 if (!error
&& (nmp
->nm_state
& NFSSTA_RECOVER
)) {
3740 /* looks like we have a recover pending... restart */
3742 lck_mtx_unlock(&np
->n_openlock
);
3743 nfs_mount_state_in_use_end(nmp
, 0);
3745 lck_mtx_lock(&np
->n_openlock
);
3748 if (!error
&& (np
->n_flag
& NREVOKE
))
3750 } while (!error
&& nfs_file_lock_conflict(newnflp
, nflp
, NULL
));
3751 nflp
->nfl_blockcnt
--;
3752 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
) && !nflp
->nfl_blockcnt
) {
3753 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
3754 nfs_file_lock_destroy(nflp
);
3756 if (error
|| restart
)
3758 /* We have released n_openlock and we can't trust that nextnflp is still valid. */
3759 /* So, start this lock-scanning loop over from where it started. */
3760 nextnflp
= TAILQ_NEXT(newnflp
, nfl_link
);
3762 lck_mtx_unlock(&np
->n_openlock
);
3770 * It looks like this operation is splitting a lock.
3771 * We allocate a new lock now so we don't have to worry
3772 * about the allocation failing after we've updated some state.
3774 nflp2
= nfs_file_lock_alloc(nlop
);
3781 /* once scan for local conflicts is clear, send request to server */
3782 if ((error
= nfs_open_state_set_busy(np
, vfs_context_thread(ctx
))))
3787 /* do we have a delegation? (that we're not returning?) */
3788 if ((np
->n_openflags
& N_DELEG_MASK
) && !(np
->n_openflags
& N_DELEG_RETURN
)) {
3789 if (np
->n_openflags
& N_DELEG_WRITE
) {
3790 /* with a write delegation, just take the lock delegated */
3791 newnflp
->nfl_flags
|= NFS_FILE_LOCK_DELEGATED
;
3793 /* make sure the lock owner knows its open owner */
3794 if (!nlop
->nlo_open_owner
) {
3795 nfs_open_owner_ref(nofp
->nof_owner
);
3796 nlop
->nlo_open_owner
= nofp
->nof_owner
;
3801 * If we don't have any non-delegated opens but we do have
3802 * delegated opens, then we need to first claim the delegated
3803 * opens so that the lock request on the server can be associated
3804 * with an open it knows about.
3806 if ((!nofp
->nof_rw_drw
&& !nofp
->nof_w_drw
&& !nofp
->nof_r_drw
&&
3807 !nofp
->nof_rw_dw
&& !nofp
->nof_w_dw
&& !nofp
->nof_r_dw
&&
3808 !nofp
->nof_rw
&& !nofp
->nof_w
&& !nofp
->nof_r
) &&
3809 (nofp
->nof_d_rw_drw
|| nofp
->nof_d_w_drw
|| nofp
->nof_d_r_drw
||
3810 nofp
->nof_d_rw_dw
|| nofp
->nof_d_w_dw
|| nofp
->nof_d_r_dw
||
3811 nofp
->nof_d_rw
|| nofp
->nof_d_w
|| nofp
->nof_d_r
)) {
3812 error
= nfs4_claim_delegated_state_for_open_file(nofp
, 0);
3818 if (np
->n_flag
& NREVOKE
)
3821 error
= nmp
->nm_funcs
->nf_setlock_rpc(np
, nofp
, newnflp
, 0, 0, vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
3822 if (!error
|| ((error
!= NFSERR_DENIED
) && (error
!= NFSERR_GRACE
)))
3824 /* request was denied due to either conflict or grace period */
3825 if ((error
== NFSERR_DENIED
) && !(newnflp
->nfl_flags
& NFS_FILE_LOCK_WAIT
)) {
3830 /* release any currently held shared lock before sleeping */
3831 nfs_open_state_clear_busy(np
);
3833 nfs_mount_state_in_use_end(nmp
, 0);
3835 error2
= nfs_advlock_unlock(np
, nofp
, nlop
, 0, UINT64_MAX
, NFS_FILE_LOCK_STYLE_FLOCK
, ctx
);
3838 error2
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
3841 error2
= nfs_open_state_set_busy(np
, vfs_context_thread(ctx
));
3850 * Wait a little bit and send the request again.
3851 * Except for retries of blocked v2/v3 request where we've already waited a bit.
3853 if ((nmp
->nm_vers
>= NFS_VER4
) || (error
== NFSERR_GRACE
)) {
3854 if (error
== NFSERR_GRACE
)
3858 tsleep(newnflp
, slpflag
, "nfs_advlock_setlock_delay", delay
* (hz
/2));
3861 error
= nfs_sigintr(NFSTONMP(np
), NULL
, vfs_context_thread(ctx
), 0);
3862 if (!error
&& (nmp
->nm_state
& NFSSTA_RECOVER
)) {
3863 /* looks like we have a recover pending... restart */
3864 nfs_open_state_clear_busy(np
);
3866 nfs_mount_state_in_use_end(nmp
, 0);
3870 if (!error
&& (np
->n_flag
& NREVOKE
))
3875 if (nfs_mount_state_error_should_restart(error
)) {
3876 /* looks like we need to restart this operation */
3878 nfs_open_state_clear_busy(np
);
3882 nfs_mount_state_in_use_end(nmp
, error
);
3887 lck_mtx_lock(&np
->n_openlock
);
3888 newnflp
->nfl_flags
&= ~NFS_FILE_LOCK_BLOCKED
;
3890 newnflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
3891 if (newnflp
->nfl_blockcnt
) {
3892 /* wake up anyone blocked on this lock */
3895 /* remove newnflp from lock list and destroy */
3897 TAILQ_REMOVE(&np
->n_locks
, newnflp
, nfl_link
);
3898 nfs_file_lock_destroy(newnflp
);
3900 lck_mtx_unlock(&np
->n_openlock
);
3902 nfs_open_state_clear_busy(np
);
3904 nfs_mount_state_in_use_end(nmp
, error
);
3906 nfs_file_lock_destroy(nflp2
);
3910 /* server granted the lock */
3913 * Scan for locks to update.
3915 * Locks completely covered are killed.
3916 * At most two locks may need to be clipped.
3917 * It's possible that a single lock may need to be split.
3919 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
3920 if (nflp
== newnflp
)
3922 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
|NFS_FILE_LOCK_DEAD
))
3924 if (nflp
->nfl_owner
!= nlop
)
3926 if ((newnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != (nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
))
3928 if ((newnflp
->nfl_start
> nflp
->nfl_end
) || (newnflp
->nfl_end
< nflp
->nfl_start
))
3930 /* here's one to update */
3931 if ((newnflp
->nfl_start
<= nflp
->nfl_start
) && (newnflp
->nfl_end
>= nflp
->nfl_end
)) {
3932 /* The entire lock is being replaced. */
3933 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
3934 lck_mtx_lock(&nlop
->nlo_lock
);
3935 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
3936 lck_mtx_unlock(&nlop
->nlo_lock
);
3937 /* lock will be destroyed below, if no waiters */
3938 } else if ((newnflp
->nfl_start
> nflp
->nfl_start
) && (newnflp
->nfl_end
< nflp
->nfl_end
)) {
3939 /* We're replacing a range in the middle of a lock. */
3940 /* The current lock will be split into two locks. */
3941 /* Update locks and insert new lock after current lock. */
3942 nflp2
->nfl_flags
|= (nflp
->nfl_flags
& (NFS_FILE_LOCK_STYLE_MASK
|NFS_FILE_LOCK_DELEGATED
));
3943 nflp2
->nfl_type
= nflp
->nfl_type
;
3944 nflp2
->nfl_start
= newnflp
->nfl_end
+ 1;
3945 nflp2
->nfl_end
= nflp
->nfl_end
;
3946 nflp
->nfl_end
= newnflp
->nfl_start
- 1;
3947 TAILQ_INSERT_AFTER(&np
->n_locks
, nflp
, nflp2
, nfl_link
);
3948 nfs_lock_owner_insert_held_lock(nlop
, nflp2
);
3951 } else if (newnflp
->nfl_start
> nflp
->nfl_start
) {
3952 /* We're replacing the end of a lock. */
3953 nflp
->nfl_end
= newnflp
->nfl_start
- 1;
3954 } else if (newnflp
->nfl_end
< nflp
->nfl_end
) {
3955 /* We're replacing the start of a lock. */
3956 nflp
->nfl_start
= newnflp
->nfl_end
+ 1;
3958 if (nflp
->nfl_blockcnt
) {
3959 /* wake up anyone blocked on this lock */
3961 } else if (nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
) {
3962 /* remove nflp from lock list and destroy */
3963 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
3964 nfs_file_lock_destroy(nflp
);
3968 nfs_lock_owner_insert_held_lock(nlop
, newnflp
);
3971 * POSIX locks should be coalesced when possible.
3973 if ((style
== NFS_FILE_LOCK_STYLE_POSIX
) && (nofp
->nof_flags
& NFS_OPEN_FILE_POSIXLOCK
)) {
3975 * Walk through the lock queue and check each of our held locks with
3976 * the previous and next locks in the lock owner's "held lock list".
3977 * If the two locks can be coalesced, we merge the current lock into
3978 * the other (previous or next) lock. Merging this way makes sure that
3979 * lock ranges are always merged forward in the lock queue. This is
3980 * important because anyone blocked on the lock being "merged away"
3981 * will still need to block on that range and it will simply continue
3982 * checking locks that are further down the list.
3984 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
3985 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
|NFS_FILE_LOCK_DEAD
))
3987 if (nflp
->nfl_owner
!= nlop
)
3989 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != NFS_FILE_LOCK_STYLE_POSIX
)
3991 if (((coalnflp
= TAILQ_PREV(nflp
, nfs_file_lock_queue
, nfl_lolink
))) &&
3992 ((coalnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
) &&
3993 (coalnflp
->nfl_type
== nflp
->nfl_type
) &&
3994 (coalnflp
->nfl_end
== (nflp
->nfl_start
- 1))) {
3995 coalnflp
->nfl_end
= nflp
->nfl_end
;
3996 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
3997 lck_mtx_lock(&nlop
->nlo_lock
);
3998 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
3999 lck_mtx_unlock(&nlop
->nlo_lock
);
4000 } else if (((coalnflp
= TAILQ_NEXT(nflp
, nfl_lolink
))) &&
4001 ((coalnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
) &&
4002 (coalnflp
->nfl_type
== nflp
->nfl_type
) &&
4003 (coalnflp
->nfl_start
== (nflp
->nfl_end
+ 1))) {
4004 coalnflp
->nfl_start
= nflp
->nfl_start
;
4005 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
4006 lck_mtx_lock(&nlop
->nlo_lock
);
4007 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
4008 lck_mtx_unlock(&nlop
->nlo_lock
);
4010 if (!(nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
))
4012 if (nflp
->nfl_blockcnt
) {
4013 /* wake up anyone blocked on this lock */
4016 /* remove nflp from lock list and destroy */
4017 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
4018 nfs_file_lock_destroy(nflp
);
4023 lck_mtx_unlock(&np
->n_openlock
);
4024 nfs_open_state_clear_busy(np
);
4025 nfs_mount_state_in_use_end(nmp
, error
);
4028 nfs_file_lock_destroy(nflp2
);
4033 * Release all (same style) locks within the given range.
4038 struct nfs_open_file
*nofp
,
4039 struct nfs_lock_owner
*nlop
,
4045 struct nfsmount
*nmp
;
4046 struct nfs_file_lock
*nflp
, *nextnflp
, *newnflp
= NULL
;
4047 int error
= 0, willsplit
= 0, send_unlock_rpcs
= 1;
4050 if (nfs_mount_gone(nmp
))
4054 if ((error
= nfs_mount_state_in_use_start(nmp
, NULL
)))
4056 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
4057 nfs_mount_state_in_use_end(nmp
, 0);
4058 error
= nfs4_reopen(nofp
, NULL
);
4063 if ((error
= nfs_open_state_set_busy(np
, NULL
))) {
4064 nfs_mount_state_in_use_end(nmp
, error
);
4068 lck_mtx_lock(&np
->n_openlock
);
4069 if ((start
> 0) && (end
< UINT64_MAX
) && !willsplit
) {
4071 * We may need to allocate a new lock if an existing lock gets split.
4072 * So, we first scan the list to check for a split, and if there's
4073 * going to be one, we'll allocate one now.
4075 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
4076 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
|NFS_FILE_LOCK_DEAD
))
4078 if (nflp
->nfl_owner
!= nlop
)
4080 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != style
)
4082 if ((start
> nflp
->nfl_end
) || (end
< nflp
->nfl_start
))
4084 if ((start
> nflp
->nfl_start
) && (end
< nflp
->nfl_end
)) {
4090 lck_mtx_unlock(&np
->n_openlock
);
4091 nfs_open_state_clear_busy(np
);
4092 nfs_mount_state_in_use_end(nmp
, 0);
4093 newnflp
= nfs_file_lock_alloc(nlop
);
4101 * Free all of our locks in the given range.
4103 * Note that this process requires sending requests to the server.
4104 * Because of this, we will release the n_openlock while performing
4105 * the unlock RPCs. The N_OPENBUSY state keeps the state of *held*
4106 * locks from changing underneath us. However, other entries in the
4107 * list may be removed. So we need to be careful walking the list.
4111 * Don't unlock ranges that are held by other-style locks.
4112 * If style is posix, don't send any unlock rpcs if flock is held.
4113 * If we unlock an flock, don't send unlock rpcs for any posix-style
4114 * ranges held - instead send unlocks for the ranges not held.
4116 if ((style
== NFS_FILE_LOCK_STYLE_POSIX
) &&
4117 ((nflp
= TAILQ_FIRST(&nlop
->nlo_locks
))) &&
4118 ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_FLOCK
))
4119 send_unlock_rpcs
= 0;
4120 if ((style
== NFS_FILE_LOCK_STYLE_FLOCK
) &&
4121 ((nflp
= TAILQ_FIRST(&nlop
->nlo_locks
))) &&
4122 ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_FLOCK
) &&
4123 ((nflp
= TAILQ_NEXT(nflp
, nfl_lolink
))) &&
4124 ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
)) {
4126 int type
= TAILQ_FIRST(&nlop
->nlo_locks
)->nfl_type
;
4127 int delegated
= (TAILQ_FIRST(&nlop
->nlo_locks
)->nfl_flags
& NFS_FILE_LOCK_DELEGATED
);
4128 while (!delegated
&& nflp
) {
4129 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
) {
4130 /* unlock the range preceding this lock */
4131 lck_mtx_unlock(&np
->n_openlock
);
4132 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, type
, s
, nflp
->nfl_start
-1, 0,
4133 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4134 if (nfs_mount_state_error_should_restart(error
)) {
4135 nfs_open_state_clear_busy(np
);
4136 nfs_mount_state_in_use_end(nmp
, error
);
4139 lck_mtx_lock(&np
->n_openlock
);
4142 s
= nflp
->nfl_end
+1;
4144 nflp
= TAILQ_NEXT(nflp
, nfl_lolink
);
4147 lck_mtx_unlock(&np
->n_openlock
);
4148 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, type
, s
, end
, 0,
4149 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4150 if (nfs_mount_state_error_should_restart(error
)) {
4151 nfs_open_state_clear_busy(np
);
4152 nfs_mount_state_in_use_end(nmp
, error
);
4155 lck_mtx_lock(&np
->n_openlock
);
4159 send_unlock_rpcs
= 0;
4162 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
4163 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
|NFS_FILE_LOCK_DEAD
))
4165 if (nflp
->nfl_owner
!= nlop
)
4167 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != style
)
4169 if ((start
> nflp
->nfl_end
) || (end
< nflp
->nfl_start
))
4171 /* here's one to unlock */
4172 if ((start
<= nflp
->nfl_start
) && (end
>= nflp
->nfl_end
)) {
4173 /* The entire lock is being unlocked. */
4174 if (send_unlock_rpcs
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
4175 lck_mtx_unlock(&np
->n_openlock
);
4176 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, nflp
->nfl_type
, nflp
->nfl_start
, nflp
->nfl_end
, 0,
4177 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4178 if (nfs_mount_state_error_should_restart(error
)) {
4179 nfs_open_state_clear_busy(np
);
4180 nfs_mount_state_in_use_end(nmp
, error
);
4183 lck_mtx_lock(&np
->n_openlock
);
4185 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
4188 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
4189 lck_mtx_lock(&nlop
->nlo_lock
);
4190 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
4191 lck_mtx_unlock(&nlop
->nlo_lock
);
4192 /* lock will be destroyed below, if no waiters */
4193 } else if ((start
> nflp
->nfl_start
) && (end
< nflp
->nfl_end
)) {
4194 /* We're unlocking a range in the middle of a lock. */
4195 /* The current lock will be split into two locks. */
4196 if (send_unlock_rpcs
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
4197 lck_mtx_unlock(&np
->n_openlock
);
4198 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, nflp
->nfl_type
, start
, end
, 0,
4199 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4200 if (nfs_mount_state_error_should_restart(error
)) {
4201 nfs_open_state_clear_busy(np
);
4202 nfs_mount_state_in_use_end(nmp
, error
);
4205 lck_mtx_lock(&np
->n_openlock
);
4209 /* update locks and insert new lock after current lock */
4210 newnflp
->nfl_flags
|= (nflp
->nfl_flags
& (NFS_FILE_LOCK_STYLE_MASK
|NFS_FILE_LOCK_DELEGATED
));
4211 newnflp
->nfl_type
= nflp
->nfl_type
;
4212 newnflp
->nfl_start
= end
+ 1;
4213 newnflp
->nfl_end
= nflp
->nfl_end
;
4214 nflp
->nfl_end
= start
- 1;
4215 TAILQ_INSERT_AFTER(&np
->n_locks
, nflp
, newnflp
, nfl_link
);
4216 nfs_lock_owner_insert_held_lock(nlop
, newnflp
);
4219 } else if (start
> nflp
->nfl_start
) {
4220 /* We're unlocking the end of a lock. */
4221 if (send_unlock_rpcs
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
4222 lck_mtx_unlock(&np
->n_openlock
);
4223 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, nflp
->nfl_type
, start
, nflp
->nfl_end
, 0,
4224 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4225 if (nfs_mount_state_error_should_restart(error
)) {
4226 nfs_open_state_clear_busy(np
);
4227 nfs_mount_state_in_use_end(nmp
, error
);
4230 lck_mtx_lock(&np
->n_openlock
);
4232 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
4235 nflp
->nfl_end
= start
- 1;
4236 } else if (end
< nflp
->nfl_end
) {
4237 /* We're unlocking the start of a lock. */
4238 if (send_unlock_rpcs
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
4239 lck_mtx_unlock(&np
->n_openlock
);
4240 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, nflp
->nfl_type
, nflp
->nfl_start
, end
, 0,
4241 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4242 if (nfs_mount_state_error_should_restart(error
)) {
4243 nfs_open_state_clear_busy(np
);
4244 nfs_mount_state_in_use_end(nmp
, error
);
4247 lck_mtx_lock(&np
->n_openlock
);
4249 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
4252 nflp
->nfl_start
= end
+ 1;
4254 if (nflp
->nfl_blockcnt
) {
4255 /* wake up anyone blocked on this lock */
4257 } else if (nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
) {
4258 /* remove nflp from lock list and destroy */
4259 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
4260 nfs_file_lock_destroy(nflp
);
4264 lck_mtx_unlock(&np
->n_openlock
);
4265 nfs_open_state_clear_busy(np
);
4266 nfs_mount_state_in_use_end(nmp
, 0);
4269 nfs_file_lock_destroy(newnflp
);
4274 * NFSv4 advisory file locking
/*
 * nfs_vnop_advlock: NFSv4/NFSv3 advisory file locking vnop
 * (F_GETLK / F_SETLK / F_SETLKW / F_UNLCK).
 * NOTE(review): this region is an extraction-garbled excerpt; statements are
 * split across physical lines and many original lines are elided.  Comments
 * below annotate only what the visible fragments show.
 */
4278 struct vnop_advlock_args
/* {
4279 struct vnodeop_desc *a_desc;
4285 vfs_context_t a_context;
/* unpack the vnop arguments: vnode, nfsnode, struct flock, flags, vfs context */
4288 vnode_t vp
= ap
->a_vp
;
4289 nfsnode_t np
= VTONFS(ap
->a_vp
);
4290 struct flock
*fl
= ap
->a_fl
;
4292 int flags
= ap
->a_flags
;
4293 vfs_context_t ctx
= ap
->a_context
;
4294 struct nfsmount
*nmp
;
4295 struct nfs_open_owner
*noop
= NULL
;
4296 struct nfs_open_file
*nofp
= NULL
;
4297 struct nfs_lock_owner
*nlop
= NULL
;
4299 uint64_t start
, end
;
4300 int error
= 0, modified
, style
;
4302 #define OFF_MAX QUAD_MAX
/* reject if the mount is gone, or (v2/v3) if locking is administratively disabled */
4304 nmp
= VTONMP(ap
->a_vp
);
4305 if (nfs_mount_gone(nmp
))
4307 lck_mtx_lock(&nmp
->nm_lock
);
4308 if ((nmp
->nm_vers
<= NFS_VER3
) && (nmp
->nm_lockmode
== NFS_LOCK_MODE_DISABLED
)) {
4309 lck_mtx_unlock(&nmp
->nm_lock
);
4312 lck_mtx_unlock(&nmp
->nm_lock
);
/* no lock ops on revoked nodes; directories are ignored, only VREG is lockable */
4314 if (np
->n_flag
& NREVOKE
)
4316 vtype
= vnode_vtype(ap
->a_vp
);
4317 if (vtype
== VDIR
) /* ignore lock requests on directories */
4319 if (vtype
!= VREG
) /* anything other than regular files is invalid */
4322 /* Convert the flock structure into a start and end. */
4323 switch (fl
->l_whence
) {
4327 * Caller is responsible for adding any necessary offset
4328 * to fl->l_start when SEEK_CUR is used.
4330 lstart
= fl
->l_start
;
4333 /* need to flush, and refetch attributes to make */
4334 /* sure we have the correct end of file offset */
4335 if ((error
= nfs_node_lock(np
)))
4337 modified
= (np
->n_flag
& NMODIFIED
);
4338 nfs_node_unlock(np
);
4339 if (modified
&& ((error
= nfs_vinvalbuf(vp
, V_SAVE
, ctx
, 1))))
4341 if ((error
= nfs_getattr(np
, NULL
, ctx
, NGA_UNCACHED
)))
4343 nfs_data_lock(np
, NFS_DATA_LOCK_SHARED
);
/* SEEK_END: guard n_size + l_start against 64-bit offset overflow */
4344 if ((np
->n_size
> OFF_MAX
) ||
4345 ((fl
->l_start
> 0) && (np
->n_size
> (u_quad_t
)(OFF_MAX
- fl
->l_start
))))
4347 lstart
= np
->n_size
+ fl
->l_start
;
4348 nfs_data_unlock(np
);
/* compute the inclusive end of the byte range; l_len == 0 means "to end of file" */
4358 if (fl
->l_len
== 0) {
4360 } else if (fl
->l_len
> 0) {
4361 if ((fl
->l_len
- 1) > (OFF_MAX
- lstart
))
4363 end
= start
- 1 + fl
->l_len
;
4364 } else { /* l_len is negative */
4365 if ((lstart
+ fl
->l_len
) < 0)
/* NFSv2 only supports 32-bit file offsets */
4370 if ((nmp
->nm_vers
== NFS_VER2
) && ((start
> INT32_MAX
) || (fl
->l_len
&& (end
> INT32_MAX
))))
/* flock(2)-style locks must cover the whole file; otherwise treat as POSIX style */
4373 style
= (flags
& F_FLOCK
) ? NFS_FILE_LOCK_STYLE_FLOCK
: NFS_FILE_LOCK_STYLE_POSIX
;
4374 if ((style
== NFS_FILE_LOCK_STYLE_FLOCK
) && ((start
!= 0) || (end
!= UINT64_MAX
)))
4377 /* find the lock owner, alloc if not unlock */
4378 nlop
= nfs_lock_owner_find(np
, vfs_context_proc(ctx
), (op
!= F_UNLCK
));
4380 error
= (op
== F_UNLCK
) ? 0 : ENOMEM
;
4382 NP(np
, "nfs_vnop_advlock: no lock owner, error %d", error
);
/* F_GETLK is answered directly; lock/unlock need the open owner/file state below */
4386 if (op
== F_GETLK
) {
4387 error
= nfs_advlock_getlock(np
, nlop
, fl
, start
, end
, ctx
);
4389 /* find the open owner */
4390 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 0);
4392 NP(np
, "nfs_vnop_advlock: no open owner %d", kauth_cred_getuid(vfs_context_ucred(ctx
)));
4396 /* find the open file */
4398 error
= nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 0);
4401 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
4402 NP(np
, "nfs_vnop_advlock: LOST %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
/* lost open state must be reopened first (no thread for plain unlock) */
4405 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
4406 error
= nfs4_reopen(nofp
, ((op
== F_UNLCK
) ? NULL
: vfs_context_thread(ctx
)));
4412 NP(np
, "nfs_vnop_advlock: no open file %d, %d", error
, kauth_cred_getuid(noop
->noo_cred
));
/* dispatch: unlock, set-lock (F_SETLK may not wait unless F_WAIT), else unsupported */
4415 if (op
== F_UNLCK
) {
4416 error
= nfs_advlock_unlock(np
, nofp
, nlop
, start
, end
, style
, ctx
);
4417 } else if ((op
== F_SETLK
) || (op
== F_SETLKW
)) {
4418 if ((op
== F_SETLK
) && (flags
& F_WAIT
))
4420 error
= nfs_advlock_setlock(np
, nofp
, nlop
, op
, start
, end
, style
, fl
->l_type
, ctx
);
4422 /* not getlk, unlock or lock? */
/* drop the lock-owner / open-owner references taken above */
4429 nfs_lock_owner_rele(nlop
);
4431 nfs_open_owner_rele(noop
);
4436 * Check if an open owner holds any locks on a file.
4439 nfs_check_for_locks(struct nfs_open_owner
*noop
, struct nfs_open_file
*nofp
)
4441 struct nfs_lock_owner
*nlop
;
4443 TAILQ_FOREACH(nlop
, &nofp
->nof_np
->n_lock_owners
, nlo_link
) {
4444 if (nlop
->nlo_open_owner
!= noop
)
4446 if (!TAILQ_EMPTY(&nlop
->nlo_locks
))
4449 return (nlop
? 1 : 0);
4453 * Reopen simple (no deny, no locks) open state that was lost.
/*
 * nfs4_reopen: reopen simple (no deny, no locks) open state that was lost,
 * e.g. after server reboot/lease expiry marked the open file NFS_OPEN_FILE_REOPEN.
 * NOTE(review): extraction-garbled excerpt; many original lines are elided.
 */
4456 nfs4_reopen(struct nfs_open_file
*nofp
, thread_t thd
)
4458 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
4459 struct nfsmount
*nmp
= NFSTONMP(nofp
->nof_np
);
4460 nfsnode_t np
= nofp
->nof_np
;
4461 vnode_t vp
= NFSTOV(np
);
4463 struct componentname cn
;
4464 const char *vname
= NULL
;
4465 const char *name
= NULL
;
4467 char smallname
[128];
4468 char *filename
= NULL
;
4469 int error
= 0, done
= 0, slpflag
= NMFLAG(nmp
, INTR
) ? PCATCH
: 0;
4470 struct timespec ts
= { 1, 0 };
/* serialize reopens: wait (interruptibly) while another thread is reopening */
4472 lck_mtx_lock(&nofp
->nof_lock
);
4473 while (nofp
->nof_flags
& NFS_OPEN_FILE_REOPENING
) {
4474 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0)))
4476 msleep(&nofp
->nof_flags
, &nofp
->nof_lock
, slpflag
|(PZERO
-1), "nfsreopenwait", &ts
);
4479 if (error
|| !(nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
4480 lck_mtx_unlock(&nofp
->nof_lock
);
4483 nofp
->nof_flags
|= NFS_OPEN_FILE_REOPENING
;
4484 lck_mtx_unlock(&nofp
->nof_lock
);
/* determine the directory vnode and file name to open by (sillyrename-aware) */
4486 nfs_node_lock_force(np
);
4487 if ((vnode_vtype(vp
) != VDIR
) && np
->n_sillyrename
) {
4489 * The node's been sillyrenamed, so we need to use
4490 * the sillyrename directory/name to do the open.
4492 struct nfs_sillyrename
*nsp
= np
->n_sillyrename
;
4493 dvp
= NFSTOV(nsp
->nsr_dnp
);
4494 if ((error
= vnode_get(dvp
))) {
4495 nfs_node_unlock(np
);
4498 name
= nsp
->nsr_name
;
4501 * [sigh] We can't trust VFS to get the parent right for named
4502 * attribute nodes. (It likes to reparent the nodes after we've
4503 * created them.) Luckily we can probably get the right parent
4504 * from the n_parent we have stashed away.
4506 if ((np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) &&
4507 (((dvp
= np
->n_parent
)) && (error
= vnode_get(dvp
))))
4510 dvp
= vnode_getparent(vp
);
4511 vname
= vnode_getname(vp
);
4512 if (!dvp
|| !vname
) {
4515 nfs_node_unlock(np
);
/* copy the name into a stack buffer, spilling to an allocation if too long */
4520 filename
= &smallname
[0];
4521 namelen
= snprintf(filename
, sizeof(smallname
), "%s", name
);
4522 if (namelen
>= sizeof(smallname
)) {
4523 MALLOC(filename
, char *, namelen
+1, M_TEMP
, M_WAITOK
);
4528 snprintf(filename
, namelen
+1, "%s", name
);
4530 nfs_node_unlock(np
);
4531 bzero(&cn
, sizeof(cn
));
4532 cn
.cn_nameptr
= filename
;
4533 cn
.cn_namelen
= namelen
;
/* issue reopen RPC(s): ACCESS_BOTH first, then WRITE/READ individually as held */
4537 if ((error
= nfs_mount_state_in_use_start(nmp
, thd
)))
4541 error
= nfs4_open_reopen_rpc(nofp
, thd
, noop
->noo_cred
, &cn
, dvp
, &vp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_NONE
);
4542 if (!error
&& nofp
->nof_w
)
4543 error
= nfs4_open_reopen_rpc(nofp
, thd
, noop
->noo_cred
, &cn
, dvp
, &vp
, NFS_OPEN_SHARE_ACCESS_WRITE
, NFS_OPEN_SHARE_DENY_NONE
);
4544 if (!error
&& nofp
->nof_r
)
4545 error
= nfs4_open_reopen_rpc(nofp
, thd
, noop
->noo_cred
, &cn
, dvp
, &vp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
);
/* NOTE(review): NFSERR_GRACE presumably triggers a delayed retry -- lines elided, confirm */
4547 if (nfs_mount_state_in_use_end(nmp
, error
)) {
4548 if (error
== NFSERR_GRACE
)
4550 printf("nfs4_reopen: RPC failed, error %d, lost %d, %s\n", error
,
4551 (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) ? 1 : 0, name
? name
: "???");
/* on hard failure (not EINTR/ERESTART) revoke open state; clear REOPENING flag */
4557 if (error
&& (error
!= EINTR
) && (error
!= ERESTART
))
4558 nfs_revoke_open_state_for_node(np
);
4559 lck_mtx_lock(&nofp
->nof_lock
);
4560 nofp
->nof_flags
&= ~NFS_OPEN_FILE_REOPENING
;
4562 nofp
->nof_flags
&= ~NFS_OPEN_FILE_REOPEN
;
4564 printf("nfs4_reopen: failed, error %d, lost %d, %s\n", error
,
4565 (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) ? 1 : 0, name
? name
: "???");
4566 lck_mtx_unlock(&nofp
->nof_lock
);
/* release the spilled name buffer and the vnode name reference */
4567 if (filename
&& (filename
!= &smallname
[0]))
4568 FREE(filename
, M_TEMP
);
4570 vnode_putname(vname
);
4577 * Send a normal OPEN RPC to open/create a file.
4581 struct nfs_open_file
*nofp
,
4583 struct componentname
*cnp
,
4584 struct vnode_attr
*vap
,
4591 return (nfs4_open_rpc_internal(nofp
, ctx
, vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
4592 cnp
, vap
, dvp
, vpp
, create
, share_access
, share_deny
));
4596 * Send an OPEN RPC to reopen a file.
4599 nfs4_open_reopen_rpc(
4600 struct nfs_open_file
*nofp
,
4603 struct componentname
*cnp
,
4609 return (nfs4_open_rpc_internal(nofp
, NULL
, thd
, cred
, cnp
, NULL
, dvp
, vpp
, NFS_OPEN_NOCREATE
, share_access
, share_deny
));
4613 * Send an OPEN_CONFIRM RPC to confirm an OPEN.
4616 nfs4_open_confirm_rpc(
4617 struct nfsmount
*nmp
,
4621 struct nfs_open_owner
*noop
,
4625 struct nfs_vattr
*nvap
,
4628 struct nfsm_chain nmreq
, nmrep
;
4629 int error
= 0, status
, numops
;
4630 struct nfsreq_secinfo_args si
;
4632 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
4633 nfsm_chain_null(&nmreq
);
4634 nfsm_chain_null(&nmrep
);
4636 // PUTFH, OPEN_CONFIRM, GETATTR
4638 nfsm_chain_build_alloc_init(error
, &nmreq
, 23 * NFSX_UNSIGNED
);
4639 nfsm_chain_add_compound_header(error
, &nmreq
, "open_confirm", nmp
->nm_minor_vers
, numops
);
4641 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
4642 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, fhp
, fhlen
);
4644 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN_CONFIRM
);
4645 nfsm_chain_add_stateid(error
, &nmreq
, sid
);
4646 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
4648 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
4649 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
4650 nfsm_chain_build_done(error
, &nmreq
);
4651 nfsm_assert(error
, (numops
== 0), EPROTO
);
4653 error
= nfs_request2(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, R_NOINTR
, &nmrep
, xidp
, &status
);
4655 nfsm_chain_skip_tag(error
, &nmrep
);
4656 nfsm_chain_get_32(error
, &nmrep
, numops
);
4657 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
4659 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN_CONFIRM
);
4660 nfs_owner_seqid_increment(noop
, NULL
, error
);
4661 nfsm_chain_get_stateid(error
, &nmrep
, sid
);
4662 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
4664 error
= nfs4_parsefattr(&nmrep
, NULL
, nvap
, NULL
, NULL
, NULL
);
4666 nfsm_chain_cleanup(&nmreq
);
4667 nfsm_chain_cleanup(&nmrep
);
4672 * common OPEN RPC code
4674 * If create is set, ctx must be passed in.
4675 * Returns a node on success if no node passed in.
/*
 * nfs4_open_rpc_internal: common OPEN RPC code for open/create/reopen.
 * If create is set, ctx must be passed in; returns a node via *vpp on
 * success if no node was passed in.
 * NOTE(review): extraction-garbled excerpt; many original lines are elided.
 */
4678 nfs4_open_rpc_internal(
4679 struct nfs_open_file
*nofp
,
4683 struct componentname
*cnp
,
4684 struct vnode_attr
*vap
,
4691 struct nfsmount
*nmp
;
4692 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
4693 struct nfs_vattr nvattr
;
4694 int error
= 0, open_error
= EIO
, lockerror
= ENOENT
, busyerror
= ENOENT
, status
;
4695 int nfsvers
, namedattrs
, numops
, exclusive
= 0, gotuid
, gotgid
;
4696 u_int64_t xid
, savedxid
= 0;
4697 nfsnode_t dnp
= VTONFS(dvp
);
4698 nfsnode_t np
, newnp
= NULL
;
4699 vnode_t newvp
= NULL
;
4700 struct nfsm_chain nmreq
, nmrep
;
4701 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
4702 uint32_t rflags
, delegation
, recall
;
4703 struct nfs_stateid stateid
, dstateid
, *sid
;
4705 struct nfsreq rq
, *req
= &rq
;
4706 struct nfs_dulookup dul
;
4708 uint32_t ace_type
, ace_flags
, ace_mask
, len
, slen
;
4709 struct kauth_ace ace
;
4710 struct nfsreq_secinfo_args si
;
/* sanity: mount alive, no referral trigger; set up create-time attribute flags */
4716 if (nfs_mount_gone(nmp
))
4718 nfsvers
= nmp
->nm_vers
;
4719 namedattrs
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
);
4720 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
4723 np
= *vpp
? VTONFS(*vpp
) : NULL
;
4724 if (create
&& vap
) {
4725 exclusive
= (vap
->va_vaflags
& VA_EXCLUSIVE
);
4726 nfs_avoid_needless_id_setting_on_create(dnp
, vap
, ctx
);
4727 gotuid
= VATTR_IS_ACTIVE(vap
, va_uid
);
4728 gotgid
= VATTR_IS_ACTIVE(vap
, va_gid
);
4729 if (exclusive
&& (!VATTR_IS_ACTIVE(vap
, va_access_time
) || !VATTR_IS_ACTIVE(vap
, va_modify_time
)))
4730 vap
->va_vaflags
|= VA_UTIMES_NULL
;
4732 exclusive
= gotuid
= gotgid
= 0;
/* stateid handling: sid normally points at the open file's stateid (lines elided here) */
4735 sid
= &nofp
->nof_stateid
;
4737 stateid
.seqid
= stateid
.other
[0] = stateid
.other
[1] = stateid
.other
[2] = 0;
4741 if ((error
= nfs_open_owner_set_busy(noop
, thd
)))
4744 rflags
= delegation
= recall
= 0;
4747 slen
= sizeof(sbuf
);
4748 NVATTR_INIT(&nvattr
);
4749 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, cnp
->cn_nameptr
, cnp
->cn_namelen
);
4751 nfsm_chain_null(&nmreq
);
4752 nfsm_chain_null(&nmrep
);
4754 // PUTFH, SAVEFH, OPEN(CREATE?), GETATTR(FH), RESTOREFH, GETATTR
4756 nfsm_chain_build_alloc_init(error
, &nmreq
, 53 * NFSX_UNSIGNED
+ cnp
->cn_namelen
);
4757 nfsm_chain_add_compound_header(error
, &nmreq
, create
? "create" : "open", nmp
->nm_minor_vers
, numops
);
4759 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
4760 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
4762 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
/* OPEN op: seqid, share access/deny, open owner (clientid + uid), openhow */
4764 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
4765 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
4766 nfsm_chain_add_32(error
, &nmreq
, share_access
);
4767 nfsm_chain_add_32(error
, &nmreq
, share_deny
);
4768 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
);
4769 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
4770 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
));
4771 nfsm_chain_add_32(error
, &nmreq
, create
);
4774 static uint32_t create_verf
; // XXX need a better verifier
4776 nfsm_chain_add_32(error
, &nmreq
, NFS_CREATE_EXCLUSIVE
);
4777 /* insert 64 bit verifier */
4778 nfsm_chain_add_32(error
, &nmreq
, create_verf
);
4779 nfsm_chain_add_32(error
, &nmreq
, create_verf
);
4781 nfsm_chain_add_32(error
, &nmreq
, NFS_CREATE_UNCHECKED
);
4782 nfsm_chain_add_fattr4(error
, &nmreq
, vap
, nmp
);
4785 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_NULL
);
4786 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
/* GETATTR on the new/opened file, requesting the filehandle too */
4788 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
4789 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
4790 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
4791 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
4793 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
4795 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
4796 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
4797 nfsm_chain_build_done(error
, &nmreq
);
4798 nfsm_assert(error
, (numops
== 0), EPROTO
);
/* mark the directory busy and run the parallel "dulookup" while the RPC is out */
4800 error
= busyerror
= nfs_node_set_busy(dnp
, thd
);
4803 if (create
&& !namedattrs
)
4804 nfs_dulookup_init(&dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
);
4806 error
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, R_NOINTR
, NULL
, &req
);
4808 if (create
&& !namedattrs
)
4809 nfs_dulookup_start(&dul
, dnp
, ctx
);
4810 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
4814 if (create
&& !namedattrs
)
4815 nfs_dulookup_finish(&dul
, dnp
, ctx
);
4817 if ((lockerror
= nfs_node_lock(dnp
)))
/* parse the compound reply: PUTFH, SAVEFH, OPEN (stateid, change info, rflags) */
4819 nfsm_chain_skip_tag(error
, &nmrep
);
4820 nfsm_chain_get_32(error
, &nmrep
, numops
);
4821 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
4822 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
4824 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
4825 nfs_owner_seqid_increment(noop
, NULL
, error
);
4826 nfsm_chain_get_stateid(error
, &nmrep
, sid
);
4827 nfsm_chain_check_change_info(error
, &nmrep
, dnp
);
4828 nfsm_chain_get_32(error
, &nmrep
, rflags
);
4829 bmlen
= NFS_ATTR_BITMAP_LEN
;
4830 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
4831 nfsm_chain_get_32(error
, &nmrep
, delegation
);
/* accept any delegation returned with the open; bad ACEs are just invalidated */
4833 switch (delegation
) {
4834 case NFS_OPEN_DELEGATE_NONE
:
4836 case NFS_OPEN_DELEGATE_READ
:
4837 case NFS_OPEN_DELEGATE_WRITE
:
4838 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
4839 nfsm_chain_get_32(error
, &nmrep
, recall
);
4840 if (delegation
== NFS_OPEN_DELEGATE_WRITE
) // space (skip) XXX
4841 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
4842 /* if we have any trouble accepting the ACE, just invalidate it */
4843 ace_type
= ace_flags
= ace_mask
= len
= 0;
4844 nfsm_chain_get_32(error
, &nmrep
, ace_type
);
4845 nfsm_chain_get_32(error
, &nmrep
, ace_flags
);
4846 nfsm_chain_get_32(error
, &nmrep
, ace_mask
);
4847 nfsm_chain_get_32(error
, &nmrep
, len
);
4848 ace
.ace_flags
= nfs4_ace_nfstype_to_vfstype(ace_type
, &error
);
4849 ace
.ace_flags
|= nfs4_ace_nfsflags_to_vfsflags(ace_flags
);
4850 ace
.ace_rights
= nfs4_ace_nfsmask_to_vfsrights(ace_mask
);
4851 if (!error
&& (len
>= slen
)) {
4852 MALLOC(s
, char*, len
+1, M_TEMP
, M_WAITOK
);
4859 nfsm_chain_get_opaque(error
, &nmrep
, len
, s
);
4861 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(len
));
4864 if (nfs4_id2guid(s
, &ace
.ace_applicable
, (ace_flags
& NFS_ACE_IDENTIFIER_GROUP
)))
4869 if (s
&& (s
!= sbuf
))
4876 /* At this point if we have no error, the object was created/opened. */
4879 if (create
&& vap
&& !exclusive
)
4880 nfs_vattr_set_supported(bitmap
, vap
);
4881 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
4883 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
4885 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
4886 printf("nfs: open/create didn't return filehandle? %s\n", cnp
->cn_nameptr
);
4890 if (!create
&& np
&& !NFS_CMPFH(np
, fh
.fh_data
, fh
.fh_len
)) {
4891 // XXX for the open case, what if fh doesn't match the vnode we think we're opening?
4892 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
4893 if (!(np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
))
4894 NP(np
, "nfs4_open_rpc: warning: file handle mismatch");
4896 /* directory attributes: if we don't get them, make sure to invalidate */
4897 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
4898 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
4899 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
4901 NATTRINVALIDATE(dnp
);
4904 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
)
4905 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
/* the server may require an OPEN_CONFIRM round trip before the stateid is usable */
4907 if (rflags
& NFS_OPEN_RESULT_CONFIRM
) {
4908 nfs_node_unlock(dnp
);
4910 NVATTR_CLEANUP(&nvattr
);
4911 error
= nfs4_open_confirm_rpc(nmp
, dnp
, fh
.fh_data
, fh
.fh_len
, noop
, sid
, thd
, cred
, &nvattr
, &xid
);
4914 if ((lockerror
= nfs_node_lock(dnp
)))
4919 nfsm_chain_cleanup(&nmreq
);
4920 nfsm_chain_cleanup(&nmrep
);
/* post-RPC directory bookkeeping; create the vnode if we opened by name */
4922 if (!lockerror
&& create
) {
4923 if (!open_error
&& (dnp
->n_flag
& NNEGNCENTRIES
)) {
4924 dnp
->n_flag
&= ~NNEGNCENTRIES
;
4925 cache_purge_negatives(dvp
);
4927 dnp
->n_flag
|= NMODIFIED
;
4928 nfs_node_unlock(dnp
);
4930 nfs_getattr(dnp
, NULL
, ctx
, NGA_CACHED
);
4933 nfs_node_unlock(dnp
);
4934 if (!error
&& !np
&& fh
.fh_len
) {
4935 /* create the vnode with the filehandle and attributes */
4937 error
= nfs_nget(NFSTOMP(dnp
), dnp
, cnp
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, NG_MAKEENTRY
, &newnp
);
4939 newvp
= NFSTOV(newnp
);
4941 NVATTR_CLEANUP(&nvattr
);
4943 nfs_node_clear_busy(dnp
);
/* deal with any delegation we received: keep it on the node, or return it */
4944 if ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
)) {
4947 if (!error
&& np
&& !recall
) {
4948 /* stuff the delegation state in the node */
4949 lck_mtx_lock(&np
->n_openlock
);
4950 np
->n_openflags
&= ~N_DELEG_MASK
;
4951 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
4952 np
->n_dstateid
= dstateid
;
4954 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
4955 lck_mtx_lock(&nmp
->nm_lock
);
4956 if (np
->n_dlink
.tqe_next
== NFSNOLIST
)
4957 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, np
, n_dlink
);
4958 lck_mtx_unlock(&nmp
->nm_lock
);
4960 lck_mtx_unlock(&np
->n_openlock
);
4962 /* give the delegation back */
4964 if (NFS_CMPFH(np
, fh
.fh_data
, fh
.fh_len
)) {
4965 /* update delegation state and return it */
4966 lck_mtx_lock(&np
->n_openlock
);
4967 np
->n_openflags
&= ~N_DELEG_MASK
;
4968 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
4969 np
->n_dstateid
= dstateid
;
4971 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
4972 lck_mtx_lock(&nmp
->nm_lock
);
4973 if (np
->n_dlink
.tqe_next
== NFSNOLIST
)
4974 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, np
, n_dlink
);
4975 lck_mtx_unlock(&nmp
->nm_lock
);
4977 lck_mtx_unlock(&np
->n_openlock
);
4978 /* don't need to send a separate delegreturn for fh */
4981 /* return np's current delegation */
4982 nfs4_delegation_return(np
, 0, thd
, cred
);
4984 if (fh
.fh_len
) /* return fh's delegation if it wasn't for np */
4985 nfs4_delegreturn_rpc(nmp
, fh
.fh_data
, fh
.fh_len
, &dstateid
, 0, thd
, cred
);
/* exclusive-create fallback, and post-create setattr (retry without uid/gid) */
4989 if (exclusive
&& (error
== NFSERR_NOTSUPP
)) {
4994 nfs_node_unlock(newnp
);
4997 } else if (create
) {
4998 nfs_node_unlock(newnp
);
5000 error
= nfs4_setattr_rpc(newnp
, vap
, ctx
);
5001 if (error
&& (gotuid
|| gotgid
)) {
5002 /* it's possible the server didn't like our attempt to set IDs. */
5003 /* so, let's try it again without those */
5004 VATTR_CLEAR_ACTIVE(vap
, va_uid
);
5005 VATTR_CLEAR_ACTIVE(vap
, va_gid
);
5006 error
= nfs4_setattr_rpc(newnp
, vap
, ctx
);
5014 nfs_open_owner_clear_busy(noop
);
5020 * Send an OPEN RPC to claim a delegated open for a file
/*
 * nfs4_claim_delegated_open_rpc: send an OPEN RPC (CLAIM_DELEGATE_CUR) to
 * claim a delegated open for a file, converting delegation-backed open state
 * into a real server-side open stateid.
 * NOTE(review): extraction-garbled excerpt; many original lines are elided.
 */
5023 nfs4_claim_delegated_open_rpc(
5024 struct nfs_open_file
*nofp
,
5029 struct nfsmount
*nmp
;
5030 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
5031 struct nfs_vattr nvattr
;
5032 int error
= 0, lockerror
= ENOENT
, status
;
5033 int nfsvers
, numops
;
5035 nfsnode_t np
= nofp
->nof_np
;
5036 struct nfsm_chain nmreq
, nmrep
;
5037 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
5038 uint32_t rflags
= 0, delegation
, recall
= 0;
5040 struct nfs_stateid dstateid
;
5041 char sbuf
[64], *s
= sbuf
;
5042 uint32_t ace_type
, ace_flags
, ace_mask
, len
, slen
= sizeof(sbuf
);
5043 struct kauth_ace ace
;
5045 const char *vname
= NULL
;
5046 const char *name
= NULL
;
5048 char smallname
[128];
5049 char *filename
= NULL
;
5050 struct nfsreq_secinfo_args si
;
5053 if (nfs_mount_gone(nmp
))
5055 nfsvers
= nmp
->nm_vers
;
/* determine parent directory vnode + name (sillyrename-aware), as in nfs4_reopen() */
5057 nfs_node_lock_force(np
);
5058 if ((vnode_vtype(NFSTOV(np
)) != VDIR
) && np
->n_sillyrename
) {
5060 * The node's been sillyrenamed, so we need to use
5061 * the sillyrename directory/name to do the open.
5063 struct nfs_sillyrename
*nsp
= np
->n_sillyrename
;
5064 dvp
= NFSTOV(nsp
->nsr_dnp
);
5065 if ((error
= vnode_get(dvp
))) {
5066 nfs_node_unlock(np
);
5069 name
= nsp
->nsr_name
;
5072 * [sigh] We can't trust VFS to get the parent right for named
5073 * attribute nodes. (It likes to reparent the nodes after we've
5074 * created them.) Luckily we can probably get the right parent
5075 * from the n_parent we have stashed away.
5077 if ((np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) &&
5078 (((dvp
= np
->n_parent
)) && (error
= vnode_get(dvp
))))
5081 dvp
= vnode_getparent(NFSTOV(np
));
5082 vname
= vnode_getname(NFSTOV(np
));
5083 if (!dvp
|| !vname
) {
5086 nfs_node_unlock(np
);
/* copy the name into a stack buffer, spilling to an allocation if too long */
5091 filename
= &smallname
[0];
5092 namelen
= snprintf(filename
, sizeof(smallname
), "%s", name
);
5093 if (namelen
>= sizeof(smallname
)) {
5094 MALLOC(filename
, char *, namelen
+1, M_TEMP
, M_WAITOK
);
5097 nfs_node_unlock(np
);
5100 snprintf(filename
, namelen
+1, "%s", name
);
5102 nfs_node_unlock(np
);
/* mark the open owner busy and snapshot the delegation stateid used for the claim */
5104 if ((error
= nfs_open_owner_set_busy(noop
, NULL
)))
5106 NVATTR_INIT(&nvattr
);
5107 delegation
= NFS_OPEN_DELEGATE_NONE
;
5108 dstateid
= np
->n_dstateid
;
5109 NFSREQ_SECINFO_SET(&si
, VTONFS(dvp
), NULL
, 0, filename
, namelen
);
5111 nfsm_chain_null(&nmreq
);
5112 nfsm_chain_null(&nmrep
);
5114 // PUTFH, OPEN, GETATTR(FH)
5116 nfsm_chain_build_alloc_init(error
, &nmreq
, 48 * NFSX_UNSIGNED
);
5117 nfsm_chain_add_compound_header(error
, &nmreq
, "open_claim_d", nmp
->nm_minor_vers
, numops
);
5119 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5120 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, VTONFS(dvp
)->n_fhp
, VTONFS(dvp
)->n_fhsize
);
5122 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
5123 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5124 nfsm_chain_add_32(error
, &nmreq
, share_access
);
5125 nfsm_chain_add_32(error
, &nmreq
, share_deny
);
5126 // open owner: clientid + uid
5127 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
); // open_owner4.clientid
5128 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
5129 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
)); // open_owner4.owner
5131 nfsm_chain_add_32(error
, &nmreq
, NFS_OPEN_NOCREATE
);
/* CLAIM_DELEGATE_CUR: present the current delegation stateid plus the name */
5133 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_DELEGATE_CUR
);
5134 nfsm_chain_add_stateid(error
, &nmreq
, &np
->n_dstateid
);
5135 nfsm_chain_add_name(error
, &nmreq
, filename
, namelen
, nmp
);
5137 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5138 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
5139 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
5140 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
5141 nfsm_chain_build_done(error
, &nmreq
);
5142 nfsm_assert(error
, (numops
== 0), EPROTO
);
5145 error
= nfs_request2(np
, nmp
->nm_mountp
, &nmreq
, NFSPROC4_COMPOUND
, current_thread(),
5146 noop
->noo_cred
, &si
, flags
|R_NOINTR
, &nmrep
, &xid
, &status
);
/* parse the reply: PUTFH, OPEN (new open stateid, change info, rflags, delegation) */
5148 if ((lockerror
= nfs_node_lock(np
)))
5150 nfsm_chain_skip_tag(error
, &nmrep
);
5151 nfsm_chain_get_32(error
, &nmrep
, numops
);
5152 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
5154 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
5155 nfs_owner_seqid_increment(noop
, NULL
, error
);
5156 nfsm_chain_get_stateid(error
, &nmrep
, &nofp
->nof_stateid
);
5157 nfsm_chain_check_change_info(error
, &nmrep
, np
);
5158 nfsm_chain_get_32(error
, &nmrep
, rflags
);
5159 bmlen
= NFS_ATTR_BITMAP_LEN
;
5160 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
5161 nfsm_chain_get_32(error
, &nmrep
, delegation
);
5163 switch (delegation
) {
5164 case NFS_OPEN_DELEGATE_NONE
:
5165 // if (!(np->n_openflags & N_DELEG_RETURN)) /* don't warn if delegation is being returned */
5166 // printf("nfs: open delegated claim didn't return a delegation %s\n", filename ? filename : "???");
5168 case NFS_OPEN_DELEGATE_READ
:
5169 case NFS_OPEN_DELEGATE_WRITE
:
5170 if ((((np
->n_openflags
& N_DELEG_MASK
) == N_DELEG_READ
) &&
5171 (delegation
== NFS_OPEN_DELEGATE_WRITE
)) ||
5172 (((np
->n_openflags
& N_DELEG_MASK
) == N_DELEG_WRITE
) &&
5173 (delegation
== NFS_OPEN_DELEGATE_READ
)))
5174 printf("nfs: open delegated claim returned a different delegation type! have %s got %s %s\n",
5175 ((np
->n_openflags
& N_DELEG_MASK
) == N_DELEG_WRITE
) ? "W" : "R",
5176 (delegation
== NFS_OPEN_DELEGATE_WRITE
) ? "W" : "R", filename
? filename
: "???");
5177 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
5178 nfsm_chain_get_32(error
, &nmrep
, recall
);
5179 if (delegation
== NFS_OPEN_DELEGATE_WRITE
) // space (skip) XXX
5180 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
5181 /* if we have any trouble accepting the ACE, just invalidate it */
5182 ace_type
= ace_flags
= ace_mask
= len
= 0;
5183 nfsm_chain_get_32(error
, &nmrep
, ace_type
);
5184 nfsm_chain_get_32(error
, &nmrep
, ace_flags
);
5185 nfsm_chain_get_32(error
, &nmrep
, ace_mask
);
5186 nfsm_chain_get_32(error
, &nmrep
, len
);
5187 ace
.ace_flags
= nfs4_ace_nfstype_to_vfstype(ace_type
, &error
);
5188 ace
.ace_flags
|= nfs4_ace_nfsflags_to_vfsflags(ace_flags
);
5189 ace
.ace_rights
= nfs4_ace_nfsmask_to_vfsrights(ace_mask
);
5190 if (!error
&& (len
>= slen
)) {
5191 MALLOC(s
, char*, len
+1, M_TEMP
, M_WAITOK
);
5198 nfsm_chain_get_opaque(error
, &nmrep
, len
, s
);
5200 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(len
));
5203 if (nfs4_id2guid(s
, &ace
.ace_applicable
, (ace_flags
& NFS_ACE_IDENTIFIER_GROUP
)))
5208 if (s
&& (s
!= sbuf
))
5211 /* stuff the latest delegation state in the node */
5212 lck_mtx_lock(&np
->n_openlock
);
5213 np
->n_openflags
&= ~N_DELEG_MASK
;
5214 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
5215 np
->n_dstateid
= dstateid
;
5217 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5218 lck_mtx_lock(&nmp
->nm_lock
);
5219 if (np
->n_dlink
.tqe_next
== NFSNOLIST
)
5220 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, np
, n_dlink
);
5221 lck_mtx_unlock(&nmp
->nm_lock
);
5223 lck_mtx_unlock(&np
->n_openlock
);
/* parse the returned attributes and verify the file handle matches this node */
5231 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5232 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
5234 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
5235 printf("nfs: open reclaim didn't return filehandle? %s\n", filename
? filename
: "???");
5239 if (!NFS_CMPFH(np
, fh
.fh_data
, fh
.fh_len
)) {
5240 // XXX what if fh doesn't match the vnode we think we're re-opening?
5241 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5242 if (!(np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
))
5243 printf("nfs4_claim_delegated_open_rpc: warning: file handle mismatch %s\n", filename
? filename
: "???");
5245 error
= nfs_loadattrcache(np
, &nvattr
, &xid
, 1);
5247 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
)
5248 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
5250 NVATTR_CLEANUP(&nvattr
);
5251 nfsm_chain_cleanup(&nmreq
);
5252 nfsm_chain_cleanup(&nmrep
);
/* done: unlock, unbusy the owner, and queue any claimed delegation for return */
5254 nfs_node_unlock(np
);
5255 nfs_open_owner_clear_busy(noop
);
5256 if ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
)) {
5259 * We're making a delegated claim.
5260 * Don't return the delegation here in case we have more to claim.
5261 * Just make sure it's queued up to be returned.
5263 nfs4_delegation_return_enqueue(np
);
5268 // printf("nfs: open claim delegated (%d, %d) succeeded for %s\n", share_access, share_deny, filename ? filename : "???");
5269 if (filename
&& (filename
!= &smallname
[0]))
5270 FREE(filename
, M_TEMP
);
5272 vnode_putname(vname
);
5279 * Send an OPEN RPC to reclaim an open file.
5282 nfs4_open_reclaim_rpc(
5283 struct nfs_open_file
*nofp
,
5287 struct nfsmount
*nmp
;
5288 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
5289 struct nfs_vattr nvattr
;
5290 int error
= 0, lockerror
= ENOENT
, status
;
5291 int nfsvers
, numops
;
5293 nfsnode_t np
= nofp
->nof_np
;
5294 struct nfsm_chain nmreq
, nmrep
;
5295 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
5296 uint32_t rflags
= 0, delegation
, recall
= 0;
5298 struct nfs_stateid dstateid
;
5299 char sbuf
[64], *s
= sbuf
;
5300 uint32_t ace_type
, ace_flags
, ace_mask
, len
, slen
= sizeof(sbuf
);
5301 struct kauth_ace ace
;
5302 struct nfsreq_secinfo_args si
;
5305 if (nfs_mount_gone(nmp
))
5307 nfsvers
= nmp
->nm_vers
;
5309 if ((error
= nfs_open_owner_set_busy(noop
, NULL
)))
5312 NVATTR_INIT(&nvattr
);
5313 delegation
= NFS_OPEN_DELEGATE_NONE
;
5314 dstateid
= np
->n_dstateid
;
5315 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
5317 nfsm_chain_null(&nmreq
);
5318 nfsm_chain_null(&nmrep
);
5320 // PUTFH, OPEN, GETATTR(FH)
5322 nfsm_chain_build_alloc_init(error
, &nmreq
, 48 * NFSX_UNSIGNED
);
5323 nfsm_chain_add_compound_header(error
, &nmreq
, "open_reclaim", nmp
->nm_minor_vers
, numops
);
5325 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5326 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
5328 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
5329 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5330 nfsm_chain_add_32(error
, &nmreq
, share_access
);
5331 nfsm_chain_add_32(error
, &nmreq
, share_deny
);
5332 // open owner: clientid + uid
5333 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
); // open_owner4.clientid
5334 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
5335 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
)); // open_owner4.owner
5337 nfsm_chain_add_32(error
, &nmreq
, NFS_OPEN_NOCREATE
);
5339 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_PREVIOUS
);
5340 delegation
= (np
->n_openflags
& N_DELEG_READ
) ? NFS_OPEN_DELEGATE_READ
:
5341 (np
->n_openflags
& N_DELEG_WRITE
) ? NFS_OPEN_DELEGATE_WRITE
:
5342 NFS_OPEN_DELEGATE_NONE
;
5343 nfsm_chain_add_32(error
, &nmreq
, delegation
);
5344 delegation
= NFS_OPEN_DELEGATE_NONE
;
5346 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5347 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
5348 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
5349 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
5350 nfsm_chain_build_done(error
, &nmreq
);
5351 nfsm_assert(error
, (numops
== 0), EPROTO
);
5354 error
= nfs_request2(np
, nmp
->nm_mountp
, &nmreq
, NFSPROC4_COMPOUND
, current_thread(),
5355 noop
->noo_cred
, &si
, R_RECOVER
|R_NOINTR
, &nmrep
, &xid
, &status
);
5357 if ((lockerror
= nfs_node_lock(np
)))
5359 nfsm_chain_skip_tag(error
, &nmrep
);
5360 nfsm_chain_get_32(error
, &nmrep
, numops
);
5361 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
5363 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
5364 nfs_owner_seqid_increment(noop
, NULL
, error
);
5365 nfsm_chain_get_stateid(error
, &nmrep
, &nofp
->nof_stateid
);
5366 nfsm_chain_check_change_info(error
, &nmrep
, np
);
5367 nfsm_chain_get_32(error
, &nmrep
, rflags
);
5368 bmlen
= NFS_ATTR_BITMAP_LEN
;
5369 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
5370 nfsm_chain_get_32(error
, &nmrep
, delegation
);
5372 switch (delegation
) {
5373 case NFS_OPEN_DELEGATE_NONE
:
5374 if (np
->n_openflags
& N_DELEG_MASK
) {
5376 * Hey! We were supposed to get our delegation back even
5377 * if it was getting immediately recalled. Bad server!
5379 * Just try to return the existing delegation.
5381 // NP(np, "nfs: open reclaim didn't return delegation?");
5382 delegation
= (np
->n_openflags
& N_DELEG_WRITE
) ? NFS_OPEN_DELEGATE_WRITE
: NFS_OPEN_DELEGATE_READ
;
5386 case NFS_OPEN_DELEGATE_READ
:
5387 case NFS_OPEN_DELEGATE_WRITE
:
5388 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
5389 nfsm_chain_get_32(error
, &nmrep
, recall
);
5390 if (delegation
== NFS_OPEN_DELEGATE_WRITE
) // space (skip) XXX
5391 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
5392 /* if we have any trouble accepting the ACE, just invalidate it */
5393 ace_type
= ace_flags
= ace_mask
= len
= 0;
5394 nfsm_chain_get_32(error
, &nmrep
, ace_type
);
5395 nfsm_chain_get_32(error
, &nmrep
, ace_flags
);
5396 nfsm_chain_get_32(error
, &nmrep
, ace_mask
);
5397 nfsm_chain_get_32(error
, &nmrep
, len
);
5398 ace
.ace_flags
= nfs4_ace_nfstype_to_vfstype(ace_type
, &error
);
5399 ace
.ace_flags
|= nfs4_ace_nfsflags_to_vfsflags(ace_flags
);
5400 ace
.ace_rights
= nfs4_ace_nfsmask_to_vfsrights(ace_mask
);
5401 if (!error
&& (len
>= slen
)) {
5402 MALLOC(s
, char*, len
+1, M_TEMP
, M_WAITOK
);
5409 nfsm_chain_get_opaque(error
, &nmrep
, len
, s
);
5411 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(len
));
5414 if (nfs4_id2guid(s
, &ace
.ace_applicable
, (ace_flags
& NFS_ACE_IDENTIFIER_GROUP
)))
5419 if (s
&& (s
!= sbuf
))
5422 /* stuff the delegation state in the node */
5423 lck_mtx_lock(&np
->n_openlock
);
5424 np
->n_openflags
&= ~N_DELEG_MASK
;
5425 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
5426 np
->n_dstateid
= dstateid
;
5428 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5429 lck_mtx_lock(&nmp
->nm_lock
);
5430 if (np
->n_dlink
.tqe_next
== NFSNOLIST
)
5431 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, np
, n_dlink
);
5432 lck_mtx_unlock(&nmp
->nm_lock
);
5434 lck_mtx_unlock(&np
->n_openlock
);
5442 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5443 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
5445 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
5446 NP(np
, "nfs: open reclaim didn't return filehandle?");
5450 if (!NFS_CMPFH(np
, fh
.fh_data
, fh
.fh_len
)) {
5451 // XXX what if fh doesn't match the vnode we think we're re-opening?
5452 // That should be pretty hard in this case, given that we are doing
5453 // the open reclaim using the file handle (and not a dir/name pair).
5454 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5455 if (!(np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
))
5456 NP(np
, "nfs4_open_reclaim_rpc: warning: file handle mismatch");
5458 error
= nfs_loadattrcache(np
, &nvattr
, &xid
, 1);
5460 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
)
5461 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
5464 // NP(np, "nfs: open reclaim (%d, %d) succeeded", share_access, share_deny);
5465 NVATTR_CLEANUP(&nvattr
);
5466 nfsm_chain_cleanup(&nmreq
);
5467 nfsm_chain_cleanup(&nmrep
);
5469 nfs_node_unlock(np
);
5470 nfs_open_owner_clear_busy(noop
);
5471 if ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
)) {
5473 nfs4_delegation_return_enqueue(np
);
5479 nfs4_open_downgrade_rpc(
5481 struct nfs_open_file
*nofp
,
5484 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
5485 struct nfsmount
*nmp
;
5486 int error
, lockerror
= ENOENT
, status
, nfsvers
, numops
;
5487 struct nfsm_chain nmreq
, nmrep
;
5489 struct nfsreq_secinfo_args si
;
5492 if (nfs_mount_gone(nmp
))
5494 nfsvers
= nmp
->nm_vers
;
5496 if ((error
= nfs_open_owner_set_busy(noop
, NULL
)))
5499 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
5500 nfsm_chain_null(&nmreq
);
5501 nfsm_chain_null(&nmrep
);
5503 // PUTFH, OPEN_DOWNGRADE, GETATTR
5505 nfsm_chain_build_alloc_init(error
, &nmreq
, 23 * NFSX_UNSIGNED
);
5506 nfsm_chain_add_compound_header(error
, &nmreq
, "open_downgrd", nmp
->nm_minor_vers
, numops
);
5508 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5509 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
5511 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN_DOWNGRADE
);
5512 nfsm_chain_add_stateid(error
, &nmreq
, &nofp
->nof_stateid
);
5513 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5514 nfsm_chain_add_32(error
, &nmreq
, nofp
->nof_access
);
5515 nfsm_chain_add_32(error
, &nmreq
, nofp
->nof_deny
);
5517 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5518 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
5519 nfsm_chain_build_done(error
, &nmreq
);
5520 nfsm_assert(error
, (numops
== 0), EPROTO
);
5522 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
5523 vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
5524 &si
, R_NOINTR
, &nmrep
, &xid
, &status
);
5526 if ((lockerror
= nfs_node_lock(np
)))
5528 nfsm_chain_skip_tag(error
, &nmrep
);
5529 nfsm_chain_get_32(error
, &nmrep
, numops
);
5530 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
5532 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN_DOWNGRADE
);
5533 nfs_owner_seqid_increment(noop
, NULL
, error
);
5534 nfsm_chain_get_stateid(error
, &nmrep
, &nofp
->nof_stateid
);
5535 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5536 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
5539 nfs_node_unlock(np
);
5540 nfs_open_owner_clear_busy(noop
);
5541 nfsm_chain_cleanup(&nmreq
);
5542 nfsm_chain_cleanup(&nmrep
);
5549 struct nfs_open_file
*nofp
,
5554 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
5555 struct nfsmount
*nmp
;
5556 int error
, lockerror
= ENOENT
, status
, nfsvers
, numops
;
5557 struct nfsm_chain nmreq
, nmrep
;
5559 struct nfsreq_secinfo_args si
;
5562 if (nfs_mount_gone(nmp
))
5564 nfsvers
= nmp
->nm_vers
;
5566 if ((error
= nfs_open_owner_set_busy(noop
, NULL
)))
5569 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
5570 nfsm_chain_null(&nmreq
);
5571 nfsm_chain_null(&nmrep
);
5573 // PUTFH, CLOSE, GETATTR
5575 nfsm_chain_build_alloc_init(error
, &nmreq
, 23 * NFSX_UNSIGNED
);
5576 nfsm_chain_add_compound_header(error
, &nmreq
, "close", nmp
->nm_minor_vers
, numops
);
5578 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5579 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
5581 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_CLOSE
);
5582 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5583 nfsm_chain_add_stateid(error
, &nmreq
, &nofp
->nof_stateid
);
5585 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5586 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
5587 nfsm_chain_build_done(error
, &nmreq
);
5588 nfsm_assert(error
, (numops
== 0), EPROTO
);
5590 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, flags
|R_NOINTR
, &nmrep
, &xid
, &status
);
5592 if ((lockerror
= nfs_node_lock(np
)))
5594 nfsm_chain_skip_tag(error
, &nmrep
);
5595 nfsm_chain_get_32(error
, &nmrep
, numops
);
5596 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
5598 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_CLOSE
);
5599 nfs_owner_seqid_increment(noop
, NULL
, error
);
5600 nfsm_chain_get_stateid(error
, &nmrep
, &nofp
->nof_stateid
);
5601 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5602 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
5605 nfs_node_unlock(np
);
5606 nfs_open_owner_clear_busy(noop
);
5607 nfsm_chain_cleanup(&nmreq
);
5608 nfsm_chain_cleanup(&nmrep
);
5614 * Claim the delegated open combinations this open file holds.
5617 nfs4_claim_delegated_state_for_open_file(struct nfs_open_file
*nofp
, int flags
)
5619 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
5620 struct nfs_lock_owner
*nlop
;
5621 struct nfs_file_lock
*nflp
, *nextnflp
;
5622 struct nfsmount
*nmp
;
5623 int error
= 0, reopen
= 0;
5625 if (nofp
->nof_d_rw_drw
) {
5626 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_BOTH
, flags
);
5628 lck_mtx_lock(&nofp
->nof_lock
);
5629 nofp
->nof_rw_drw
+= nofp
->nof_d_rw_drw
;
5630 nofp
->nof_d_rw_drw
= 0;
5631 lck_mtx_unlock(&nofp
->nof_lock
);
5634 if (!error
&& nofp
->nof_d_w_drw
) {
5635 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_WRITE
, NFS_OPEN_SHARE_DENY_BOTH
, flags
);
5637 lck_mtx_lock(&nofp
->nof_lock
);
5638 nofp
->nof_w_drw
+= nofp
->nof_d_w_drw
;
5639 nofp
->nof_d_w_drw
= 0;
5640 lck_mtx_unlock(&nofp
->nof_lock
);
5643 if (!error
&& nofp
->nof_d_r_drw
) {
5644 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_BOTH
, flags
);
5646 lck_mtx_lock(&nofp
->nof_lock
);
5647 nofp
->nof_r_drw
+= nofp
->nof_d_r_drw
;
5648 nofp
->nof_d_r_drw
= 0;
5649 lck_mtx_unlock(&nofp
->nof_lock
);
5652 if (!error
&& nofp
->nof_d_rw_dw
) {
5653 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_WRITE
, flags
);
5655 lck_mtx_lock(&nofp
->nof_lock
);
5656 nofp
->nof_rw_dw
+= nofp
->nof_d_rw_dw
;
5657 nofp
->nof_d_rw_dw
= 0;
5658 lck_mtx_unlock(&nofp
->nof_lock
);
5661 if (!error
&& nofp
->nof_d_w_dw
) {
5662 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_WRITE
, NFS_OPEN_SHARE_DENY_WRITE
, flags
);
5664 lck_mtx_lock(&nofp
->nof_lock
);
5665 nofp
->nof_w_dw
+= nofp
->nof_d_w_dw
;
5666 nofp
->nof_d_w_dw
= 0;
5667 lck_mtx_unlock(&nofp
->nof_lock
);
5670 if (!error
&& nofp
->nof_d_r_dw
) {
5671 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_WRITE
, flags
);
5673 lck_mtx_lock(&nofp
->nof_lock
);
5674 nofp
->nof_r_dw
+= nofp
->nof_d_r_dw
;
5675 nofp
->nof_d_r_dw
= 0;
5676 lck_mtx_unlock(&nofp
->nof_lock
);
5679 /* non-deny-mode opens may be reopened if no locks are held */
5680 if (!error
&& nofp
->nof_d_rw
) {
5681 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_NONE
, flags
);
5682 /* for some errors, we should just try reopening the file */
5683 if (nfs_mount_state_error_delegation_lost(error
))
5685 if (!error
|| reopen
) {
5686 lck_mtx_lock(&nofp
->nof_lock
);
5687 nofp
->nof_rw
+= nofp
->nof_d_rw
;
5689 lck_mtx_unlock(&nofp
->nof_lock
);
5692 /* if we've already set reopen, we should move these other two opens from delegated to not delegated */
5693 if ((!error
|| reopen
) && nofp
->nof_d_w
) {
5695 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_WRITE
, NFS_OPEN_SHARE_DENY_NONE
, flags
);
5696 /* for some errors, we should just try reopening the file */
5697 if (nfs_mount_state_error_delegation_lost(error
))
5700 if (!error
|| reopen
) {
5701 lck_mtx_lock(&nofp
->nof_lock
);
5702 nofp
->nof_w
+= nofp
->nof_d_w
;
5704 lck_mtx_unlock(&nofp
->nof_lock
);
5707 if ((!error
|| reopen
) && nofp
->nof_d_r
) {
5709 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, flags
);
5710 /* for some errors, we should just try reopening the file */
5711 if (nfs_mount_state_error_delegation_lost(error
))
5714 if (!error
|| reopen
) {
5715 lck_mtx_lock(&nofp
->nof_lock
);
5716 nofp
->nof_r
+= nofp
->nof_d_r
;
5718 lck_mtx_unlock(&nofp
->nof_lock
);
5724 * Any problems with the delegation probably indicates that we
5725 * should review/return all of our current delegation state.
5727 if ((nmp
= NFSTONMP(nofp
->nof_np
))) {
5728 nfs4_delegation_return_enqueue(nofp
->nof_np
);
5729 lck_mtx_lock(&nmp
->nm_lock
);
5730 nfs_need_recover(nmp
, NFSERR_EXPIRED
);
5731 lck_mtx_unlock(&nmp
->nm_lock
);
5733 if (reopen
&& (nfs_check_for_locks(noop
, nofp
) == 0)) {
5734 /* just reopen the file on next access */
5735 NP(nofp
->nof_np
, "nfs4_claim_delegated_state_for_open_file: %d, need reopen, %d",
5736 reopen
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
5737 lck_mtx_lock(&nofp
->nof_lock
);
5738 nofp
->nof_flags
|= NFS_OPEN_FILE_REOPEN
;
5739 lck_mtx_unlock(&nofp
->nof_lock
);
5743 NP(nofp
->nof_np
, "nfs4_claim_delegated_state_for_open_file: %d, locks prevent reopen, %d",
5744 reopen
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
5747 if (!error
&& ((nmp
= NFSTONMP(nofp
->nof_np
)))) {
5748 /* claim delegated locks */
5749 TAILQ_FOREACH(nlop
, &nofp
->nof_np
->n_lock_owners
, nlo_link
) {
5750 if (nlop
->nlo_open_owner
!= noop
)
5752 TAILQ_FOREACH_SAFE(nflp
, &nlop
->nlo_locks
, nfl_lolink
, nextnflp
) {
5753 /* skip dead & blocked lock requests (shouldn't be any in the held lock list) */
5754 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_DEAD
|NFS_FILE_LOCK_BLOCKED
))
5756 /* skip non-delegated locks */
5757 if (!(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
))
5759 error
= nmp
->nm_funcs
->nf_setlock_rpc(nofp
->nof_np
, nofp
, nflp
, 0, flags
, current_thread(), noop
->noo_cred
);
5761 NP(nofp
->nof_np
, "nfs: delegated lock claim (0x%llx, 0x%llx) failed %d, %d",
5762 nflp
->nfl_start
, nflp
->nfl_end
, error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
5766 // NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) succeeded, %d",
5767 // nflp->nfl_start, nflp->nfl_end, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5775 if (!error
) /* all state claimed successfully! */
5778 /* restart if it looks like a problem more than just losing the delegation */
5779 if (!nfs_mount_state_error_delegation_lost(error
) &&
5780 ((error
== ETIMEDOUT
) || nfs_mount_state_error_should_restart(error
))) {
5781 NP(nofp
->nof_np
, "nfs delegated lock claim error %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
5782 if ((error
== ETIMEDOUT
) && ((nmp
= NFSTONMP(nofp
->nof_np
))))
5783 nfs_need_reconnect(nmp
);
5787 /* delegated state lost (once held but now not claimable) */
5788 NP(nofp
->nof_np
, "nfs delegated state claim error %d, state lost, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
5791 * Any problems with the delegation probably indicates that we
5792 * should review/return all of our current delegation state.
5794 if ((nmp
= NFSTONMP(nofp
->nof_np
))) {
5795 nfs4_delegation_return_enqueue(nofp
->nof_np
);
5796 lck_mtx_lock(&nmp
->nm_lock
);
5797 nfs_need_recover(nmp
, NFSERR_EXPIRED
);
5798 lck_mtx_unlock(&nmp
->nm_lock
);
5801 /* revoke all open file state */
5802 nfs_revoke_open_state_for_node(nofp
->nof_np
);
5808 * Release all open state for the given node.
5811 nfs_release_open_state_for_node(nfsnode_t np
, int force
)
5813 struct nfsmount
*nmp
= NFSTONMP(np
);
5814 struct nfs_open_file
*nofp
;
5815 struct nfs_file_lock
*nflp
, *nextnflp
;
5817 /* drop held locks */
5818 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
5819 /* skip dead & blocked lock requests */
5820 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_DEAD
|NFS_FILE_LOCK_BLOCKED
))
5822 /* send an unlock if not a delegated lock */
5823 if (!force
&& nmp
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
))
5824 nmp
->nm_funcs
->nf_unlock_rpc(np
, nflp
->nfl_owner
, F_WRLCK
, nflp
->nfl_start
, nflp
->nfl_end
, R_RECOVER
,
5825 NULL
, nflp
->nfl_owner
->nlo_open_owner
->noo_cred
);
5826 /* kill/remove the lock */
5827 lck_mtx_lock(&np
->n_openlock
);
5828 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
5829 lck_mtx_lock(&nflp
->nfl_owner
->nlo_lock
);
5830 TAILQ_REMOVE(&nflp
->nfl_owner
->nlo_locks
, nflp
, nfl_lolink
);
5831 lck_mtx_unlock(&nflp
->nfl_owner
->nlo_lock
);
5832 if (nflp
->nfl_blockcnt
) {
5833 /* wake up anyone blocked on this lock */
5836 /* remove nflp from lock list and destroy */
5837 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
5838 nfs_file_lock_destroy(nflp
);
5840 lck_mtx_unlock(&np
->n_openlock
);
5843 lck_mtx_lock(&np
->n_openlock
);
5845 /* drop all opens */
5846 TAILQ_FOREACH(nofp
, &np
->n_opens
, nof_link
) {
5847 if (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
)
5849 /* mark open state as lost */
5850 lck_mtx_lock(&nofp
->nof_lock
);
5851 nofp
->nof_flags
&= ~NFS_OPEN_FILE_REOPEN
;
5852 nofp
->nof_flags
|= NFS_OPEN_FILE_LOST
;
5854 lck_mtx_unlock(&nofp
->nof_lock
);
5855 if (!force
&& nmp
&& (nmp
->nm_vers
>= NFS_VER4
))
5856 nfs4_close_rpc(np
, nofp
, NULL
, nofp
->nof_owner
->noo_cred
, R_RECOVER
);
5859 lck_mtx_unlock(&np
->n_openlock
);
5863 * State for a node has been lost, drop it, and revoke the node.
5864 * Attempt to return any state if possible in case the server
5865 * might somehow think we hold it.
5868 nfs_revoke_open_state_for_node(nfsnode_t np
)
5870 struct nfsmount
*nmp
;
5872 /* mark node as needing to be revoked */
5873 nfs_node_lock_force(np
);
5874 if (np
->n_flag
& NREVOKE
) /* already revoked? */
5876 NP(np
, "nfs_revoke_open_state_for_node(): already revoked");
5877 nfs_node_unlock(np
);
5880 np
->n_flag
|= NREVOKE
;
5881 nfs_node_unlock(np
);
5883 nfs_release_open_state_for_node(np
, 0);
5884 NP(np
, "nfs: state lost for %p 0x%x", np
, np
->n_flag
);
5886 /* mark mount as needing a revoke scan and have the socket thread do it. */
5887 if ((nmp
= NFSTONMP(np
))) {
5888 lck_mtx_lock(&nmp
->nm_lock
);
5889 nmp
->nm_state
|= NFSSTA_REVOKE
;
5890 nfs_mount_sock_thread_wake(nmp
);
5891 lck_mtx_unlock(&nmp
->nm_lock
);
5896 * Claim the delegated open combinations that each of this node's open files hold.
5899 nfs4_claim_delegated_state_for_node(nfsnode_t np
, int flags
)
5901 struct nfs_open_file
*nofp
;
5904 lck_mtx_lock(&np
->n_openlock
);
5906 /* walk the open file list looking for opens with delegated state to claim */
5908 TAILQ_FOREACH(nofp
, &np
->n_opens
, nof_link
) {
5909 if (!nofp
->nof_d_rw_drw
&& !nofp
->nof_d_w_drw
&& !nofp
->nof_d_r_drw
&&
5910 !nofp
->nof_d_rw_dw
&& !nofp
->nof_d_w_dw
&& !nofp
->nof_d_r_dw
&&
5911 !nofp
->nof_d_rw
&& !nofp
->nof_d_w
&& !nofp
->nof_d_r
)
5913 lck_mtx_unlock(&np
->n_openlock
);
5914 error
= nfs4_claim_delegated_state_for_open_file(nofp
, flags
);
5915 lck_mtx_lock(&np
->n_openlock
);
5921 lck_mtx_unlock(&np
->n_openlock
);
5927 * Mark a node as needed to have its delegation returned.
5928 * Queue it up on the delegation return queue.
5929 * Make sure the thread is running.
5932 nfs4_delegation_return_enqueue(nfsnode_t np
)
5934 struct nfsmount
*nmp
;
5937 if (nfs_mount_gone(nmp
))
5940 lck_mtx_lock(&np
->n_openlock
);
5941 np
->n_openflags
|= N_DELEG_RETURN
;
5942 lck_mtx_unlock(&np
->n_openlock
);
5944 lck_mtx_lock(&nmp
->nm_lock
);
5945 if (np
->n_dreturn
.tqe_next
== NFSNOLIST
)
5946 TAILQ_INSERT_TAIL(&nmp
->nm_dreturnq
, np
, n_dreturn
);
5947 nfs_mount_sock_thread_wake(nmp
);
5948 lck_mtx_unlock(&nmp
->nm_lock
);
5952 * return any delegation we may have for the given node
5955 nfs4_delegation_return(nfsnode_t np
, int flags
, thread_t thd
, kauth_cred_t cred
)
5957 struct nfsmount
*nmp
;
5959 nfs_stateid dstateid
;
5963 if (nfs_mount_gone(nmp
))
5966 /* first, make sure the node's marked for delegation return */
5967 lck_mtx_lock(&np
->n_openlock
);
5968 np
->n_openflags
|= (N_DELEG_RETURN
|N_DELEG_RETURNING
);
5969 lck_mtx_unlock(&np
->n_openlock
);
5971 /* make sure nobody else is using the delegation state */
5972 if ((error
= nfs_open_state_set_busy(np
, NULL
)))
5975 /* claim any delegated state */
5976 if ((error
= nfs4_claim_delegated_state_for_node(np
, flags
)))
5979 /* return the delegation */
5980 lck_mtx_lock(&np
->n_openlock
);
5981 dstateid
= np
->n_dstateid
;
5982 fh
.fh_len
= np
->n_fhsize
;
5983 bcopy(np
->n_fhp
, &fh
.fh_data
, fh
.fh_len
);
5984 lck_mtx_unlock(&np
->n_openlock
);
5985 error
= nfs4_delegreturn_rpc(NFSTONMP(np
), fh
.fh_data
, fh
.fh_len
, &dstateid
, flags
, thd
, cred
);
5986 /* assume delegation is gone for all errors except ETIMEDOUT, NFSERR_*MOVED */
5987 if ((error
!= ETIMEDOUT
) && (error
!= NFSERR_MOVED
) && (error
!= NFSERR_LEASE_MOVED
)) {
5988 lck_mtx_lock(&np
->n_openlock
);
5989 np
->n_openflags
&= ~N_DELEG_MASK
;
5990 lck_mtx_lock(&nmp
->nm_lock
);
5991 if (np
->n_dlink
.tqe_next
!= NFSNOLIST
) {
5992 TAILQ_REMOVE(&nmp
->nm_delegations
, np
, n_dlink
);
5993 np
->n_dlink
.tqe_next
= NFSNOLIST
;
5995 lck_mtx_unlock(&nmp
->nm_lock
);
5996 lck_mtx_unlock(&np
->n_openlock
);
6000 /* make sure it's no longer on the return queue and clear the return flags */
6001 lck_mtx_lock(&nmp
->nm_lock
);
6002 if (np
->n_dreturn
.tqe_next
!= NFSNOLIST
) {
6003 TAILQ_REMOVE(&nmp
->nm_dreturnq
, np
, n_dreturn
);
6004 np
->n_dreturn
.tqe_next
= NFSNOLIST
;
6006 lck_mtx_unlock(&nmp
->nm_lock
);
6007 lck_mtx_lock(&np
->n_openlock
);
6008 np
->n_openflags
&= ~(N_DELEG_RETURN
|N_DELEG_RETURNING
);
6009 lck_mtx_unlock(&np
->n_openlock
);
6012 NP(np
, "nfs4_delegation_return, error %d", error
);
6013 if (error
== ETIMEDOUT
)
6014 nfs_need_reconnect(nmp
);
6015 if (nfs_mount_state_error_should_restart(error
)) {
6016 /* make sure recovery happens */
6017 lck_mtx_lock(&nmp
->nm_lock
);
6018 nfs_need_recover(nmp
, nfs_mount_state_error_delegation_lost(error
) ? NFSERR_EXPIRED
: 0);
6019 lck_mtx_unlock(&nmp
->nm_lock
);
6023 nfs_open_state_clear_busy(np
);
6029 * RPC to return a delegation for a file handle
6032 nfs4_delegreturn_rpc(struct nfsmount
*nmp
, u_char
*fhp
, int fhlen
, struct nfs_stateid
*sid
, int flags
, thread_t thd
, kauth_cred_t cred
)
6034 int error
= 0, status
, numops
;
6036 struct nfsm_chain nmreq
, nmrep
;
6037 struct nfsreq_secinfo_args si
;
6039 NFSREQ_SECINFO_SET(&si
, NULL
, fhp
, fhlen
, NULL
, 0);
6040 nfsm_chain_null(&nmreq
);
6041 nfsm_chain_null(&nmrep
);
6043 // PUTFH, DELEGRETURN
6045 nfsm_chain_build_alloc_init(error
, &nmreq
, 16 * NFSX_UNSIGNED
);
6046 nfsm_chain_add_compound_header(error
, &nmreq
, "delegreturn", nmp
->nm_minor_vers
, numops
);
6048 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
6049 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, fhp
, fhlen
);
6051 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_DELEGRETURN
);
6052 nfsm_chain_add_stateid(error
, &nmreq
, sid
);
6053 nfsm_chain_build_done(error
, &nmreq
);
6054 nfsm_assert(error
, (numops
== 0), EPROTO
);
6056 error
= nfs_request2(NULL
, nmp
->nm_mountp
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, flags
, &nmrep
, &xid
, &status
);
6057 nfsm_chain_skip_tag(error
, &nmrep
);
6058 nfsm_chain_get_32(error
, &nmrep
, numops
);
6059 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
6060 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_DELEGRETURN
);
6062 nfsm_chain_cleanup(&nmreq
);
6063 nfsm_chain_cleanup(&nmrep
);
6070 * Just call nfs_bioread() to do the work.
6072 * Note: the exec code paths have a tendency to call VNOP_READ (and VNOP_MMAP)
6073 * without first calling VNOP_OPEN, so we make sure the file is open here.
6077 struct vnop_read_args
/* {
6078 struct vnodeop_desc *a_desc;
6082 vfs_context_t a_context;
6085 vnode_t vp
= ap
->a_vp
;
6086 vfs_context_t ctx
= ap
->a_context
;
6088 struct nfsmount
*nmp
;
6089 struct nfs_open_owner
*noop
;
6090 struct nfs_open_file
*nofp
;
6093 if (vnode_vtype(ap
->a_vp
) != VREG
)
6094 return (vnode_vtype(vp
) == VDIR
) ? EISDIR
: EPERM
;
6098 if (nfs_mount_gone(nmp
))
6100 if (np
->n_flag
& NREVOKE
)
6103 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1);
6107 error
= nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 1);
6108 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
6109 NP(np
, "nfs_vnop_read: LOST %d", kauth_cred_getuid(noop
->noo_cred
));
6112 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
6113 error
= nfs4_reopen(nofp
, vfs_context_thread(ctx
));
6119 nfs_open_owner_rele(noop
);
6123 * Since the read path is a hot path, if we already have
6124 * read access, lets go and try and do the read, without
6125 * busying the mount and open file node for this open owner.
6127 * N.B. This is inherently racy w.r.t. an execve using
6128 * an already open file, in that the read at the end of
6129 * this routine will be racing with a potential close.
6130 * The code below ultimately has the same problem. In practice
6131 * this does not seem to be an issue.
6133 if (nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_READ
) {
6134 nfs_open_owner_rele(noop
);
6137 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
6139 nfs_open_owner_rele(noop
);
6143 * If we don't have a file already open with the access we need (read) then
6144 * we need to open one. Otherwise we just co-opt an open. We might not already
6145 * have access because we're trying to read the first page of the
6148 error
= nfs_open_file_set_busy(nofp
, vfs_context_thread(ctx
));
6150 nfs_mount_state_in_use_end(nmp
, 0);
6151 nfs_open_owner_rele(noop
);
6154 if (!(nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_READ
)) {
6155 /* we don't have the file open, so open it for read access if we're not denied */
6156 if (nofp
->nof_flags
& NFS_OPEN_FILE_NEEDCLOSE
) {
6157 NP(np
, "nfs_vnop_read: File already needs close access: 0x%x, cred: %d thread: %lld",
6158 nofp
->nof_access
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
), thread_tid(vfs_context_thread(ctx
)));
6160 if (nofp
->nof_deny
& NFS_OPEN_SHARE_DENY_READ
) {
6161 nfs_open_file_clear_busy(nofp
);
6162 nfs_mount_state_in_use_end(nmp
, 0);
6163 nfs_open_owner_rele(noop
);
6166 if (np
->n_flag
& NREVOKE
) {
6168 nfs_open_file_clear_busy(nofp
);
6169 nfs_mount_state_in_use_end(nmp
, 0);
6170 nfs_open_owner_rele(noop
);
6173 if (nmp
->nm_vers
< NFS_VER4
) {
6174 /* NFS v2/v3 opens are always allowed - so just add it. */
6175 nfs_open_file_add_open(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, 0);
6177 error
= nfs4_open(np
, nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, ctx
);
6180 nofp
->nof_flags
|= NFS_OPEN_FILE_NEEDCLOSE
;
6183 nfs_open_file_clear_busy(nofp
);
6184 if (nfs_mount_state_in_use_end(nmp
, error
)) {
6188 nfs_open_owner_rele(noop
);
6192 return (nfs_bioread(VTONFS(ap
->a_vp
), ap
->a_uio
, ap
->a_ioflag
, ap
->a_context
));
6196 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6197 * Files are created using the NFSv4 OPEN RPC. So we must open the
6198 * file to create it and then close it.
6202 struct vnop_create_args
/* {
6203 struct vnodeop_desc *a_desc;
6206 struct componentname *a_cnp;
6207 struct vnode_attr *a_vap;
6208 vfs_context_t a_context;
6211 vfs_context_t ctx
= ap
->a_context
;
6212 struct componentname
*cnp
= ap
->a_cnp
;
6213 struct vnode_attr
*vap
= ap
->a_vap
;
6214 vnode_t dvp
= ap
->a_dvp
;
6215 vnode_t
*vpp
= ap
->a_vpp
;
6216 struct nfsmount
*nmp
;
6218 int error
= 0, busyerror
= 0, accessMode
, denyMode
;
6219 struct nfs_open_owner
*noop
= NULL
;
6220 struct nfs_open_file
*newnofp
= NULL
, *nofp
= NULL
;
6223 if (nfs_mount_gone(nmp
))
6227 nfs_avoid_needless_id_setting_on_create(VTONFS(dvp
), vap
, ctx
);
6229 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1);
6234 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
6236 nfs_open_owner_rele(noop
);
6240 /* grab a provisional, nodeless open file */
6241 error
= nfs_open_file_find(NULL
, noop
, &newnofp
, 0, 0, 1);
6242 if (!error
&& (newnofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
6243 printf("nfs_vnop_create: LOST\n");
6246 if (!error
&& (newnofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
6247 /* This shouldn't happen given that this is a new, nodeless nofp */
6248 nfs_mount_state_in_use_end(nmp
, 0);
6249 error
= nfs4_reopen(newnofp
, vfs_context_thread(ctx
));
6250 nfs_open_file_destroy(newnofp
);
6256 error
= nfs_open_file_set_busy(newnofp
, vfs_context_thread(ctx
));
6259 nfs_open_file_destroy(newnofp
);
6265 * We're just trying to create the file.
6266 * We'll create/open it RW, and set NFS_OPEN_FILE_CREATE.
6268 accessMode
= NFS_OPEN_SHARE_ACCESS_BOTH
;
6269 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
6271 /* Do the open/create */
6272 error
= nfs4_open_rpc(newnofp
, ctx
, cnp
, vap
, dvp
, vpp
, NFS_OPEN_CREATE
, accessMode
, denyMode
);
6273 if ((error
== EACCES
) && vap
&& !(vap
->va_vaflags
& VA_EXCLUSIVE
) &&
6274 VATTR_IS_ACTIVE(vap
, va_mode
) && !(vap
->va_mode
& S_IWUSR
)) {
6276 * Hmm... it looks like we may have a situation where the request was
6277 * retransmitted because we didn't get the first response which successfully
6278 * created/opened the file and then the second time we were denied the open
6279 * because the mode the file was created with doesn't allow write access.
6281 * We'll try to work around this by temporarily updating the mode and
6282 * retrying the open.
6284 struct vnode_attr vattr
;
6286 /* first make sure it's there */
6287 int error2
= nfs_lookitup(VTONFS(dvp
), cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
, &np
);
6288 if (!error2
&& np
) {
6289 nfs_node_unlock(np
);
6291 if (vnode_vtype(NFSTOV(np
)) == VREG
) {
6293 VATTR_SET(&vattr
, va_mode
, (vap
->va_mode
| S_IWUSR
));
6294 if (!nfs4_setattr_rpc(np
, &vattr
, ctx
)) {
6295 error2
= nfs4_open_rpc(newnofp
, ctx
, cnp
, NULL
, dvp
, vpp
, NFS_OPEN_NOCREATE
, accessMode
, denyMode
);
6297 VATTR_SET(&vattr
, va_mode
, vap
->va_mode
);
6298 nfs4_setattr_rpc(np
, &vattr
, ctx
);
6309 if (!error
&& !*vpp
) {
6310 printf("nfs4_open_rpc returned without a node?\n");
6311 /* Hmmm... with no node, we have no filehandle and can't close it */
6315 /* need to cleanup our temporary nofp */
6316 nfs_open_file_clear_busy(newnofp
);
6317 nfs_open_file_destroy(newnofp
);
6321 /* After we have a node, add our open file struct to the node */
6323 nfs_open_file_add_open(newnofp
, accessMode
, denyMode
, 0);
6325 error
= nfs_open_file_find_internal(np
, noop
, &nofp
, 0, 0, 0);
6327 /* This shouldn't happen, because we passed in a new nofp to use. */
6328 printf("nfs_open_file_find_internal failed! %d\n", error
);
6330 } else if (nofp
!= newnofp
) {
6332 * Hmm... an open file struct already exists.
6333 * Mark the existing one busy and merge our open into it.
6334 * Then destroy the one we created.
6335 * Note: there's no chance of an open confict because the
6336 * open has already been granted.
6338 busyerror
= nfs_open_file_set_busy(nofp
, NULL
);
6339 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, 0);
6340 nofp
->nof_stateid
= newnofp
->nof_stateid
;
6341 if (newnofp
->nof_flags
& NFS_OPEN_FILE_POSIXLOCK
)
6342 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
6343 nfs_open_file_clear_busy(newnofp
);
6344 nfs_open_file_destroy(newnofp
);
6347 /* mark the node as holding a create-initiated open */
6348 nofp
->nof_flags
|= NFS_OPEN_FILE_CREATE
;
6349 nofp
->nof_creator
= current_thread();
6351 if (nofp
&& !busyerror
)
6352 nfs_open_file_clear_busy(nofp
);
6353 if (nfs_mount_state_in_use_end(nmp
, error
)) {
6354 nofp
= newnofp
= NULL
;
6359 nfs_open_owner_rele(noop
);
6364 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6370 struct componentname
*cnp
,
6371 struct vnode_attr
*vap
,
6376 struct nfsmount
*nmp
;
6377 struct nfs_vattr nvattr
;
6378 int error
= 0, create_error
= EIO
, lockerror
= ENOENT
, busyerror
= ENOENT
, status
;
6379 int nfsvers
, namedattrs
, numops
;
6380 u_int64_t xid
, savedxid
= 0;
6381 nfsnode_t np
= NULL
;
6382 vnode_t newvp
= NULL
;
6383 struct nfsm_chain nmreq
, nmrep
;
6384 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
6388 struct nfsreq rq
, *req
= &rq
;
6389 struct nfs_dulookup dul
;
6390 struct nfsreq_secinfo_args si
;
6392 nmp
= NFSTONMP(dnp
);
6393 if (nfs_mount_gone(nmp
))
6395 nfsvers
= nmp
->nm_vers
;
6396 namedattrs
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
);
6397 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
6400 sd
.specdata1
= sd
.specdata2
= 0;
6409 if (!VATTR_IS_ACTIVE(vap
, va_rdev
))
6411 sd
.specdata1
= major(vap
->va_rdev
);
6412 sd
.specdata2
= minor(vap
->va_rdev
);
6425 nfs_avoid_needless_id_setting_on_create(dnp
, vap
, ctx
);
6427 error
= busyerror
= nfs_node_set_busy(dnp
, vfs_context_thread(ctx
));
6429 nfs_dulookup_init(&dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
);
6431 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
6432 NVATTR_INIT(&nvattr
);
6433 nfsm_chain_null(&nmreq
);
6434 nfsm_chain_null(&nmrep
);
6436 // PUTFH, SAVEFH, CREATE, GETATTR(FH), RESTOREFH, GETATTR
6438 nfsm_chain_build_alloc_init(error
, &nmreq
, 66 * NFSX_UNSIGNED
);
6439 nfsm_chain_add_compound_header(error
, &nmreq
, tag
, nmp
->nm_minor_vers
, numops
);
6441 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
6442 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
6444 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
6446 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_CREATE
);
6447 nfsm_chain_add_32(error
, &nmreq
, type
);
6448 if (type
== NFLNK
) {
6449 nfsm_chain_add_name(error
, &nmreq
, link
, strlen(link
), nmp
);
6450 } else if ((type
== NFBLK
) || (type
== NFCHR
)) {
6451 nfsm_chain_add_32(error
, &nmreq
, sd
.specdata1
);
6452 nfsm_chain_add_32(error
, &nmreq
, sd
.specdata2
);
6454 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
6455 nfsm_chain_add_fattr4(error
, &nmreq
, vap
, nmp
);
6457 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
6458 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
6459 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
6460 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, NULL
);
6462 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
6464 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
6465 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
6466 nfsm_chain_build_done(error
, &nmreq
);
6467 nfsm_assert(error
, (numops
== 0), EPROTO
);
6470 error
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
6471 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &si
, 0, NULL
, &req
);
6474 nfs_dulookup_start(&dul
, dnp
, ctx
);
6475 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
6478 if ((lockerror
= nfs_node_lock(dnp
)))
6480 nfsm_chain_skip_tag(error
, &nmrep
);
6481 nfsm_chain_get_32(error
, &nmrep
, numops
);
6482 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
6483 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
6485 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_CREATE
);
6486 nfsm_chain_check_change_info(error
, &nmrep
, dnp
);
6487 bmlen
= NFS_ATTR_BITMAP_LEN
;
6488 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
6489 /* At this point if we have no error, the object was created. */
6490 /* if we don't get attributes, then we should lookitup. */
6491 create_error
= error
;
6493 nfs_vattr_set_supported(bitmap
, vap
);
6494 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
6496 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
6498 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
6499 printf("nfs: create/%s didn't return filehandle? %s\n", tag
, cnp
->cn_nameptr
);
6503 /* directory attributes: if we don't get them, make sure to invalidate */
6504 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
6505 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
6507 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
6509 NATTRINVALIDATE(dnp
);
6512 nfsm_chain_cleanup(&nmreq
);
6513 nfsm_chain_cleanup(&nmrep
);
6516 if (!create_error
&& (dnp
->n_flag
& NNEGNCENTRIES
)) {
6517 dnp
->n_flag
&= ~NNEGNCENTRIES
;
6518 cache_purge_negatives(NFSTOV(dnp
));
6520 dnp
->n_flag
|= NMODIFIED
;
6521 nfs_node_unlock(dnp
);
6522 /* nfs_getattr() will check changed and purge caches */
6523 nfs_getattr(dnp
, NULL
, ctx
, NGA_CACHED
);
6526 if (!error
&& fh
.fh_len
) {
6527 /* create the vnode with the filehandle and attributes */
6529 error
= nfs_nget(NFSTOMP(dnp
), dnp
, cnp
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, NG_MAKEENTRY
, &np
);
6533 NVATTR_CLEANUP(&nvattr
);
6536 nfs_dulookup_finish(&dul
, dnp
, ctx
);
6539 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
6540 * if we can succeed in looking up the object.
6542 if ((create_error
== EEXIST
) || (!create_error
&& !newvp
)) {
6543 error
= nfs_lookitup(dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
, &np
);
6546 if (vnode_vtype(newvp
) != nfstov_type(type
, nfsvers
))
6551 nfs_node_clear_busy(dnp
);
6554 nfs_node_unlock(np
);
6558 nfs_node_unlock(np
);
6566 struct vnop_mknod_args
/* {
6567 struct vnodeop_desc *a_desc;
6570 struct componentname *a_cnp;
6571 struct vnode_attr *a_vap;
6572 vfs_context_t a_context;
6575 nfsnode_t np
= NULL
;
6576 struct nfsmount
*nmp
;
6579 nmp
= VTONMP(ap
->a_dvp
);
6580 if (nfs_mount_gone(nmp
))
6583 if (!VATTR_IS_ACTIVE(ap
->a_vap
, va_type
))
6585 switch (ap
->a_vap
->va_type
) {
6595 error
= nfs4_create_rpc(ap
->a_context
, VTONFS(ap
->a_dvp
), ap
->a_cnp
, ap
->a_vap
,
6596 vtonfs_type(ap
->a_vap
->va_type
, nmp
->nm_vers
), NULL
, &np
);
6598 *ap
->a_vpp
= NFSTOV(np
);
6604 struct vnop_mkdir_args
/* {
6605 struct vnodeop_desc *a_desc;
6608 struct componentname *a_cnp;
6609 struct vnode_attr *a_vap;
6610 vfs_context_t a_context;
6613 nfsnode_t np
= NULL
;
6616 error
= nfs4_create_rpc(ap
->a_context
, VTONFS(ap
->a_dvp
), ap
->a_cnp
, ap
->a_vap
,
6619 *ap
->a_vpp
= NFSTOV(np
);
6625 struct vnop_symlink_args
/* {
6626 struct vnodeop_desc *a_desc;
6629 struct componentname *a_cnp;
6630 struct vnode_attr *a_vap;
6632 vfs_context_t a_context;
6635 nfsnode_t np
= NULL
;
6638 error
= nfs4_create_rpc(ap
->a_context
, VTONFS(ap
->a_dvp
), ap
->a_cnp
, ap
->a_vap
,
6639 NFLNK
, ap
->a_target
, &np
);
6641 *ap
->a_vpp
= NFSTOV(np
);
6647 struct vnop_link_args
/* {
6648 struct vnodeop_desc *a_desc;
6651 struct componentname *a_cnp;
6652 vfs_context_t a_context;
6655 vfs_context_t ctx
= ap
->a_context
;
6656 vnode_t vp
= ap
->a_vp
;
6657 vnode_t tdvp
= ap
->a_tdvp
;
6658 struct componentname
*cnp
= ap
->a_cnp
;
6659 int error
= 0, lockerror
= ENOENT
, status
;
6660 struct nfsmount
*nmp
;
6661 nfsnode_t np
= VTONFS(vp
);
6662 nfsnode_t tdnp
= VTONFS(tdvp
);
6663 int nfsvers
, numops
;
6664 u_int64_t xid
, savedxid
;
6665 struct nfsm_chain nmreq
, nmrep
;
6666 struct nfsreq_secinfo_args si
;
6668 if (vnode_mount(vp
) != vnode_mount(tdvp
))
6672 if (nfs_mount_gone(nmp
))
6674 nfsvers
= nmp
->nm_vers
;
6675 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
6677 if (tdnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
6681 * Push all writes to the server, so that the attribute cache
6682 * doesn't get "out of sync" with the server.
6683 * XXX There should be a better way!
6685 nfs_flush(np
, MNT_WAIT
, vfs_context_thread(ctx
), V_IGNORE_WRITEERR
);
6687 if ((error
= nfs_node_set_busy2(tdnp
, np
, vfs_context_thread(ctx
))))
6690 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
6691 nfsm_chain_null(&nmreq
);
6692 nfsm_chain_null(&nmrep
);
6694 // PUTFH(SOURCE), SAVEFH, PUTFH(DIR), LINK, GETATTR(DIR), RESTOREFH, GETATTR
6696 nfsm_chain_build_alloc_init(error
, &nmreq
, 29 * NFSX_UNSIGNED
+ cnp
->cn_namelen
);
6697 nfsm_chain_add_compound_header(error
, &nmreq
, "link", nmp
->nm_minor_vers
, numops
);
6699 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
6700 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
6702 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
6704 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
6705 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, tdnp
->n_fhp
, tdnp
->n_fhsize
);
6707 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LINK
);
6708 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
6710 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
6711 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, tdnp
);
6713 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
6715 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
6716 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
6717 nfsm_chain_build_done(error
, &nmreq
);
6718 nfsm_assert(error
, (numops
== 0), EPROTO
);
6720 error
= nfs_request(tdnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
6722 if ((lockerror
= nfs_node_lock2(tdnp
, np
))) {
6726 nfsm_chain_skip_tag(error
, &nmrep
);
6727 nfsm_chain_get_32(error
, &nmrep
, numops
);
6728 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
6729 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
6730 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
6731 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LINK
);
6732 nfsm_chain_check_change_info(error
, &nmrep
, tdnp
);
6733 /* directory attributes: if we don't get them, make sure to invalidate */
6734 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
6736 nfsm_chain_loadattr(error
, &nmrep
, tdnp
, nfsvers
, &xid
);
6738 NATTRINVALIDATE(tdnp
);
6739 /* link attributes: if we don't get them, make sure to invalidate */
6740 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
6741 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
6743 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
6745 NATTRINVALIDATE(np
);
6747 nfsm_chain_cleanup(&nmreq
);
6748 nfsm_chain_cleanup(&nmrep
);
6750 tdnp
->n_flag
|= NMODIFIED
;
6751 /* Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. */
6752 if (error
== EEXIST
)
6754 if (!error
&& (tdnp
->n_flag
& NNEGNCENTRIES
)) {
6755 tdnp
->n_flag
&= ~NNEGNCENTRIES
;
6756 cache_purge_negatives(tdvp
);
6759 nfs_node_unlock2(tdnp
, np
);
6760 nfs_node_clear_busy2(tdnp
, np
);
6766 struct vnop_rmdir_args
/* {
6767 struct vnodeop_desc *a_desc;
6770 struct componentname *a_cnp;
6771 vfs_context_t a_context;
6774 vfs_context_t ctx
= ap
->a_context
;
6775 vnode_t vp
= ap
->a_vp
;
6776 vnode_t dvp
= ap
->a_dvp
;
6777 struct componentname
*cnp
= ap
->a_cnp
;
6778 struct nfsmount
*nmp
;
6779 int error
= 0, namedattrs
;
6780 nfsnode_t np
= VTONFS(vp
);
6781 nfsnode_t dnp
= VTONFS(dvp
);
6782 struct nfs_dulookup dul
;
6784 if (vnode_vtype(vp
) != VDIR
)
6787 nmp
= NFSTONMP(dnp
);
6788 if (nfs_mount_gone(nmp
))
6790 namedattrs
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
);
6792 if ((error
= nfs_node_set_busy2(dnp
, np
, vfs_context_thread(ctx
))))
6796 nfs_dulookup_init(&dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
);
6797 nfs_dulookup_start(&dul
, dnp
, ctx
);
6800 error
= nfs4_remove_rpc(dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
,
6801 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
6803 nfs_name_cache_purge(dnp
, np
, cnp
, ctx
);
6804 /* nfs_getattr() will check changed and purge caches */
6805 nfs_getattr(dnp
, NULL
, ctx
, NGA_CACHED
);
6807 nfs_dulookup_finish(&dul
, dnp
, ctx
);
6808 nfs_node_clear_busy2(dnp
, np
);
6811 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
6813 if (error
== ENOENT
)
6817 * remove nfsnode from hash now so we can't accidentally find it
6818 * again if another object gets created with the same filehandle
6819 * before this vnode gets reclaimed
6821 lck_mtx_lock(nfs_node_hash_mutex
);
6822 if (np
->n_hflag
& NHHASHED
) {
6823 LIST_REMOVE(np
, n_hash
);
6824 np
->n_hflag
&= ~NHHASHED
;
6825 FSDBG(266, 0, np
, np
->n_flag
, 0xb1eb1e);
6827 lck_mtx_unlock(nfs_node_hash_mutex
);
6833 * NFSv4 Named Attributes
6835 * Both the extended attributes interface and the named streams interface
6836 * are backed by NFSv4 named attributes. The implementations for both use
6837 * a common set of routines in an attempt to reduce code duplication, to
6838 * increase efficiency, to increase caching of both names and data, and to
6839 * confine the complexity.
6841 * Each NFS node caches its named attribute directory's file handle.
6842 * The directory nodes for the named attribute directories are handled
6843 * exactly like regular directories (with a couple minor exceptions).
6844 * Named attribute nodes are also treated as much like regular files as
6847 * Most of the heavy lifting is done by nfs4_named_attr_get().
6851 * Get the given node's attribute directory node.
6852 * If !fetch, then only return a cached node.
6853 * Otherwise, we will attempt to fetch the node from the server.
6854 * (Note: the node should be marked busy.)
6857 nfs4_named_attr_dir_get(nfsnode_t np
, int fetch
, vfs_context_t ctx
)
6859 nfsnode_t adnp
= NULL
;
6860 struct nfsmount
*nmp
;
6861 int error
= 0, status
, numops
;
6862 struct nfsm_chain nmreq
, nmrep
;
6864 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
6866 struct nfs_vattr nvattr
;
6867 struct componentname cn
;
6868 struct nfsreq rq
, *req
= &rq
;
6869 struct nfsreq_secinfo_args si
;
6872 if (nfs_mount_gone(nmp
))
6874 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)
6877 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
6878 NVATTR_INIT(&nvattr
);
6879 nfsm_chain_null(&nmreq
);
6880 nfsm_chain_null(&nmrep
);
6882 bzero(&cn
, sizeof(cn
));
6883 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER
, const, char *); /* "/..namedfork/" */
6884 cn
.cn_namelen
= strlen(_PATH_FORKSPECIFIER
);
6885 cn
.cn_nameiop
= LOOKUP
;
6887 if (np
->n_attrdirfh
) {
6888 // XXX can't set parent correctly (to np) yet
6889 error
= nfs_nget(nmp
->nm_mountp
, NULL
, &cn
, np
->n_attrdirfh
+1, *np
->n_attrdirfh
,
6890 NULL
, NULL
, RPCAUTH_UNKNOWN
, NG_NOCREATE
, &adnp
);
6899 // PUTFH, OPENATTR, GETATTR
6901 nfsm_chain_build_alloc_init(error
, &nmreq
, 22 * NFSX_UNSIGNED
);
6902 nfsm_chain_add_compound_header(error
, &nmreq
, "openattr", nmp
->nm_minor_vers
, numops
);
6904 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
6905 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, np
->n_fhp
, np
->n_fhsize
);
6907 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPENATTR
);
6908 nfsm_chain_add_32(error
, &nmreq
, 0);
6910 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
6911 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
6912 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
6913 nfsm_chain_add_bitmap_masked(error
, &nmreq
, bitmap
,
6914 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
6915 nfsm_chain_build_done(error
, &nmreq
);
6916 nfsm_assert(error
, (numops
== 0), EPROTO
);
6918 error
= nfs_request_async(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
6919 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &si
, 0, NULL
, &req
);
6921 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
6923 nfsm_chain_skip_tag(error
, &nmrep
);
6924 nfsm_chain_get_32(error
, &nmrep
, numops
);
6925 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
6926 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPENATTR
);
6927 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
6929 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
6931 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
) || !fh
.fh_len
) {
6935 if (!np
->n_attrdirfh
|| (*np
->n_attrdirfh
!= fh
.fh_len
)) {
6936 /* (re)allocate attrdir fh buffer */
6937 if (np
->n_attrdirfh
)
6938 FREE(np
->n_attrdirfh
, M_TEMP
);
6939 MALLOC(np
->n_attrdirfh
, u_char
*, fh
.fh_len
+1, M_TEMP
, M_WAITOK
);
6941 if (!np
->n_attrdirfh
) {
6945 /* cache the attrdir fh in the node */
6946 *np
->n_attrdirfh
= fh
.fh_len
;
6947 bcopy(fh
.fh_data
, np
->n_attrdirfh
+1, fh
.fh_len
);
6948 /* create node for attrdir */
6949 // XXX can't set parent correctly (to np) yet
6950 error
= nfs_nget(NFSTOMP(np
), NULL
, &cn
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, 0, &adnp
);
6952 NVATTR_CLEANUP(&nvattr
);
6953 nfsm_chain_cleanup(&nmreq
);
6954 nfsm_chain_cleanup(&nmrep
);
6957 /* sanity check that this node is an attribute directory */
6958 if (adnp
->n_vattr
.nva_type
!= VDIR
)
6960 if (!(adnp
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
))
6962 nfs_node_unlock(adnp
);
6964 vnode_put(NFSTOV(adnp
));
6966 return (error
? NULL
: adnp
);
6970 * Get the given node's named attribute node for the name given.
6972 * In an effort to increase the performance of named attribute access, we try
6973 * to reduce server requests by doing the following:
6975 * - cache the node's named attribute directory file handle in the node
6976 * - maintain a directory vnode for the attribute directory
6977 * - use name cache entries (positive and negative) to speed up lookups
6978 * - optionally open the named attribute (with the given accessMode) in the same RPC
6979 * - combine attribute directory retrieval with the lookup/open RPC
6980 * - optionally prefetch the named attribute's first block of data in the same RPC
6982 * Also, in an attempt to reduce the number of copies/variations of this code,
6983 * parts of the RPC building/processing code are conditionalized on what is
6984 * needed for any particular request (openattr, lookup vs. open, read).
6986 * Note that because we may not have the attribute directory node when we start
6987 * the lookup/open, we lock both the node and the attribute directory node.
6990 #define NFS_GET_NAMED_ATTR_CREATE 0x1
6991 #define NFS_GET_NAMED_ATTR_CREATE_GUARDED 0x2
6992 #define NFS_GET_NAMED_ATTR_TRUNCATE 0x4
6993 #define NFS_GET_NAMED_ATTR_PREFETCH 0x8
6996 nfs4_named_attr_get(
6998 struct componentname
*cnp
,
6999 uint32_t accessMode
,
7003 struct nfs_open_file
**nofpp
)
7005 struct nfsmount
*nmp
;
7006 int error
= 0, open_error
= EIO
;
7007 int inuse
= 0, adlockerror
= ENOENT
, busyerror
= ENOENT
, adbusyerror
= ENOENT
, nofpbusyerror
= ENOENT
;
7008 int create
, guarded
, prefetch
, truncate
, noopbusy
= 0;
7009 int open
, status
, numops
, hadattrdir
, negnamecache
;
7010 struct nfs_vattr nvattr
;
7011 struct vnode_attr vattr
;
7012 nfsnode_t adnp
= NULL
, anp
= NULL
;
7014 u_int64_t xid
, savedxid
= 0;
7015 struct nfsm_chain nmreq
, nmrep
;
7016 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
7017 uint32_t denyMode
, rflags
, delegation
, recall
, eof
, rlen
, retlen
;
7018 nfs_stateid stateid
, dstateid
;
7020 struct nfs_open_owner
*noop
= NULL
;
7021 struct nfs_open_file
*newnofp
= NULL
, *nofp
= NULL
;
7022 struct vnop_access_args naa
;
7027 uint32_t ace_type
, ace_flags
, ace_mask
, len
, slen
;
7028 struct kauth_ace ace
;
7029 struct nfsreq rq
, *req
= &rq
;
7030 struct nfsreq_secinfo_args si
;
7034 rflags
= delegation
= recall
= eof
= rlen
= retlen
= 0;
7037 slen
= sizeof(sbuf
);
7040 if (nfs_mount_gone(nmp
))
7042 NVATTR_INIT(&nvattr
);
7043 negnamecache
= !NMFLAG(nmp
, NONEGNAMECACHE
);
7044 thd
= vfs_context_thread(ctx
);
7045 cred
= vfs_context_ucred(ctx
);
7046 create
= (flags
& NFS_GET_NAMED_ATTR_CREATE
) ? NFS_OPEN_CREATE
: NFS_OPEN_NOCREATE
;
7047 guarded
= (flags
& NFS_GET_NAMED_ATTR_CREATE_GUARDED
) ? NFS_CREATE_GUARDED
: NFS_CREATE_UNCHECKED
;
7048 truncate
= (flags
& NFS_GET_NAMED_ATTR_TRUNCATE
);
7049 prefetch
= (flags
& NFS_GET_NAMED_ATTR_PREFETCH
);
7052 error
= nfs_getattr(np
, &nvattr
, ctx
, NGA_CACHED
);
7055 if (NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_NAMED_ATTR
) &&
7056 !(nvattr
.nva_flags
& NFS_FFLAG_HAS_NAMED_ATTRS
))
7058 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_NONE
) {
7059 /* shouldn't happen... but just be safe */
7060 printf("nfs4_named_attr_get: create with no access %s\n", cnp
->cn_nameptr
);
7061 accessMode
= NFS_OPEN_SHARE_ACCESS_READ
;
7063 open
= (accessMode
!= NFS_OPEN_SHARE_ACCESS_NONE
);
7066 * We're trying to open the file.
7067 * We'll create/open it with the given access mode,
7068 * and set NFS_OPEN_FILE_CREATE.
7070 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
7071 if (prefetch
&& guarded
)
7072 prefetch
= 0; /* no sense prefetching data that can't be there */
7074 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1);
7079 if ((error
= busyerror
= nfs_node_set_busy(np
, vfs_context_thread(ctx
))))
7082 adnp
= nfs4_named_attr_dir_get(np
, 0, ctx
);
7083 hadattrdir
= (adnp
!= NULL
);
7086 /* use the special state ID because we don't have a real one to send */
7087 stateid
.seqid
= stateid
.other
[0] = stateid
.other
[1] = stateid
.other
[2] = 0;
7088 rlen
= MIN(nmp
->nm_rsize
, nmp
->nm_biosize
);
7090 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
7091 nfsm_chain_null(&nmreq
);
7092 nfsm_chain_null(&nmrep
);
7095 if ((error
= adbusyerror
= nfs_node_set_busy(adnp
, vfs_context_thread(ctx
))))
7097 /* nfs_getattr() will check changed and purge caches */
7098 error
= nfs_getattr(adnp
, NULL
, ctx
, NGA_CACHED
);
7100 error
= cache_lookup(NFSTOV(adnp
), &avp
, cnp
);
7103 /* negative cache entry */
7107 /* try dir buf cache lookup */
7108 error
= nfs_dir_buf_cache_lookup(adnp
, &anp
, cnp
, ctx
, 0);
7109 if (!error
&& anp
) {
7110 /* dir buf cache hit */
7114 if (error
!= -1) /* cache miss */
7118 /* cache hit, not really an error */
7119 OSAddAtomic64(1, &nfsstats
.lookupcache_hits
);
7121 *anpp
= anp
= VTONFS(avp
);
7123 nfs_node_clear_busy(adnp
);
7124 adbusyerror
= ENOENT
;
7126 /* check for directory access */
7127 naa
.a_desc
= &vnop_access_desc
;
7128 naa
.a_vp
= NFSTOV(adnp
);
7129 naa
.a_action
= KAUTH_VNODE_SEARCH
;
7130 naa
.a_context
= ctx
;
7132 /* compute actual success/failure based on accessibility */
7133 error
= nfs_vnop_access(&naa
);
7136 /* we either found it, or hit an error */
7137 if (!error
&& guarded
) {
7138 /* found cached entry but told not to use it */
7140 vnode_put(NFSTOV(anp
));
7143 /* we're done if error or we don't need to open */
7146 /* no error and we need to open... */
7152 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
7154 nfs_open_owner_rele(noop
);
7160 /* grab an open file - possibly provisional/nodeless if cache_lookup() failed */
7161 error
= nfs_open_file_find(anp
, noop
, &newnofp
, 0, 0, 1);
7162 if (!error
&& (newnofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
7163 printf("nfs4_named_attr_get: LOST %d %s\n", kauth_cred_getuid(noop
->noo_cred
), cnp
->cn_nameptr
);
7166 if (!error
&& (newnofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
7167 nfs_mount_state_in_use_end(nmp
, 0);
7168 error
= nfs4_reopen(newnofp
, vfs_context_thread(ctx
));
7169 nfs_open_file_destroy(newnofp
);
7175 error
= nfs_open_file_set_busy(newnofp
, vfs_context_thread(ctx
));
7178 nfs_open_file_destroy(newnofp
);
7184 * We already have the node. So we just need to open
7185 * it - which we may be able to do with a delegation.
7187 open_error
= error
= nfs4_open(anp
, newnofp
, accessMode
, denyMode
, ctx
);
7189 /* open succeeded, so our open file is no longer temporary */
7201 * We either don't have the attrdir or we didn't find the attribute
7202 * in the name cache, so we need to talk to the server.
7204 * If we don't have the attrdir, we'll need to ask the server for that too.
7205 * If the caller is requesting that the attribute be created, we need to
7206 * make sure the attrdir is created.
7207 * The caller may also request that the first block of an existing attribute
7208 * be retrieved at the same time.
7212 /* need to mark the open owner busy during the RPC */
7213 if ((error
= nfs_open_owner_set_busy(noop
, thd
)))
7219 * We'd like to get updated post-open/lookup attributes for the
7220 * directory and we may also want to prefetch some data via READ.
7221 * We'd like the READ results to be last so that we can leave the
7222 * data in the mbufs until the end.
7224 * At a minimum we're sending: PUTFH, LOOKUP/OPEN, GETATTR, PUTFH, GETATTR
7228 numops
+= 3; // also sending: OPENATTR, GETATTR, OPENATTR
7230 numops
+= 4; // also sending: SAVEFH, RESTOREFH, NVERIFY, READ
7231 nfsm_chain_build_alloc_init(error
, &nmreq
, 64 * NFSX_UNSIGNED
+ cnp
->cn_namelen
);
7232 nfsm_chain_add_compound_header(error
, &nmreq
, "getnamedattr", nmp
->nm_minor_vers
, numops
);
7235 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7236 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, adnp
->n_fhp
, adnp
->n_fhsize
);
7239 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7240 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, np
->n_fhp
, np
->n_fhsize
);
7242 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPENATTR
);
7243 nfsm_chain_add_32(error
, &nmreq
, create
? 1 : 0);
7245 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7246 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
7247 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
7248 nfsm_chain_add_bitmap_masked(error
, &nmreq
, bitmap
,
7249 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
7253 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
7254 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
7255 nfsm_chain_add_32(error
, &nmreq
, accessMode
);
7256 nfsm_chain_add_32(error
, &nmreq
, denyMode
);
7257 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
);
7258 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
7259 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
));
7260 nfsm_chain_add_32(error
, &nmreq
, create
);
7262 nfsm_chain_add_32(error
, &nmreq
, guarded
);
7265 VATTR_SET(&vattr
, va_data_size
, 0);
7266 nfsm_chain_add_fattr4(error
, &nmreq
, &vattr
, nmp
);
7268 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_NULL
);
7269 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
7272 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOOKUP
);
7273 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
7276 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7277 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
7278 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
7279 nfsm_chain_add_bitmap_masked(error
, &nmreq
, bitmap
,
7280 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
7283 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
7287 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7288 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, adnp
->n_fhp
, adnp
->n_fhsize
);
7291 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7292 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, np
->n_fhp
, np
->n_fhsize
);
7294 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPENATTR
);
7295 nfsm_chain_add_32(error
, &nmreq
, 0);
7298 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7299 nfsm_chain_add_bitmap_masked(error
, &nmreq
, nfs_getattr_bitmap
,
7300 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
7303 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
7305 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_NVERIFY
);
7307 VATTR_SET(&vattr
, va_data_size
, 0);
7308 nfsm_chain_add_fattr4(error
, &nmreq
, &vattr
, nmp
);
7310 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READ
);
7311 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
7312 nfsm_chain_add_64(error
, &nmreq
, 0);
7313 nfsm_chain_add_32(error
, &nmreq
, rlen
);
7315 nfsm_chain_build_done(error
, &nmreq
);
7316 nfsm_assert(error
, (numops
== 0), EPROTO
);
7318 error
= nfs_request_async(hadattrdir
? adnp
: np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
7319 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &si
, open
? R_NOINTR
: 0, NULL
, &req
);
7321 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
7323 if (hadattrdir
&& ((adlockerror
= nfs_node_lock(adnp
))))
7324 error
= adlockerror
;
7326 nfsm_chain_skip_tag(error
, &nmrep
);
7327 nfsm_chain_get_32(error
, &nmrep
, numops
);
7328 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
7330 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPENATTR
);
7331 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7333 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
7335 if (NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
) && fh
.fh_len
) {
7336 if (!np
->n_attrdirfh
|| (*np
->n_attrdirfh
!= fh
.fh_len
)) {
7337 /* (re)allocate attrdir fh buffer */
7338 if (np
->n_attrdirfh
)
7339 FREE(np
->n_attrdirfh
, M_TEMP
);
7340 MALLOC(np
->n_attrdirfh
, u_char
*, fh
.fh_len
+1, M_TEMP
, M_WAITOK
);
7342 if (np
->n_attrdirfh
) {
7343 /* remember the attrdir fh in the node */
7344 *np
->n_attrdirfh
= fh
.fh_len
;
7345 bcopy(fh
.fh_data
, np
->n_attrdirfh
+1, fh
.fh_len
);
7346 /* create busied node for attrdir */
7347 struct componentname cn
;
7348 bzero(&cn
, sizeof(cn
));
7349 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER
, const, char *); /* "/..namedfork/" */
7350 cn
.cn_namelen
= strlen(_PATH_FORKSPECIFIER
);
7351 cn
.cn_nameiop
= LOOKUP
;
7352 // XXX can't set parent correctly (to np) yet
7353 error
= nfs_nget(NFSTOMP(np
), NULL
, &cn
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, 0, &adnp
);
7356 /* set the node busy */
7357 SET(adnp
->n_flag
, NBUSY
);
7360 /* if no adnp, oh well... */
7364 NVATTR_CLEANUP(&nvattr
);
7368 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
7369 nfs_owner_seqid_increment(noop
, NULL
, error
);
7370 nfsm_chain_get_stateid(error
, &nmrep
, &newnofp
->nof_stateid
);
7371 nfsm_chain_check_change_info(error
, &nmrep
, adnp
);
7372 nfsm_chain_get_32(error
, &nmrep
, rflags
);
7373 bmlen
= NFS_ATTR_BITMAP_LEN
;
7374 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
7375 nfsm_chain_get_32(error
, &nmrep
, delegation
);
7377 switch (delegation
) {
7378 case NFS_OPEN_DELEGATE_NONE
:
7380 case NFS_OPEN_DELEGATE_READ
:
7381 case NFS_OPEN_DELEGATE_WRITE
:
7382 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
7383 nfsm_chain_get_32(error
, &nmrep
, recall
);
7384 if (delegation
== NFS_OPEN_DELEGATE_WRITE
) // space (skip) XXX
7385 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
7386 /* if we have any trouble accepting the ACE, just invalidate it */
7387 ace_type
= ace_flags
= ace_mask
= len
= 0;
7388 nfsm_chain_get_32(error
, &nmrep
, ace_type
);
7389 nfsm_chain_get_32(error
, &nmrep
, ace_flags
);
7390 nfsm_chain_get_32(error
, &nmrep
, ace_mask
);
7391 nfsm_chain_get_32(error
, &nmrep
, len
);
7392 ace
.ace_flags
= nfs4_ace_nfstype_to_vfstype(ace_type
, &error
);
7393 ace
.ace_flags
|= nfs4_ace_nfsflags_to_vfsflags(ace_flags
);
7394 ace
.ace_rights
= nfs4_ace_nfsmask_to_vfsrights(ace_mask
);
7395 if (!error
&& (len
>= slen
)) {
7396 MALLOC(s
, char*, len
+1, M_TEMP
, M_WAITOK
);
7403 nfsm_chain_get_opaque(error
, &nmrep
, len
, s
);
7405 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(len
));
7408 if (nfs4_id2guid(s
, &ace
.ace_applicable
, (ace_flags
& NFS_ACE_IDENTIFIER_GROUP
)))
7413 if (s
&& (s
!= sbuf
))
7420 /* At this point if we have no error, the object was created/opened. */
7423 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOOKUP
);
7425 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7427 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
7429 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
) || !fh
.fh_len
) {
7434 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
7435 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
7437 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPENATTR
);
7438 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7441 nfsm_chain_loadattr(error
, &nmrep
, adnp
, nmp
->nm_vers
, &xid
);
7445 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
)
7446 newnofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
7447 if (rflags
& NFS_OPEN_RESULT_CONFIRM
) {
7449 nfs_node_unlock(adnp
);
7450 adlockerror
= ENOENT
;
7452 NVATTR_CLEANUP(&nvattr
);
7453 error
= nfs4_open_confirm_rpc(nmp
, adnp
? adnp
: np
, fh
.fh_data
, fh
.fh_len
, noop
, &newnofp
->nof_stateid
, thd
, cred
, &nvattr
, &xid
);
7456 if ((adlockerror
= nfs_node_lock(adnp
)))
7457 error
= adlockerror
;
7462 if (open
&& adnp
&& !adlockerror
) {
7463 if (!open_error
&& (adnp
->n_flag
& NNEGNCENTRIES
)) {
7464 adnp
->n_flag
&= ~NNEGNCENTRIES
;
7465 cache_purge_negatives(NFSTOV(adnp
));
7467 adnp
->n_flag
|= NMODIFIED
;
7468 nfs_node_unlock(adnp
);
7469 adlockerror
= ENOENT
;
7470 nfs_getattr(adnp
, NULL
, ctx
, NGA_CACHED
);
7472 if (adnp
&& !adlockerror
&& (error
== ENOENT
) &&
7473 (cnp
->cn_flags
& MAKEENTRY
) && (cnp
->cn_nameiop
!= CREATE
) && negnamecache
) {
7474 /* add a negative entry in the name cache */
7475 cache_enter(NFSTOV(adnp
), NULL
, cnp
);
7476 adnp
->n_flag
|= NNEGNCENTRIES
;
7478 if (adnp
&& !adlockerror
) {
7479 nfs_node_unlock(adnp
);
7480 adlockerror
= ENOENT
;
7482 if (!error
&& !anp
&& fh
.fh_len
) {
7483 /* create the vnode with the filehandle and attributes */
7485 error
= nfs_nget(NFSTOMP(np
), adnp
, cnp
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, NG_MAKEENTRY
, &anp
);
7488 nfs_node_unlock(anp
);
7490 if (!error
&& open
) {
7491 nfs_open_file_add_open(newnofp
, accessMode
, denyMode
, 0);
7492 /* After we have a node, add our open file struct to the node */
7494 error
= nfs_open_file_find_internal(anp
, noop
, &nofp
, 0, 0, 0);
7496 /* This shouldn't happen, because we passed in a new nofp to use. */
7497 printf("nfs_open_file_find_internal failed! %d\n", error
);
7499 } else if (nofp
!= newnofp
) {
7501 * Hmm... an open file struct already exists.
7502 * Mark the existing one busy and merge our open into it.
7503 * Then destroy the one we created.
7504 * Note: there's no chance of an open confict because the
7505 * open has already been granted.
7507 nofpbusyerror
= nfs_open_file_set_busy(nofp
, NULL
);
7508 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, 0);
7509 nofp
->nof_stateid
= newnofp
->nof_stateid
;
7510 if (newnofp
->nof_flags
& NFS_OPEN_FILE_POSIXLOCK
)
7511 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
7512 nfs_open_file_clear_busy(newnofp
);
7513 nfs_open_file_destroy(newnofp
);
7519 /* mark the node as holding a create-initiated open */
7520 nofp
->nof_flags
|= NFS_OPEN_FILE_CREATE
;
7521 nofp
->nof_creator
= current_thread();
7527 NVATTR_CLEANUP(&nvattr
);
7528 if (open
&& ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
))) {
7529 if (!error
&& anp
&& !recall
) {
7530 /* stuff the delegation state in the node */
7531 lck_mtx_lock(&anp
->n_openlock
);
7532 anp
->n_openflags
&= ~N_DELEG_MASK
;
7533 anp
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
7534 anp
->n_dstateid
= dstateid
;
7536 if (anp
->n_dlink
.tqe_next
== NFSNOLIST
) {
7537 lck_mtx_lock(&nmp
->nm_lock
);
7538 if (anp
->n_dlink
.tqe_next
== NFSNOLIST
)
7539 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, anp
, n_dlink
);
7540 lck_mtx_unlock(&nmp
->nm_lock
);
7542 lck_mtx_unlock(&anp
->n_openlock
);
7544 /* give the delegation back */
7546 if (NFS_CMPFH(anp
, fh
.fh_data
, fh
.fh_len
)) {
7547 /* update delegation state and return it */
7548 lck_mtx_lock(&anp
->n_openlock
);
7549 anp
->n_openflags
&= ~N_DELEG_MASK
;
7550 anp
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
7551 anp
->n_dstateid
= dstateid
;
7553 if (anp
->n_dlink
.tqe_next
== NFSNOLIST
) {
7554 lck_mtx_lock(&nmp
->nm_lock
);
7555 if (anp
->n_dlink
.tqe_next
== NFSNOLIST
)
7556 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, anp
, n_dlink
);
7557 lck_mtx_unlock(&nmp
->nm_lock
);
7559 lck_mtx_unlock(&anp
->n_openlock
);
7560 /* don't need to send a separate delegreturn for fh */
7563 /* return anp's current delegation */
7564 nfs4_delegation_return(anp
, 0, thd
, cred
);
7566 if (fh
.fh_len
) /* return fh's delegation if it wasn't for anp */
7567 nfs4_delegreturn_rpc(nmp
, fh
.fh_data
, fh
.fh_len
, &dstateid
, 0, thd
, cred
);
7572 /* need to cleanup our temporary nofp */
7573 nfs_open_file_clear_busy(newnofp
);
7574 nfs_open_file_destroy(newnofp
);
7576 } else if (nofp
&& !nofpbusyerror
) {
7577 nfs_open_file_clear_busy(nofp
);
7578 nofpbusyerror
= ENOENT
;
7580 if (inuse
&& nfs_mount_state_in_use_end(nmp
, error
)) {
7582 nofp
= newnofp
= NULL
;
7583 rflags
= delegation
= recall
= eof
= rlen
= retlen
= 0;
7586 slen
= sizeof(sbuf
);
7587 nfsm_chain_cleanup(&nmreq
);
7588 nfsm_chain_cleanup(&nmrep
);
7590 vnode_put(NFSTOV(anp
));
7593 hadattrdir
= (adnp
!= NULL
);
7595 nfs_open_owner_clear_busy(noop
);
7602 nfs_open_owner_clear_busy(noop
);
7605 nfs_open_owner_rele(noop
);
7608 if (!error
&& prefetch
&& nmrep
.nmc_mhead
) {
7609 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
7610 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_NVERIFY
);
7611 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READ
);
7612 nfsm_chain_get_32(error
, &nmrep
, eof
);
7613 nfsm_chain_get_32(error
, &nmrep
, retlen
);
7614 if (!error
&& anp
) {
7616 * There can be one problem with doing the prefetch.
7617 * Because we don't have the node before we start the RPC, we
7618 * can't have the buffer busy while the READ is performed.
7619 * So there is a chance that other I/O occured on the same
7620 * range of data while we were performing this RPC. If that
7621 * happens, then it's possible the data we have in the READ
7622 * response is no longer up to date.
7623 * Once we have the node and the buffer, we need to make sure
7624 * that there's no chance we could be putting stale data in
7626 * So, we check if the range read is dirty or if any I/O may
7627 * have occured on it while we were performing our RPC.
7629 struct nfsbuf
*bp
= NULL
;
7633 retlen
= MIN(retlen
, rlen
);
7635 /* check if node needs size update or invalidation */
7636 if (ISSET(anp
->n_flag
, NUPDATESIZE
))
7637 nfs_data_update_size(anp
, 0);
7638 if (!(error
= nfs_node_lock(anp
))) {
7639 if (anp
->n_flag
& NNEEDINVALIDATE
) {
7640 anp
->n_flag
&= ~NNEEDINVALIDATE
;
7641 nfs_node_unlock(anp
);
7642 error
= nfs_vinvalbuf(NFSTOV(anp
), V_SAVE
|V_IGNORE_WRITEERR
, ctx
, 1);
7643 if (!error
) /* lets play it safe and just drop the data */
7646 nfs_node_unlock(anp
);
7650 /* calculate page mask for the range of data read */
7651 lastpg
= (trunc_page_32(retlen
) - 1) / PAGE_SIZE
;
7652 pagemask
= ((1 << (lastpg
+ 1)) - 1);
7655 error
= nfs_buf_get(anp
, 0, nmp
->nm_biosize
, thd
, NBLK_READ
|NBLK_NOWAIT
, &bp
);
7656 /* don't save the data if dirty or potential I/O conflict */
7657 if (!error
&& bp
&& !bp
->nb_dirtyoff
&& !(bp
->nb_dirty
& pagemask
) &&
7658 timevalcmp(&anp
->n_lastio
, &now
, <)) {
7659 OSAddAtomic64(1, &nfsstats
.read_bios
);
7660 CLR(bp
->nb_flags
, (NB_DONE
|NB_ASYNC
));
7661 SET(bp
->nb_flags
, NB_READ
);
7663 nfsm_chain_get_opaque(error
, &nmrep
, retlen
, bp
->nb_data
);
7665 bp
->nb_error
= error
;
7666 SET(bp
->nb_flags
, NB_ERROR
);
7669 bp
->nb_endio
= rlen
;
7670 if ((retlen
> 0) && (bp
->nb_endio
< (int)retlen
))
7671 bp
->nb_endio
= retlen
;
7672 if (eof
|| (retlen
== 0)) {
7673 /* zero out the remaining data (up to EOF) */
7674 off_t rpcrem
, eofrem
, rem
;
7675 rpcrem
= (rlen
- retlen
);
7676 eofrem
= anp
->n_size
- (NBOFF(bp
) + retlen
);
7677 rem
= (rpcrem
< eofrem
) ? rpcrem
: eofrem
;
7679 bzero(bp
->nb_data
+ retlen
, rem
);
7680 } else if ((retlen
< rlen
) && !ISSET(bp
->nb_flags
, NB_ERROR
)) {
7681 /* ugh... short read ... just invalidate for now... */
7682 SET(bp
->nb_flags
, NB_INVAL
);
7685 nfs_buf_read_finish(bp
);
7686 microuptime(&anp
->n_lastio
);
7689 nfs_buf_release(bp
, 1);
7691 error
= 0; /* ignore any transient error in processing the prefetch */
7693 if (adnp
&& !adbusyerror
) {
7694 nfs_node_clear_busy(adnp
);
7695 adbusyerror
= ENOENT
;
7698 nfs_node_clear_busy(np
);
7702 vnode_put(NFSTOV(adnp
));
7703 if (error
&& *anpp
) {
7704 vnode_put(NFSTOV(*anpp
));
7707 nfsm_chain_cleanup(&nmreq
);
7708 nfsm_chain_cleanup(&nmrep
);
7713 * Remove a named attribute.
7716 nfs4_named_attr_remove(nfsnode_t np
, nfsnode_t anp
, const char *name
, vfs_context_t ctx
)
7718 nfsnode_t adnp
= NULL
;
7719 struct nfsmount
*nmp
;
7720 struct componentname cn
;
7721 struct vnop_remove_args vra
;
7722 int error
, putanp
= 0;
7725 if (nfs_mount_gone(nmp
))
7728 bzero(&cn
, sizeof(cn
));
7729 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(name
, const, char *);
7730 cn
.cn_namelen
= strlen(name
);
7731 cn
.cn_nameiop
= DELETE
;
7735 error
= nfs4_named_attr_get(np
, &cn
, NFS_OPEN_SHARE_ACCESS_NONE
,
7736 0, ctx
, &anp
, NULL
);
7737 if ((!error
&& !anp
) || (error
== ENOATTR
))
7741 vnode_put(NFSTOV(anp
));
7749 if ((error
= nfs_node_set_busy(np
, vfs_context_thread(ctx
))))
7751 adnp
= nfs4_named_attr_dir_get(np
, 1, ctx
);
7752 nfs_node_clear_busy(np
);
7758 vra
.a_desc
= &vnop_remove_desc
;
7759 vra
.a_dvp
= NFSTOV(adnp
);
7760 vra
.a_vp
= NFSTOV(anp
);
7763 vra
.a_context
= ctx
;
7764 error
= nfs_vnop_remove(&vra
);
7767 vnode_put(NFSTOV(adnp
));
7769 vnode_put(NFSTOV(anp
));
7775 struct vnop_getxattr_args
/* {
7776 struct vnodeop_desc *a_desc;
7778 const char * a_name;
7782 vfs_context_t a_context;
7785 vfs_context_t ctx
= ap
->a_context
;
7786 struct nfsmount
*nmp
;
7787 struct nfs_vattr nvattr
;
7788 struct componentname cn
;
7790 int error
= 0, isrsrcfork
;
7792 nmp
= VTONMP(ap
->a_vp
);
7793 if (nfs_mount_gone(nmp
))
7796 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
))
7798 error
= nfs_getattr(VTONFS(ap
->a_vp
), &nvattr
, ctx
, NGA_CACHED
);
7801 if (NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_NAMED_ATTR
) &&
7802 !(nvattr
.nva_flags
& NFS_FFLAG_HAS_NAMED_ATTRS
))
7805 bzero(&cn
, sizeof(cn
));
7806 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(ap
->a_name
, const, char *);
7807 cn
.cn_namelen
= strlen(ap
->a_name
);
7808 cn
.cn_nameiop
= LOOKUP
;
7809 cn
.cn_flags
= MAKEENTRY
;
7811 /* we'll normally try to prefetch data for xattrs... the resource fork is really a stream */
7812 isrsrcfork
= (bcmp(ap
->a_name
, XATTR_RESOURCEFORK_NAME
, sizeof(XATTR_RESOURCEFORK_NAME
)) == 0);
7814 error
= nfs4_named_attr_get(VTONFS(ap
->a_vp
), &cn
, NFS_OPEN_SHARE_ACCESS_NONE
,
7815 !isrsrcfork
? NFS_GET_NAMED_ATTR_PREFETCH
: 0, ctx
, &anp
, NULL
);
7816 if ((!error
&& !anp
) || (error
== ENOENT
))
7820 error
= nfs_bioread(anp
, ap
->a_uio
, 0, ctx
);
7822 *ap
->a_size
= anp
->n_size
;
7825 vnode_put(NFSTOV(anp
));
7831 struct vnop_setxattr_args
/* {
7832 struct vnodeop_desc *a_desc;
7834 const char * a_name;
7837 vfs_context_t a_context;
7840 vfs_context_t ctx
= ap
->a_context
;
7841 int options
= ap
->a_options
;
7842 uio_t uio
= ap
->a_uio
;
7843 const char *name
= ap
->a_name
;
7844 struct nfsmount
*nmp
;
7845 struct componentname cn
;
7846 nfsnode_t anp
= NULL
;
7847 int error
= 0, closeerror
= 0, flags
, isrsrcfork
, isfinderinfo
, empty
= 0, i
;
7848 #define FINDERINFOSIZE 32
7849 uint8_t finfo
[FINDERINFOSIZE
];
7851 struct nfs_open_file
*nofp
= NULL
;
7852 char uio_buf
[ UIO_SIZEOF(1) ];
7854 struct vnop_write_args vwa
;
7856 nmp
= VTONMP(ap
->a_vp
);
7857 if (nfs_mount_gone(nmp
))
7860 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
))
7863 if ((options
& XATTR_CREATE
) && (options
& XATTR_REPLACE
))
7866 /* XXX limitation based on need to back up uio on short write */
7867 if (uio_iovcnt(uio
) > 1) {
7868 printf("nfs4_vnop_setxattr: iovcnt > 1\n");
7872 bzero(&cn
, sizeof(cn
));
7873 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(name
, const, char *);
7874 cn
.cn_namelen
= strlen(name
);
7875 cn
.cn_nameiop
= CREATE
;
7876 cn
.cn_flags
= MAKEENTRY
;
7878 isfinderinfo
= (bcmp(name
, XATTR_FINDERINFO_NAME
, sizeof(XATTR_FINDERINFO_NAME
)) == 0);
7879 isrsrcfork
= isfinderinfo
? 0 : (bcmp(name
, XATTR_RESOURCEFORK_NAME
, sizeof(XATTR_RESOURCEFORK_NAME
)) == 0);
7881 uio_setoffset(uio
, 0);
7883 if (uio_resid(uio
) != sizeof(finfo
))
7885 error
= uiomove((char*)&finfo
, sizeof(finfo
), uio
);
7888 /* setting a FinderInfo of all zeroes means remove the FinderInfo */
7890 for (i
=0, finfop
=(uint32_t*)&finfo
; i
< (int)(sizeof(finfo
)/sizeof(uint32_t)); i
++)
7895 if (empty
&& !(options
& (XATTR_CREATE
|XATTR_REPLACE
))) {
7896 error
= nfs4_named_attr_remove(VTONFS(ap
->a_vp
), anp
, name
, ctx
);
7897 if (error
== ENOENT
)
7901 /* first, let's see if we get a create/replace error */
7905 * create/open the xattr
7907 * We need to make sure not to create it if XATTR_REPLACE.
7908 * For all xattrs except the resource fork, we also want to
7909 * truncate the xattr to remove any current data. We'll do
7910 * that by setting the size to 0 on create/open.
7913 if (!(options
& XATTR_REPLACE
))
7914 flags
|= NFS_GET_NAMED_ATTR_CREATE
;
7915 if (options
& XATTR_CREATE
)
7916 flags
|= NFS_GET_NAMED_ATTR_CREATE_GUARDED
;
7918 flags
|= NFS_GET_NAMED_ATTR_TRUNCATE
;
7920 error
= nfs4_named_attr_get(VTONFS(ap
->a_vp
), &cn
, NFS_OPEN_SHARE_ACCESS_BOTH
,
7921 flags
, ctx
, &anp
, &nofp
);
7926 /* grab the open state from the get/create/open */
7927 if (nofp
&& !(error
= nfs_open_file_set_busy(nofp
, NULL
))) {
7928 nofp
->nof_flags
&= ~NFS_OPEN_FILE_CREATE
;
7929 nofp
->nof_creator
= NULL
;
7930 nfs_open_file_clear_busy(nofp
);
7933 /* Setting an empty FinderInfo really means remove it, skip to the close/remove */
7934 if (isfinderinfo
&& empty
)
7938 * Write the data out and flush.
7940 * For FinderInfo, we've already copied the data to finfo, so do I/O from there.
7942 vwa
.a_desc
= &vnop_write_desc
;
7943 vwa
.a_vp
= NFSTOV(anp
);
7946 vwa
.a_context
= ctx
;
7948 auio
= uio_createwithbuffer(1, 0, UIO_SYSSPACE
, UIO_WRITE
, &uio_buf
, sizeof(uio_buf
));
7949 uio_addiov(auio
, (uintptr_t)&finfo
, sizeof(finfo
));
7951 } else if (uio_resid(uio
) > 0) {
7955 error
= nfs_vnop_write(&vwa
);
7957 error
= nfs_flush(anp
, MNT_WAIT
, vfs_context_thread(ctx
), 0);
7960 /* Close the xattr. */
7962 int busyerror
= nfs_open_file_set_busy(nofp
, NULL
);
7963 closeerror
= nfs_close(anp
, nofp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_NONE
, ctx
);
7965 nfs_open_file_clear_busy(nofp
);
7967 if (!error
&& isfinderinfo
&& empty
) { /* Setting an empty FinderInfo really means remove it */
7968 error
= nfs4_named_attr_remove(VTONFS(ap
->a_vp
), anp
, name
, ctx
);
7969 if (error
== ENOENT
)
7976 vnode_put(NFSTOV(anp
));
7977 if (error
== ENOENT
)
7983 nfs4_vnop_removexattr(
7984 struct vnop_removexattr_args
/* {
7985 struct vnodeop_desc *a_desc;
7987 const char * a_name;
7989 vfs_context_t a_context;
7992 struct nfsmount
*nmp
= VTONMP(ap
->a_vp
);
7995 if (nfs_mount_gone(nmp
))
7997 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
))
8000 error
= nfs4_named_attr_remove(VTONFS(ap
->a_vp
), NULL
, ap
->a_name
, ap
->a_context
);
8001 if (error
== ENOENT
)
8007 nfs4_vnop_listxattr(
8008 struct vnop_listxattr_args
/* {
8009 struct vnodeop_desc *a_desc;
8014 vfs_context_t a_context;
8017 vfs_context_t ctx
= ap
->a_context
;
8018 nfsnode_t np
= VTONFS(ap
->a_vp
);
8019 uio_t uio
= ap
->a_uio
;
8020 nfsnode_t adnp
= NULL
;
8021 struct nfsmount
*nmp
;
8023 struct nfs_vattr nvattr
;
8024 uint64_t cookie
, nextcookie
, lbn
= 0;
8025 struct nfsbuf
*bp
= NULL
;
8026 struct nfs_dir_buf_header
*ndbhp
;
8027 struct direntry
*dp
;
8029 nmp
= VTONMP(ap
->a_vp
);
8030 if (nfs_mount_gone(nmp
))
8033 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
))
8036 error
= nfs_getattr(np
, &nvattr
, ctx
, NGA_CACHED
);
8039 if (NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_NAMED_ATTR
) &&
8040 !(nvattr
.nva_flags
& NFS_FFLAG_HAS_NAMED_ATTRS
))
8043 if ((error
= nfs_node_set_busy(np
, vfs_context_thread(ctx
))))
8045 adnp
= nfs4_named_attr_dir_get(np
, 1, ctx
);
8046 nfs_node_clear_busy(np
);
8050 if ((error
= nfs_node_lock(adnp
)))
8053 if (adnp
->n_flag
& NNEEDINVALIDATE
) {
8054 adnp
->n_flag
&= ~NNEEDINVALIDATE
;
8056 nfs_node_unlock(adnp
);
8057 error
= nfs_vinvalbuf(NFSTOV(adnp
), 0, ctx
, 1);
8059 error
= nfs_node_lock(adnp
);
8065 * check for need to invalidate when (re)starting at beginning
8067 if (adnp
->n_flag
& NMODIFIED
) {
8069 nfs_node_unlock(adnp
);
8070 if ((error
= nfs_vinvalbuf(NFSTOV(adnp
), 0, ctx
, 1)))
8073 nfs_node_unlock(adnp
);
8075 /* nfs_getattr() will check changed and purge caches */
8076 if ((error
= nfs_getattr(adnp
, &nvattr
, ctx
, NGA_UNCACHED
)))
8079 if (uio
&& (uio_resid(uio
) == 0))
8083 nextcookie
= lbn
= 0;
8085 while (!error
&& !done
) {
8086 OSAddAtomic64(1, &nfsstats
.biocache_readdirs
);
8087 cookie
= nextcookie
;
8089 error
= nfs_buf_get(adnp
, lbn
, NFS_DIRBLKSIZ
, vfs_context_thread(ctx
), NBLK_READ
, &bp
);
8092 ndbhp
= (struct nfs_dir_buf_header
*)bp
->nb_data
;
8093 if (!ISSET(bp
->nb_flags
, NB_CACHE
) || !ISSET(ndbhp
->ndbh_flags
, NDB_FULL
)) {
8094 if (!ISSET(bp
->nb_flags
, NB_CACHE
)) { /* initialize the buffer */
8095 ndbhp
->ndbh_flags
= 0;
8096 ndbhp
->ndbh_count
= 0;
8097 ndbhp
->ndbh_entry_end
= sizeof(*ndbhp
);
8098 ndbhp
->ndbh_ncgen
= adnp
->n_ncgen
;
8100 error
= nfs_buf_readdir(bp
, ctx
);
8101 if (error
== NFSERR_DIRBUFDROPPED
)
8104 nfs_buf_release(bp
, 1);
8105 if (error
&& (error
!= ENXIO
) && (error
!= ETIMEDOUT
) && (error
!= EINTR
) && (error
!= ERESTART
)) {
8106 if (!nfs_node_lock(adnp
)) {
8108 nfs_node_unlock(adnp
);
8110 nfs_vinvalbuf(NFSTOV(adnp
), 0, ctx
, 1);
8111 if (error
== NFSERR_BAD_COOKIE
)
8118 /* go through all the entries copying/counting */
8119 dp
= NFS_DIR_BUF_FIRST_DIRENTRY(bp
);
8120 for (i
=0; i
< ndbhp
->ndbh_count
; i
++) {
8121 if (!xattr_protected(dp
->d_name
)) {
8123 *ap
->a_size
+= dp
->d_namlen
+ 1;
8124 } else if (uio_resid(uio
) < (dp
->d_namlen
+ 1)) {
8127 error
= uiomove(dp
->d_name
, dp
->d_namlen
+1, uio
);
8128 if (error
&& (error
!= EFAULT
))
8132 nextcookie
= dp
->d_seekoff
;
8133 dp
= NFS_DIRENTRY_NEXT(dp
);
8136 if (i
== ndbhp
->ndbh_count
) {
8137 /* hit end of buffer, move to next buffer */
8139 /* if we also hit EOF, we're done */
8140 if (ISSET(ndbhp
->ndbh_flags
, NDB_EOF
))
8143 if (!error
&& !done
&& (nextcookie
== cookie
)) {
8144 printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie
, i
, ndbhp
->ndbh_count
);
8147 nfs_buf_release(bp
, 1);
8151 vnode_put(NFSTOV(adnp
));
8157 nfs4_vnop_getnamedstream(
8158 struct vnop_getnamedstream_args
/* {
8159 struct vnodeop_desc *a_desc;
8163 enum nsoperation a_operation;
8165 vfs_context_t a_context;
8168 vfs_context_t ctx
= ap
->a_context
;
8169 struct nfsmount
*nmp
;
8170 struct nfs_vattr nvattr
;
8171 struct componentname cn
;
8175 nmp
= VTONMP(ap
->a_vp
);
8176 if (nfs_mount_gone(nmp
))
8179 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
))
8181 error
= nfs_getattr(VTONFS(ap
->a_vp
), &nvattr
, ctx
, NGA_CACHED
);
8184 if (NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_NAMED_ATTR
) &&
8185 !(nvattr
.nva_flags
& NFS_FFLAG_HAS_NAMED_ATTRS
))
8188 bzero(&cn
, sizeof(cn
));
8189 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(ap
->a_name
, const, char *);
8190 cn
.cn_namelen
= strlen(ap
->a_name
);
8191 cn
.cn_nameiop
= LOOKUP
;
8192 cn
.cn_flags
= MAKEENTRY
;
8194 error
= nfs4_named_attr_get(VTONFS(ap
->a_vp
), &cn
, NFS_OPEN_SHARE_ACCESS_NONE
,
8195 0, ctx
, &anp
, NULL
);
8196 if ((!error
&& !anp
) || (error
== ENOENT
))
8199 *ap
->a_svpp
= NFSTOV(anp
);
8201 vnode_put(NFSTOV(anp
));
8206 nfs4_vnop_makenamedstream(
8207 struct vnop_makenamedstream_args
/* {
8208 struct vnodeop_desc *a_desc;
8213 vfs_context_t a_context;
8216 vfs_context_t ctx
= ap
->a_context
;
8217 struct nfsmount
*nmp
;
8218 struct componentname cn
;
8222 nmp
= VTONMP(ap
->a_vp
);
8223 if (nfs_mount_gone(nmp
))
8226 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
))
8229 bzero(&cn
, sizeof(cn
));
8230 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(ap
->a_name
, const, char *);
8231 cn
.cn_namelen
= strlen(ap
->a_name
);
8232 cn
.cn_nameiop
= CREATE
;
8233 cn
.cn_flags
= MAKEENTRY
;
8235 error
= nfs4_named_attr_get(VTONFS(ap
->a_vp
), &cn
, NFS_OPEN_SHARE_ACCESS_BOTH
,
8236 NFS_GET_NAMED_ATTR_CREATE
, ctx
, &anp
, NULL
);
8237 if ((!error
&& !anp
) || (error
== ENOENT
))
8240 *ap
->a_svpp
= NFSTOV(anp
);
8242 vnode_put(NFSTOV(anp
));
8247 nfs4_vnop_removenamedstream(
8248 struct vnop_removenamedstream_args
/* {
8249 struct vnodeop_desc *a_desc;
8254 vfs_context_t a_context;
8257 struct nfsmount
*nmp
= VTONMP(ap
->a_vp
);
8258 nfsnode_t np
= ap
->a_vp
? VTONFS(ap
->a_vp
) : NULL
;
8259 nfsnode_t anp
= ap
->a_svp
? VTONFS(ap
->a_svp
) : NULL
;
8261 if (nfs_mount_gone(nmp
))
8265 * Given that a_svp is a named stream, checking for
8266 * named attribute support is kinda pointless.
8268 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
))
8271 return (nfs4_named_attr_remove(np
, anp
, ap
->a_name
, ap
->a_context
));