/*
 * Copyright (c) 2006-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * vnode op calls for NFS version 4
 */
32 #include <sys/param.h>
33 #include <sys/kernel.h>
34 #include <sys/systm.h>
35 #include <sys/resourcevar.h>
36 #include <sys/proc_internal.h>
37 #include <sys/kauth.h>
38 #include <sys/mount_internal.h>
39 #include <sys/malloc.h>
40 #include <sys/kpi_mbuf.h>
42 #include <sys/vnode_internal.h>
43 #include <sys/dirent.h>
44 #include <sys/fcntl.h>
45 #include <sys/lockf.h>
46 #include <sys/ubc_internal.h>
48 #include <sys/signalvar.h>
49 #include <sys/uio_internal.h>
50 #include <sys/xattr.h>
51 #include <sys/paths.h>
53 #include <vfs/vfs_support.h>
58 #include <kern/clock.h>
59 #include <libkern/OSAtomic.h>
61 #include <miscfs/fifofs/fifo.h>
62 #include <miscfs/specfs/specdev.h>
64 #include <nfs/rpcv2.h>
65 #include <nfs/nfsproto.h>
67 #include <nfs/nfsnode.h>
68 #include <nfs/nfs_gss.h>
69 #include <nfs/nfsmount.h>
70 #include <nfs/nfs_lock.h>
71 #include <nfs/xdr_subs.h>
72 #include <nfs/nfsm_subs.h>
75 #include <netinet/in.h>
76 #include <netinet/in_var.h>
77 #include <vm/vm_kern.h>
79 #include <kern/task.h>
80 #include <kern/sched_prim.h>
83 nfs4_access_rpc(nfsnode_t np
, u_int32_t
*access
, int rpcflags
, vfs_context_t ctx
)
85 int error
= 0, lockerror
= ENOENT
, status
, numops
, slot
;
87 struct nfsm_chain nmreq
, nmrep
;
89 uint32_t access_result
= 0, supported
= 0, missing
;
90 struct nfsmount
*nmp
= NFSTONMP(np
);
91 int nfsvers
= nmp
->nm_vers
;
93 struct nfsreq_secinfo_args si
;
95 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
99 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
100 nfsm_chain_null(&nmreq
);
101 nfsm_chain_null(&nmrep
);
103 // PUTFH, ACCESS, GETATTR
105 nfsm_chain_build_alloc_init(error
, &nmreq
, 17 * NFSX_UNSIGNED
);
106 nfsm_chain_add_compound_header(error
, &nmreq
, "access", nmp
->nm_minor_vers
, numops
);
108 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
109 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
111 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_ACCESS
);
112 nfsm_chain_add_32(error
, &nmreq
, *access
);
114 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
115 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
116 nfsm_chain_build_done(error
, &nmreq
);
117 nfsm_assert(error
, (numops
== 0), EPROTO
);
119 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
120 vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
121 &si
, rpcflags
, &nmrep
, &xid
, &status
);
123 if ((lockerror
= nfs_node_lock(np
))) {
126 nfsm_chain_skip_tag(error
, &nmrep
);
127 nfsm_chain_get_32(error
, &nmrep
, numops
);
128 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
129 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_ACCESS
);
130 nfsm_chain_get_32(error
, &nmrep
, supported
);
131 nfsm_chain_get_32(error
, &nmrep
, access_result
);
133 if ((missing
= (*access
& ~supported
))) {
134 /* missing support for something(s) we wanted */
135 if (missing
& NFS_ACCESS_DELETE
) {
137 * If the server doesn't report DELETE (possible
138 * on UNIX systems), we'll assume that it is OK
139 * and just let any subsequent delete action fail
140 * if it really isn't deletable.
142 access_result
|= NFS_ACCESS_DELETE
;
145 /* ".zfs" subdirectories may erroneously give a denied answer for modify/delete */
146 if (nfs_access_dotzfs
) {
147 vnode_t dvp
= NULLVP
;
148 if (np
->n_flag
& NISDOTZFSCHILD
) { /* may be able to create/delete snapshot dirs */
149 access_result
|= (NFS_ACCESS_MODIFY
| NFS_ACCESS_EXTEND
| NFS_ACCESS_DELETE
);
150 } else if (((dvp
= vnode_getparent(NFSTOV(np
))) != NULLVP
) && (VTONFS(dvp
)->n_flag
& NISDOTZFSCHILD
)) {
151 access_result
|= NFS_ACCESS_DELETE
; /* may be able to delete snapshot dirs */
157 /* Some servers report DELETE support but erroneously give a denied answer. */
158 if (nfs_access_delete
&& (*access
& NFS_ACCESS_DELETE
) && !(access_result
& NFS_ACCESS_DELETE
)) {
159 access_result
|= NFS_ACCESS_DELETE
;
161 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
162 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
165 if (nfs_mount_gone(nmp
)) {
170 if (auth_is_kerberized(np
->n_auth
) || auth_is_kerberized(nmp
->nm_auth
)) {
171 uid
= nfs_cred_getasid2uid(vfs_context_ucred(ctx
));
173 uid
= kauth_cred_getuid(vfs_context_ucred(ctx
));
175 slot
= nfs_node_access_slot(np
, uid
, 1);
176 np
->n_accessuid
[slot
] = uid
;
178 np
->n_accessstamp
[slot
] = now
.tv_sec
;
179 np
->n_access
[slot
] = access_result
;
181 /* pass back the access returned with this request */
182 *access
= np
->n_access
[slot
];
187 nfsm_chain_cleanup(&nmreq
);
188 nfsm_chain_cleanup(&nmrep
);
200 struct nfs_vattr
*nvap
,
203 struct nfsmount
*nmp
= mp
? VFSTONFS(mp
) : NFSTONMP(np
);
204 int error
= 0, status
, nfsvers
, numops
, rpcflags
= 0, acls
;
205 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
206 struct nfsm_chain nmreq
, nmrep
;
207 struct nfsreq_secinfo_args si
;
209 if (nfs_mount_gone(nmp
)) {
212 nfsvers
= nmp
->nm_vers
;
213 acls
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_ACL
);
215 if (np
&& (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)) {
216 nfs4_default_attrs_for_referral_trigger(VTONFS(np
->n_parent
), NULL
, 0, nvap
, NULL
);
220 if (flags
& NGA_MONITOR
) { /* vnode monitor requests should be soft */
221 rpcflags
= R_RECOVER
;
224 if (flags
& NGA_SOFT
) { /* Return ETIMEDOUT if server not responding */
228 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
229 nfsm_chain_null(&nmreq
);
230 nfsm_chain_null(&nmrep
);
234 nfsm_chain_build_alloc_init(error
, &nmreq
, 15 * NFSX_UNSIGNED
);
235 nfsm_chain_add_compound_header(error
, &nmreq
, "getattr", nmp
->nm_minor_vers
, numops
);
237 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
238 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, fhp
, fhsize
);
240 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
241 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
242 if ((flags
& NGA_ACL
) && acls
) {
243 NFS_BITMAP_SET(bitmap
, NFS_FATTR_ACL
);
245 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
246 nfsm_chain_build_done(error
, &nmreq
);
247 nfsm_assert(error
, (numops
== 0), EPROTO
);
249 error
= nfs_request2(np
, mp
, &nmreq
, NFSPROC4_COMPOUND
,
250 vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
251 NULL
, rpcflags
, &nmrep
, xidp
, &status
);
253 nfsm_chain_skip_tag(error
, &nmrep
);
254 nfsm_chain_get_32(error
, &nmrep
, numops
);
255 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
256 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
258 error
= nfs4_parsefattr(&nmrep
, NULL
, nvap
, NULL
, NULL
, NULL
);
260 if ((flags
& NGA_ACL
) && acls
&& !NFS_BITMAP_ISSET(nvap
->nva_bitmap
, NFS_FATTR_ACL
)) {
261 /* we asked for the ACL but didn't get one... assume there isn't one */
262 NFS_BITMAP_SET(nvap
->nva_bitmap
, NFS_FATTR_ACL
);
263 nvap
->nva_acl
= NULL
;
266 nfsm_chain_cleanup(&nmreq
);
267 nfsm_chain_cleanup(&nmrep
);
272 nfs4_readlink_rpc(nfsnode_t np
, char *buf
, uint32_t *buflenp
, vfs_context_t ctx
)
274 struct nfsmount
*nmp
;
275 int error
= 0, lockerror
= ENOENT
, status
, numops
;
278 struct nfsm_chain nmreq
, nmrep
;
279 struct nfsreq_secinfo_args si
;
282 if (nfs_mount_gone(nmp
)) {
285 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
288 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
289 nfsm_chain_null(&nmreq
);
290 nfsm_chain_null(&nmrep
);
292 // PUTFH, GETATTR, READLINK
294 nfsm_chain_build_alloc_init(error
, &nmreq
, 16 * NFSX_UNSIGNED
);
295 nfsm_chain_add_compound_header(error
, &nmreq
, "readlink", nmp
->nm_minor_vers
, numops
);
297 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
298 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
300 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
301 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
303 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READLINK
);
304 nfsm_chain_build_done(error
, &nmreq
);
305 nfsm_assert(error
, (numops
== 0), EPROTO
);
307 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
309 if ((lockerror
= nfs_node_lock(np
))) {
312 nfsm_chain_skip_tag(error
, &nmrep
);
313 nfsm_chain_get_32(error
, &nmrep
, numops
);
314 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
315 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
316 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, &xid
);
317 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READLINK
);
318 nfsm_chain_get_32(error
, &nmrep
, len
);
320 if (len
>= *buflenp
) {
321 if (np
->n_size
&& (np
->n_size
< *buflenp
)) {
327 nfsm_chain_get_opaque(error
, &nmrep
, len
, buf
);
335 nfsm_chain_cleanup(&nmreq
);
336 nfsm_chain_cleanup(&nmrep
);
347 struct nfsreq_cbinfo
*cb
,
348 struct nfsreq
**reqp
)
350 struct nfsmount
*nmp
;
351 int error
= 0, nfsvers
, numops
;
353 struct nfsm_chain nmreq
;
354 struct nfsreq_secinfo_args si
;
357 if (nfs_mount_gone(nmp
)) {
360 nfsvers
= nmp
->nm_vers
;
361 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
365 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
366 nfsm_chain_null(&nmreq
);
368 // PUTFH, READ, GETATTR
370 nfsm_chain_build_alloc_init(error
, &nmreq
, 22 * NFSX_UNSIGNED
);
371 nfsm_chain_add_compound_header(error
, &nmreq
, "read", nmp
->nm_minor_vers
, numops
);
373 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
374 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
376 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READ
);
377 nfs_get_stateid(np
, thd
, cred
, &stateid
);
378 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
379 nfsm_chain_add_64(error
, &nmreq
, offset
);
380 nfsm_chain_add_32(error
, &nmreq
, len
);
382 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
383 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
384 nfsm_chain_build_done(error
, &nmreq
);
385 nfsm_assert(error
, (numops
== 0), EPROTO
);
387 error
= nfs_request_async(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, 0, cb
, reqp
);
389 nfsm_chain_cleanup(&nmreq
);
394 nfs4_read_rpc_async_finish(
401 struct nfsmount
*nmp
;
402 int error
= 0, lockerror
, nfsvers
, numops
, status
, eof
= 0;
405 struct nfsm_chain nmrep
;
408 if (nfs_mount_gone(nmp
)) {
409 nfs_request_async_cancel(req
);
412 nfsvers
= nmp
->nm_vers
;
414 nfsm_chain_null(&nmrep
);
416 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
417 if (error
== EINPROGRESS
) { /* async request restarted */
421 if ((lockerror
= nfs_node_lock(np
))) {
424 nfsm_chain_skip_tag(error
, &nmrep
);
425 nfsm_chain_get_32(error
, &nmrep
, numops
);
426 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
427 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READ
);
428 nfsm_chain_get_32(error
, &nmrep
, eof
);
429 nfsm_chain_get_32(error
, &nmrep
, retlen
);
431 *lenp
= MIN(retlen
, *lenp
);
432 error
= nfsm_chain_get_uio(&nmrep
, *lenp
, uio
);
434 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
435 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
440 if (!eof
&& !retlen
) {
445 nfsm_chain_cleanup(&nmrep
);
446 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) {
447 microuptime(&np
->n_lastio
);
453 nfs4_write_rpc_async(
460 struct nfsreq_cbinfo
*cb
,
461 struct nfsreq
**reqp
)
463 struct nfsmount
*nmp
;
465 int error
= 0, nfsvers
, numops
;
467 struct nfsm_chain nmreq
;
468 struct nfsreq_secinfo_args si
;
471 if (nfs_mount_gone(nmp
)) {
474 nfsvers
= nmp
->nm_vers
;
475 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
479 /* for async mounts, don't bother sending sync write requests */
480 if ((iomode
!= NFS_WRITE_UNSTABLE
) && nfs_allow_async
&&
481 ((mp
= NFSTOMP(np
))) && (vfs_flags(mp
) & MNT_ASYNC
)) {
482 iomode
= NFS_WRITE_UNSTABLE
;
485 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
486 nfsm_chain_null(&nmreq
);
488 // PUTFH, WRITE, GETATTR
490 nfsm_chain_build_alloc_init(error
, &nmreq
, 25 * NFSX_UNSIGNED
+ len
);
491 nfsm_chain_add_compound_header(error
, &nmreq
, "write", nmp
->nm_minor_vers
, numops
);
493 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
494 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
496 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_WRITE
);
497 nfs_get_stateid(np
, thd
, cred
, &stateid
);
498 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
499 nfsm_chain_add_64(error
, &nmreq
, uio_offset(uio
));
500 nfsm_chain_add_32(error
, &nmreq
, iomode
);
501 nfsm_chain_add_32(error
, &nmreq
, len
);
503 error
= nfsm_chain_add_uio(&nmreq
, uio
, len
);
506 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
507 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
508 nfsm_chain_build_done(error
, &nmreq
);
509 nfsm_assert(error
, (numops
== 0), EPROTO
);
512 error
= nfs_request_async(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, 0, cb
, reqp
);
514 nfsm_chain_cleanup(&nmreq
);
519 nfs4_write_rpc_async_finish(
526 struct nfsmount
*nmp
;
527 int error
= 0, lockerror
= ENOENT
, nfsvers
, numops
, status
;
528 int committed
= NFS_WRITE_FILESYNC
;
530 u_int64_t xid
, wverf
;
532 struct nfsm_chain nmrep
;
535 if (nfs_mount_gone(nmp
)) {
536 nfs_request_async_cancel(req
);
539 nfsvers
= nmp
->nm_vers
;
541 nfsm_chain_null(&nmrep
);
543 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
544 if (error
== EINPROGRESS
) { /* async request restarted */
548 if (nfs_mount_gone(nmp
)) {
551 if (!error
&& (lockerror
= nfs_node_lock(np
))) {
554 nfsm_chain_skip_tag(error
, &nmrep
);
555 nfsm_chain_get_32(error
, &nmrep
, numops
);
556 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
557 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_WRITE
);
558 nfsm_chain_get_32(error
, &nmrep
, rlen
);
564 nfsm_chain_get_32(error
, &nmrep
, committed
);
565 nfsm_chain_get_64(error
, &nmrep
, wverf
);
570 lck_mtx_lock(&nmp
->nm_lock
);
571 if (!(nmp
->nm_state
& NFSSTA_HASWRITEVERF
)) {
572 nmp
->nm_verf
= wverf
;
573 nmp
->nm_state
|= NFSSTA_HASWRITEVERF
;
574 } else if (nmp
->nm_verf
!= wverf
) {
575 nmp
->nm_verf
= wverf
;
577 lck_mtx_unlock(&nmp
->nm_lock
);
578 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
579 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
584 nfsm_chain_cleanup(&nmrep
);
585 if ((committed
!= NFS_WRITE_FILESYNC
) && nfs_allow_async
&&
586 ((mp
= NFSTOMP(np
))) && (vfs_flags(mp
) & MNT_ASYNC
)) {
587 committed
= NFS_WRITE_FILESYNC
;
589 *iomodep
= committed
;
590 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) {
591 microuptime(&np
->n_lastio
);
604 int error
= 0, lockerror
= ENOENT
, remove_error
= 0, status
;
605 struct nfsmount
*nmp
;
608 struct nfsm_chain nmreq
, nmrep
;
609 struct nfsreq_secinfo_args si
;
612 if (nfs_mount_gone(nmp
)) {
615 nfsvers
= nmp
->nm_vers
;
616 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
619 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
621 nfsm_chain_null(&nmreq
);
622 nfsm_chain_null(&nmrep
);
624 // PUTFH, REMOVE, GETATTR
626 nfsm_chain_build_alloc_init(error
, &nmreq
, 17 * NFSX_UNSIGNED
+ namelen
);
627 nfsm_chain_add_compound_header(error
, &nmreq
, "remove", nmp
->nm_minor_vers
, numops
);
629 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
630 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
632 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_REMOVE
);
633 nfsm_chain_add_name(error
, &nmreq
, name
, namelen
, nmp
);
635 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
636 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
637 nfsm_chain_build_done(error
, &nmreq
);
638 nfsm_assert(error
, (numops
== 0), EPROTO
);
641 error
= nfs_request2(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, 0, &nmrep
, &xid
, &status
);
643 if ((lockerror
= nfs_node_lock(dnp
))) {
646 nfsm_chain_skip_tag(error
, &nmrep
);
647 nfsm_chain_get_32(error
, &nmrep
, numops
);
648 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
649 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_REMOVE
);
650 remove_error
= error
;
651 nfsm_chain_check_change_info(error
, &nmrep
, dnp
);
652 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
653 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
654 if (error
&& !lockerror
) {
655 NATTRINVALIDATE(dnp
);
658 nfsm_chain_cleanup(&nmreq
);
659 nfsm_chain_cleanup(&nmrep
);
662 dnp
->n_flag
|= NMODIFIED
;
663 nfs_node_unlock(dnp
);
665 if (error
== NFSERR_GRACE
) {
666 tsleep(&nmp
->nm_state
, (PZERO
- 1), "nfsgrace", 2 * hz
);
683 int error
= 0, lockerror
= ENOENT
, status
, nfsvers
, numops
;
684 struct nfsmount
*nmp
;
685 u_int64_t xid
, savedxid
;
686 struct nfsm_chain nmreq
, nmrep
;
687 struct nfsreq_secinfo_args si
;
689 nmp
= NFSTONMP(fdnp
);
690 if (nfs_mount_gone(nmp
)) {
693 nfsvers
= nmp
->nm_vers
;
694 if (fdnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
697 if (tdnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
701 NFSREQ_SECINFO_SET(&si
, fdnp
, NULL
, 0, NULL
, 0);
702 nfsm_chain_null(&nmreq
);
703 nfsm_chain_null(&nmrep
);
705 // PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM)
707 nfsm_chain_build_alloc_init(error
, &nmreq
, 30 * NFSX_UNSIGNED
+ fnamelen
+ tnamelen
);
708 nfsm_chain_add_compound_header(error
, &nmreq
, "rename", nmp
->nm_minor_vers
, numops
);
710 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
711 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, fdnp
->n_fhp
, fdnp
->n_fhsize
);
713 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
715 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
716 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, tdnp
->n_fhp
, tdnp
->n_fhsize
);
718 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RENAME
);
719 nfsm_chain_add_name(error
, &nmreq
, fnameptr
, fnamelen
, nmp
);
720 nfsm_chain_add_name(error
, &nmreq
, tnameptr
, tnamelen
, nmp
);
722 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
723 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, tdnp
);
725 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
727 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
728 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, fdnp
);
729 nfsm_chain_build_done(error
, &nmreq
);
730 nfsm_assert(error
, (numops
== 0), EPROTO
);
733 error
= nfs_request(fdnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
735 if ((lockerror
= nfs_node_lock2(fdnp
, tdnp
))) {
738 nfsm_chain_skip_tag(error
, &nmrep
);
739 nfsm_chain_get_32(error
, &nmrep
, numops
);
740 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
741 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
742 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
743 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RENAME
);
744 nfsm_chain_check_change_info(error
, &nmrep
, fdnp
);
745 nfsm_chain_check_change_info(error
, &nmrep
, tdnp
);
746 /* directory attributes: if we don't get them, make sure to invalidate */
747 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
749 nfsm_chain_loadattr(error
, &nmrep
, tdnp
, nfsvers
, &xid
);
750 if (error
&& !lockerror
) {
751 NATTRINVALIDATE(tdnp
);
753 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
754 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
756 nfsm_chain_loadattr(error
, &nmrep
, fdnp
, nfsvers
, &xid
);
757 if (error
&& !lockerror
) {
758 NATTRINVALIDATE(fdnp
);
761 nfsm_chain_cleanup(&nmreq
);
762 nfsm_chain_cleanup(&nmrep
);
764 fdnp
->n_flag
|= NMODIFIED
;
765 tdnp
->n_flag
|= NMODIFIED
;
766 nfs_node_unlock2(fdnp
, tdnp
);
772 * NFS V4 readdir RPC.
775 nfs4_readdir_rpc(nfsnode_t dnp
, struct nfsbuf
*bp
, vfs_context_t ctx
)
777 struct nfsmount
*nmp
;
778 int error
= 0, lockerror
, nfsvers
, namedattr
, rdirplus
, bigcookies
, numops
;
779 int i
, status
, more_entries
= 1, eof
, bp_dropped
= 0;
780 uint32_t nmreaddirsize
, nmrsize
;
781 uint32_t namlen
, skiplen
, fhlen
, xlen
, attrlen
, reclen
, space_free
, space_needed
;
782 uint64_t cookie
, lastcookie
, xid
, savedxid
;
783 struct nfsm_chain nmreq
, nmrep
, nmrepsave
;
785 struct nfs_vattr nvattr
, *nvattrp
;
786 struct nfs_dir_buf_header
*ndbhp
;
788 char *padstart
, padlen
;
790 uint32_t entry_attrs
[NFS_ATTR_BITMAP_LEN
];
792 struct nfsreq_secinfo_args si
;
795 if (nfs_mount_gone(nmp
)) {
798 nfsvers
= nmp
->nm_vers
;
799 nmreaddirsize
= nmp
->nm_readdirsize
;
800 nmrsize
= nmp
->nm_rsize
;
801 bigcookies
= nmp
->nm_state
& NFSSTA_BIGCOOKIES
;
802 namedattr
= (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) ? 1 : 0;
803 rdirplus
= (NMFLAG(nmp
, RDIRPLUS
) || namedattr
) ? 1 : 0;
804 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
807 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
810 * Set up attribute request for entries.
811 * For READDIRPLUS functionality, get everything.
812 * Otherwise, just get what we need for struct direntry.
816 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, entry_attrs
);
817 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_FILEHANDLE
);
820 NFS_CLEAR_ATTRIBUTES(entry_attrs
);
821 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_TYPE
);
822 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_FILEID
);
823 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_MOUNTED_ON_FILEID
);
825 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_RDATTR_ERROR
);
827 /* lock to protect access to cookie verifier */
828 if ((lockerror
= nfs_node_lock(dnp
))) {
832 /* determine cookie to use, and move dp to the right offset */
833 ndbhp
= (struct nfs_dir_buf_header
*)bp
->nb_data
;
834 dp
= NFS_DIR_BUF_FIRST_DIRENTRY(bp
);
835 if (ndbhp
->ndbh_count
) {
836 for (i
= 0; i
< ndbhp
->ndbh_count
- 1; i
++) {
837 dp
= NFS_DIRENTRY_NEXT(dp
);
839 cookie
= dp
->d_seekoff
;
840 dp
= NFS_DIRENTRY_NEXT(dp
);
842 cookie
= bp
->nb_lblkno
;
843 /* increment with every buffer read */
844 OSAddAtomic64(1, &nfsstats
.readdir_bios
);
849 * The NFS client is responsible for the "." and ".." entries in the
850 * directory. So, we put them at the start of the first buffer.
851 * Don't bother for attribute directories.
853 if (((bp
->nb_lblkno
== 0) && (ndbhp
->ndbh_count
== 0)) &&
854 !(dnp
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
856 fhlen
= rdirplus
? fh
.fh_len
+ 1 : 0;
857 xlen
= rdirplus
? (fhlen
+ sizeof(time_t)) : 0;
860 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
862 bzero(&dp
->d_name
[namlen
+ 1], xlen
);
864 dp
->d_namlen
= namlen
;
865 strlcpy(dp
->d_name
, ".", namlen
+ 1);
866 dp
->d_fileno
= dnp
->n_vattr
.nva_fileid
;
868 dp
->d_reclen
= reclen
;
870 padstart
= dp
->d_name
+ dp
->d_namlen
+ 1 + xlen
;
871 dp
= NFS_DIRENTRY_NEXT(dp
);
872 padlen
= (char*)dp
- padstart
;
874 bzero(padstart
, padlen
);
876 if (rdirplus
) { /* zero out attributes */
877 bzero(NFS_DIR_BUF_NVATTR(bp
, 0), sizeof(struct nfs_vattr
));
882 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
884 bzero(&dp
->d_name
[namlen
+ 1], xlen
);
886 dp
->d_namlen
= namlen
;
887 strlcpy(dp
->d_name
, "..", namlen
+ 1);
889 dp
->d_fileno
= VTONFS(dnp
->n_parent
)->n_vattr
.nva_fileid
;
891 dp
->d_fileno
= dnp
->n_vattr
.nva_fileid
;
894 dp
->d_reclen
= reclen
;
896 padstart
= dp
->d_name
+ dp
->d_namlen
+ 1 + xlen
;
897 dp
= NFS_DIRENTRY_NEXT(dp
);
898 padlen
= (char*)dp
- padstart
;
900 bzero(padstart
, padlen
);
902 if (rdirplus
) { /* zero out attributes */
903 bzero(NFS_DIR_BUF_NVATTR(bp
, 1), sizeof(struct nfs_vattr
));
906 ndbhp
->ndbh_entry_end
= (char*)dp
- bp
->nb_data
;
907 ndbhp
->ndbh_count
= 2;
911 * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
912 * the buffer is full (or we hit EOF). Then put the remainder of the
913 * results in the next buffer(s).
915 nfsm_chain_null(&nmreq
);
916 nfsm_chain_null(&nmrep
);
917 while (nfs_dir_buf_freespace(bp
, rdirplus
) && !(ndbhp
->ndbh_flags
& NDB_FULL
)) {
918 // PUTFH, GETATTR, READDIR
920 nfsm_chain_build_alloc_init(error
, &nmreq
, 26 * NFSX_UNSIGNED
);
921 nfsm_chain_add_compound_header(error
, &nmreq
, tag
, nmp
->nm_minor_vers
, numops
);
923 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
924 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
926 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
927 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
929 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READDIR
);
930 nfsm_chain_add_64(error
, &nmreq
, (cookie
<= 2) ? 0 : cookie
);
931 nfsm_chain_add_64(error
, &nmreq
, dnp
->n_cookieverf
);
932 nfsm_chain_add_32(error
, &nmreq
, nmreaddirsize
);
933 nfsm_chain_add_32(error
, &nmreq
, nmrsize
);
934 nfsm_chain_add_bitmap_supported(error
, &nmreq
, entry_attrs
, nmp
, dnp
);
935 nfsm_chain_build_done(error
, &nmreq
);
936 nfsm_assert(error
, (numops
== 0), EPROTO
);
937 nfs_node_unlock(dnp
);
939 error
= nfs_request(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
941 if ((lockerror
= nfs_node_lock(dnp
))) {
946 nfsm_chain_skip_tag(error
, &nmrep
);
947 nfsm_chain_get_32(error
, &nmrep
, numops
);
948 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
949 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
950 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
951 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READDIR
);
952 nfsm_chain_get_64(error
, &nmrep
, dnp
->n_cookieverf
);
953 nfsm_chain_get_32(error
, &nmrep
, more_entries
);
956 nfs_node_unlock(dnp
);
965 /* loop through the entries packing them into the buffer */
966 while (more_entries
) {
967 /* Entry: COOKIE, NAME, FATTR */
968 nfsm_chain_get_64(error
, &nmrep
, cookie
);
969 nfsm_chain_get_32(error
, &nmrep
, namlen
);
971 if (!bigcookies
&& (cookie
>> 32) && (nmp
== NFSTONMP(dnp
))) {
972 /* we've got a big cookie, make sure flag is set */
973 lck_mtx_lock(&nmp
->nm_lock
);
974 nmp
->nm_state
|= NFSSTA_BIGCOOKIES
;
975 lck_mtx_unlock(&nmp
->nm_lock
);
978 /* just truncate names that don't fit in direntry.d_name */
983 if (namlen
> (sizeof(dp
->d_name
) - 1)) {
984 skiplen
= namlen
- sizeof(dp
->d_name
) + 1;
985 namlen
= sizeof(dp
->d_name
) - 1;
989 /* guess that fh size will be same as parent */
990 fhlen
= rdirplus
? (1 + dnp
->n_fhsize
) : 0;
991 xlen
= rdirplus
? (fhlen
+ sizeof(time_t)) : 0;
992 attrlen
= rdirplus
? sizeof(struct nfs_vattr
) : 0;
993 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
994 space_needed
= reclen
+ attrlen
;
995 space_free
= nfs_dir_buf_freespace(bp
, rdirplus
);
996 if (space_needed
> space_free
) {
998 * We still have entries to pack, but we've
999 * run out of room in the current buffer.
1000 * So we need to move to the next buffer.
1001 * The block# for the next buffer is the
1002 * last cookie in the current buffer.
1005 ndbhp
->ndbh_flags
|= NDB_FULL
;
1006 nfs_buf_release(bp
, 0);
1009 error
= nfs_buf_get(dnp
, lastcookie
, NFS_DIRBLKSIZ
, vfs_context_thread(ctx
), NBLK_READ
, &bp
);
1011 /* initialize buffer */
1012 ndbhp
= (struct nfs_dir_buf_header
*)bp
->nb_data
;
1013 ndbhp
->ndbh_flags
= 0;
1014 ndbhp
->ndbh_count
= 0;
1015 ndbhp
->ndbh_entry_end
= sizeof(*ndbhp
);
1016 ndbhp
->ndbh_ncgen
= dnp
->n_ncgen
;
1017 space_free
= nfs_dir_buf_freespace(bp
, rdirplus
);
1018 dp
= NFS_DIR_BUF_FIRST_DIRENTRY(bp
);
1019 /* increment with every buffer read */
1020 OSAddAtomic64(1, &nfsstats
.readdir_bios
);
1023 dp
->d_fileno
= cookie
; /* placeholder */
1024 dp
->d_seekoff
= cookie
;
1025 dp
->d_namlen
= namlen
;
1026 dp
->d_reclen
= reclen
;
1027 dp
->d_type
= DT_UNKNOWN
;
1028 nfsm_chain_get_opaque(error
, &nmrep
, namlen
, dp
->d_name
);
1030 dp
->d_name
[namlen
] = '\0';
1032 nfsm_chain_adv(error
, &nmrep
,
1033 nfsm_rndup(namlen
+ skiplen
) - nfsm_rndup(namlen
));
1036 nvattrp
= rdirplus
? NFS_DIR_BUF_NVATTR(bp
, ndbhp
->ndbh_count
) : &nvattr
;
1037 error
= nfs4_parsefattr(&nmrep
, NULL
, nvattrp
, &fh
, NULL
, NULL
);
1038 if (!error
&& NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_ACL
)) {
1039 /* we do NOT want ACLs returned to us here */
1040 NFS_BITMAP_CLR(nvattrp
->nva_bitmap
, NFS_FATTR_ACL
);
1041 if (nvattrp
->nva_acl
) {
1042 kauth_acl_free(nvattrp
->nva_acl
);
1043 nvattrp
->nva_acl
= NULL
;
1046 if (error
&& NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_RDATTR_ERROR
)) {
1047 /* OK, we may not have gotten all of the attributes but we will use what we can. */
1048 if ((error
== NFSERR_MOVED
) || (error
== NFSERR_INVAL
)) {
1049 /* set this up to look like a referral trigger */
1050 nfs4_default_attrs_for_referral_trigger(dnp
, dp
->d_name
, namlen
, nvattrp
, &fh
);
1054 /* check for more entries after this one */
1055 nfsm_chain_get_32(error
, &nmrep
, more_entries
);
1058 /* Skip any "." and ".." entries returned from server. */
1059 /* Also skip any bothersome named attribute entries. */
1060 if (((dp
->d_name
[0] == '.') && ((namlen
== 1) || ((namlen
== 2) && (dp
->d_name
[1] == '.')))) ||
1061 (namedattr
&& (namlen
== 11) && (!strcmp(dp
->d_name
, "SUNWattr_ro") || !strcmp(dp
->d_name
, "SUNWattr_rw")))) {
1062 lastcookie
= cookie
;
1066 if (NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_TYPE
)) {
1067 dp
->d_type
= IFTODT(VTTOIF(nvattrp
->nva_type
));
1069 if (NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_FILEID
)) {
1070 dp
->d_fileno
= nvattrp
->nva_fileid
;
1073 /* fileid is already in d_fileno, so stash xid in attrs */
1074 nvattrp
->nva_fileid
= savedxid
;
1075 if (NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
1076 fhlen
= fh
.fh_len
+ 1;
1077 xlen
= fhlen
+ sizeof(time_t);
1078 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
1079 space_needed
= reclen
+ attrlen
;
1080 if (space_needed
> space_free
) {
1081 /* didn't actually have the room... move on to next buffer */
1085 /* pack the file handle into the record */
1086 dp
->d_name
[dp
->d_namlen
+ 1] = fh
.fh_len
;
1087 bcopy(fh
.fh_data
, &dp
->d_name
[dp
->d_namlen
+ 2], fh
.fh_len
);
1089 /* mark the file handle invalid */
1091 fhlen
= fh
.fh_len
+ 1;
1092 xlen
= fhlen
+ sizeof(time_t);
1093 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
1094 bzero(&dp
->d_name
[dp
->d_namlen
+ 1], fhlen
);
1096 *(time_t*)(&dp
->d_name
[dp
->d_namlen
+ 1 + fhlen
]) = now
.tv_sec
;
1097 dp
->d_reclen
= reclen
;
1099 padstart
= dp
->d_name
+ dp
->d_namlen
+ 1 + xlen
;
1100 ndbhp
->ndbh_count
++;
1101 lastcookie
= cookie
;
1103 /* advance to next direntry in buffer */
1104 dp
= NFS_DIRENTRY_NEXT(dp
);
1105 ndbhp
->ndbh_entry_end
= (char*)dp
- bp
->nb_data
;
1106 /* zero out the pad bytes */
1107 padlen
= (char*)dp
- padstart
;
1109 bzero(padstart
, padlen
);
1112 /* Finally, get the eof boolean */
1113 nfsm_chain_get_32(error
, &nmrep
, eof
);
1116 ndbhp
->ndbh_flags
|= (NDB_FULL
| NDB_EOF
);
1117 nfs_node_lock_force(dnp
);
1118 dnp
->n_eofcookie
= lastcookie
;
1119 nfs_node_unlock(dnp
);
1124 nfs_buf_release(bp
, 0);
1128 if ((lockerror
= nfs_node_lock(dnp
))) {
1132 nfsm_chain_cleanup(&nmrep
);
1133 nfsm_chain_null(&nmreq
);
1136 if (bp_dropped
&& bp
) {
1137 nfs_buf_release(bp
, 0);
1140 nfs_node_unlock(dnp
);
1142 nfsm_chain_cleanup(&nmreq
);
1143 nfsm_chain_cleanup(&nmrep
);
1144 return bp_dropped
? NFSERR_DIRBUFDROPPED
: error
;
1148 nfs4_lookup_rpc_async(
1153 struct nfsreq
**reqp
)
1155 int error
= 0, isdotdot
= 0, nfsvers
, numops
;
1156 struct nfsm_chain nmreq
;
1157 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
1158 struct nfsmount
*nmp
;
1159 struct nfsreq_secinfo_args si
;
1161 nmp
= NFSTONMP(dnp
);
1162 if (nfs_mount_gone(nmp
)) {
1165 nfsvers
= nmp
->nm_vers
;
1166 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
1170 if ((name
[0] == '.') && (name
[1] == '.') && (namelen
== 2)) {
1172 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
1174 NFSREQ_SECINFO_SET(&si
, dnp
, dnp
->n_fhp
, dnp
->n_fhsize
, name
, namelen
);
1177 nfsm_chain_null(&nmreq
);
1179 // PUTFH, GETATTR, LOOKUP(P), GETFH, GETATTR (FH)
1181 nfsm_chain_build_alloc_init(error
, &nmreq
, 20 * NFSX_UNSIGNED
+ namelen
);
1182 nfsm_chain_add_compound_header(error
, &nmreq
, "lookup", nmp
->nm_minor_vers
, numops
);
1184 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1185 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
1187 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1188 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
1191 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOOKUPP
);
1193 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOOKUP
);
1194 nfsm_chain_add_name(error
, &nmreq
, name
, namelen
, nmp
);
1197 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETFH
);
1199 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1200 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
1201 /* some ".zfs" directories can't handle being asked for some attributes */
1202 if ((dnp
->n_flag
& NISDOTZFS
) && !isdotdot
) {
1203 NFS_BITMAP_CLR(bitmap
, NFS_FATTR_NAMED_ATTR
);
1205 if ((dnp
->n_flag
& NISDOTZFSCHILD
) && isdotdot
) {
1206 NFS_BITMAP_CLR(bitmap
, NFS_FATTR_NAMED_ATTR
);
1208 if (((namelen
== 4) && (name
[0] == '.') && (name
[1] == 'z') && (name
[2] == 'f') && (name
[3] == 's'))) {
1209 NFS_BITMAP_CLR(bitmap
, NFS_FATTR_NAMED_ATTR
);
1211 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, NULL
);
1212 nfsm_chain_build_done(error
, &nmreq
);
1213 nfsm_assert(error
, (numops
== 0), EPROTO
);
1215 error
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
1216 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &si
, 0, NULL
, reqp
);
1218 nfsm_chain_cleanup(&nmreq
);
1224 nfs4_lookup_rpc_async_finish(
1232 struct nfs_vattr
*nvap
)
1234 int error
= 0, lockerror
= ENOENT
, status
, nfsvers
, numops
, isdotdot
= 0;
1235 uint32_t op
= NFS_OP_LOOKUP
;
1237 struct nfsmount
*nmp
;
1238 struct nfsm_chain nmrep
;
1240 nmp
= NFSTONMP(dnp
);
1244 nfsvers
= nmp
->nm_vers
;
1245 if ((name
[0] == '.') && (name
[1] == '.') && (namelen
== 2)) {
1249 nfsm_chain_null(&nmrep
);
1251 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
1253 if ((lockerror
= nfs_node_lock(dnp
))) {
1256 nfsm_chain_skip_tag(error
, &nmrep
);
1257 nfsm_chain_get_32(error
, &nmrep
, numops
);
1258 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1259 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1263 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
1265 nfsm_chain_op_check(error
, &nmrep
, (isdotdot
? NFS_OP_LOOKUPP
: NFS_OP_LOOKUP
));
1266 nfsmout_if(error
|| !fhp
|| !nvap
);
1267 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETFH
);
1268 nfsm_chain_get_32(error
, &nmrep
, fhp
->fh_len
);
1269 if (error
== 0 && fhp
->fh_len
> sizeof(fhp
->fh_data
)) {
1273 nfsm_chain_get_opaque(error
, &nmrep
, fhp
->fh_len
, fhp
->fh_data
);
1274 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1275 if ((error
== NFSERR_MOVED
) || (error
== NFSERR_INVAL
)) {
1276 /* set this up to look like a referral trigger */
1277 nfs4_default_attrs_for_referral_trigger(dnp
, name
, namelen
, nvap
, fhp
);
1281 error
= nfs4_parsefattr(&nmrep
, NULL
, nvap
, NULL
, NULL
, NULL
);
1285 nfs_node_unlock(dnp
);
1287 nfsm_chain_cleanup(&nmrep
);
1288 if (!error
&& (op
== NFS_OP_LOOKUP
) && (nmp
->nm_state
& NFSSTA_NEEDSECINFO
)) {
1289 /* We still need to get SECINFO to set default for mount. */
1290 /* Do so for the first LOOKUP that returns successfully. */
1293 sec
.count
= NX_MAX_SEC_FLAVORS
;
1294 error
= nfs4_secinfo_rpc(nmp
, &req
->r_secinfo
, vfs_context_ucred(ctx
), sec
.flavors
, &sec
.count
);
1295 /* [sigh] some implementations return "illegal" error for unsupported ops */
1296 if (error
== NFSERR_OP_ILLEGAL
) {
1300 /* set our default security flavor to the first in the list */
1301 lck_mtx_lock(&nmp
->nm_lock
);
1303 nmp
->nm_auth
= sec
.flavors
[0];
1305 nmp
->nm_state
&= ~NFSSTA_NEEDSECINFO
;
1306 lck_mtx_unlock(&nmp
->nm_lock
);
1320 struct nfsmount
*nmp
;
1321 int error
= 0, lockerror
, status
, nfsvers
, numops
;
1322 u_int64_t xid
, newwverf
;
1324 struct nfsm_chain nmreq
, nmrep
;
1325 struct nfsreq_secinfo_args si
;
1328 FSDBG(521, np
, offset
, count
, nmp
? nmp
->nm_state
: 0);
1329 if (nfs_mount_gone(nmp
)) {
1332 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
1335 if (!(nmp
->nm_state
& NFSSTA_HASWRITEVERF
)) {
1338 nfsvers
= nmp
->nm_vers
;
1340 if (count
> UINT32_MAX
) {
1346 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
1347 nfsm_chain_null(&nmreq
);
1348 nfsm_chain_null(&nmrep
);
1350 // PUTFH, COMMIT, GETATTR
1352 nfsm_chain_build_alloc_init(error
, &nmreq
, 19 * NFSX_UNSIGNED
);
1353 nfsm_chain_add_compound_header(error
, &nmreq
, "commit", nmp
->nm_minor_vers
, numops
);
1355 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1356 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
1358 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_COMMIT
);
1359 nfsm_chain_add_64(error
, &nmreq
, offset
);
1360 nfsm_chain_add_32(error
, &nmreq
, count32
);
1362 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1363 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
1364 nfsm_chain_build_done(error
, &nmreq
);
1365 nfsm_assert(error
, (numops
== 0), EPROTO
);
1367 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
1368 current_thread(), cred
, &si
, 0, &nmrep
, &xid
, &status
);
1370 if ((lockerror
= nfs_node_lock(np
))) {
1373 nfsm_chain_skip_tag(error
, &nmrep
);
1374 nfsm_chain_get_32(error
, &nmrep
, numops
);
1375 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1376 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_COMMIT
);
1377 nfsm_chain_get_64(error
, &nmrep
, newwverf
);
1378 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1379 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
1381 nfs_node_unlock(np
);
1384 lck_mtx_lock(&nmp
->nm_lock
);
1385 if (nmp
->nm_verf
!= newwverf
) {
1386 nmp
->nm_verf
= newwverf
;
1388 if (wverf
!= newwverf
) {
1389 error
= NFSERR_STALEWRITEVERF
;
1391 lck_mtx_unlock(&nmp
->nm_lock
);
1393 nfsm_chain_cleanup(&nmreq
);
1394 nfsm_chain_cleanup(&nmrep
);
1401 struct nfs_fsattr
*nfsap
,
1405 int error
= 0, lockerror
, status
, nfsvers
, numops
;
1406 struct nfsm_chain nmreq
, nmrep
;
1407 struct nfsmount
*nmp
= NFSTONMP(np
);
1408 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
1409 struct nfs_vattr nvattr
;
1410 struct nfsreq_secinfo_args si
;
1412 if (nfs_mount_gone(nmp
)) {
1415 nfsvers
= nmp
->nm_vers
;
1416 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
1420 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
1421 NVATTR_INIT(&nvattr
);
1422 nfsm_chain_null(&nmreq
);
1423 nfsm_chain_null(&nmrep
);
1425 /* NFSv4: fetch "pathconf" info for this node */
1428 nfsm_chain_build_alloc_init(error
, &nmreq
, 16 * NFSX_UNSIGNED
);
1429 nfsm_chain_add_compound_header(error
, &nmreq
, "pathconf", nmp
->nm_minor_vers
, numops
);
1431 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1432 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
1434 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1435 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
1436 NFS_BITMAP_SET(bitmap
, NFS_FATTR_MAXLINK
);
1437 NFS_BITMAP_SET(bitmap
, NFS_FATTR_MAXNAME
);
1438 NFS_BITMAP_SET(bitmap
, NFS_FATTR_NO_TRUNC
);
1439 NFS_BITMAP_SET(bitmap
, NFS_FATTR_CHOWN_RESTRICTED
);
1440 NFS_BITMAP_SET(bitmap
, NFS_FATTR_CASE_INSENSITIVE
);
1441 NFS_BITMAP_SET(bitmap
, NFS_FATTR_CASE_PRESERVING
);
1442 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
1443 nfsm_chain_build_done(error
, &nmreq
);
1444 nfsm_assert(error
, (numops
== 0), EPROTO
);
1446 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
1448 nfsm_chain_skip_tag(error
, &nmrep
);
1449 nfsm_chain_get_32(error
, &nmrep
, numops
);
1450 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1451 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1453 error
= nfs4_parsefattr(&nmrep
, nfsap
, &nvattr
, NULL
, NULL
, NULL
);
1455 if ((lockerror
= nfs_node_lock(np
))) {
1459 nfs_loadattrcache(np
, &nvattr
, &xid
, 0);
1462 nfs_node_unlock(np
);
1465 NVATTR_CLEANUP(&nvattr
);
1466 nfsm_chain_cleanup(&nmreq
);
1467 nfsm_chain_cleanup(&nmrep
);
1473 struct vnop_getattr_args
/* {
1474 * struct vnodeop_desc *a_desc;
1476 * struct vnode_attr *a_vap;
1477 * vfs_context_t a_context;
1480 struct vnode_attr
*vap
= ap
->a_vap
;
1481 struct nfsmount
*nmp
;
1482 struct nfs_vattr nva
;
1483 int error
, acls
, ngaflags
;
1485 nmp
= VTONMP(ap
->a_vp
);
1486 if (nfs_mount_gone(nmp
)) {
1489 acls
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_ACL
);
1491 ngaflags
= NGA_CACHED
;
1492 if (VATTR_IS_ACTIVE(vap
, va_acl
) && acls
) {
1493 ngaflags
|= NGA_ACL
;
1495 error
= nfs_getattr(VTONFS(ap
->a_vp
), &nva
, ap
->a_context
, ngaflags
);
1500 /* copy what we have in nva to *a_vap */
1501 if (VATTR_IS_ACTIVE(vap
, va_rdev
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_RAWDEV
)) {
1502 dev_t rdev
= makedev(nva
.nva_rawdev
.specdata1
, nva
.nva_rawdev
.specdata2
);
1503 VATTR_RETURN(vap
, va_rdev
, rdev
);
1505 if (VATTR_IS_ACTIVE(vap
, va_nlink
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_NUMLINKS
)) {
1506 VATTR_RETURN(vap
, va_nlink
, nva
.nva_nlink
);
1508 if (VATTR_IS_ACTIVE(vap
, va_data_size
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_SIZE
)) {
1509 VATTR_RETURN(vap
, va_data_size
, nva
.nva_size
);
1511 // VATTR_RETURN(vap, va_data_alloc, ???);
1512 // VATTR_RETURN(vap, va_total_size, ???);
1513 if (VATTR_IS_ACTIVE(vap
, va_total_alloc
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_SPACE_USED
)) {
1514 VATTR_RETURN(vap
, va_total_alloc
, nva
.nva_bytes
);
1516 if (VATTR_IS_ACTIVE(vap
, va_uid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_OWNER
)) {
1517 VATTR_RETURN(vap
, va_uid
, nva
.nva_uid
);
1519 if (VATTR_IS_ACTIVE(vap
, va_uuuid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_OWNER
)) {
1520 VATTR_RETURN(vap
, va_uuuid
, nva
.nva_uuuid
);
1522 if (VATTR_IS_ACTIVE(vap
, va_gid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_OWNER_GROUP
)) {
1523 VATTR_RETURN(vap
, va_gid
, nva
.nva_gid
);
1525 if (VATTR_IS_ACTIVE(vap
, va_guuid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_OWNER_GROUP
)) {
1526 VATTR_RETURN(vap
, va_guuid
, nva
.nva_guuid
);
1528 if (VATTR_IS_ACTIVE(vap
, va_mode
)) {
1529 if (NMFLAG(nmp
, ACLONLY
) || !NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_MODE
)) {
1530 VATTR_RETURN(vap
, va_mode
, 0777);
1532 VATTR_RETURN(vap
, va_mode
, nva
.nva_mode
);
1535 if (VATTR_IS_ACTIVE(vap
, va_flags
) &&
1536 (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_ARCHIVE
) ||
1537 NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_HIDDEN
) ||
1538 (nva
.nva_flags
& NFS_FFLAG_TRIGGER
))) {
1540 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_ARCHIVE
) &&
1541 (nva
.nva_flags
& NFS_FFLAG_ARCHIVED
)) {
1542 flags
|= SF_ARCHIVED
;
1544 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_HIDDEN
) &&
1545 (nva
.nva_flags
& NFS_FFLAG_HIDDEN
)) {
1548 VATTR_RETURN(vap
, va_flags
, flags
);
1550 if (VATTR_IS_ACTIVE(vap
, va_create_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_CREATE
)) {
1551 vap
->va_create_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_CREATE
];
1552 vap
->va_create_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_CREATE
];
1553 VATTR_SET_SUPPORTED(vap
, va_create_time
);
1555 if (VATTR_IS_ACTIVE(vap
, va_access_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_ACCESS
)) {
1556 vap
->va_access_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_ACCESS
];
1557 vap
->va_access_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_ACCESS
];
1558 VATTR_SET_SUPPORTED(vap
, va_access_time
);
1560 if (VATTR_IS_ACTIVE(vap
, va_modify_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_MODIFY
)) {
1561 vap
->va_modify_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_MODIFY
];
1562 vap
->va_modify_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_MODIFY
];
1563 VATTR_SET_SUPPORTED(vap
, va_modify_time
);
1565 if (VATTR_IS_ACTIVE(vap
, va_change_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_METADATA
)) {
1566 vap
->va_change_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_CHANGE
];
1567 vap
->va_change_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_CHANGE
];
1568 VATTR_SET_SUPPORTED(vap
, va_change_time
);
1570 if (VATTR_IS_ACTIVE(vap
, va_backup_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_BACKUP
)) {
1571 vap
->va_backup_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_BACKUP
];
1572 vap
->va_backup_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_BACKUP
];
1573 VATTR_SET_SUPPORTED(vap
, va_backup_time
);
1575 if (VATTR_IS_ACTIVE(vap
, va_fileid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_FILEID
)) {
1576 VATTR_RETURN(vap
, va_fileid
, nva
.nva_fileid
);
1578 if (VATTR_IS_ACTIVE(vap
, va_type
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TYPE
)) {
1579 VATTR_RETURN(vap
, va_type
, nva
.nva_type
);
1581 if (VATTR_IS_ACTIVE(vap
, va_filerev
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_CHANGE
)) {
1582 VATTR_RETURN(vap
, va_filerev
, nva
.nva_change
);
1585 if (VATTR_IS_ACTIVE(vap
, va_acl
) && acls
) {
1586 VATTR_RETURN(vap
, va_acl
, nva
.nva_acl
);
1590 // other attrs we might support someday:
1591 // VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? */);
1593 NVATTR_CLEANUP(&nva
);
1600 struct vnode_attr
*vap
,
1603 struct nfsmount
*nmp
= NFSTONMP(np
);
1604 int error
= 0, setattr_error
= 0, lockerror
= ENOENT
, status
, nfsvers
, numops
;
1605 u_int64_t xid
, nextxid
;
1606 struct nfsm_chain nmreq
, nmrep
;
1607 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
1608 uint32_t getbitmap
[NFS_ATTR_BITMAP_LEN
];
1609 uint32_t setbitmap
[NFS_ATTR_BITMAP_LEN
];
1610 nfs_stateid stateid
;
1611 struct nfsreq_secinfo_args si
;
1613 if (nfs_mount_gone(nmp
)) {
1616 nfsvers
= nmp
->nm_vers
;
1617 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
1621 if (VATTR_IS_ACTIVE(vap
, va_flags
) && (vap
->va_flags
& ~(SF_ARCHIVED
| UF_HIDDEN
))) {
1622 /* we don't support setting unsupported flags (duh!) */
1623 if (vap
->va_active
& ~VNODE_ATTR_va_flags
) {
1624 return EINVAL
; /* return EINVAL if other attributes also set */
1626 return ENOTSUP
; /* return ENOTSUP for chflags(2) */
1630 /* don't bother requesting some changes if they don't look like they are changing */
1631 if (VATTR_IS_ACTIVE(vap
, va_uid
) && (vap
->va_uid
== np
->n_vattr
.nva_uid
)) {
1632 VATTR_CLEAR_ACTIVE(vap
, va_uid
);
1634 if (VATTR_IS_ACTIVE(vap
, va_gid
) && (vap
->va_gid
== np
->n_vattr
.nva_gid
)) {
1635 VATTR_CLEAR_ACTIVE(vap
, va_gid
);
1637 if (VATTR_IS_ACTIVE(vap
, va_uuuid
) && kauth_guid_equal(&vap
->va_uuuid
, &np
->n_vattr
.nva_uuuid
)) {
1638 VATTR_CLEAR_ACTIVE(vap
, va_uuuid
);
1640 if (VATTR_IS_ACTIVE(vap
, va_guuid
) && kauth_guid_equal(&vap
->va_guuid
, &np
->n_vattr
.nva_guuid
)) {
1641 VATTR_CLEAR_ACTIVE(vap
, va_guuid
);
1645 /* do nothing if no attributes will be sent */
1646 nfs_vattr_set_bitmap(nmp
, bitmap
, vap
);
1647 if (!bitmap
[0] && !bitmap
[1]) {
1651 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
1652 nfsm_chain_null(&nmreq
);
1653 nfsm_chain_null(&nmrep
);
1656 * Prepare GETATTR bitmap: if we are setting the ACL or mode, we
1657 * need to invalidate any cached ACL. And if we had an ACL cached,
1658 * we might as well also fetch the new value.
1660 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, getbitmap
);
1661 if (NFS_BITMAP_ISSET(bitmap
, NFS_FATTR_ACL
) ||
1662 NFS_BITMAP_ISSET(bitmap
, NFS_FATTR_MODE
)) {
1663 if (NACLVALID(np
)) {
1664 NFS_BITMAP_SET(getbitmap
, NFS_FATTR_ACL
);
1669 // PUTFH, SETATTR, GETATTR
1671 nfsm_chain_build_alloc_init(error
, &nmreq
, 40 * NFSX_UNSIGNED
);
1672 nfsm_chain_add_compound_header(error
, &nmreq
, "setattr", nmp
->nm_minor_vers
, numops
);
1674 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1675 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
1677 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SETATTR
);
1678 if (VATTR_IS_ACTIVE(vap
, va_data_size
)) {
1679 nfs_get_stateid(np
, vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &stateid
);
1681 stateid
.seqid
= stateid
.other
[0] = stateid
.other
[1] = stateid
.other
[2] = 0;
1683 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
1684 nfsm_chain_add_fattr4(error
, &nmreq
, vap
, nmp
);
1686 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1687 nfsm_chain_add_bitmap_supported(error
, &nmreq
, getbitmap
, nmp
, np
);
1688 nfsm_chain_build_done(error
, &nmreq
);
1689 nfsm_assert(error
, (numops
== 0), EPROTO
);
1691 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
1693 if ((lockerror
= nfs_node_lock(np
))) {
1696 nfsm_chain_skip_tag(error
, &nmrep
);
1697 nfsm_chain_get_32(error
, &nmrep
, numops
);
1698 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1700 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SETATTR
);
1701 nfsmout_if(error
== EBADRPC
);
1702 setattr_error
= error
;
1704 bmlen
= NFS_ATTR_BITMAP_LEN
;
1705 nfsm_chain_get_bitmap(error
, &nmrep
, setbitmap
, bmlen
);
1707 if (VATTR_IS_ACTIVE(vap
, va_data_size
) && (np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
1708 microuptime(&np
->n_lastio
);
1710 nfs_vattr_set_supported(setbitmap
, vap
);
1711 error
= setattr_error
;
1713 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1714 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
1716 NATTRINVALIDATE(np
);
1719 * We just changed the attributes and we want to make sure that we
1720 * see the latest attributes. Get the next XID. If it's not the
1721 * next XID after the SETATTR XID, then it's possible that another
1722 * RPC was in flight at the same time and it might put stale attributes
1723 * in the cache. In that case, we invalidate the attributes and set
1724 * the attribute cache XID to guarantee that newer attributes will
1728 nfs_get_xid(&nextxid
);
1729 if (nextxid
!= (xid
+ 1)) {
1730 np
->n_xid
= nextxid
;
1731 NATTRINVALIDATE(np
);
1735 nfs_node_unlock(np
);
1737 nfsm_chain_cleanup(&nmreq
);
1738 nfsm_chain_cleanup(&nmrep
);
1739 if ((setattr_error
== EINVAL
) && VATTR_IS_ACTIVE(vap
, va_acl
) && VATTR_IS_ACTIVE(vap
, va_mode
) && !NMFLAG(nmp
, ACLONLY
)) {
1741 * Some server's may not like ACL/mode combos that get sent.
1742 * If it looks like that's what the server choked on, try setting
1743 * just the ACL and not the mode (unless it looks like everything
1744 * but mode was already successfully set).
1746 if (((bitmap
[0] & setbitmap
[0]) != bitmap
[0]) ||
1747 ((bitmap
[1] & (setbitmap
[1] | NFS_FATTR_MODE
)) != bitmap
[1])) {
1748 VATTR_CLEAR_ACTIVE(vap
, va_mode
);
1757 * Wait for any pending recovery to complete.
1760 nfs_mount_state_wait_for_recovery(struct nfsmount
*nmp
)
1762 struct timespec ts
= { 1, 0 };
1763 int error
= 0, slpflag
= NMFLAG(nmp
, INTR
) ? PCATCH
: 0;
1765 lck_mtx_lock(&nmp
->nm_lock
);
1766 while (nmp
->nm_state
& NFSSTA_RECOVER
) {
1767 if ((error
= nfs_sigintr(nmp
, NULL
, current_thread(), 1))) {
1770 nfs_mount_sock_thread_wake(nmp
);
1771 msleep(&nmp
->nm_state
, &nmp
->nm_lock
, slpflag
| (PZERO
- 1), "nfsrecoverwait", &ts
);
1774 lck_mtx_unlock(&nmp
->nm_lock
);
1780 * We're about to use/manipulate NFS mount's open/lock state.
1781 * Wait for any pending state recovery to complete, then
1782 * mark the state as being in use (which will hold off
1783 * the recovery thread until we're done).
1786 nfs_mount_state_in_use_start(struct nfsmount
*nmp
, thread_t thd
)
1788 struct timespec ts
= { 1, 0 };
1789 int error
= 0, slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
1791 if (nfs_mount_gone(nmp
)) {
1794 lck_mtx_lock(&nmp
->nm_lock
);
1795 if (nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
)) {
1796 lck_mtx_unlock(&nmp
->nm_lock
);
1799 while (nmp
->nm_state
& NFSSTA_RECOVER
) {
1800 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 1))) {
1803 nfs_mount_sock_thread_wake(nmp
);
1804 msleep(&nmp
->nm_state
, &nmp
->nm_lock
, slpflag
| (PZERO
- 1), "nfsrecoverwait", &ts
);
1808 nmp
->nm_stateinuse
++;
1810 lck_mtx_unlock(&nmp
->nm_lock
);
1816 * We're done using/manipulating the NFS mount's open/lock
1817 * state. If the given error indicates that recovery should
1818 * be performed, we'll initiate recovery.
1821 nfs_mount_state_in_use_end(struct nfsmount
*nmp
, int error
)
1823 int restart
= nfs_mount_state_error_should_restart(error
);
1825 if (nfs_mount_gone(nmp
)) {
1828 lck_mtx_lock(&nmp
->nm_lock
);
1829 if (restart
&& (error
!= NFSERR_OLD_STATEID
) && (error
!= NFSERR_GRACE
)) {
1830 printf("nfs_mount_state_in_use_end: error %d, initiating recovery for %s, 0x%x\n",
1831 error
, vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nmp
->nm_stategenid
);
1832 nfs_need_recover(nmp
, error
);
1834 if (nmp
->nm_stateinuse
> 0) {
1835 nmp
->nm_stateinuse
--;
1837 panic("NFS mount state in use count underrun");
1839 if (!nmp
->nm_stateinuse
&& (nmp
->nm_state
& NFSSTA_RECOVER
)) {
1840 wakeup(&nmp
->nm_stateinuse
);
1842 lck_mtx_unlock(&nmp
->nm_lock
);
1843 if (error
== NFSERR_GRACE
) {
1844 tsleep(&nmp
->nm_state
, (PZERO
- 1), "nfsgrace", 2 * hz
);
1851 * Does the error mean we should restart/redo a state-related operation?
1854 nfs_mount_state_error_should_restart(int error
)
1857 case NFSERR_STALE_STATEID
:
1858 case NFSERR_STALE_CLIENTID
:
1859 case NFSERR_ADMIN_REVOKED
:
1860 case NFSERR_EXPIRED
:
1861 case NFSERR_OLD_STATEID
:
1862 case NFSERR_BAD_STATEID
:
1870 * In some cases we may want to limit how many times we restart a
1871 * state-related operation - e.g. we're repeatedly getting NFSERR_GRACE.
1872 * Base the limit on the lease (as long as it's not too short).
1875 nfs_mount_state_max_restarts(struct nfsmount
*nmp
)
1877 return MAX(nmp
->nm_fsattr
.nfsa_lease
, 60);
1881 * Does the error mean we probably lost a delegation?
1884 nfs_mount_state_error_delegation_lost(int error
)
1887 case NFSERR_STALE_STATEID
:
1888 case NFSERR_ADMIN_REVOKED
:
1889 case NFSERR_EXPIRED
:
1890 case NFSERR_OLD_STATEID
:
1891 case NFSERR_BAD_STATEID
:
1892 case NFSERR_GRACE
: /* ugh! (stupid) RFC 3530 specifically disallows CLAIM_DELEGATE_CUR during grace period? */
1900 * Mark an NFS node's open state as busy.
1903 nfs_open_state_set_busy(nfsnode_t np
, thread_t thd
)
1905 struct nfsmount
*nmp
;
1906 struct timespec ts
= {2, 0};
1907 int error
= 0, slpflag
;
1910 if (nfs_mount_gone(nmp
)) {
1913 slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
1915 lck_mtx_lock(&np
->n_openlock
);
1916 while (np
->n_openflags
& N_OPENBUSY
) {
1917 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0))) {
1920 np
->n_openflags
|= N_OPENWANT
;
1921 msleep(&np
->n_openflags
, &np
->n_openlock
, slpflag
, "nfs_open_state_set_busy", &ts
);
1925 np
->n_openflags
|= N_OPENBUSY
;
1927 lck_mtx_unlock(&np
->n_openlock
);
1933 * Clear an NFS node's open state busy flag and wake up
1934 * anyone wanting it.
1937 nfs_open_state_clear_busy(nfsnode_t np
)
1941 lck_mtx_lock(&np
->n_openlock
);
1942 if (!(np
->n_openflags
& N_OPENBUSY
)) {
1943 panic("nfs_open_state_clear_busy");
1945 wanted
= (np
->n_openflags
& N_OPENWANT
);
1946 np
->n_openflags
&= ~(N_OPENBUSY
| N_OPENWANT
);
1947 lck_mtx_unlock(&np
->n_openlock
);
1949 wakeup(&np
->n_openflags
);
1954 * Search a mount's open owner list for the owner for this credential.
1955 * If not found and "alloc" is set, then allocate a new one.
1957 struct nfs_open_owner
*
1958 nfs_open_owner_find(struct nfsmount
*nmp
, kauth_cred_t cred
, int alloc
)
1960 uid_t uid
= kauth_cred_getuid(cred
);
1961 struct nfs_open_owner
*noop
, *newnoop
= NULL
;
1964 lck_mtx_lock(&nmp
->nm_lock
);
1965 TAILQ_FOREACH(noop
, &nmp
->nm_open_owners
, noo_link
) {
1966 if (kauth_cred_getuid(noop
->noo_cred
) == uid
) {
1971 if (!noop
&& !newnoop
&& alloc
) {
1972 lck_mtx_unlock(&nmp
->nm_lock
);
1973 MALLOC(newnoop
, struct nfs_open_owner
*, sizeof(struct nfs_open_owner
), M_TEMP
, M_WAITOK
);
1977 bzero(newnoop
, sizeof(*newnoop
));
1978 lck_mtx_init(&newnoop
->noo_lock
, nfs_open_grp
, LCK_ATTR_NULL
);
1979 newnoop
->noo_mount
= nmp
;
1980 kauth_cred_ref(cred
);
1981 newnoop
->noo_cred
= cred
;
1982 newnoop
->noo_name
= OSAddAtomic(1, &nfs_open_owner_seqnum
);
1983 TAILQ_INIT(&newnoop
->noo_opens
);
1986 if (!noop
&& newnoop
) {
1987 newnoop
->noo_flags
|= NFS_OPEN_OWNER_LINK
;
1988 os_ref_init(&newnoop
->noo_refcnt
, NULL
);
1989 TAILQ_INSERT_HEAD(&nmp
->nm_open_owners
, newnoop
, noo_link
);
1992 lck_mtx_unlock(&nmp
->nm_lock
);
1994 if (newnoop
&& (noop
!= newnoop
)) {
1995 nfs_open_owner_destroy(newnoop
);
1999 nfs_open_owner_ref(noop
);
2006 * destroy an open owner that's no longer needed
2009 nfs_open_owner_destroy(struct nfs_open_owner
*noop
)
2011 if (noop
->noo_cred
) {
2012 kauth_cred_unref(&noop
->noo_cred
);
2014 lck_mtx_destroy(&noop
->noo_lock
, nfs_open_grp
);
2019 * acquire a reference count on an open owner
2022 nfs_open_owner_ref(struct nfs_open_owner
*noop
)
2024 lck_mtx_lock(&noop
->noo_lock
);
2025 os_ref_retain_locked(&noop
->noo_refcnt
);
2026 lck_mtx_unlock(&noop
->noo_lock
);
2030 * drop a reference count on an open owner and destroy it if
2031 * it is no longer referenced and no longer on the mount's list.
2034 nfs_open_owner_rele(struct nfs_open_owner
*noop
)
2036 os_ref_count_t newcount
;
2038 lck_mtx_lock(&noop
->noo_lock
);
2039 if (os_ref_get_count(&noop
->noo_refcnt
) < 1) {
2040 panic("nfs_open_owner_rele: no refcnt");
2042 newcount
= os_ref_release_locked(&noop
->noo_refcnt
);
2043 if (!newcount
&& (noop
->noo_flags
& NFS_OPEN_OWNER_BUSY
)) {
2044 panic("nfs_open_owner_rele: busy");
2046 /* XXX we may potentially want to clean up idle/unused open owner structures */
2047 if (newcount
|| (noop
->noo_flags
& NFS_OPEN_OWNER_LINK
)) {
2048 lck_mtx_unlock(&noop
->noo_lock
);
2051 /* owner is no longer referenced or linked to mount, so destroy it */
2052 lck_mtx_unlock(&noop
->noo_lock
);
2053 nfs_open_owner_destroy(noop
);
2057 * Mark an open owner as busy because we are about to
2058 * start an operation that uses and updates open owner state.
2061 nfs_open_owner_set_busy(struct nfs_open_owner
*noop
, thread_t thd
)
2063 struct nfsmount
*nmp
;
2064 struct timespec ts
= {2, 0};
2065 int error
= 0, slpflag
;
2067 nmp
= noop
->noo_mount
;
2068 if (nfs_mount_gone(nmp
)) {
2071 slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
2073 lck_mtx_lock(&noop
->noo_lock
);
2074 while (noop
->noo_flags
& NFS_OPEN_OWNER_BUSY
) {
2075 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0))) {
2078 noop
->noo_flags
|= NFS_OPEN_OWNER_WANT
;
2079 msleep(noop
, &noop
->noo_lock
, slpflag
, "nfs_open_owner_set_busy", &ts
);
2083 noop
->noo_flags
|= NFS_OPEN_OWNER_BUSY
;
2085 lck_mtx_unlock(&noop
->noo_lock
);
2091 * Clear the busy flag on an open owner and wake up anyone waiting
2095 nfs_open_owner_clear_busy(struct nfs_open_owner
*noop
)
2099 lck_mtx_lock(&noop
->noo_lock
);
2100 if (!(noop
->noo_flags
& NFS_OPEN_OWNER_BUSY
)) {
2101 panic("nfs_open_owner_clear_busy");
2103 wanted
= (noop
->noo_flags
& NFS_OPEN_OWNER_WANT
);
2104 noop
->noo_flags
&= ~(NFS_OPEN_OWNER_BUSY
| NFS_OPEN_OWNER_WANT
);
2105 lck_mtx_unlock(&noop
->noo_lock
);
2112 * Given an open/lock owner and an error code, increment the
2113 * sequence ID if appropriate.
2116 nfs_owner_seqid_increment(struct nfs_open_owner
*noop
, struct nfs_lock_owner
*nlop
, int error
)
2119 case NFSERR_STALE_CLIENTID
:
2120 case NFSERR_STALE_STATEID
:
2121 case NFSERR_OLD_STATEID
:
2122 case NFSERR_BAD_STATEID
:
2123 case NFSERR_BAD_SEQID
:
2125 case NFSERR_RESOURCE
:
2126 case NFSERR_NOFILEHANDLE
:
2127 /* do not increment the open seqid on these errors */
2139 * Search a node's open file list for any conflicts with this request.
2140 * Also find this open owner's open file structure.
2141 * If not found and "alloc" is set, then allocate one.
2146 struct nfs_open_owner
*noop
,
2147 struct nfs_open_file
**nofpp
,
2148 uint32_t accessMode
,
2153 return nfs_open_file_find_internal(np
, noop
, nofpp
, accessMode
, denyMode
, alloc
);
2157 * Internally, allow using a provisional nodeless nofp (passed in via *nofpp)
2158 * if an existing one is not found. This is used in "create" scenarios to
2159 * officially add the provisional nofp to the node once the node is created.
2162 nfs_open_file_find_internal(
2164 struct nfs_open_owner
*noop
,
2165 struct nfs_open_file
**nofpp
,
2166 uint32_t accessMode
,
2170 struct nfs_open_file
*nofp
= NULL
, *nofp2
, *newnofp
= NULL
;
2176 lck_mtx_lock(&np
->n_openlock
);
2177 TAILQ_FOREACH(nofp2
, &np
->n_opens
, nof_link
) {
2178 if (nofp2
->nof_owner
== noop
) {
2184 if ((accessMode
& nofp2
->nof_deny
) || (denyMode
& nofp2
->nof_access
)) {
2185 /* This request conflicts with an existing open on this client. */
2186 lck_mtx_unlock(&np
->n_openlock
);
2192 * If this open owner doesn't have an open
2193 * file structure yet, we create one for it.
2195 if (!nofp
&& !*nofpp
&& !newnofp
&& alloc
) {
2196 lck_mtx_unlock(&np
->n_openlock
);
2198 MALLOC(newnofp
, struct nfs_open_file
*, sizeof(struct nfs_open_file
), M_TEMP
, M_WAITOK
);
2202 bzero(newnofp
, sizeof(*newnofp
));
2203 lck_mtx_init(&newnofp
->nof_lock
, nfs_open_grp
, LCK_ATTR_NULL
);
2204 newnofp
->nof_owner
= noop
;
2205 nfs_open_owner_ref(noop
);
2206 newnofp
->nof_np
= np
;
2207 lck_mtx_lock(&noop
->noo_lock
);
2208 TAILQ_INSERT_HEAD(&noop
->noo_opens
, newnofp
, nof_oolink
);
2209 lck_mtx_unlock(&noop
->noo_lock
);
2216 (*nofpp
)->nof_np
= np
;
2222 TAILQ_INSERT_HEAD(&np
->n_opens
, nofp
, nof_link
);
2226 lck_mtx_unlock(&np
->n_openlock
);
2229 if (alloc
&& newnofp
&& (nofp
!= newnofp
)) {
2230 nfs_open_file_destroy(newnofp
);
2234 return nofp
? 0 : ESRCH
;
2238 * Destroy an open file structure.
2241 nfs_open_file_destroy(struct nfs_open_file
*nofp
)
2243 lck_mtx_lock(&nofp
->nof_owner
->noo_lock
);
2244 TAILQ_REMOVE(&nofp
->nof_owner
->noo_opens
, nofp
, nof_oolink
);
2245 lck_mtx_unlock(&nofp
->nof_owner
->noo_lock
);
2246 nfs_open_owner_rele(nofp
->nof_owner
);
2247 lck_mtx_destroy(&nofp
->nof_lock
, nfs_open_grp
);
2252 * Mark an open file as busy because we are about to
2253 * start an operation that uses and updates open file state.
2256 nfs_open_file_set_busy(struct nfs_open_file
*nofp
, thread_t thd
)
2258 struct nfsmount
*nmp
;
2259 struct timespec ts
= {2, 0};
2260 int error
= 0, slpflag
;
2262 nmp
= nofp
->nof_owner
->noo_mount
;
2263 if (nfs_mount_gone(nmp
)) {
2266 slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
2268 lck_mtx_lock(&nofp
->nof_lock
);
2269 while (nofp
->nof_flags
& NFS_OPEN_FILE_BUSY
) {
2270 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0))) {
2273 nofp
->nof_flags
|= NFS_OPEN_FILE_WANT
;
2274 msleep(nofp
, &nofp
->nof_lock
, slpflag
, "nfs_open_file_set_busy", &ts
);
2278 nofp
->nof_flags
|= NFS_OPEN_FILE_BUSY
;
2280 lck_mtx_unlock(&nofp
->nof_lock
);
2286 * Clear the busy flag on an open file and wake up anyone waiting
2290 nfs_open_file_clear_busy(struct nfs_open_file
*nofp
)
2294 lck_mtx_lock(&nofp
->nof_lock
);
2295 if (!(nofp
->nof_flags
& NFS_OPEN_FILE_BUSY
)) {
2296 panic("nfs_open_file_clear_busy");
2298 wanted
= (nofp
->nof_flags
& NFS_OPEN_FILE_WANT
);
2299 nofp
->nof_flags
&= ~(NFS_OPEN_FILE_BUSY
| NFS_OPEN_FILE_WANT
);
2300 lck_mtx_unlock(&nofp
->nof_lock
);
2307 * Add the open state for the given access/deny modes to this open file.
2310 nfs_open_file_add_open(struct nfs_open_file
*nofp
, uint32_t accessMode
, uint32_t denyMode
, int delegated
)
2312 lck_mtx_lock(&nofp
->nof_lock
);
2313 nofp
->nof_access
|= accessMode
;
2314 nofp
->nof_deny
|= denyMode
;
2317 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2318 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2320 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2322 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2325 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2326 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2328 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2330 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2331 nofp
->nof_d_rw_dw
++;
2333 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2334 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2335 nofp
->nof_d_r_drw
++;
2336 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2337 nofp
->nof_d_w_drw
++;
2338 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2339 nofp
->nof_d_rw_drw
++;
2343 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2344 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2346 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2348 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2351 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2352 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2354 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2356 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2359 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2360 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2362 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2364 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2370 nofp
->nof_opencnt
++;
2371 lck_mtx_unlock(&nofp
->nof_lock
);
2375 * Find which particular open combo will be closed and report what
2376 * the new modes will be and whether the open was delegated.
2379 nfs_open_file_remove_open_find(
2380 struct nfs_open_file
*nofp
,
2381 uint32_t accessMode
,
2383 uint32_t *newAccessMode
,
2384 uint32_t *newDenyMode
,
2388 * Calculate new modes: a mode bit gets removed when there's only
2389 * one count in all the corresponding counts
2391 *newAccessMode
= nofp
->nof_access
;
2392 *newDenyMode
= nofp
->nof_deny
;
2394 if ((accessMode
& NFS_OPEN_SHARE_ACCESS_READ
) &&
2395 (nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_READ
) &&
2396 ((nofp
->nof_r
+ nofp
->nof_d_r
+
2397 nofp
->nof_rw
+ nofp
->nof_d_rw
+
2398 nofp
->nof_r_dw
+ nofp
->nof_d_r_dw
+
2399 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
+
2400 nofp
->nof_r_drw
+ nofp
->nof_d_r_drw
+
2401 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
) == 1)) {
2402 *newAccessMode
&= ~NFS_OPEN_SHARE_ACCESS_READ
;
2404 if ((accessMode
& NFS_OPEN_SHARE_ACCESS_WRITE
) &&
2405 (nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_WRITE
) &&
2406 ((nofp
->nof_w
+ nofp
->nof_d_w
+
2407 nofp
->nof_rw
+ nofp
->nof_d_rw
+
2408 nofp
->nof_w_dw
+ nofp
->nof_d_w_dw
+
2409 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
+
2410 nofp
->nof_w_drw
+ nofp
->nof_d_w_drw
+
2411 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
) == 1)) {
2412 *newAccessMode
&= ~NFS_OPEN_SHARE_ACCESS_WRITE
;
2414 if ((denyMode
& NFS_OPEN_SHARE_DENY_READ
) &&
2415 (nofp
->nof_deny
& NFS_OPEN_SHARE_DENY_READ
) &&
2416 ((nofp
->nof_r_drw
+ nofp
->nof_d_r_drw
+
2417 nofp
->nof_w_drw
+ nofp
->nof_d_w_drw
+
2418 nofp
->nof_rw_drw
+ nofp
->nof_d_rw_drw
) == 1)) {
2419 *newDenyMode
&= ~NFS_OPEN_SHARE_DENY_READ
;
2421 if ((denyMode
& NFS_OPEN_SHARE_DENY_WRITE
) &&
2422 (nofp
->nof_deny
& NFS_OPEN_SHARE_DENY_WRITE
) &&
2423 ((nofp
->nof_r_drw
+ nofp
->nof_d_r_drw
+
2424 nofp
->nof_w_drw
+ nofp
->nof_d_w_drw
+
2425 nofp
->nof_rw_drw
+ nofp
->nof_d_rw_drw
+
2426 nofp
->nof_r_dw
+ nofp
->nof_d_r_dw
+
2427 nofp
->nof_w_dw
+ nofp
->nof_d_w_dw
+
2428 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
) == 1)) {
2429 *newDenyMode
&= ~NFS_OPEN_SHARE_DENY_WRITE
;
2432 /* Find the corresponding open access/deny mode counter. */
2433 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2434 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2435 *delegated
= (nofp
->nof_d_r
!= 0);
2436 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2437 *delegated
= (nofp
->nof_d_w
!= 0);
2438 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2439 *delegated
= (nofp
->nof_d_rw
!= 0);
2443 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2444 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2445 *delegated
= (nofp
->nof_d_r_dw
!= 0);
2446 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2447 *delegated
= (nofp
->nof_d_w_dw
!= 0);
2448 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2449 *delegated
= (nofp
->nof_d_rw_dw
!= 0);
2453 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2454 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2455 *delegated
= (nofp
->nof_d_r_drw
!= 0);
2456 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2457 *delegated
= (nofp
->nof_d_w_drw
!= 0);
2458 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2459 *delegated
= (nofp
->nof_d_rw_drw
!= 0);
2467 * Remove the open state for the given access/deny modes to this open file.
2470 nfs_open_file_remove_open(struct nfs_open_file
*nofp
, uint32_t accessMode
, uint32_t denyMode
)
2472 uint32_t newAccessMode
, newDenyMode
;
2475 lck_mtx_lock(&nofp
->nof_lock
);
2476 nfs_open_file_remove_open_find(nofp
, accessMode
, denyMode
, &newAccessMode
, &newDenyMode
, &delegated
);
2478 /* Decrement the corresponding open access/deny mode counter. */
2479 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2480 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2482 if (nofp
->nof_d_r
== 0) {
2483 NP(nofp
->nof_np
, "nfs: open(R) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2488 if (nofp
->nof_r
== 0) {
2489 NP(nofp
->nof_np
, "nfs: open(R) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2494 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2496 if (nofp
->nof_d_w
== 0) {
2497 NP(nofp
->nof_np
, "nfs: open(W) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2502 if (nofp
->nof_w
== 0) {
2503 NP(nofp
->nof_np
, "nfs: open(W) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2508 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2510 if (nofp
->nof_d_rw
== 0) {
2511 NP(nofp
->nof_np
, "nfs: open(RW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2516 if (nofp
->nof_rw
== 0) {
2517 NP(nofp
->nof_np
, "nfs: open(RW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2523 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2524 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2526 if (nofp
->nof_d_r_dw
== 0) {
2527 NP(nofp
->nof_np
, "nfs: open(R,DW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2532 if (nofp
->nof_r_dw
== 0) {
2533 NP(nofp
->nof_np
, "nfs: open(R,DW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2538 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2540 if (nofp
->nof_d_w_dw
== 0) {
2541 NP(nofp
->nof_np
, "nfs: open(W,DW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2546 if (nofp
->nof_w_dw
== 0) {
2547 NP(nofp
->nof_np
, "nfs: open(W,DW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2552 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2554 if (nofp
->nof_d_rw_dw
== 0) {
2555 NP(nofp
->nof_np
, "nfs: open(RW,DW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2557 nofp
->nof_d_rw_dw
--;
2560 if (nofp
->nof_rw_dw
== 0) {
2561 NP(nofp
->nof_np
, "nfs: open(RW,DW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2567 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2568 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2570 if (nofp
->nof_d_r_drw
== 0) {
2571 NP(nofp
->nof_np
, "nfs: open(R,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2573 nofp
->nof_d_r_drw
--;
2576 if (nofp
->nof_r_drw
== 0) {
2577 NP(nofp
->nof_np
, "nfs: open(R,DRW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2582 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2584 if (nofp
->nof_d_w_drw
== 0) {
2585 NP(nofp
->nof_np
, "nfs: open(W,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2587 nofp
->nof_d_w_drw
--;
2590 if (nofp
->nof_w_drw
== 0) {
2591 NP(nofp
->nof_np
, "nfs: open(W,DRW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2596 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2598 if (nofp
->nof_d_rw_drw
== 0) {
2599 NP(nofp
->nof_np
, "nfs: open(RW,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2601 nofp
->nof_d_rw_drw
--;
2604 if (nofp
->nof_rw_drw
== 0) {
2605 NP(nofp
->nof_np
, "nfs: open(RW,DRW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2613 /* update the modes */
2614 nofp
->nof_access
= newAccessMode
;
2615 nofp
->nof_deny
= newDenyMode
;
2616 nofp
->nof_opencnt
--;
2617 lck_mtx_unlock(&nofp
->nof_lock
);
2622 * Get the current (delegation, lock, open, default) stateid for this node.
2623 * If node has a delegation, use that stateid.
2624 * If pid has a lock, use the lockowner's stateid.
2625 * Or use the open file's stateid.
2626 * If no open file, use a default stateid of all ones.
2629 nfs_get_stateid(nfsnode_t np
, thread_t thd
, kauth_cred_t cred
, nfs_stateid
*sid
)
2631 struct nfsmount
*nmp
= NFSTONMP(np
);
2632 proc_t p
= thd
? get_bsdthreadtask_info(thd
) : current_proc(); // XXX async I/O requests don't have a thread
2633 struct nfs_open_owner
*noop
= NULL
;
2634 struct nfs_open_file
*nofp
= NULL
;
2635 struct nfs_lock_owner
*nlop
= NULL
;
2636 nfs_stateid
*s
= NULL
;
2638 if (np
->n_openflags
& N_DELEG_MASK
) {
2639 s
= &np
->n_dstateid
;
2642 nlop
= nfs_lock_owner_find(np
, p
, 0);
2644 if (nlop
&& !TAILQ_EMPTY(&nlop
->nlo_locks
)) {
2645 /* we hold locks, use lock stateid */
2646 s
= &nlop
->nlo_stateid
;
2647 } else if (((noop
= nfs_open_owner_find(nmp
, cred
, 0))) &&
2648 (nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 0) == 0) &&
2649 !(nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) &&
2651 /* we (should) have the file open, use open stateid */
2652 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
2653 nfs4_reopen(nofp
, thd
);
2655 if (!(nofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
2656 s
= &nofp
->nof_stateid
;
2662 sid
->seqid
= s
->seqid
;
2663 sid
->other
[0] = s
->other
[0];
2664 sid
->other
[1] = s
->other
[1];
2665 sid
->other
[2] = s
->other
[2];
2667 /* named attributes may not have a stateid for reads, so don't complain for them */
2668 if (!(np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
2669 NP(np
, "nfs_get_stateid: no stateid");
2671 sid
->seqid
= sid
->other
[0] = sid
->other
[1] = sid
->other
[2] = 0xffffffff;
2674 nfs_lock_owner_rele(nlop
);
2677 nfs_open_owner_rele(noop
);
2683 * When we have a delegation, we may be able to perform the OPEN locally.
2684 * Perform the OPEN by checking the delegation ACE and/or checking via ACCESS.
2687 nfs4_open_delegated(
2689 struct nfs_open_file
*nofp
,
2690 uint32_t accessMode
,
2694 int error
= 0, ismember
, readtoo
= 0, authorized
= 0;
2696 struct kauth_acl_eval eval
;
2697 kauth_cred_t cred
= vfs_context_ucred(ctx
);
2699 if (!(accessMode
& NFS_OPEN_SHARE_ACCESS_READ
)) {
2701 * Try to open it for read access too,
2702 * so the buffer cache can read data.
2705 accessMode
|= NFS_OPEN_SHARE_ACCESS_READ
;
2710 if (accessMode
& NFS_OPEN_SHARE_ACCESS_READ
) {
2711 action
|= KAUTH_VNODE_READ_DATA
;
2713 if (accessMode
& NFS_OPEN_SHARE_ACCESS_WRITE
) {
2714 action
|= KAUTH_VNODE_WRITE_DATA
;
2717 /* evaluate ACE (if we have one) */
2718 if (np
->n_dace
.ace_flags
) {
2719 eval
.ae_requested
= action
;
2720 eval
.ae_acl
= &np
->n_dace
;
2722 eval
.ae_options
= 0;
2723 if (np
->n_vattr
.nva_uid
== kauth_cred_getuid(cred
)) {
2724 eval
.ae_options
|= KAUTH_AEVAL_IS_OWNER
;
2726 error
= kauth_cred_ismember_gid(cred
, np
->n_vattr
.nva_gid
, &ismember
);
2727 if (!error
&& ismember
) {
2728 eval
.ae_options
|= KAUTH_AEVAL_IN_GROUP
;
2731 eval
.ae_exp_gall
= KAUTH_VNODE_GENERIC_ALL_BITS
;
2732 eval
.ae_exp_gread
= KAUTH_VNODE_GENERIC_READ_BITS
;
2733 eval
.ae_exp_gwrite
= KAUTH_VNODE_GENERIC_WRITE_BITS
;
2734 eval
.ae_exp_gexec
= KAUTH_VNODE_GENERIC_EXECUTE_BITS
;
2736 error
= kauth_acl_evaluate(cred
, &eval
);
2738 if (!error
&& (eval
.ae_result
== KAUTH_RESULT_ALLOW
)) {
2744 /* need to ask the server via ACCESS */
2745 struct vnop_access_args naa
;
2746 naa
.a_desc
= &vnop_access_desc
;
2747 naa
.a_vp
= NFSTOV(np
);
2748 naa
.a_action
= action
;
2749 naa
.a_context
= ctx
;
2750 if (!(error
= nfs_vnop_access(&naa
))) {
2757 /* try again without the extra read access */
2758 accessMode
&= ~NFS_OPEN_SHARE_ACCESS_READ
;
2762 return error
? error
: EACCES
;
2765 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, 1);
2772 * Open a file with the given access/deny modes.
2774 * If we have a delegation, we may be able to handle the open locally.
2775 * Otherwise, we will always send the open RPC even if this open's mode is
2776 * a subset of all the existing opens. This makes sure that we will always
2777 * be able to do a downgrade to any of the open modes.
2779 * Note: local conflicts should have already been checked in nfs_open_file_find().
2784 struct nfs_open_file
*nofp
,
2785 uint32_t accessMode
,
2789 vnode_t vp
= NFSTOV(np
);
2791 struct componentname cn
;
2792 const char *vname
= NULL
;
2794 char smallname
[128];
2795 char *filename
= NULL
;
2796 int error
= 0, readtoo
= 0;
2799 * We can handle the OPEN ourselves if we have a delegation,
2800 * unless it's a read delegation and the open is asking for
2801 * either write access or deny read. We also don't bother to
2802 * use the delegation if it's being returned.
2804 if (np
->n_openflags
& N_DELEG_MASK
) {
2805 if ((error
= nfs_open_state_set_busy(np
, vfs_context_thread(ctx
)))) {
2808 if ((np
->n_openflags
& N_DELEG_MASK
) && !(np
->n_openflags
& N_DELEG_RETURN
) &&
2809 (((np
->n_openflags
& N_DELEG_MASK
) == N_DELEG_WRITE
) ||
2810 (!(accessMode
& NFS_OPEN_SHARE_ACCESS_WRITE
) && !(denyMode
& NFS_OPEN_SHARE_DENY_READ
)))) {
2811 error
= nfs4_open_delegated(np
, nofp
, accessMode
, denyMode
, ctx
);
2812 nfs_open_state_clear_busy(np
);
2815 nfs_open_state_clear_busy(np
);
2819 * [sigh] We can't trust VFS to get the parent right for named
2820 * attribute nodes. (It likes to reparent the nodes after we've
2821 * created them.) Luckily we can probably get the right parent
2822 * from the n_parent we have stashed away.
2824 if ((np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) &&
2825 (((dvp
= np
->n_parent
)) && (error
= vnode_get(dvp
)))) {
2829 dvp
= vnode_getparent(vp
);
2831 vname
= vnode_getname(vp
);
2832 if (!dvp
|| !vname
) {
2838 filename
= &smallname
[0];
2839 namelen
= snprintf(filename
, sizeof(smallname
), "%s", vname
);
2840 if (namelen
>= sizeof(smallname
)) {
2841 MALLOC(filename
, char *, namelen
+ 1, M_TEMP
, M_WAITOK
);
2846 snprintf(filename
, namelen
+ 1, "%s", vname
);
2848 bzero(&cn
, sizeof(cn
));
2849 cn
.cn_nameptr
= filename
;
2850 cn
.cn_namelen
= namelen
;
2852 if (!(accessMode
& NFS_OPEN_SHARE_ACCESS_READ
)) {
2854 * Try to open it for read access too,
2855 * so the buffer cache can read data.
2858 accessMode
|= NFS_OPEN_SHARE_ACCESS_READ
;
2861 error
= nfs4_open_rpc(nofp
, ctx
, &cn
, NULL
, dvp
, &vp
, NFS_OPEN_NOCREATE
, accessMode
, denyMode
);
2863 if (!nfs_mount_state_error_should_restart(error
) &&
2864 (error
!= EINTR
) && (error
!= ERESTART
) && readtoo
) {
2865 /* try again without the extra read access */
2866 accessMode
&= ~NFS_OPEN_SHARE_ACCESS_READ
;
2872 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, 0);
2874 if (filename
&& (filename
!= &smallname
[0])) {
2875 FREE(filename
, M_TEMP
);
2878 vnode_putname(vname
);
2880 if (dvp
!= NULLVP
) {
2888 struct vnop_mmap_args
/* {
2889 * struct vnodeop_desc *a_desc;
2892 * vfs_context_t a_context;
2895 vfs_context_t ctx
= ap
->a_context
;
2896 vnode_t vp
= ap
->a_vp
;
2897 nfsnode_t np
= VTONFS(vp
);
2898 int error
= 0, accessMode
, denyMode
, delegated
;
2899 struct nfsmount
*nmp
;
2900 struct nfs_open_owner
*noop
= NULL
;
2901 struct nfs_open_file
*nofp
= NULL
;
2904 if (nfs_mount_gone(nmp
)) {
2908 if (!vnode_isreg(vp
) || !(ap
->a_fflags
& (PROT_READ
| PROT_WRITE
))) {
2911 if (np
->n_flag
& NREVOKE
) {
2916 * fflags contains some combination of: PROT_READ, PROT_WRITE
2917 * Since it's not possible to mmap() without having the file open for reading,
2918 * read access is always there (regardless if PROT_READ is not set).
2920 accessMode
= NFS_OPEN_SHARE_ACCESS_READ
;
2921 if (ap
->a_fflags
& PROT_WRITE
) {
2922 accessMode
|= NFS_OPEN_SHARE_ACCESS_WRITE
;
2924 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
2926 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1);
2932 error
= nfs_mount_state_in_use_start(nmp
, NULL
);
2934 nfs_open_owner_rele(noop
);
2937 if (np
->n_flag
& NREVOKE
) {
2939 nfs_mount_state_in_use_end(nmp
, 0);
2940 nfs_open_owner_rele(noop
);
2944 error
= nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 1);
2945 if (error
|| (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
))) {
2946 NP(np
, "nfs_vnop_mmap: no open file for owner, error %d, %d", error
, kauth_cred_getuid(noop
->noo_cred
));
2949 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
2950 nfs_mount_state_in_use_end(nmp
, 0);
2951 error
= nfs4_reopen(nofp
, NULL
);
2958 error
= nfs_open_file_set_busy(nofp
, NULL
);
2966 * The open reference for mmap must mirror an existing open because
2967 * we may need to reclaim it after the file is closed.
2968 * So grab another open count matching the accessMode passed in.
2969 * If we already had an mmap open, prefer read/write without deny mode.
2970 * This means we may have to drop the current mmap open first.
2972 * N.B. We should have an open for the mmap, because, mmap was
2973 * called on an open descriptor, or we've created an open for read
2974 * from reading the first page for execve. However, if we piggy
2975 * backed on an existing NFS_OPEN_SHARE_ACCESS_READ/NFS_OPEN_SHARE_DENY_NONE
2976 * that open may have closed.
2979 if (!(nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_READ
)) {
2980 if (nofp
->nof_flags
& NFS_OPEN_FILE_NEEDCLOSE
) {
2981 /* We shouldn't get here. We've already open the file for execve */
2982 NP(np
, "nfs_vnop_mmap: File already needs close access: 0x%x, cred: %d thread: %lld",
2983 nofp
->nof_access
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
), thread_tid(vfs_context_thread(ctx
)));
2986 * mmapings for execve are just for read. Get out with EPERM if the accessMode is not ACCESS_READ
2987 * or the access would be denied. Other accesses should have an open descriptor for the mapping.
2989 if (accessMode
!= NFS_OPEN_SHARE_ACCESS_READ
|| (accessMode
& nofp
->nof_deny
)) {
2990 /* not asking for just read access -> fail */
2994 /* we don't have the file open, so open it for read access */
2995 if (nmp
->nm_vers
< NFS_VER4
) {
2996 /* NFS v2/v3 opens are always allowed - so just add it. */
2997 nfs_open_file_add_open(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, 0);
3000 error
= nfs4_open(np
, nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, ctx
);
3003 nofp
->nof_flags
|= NFS_OPEN_FILE_NEEDCLOSE
;
3010 /* determine deny mode for open */
3011 if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
3012 if (nofp
->nof_d_rw
|| nofp
->nof_d_rw_dw
|| nofp
->nof_d_rw_drw
) {
3014 if (nofp
->nof_d_rw
) {
3015 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
3016 } else if (nofp
->nof_d_rw_dw
) {
3017 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
3018 } else if (nofp
->nof_d_rw_drw
) {
3019 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
3021 } else if (nofp
->nof_rw
|| nofp
->nof_rw_dw
|| nofp
->nof_rw_drw
) {
3024 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
3025 } else if (nofp
->nof_rw_dw
) {
3026 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
3027 } else if (nofp
->nof_rw_drw
) {
3028 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
3033 } else { /* NFS_OPEN_SHARE_ACCESS_READ */
3034 if (nofp
->nof_d_r
|| nofp
->nof_d_r_dw
|| nofp
->nof_d_r_drw
) {
3036 if (nofp
->nof_d_r
) {
3037 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
3038 } else if (nofp
->nof_d_r_dw
) {
3039 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
3040 } else if (nofp
->nof_d_r_drw
) {
3041 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
3043 } else if (nofp
->nof_r
|| nofp
->nof_r_dw
|| nofp
->nof_r_drw
) {
3046 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
3047 } else if (nofp
->nof_r_dw
) {
3048 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
3049 } else if (nofp
->nof_r_drw
) {
3050 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
3052 } else if (nofp
->nof_d_rw
|| nofp
->nof_d_rw_dw
|| nofp
->nof_d_rw_drw
) {
3054 * This clause and the one below is to co-opt a read write access
3055 * for a read only mmaping. We probably got here in that an
3056 * existing rw open for an executable file already exists.
3059 accessMode
= NFS_OPEN_SHARE_ACCESS_BOTH
;
3060 if (nofp
->nof_d_rw
) {
3061 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
3062 } else if (nofp
->nof_d_rw_dw
) {
3063 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
3064 } else if (nofp
->nof_d_rw_drw
) {
3065 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
3067 } else if (nofp
->nof_rw
|| nofp
->nof_rw_dw
|| nofp
->nof_rw_drw
) {
3069 accessMode
= NFS_OPEN_SHARE_ACCESS_BOTH
;
3071 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
3072 } else if (nofp
->nof_rw_dw
) {
3073 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
3074 } else if (nofp
->nof_rw_drw
) {
3075 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
3081 if (error
) { /* mmap mode without proper open mode */
3086 * If the existing mmap access is more than the new access OR the
3087 * existing access is the same and the existing deny mode is less,
3088 * then we'll stick with the existing mmap open mode.
3090 if ((nofp
->nof_mmap_access
> accessMode
) ||
3091 ((nofp
->nof_mmap_access
== accessMode
) && (nofp
->nof_mmap_deny
<= denyMode
))) {
3095 /* update mmap open mode */
3096 if (nofp
->nof_mmap_access
) {
3097 error
= nfs_close(np
, nofp
, nofp
->nof_mmap_access
, nofp
->nof_mmap_deny
, ctx
);
3099 if (!nfs_mount_state_error_should_restart(error
)) {
3100 NP(np
, "nfs_vnop_mmap: close of previous mmap mode failed: %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
3102 NP(np
, "nfs_vnop_mmap: update, close error %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
3105 nofp
->nof_mmap_access
= nofp
->nof_mmap_deny
= 0;
3108 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, delegated
);
3109 nofp
->nof_mmap_access
= accessMode
;
3110 nofp
->nof_mmap_deny
= denyMode
;
3114 nfs_open_file_clear_busy(nofp
);
3116 if (nfs_mount_state_in_use_end(nmp
, error
)) {
3121 nfs_open_owner_rele(noop
);
3126 nfs_node_lock_force(np
);
3127 if ((np
->n_flag
& NISMAPPED
) == 0) {
3128 np
->n_flag
|= NISMAPPED
;
3131 nfs_node_unlock(np
);
3133 lck_mtx_lock(&nmp
->nm_lock
);
3134 nmp
->nm_state
&= ~NFSSTA_SQUISHY
;
3135 nmp
->nm_curdeadtimeout
= nmp
->nm_deadtimeout
;
3136 if (nmp
->nm_curdeadtimeout
<= 0) {
3137 nmp
->nm_deadto_start
= 0;
3140 lck_mtx_unlock(&nmp
->nm_lock
);
3150 struct vnop_mnomap_args
/* {
3151 * struct vnodeop_desc *a_desc;
3153 * vfs_context_t a_context;
3156 vfs_context_t ctx
= ap
->a_context
;
3157 vnode_t vp
= ap
->a_vp
;
3158 nfsnode_t np
= VTONFS(vp
);
3159 struct nfsmount
*nmp
;
3160 struct nfs_open_file
*nofp
= NULL
;
3163 int is_mapped_flag
= 0;
3166 if (nfs_mount_gone(nmp
)) {
3170 nfs_node_lock_force(np
);
3171 if (np
->n_flag
& NISMAPPED
) {
3173 np
->n_flag
&= ~NISMAPPED
;
3175 nfs_node_unlock(np
);
3176 if (is_mapped_flag
) {
3177 lck_mtx_lock(&nmp
->nm_lock
);
3178 if (nmp
->nm_mappers
) {
3181 NP(np
, "nfs_vnop_mnomap: removing mmap reference from mount, but mount has no files mmapped");
3183 lck_mtx_unlock(&nmp
->nm_lock
);
3186 /* flush buffers/ubc before we drop the open (in case it's our last open) */
3187 nfs_flush(np
, MNT_WAIT
, vfs_context_thread(ctx
), V_IGNORE_WRITEERR
);
3188 if (UBCINFOEXISTS(vp
) && (size
= ubc_getsize(vp
))) {
3189 ubc_msync(vp
, 0, size
, NULL
, UBC_PUSHALL
| UBC_SYNC
);
3192 /* walk all open files and close all mmap opens */
3194 error
= nfs_mount_state_in_use_start(nmp
, NULL
);
3198 lck_mtx_lock(&np
->n_openlock
);
3199 TAILQ_FOREACH(nofp
, &np
->n_opens
, nof_link
) {
3200 if (!nofp
->nof_mmap_access
) {
3203 lck_mtx_unlock(&np
->n_openlock
);
3204 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
3205 nfs_mount_state_in_use_end(nmp
, 0);
3206 error
= nfs4_reopen(nofp
, NULL
);
3212 error
= nfs_open_file_set_busy(nofp
, NULL
);
3215 lck_mtx_lock(&np
->n_openlock
);
3218 if (nofp
->nof_mmap_access
) {
3219 error
= nfs_close(np
, nofp
, nofp
->nof_mmap_access
, nofp
->nof_mmap_deny
, ctx
);
3220 if (!nfs_mount_state_error_should_restart(error
)) {
3221 if (error
) { /* not a state-operation-restarting error, so just clear the access */
3222 NP(np
, "nfs_vnop_mnomap: close of mmap mode failed: %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
3224 nofp
->nof_mmap_access
= nofp
->nof_mmap_deny
= 0;
3227 NP(np
, "nfs_vnop_mnomap: error %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
3230 nfs_open_file_clear_busy(nofp
);
3231 nfs_mount_state_in_use_end(nmp
, error
);
3234 lck_mtx_unlock(&np
->n_openlock
);
3235 nfs_mount_state_in_use_end(nmp
, error
);
3240 * Search a node's lock owner list for the owner for this process.
3241 * If not found and "alloc" is set, then allocate a new one.
3243 struct nfs_lock_owner
*
3244 nfs_lock_owner_find(nfsnode_t np
, proc_t p
, int alloc
)
3246 pid_t pid
= proc_pid(p
);
3247 struct nfs_lock_owner
*nlop
, *newnlop
= NULL
;
3250 lck_mtx_lock(&np
->n_openlock
);
3251 TAILQ_FOREACH(nlop
, &np
->n_lock_owners
, nlo_link
) {
3252 os_ref_count_t newcount
;
3254 if (nlop
->nlo_pid
!= pid
) {
3257 if (timevalcmp(&nlop
->nlo_pid_start
, &p
->p_start
, ==)) {
3260 /* stale lock owner... reuse it if we can */
3261 if (os_ref_get_count(&nlop
->nlo_refcnt
)) {
3262 TAILQ_REMOVE(&np
->n_lock_owners
, nlop
, nlo_link
);
3263 nlop
->nlo_flags
&= ~NFS_LOCK_OWNER_LINK
;
3264 newcount
= os_ref_release_locked(&nlop
->nlo_refcnt
);
3265 lck_mtx_unlock(&np
->n_openlock
);
3268 nlop
->nlo_pid_start
= p
->p_start
;
3269 nlop
->nlo_seqid
= 0;
3270 nlop
->nlo_stategenid
= 0;
3274 if (!nlop
&& !newnlop
&& alloc
) {
3275 lck_mtx_unlock(&np
->n_openlock
);
3276 MALLOC(newnlop
, struct nfs_lock_owner
*, sizeof(struct nfs_lock_owner
), M_TEMP
, M_WAITOK
);
3280 bzero(newnlop
, sizeof(*newnlop
));
3281 lck_mtx_init(&newnlop
->nlo_lock
, nfs_open_grp
, LCK_ATTR_NULL
);
3282 newnlop
->nlo_pid
= pid
;
3283 newnlop
->nlo_pid_start
= p
->p_start
;
3284 newnlop
->nlo_name
= OSAddAtomic(1, &nfs_lock_owner_seqnum
);
3285 TAILQ_INIT(&newnlop
->nlo_locks
);
3288 if (!nlop
&& newnlop
) {
3289 newnlop
->nlo_flags
|= NFS_LOCK_OWNER_LINK
;
3290 os_ref_init(&newnlop
->nlo_refcnt
, NULL
);
3291 TAILQ_INSERT_HEAD(&np
->n_lock_owners
, newnlop
, nlo_link
);
3294 lck_mtx_unlock(&np
->n_openlock
);
3296 if (newnlop
&& (nlop
!= newnlop
)) {
3297 nfs_lock_owner_destroy(newnlop
);
3301 nfs_lock_owner_ref(nlop
);
3308 * destroy a lock owner that's no longer needed
3311 nfs_lock_owner_destroy(struct nfs_lock_owner
*nlop
)
3313 if (nlop
->nlo_open_owner
) {
3314 nfs_open_owner_rele(nlop
->nlo_open_owner
);
3315 nlop
->nlo_open_owner
= NULL
;
3317 lck_mtx_destroy(&nlop
->nlo_lock
, nfs_open_grp
);
3322 * acquire a reference count on a lock owner
3325 nfs_lock_owner_ref(struct nfs_lock_owner
*nlop
)
3327 lck_mtx_lock(&nlop
->nlo_lock
);
3328 os_ref_retain_locked(&nlop
->nlo_refcnt
);
3329 lck_mtx_unlock(&nlop
->nlo_lock
);
3333 * drop a reference count on a lock owner and destroy it if
3334 * it is no longer referenced and no longer on the mount's list.
3337 nfs_lock_owner_rele(struct nfs_lock_owner
*nlop
)
3339 os_ref_count_t newcount
;
3341 lck_mtx_lock(&nlop
->nlo_lock
);
3342 if (os_ref_get_count(&nlop
->nlo_refcnt
) < 1) {
3343 panic("nfs_lock_owner_rele: no refcnt");
3345 newcount
= os_ref_release_locked(&nlop
->nlo_refcnt
);
3346 if (!newcount
&& (nlop
->nlo_flags
& NFS_LOCK_OWNER_BUSY
)) {
3347 panic("nfs_lock_owner_rele: busy");
3349 /* XXX we may potentially want to clean up idle/unused lock owner structures */
3350 if (newcount
|| (nlop
->nlo_flags
& NFS_LOCK_OWNER_LINK
)) {
3351 lck_mtx_unlock(&nlop
->nlo_lock
);
3354 /* owner is no longer referenced or linked to mount, so destroy it */
3355 lck_mtx_unlock(&nlop
->nlo_lock
);
3356 nfs_lock_owner_destroy(nlop
);
3360 * Mark a lock owner as busy because we are about to
3361 * start an operation that uses and updates lock owner state.
3364 nfs_lock_owner_set_busy(struct nfs_lock_owner
*nlop
, thread_t thd
)
3366 struct nfsmount
*nmp
;
3367 struct timespec ts
= {2, 0};
3368 int error
= 0, slpflag
;
3370 nmp
= nlop
->nlo_open_owner
->noo_mount
;
3371 if (nfs_mount_gone(nmp
)) {
3374 slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
3376 lck_mtx_lock(&nlop
->nlo_lock
);
3377 while (nlop
->nlo_flags
& NFS_LOCK_OWNER_BUSY
) {
3378 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0))) {
3381 nlop
->nlo_flags
|= NFS_LOCK_OWNER_WANT
;
3382 msleep(nlop
, &nlop
->nlo_lock
, slpflag
, "nfs_lock_owner_set_busy", &ts
);
3386 nlop
->nlo_flags
|= NFS_LOCK_OWNER_BUSY
;
3388 lck_mtx_unlock(&nlop
->nlo_lock
);
3394 * Clear the busy flag on a lock owner and wake up anyone waiting
3398 nfs_lock_owner_clear_busy(struct nfs_lock_owner
*nlop
)
3402 lck_mtx_lock(&nlop
->nlo_lock
);
3403 if (!(nlop
->nlo_flags
& NFS_LOCK_OWNER_BUSY
)) {
3404 panic("nfs_lock_owner_clear_busy");
3406 wanted
= (nlop
->nlo_flags
& NFS_LOCK_OWNER_WANT
);
3407 nlop
->nlo_flags
&= ~(NFS_LOCK_OWNER_BUSY
| NFS_LOCK_OWNER_WANT
);
3408 lck_mtx_unlock(&nlop
->nlo_lock
);
3415 * Insert a held lock into a lock owner's sorted list.
3416 * (flock locks are always inserted at the head the list)
3419 nfs_lock_owner_insert_held_lock(struct nfs_lock_owner
*nlop
, struct nfs_file_lock
*newnflp
)
3421 struct nfs_file_lock
*nflp
;
3423 /* insert new lock in lock owner's held lock list */
3424 lck_mtx_lock(&nlop
->nlo_lock
);
3425 if ((newnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_FLOCK
) {
3426 TAILQ_INSERT_HEAD(&nlop
->nlo_locks
, newnflp
, nfl_lolink
);
3428 TAILQ_FOREACH(nflp
, &nlop
->nlo_locks
, nfl_lolink
) {
3429 if (newnflp
->nfl_start
< nflp
->nfl_start
) {
3434 TAILQ_INSERT_BEFORE(nflp
, newnflp
, nfl_lolink
);
3436 TAILQ_INSERT_TAIL(&nlop
->nlo_locks
, newnflp
, nfl_lolink
);
3439 lck_mtx_unlock(&nlop
->nlo_lock
);
3443 * Get a file lock structure for this lock owner.
3445 struct nfs_file_lock
*
3446 nfs_file_lock_alloc(struct nfs_lock_owner
*nlop
)
3448 struct nfs_file_lock
*nflp
= NULL
;
3450 lck_mtx_lock(&nlop
->nlo_lock
);
3451 if (!nlop
->nlo_alock
.nfl_owner
) {
3452 nflp
= &nlop
->nlo_alock
;
3453 nflp
->nfl_owner
= nlop
;
3455 lck_mtx_unlock(&nlop
->nlo_lock
);
3457 MALLOC(nflp
, struct nfs_file_lock
*, sizeof(struct nfs_file_lock
), M_TEMP
, M_WAITOK
);
3461 bzero(nflp
, sizeof(*nflp
));
3462 nflp
->nfl_flags
|= NFS_FILE_LOCK_ALLOC
;
3463 nflp
->nfl_owner
= nlop
;
3465 nfs_lock_owner_ref(nlop
);
3470 * destroy the given NFS file lock structure
3473 nfs_file_lock_destroy(struct nfs_file_lock
*nflp
)
3475 struct nfs_lock_owner
*nlop
= nflp
->nfl_owner
;
3477 if (nflp
->nfl_flags
& NFS_FILE_LOCK_ALLOC
) {
3478 nflp
->nfl_owner
= NULL
;
3481 lck_mtx_lock(&nlop
->nlo_lock
);
3482 bzero(nflp
, sizeof(*nflp
));
3483 lck_mtx_unlock(&nlop
->nlo_lock
);
3485 nfs_lock_owner_rele(nlop
);
3489 * Check if one file lock conflicts with another.
3490 * (nflp1 is the new lock. nflp2 is the existing lock.)
3493 nfs_file_lock_conflict(struct nfs_file_lock
*nflp1
, struct nfs_file_lock
*nflp2
, int *willsplit
)
3495 /* no conflict if lock is dead */
3496 if ((nflp1
->nfl_flags
& NFS_FILE_LOCK_DEAD
) || (nflp2
->nfl_flags
& NFS_FILE_LOCK_DEAD
)) {
3499 /* no conflict if it's ours - unless the lock style doesn't match */
3500 if ((nflp1
->nfl_owner
== nflp2
->nfl_owner
) &&
3501 ((nflp1
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == (nflp2
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
))) {
3502 if (willsplit
&& (nflp1
->nfl_type
!= nflp2
->nfl_type
) &&
3503 (nflp1
->nfl_start
> nflp2
->nfl_start
) &&
3504 (nflp1
->nfl_end
< nflp2
->nfl_end
)) {
3509 /* no conflict if ranges don't overlap */
3510 if ((nflp1
->nfl_start
> nflp2
->nfl_end
) || (nflp1
->nfl_end
< nflp2
->nfl_start
)) {
3513 /* no conflict if neither lock is exclusive */
3514 if ((nflp1
->nfl_type
!= F_WRLCK
) && (nflp2
->nfl_type
!= F_WRLCK
)) {
3522 * Send an NFSv4 LOCK RPC to the server.
3527 struct nfs_open_file
*nofp
,
3528 struct nfs_file_lock
*nflp
,
3534 struct nfs_lock_owner
*nlop
= nflp
->nfl_owner
;
3535 struct nfsmount
*nmp
;
3536 struct nfsm_chain nmreq
, nmrep
;
3539 int error
= 0, lockerror
= ENOENT
, newlocker
, numops
, status
;
3540 struct nfsreq_secinfo_args si
;
3543 if (nfs_mount_gone(nmp
)) {
3546 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
3550 newlocker
= (nlop
->nlo_stategenid
!= nmp
->nm_stategenid
);
3551 locktype
= (nflp
->nfl_flags
& NFS_FILE_LOCK_WAIT
) ?
3552 ((nflp
->nfl_type
== F_WRLCK
) ?
3553 NFS_LOCK_TYPE_WRITEW
:
3554 NFS_LOCK_TYPE_READW
) :
3555 ((nflp
->nfl_type
== F_WRLCK
) ?
3556 NFS_LOCK_TYPE_WRITE
:
3557 NFS_LOCK_TYPE_READ
);
3559 error
= nfs_open_file_set_busy(nofp
, thd
);
3563 error
= nfs_open_owner_set_busy(nofp
->nof_owner
, thd
);
3565 nfs_open_file_clear_busy(nofp
);
3568 if (!nlop
->nlo_open_owner
) {
3569 nfs_open_owner_ref(nofp
->nof_owner
);
3570 nlop
->nlo_open_owner
= nofp
->nof_owner
;
3573 error
= nfs_lock_owner_set_busy(nlop
, thd
);
3576 nfs_open_owner_clear_busy(nofp
->nof_owner
);
3577 nfs_open_file_clear_busy(nofp
);
3582 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
3583 nfsm_chain_null(&nmreq
);
3584 nfsm_chain_null(&nmrep
);
3586 // PUTFH, GETATTR, LOCK
3588 nfsm_chain_build_alloc_init(error
, &nmreq
, 33 * NFSX_UNSIGNED
);
3589 nfsm_chain_add_compound_header(error
, &nmreq
, "lock", nmp
->nm_minor_vers
, numops
);
3591 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
3592 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
3594 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
3595 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
3597 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOCK
);
3598 nfsm_chain_add_32(error
, &nmreq
, locktype
);
3599 nfsm_chain_add_32(error
, &nmreq
, reclaim
);
3600 nfsm_chain_add_64(error
, &nmreq
, nflp
->nfl_start
);
3601 nfsm_chain_add_64(error
, &nmreq
, NFS_LOCK_LENGTH(nflp
->nfl_start
, nflp
->nfl_end
));
3602 nfsm_chain_add_32(error
, &nmreq
, newlocker
);
3604 nfsm_chain_add_32(error
, &nmreq
, nofp
->nof_owner
->noo_seqid
);
3605 nfsm_chain_add_stateid(error
, &nmreq
, &nofp
->nof_stateid
);
3606 nfsm_chain_add_32(error
, &nmreq
, nlop
->nlo_seqid
);
3607 nfsm_chain_add_lock_owner4(error
, &nmreq
, nmp
, nlop
);
3609 nfsm_chain_add_stateid(error
, &nmreq
, &nlop
->nlo_stateid
);
3610 nfsm_chain_add_32(error
, &nmreq
, nlop
->nlo_seqid
);
3612 nfsm_chain_build_done(error
, &nmreq
);
3613 nfsm_assert(error
, (numops
== 0), EPROTO
);
3616 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, flags
| R_NOINTR
, &nmrep
, &xid
, &status
);
3618 if ((lockerror
= nfs_node_lock(np
))) {
3621 nfsm_chain_skip_tag(error
, &nmrep
);
3622 nfsm_chain_get_32(error
, &nmrep
, numops
);
3623 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
3625 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
3626 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, &xid
);
3628 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOCK
);
3629 nfs_owner_seqid_increment(newlocker
? nofp
->nof_owner
: NULL
, nlop
, error
);
3630 nfsm_chain_get_stateid(error
, &nmrep
, &nlop
->nlo_stateid
);
3632 /* Update the lock owner's stategenid once it appears the server has state for it. */
3633 /* We determine this by noting the request was successful (we got a stateid). */
3634 if (newlocker
&& !error
) {
3635 nlop
->nlo_stategenid
= nmp
->nm_stategenid
;
3639 nfs_node_unlock(np
);
3641 nfs_lock_owner_clear_busy(nlop
);
3643 nfs_open_owner_clear_busy(nofp
->nof_owner
);
3644 nfs_open_file_clear_busy(nofp
);
3646 nfsm_chain_cleanup(&nmreq
);
3647 nfsm_chain_cleanup(&nmrep
);
3652 * Send an NFSv4 LOCKU RPC to the server.
3657 struct nfs_lock_owner
*nlop
,
3665 struct nfsmount
*nmp
;
3666 struct nfsm_chain nmreq
, nmrep
;
3668 int error
= 0, lockerror
= ENOENT
, numops
, status
;
3669 struct nfsreq_secinfo_args si
;
3672 if (nfs_mount_gone(nmp
)) {
3675 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
3679 error
= nfs_lock_owner_set_busy(nlop
, NULL
);
3684 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
3685 nfsm_chain_null(&nmreq
);
3686 nfsm_chain_null(&nmrep
);
3688 // PUTFH, GETATTR, LOCKU
3690 nfsm_chain_build_alloc_init(error
, &nmreq
, 26 * NFSX_UNSIGNED
);
3691 nfsm_chain_add_compound_header(error
, &nmreq
, "unlock", nmp
->nm_minor_vers
, numops
);
3693 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
3694 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
3696 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
3697 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
3699 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOCKU
);
3700 nfsm_chain_add_32(error
, &nmreq
, (type
== F_WRLCK
) ? NFS_LOCK_TYPE_WRITE
: NFS_LOCK_TYPE_READ
);
3701 nfsm_chain_add_32(error
, &nmreq
, nlop
->nlo_seqid
);
3702 nfsm_chain_add_stateid(error
, &nmreq
, &nlop
->nlo_stateid
);
3703 nfsm_chain_add_64(error
, &nmreq
, start
);
3704 nfsm_chain_add_64(error
, &nmreq
, NFS_LOCK_LENGTH(start
, end
));
3705 nfsm_chain_build_done(error
, &nmreq
);
3706 nfsm_assert(error
, (numops
== 0), EPROTO
);
3709 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, flags
| R_NOINTR
, &nmrep
, &xid
, &status
);
3711 if ((lockerror
= nfs_node_lock(np
))) {
3714 nfsm_chain_skip_tag(error
, &nmrep
);
3715 nfsm_chain_get_32(error
, &nmrep
, numops
);
3716 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
3718 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
3719 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, &xid
);
3721 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOCKU
);
3722 nfs_owner_seqid_increment(NULL
, nlop
, error
);
3723 nfsm_chain_get_stateid(error
, &nmrep
, &nlop
->nlo_stateid
);
3726 nfs_node_unlock(np
);
3728 nfs_lock_owner_clear_busy(nlop
);
3729 nfsm_chain_cleanup(&nmreq
);
3730 nfsm_chain_cleanup(&nmrep
);
3735 * Send an NFSv4 LOCKT RPC to the server.
3740 struct nfs_lock_owner
*nlop
,
3746 struct nfsmount
*nmp
;
3747 struct nfsm_chain nmreq
, nmrep
;
3748 uint64_t xid
, val64
= 0;
3750 int error
= 0, lockerror
, numops
, status
;
3751 struct nfsreq_secinfo_args si
;
3754 if (nfs_mount_gone(nmp
)) {
3757 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
3762 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
3763 nfsm_chain_null(&nmreq
);
3764 nfsm_chain_null(&nmrep
);
3766 // PUTFH, GETATTR, LOCKT
3768 nfsm_chain_build_alloc_init(error
, &nmreq
, 26 * NFSX_UNSIGNED
);
3769 nfsm_chain_add_compound_header(error
, &nmreq
, "locktest", nmp
->nm_minor_vers
, numops
);
3771 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
3772 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
3774 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
3775 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
3777 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOCKT
);
3778 nfsm_chain_add_32(error
, &nmreq
, (fl
->l_type
== F_WRLCK
) ? NFS_LOCK_TYPE_WRITE
: NFS_LOCK_TYPE_READ
);
3779 nfsm_chain_add_64(error
, &nmreq
, start
);
3780 nfsm_chain_add_64(error
, &nmreq
, NFS_LOCK_LENGTH(start
, end
));
3781 nfsm_chain_add_lock_owner4(error
, &nmreq
, nmp
, nlop
);
3782 nfsm_chain_build_done(error
, &nmreq
);
3783 nfsm_assert(error
, (numops
== 0), EPROTO
);
3786 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
3788 if ((lockerror
= nfs_node_lock(np
))) {
3791 nfsm_chain_skip_tag(error
, &nmrep
);
3792 nfsm_chain_get_32(error
, &nmrep
, numops
);
3793 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
3795 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
3796 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, &xid
);
3798 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOCKT
);
3799 if (error
== NFSERR_DENIED
) {
3801 nfsm_chain_get_64(error
, &nmrep
, fl
->l_start
);
3802 nfsm_chain_get_64(error
, &nmrep
, val64
);
3803 fl
->l_len
= (val64
== UINT64_MAX
) ? 0 : val64
;
3804 nfsm_chain_get_32(error
, &nmrep
, val
);
3805 fl
->l_type
= (val
== NFS_LOCK_TYPE_WRITE
) ? F_WRLCK
: F_RDLCK
;
3807 fl
->l_whence
= SEEK_SET
;
3808 } else if (!error
) {
3809 fl
->l_type
= F_UNLCK
;
3813 nfs_node_unlock(np
);
3815 nfsm_chain_cleanup(&nmreq
);
3816 nfsm_chain_cleanup(&nmrep
);
3822 * Check for any conflicts with the given lock.
3824 * Checking for a lock doesn't require the file to be opened.
3825 * So we skip all the open owner, open file, lock owner work
3826 * and just check for a conflicting lock.
3829 nfs_advlock_getlock(
3831 struct nfs_lock_owner
*nlop
,
3837 struct nfsmount
*nmp
;
3838 struct nfs_file_lock
*nflp
;
3839 int error
= 0, answered
= 0;
3842 if (nfs_mount_gone(nmp
)) {
3847 if ((error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
)))) {
3851 lck_mtx_lock(&np
->n_openlock
);
3852 /* scan currently held locks for conflict */
3853 TAILQ_FOREACH(nflp
, &np
->n_locks
, nfl_link
) {
3854 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
| NFS_FILE_LOCK_DEAD
)) {
3857 if ((start
<= nflp
->nfl_end
) && (end
>= nflp
->nfl_start
) &&
3858 ((fl
->l_type
== F_WRLCK
) || (nflp
->nfl_type
== F_WRLCK
))) {
3863 /* found a conflicting lock */
3864 fl
->l_type
= nflp
->nfl_type
;
3865 fl
->l_pid
= (nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_FLOCK
) ? -1 : nflp
->nfl_owner
->nlo_pid
;
3866 fl
->l_start
= nflp
->nfl_start
;
3867 fl
->l_len
= NFS_FLOCK_LENGTH(nflp
->nfl_start
, nflp
->nfl_end
);
3868 fl
->l_whence
= SEEK_SET
;
3870 } else if ((np
->n_openflags
& N_DELEG_WRITE
) && !(np
->n_openflags
& N_DELEG_RETURN
)) {
3872 * If we have a write delegation, we know there can't be other
3873 * locks on the server. So the answer is no conflicting lock found.
3875 fl
->l_type
= F_UNLCK
;
3878 lck_mtx_unlock(&np
->n_openlock
);
3880 nfs_mount_state_in_use_end(nmp
, 0);
3884 /* no conflict found locally, so ask the server */
3885 error
= nmp
->nm_funcs
->nf_getlock_rpc(np
, nlop
, fl
, start
, end
, ctx
);
3887 if (nfs_mount_state_in_use_end(nmp
, error
)) {
3894 * Acquire a file lock for the given range.
3896 * Add the lock (request) to the lock queue.
3897 * Scan the lock queue for any conflicting locks.
3898 * If a conflict is found, block or return an error.
3899 * Once end of queue is reached, send request to the server.
3900 * If the server grants the lock, scan the lock queue and
3901 * update any existing locks. Then (optionally) scan the
3902 * queue again to coalesce any locks adjacent to the new one.
3905 nfs_advlock_setlock(
3907 struct nfs_open_file
*nofp
,
3908 struct nfs_lock_owner
*nlop
,
3916 struct nfsmount
*nmp
;
3917 struct nfs_file_lock
*newnflp
, *nflp
, *nflp2
= NULL
, *nextnflp
, *flocknflp
= NULL
;
3918 struct nfs_file_lock
*coalnflp
;
3919 int error
= 0, error2
, willsplit
= 0, delay
, slpflag
, busy
= 0, inuse
= 0, restart
, inqueue
= 0;
3920 struct timespec ts
= {1, 0};
3923 if (nfs_mount_gone(nmp
)) {
3926 slpflag
= NMFLAG(nmp
, INTR
) ? PCATCH
: 0;
3928 if ((type
!= F_RDLCK
) && (type
!= F_WRLCK
)) {
3932 /* allocate a new lock */
3933 newnflp
= nfs_file_lock_alloc(nlop
);
3937 newnflp
->nfl_start
= start
;
3938 newnflp
->nfl_end
= end
;
3939 newnflp
->nfl_type
= type
;
3940 if (op
== F_SETLKW
) {
3941 newnflp
->nfl_flags
|= NFS_FILE_LOCK_WAIT
;
3943 newnflp
->nfl_flags
|= style
;
3944 newnflp
->nfl_flags
|= NFS_FILE_LOCK_BLOCKED
;
3946 if ((style
== NFS_FILE_LOCK_STYLE_FLOCK
) && (type
== F_WRLCK
)) {
3948 * For exclusive flock-style locks, if we block waiting for the
3949 * lock, we need to first release any currently held shared
3950 * flock-style lock. So, the first thing we do is check if we
3951 * have a shared flock-style lock.
3953 nflp
= TAILQ_FIRST(&nlop
->nlo_locks
);
3954 if (nflp
&& ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != NFS_FILE_LOCK_STYLE_FLOCK
)) {
3957 if (nflp
&& (nflp
->nfl_type
!= F_RDLCK
)) {
3965 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
3970 if (np
->n_flag
& NREVOKE
) {
3972 nfs_mount_state_in_use_end(nmp
, 0);
3976 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
3977 nfs_mount_state_in_use_end(nmp
, 0);
3979 error
= nfs4_reopen(nofp
, vfs_context_thread(ctx
));
3986 lck_mtx_lock(&np
->n_openlock
);
3988 /* insert new lock at beginning of list */
3989 TAILQ_INSERT_HEAD(&np
->n_locks
, newnflp
, nfl_link
);
3993 /* scan current list of locks (held and pending) for conflicts */
3994 for (nflp
= TAILQ_NEXT(newnflp
, nfl_link
); nflp
; nflp
= nextnflp
) {
3995 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
3996 if (!nfs_file_lock_conflict(newnflp
, nflp
, &willsplit
)) {
4000 if (!(newnflp
->nfl_flags
& NFS_FILE_LOCK_WAIT
)) {
4004 /* Block until this lock is no longer held. */
4005 if (nflp
->nfl_blockcnt
== UINT_MAX
) {
4009 nflp
->nfl_blockcnt
++;
4012 /* release any currently held shared lock before sleeping */
4013 lck_mtx_unlock(&np
->n_openlock
);
4014 nfs_mount_state_in_use_end(nmp
, 0);
4016 error
= nfs_advlock_unlock(np
, nofp
, nlop
, 0, UINT64_MAX
, NFS_FILE_LOCK_STYLE_FLOCK
, ctx
);
4019 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
4022 lck_mtx_lock(&np
->n_openlock
);
4026 lck_mtx_lock(&np
->n_openlock
);
4027 /* no need to block/sleep if the conflict is gone */
4028 if (!nfs_file_lock_conflict(newnflp
, nflp
, NULL
)) {
4032 msleep(nflp
, &np
->n_openlock
, slpflag
, "nfs_advlock_setlock_blocked", &ts
);
4034 error
= nfs_sigintr(NFSTONMP(np
), NULL
, vfs_context_thread(ctx
), 0);
4035 if (!error
&& (nmp
->nm_state
& NFSSTA_RECOVER
)) {
4036 /* looks like we have a recover pending... restart */
4038 lck_mtx_unlock(&np
->n_openlock
);
4039 nfs_mount_state_in_use_end(nmp
, 0);
4041 lck_mtx_lock(&np
->n_openlock
);
4044 if (!error
&& (np
->n_flag
& NREVOKE
)) {
4047 } while (!error
&& nfs_file_lock_conflict(newnflp
, nflp
, NULL
));
4048 nflp
->nfl_blockcnt
--;
4049 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
) && !nflp
->nfl_blockcnt
) {
4050 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
4051 nfs_file_lock_destroy(nflp
);
4053 if (error
|| restart
) {
4056 /* We have released n_openlock and we can't trust that nextnflp is still valid. */
4057 /* So, start this lock-scanning loop over from where it started. */
4058 nextnflp
= TAILQ_NEXT(newnflp
, nfl_link
);
4060 lck_mtx_unlock(&np
->n_openlock
);
4070 * It looks like this operation is splitting a lock.
4071 * We allocate a new lock now so we don't have to worry
4072 * about the allocation failing after we've updated some state.
4074 nflp2
= nfs_file_lock_alloc(nlop
);
4081 /* once scan for local conflicts is clear, send request to server */
4082 if ((error
= nfs_open_state_set_busy(np
, vfs_context_thread(ctx
)))) {
4088 /* do we have a delegation? (that we're not returning?) */
4089 if ((np
->n_openflags
& N_DELEG_MASK
) && !(np
->n_openflags
& N_DELEG_RETURN
)) {
4090 if (np
->n_openflags
& N_DELEG_WRITE
) {
4091 /* with a write delegation, just take the lock delegated */
4092 newnflp
->nfl_flags
|= NFS_FILE_LOCK_DELEGATED
;
4094 /* make sure the lock owner knows its open owner */
4095 if (!nlop
->nlo_open_owner
) {
4096 nfs_open_owner_ref(nofp
->nof_owner
);
4097 nlop
->nlo_open_owner
= nofp
->nof_owner
;
4102 * If we don't have any non-delegated opens but we do have
4103 * delegated opens, then we need to first claim the delegated
4104 * opens so that the lock request on the server can be associated
4105 * with an open it knows about.
4107 if ((!nofp
->nof_rw_drw
&& !nofp
->nof_w_drw
&& !nofp
->nof_r_drw
&&
4108 !nofp
->nof_rw_dw
&& !nofp
->nof_w_dw
&& !nofp
->nof_r_dw
&&
4109 !nofp
->nof_rw
&& !nofp
->nof_w
&& !nofp
->nof_r
) &&
4110 (nofp
->nof_d_rw_drw
|| nofp
->nof_d_w_drw
|| nofp
->nof_d_r_drw
||
4111 nofp
->nof_d_rw_dw
|| nofp
->nof_d_w_dw
|| nofp
->nof_d_r_dw
||
4112 nofp
->nof_d_rw
|| nofp
->nof_d_w
|| nofp
->nof_d_r
)) {
4113 error
= nfs4_claim_delegated_state_for_open_file(nofp
, 0);
4120 if (np
->n_flag
& NREVOKE
) {
4124 error
= nmp
->nm_funcs
->nf_setlock_rpc(np
, nofp
, newnflp
, 0, 0, vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4126 if (!error
|| ((error
!= NFSERR_DENIED
) && (error
!= NFSERR_GRACE
))) {
4129 /* request was denied due to either conflict or grace period */
4130 if ((error
== NFSERR_DENIED
) && !(newnflp
->nfl_flags
& NFS_FILE_LOCK_WAIT
)) {
4135 /* release any currently held shared lock before sleeping */
4136 nfs_open_state_clear_busy(np
);
4138 nfs_mount_state_in_use_end(nmp
, 0);
4140 error2
= nfs_advlock_unlock(np
, nofp
, nlop
, 0, UINT64_MAX
, NFS_FILE_LOCK_STYLE_FLOCK
, ctx
);
4143 error2
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
4147 error2
= nfs_open_state_set_busy(np
, vfs_context_thread(ctx
));
4156 * Wait a little bit and send the request again.
4157 * Except for retries of blocked v2/v3 request where we've already waited a bit.
4159 if ((nmp
->nm_vers
>= NFS_VER4
) || (error
== NFSERR_GRACE
)) {
4160 if (error
== NFSERR_GRACE
) {
4166 tsleep(newnflp
, slpflag
, "nfs_advlock_setlock_delay", delay
* (hz
/ 2));
4169 error
= nfs_sigintr(NFSTONMP(np
), NULL
, vfs_context_thread(ctx
), 0);
4170 if (!error
&& (nmp
->nm_state
& NFSSTA_RECOVER
)) {
4171 /* looks like we have a recover pending... restart */
4172 nfs_open_state_clear_busy(np
);
4174 nfs_mount_state_in_use_end(nmp
, 0);
4178 if (!error
&& (np
->n_flag
& NREVOKE
)) {
4184 if (nfs_mount_state_error_should_restart(error
)) {
4185 /* looks like we need to restart this operation */
4187 nfs_open_state_clear_busy(np
);
4191 nfs_mount_state_in_use_end(nmp
, error
);
4196 lck_mtx_lock(&np
->n_openlock
);
4197 newnflp
->nfl_flags
&= ~NFS_FILE_LOCK_BLOCKED
;
4199 newnflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
4200 if (newnflp
->nfl_blockcnt
) {
4201 /* wake up anyone blocked on this lock */
4204 /* remove newnflp from lock list and destroy */
4206 TAILQ_REMOVE(&np
->n_locks
, newnflp
, nfl_link
);
4208 nfs_file_lock_destroy(newnflp
);
4210 lck_mtx_unlock(&np
->n_openlock
);
4212 nfs_open_state_clear_busy(np
);
4215 nfs_mount_state_in_use_end(nmp
, error
);
4218 nfs_file_lock_destroy(nflp2
);
4223 /* server granted the lock */
4226 * Scan for locks to update.
4228 * Locks completely covered are killed.
4229 * At most two locks may need to be clipped.
4230 * It's possible that a single lock may need to be split.
4232 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
4233 if (nflp
== newnflp
) {
4236 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
| NFS_FILE_LOCK_DEAD
)) {
4239 if (nflp
->nfl_owner
!= nlop
) {
4242 if ((newnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != (nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
)) {
4245 if ((newnflp
->nfl_start
> nflp
->nfl_end
) || (newnflp
->nfl_end
< nflp
->nfl_start
)) {
4248 /* here's one to update */
4249 if ((newnflp
->nfl_start
<= nflp
->nfl_start
) && (newnflp
->nfl_end
>= nflp
->nfl_end
)) {
4250 /* The entire lock is being replaced. */
4251 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
4252 lck_mtx_lock(&nlop
->nlo_lock
);
4253 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
4254 lck_mtx_unlock(&nlop
->nlo_lock
);
4255 /* lock will be destroyed below, if no waiters */
4256 } else if ((newnflp
->nfl_start
> nflp
->nfl_start
) && (newnflp
->nfl_end
< nflp
->nfl_end
)) {
4257 /* We're replacing a range in the middle of a lock. */
4258 /* The current lock will be split into two locks. */
4259 /* Update locks and insert new lock after current lock. */
4260 nflp2
->nfl_flags
|= (nflp
->nfl_flags
& (NFS_FILE_LOCK_STYLE_MASK
| NFS_FILE_LOCK_DELEGATED
));
4261 nflp2
->nfl_type
= nflp
->nfl_type
;
4262 nflp2
->nfl_start
= newnflp
->nfl_end
+ 1;
4263 nflp2
->nfl_end
= nflp
->nfl_end
;
4264 nflp
->nfl_end
= newnflp
->nfl_start
- 1;
4265 TAILQ_INSERT_AFTER(&np
->n_locks
, nflp
, nflp2
, nfl_link
);
4266 nfs_lock_owner_insert_held_lock(nlop
, nflp2
);
4269 } else if (newnflp
->nfl_start
> nflp
->nfl_start
) {
4270 /* We're replacing the end of a lock. */
4271 nflp
->nfl_end
= newnflp
->nfl_start
- 1;
4272 } else if (newnflp
->nfl_end
< nflp
->nfl_end
) {
4273 /* We're replacing the start of a lock. */
4274 nflp
->nfl_start
= newnflp
->nfl_end
+ 1;
4276 if (nflp
->nfl_blockcnt
) {
4277 /* wake up anyone blocked on this lock */
4279 } else if (nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
) {
4280 /* remove nflp from lock list and destroy */
4281 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
4282 nfs_file_lock_destroy(nflp
);
4286 nfs_lock_owner_insert_held_lock(nlop
, newnflp
);
4289 * POSIX locks should be coalesced when possible.
4291 if ((style
== NFS_FILE_LOCK_STYLE_POSIX
) && (nofp
->nof_flags
& NFS_OPEN_FILE_POSIXLOCK
)) {
4293 * Walk through the lock queue and check each of our held locks with
4294 * the previous and next locks in the lock owner's "held lock list".
4295 * If the two locks can be coalesced, we merge the current lock into
4296 * the other (previous or next) lock. Merging this way makes sure that
4297 * lock ranges are always merged forward in the lock queue. This is
4298 * important because anyone blocked on the lock being "merged away"
4299 * will still need to block on that range and it will simply continue
4300 * checking locks that are further down the list.
4302 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
4303 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
| NFS_FILE_LOCK_DEAD
)) {
4306 if (nflp
->nfl_owner
!= nlop
) {
4309 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != NFS_FILE_LOCK_STYLE_POSIX
) {
4312 if (((coalnflp
= TAILQ_PREV(nflp
, nfs_file_lock_queue
, nfl_lolink
))) &&
4313 ((coalnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
) &&
4314 (coalnflp
->nfl_type
== nflp
->nfl_type
) &&
4315 (coalnflp
->nfl_end
== (nflp
->nfl_start
- 1))) {
4316 coalnflp
->nfl_end
= nflp
->nfl_end
;
4317 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
4318 lck_mtx_lock(&nlop
->nlo_lock
);
4319 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
4320 lck_mtx_unlock(&nlop
->nlo_lock
);
4321 } else if (((coalnflp
= TAILQ_NEXT(nflp
, nfl_lolink
))) &&
4322 ((coalnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
) &&
4323 (coalnflp
->nfl_type
== nflp
->nfl_type
) &&
4324 (coalnflp
->nfl_start
== (nflp
->nfl_end
+ 1))) {
4325 coalnflp
->nfl_start
= nflp
->nfl_start
;
4326 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
4327 lck_mtx_lock(&nlop
->nlo_lock
);
4328 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
4329 lck_mtx_unlock(&nlop
->nlo_lock
);
4331 if (!(nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
)) {
4334 if (nflp
->nfl_blockcnt
) {
4335 /* wake up anyone blocked on this lock */
4338 /* remove nflp from lock list and destroy */
4339 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
4340 nfs_file_lock_destroy(nflp
);
4345 lck_mtx_unlock(&np
->n_openlock
);
4346 nfs_open_state_clear_busy(np
);
4347 nfs_mount_state_in_use_end(nmp
, error
);
4350 nfs_file_lock_destroy(nflp2
);
4356 * Release all (same style) locks within the given range.
4361 struct nfs_open_file
*nofp
,
4362 struct nfs_lock_owner
*nlop
,
4368 struct nfsmount
*nmp
;
4369 struct nfs_file_lock
*nflp
, *nextnflp
, *newnflp
= NULL
;
4370 int error
= 0, willsplit
= 0, send_unlock_rpcs
= 1;
4373 if (nfs_mount_gone(nmp
)) {
4378 if ((error
= nfs_mount_state_in_use_start(nmp
, NULL
))) {
4381 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
4382 nfs_mount_state_in_use_end(nmp
, 0);
4383 error
= nfs4_reopen(nofp
, NULL
);
4389 if ((error
= nfs_open_state_set_busy(np
, NULL
))) {
4390 nfs_mount_state_in_use_end(nmp
, error
);
4394 lck_mtx_lock(&np
->n_openlock
);
4395 if ((start
> 0) && (end
< UINT64_MAX
) && !willsplit
) {
4397 * We may need to allocate a new lock if an existing lock gets split.
4398 * So, we first scan the list to check for a split, and if there's
4399 * going to be one, we'll allocate one now.
4401 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
4402 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
| NFS_FILE_LOCK_DEAD
)) {
4405 if (nflp
->nfl_owner
!= nlop
) {
4408 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != style
) {
4411 if ((start
> nflp
->nfl_end
) || (end
< nflp
->nfl_start
)) {
4414 if ((start
> nflp
->nfl_start
) && (end
< nflp
->nfl_end
)) {
4420 lck_mtx_unlock(&np
->n_openlock
);
4421 nfs_open_state_clear_busy(np
);
4422 nfs_mount_state_in_use_end(nmp
, 0);
4423 newnflp
= nfs_file_lock_alloc(nlop
);
4432 * Free all of our locks in the given range.
4434 * Note that this process requires sending requests to the server.
4435 * Because of this, we will release the n_openlock while performing
4436 * the unlock RPCs. The N_OPENBUSY state keeps the state of *held*
4437 * locks from changing underneath us. However, other entries in the
4438 * list may be removed. So we need to be careful walking the list.
4442 * Don't unlock ranges that are held by other-style locks.
4443 * If style is posix, don't send any unlock rpcs if flock is held.
4444 * If we unlock an flock, don't send unlock rpcs for any posix-style
4445 * ranges held - instead send unlocks for the ranges not held.
4447 if ((style
== NFS_FILE_LOCK_STYLE_POSIX
) &&
4448 ((nflp
= TAILQ_FIRST(&nlop
->nlo_locks
))) &&
4449 ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_FLOCK
)) {
4450 send_unlock_rpcs
= 0;
4452 if ((style
== NFS_FILE_LOCK_STYLE_FLOCK
) &&
4453 ((nflp
= TAILQ_FIRST(&nlop
->nlo_locks
))) &&
4454 ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_FLOCK
) &&
4455 ((nflp
= TAILQ_NEXT(nflp
, nfl_lolink
))) &&
4456 ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
)) {
4458 int type
= TAILQ_FIRST(&nlop
->nlo_locks
)->nfl_type
;
4459 int delegated
= (TAILQ_FIRST(&nlop
->nlo_locks
)->nfl_flags
& NFS_FILE_LOCK_DELEGATED
);
4460 while (!delegated
&& nflp
) {
4461 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
) {
4462 /* unlock the range preceding this lock */
4463 lck_mtx_unlock(&np
->n_openlock
);
4464 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, type
, s
, nflp
->nfl_start
- 1, 0,
4465 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4466 if (nfs_mount_state_error_should_restart(error
)) {
4467 nfs_open_state_clear_busy(np
);
4468 nfs_mount_state_in_use_end(nmp
, error
);
4471 lck_mtx_lock(&np
->n_openlock
);
4475 s
= nflp
->nfl_end
+ 1;
4477 nflp
= TAILQ_NEXT(nflp
, nfl_lolink
);
4480 lck_mtx_unlock(&np
->n_openlock
);
4481 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, type
, s
, end
, 0,
4482 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4483 if (nfs_mount_state_error_should_restart(error
)) {
4484 nfs_open_state_clear_busy(np
);
4485 nfs_mount_state_in_use_end(nmp
, error
);
4488 lck_mtx_lock(&np
->n_openlock
);
4493 send_unlock_rpcs
= 0;
4496 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
4497 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
| NFS_FILE_LOCK_DEAD
)) {
4500 if (nflp
->nfl_owner
!= nlop
) {
4503 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != style
) {
4506 if ((start
> nflp
->nfl_end
) || (end
< nflp
->nfl_start
)) {
4509 /* here's one to unlock */
4510 if ((start
<= nflp
->nfl_start
) && (end
>= nflp
->nfl_end
)) {
4511 /* The entire lock is being unlocked. */
4512 if (send_unlock_rpcs
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
4513 lck_mtx_unlock(&np
->n_openlock
);
4514 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, nflp
->nfl_type
, nflp
->nfl_start
, nflp
->nfl_end
, 0,
4515 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4516 if (nfs_mount_state_error_should_restart(error
)) {
4517 nfs_open_state_clear_busy(np
);
4518 nfs_mount_state_in_use_end(nmp
, error
);
4521 lck_mtx_lock(&np
->n_openlock
);
4523 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
4527 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
4528 lck_mtx_lock(&nlop
->nlo_lock
);
4529 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
4530 lck_mtx_unlock(&nlop
->nlo_lock
);
4531 /* lock will be destroyed below, if no waiters */
4532 } else if ((start
> nflp
->nfl_start
) && (end
< nflp
->nfl_end
)) {
4533 /* We're unlocking a range in the middle of a lock. */
4534 /* The current lock will be split into two locks. */
4535 if (send_unlock_rpcs
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
4536 lck_mtx_unlock(&np
->n_openlock
);
4537 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, nflp
->nfl_type
, start
, end
, 0,
4538 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4539 if (nfs_mount_state_error_should_restart(error
)) {
4540 nfs_open_state_clear_busy(np
);
4541 nfs_mount_state_in_use_end(nmp
, error
);
4544 lck_mtx_lock(&np
->n_openlock
);
4549 /* update locks and insert new lock after current lock */
4550 newnflp
->nfl_flags
|= (nflp
->nfl_flags
& (NFS_FILE_LOCK_STYLE_MASK
| NFS_FILE_LOCK_DELEGATED
));
4551 newnflp
->nfl_type
= nflp
->nfl_type
;
4552 newnflp
->nfl_start
= end
+ 1;
4553 newnflp
->nfl_end
= nflp
->nfl_end
;
4554 nflp
->nfl_end
= start
- 1;
4555 TAILQ_INSERT_AFTER(&np
->n_locks
, nflp
, newnflp
, nfl_link
);
4556 nfs_lock_owner_insert_held_lock(nlop
, newnflp
);
4559 } else if (start
> nflp
->nfl_start
) {
4560 /* We're unlocking the end of a lock. */
4561 if (send_unlock_rpcs
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
4562 lck_mtx_unlock(&np
->n_openlock
);
4563 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, nflp
->nfl_type
, start
, nflp
->nfl_end
, 0,
4564 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4565 if (nfs_mount_state_error_should_restart(error
)) {
4566 nfs_open_state_clear_busy(np
);
4567 nfs_mount_state_in_use_end(nmp
, error
);
4570 lck_mtx_lock(&np
->n_openlock
);
4572 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
4576 nflp
->nfl_end
= start
- 1;
4577 } else if (end
< nflp
->nfl_end
) {
4578 /* We're unlocking the start of a lock. */
4579 if (send_unlock_rpcs
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
4580 lck_mtx_unlock(&np
->n_openlock
);
4581 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, nflp
->nfl_type
, nflp
->nfl_start
, end
, 0,
4582 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4583 if (nfs_mount_state_error_should_restart(error
)) {
4584 nfs_open_state_clear_busy(np
);
4585 nfs_mount_state_in_use_end(nmp
, error
);
4588 lck_mtx_lock(&np
->n_openlock
);
4590 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
4594 nflp
->nfl_start
= end
+ 1;
4596 if (nflp
->nfl_blockcnt
) {
4597 /* wake up anyone blocked on this lock */
4599 } else if (nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
) {
4600 /* remove nflp from lock list and destroy */
4601 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
4602 nfs_file_lock_destroy(nflp
);
4606 lck_mtx_unlock(&np
->n_openlock
);
4607 nfs_open_state_clear_busy(np
);
4608 nfs_mount_state_in_use_end(nmp
, 0);
4611 nfs_file_lock_destroy(newnflp
);
4617 * NFSv4 advisory file locking
4621 struct vnop_advlock_args
/* {
4622 * struct vnodeop_desc *a_desc;
4626 * struct flock *a_fl;
4628 * vfs_context_t a_context;
4631 vnode_t vp
= ap
->a_vp
;
4632 nfsnode_t np
= VTONFS(ap
->a_vp
);
4633 struct flock
*fl
= ap
->a_fl
;
4635 int flags
= ap
->a_flags
;
4636 vfs_context_t ctx
= ap
->a_context
;
4637 struct nfsmount
*nmp
;
4638 struct nfs_open_owner
*noop
= NULL
;
4639 struct nfs_open_file
*nofp
= NULL
;
4640 struct nfs_lock_owner
*nlop
= NULL
;
4642 uint64_t start
, end
;
4643 int error
= 0, modified
, style
;
4645 #define OFF_MAX QUAD_MAX
4647 nmp
= VTONMP(ap
->a_vp
);
4648 if (nfs_mount_gone(nmp
)) {
4651 lck_mtx_lock(&nmp
->nm_lock
);
4652 if ((nmp
->nm_vers
<= NFS_VER3
) && (nmp
->nm_lockmode
== NFS_LOCK_MODE_DISABLED
)) {
4653 lck_mtx_unlock(&nmp
->nm_lock
);
4656 lck_mtx_unlock(&nmp
->nm_lock
);
4658 if (np
->n_flag
& NREVOKE
) {
4661 vtype
= vnode_vtype(ap
->a_vp
);
4662 if (vtype
== VDIR
) { /* ignore lock requests on directories */
4665 if (vtype
!= VREG
) { /* anything other than regular files is invalid */
4669 /* Convert the flock structure into a start and end. */
4670 switch (fl
->l_whence
) {
4674 * Caller is responsible for adding any necessary offset
4675 * to fl->l_start when SEEK_CUR is used.
4677 lstart
= fl
->l_start
;
4680 /* need to flush, and refetch attributes to make */
4681 /* sure we have the correct end of file offset */
4682 if ((error
= nfs_node_lock(np
))) {
4685 modified
= (np
->n_flag
& NMODIFIED
);
4686 nfs_node_unlock(np
);
4687 if (modified
&& ((error
= nfs_vinvalbuf(vp
, V_SAVE
, ctx
, 1)))) {
4690 if ((error
= nfs_getattr(np
, NULL
, ctx
, NGA_UNCACHED
))) {
4693 nfs_data_lock(np
, NFS_DATA_LOCK_SHARED
);
4694 if ((np
->n_size
> OFF_MAX
) ||
4695 ((fl
->l_start
> 0) && (np
->n_size
> (u_quad_t
)(OFF_MAX
- fl
->l_start
)))) {
4698 lstart
= np
->n_size
+ fl
->l_start
;
4699 nfs_data_unlock(np
);
4711 if (fl
->l_len
== 0) {
4713 } else if (fl
->l_len
> 0) {
4714 if ((fl
->l_len
- 1) > (OFF_MAX
- lstart
)) {
4717 end
= start
- 1 + fl
->l_len
;
4718 } else { /* l_len is negative */
4719 if ((lstart
+ fl
->l_len
) < 0) {
4725 if ((nmp
->nm_vers
== NFS_VER2
) && ((start
> INT32_MAX
) || (fl
->l_len
&& (end
> INT32_MAX
)))) {
4729 style
= (flags
& F_FLOCK
) ? NFS_FILE_LOCK_STYLE_FLOCK
: NFS_FILE_LOCK_STYLE_POSIX
;
4730 if ((style
== NFS_FILE_LOCK_STYLE_FLOCK
) && ((start
!= 0) || (end
!= UINT64_MAX
))) {
4734 /* find the lock owner, alloc if not unlock */
4735 nlop
= nfs_lock_owner_find(np
, vfs_context_proc(ctx
), (op
!= F_UNLCK
));
4737 error
= (op
== F_UNLCK
) ? 0 : ENOMEM
;
4739 NP(np
, "nfs_vnop_advlock: no lock owner, error %d", error
);
4744 if (op
== F_GETLK
) {
4745 error
= nfs_advlock_getlock(np
, nlop
, fl
, start
, end
, ctx
);
4747 /* find the open owner */
4748 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 0);
4750 NP(np
, "nfs_vnop_advlock: no open owner %d", kauth_cred_getuid(vfs_context_ucred(ctx
)));
4754 /* find the open file */
4756 error
= nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 0);
4760 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
4761 NP(np
, "nfs_vnop_advlock: LOST %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
4764 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
4765 error
= nfs4_reopen(nofp
, ((op
== F_UNLCK
) ? NULL
: vfs_context_thread(ctx
)));
4772 NP(np
, "nfs_vnop_advlock: no open file %d, %d", error
, kauth_cred_getuid(noop
->noo_cred
));
4775 if (op
== F_UNLCK
) {
4776 error
= nfs_advlock_unlock(np
, nofp
, nlop
, start
, end
, style
, ctx
);
4777 } else if ((op
== F_SETLK
) || (op
== F_SETLKW
)) {
4778 if ((op
== F_SETLK
) && (flags
& F_WAIT
)) {
4781 error
= nfs_advlock_setlock(np
, nofp
, nlop
, op
, start
, end
, style
, fl
->l_type
, ctx
);
4783 /* not getlk, unlock or lock? */
4790 nfs_lock_owner_rele(nlop
);
4793 nfs_open_owner_rele(noop
);
4799 * Check if an open owner holds any locks on a file.
4802 nfs_check_for_locks(struct nfs_open_owner
*noop
, struct nfs_open_file
*nofp
)
4804 struct nfs_lock_owner
*nlop
;
4806 TAILQ_FOREACH(nlop
, &nofp
->nof_np
->n_lock_owners
, nlo_link
) {
4807 if (nlop
->nlo_open_owner
!= noop
) {
4810 if (!TAILQ_EMPTY(&nlop
->nlo_locks
)) {
4814 return nlop
? 1 : 0;
4818 * Reopen simple (no deny, no locks) open state that was lost.
4821 nfs4_reopen(struct nfs_open_file
*nofp
, thread_t thd
)
4823 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
4824 struct nfsmount
*nmp
= NFSTONMP(nofp
->nof_np
);
4825 nfsnode_t np
= nofp
->nof_np
;
4826 vnode_t vp
= NFSTOV(np
);
4828 struct componentname cn
;
4829 const char *vname
= NULL
;
4830 const char *name
= NULL
;
4832 char smallname
[128];
4833 char *filename
= NULL
;
4834 int error
= 0, done
= 0, slpflag
= NMFLAG(nmp
, INTR
) ? PCATCH
: 0;
4835 struct timespec ts
= { 1, 0 };
4837 lck_mtx_lock(&nofp
->nof_lock
);
4838 while (nofp
->nof_flags
& NFS_OPEN_FILE_REOPENING
) {
4839 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0))) {
4842 msleep(&nofp
->nof_flags
, &nofp
->nof_lock
, slpflag
| (PZERO
- 1), "nfsreopenwait", &ts
);
4845 if (error
|| !(nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
4846 lck_mtx_unlock(&nofp
->nof_lock
);
4849 nofp
->nof_flags
|= NFS_OPEN_FILE_REOPENING
;
4850 lck_mtx_unlock(&nofp
->nof_lock
);
4852 nfs_node_lock_force(np
);
4853 if ((vnode_vtype(vp
) != VDIR
) && np
->n_sillyrename
) {
4855 * The node's been sillyrenamed, so we need to use
4856 * the sillyrename directory/name to do the open.
4858 struct nfs_sillyrename
*nsp
= np
->n_sillyrename
;
4859 dvp
= NFSTOV(nsp
->nsr_dnp
);
4860 if ((error
= vnode_get(dvp
))) {
4861 nfs_node_unlock(np
);
4864 name
= nsp
->nsr_name
;
4867 * [sigh] We can't trust VFS to get the parent right for named
4868 * attribute nodes. (It likes to reparent the nodes after we've
4869 * created them.) Luckily we can probably get the right parent
4870 * from the n_parent we have stashed away.
4872 if ((np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) &&
4873 (((dvp
= np
->n_parent
)) && (error
= vnode_get(dvp
)))) {
4877 dvp
= vnode_getparent(vp
);
4879 vname
= vnode_getname(vp
);
4880 if (!dvp
|| !vname
) {
4884 nfs_node_unlock(np
);
4889 filename
= &smallname
[0];
4890 namelen
= snprintf(filename
, sizeof(smallname
), "%s", name
);
4891 if (namelen
>= sizeof(smallname
)) {
4892 MALLOC(filename
, char *, namelen
+ 1, M_TEMP
, M_WAITOK
);
4897 snprintf(filename
, namelen
+ 1, "%s", name
);
4899 nfs_node_unlock(np
);
4900 bzero(&cn
, sizeof(cn
));
4901 cn
.cn_nameptr
= filename
;
4902 cn
.cn_namelen
= namelen
;
4906 if ((error
= nfs_mount_state_in_use_start(nmp
, thd
))) {
4911 error
= nfs4_open_reopen_rpc(nofp
, thd
, noop
->noo_cred
, &cn
, dvp
, &vp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_NONE
);
4913 if (!error
&& nofp
->nof_w
) {
4914 error
= nfs4_open_reopen_rpc(nofp
, thd
, noop
->noo_cred
, &cn
, dvp
, &vp
, NFS_OPEN_SHARE_ACCESS_WRITE
, NFS_OPEN_SHARE_DENY_NONE
);
4916 if (!error
&& nofp
->nof_r
) {
4917 error
= nfs4_open_reopen_rpc(nofp
, thd
, noop
->noo_cred
, &cn
, dvp
, &vp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
);
4920 if (nfs_mount_state_in_use_end(nmp
, error
)) {
4921 if (error
== NFSERR_GRACE
) {
4924 printf("nfs4_reopen: RPC failed, error %d, lost %d, %s\n", error
,
4925 (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) ? 1 : 0, name
? name
: "???");
4931 if (error
&& (error
!= EINTR
) && (error
!= ERESTART
)) {
4932 nfs_revoke_open_state_for_node(np
);
4934 lck_mtx_lock(&nofp
->nof_lock
);
4935 nofp
->nof_flags
&= ~NFS_OPEN_FILE_REOPENING
;
4937 nofp
->nof_flags
&= ~NFS_OPEN_FILE_REOPEN
;
4939 printf("nfs4_reopen: failed, error %d, lost %d, %s\n", error
,
4940 (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) ? 1 : 0, name
? name
: "???");
4942 lck_mtx_unlock(&nofp
->nof_lock
);
4943 if (filename
&& (filename
!= &smallname
[0])) {
4944 FREE(filename
, M_TEMP
);
4947 vnode_putname(vname
);
4949 if (dvp
!= NULLVP
) {
4956 * Send a normal OPEN RPC to open/create a file.
4960 struct nfs_open_file
*nofp
,
4962 struct componentname
*cnp
,
4963 struct vnode_attr
*vap
,
4970 return nfs4_open_rpc_internal(nofp
, ctx
, vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
4971 cnp
, vap
, dvp
, vpp
, create
, share_access
, share_deny
);
4975 * Send an OPEN RPC to reopen a file.
4978 nfs4_open_reopen_rpc(
4979 struct nfs_open_file
*nofp
,
4982 struct componentname
*cnp
,
4988 return nfs4_open_rpc_internal(nofp
, NULL
, thd
, cred
, cnp
, NULL
, dvp
, vpp
, NFS_OPEN_NOCREATE
, share_access
, share_deny
);
4992 * Send an OPEN_CONFIRM RPC to confirm an OPEN.
4995 nfs4_open_confirm_rpc(
4996 struct nfsmount
*nmp
,
5000 struct nfs_open_owner
*noop
,
5004 struct nfs_vattr
*nvap
,
5007 struct nfsm_chain nmreq
, nmrep
;
5008 int error
= 0, status
, numops
;
5009 struct nfsreq_secinfo_args si
;
5011 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
5012 nfsm_chain_null(&nmreq
);
5013 nfsm_chain_null(&nmrep
);
5015 // PUTFH, OPEN_CONFIRM, GETATTR
5017 nfsm_chain_build_alloc_init(error
, &nmreq
, 23 * NFSX_UNSIGNED
);
5018 nfsm_chain_add_compound_header(error
, &nmreq
, "open_confirm", nmp
->nm_minor_vers
, numops
);
5020 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5021 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, fhp
, fhlen
);
5023 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN_CONFIRM
);
5024 nfsm_chain_add_stateid(error
, &nmreq
, sid
);
5025 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5027 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5028 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
5029 nfsm_chain_build_done(error
, &nmreq
);
5030 nfsm_assert(error
, (numops
== 0), EPROTO
);
5032 error
= nfs_request2(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, R_NOINTR
, &nmrep
, xidp
, &status
);
5034 nfsm_chain_skip_tag(error
, &nmrep
);
5035 nfsm_chain_get_32(error
, &nmrep
, numops
);
5036 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
5038 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN_CONFIRM
);
5039 nfs_owner_seqid_increment(noop
, NULL
, error
);
5040 nfsm_chain_get_stateid(error
, &nmrep
, sid
);
5041 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5043 error
= nfs4_parsefattr(&nmrep
, NULL
, nvap
, NULL
, NULL
, NULL
);
5045 nfsm_chain_cleanup(&nmreq
);
5046 nfsm_chain_cleanup(&nmrep
);
5051 * common OPEN RPC code
5053 * If create is set, ctx must be passed in.
5054 * Returns a node on success if no node passed in.
5057 nfs4_open_rpc_internal(
5058 struct nfs_open_file
*nofp
,
5062 struct componentname
*cnp
,
5063 struct vnode_attr
*vap
,
5070 struct nfsmount
*nmp
;
5071 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
5072 struct nfs_vattr nvattr
;
5073 int error
= 0, open_error
= EIO
, lockerror
= ENOENT
, busyerror
= ENOENT
, status
;
5074 int nfsvers
, namedattrs
, numops
, exclusive
= 0, gotuid
, gotgid
;
5075 u_int64_t xid
, savedxid
= 0;
5076 nfsnode_t dnp
= VTONFS(dvp
);
5077 nfsnode_t np
, newnp
= NULL
;
5078 vnode_t newvp
= NULL
;
5079 struct nfsm_chain nmreq
, nmrep
;
5080 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
5081 uint32_t rflags
, delegation
, recall
;
5082 struct nfs_stateid stateid
, dstateid
, *sid
;
5084 struct nfsreq rq
, *req
= &rq
;
5085 struct nfs_dulookup dul
;
5087 uint32_t ace_type
, ace_flags
, ace_mask
, len
, slen
;
5088 struct kauth_ace ace
;
5089 struct nfsreq_secinfo_args si
;
5091 if (create
&& !ctx
) {
5096 if (nfs_mount_gone(nmp
)) {
5099 nfsvers
= nmp
->nm_vers
;
5100 namedattrs
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
);
5101 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
5105 np
= *vpp
? VTONFS(*vpp
) : NULL
;
5106 if (create
&& vap
) {
5107 exclusive
= (vap
->va_vaflags
& VA_EXCLUSIVE
);
5108 nfs_avoid_needless_id_setting_on_create(dnp
, vap
, ctx
);
5109 gotuid
= VATTR_IS_ACTIVE(vap
, va_uid
);
5110 gotgid
= VATTR_IS_ACTIVE(vap
, va_gid
);
5111 if (exclusive
&& (!VATTR_IS_ACTIVE(vap
, va_access_time
) || !VATTR_IS_ACTIVE(vap
, va_modify_time
))) {
5112 vap
->va_vaflags
|= VA_UTIMES_NULL
;
5115 exclusive
= gotuid
= gotgid
= 0;
5118 sid
= &nofp
->nof_stateid
;
5120 stateid
.seqid
= stateid
.other
[0] = stateid
.other
[1] = stateid
.other
[2] = 0;
5124 if ((error
= nfs_open_owner_set_busy(noop
, thd
))) {
5128 rflags
= delegation
= recall
= 0;
5131 slen
= sizeof(sbuf
);
5132 NVATTR_INIT(&nvattr
);
5133 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, cnp
->cn_nameptr
, cnp
->cn_namelen
);
5135 nfsm_chain_null(&nmreq
);
5136 nfsm_chain_null(&nmrep
);
5138 // PUTFH, SAVEFH, OPEN(CREATE?), GETATTR(FH), RESTOREFH, GETATTR
5140 nfsm_chain_build_alloc_init(error
, &nmreq
, 53 * NFSX_UNSIGNED
+ cnp
->cn_namelen
);
5141 nfsm_chain_add_compound_header(error
, &nmreq
, create
? "create" : "open", nmp
->nm_minor_vers
, numops
);
5143 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5144 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
5146 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
5148 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
5149 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5150 nfsm_chain_add_32(error
, &nmreq
, share_access
);
5151 nfsm_chain_add_32(error
, &nmreq
, share_deny
);
5152 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
);
5153 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
5154 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
));
5155 nfsm_chain_add_32(error
, &nmreq
, create
);
5158 static uint32_t create_verf
; // XXX need a better verifier
5160 nfsm_chain_add_32(error
, &nmreq
, NFS_CREATE_EXCLUSIVE
);
5161 /* insert 64 bit verifier */
5162 nfsm_chain_add_32(error
, &nmreq
, create_verf
);
5163 nfsm_chain_add_32(error
, &nmreq
, create_verf
);
5165 nfsm_chain_add_32(error
, &nmreq
, NFS_CREATE_UNCHECKED
);
5166 nfsm_chain_add_fattr4(error
, &nmreq
, vap
, nmp
);
5169 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_NULL
);
5170 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
5172 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5173 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
5174 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
5175 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
5177 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
5179 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5180 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
5181 nfsm_chain_build_done(error
, &nmreq
);
5182 nfsm_assert(error
, (numops
== 0), EPROTO
);
5184 error
= busyerror
= nfs_node_set_busy(dnp
, thd
);
5188 if (create
&& !namedattrs
) {
5189 nfs_dulookup_init(&dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
);
5192 error
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, R_NOINTR
, NULL
, &req
);
5194 if (create
&& !namedattrs
) {
5195 nfs_dulookup_start(&dul
, dnp
, ctx
);
5197 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
5201 if (create
&& !namedattrs
) {
5202 nfs_dulookup_finish(&dul
, dnp
, ctx
);
5205 if ((lockerror
= nfs_node_lock(dnp
))) {
5208 nfsm_chain_skip_tag(error
, &nmrep
);
5209 nfsm_chain_get_32(error
, &nmrep
, numops
);
5210 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
5211 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
5213 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
5214 nfs_owner_seqid_increment(noop
, NULL
, error
);
5215 nfsm_chain_get_stateid(error
, &nmrep
, sid
);
5216 nfsm_chain_check_change_info(error
, &nmrep
, dnp
);
5217 nfsm_chain_get_32(error
, &nmrep
, rflags
);
5218 bmlen
= NFS_ATTR_BITMAP_LEN
;
5219 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
5220 nfsm_chain_get_32(error
, &nmrep
, delegation
);
5222 switch (delegation
) {
5223 case NFS_OPEN_DELEGATE_NONE
:
5225 case NFS_OPEN_DELEGATE_READ
:
5226 case NFS_OPEN_DELEGATE_WRITE
:
5227 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
5228 nfsm_chain_get_32(error
, &nmrep
, recall
);
5229 if (delegation
== NFS_OPEN_DELEGATE_WRITE
) { // space (skip) XXX
5230 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
5232 /* if we have any trouble accepting the ACE, just invalidate it */
5233 ace_type
= ace_flags
= ace_mask
= len
= 0;
5234 nfsm_chain_get_32(error
, &nmrep
, ace_type
);
5235 nfsm_chain_get_32(error
, &nmrep
, ace_flags
);
5236 nfsm_chain_get_32(error
, &nmrep
, ace_mask
);
5237 nfsm_chain_get_32(error
, &nmrep
, len
);
5238 ace
.ace_flags
= nfs4_ace_nfstype_to_vfstype(ace_type
, &error
);
5239 ace
.ace_flags
|= nfs4_ace_nfsflags_to_vfsflags(ace_flags
);
5240 ace
.ace_rights
= nfs4_ace_nfsmask_to_vfsrights(ace_mask
);
5241 if (!error
&& (len
>= slen
)) {
5242 MALLOC(s
, char*, len
+ 1, M_TEMP
, M_WAITOK
);
5250 nfsm_chain_get_opaque(error
, &nmrep
, len
, s
);
5252 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(len
));
5256 if (nfs4_id2guid(s
, &ace
.ace_applicable
, (ace_flags
& NFS_ACE_IDENTIFIER_GROUP
))) {
5263 if (s
&& (s
!= sbuf
)) {
5272 /* At this point if we have no error, the object was created/opened. */
5275 if (create
&& vap
&& !exclusive
) {
5276 nfs_vattr_set_supported(bitmap
, vap
);
5278 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5280 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
5282 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
5283 printf("nfs: open/create didn't return filehandle? %s\n", cnp
->cn_nameptr
);
5287 if (!create
&& np
&& !NFS_CMPFH(np
, fh
.fh_data
, fh
.fh_len
)) {
5288 // XXX for the open case, what if fh doesn't match the vnode we think we're opening?
5289 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5290 if (!(np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
5291 NP(np
, "nfs4_open_rpc: warning: file handle mismatch");
5294 /* directory attributes: if we don't get them, make sure to invalidate */
5295 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
5296 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5297 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
5299 NATTRINVALIDATE(dnp
);
5303 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
) {
5304 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
5307 if (rflags
& NFS_OPEN_RESULT_CONFIRM
) {
5308 nfs_node_unlock(dnp
);
5310 NVATTR_CLEANUP(&nvattr
);
5311 error
= nfs4_open_confirm_rpc(nmp
, dnp
, fh
.fh_data
, fh
.fh_len
, noop
, sid
, thd
, cred
, &nvattr
, &xid
);
5314 if ((lockerror
= nfs_node_lock(dnp
))) {
5320 nfsm_chain_cleanup(&nmreq
);
5321 nfsm_chain_cleanup(&nmrep
);
5323 if (!lockerror
&& create
) {
5324 if (!open_error
&& (dnp
->n_flag
& NNEGNCENTRIES
)) {
5325 dnp
->n_flag
&= ~NNEGNCENTRIES
;
5326 cache_purge_negatives(dvp
);
5328 dnp
->n_flag
|= NMODIFIED
;
5329 nfs_node_unlock(dnp
);
5331 nfs_getattr(dnp
, NULL
, ctx
, NGA_CACHED
);
5334 nfs_node_unlock(dnp
);
5336 if (!error
&& !np
&& fh
.fh_len
) {
5337 /* create the vnode with the filehandle and attributes */
5339 error
= nfs_nget(NFSTOMP(dnp
), dnp
, cnp
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, NG_MAKEENTRY
, &newnp
);
5341 newvp
= NFSTOV(newnp
);
5344 NVATTR_CLEANUP(&nvattr
);
5346 nfs_node_clear_busy(dnp
);
5348 if ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
)) {
5352 if (!error
&& np
&& !recall
) {
5353 /* stuff the delegation state in the node */
5354 lck_mtx_lock(&np
->n_openlock
);
5355 np
->n_openflags
&= ~N_DELEG_MASK
;
5356 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
5357 np
->n_dstateid
= dstateid
;
5359 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5360 lck_mtx_lock(&nmp
->nm_lock
);
5361 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5362 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, np
, n_dlink
);
5364 lck_mtx_unlock(&nmp
->nm_lock
);
5366 lck_mtx_unlock(&np
->n_openlock
);
5368 /* give the delegation back */
5370 if (NFS_CMPFH(np
, fh
.fh_data
, fh
.fh_len
)) {
5371 /* update delegation state and return it */
5372 lck_mtx_lock(&np
->n_openlock
);
5373 np
->n_openflags
&= ~N_DELEG_MASK
;
5374 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
5375 np
->n_dstateid
= dstateid
;
5377 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5378 lck_mtx_lock(&nmp
->nm_lock
);
5379 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5380 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, np
, n_dlink
);
5382 lck_mtx_unlock(&nmp
->nm_lock
);
5384 lck_mtx_unlock(&np
->n_openlock
);
5385 /* don't need to send a separate delegreturn for fh */
5388 /* return np's current delegation */
5389 nfs4_delegation_return(np
, 0, thd
, cred
);
5391 if (fh
.fh_len
) { /* return fh's delegation if it wasn't for np */
5392 nfs4_delegreturn_rpc(nmp
, fh
.fh_data
, fh
.fh_len
, &dstateid
, 0, thd
, cred
);
5397 if (exclusive
&& (error
== NFSERR_NOTSUPP
)) {
5402 nfs_node_unlock(newnp
);
5405 } else if (create
) {
5406 nfs_node_unlock(newnp
);
5408 error
= nfs4_setattr_rpc(newnp
, vap
, ctx
);
5409 if (error
&& (gotuid
|| gotgid
)) {
5410 /* it's possible the server didn't like our attempt to set IDs. */
5411 /* so, let's try it again without those */
5412 VATTR_CLEAR_ACTIVE(vap
, va_uid
);
5413 VATTR_CLEAR_ACTIVE(vap
, va_gid
);
5414 error
= nfs4_setattr_rpc(newnp
, vap
, ctx
);
5423 nfs_open_owner_clear_busy(noop
);
5429 * Send an OPEN RPC to claim a delegated open for a file
5432 nfs4_claim_delegated_open_rpc(
5433 struct nfs_open_file
*nofp
,
5438 struct nfsmount
*nmp
;
5439 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
5440 struct nfs_vattr nvattr
;
5441 int error
= 0, lockerror
= ENOENT
, status
;
5442 int nfsvers
, numops
;
5444 nfsnode_t np
= nofp
->nof_np
;
5445 struct nfsm_chain nmreq
, nmrep
;
5446 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
5447 uint32_t rflags
= 0, delegation
, recall
= 0;
5449 struct nfs_stateid dstateid
;
5450 char sbuf
[64], *s
= sbuf
;
5451 uint32_t ace_type
, ace_flags
, ace_mask
, len
, slen
= sizeof(sbuf
);
5452 struct kauth_ace ace
;
5454 const char *vname
= NULL
;
5455 const char *name
= NULL
;
5457 char smallname
[128];
5458 char *filename
= NULL
;
5459 struct nfsreq_secinfo_args si
;
5462 if (nfs_mount_gone(nmp
)) {
5465 nfsvers
= nmp
->nm_vers
;
5467 nfs_node_lock_force(np
);
5468 if ((vnode_vtype(NFSTOV(np
)) != VDIR
) && np
->n_sillyrename
) {
5470 * The node's been sillyrenamed, so we need to use
5471 * the sillyrename directory/name to do the open.
5473 struct nfs_sillyrename
*nsp
= np
->n_sillyrename
;
5474 dvp
= NFSTOV(nsp
->nsr_dnp
);
5475 if ((error
= vnode_get(dvp
))) {
5476 nfs_node_unlock(np
);
5479 name
= nsp
->nsr_name
;
5482 * [sigh] We can't trust VFS to get the parent right for named
5483 * attribute nodes. (It likes to reparent the nodes after we've
5484 * created them.) Luckily we can probably get the right parent
5485 * from the n_parent we have stashed away.
5487 if ((np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) &&
5488 (((dvp
= np
->n_parent
)) && (error
= vnode_get(dvp
)))) {
5492 dvp
= vnode_getparent(NFSTOV(np
));
5494 vname
= vnode_getname(NFSTOV(np
));
5495 if (!dvp
|| !vname
) {
5499 nfs_node_unlock(np
);
5504 filename
= &smallname
[0];
5505 namelen
= snprintf(filename
, sizeof(smallname
), "%s", name
);
5506 if (namelen
>= sizeof(smallname
)) {
5507 MALLOC(filename
, char *, namelen
+ 1, M_TEMP
, M_WAITOK
);
5510 nfs_node_unlock(np
);
5513 snprintf(filename
, namelen
+ 1, "%s", name
);
5515 nfs_node_unlock(np
);
5517 if ((error
= nfs_open_owner_set_busy(noop
, NULL
))) {
5520 NVATTR_INIT(&nvattr
);
5521 delegation
= NFS_OPEN_DELEGATE_NONE
;
5522 dstateid
= np
->n_dstateid
;
5523 NFSREQ_SECINFO_SET(&si
, VTONFS(dvp
), NULL
, 0, filename
, namelen
);
5525 nfsm_chain_null(&nmreq
);
5526 nfsm_chain_null(&nmrep
);
5528 // PUTFH, OPEN, GETATTR(FH)
5530 nfsm_chain_build_alloc_init(error
, &nmreq
, 48 * NFSX_UNSIGNED
);
5531 nfsm_chain_add_compound_header(error
, &nmreq
, "open_claim_d", nmp
->nm_minor_vers
, numops
);
5533 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5534 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, VTONFS(dvp
)->n_fhp
, VTONFS(dvp
)->n_fhsize
);
5536 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
5537 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5538 nfsm_chain_add_32(error
, &nmreq
, share_access
);
5539 nfsm_chain_add_32(error
, &nmreq
, share_deny
);
5540 // open owner: clientid + uid
5541 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
); // open_owner4.clientid
5542 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
5543 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
)); // open_owner4.owner
5545 nfsm_chain_add_32(error
, &nmreq
, NFS_OPEN_NOCREATE
);
5547 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_DELEGATE_CUR
);
5548 nfsm_chain_add_stateid(error
, &nmreq
, &np
->n_dstateid
);
5549 nfsm_chain_add_name(error
, &nmreq
, filename
, namelen
, nmp
);
5551 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5552 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
5553 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
5554 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
5555 nfsm_chain_build_done(error
, &nmreq
);
5556 nfsm_assert(error
, (numops
== 0), EPROTO
);
5559 error
= nfs_request2(np
, nmp
->nm_mountp
, &nmreq
, NFSPROC4_COMPOUND
, current_thread(),
5560 noop
->noo_cred
, &si
, flags
| R_NOINTR
, &nmrep
, &xid
, &status
);
5562 if ((lockerror
= nfs_node_lock(np
))) {
5565 nfsm_chain_skip_tag(error
, &nmrep
);
5566 nfsm_chain_get_32(error
, &nmrep
, numops
);
5567 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
5569 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
5570 nfs_owner_seqid_increment(noop
, NULL
, error
);
5571 nfsm_chain_get_stateid(error
, &nmrep
, &nofp
->nof_stateid
);
5572 nfsm_chain_check_change_info(error
, &nmrep
, np
);
5573 nfsm_chain_get_32(error
, &nmrep
, rflags
);
5574 bmlen
= NFS_ATTR_BITMAP_LEN
;
5575 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
5576 nfsm_chain_get_32(error
, &nmrep
, delegation
);
5578 switch (delegation
) {
5579 case NFS_OPEN_DELEGATE_NONE
:
5580 // if (!(np->n_openflags & N_DELEG_RETURN)) /* don't warn if delegation is being returned */
5581 // printf("nfs: open delegated claim didn't return a delegation %s\n", filename ? filename : "???");
5583 case NFS_OPEN_DELEGATE_READ
:
5584 case NFS_OPEN_DELEGATE_WRITE
:
5585 if ((((np
->n_openflags
& N_DELEG_MASK
) == N_DELEG_READ
) &&
5586 (delegation
== NFS_OPEN_DELEGATE_WRITE
)) ||
5587 (((np
->n_openflags
& N_DELEG_MASK
) == N_DELEG_WRITE
) &&
5588 (delegation
== NFS_OPEN_DELEGATE_READ
))) {
5589 printf("nfs: open delegated claim returned a different delegation type! have %s got %s %s\n",
5590 ((np
->n_openflags
& N_DELEG_MASK
) == N_DELEG_WRITE
) ? "W" : "R",
5591 (delegation
== NFS_OPEN_DELEGATE_WRITE
) ? "W" : "R", filename
? filename
: "???");
5593 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
5594 nfsm_chain_get_32(error
, &nmrep
, recall
);
5595 if (delegation
== NFS_OPEN_DELEGATE_WRITE
) { // space (skip) XXX
5596 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
5598 /* if we have any trouble accepting the ACE, just invalidate it */
5599 ace_type
= ace_flags
= ace_mask
= len
= 0;
5600 nfsm_chain_get_32(error
, &nmrep
, ace_type
);
5601 nfsm_chain_get_32(error
, &nmrep
, ace_flags
);
5602 nfsm_chain_get_32(error
, &nmrep
, ace_mask
);
5603 nfsm_chain_get_32(error
, &nmrep
, len
);
5604 ace
.ace_flags
= nfs4_ace_nfstype_to_vfstype(ace_type
, &error
);
5605 ace
.ace_flags
|= nfs4_ace_nfsflags_to_vfsflags(ace_flags
);
5606 ace
.ace_rights
= nfs4_ace_nfsmask_to_vfsrights(ace_mask
);
5607 if (!error
&& (len
>= slen
)) {
5608 MALLOC(s
, char*, len
+ 1, M_TEMP
, M_WAITOK
);
5616 nfsm_chain_get_opaque(error
, &nmrep
, len
, s
);
5618 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(len
));
5622 if (nfs4_id2guid(s
, &ace
.ace_applicable
, (ace_flags
& NFS_ACE_IDENTIFIER_GROUP
))) {
5629 if (s
&& (s
!= sbuf
)) {
5633 /* stuff the latest delegation state in the node */
5634 lck_mtx_lock(&np
->n_openlock
);
5635 np
->n_openflags
&= ~N_DELEG_MASK
;
5636 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
5637 np
->n_dstateid
= dstateid
;
5639 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5640 lck_mtx_lock(&nmp
->nm_lock
);
5641 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5642 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, np
, n_dlink
);
5644 lck_mtx_unlock(&nmp
->nm_lock
);
5646 lck_mtx_unlock(&np
->n_openlock
);
5655 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5656 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
5658 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
5659 printf("nfs: open reclaim didn't return filehandle? %s\n", filename
? filename
: "???");
5663 if (!NFS_CMPFH(np
, fh
.fh_data
, fh
.fh_len
)) {
5664 // XXX what if fh doesn't match the vnode we think we're re-opening?
5665 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5666 if (!(np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
5667 printf("nfs4_claim_delegated_open_rpc: warning: file handle mismatch %s\n", filename
? filename
: "???");
5670 error
= nfs_loadattrcache(np
, &nvattr
, &xid
, 1);
5672 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
) {
5673 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
5676 NVATTR_CLEANUP(&nvattr
);
5677 nfsm_chain_cleanup(&nmreq
);
5678 nfsm_chain_cleanup(&nmrep
);
5680 nfs_node_unlock(np
);
5682 nfs_open_owner_clear_busy(noop
);
5683 if ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
)) {
5686 * We're making a delegated claim.
5687 * Don't return the delegation here in case we have more to claim.
5688 * Just make sure it's queued up to be returned.
5690 nfs4_delegation_return_enqueue(np
);
5695 // printf("nfs: open claim delegated (%d, %d) succeeded for %s\n", share_access, share_deny, filename ? filename : "???");
5696 if (filename
&& (filename
!= &smallname
[0])) {
5697 FREE(filename
, M_TEMP
);
5700 vnode_putname(vname
);
5702 if (dvp
!= NULLVP
) {
5709 * Send an OPEN RPC to reclaim an open file.
5712 nfs4_open_reclaim_rpc(
5713 struct nfs_open_file
*nofp
,
5717 struct nfsmount
*nmp
;
5718 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
5719 struct nfs_vattr nvattr
;
5720 int error
= 0, lockerror
= ENOENT
, status
;
5721 int nfsvers
, numops
;
5723 nfsnode_t np
= nofp
->nof_np
;
5724 struct nfsm_chain nmreq
, nmrep
;
5725 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
5726 uint32_t rflags
= 0, delegation
, recall
= 0;
5728 struct nfs_stateid dstateid
;
5729 char sbuf
[64], *s
= sbuf
;
5730 uint32_t ace_type
, ace_flags
, ace_mask
, len
, slen
= sizeof(sbuf
);
5731 struct kauth_ace ace
;
5732 struct nfsreq_secinfo_args si
;
5735 if (nfs_mount_gone(nmp
)) {
5738 nfsvers
= nmp
->nm_vers
;
5740 if ((error
= nfs_open_owner_set_busy(noop
, NULL
))) {
5744 NVATTR_INIT(&nvattr
);
5745 delegation
= NFS_OPEN_DELEGATE_NONE
;
5746 dstateid
= np
->n_dstateid
;
5747 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
5749 nfsm_chain_null(&nmreq
);
5750 nfsm_chain_null(&nmrep
);
5752 // PUTFH, OPEN, GETATTR(FH)
5754 nfsm_chain_build_alloc_init(error
, &nmreq
, 48 * NFSX_UNSIGNED
);
5755 nfsm_chain_add_compound_header(error
, &nmreq
, "open_reclaim", nmp
->nm_minor_vers
, numops
);
5757 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5758 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
5760 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
5761 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5762 nfsm_chain_add_32(error
, &nmreq
, share_access
);
5763 nfsm_chain_add_32(error
, &nmreq
, share_deny
);
5764 // open owner: clientid + uid
5765 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
); // open_owner4.clientid
5766 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
5767 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
)); // open_owner4.owner
5769 nfsm_chain_add_32(error
, &nmreq
, NFS_OPEN_NOCREATE
);
5771 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_PREVIOUS
);
5772 delegation
= (np
->n_openflags
& N_DELEG_READ
) ? NFS_OPEN_DELEGATE_READ
:
5773 (np
->n_openflags
& N_DELEG_WRITE
) ? NFS_OPEN_DELEGATE_WRITE
:
5774 NFS_OPEN_DELEGATE_NONE
;
5775 nfsm_chain_add_32(error
, &nmreq
, delegation
);
5776 delegation
= NFS_OPEN_DELEGATE_NONE
;
5778 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5779 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
5780 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
5781 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
5782 nfsm_chain_build_done(error
, &nmreq
);
5783 nfsm_assert(error
, (numops
== 0), EPROTO
);
5786 error
= nfs_request2(np
, nmp
->nm_mountp
, &nmreq
, NFSPROC4_COMPOUND
, current_thread(),
5787 noop
->noo_cred
, &si
, R_RECOVER
| R_NOINTR
, &nmrep
, &xid
, &status
);
5789 if ((lockerror
= nfs_node_lock(np
))) {
5792 nfsm_chain_skip_tag(error
, &nmrep
);
5793 nfsm_chain_get_32(error
, &nmrep
, numops
);
5794 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
5796 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
5797 nfs_owner_seqid_increment(noop
, NULL
, error
);
5798 nfsm_chain_get_stateid(error
, &nmrep
, &nofp
->nof_stateid
);
5799 nfsm_chain_check_change_info(error
, &nmrep
, np
);
5800 nfsm_chain_get_32(error
, &nmrep
, rflags
);
5801 bmlen
= NFS_ATTR_BITMAP_LEN
;
5802 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
5803 nfsm_chain_get_32(error
, &nmrep
, delegation
);
5805 switch (delegation
) {
5806 case NFS_OPEN_DELEGATE_NONE
:
5807 if (np
->n_openflags
& N_DELEG_MASK
) {
5809 * Hey! We were supposed to get our delegation back even
5810 * if it was getting immediately recalled. Bad server!
5812 * Just try to return the existing delegation.
5814 // NP(np, "nfs: open reclaim didn't return delegation?");
5815 delegation
= (np
->n_openflags
& N_DELEG_WRITE
) ? NFS_OPEN_DELEGATE_WRITE
: NFS_OPEN_DELEGATE_READ
;
5819 case NFS_OPEN_DELEGATE_READ
:
5820 case NFS_OPEN_DELEGATE_WRITE
:
5821 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
5822 nfsm_chain_get_32(error
, &nmrep
, recall
);
5823 if (delegation
== NFS_OPEN_DELEGATE_WRITE
) { // space (skip) XXX
5824 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
5826 /* if we have any trouble accepting the ACE, just invalidate it */
5827 ace_type
= ace_flags
= ace_mask
= len
= 0;
5828 nfsm_chain_get_32(error
, &nmrep
, ace_type
);
5829 nfsm_chain_get_32(error
, &nmrep
, ace_flags
);
5830 nfsm_chain_get_32(error
, &nmrep
, ace_mask
);
5831 nfsm_chain_get_32(error
, &nmrep
, len
);
5832 ace
.ace_flags
= nfs4_ace_nfstype_to_vfstype(ace_type
, &error
);
5833 ace
.ace_flags
|= nfs4_ace_nfsflags_to_vfsflags(ace_flags
);
5834 ace
.ace_rights
= nfs4_ace_nfsmask_to_vfsrights(ace_mask
);
5835 if (!error
&& (len
>= slen
)) {
5836 MALLOC(s
, char*, len
+ 1, M_TEMP
, M_WAITOK
);
5844 nfsm_chain_get_opaque(error
, &nmrep
, len
, s
);
5846 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(len
));
5850 if (nfs4_id2guid(s
, &ace
.ace_applicable
, (ace_flags
& NFS_ACE_IDENTIFIER_GROUP
))) {
5857 if (s
&& (s
!= sbuf
)) {
5861 /* stuff the delegation state in the node */
5862 lck_mtx_lock(&np
->n_openlock
);
5863 np
->n_openflags
&= ~N_DELEG_MASK
;
5864 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
5865 np
->n_dstateid
= dstateid
;
5867 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5868 lck_mtx_lock(&nmp
->nm_lock
);
5869 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5870 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, np
, n_dlink
);
5872 lck_mtx_unlock(&nmp
->nm_lock
);
5874 lck_mtx_unlock(&np
->n_openlock
);
5883 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5884 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
5886 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
5887 NP(np
, "nfs: open reclaim didn't return filehandle?");
5891 if (!NFS_CMPFH(np
, fh
.fh_data
, fh
.fh_len
)) {
5892 // XXX what if fh doesn't match the vnode we think we're re-opening?
5893 // That should be pretty hard in this case, given that we are doing
5894 // the open reclaim using the file handle (and not a dir/name pair).
5895 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5896 if (!(np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
5897 NP(np
, "nfs4_open_reclaim_rpc: warning: file handle mismatch");
5900 error
= nfs_loadattrcache(np
, &nvattr
, &xid
, 1);
5902 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
) {
5903 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
5907 // NP(np, "nfs: open reclaim (%d, %d) succeeded", share_access, share_deny);
5908 NVATTR_CLEANUP(&nvattr
);
5909 nfsm_chain_cleanup(&nmreq
);
5910 nfsm_chain_cleanup(&nmrep
);
5912 nfs_node_unlock(np
);
5914 nfs_open_owner_clear_busy(noop
);
5915 if ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
)) {
5917 nfs4_delegation_return_enqueue(np
);
/*
 * NFSv4 OPEN_DOWNGRADE RPC.
 *
 * Tell the server that this open file now holds only the (reduced)
 * access/deny modes recorded in nofp->nof_access / nofp->nof_deny.
 * Sends a PUTFH + OPEN_DOWNGRADE + GETATTR compound; on reply the open
 * stateid is replaced with the one returned and cached attributes are
 * refreshed.  The open owner is marked busy for the duration so its
 * seqid use is serialized.
 *
 * NOTE(review): the extraction elided short lines (braces, returns,
 * labels, numops bookkeeping); they are restored here from the visible
 * structure — verify against upstream xnu before relying on them.
 */
int
nfs4_open_downgrade_rpc(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	vfs_context_t ctx)
{
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfsmount *nmp;
	int error, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	u_int64_t xid;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	/* serialize use of the open owner (and its seqid) */
	if ((error = nfs_open_owner_set_busy(noop, NULL))) {
		return error;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, OPEN_DOWNGRADE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "open_downgrd", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_DOWNGRADE);
	nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	nfsm_chain_add_32(error, &nmreq, nofp->nof_access);
	nfsm_chain_add_32(error, &nmreq, nofp->nof_deny);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
		vfs_context_thread(ctx), vfs_context_ucred(ctx),
		&si, R_NOINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_DOWNGRADE);
	/* the open owner's seqid advances based on the op result */
	nfs_owner_seqid_increment(noop, NULL, error);
	nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfs_open_owner_clear_busy(noop);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
/*
 * NFSv4 CLOSE RPC.
 *
 * Close the given open file on the server: PUTFH + CLOSE + GETATTR.
 * The returned stateid replaces nofp->nof_stateid and cached attributes
 * are refreshed from the GETATTR result.  `flags` is OR'd with R_NOINTR
 * for the request (callers pass e.g. R_RECOVER during state recovery —
 * see the call in nfs_release_open_state_for_node()).
 *
 * NOTE(review): the function's leading signature lines were elided by
 * extraction; the name and parameter order are reconstructed from the
 * call site nfs4_close_rpc(np, nofp, NULL, cred, R_RECOVER) and from
 * the visible body (thd, cred, flags all used) — verify upstream.
 */
int
nfs4_close_rpc(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	thread_t thd,
	kauth_cred_t cred,
	int flags)
{
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfsmount *nmp;
	int error, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	u_int64_t xid;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	/* serialize use of the open owner (and its seqid) */
	if ((error = nfs_open_owner_set_busy(noop, NULL))) {
		return error;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, CLOSE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "close", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_CLOSE);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_CLOSE);
	/* the open owner's seqid advances based on the op result */
	nfs_owner_seqid_increment(noop, NULL, error);
	nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfs_open_owner_clear_busy(noop);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
/*
 * Claim the delegated open combinations this open file holds.
 *
 * For each access/deny combination with a delegated open count
 * (nof_d_*), send a claim-delegated OPEN to the server and, on success,
 * move the count into the corresponding confirmed bucket (nof_*).
 * Deny-mode opens have no fallback; for the no-deny modes a lost
 * delegation can instead be handled by marking the file for reopen on
 * next access — but only if no locks are held.  After the opens, any
 * delegated file locks held by this open owner are re-claimed via
 * nf_setlock_rpc.  On unrecoverable errors the delegation is queued for
 * return, recovery is requested, and all open state for the node is
 * revoked.
 *
 * NOTE(review): short elided lines (braces, returns, guards) are
 * restored from the visible structure; in particular the `if (reopen)`
 * guard around the first return-enqueue block is inferred — verify
 * against upstream xnu.
 */
int
nfs4_claim_delegated_state_for_open_file(struct nfs_open_file *nofp, int flags)
{
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfs_lock_owner *nlop;
	struct nfs_file_lock *nflp, *nextnflp;
	struct nfsmount *nmp;
	int error = 0, reopen = 0;

	/* deny-mode opens: must be claimed; no reopen fallback */
	if (nofp->nof_d_rw_drw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_BOTH, flags);
		if (!error) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_rw_drw += nofp->nof_d_rw_drw;
			nofp->nof_d_rw_drw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	if (!error && nofp->nof_d_w_drw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_BOTH, flags);
		if (!error) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_w_drw += nofp->nof_d_w_drw;
			nofp->nof_d_w_drw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	if (!error && nofp->nof_d_r_drw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_BOTH, flags);
		if (!error) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_r_drw += nofp->nof_d_r_drw;
			nofp->nof_d_r_drw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	if (!error && nofp->nof_d_rw_dw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_WRITE, flags);
		if (!error) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_rw_dw += nofp->nof_d_rw_dw;
			nofp->nof_d_rw_dw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	if (!error && nofp->nof_d_w_dw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_WRITE, flags);
		if (!error) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_w_dw += nofp->nof_d_w_dw;
			nofp->nof_d_w_dw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	if (!error && nofp->nof_d_r_dw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_WRITE, flags);
		if (!error) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_r_dw += nofp->nof_d_r_dw;
			nofp->nof_d_r_dw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	/* non-deny-mode opens may be reopened if no locks are held */
	if (!error && nofp->nof_d_rw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, flags);
		/* for some errors, we should just try reopening the file */
		if (nfs_mount_state_error_delegation_lost(error)) {
			reopen = error;
		}
		if (!error || reopen) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_rw += nofp->nof_d_rw;
			nofp->nof_d_rw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	/* if we've already set reopen, we should move these other two opens from delegated to not delegated */
	if ((!error || reopen) && nofp->nof_d_w) {
		if (!error) {
			error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE, flags);
			/* for some errors, we should just try reopening the file */
			if (nfs_mount_state_error_delegation_lost(error)) {
				reopen = error;
			}
		}
		if (!error || reopen) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_w += nofp->nof_d_w;
			nofp->nof_d_w = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	if ((!error || reopen) && nofp->nof_d_r) {
		if (!error) {
			error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, flags);
			/* for some errors, we should just try reopening the file */
			if (nfs_mount_state_error_delegation_lost(error)) {
				reopen = error;
			}
		}
		if (!error || reopen) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_r += nofp->nof_d_r;
			nofp->nof_d_r = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}

	if (reopen) {
		/*
		 * Any problems with the delegation probably indicates that we
		 * should review/return all of our current delegation state.
		 */
		if ((nmp = NFSTONMP(nofp->nof_np))) {
			nfs4_delegation_return_enqueue(nofp->nof_np);
			lck_mtx_lock(&nmp->nm_lock);
			nfs_need_recover(nmp, NFSERR_EXPIRED);
			lck_mtx_unlock(&nmp->nm_lock);
		}
	}
	if (reopen && (nfs_check_for_locks(noop, nofp) == 0)) {
		/* just reopen the file on next access */
		NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, need reopen, %d",
		    reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
		lck_mtx_lock(&nofp->nof_lock);
		nofp->nof_flags |= NFS_OPEN_FILE_REOPEN;
		lck_mtx_unlock(&nofp->nof_lock);
		return 0;
	}
	if (reopen) {
		NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, locks prevent reopen, %d",
		    reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
	}

	if (!error && ((nmp = NFSTONMP(nofp->nof_np)))) {
		/* claim delegated locks */
		TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
			if (nlop->nlo_open_owner != noop) {
				continue;
			}
			TAILQ_FOREACH_SAFE(nflp, &nlop->nlo_locks, nfl_lolink, nextnflp) {
				/* skip dead & blocked lock requests (shouldn't be any in the held lock list) */
				if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) {
					continue;
				}
				/* skip non-delegated locks */
				if (!(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
					continue;
				}
				error = nmp->nm_funcs->nf_setlock_rpc(nofp->nof_np, nofp, nflp, 0, flags, current_thread(), noop->noo_cred);
				if (error) {
					NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) failed %d, %d",
					    nflp->nfl_start, nflp->nfl_end, error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
					break;
				}
				// NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) succeeded, %d",
				//     nflp->nfl_start, nflp->nfl_end, kauth_cred_getuid(nofp->nof_owner->noo_cred));
			}
			if (error) {
				break;
			}
		}
	}

	if (!error) { /* all state claimed successfully! */
		return 0;
	}

	/* restart if it looks like a problem more than just losing the delegation */
	if (!nfs_mount_state_error_delegation_lost(error) &&
	    ((error == ETIMEDOUT) || nfs_mount_state_error_should_restart(error))) {
		NP(nofp->nof_np, "nfs delegated lock claim error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
		if ((error == ETIMEDOUT) && ((nmp = NFSTONMP(nofp->nof_np)))) {
			nfs_need_reconnect(nmp);
		}
		return error;
	}

	/* delegated state lost (once held but now not claimable) */
	NP(nofp->nof_np, "nfs delegated state claim error %d, state lost, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));

	/*
	 * Any problems with the delegation probably indicates that we
	 * should review/return all of our current delegation state.
	 */
	if ((nmp = NFSTONMP(nofp->nof_np))) {
		nfs4_delegation_return_enqueue(nofp->nof_np);
		lck_mtx_lock(&nmp->nm_lock);
		nfs_need_recover(nmp, NFSERR_EXPIRED);
		lck_mtx_unlock(&nmp->nm_lock);
	}

	/* revoke all open file state */
	nfs_revoke_open_state_for_node(nofp->nof_np);

	return error;
}
/*
 * Release all open state for the given node.
 *
 * Drops every held file lock (sending an unlock RPC for non-delegated
 * locks unless `force` is set or the mount is gone), then marks every
 * open file as LOST (clearing any pending REOPEN) and, for NFSv4,
 * closes it on the server with R_RECOVER unless `force` is set.
 *
 * NOTE(review): elided short lines (continue/braces/wakeup) restored
 * from the visible structure — verify against upstream xnu.
 */
void
nfs_release_open_state_for_node(nfsnode_t np, int force)
{
	struct nfsmount *nmp = NFSTONMP(np);
	struct nfs_open_file *nofp;
	struct nfs_file_lock *nflp, *nextnflp;

	/* drop held locks */
	TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
		/* skip dead & blocked lock requests */
		if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) {
			continue;
		}
		/* send an unlock if not a delegated lock */
		if (!force && nmp && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
			nmp->nm_funcs->nf_unlock_rpc(np, nflp->nfl_owner, F_WRLCK, nflp->nfl_start, nflp->nfl_end, R_RECOVER,
			    NULL, nflp->nfl_owner->nlo_open_owner->noo_cred);
		}
		/* kill/remove the lock */
		lck_mtx_lock(&np->n_openlock);
		nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
		lck_mtx_lock(&nflp->nfl_owner->nlo_lock);
		TAILQ_REMOVE(&nflp->nfl_owner->nlo_locks, nflp, nfl_lolink);
		lck_mtx_unlock(&nflp->nfl_owner->nlo_lock);
		if (nflp->nfl_blockcnt) {
			/* wake up anyone blocked on this lock */
			wakeup(nflp);
		} else {
			/* remove nflp from lock list and destroy */
			TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
			nfs_file_lock_destroy(nflp);
		}
		lck_mtx_unlock(&np->n_openlock);
	}

	lck_mtx_lock(&np->n_openlock);

	/* drop all opens */
	TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
		if (nofp->nof_flags & NFS_OPEN_FILE_LOST) {
			continue;
		}
		/* mark open state as lost */
		lck_mtx_lock(&nofp->nof_lock);
		nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
		nofp->nof_flags |= NFS_OPEN_FILE_LOST;
		lck_mtx_unlock(&nofp->nof_lock);
		if (!force && nmp && (nmp->nm_vers >= NFS_VER4)) {
			nfs4_close_rpc(np, nofp, NULL, nofp->nof_owner->noo_cred, R_RECOVER);
		}
	}

	lck_mtx_unlock(&np->n_openlock);
}
/*
 * State for a node has been lost, drop it, and revoke the node.
 * Attempt to return any state if possible in case the server
 * might somehow think we hold it.
 *
 * Marks the node NREVOKE (idempotent: bails if already set), releases
 * all of its open state, then flags the mount for a revoke scan and
 * wakes the socket thread to perform it.
 */
void
nfs_revoke_open_state_for_node(nfsnode_t np)
{
	struct nfsmount *nmp;

	/* mark node as needing to be revoked */
	nfs_node_lock_force(np);
	if (np->n_flag & NREVOKE) { /* already revoked? */
		NP(np, "nfs_revoke_open_state_for_node(): already revoked");
		nfs_node_unlock(np);
		return;
	}
	np->n_flag |= NREVOKE;
	nfs_node_unlock(np);

	nfs_release_open_state_for_node(np, 0);
	NP(np, "nfs: state lost for %p 0x%x", np, np->n_flag);

	/* mark mount as needing a revoke scan and have the socket thread do it. */
	if ((nmp = NFSTONMP(np))) {
		lck_mtx_lock(&nmp->nm_lock);
		nmp->nm_state |= NFSSTA_REVOKE;
		nfs_mount_sock_thread_wake(nmp);
		lck_mtx_unlock(&nmp->nm_lock);
	}
}
/*
 * Claim the delegated open combinations that each of this node's open files hold.
 *
 * Walks n_opens under n_openlock; for any open file holding delegated
 * opens (any nof_d_* count non-zero) the lock is dropped, the claim is
 * performed, and the scan restarts from the head since the list may
 * have changed while unlocked.  Stops on the first error.
 *
 * NOTE(review): the restart label and loop-exit lines were elided by
 * extraction and are restored from the visible structure — verify
 * against upstream xnu.
 */
int
nfs4_claim_delegated_state_for_node(nfsnode_t np, int flags)
{
	struct nfs_open_file *nofp;
	int error = 0;

	lck_mtx_lock(&np->n_openlock);

	/* walk the open file list looking for opens with delegated state to claim */
restart:
	TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
		if (!nofp->nof_d_rw_drw && !nofp->nof_d_w_drw && !nofp->nof_d_r_drw &&
		    !nofp->nof_d_rw_dw && !nofp->nof_d_w_dw && !nofp->nof_d_r_dw &&
		    !nofp->nof_d_rw && !nofp->nof_d_w && !nofp->nof_d_r) {
			continue;
		}
		lck_mtx_unlock(&np->n_openlock);
		error = nfs4_claim_delegated_state_for_open_file(nofp, flags);
		lck_mtx_lock(&np->n_openlock);
		if (error) {
			break;
		}
		/* list may have changed while the lock was dropped — rescan */
		goto restart;
	}

	lck_mtx_unlock(&np->n_openlock);

	return error;
}
/*
 * Mark a node as needed to have its delegation returned.
 * Queue it up on the delegation return queue.
 * Make sure the thread is running.
 *
 * Sets N_DELEG_RETURN on the node, inserts it on the mount's
 * nm_dreturnq (if not already queued — NFSNOLIST marks "not on list"),
 * and wakes the socket thread which services the queue.
 */
void
nfs4_delegation_return_enqueue(nfsnode_t np)
{
	struct nfsmount *nmp;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return;
	}

	lck_mtx_lock(&np->n_openlock);
	np->n_openflags |= N_DELEG_RETURN;
	lck_mtx_unlock(&np->n_openlock);

	lck_mtx_lock(&nmp->nm_lock);
	if (np->n_dreturn.tqe_next == NFSNOLIST) {
		TAILQ_INSERT_TAIL(&nmp->nm_dreturnq, np, n_dreturn);
	}
	nfs_mount_sock_thread_wake(nmp);
	lck_mtx_unlock(&nmp->nm_lock);
}
/*
 * Return any delegation we may have for the given node.
 *
 * Marks the node N_DELEG_RETURN|N_DELEG_RETURNING, takes the open-state
 * busy lock, claims any delegated opens/locks first (so no state is
 * lost), then sends DELEGRETURN with the node's delegation stateid.
 * For all errors except ETIMEDOUT / NFSERR_MOVED / NFSERR_LEASE_MOVED
 * the delegation is assumed gone and the node is removed from the
 * mount's delegation list.  Cleanup always removes the node from the
 * return queue and clears the return flags; on error it may trigger
 * reconnect/recovery.
 *
 * NOTE(review): the `out:` label and some short lines were elided by
 * extraction and are restored from the visible structure — verify
 * against upstream xnu.
 */
int
nfs4_delegation_return(nfsnode_t np, int flags, thread_t thd, kauth_cred_t cred)
{
	struct nfsmount *nmp;
	fhandle_t fh;
	nfs_stateid dstateid;
	int error;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	/* first, make sure the node's marked for delegation return */
	lck_mtx_lock(&np->n_openlock);
	np->n_openflags |= (N_DELEG_RETURN | N_DELEG_RETURNING);
	lck_mtx_unlock(&np->n_openlock);

	/* make sure nobody else is using the delegation state */
	if ((error = nfs_open_state_set_busy(np, NULL))) {
		goto out;
	}

	/* claim any delegated state */
	if ((error = nfs4_claim_delegated_state_for_node(np, flags))) {
		goto out;
	}

	/* return the delegation */
	lck_mtx_lock(&np->n_openlock);
	dstateid = np->n_dstateid;
	fh.fh_len = np->n_fhsize;
	bcopy(np->n_fhp, &fh.fh_data, fh.fh_len);
	lck_mtx_unlock(&np->n_openlock);
	error = nfs4_delegreturn_rpc(NFSTONMP(np), fh.fh_data, fh.fh_len, &dstateid, flags, thd, cred);
	/* assume delegation is gone for all errors except ETIMEDOUT, NFSERR_*MOVED */
	if ((error != ETIMEDOUT) && (error != NFSERR_MOVED) && (error != NFSERR_LEASE_MOVED)) {
		lck_mtx_lock(&np->n_openlock);
		np->n_openflags &= ~N_DELEG_MASK;
		lck_mtx_lock(&nmp->nm_lock);
		if (np->n_dlink.tqe_next != NFSNOLIST) {
			TAILQ_REMOVE(&nmp->nm_delegations, np, n_dlink);
			np->n_dlink.tqe_next = NFSNOLIST;
		}
		lck_mtx_unlock(&nmp->nm_lock);
		lck_mtx_unlock(&np->n_openlock);
	}

out:
	/* make sure it's no longer on the return queue and clear the return flags */
	lck_mtx_lock(&nmp->nm_lock);
	if (np->n_dreturn.tqe_next != NFSNOLIST) {
		TAILQ_REMOVE(&nmp->nm_dreturnq, np, n_dreturn);
		np->n_dreturn.tqe_next = NFSNOLIST;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	lck_mtx_lock(&np->n_openlock);
	np->n_openflags &= ~(N_DELEG_RETURN | N_DELEG_RETURNING);
	lck_mtx_unlock(&np->n_openlock);

	if (error) {
		NP(np, "nfs4_delegation_return, error %d", error);
		if (error == ETIMEDOUT) {
			nfs_need_reconnect(nmp);
		}
		if (nfs_mount_state_error_should_restart(error)) {
			/* make sure recovery happens */
			lck_mtx_lock(&nmp->nm_lock);
			nfs_need_recover(nmp, nfs_mount_state_error_delegation_lost(error) ? NFSERR_EXPIRED : 0);
			lck_mtx_unlock(&nmp->nm_lock);
		}
	}

	nfs_open_state_clear_busy(np);

	return error;
}
/*
 * RPC to return a delegation for a file handle.
 *
 * Sends a PUTFH + DELEGRETURN compound for the given file handle and
 * delegation stateid.  `flags` is passed through to nfs_request2 (e.g.
 * R_RECOVER during recovery).  Returns 0 or an NFS/errno error.
 */
int
nfs4_delegreturn_rpc(struct nfsmount *nmp, u_char *fhp, int fhlen, struct nfs_stateid *sid, int flags, thread_t thd, kauth_cred_t cred)
{
	int error = 0, status, numops;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	NFSREQ_SECINFO_SET(&si, NULL, fhp, fhlen, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, DELEGRETURN
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "delegreturn", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_DELEGRETURN);
	nfsm_chain_add_stateid(error, &nmreq, sid);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags, &nmrep, &xid, &status);
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_DELEGRETURN);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
/*
 * NFSv4 read.
 * Just call nfs_bioread() to do the work.
 *
 * Note: the exec code paths have a tendency to call VNOP_READ (and VNOP_MMAP)
 * without first calling VNOP_OPEN, so we make sure the file is open here.
 *
 * NOTE(review): elided short lines (returns, labels, braces) restored
 * from the visible structure — verify against upstream xnu.
 */
int
nfs4_vnop_read(
	struct vnop_read_args /* {
	 *  struct vnodeop_desc *a_desc;
	 *  vnode_t a_vp;
	 *  struct uio *a_uio;
	 *  int a_ioflag;
	 *  vfs_context_t a_context;
	 *  } */*ap)
{
	vnode_t vp = ap->a_vp;
	vfs_context_t ctx = ap->a_context;
	nfsnode_t np;
	struct nfsmount *nmp;
	struct nfs_open_owner *noop;
	struct nfs_open_file *nofp;
	int error;

	if (vnode_vtype(ap->a_vp) != VREG) {
		return (vnode_vtype(vp) == VDIR) ? EISDIR : EPERM;
	}

	np = VTONFS(vp);
	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_flag & NREVOKE) {
		return EIO;
	}

	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
	if (!noop) {
		return ENOMEM;
	}
restart:
	error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
		NP(np, "nfs_vnop_read: LOST %d", kauth_cred_getuid(noop->noo_cred));
		error = EIO;
	}
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
		error = nfs4_reopen(nofp, vfs_context_thread(ctx));
		nofp = NULL;
		if (!error) {
			goto restart;
		}
	}
	if (error) {
		nfs_open_owner_rele(noop);
		return error;
	}
	/*
	 * Since the read path is a hot path, if we already have
	 * read access, lets go and try and do the read, without
	 * busying the mount and open file node for this open owner.
	 *
	 * N.B. This is inherently racy w.r.t. an execve using
	 * an already open file, in that the read at the end of
	 * this routine will be racing with a potential close.
	 * The code below ultimately has the same problem. In practice
	 * this does not seem to be an issue.
	 */
	if (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) {
		nfs_open_owner_rele(noop);
		goto do_read;
	}
	error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
	if (error) {
		nfs_open_owner_rele(noop);
		return error;
	}
	/*
	 * If we don't have a file already open with the access we need (read) then
	 * we need to open one. Otherwise we just co-opt an open. We might not already
	 * have access because we're trying to read the first page of the
	 * file for execve.
	 */
	error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
	if (error) {
		nfs_mount_state_in_use_end(nmp, 0);
		nfs_open_owner_rele(noop);
		return error;
	}
	if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
		/* we don't have the file open, so open it for read access if we're not denied */
		if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
			NP(np, "nfs_vnop_read: File already needs close access: 0x%x, cred: %d thread: %lld",
			    nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
		}
		if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) {
			nfs_open_file_clear_busy(nofp);
			nfs_mount_state_in_use_end(nmp, 0);
			nfs_open_owner_rele(noop);
			return EPERM;
		}
		if (np->n_flag & NREVOKE) {
			error = EIO;
			nfs_open_file_clear_busy(nofp);
			nfs_mount_state_in_use_end(nmp, 0);
			nfs_open_owner_rele(noop);
			return error;
		}
		if (nmp->nm_vers < NFS_VER4) {
			/* NFS v2/v3 opens are always allowed - so just add it. */
			nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
		} else {
			error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
		}
		if (!error) {
			nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
		}
	}
	if (nofp) {
		nfs_open_file_clear_busy(nofp);
	}
	if (nfs_mount_state_in_use_end(nmp, error)) {
		nofp = NULL;
		goto restart;
	}
	nfs_open_owner_rele(noop);
	if (error) {
		return error;
	}
do_read:
	return nfs_bioread(VTONFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_context);
}
/*
 * NFSv4 create.
 *
 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
 * Files are created using the NFSv4 OPEN RPC. So we must open the
 * file to create it and then close it.
 *
 * A provisional nodeless open file is created, the OPEN(create) RPC is
 * issued with ACCESS_BOTH/DENY_NONE, and on success the open file is
 * attached to the resulting node and marked NFS_OPEN_FILE_CREATE.
 * An EACCES after a retransmitted exclusive-less create with a
 * non-writable mode is worked around by temporarily adding S_IWUSR,
 * retrying the open, and restoring the mode.
 *
 * NOTE(review): elided short lines (returns, labels, braces, cleanup)
 * restored from the visible structure — verify against upstream xnu.
 */
int
nfs4_vnop_create(
	struct vnop_create_args /* {
	 *  struct vnodeop_desc *a_desc;
	 *  vnode_t a_dvp;
	 *  vnode_t *a_vpp;
	 *  struct componentname *a_cnp;
	 *  struct vnode_attr *a_vap;
	 *  vfs_context_t a_context;
	 *  } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	struct componentname *cnp = ap->a_cnp;
	struct vnode_attr *vap = ap->a_vap;
	vnode_t dvp = ap->a_dvp;
	vnode_t *vpp = ap->a_vpp;
	struct nfsmount *nmp;
	nfsnode_t np;
	int error = 0, busyerror = 0, accessMode, denyMode;
	struct nfs_open_owner *noop = NULL;
	struct nfs_open_file *newnofp = NULL, *nofp = NULL;

	nmp = VTONMP(dvp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	if (vap) {
		nfs_avoid_needless_id_setting_on_create(VTONFS(dvp), vap, ctx);
	}

	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
	if (!noop) {
		return ENOMEM;
	}

restart:
	error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
	if (error) {
		nfs_open_owner_rele(noop);
		return error;
	}

	/* grab a provisional, nodeless open file */
	error = nfs_open_file_find(NULL, noop, &newnofp, 0, 0, 1);
	if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
		printf("nfs_vnop_create: LOST\n");
		error = EIO;
	}
	if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
		/* This shouldn't happen given that this is a new, nodeless nofp */
		nfs_mount_state_in_use_end(nmp, 0);
		error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
		nfs_open_file_destroy(newnofp);
		newnofp = NULL;
		if (!error) {
			goto restart;
		}
	}
	if (!error) {
		error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
	}
	if (error) {
		if (newnofp) {
			nfs_open_file_destroy(newnofp);
		}
		newnofp = NULL;
		goto out;
	}

	/*
	 * We're just trying to create the file.
	 * We'll create/open it RW, and set NFS_OPEN_FILE_CREATE.
	 */
	accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
	denyMode = NFS_OPEN_SHARE_DENY_NONE;

	/* Do the open/create */
	error = nfs4_open_rpc(newnofp, ctx, cnp, vap, dvp, vpp, NFS_OPEN_CREATE, accessMode, denyMode);
	if ((error == EACCES) && vap && !(vap->va_vaflags & VA_EXCLUSIVE) &&
	    VATTR_IS_ACTIVE(vap, va_mode) && !(vap->va_mode & S_IWUSR)) {
		/*
		 * Hmm... it looks like we may have a situation where the request was
		 * retransmitted because we didn't get the first response which successfully
		 * created/opened the file and then the second time we were denied the open
		 * because the mode the file was created with doesn't allow write access.
		 *
		 * We'll try to work around this by temporarily updating the mode and
		 * retrying the open.
		 */
		struct vnode_attr vattr;

		/* first make sure it's there */
		int error2 = nfs_lookitup(VTONFS(dvp), cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
		if (!error2 && np) {
			nfs_node_unlock(np);
			*vpp = NULL;
			if (vnode_vtype(NFSTOV(np)) == VREG) {
				VATTR_INIT(&vattr);
				VATTR_SET(&vattr, va_mode, (vap->va_mode | S_IWUSR));
				if (!nfs4_setattr_rpc(np, &vattr, ctx)) {
					error2 = nfs4_open_rpc(newnofp, ctx, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, accessMode, denyMode);
					VATTR_INIT(&vattr);
					VATTR_SET(&vattr, va_mode, vap->va_mode);
					nfs4_setattr_rpc(np, &vattr, ctx);
					if (!error2) {
						error = 0;
					}
				}
			}
			if (error) {
				vnode_put(NFSTOV(np));
				vnode_recycle(NFSTOV(np));
				*vpp = NULL;
			}
		}
	}
	if (!error && !*vpp) {
		printf("nfs4_open_rpc returned without a node?\n");
		/* Hmmm... with no node, we have no filehandle and can't close it */
		error = EIO;
	}
	if (error) {
		/* need to cleanup our temporary nofp */
		nfs_open_file_clear_busy(newnofp);
		nfs_open_file_destroy(newnofp);
		newnofp = NULL;
		goto out;
	}
	/* After we have a node, add our open file struct to the node */
	np = VTONFS(*vpp);
	nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
	nofp = newnofp;
	error = nfs_open_file_find_internal(np, noop, &nofp, 0, 0, 0);
	if (error) {
		/* This shouldn't happen, because we passed in a new nofp to use. */
		printf("nfs_open_file_find_internal failed! %d\n", error);
		nofp = NULL;
	} else if (nofp != newnofp) {
		/*
		 * Hmm... an open file struct already exists.
		 * Mark the existing one busy and merge our open into it.
		 * Then destroy the one we created.
		 * Note: there's no chance of an open confict because the
		 * open has already been granted.
		 */
		busyerror = nfs_open_file_set_busy(nofp, NULL);
		nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
		nofp->nof_stateid = newnofp->nof_stateid;
		if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) {
			nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
		}
		nfs_open_file_clear_busy(newnofp);
		nfs_open_file_destroy(newnofp);
	}
	newnofp = NULL;
	/* mark the node as holding a create-initiated open */
	nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
	nofp->nof_creator = current_thread();
out:
	if (nofp && !busyerror) {
		nfs_open_file_clear_busy(nofp);
	}
	if (nfs_mount_state_in_use_end(nmp, error)) {
		nofp = newnofp = NULL;
		busyerror = 0;
		goto restart;
	}
	if (noop) {
		nfs_open_owner_rele(noop);
	}
	return error;
}
6856 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6862 struct componentname
*cnp
,
6863 struct vnode_attr
*vap
,
6868 struct nfsmount
*nmp
;
6869 struct nfs_vattr nvattr
;
6870 int error
= 0, create_error
= EIO
, lockerror
= ENOENT
, busyerror
= ENOENT
, status
;
6871 int nfsvers
, namedattrs
, numops
;
6872 u_int64_t xid
, savedxid
= 0;
6873 nfsnode_t np
= NULL
;
6874 vnode_t newvp
= NULL
;
6875 struct nfsm_chain nmreq
, nmrep
;
6876 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
6880 struct nfsreq rq
, *req
= &rq
;
6881 struct nfs_dulookup dul
;
6882 struct nfsreq_secinfo_args si
;
6884 nmp
= NFSTONMP(dnp
);
6885 if (nfs_mount_gone(nmp
)) {
6888 nfsvers
= nmp
->nm_vers
;
6889 namedattrs
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
);
6890 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
6894 sd
.specdata1
= sd
.specdata2
= 0;
6903 if (!VATTR_IS_ACTIVE(vap
, va_rdev
)) {
6906 sd
.specdata1
= major(vap
->va_rdev
);
6907 sd
.specdata2
= minor(vap
->va_rdev
);
6920 nfs_avoid_needless_id_setting_on_create(dnp
, vap
, ctx
);
6922 error
= busyerror
= nfs_node_set_busy(dnp
, vfs_context_thread(ctx
));
6924 nfs_dulookup_init(&dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
);
6927 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
6928 NVATTR_INIT(&nvattr
);
6929 nfsm_chain_null(&nmreq
);
6930 nfsm_chain_null(&nmrep
);
6932 // PUTFH, SAVEFH, CREATE, GETATTR(FH), RESTOREFH, GETATTR
6934 nfsm_chain_build_alloc_init(error
, &nmreq
, 66 * NFSX_UNSIGNED
);
6935 nfsm_chain_add_compound_header(error
, &nmreq
, tag
, nmp
->nm_minor_vers
, numops
);
6937 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
6938 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
6940 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
6942 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_CREATE
);
6943 nfsm_chain_add_32(error
, &nmreq
, type
);
6944 if (type
== NFLNK
) {
6945 nfsm_chain_add_name(error
, &nmreq
, link
, strlen(link
), nmp
);
6946 } else if ((type
== NFBLK
) || (type
== NFCHR
)) {
6947 nfsm_chain_add_32(error
, &nmreq
, sd
.specdata1
);
6948 nfsm_chain_add_32(error
, &nmreq
, sd
.specdata2
);
6950 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
6951 nfsm_chain_add_fattr4(error
, &nmreq
, vap
, nmp
);
6953 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
6954 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
6955 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
6956 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, NULL
);
6958 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
6960 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
6961 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
6962 nfsm_chain_build_done(error
, &nmreq
);
6963 nfsm_assert(error
, (numops
== 0), EPROTO
);
6966 error
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
6967 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &si
, 0, NULL
, &req
);
6970 nfs_dulookup_start(&dul
, dnp
, ctx
);
6972 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
6975 if ((lockerror
= nfs_node_lock(dnp
))) {
6978 nfsm_chain_skip_tag(error
, &nmrep
);
6979 nfsm_chain_get_32(error
, &nmrep
, numops
);
6980 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
6981 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
6983 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_CREATE
);
6984 nfsm_chain_check_change_info(error
, &nmrep
, dnp
);
6985 bmlen
= NFS_ATTR_BITMAP_LEN
;
6986 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
6987 /* At this point if we have no error, the object was created. */
6988 /* if we don't get attributes, then we should lookitup. */
6989 create_error
= error
;
6991 nfs_vattr_set_supported(bitmap
, vap
);
6992 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
6994 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
6996 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
6997 printf("nfs: create/%s didn't return filehandle? %s\n", tag
, cnp
->cn_nameptr
);
7001 /* directory attributes: if we don't get them, make sure to invalidate */
7002 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
7003 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7005 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
7007 NATTRINVALIDATE(dnp
);
7011 nfsm_chain_cleanup(&nmreq
);
7012 nfsm_chain_cleanup(&nmrep
);
7015 if (!create_error
&& (dnp
->n_flag
& NNEGNCENTRIES
)) {
7016 dnp
->n_flag
&= ~NNEGNCENTRIES
;
7017 cache_purge_negatives(NFSTOV(dnp
));
7019 dnp
->n_flag
|= NMODIFIED
;
7020 nfs_node_unlock(dnp
);
7021 /* nfs_getattr() will check changed and purge caches */
7022 nfs_getattr(dnp
, NULL
, ctx
, NGA_CACHED
);
7025 if (!error
&& fh
.fh_len
) {
7026 /* create the vnode with the filehandle and attributes */
7028 error
= nfs_nget(NFSTOMP(dnp
), dnp
, cnp
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, NG_MAKEENTRY
, &np
);
7033 NVATTR_CLEANUP(&nvattr
);
7036 nfs_dulookup_finish(&dul
, dnp
, ctx
);
7040 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
7041 * if we can succeed in looking up the object.
7043 if ((create_error
== EEXIST
) || (!create_error
&& !newvp
)) {
7044 error
= nfs_lookitup(dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
, &np
);
7047 if (vnode_vtype(newvp
) != nfstov_type(type
, nfsvers
)) {
7053 nfs_node_clear_busy(dnp
);
7057 nfs_node_unlock(np
);
7061 nfs_node_unlock(np
);
7069 struct vnop_mknod_args
/* {
7070 * struct vnodeop_desc *a_desc;
7073 * struct componentname *a_cnp;
7074 * struct vnode_attr *a_vap;
7075 * vfs_context_t a_context;
7078 nfsnode_t np
= NULL
;
7079 struct nfsmount
*nmp
;
7082 nmp
= VTONMP(ap
->a_dvp
);
7083 if (nfs_mount_gone(nmp
)) {
7087 if (!VATTR_IS_ACTIVE(ap
->a_vap
, va_type
)) {
7090 switch (ap
->a_vap
->va_type
) {
7100 error
= nfs4_create_rpc(ap
->a_context
, VTONFS(ap
->a_dvp
), ap
->a_cnp
, ap
->a_vap
,
7101 vtonfs_type(ap
->a_vap
->va_type
, nmp
->nm_vers
), NULL
, &np
);
7103 *ap
->a_vpp
= NFSTOV(np
);
7110 struct vnop_mkdir_args
/* {
7111 * struct vnodeop_desc *a_desc;
7114 * struct componentname *a_cnp;
7115 * struct vnode_attr *a_vap;
7116 * vfs_context_t a_context;
7119 nfsnode_t np
= NULL
;
7122 error
= nfs4_create_rpc(ap
->a_context
, VTONFS(ap
->a_dvp
), ap
->a_cnp
, ap
->a_vap
,
7125 *ap
->a_vpp
= NFSTOV(np
);
7132 struct vnop_symlink_args
/* {
7133 * struct vnodeop_desc *a_desc;
7136 * struct componentname *a_cnp;
7137 * struct vnode_attr *a_vap;
7139 * vfs_context_t a_context;
7142 nfsnode_t np
= NULL
;
7145 error
= nfs4_create_rpc(ap
->a_context
, VTONFS(ap
->a_dvp
), ap
->a_cnp
, ap
->a_vap
,
7146 NFLNK
, ap
->a_target
, &np
);
7148 *ap
->a_vpp
= NFSTOV(np
);
7155 struct vnop_link_args
/* {
7156 * struct vnodeop_desc *a_desc;
7159 * struct componentname *a_cnp;
7160 * vfs_context_t a_context;
7163 vfs_context_t ctx
= ap
->a_context
;
7164 vnode_t vp
= ap
->a_vp
;
7165 vnode_t tdvp
= ap
->a_tdvp
;
7166 struct componentname
*cnp
= ap
->a_cnp
;
7167 int error
= 0, lockerror
= ENOENT
, status
;
7168 struct nfsmount
*nmp
;
7169 nfsnode_t np
= VTONFS(vp
);
7170 nfsnode_t tdnp
= VTONFS(tdvp
);
7171 int nfsvers
, numops
;
7172 u_int64_t xid
, savedxid
;
7173 struct nfsm_chain nmreq
, nmrep
;
7174 struct nfsreq_secinfo_args si
;
7176 if (vnode_mount(vp
) != vnode_mount(tdvp
)) {
7181 if (nfs_mount_gone(nmp
)) {
7184 nfsvers
= nmp
->nm_vers
;
7185 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
7188 if (tdnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
7193 * Push all writes to the server, so that the attribute cache
7194 * doesn't get "out of sync" with the server.
7195 * XXX There should be a better way!
7197 nfs_flush(np
, MNT_WAIT
, vfs_context_thread(ctx
), V_IGNORE_WRITEERR
);
7199 if ((error
= nfs_node_set_busy2(tdnp
, np
, vfs_context_thread(ctx
)))) {
7203 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
7204 nfsm_chain_null(&nmreq
);
7205 nfsm_chain_null(&nmrep
);
7207 // PUTFH(SOURCE), SAVEFH, PUTFH(DIR), LINK, GETATTR(DIR), RESTOREFH, GETATTR
7209 nfsm_chain_build_alloc_init(error
, &nmreq
, 29 * NFSX_UNSIGNED
+ cnp
->cn_namelen
);
7210 nfsm_chain_add_compound_header(error
, &nmreq
, "link", nmp
->nm_minor_vers
, numops
);
7212 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7213 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
7215 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
7217 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7218 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, tdnp
->n_fhp
, tdnp
->n_fhsize
);
7220 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LINK
);
7221 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
7223 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7224 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, tdnp
);
7226 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
7228 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7229 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
7230 nfsm_chain_build_done(error
, &nmreq
);
7231 nfsm_assert(error
, (numops
== 0), EPROTO
);
7233 error
= nfs_request(tdnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
7235 if ((lockerror
= nfs_node_lock2(tdnp
, np
))) {
7239 nfsm_chain_skip_tag(error
, &nmrep
);
7240 nfsm_chain_get_32(error
, &nmrep
, numops
);
7241 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
7242 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
7243 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
7244 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LINK
);
7245 nfsm_chain_check_change_info(error
, &nmrep
, tdnp
);
7246 /* directory attributes: if we don't get them, make sure to invalidate */
7247 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7249 nfsm_chain_loadattr(error
, &nmrep
, tdnp
, nfsvers
, &xid
);
7251 NATTRINVALIDATE(tdnp
);
7253 /* link attributes: if we don't get them, make sure to invalidate */
7254 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
7255 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7257 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
7259 NATTRINVALIDATE(np
);
7262 nfsm_chain_cleanup(&nmreq
);
7263 nfsm_chain_cleanup(&nmrep
);
7265 tdnp
->n_flag
|= NMODIFIED
;
7267 /* Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. */
7268 if (error
== EEXIST
) {
7271 if (!error
&& (tdnp
->n_flag
& NNEGNCENTRIES
)) {
7272 tdnp
->n_flag
&= ~NNEGNCENTRIES
;
7273 cache_purge_negatives(tdvp
);
7276 nfs_node_unlock2(tdnp
, np
);
7278 nfs_node_clear_busy2(tdnp
, np
);
7284 struct vnop_rmdir_args
/* {
7285 * struct vnodeop_desc *a_desc;
7288 * struct componentname *a_cnp;
7289 * vfs_context_t a_context;
7292 vfs_context_t ctx
= ap
->a_context
;
7293 vnode_t vp
= ap
->a_vp
;
7294 vnode_t dvp
= ap
->a_dvp
;
7295 struct componentname
*cnp
= ap
->a_cnp
;
7296 struct nfsmount
*nmp
;
7297 int error
= 0, namedattrs
;
7298 nfsnode_t np
= VTONFS(vp
);
7299 nfsnode_t dnp
= VTONFS(dvp
);
7300 struct nfs_dulookup dul
;
7302 if (vnode_vtype(vp
) != VDIR
) {
7306 nmp
= NFSTONMP(dnp
);
7307 if (nfs_mount_gone(nmp
)) {
7310 namedattrs
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
);
7312 if ((error
= nfs_node_set_busy2(dnp
, np
, vfs_context_thread(ctx
)))) {
7317 nfs_dulookup_init(&dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
);
7318 nfs_dulookup_start(&dul
, dnp
, ctx
);
7321 error
= nfs4_remove_rpc(dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
,
7322 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
7324 nfs_name_cache_purge(dnp
, np
, cnp
, ctx
);
7325 /* nfs_getattr() will check changed and purge caches */
7326 nfs_getattr(dnp
, NULL
, ctx
, NGA_CACHED
);
7328 nfs_dulookup_finish(&dul
, dnp
, ctx
);
7330 nfs_node_clear_busy2(dnp
, np
);
7333 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
7335 if (error
== ENOENT
) {
7340 * remove nfsnode from hash now so we can't accidentally find it
7341 * again if another object gets created with the same filehandle
7342 * before this vnode gets reclaimed
7344 lck_mtx_lock(nfs_node_hash_mutex
);
7345 if (np
->n_hflag
& NHHASHED
) {
7346 LIST_REMOVE(np
, n_hash
);
7347 np
->n_hflag
&= ~NHHASHED
;
7348 FSDBG(266, 0, np
, np
->n_flag
, 0xb1eb1e);
7350 lck_mtx_unlock(nfs_node_hash_mutex
);
/*
 * NFSv4 Named Attributes
 *
 * Both the extended attributes interface and the named streams interface
 * are backed by NFSv4 named attributes.  The implementations for both use
 * a common set of routines in an attempt to reduce code duplication, to
 * increase efficiency, to increase caching of both names and data, and to
 * confine the complexity.
 *
 * Each NFS node caches its named attribute directory's file handle.
 * The directory nodes for the named attribute directories are handled
 * exactly like regular directories (with a couple minor exceptions).
 * Named attribute nodes are also treated as much like regular files as
 * possible.
 *
 * Most of the heavy lifting is done by nfs4_named_attr_get().
 */
7374 * Get the given node's attribute directory node.
7375 * If !fetch, then only return a cached node.
7376 * Otherwise, we will attempt to fetch the node from the server.
7377 * (Note: the node should be marked busy.)
7380 nfs4_named_attr_dir_get(nfsnode_t np
, int fetch
, vfs_context_t ctx
)
7382 nfsnode_t adnp
= NULL
;
7383 struct nfsmount
*nmp
;
7384 int error
= 0, status
, numops
;
7385 struct nfsm_chain nmreq
, nmrep
;
7387 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
7389 struct nfs_vattr nvattr
;
7390 struct componentname cn
;
7391 struct nfsreq rq
, *req
= &rq
;
7392 struct nfsreq_secinfo_args si
;
7395 if (nfs_mount_gone(nmp
)) {
7398 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
7402 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
7403 NVATTR_INIT(&nvattr
);
7404 nfsm_chain_null(&nmreq
);
7405 nfsm_chain_null(&nmrep
);
7407 bzero(&cn
, sizeof(cn
));
7408 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER
, const, char *); /* "/..namedfork/" */
7409 cn
.cn_namelen
= strlen(_PATH_FORKSPECIFIER
);
7410 cn
.cn_nameiop
= LOOKUP
;
7412 if (np
->n_attrdirfh
) {
7413 // XXX can't set parent correctly (to np) yet
7414 error
= nfs_nget(nmp
->nm_mountp
, NULL
, &cn
, np
->n_attrdirfh
+ 1, *np
->n_attrdirfh
,
7415 NULL
, NULL
, RPCAUTH_UNKNOWN
, NG_NOCREATE
, &adnp
);
7425 // PUTFH, OPENATTR, GETATTR
7427 nfsm_chain_build_alloc_init(error
, &nmreq
, 22 * NFSX_UNSIGNED
);
7428 nfsm_chain_add_compound_header(error
, &nmreq
, "openattr", nmp
->nm_minor_vers
, numops
);
7430 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7431 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, np
->n_fhp
, np
->n_fhsize
);
7433 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPENATTR
);
7434 nfsm_chain_add_32(error
, &nmreq
, 0);
7436 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7437 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
7438 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
7439 nfsm_chain_add_bitmap_masked(error
, &nmreq
, bitmap
,
7440 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
7441 nfsm_chain_build_done(error
, &nmreq
);
7442 nfsm_assert(error
, (numops
== 0), EPROTO
);
7444 error
= nfs_request_async(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
7445 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &si
, 0, NULL
, &req
);
7447 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
7450 nfsm_chain_skip_tag(error
, &nmrep
);
7451 nfsm_chain_get_32(error
, &nmrep
, numops
);
7452 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
7453 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPENATTR
);
7454 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7456 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
7458 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
) || !fh
.fh_len
) {
7462 if (!np
->n_attrdirfh
|| (*np
->n_attrdirfh
!= fh
.fh_len
)) {
7463 /* (re)allocate attrdir fh buffer */
7464 if (np
->n_attrdirfh
) {
7465 FREE(np
->n_attrdirfh
, M_TEMP
);
7467 MALLOC(np
->n_attrdirfh
, u_char
*, fh
.fh_len
+ 1, M_TEMP
, M_WAITOK
);
7469 if (!np
->n_attrdirfh
) {
7473 /* cache the attrdir fh in the node */
7474 *np
->n_attrdirfh
= fh
.fh_len
;
7475 bcopy(fh
.fh_data
, np
->n_attrdirfh
+ 1, fh
.fh_len
);
7476 /* create node for attrdir */
7477 // XXX can't set parent correctly (to np) yet
7478 error
= nfs_nget(NFSTOMP(np
), NULL
, &cn
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, 0, &adnp
);
7480 NVATTR_CLEANUP(&nvattr
);
7481 nfsm_chain_cleanup(&nmreq
);
7482 nfsm_chain_cleanup(&nmrep
);
7485 /* sanity check that this node is an attribute directory */
7486 if (adnp
->n_vattr
.nva_type
!= VDIR
) {
7489 if (!(adnp
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
7492 nfs_node_unlock(adnp
);
7494 vnode_put(NFSTOV(adnp
));
7497 return error
? NULL
: adnp
;
/*
 * Get the given node's named attribute node for the name given.
 *
 * In an effort to increase the performance of named attribute access, we try
 * to reduce server requests by doing the following:
 *
 * - cache the node's named attribute directory file handle in the node
 * - maintain a directory vnode for the attribute directory
 * - use name cache entries (positive and negative) to speed up lookups
 * - optionally open the named attribute (with the given accessMode) in the same RPC
 * - combine attribute directory retrieval with the lookup/open RPC
 * - optionally prefetch the named attribute's first block of data in the same RPC
 *
 * Also, in an attempt to reduce the number of copies/variations of this code,
 * parts of the RPC building/processing code are conditionalized on what is
 * needed for any particular request (openattr, lookup vs. open, read).
 *
 * Note that because we may not have the attribute directory node when we start
 * the lookup/open, we lock both the node and the attribute directory node.
 */
/* flags for nfs4_named_attr_get() */
#define NFS_GET_NAMED_ATTR_CREATE               0x1     /* create the attribute if missing */
#define NFS_GET_NAMED_ATTR_CREATE_GUARDED       0x2     /* fail create if it already exists */
#define NFS_GET_NAMED_ATTR_TRUNCATE             0x4     /* truncate the attribute on open */
#define NFS_GET_NAMED_ATTR_PREFETCH             0x8     /* prefetch first block of data */
7527 nfs4_named_attr_get(
7529 struct componentname
*cnp
,
7530 uint32_t accessMode
,
7534 struct nfs_open_file
**nofpp
)
7536 struct nfsmount
*nmp
;
7537 int error
= 0, open_error
= EIO
;
7538 int inuse
= 0, adlockerror
= ENOENT
, busyerror
= ENOENT
, adbusyerror
= ENOENT
, nofpbusyerror
= ENOENT
;
7539 int create
, guarded
, prefetch
, truncate
, noopbusy
= 0;
7540 int open
, status
, numops
, hadattrdir
, negnamecache
;
7541 struct nfs_vattr nvattr
;
7542 struct vnode_attr vattr
;
7543 nfsnode_t adnp
= NULL
, anp
= NULL
;
7545 u_int64_t xid
, savedxid
= 0;
7546 struct nfsm_chain nmreq
, nmrep
;
7547 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
7548 uint32_t denyMode
, rflags
, delegation
, recall
, eof
, rlen
, retlen
;
7549 nfs_stateid stateid
, dstateid
;
7551 struct nfs_open_owner
*noop
= NULL
;
7552 struct nfs_open_file
*newnofp
= NULL
, *nofp
= NULL
;
7553 struct vnop_access_args naa
;
7558 uint32_t ace_type
, ace_flags
, ace_mask
, len
, slen
;
7559 struct kauth_ace ace
;
7560 struct nfsreq rq
, *req
= &rq
;
7561 struct nfsreq_secinfo_args si
;
7565 rflags
= delegation
= recall
= eof
= rlen
= retlen
= 0;
7568 slen
= sizeof(sbuf
);
7571 if (nfs_mount_gone(nmp
)) {
7574 NVATTR_INIT(&nvattr
);
7575 negnamecache
= !NMFLAG(nmp
, NONEGNAMECACHE
);
7576 thd
= vfs_context_thread(ctx
);
7577 cred
= vfs_context_ucred(ctx
);
7578 create
= (flags
& NFS_GET_NAMED_ATTR_CREATE
) ? NFS_OPEN_CREATE
: NFS_OPEN_NOCREATE
;
7579 guarded
= (flags
& NFS_GET_NAMED_ATTR_CREATE_GUARDED
) ? NFS_CREATE_GUARDED
: NFS_CREATE_UNCHECKED
;
7580 truncate
= (flags
& NFS_GET_NAMED_ATTR_TRUNCATE
);
7581 prefetch
= (flags
& NFS_GET_NAMED_ATTR_PREFETCH
);
7584 error
= nfs_getattr(np
, &nvattr
, ctx
, NGA_CACHED
);
7588 if (NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_NAMED_ATTR
) &&
7589 !(nvattr
.nva_flags
& NFS_FFLAG_HAS_NAMED_ATTRS
)) {
7592 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_NONE
) {
7593 /* shouldn't happen... but just be safe */
7594 printf("nfs4_named_attr_get: create with no access %s\n", cnp
->cn_nameptr
);
7595 accessMode
= NFS_OPEN_SHARE_ACCESS_READ
;
7597 open
= (accessMode
!= NFS_OPEN_SHARE_ACCESS_NONE
);
7600 * We're trying to open the file.
7601 * We'll create/open it with the given access mode,
7602 * and set NFS_OPEN_FILE_CREATE.
7604 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
7605 if (prefetch
&& guarded
) {
7606 prefetch
= 0; /* no sense prefetching data that can't be there */
7608 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1);
7614 if ((error
= busyerror
= nfs_node_set_busy(np
, vfs_context_thread(ctx
)))) {
7618 adnp
= nfs4_named_attr_dir_get(np
, 0, ctx
);
7619 hadattrdir
= (adnp
!= NULL
);
7622 /* use the special state ID because we don't have a real one to send */
7623 stateid
.seqid
= stateid
.other
[0] = stateid
.other
[1] = stateid
.other
[2] = 0;
7624 rlen
= MIN(nmp
->nm_rsize
, nmp
->nm_biosize
);
7626 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
7627 nfsm_chain_null(&nmreq
);
7628 nfsm_chain_null(&nmrep
);
7631 if ((error
= adbusyerror
= nfs_node_set_busy(adnp
, vfs_context_thread(ctx
)))) {
7634 /* nfs_getattr() will check changed and purge caches */
7635 error
= nfs_getattr(adnp
, NULL
, ctx
, NGA_CACHED
);
7637 error
= cache_lookup(NFSTOV(adnp
), &avp
, cnp
);
7640 /* negative cache entry */
7644 /* try dir buf cache lookup */
7645 error
= nfs_dir_buf_cache_lookup(adnp
, &anp
, cnp
, ctx
, 0);
7646 if (!error
&& anp
) {
7647 /* dir buf cache hit */
7651 if (error
!= -1) { /* cache miss */
7656 /* cache hit, not really an error */
7657 OSAddAtomic64(1, &nfsstats
.lookupcache_hits
);
7659 *anpp
= anp
= VTONFS(avp
);
7662 nfs_node_clear_busy(adnp
);
7663 adbusyerror
= ENOENT
;
7665 /* check for directory access */
7666 naa
.a_desc
= &vnop_access_desc
;
7667 naa
.a_vp
= NFSTOV(adnp
);
7668 naa
.a_action
= KAUTH_VNODE_SEARCH
;
7669 naa
.a_context
= ctx
;
7671 /* compute actual success/failure based on accessibility */
7672 error
= nfs_vnop_access(&naa
);
7675 /* we either found it, or hit an error */
7676 if (!error
&& guarded
) {
7677 /* found cached entry but told not to use it */
7679 vnode_put(NFSTOV(anp
));
7682 /* we're done if error or we don't need to open */
7683 if (error
|| !open
) {
7686 /* no error and we need to open... */
7692 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
7694 nfs_open_owner_rele(noop
);
7700 /* grab an open file - possibly provisional/nodeless if cache_lookup() failed */
7701 error
= nfs_open_file_find(anp
, noop
, &newnofp
, 0, 0, 1);
7702 if (!error
&& (newnofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
7703 printf("nfs4_named_attr_get: LOST %d %s\n", kauth_cred_getuid(noop
->noo_cred
), cnp
->cn_nameptr
);
7706 if (!error
&& (newnofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
7707 nfs_mount_state_in_use_end(nmp
, 0);
7708 error
= nfs4_reopen(newnofp
, vfs_context_thread(ctx
));
7709 nfs_open_file_destroy(newnofp
);
7716 error
= nfs_open_file_set_busy(newnofp
, vfs_context_thread(ctx
));
7720 nfs_open_file_destroy(newnofp
);
7727 * We already have the node. So we just need to open
7728 * it - which we may be able to do with a delegation.
7730 open_error
= error
= nfs4_open(anp
, newnofp
, accessMode
, denyMode
, ctx
);
7732 /* open succeeded, so our open file is no longer temporary */
7745 * We either don't have the attrdir or we didn't find the attribute
7746 * in the name cache, so we need to talk to the server.
7748 * If we don't have the attrdir, we'll need to ask the server for that too.
7749 * If the caller is requesting that the attribute be created, we need to
7750 * make sure the attrdir is created.
7751 * The caller may also request that the first block of an existing attribute
7752 * be retrieved at the same time.
7756 /* need to mark the open owner busy during the RPC */
7757 if ((error
= nfs_open_owner_set_busy(noop
, thd
))) {
7764 * We'd like to get updated post-open/lookup attributes for the
7765 * directory and we may also want to prefetch some data via READ.
7766 * We'd like the READ results to be last so that we can leave the
7767 * data in the mbufs until the end.
7769 * At a minimum we're sending: PUTFH, LOOKUP/OPEN, GETATTR, PUTFH, GETATTR
7773 numops
+= 3; // also sending: OPENATTR, GETATTR, OPENATTR
7776 numops
+= 4; // also sending: SAVEFH, RESTOREFH, NVERIFY, READ
7778 nfsm_chain_build_alloc_init(error
, &nmreq
, 64 * NFSX_UNSIGNED
+ cnp
->cn_namelen
);
7779 nfsm_chain_add_compound_header(error
, &nmreq
, "getnamedattr", nmp
->nm_minor_vers
, numops
);
7782 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7783 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, adnp
->n_fhp
, adnp
->n_fhsize
);
7786 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7787 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, np
->n_fhp
, np
->n_fhsize
);
7789 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPENATTR
);
7790 nfsm_chain_add_32(error
, &nmreq
, create
? 1 : 0);
7792 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7793 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
7794 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
7795 nfsm_chain_add_bitmap_masked(error
, &nmreq
, bitmap
,
7796 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
7800 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
7801 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
7802 nfsm_chain_add_32(error
, &nmreq
, accessMode
);
7803 nfsm_chain_add_32(error
, &nmreq
, denyMode
);
7804 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
);
7805 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
7806 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
));
7807 nfsm_chain_add_32(error
, &nmreq
, create
);
7809 nfsm_chain_add_32(error
, &nmreq
, guarded
);
7812 VATTR_SET(&vattr
, va_data_size
, 0);
7814 nfsm_chain_add_fattr4(error
, &nmreq
, &vattr
, nmp
);
7816 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_NULL
);
7817 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
7820 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOOKUP
);
7821 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
7824 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7825 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
7826 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
7827 nfsm_chain_add_bitmap_masked(error
, &nmreq
, bitmap
,
7828 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
7831 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
7835 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7836 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, adnp
->n_fhp
, adnp
->n_fhsize
);
7839 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7840 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, np
->n_fhp
, np
->n_fhsize
);
7842 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPENATTR
);
7843 nfsm_chain_add_32(error
, &nmreq
, 0);
7846 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7847 nfsm_chain_add_bitmap_masked(error
, &nmreq
, nfs_getattr_bitmap
,
7848 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
7851 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
7853 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_NVERIFY
);
7855 VATTR_SET(&vattr
, va_data_size
, 0);
7856 nfsm_chain_add_fattr4(error
, &nmreq
, &vattr
, nmp
);
7858 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READ
);
7859 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
7860 nfsm_chain_add_64(error
, &nmreq
, 0);
7861 nfsm_chain_add_32(error
, &nmreq
, rlen
);
7863 nfsm_chain_build_done(error
, &nmreq
);
7864 nfsm_assert(error
, (numops
== 0), EPROTO
);
7866 error
= nfs_request_async(hadattrdir
? adnp
: np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
7867 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &si
, open
? R_NOINTR
: 0, NULL
, &req
);
7869 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
7872 if (hadattrdir
&& ((adlockerror
= nfs_node_lock(adnp
)))) {
7873 error
= adlockerror
;
7876 nfsm_chain_skip_tag(error
, &nmrep
);
7877 nfsm_chain_get_32(error
, &nmrep
, numops
);
7878 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
7880 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPENATTR
);
7881 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7883 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
7885 if (NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
) && fh
.fh_len
) {
7886 if (!np
->n_attrdirfh
|| (*np
->n_attrdirfh
!= fh
.fh_len
)) {
7887 /* (re)allocate attrdir fh buffer */
7888 if (np
->n_attrdirfh
) {
7889 FREE(np
->n_attrdirfh
, M_TEMP
);
7891 MALLOC(np
->n_attrdirfh
, u_char
*, fh
.fh_len
+ 1, M_TEMP
, M_WAITOK
);
7893 if (np
->n_attrdirfh
) {
7894 /* remember the attrdir fh in the node */
7895 *np
->n_attrdirfh
= fh
.fh_len
;
7896 bcopy(fh
.fh_data
, np
->n_attrdirfh
+ 1, fh
.fh_len
);
7897 /* create busied node for attrdir */
7898 struct componentname cn
;
7899 bzero(&cn
, sizeof(cn
));
7900 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER
, const, char *); /* "/..namedfork/" */
7901 cn
.cn_namelen
= strlen(_PATH_FORKSPECIFIER
);
7902 cn
.cn_nameiop
= LOOKUP
;
7903 // XXX can't set parent correctly (to np) yet
7904 error
= nfs_nget(NFSTOMP(np
), NULL
, &cn
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, 0, &adnp
);
7907 /* set the node busy */
7908 SET(adnp
->n_flag
, NBUSY
);
7911 /* if no adnp, oh well... */
7915 NVATTR_CLEANUP(&nvattr
);
7919 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
7920 nfs_owner_seqid_increment(noop
, NULL
, error
);
7921 nfsm_chain_get_stateid(error
, &nmrep
, &newnofp
->nof_stateid
);
7922 nfsm_chain_check_change_info(error
, &nmrep
, adnp
);
7923 nfsm_chain_get_32(error
, &nmrep
, rflags
);
7924 bmlen
= NFS_ATTR_BITMAP_LEN
;
7925 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
7926 nfsm_chain_get_32(error
, &nmrep
, delegation
);
7928 switch (delegation
) {
7929 case NFS_OPEN_DELEGATE_NONE
:
7931 case NFS_OPEN_DELEGATE_READ
:
7932 case NFS_OPEN_DELEGATE_WRITE
:
7933 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
7934 nfsm_chain_get_32(error
, &nmrep
, recall
);
7935 if (delegation
== NFS_OPEN_DELEGATE_WRITE
) { // space (skip) XXX
7936 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
7938 /* if we have any trouble accepting the ACE, just invalidate it */
7939 ace_type
= ace_flags
= ace_mask
= len
= 0;
7940 nfsm_chain_get_32(error
, &nmrep
, ace_type
);
7941 nfsm_chain_get_32(error
, &nmrep
, ace_flags
);
7942 nfsm_chain_get_32(error
, &nmrep
, ace_mask
);
7943 nfsm_chain_get_32(error
, &nmrep
, len
);
7944 ace
.ace_flags
= nfs4_ace_nfstype_to_vfstype(ace_type
, &error
);
7945 ace
.ace_flags
|= nfs4_ace_nfsflags_to_vfsflags(ace_flags
);
7946 ace
.ace_rights
= nfs4_ace_nfsmask_to_vfsrights(ace_mask
);
7947 if (!error
&& (len
>= slen
)) {
7948 MALLOC(s
, char*, len
+ 1, M_TEMP
, M_WAITOK
);
7956 nfsm_chain_get_opaque(error
, &nmrep
, len
, s
);
7958 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(len
));
7962 if (nfs4_id2guid(s
, &ace
.ace_applicable
, (ace_flags
& NFS_ACE_IDENTIFIER_GROUP
))) {
7969 if (s
&& (s
!= sbuf
)) {
7978 /* At this point if we have no error, the object was created/opened. */
7981 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOOKUP
);
7983 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7985 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
7987 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
) || !fh
.fh_len
) {
7992 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
7994 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
7996 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPENATTR
);
7998 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
8001 nfsm_chain_loadattr(error
, &nmrep
, adnp
, nmp
->nm_vers
, &xid
);
8005 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
) {
8006 newnofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
8008 if (rflags
& NFS_OPEN_RESULT_CONFIRM
) {
8010 nfs_node_unlock(adnp
);
8011 adlockerror
= ENOENT
;
8013 NVATTR_CLEANUP(&nvattr
);
8014 error
= nfs4_open_confirm_rpc(nmp
, adnp
? adnp
: np
, fh
.fh_data
, fh
.fh_len
, noop
, &newnofp
->nof_stateid
, thd
, cred
, &nvattr
, &xid
);
8017 if ((adlockerror
= nfs_node_lock(adnp
))) {
8018 error
= adlockerror
;
8024 if (open
&& adnp
&& !adlockerror
) {
8025 if (!open_error
&& (adnp
->n_flag
& NNEGNCENTRIES
)) {
8026 adnp
->n_flag
&= ~NNEGNCENTRIES
;
8027 cache_purge_negatives(NFSTOV(adnp
));
8029 adnp
->n_flag
|= NMODIFIED
;
8030 nfs_node_unlock(adnp
);
8031 adlockerror
= ENOENT
;
8032 nfs_getattr(adnp
, NULL
, ctx
, NGA_CACHED
);
8034 if (adnp
&& !adlockerror
&& (error
== ENOENT
) &&
8035 (cnp
->cn_flags
& MAKEENTRY
) && (cnp
->cn_nameiop
!= CREATE
) && negnamecache
) {
8036 /* add a negative entry in the name cache */
8037 cache_enter(NFSTOV(adnp
), NULL
, cnp
);
8038 adnp
->n_flag
|= NNEGNCENTRIES
;
8040 if (adnp
&& !adlockerror
) {
8041 nfs_node_unlock(adnp
);
8042 adlockerror
= ENOENT
;
8044 if (!error
&& !anp
&& fh
.fh_len
) {
8045 /* create the vnode with the filehandle and attributes */
8047 error
= nfs_nget(NFSTOMP(np
), adnp
, cnp
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, NG_MAKEENTRY
, &anp
);
8050 nfs_node_unlock(anp
);
8052 if (!error
&& open
) {
8053 nfs_open_file_add_open(newnofp
, accessMode
, denyMode
, 0);
8054 /* After we have a node, add our open file struct to the node */
8056 error
= nfs_open_file_find_internal(anp
, noop
, &nofp
, 0, 0, 0);
8058 /* This shouldn't happen, because we passed in a new nofp to use. */
8059 printf("nfs_open_file_find_internal failed! %d\n", error
);
8061 } else if (nofp
!= newnofp
) {
8063 * Hmm... an open file struct already exists.
8064 * Mark the existing one busy and merge our open into it.
8065 * Then destroy the one we created.
8066 * Note: there's no chance of an open confict because the
8067 * open has already been granted.
8069 nofpbusyerror
= nfs_open_file_set_busy(nofp
, NULL
);
8070 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, 0);
8071 nofp
->nof_stateid
= newnofp
->nof_stateid
;
8072 if (newnofp
->nof_flags
& NFS_OPEN_FILE_POSIXLOCK
) {
8073 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
8075 nfs_open_file_clear_busy(newnofp
);
8076 nfs_open_file_destroy(newnofp
);
8082 /* mark the node as holding a create-initiated open */
8083 nofp
->nof_flags
|= NFS_OPEN_FILE_CREATE
;
8084 nofp
->nof_creator
= current_thread();
8091 NVATTR_CLEANUP(&nvattr
);
8092 if (open
&& ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
))) {
8093 if (!error
&& anp
&& !recall
) {
8094 /* stuff the delegation state in the node */
8095 lck_mtx_lock(&anp
->n_openlock
);
8096 anp
->n_openflags
&= ~N_DELEG_MASK
;
8097 anp
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
8098 anp
->n_dstateid
= dstateid
;
8100 if (anp
->n_dlink
.tqe_next
== NFSNOLIST
) {
8101 lck_mtx_lock(&nmp
->nm_lock
);
8102 if (anp
->n_dlink
.tqe_next
== NFSNOLIST
) {
8103 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, anp
, n_dlink
);
8105 lck_mtx_unlock(&nmp
->nm_lock
);
8107 lck_mtx_unlock(&anp
->n_openlock
);
8109 /* give the delegation back */
8111 if (NFS_CMPFH(anp
, fh
.fh_data
, fh
.fh_len
)) {
8112 /* update delegation state and return it */
8113 lck_mtx_lock(&anp
->n_openlock
);
8114 anp
->n_openflags
&= ~N_DELEG_MASK
;
8115 anp
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
8116 anp
->n_dstateid
= dstateid
;
8118 if (anp
->n_dlink
.tqe_next
== NFSNOLIST
) {
8119 lck_mtx_lock(&nmp
->nm_lock
);
8120 if (anp
->n_dlink
.tqe_next
== NFSNOLIST
) {
8121 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, anp
, n_dlink
);
8123 lck_mtx_unlock(&nmp
->nm_lock
);
8125 lck_mtx_unlock(&anp
->n_openlock
);
8126 /* don't need to send a separate delegreturn for fh */
8129 /* return anp's current delegation */
8130 nfs4_delegation_return(anp
, 0, thd
, cred
);
8132 if (fh
.fh_len
) { /* return fh's delegation if it wasn't for anp */
8133 nfs4_delegreturn_rpc(nmp
, fh
.fh_data
, fh
.fh_len
, &dstateid
, 0, thd
, cred
);
8139 /* need to cleanup our temporary nofp */
8140 nfs_open_file_clear_busy(newnofp
);
8141 nfs_open_file_destroy(newnofp
);
8143 } else if (nofp
&& !nofpbusyerror
) {
8144 nfs_open_file_clear_busy(nofp
);
8145 nofpbusyerror
= ENOENT
;
8147 if (inuse
&& nfs_mount_state_in_use_end(nmp
, error
)) {
8149 nofp
= newnofp
= NULL
;
8150 rflags
= delegation
= recall
= eof
= rlen
= retlen
= 0;
8153 slen
= sizeof(sbuf
);
8154 nfsm_chain_cleanup(&nmreq
);
8155 nfsm_chain_cleanup(&nmrep
);
8157 vnode_put(NFSTOV(anp
));
8160 hadattrdir
= (adnp
!= NULL
);
8162 nfs_open_owner_clear_busy(noop
);
8169 nfs_open_owner_clear_busy(noop
);
8172 nfs_open_owner_rele(noop
);
8175 if (!error
&& prefetch
&& nmrep
.nmc_mhead
) {
8176 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
8177 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_NVERIFY
);
8178 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READ
);
8179 nfsm_chain_get_32(error
, &nmrep
, eof
);
8180 nfsm_chain_get_32(error
, &nmrep
, retlen
);
8181 if (!error
&& anp
) {
8183 * There can be one problem with doing the prefetch.
8184 * Because we don't have the node before we start the RPC, we
8185 * can't have the buffer busy while the READ is performed.
8186 * So there is a chance that other I/O occured on the same
8187 * range of data while we were performing this RPC. If that
8188 * happens, then it's possible the data we have in the READ
8189 * response is no longer up to date.
8190 * Once we have the node and the buffer, we need to make sure
8191 * that there's no chance we could be putting stale data in
8193 * So, we check if the range read is dirty or if any I/O may
8194 * have occured on it while we were performing our RPC.
8196 struct nfsbuf
*bp
= NULL
;
8200 retlen
= MIN(retlen
, rlen
);
8202 /* check if node needs size update or invalidation */
8203 if (ISSET(anp
->n_flag
, NUPDATESIZE
)) {
8204 nfs_data_update_size(anp
, 0);
8206 if (!(error
= nfs_node_lock(anp
))) {
8207 if (anp
->n_flag
& NNEEDINVALIDATE
) {
8208 anp
->n_flag
&= ~NNEEDINVALIDATE
;
8209 nfs_node_unlock(anp
);
8210 error
= nfs_vinvalbuf(NFSTOV(anp
), V_SAVE
| V_IGNORE_WRITEERR
, ctx
, 1);
8211 if (!error
) { /* lets play it safe and just drop the data */
8215 nfs_node_unlock(anp
);
8219 /* calculate page mask for the range of data read */
8220 lastpg
= (trunc_page_32(retlen
) - 1) / PAGE_SIZE
;
8221 pagemask
= ((1 << (lastpg
+ 1)) - 1);
8224 error
= nfs_buf_get(anp
, 0, nmp
->nm_biosize
, thd
, NBLK_READ
| NBLK_NOWAIT
, &bp
);
8226 /* don't save the data if dirty or potential I/O conflict */
8227 if (!error
&& bp
&& !bp
->nb_dirtyoff
&& !(bp
->nb_dirty
& pagemask
) &&
8228 timevalcmp(&anp
->n_lastio
, &now
, <)) {
8229 OSAddAtomic64(1, &nfsstats
.read_bios
);
8230 CLR(bp
->nb_flags
, (NB_DONE
| NB_ASYNC
));
8231 SET(bp
->nb_flags
, NB_READ
);
8233 nfsm_chain_get_opaque(error
, &nmrep
, retlen
, bp
->nb_data
);
8235 bp
->nb_error
= error
;
8236 SET(bp
->nb_flags
, NB_ERROR
);
8239 bp
->nb_endio
= rlen
;
8240 if ((retlen
> 0) && (bp
->nb_endio
< (int)retlen
)) {
8241 bp
->nb_endio
= retlen
;
8243 if (eof
|| (retlen
== 0)) {
8244 /* zero out the remaining data (up to EOF) */
8245 off_t rpcrem
, eofrem
, rem
;
8246 rpcrem
= (rlen
- retlen
);
8247 eofrem
= anp
->n_size
- (NBOFF(bp
) + retlen
);
8248 rem
= (rpcrem
< eofrem
) ? rpcrem
: eofrem
;
8250 bzero(bp
->nb_data
+ retlen
, rem
);
8252 } else if ((retlen
< rlen
) && !ISSET(bp
->nb_flags
, NB_ERROR
)) {
8253 /* ugh... short read ... just invalidate for now... */
8254 SET(bp
->nb_flags
, NB_INVAL
);
8257 nfs_buf_read_finish(bp
);
8258 microuptime(&anp
->n_lastio
);
8261 nfs_buf_release(bp
, 1);
8264 error
= 0; /* ignore any transient error in processing the prefetch */
8266 if (adnp
&& !adbusyerror
) {
8267 nfs_node_clear_busy(adnp
);
8268 adbusyerror
= ENOENT
;
8271 nfs_node_clear_busy(np
);
8275 vnode_put(NFSTOV(adnp
));
8277 if (error
&& *anpp
) {
8278 vnode_put(NFSTOV(*anpp
));
8281 nfsm_chain_cleanup(&nmreq
);
8282 nfsm_chain_cleanup(&nmrep
);
8287 * Remove a named attribute.
8290 nfs4_named_attr_remove(nfsnode_t np
, nfsnode_t anp
, const char *name
, vfs_context_t ctx
)
8292 nfsnode_t adnp
= NULL
;
8293 struct nfsmount
*nmp
;
8294 struct componentname cn
;
8295 struct vnop_remove_args vra
;
8296 int error
, putanp
= 0;
8299 if (nfs_mount_gone(nmp
)) {
8303 bzero(&cn
, sizeof(cn
));
8304 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(name
, const, char *);
8305 cn
.cn_namelen
= strlen(name
);
8306 cn
.cn_nameiop
= DELETE
;
8310 error
= nfs4_named_attr_get(np
, &cn
, NFS_OPEN_SHARE_ACCESS_NONE
,
8311 0, ctx
, &anp
, NULL
);
8312 if ((!error
&& !anp
) || (error
== ENOATTR
)) {
8317 vnode_put(NFSTOV(anp
));
8325 if ((error
= nfs_node_set_busy(np
, vfs_context_thread(ctx
)))) {
8328 adnp
= nfs4_named_attr_dir_get(np
, 1, ctx
);
8329 nfs_node_clear_busy(np
);
8335 vra
.a_desc
= &vnop_remove_desc
;
8336 vra
.a_dvp
= NFSTOV(adnp
);
8337 vra
.a_vp
= NFSTOV(anp
);
8340 vra
.a_context
= ctx
;
8341 error
= nfs_vnop_remove(&vra
);
8344 vnode_put(NFSTOV(adnp
));
8347 vnode_put(NFSTOV(anp
));
/*
 * NFSv4 getxattr: read the value of extended attribute a_name by looking up
 * the corresponding named-attribute node and reading its data.
 *
 * NOTE(review): damaged extraction -- the function-name line itself and a
 * number of interior lines are missing (embedded numbering jumps), so the
 * visible text is incomplete.
 */
8354 struct vnop_getxattr_args
/* {
8355 * struct vnodeop_desc *a_desc;
8357 * const char * a_name;
8361 * vfs_context_t a_context;
8364 vfs_context_t ctx
= ap
->a_context
;
8365 struct nfsmount
*nmp
;
8366 struct nfs_vattr nvattr
;
8367 struct componentname cn
;
8369 int error
= 0, isrsrcfork
;
8371 nmp
= VTONMP(ap
->a_vp
);
8372 if (nfs_mount_gone(nmp
)) {
/* named attributes must be supported by this mount */
8376 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
)) {
/* use cached attrs: if the node reports no named attrs, skip the lookup */
8379 error
= nfs_getattr(VTONFS(ap
->a_vp
), &nvattr
, ctx
, NGA_CACHED
);
8383 if (NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_NAMED_ATTR
) &&
8384 !(nvattr
.nva_flags
& NFS_FFLAG_HAS_NAMED_ATTRS
)) {
/* componentname for a LOOKUP of the attribute name */
8388 bzero(&cn
, sizeof(cn
));
8389 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(ap
->a_name
, const, char *);
8390 cn
.cn_namelen
= strlen(ap
->a_name
);
8391 cn
.cn_nameiop
= LOOKUP
;
8392 cn
.cn_flags
= MAKEENTRY
;
8394 /* we'll normally try to prefetch data for xattrs... the resource fork is really a stream */
8395 isrsrcfork
= (bcmp(ap
->a_name
, XATTR_RESOURCEFORK_NAME
, sizeof(XATTR_RESOURCEFORK_NAME
)) == 0);
/* fetch the attribute node; prefetch data for everything but the rsrc fork */
8397 error
= nfs4_named_attr_get(VTONFS(ap
->a_vp
), &cn
, NFS_OPEN_SHARE_ACCESS_NONE
,
8398 !isrsrcfork
? NFS_GET_NAMED_ATTR_PREFETCH
: 0, ctx
, &anp
, NULL
);
8399 if ((!error
&& !anp
) || (error
== ENOENT
)) {
/* read the data through the buffer cache, or just report the size
 * (the uio-vs-size branch lines are not all visible here) */
8404 error
= nfs_bioread(anp
, ap
->a_uio
, 0, ctx
);
8406 *ap
->a_size
= anp
->n_size
;
8410 vnode_put(NFSTOV(anp
));
/*
 * NFSv4 setxattr: create/open the named-attribute node for a_name, write the
 * supplied data into it, flush, and close.  FinderInfo gets special handling
 * (fixed 32-byte payload; all-zero FinderInfo means "remove it").
 *
 * NOTE(review): damaged extraction -- the function-name line and many
 * interior lines are missing (embedded numbering jumps, e.g. 8443 -> 8447),
 * so the visible control flow is incomplete.
 */
8417 struct vnop_setxattr_args
/* {
8418 * struct vnodeop_desc *a_desc;
8420 * const char * a_name;
8423 * vfs_context_t a_context;
8426 vfs_context_t ctx
= ap
->a_context
;
8427 int options
= ap
->a_options
;
8428 uio_t uio
= ap
->a_uio
;
8429 const char *name
= ap
->a_name
;
8430 struct nfsmount
*nmp
;
8431 struct componentname cn
;
8432 nfsnode_t anp
= NULL
;
8433 int error
= 0, closeerror
= 0, flags
, isrsrcfork
, isfinderinfo
, empty
= 0, i
;
/* FinderInfo is a fixed-size 32-byte blob */
8434 #define FINDERINFOSIZE 32
8435 uint8_t finfo
[FINDERINFOSIZE
];
8437 struct nfs_open_file
*nofp
= NULL
;
8438 char uio_buf
[UIO_SIZEOF(1)];
8440 struct vnop_write_args vwa
;
8442 nmp
= VTONMP(ap
->a_vp
);
8443 if (nfs_mount_gone(nmp
)) {
/* mount must support named attributes */
8447 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
)) {
/* XATTR_CREATE and XATTR_REPLACE are mutually exclusive */
8451 if ((options
& XATTR_CREATE
) && (options
& XATTR_REPLACE
)) {
8455 /* XXX limitation based on need to back up uio on short write */
8456 if (uio_iovcnt(uio
) > 1) {
8457 printf("nfs4_vnop_setxattr: iovcnt > 1\n");
/* componentname for CREATE of the attribute */
8461 bzero(&cn
, sizeof(cn
));
8462 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(name
, const, char *);
8463 cn
.cn_namelen
= strlen(name
);
8464 cn
.cn_nameiop
= CREATE
;
8465 cn
.cn_flags
= MAKEENTRY
;
/* classify the attribute: FinderInfo and resource fork are special */
8467 isfinderinfo
= (bcmp(name
, XATTR_FINDERINFO_NAME
, sizeof(XATTR_FINDERINFO_NAME
)) == 0);
8468 isrsrcfork
= isfinderinfo
? 0 : (bcmp(name
, XATTR_RESOURCEFORK_NAME
, sizeof(XATTR_RESOURCEFORK_NAME
)) == 0);
8470 uio_setoffset(uio
, 0);
/* FinderInfo must be exactly sizeof(finfo) bytes; copy it in locally */
8473 if (uio_resid(uio
) != sizeof(finfo
)) {
8476 error
= uiomove((char*)&finfo
, sizeof(finfo
), uio
);
8480 /* setting a FinderInfo of all zeroes means remove the FinderInfo */
8482 for (i
= 0, finfop
= (uint32_t*)&finfo
; i
< (int)(sizeof(finfo
) / sizeof(uint32_t)); i
++) {
/* all-zero FinderInfo without CREATE/REPLACE: treat as remove */
8488 if (empty
&& !(options
& (XATTR_CREATE
| XATTR_REPLACE
))) {
8489 error
= nfs4_named_attr_remove(VTONFS(ap
->a_vp
), anp
, name
, ctx
);
8490 if (error
== ENOENT
) {
8495 /* first, let's see if we get a create/replace error */
8499 * create/open the xattr
8501 * We need to make sure not to create it if XATTR_REPLACE.
8502 * For all xattrs except the resource fork, we also want to
8503 * truncate the xattr to remove any current data. We'll do
8504 * that by setting the size to 0 on create/open.
8507 if (!(options
& XATTR_REPLACE
)) {
8508 flags
|= NFS_GET_NAMED_ATTR_CREATE
;
8510 if (options
& XATTR_CREATE
) {
8511 flags
|= NFS_GET_NAMED_ATTR_CREATE_GUARDED
;
8514 flags
|= NFS_GET_NAMED_ATTR_TRUNCATE
;
/* open with both read and write access; nofp carries the open state */
8517 error
= nfs4_named_attr_get(VTONFS(ap
->a_vp
), &cn
, NFS_OPEN_SHARE_ACCESS_BOTH
,
8518 flags
, ctx
, &anp
, &nofp
);
8519 if (!error
&& !anp
) {
8525 /* grab the open state from the get/create/open */
8526 if (nofp
&& !(error
= nfs_open_file_set_busy(nofp
, NULL
))) {
8527 nofp
->nof_flags
&= ~NFS_OPEN_FILE_CREATE
;
8528 nofp
->nof_creator
= NULL
;
8529 nfs_open_file_clear_busy(nofp
);
8532 /* Setting an empty FinderInfo really means remove it, skip to the close/remove */
8533 if (isfinderinfo
&& empty
) {
8538 * Write the data out and flush.
8540 * For FinderInfo, we've already copied the data to finfo, so do I/O from there.
8542 vwa
.a_desc
= &vnop_write_desc
;
8543 vwa
.a_vp
= NFSTOV(anp
);
8546 vwa
.a_context
= ctx
;
/* FinderInfo path: build a one-iovec uio over the local finfo buffer */
8548 auio
= uio_createwithbuffer(1, 0, UIO_SYSSPACE
, UIO_WRITE
, &uio_buf
, sizeof(uio_buf
));
8549 uio_addiov(auio
, (uintptr_t)&finfo
, sizeof(finfo
));
8551 } else if (uio_resid(uio
) > 0) {
8555 error
= nfs_vnop_write(&vwa
);
/* flush the written data synchronously */
8557 error
= nfs_flush(anp
, MNT_WAIT
, vfs_context_thread(ctx
), 0);
8561 /* Close the xattr. */
8563 int busyerror
= nfs_open_file_set_busy(nofp
, NULL
);
8564 closeerror
= nfs_close(anp
, nofp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_NONE
, ctx
);
8566 nfs_open_file_clear_busy(nofp
);
8569 if (!error
&& isfinderinfo
&& empty
) { /* Setting an empty FinderInfo really means remove it */
8570 error
= nfs4_named_attr_remove(VTONFS(ap
->a_vp
), anp
, name
, ctx
);
8571 if (error
== ENOENT
) {
8580 vnode_put(NFSTOV(anp
));
8582 if (error
== ENOENT
) {
/*
 * NFSv4 removexattr: delegate to nfs4_named_attr_remove() for a_name.
 *
 * NOTE(review): damaged extraction -- interior lines (return statements,
 * ENOENT -> ENOATTR translation presumably) are missing here; verify against
 * the pristine source.
 */
8589 nfs4_vnop_removexattr(
8590 struct vnop_removexattr_args
/* {
8591 * struct vnodeop_desc *a_desc;
8593 * const char * a_name;
8595 * vfs_context_t a_context;
8598 struct nfsmount
*nmp
= VTONMP(ap
->a_vp
);
8601 if (nfs_mount_gone(nmp
)) {
/* mount must support named attributes */
8604 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
)) {
/* no attribute node in hand: pass NULL and let the remove look it up */
8608 error
= nfs4_named_attr_remove(VTONFS(ap
->a_vp
), NULL
, ap
->a_name
, ap
->a_context
);
8609 if (error
== ENOENT
) {
/*
 * NFSv4 listxattr: enumerate the node's named-attribute directory through the
 * directory buffer cache, copying each non-protected name (NUL-terminated)
 * to the caller's uio, or just totalling sizes when no uio is supplied.
 *
 * NOTE(review): damaged extraction -- many interior lines are missing
 * (embedded numbering jumps, e.g. 8660 -> 8665, 8735 -> 8744), so loop exits
 * and error paths are only partially visible.
 */
8616 nfs4_vnop_listxattr(
8617 struct vnop_listxattr_args
/* {
8618 * struct vnodeop_desc *a_desc;
8623 * vfs_context_t a_context;
8626 vfs_context_t ctx
= ap
->a_context
;
8627 nfsnode_t np
= VTONFS(ap
->a_vp
);
8628 uio_t uio
= ap
->a_uio
;
8629 nfsnode_t adnp
= NULL
;
8630 struct nfsmount
*nmp
;
8632 struct nfs_vattr nvattr
;
8633 uint64_t cookie
, nextcookie
, lbn
= 0;
8634 struct nfsbuf
*bp
= NULL
;
8635 struct nfs_dir_buf_header
*ndbhp
;
8636 struct direntry
*dp
;
8638 nmp
= VTONMP(ap
->a_vp
);
8639 if (nfs_mount_gone(nmp
)) {
/* mount must support named attributes */
8643 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
)) {
/* cached attrs say no named attrs -> nothing to list */
8647 error
= nfs_getattr(np
, &nvattr
, ctx
, NGA_CACHED
);
8651 if (NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_NAMED_ATTR
) &&
8652 !(nvattr
.nva_flags
& NFS_FFLAG_HAS_NAMED_ATTRS
)) {
/* get the named-attribute directory node */
8656 if ((error
= nfs_node_set_busy(np
, vfs_context_thread(ctx
)))) {
8659 adnp
= nfs4_named_attr_dir_get(np
, 1, ctx
);
8660 nfs_node_clear_busy(np
);
8665 if ((error
= nfs_node_lock(adnp
))) {
/* invalidate stale directory buffers before reading */
8669 if (adnp
->n_flag
& NNEEDINVALIDATE
) {
8670 adnp
->n_flag
&= ~NNEEDINVALIDATE
;
8672 nfs_node_unlock(adnp
);
8673 error
= nfs_vinvalbuf(NFSTOV(adnp
), 0, ctx
, 1);
8675 error
= nfs_node_lock(adnp
);
8683 * check for need to invalidate when (re)starting at beginning
8685 if (adnp
->n_flag
& NMODIFIED
) {
8687 nfs_node_unlock(adnp
);
8688 if ((error
= nfs_vinvalbuf(NFSTOV(adnp
), 0, ctx
, 1))) {
8692 nfs_node_unlock(adnp
);
8694 /* nfs_getattr() will check changed and purge caches */
8695 if ((error
= nfs_getattr(adnp
, &nvattr
, ctx
, NGA_UNCACHED
))) {
/* caller supplied a zero-length buffer: nothing to copy */
8699 if (uio
&& (uio_resid(uio
) == 0)) {
8704 nextcookie
= lbn
= 0;
/* walk directory buffers until done or error */
8706 while (!error
&& !done
) {
8707 OSAddAtomic64(1, &nfsstats
.biocache_readdirs
);
8708 cookie
= nextcookie
;
8710 error
= nfs_buf_get(adnp
, lbn
, NFS_DIRBLKSIZ
, vfs_context_thread(ctx
), NBLK_READ
, &bp
);
8714 ndbhp
= (struct nfs_dir_buf_header
*)bp
->nb_data
;
8715 if (!ISSET(bp
->nb_flags
, NB_CACHE
) || !ISSET(ndbhp
->ndbh_flags
, NDB_FULL
)) {
8716 if (!ISSET(bp
->nb_flags
, NB_CACHE
)) { /* initialize the buffer */
8717 ndbhp
->ndbh_flags
= 0;
8718 ndbhp
->ndbh_count
= 0;
8719 ndbhp
->ndbh_entry_end
= sizeof(*ndbhp
);
8720 ndbhp
->ndbh_ncgen
= adnp
->n_ncgen
;
8722 error
= nfs_buf_readdir(bp
, ctx
);
8723 if (error
== NFSERR_DIRBUFDROPPED
) {
8727 nfs_buf_release(bp
, 1);
/* on hard errors (not transient ones), invalidate the dir caches */
8729 if (error
&& (error
!= ENXIO
) && (error
!= ETIMEDOUT
) && (error
!= EINTR
) && (error
!= ERESTART
)) {
8730 if (!nfs_node_lock(adnp
)) {
8732 nfs_node_unlock(adnp
);
8734 nfs_vinvalbuf(NFSTOV(adnp
), 0, ctx
, 1);
8735 if (error
== NFSERR_BAD_COOKIE
) {
8744 /* go through all the entries copying/counting */
8745 dp
= NFS_DIR_BUF_FIRST_DIRENTRY(bp
);
8746 for (i
= 0; i
< ndbhp
->ndbh_count
; i
++) {
/* skip protected xattr names; count name + NUL, or copy it out */
8747 if (!xattr_protected(dp
->d_name
)) {
8749 *ap
->a_size
+= dp
->d_namlen
+ 1;
8750 } else if (uio_resid(uio
) < (dp
->d_namlen
+ 1)) {
8753 error
= uiomove(dp
->d_name
, dp
->d_namlen
+ 1, uio
);
8754 if (error
&& (error
!= EFAULT
)) {
8759 nextcookie
= dp
->d_seekoff
;
8760 dp
= NFS_DIRENTRY_NEXT(dp
);
8763 if (i
== ndbhp
->ndbh_count
) {
8764 /* hit end of buffer, move to next buffer */
8766 /* if we also hit EOF, we're done */
8767 if (ISSET(ndbhp
->ndbh_flags
, NDB_EOF
)) {
/* sanity check: cookie must advance or we could loop forever */
8771 if (!error
&& !done
&& (nextcookie
== cookie
)) {
8772 printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie
, i
, ndbhp
->ndbh_count
);
8775 nfs_buf_release(bp
, 1);
8779 vnode_put(NFSTOV(adnp
));
/*
 * NFSv4 getnamedstream: look up the named-attribute node for a_name and
 * return its vnode in *a_svpp.
 *
 * NOTE(review): damaged extraction -- interior lines (returns, brace
 * closures) are missing; the visible text is incomplete.
 */
8786 nfs4_vnop_getnamedstream(
8787 struct vnop_getnamedstream_args
/* {
8788 * struct vnodeop_desc *a_desc;
8791 * const char *a_name;
8792 * enum nsoperation a_operation;
8794 * vfs_context_t a_context;
8797 vfs_context_t ctx
= ap
->a_context
;
8798 struct nfsmount
*nmp
;
8799 struct nfs_vattr nvattr
;
8800 struct componentname cn
;
8804 nmp
= VTONMP(ap
->a_vp
);
8805 if (nfs_mount_gone(nmp
)) {
/* mount must support named attributes */
8809 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
)) {
/* cached attrs say the node has no named attrs -> nothing to return */
8812 error
= nfs_getattr(VTONFS(ap
->a_vp
), &nvattr
, ctx
, NGA_CACHED
);
8816 if (NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_NAMED_ATTR
) &&
8817 !(nvattr
.nva_flags
& NFS_FFLAG_HAS_NAMED_ATTRS
)) {
/* componentname for a LOOKUP of the stream name */
8821 bzero(&cn
, sizeof(cn
));
8822 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(ap
->a_name
, const, char *);
8823 cn
.cn_namelen
= strlen(ap
->a_name
);
8824 cn
.cn_nameiop
= LOOKUP
;
8825 cn
.cn_flags
= MAKEENTRY
;
/* fetch the attribute node without requesting open access */
8827 error
= nfs4_named_attr_get(VTONFS(ap
->a_vp
), &cn
, NFS_OPEN_SHARE_ACCESS_NONE
,
8828 0, ctx
, &anp
, NULL
);
8829 if ((!error
&& !anp
) || (error
== ENOENT
)) {
/* success: hand the stream vnode back to the caller */
8832 if (!error
&& anp
) {
8833 *ap
->a_svpp
= NFSTOV(anp
);
8835 vnode_put(NFSTOV(anp
));
/*
 * NFSv4 makenamedstream: create (or open) the named-attribute node for
 * a_name with read/write access and return its vnode in *a_svpp.
 *
 * NOTE(review): damaged extraction -- interior lines are missing; the
 * visible text is incomplete.
 */
8841 nfs4_vnop_makenamedstream(
8842 struct vnop_makenamedstream_args
/* {
8843 * struct vnodeop_desc *a_desc;
8846 * const char *a_name;
8848 * vfs_context_t a_context;
8851 vfs_context_t ctx
= ap
->a_context
;
8852 struct nfsmount
*nmp
;
8853 struct componentname cn
;
8857 nmp
= VTONMP(ap
->a_vp
);
8858 if (nfs_mount_gone(nmp
)) {
/* mount must support named attributes */
8862 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
)) {
/* componentname for CREATE of the stream name */
8866 bzero(&cn
, sizeof(cn
));
8867 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(ap
->a_name
, const, char *);
8868 cn
.cn_namelen
= strlen(ap
->a_name
);
8869 cn
.cn_nameiop
= CREATE
;
8870 cn
.cn_flags
= MAKEENTRY
;
/* create/open the attribute node with both read and write access */
8872 error
= nfs4_named_attr_get(VTONFS(ap
->a_vp
), &cn
, NFS_OPEN_SHARE_ACCESS_BOTH
,
8873 NFS_GET_NAMED_ATTR_CREATE
, ctx
, &anp
, NULL
);
8874 if ((!error
&& !anp
) || (error
== ENOENT
)) {
/* success: hand the stream vnode back to the caller */
8877 if (!error
&& anp
) {
8878 *ap
->a_svpp
= NFSTOV(anp
);
8880 vnode_put(NFSTOV(anp
));
/*
 * NFSv4 removenamedstream: remove the named stream a_name (vnode a_svp)
 * from file a_vp by delegating to nfs4_named_attr_remove().
 *
 * NOTE(review): damaged extraction -- the function's closing lines fall
 * outside this chunk, and some interior lines are missing.
 */
8886 nfs4_vnop_removenamedstream(
8887 struct vnop_removenamedstream_args
/* {
8888 * struct vnodeop_desc *a_desc;
8891 * const char *a_name;
8893 * vfs_context_t a_context;
8896 struct nfsmount
*nmp
= VTONMP(ap
->a_vp
);
/* translate the file and stream vnodes to nfsnodes (either may be NULL) */
8897 nfsnode_t np
= ap
->a_vp
? VTONFS(ap
->a_vp
) : NULL
;
8898 nfsnode_t anp
= ap
->a_svp
? VTONFS(ap
->a_svp
) : NULL
;
8900 if (nfs_mount_gone(nmp
)) {
8905 * Given that a_svp is a named stream, checking for
8906 * named attribute support is kinda pointless.
8908 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
)) {
/* we already hold the stream node, so pass it along to the remove */
8912 return nfs4_named_attr_remove(np
, anp
, ap
->a_name
, ap
->a_context
);