2 * Copyright (c) 2006-2019 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 #include <nfs/nfs_conf.h>
33 * vnode op calls for NFS version 4
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/systm.h>
38 #include <sys/resourcevar.h>
39 #include <sys/proc_internal.h>
40 #include <sys/kauth.h>
41 #include <sys/mount_internal.h>
42 #include <sys/malloc.h>
43 #include <sys/kpi_mbuf.h>
45 #include <sys/vnode_internal.h>
46 #include <sys/dirent.h>
47 #include <sys/fcntl.h>
48 #include <sys/lockf.h>
49 #include <sys/ubc_internal.h>
51 #include <sys/signalvar.h>
52 #include <sys/uio_internal.h>
53 #include <sys/xattr.h>
54 #include <sys/paths.h>
56 #include <vfs/vfs_support.h>
61 #include <kern/clock.h>
62 #include <libkern/OSAtomic.h>
64 #include <miscfs/fifofs/fifo.h>
65 #include <miscfs/specfs/specdev.h>
67 #include <nfs/rpcv2.h>
68 #include <nfs/nfsproto.h>
70 #include <nfs/nfsnode.h>
71 #include <nfs/nfs_gss.h>
72 #include <nfs/nfsmount.h>
73 #include <nfs/nfs_lock.h>
74 #include <nfs/xdr_subs.h>
75 #include <nfs/nfsm_subs.h>
78 #include <netinet/in.h>
79 #include <netinet/in_var.h>
80 #include <vm/vm_kern.h>
82 #include <kern/task.h>
83 #include <kern/sched_prim.h>
87 nfs4_access_rpc(nfsnode_t np
, u_int32_t
*access
, int rpcflags
, vfs_context_t ctx
)
89 int error
= 0, lockerror
= ENOENT
, status
, numops
, slot
;
91 struct nfsm_chain nmreq
, nmrep
;
93 uint32_t access_result
= 0, supported
= 0, missing
;
94 struct nfsmount
*nmp
= NFSTONMP(np
);
95 int nfsvers
= nmp
->nm_vers
;
97 struct nfsreq_secinfo_args si
;
99 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
103 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
104 nfsm_chain_null(&nmreq
);
105 nfsm_chain_null(&nmrep
);
107 // PUTFH, ACCESS, GETATTR
109 nfsm_chain_build_alloc_init(error
, &nmreq
, 17 * NFSX_UNSIGNED
);
110 nfsm_chain_add_compound_header(error
, &nmreq
, "access", nmp
->nm_minor_vers
, numops
);
112 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
113 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
115 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_ACCESS
);
116 nfsm_chain_add_32(error
, &nmreq
, *access
);
118 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
119 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
120 nfsm_chain_build_done(error
, &nmreq
);
121 nfsm_assert(error
, (numops
== 0), EPROTO
);
123 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
124 vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
125 &si
, rpcflags
, &nmrep
, &xid
, &status
);
127 if ((lockerror
= nfs_node_lock(np
))) {
130 nfsm_chain_skip_tag(error
, &nmrep
);
131 nfsm_chain_get_32(error
, &nmrep
, numops
);
132 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
133 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_ACCESS
);
134 nfsm_chain_get_32(error
, &nmrep
, supported
);
135 nfsm_chain_get_32(error
, &nmrep
, access_result
);
137 if ((missing
= (*access
& ~supported
))) {
138 /* missing support for something(s) we wanted */
139 if (missing
& NFS_ACCESS_DELETE
) {
141 * If the server doesn't report DELETE (possible
142 * on UNIX systems), we'll assume that it is OK
143 * and just let any subsequent delete action fail
144 * if it really isn't deletable.
146 access_result
|= NFS_ACCESS_DELETE
;
149 /* ".zfs" subdirectories may erroneously give a denied answer for modify/delete */
150 if (nfs_access_dotzfs
) {
151 vnode_t dvp
= NULLVP
;
152 if (np
->n_flag
& NISDOTZFSCHILD
) { /* may be able to create/delete snapshot dirs */
153 access_result
|= (NFS_ACCESS_MODIFY
| NFS_ACCESS_EXTEND
| NFS_ACCESS_DELETE
);
154 } else if (((dvp
= vnode_getparent(NFSTOV(np
))) != NULLVP
) && (VTONFS(dvp
)->n_flag
& NISDOTZFSCHILD
)) {
155 access_result
|= NFS_ACCESS_DELETE
; /* may be able to delete snapshot dirs */
161 /* Some servers report DELETE support but erroneously give a denied answer. */
162 if (nfs_access_delete
&& (*access
& NFS_ACCESS_DELETE
) && !(access_result
& NFS_ACCESS_DELETE
)) {
163 access_result
|= NFS_ACCESS_DELETE
;
165 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
166 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
169 if (nfs_mount_gone(nmp
)) {
174 if (auth_is_kerberized(np
->n_auth
) || auth_is_kerberized(nmp
->nm_auth
)) {
175 uid
= nfs_cred_getasid2uid(vfs_context_ucred(ctx
));
177 uid
= kauth_cred_getuid(vfs_context_ucred(ctx
));
179 slot
= nfs_node_access_slot(np
, uid
, 1);
180 np
->n_accessuid
[slot
] = uid
;
182 np
->n_accessstamp
[slot
] = now
.tv_sec
;
183 np
->n_access
[slot
] = access_result
;
185 /* pass back the access returned with this request */
186 *access
= np
->n_access
[slot
];
191 nfsm_chain_cleanup(&nmreq
);
192 nfsm_chain_cleanup(&nmrep
);
204 struct nfs_vattr
*nvap
,
207 struct nfsmount
*nmp
= mp
? VFSTONFS(mp
) : NFSTONMP(np
);
208 int error
= 0, status
, nfsvers
, numops
, rpcflags
= 0, acls
;
209 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
210 struct nfsm_chain nmreq
, nmrep
;
211 struct nfsreq_secinfo_args si
;
213 if (nfs_mount_gone(nmp
)) {
216 nfsvers
= nmp
->nm_vers
;
217 acls
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_ACL
);
219 if (np
&& (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
)) {
220 nfs4_default_attrs_for_referral_trigger(VTONFS(np
->n_parent
), NULL
, 0, nvap
, NULL
);
224 if (flags
& NGA_MONITOR
) { /* vnode monitor requests should be soft */
225 rpcflags
= R_RECOVER
;
228 if (flags
& NGA_SOFT
) { /* Return ETIMEDOUT if server not responding */
232 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
233 nfsm_chain_null(&nmreq
);
234 nfsm_chain_null(&nmrep
);
238 nfsm_chain_build_alloc_init(error
, &nmreq
, 15 * NFSX_UNSIGNED
);
239 nfsm_chain_add_compound_header(error
, &nmreq
, "getattr", nmp
->nm_minor_vers
, numops
);
241 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
242 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, fhp
, fhsize
);
244 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
245 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
246 if ((flags
& NGA_ACL
) && acls
) {
247 NFS_BITMAP_SET(bitmap
, NFS_FATTR_ACL
);
249 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
250 nfsm_chain_build_done(error
, &nmreq
);
251 nfsm_assert(error
, (numops
== 0), EPROTO
);
253 error
= nfs_request2(np
, mp
, &nmreq
, NFSPROC4_COMPOUND
,
254 vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
255 NULL
, rpcflags
, &nmrep
, xidp
, &status
);
257 nfsm_chain_skip_tag(error
, &nmrep
);
258 nfsm_chain_get_32(error
, &nmrep
, numops
);
259 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
260 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
262 error
= nfs4_parsefattr(&nmrep
, NULL
, nvap
, NULL
, NULL
, NULL
);
264 if ((flags
& NGA_ACL
) && acls
&& !NFS_BITMAP_ISSET(nvap
->nva_bitmap
, NFS_FATTR_ACL
)) {
265 /* we asked for the ACL but didn't get one... assume there isn't one */
266 NFS_BITMAP_SET(nvap
->nva_bitmap
, NFS_FATTR_ACL
);
267 nvap
->nva_acl
= NULL
;
270 nfsm_chain_cleanup(&nmreq
);
271 nfsm_chain_cleanup(&nmrep
);
276 nfs4_readlink_rpc(nfsnode_t np
, char *buf
, uint32_t *buflenp
, vfs_context_t ctx
)
278 struct nfsmount
*nmp
;
279 int error
= 0, lockerror
= ENOENT
, status
, numops
;
282 struct nfsm_chain nmreq
, nmrep
;
283 struct nfsreq_secinfo_args si
;
286 if (nfs_mount_gone(nmp
)) {
289 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
292 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
293 nfsm_chain_null(&nmreq
);
294 nfsm_chain_null(&nmrep
);
296 // PUTFH, GETATTR, READLINK
298 nfsm_chain_build_alloc_init(error
, &nmreq
, 16 * NFSX_UNSIGNED
);
299 nfsm_chain_add_compound_header(error
, &nmreq
, "readlink", nmp
->nm_minor_vers
, numops
);
301 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
302 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
304 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
305 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
307 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READLINK
);
308 nfsm_chain_build_done(error
, &nmreq
);
309 nfsm_assert(error
, (numops
== 0), EPROTO
);
311 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
313 if ((lockerror
= nfs_node_lock(np
))) {
316 nfsm_chain_skip_tag(error
, &nmrep
);
317 nfsm_chain_get_32(error
, &nmrep
, numops
);
318 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
319 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
320 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, &xid
);
321 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READLINK
);
322 nfsm_chain_get_32(error
, &nmrep
, len
);
324 if (len
>= *buflenp
) {
325 if (np
->n_size
&& (np
->n_size
< *buflenp
)) {
331 nfsm_chain_get_opaque(error
, &nmrep
, len
, buf
);
339 nfsm_chain_cleanup(&nmreq
);
340 nfsm_chain_cleanup(&nmrep
);
351 struct nfsreq_cbinfo
*cb
,
352 struct nfsreq
**reqp
)
354 struct nfsmount
*nmp
;
355 int error
= 0, nfsvers
, numops
;
357 struct nfsm_chain nmreq
;
358 struct nfsreq_secinfo_args si
;
361 if (nfs_mount_gone(nmp
)) {
364 nfsvers
= nmp
->nm_vers
;
365 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
369 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
370 nfsm_chain_null(&nmreq
);
372 // PUTFH, READ, GETATTR
374 nfsm_chain_build_alloc_init(error
, &nmreq
, 22 * NFSX_UNSIGNED
);
375 nfsm_chain_add_compound_header(error
, &nmreq
, "read", nmp
->nm_minor_vers
, numops
);
377 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
378 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
380 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READ
);
381 nfs_get_stateid(np
, thd
, cred
, &stateid
);
382 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
383 nfsm_chain_add_64(error
, &nmreq
, offset
);
384 nfsm_chain_add_32(error
, &nmreq
, len
);
386 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
387 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
388 nfsm_chain_build_done(error
, &nmreq
);
389 nfsm_assert(error
, (numops
== 0), EPROTO
);
391 error
= nfs_request_async(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, 0, cb
, reqp
);
393 nfsm_chain_cleanup(&nmreq
);
398 nfs4_read_rpc_async_finish(
405 struct nfsmount
*nmp
;
406 int error
= 0, lockerror
, nfsvers
, numops
, status
, eof
= 0;
409 struct nfsm_chain nmrep
;
412 if (nfs_mount_gone(nmp
)) {
413 nfs_request_async_cancel(req
);
416 nfsvers
= nmp
->nm_vers
;
418 nfsm_chain_null(&nmrep
);
420 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
421 if (error
== EINPROGRESS
) { /* async request restarted */
425 if ((lockerror
= nfs_node_lock(np
))) {
428 nfsm_chain_skip_tag(error
, &nmrep
);
429 nfsm_chain_get_32(error
, &nmrep
, numops
);
430 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
431 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READ
);
432 nfsm_chain_get_32(error
, &nmrep
, eof
);
433 nfsm_chain_get_32(error
, &nmrep
, retlen
);
435 *lenp
= MIN(retlen
, *lenp
);
436 error
= nfsm_chain_get_uio(&nmrep
, *lenp
, uio
);
438 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
439 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
444 if (!eof
&& !retlen
) {
449 nfsm_chain_cleanup(&nmrep
);
450 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) {
451 microuptime(&np
->n_lastio
);
457 nfs4_write_rpc_async(
464 struct nfsreq_cbinfo
*cb
,
465 struct nfsreq
**reqp
)
467 struct nfsmount
*nmp
;
469 int error
= 0, nfsvers
, numops
;
471 struct nfsm_chain nmreq
;
472 struct nfsreq_secinfo_args si
;
475 if (nfs_mount_gone(nmp
)) {
478 nfsvers
= nmp
->nm_vers
;
479 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
483 /* for async mounts, don't bother sending sync write requests */
484 if ((iomode
!= NFS_WRITE_UNSTABLE
) && nfs_allow_async
&&
485 ((mp
= NFSTOMP(np
))) && (vfs_flags(mp
) & MNT_ASYNC
)) {
486 iomode
= NFS_WRITE_UNSTABLE
;
489 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
490 nfsm_chain_null(&nmreq
);
492 // PUTFH, WRITE, GETATTR
494 nfsm_chain_build_alloc_init(error
, &nmreq
, 25 * NFSX_UNSIGNED
+ len
);
495 nfsm_chain_add_compound_header(error
, &nmreq
, "write", nmp
->nm_minor_vers
, numops
);
497 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
498 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
500 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_WRITE
);
501 nfs_get_stateid(np
, thd
, cred
, &stateid
);
502 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
503 nfsm_chain_add_64(error
, &nmreq
, uio_offset(uio
));
504 nfsm_chain_add_32(error
, &nmreq
, iomode
);
505 nfsm_chain_add_32(error
, &nmreq
, len
);
507 error
= nfsm_chain_add_uio(&nmreq
, uio
, len
);
510 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
511 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
512 nfsm_chain_build_done(error
, &nmreq
);
513 nfsm_assert(error
, (numops
== 0), EPROTO
);
516 error
= nfs_request_async(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, 0, cb
, reqp
);
518 nfsm_chain_cleanup(&nmreq
);
523 nfs4_write_rpc_async_finish(
530 struct nfsmount
*nmp
;
531 int error
= 0, lockerror
= ENOENT
, nfsvers
, numops
, status
;
532 int committed
= NFS_WRITE_FILESYNC
;
534 u_int64_t xid
, wverf
;
536 struct nfsm_chain nmrep
;
539 if (nfs_mount_gone(nmp
)) {
540 nfs_request_async_cancel(req
);
543 nfsvers
= nmp
->nm_vers
;
545 nfsm_chain_null(&nmrep
);
547 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
548 if (error
== EINPROGRESS
) { /* async request restarted */
552 if (nfs_mount_gone(nmp
)) {
555 if (!error
&& (lockerror
= nfs_node_lock(np
))) {
558 nfsm_chain_skip_tag(error
, &nmrep
);
559 nfsm_chain_get_32(error
, &nmrep
, numops
);
560 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
561 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_WRITE
);
562 nfsm_chain_get_32(error
, &nmrep
, rlen
);
568 nfsm_chain_get_32(error
, &nmrep
, committed
);
569 nfsm_chain_get_64(error
, &nmrep
, wverf
);
574 lck_mtx_lock(&nmp
->nm_lock
);
575 if (!(nmp
->nm_state
& NFSSTA_HASWRITEVERF
)) {
576 nmp
->nm_verf
= wverf
;
577 nmp
->nm_state
|= NFSSTA_HASWRITEVERF
;
578 } else if (nmp
->nm_verf
!= wverf
) {
579 nmp
->nm_verf
= wverf
;
581 lck_mtx_unlock(&nmp
->nm_lock
);
582 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
583 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
588 nfsm_chain_cleanup(&nmrep
);
589 if ((committed
!= NFS_WRITE_FILESYNC
) && nfs_allow_async
&&
590 ((mp
= NFSTOMP(np
))) && (vfs_flags(mp
) & MNT_ASYNC
)) {
591 committed
= NFS_WRITE_FILESYNC
;
593 *iomodep
= committed
;
594 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) {
595 microuptime(&np
->n_lastio
);
608 int error
= 0, lockerror
= ENOENT
, remove_error
= 0, status
;
609 struct nfsmount
*nmp
;
612 struct nfsm_chain nmreq
, nmrep
;
613 struct nfsreq_secinfo_args si
;
616 if (nfs_mount_gone(nmp
)) {
619 nfsvers
= nmp
->nm_vers
;
620 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
623 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
625 nfsm_chain_null(&nmreq
);
626 nfsm_chain_null(&nmrep
);
628 // PUTFH, REMOVE, GETATTR
630 nfsm_chain_build_alloc_init(error
, &nmreq
, 17 * NFSX_UNSIGNED
+ namelen
);
631 nfsm_chain_add_compound_header(error
, &nmreq
, "remove", nmp
->nm_minor_vers
, numops
);
633 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
634 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
636 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_REMOVE
);
637 nfsm_chain_add_name(error
, &nmreq
, name
, namelen
, nmp
);
639 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
640 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
641 nfsm_chain_build_done(error
, &nmreq
);
642 nfsm_assert(error
, (numops
== 0), EPROTO
);
645 error
= nfs_request2(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, 0, &nmrep
, &xid
, &status
);
647 if ((lockerror
= nfs_node_lock(dnp
))) {
650 nfsm_chain_skip_tag(error
, &nmrep
);
651 nfsm_chain_get_32(error
, &nmrep
, numops
);
652 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
653 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_REMOVE
);
654 remove_error
= error
;
655 nfsm_chain_check_change_info(error
, &nmrep
, dnp
);
656 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
657 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
658 if (error
&& !lockerror
) {
659 NATTRINVALIDATE(dnp
);
662 nfsm_chain_cleanup(&nmreq
);
663 nfsm_chain_cleanup(&nmrep
);
666 dnp
->n_flag
|= NMODIFIED
;
667 nfs_node_unlock(dnp
);
669 if (error
== NFSERR_GRACE
) {
670 tsleep(&nmp
->nm_state
, (PZERO
- 1), "nfsgrace", 2 * hz
);
687 int error
= 0, lockerror
= ENOENT
, status
, nfsvers
, numops
;
688 struct nfsmount
*nmp
;
689 u_int64_t xid
, savedxid
;
690 struct nfsm_chain nmreq
, nmrep
;
691 struct nfsreq_secinfo_args si
;
693 nmp
= NFSTONMP(fdnp
);
694 if (nfs_mount_gone(nmp
)) {
697 nfsvers
= nmp
->nm_vers
;
698 if (fdnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
701 if (tdnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
705 NFSREQ_SECINFO_SET(&si
, fdnp
, NULL
, 0, NULL
, 0);
706 nfsm_chain_null(&nmreq
);
707 nfsm_chain_null(&nmrep
);
709 // PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM)
711 nfsm_chain_build_alloc_init(error
, &nmreq
, 30 * NFSX_UNSIGNED
+ fnamelen
+ tnamelen
);
712 nfsm_chain_add_compound_header(error
, &nmreq
, "rename", nmp
->nm_minor_vers
, numops
);
714 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
715 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, fdnp
->n_fhp
, fdnp
->n_fhsize
);
717 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
719 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
720 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, tdnp
->n_fhp
, tdnp
->n_fhsize
);
722 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RENAME
);
723 nfsm_chain_add_name(error
, &nmreq
, fnameptr
, fnamelen
, nmp
);
724 nfsm_chain_add_name(error
, &nmreq
, tnameptr
, tnamelen
, nmp
);
726 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
727 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, tdnp
);
729 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
731 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
732 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, fdnp
);
733 nfsm_chain_build_done(error
, &nmreq
);
734 nfsm_assert(error
, (numops
== 0), EPROTO
);
737 error
= nfs_request(fdnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
739 if ((lockerror
= nfs_node_lock2(fdnp
, tdnp
))) {
742 nfsm_chain_skip_tag(error
, &nmrep
);
743 nfsm_chain_get_32(error
, &nmrep
, numops
);
744 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
745 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
746 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
747 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RENAME
);
748 nfsm_chain_check_change_info(error
, &nmrep
, fdnp
);
749 nfsm_chain_check_change_info(error
, &nmrep
, tdnp
);
750 /* directory attributes: if we don't get them, make sure to invalidate */
751 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
753 nfsm_chain_loadattr(error
, &nmrep
, tdnp
, nfsvers
, &xid
);
754 if (error
&& !lockerror
) {
755 NATTRINVALIDATE(tdnp
);
757 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
758 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
760 nfsm_chain_loadattr(error
, &nmrep
, fdnp
, nfsvers
, &xid
);
761 if (error
&& !lockerror
) {
762 NATTRINVALIDATE(fdnp
);
765 nfsm_chain_cleanup(&nmreq
);
766 nfsm_chain_cleanup(&nmrep
);
768 fdnp
->n_flag
|= NMODIFIED
;
769 tdnp
->n_flag
|= NMODIFIED
;
770 nfs_node_unlock2(fdnp
, tdnp
);
776 * NFS V4 readdir RPC.
779 nfs4_readdir_rpc(nfsnode_t dnp
, struct nfsbuf
*bp
, vfs_context_t ctx
)
781 struct nfsmount
*nmp
;
782 int error
= 0, lockerror
, nfsvers
, namedattr
, rdirplus
, bigcookies
, numops
;
783 int i
, status
, more_entries
= 1, eof
, bp_dropped
= 0;
784 uint32_t nmreaddirsize
, nmrsize
;
785 uint32_t namlen
, skiplen
, fhlen
, xlen
, attrlen
, reclen
, space_free
, space_needed
;
786 uint64_t cookie
, lastcookie
, xid
, savedxid
;
787 struct nfsm_chain nmreq
, nmrep
, nmrepsave
;
789 struct nfs_vattr nvattr
, *nvattrp
;
790 struct nfs_dir_buf_header
*ndbhp
;
792 char *padstart
, padlen
;
794 uint32_t entry_attrs
[NFS_ATTR_BITMAP_LEN
];
796 struct nfsreq_secinfo_args si
;
799 if (nfs_mount_gone(nmp
)) {
802 nfsvers
= nmp
->nm_vers
;
803 nmreaddirsize
= nmp
->nm_readdirsize
;
804 nmrsize
= nmp
->nm_rsize
;
805 bigcookies
= nmp
->nm_state
& NFSSTA_BIGCOOKIES
;
806 namedattr
= (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) ? 1 : 0;
807 rdirplus
= (NMFLAG(nmp
, RDIRPLUS
) || namedattr
) ? 1 : 0;
808 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
811 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
814 * Set up attribute request for entries.
815 * For READDIRPLUS functionality, get everything.
816 * Otherwise, just get what we need for struct direntry.
820 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, entry_attrs
);
821 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_FILEHANDLE
);
824 NFS_CLEAR_ATTRIBUTES(entry_attrs
);
825 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_TYPE
);
826 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_FILEID
);
827 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_MOUNTED_ON_FILEID
);
829 NFS_BITMAP_SET(entry_attrs
, NFS_FATTR_RDATTR_ERROR
);
831 /* lock to protect access to cookie verifier */
832 if ((lockerror
= nfs_node_lock(dnp
))) {
836 /* determine cookie to use, and move dp to the right offset */
837 ndbhp
= (struct nfs_dir_buf_header
*)bp
->nb_data
;
838 dp
= NFS_DIR_BUF_FIRST_DIRENTRY(bp
);
839 if (ndbhp
->ndbh_count
) {
840 for (i
= 0; i
< ndbhp
->ndbh_count
- 1; i
++) {
841 dp
= NFS_DIRENTRY_NEXT(dp
);
843 cookie
= dp
->d_seekoff
;
844 dp
= NFS_DIRENTRY_NEXT(dp
);
846 cookie
= bp
->nb_lblkno
;
847 /* increment with every buffer read */
848 OSAddAtomic64(1, &nfsstats
.readdir_bios
);
853 * The NFS client is responsible for the "." and ".." entries in the
854 * directory. So, we put them at the start of the first buffer.
855 * Don't bother for attribute directories.
857 if (((bp
->nb_lblkno
== 0) && (ndbhp
->ndbh_count
== 0)) &&
858 !(dnp
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
860 fhlen
= rdirplus
? fh
.fh_len
+ 1 : 0;
861 xlen
= rdirplus
? (fhlen
+ sizeof(time_t)) : 0;
864 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
866 bzero(&dp
->d_name
[namlen
+ 1], xlen
);
868 dp
->d_namlen
= namlen
;
869 strlcpy(dp
->d_name
, ".", namlen
+ 1);
870 dp
->d_fileno
= dnp
->n_vattr
.nva_fileid
;
872 dp
->d_reclen
= reclen
;
874 padstart
= dp
->d_name
+ dp
->d_namlen
+ 1 + xlen
;
875 dp
= NFS_DIRENTRY_NEXT(dp
);
876 padlen
= (char*)dp
- padstart
;
878 bzero(padstart
, padlen
);
880 if (rdirplus
) { /* zero out attributes */
881 bzero(NFS_DIR_BUF_NVATTR(bp
, 0), sizeof(struct nfs_vattr
));
886 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
888 bzero(&dp
->d_name
[namlen
+ 1], xlen
);
890 dp
->d_namlen
= namlen
;
891 strlcpy(dp
->d_name
, "..", namlen
+ 1);
893 dp
->d_fileno
= VTONFS(dnp
->n_parent
)->n_vattr
.nva_fileid
;
895 dp
->d_fileno
= dnp
->n_vattr
.nva_fileid
;
898 dp
->d_reclen
= reclen
;
900 padstart
= dp
->d_name
+ dp
->d_namlen
+ 1 + xlen
;
901 dp
= NFS_DIRENTRY_NEXT(dp
);
902 padlen
= (char*)dp
- padstart
;
904 bzero(padstart
, padlen
);
906 if (rdirplus
) { /* zero out attributes */
907 bzero(NFS_DIR_BUF_NVATTR(bp
, 1), sizeof(struct nfs_vattr
));
910 ndbhp
->ndbh_entry_end
= (char*)dp
- bp
->nb_data
;
911 ndbhp
->ndbh_count
= 2;
915 * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
916 * the buffer is full (or we hit EOF). Then put the remainder of the
917 * results in the next buffer(s).
919 nfsm_chain_null(&nmreq
);
920 nfsm_chain_null(&nmrep
);
921 while (nfs_dir_buf_freespace(bp
, rdirplus
) && !(ndbhp
->ndbh_flags
& NDB_FULL
)) {
922 // PUTFH, GETATTR, READDIR
924 nfsm_chain_build_alloc_init(error
, &nmreq
, 26 * NFSX_UNSIGNED
);
925 nfsm_chain_add_compound_header(error
, &nmreq
, tag
, nmp
->nm_minor_vers
, numops
);
927 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
928 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
930 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
931 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
933 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READDIR
);
934 nfsm_chain_add_64(error
, &nmreq
, (cookie
<= 2) ? 0 : cookie
);
935 nfsm_chain_add_64(error
, &nmreq
, dnp
->n_cookieverf
);
936 nfsm_chain_add_32(error
, &nmreq
, nmreaddirsize
);
937 nfsm_chain_add_32(error
, &nmreq
, nmrsize
);
938 nfsm_chain_add_bitmap_supported(error
, &nmreq
, entry_attrs
, nmp
, dnp
);
939 nfsm_chain_build_done(error
, &nmreq
);
940 nfsm_assert(error
, (numops
== 0), EPROTO
);
941 nfs_node_unlock(dnp
);
943 error
= nfs_request(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
945 if ((lockerror
= nfs_node_lock(dnp
))) {
950 nfsm_chain_skip_tag(error
, &nmrep
);
951 nfsm_chain_get_32(error
, &nmrep
, numops
);
952 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
953 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
954 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
955 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READDIR
);
956 nfsm_chain_get_64(error
, &nmrep
, dnp
->n_cookieverf
);
957 nfsm_chain_get_32(error
, &nmrep
, more_entries
);
960 nfs_node_unlock(dnp
);
967 if (lastcookie
== 0) {
968 dnp
->n_rdirplusstamp_sof
= now
.tv_sec
;
969 dnp
->n_rdirplusstamp_eof
= 0;
973 /* loop through the entries packing them into the buffer */
974 while (more_entries
) {
975 /* Entry: COOKIE, NAME, FATTR */
976 nfsm_chain_get_64(error
, &nmrep
, cookie
);
977 nfsm_chain_get_32(error
, &nmrep
, namlen
);
979 if (!bigcookies
&& (cookie
>> 32) && (nmp
== NFSTONMP(dnp
))) {
980 /* we've got a big cookie, make sure flag is set */
981 lck_mtx_lock(&nmp
->nm_lock
);
982 nmp
->nm_state
|= NFSSTA_BIGCOOKIES
;
983 lck_mtx_unlock(&nmp
->nm_lock
);
986 /* just truncate names that don't fit in direntry.d_name */
991 if (namlen
> (sizeof(dp
->d_name
) - 1)) {
992 skiplen
= namlen
- sizeof(dp
->d_name
) + 1;
993 namlen
= sizeof(dp
->d_name
) - 1;
997 /* guess that fh size will be same as parent */
998 fhlen
= rdirplus
? (1 + dnp
->n_fhsize
) : 0;
999 xlen
= rdirplus
? (fhlen
+ sizeof(time_t)) : 0;
1000 attrlen
= rdirplus
? sizeof(struct nfs_vattr
) : 0;
1001 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
1002 space_needed
= reclen
+ attrlen
;
1003 space_free
= nfs_dir_buf_freespace(bp
, rdirplus
);
1004 if (space_needed
> space_free
) {
1006 * We still have entries to pack, but we've
1007 * run out of room in the current buffer.
1008 * So we need to move to the next buffer.
1009 * The block# for the next buffer is the
1010 * last cookie in the current buffer.
1013 ndbhp
->ndbh_flags
|= NDB_FULL
;
1014 nfs_buf_release(bp
, 0);
1017 error
= nfs_buf_get(dnp
, lastcookie
, NFS_DIRBLKSIZ
, vfs_context_thread(ctx
), NBLK_READ
, &bp
);
1019 /* initialize buffer */
1020 ndbhp
= (struct nfs_dir_buf_header
*)bp
->nb_data
;
1021 ndbhp
->ndbh_flags
= 0;
1022 ndbhp
->ndbh_count
= 0;
1023 ndbhp
->ndbh_entry_end
= sizeof(*ndbhp
);
1024 ndbhp
->ndbh_ncgen
= dnp
->n_ncgen
;
1025 space_free
= nfs_dir_buf_freespace(bp
, rdirplus
);
1026 dp
= NFS_DIR_BUF_FIRST_DIRENTRY(bp
);
1027 /* increment with every buffer read */
1028 OSAddAtomic64(1, &nfsstats
.readdir_bios
);
1031 dp
->d_fileno
= cookie
; /* placeholder */
1032 dp
->d_seekoff
= cookie
;
1033 dp
->d_namlen
= namlen
;
1034 dp
->d_reclen
= reclen
;
1035 dp
->d_type
= DT_UNKNOWN
;
1036 nfsm_chain_get_opaque(error
, &nmrep
, namlen
, dp
->d_name
);
1038 dp
->d_name
[namlen
] = '\0';
1040 nfsm_chain_adv(error
, &nmrep
,
1041 nfsm_rndup(namlen
+ skiplen
) - nfsm_rndup(namlen
));
1044 nvattrp
= rdirplus
? NFS_DIR_BUF_NVATTR(bp
, ndbhp
->ndbh_count
) : &nvattr
;
1045 error
= nfs4_parsefattr(&nmrep
, NULL
, nvattrp
, &fh
, NULL
, NULL
);
1046 if (!error
&& NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_ACL
)) {
1047 /* we do NOT want ACLs returned to us here */
1048 NFS_BITMAP_CLR(nvattrp
->nva_bitmap
, NFS_FATTR_ACL
);
1049 if (nvattrp
->nva_acl
) {
1050 kauth_acl_free(nvattrp
->nva_acl
);
1051 nvattrp
->nva_acl
= NULL
;
1054 if (error
&& NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_RDATTR_ERROR
)) {
1055 /* OK, we may not have gotten all of the attributes but we will use what we can. */
1056 if ((error
== NFSERR_MOVED
) || (error
== NFSERR_INVAL
)) {
1057 /* set this up to look like a referral trigger */
1058 nfs4_default_attrs_for_referral_trigger(dnp
, dp
->d_name
, namlen
, nvattrp
, &fh
);
1062 /* check for more entries after this one */
1063 nfsm_chain_get_32(error
, &nmrep
, more_entries
);
1066 /* Skip any "." and ".." entries returned from server. */
1067 /* Also skip any bothersome named attribute entries. */
1068 if (((dp
->d_name
[0] == '.') && ((namlen
== 1) || ((namlen
== 2) && (dp
->d_name
[1] == '.')))) ||
1069 (namedattr
&& (namlen
== 11) && (!strcmp(dp
->d_name
, "SUNWattr_ro") || !strcmp(dp
->d_name
, "SUNWattr_rw")))) {
1070 lastcookie
= cookie
;
1074 if (NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_TYPE
)) {
1075 dp
->d_type
= IFTODT(VTTOIF(nvattrp
->nva_type
));
1077 if (NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_FILEID
)) {
1078 dp
->d_fileno
= nvattrp
->nva_fileid
;
1081 /* fileid is already in d_fileno, so stash xid in attrs */
1082 nvattrp
->nva_fileid
= savedxid
;
1083 if (NFS_BITMAP_ISSET(nvattrp
->nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
1084 fhlen
= fh
.fh_len
+ 1;
1085 xlen
= fhlen
+ sizeof(time_t);
1086 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
1087 space_needed
= reclen
+ attrlen
;
1088 if (space_needed
> space_free
) {
1089 /* didn't actually have the room... move on to next buffer */
1093 /* pack the file handle into the record */
1094 dp
->d_name
[dp
->d_namlen
+ 1] = fh
.fh_len
;
1095 bcopy(fh
.fh_data
, &dp
->d_name
[dp
->d_namlen
+ 2], fh
.fh_len
);
1097 /* mark the file handle invalid */
1099 fhlen
= fh
.fh_len
+ 1;
1100 xlen
= fhlen
+ sizeof(time_t);
1101 reclen
= NFS_DIRENTRY_LEN(namlen
+ xlen
);
1102 bzero(&dp
->d_name
[dp
->d_namlen
+ 1], fhlen
);
1104 *(time_t*)(&dp
->d_name
[dp
->d_namlen
+ 1 + fhlen
]) = now
.tv_sec
;
1105 dp
->d_reclen
= reclen
;
1106 nfs_rdirplus_update_node_attrs(dnp
, dp
, &fh
, nvattrp
, &savedxid
);
1108 padstart
= dp
->d_name
+ dp
->d_namlen
+ 1 + xlen
;
1109 ndbhp
->ndbh_count
++;
1110 lastcookie
= cookie
;
1112 /* advance to next direntry in buffer */
1113 dp
= NFS_DIRENTRY_NEXT(dp
);
1114 ndbhp
->ndbh_entry_end
= (char*)dp
- bp
->nb_data
;
1115 /* zero out the pad bytes */
1116 padlen
= (char*)dp
- padstart
;
1118 bzero(padstart
, padlen
);
1121 /* Finally, get the eof boolean */
1122 nfsm_chain_get_32(error
, &nmrep
, eof
);
1125 ndbhp
->ndbh_flags
|= (NDB_FULL
| NDB_EOF
);
1126 nfs_node_lock_force(dnp
);
1127 dnp
->n_eofcookie
= lastcookie
;
1129 dnp
->n_rdirplusstamp_eof
= now
.tv_sec
;
1131 nfs_node_unlock(dnp
);
1136 nfs_buf_release(bp
, 0);
1140 if ((lockerror
= nfs_node_lock(dnp
))) {
1144 nfsm_chain_cleanup(&nmrep
);
1145 nfsm_chain_null(&nmreq
);
1148 if (bp_dropped
&& bp
) {
1149 nfs_buf_release(bp
, 0);
1152 nfs_node_unlock(dnp
);
1154 nfsm_chain_cleanup(&nmreq
);
1155 nfsm_chain_cleanup(&nmrep
);
1156 return bp_dropped
? NFSERR_DIRBUFDROPPED
: error
;
/*
 * nfs4_lookup_rpc_async: build and fire off an asynchronous NFSv4 COMPOUND
 * for a directory lookup: PUTFH, GETATTR(dir), LOOKUP(P), GETFH, GETATTR(FH).
 * The in-flight request handle is returned through *reqp for the caller to
 * finish via nfs4_lookup_rpc_async_finish().
 * NOTE(review): this chunk is a mangled extraction — several original source
 * lines (returns, closing braces, some parameters) are elided; only comments
 * were added here, no code was changed.
 */
1160 nfs4_lookup_rpc_async(
1165 struct nfsreq
**reqp
)
1167 int error
= 0, isdotdot
= 0, nfsvers
, numops
;
1168 struct nfsm_chain nmreq
;
1169 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
1170 struct nfsmount
*nmp
;
1171 struct nfsreq_secinfo_args si
;
1173 nmp
= NFSTONMP(dnp
);
/* bail out early if the mount has gone away */
1174 if (nfs_mount_gone(nmp
)) {
1177 nfsvers
= nmp
->nm_vers
;
/* referral triggers have no usable filehandle to look up under */
1178 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
/* ".." lookups use LOOKUPP, so no name goes into the secinfo args */
1182 if ((name
[0] == '.') && (name
[1] == '.') && (namelen
== 2)) {
1184 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
1186 NFSREQ_SECINFO_SET(&si
, dnp
, dnp
->n_fhp
, dnp
->n_fhsize
, name
, namelen
);
1189 nfsm_chain_null(&nmreq
);
1191 // PUTFH, GETATTR, LOOKUP(P), GETFH, GETATTR (FH)
1193 nfsm_chain_build_alloc_init(error
, &nmreq
, 20 * NFSX_UNSIGNED
+ namelen
);
1194 nfsm_chain_add_compound_header(error
, &nmreq
, "lookup", nmp
->nm_minor_vers
, numops
);
1196 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1197 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
1199 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1200 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
/* LOOKUPP (parent) for "..", otherwise LOOKUP by name */
1203 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOOKUPP
);
1205 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOOKUP
);
1206 nfsm_chain_add_name(error
, &nmreq
, name
, namelen
, nmp
);
1209 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETFH
);
1211 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1212 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
1213 /* some ".zfs" directories can't handle being asked for some attributes */
1214 if ((dnp
->n_flag
& NISDOTZFS
) && !isdotdot
) {
1215 NFS_BITMAP_CLR(bitmap
, NFS_FATTR_NAMED_ATTR
);
1217 if ((dnp
->n_flag
& NISDOTZFSCHILD
) && isdotdot
) {
1218 NFS_BITMAP_CLR(bitmap
, NFS_FATTR_NAMED_ATTR
);
/* looking up the literal name ".zfs" itself — same workaround */
1220 if (((namelen
== 4) && (name
[0] == '.') && (name
[1] == 'z') && (name
[2] == 'f') && (name
[3] == 's'))) {
1221 NFS_BITMAP_CLR(bitmap
, NFS_FATTR_NAMED_ATTR
);
1223 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, NULL
);
1224 nfsm_chain_build_done(error
, &nmreq
);
1225 nfsm_assert(error
, (numops
== 0), EPROTO
);
/* submit asynchronously; the caller completes the RPC with *reqp later */
1227 error
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
1228 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &si
, 0, NULL
, reqp
);
1230 nfsm_chain_cleanup(&nmreq
);
/*
 * nfs4_lookup_rpc_async_finish: complete an async NFSv4 lookup started by
 * nfs4_lookup_rpc_async().  Parses the COMPOUND reply (PUTFH, GETATTR,
 * LOOKUP(P), GETFH, GETATTR), returning the looked-up filehandle in *fhp
 * and its attributes in *nvap.  May also issue a SECINFO RPC to pick a
 * default security flavor for the mount on the first successful LOOKUP.
 * NOTE(review): mangled extraction — some original lines are elided;
 * comments only were added.
 */
1236 nfs4_lookup_rpc_async_finish(
1244 struct nfs_vattr
*nvap
)
1246 int error
= 0, lockerror
= ENOENT
, status
, nfsvers
, numops
, isdotdot
= 0;
1247 uint32_t op
= NFS_OP_LOOKUP
;
1249 struct nfsmount
*nmp
;
1250 struct nfsm_chain nmrep
;
1252 nmp
= NFSTONMP(dnp
);
1256 nfsvers
= nmp
->nm_vers
;
/* ".." was sent as LOOKUPP; remember so we check the right op below */
1257 if ((name
[0] == '.') && (name
[1] == '.') && (namelen
== 2)) {
1261 nfsm_chain_null(&nmrep
);
1263 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
1265 if ((lockerror
= nfs_node_lock(dnp
))) {
1268 nfsm_chain_skip_tag(error
, &nmrep
);
1269 nfsm_chain_get_32(error
, &nmrep
, numops
);
1270 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1271 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
/* update the directory node's cached attributes from the first GETATTR */
1275 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
1277 nfsm_chain_op_check(error
, &nmrep
, (isdotdot
? NFS_OP_LOOKUPP
: NFS_OP_LOOKUP
));
1278 nfsmout_if(error
|| !fhp
|| !nvap
);
1279 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETFH
);
1280 nfsm_chain_get_32(error
, &nmrep
, fhp
->fh_len
);
/* reject filehandles larger than our fixed-size buffer */
1281 if (error
== 0 && fhp
->fh_len
> sizeof(fhp
->fh_data
)) {
1285 nfsm_chain_get_opaque(error
, &nmrep
, fhp
->fh_len
, fhp
->fh_data
);
1286 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1287 if ((error
== NFSERR_MOVED
) || (error
== NFSERR_INVAL
)) {
1288 /* set this up to look like a referral trigger */
1289 nfs4_default_attrs_for_referral_trigger(dnp
, name
, namelen
, nvap
, fhp
);
1293 error
= nfs4_parsefattr(&nmrep
, NULL
, nvap
, NULL
, NULL
, NULL
);
1297 nfs_node_unlock(dnp
);
1299 nfsm_chain_cleanup(&nmrep
);
1300 if (!error
&& (op
== NFS_OP_LOOKUP
) && (nmp
->nm_state
& NFSSTA_NEEDSECINFO
)) {
1301 /* We still need to get SECINFO to set default for mount. */
1302 /* Do so for the first LOOKUP that returns successfully. */
1305 sec
.count
= NX_MAX_SEC_FLAVORS
;
1306 error
= nfs4_secinfo_rpc(nmp
, &req
->r_secinfo
, vfs_context_ucred(ctx
), sec
.flavors
, &sec
.count
);
1307 /* [sigh] some implementations return "illegal" error for unsupported ops */
1308 if (error
== NFSERR_OP_ILLEGAL
) {
1312 /* set our default security flavor to the first in the list */
1313 lck_mtx_lock(&nmp
->nm_lock
);
1315 nmp
->nm_auth
= sec
.flavors
[0];
1317 nmp
->nm_state
&= ~NFSSTA_NEEDSECINFO
;
1318 lck_mtx_unlock(&nmp
->nm_lock
);
1332 struct nfsmount
*nmp
;
1333 int error
= 0, lockerror
, status
, nfsvers
, numops
;
1334 u_int64_t xid
, newwverf
;
1336 struct nfsm_chain nmreq
, nmrep
;
1337 struct nfsreq_secinfo_args si
;
1340 FSDBG(521, np
, offset
, count
, nmp
? nmp
->nm_state
: 0);
1341 if (nfs_mount_gone(nmp
)) {
1344 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
1347 if (!(nmp
->nm_state
& NFSSTA_HASWRITEVERF
)) {
1350 nfsvers
= nmp
->nm_vers
;
1352 if (count
> UINT32_MAX
) {
1358 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
1359 nfsm_chain_null(&nmreq
);
1360 nfsm_chain_null(&nmrep
);
1362 // PUTFH, COMMIT, GETATTR
1364 nfsm_chain_build_alloc_init(error
, &nmreq
, 19 * NFSX_UNSIGNED
);
1365 nfsm_chain_add_compound_header(error
, &nmreq
, "commit", nmp
->nm_minor_vers
, numops
);
1367 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1368 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
1370 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_COMMIT
);
1371 nfsm_chain_add_64(error
, &nmreq
, offset
);
1372 nfsm_chain_add_32(error
, &nmreq
, count32
);
1374 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1375 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
1376 nfsm_chain_build_done(error
, &nmreq
);
1377 nfsm_assert(error
, (numops
== 0), EPROTO
);
1379 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
1380 current_thread(), cred
, &si
, 0, &nmrep
, &xid
, &status
);
1382 if ((lockerror
= nfs_node_lock(np
))) {
1385 nfsm_chain_skip_tag(error
, &nmrep
);
1386 nfsm_chain_get_32(error
, &nmrep
, numops
);
1387 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1388 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_COMMIT
);
1389 nfsm_chain_get_64(error
, &nmrep
, newwverf
);
1390 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1391 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
1393 nfs_node_unlock(np
);
1396 lck_mtx_lock(&nmp
->nm_lock
);
1397 if (nmp
->nm_verf
!= newwverf
) {
1398 nmp
->nm_verf
= newwverf
;
1400 if (wverf
!= newwverf
) {
1401 error
= NFSERR_STALEWRITEVERF
;
1403 lck_mtx_unlock(&nmp
->nm_lock
);
1405 nfsm_chain_cleanup(&nmreq
);
1406 nfsm_chain_cleanup(&nmrep
);
1413 struct nfs_fsattr
*nfsap
,
1417 int error
= 0, lockerror
, status
, nfsvers
, numops
;
1418 struct nfsm_chain nmreq
, nmrep
;
1419 struct nfsmount
*nmp
= NFSTONMP(np
);
1420 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
1421 struct nfs_vattr nvattr
;
1422 struct nfsreq_secinfo_args si
;
1424 if (nfs_mount_gone(nmp
)) {
1427 nfsvers
= nmp
->nm_vers
;
1428 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
1432 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
1433 NVATTR_INIT(&nvattr
);
1434 nfsm_chain_null(&nmreq
);
1435 nfsm_chain_null(&nmrep
);
1437 /* NFSv4: fetch "pathconf" info for this node */
1440 nfsm_chain_build_alloc_init(error
, &nmreq
, 16 * NFSX_UNSIGNED
);
1441 nfsm_chain_add_compound_header(error
, &nmreq
, "pathconf", nmp
->nm_minor_vers
, numops
);
1443 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1444 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
1446 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1447 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
1448 NFS_BITMAP_SET(bitmap
, NFS_FATTR_MAXLINK
);
1449 NFS_BITMAP_SET(bitmap
, NFS_FATTR_MAXNAME
);
1450 NFS_BITMAP_SET(bitmap
, NFS_FATTR_NO_TRUNC
);
1451 NFS_BITMAP_SET(bitmap
, NFS_FATTR_CHOWN_RESTRICTED
);
1452 NFS_BITMAP_SET(bitmap
, NFS_FATTR_CASE_INSENSITIVE
);
1453 NFS_BITMAP_SET(bitmap
, NFS_FATTR_CASE_PRESERVING
);
1454 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
1455 nfsm_chain_build_done(error
, &nmreq
);
1456 nfsm_assert(error
, (numops
== 0), EPROTO
);
1458 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
1460 nfsm_chain_skip_tag(error
, &nmrep
);
1461 nfsm_chain_get_32(error
, &nmrep
, numops
);
1462 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1463 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1465 error
= nfs4_parsefattr(&nmrep
, nfsap
, &nvattr
, NULL
, NULL
, NULL
);
1467 if ((lockerror
= nfs_node_lock(np
))) {
1471 nfs_loadattrcache(np
, &nvattr
, &xid
, 0);
1474 nfs_node_unlock(np
);
1477 NVATTR_CLEANUP(&nvattr
);
1478 nfsm_chain_cleanup(&nmreq
);
1479 nfsm_chain_cleanup(&nmrep
);
1485 struct vnop_getattr_args
/* {
1486 * struct vnodeop_desc *a_desc;
1488 * struct vnode_attr *a_vap;
1489 * vfs_context_t a_context;
1492 struct vnode_attr
*vap
= ap
->a_vap
;
1493 struct nfsmount
*nmp
;
1494 struct nfs_vattr nva
;
1495 int error
, acls
, ngaflags
;
1497 nmp
= VTONMP(ap
->a_vp
);
1498 if (nfs_mount_gone(nmp
)) {
1501 acls
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_ACL
);
1503 ngaflags
= NGA_CACHED
;
1504 if (VATTR_IS_ACTIVE(vap
, va_acl
) && acls
) {
1505 ngaflags
|= NGA_ACL
;
1507 error
= nfs_getattr(VTONFS(ap
->a_vp
), &nva
, ap
->a_context
, ngaflags
);
1512 /* copy what we have in nva to *a_vap */
1513 if (VATTR_IS_ACTIVE(vap
, va_rdev
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_RAWDEV
)) {
1514 dev_t rdev
= makedev(nva
.nva_rawdev
.specdata1
, nva
.nva_rawdev
.specdata2
);
1515 VATTR_RETURN(vap
, va_rdev
, rdev
);
1517 if (VATTR_IS_ACTIVE(vap
, va_nlink
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_NUMLINKS
)) {
1518 VATTR_RETURN(vap
, va_nlink
, nva
.nva_nlink
);
1520 if (VATTR_IS_ACTIVE(vap
, va_data_size
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_SIZE
)) {
1521 VATTR_RETURN(vap
, va_data_size
, nva
.nva_size
);
1523 // VATTR_RETURN(vap, va_data_alloc, ???);
1524 // VATTR_RETURN(vap, va_total_size, ???);
1525 if (VATTR_IS_ACTIVE(vap
, va_total_alloc
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_SPACE_USED
)) {
1526 VATTR_RETURN(vap
, va_total_alloc
, nva
.nva_bytes
);
1528 if (VATTR_IS_ACTIVE(vap
, va_uid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_OWNER
)) {
1529 VATTR_RETURN(vap
, va_uid
, nva
.nva_uid
);
1531 if (VATTR_IS_ACTIVE(vap
, va_uuuid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_OWNER
)) {
1532 VATTR_RETURN(vap
, va_uuuid
, nva
.nva_uuuid
);
1534 if (VATTR_IS_ACTIVE(vap
, va_gid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_OWNER_GROUP
)) {
1535 VATTR_RETURN(vap
, va_gid
, nva
.nva_gid
);
1537 if (VATTR_IS_ACTIVE(vap
, va_guuid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_OWNER_GROUP
)) {
1538 VATTR_RETURN(vap
, va_guuid
, nva
.nva_guuid
);
1540 if (VATTR_IS_ACTIVE(vap
, va_mode
)) {
1541 if (NMFLAG(nmp
, ACLONLY
) || !NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_MODE
)) {
1542 VATTR_RETURN(vap
, va_mode
, 0777);
1544 VATTR_RETURN(vap
, va_mode
, nva
.nva_mode
);
1547 if (VATTR_IS_ACTIVE(vap
, va_flags
) &&
1548 (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_ARCHIVE
) ||
1549 NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_HIDDEN
) ||
1550 (nva
.nva_flags
& NFS_FFLAG_TRIGGER
))) {
1552 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_ARCHIVE
) &&
1553 (nva
.nva_flags
& NFS_FFLAG_ARCHIVED
)) {
1554 flags
|= SF_ARCHIVED
;
1556 if (NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_HIDDEN
) &&
1557 (nva
.nva_flags
& NFS_FFLAG_HIDDEN
)) {
1560 VATTR_RETURN(vap
, va_flags
, flags
);
1562 if (VATTR_IS_ACTIVE(vap
, va_create_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_CREATE
)) {
1563 vap
->va_create_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_CREATE
];
1564 vap
->va_create_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_CREATE
];
1565 VATTR_SET_SUPPORTED(vap
, va_create_time
);
1567 if (VATTR_IS_ACTIVE(vap
, va_access_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_ACCESS
)) {
1568 vap
->va_access_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_ACCESS
];
1569 vap
->va_access_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_ACCESS
];
1570 VATTR_SET_SUPPORTED(vap
, va_access_time
);
1572 if (VATTR_IS_ACTIVE(vap
, va_modify_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_MODIFY
)) {
1573 vap
->va_modify_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_MODIFY
];
1574 vap
->va_modify_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_MODIFY
];
1575 VATTR_SET_SUPPORTED(vap
, va_modify_time
);
1577 if (VATTR_IS_ACTIVE(vap
, va_change_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_METADATA
)) {
1578 vap
->va_change_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_CHANGE
];
1579 vap
->va_change_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_CHANGE
];
1580 VATTR_SET_SUPPORTED(vap
, va_change_time
);
1582 if (VATTR_IS_ACTIVE(vap
, va_backup_time
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TIME_BACKUP
)) {
1583 vap
->va_backup_time
.tv_sec
= nva
.nva_timesec
[NFSTIME_BACKUP
];
1584 vap
->va_backup_time
.tv_nsec
= nva
.nva_timensec
[NFSTIME_BACKUP
];
1585 VATTR_SET_SUPPORTED(vap
, va_backup_time
);
1587 if (VATTR_IS_ACTIVE(vap
, va_fileid
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_FILEID
)) {
1588 VATTR_RETURN(vap
, va_fileid
, nva
.nva_fileid
);
1590 if (VATTR_IS_ACTIVE(vap
, va_type
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_TYPE
)) {
1591 VATTR_RETURN(vap
, va_type
, nva
.nva_type
);
1593 if (VATTR_IS_ACTIVE(vap
, va_filerev
) && NFS_BITMAP_ISSET(nva
.nva_bitmap
, NFS_FATTR_CHANGE
)) {
1594 VATTR_RETURN(vap
, va_filerev
, nva
.nva_change
);
1597 if (VATTR_IS_ACTIVE(vap
, va_acl
) && acls
) {
1598 VATTR_RETURN(vap
, va_acl
, nva
.nva_acl
);
1602 // other attrs we might support someday:
1603 // VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? */);
1605 NVATTR_CLEANUP(&nva
);
1612 struct vnode_attr
*vap
,
1615 struct nfsmount
*nmp
= NFSTONMP(np
);
1616 int error
= 0, setattr_error
= 0, lockerror
= ENOENT
, status
, nfsvers
, numops
;
1617 u_int64_t xid
, nextxid
;
1618 struct nfsm_chain nmreq
, nmrep
;
1619 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
1620 uint32_t getbitmap
[NFS_ATTR_BITMAP_LEN
];
1621 uint32_t setbitmap
[NFS_ATTR_BITMAP_LEN
];
1622 nfs_stateid stateid
;
1623 struct nfsreq_secinfo_args si
;
1625 if (nfs_mount_gone(nmp
)) {
1628 nfsvers
= nmp
->nm_vers
;
1629 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
1633 if (VATTR_IS_ACTIVE(vap
, va_flags
) && (vap
->va_flags
& ~(SF_ARCHIVED
| UF_HIDDEN
))) {
1634 /* we don't support setting unsupported flags (duh!) */
1635 if (vap
->va_active
& ~VNODE_ATTR_va_flags
) {
1636 return EINVAL
; /* return EINVAL if other attributes also set */
1638 return ENOTSUP
; /* return ENOTSUP for chflags(2) */
1642 /* don't bother requesting some changes if they don't look like they are changing */
1643 if (VATTR_IS_ACTIVE(vap
, va_uid
) && (vap
->va_uid
== np
->n_vattr
.nva_uid
)) {
1644 VATTR_CLEAR_ACTIVE(vap
, va_uid
);
1646 if (VATTR_IS_ACTIVE(vap
, va_gid
) && (vap
->va_gid
== np
->n_vattr
.nva_gid
)) {
1647 VATTR_CLEAR_ACTIVE(vap
, va_gid
);
1649 if (VATTR_IS_ACTIVE(vap
, va_uuuid
) && kauth_guid_equal(&vap
->va_uuuid
, &np
->n_vattr
.nva_uuuid
)) {
1650 VATTR_CLEAR_ACTIVE(vap
, va_uuuid
);
1652 if (VATTR_IS_ACTIVE(vap
, va_guuid
) && kauth_guid_equal(&vap
->va_guuid
, &np
->n_vattr
.nva_guuid
)) {
1653 VATTR_CLEAR_ACTIVE(vap
, va_guuid
);
1657 /* do nothing if no attributes will be sent */
1658 nfs_vattr_set_bitmap(nmp
, bitmap
, vap
);
1659 if (!bitmap
[0] && !bitmap
[1]) {
1663 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
1664 nfsm_chain_null(&nmreq
);
1665 nfsm_chain_null(&nmrep
);
1668 * Prepare GETATTR bitmap: if we are setting the ACL or mode, we
1669 * need to invalidate any cached ACL. And if we had an ACL cached,
1670 * we might as well also fetch the new value.
1672 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, getbitmap
);
1673 if (NFS_BITMAP_ISSET(bitmap
, NFS_FATTR_ACL
) ||
1674 NFS_BITMAP_ISSET(bitmap
, NFS_FATTR_MODE
)) {
1675 if (NACLVALID(np
)) {
1676 NFS_BITMAP_SET(getbitmap
, NFS_FATTR_ACL
);
1681 // PUTFH, SETATTR, GETATTR
1683 nfsm_chain_build_alloc_init(error
, &nmreq
, 40 * NFSX_UNSIGNED
);
1684 nfsm_chain_add_compound_header(error
, &nmreq
, "setattr", nmp
->nm_minor_vers
, numops
);
1686 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
1687 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
1689 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SETATTR
);
1690 if (VATTR_IS_ACTIVE(vap
, va_data_size
)) {
1691 nfs_get_stateid(np
, vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &stateid
);
1693 stateid
.seqid
= stateid
.other
[0] = stateid
.other
[1] = stateid
.other
[2] = 0;
1695 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
1696 nfsm_chain_add_fattr4(error
, &nmreq
, vap
, nmp
);
1698 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
1699 nfsm_chain_add_bitmap_supported(error
, &nmreq
, getbitmap
, nmp
, np
);
1700 nfsm_chain_build_done(error
, &nmreq
);
1701 nfsm_assert(error
, (numops
== 0), EPROTO
);
1703 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
1705 if ((lockerror
= nfs_node_lock(np
))) {
1708 nfsm_chain_skip_tag(error
, &nmrep
);
1709 nfsm_chain_get_32(error
, &nmrep
, numops
);
1710 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
1712 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SETATTR
);
1713 nfsmout_if(error
== EBADRPC
);
1714 setattr_error
= error
;
1716 bmlen
= NFS_ATTR_BITMAP_LEN
;
1717 nfsm_chain_get_bitmap(error
, &nmrep
, setbitmap
, bmlen
);
1719 if (VATTR_IS_ACTIVE(vap
, va_data_size
) && (np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
1720 microuptime(&np
->n_lastio
);
1722 nfs_vattr_set_supported(setbitmap
, vap
);
1723 error
= setattr_error
;
1725 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
1726 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
1728 NATTRINVALIDATE(np
);
1731 * We just changed the attributes and we want to make sure that we
1732 * see the latest attributes. Get the next XID. If it's not the
1733 * next XID after the SETATTR XID, then it's possible that another
1734 * RPC was in flight at the same time and it might put stale attributes
1735 * in the cache. In that case, we invalidate the attributes and set
1736 * the attribute cache XID to guarantee that newer attributes will
1740 nfs_get_xid(&nextxid
);
1741 if (nextxid
!= (xid
+ 1)) {
1742 np
->n_xid
= nextxid
;
1743 NATTRINVALIDATE(np
);
1747 nfs_node_unlock(np
);
1749 nfsm_chain_cleanup(&nmreq
);
1750 nfsm_chain_cleanup(&nmrep
);
1751 if ((setattr_error
== EINVAL
) && VATTR_IS_ACTIVE(vap
, va_acl
) && VATTR_IS_ACTIVE(vap
, va_mode
) && !NMFLAG(nmp
, ACLONLY
)) {
1753 * Some server's may not like ACL/mode combos that get sent.
1754 * If it looks like that's what the server choked on, try setting
1755 * just the ACL and not the mode (unless it looks like everything
1756 * but mode was already successfully set).
1758 if (((bitmap
[0] & setbitmap
[0]) != bitmap
[0]) ||
1759 ((bitmap
[1] & (setbitmap
[1] | NFS_FATTR_MODE
)) != bitmap
[1])) {
1760 VATTR_CLEAR_ACTIVE(vap
, va_mode
);
1767 #endif /* CONFIG_NFS4 */
1770 * Wait for any pending recovery to complete.
/*
 * nfs_mount_state_wait_for_recovery: block until the mount's NFSSTA_RECOVER
 * flag clears, waking the socket thread and napping ~1s between checks.
 * Returns 0, or an error from nfs_sigintr() if the wait was interrupted.
 * NOTE(review): mangled extraction — the loop's break and the final return
 * are among the elided lines; comments only were added.
 */
1773 nfs_mount_state_wait_for_recovery(struct nfsmount
*nmp
)
1775 struct timespec ts
= { .tv_sec
= 1, .tv_nsec
= 0 };
/* sleep is interruptible (PCATCH) only if the mount allows interrupts */
1776 int error
= 0, slpflag
= NMFLAG(nmp
, INTR
) ? PCATCH
: 0;
1778 lck_mtx_lock(&nmp
->nm_lock
);
1779 while (nmp
->nm_state
& NFSSTA_RECOVER
) {
1780 if ((error
= nfs_sigintr(nmp
, NULL
, current_thread(), 1))) {
/* nudge the socket thread so recovery actually makes progress */
1783 nfs_mount_sock_thread_wake(nmp
);
1784 msleep(&nmp
->nm_state
, &nmp
->nm_lock
, slpflag
| (PZERO
- 1), "nfsrecoverwait", &ts
);
1787 lck_mtx_unlock(&nmp
->nm_lock
);
1793 * We're about to use/manipulate NFS mount's open/lock state.
1794 * Wait for any pending state recovery to complete, then
1795 * mark the state as being in use (which will hold off
1796 * the recovery thread until we're done).
/*
 * nfs_mount_state_in_use_start: wait for any pending recovery to finish,
 * then bump nm_stateinuse to hold off the recovery thread while the caller
 * uses/manipulates open/lock state.  Fails early if the mount is gone,
 * forced-unmounting, or dead.
 * NOTE(review): mangled extraction — error returns/braces elided;
 * comments only were added.
 */
1799 nfs_mount_state_in_use_start(struct nfsmount
*nmp
, thread_t thd
)
1801 struct timespec ts
= { .tv_sec
= 1, .tv_nsec
= 0 };
/* interruptible sleep only when the mount is INTR and we have a thread */
1802 int error
= 0, slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
1804 if (nfs_mount_gone(nmp
)) {
1807 lck_mtx_lock(&nmp
->nm_lock
);
/* a force-unmounted or dead mount can never enter "in use" */
1808 if (nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
)) {
1809 lck_mtx_unlock(&nmp
->nm_lock
);
1812 while (nmp
->nm_state
& NFSSTA_RECOVER
) {
1813 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 1))) {
1816 nfs_mount_sock_thread_wake(nmp
);
1817 msleep(&nmp
->nm_state
, &nmp
->nm_lock
, slpflag
| (PZERO
- 1), "nfsrecoverwait", &ts
);
/* recovery is idle — mark the state as in use */
1821 nmp
->nm_stateinuse
++;
1823 lck_mtx_unlock(&nmp
->nm_lock
);
1829 * We're done using/manipulating the NFS mount's open/lock
1830 * state. If the given error indicates that recovery should
1831 * be performed, we'll initiate recovery.
/*
 * nfs_mount_state_in_use_end: drop the nm_stateinuse hold taken by
 * nfs_mount_state_in_use_start().  If the given error indicates the mount's
 * state was lost, kick off recovery; if recovery is waiting on us and we
 * were the last user, wake it.  On NFSERR_GRACE, naps ~2s before retrying.
 * NOTE(review): mangled extraction — the return path is elided;
 * comments only were added.
 */
1834 nfs_mount_state_in_use_end(struct nfsmount
*nmp
, int error
)
1836 int restart
= nfs_mount_state_error_should_restart(error
);
1838 if (nfs_mount_gone(nmp
)) {
1841 lck_mtx_lock(&nmp
->nm_lock
);
/* OLD_STATEID/GRACE are retried in place and don't warrant full recovery */
1842 if (restart
&& (error
!= NFSERR_OLD_STATEID
) && (error
!= NFSERR_GRACE
)) {
1843 printf("nfs_mount_state_in_use_end: error %d, initiating recovery for %s, 0x%x\n",
1844 error
, vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nmp
->nm_stategenid
);
1845 nfs_need_recover(nmp
, error
);
1847 if (nmp
->nm_stateinuse
> 0) {
1848 nmp
->nm_stateinuse
--;
1850 panic("NFS mount state in use count underrun");
/* last user out wakes the recovery thread waiting on nm_stateinuse */
1852 if (!nmp
->nm_stateinuse
&& (nmp
->nm_state
& NFSSTA_RECOVER
)) {
1853 wakeup(&nmp
->nm_stateinuse
);
1855 lck_mtx_unlock(&nmp
->nm_lock
);
/* give the server's grace period a moment before the caller retries */
1856 if (error
== NFSERR_GRACE
) {
1857 tsleep(&nmp
->nm_state
, (PZERO
- 1), "nfsgrace", 2 * hz
);
1864 * Does the error mean we should restart/redo a state-related operation?
1867 nfs_mount_state_error_should_restart(int error
)
1870 case NFSERR_STALE_STATEID
:
1871 case NFSERR_STALE_CLIENTID
:
1872 case NFSERR_ADMIN_REVOKED
:
1873 case NFSERR_EXPIRED
:
1874 case NFSERR_OLD_STATEID
:
1875 case NFSERR_BAD_STATEID
:
1883 * In some cases we may want to limit how many times we restart a
1884 * state-related operation - e.g. we're repeatedly getting NFSERR_GRACE.
1885 * Base the limit on the lease (as long as it's not too short).
1888 nfs_mount_state_max_restarts(struct nfsmount
*nmp
)
1890 return MAX(nmp
->nm_fsattr
.nfsa_lease
, 60);
1894 * Does the error mean we probably lost a delegation?
1897 nfs_mount_state_error_delegation_lost(int error
)
1900 case NFSERR_STALE_STATEID
:
1901 case NFSERR_ADMIN_REVOKED
:
1902 case NFSERR_EXPIRED
:
1903 case NFSERR_OLD_STATEID
:
1904 case NFSERR_BAD_STATEID
:
1905 case NFSERR_GRACE
: /* ugh! (stupid) RFC 3530 specifically disallows CLAIM_DELEGATE_CUR during grace period? */
1913 * Mark an NFS node's open state as busy.
/*
 * nfs_open_state_set_busy: mark an NFS node's open state as busy, sleeping
 * (up to ~2s per nap, interruptibly if the mount allows) while another
 * thread holds N_OPENBUSY.  Returns 0 with N_OPENBUSY set, or an error
 * from nfs_sigintr().
 * NOTE(review): mangled extraction — break/return lines elided;
 * comments only were added.
 */
1916 nfs_open_state_set_busy(nfsnode_t np
, thread_t thd
)
1918 struct nfsmount
*nmp
;
1919 struct timespec ts
= { .tv_sec
= 2, .tv_nsec
= 0 };
1920 int error
= 0, slpflag
;
1923 if (nfs_mount_gone(nmp
)) {
1926 slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
1928 lck_mtx_lock(&np
->n_openlock
);
1929 while (np
->n_openflags
& N_OPENBUSY
) {
1930 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0))) {
/* tell the current holder we want the busy flag, then wait for it */
1933 np
->n_openflags
|= N_OPENWANT
;
1934 msleep(&np
->n_openflags
, &np
->n_openlock
, slpflag
, "nfs_open_state_set_busy", &ts
);
1938 np
->n_openflags
|= N_OPENBUSY
;
1940 lck_mtx_unlock(&np
->n_openlock
);
1946 * Clear an NFS node's open state busy flag and wake up
1947 * anyone wanting it.
/*
 * nfs_open_state_clear_busy: clear an NFS node's N_OPENBUSY flag and wake
 * any threads that registered interest via N_OPENWANT.  Panics if the
 * flag was not actually set (busy/clear imbalance).
 * NOTE(review): mangled extraction — some brace lines elided;
 * comments only were added.
 */
1950 nfs_open_state_clear_busy(nfsnode_t np
)
1954 lck_mtx_lock(&np
->n_openlock
);
1955 if (!(np
->n_openflags
& N_OPENBUSY
)) {
1956 panic("nfs_open_state_clear_busy");
/* remember whether anyone is waiting before we clear both flags */
1958 wanted
= (np
->n_openflags
& N_OPENWANT
);
1959 np
->n_openflags
&= ~(N_OPENBUSY
| N_OPENWANT
);
1960 lck_mtx_unlock(&np
->n_openlock
);
1962 wakeup(&np
->n_openflags
);
1967 * Search a mount's open owner list for the owner for this credential.
1968 * If not found and "alloc" is set, then allocate a new one.
/*
 * nfs_open_owner_find: look up the open owner for this credential (by uid)
 * on the mount's nm_open_owners list.  If not found and "alloc" is set,
 * allocate, initialize, and link a new one.  The returned owner (if any)
 * carries a reference taken via nfs_open_owner_ref().
 * NOTE(review): mangled extraction — loop break, allocation-failure
 * handling, and the return are among elided lines; comments only added.
 */
1970 struct nfs_open_owner
*
1971 nfs_open_owner_find(struct nfsmount
*nmp
, kauth_cred_t cred
, int alloc
)
1973 uid_t uid
= kauth_cred_getuid(cred
);
1974 struct nfs_open_owner
*noop
, *newnoop
= NULL
;
1977 lck_mtx_lock(&nmp
->nm_lock
);
1978 TAILQ_FOREACH(noop
, &nmp
->nm_open_owners
, noo_link
) {
/* owners are keyed by the credential's uid */
1979 if (kauth_cred_getuid(noop
->noo_cred
) == uid
) {
1984 if (!noop
&& !newnoop
&& alloc
) {
/* drop the mount lock across the blocking allocation */
1985 lck_mtx_unlock(&nmp
->nm_lock
);
1986 MALLOC(newnoop
, struct nfs_open_owner
*, sizeof(struct nfs_open_owner
), M_TEMP
, M_WAITOK
);
1990 bzero(newnoop
, sizeof(*newnoop
));
1991 lck_mtx_init(&newnoop
->noo_lock
, nfs_open_grp
, LCK_ATTR_NULL
);
1992 newnoop
->noo_mount
= nmp
;
1993 kauth_cred_ref(cred
);
1994 newnoop
->noo_cred
= cred
;
/* globally-unique owner name via an atomic sequence counter */
1995 newnoop
->noo_name
= OSAddAtomic(1, &nfs_open_owner_seqnum
);
1996 TAILQ_INIT(&newnoop
->noo_opens
);
/* recheck after reacquiring the lock: use our new owner only if still unmatched */
1999 if (!noop
&& newnoop
) {
2000 newnoop
->noo_flags
|= NFS_OPEN_OWNER_LINK
;
2001 os_ref_init(&newnoop
->noo_refcnt
, NULL
);
2002 TAILQ_INSERT_HEAD(&nmp
->nm_open_owners
, newnoop
, noo_link
);
2005 lck_mtx_unlock(&nmp
->nm_lock
);
/* lost the race: someone else linked an owner first — discard ours */
2007 if (newnoop
&& (noop
!= newnoop
)) {
2008 nfs_open_owner_destroy(newnoop
);
2012 nfs_open_owner_ref(noop
);
2019 * destroy an open owner that's no longer needed
/*
 * nfs_open_owner_destroy: release an open owner that is no longer needed —
 * drop its credential reference and tear down its lock.
 * NOTE(review): the trailing free of the structure itself is among the
 * lines elided by the extraction; comments only were added.
 */
2022 nfs_open_owner_destroy(struct nfs_open_owner
*noop
)
2024 if (noop
->noo_cred
) {
2025 kauth_cred_unref(&noop
->noo_cred
);
2027 lck_mtx_destroy(&noop
->noo_lock
, nfs_open_grp
);
2032 * acquire a reference count on an open owner
/*
 * nfs_open_owner_ref: take a reference on an open owner.
 * The refcount is protected by the owner's noo_lock.
 */
2035 nfs_open_owner_ref(struct nfs_open_owner
*noop
)
2037 lck_mtx_lock(&noop
->noo_lock
);
2038 os_ref_retain_locked(&noop
->noo_refcnt
);
2039 lck_mtx_unlock(&noop
->noo_lock
);
2043 * drop a reference count on an open owner and destroy it if
2044 * it is no longer referenced and no longer on the mount's list.
/*
 * nfs_open_owner_rele: drop a reference on an open owner; destroy it once
 * it is unreferenced AND no longer linked to the mount's owner list.
 * Panics on refcount underflow or on releasing the last ref of a busy owner.
 * NOTE(review): mangled extraction — some return/brace lines elided;
 * comments only were added.
 */
2047 nfs_open_owner_rele(struct nfs_open_owner
*noop
)
2049 os_ref_count_t newcount
;
2051 lck_mtx_lock(&noop
->noo_lock
);
2052 if (os_ref_get_count(&noop
->noo_refcnt
) < 1) {
2053 panic("nfs_open_owner_rele: no refcnt");
2055 newcount
= os_ref_release_locked(&noop
->noo_refcnt
);
2056 if (!newcount
&& (noop
->noo_flags
& NFS_OPEN_OWNER_BUSY
)) {
2057 panic("nfs_open_owner_rele: busy");
2059 /* XXX we may potentially want to clean up idle/unused open owner structures */
/* still referenced, or still on the mount list — keep it alive */
2060 if (newcount
|| (noop
->noo_flags
& NFS_OPEN_OWNER_LINK
)) {
2061 lck_mtx_unlock(&noop
->noo_lock
);
2064 /* owner is no longer referenced or linked to mount, so destroy it */
2065 lck_mtx_unlock(&noop
->noo_lock
);
2066 nfs_open_owner_destroy(noop
);
2070 * Mark an open owner as busy because we are about to
2071 * start an operation that uses and updates open owner state.
/*
 * nfs_open_owner_set_busy: mark an open owner busy before an operation that
 * uses/updates its state, sleeping while another thread holds the flag.
 * Returns 0 with NFS_OPEN_OWNER_BUSY set, or an error from nfs_sigintr().
 * NOTE(review): mangled extraction — break/return lines elided;
 * comments only were added.
 */
2074 nfs_open_owner_set_busy(struct nfs_open_owner
*noop
, thread_t thd
)
2076 struct nfsmount
*nmp
;
2077 struct timespec ts
= { .tv_sec
= 2, .tv_nsec
= 0 };
2078 int error
= 0, slpflag
;
2080 nmp
= noop
->noo_mount
;
2081 if (nfs_mount_gone(nmp
)) {
2084 slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
2086 lck_mtx_lock(&noop
->noo_lock
);
2087 while (noop
->noo_flags
& NFS_OPEN_OWNER_BUSY
) {
2088 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0))) {
/* register interest so the holder's clear_busy wakes us */
2091 noop
->noo_flags
|= NFS_OPEN_OWNER_WANT
;
2092 msleep(noop
, &noop
->noo_lock
, slpflag
, "nfs_open_owner_set_busy", &ts
);
2096 noop
->noo_flags
|= NFS_OPEN_OWNER_BUSY
;
2098 lck_mtx_unlock(&noop
->noo_lock
);
2104 * Clear the busy flag on an open owner and wake up anyone waiting
/*
 * nfs_open_owner_clear_busy: clear an open owner's busy flag and wake any
 * waiters.  Panics if the flag was not set (busy/clear imbalance).
 * NOTE(review): the wakeup call following the flag check is among the
 * lines elided by the extraction; comments only were added.
 */
2108 nfs_open_owner_clear_busy(struct nfs_open_owner
*noop
)
2112 lck_mtx_lock(&noop
->noo_lock
);
2113 if (!(noop
->noo_flags
& NFS_OPEN_OWNER_BUSY
)) {
2114 panic("nfs_open_owner_clear_busy");
/* capture waiter interest before clearing both flags */
2116 wanted
= (noop
->noo_flags
& NFS_OPEN_OWNER_WANT
);
2117 noop
->noo_flags
&= ~(NFS_OPEN_OWNER_BUSY
| NFS_OPEN_OWNER_WANT
);
2118 lck_mtx_unlock(&noop
->noo_lock
);
2125 * Given an open/lock owner and an error code, increment the
2126 * sequence ID if appropriate.
/*
 * nfs_owner_seqid_increment: given an open/lock owner and an RPC error,
 * bump the owner's sequence ID when the protocol requires it.  The listed
 * errors are ones for which the server did NOT consume the seqid, so it
 * must not be incremented.
 * NOTE(review): the actual increment logic after the switch is among the
 * lines elided by the extraction; comments only were added.
 */
2129 nfs_owner_seqid_increment(struct nfs_open_owner
*noop
, struct nfs_lock_owner
*nlop
, int error
)
2132 case NFSERR_STALE_CLIENTID
:
2133 case NFSERR_STALE_STATEID
:
2134 case NFSERR_OLD_STATEID
:
2135 case NFSERR_BAD_STATEID
:
2136 case NFSERR_BAD_SEQID
:
2138 case NFSERR_RESOURCE
:
2139 case NFSERR_NOFILEHANDLE
:
2140 /* do not increment the open seqid on these errors */
2152 * Search a node's open file list for any conflicts with this request.
2153 * Also find this open owner's open file structure.
2154 * If not found and "alloc" is set, then allocate one.
/*
 * nfs_open_file_find: public wrapper — search a node's open file list for
 * conflicts and for this owner's open file structure, allocating one if
 * requested.  Simply forwards to nfs_open_file_find_internal().
 * NOTE(review): the wrapper's name/first parameters are among the lines
 * elided by the extraction; comments only were added.
 */
2159 struct nfs_open_owner
*noop
,
2160 struct nfs_open_file
**nofpp
,
2161 uint32_t accessMode
,
2166 return nfs_open_file_find_internal(np
, noop
, nofpp
, accessMode
, denyMode
, alloc
);
2170 * Internally, allow using a provisional nodeless nofp (passed in via *nofpp)
2171 * if an existing one is not found. This is used in "create" scenarios to
2172 * officially add the provisional nofp to the node once the node is created.
/*
 * nfs_open_file_find_internal: search np's n_opens list for this owner's
 * open file structure, checking every entry for access/deny conflicts.
 * If none exists and "alloc" is set, allocate and link a new one; a
 * provisional nodeless nofp may also be passed in via *nofpp and is
 * officially attached to the node here (the "create" case).
 * Returns 0 with *nofpp set, or an error (e.g. ESRCH when not found).
 * NOTE(review): mangled extraction — conflict-return, loop breaks, and
 * some linking lines are elided; comments only were added.
 */
2175 nfs_open_file_find_internal(
2177 struct nfs_open_owner
*noop
,
2178 struct nfs_open_file
**nofpp
,
2179 uint32_t accessMode
,
2183 struct nfs_open_file
*nofp
= NULL
, *nofp2
, *newnofp
= NULL
;
2189 lck_mtx_lock(&np
->n_openlock
);
2190 TAILQ_FOREACH(nofp2
, &np
->n_opens
, nof_link
) {
2191 if (nofp2
->nof_owner
== noop
) {
/* our access vs. their deny, or our deny vs. their access → conflict */
2197 if ((accessMode
& nofp2
->nof_deny
) || (denyMode
& nofp2
->nof_access
)) {
2198 /* This request conflicts with an existing open on this client. */
2199 lck_mtx_unlock(&np
->n_openlock
);
2205 * If this open owner doesn't have an open
2206 * file structure yet, we create one for it.
2208 if (!nofp
&& !*nofpp
&& !newnofp
&& alloc
) {
/* drop the node lock across the blocking allocation */
2209 lck_mtx_unlock(&np
->n_openlock
);
2211 MALLOC(newnofp
, struct nfs_open_file
*, sizeof(struct nfs_open_file
), M_TEMP
, M_WAITOK
);
2215 bzero(newnofp
, sizeof(*newnofp
));
2216 lck_mtx_init(&newnofp
->nof_lock
, nfs_open_grp
, LCK_ATTR_NULL
);
2217 newnofp
->nof_owner
= noop
;
2218 nfs_open_owner_ref(noop
);
2219 newnofp
->nof_np
= np
;
2220 lck_mtx_lock(&noop
->noo_lock
);
2221 TAILQ_INSERT_HEAD(&noop
->noo_opens
, newnofp
, nof_oolink
);
2222 lck_mtx_unlock(&noop
->noo_lock
);
/* attach a provisional (create-case) nofp to the node */
2229 (*nofpp
)->nof_np
= np
;
2235 TAILQ_INSERT_HEAD(&np
->n_opens
, nofp
, nof_link
);
2239 lck_mtx_unlock(&np
->n_openlock
);
/* lost the race to an existing entry — discard the one we allocated */
2242 if (alloc
&& newnofp
&& (nofp
!= newnofp
)) {
2243 nfs_open_file_destroy(newnofp
);
2247 return nofp
? 0 : ESRCH
;
2251 * Destroy an open file structure.
/*
 * nfs_open_file_destroy: unlink an open file structure from its owner's
 * open list, drop the owner reference it held, and tear down its lock.
 * NOTE(review): the trailing free of the structure itself is among the
 * lines elided by the extraction; comments only were added.
 */
2254 nfs_open_file_destroy(struct nfs_open_file
*nofp
)
2256 lck_mtx_lock(&nofp
->nof_owner
->noo_lock
);
2257 TAILQ_REMOVE(&nofp
->nof_owner
->noo_opens
, nofp
, nof_oolink
);
2258 lck_mtx_unlock(&nofp
->nof_owner
->noo_lock
);
2259 nfs_open_owner_rele(nofp
->nof_owner
);
2260 lck_mtx_destroy(&nofp
->nof_lock
, nfs_open_grp
);
2265 * Mark an open file as busy because we are about to
2266 * start an operation that uses and updates open file state.
/*
 * nfs_open_file_set_busy: mark an open file busy before an operation that
 * uses/updates its state, sleeping while another thread holds the flag.
 * Returns 0 with NFS_OPEN_FILE_BUSY set, or an error from nfs_sigintr().
 * NOTE(review): mangled extraction — break/return lines elided;
 * comments only were added.
 */
2269 nfs_open_file_set_busy(struct nfs_open_file
*nofp
, thread_t thd
)
2271 struct nfsmount
*nmp
;
2272 struct timespec ts
= { .tv_sec
= 2, .tv_nsec
= 0 };
2273 int error
= 0, slpflag
;
2275 nmp
= nofp
->nof_owner
->noo_mount
;
2276 if (nfs_mount_gone(nmp
)) {
2279 slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
2281 lck_mtx_lock(&nofp
->nof_lock
);
2282 while (nofp
->nof_flags
& NFS_OPEN_FILE_BUSY
) {
2283 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0))) {
/* register interest so the holder's clear_busy wakes us */
2286 nofp
->nof_flags
|= NFS_OPEN_FILE_WANT
;
2287 msleep(nofp
, &nofp
->nof_lock
, slpflag
, "nfs_open_file_set_busy", &ts
);
2291 nofp
->nof_flags
|= NFS_OPEN_FILE_BUSY
;
2293 lck_mtx_unlock(&nofp
->nof_lock
);
2299 * Clear the busy flag on an open file and wake up anyone waiting
2303 nfs_open_file_clear_busy(struct nfs_open_file
*nofp
)
2307 lck_mtx_lock(&nofp
->nof_lock
);
2308 if (!(nofp
->nof_flags
& NFS_OPEN_FILE_BUSY
)) {
2309 panic("nfs_open_file_clear_busy");
2311 wanted
= (nofp
->nof_flags
& NFS_OPEN_FILE_WANT
);
2312 nofp
->nof_flags
&= ~(NFS_OPEN_FILE_BUSY
| NFS_OPEN_FILE_WANT
);
2313 lck_mtx_unlock(&nofp
->nof_lock
);
2320 * Add the open state for the given access/deny modes to this open file.
2323 nfs_open_file_add_open(struct nfs_open_file
*nofp
, uint32_t accessMode
, uint32_t denyMode
, int delegated
)
2325 lck_mtx_lock(&nofp
->nof_lock
);
2326 nofp
->nof_access
|= accessMode
;
2327 nofp
->nof_deny
|= denyMode
;
2330 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2331 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2333 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2335 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2338 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2339 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2341 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2343 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2344 nofp
->nof_d_rw_dw
++;
2346 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2347 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2348 nofp
->nof_d_r_drw
++;
2349 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2350 nofp
->nof_d_w_drw
++;
2351 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2352 nofp
->nof_d_rw_drw
++;
2356 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2357 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2359 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2361 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2364 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2365 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2367 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2369 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2372 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2373 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2375 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2377 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2383 nofp
->nof_opencnt
++;
2384 lck_mtx_unlock(&nofp
->nof_lock
);
2388 * Find which particular open combo will be closed and report what
2389 * the new modes will be and whether the open was delegated.
2392 nfs_open_file_remove_open_find(
2393 struct nfs_open_file
*nofp
,
2394 uint32_t accessMode
,
2396 uint32_t *newAccessMode
,
2397 uint32_t *newDenyMode
,
2401 * Calculate new modes: a mode bit gets removed when there's only
2402 * one count in all the corresponding counts
2404 *newAccessMode
= nofp
->nof_access
;
2405 *newDenyMode
= nofp
->nof_deny
;
2407 if ((accessMode
& NFS_OPEN_SHARE_ACCESS_READ
) &&
2408 (nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_READ
) &&
2409 ((nofp
->nof_r
+ nofp
->nof_d_r
+
2410 nofp
->nof_rw
+ nofp
->nof_d_rw
+
2411 nofp
->nof_r_dw
+ nofp
->nof_d_r_dw
+
2412 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
+
2413 nofp
->nof_r_drw
+ nofp
->nof_d_r_drw
+
2414 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
) == 1)) {
2415 *newAccessMode
&= ~NFS_OPEN_SHARE_ACCESS_READ
;
2417 if ((accessMode
& NFS_OPEN_SHARE_ACCESS_WRITE
) &&
2418 (nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_WRITE
) &&
2419 ((nofp
->nof_w
+ nofp
->nof_d_w
+
2420 nofp
->nof_rw
+ nofp
->nof_d_rw
+
2421 nofp
->nof_w_dw
+ nofp
->nof_d_w_dw
+
2422 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
+
2423 nofp
->nof_w_drw
+ nofp
->nof_d_w_drw
+
2424 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
) == 1)) {
2425 *newAccessMode
&= ~NFS_OPEN_SHARE_ACCESS_WRITE
;
2427 if ((denyMode
& NFS_OPEN_SHARE_DENY_READ
) &&
2428 (nofp
->nof_deny
& NFS_OPEN_SHARE_DENY_READ
) &&
2429 ((nofp
->nof_r_drw
+ nofp
->nof_d_r_drw
+
2430 nofp
->nof_w_drw
+ nofp
->nof_d_w_drw
+
2431 nofp
->nof_rw_drw
+ nofp
->nof_d_rw_drw
) == 1)) {
2432 *newDenyMode
&= ~NFS_OPEN_SHARE_DENY_READ
;
2434 if ((denyMode
& NFS_OPEN_SHARE_DENY_WRITE
) &&
2435 (nofp
->nof_deny
& NFS_OPEN_SHARE_DENY_WRITE
) &&
2436 ((nofp
->nof_r_drw
+ nofp
->nof_d_r_drw
+
2437 nofp
->nof_w_drw
+ nofp
->nof_d_w_drw
+
2438 nofp
->nof_rw_drw
+ nofp
->nof_d_rw_drw
+
2439 nofp
->nof_r_dw
+ nofp
->nof_d_r_dw
+
2440 nofp
->nof_w_dw
+ nofp
->nof_d_w_dw
+
2441 nofp
->nof_rw_dw
+ nofp
->nof_d_rw_dw
) == 1)) {
2442 *newDenyMode
&= ~NFS_OPEN_SHARE_DENY_WRITE
;
2445 /* Find the corresponding open access/deny mode counter. */
2446 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2447 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2448 *delegated
= (nofp
->nof_d_r
!= 0);
2449 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2450 *delegated
= (nofp
->nof_d_w
!= 0);
2451 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2452 *delegated
= (nofp
->nof_d_rw
!= 0);
2456 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2457 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2458 *delegated
= (nofp
->nof_d_r_dw
!= 0);
2459 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2460 *delegated
= (nofp
->nof_d_w_dw
!= 0);
2461 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2462 *delegated
= (nofp
->nof_d_rw_dw
!= 0);
2466 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2467 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2468 *delegated
= (nofp
->nof_d_r_drw
!= 0);
2469 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2470 *delegated
= (nofp
->nof_d_w_drw
!= 0);
2471 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2472 *delegated
= (nofp
->nof_d_rw_drw
!= 0);
2480 * Remove the open state for the given access/deny modes to this open file.
2483 nfs_open_file_remove_open(struct nfs_open_file
*nofp
, uint32_t accessMode
, uint32_t denyMode
)
2485 uint32_t newAccessMode
, newDenyMode
;
2488 lck_mtx_lock(&nofp
->nof_lock
);
2489 nfs_open_file_remove_open_find(nofp
, accessMode
, denyMode
, &newAccessMode
, &newDenyMode
, &delegated
);
2491 /* Decrement the corresponding open access/deny mode counter. */
2492 if (denyMode
== NFS_OPEN_SHARE_DENY_NONE
) {
2493 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2495 if (nofp
->nof_d_r
== 0) {
2496 NP(nofp
->nof_np
, "nfs: open(R) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2501 if (nofp
->nof_r
== 0) {
2502 NP(nofp
->nof_np
, "nfs: open(R) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2507 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2509 if (nofp
->nof_d_w
== 0) {
2510 NP(nofp
->nof_np
, "nfs: open(W) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2515 if (nofp
->nof_w
== 0) {
2516 NP(nofp
->nof_np
, "nfs: open(W) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2521 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2523 if (nofp
->nof_d_rw
== 0) {
2524 NP(nofp
->nof_np
, "nfs: open(RW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2529 if (nofp
->nof_rw
== 0) {
2530 NP(nofp
->nof_np
, "nfs: open(RW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2536 } else if (denyMode
== NFS_OPEN_SHARE_DENY_WRITE
) {
2537 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2539 if (nofp
->nof_d_r_dw
== 0) {
2540 NP(nofp
->nof_np
, "nfs: open(R,DW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2545 if (nofp
->nof_r_dw
== 0) {
2546 NP(nofp
->nof_np
, "nfs: open(R,DW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2551 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2553 if (nofp
->nof_d_w_dw
== 0) {
2554 NP(nofp
->nof_np
, "nfs: open(W,DW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2559 if (nofp
->nof_w_dw
== 0) {
2560 NP(nofp
->nof_np
, "nfs: open(W,DW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2565 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2567 if (nofp
->nof_d_rw_dw
== 0) {
2568 NP(nofp
->nof_np
, "nfs: open(RW,DW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2570 nofp
->nof_d_rw_dw
--;
2573 if (nofp
->nof_rw_dw
== 0) {
2574 NP(nofp
->nof_np
, "nfs: open(RW,DW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2580 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2581 if (accessMode
== NFS_OPEN_SHARE_ACCESS_READ
) {
2583 if (nofp
->nof_d_r_drw
== 0) {
2584 NP(nofp
->nof_np
, "nfs: open(R,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2586 nofp
->nof_d_r_drw
--;
2589 if (nofp
->nof_r_drw
== 0) {
2590 NP(nofp
->nof_np
, "nfs: open(R,DRW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2595 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_WRITE
) {
2597 if (nofp
->nof_d_w_drw
== 0) {
2598 NP(nofp
->nof_np
, "nfs: open(W,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2600 nofp
->nof_d_w_drw
--;
2603 if (nofp
->nof_w_drw
== 0) {
2604 NP(nofp
->nof_np
, "nfs: open(W,DRW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2609 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
2611 if (nofp
->nof_d_rw_drw
== 0) {
2612 NP(nofp
->nof_np
, "nfs: open(RW,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2614 nofp
->nof_d_rw_drw
--;
2617 if (nofp
->nof_rw_drw
== 0) {
2618 NP(nofp
->nof_np
, "nfs: open(RW,DRW) count underrun, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
2626 /* update the modes */
2627 nofp
->nof_access
= newAccessMode
;
2628 nofp
->nof_deny
= newDenyMode
;
2629 nofp
->nof_opencnt
--;
2630 lck_mtx_unlock(&nofp
->nof_lock
);
2635 * Get the current (delegation, lock, open, default) stateid for this node.
2636 * If node has a delegation, use that stateid.
2637 * If pid has a lock, use the lockowner's stateid.
2638 * Or use the open file's stateid.
2639 * If no open file, use a default stateid of all ones.
2642 nfs_get_stateid(nfsnode_t np
, thread_t thd
, kauth_cred_t cred
, nfs_stateid
*sid
)
2644 struct nfsmount
*nmp
= NFSTONMP(np
);
2645 proc_t p
= thd
? get_bsdthreadtask_info(thd
) : current_proc(); // XXX async I/O requests don't have a thread
2646 struct nfs_open_owner
*noop
= NULL
;
2647 struct nfs_open_file
*nofp
= NULL
;
2648 struct nfs_lock_owner
*nlop
= NULL
;
2649 nfs_stateid
*s
= NULL
;
2651 if (np
->n_openflags
& N_DELEG_MASK
) {
2652 s
= &np
->n_dstateid
;
2655 nlop
= nfs_lock_owner_find(np
, p
, 0);
2657 if (nlop
&& !TAILQ_EMPTY(&nlop
->nlo_locks
)) {
2658 /* we hold locks, use lock stateid */
2659 s
= &nlop
->nlo_stateid
;
2660 } else if (((noop
= nfs_open_owner_find(nmp
, cred
, 0))) &&
2661 (nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 0) == 0) &&
2662 !(nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) &&
2664 /* we (should) have the file open, use open stateid */
2665 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
2666 nfs4_reopen(nofp
, thd
);
2668 if (!(nofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
2669 s
= &nofp
->nof_stateid
;
2675 sid
->seqid
= s
->seqid
;
2676 sid
->other
[0] = s
->other
[0];
2677 sid
->other
[1] = s
->other
[1];
2678 sid
->other
[2] = s
->other
[2];
2680 /* named attributes may not have a stateid for reads, so don't complain for them */
2681 if (!(np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
2682 NP(np
, "nfs_get_stateid: no stateid");
2684 sid
->seqid
= sid
->other
[0] = sid
->other
[1] = sid
->other
[2] = 0xffffffff;
2687 nfs_lock_owner_rele(nlop
);
2690 nfs_open_owner_rele(noop
);
2696 * When we have a delegation, we may be able to perform the OPEN locally.
2697 * Perform the OPEN by checking the delegation ACE and/or checking via ACCESS.
2700 nfs4_open_delegated(
2702 struct nfs_open_file
*nofp
,
2703 uint32_t accessMode
,
2707 int error
= 0, ismember
, readtoo
= 0, authorized
= 0;
2709 struct kauth_acl_eval eval
;
2710 kauth_cred_t cred
= vfs_context_ucred(ctx
);
2712 if (!(accessMode
& NFS_OPEN_SHARE_ACCESS_READ
)) {
2714 * Try to open it for read access too,
2715 * so the buffer cache can read data.
2718 accessMode
|= NFS_OPEN_SHARE_ACCESS_READ
;
2723 if (accessMode
& NFS_OPEN_SHARE_ACCESS_READ
) {
2724 action
|= KAUTH_VNODE_READ_DATA
;
2726 if (accessMode
& NFS_OPEN_SHARE_ACCESS_WRITE
) {
2727 action
|= KAUTH_VNODE_WRITE_DATA
;
2730 /* evaluate ACE (if we have one) */
2731 if (np
->n_dace
.ace_flags
) {
2732 eval
.ae_requested
= action
;
2733 eval
.ae_acl
= &np
->n_dace
;
2735 eval
.ae_options
= 0;
2736 if (np
->n_vattr
.nva_uid
== kauth_cred_getuid(cred
)) {
2737 eval
.ae_options
|= KAUTH_AEVAL_IS_OWNER
;
2739 error
= kauth_cred_ismember_gid(cred
, np
->n_vattr
.nva_gid
, &ismember
);
2740 if (!error
&& ismember
) {
2741 eval
.ae_options
|= KAUTH_AEVAL_IN_GROUP
;
2744 eval
.ae_exp_gall
= KAUTH_VNODE_GENERIC_ALL_BITS
;
2745 eval
.ae_exp_gread
= KAUTH_VNODE_GENERIC_READ_BITS
;
2746 eval
.ae_exp_gwrite
= KAUTH_VNODE_GENERIC_WRITE_BITS
;
2747 eval
.ae_exp_gexec
= KAUTH_VNODE_GENERIC_EXECUTE_BITS
;
2749 error
= kauth_acl_evaluate(cred
, &eval
);
2751 if (!error
&& (eval
.ae_result
== KAUTH_RESULT_ALLOW
)) {
2757 /* need to ask the server via ACCESS */
2758 struct vnop_access_args naa
;
2759 naa
.a_desc
= &vnop_access_desc
;
2760 naa
.a_vp
= NFSTOV(np
);
2761 naa
.a_action
= action
;
2762 naa
.a_context
= ctx
;
2763 if (!(error
= nfs_vnop_access(&naa
))) {
2770 /* try again without the extra read access */
2771 accessMode
&= ~NFS_OPEN_SHARE_ACCESS_READ
;
2775 return error
? error
: EACCES
;
2778 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, 1);
2785 * Open a file with the given access/deny modes.
2787 * If we have a delegation, we may be able to handle the open locally.
2788 * Otherwise, we will always send the open RPC even if this open's mode is
2789 * a subset of all the existing opens. This makes sure that we will always
2790 * be able to do a downgrade to any of the open modes.
2792 * Note: local conflicts should have already been checked in nfs_open_file_find().
2797 struct nfs_open_file
*nofp
,
2798 uint32_t accessMode
,
2802 vnode_t vp
= NFSTOV(np
);
2804 struct componentname cn
;
2805 const char *vname
= NULL
;
2807 char smallname
[128];
2808 char *filename
= NULL
;
2809 int error
= 0, readtoo
= 0;
2812 * We can handle the OPEN ourselves if we have a delegation,
2813 * unless it's a read delegation and the open is asking for
2814 * either write access or deny read. We also don't bother to
2815 * use the delegation if it's being returned.
2817 if (np
->n_openflags
& N_DELEG_MASK
) {
2818 if ((error
= nfs_open_state_set_busy(np
, vfs_context_thread(ctx
)))) {
2821 if ((np
->n_openflags
& N_DELEG_MASK
) && !(np
->n_openflags
& N_DELEG_RETURN
) &&
2822 (((np
->n_openflags
& N_DELEG_MASK
) == N_DELEG_WRITE
) ||
2823 (!(accessMode
& NFS_OPEN_SHARE_ACCESS_WRITE
) && !(denyMode
& NFS_OPEN_SHARE_DENY_READ
)))) {
2824 error
= nfs4_open_delegated(np
, nofp
, accessMode
, denyMode
, ctx
);
2825 nfs_open_state_clear_busy(np
);
2828 nfs_open_state_clear_busy(np
);
2832 * [sigh] We can't trust VFS to get the parent right for named
2833 * attribute nodes. (It likes to reparent the nodes after we've
2834 * created them.) Luckily we can probably get the right parent
2835 * from the n_parent we have stashed away.
2837 if ((np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) &&
2838 (((dvp
= np
->n_parent
)) && (error
= vnode_get(dvp
)))) {
2842 dvp
= vnode_getparent(vp
);
2844 vname
= vnode_getname(vp
);
2845 if (!dvp
|| !vname
) {
2851 filename
= &smallname
[0];
2852 namelen
= snprintf(filename
, sizeof(smallname
), "%s", vname
);
2853 if (namelen
>= sizeof(smallname
)) {
2854 MALLOC(filename
, char *, namelen
+ 1, M_TEMP
, M_WAITOK
);
2859 snprintf(filename
, namelen
+ 1, "%s", vname
);
2861 bzero(&cn
, sizeof(cn
));
2862 cn
.cn_nameptr
= filename
;
2863 cn
.cn_namelen
= namelen
;
2865 if (!(accessMode
& NFS_OPEN_SHARE_ACCESS_READ
)) {
2867 * Try to open it for read access too,
2868 * so the buffer cache can read data.
2871 accessMode
|= NFS_OPEN_SHARE_ACCESS_READ
;
2874 error
= nfs4_open_rpc(nofp
, ctx
, &cn
, NULL
, dvp
, &vp
, NFS_OPEN_NOCREATE
, accessMode
, denyMode
);
2876 if (!nfs_mount_state_error_should_restart(error
) &&
2877 (error
!= EINTR
) && (error
!= ERESTART
) && readtoo
) {
2878 /* try again without the extra read access */
2879 accessMode
&= ~NFS_OPEN_SHARE_ACCESS_READ
;
2885 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, 0);
2887 if (filename
&& (filename
!= &smallname
[0])) {
2888 FREE(filename
, M_TEMP
);
2891 vnode_putname(vname
);
2893 if (dvp
!= NULLVP
) {
2898 #endif /* CONFIG_NFS4 */
2902 struct vnop_mmap_args
/* {
2903 * struct vnodeop_desc *a_desc;
2906 * vfs_context_t a_context;
2909 vfs_context_t ctx
= ap
->a_context
;
2910 vnode_t vp
= ap
->a_vp
;
2911 nfsnode_t np
= VTONFS(vp
);
2912 int error
= 0, accessMode
, denyMode
, delegated
;
2913 struct nfsmount
*nmp
;
2914 struct nfs_open_owner
*noop
= NULL
;
2915 struct nfs_open_file
*nofp
= NULL
;
2918 if (nfs_mount_gone(nmp
)) {
2922 if (!vnode_isreg(vp
) || !(ap
->a_fflags
& (PROT_READ
| PROT_WRITE
))) {
2925 if (np
->n_flag
& NREVOKE
) {
2930 * fflags contains some combination of: PROT_READ, PROT_WRITE
2931 * Since it's not possible to mmap() without having the file open for reading,
2932 * read access is always there (regardless if PROT_READ is not set).
2934 accessMode
= NFS_OPEN_SHARE_ACCESS_READ
;
2935 if (ap
->a_fflags
& PROT_WRITE
) {
2936 accessMode
|= NFS_OPEN_SHARE_ACCESS_WRITE
;
2938 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
2940 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1);
2946 error
= nfs_mount_state_in_use_start(nmp
, NULL
);
2948 nfs_open_owner_rele(noop
);
2951 if (np
->n_flag
& NREVOKE
) {
2953 nfs_mount_state_in_use_end(nmp
, 0);
2954 nfs_open_owner_rele(noop
);
2958 error
= nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 1);
2959 if (error
|| (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
))) {
2960 NP(np
, "nfs_vnop_mmap: no open file for owner, error %d, %d", error
, kauth_cred_getuid(noop
->noo_cred
));
2964 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
2965 nfs_mount_state_in_use_end(nmp
, 0);
2966 error
= nfs4_reopen(nofp
, NULL
);
2974 error
= nfs_open_file_set_busy(nofp
, NULL
);
2982 * The open reference for mmap must mirror an existing open because
2983 * we may need to reclaim it after the file is closed.
2984 * So grab another open count matching the accessMode passed in.
2985 * If we already had an mmap open, prefer read/write without deny mode.
2986 * This means we may have to drop the current mmap open first.
2988 * N.B. We should have an open for the mmap, because, mmap was
2989 * called on an open descriptor, or we've created an open for read
2990 * from reading the first page for execve. However, if we piggy
2991 * backed on an existing NFS_OPEN_SHARE_ACCESS_READ/NFS_OPEN_SHARE_DENY_NONE
2992 * that open may have closed.
2995 if (!(nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_READ
)) {
2996 if (nofp
->nof_flags
& NFS_OPEN_FILE_NEEDCLOSE
) {
2997 /* We shouldn't get here. We've already open the file for execve */
2998 NP(np
, "nfs_vnop_mmap: File already needs close access: 0x%x, cred: %d thread: %lld",
2999 nofp
->nof_access
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
), thread_tid(vfs_context_thread(ctx
)));
3002 * mmapings for execve are just for read. Get out with EPERM if the accessMode is not ACCESS_READ
3003 * or the access would be denied. Other accesses should have an open descriptor for the mapping.
3005 if (accessMode
!= NFS_OPEN_SHARE_ACCESS_READ
|| (accessMode
& nofp
->nof_deny
)) {
3006 /* not asking for just read access -> fail */
3010 /* we don't have the file open, so open it for read access */
3011 if (nmp
->nm_vers
< NFS_VER4
) {
3012 /* NFS v2/v3 opens are always allowed - so just add it. */
3013 nfs_open_file_add_open(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, 0);
3018 error
= nfs4_open(np
, nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, ctx
);
3022 nofp
->nof_flags
|= NFS_OPEN_FILE_NEEDCLOSE
;
3029 /* determine deny mode for open */
3030 if (accessMode
== NFS_OPEN_SHARE_ACCESS_BOTH
) {
3031 if (nofp
->nof_d_rw
|| nofp
->nof_d_rw_dw
|| nofp
->nof_d_rw_drw
) {
3033 if (nofp
->nof_d_rw
) {
3034 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
3035 } else if (nofp
->nof_d_rw_dw
) {
3036 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
3037 } else if (nofp
->nof_d_rw_drw
) {
3038 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
3040 } else if (nofp
->nof_rw
|| nofp
->nof_rw_dw
|| nofp
->nof_rw_drw
) {
3043 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
3044 } else if (nofp
->nof_rw_dw
) {
3045 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
3046 } else if (nofp
->nof_rw_drw
) {
3047 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
3052 } else { /* NFS_OPEN_SHARE_ACCESS_READ */
3053 if (nofp
->nof_d_r
|| nofp
->nof_d_r_dw
|| nofp
->nof_d_r_drw
) {
3055 if (nofp
->nof_d_r
) {
3056 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
3057 } else if (nofp
->nof_d_r_dw
) {
3058 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
3059 } else if (nofp
->nof_d_r_drw
) {
3060 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
3062 } else if (nofp
->nof_r
|| nofp
->nof_r_dw
|| nofp
->nof_r_drw
) {
3065 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
3066 } else if (nofp
->nof_r_dw
) {
3067 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
3068 } else if (nofp
->nof_r_drw
) {
3069 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
3071 } else if (nofp
->nof_d_rw
|| nofp
->nof_d_rw_dw
|| nofp
->nof_d_rw_drw
) {
3073 * This clause and the one below is to co-opt a read write access
3074 * for a read only mmaping. We probably got here in that an
3075 * existing rw open for an executable file already exists.
3078 accessMode
= NFS_OPEN_SHARE_ACCESS_BOTH
;
3079 if (nofp
->nof_d_rw
) {
3080 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
3081 } else if (nofp
->nof_d_rw_dw
) {
3082 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
3083 } else if (nofp
->nof_d_rw_drw
) {
3084 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
3086 } else if (nofp
->nof_rw
|| nofp
->nof_rw_dw
|| nofp
->nof_rw_drw
) {
3088 accessMode
= NFS_OPEN_SHARE_ACCESS_BOTH
;
3090 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
3091 } else if (nofp
->nof_rw_dw
) {
3092 denyMode
= NFS_OPEN_SHARE_DENY_WRITE
;
3093 } else if (nofp
->nof_rw_drw
) {
3094 denyMode
= NFS_OPEN_SHARE_DENY_BOTH
;
3100 if (error
) { /* mmap mode without proper open mode */
3105 * If the existing mmap access is more than the new access OR the
3106 * existing access is the same and the existing deny mode is less,
3107 * then we'll stick with the existing mmap open mode.
3109 if ((nofp
->nof_mmap_access
> accessMode
) ||
3110 ((nofp
->nof_mmap_access
== accessMode
) && (nofp
->nof_mmap_deny
<= denyMode
))) {
3114 /* update mmap open mode */
3115 if (nofp
->nof_mmap_access
) {
3116 error
= nfs_close(np
, nofp
, nofp
->nof_mmap_access
, nofp
->nof_mmap_deny
, ctx
);
3118 if (!nfs_mount_state_error_should_restart(error
)) {
3119 NP(np
, "nfs_vnop_mmap: close of previous mmap mode failed: %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
3121 NP(np
, "nfs_vnop_mmap: update, close error %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
3124 nofp
->nof_mmap_access
= nofp
->nof_mmap_deny
= 0;
3127 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, delegated
);
3128 nofp
->nof_mmap_access
= accessMode
;
3129 nofp
->nof_mmap_deny
= denyMode
;
3133 nfs_open_file_clear_busy(nofp
);
3135 if (nfs_mount_state_in_use_end(nmp
, error
)) {
3140 nfs_open_owner_rele(noop
);
3145 nfs_node_lock_force(np
);
3146 if ((np
->n_flag
& NISMAPPED
) == 0) {
3147 np
->n_flag
|= NISMAPPED
;
3150 nfs_node_unlock(np
);
3152 lck_mtx_lock(&nmp
->nm_lock
);
3153 nmp
->nm_state
&= ~NFSSTA_SQUISHY
;
3154 nmp
->nm_curdeadtimeout
= nmp
->nm_deadtimeout
;
3155 if (nmp
->nm_curdeadtimeout
<= 0) {
3156 nmp
->nm_deadto_start
= 0;
3159 lck_mtx_unlock(&nmp
->nm_lock
);
3169 struct vnop_mnomap_args
/* {
3170 * struct vnodeop_desc *a_desc;
3172 * vfs_context_t a_context;
3175 vfs_context_t ctx
= ap
->a_context
;
3176 vnode_t vp
= ap
->a_vp
;
3177 nfsnode_t np
= VTONFS(vp
);
3178 struct nfsmount
*nmp
;
3179 struct nfs_open_file
*nofp
= NULL
;
3182 int is_mapped_flag
= 0;
3185 if (nfs_mount_gone(nmp
)) {
3189 nfs_node_lock_force(np
);
3190 if (np
->n_flag
& NISMAPPED
) {
3192 np
->n_flag
&= ~NISMAPPED
;
3194 nfs_node_unlock(np
);
3195 if (is_mapped_flag
) {
3196 lck_mtx_lock(&nmp
->nm_lock
);
3197 if (nmp
->nm_mappers
) {
3200 NP(np
, "nfs_vnop_mnomap: removing mmap reference from mount, but mount has no files mmapped");
3202 lck_mtx_unlock(&nmp
->nm_lock
);
3205 /* flush buffers/ubc before we drop the open (in case it's our last open) */
3206 nfs_flush(np
, MNT_WAIT
, vfs_context_thread(ctx
), V_IGNORE_WRITEERR
);
3207 if (UBCINFOEXISTS(vp
) && (size
= ubc_getsize(vp
))) {
3208 ubc_msync(vp
, 0, size
, NULL
, UBC_PUSHALL
| UBC_SYNC
);
3211 /* walk all open files and close all mmap opens */
3213 error
= nfs_mount_state_in_use_start(nmp
, NULL
);
3217 lck_mtx_lock(&np
->n_openlock
);
3218 TAILQ_FOREACH(nofp
, &np
->n_opens
, nof_link
) {
3219 if (!nofp
->nof_mmap_access
) {
3222 lck_mtx_unlock(&np
->n_openlock
);
3224 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
3225 nfs_mount_state_in_use_end(nmp
, 0);
3226 error
= nfs4_reopen(nofp
, NULL
);
3233 error
= nfs_open_file_set_busy(nofp
, NULL
);
3236 lck_mtx_lock(&np
->n_openlock
);
3239 if (nofp
->nof_mmap_access
) {
3240 error
= nfs_close(np
, nofp
, nofp
->nof_mmap_access
, nofp
->nof_mmap_deny
, ctx
);
3241 if (!nfs_mount_state_error_should_restart(error
)) {
3242 if (error
) { /* not a state-operation-restarting error, so just clear the access */
3243 NP(np
, "nfs_vnop_mnomap: close of mmap mode failed: %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
3245 nofp
->nof_mmap_access
= nofp
->nof_mmap_deny
= 0;
3248 NP(np
, "nfs_vnop_mnomap: error %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
3251 nfs_open_file_clear_busy(nofp
);
3252 nfs_mount_state_in_use_end(nmp
, error
);
3255 lck_mtx_unlock(&np
->n_openlock
);
3256 nfs_mount_state_in_use_end(nmp
, error
);
3261 * Search a node's lock owner list for the owner for this process.
3262 * If not found and "alloc" is set, then allocate a new one.
3264 struct nfs_lock_owner
*
3265 nfs_lock_owner_find(nfsnode_t np
, proc_t p
, int alloc
)
3267 pid_t pid
= proc_pid(p
);
3268 struct nfs_lock_owner
*nlop
, *newnlop
= NULL
;
3271 lck_mtx_lock(&np
->n_openlock
);
3272 TAILQ_FOREACH(nlop
, &np
->n_lock_owners
, nlo_link
) {
3273 os_ref_count_t newcount
;
3275 if (nlop
->nlo_pid
!= pid
) {
3278 if (timevalcmp(&nlop
->nlo_pid_start
, &p
->p_start
, ==)) {
3281 /* stale lock owner... reuse it if we can */
3282 if (os_ref_get_count(&nlop
->nlo_refcnt
)) {
3283 TAILQ_REMOVE(&np
->n_lock_owners
, nlop
, nlo_link
);
3284 nlop
->nlo_flags
&= ~NFS_LOCK_OWNER_LINK
;
3285 newcount
= os_ref_release_locked(&nlop
->nlo_refcnt
);
3286 lck_mtx_unlock(&np
->n_openlock
);
3289 nlop
->nlo_pid_start
= p
->p_start
;
3290 nlop
->nlo_seqid
= 0;
3291 nlop
->nlo_stategenid
= 0;
3295 if (!nlop
&& !newnlop
&& alloc
) {
3296 lck_mtx_unlock(&np
->n_openlock
);
3297 MALLOC(newnlop
, struct nfs_lock_owner
*, sizeof(struct nfs_lock_owner
), M_TEMP
, M_WAITOK
);
3301 bzero(newnlop
, sizeof(*newnlop
));
3302 lck_mtx_init(&newnlop
->nlo_lock
, nfs_open_grp
, LCK_ATTR_NULL
);
3303 newnlop
->nlo_pid
= pid
;
3304 newnlop
->nlo_pid_start
= p
->p_start
;
3305 newnlop
->nlo_name
= OSAddAtomic(1, &nfs_lock_owner_seqnum
);
3306 TAILQ_INIT(&newnlop
->nlo_locks
);
3309 if (!nlop
&& newnlop
) {
3310 newnlop
->nlo_flags
|= NFS_LOCK_OWNER_LINK
;
3311 os_ref_init(&newnlop
->nlo_refcnt
, NULL
);
3312 TAILQ_INSERT_HEAD(&np
->n_lock_owners
, newnlop
, nlo_link
);
3315 lck_mtx_unlock(&np
->n_openlock
);
3317 if (newnlop
&& (nlop
!= newnlop
)) {
3318 nfs_lock_owner_destroy(newnlop
);
3322 nfs_lock_owner_ref(nlop
);
3329 * destroy a lock owner that's no longer needed
3332 nfs_lock_owner_destroy(struct nfs_lock_owner
*nlop
)
3334 if (nlop
->nlo_open_owner
) {
3335 nfs_open_owner_rele(nlop
->nlo_open_owner
);
3336 nlop
->nlo_open_owner
= NULL
;
3338 lck_mtx_destroy(&nlop
->nlo_lock
, nfs_open_grp
);
3343 * acquire a reference count on a lock owner
3346 nfs_lock_owner_ref(struct nfs_lock_owner
*nlop
)
3348 lck_mtx_lock(&nlop
->nlo_lock
);
3349 os_ref_retain_locked(&nlop
->nlo_refcnt
);
3350 lck_mtx_unlock(&nlop
->nlo_lock
);
3354 * drop a reference count on a lock owner and destroy it if
3355 * it is no longer referenced and no longer on the mount's list.
3358 nfs_lock_owner_rele(struct nfs_lock_owner
*nlop
)
3360 os_ref_count_t newcount
;
3362 lck_mtx_lock(&nlop
->nlo_lock
);
3363 if (os_ref_get_count(&nlop
->nlo_refcnt
) < 1) {
3364 panic("nfs_lock_owner_rele: no refcnt");
3366 newcount
= os_ref_release_locked(&nlop
->nlo_refcnt
);
3367 if (!newcount
&& (nlop
->nlo_flags
& NFS_LOCK_OWNER_BUSY
)) {
3368 panic("nfs_lock_owner_rele: busy");
3370 /* XXX we may potentially want to clean up idle/unused lock owner structures */
3371 if (newcount
|| (nlop
->nlo_flags
& NFS_LOCK_OWNER_LINK
)) {
3372 lck_mtx_unlock(&nlop
->nlo_lock
);
3375 /* owner is no longer referenced or linked to mount, so destroy it */
3376 lck_mtx_unlock(&nlop
->nlo_lock
);
3377 nfs_lock_owner_destroy(nlop
);
3381 * Mark a lock owner as busy because we are about to
3382 * start an operation that uses and updates lock owner state.
3385 nfs_lock_owner_set_busy(struct nfs_lock_owner
*nlop
, thread_t thd
)
3387 struct nfsmount
*nmp
;
3388 struct timespec ts
= { .tv_sec
= 2, .tv_nsec
= 0 };
3389 int error
= 0, slpflag
;
3391 nmp
= nlop
->nlo_open_owner
->noo_mount
;
3392 if (nfs_mount_gone(nmp
)) {
3395 slpflag
= (NMFLAG(nmp
, INTR
) && thd
) ? PCATCH
: 0;
3397 lck_mtx_lock(&nlop
->nlo_lock
);
3398 while (nlop
->nlo_flags
& NFS_LOCK_OWNER_BUSY
) {
3399 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0))) {
3402 nlop
->nlo_flags
|= NFS_LOCK_OWNER_WANT
;
3403 msleep(nlop
, &nlop
->nlo_lock
, slpflag
, "nfs_lock_owner_set_busy", &ts
);
3407 nlop
->nlo_flags
|= NFS_LOCK_OWNER_BUSY
;
3409 lck_mtx_unlock(&nlop
->nlo_lock
);
3415 * Clear the busy flag on a lock owner and wake up anyone waiting
3419 nfs_lock_owner_clear_busy(struct nfs_lock_owner
*nlop
)
3423 lck_mtx_lock(&nlop
->nlo_lock
);
3424 if (!(nlop
->nlo_flags
& NFS_LOCK_OWNER_BUSY
)) {
3425 panic("nfs_lock_owner_clear_busy");
3427 wanted
= (nlop
->nlo_flags
& NFS_LOCK_OWNER_WANT
);
3428 nlop
->nlo_flags
&= ~(NFS_LOCK_OWNER_BUSY
| NFS_LOCK_OWNER_WANT
);
3429 lck_mtx_unlock(&nlop
->nlo_lock
);
3436 * Insert a held lock into a lock owner's sorted list.
3437 * (flock locks are always inserted at the head the list)
3440 nfs_lock_owner_insert_held_lock(struct nfs_lock_owner
*nlop
, struct nfs_file_lock
*newnflp
)
3442 struct nfs_file_lock
*nflp
;
3444 /* insert new lock in lock owner's held lock list */
3445 lck_mtx_lock(&nlop
->nlo_lock
);
3446 if ((newnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_FLOCK
) {
3447 TAILQ_INSERT_HEAD(&nlop
->nlo_locks
, newnflp
, nfl_lolink
);
3449 TAILQ_FOREACH(nflp
, &nlop
->nlo_locks
, nfl_lolink
) {
3450 if (newnflp
->nfl_start
< nflp
->nfl_start
) {
3455 TAILQ_INSERT_BEFORE(nflp
, newnflp
, nfl_lolink
);
3457 TAILQ_INSERT_TAIL(&nlop
->nlo_locks
, newnflp
, nfl_lolink
);
3460 lck_mtx_unlock(&nlop
->nlo_lock
);
3464 * Get a file lock structure for this lock owner.
3466 struct nfs_file_lock
*
3467 nfs_file_lock_alloc(struct nfs_lock_owner
*nlop
)
3469 struct nfs_file_lock
*nflp
= NULL
;
3471 lck_mtx_lock(&nlop
->nlo_lock
);
3472 if (!nlop
->nlo_alock
.nfl_owner
) {
3473 nflp
= &nlop
->nlo_alock
;
3474 nflp
->nfl_owner
= nlop
;
3476 lck_mtx_unlock(&nlop
->nlo_lock
);
3478 MALLOC(nflp
, struct nfs_file_lock
*, sizeof(struct nfs_file_lock
), M_TEMP
, M_WAITOK
);
3482 bzero(nflp
, sizeof(*nflp
));
3483 nflp
->nfl_flags
|= NFS_FILE_LOCK_ALLOC
;
3484 nflp
->nfl_owner
= nlop
;
3486 nfs_lock_owner_ref(nlop
);
3491 * destroy the given NFS file lock structure
3494 nfs_file_lock_destroy(struct nfs_file_lock
*nflp
)
3496 struct nfs_lock_owner
*nlop
= nflp
->nfl_owner
;
3498 if (nflp
->nfl_flags
& NFS_FILE_LOCK_ALLOC
) {
3499 nflp
->nfl_owner
= NULL
;
3502 lck_mtx_lock(&nlop
->nlo_lock
);
3503 bzero(nflp
, sizeof(*nflp
));
3504 lck_mtx_unlock(&nlop
->nlo_lock
);
3506 nfs_lock_owner_rele(nlop
);
3510 * Check if one file lock conflicts with another.
3511 * (nflp1 is the new lock. nflp2 is the existing lock.)
3514 nfs_file_lock_conflict(struct nfs_file_lock
*nflp1
, struct nfs_file_lock
*nflp2
, int *willsplit
)
3516 /* no conflict if lock is dead */
3517 if ((nflp1
->nfl_flags
& NFS_FILE_LOCK_DEAD
) || (nflp2
->nfl_flags
& NFS_FILE_LOCK_DEAD
)) {
3520 /* no conflict if it's ours - unless the lock style doesn't match */
3521 if ((nflp1
->nfl_owner
== nflp2
->nfl_owner
) &&
3522 ((nflp1
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == (nflp2
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
))) {
3523 if (willsplit
&& (nflp1
->nfl_type
!= nflp2
->nfl_type
) &&
3524 (nflp1
->nfl_start
> nflp2
->nfl_start
) &&
3525 (nflp1
->nfl_end
< nflp2
->nfl_end
)) {
3530 /* no conflict if ranges don't overlap */
3531 if ((nflp1
->nfl_start
> nflp2
->nfl_end
) || (nflp1
->nfl_end
< nflp2
->nfl_start
)) {
3534 /* no conflict if neither lock is exclusive */
3535 if ((nflp1
->nfl_type
!= F_WRLCK
) && (nflp2
->nfl_type
!= F_WRLCK
)) {
3544 * Send an NFSv4 LOCK RPC to the server.
3549 struct nfs_open_file
*nofp
,
3550 struct nfs_file_lock
*nflp
,
3556 struct nfs_lock_owner
*nlop
= nflp
->nfl_owner
;
3557 struct nfsmount
*nmp
;
3558 struct nfsm_chain nmreq
, nmrep
;
3561 int error
= 0, lockerror
= ENOENT
, newlocker
, numops
, status
;
3562 struct nfsreq_secinfo_args si
;
3565 if (nfs_mount_gone(nmp
)) {
3568 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
3572 newlocker
= (nlop
->nlo_stategenid
!= nmp
->nm_stategenid
);
3573 locktype
= (nflp
->nfl_flags
& NFS_FILE_LOCK_WAIT
) ?
3574 ((nflp
->nfl_type
== F_WRLCK
) ?
3575 NFS_LOCK_TYPE_WRITEW
:
3576 NFS_LOCK_TYPE_READW
) :
3577 ((nflp
->nfl_type
== F_WRLCK
) ?
3578 NFS_LOCK_TYPE_WRITE
:
3579 NFS_LOCK_TYPE_READ
);
3581 error
= nfs_open_file_set_busy(nofp
, thd
);
3585 error
= nfs_open_owner_set_busy(nofp
->nof_owner
, thd
);
3587 nfs_open_file_clear_busy(nofp
);
3590 if (!nlop
->nlo_open_owner
) {
3591 nfs_open_owner_ref(nofp
->nof_owner
);
3592 nlop
->nlo_open_owner
= nofp
->nof_owner
;
3595 error
= nfs_lock_owner_set_busy(nlop
, thd
);
3598 nfs_open_owner_clear_busy(nofp
->nof_owner
);
3599 nfs_open_file_clear_busy(nofp
);
3604 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
3605 nfsm_chain_null(&nmreq
);
3606 nfsm_chain_null(&nmrep
);
3608 // PUTFH, GETATTR, LOCK
3610 nfsm_chain_build_alloc_init(error
, &nmreq
, 33 * NFSX_UNSIGNED
);
3611 nfsm_chain_add_compound_header(error
, &nmreq
, "lock", nmp
->nm_minor_vers
, numops
);
3613 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
3614 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
3616 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
3617 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
3619 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOCK
);
3620 nfsm_chain_add_32(error
, &nmreq
, locktype
);
3621 nfsm_chain_add_32(error
, &nmreq
, reclaim
);
3622 nfsm_chain_add_64(error
, &nmreq
, nflp
->nfl_start
);
3623 nfsm_chain_add_64(error
, &nmreq
, NFS_LOCK_LENGTH(nflp
->nfl_start
, nflp
->nfl_end
));
3624 nfsm_chain_add_32(error
, &nmreq
, newlocker
);
3626 nfsm_chain_add_32(error
, &nmreq
, nofp
->nof_owner
->noo_seqid
);
3627 nfsm_chain_add_stateid(error
, &nmreq
, &nofp
->nof_stateid
);
3628 nfsm_chain_add_32(error
, &nmreq
, nlop
->nlo_seqid
);
3629 nfsm_chain_add_lock_owner4(error
, &nmreq
, nmp
, nlop
);
3631 nfsm_chain_add_stateid(error
, &nmreq
, &nlop
->nlo_stateid
);
3632 nfsm_chain_add_32(error
, &nmreq
, nlop
->nlo_seqid
);
3634 nfsm_chain_build_done(error
, &nmreq
);
3635 nfsm_assert(error
, (numops
== 0), EPROTO
);
3638 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, flags
| R_NOINTR
, &nmrep
, &xid
, &status
);
3640 if ((lockerror
= nfs_node_lock(np
))) {
3643 nfsm_chain_skip_tag(error
, &nmrep
);
3644 nfsm_chain_get_32(error
, &nmrep
, numops
);
3645 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
3647 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
3648 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, &xid
);
3650 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOCK
);
3651 nfs_owner_seqid_increment(newlocker
? nofp
->nof_owner
: NULL
, nlop
, error
);
3652 nfsm_chain_get_stateid(error
, &nmrep
, &nlop
->nlo_stateid
);
3654 /* Update the lock owner's stategenid once it appears the server has state for it. */
3655 /* We determine this by noting the request was successful (we got a stateid). */
3656 if (newlocker
&& !error
) {
3657 nlop
->nlo_stategenid
= nmp
->nm_stategenid
;
3661 nfs_node_unlock(np
);
3663 nfs_lock_owner_clear_busy(nlop
);
3665 nfs_open_owner_clear_busy(nofp
->nof_owner
);
3666 nfs_open_file_clear_busy(nofp
);
3668 nfsm_chain_cleanup(&nmreq
);
3669 nfsm_chain_cleanup(&nmrep
);
3674 * Send an NFSv4 LOCKU RPC to the server.
3679 struct nfs_lock_owner
*nlop
,
3687 struct nfsmount
*nmp
;
3688 struct nfsm_chain nmreq
, nmrep
;
3690 int error
= 0, lockerror
= ENOENT
, numops
, status
;
3691 struct nfsreq_secinfo_args si
;
3694 if (nfs_mount_gone(nmp
)) {
3697 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
3701 error
= nfs_lock_owner_set_busy(nlop
, NULL
);
3706 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
3707 nfsm_chain_null(&nmreq
);
3708 nfsm_chain_null(&nmrep
);
3710 // PUTFH, GETATTR, LOCKU
3712 nfsm_chain_build_alloc_init(error
, &nmreq
, 26 * NFSX_UNSIGNED
);
3713 nfsm_chain_add_compound_header(error
, &nmreq
, "unlock", nmp
->nm_minor_vers
, numops
);
3715 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
3716 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
3718 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
3719 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
3721 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOCKU
);
3722 nfsm_chain_add_32(error
, &nmreq
, (type
== F_WRLCK
) ? NFS_LOCK_TYPE_WRITE
: NFS_LOCK_TYPE_READ
);
3723 nfsm_chain_add_32(error
, &nmreq
, nlop
->nlo_seqid
);
3724 nfsm_chain_add_stateid(error
, &nmreq
, &nlop
->nlo_stateid
);
3725 nfsm_chain_add_64(error
, &nmreq
, start
);
3726 nfsm_chain_add_64(error
, &nmreq
, NFS_LOCK_LENGTH(start
, end
));
3727 nfsm_chain_build_done(error
, &nmreq
);
3728 nfsm_assert(error
, (numops
== 0), EPROTO
);
3731 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, flags
| R_NOINTR
, &nmrep
, &xid
, &status
);
3733 if ((lockerror
= nfs_node_lock(np
))) {
3736 nfsm_chain_skip_tag(error
, &nmrep
);
3737 nfsm_chain_get_32(error
, &nmrep
, numops
);
3738 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
3740 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
3741 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, &xid
);
3743 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOCKU
);
3744 nfs_owner_seqid_increment(NULL
, nlop
, error
);
3745 nfsm_chain_get_stateid(error
, &nmrep
, &nlop
->nlo_stateid
);
3748 nfs_node_unlock(np
);
3750 nfs_lock_owner_clear_busy(nlop
);
3751 nfsm_chain_cleanup(&nmreq
);
3752 nfsm_chain_cleanup(&nmrep
);
3757 * Send an NFSv4 LOCKT RPC to the server.
3762 struct nfs_lock_owner
*nlop
,
3768 struct nfsmount
*nmp
;
3769 struct nfsm_chain nmreq
, nmrep
;
3770 uint64_t xid
, val64
= 0;
3772 int error
= 0, lockerror
, numops
, status
;
3773 struct nfsreq_secinfo_args si
;
3776 if (nfs_mount_gone(nmp
)) {
3779 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
3784 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
3785 nfsm_chain_null(&nmreq
);
3786 nfsm_chain_null(&nmrep
);
3788 // PUTFH, GETATTR, LOCKT
3790 nfsm_chain_build_alloc_init(error
, &nmreq
, 26 * NFSX_UNSIGNED
);
3791 nfsm_chain_add_compound_header(error
, &nmreq
, "locktest", nmp
->nm_minor_vers
, numops
);
3793 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
3794 nfsm_chain_add_fh(error
, &nmreq
, NFS_VER4
, np
->n_fhp
, np
->n_fhsize
);
3796 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
3797 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
3799 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOCKT
);
3800 nfsm_chain_add_32(error
, &nmreq
, (fl
->l_type
== F_WRLCK
) ? NFS_LOCK_TYPE_WRITE
: NFS_LOCK_TYPE_READ
);
3801 nfsm_chain_add_64(error
, &nmreq
, start
);
3802 nfsm_chain_add_64(error
, &nmreq
, NFS_LOCK_LENGTH(start
, end
));
3803 nfsm_chain_add_lock_owner4(error
, &nmreq
, nmp
, nlop
);
3804 nfsm_chain_build_done(error
, &nmreq
);
3805 nfsm_assert(error
, (numops
== 0), EPROTO
);
3808 error
= nfs_request(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
3810 if ((lockerror
= nfs_node_lock(np
))) {
3813 nfsm_chain_skip_tag(error
, &nmrep
);
3814 nfsm_chain_get_32(error
, &nmrep
, numops
);
3815 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
3817 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
3818 nfsm_chain_loadattr(error
, &nmrep
, np
, NFS_VER4
, &xid
);
3820 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOCKT
);
3821 if (error
== NFSERR_DENIED
) {
3823 nfsm_chain_get_64(error
, &nmrep
, fl
->l_start
);
3824 nfsm_chain_get_64(error
, &nmrep
, val64
);
3825 fl
->l_len
= (val64
== UINT64_MAX
) ? 0 : val64
;
3826 nfsm_chain_get_32(error
, &nmrep
, val
);
3827 fl
->l_type
= (val
== NFS_LOCK_TYPE_WRITE
) ? F_WRLCK
: F_RDLCK
;
3829 fl
->l_whence
= SEEK_SET
;
3830 } else if (!error
) {
3831 fl
->l_type
= F_UNLCK
;
3835 nfs_node_unlock(np
);
3837 nfsm_chain_cleanup(&nmreq
);
3838 nfsm_chain_cleanup(&nmrep
);
3841 #endif /* CONFIG_NFS4 */
3844 * Check for any conflicts with the given lock.
3846 * Checking for a lock doesn't require the file to be opened.
3847 * So we skip all the open owner, open file, lock owner work
3848 * and just check for a conflicting lock.
3851 nfs_advlock_getlock(
3853 struct nfs_lock_owner
*nlop
,
3859 struct nfsmount
*nmp
;
3860 struct nfs_file_lock
*nflp
;
3861 int error
= 0, answered
= 0;
3864 if (nfs_mount_gone(nmp
)) {
3869 if ((error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
)))) {
3873 lck_mtx_lock(&np
->n_openlock
);
3874 /* scan currently held locks for conflict */
3875 TAILQ_FOREACH(nflp
, &np
->n_locks
, nfl_link
) {
3876 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
| NFS_FILE_LOCK_DEAD
)) {
3879 if ((start
<= nflp
->nfl_end
) && (end
>= nflp
->nfl_start
) &&
3880 ((fl
->l_type
== F_WRLCK
) || (nflp
->nfl_type
== F_WRLCK
))) {
3885 /* found a conflicting lock */
3886 fl
->l_type
= nflp
->nfl_type
;
3887 fl
->l_pid
= (nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_FLOCK
) ? -1 : nflp
->nfl_owner
->nlo_pid
;
3888 fl
->l_start
= nflp
->nfl_start
;
3889 fl
->l_len
= NFS_FLOCK_LENGTH(nflp
->nfl_start
, nflp
->nfl_end
);
3890 fl
->l_whence
= SEEK_SET
;
3892 } else if ((np
->n_openflags
& N_DELEG_WRITE
) && !(np
->n_openflags
& N_DELEG_RETURN
)) {
3894 * If we have a write delegation, we know there can't be other
3895 * locks on the server. So the answer is no conflicting lock found.
3897 fl
->l_type
= F_UNLCK
;
3900 lck_mtx_unlock(&np
->n_openlock
);
3902 nfs_mount_state_in_use_end(nmp
, 0);
3906 /* no conflict found locally, so ask the server */
3907 error
= nmp
->nm_funcs
->nf_getlock_rpc(np
, nlop
, fl
, start
, end
, ctx
);
3909 if (nfs_mount_state_in_use_end(nmp
, error
)) {
3916 * Acquire a file lock for the given range.
3918 * Add the lock (request) to the lock queue.
3919 * Scan the lock queue for any conflicting locks.
3920 * If a conflict is found, block or return an error.
3921 * Once end of queue is reached, send request to the server.
3922 * If the server grants the lock, scan the lock queue and
3923 * update any existing locks. Then (optionally) scan the
3924 * queue again to coalesce any locks adjacent to the new one.
3927 nfs_advlock_setlock(
3929 struct nfs_open_file
*nofp
,
3930 struct nfs_lock_owner
*nlop
,
3938 struct nfsmount
*nmp
;
3939 struct nfs_file_lock
*newnflp
, *nflp
, *nflp2
= NULL
, *nextnflp
, *flocknflp
= NULL
;
3940 struct nfs_file_lock
*coalnflp
;
3941 int error
= 0, error2
, willsplit
= 0, delay
, slpflag
, busy
= 0, inuse
= 0, restart
, inqueue
= 0;
3942 struct timespec ts
= { .tv_sec
= 1, .tv_nsec
= 0 };
3945 if (nfs_mount_gone(nmp
)) {
3948 slpflag
= NMFLAG(nmp
, INTR
) ? PCATCH
: 0;
3950 if ((type
!= F_RDLCK
) && (type
!= F_WRLCK
)) {
3954 /* allocate a new lock */
3955 newnflp
= nfs_file_lock_alloc(nlop
);
3959 newnflp
->nfl_start
= start
;
3960 newnflp
->nfl_end
= end
;
3961 newnflp
->nfl_type
= type
;
3962 if (op
== F_SETLKW
) {
3963 newnflp
->nfl_flags
|= NFS_FILE_LOCK_WAIT
;
3965 newnflp
->nfl_flags
|= style
;
3966 newnflp
->nfl_flags
|= NFS_FILE_LOCK_BLOCKED
;
3968 if ((style
== NFS_FILE_LOCK_STYLE_FLOCK
) && (type
== F_WRLCK
)) {
3970 * For exclusive flock-style locks, if we block waiting for the
3971 * lock, we need to first release any currently held shared
3972 * flock-style lock. So, the first thing we do is check if we
3973 * have a shared flock-style lock.
3975 nflp
= TAILQ_FIRST(&nlop
->nlo_locks
);
3976 if (nflp
&& ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != NFS_FILE_LOCK_STYLE_FLOCK
)) {
3979 if (nflp
&& (nflp
->nfl_type
!= F_RDLCK
)) {
3987 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
3992 if (np
->n_flag
& NREVOKE
) {
3994 nfs_mount_state_in_use_end(nmp
, 0);
3999 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
4000 nfs_mount_state_in_use_end(nmp
, 0);
4002 error
= nfs4_reopen(nofp
, vfs_context_thread(ctx
));
4010 lck_mtx_lock(&np
->n_openlock
);
4012 /* insert new lock at beginning of list */
4013 TAILQ_INSERT_HEAD(&np
->n_locks
, newnflp
, nfl_link
);
4017 /* scan current list of locks (held and pending) for conflicts */
4018 for (nflp
= TAILQ_NEXT(newnflp
, nfl_link
); nflp
; nflp
= nextnflp
) {
4019 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
4020 if (!nfs_file_lock_conflict(newnflp
, nflp
, &willsplit
)) {
4024 if (!(newnflp
->nfl_flags
& NFS_FILE_LOCK_WAIT
)) {
4028 /* Block until this lock is no longer held. */
4029 if (nflp
->nfl_blockcnt
== UINT_MAX
) {
4033 nflp
->nfl_blockcnt
++;
4036 /* release any currently held shared lock before sleeping */
4037 lck_mtx_unlock(&np
->n_openlock
);
4038 nfs_mount_state_in_use_end(nmp
, 0);
4040 error
= nfs_advlock_unlock(np
, nofp
, nlop
, 0, UINT64_MAX
, NFS_FILE_LOCK_STYLE_FLOCK
, ctx
);
4043 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
4046 lck_mtx_lock(&np
->n_openlock
);
4050 lck_mtx_lock(&np
->n_openlock
);
4051 /* no need to block/sleep if the conflict is gone */
4052 if (!nfs_file_lock_conflict(newnflp
, nflp
, NULL
)) {
4056 msleep(nflp
, &np
->n_openlock
, slpflag
, "nfs_advlock_setlock_blocked", &ts
);
4058 error
= nfs_sigintr(NFSTONMP(np
), NULL
, vfs_context_thread(ctx
), 0);
4059 if (!error
&& (nmp
->nm_state
& NFSSTA_RECOVER
)) {
4060 /* looks like we have a recover pending... restart */
4062 lck_mtx_unlock(&np
->n_openlock
);
4063 nfs_mount_state_in_use_end(nmp
, 0);
4065 lck_mtx_lock(&np
->n_openlock
);
4068 if (!error
&& (np
->n_flag
& NREVOKE
)) {
4071 } while (!error
&& nfs_file_lock_conflict(newnflp
, nflp
, NULL
));
4072 nflp
->nfl_blockcnt
--;
4073 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
) && !nflp
->nfl_blockcnt
) {
4074 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
4075 nfs_file_lock_destroy(nflp
);
4077 if (error
|| restart
) {
4080 /* We have released n_openlock and we can't trust that nextnflp is still valid. */
4081 /* So, start this lock-scanning loop over from where it started. */
4082 nextnflp
= TAILQ_NEXT(newnflp
, nfl_link
);
4084 lck_mtx_unlock(&np
->n_openlock
);
4094 * It looks like this operation is splitting a lock.
4095 * We allocate a new lock now so we don't have to worry
4096 * about the allocation failing after we've updated some state.
4098 nflp2
= nfs_file_lock_alloc(nlop
);
4105 /* once scan for local conflicts is clear, send request to server */
4106 if ((error
= nfs_open_state_set_busy(np
, vfs_context_thread(ctx
)))) {
4113 /* do we have a delegation? (that we're not returning?) */
4114 if ((np
->n_openflags
& N_DELEG_MASK
) && !(np
->n_openflags
& N_DELEG_RETURN
)) {
4115 if (np
->n_openflags
& N_DELEG_WRITE
) {
4116 /* with a write delegation, just take the lock delegated */
4117 newnflp
->nfl_flags
|= NFS_FILE_LOCK_DELEGATED
;
4119 /* make sure the lock owner knows its open owner */
4120 if (!nlop
->nlo_open_owner
) {
4121 nfs_open_owner_ref(nofp
->nof_owner
);
4122 nlop
->nlo_open_owner
= nofp
->nof_owner
;
4127 * If we don't have any non-delegated opens but we do have
4128 * delegated opens, then we need to first claim the delegated
4129 * opens so that the lock request on the server can be associated
4130 * with an open it knows about.
4132 if ((!nofp
->nof_rw_drw
&& !nofp
->nof_w_drw
&& !nofp
->nof_r_drw
&&
4133 !nofp
->nof_rw_dw
&& !nofp
->nof_w_dw
&& !nofp
->nof_r_dw
&&
4134 !nofp
->nof_rw
&& !nofp
->nof_w
&& !nofp
->nof_r
) &&
4135 (nofp
->nof_d_rw_drw
|| nofp
->nof_d_w_drw
|| nofp
->nof_d_r_drw
||
4136 nofp
->nof_d_rw_dw
|| nofp
->nof_d_w_dw
|| nofp
->nof_d_r_dw
||
4137 nofp
->nof_d_rw
|| nofp
->nof_d_w
|| nofp
->nof_d_r
)) {
4138 error
= nfs4_claim_delegated_state_for_open_file(nofp
, 0);
4146 if (np
->n_flag
& NREVOKE
) {
4150 error
= nmp
->nm_funcs
->nf_setlock_rpc(np
, nofp
, newnflp
, 0, 0, vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4152 if (!error
|| ((error
!= NFSERR_DENIED
) && (error
!= NFSERR_GRACE
))) {
4155 /* request was denied due to either conflict or grace period */
4156 if ((error
== NFSERR_DENIED
) && !(newnflp
->nfl_flags
& NFS_FILE_LOCK_WAIT
)) {
4161 /* release any currently held shared lock before sleeping */
4162 nfs_open_state_clear_busy(np
);
4164 nfs_mount_state_in_use_end(nmp
, 0);
4166 error2
= nfs_advlock_unlock(np
, nofp
, nlop
, 0, UINT64_MAX
, NFS_FILE_LOCK_STYLE_FLOCK
, ctx
);
4169 error2
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
4173 error2
= nfs_open_state_set_busy(np
, vfs_context_thread(ctx
));
4182 * Wait a little bit and send the request again.
4183 * Except for retries of blocked v2/v3 request where we've already waited a bit.
4185 if ((nmp
->nm_vers
>= NFS_VER4
) || (error
== NFSERR_GRACE
)) {
4186 if (error
== NFSERR_GRACE
) {
4192 tsleep(newnflp
, slpflag
, "nfs_advlock_setlock_delay", delay
* (hz
/ 2));
4195 error
= nfs_sigintr(NFSTONMP(np
), NULL
, vfs_context_thread(ctx
), 0);
4196 if (!error
&& (nmp
->nm_state
& NFSSTA_RECOVER
)) {
4197 /* looks like we have a recover pending... restart */
4198 nfs_open_state_clear_busy(np
);
4200 nfs_mount_state_in_use_end(nmp
, 0);
4204 if (!error
&& (np
->n_flag
& NREVOKE
)) {
4210 if (nfs_mount_state_error_should_restart(error
)) {
4211 /* looks like we need to restart this operation */
4213 nfs_open_state_clear_busy(np
);
4217 nfs_mount_state_in_use_end(nmp
, error
);
4222 lck_mtx_lock(&np
->n_openlock
);
4223 newnflp
->nfl_flags
&= ~NFS_FILE_LOCK_BLOCKED
;
4225 newnflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
4226 if (newnflp
->nfl_blockcnt
) {
4227 /* wake up anyone blocked on this lock */
4230 /* remove newnflp from lock list and destroy */
4232 TAILQ_REMOVE(&np
->n_locks
, newnflp
, nfl_link
);
4234 nfs_file_lock_destroy(newnflp
);
4236 lck_mtx_unlock(&np
->n_openlock
);
4238 nfs_open_state_clear_busy(np
);
4241 nfs_mount_state_in_use_end(nmp
, error
);
4244 nfs_file_lock_destroy(nflp2
);
4249 /* server granted the lock */
4252 * Scan for locks to update.
4254 * Locks completely covered are killed.
4255 * At most two locks may need to be clipped.
4256 * It's possible that a single lock may need to be split.
4258 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
4259 if (nflp
== newnflp
) {
4262 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
| NFS_FILE_LOCK_DEAD
)) {
4265 if (nflp
->nfl_owner
!= nlop
) {
4268 if ((newnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != (nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
)) {
4271 if ((newnflp
->nfl_start
> nflp
->nfl_end
) || (newnflp
->nfl_end
< nflp
->nfl_start
)) {
4274 /* here's one to update */
4275 if ((newnflp
->nfl_start
<= nflp
->nfl_start
) && (newnflp
->nfl_end
>= nflp
->nfl_end
)) {
4276 /* The entire lock is being replaced. */
4277 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
4278 lck_mtx_lock(&nlop
->nlo_lock
);
4279 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
4280 lck_mtx_unlock(&nlop
->nlo_lock
);
4281 /* lock will be destroyed below, if no waiters */
4282 } else if ((newnflp
->nfl_start
> nflp
->nfl_start
) && (newnflp
->nfl_end
< nflp
->nfl_end
)) {
4283 /* We're replacing a range in the middle of a lock. */
4284 /* The current lock will be split into two locks. */
4285 /* Update locks and insert new lock after current lock. */
4286 nflp2
->nfl_flags
|= (nflp
->nfl_flags
& (NFS_FILE_LOCK_STYLE_MASK
| NFS_FILE_LOCK_DELEGATED
));
4287 nflp2
->nfl_type
= nflp
->nfl_type
;
4288 nflp2
->nfl_start
= newnflp
->nfl_end
+ 1;
4289 nflp2
->nfl_end
= nflp
->nfl_end
;
4290 nflp
->nfl_end
= newnflp
->nfl_start
- 1;
4291 TAILQ_INSERT_AFTER(&np
->n_locks
, nflp
, nflp2
, nfl_link
);
4292 nfs_lock_owner_insert_held_lock(nlop
, nflp2
);
4295 } else if (newnflp
->nfl_start
> nflp
->nfl_start
) {
4296 /* We're replacing the end of a lock. */
4297 nflp
->nfl_end
= newnflp
->nfl_start
- 1;
4298 } else if (newnflp
->nfl_end
< nflp
->nfl_end
) {
4299 /* We're replacing the start of a lock. */
4300 nflp
->nfl_start
= newnflp
->nfl_end
+ 1;
4302 if (nflp
->nfl_blockcnt
) {
4303 /* wake up anyone blocked on this lock */
4305 } else if (nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
) {
4306 /* remove nflp from lock list and destroy */
4307 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
4308 nfs_file_lock_destroy(nflp
);
4312 nfs_lock_owner_insert_held_lock(nlop
, newnflp
);
4315 * POSIX locks should be coalesced when possible.
4317 if ((style
== NFS_FILE_LOCK_STYLE_POSIX
) && (nofp
->nof_flags
& NFS_OPEN_FILE_POSIXLOCK
)) {
4319 * Walk through the lock queue and check each of our held locks with
4320 * the previous and next locks in the lock owner's "held lock list".
4321 * If the two locks can be coalesced, we merge the current lock into
4322 * the other (previous or next) lock. Merging this way makes sure that
4323 * lock ranges are always merged forward in the lock queue. This is
4324 * important because anyone blocked on the lock being "merged away"
4325 * will still need to block on that range and it will simply continue
4326 * checking locks that are further down the list.
4328 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
4329 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
| NFS_FILE_LOCK_DEAD
)) {
4332 if (nflp
->nfl_owner
!= nlop
) {
4335 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != NFS_FILE_LOCK_STYLE_POSIX
) {
4338 if (((coalnflp
= TAILQ_PREV(nflp
, nfs_file_lock_queue
, nfl_lolink
))) &&
4339 ((coalnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
) &&
4340 (coalnflp
->nfl_type
== nflp
->nfl_type
) &&
4341 (coalnflp
->nfl_end
== (nflp
->nfl_start
- 1))) {
4342 coalnflp
->nfl_end
= nflp
->nfl_end
;
4343 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
4344 lck_mtx_lock(&nlop
->nlo_lock
);
4345 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
4346 lck_mtx_unlock(&nlop
->nlo_lock
);
4347 } else if (((coalnflp
= TAILQ_NEXT(nflp
, nfl_lolink
))) &&
4348 ((coalnflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
) &&
4349 (coalnflp
->nfl_type
== nflp
->nfl_type
) &&
4350 (coalnflp
->nfl_start
== (nflp
->nfl_end
+ 1))) {
4351 coalnflp
->nfl_start
= nflp
->nfl_start
;
4352 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
4353 lck_mtx_lock(&nlop
->nlo_lock
);
4354 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
4355 lck_mtx_unlock(&nlop
->nlo_lock
);
4357 if (!(nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
)) {
4360 if (nflp
->nfl_blockcnt
) {
4361 /* wake up anyone blocked on this lock */
4364 /* remove nflp from lock list and destroy */
4365 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
4366 nfs_file_lock_destroy(nflp
);
4371 lck_mtx_unlock(&np
->n_openlock
);
4372 nfs_open_state_clear_busy(np
);
4373 nfs_mount_state_in_use_end(nmp
, error
);
4376 nfs_file_lock_destroy(nflp2
);
4382 * Release all (same style) locks within the given range.
4387 struct nfs_open_file
*nofp
4392 struct nfs_lock_owner
*nlop
,
4398 struct nfsmount
*nmp
;
4399 struct nfs_file_lock
*nflp
, *nextnflp
, *newnflp
= NULL
;
4400 int error
= 0, willsplit
= 0, send_unlock_rpcs
= 1;
4403 if (nfs_mount_gone(nmp
)) {
4408 if ((error
= nfs_mount_state_in_use_start(nmp
, NULL
))) {
4412 if (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
) {
4413 nfs_mount_state_in_use_end(nmp
, 0);
4414 error
= nfs4_reopen(nofp
, NULL
);
4421 if ((error
= nfs_open_state_set_busy(np
, NULL
))) {
4422 nfs_mount_state_in_use_end(nmp
, error
);
4426 lck_mtx_lock(&np
->n_openlock
);
4427 if ((start
> 0) && (end
< UINT64_MAX
) && !willsplit
) {
4429 * We may need to allocate a new lock if an existing lock gets split.
4430 * So, we first scan the list to check for a split, and if there's
4431 * going to be one, we'll allocate one now.
4433 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
4434 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
| NFS_FILE_LOCK_DEAD
)) {
4437 if (nflp
->nfl_owner
!= nlop
) {
4440 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != style
) {
4443 if ((start
> nflp
->nfl_end
) || (end
< nflp
->nfl_start
)) {
4446 if ((start
> nflp
->nfl_start
) && (end
< nflp
->nfl_end
)) {
4452 lck_mtx_unlock(&np
->n_openlock
);
4453 nfs_open_state_clear_busy(np
);
4454 nfs_mount_state_in_use_end(nmp
, 0);
4455 newnflp
= nfs_file_lock_alloc(nlop
);
4464 * Free all of our locks in the given range.
4466 * Note that this process requires sending requests to the server.
4467 * Because of this, we will release the n_openlock while performing
4468 * the unlock RPCs. The N_OPENBUSY state keeps the state of *held*
4469 * locks from changing underneath us. However, other entries in the
4470 * list may be removed. So we need to be careful walking the list.
4474 * Don't unlock ranges that are held by other-style locks.
4475 * If style is posix, don't send any unlock rpcs if flock is held.
4476 * If we unlock an flock, don't send unlock rpcs for any posix-style
4477 * ranges held - instead send unlocks for the ranges not held.
4479 if ((style
== NFS_FILE_LOCK_STYLE_POSIX
) &&
4480 ((nflp
= TAILQ_FIRST(&nlop
->nlo_locks
))) &&
4481 ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_FLOCK
)) {
4482 send_unlock_rpcs
= 0;
4484 if ((style
== NFS_FILE_LOCK_STYLE_FLOCK
) &&
4485 ((nflp
= TAILQ_FIRST(&nlop
->nlo_locks
))) &&
4486 ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_FLOCK
) &&
4487 ((nflp
= TAILQ_NEXT(nflp
, nfl_lolink
))) &&
4488 ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
)) {
4490 int type
= TAILQ_FIRST(&nlop
->nlo_locks
)->nfl_type
;
4491 int delegated
= (TAILQ_FIRST(&nlop
->nlo_locks
)->nfl_flags
& NFS_FILE_LOCK_DELEGATED
);
4492 while (!delegated
&& nflp
) {
4493 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) == NFS_FILE_LOCK_STYLE_POSIX
) {
4494 /* unlock the range preceding this lock */
4495 lck_mtx_unlock(&np
->n_openlock
);
4496 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, type
, s
, nflp
->nfl_start
- 1, 0,
4497 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4498 if (nfs_mount_state_error_should_restart(error
)) {
4499 nfs_open_state_clear_busy(np
);
4500 nfs_mount_state_in_use_end(nmp
, error
);
4503 lck_mtx_lock(&np
->n_openlock
);
4507 s
= nflp
->nfl_end
+ 1;
4509 nflp
= TAILQ_NEXT(nflp
, nfl_lolink
);
4512 lck_mtx_unlock(&np
->n_openlock
);
4513 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, type
, s
, end
, 0,
4514 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4515 if (nfs_mount_state_error_should_restart(error
)) {
4516 nfs_open_state_clear_busy(np
);
4517 nfs_mount_state_in_use_end(nmp
, error
);
4520 lck_mtx_lock(&np
->n_openlock
);
4525 send_unlock_rpcs
= 0;
4528 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
4529 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_BLOCKED
| NFS_FILE_LOCK_DEAD
)) {
4532 if (nflp
->nfl_owner
!= nlop
) {
4535 if ((nflp
->nfl_flags
& NFS_FILE_LOCK_STYLE_MASK
) != style
) {
4538 if ((start
> nflp
->nfl_end
) || (end
< nflp
->nfl_start
)) {
4541 /* here's one to unlock */
4542 if ((start
<= nflp
->nfl_start
) && (end
>= nflp
->nfl_end
)) {
4543 /* The entire lock is being unlocked. */
4544 if (send_unlock_rpcs
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
4545 lck_mtx_unlock(&np
->n_openlock
);
4546 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, nflp
->nfl_type
, nflp
->nfl_start
, nflp
->nfl_end
, 0,
4547 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4548 if (nfs_mount_state_error_should_restart(error
)) {
4549 nfs_open_state_clear_busy(np
);
4550 nfs_mount_state_in_use_end(nmp
, error
);
4553 lck_mtx_lock(&np
->n_openlock
);
4555 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
4559 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
4560 lck_mtx_lock(&nlop
->nlo_lock
);
4561 TAILQ_REMOVE(&nlop
->nlo_locks
, nflp
, nfl_lolink
);
4562 lck_mtx_unlock(&nlop
->nlo_lock
);
4563 /* lock will be destroyed below, if no waiters */
4564 } else if ((start
> nflp
->nfl_start
) && (end
< nflp
->nfl_end
)) {
4565 /* We're unlocking a range in the middle of a lock. */
4566 /* The current lock will be split into two locks. */
4567 if (send_unlock_rpcs
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
4568 lck_mtx_unlock(&np
->n_openlock
);
4569 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, nflp
->nfl_type
, start
, end
, 0,
4570 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4571 if (nfs_mount_state_error_should_restart(error
)) {
4572 nfs_open_state_clear_busy(np
);
4573 nfs_mount_state_in_use_end(nmp
, error
);
4576 lck_mtx_lock(&np
->n_openlock
);
4581 /* update locks and insert new lock after current lock */
4582 newnflp
->nfl_flags
|= (nflp
->nfl_flags
& (NFS_FILE_LOCK_STYLE_MASK
| NFS_FILE_LOCK_DELEGATED
));
4583 newnflp
->nfl_type
= nflp
->nfl_type
;
4584 newnflp
->nfl_start
= end
+ 1;
4585 newnflp
->nfl_end
= nflp
->nfl_end
;
4586 nflp
->nfl_end
= start
- 1;
4587 TAILQ_INSERT_AFTER(&np
->n_locks
, nflp
, newnflp
, nfl_link
);
4588 nfs_lock_owner_insert_held_lock(nlop
, newnflp
);
4591 } else if (start
> nflp
->nfl_start
) {
4592 /* We're unlocking the end of a lock. */
4593 if (send_unlock_rpcs
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
4594 lck_mtx_unlock(&np
->n_openlock
);
4595 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, nflp
->nfl_type
, start
, nflp
->nfl_end
, 0,
4596 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4597 if (nfs_mount_state_error_should_restart(error
)) {
4598 nfs_open_state_clear_busy(np
);
4599 nfs_mount_state_in_use_end(nmp
, error
);
4602 lck_mtx_lock(&np
->n_openlock
);
4604 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
4608 nflp
->nfl_end
= start
- 1;
4609 } else if (end
< nflp
->nfl_end
) {
4610 /* We're unlocking the start of a lock. */
4611 if (send_unlock_rpcs
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
4612 lck_mtx_unlock(&np
->n_openlock
);
4613 error
= nmp
->nm_funcs
->nf_unlock_rpc(np
, nlop
, nflp
->nfl_type
, nflp
->nfl_start
, end
, 0,
4614 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
4615 if (nfs_mount_state_error_should_restart(error
)) {
4616 nfs_open_state_clear_busy(np
);
4617 nfs_mount_state_in_use_end(nmp
, error
);
4620 lck_mtx_lock(&np
->n_openlock
);
4622 nextnflp
= TAILQ_NEXT(nflp
, nfl_link
);
4626 nflp
->nfl_start
= end
+ 1;
4628 if (nflp
->nfl_blockcnt
) {
4629 /* wake up anyone blocked on this lock */
4631 } else if (nflp
->nfl_flags
& NFS_FILE_LOCK_DEAD
) {
4632 /* remove nflp from lock list and destroy */
4633 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
4634 nfs_file_lock_destroy(nflp
);
4638 lck_mtx_unlock(&np
->n_openlock
);
4639 nfs_open_state_clear_busy(np
);
4640 nfs_mount_state_in_use_end(nmp
, 0);
4643 nfs_file_lock_destroy(newnflp
);
4649 * NFSv4 advisory file locking
4653 struct vnop_advlock_args
/* {
4654 * struct vnodeop_desc *a_desc;
4658 * struct flock *a_fl;
4660 * vfs_context_t a_context;
4663 vnode_t vp
= ap
->a_vp
;
4664 nfsnode_t np
= VTONFS(ap
->a_vp
);
4665 struct flock
*fl
= ap
->a_fl
;
4667 int flags
= ap
->a_flags
;
4668 vfs_context_t ctx
= ap
->a_context
;
4669 struct nfsmount
*nmp
;
4670 struct nfs_open_owner
*noop
= NULL
;
4671 struct nfs_open_file
*nofp
= NULL
;
4672 struct nfs_lock_owner
*nlop
= NULL
;
4674 uint64_t start
, end
;
4675 int error
= 0, modified
, style
;
4677 #define OFF_MAX QUAD_MAX
4679 nmp
= VTONMP(ap
->a_vp
);
4680 if (nfs_mount_gone(nmp
)) {
4683 lck_mtx_lock(&nmp
->nm_lock
);
4684 if ((nmp
->nm_vers
<= NFS_VER3
) && (nmp
->nm_lockmode
== NFS_LOCK_MODE_DISABLED
)) {
4685 lck_mtx_unlock(&nmp
->nm_lock
);
4688 lck_mtx_unlock(&nmp
->nm_lock
);
4690 if (np
->n_flag
& NREVOKE
) {
4693 vtype
= vnode_vtype(ap
->a_vp
);
4694 if (vtype
== VDIR
) { /* ignore lock requests on directories */
4697 if (vtype
!= VREG
) { /* anything other than regular files is invalid */
4701 /* Convert the flock structure into a start and end. */
4702 switch (fl
->l_whence
) {
4706 * Caller is responsible for adding any necessary offset
4707 * to fl->l_start when SEEK_CUR is used.
4709 lstart
= fl
->l_start
;
4712 /* need to flush, and refetch attributes to make */
4713 /* sure we have the correct end of file offset */
4714 if ((error
= nfs_node_lock(np
))) {
4717 modified
= (np
->n_flag
& NMODIFIED
);
4718 nfs_node_unlock(np
);
4719 if (modified
&& ((error
= nfs_vinvalbuf(vp
, V_SAVE
, ctx
, 1)))) {
4722 if ((error
= nfs_getattr(np
, NULL
, ctx
, NGA_UNCACHED
))) {
4725 nfs_data_lock(np
, NFS_DATA_LOCK_SHARED
);
4726 if ((np
->n_size
> OFF_MAX
) ||
4727 ((fl
->l_start
> 0) && (np
->n_size
> (u_quad_t
)(OFF_MAX
- fl
->l_start
)))) {
4730 lstart
= np
->n_size
+ fl
->l_start
;
4731 nfs_data_unlock(np
);
4743 if (fl
->l_len
== 0) {
4745 } else if (fl
->l_len
> 0) {
4746 if ((fl
->l_len
- 1) > (OFF_MAX
- lstart
)) {
4749 end
= start
- 1 + fl
->l_len
;
4750 } else { /* l_len is negative */
4751 if ((lstart
+ fl
->l_len
) < 0) {
4757 if ((nmp
->nm_vers
== NFS_VER2
) && ((start
> INT32_MAX
) || (fl
->l_len
&& (end
> INT32_MAX
)))) {
4761 style
= (flags
& F_FLOCK
) ? NFS_FILE_LOCK_STYLE_FLOCK
: NFS_FILE_LOCK_STYLE_POSIX
;
4762 if ((style
== NFS_FILE_LOCK_STYLE_FLOCK
) && ((start
!= 0) || (end
!= UINT64_MAX
))) {
4766 /* find the lock owner, alloc if not unlock */
4767 nlop
= nfs_lock_owner_find(np
, vfs_context_proc(ctx
), (op
!= F_UNLCK
));
4769 error
= (op
== F_UNLCK
) ? 0 : ENOMEM
;
4771 NP(np
, "nfs_vnop_advlock: no lock owner, error %d", error
);
4776 if (op
== F_GETLK
) {
4777 error
= nfs_advlock_getlock(np
, nlop
, fl
, start
, end
, ctx
);
4779 /* find the open owner */
4780 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 0);
4782 NP(np
, "nfs_vnop_advlock: no open owner %d", kauth_cred_getuid(vfs_context_ucred(ctx
)));
4786 /* find the open file */
4790 error
= nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 0);
4794 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
4795 NP(np
, "nfs_vnop_advlock: LOST %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
4799 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
4800 error
= nfs4_reopen(nofp
, ((op
== F_UNLCK
) ? NULL
: vfs_context_thread(ctx
)));
4808 NP(np
, "nfs_vnop_advlock: no open file %d, %d", error
, kauth_cred_getuid(noop
->noo_cred
));
4811 if (op
== F_UNLCK
) {
4812 error
= nfs_advlock_unlock(np
, nofp
, nlop
, start
, end
, style
, ctx
);
4813 } else if ((op
== F_SETLK
) || (op
== F_SETLKW
)) {
4814 if ((op
== F_SETLK
) && (flags
& F_WAIT
)) {
4817 error
= nfs_advlock_setlock(np
, nofp
, nlop
, op
, start
, end
, style
, fl
->l_type
, ctx
);
4819 /* not getlk, unlock or lock? */
4826 nfs_lock_owner_rele(nlop
);
4829 nfs_open_owner_rele(noop
);
4835 * Check if an open owner holds any locks on a file.
4838 nfs_check_for_locks(struct nfs_open_owner
*noop
, struct nfs_open_file
*nofp
)
4840 struct nfs_lock_owner
*nlop
;
4842 TAILQ_FOREACH(nlop
, &nofp
->nof_np
->n_lock_owners
, nlo_link
) {
4843 if (nlop
->nlo_open_owner
!= noop
) {
4846 if (!TAILQ_EMPTY(&nlop
->nlo_locks
)) {
4850 return nlop
? 1 : 0;
4855 * Reopen simple (no deny, no locks) open state that was lost.
4858 nfs4_reopen(struct nfs_open_file
*nofp
, thread_t thd
)
4860 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
4861 struct nfsmount
*nmp
= NFSTONMP(nofp
->nof_np
);
4862 nfsnode_t np
= nofp
->nof_np
;
4863 vnode_t vp
= NFSTOV(np
);
4865 struct componentname cn
;
4866 const char *vname
= NULL
;
4867 const char *name
= NULL
;
4869 char smallname
[128];
4870 char *filename
= NULL
;
4871 int error
= 0, done
= 0, slpflag
= NMFLAG(nmp
, INTR
) ? PCATCH
: 0;
4872 struct timespec ts
= { .tv_sec
= 1, .tv_nsec
= 0 };
4874 lck_mtx_lock(&nofp
->nof_lock
);
4875 while (nofp
->nof_flags
& NFS_OPEN_FILE_REOPENING
) {
4876 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0))) {
4879 msleep(&nofp
->nof_flags
, &nofp
->nof_lock
, slpflag
| (PZERO
- 1), "nfsreopenwait", &ts
);
4882 if (error
|| !(nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
4883 lck_mtx_unlock(&nofp
->nof_lock
);
4886 nofp
->nof_flags
|= NFS_OPEN_FILE_REOPENING
;
4887 lck_mtx_unlock(&nofp
->nof_lock
);
4889 nfs_node_lock_force(np
);
4890 if ((vnode_vtype(vp
) != VDIR
) && np
->n_sillyrename
) {
4892 * The node's been sillyrenamed, so we need to use
4893 * the sillyrename directory/name to do the open.
4895 struct nfs_sillyrename
*nsp
= np
->n_sillyrename
;
4896 dvp
= NFSTOV(nsp
->nsr_dnp
);
4897 if ((error
= vnode_get(dvp
))) {
4899 nfs_node_unlock(np
);
4902 name
= nsp
->nsr_name
;
4905 * [sigh] We can't trust VFS to get the parent right for named
4906 * attribute nodes. (It likes to reparent the nodes after we've
4907 * created them.) Luckily we can probably get the right parent
4908 * from the n_parent we have stashed away.
4910 if ((np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) &&
4911 (((dvp
= np
->n_parent
)) && (error
= vnode_get(dvp
)))) {
4915 dvp
= vnode_getparent(vp
);
4917 vname
= vnode_getname(vp
);
4918 if (!dvp
|| !vname
) {
4922 nfs_node_unlock(np
);
4927 filename
= &smallname
[0];
4928 namelen
= snprintf(filename
, sizeof(smallname
), "%s", name
);
4929 if (namelen
>= sizeof(smallname
)) {
4930 MALLOC(filename
, char *, namelen
+ 1, M_TEMP
, M_WAITOK
);
4935 snprintf(filename
, namelen
+ 1, "%s", name
);
4937 nfs_node_unlock(np
);
4938 bzero(&cn
, sizeof(cn
));
4939 cn
.cn_nameptr
= filename
;
4940 cn
.cn_namelen
= namelen
;
4944 if ((error
= nfs_mount_state_in_use_start(nmp
, thd
))) {
4949 error
= nfs4_open_reopen_rpc(nofp
, thd
, noop
->noo_cred
, &cn
, dvp
, &vp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_NONE
);
4951 if (!error
&& nofp
->nof_w
) {
4952 error
= nfs4_open_reopen_rpc(nofp
, thd
, noop
->noo_cred
, &cn
, dvp
, &vp
, NFS_OPEN_SHARE_ACCESS_WRITE
, NFS_OPEN_SHARE_DENY_NONE
);
4954 if (!error
&& nofp
->nof_r
) {
4955 error
= nfs4_open_reopen_rpc(nofp
, thd
, noop
->noo_cred
, &cn
, dvp
, &vp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
);
4958 if (nfs_mount_state_in_use_end(nmp
, error
)) {
4959 if (error
== NFSERR_GRACE
) {
4962 printf("nfs4_reopen: RPC failed, error %d, lost %d, %s\n", error
,
4963 (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) ? 1 : 0, name
? name
: "???");
4969 if (error
&& (error
!= EINTR
) && (error
!= ERESTART
)) {
4970 nfs_revoke_open_state_for_node(np
);
4972 lck_mtx_lock(&nofp
->nof_lock
);
4973 nofp
->nof_flags
&= ~NFS_OPEN_FILE_REOPENING
;
4975 nofp
->nof_flags
&= ~NFS_OPEN_FILE_REOPEN
;
4977 printf("nfs4_reopen: failed, error %d, lost %d, %s\n", error
,
4978 (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) ? 1 : 0, name
? name
: "???");
4980 lck_mtx_unlock(&nofp
->nof_lock
);
4981 if (filename
&& (filename
!= &smallname
[0])) {
4982 FREE(filename
, M_TEMP
);
4985 vnode_putname(vname
);
4987 if (dvp
!= NULLVP
) {
4994 * Send a normal OPEN RPC to open/create a file.
4998 struct nfs_open_file
*nofp
,
5000 struct componentname
*cnp
,
5001 struct vnode_attr
*vap
,
5008 return nfs4_open_rpc_internal(nofp
, ctx
, vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
5009 cnp
, vap
, dvp
, vpp
, create
, share_access
, share_deny
);
5013 * Send an OPEN RPC to reopen a file.
5016 nfs4_open_reopen_rpc(
5017 struct nfs_open_file
*nofp
,
5020 struct componentname
*cnp
,
5026 return nfs4_open_rpc_internal(nofp
, NULL
, thd
, cred
, cnp
, NULL
, dvp
, vpp
, NFS_OPEN_NOCREATE
, share_access
, share_deny
);
5030 * Send an OPEN_CONFIRM RPC to confirm an OPEN.
5033 nfs4_open_confirm_rpc(
5034 struct nfsmount
*nmp
,
5038 struct nfs_open_owner
*noop
,
5042 struct nfs_vattr
*nvap
,
5045 struct nfsm_chain nmreq
, nmrep
;
5046 int error
= 0, status
, numops
;
5047 struct nfsreq_secinfo_args si
;
5049 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
5050 nfsm_chain_null(&nmreq
);
5051 nfsm_chain_null(&nmrep
);
5053 // PUTFH, OPEN_CONFIRM, GETATTR
5055 nfsm_chain_build_alloc_init(error
, &nmreq
, 23 * NFSX_UNSIGNED
);
5056 nfsm_chain_add_compound_header(error
, &nmreq
, "open_confirm", nmp
->nm_minor_vers
, numops
);
5058 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5059 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, fhp
, fhlen
);
5061 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN_CONFIRM
);
5062 nfsm_chain_add_stateid(error
, &nmreq
, sid
);
5063 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5065 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5066 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
5067 nfsm_chain_build_done(error
, &nmreq
);
5068 nfsm_assert(error
, (numops
== 0), EPROTO
);
5070 error
= nfs_request2(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, R_NOINTR
, &nmrep
, xidp
, &status
);
5072 nfsm_chain_skip_tag(error
, &nmrep
);
5073 nfsm_chain_get_32(error
, &nmrep
, numops
);
5074 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
5076 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN_CONFIRM
);
5077 nfs_owner_seqid_increment(noop
, NULL
, error
);
5078 nfsm_chain_get_stateid(error
, &nmrep
, sid
);
5079 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5081 error
= nfs4_parsefattr(&nmrep
, NULL
, nvap
, NULL
, NULL
, NULL
);
5083 nfsm_chain_cleanup(&nmreq
);
5084 nfsm_chain_cleanup(&nmrep
);
5089 * common OPEN RPC code
5091 * If create is set, ctx must be passed in.
5092 * Returns a node on success if no node passed in.
5095 nfs4_open_rpc_internal(
5096 struct nfs_open_file
*nofp
,
5100 struct componentname
*cnp
,
5101 struct vnode_attr
*vap
,
5108 struct nfsmount
*nmp
;
5109 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
5110 struct nfs_vattr nvattr
;
5111 int error
= 0, open_error
= EIO
, lockerror
= ENOENT
, busyerror
= ENOENT
, status
;
5112 int nfsvers
, namedattrs
, numops
, exclusive
= 0, gotuid
, gotgid
;
5113 u_int64_t xid
, savedxid
= 0;
5114 nfsnode_t dnp
= VTONFS(dvp
);
5115 nfsnode_t np
, newnp
= NULL
;
5116 vnode_t newvp
= NULL
;
5117 struct nfsm_chain nmreq
, nmrep
;
5118 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
5119 uint32_t rflags
, delegation
, recall
;
5120 struct nfs_stateid stateid
, dstateid
, *sid
;
5122 struct nfsreq rq
, *req
= &rq
;
5123 struct nfs_dulookup dul
;
5125 uint32_t ace_type
, ace_flags
, ace_mask
, len
, slen
;
5126 struct kauth_ace ace
;
5127 struct nfsreq_secinfo_args si
;
5129 if (create
&& !ctx
) {
5134 if (nfs_mount_gone(nmp
)) {
5137 nfsvers
= nmp
->nm_vers
;
5138 namedattrs
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
);
5139 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
5143 np
= *vpp
? VTONFS(*vpp
) : NULL
;
5144 if (create
&& vap
) {
5145 exclusive
= (vap
->va_vaflags
& VA_EXCLUSIVE
);
5146 nfs_avoid_needless_id_setting_on_create(dnp
, vap
, ctx
);
5147 gotuid
= VATTR_IS_ACTIVE(vap
, va_uid
);
5148 gotgid
= VATTR_IS_ACTIVE(vap
, va_gid
);
5149 if (exclusive
&& (!VATTR_IS_ACTIVE(vap
, va_access_time
) || !VATTR_IS_ACTIVE(vap
, va_modify_time
))) {
5150 vap
->va_vaflags
|= VA_UTIMES_NULL
;
5153 exclusive
= gotuid
= gotgid
= 0;
5156 sid
= &nofp
->nof_stateid
;
5158 stateid
.seqid
= stateid
.other
[0] = stateid
.other
[1] = stateid
.other
[2] = 0;
5162 if ((error
= nfs_open_owner_set_busy(noop
, thd
))) {
5166 rflags
= delegation
= recall
= 0;
5169 slen
= sizeof(sbuf
);
5170 NVATTR_INIT(&nvattr
);
5171 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, cnp
->cn_nameptr
, cnp
->cn_namelen
);
5173 nfsm_chain_null(&nmreq
);
5174 nfsm_chain_null(&nmrep
);
5176 // PUTFH, SAVEFH, OPEN(CREATE?), GETATTR(FH), RESTOREFH, GETATTR
5178 nfsm_chain_build_alloc_init(error
, &nmreq
, 53 * NFSX_UNSIGNED
+ cnp
->cn_namelen
);
5179 nfsm_chain_add_compound_header(error
, &nmreq
, create
? "create" : "open", nmp
->nm_minor_vers
, numops
);
5181 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5182 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
5184 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
5186 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
5187 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5188 nfsm_chain_add_32(error
, &nmreq
, share_access
);
5189 nfsm_chain_add_32(error
, &nmreq
, share_deny
);
5190 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
);
5191 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
5192 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
));
5193 nfsm_chain_add_32(error
, &nmreq
, create
);
5196 static uint32_t create_verf
; // XXX need a better verifier
5198 nfsm_chain_add_32(error
, &nmreq
, NFS_CREATE_EXCLUSIVE
);
5199 /* insert 64 bit verifier */
5200 nfsm_chain_add_32(error
, &nmreq
, create_verf
);
5201 nfsm_chain_add_32(error
, &nmreq
, create_verf
);
5203 nfsm_chain_add_32(error
, &nmreq
, NFS_CREATE_UNCHECKED
);
5204 nfsm_chain_add_fattr4(error
, &nmreq
, vap
, nmp
);
5207 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_NULL
);
5208 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
5210 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5211 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
5212 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
5213 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
5215 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
5217 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5218 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
5219 nfsm_chain_build_done(error
, &nmreq
);
5220 nfsm_assert(error
, (numops
== 0), EPROTO
);
5222 error
= busyerror
= nfs_node_set_busy(dnp
, thd
);
5226 if (create
&& !namedattrs
) {
5227 nfs_dulookup_init(&dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
);
5230 error
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, R_NOINTR
, NULL
, &req
);
5232 if (create
&& !namedattrs
) {
5233 nfs_dulookup_start(&dul
, dnp
, ctx
);
5235 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
5239 if (create
&& !namedattrs
) {
5240 nfs_dulookup_finish(&dul
, dnp
, ctx
);
5243 if ((lockerror
= nfs_node_lock(dnp
))) {
5246 nfsm_chain_skip_tag(error
, &nmrep
);
5247 nfsm_chain_get_32(error
, &nmrep
, numops
);
5248 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
5249 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
5251 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
5252 nfs_owner_seqid_increment(noop
, NULL
, error
);
5253 nfsm_chain_get_stateid(error
, &nmrep
, sid
);
5254 nfsm_chain_check_change_info(error
, &nmrep
, dnp
);
5255 nfsm_chain_get_32(error
, &nmrep
, rflags
);
5256 bmlen
= NFS_ATTR_BITMAP_LEN
;
5257 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
5258 nfsm_chain_get_32(error
, &nmrep
, delegation
);
5260 switch (delegation
) {
5261 case NFS_OPEN_DELEGATE_NONE
:
5263 case NFS_OPEN_DELEGATE_READ
:
5264 case NFS_OPEN_DELEGATE_WRITE
:
5265 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
5266 nfsm_chain_get_32(error
, &nmrep
, recall
);
5267 if (delegation
== NFS_OPEN_DELEGATE_WRITE
) { // space (skip) XXX
5268 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
5270 /* if we have any trouble accepting the ACE, just invalidate it */
5271 ace_type
= ace_flags
= ace_mask
= len
= 0;
5272 nfsm_chain_get_32(error
, &nmrep
, ace_type
);
5273 nfsm_chain_get_32(error
, &nmrep
, ace_flags
);
5274 nfsm_chain_get_32(error
, &nmrep
, ace_mask
);
5275 nfsm_chain_get_32(error
, &nmrep
, len
);
5276 ace
.ace_flags
= nfs4_ace_nfstype_to_vfstype(ace_type
, &error
);
5277 ace
.ace_flags
|= nfs4_ace_nfsflags_to_vfsflags(ace_flags
);
5278 ace
.ace_rights
= nfs4_ace_nfsmask_to_vfsrights(ace_mask
);
5279 if (!error
&& (len
>= slen
)) {
5280 MALLOC(s
, char*, len
+ 1, M_TEMP
, M_WAITOK
);
5288 nfsm_chain_get_opaque(error
, &nmrep
, len
, s
);
5290 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(len
));
5294 if (nfs4_id2guid(s
, &ace
.ace_applicable
, (ace_flags
& NFS_ACE_IDENTIFIER_GROUP
))) {
5301 if (s
&& (s
!= sbuf
)) {
5310 /* At this point if we have no error, the object was created/opened. */
5313 if (create
&& vap
&& !exclusive
) {
5314 nfs_vattr_set_supported(bitmap
, vap
);
5316 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5318 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
5320 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
5321 printf("nfs: open/create didn't return filehandle? %s\n", cnp
->cn_nameptr
);
5325 if (!create
&& np
&& !NFS_CMPFH(np
, fh
.fh_data
, fh
.fh_len
)) {
5326 // XXX for the open case, what if fh doesn't match the vnode we think we're opening?
5327 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5328 if (!(np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
5329 NP(np
, "nfs4_open_rpc: warning: file handle mismatch");
5332 /* directory attributes: if we don't get them, make sure to invalidate */
5333 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
5334 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5335 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
5337 NATTRINVALIDATE(dnp
);
5341 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
) {
5342 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
5345 if (rflags
& NFS_OPEN_RESULT_CONFIRM
) {
5346 nfs_node_unlock(dnp
);
5348 NVATTR_CLEANUP(&nvattr
);
5349 error
= nfs4_open_confirm_rpc(nmp
, dnp
, fh
.fh_data
, fh
.fh_len
, noop
, sid
, thd
, cred
, &nvattr
, &xid
);
5352 if ((lockerror
= nfs_node_lock(dnp
))) {
5358 nfsm_chain_cleanup(&nmreq
);
5359 nfsm_chain_cleanup(&nmrep
);
5361 if (!lockerror
&& create
) {
5362 if (!open_error
&& (dnp
->n_flag
& NNEGNCENTRIES
)) {
5363 dnp
->n_flag
&= ~NNEGNCENTRIES
;
5364 cache_purge_negatives(dvp
);
5366 dnp
->n_flag
|= NMODIFIED
;
5367 nfs_node_unlock(dnp
);
5369 nfs_getattr(dnp
, NULL
, ctx
, NGA_CACHED
);
5372 nfs_node_unlock(dnp
);
5374 if (!error
&& !np
&& fh
.fh_len
) {
5375 /* create the vnode with the filehandle and attributes */
5377 error
= nfs_nget(NFSTOMP(dnp
), dnp
, cnp
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, NG_MAKEENTRY
, &newnp
);
5379 newvp
= NFSTOV(newnp
);
5382 NVATTR_CLEANUP(&nvattr
);
5384 nfs_node_clear_busy(dnp
);
5386 if ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
)) {
5390 if (!error
&& np
&& !recall
) {
5391 /* stuff the delegation state in the node */
5392 lck_mtx_lock(&np
->n_openlock
);
5393 np
->n_openflags
&= ~N_DELEG_MASK
;
5394 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
5395 np
->n_dstateid
= dstateid
;
5397 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5398 lck_mtx_lock(&nmp
->nm_lock
);
5399 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5400 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, np
, n_dlink
);
5402 lck_mtx_unlock(&nmp
->nm_lock
);
5404 lck_mtx_unlock(&np
->n_openlock
);
5406 /* give the delegation back */
5408 if (NFS_CMPFH(np
, fh
.fh_data
, fh
.fh_len
)) {
5409 /* update delegation state and return it */
5410 lck_mtx_lock(&np
->n_openlock
);
5411 np
->n_openflags
&= ~N_DELEG_MASK
;
5412 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
5413 np
->n_dstateid
= dstateid
;
5415 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5416 lck_mtx_lock(&nmp
->nm_lock
);
5417 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5418 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, np
, n_dlink
);
5420 lck_mtx_unlock(&nmp
->nm_lock
);
5422 lck_mtx_unlock(&np
->n_openlock
);
5423 /* don't need to send a separate delegreturn for fh */
5426 /* return np's current delegation */
5427 nfs4_delegation_return(np
, 0, thd
, cred
);
5429 if (fh
.fh_len
) { /* return fh's delegation if it wasn't for np */
5430 nfs4_delegreturn_rpc(nmp
, fh
.fh_data
, fh
.fh_len
, &dstateid
, 0, thd
, cred
);
5435 if (exclusive
&& (error
== NFSERR_NOTSUPP
)) {
5440 nfs_node_unlock(newnp
);
5443 } else if (create
) {
5444 nfs_node_unlock(newnp
);
5446 error
= nfs4_setattr_rpc(newnp
, vap
, ctx
);
5447 if (error
&& (gotuid
|| gotgid
)) {
5448 /* it's possible the server didn't like our attempt to set IDs. */
5449 /* so, let's try it again without those */
5450 VATTR_CLEAR_ACTIVE(vap
, va_uid
);
5451 VATTR_CLEAR_ACTIVE(vap
, va_gid
);
5452 error
= nfs4_setattr_rpc(newnp
, vap
, ctx
);
5461 nfs_open_owner_clear_busy(noop
);
5467 * Send an OPEN RPC to claim a delegated open for a file
5470 nfs4_claim_delegated_open_rpc(
5471 struct nfs_open_file
*nofp
,
5476 struct nfsmount
*nmp
;
5477 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
5478 struct nfs_vattr nvattr
;
5479 int error
= 0, lockerror
= ENOENT
, status
;
5480 int nfsvers
, numops
;
5482 nfsnode_t np
= nofp
->nof_np
;
5483 struct nfsm_chain nmreq
, nmrep
;
5484 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
5485 uint32_t rflags
= 0, delegation
, recall
= 0;
5487 struct nfs_stateid dstateid
;
5488 char sbuf
[64], *s
= sbuf
;
5489 uint32_t ace_type
, ace_flags
, ace_mask
, len
, slen
= sizeof(sbuf
);
5490 struct kauth_ace ace
;
5492 const char *vname
= NULL
;
5493 const char *name
= NULL
;
5495 char smallname
[128];
5496 char *filename
= NULL
;
5497 struct nfsreq_secinfo_args si
;
5500 if (nfs_mount_gone(nmp
)) {
5503 nfsvers
= nmp
->nm_vers
;
5505 nfs_node_lock_force(np
);
5506 if ((vnode_vtype(NFSTOV(np
)) != VDIR
) && np
->n_sillyrename
) {
5508 * The node's been sillyrenamed, so we need to use
5509 * the sillyrename directory/name to do the open.
5511 struct nfs_sillyrename
*nsp
= np
->n_sillyrename
;
5512 dvp
= NFSTOV(nsp
->nsr_dnp
);
5513 if ((error
= vnode_get(dvp
))) {
5515 nfs_node_unlock(np
);
5518 name
= nsp
->nsr_name
;
5521 * [sigh] We can't trust VFS to get the parent right for named
5522 * attribute nodes. (It likes to reparent the nodes after we've
5523 * created them.) Luckily we can probably get the right parent
5524 * from the n_parent we have stashed away.
5526 if ((np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
) &&
5527 (((dvp
= np
->n_parent
)) && (error
= vnode_get(dvp
)))) {
5531 dvp
= vnode_getparent(NFSTOV(np
));
5533 vname
= vnode_getname(NFSTOV(np
));
5534 if (!dvp
|| !vname
) {
5538 nfs_node_unlock(np
);
5543 filename
= &smallname
[0];
5544 namelen
= snprintf(filename
, sizeof(smallname
), "%s", name
);
5545 if (namelen
>= sizeof(smallname
)) {
5546 MALLOC(filename
, char *, namelen
+ 1, M_TEMP
, M_WAITOK
);
5549 nfs_node_unlock(np
);
5552 snprintf(filename
, namelen
+ 1, "%s", name
);
5554 nfs_node_unlock(np
);
5556 if ((error
= nfs_open_owner_set_busy(noop
, NULL
))) {
5559 NVATTR_INIT(&nvattr
);
5560 delegation
= NFS_OPEN_DELEGATE_NONE
;
5561 dstateid
= np
->n_dstateid
;
5562 NFSREQ_SECINFO_SET(&si
, VTONFS(dvp
), NULL
, 0, filename
, namelen
);
5564 nfsm_chain_null(&nmreq
);
5565 nfsm_chain_null(&nmrep
);
5567 // PUTFH, OPEN, GETATTR(FH)
5569 nfsm_chain_build_alloc_init(error
, &nmreq
, 48 * NFSX_UNSIGNED
);
5570 nfsm_chain_add_compound_header(error
, &nmreq
, "open_claim_d", nmp
->nm_minor_vers
, numops
);
5572 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5573 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, VTONFS(dvp
)->n_fhp
, VTONFS(dvp
)->n_fhsize
);
5575 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
5576 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5577 nfsm_chain_add_32(error
, &nmreq
, share_access
);
5578 nfsm_chain_add_32(error
, &nmreq
, share_deny
);
5579 // open owner: clientid + uid
5580 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
); // open_owner4.clientid
5581 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
5582 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
)); // open_owner4.owner
5584 nfsm_chain_add_32(error
, &nmreq
, NFS_OPEN_NOCREATE
);
5586 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_DELEGATE_CUR
);
5587 nfsm_chain_add_stateid(error
, &nmreq
, &np
->n_dstateid
);
5588 nfsm_chain_add_name(error
, &nmreq
, filename
, namelen
, nmp
);
5590 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5591 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
5592 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
5593 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
5594 nfsm_chain_build_done(error
, &nmreq
);
5595 nfsm_assert(error
, (numops
== 0), EPROTO
);
5598 error
= nfs_request2(np
, nmp
->nm_mountp
, &nmreq
, NFSPROC4_COMPOUND
, current_thread(),
5599 noop
->noo_cred
, &si
, flags
| R_NOINTR
, &nmrep
, &xid
, &status
);
5601 if ((lockerror
= nfs_node_lock(np
))) {
5604 nfsm_chain_skip_tag(error
, &nmrep
);
5605 nfsm_chain_get_32(error
, &nmrep
, numops
);
5606 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
5608 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
5609 nfs_owner_seqid_increment(noop
, NULL
, error
);
5610 nfsm_chain_get_stateid(error
, &nmrep
, &nofp
->nof_stateid
);
5611 nfsm_chain_check_change_info(error
, &nmrep
, np
);
5612 nfsm_chain_get_32(error
, &nmrep
, rflags
);
5613 bmlen
= NFS_ATTR_BITMAP_LEN
;
5614 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
5615 nfsm_chain_get_32(error
, &nmrep
, delegation
);
5617 switch (delegation
) {
5618 case NFS_OPEN_DELEGATE_NONE
:
5619 // if (!(np->n_openflags & N_DELEG_RETURN)) /* don't warn if delegation is being returned */
5620 // printf("nfs: open delegated claim didn't return a delegation %s\n", filename ? filename : "???");
5622 case NFS_OPEN_DELEGATE_READ
:
5623 case NFS_OPEN_DELEGATE_WRITE
:
5624 if ((((np
->n_openflags
& N_DELEG_MASK
) == N_DELEG_READ
) &&
5625 (delegation
== NFS_OPEN_DELEGATE_WRITE
)) ||
5626 (((np
->n_openflags
& N_DELEG_MASK
) == N_DELEG_WRITE
) &&
5627 (delegation
== NFS_OPEN_DELEGATE_READ
))) {
5628 printf("nfs: open delegated claim returned a different delegation type! have %s got %s %s\n",
5629 ((np
->n_openflags
& N_DELEG_MASK
) == N_DELEG_WRITE
) ? "W" : "R",
5630 (delegation
== NFS_OPEN_DELEGATE_WRITE
) ? "W" : "R", filename
? filename
: "???");
5632 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
5633 nfsm_chain_get_32(error
, &nmrep
, recall
);
5634 if (delegation
== NFS_OPEN_DELEGATE_WRITE
) { // space (skip) XXX
5635 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
5637 /* if we have any trouble accepting the ACE, just invalidate it */
5638 ace_type
= ace_flags
= ace_mask
= len
= 0;
5639 nfsm_chain_get_32(error
, &nmrep
, ace_type
);
5640 nfsm_chain_get_32(error
, &nmrep
, ace_flags
);
5641 nfsm_chain_get_32(error
, &nmrep
, ace_mask
);
5642 nfsm_chain_get_32(error
, &nmrep
, len
);
5643 ace
.ace_flags
= nfs4_ace_nfstype_to_vfstype(ace_type
, &error
);
5644 ace
.ace_flags
|= nfs4_ace_nfsflags_to_vfsflags(ace_flags
);
5645 ace
.ace_rights
= nfs4_ace_nfsmask_to_vfsrights(ace_mask
);
5646 if (!error
&& (len
>= slen
)) {
5647 MALLOC(s
, char*, len
+ 1, M_TEMP
, M_WAITOK
);
5655 nfsm_chain_get_opaque(error
, &nmrep
, len
, s
);
5657 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(len
));
5661 if (nfs4_id2guid(s
, &ace
.ace_applicable
, (ace_flags
& NFS_ACE_IDENTIFIER_GROUP
))) {
5668 if (s
&& (s
!= sbuf
)) {
5672 /* stuff the latest delegation state in the node */
5673 lck_mtx_lock(&np
->n_openlock
);
5674 np
->n_openflags
&= ~N_DELEG_MASK
;
5675 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
5676 np
->n_dstateid
= dstateid
;
5678 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5679 lck_mtx_lock(&nmp
->nm_lock
);
5680 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5681 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, np
, n_dlink
);
5683 lck_mtx_unlock(&nmp
->nm_lock
);
5685 lck_mtx_unlock(&np
->n_openlock
);
5694 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5695 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
5697 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
5698 printf("nfs: open reclaim didn't return filehandle? %s\n", filename
? filename
: "???");
5702 if (!NFS_CMPFH(np
, fh
.fh_data
, fh
.fh_len
)) {
5703 // XXX what if fh doesn't match the vnode we think we're re-opening?
5704 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5705 if (!(np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
5706 printf("nfs4_claim_delegated_open_rpc: warning: file handle mismatch %s\n", filename
? filename
: "???");
5709 error
= nfs_loadattrcache(np
, &nvattr
, &xid
, 1);
5711 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
) {
5712 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
5715 NVATTR_CLEANUP(&nvattr
);
5716 nfsm_chain_cleanup(&nmreq
);
5717 nfsm_chain_cleanup(&nmrep
);
5719 nfs_node_unlock(np
);
5721 nfs_open_owner_clear_busy(noop
);
5722 if ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
)) {
5725 * We're making a delegated claim.
5726 * Don't return the delegation here in case we have more to claim.
5727 * Just make sure it's queued up to be returned.
5729 nfs4_delegation_return_enqueue(np
);
5734 // printf("nfs: open claim delegated (%d, %d) succeeded for %s\n", share_access, share_deny, filename ? filename : "???");
5735 if (filename
&& (filename
!= &smallname
[0])) {
5736 FREE(filename
, M_TEMP
);
5739 vnode_putname(vname
);
5741 if (dvp
!= NULLVP
) {
5748 * Send an OPEN RPC to reclaim an open file.
5751 nfs4_open_reclaim_rpc(
5752 struct nfs_open_file
*nofp
,
5756 struct nfsmount
*nmp
;
5757 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
5758 struct nfs_vattr nvattr
;
5759 int error
= 0, lockerror
= ENOENT
, status
;
5760 int nfsvers
, numops
;
5762 nfsnode_t np
= nofp
->nof_np
;
5763 struct nfsm_chain nmreq
, nmrep
;
5764 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
5765 uint32_t rflags
= 0, delegation
, recall
= 0;
5767 struct nfs_stateid dstateid
;
5768 char sbuf
[64], *s
= sbuf
;
5769 uint32_t ace_type
, ace_flags
, ace_mask
, len
, slen
= sizeof(sbuf
);
5770 struct kauth_ace ace
;
5771 struct nfsreq_secinfo_args si
;
5774 if (nfs_mount_gone(nmp
)) {
5777 nfsvers
= nmp
->nm_vers
;
5779 if ((error
= nfs_open_owner_set_busy(noop
, NULL
))) {
5783 NVATTR_INIT(&nvattr
);
5784 delegation
= NFS_OPEN_DELEGATE_NONE
;
5785 dstateid
= np
->n_dstateid
;
5786 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
5788 nfsm_chain_null(&nmreq
);
5789 nfsm_chain_null(&nmrep
);
5791 // PUTFH, OPEN, GETATTR(FH)
5793 nfsm_chain_build_alloc_init(error
, &nmreq
, 48 * NFSX_UNSIGNED
);
5794 nfsm_chain_add_compound_header(error
, &nmreq
, "open_reclaim", nmp
->nm_minor_vers
, numops
);
5796 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5797 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
5799 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
5800 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
5801 nfsm_chain_add_32(error
, &nmreq
, share_access
);
5802 nfsm_chain_add_32(error
, &nmreq
, share_deny
);
5803 // open owner: clientid + uid
5804 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
); // open_owner4.clientid
5805 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
5806 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
)); // open_owner4.owner
5808 nfsm_chain_add_32(error
, &nmreq
, NFS_OPEN_NOCREATE
);
5810 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_PREVIOUS
);
5811 delegation
= (np
->n_openflags
& N_DELEG_READ
) ? NFS_OPEN_DELEGATE_READ
:
5812 (np
->n_openflags
& N_DELEG_WRITE
) ? NFS_OPEN_DELEGATE_WRITE
:
5813 NFS_OPEN_DELEGATE_NONE
;
5814 nfsm_chain_add_32(error
, &nmreq
, delegation
);
5815 delegation
= NFS_OPEN_DELEGATE_NONE
;
5817 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
5818 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
5819 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
5820 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, np
);
5821 nfsm_chain_build_done(error
, &nmreq
);
5822 nfsm_assert(error
, (numops
== 0), EPROTO
);
5825 error
= nfs_request2(np
, nmp
->nm_mountp
, &nmreq
, NFSPROC4_COMPOUND
, current_thread(),
5826 noop
->noo_cred
, &si
, R_RECOVER
| R_NOINTR
, &nmrep
, &xid
, &status
);
5828 if ((lockerror
= nfs_node_lock(np
))) {
5831 nfsm_chain_skip_tag(error
, &nmrep
);
5832 nfsm_chain_get_32(error
, &nmrep
, numops
);
5833 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
5835 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
5836 nfs_owner_seqid_increment(noop
, NULL
, error
);
5837 nfsm_chain_get_stateid(error
, &nmrep
, &nofp
->nof_stateid
);
5838 nfsm_chain_check_change_info(error
, &nmrep
, np
);
5839 nfsm_chain_get_32(error
, &nmrep
, rflags
);
5840 bmlen
= NFS_ATTR_BITMAP_LEN
;
5841 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
5842 nfsm_chain_get_32(error
, &nmrep
, delegation
);
5844 switch (delegation
) {
5845 case NFS_OPEN_DELEGATE_NONE
:
5846 if (np
->n_openflags
& N_DELEG_MASK
) {
5848 * Hey! We were supposed to get our delegation back even
5849 * if it was getting immediately recalled. Bad server!
5851 * Just try to return the existing delegation.
5853 // NP(np, "nfs: open reclaim didn't return delegation?");
5854 delegation
= (np
->n_openflags
& N_DELEG_WRITE
) ? NFS_OPEN_DELEGATE_WRITE
: NFS_OPEN_DELEGATE_READ
;
5858 case NFS_OPEN_DELEGATE_READ
:
5859 case NFS_OPEN_DELEGATE_WRITE
:
5860 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
5861 nfsm_chain_get_32(error
, &nmrep
, recall
);
5862 if (delegation
== NFS_OPEN_DELEGATE_WRITE
) { // space (skip) XXX
5863 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
5865 /* if we have any trouble accepting the ACE, just invalidate it */
5866 ace_type
= ace_flags
= ace_mask
= len
= 0;
5867 nfsm_chain_get_32(error
, &nmrep
, ace_type
);
5868 nfsm_chain_get_32(error
, &nmrep
, ace_flags
);
5869 nfsm_chain_get_32(error
, &nmrep
, ace_mask
);
5870 nfsm_chain_get_32(error
, &nmrep
, len
);
5871 ace
.ace_flags
= nfs4_ace_nfstype_to_vfstype(ace_type
, &error
);
5872 ace
.ace_flags
|= nfs4_ace_nfsflags_to_vfsflags(ace_flags
);
5873 ace
.ace_rights
= nfs4_ace_nfsmask_to_vfsrights(ace_mask
);
5874 if (!error
&& (len
>= slen
)) {
5875 MALLOC(s
, char*, len
+ 1, M_TEMP
, M_WAITOK
);
5883 nfsm_chain_get_opaque(error
, &nmrep
, len
, s
);
5885 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(len
));
5889 if (nfs4_id2guid(s
, &ace
.ace_applicable
, (ace_flags
& NFS_ACE_IDENTIFIER_GROUP
))) {
5896 if (s
&& (s
!= sbuf
)) {
5900 /* stuff the delegation state in the node */
5901 lck_mtx_lock(&np
->n_openlock
);
5902 np
->n_openflags
&= ~N_DELEG_MASK
;
5903 np
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
5904 np
->n_dstateid
= dstateid
;
5906 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5907 lck_mtx_lock(&nmp
->nm_lock
);
5908 if (np
->n_dlink
.tqe_next
== NFSNOLIST
) {
5909 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, np
, n_dlink
);
5911 lck_mtx_unlock(&nmp
->nm_lock
);
5913 lck_mtx_unlock(&np
->n_openlock
);
5922 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
5923 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
5925 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
5926 NP(np
, "nfs: open reclaim didn't return filehandle?");
5930 if (!NFS_CMPFH(np
, fh
.fh_data
, fh
.fh_len
)) {
5931 // XXX what if fh doesn't match the vnode we think we're re-opening?
5932 // That should be pretty hard in this case, given that we are doing
5933 // the open reclaim using the file handle (and not a dir/name pair).
5934 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5935 if (!(np
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
5936 NP(np
, "nfs4_open_reclaim_rpc: warning: file handle mismatch");
5939 error
= nfs_loadattrcache(np
, &nvattr
, &xid
, 1);
5941 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
) {
5942 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
5946 // NP(np, "nfs: open reclaim (%d, %d) succeeded", share_access, share_deny);
5947 NVATTR_CLEANUP(&nvattr
);
5948 nfsm_chain_cleanup(&nmreq
);
5949 nfsm_chain_cleanup(&nmrep
);
5951 nfs_node_unlock(np
);
5953 nfs_open_owner_clear_busy(noop
);
5954 if ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
)) {
5956 nfs4_delegation_return_enqueue(np
);
5963 nfs4_open_downgrade_rpc(
5965 struct nfs_open_file
*nofp
,
5968 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
5969 struct nfsmount
*nmp
;
5970 int error
, lockerror
= ENOENT
, status
, nfsvers
, numops
;
5971 struct nfsm_chain nmreq
, nmrep
;
5973 struct nfsreq_secinfo_args si
;
5976 if (nfs_mount_gone(nmp
)) {
5979 nfsvers
= nmp
->nm_vers
;
5981 if ((error
= nfs_open_owner_set_busy(noop
, NULL
))) {
5985 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
5986 nfsm_chain_null(&nmreq
);
5987 nfsm_chain_null(&nmrep
);
5989 // PUTFH, OPEN_DOWNGRADE, GETATTR
5991 nfsm_chain_build_alloc_init(error
, &nmreq
, 23 * NFSX_UNSIGNED
);
5992 nfsm_chain_add_compound_header(error
, &nmreq
, "open_downgrd", nmp
->nm_minor_vers
, numops
);
5994 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
5995 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
5997 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN_DOWNGRADE
);
5998 nfsm_chain_add_stateid(error
, &nmreq
, &nofp
->nof_stateid
);
5999 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
6000 nfsm_chain_add_32(error
, &nmreq
, nofp
->nof_access
);
6001 nfsm_chain_add_32(error
, &nmreq
, nofp
->nof_deny
);
6003 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
6004 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
6005 nfsm_chain_build_done(error
, &nmreq
);
6006 nfsm_assert(error
, (numops
== 0), EPROTO
);
6008 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
6009 vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
6010 &si
, R_NOINTR
, &nmrep
, &xid
, &status
);
6012 if ((lockerror
= nfs_node_lock(np
))) {
6015 nfsm_chain_skip_tag(error
, &nmrep
);
6016 nfsm_chain_get_32(error
, &nmrep
, numops
);
6017 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
6019 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN_DOWNGRADE
);
6020 nfs_owner_seqid_increment(noop
, NULL
, error
);
6021 nfsm_chain_get_stateid(error
, &nmrep
, &nofp
->nof_stateid
);
6022 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
6023 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
6026 nfs_node_unlock(np
);
6028 nfs_open_owner_clear_busy(noop
);
6029 nfsm_chain_cleanup(&nmreq
);
6030 nfsm_chain_cleanup(&nmrep
);
6037 struct nfs_open_file
*nofp
,
6042 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
6043 struct nfsmount
*nmp
;
6044 int error
, lockerror
= ENOENT
, status
, nfsvers
, numops
;
6045 struct nfsm_chain nmreq
, nmrep
;
6047 struct nfsreq_secinfo_args si
;
6050 if (nfs_mount_gone(nmp
)) {
6053 nfsvers
= nmp
->nm_vers
;
6055 if ((error
= nfs_open_owner_set_busy(noop
, NULL
))) {
6059 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
6060 nfsm_chain_null(&nmreq
);
6061 nfsm_chain_null(&nmrep
);
6063 // PUTFH, CLOSE, GETATTR
6065 nfsm_chain_build_alloc_init(error
, &nmreq
, 23 * NFSX_UNSIGNED
);
6066 nfsm_chain_add_compound_header(error
, &nmreq
, "close", nmp
->nm_minor_vers
, numops
);
6068 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
6069 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
6071 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_CLOSE
);
6072 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
6073 nfsm_chain_add_stateid(error
, &nmreq
, &nofp
->nof_stateid
);
6075 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
6076 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
6077 nfsm_chain_build_done(error
, &nmreq
);
6078 nfsm_assert(error
, (numops
== 0), EPROTO
);
6080 error
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, flags
| R_NOINTR
, &nmrep
, &xid
, &status
);
6082 if ((lockerror
= nfs_node_lock(np
))) {
6085 nfsm_chain_skip_tag(error
, &nmrep
);
6086 nfsm_chain_get_32(error
, &nmrep
, numops
);
6087 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
6089 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_CLOSE
);
6090 nfs_owner_seqid_increment(noop
, NULL
, error
);
6091 nfsm_chain_get_stateid(error
, &nmrep
, &nofp
->nof_stateid
);
6092 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
6093 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
6096 nfs_node_unlock(np
);
6098 nfs_open_owner_clear_busy(noop
);
6099 nfsm_chain_cleanup(&nmreq
);
6100 nfsm_chain_cleanup(&nmrep
);
6106 * Claim the delegated open combinations this open file holds.
6109 nfs4_claim_delegated_state_for_open_file(struct nfs_open_file
*nofp
, int flags
)
6111 struct nfs_open_owner
*noop
= nofp
->nof_owner
;
6112 struct nfs_lock_owner
*nlop
;
6113 struct nfs_file_lock
*nflp
, *nextnflp
;
6114 struct nfsmount
*nmp
;
6115 int error
= 0, reopen
= 0;
6117 if (nofp
->nof_d_rw_drw
) {
6118 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_BOTH
, flags
);
6120 lck_mtx_lock(&nofp
->nof_lock
);
6121 nofp
->nof_rw_drw
+= nofp
->nof_d_rw_drw
;
6122 nofp
->nof_d_rw_drw
= 0;
6123 lck_mtx_unlock(&nofp
->nof_lock
);
6126 if (!error
&& nofp
->nof_d_w_drw
) {
6127 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_WRITE
, NFS_OPEN_SHARE_DENY_BOTH
, flags
);
6129 lck_mtx_lock(&nofp
->nof_lock
);
6130 nofp
->nof_w_drw
+= nofp
->nof_d_w_drw
;
6131 nofp
->nof_d_w_drw
= 0;
6132 lck_mtx_unlock(&nofp
->nof_lock
);
6135 if (!error
&& nofp
->nof_d_r_drw
) {
6136 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_BOTH
, flags
);
6138 lck_mtx_lock(&nofp
->nof_lock
);
6139 nofp
->nof_r_drw
+= nofp
->nof_d_r_drw
;
6140 nofp
->nof_d_r_drw
= 0;
6141 lck_mtx_unlock(&nofp
->nof_lock
);
6144 if (!error
&& nofp
->nof_d_rw_dw
) {
6145 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_WRITE
, flags
);
6147 lck_mtx_lock(&nofp
->nof_lock
);
6148 nofp
->nof_rw_dw
+= nofp
->nof_d_rw_dw
;
6149 nofp
->nof_d_rw_dw
= 0;
6150 lck_mtx_unlock(&nofp
->nof_lock
);
6153 if (!error
&& nofp
->nof_d_w_dw
) {
6154 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_WRITE
, NFS_OPEN_SHARE_DENY_WRITE
, flags
);
6156 lck_mtx_lock(&nofp
->nof_lock
);
6157 nofp
->nof_w_dw
+= nofp
->nof_d_w_dw
;
6158 nofp
->nof_d_w_dw
= 0;
6159 lck_mtx_unlock(&nofp
->nof_lock
);
6162 if (!error
&& nofp
->nof_d_r_dw
) {
6163 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_WRITE
, flags
);
6165 lck_mtx_lock(&nofp
->nof_lock
);
6166 nofp
->nof_r_dw
+= nofp
->nof_d_r_dw
;
6167 nofp
->nof_d_r_dw
= 0;
6168 lck_mtx_unlock(&nofp
->nof_lock
);
6171 /* non-deny-mode opens may be reopened if no locks are held */
6172 if (!error
&& nofp
->nof_d_rw
) {
6173 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_NONE
, flags
);
6174 /* for some errors, we should just try reopening the file */
6175 if (nfs_mount_state_error_delegation_lost(error
)) {
6178 if (!error
|| reopen
) {
6179 lck_mtx_lock(&nofp
->nof_lock
);
6180 nofp
->nof_rw
+= nofp
->nof_d_rw
;
6182 lck_mtx_unlock(&nofp
->nof_lock
);
6185 /* if we've already set reopen, we should move these other two opens from delegated to not delegated */
6186 if ((!error
|| reopen
) && nofp
->nof_d_w
) {
6188 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_WRITE
, NFS_OPEN_SHARE_DENY_NONE
, flags
);
6189 /* for some errors, we should just try reopening the file */
6190 if (nfs_mount_state_error_delegation_lost(error
)) {
6194 if (!error
|| reopen
) {
6195 lck_mtx_lock(&nofp
->nof_lock
);
6196 nofp
->nof_w
+= nofp
->nof_d_w
;
6198 lck_mtx_unlock(&nofp
->nof_lock
);
6201 if ((!error
|| reopen
) && nofp
->nof_d_r
) {
6203 error
= nfs4_claim_delegated_open_rpc(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, flags
);
6204 /* for some errors, we should just try reopening the file */
6205 if (nfs_mount_state_error_delegation_lost(error
)) {
6209 if (!error
|| reopen
) {
6210 lck_mtx_lock(&nofp
->nof_lock
);
6211 nofp
->nof_r
+= nofp
->nof_d_r
;
6213 lck_mtx_unlock(&nofp
->nof_lock
);
6219 * Any problems with the delegation probably indicates that we
6220 * should review/return all of our current delegation state.
6222 if ((nmp
= NFSTONMP(nofp
->nof_np
))) {
6223 nfs4_delegation_return_enqueue(nofp
->nof_np
);
6224 lck_mtx_lock(&nmp
->nm_lock
);
6225 nfs_need_recover(nmp
, NFSERR_EXPIRED
);
6226 lck_mtx_unlock(&nmp
->nm_lock
);
6228 if (reopen
&& (nfs_check_for_locks(noop
, nofp
) == 0)) {
6229 /* just reopen the file on next access */
6230 NP(nofp
->nof_np
, "nfs4_claim_delegated_state_for_open_file: %d, need reopen, %d",
6231 reopen
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
6232 lck_mtx_lock(&nofp
->nof_lock
);
6233 nofp
->nof_flags
|= NFS_OPEN_FILE_REOPEN
;
6234 lck_mtx_unlock(&nofp
->nof_lock
);
6238 NP(nofp
->nof_np
, "nfs4_claim_delegated_state_for_open_file: %d, locks prevent reopen, %d",
6239 reopen
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
6243 if (!error
&& ((nmp
= NFSTONMP(nofp
->nof_np
)))) {
6244 /* claim delegated locks */
6245 TAILQ_FOREACH(nlop
, &nofp
->nof_np
->n_lock_owners
, nlo_link
) {
6246 if (nlop
->nlo_open_owner
!= noop
) {
6249 TAILQ_FOREACH_SAFE(nflp
, &nlop
->nlo_locks
, nfl_lolink
, nextnflp
) {
6250 /* skip dead & blocked lock requests (shouldn't be any in the held lock list) */
6251 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_DEAD
| NFS_FILE_LOCK_BLOCKED
)) {
6254 /* skip non-delegated locks */
6255 if (!(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
6258 error
= nmp
->nm_funcs
->nf_setlock_rpc(nofp
->nof_np
, nofp
, nflp
, 0, flags
, current_thread(), noop
->noo_cred
);
6260 NP(nofp
->nof_np
, "nfs: delegated lock claim (0x%llx, 0x%llx) failed %d, %d",
6261 nflp
->nfl_start
, nflp
->nfl_end
, error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
6265 // NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) succeeded, %d",
6266 // nflp->nfl_start, nflp->nfl_end, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6275 if (!error
) { /* all state claimed successfully! */
6279 /* restart if it looks like a problem more than just losing the delegation */
6280 if (!nfs_mount_state_error_delegation_lost(error
) &&
6281 ((error
== ETIMEDOUT
) || nfs_mount_state_error_should_restart(error
))) {
6282 NP(nofp
->nof_np
, "nfs delegated lock claim error %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
6283 if ((error
== ETIMEDOUT
) && ((nmp
= NFSTONMP(nofp
->nof_np
)))) {
6284 nfs_need_reconnect(nmp
);
6289 /* delegated state lost (once held but now not claimable) */
6290 NP(nofp
->nof_np
, "nfs delegated state claim error %d, state lost, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
));
6293 * Any problems with the delegation probably indicates that we
6294 * should review/return all of our current delegation state.
6296 if ((nmp
= NFSTONMP(nofp
->nof_np
))) {
6297 nfs4_delegation_return_enqueue(nofp
->nof_np
);
6298 lck_mtx_lock(&nmp
->nm_lock
);
6299 nfs_need_recover(nmp
, NFSERR_EXPIRED
);
6300 lck_mtx_unlock(&nmp
->nm_lock
);
6303 /* revoke all open file state */
6304 nfs_revoke_open_state_for_node(nofp
->nof_np
);
6308 #endif /* CONFIG_NFS4*/
6311 * Release all open state for the given node.
6314 nfs_release_open_state_for_node(nfsnode_t np
, int force
)
6316 struct nfsmount
*nmp
= NFSTONMP(np
);
6317 struct nfs_open_file
*nofp
;
6318 struct nfs_file_lock
*nflp
, *nextnflp
;
6320 /* drop held locks */
6321 TAILQ_FOREACH_SAFE(nflp
, &np
->n_locks
, nfl_link
, nextnflp
) {
6322 /* skip dead & blocked lock requests */
6323 if (nflp
->nfl_flags
& (NFS_FILE_LOCK_DEAD
| NFS_FILE_LOCK_BLOCKED
)) {
6326 /* send an unlock if not a delegated lock */
6327 if (!force
&& nmp
&& !(nflp
->nfl_flags
& NFS_FILE_LOCK_DELEGATED
)) {
6328 nmp
->nm_funcs
->nf_unlock_rpc(np
, nflp
->nfl_owner
, F_WRLCK
, nflp
->nfl_start
, nflp
->nfl_end
, R_RECOVER
,
6329 NULL
, nflp
->nfl_owner
->nlo_open_owner
->noo_cred
);
6331 /* kill/remove the lock */
6332 lck_mtx_lock(&np
->n_openlock
);
6333 nflp
->nfl_flags
|= NFS_FILE_LOCK_DEAD
;
6334 lck_mtx_lock(&nflp
->nfl_owner
->nlo_lock
);
6335 TAILQ_REMOVE(&nflp
->nfl_owner
->nlo_locks
, nflp
, nfl_lolink
);
6336 lck_mtx_unlock(&nflp
->nfl_owner
->nlo_lock
);
6337 if (nflp
->nfl_blockcnt
) {
6338 /* wake up anyone blocked on this lock */
6341 /* remove nflp from lock list and destroy */
6342 TAILQ_REMOVE(&np
->n_locks
, nflp
, nfl_link
);
6343 nfs_file_lock_destroy(nflp
);
6345 lck_mtx_unlock(&np
->n_openlock
);
6348 lck_mtx_lock(&np
->n_openlock
);
6350 /* drop all opens */
6351 TAILQ_FOREACH(nofp
, &np
->n_opens
, nof_link
) {
6352 if (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
) {
6355 /* mark open state as lost */
6356 lck_mtx_lock(&nofp
->nof_lock
);
6357 nofp
->nof_flags
&= ~NFS_OPEN_FILE_REOPEN
;
6358 nofp
->nof_flags
|= NFS_OPEN_FILE_LOST
;
6360 lck_mtx_unlock(&nofp
->nof_lock
);
6362 if (!force
&& nmp
&& (nmp
->nm_vers
>= NFS_VER4
)) {
6363 nfs4_close_rpc(np
, nofp
, NULL
, nofp
->nof_owner
->noo_cred
, R_RECOVER
);
6368 lck_mtx_unlock(&np
->n_openlock
);
6372 * State for a node has been lost, drop it, and revoke the node.
6373 * Attempt to return any state if possible in case the server
6374 * might somehow think we hold it.
6377 nfs_revoke_open_state_for_node(nfsnode_t np
)
6379 struct nfsmount
*nmp
;
6381 /* mark node as needing to be revoked */
6382 nfs_node_lock_force(np
);
6383 if (np
->n_flag
& NREVOKE
) { /* already revoked? */
6384 NP(np
, "nfs_revoke_open_state_for_node(): already revoked");
6385 nfs_node_unlock(np
);
6388 np
->n_flag
|= NREVOKE
;
6389 nfs_node_unlock(np
);
6391 nfs_release_open_state_for_node(np
, 0);
6392 NP(np
, "nfs: state lost for %p 0x%x", np
, np
->n_flag
);
6394 /* mark mount as needing a revoke scan and have the socket thread do it. */
6395 if ((nmp
= NFSTONMP(np
))) {
6396 lck_mtx_lock(&nmp
->nm_lock
);
6397 nmp
->nm_state
|= NFSSTA_REVOKE
;
6398 nfs_mount_sock_thread_wake(nmp
);
6399 lck_mtx_unlock(&nmp
->nm_lock
);
6405 * Claim the delegated open combinations that each of this node's open files hold.
6408 nfs4_claim_delegated_state_for_node(nfsnode_t np
, int flags
)
6410 struct nfs_open_file
*nofp
;
6413 lck_mtx_lock(&np
->n_openlock
);
6415 /* walk the open file list looking for opens with delegated state to claim */
6417 TAILQ_FOREACH(nofp
, &np
->n_opens
, nof_link
) {
6418 if (!nofp
->nof_d_rw_drw
&& !nofp
->nof_d_w_drw
&& !nofp
->nof_d_r_drw
&&
6419 !nofp
->nof_d_rw_dw
&& !nofp
->nof_d_w_dw
&& !nofp
->nof_d_r_dw
&&
6420 !nofp
->nof_d_rw
&& !nofp
->nof_d_w
&& !nofp
->nof_d_r
) {
6423 lck_mtx_unlock(&np
->n_openlock
);
6424 error
= nfs4_claim_delegated_state_for_open_file(nofp
, flags
);
6425 lck_mtx_lock(&np
->n_openlock
);
6432 lck_mtx_unlock(&np
->n_openlock
);
6438 * Mark a node as needed to have its delegation returned.
6439 * Queue it up on the delegation return queue.
6440 * Make sure the thread is running.
6443 nfs4_delegation_return_enqueue(nfsnode_t np
)
6445 struct nfsmount
*nmp
;
6448 if (nfs_mount_gone(nmp
)) {
6452 lck_mtx_lock(&np
->n_openlock
);
6453 np
->n_openflags
|= N_DELEG_RETURN
;
6454 lck_mtx_unlock(&np
->n_openlock
);
6456 lck_mtx_lock(&nmp
->nm_lock
);
6457 if (np
->n_dreturn
.tqe_next
== NFSNOLIST
) {
6458 TAILQ_INSERT_TAIL(&nmp
->nm_dreturnq
, np
, n_dreturn
);
6460 nfs_mount_sock_thread_wake(nmp
);
6461 lck_mtx_unlock(&nmp
->nm_lock
);
6465 * return any delegation we may have for the given node
6468 nfs4_delegation_return(nfsnode_t np
, int flags
, thread_t thd
, kauth_cred_t cred
)
6470 struct nfsmount
*nmp
;
6472 nfs_stateid dstateid
;
6476 if (nfs_mount_gone(nmp
)) {
6480 /* first, make sure the node's marked for delegation return */
6481 lck_mtx_lock(&np
->n_openlock
);
6482 np
->n_openflags
|= (N_DELEG_RETURN
| N_DELEG_RETURNING
);
6483 lck_mtx_unlock(&np
->n_openlock
);
6485 /* make sure nobody else is using the delegation state */
6486 if ((error
= nfs_open_state_set_busy(np
, NULL
))) {
6490 /* claim any delegated state */
6491 if ((error
= nfs4_claim_delegated_state_for_node(np
, flags
))) {
6495 /* return the delegation */
6496 lck_mtx_lock(&np
->n_openlock
);
6497 dstateid
= np
->n_dstateid
;
6498 fh
.fh_len
= np
->n_fhsize
;
6499 bcopy(np
->n_fhp
, &fh
.fh_data
, fh
.fh_len
);
6500 lck_mtx_unlock(&np
->n_openlock
);
6501 error
= nfs4_delegreturn_rpc(NFSTONMP(np
), fh
.fh_data
, fh
.fh_len
, &dstateid
, flags
, thd
, cred
);
6502 /* assume delegation is gone for all errors except ETIMEDOUT, NFSERR_*MOVED */
6503 if ((error
!= ETIMEDOUT
) && (error
!= NFSERR_MOVED
) && (error
!= NFSERR_LEASE_MOVED
)) {
6504 lck_mtx_lock(&np
->n_openlock
);
6505 np
->n_openflags
&= ~N_DELEG_MASK
;
6506 lck_mtx_lock(&nmp
->nm_lock
);
6507 if (np
->n_dlink
.tqe_next
!= NFSNOLIST
) {
6508 TAILQ_REMOVE(&nmp
->nm_delegations
, np
, n_dlink
);
6509 np
->n_dlink
.tqe_next
= NFSNOLIST
;
6511 lck_mtx_unlock(&nmp
->nm_lock
);
6512 lck_mtx_unlock(&np
->n_openlock
);
6516 /* make sure it's no longer on the return queue and clear the return flags */
6517 lck_mtx_lock(&nmp
->nm_lock
);
6518 if (np
->n_dreturn
.tqe_next
!= NFSNOLIST
) {
6519 TAILQ_REMOVE(&nmp
->nm_dreturnq
, np
, n_dreturn
);
6520 np
->n_dreturn
.tqe_next
= NFSNOLIST
;
6522 lck_mtx_unlock(&nmp
->nm_lock
);
6523 lck_mtx_lock(&np
->n_openlock
);
6524 np
->n_openflags
&= ~(N_DELEG_RETURN
| N_DELEG_RETURNING
);
6525 lck_mtx_unlock(&np
->n_openlock
);
6528 NP(np
, "nfs4_delegation_return, error %d", error
);
6529 if (error
== ETIMEDOUT
) {
6530 nfs_need_reconnect(nmp
);
6532 if (nfs_mount_state_error_should_restart(error
)) {
6533 /* make sure recovery happens */
6534 lck_mtx_lock(&nmp
->nm_lock
);
6535 nfs_need_recover(nmp
, nfs_mount_state_error_delegation_lost(error
) ? NFSERR_EXPIRED
: 0);
6536 lck_mtx_unlock(&nmp
->nm_lock
);
6540 nfs_open_state_clear_busy(np
);
6546 * RPC to return a delegation for a file handle
6549 nfs4_delegreturn_rpc(struct nfsmount
*nmp
, u_char
*fhp
, int fhlen
, struct nfs_stateid
*sid
, int flags
, thread_t thd
, kauth_cred_t cred
)
6551 int error
= 0, status
, numops
;
6553 struct nfsm_chain nmreq
, nmrep
;
6554 struct nfsreq_secinfo_args si
;
6556 NFSREQ_SECINFO_SET(&si
, NULL
, fhp
, fhlen
, NULL
, 0);
6557 nfsm_chain_null(&nmreq
);
6558 nfsm_chain_null(&nmrep
);
6560 // PUTFH, DELEGRETURN
6562 nfsm_chain_build_alloc_init(error
, &nmreq
, 16 * NFSX_UNSIGNED
);
6563 nfsm_chain_add_compound_header(error
, &nmreq
, "delegreturn", nmp
->nm_minor_vers
, numops
);
6565 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
6566 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, fhp
, fhlen
);
6568 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_DELEGRETURN
);
6569 nfsm_chain_add_stateid(error
, &nmreq
, sid
);
6570 nfsm_chain_build_done(error
, &nmreq
);
6571 nfsm_assert(error
, (numops
== 0), EPROTO
);
6573 error
= nfs_request2(NULL
, nmp
->nm_mountp
, &nmreq
, NFSPROC4_COMPOUND
, thd
, cred
, &si
, flags
, &nmrep
, &xid
, &status
);
6574 nfsm_chain_skip_tag(error
, &nmrep
);
6575 nfsm_chain_get_32(error
, &nmrep
, numops
);
6576 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
6577 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_DELEGRETURN
);
6579 nfsm_chain_cleanup(&nmreq
);
6580 nfsm_chain_cleanup(&nmrep
);
6583 #endif /* CONFIG_NFS4 */
6587 * Just call nfs_bioread() to do the work.
6589 * Note: the exec code paths have a tendency to call VNOP_READ (and VNOP_MMAP)
6590 * without first calling VNOP_OPEN, so we make sure the file is open here.
6594 struct vnop_read_args
/* {
6595 * struct vnodeop_desc *a_desc;
6597 * struct uio *a_uio;
6599 * vfs_context_t a_context;
6602 vnode_t vp
= ap
->a_vp
;
6603 vfs_context_t ctx
= ap
->a_context
;
6605 struct nfsmount
*nmp
;
6606 struct nfs_open_owner
*noop
;
6607 struct nfs_open_file
*nofp
;
6610 if (vnode_vtype(ap
->a_vp
) != VREG
) {
6611 return (vnode_vtype(vp
) == VDIR
) ? EISDIR
: EPERM
;
6616 if (nfs_mount_gone(nmp
)) {
6619 if (np
->n_flag
& NREVOKE
) {
6623 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1);
6628 error
= nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 1);
6629 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
6630 NP(np
, "nfs_vnop_read: LOST %d", kauth_cred_getuid(noop
->noo_cred
));
6634 if (!error
&& (nofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
6635 error
= nfs4_reopen(nofp
, vfs_context_thread(ctx
));
6643 nfs_open_owner_rele(noop
);
6647 * Since the read path is a hot path, if we already have
6648 * read access, lets go and try and do the read, without
6649 * busying the mount and open file node for this open owner.
6651 * N.B. This is inherently racy w.r.t. an execve using
6652 * an already open file, in that the read at the end of
6653 * this routine will be racing with a potential close.
6654 * The code below ultimately has the same problem. In practice
6655 * this does not seem to be an issue.
6657 if (nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_READ
) {
6658 nfs_open_owner_rele(noop
);
6661 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
6663 nfs_open_owner_rele(noop
);
6667 * If we don't have a file already open with the access we need (read) then
6668 * we need to open one. Otherwise we just co-opt an open. We might not already
6669 * have access because we're trying to read the first page of the
6672 error
= nfs_open_file_set_busy(nofp
, vfs_context_thread(ctx
));
6674 nfs_mount_state_in_use_end(nmp
, 0);
6675 nfs_open_owner_rele(noop
);
6678 if (!(nofp
->nof_access
& NFS_OPEN_SHARE_ACCESS_READ
)) {
6679 /* we don't have the file open, so open it for read access if we're not denied */
6680 if (nofp
->nof_flags
& NFS_OPEN_FILE_NEEDCLOSE
) {
6681 NP(np
, "nfs_vnop_read: File already needs close access: 0x%x, cred: %d thread: %lld",
6682 nofp
->nof_access
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
), thread_tid(vfs_context_thread(ctx
)));
6684 if (nofp
->nof_deny
& NFS_OPEN_SHARE_DENY_READ
) {
6685 nfs_open_file_clear_busy(nofp
);
6686 nfs_mount_state_in_use_end(nmp
, 0);
6687 nfs_open_owner_rele(noop
);
6690 if (np
->n_flag
& NREVOKE
) {
6692 nfs_open_file_clear_busy(nofp
);
6693 nfs_mount_state_in_use_end(nmp
, 0);
6694 nfs_open_owner_rele(noop
);
6697 if (nmp
->nm_vers
< NFS_VER4
) {
6698 /* NFS v2/v3 opens are always allowed - so just add it. */
6699 nfs_open_file_add_open(nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, 0);
6703 error
= nfs4_open(np
, nofp
, NFS_OPEN_SHARE_ACCESS_READ
, NFS_OPEN_SHARE_DENY_NONE
, ctx
);
6707 nofp
->nof_flags
|= NFS_OPEN_FILE_NEEDCLOSE
;
6711 nfs_open_file_clear_busy(nofp
);
6713 if (nfs_mount_state_in_use_end(nmp
, error
)) {
6717 nfs_open_owner_rele(noop
);
6722 return nfs_bioread(VTONFS(ap
->a_vp
), ap
->a_uio
, ap
->a_ioflag
, ap
->a_context
);
6727 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6728 * Files are created using the NFSv4 OPEN RPC. So we must open the
6729 * file to create it and then close it.
6733 struct vnop_create_args
/* {
6734 * struct vnodeop_desc *a_desc;
6737 * struct componentname *a_cnp;
6738 * struct vnode_attr *a_vap;
6739 * vfs_context_t a_context;
6742 vfs_context_t ctx
= ap
->a_context
;
6743 struct componentname
*cnp
= ap
->a_cnp
;
6744 struct vnode_attr
*vap
= ap
->a_vap
;
6745 vnode_t dvp
= ap
->a_dvp
;
6746 vnode_t
*vpp
= ap
->a_vpp
;
6747 struct nfsmount
*nmp
;
6749 int error
= 0, busyerror
= 0, accessMode
, denyMode
;
6750 struct nfs_open_owner
*noop
= NULL
;
6751 struct nfs_open_file
*newnofp
= NULL
, *nofp
= NULL
;
6754 if (nfs_mount_gone(nmp
)) {
6759 nfs_avoid_needless_id_setting_on_create(VTONFS(dvp
), vap
, ctx
);
6762 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1);
6768 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
6770 nfs_open_owner_rele(noop
);
6774 /* grab a provisional, nodeless open file */
6775 error
= nfs_open_file_find(NULL
, noop
, &newnofp
, 0, 0, 1);
6776 if (!error
&& (newnofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
6777 printf("nfs_vnop_create: LOST\n");
6780 if (!error
&& (newnofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
6781 /* This shouldn't happen given that this is a new, nodeless nofp */
6782 nfs_mount_state_in_use_end(nmp
, 0);
6783 error
= nfs4_reopen(newnofp
, vfs_context_thread(ctx
));
6784 nfs_open_file_destroy(newnofp
);
6791 error
= nfs_open_file_set_busy(newnofp
, vfs_context_thread(ctx
));
6795 nfs_open_file_destroy(newnofp
);
6802 * We're just trying to create the file.
6803 * We'll create/open it RW, and set NFS_OPEN_FILE_CREATE.
6805 accessMode
= NFS_OPEN_SHARE_ACCESS_BOTH
;
6806 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
6808 /* Do the open/create */
6809 error
= nfs4_open_rpc(newnofp
, ctx
, cnp
, vap
, dvp
, vpp
, NFS_OPEN_CREATE
, accessMode
, denyMode
);
6810 if ((error
== EACCES
) && vap
&& !(vap
->va_vaflags
& VA_EXCLUSIVE
) &&
6811 VATTR_IS_ACTIVE(vap
, va_mode
) && !(vap
->va_mode
& S_IWUSR
)) {
6813 * Hmm... it looks like we may have a situation where the request was
6814 * retransmitted because we didn't get the first response which successfully
6815 * created/opened the file and then the second time we were denied the open
6816 * because the mode the file was created with doesn't allow write access.
6818 * We'll try to work around this by temporarily updating the mode and
6819 * retrying the open.
6821 struct vnode_attr vattr
;
6823 /* first make sure it's there */
6824 int error2
= nfs_lookitup(VTONFS(dvp
), cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
, &np
);
6825 if (!error2
&& np
) {
6826 nfs_node_unlock(np
);
6828 if (vnode_vtype(NFSTOV(np
)) == VREG
) {
6830 VATTR_SET(&vattr
, va_mode
, (vap
->va_mode
| S_IWUSR
));
6831 if (!nfs4_setattr_rpc(np
, &vattr
, ctx
)) {
6832 error2
= nfs4_open_rpc(newnofp
, ctx
, cnp
, NULL
, dvp
, vpp
, NFS_OPEN_NOCREATE
, accessMode
, denyMode
);
6834 VATTR_SET(&vattr
, va_mode
, vap
->va_mode
);
6835 nfs4_setattr_rpc(np
, &vattr
, ctx
);
6847 if (!error
&& !*vpp
) {
6848 printf("nfs4_open_rpc returned without a node?\n");
6849 /* Hmmm... with no node, we have no filehandle and can't close it */
6853 /* need to cleanup our temporary nofp */
6854 nfs_open_file_clear_busy(newnofp
);
6855 nfs_open_file_destroy(newnofp
);
6859 /* After we have a node, add our open file struct to the node */
6861 nfs_open_file_add_open(newnofp
, accessMode
, denyMode
, 0);
6863 error
= nfs_open_file_find_internal(np
, noop
, &nofp
, 0, 0, 0);
6865 /* This shouldn't happen, because we passed in a new nofp to use. */
6866 printf("nfs_open_file_find_internal failed! %d\n", error
);
6868 } else if (nofp
!= newnofp
) {
6870 * Hmm... an open file struct already exists.
6871 * Mark the existing one busy and merge our open into it.
6872 * Then destroy the one we created.
6873 * Note: there's no chance of an open confict because the
6874 * open has already been granted.
6876 busyerror
= nfs_open_file_set_busy(nofp
, NULL
);
6877 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, 0);
6878 nofp
->nof_stateid
= newnofp
->nof_stateid
;
6879 if (newnofp
->nof_flags
& NFS_OPEN_FILE_POSIXLOCK
) {
6880 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
6882 nfs_open_file_clear_busy(newnofp
);
6883 nfs_open_file_destroy(newnofp
);
6886 /* mark the node as holding a create-initiated open */
6887 nofp
->nof_flags
|= NFS_OPEN_FILE_CREATE
;
6888 nofp
->nof_creator
= current_thread();
6890 if (nofp
&& !busyerror
) {
6891 nfs_open_file_clear_busy(nofp
);
6893 if (nfs_mount_state_in_use_end(nmp
, error
)) {
6894 nofp
= newnofp
= NULL
;
6899 nfs_open_owner_rele(noop
);
6905 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6911 struct componentname
*cnp
,
6912 struct vnode_attr
*vap
,
6917 struct nfsmount
*nmp
;
6918 struct nfs_vattr nvattr
;
6919 int error
= 0, create_error
= EIO
, lockerror
= ENOENT
, busyerror
= ENOENT
, status
;
6920 int nfsvers
, namedattrs
, numops
;
6921 u_int64_t xid
, savedxid
= 0;
6922 nfsnode_t np
= NULL
;
6923 vnode_t newvp
= NULL
;
6924 struct nfsm_chain nmreq
, nmrep
;
6925 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
6929 struct nfsreq rq
, *req
= &rq
;
6930 struct nfs_dulookup dul
;
6931 struct nfsreq_secinfo_args si
;
6933 nmp
= NFSTONMP(dnp
);
6934 if (nfs_mount_gone(nmp
)) {
6937 nfsvers
= nmp
->nm_vers
;
6938 namedattrs
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
);
6939 if (dnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
6943 sd
.specdata1
= sd
.specdata2
= 0;
6952 if (!VATTR_IS_ACTIVE(vap
, va_rdev
)) {
6955 sd
.specdata1
= major(vap
->va_rdev
);
6956 sd
.specdata2
= minor(vap
->va_rdev
);
6969 nfs_avoid_needless_id_setting_on_create(dnp
, vap
, ctx
);
6971 error
= busyerror
= nfs_node_set_busy(dnp
, vfs_context_thread(ctx
));
6973 nfs_dulookup_init(&dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
);
6976 NFSREQ_SECINFO_SET(&si
, dnp
, NULL
, 0, NULL
, 0);
6977 NVATTR_INIT(&nvattr
);
6978 nfsm_chain_null(&nmreq
);
6979 nfsm_chain_null(&nmrep
);
6981 // PUTFH, SAVEFH, CREATE, GETATTR(FH), RESTOREFH, GETATTR
6983 nfsm_chain_build_alloc_init(error
, &nmreq
, 66 * NFSX_UNSIGNED
);
6984 nfsm_chain_add_compound_header(error
, &nmreq
, tag
, nmp
->nm_minor_vers
, numops
);
6986 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
6987 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
);
6989 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
6991 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_CREATE
);
6992 nfsm_chain_add_32(error
, &nmreq
, type
);
6993 if (type
== NFLNK
) {
6994 nfsm_chain_add_name(error
, &nmreq
, link
, strlen(link
), nmp
);
6995 } else if ((type
== NFBLK
) || (type
== NFCHR
)) {
6996 nfsm_chain_add_32(error
, &nmreq
, sd
.specdata1
);
6997 nfsm_chain_add_32(error
, &nmreq
, sd
.specdata2
);
6999 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
7000 nfsm_chain_add_fattr4(error
, &nmreq
, vap
, nmp
);
7002 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7003 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
7004 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
7005 nfsm_chain_add_bitmap_supported(error
, &nmreq
, bitmap
, nmp
, NULL
);
7007 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
7009 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7010 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, dnp
);
7011 nfsm_chain_build_done(error
, &nmreq
);
7012 nfsm_assert(error
, (numops
== 0), EPROTO
);
7015 error
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
7016 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &si
, 0, NULL
, &req
);
7019 nfs_dulookup_start(&dul
, dnp
, ctx
);
7021 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
7024 if ((lockerror
= nfs_node_lock(dnp
))) {
7027 nfsm_chain_skip_tag(error
, &nmrep
);
7028 nfsm_chain_get_32(error
, &nmrep
, numops
);
7029 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
7030 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
7032 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_CREATE
);
7033 nfsm_chain_check_change_info(error
, &nmrep
, dnp
);
7034 bmlen
= NFS_ATTR_BITMAP_LEN
;
7035 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
7036 /* At this point if we have no error, the object was created. */
7037 /* if we don't get attributes, then we should lookitup. */
7038 create_error
= error
;
7040 nfs_vattr_set_supported(bitmap
, vap
);
7041 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7043 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
7045 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
)) {
7046 printf("nfs: create/%s didn't return filehandle? %s\n", tag
, cnp
->cn_nameptr
);
7050 /* directory attributes: if we don't get them, make sure to invalidate */
7051 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
7052 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7054 nfsm_chain_loadattr(error
, &nmrep
, dnp
, nfsvers
, &xid
);
7056 NATTRINVALIDATE(dnp
);
7060 nfsm_chain_cleanup(&nmreq
);
7061 nfsm_chain_cleanup(&nmrep
);
7064 if (!create_error
&& (dnp
->n_flag
& NNEGNCENTRIES
)) {
7065 dnp
->n_flag
&= ~NNEGNCENTRIES
;
7066 cache_purge_negatives(NFSTOV(dnp
));
7068 dnp
->n_flag
|= NMODIFIED
;
7069 nfs_node_unlock(dnp
);
7070 /* nfs_getattr() will check changed and purge caches */
7071 nfs_getattr(dnp
, NULL
, ctx
, NGA_CACHED
);
7074 if (!error
&& fh
.fh_len
) {
7075 /* create the vnode with the filehandle and attributes */
7077 error
= nfs_nget(NFSTOMP(dnp
), dnp
, cnp
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, NG_MAKEENTRY
, &np
);
7082 NVATTR_CLEANUP(&nvattr
);
7085 nfs_dulookup_finish(&dul
, dnp
, ctx
);
7089 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
7090 * if we can succeed in looking up the object.
7092 if ((create_error
== EEXIST
) || (!create_error
&& !newvp
)) {
7093 error
= nfs_lookitup(dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
, &np
);
7096 if (vnode_vtype(newvp
) != nfstov_type(type
, nfsvers
)) {
7102 nfs_node_clear_busy(dnp
);
7106 nfs_node_unlock(np
);
7110 nfs_node_unlock(np
);
7118 struct vnop_mknod_args
/* {
7119 * struct vnodeop_desc *a_desc;
7122 * struct componentname *a_cnp;
7123 * struct vnode_attr *a_vap;
7124 * vfs_context_t a_context;
7127 nfsnode_t np
= NULL
;
7128 struct nfsmount
*nmp
;
7131 nmp
= VTONMP(ap
->a_dvp
);
7132 if (nfs_mount_gone(nmp
)) {
7136 if (!VATTR_IS_ACTIVE(ap
->a_vap
, va_type
)) {
7139 switch (ap
->a_vap
->va_type
) {
7149 error
= nfs4_create_rpc(ap
->a_context
, VTONFS(ap
->a_dvp
), ap
->a_cnp
, ap
->a_vap
,
7150 vtonfs_type(ap
->a_vap
->va_type
, nmp
->nm_vers
), NULL
, &np
);
7152 *ap
->a_vpp
= NFSTOV(np
);
7159 struct vnop_mkdir_args
/* {
7160 * struct vnodeop_desc *a_desc;
7163 * struct componentname *a_cnp;
7164 * struct vnode_attr *a_vap;
7165 * vfs_context_t a_context;
7168 nfsnode_t np
= NULL
;
7171 error
= nfs4_create_rpc(ap
->a_context
, VTONFS(ap
->a_dvp
), ap
->a_cnp
, ap
->a_vap
,
7174 *ap
->a_vpp
= NFSTOV(np
);
7181 struct vnop_symlink_args
/* {
7182 * struct vnodeop_desc *a_desc;
7185 * struct componentname *a_cnp;
7186 * struct vnode_attr *a_vap;
7188 * vfs_context_t a_context;
7191 nfsnode_t np
= NULL
;
7194 error
= nfs4_create_rpc(ap
->a_context
, VTONFS(ap
->a_dvp
), ap
->a_cnp
, ap
->a_vap
,
7195 NFLNK
, ap
->a_target
, &np
);
7197 *ap
->a_vpp
= NFSTOV(np
);
7204 struct vnop_link_args
/* {
7205 * struct vnodeop_desc *a_desc;
7208 * struct componentname *a_cnp;
7209 * vfs_context_t a_context;
7212 vfs_context_t ctx
= ap
->a_context
;
7213 vnode_t vp
= ap
->a_vp
;
7214 vnode_t tdvp
= ap
->a_tdvp
;
7215 struct componentname
*cnp
= ap
->a_cnp
;
7216 int error
= 0, lockerror
= ENOENT
, status
;
7217 struct nfsmount
*nmp
;
7218 nfsnode_t np
= VTONFS(vp
);
7219 nfsnode_t tdnp
= VTONFS(tdvp
);
7220 int nfsvers
, numops
;
7221 u_int64_t xid
, savedxid
;
7222 struct nfsm_chain nmreq
, nmrep
;
7223 struct nfsreq_secinfo_args si
;
7225 if (vnode_mount(vp
) != vnode_mount(tdvp
)) {
7230 if (nfs_mount_gone(nmp
)) {
7233 nfsvers
= nmp
->nm_vers
;
7234 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
7237 if (tdnp
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
7242 * Push all writes to the server, so that the attribute cache
7243 * doesn't get "out of sync" with the server.
7244 * XXX There should be a better way!
7246 nfs_flush(np
, MNT_WAIT
, vfs_context_thread(ctx
), V_IGNORE_WRITEERR
);
7248 if ((error
= nfs_node_set_busy2(tdnp
, np
, vfs_context_thread(ctx
)))) {
7252 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
7253 nfsm_chain_null(&nmreq
);
7254 nfsm_chain_null(&nmrep
);
7256 // PUTFH(SOURCE), SAVEFH, PUTFH(DIR), LINK, GETATTR(DIR), RESTOREFH, GETATTR
7258 nfsm_chain_build_alloc_init(error
, &nmreq
, 29 * NFSX_UNSIGNED
+ cnp
->cn_namelen
);
7259 nfsm_chain_add_compound_header(error
, &nmreq
, "link", nmp
->nm_minor_vers
, numops
);
7261 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7262 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
);
7264 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
7266 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7267 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, tdnp
->n_fhp
, tdnp
->n_fhsize
);
7269 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LINK
);
7270 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
7272 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7273 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, tdnp
);
7275 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
7277 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7278 nfsm_chain_add_bitmap_supported(error
, &nmreq
, nfs_getattr_bitmap
, nmp
, np
);
7279 nfsm_chain_build_done(error
, &nmreq
);
7280 nfsm_assert(error
, (numops
== 0), EPROTO
);
7282 error
= nfs_request(tdnp
, NULL
, &nmreq
, NFSPROC4_COMPOUND
, ctx
, &si
, &nmrep
, &xid
, &status
);
7284 if ((lockerror
= nfs_node_lock2(tdnp
, np
))) {
7288 nfsm_chain_skip_tag(error
, &nmrep
);
7289 nfsm_chain_get_32(error
, &nmrep
, numops
);
7290 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
7291 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
7292 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
7293 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LINK
);
7294 nfsm_chain_check_change_info(error
, &nmrep
, tdnp
);
7295 /* directory attributes: if we don't get them, make sure to invalidate */
7296 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7298 nfsm_chain_loadattr(error
, &nmrep
, tdnp
, nfsvers
, &xid
);
7300 NATTRINVALIDATE(tdnp
);
7302 /* link attributes: if we don't get them, make sure to invalidate */
7303 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
7304 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7306 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
);
7308 NATTRINVALIDATE(np
);
7311 nfsm_chain_cleanup(&nmreq
);
7312 nfsm_chain_cleanup(&nmrep
);
7314 tdnp
->n_flag
|= NMODIFIED
;
7316 /* Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. */
7317 if (error
== EEXIST
) {
7320 if (!error
&& (tdnp
->n_flag
& NNEGNCENTRIES
)) {
7321 tdnp
->n_flag
&= ~NNEGNCENTRIES
;
7322 cache_purge_negatives(tdvp
);
7325 nfs_node_unlock2(tdnp
, np
);
7327 nfs_node_clear_busy2(tdnp
, np
);
7333 struct vnop_rmdir_args
/* {
7334 * struct vnodeop_desc *a_desc;
7337 * struct componentname *a_cnp;
7338 * vfs_context_t a_context;
7341 vfs_context_t ctx
= ap
->a_context
;
7342 vnode_t vp
= ap
->a_vp
;
7343 vnode_t dvp
= ap
->a_dvp
;
7344 struct componentname
*cnp
= ap
->a_cnp
;
7345 struct nfsmount
*nmp
;
7346 int error
= 0, namedattrs
;
7347 nfsnode_t np
= VTONFS(vp
);
7348 nfsnode_t dnp
= VTONFS(dvp
);
7349 struct nfs_dulookup dul
;
7351 if (vnode_vtype(vp
) != VDIR
) {
7355 nmp
= NFSTONMP(dnp
);
7356 if (nfs_mount_gone(nmp
)) {
7359 namedattrs
= (nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
);
7361 if ((error
= nfs_node_set_busy2(dnp
, np
, vfs_context_thread(ctx
)))) {
7366 nfs_dulookup_init(&dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
);
7367 nfs_dulookup_start(&dul
, dnp
, ctx
);
7370 error
= nfs4_remove_rpc(dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
,
7371 vfs_context_thread(ctx
), vfs_context_ucred(ctx
));
7373 nfs_name_cache_purge(dnp
, np
, cnp
, ctx
);
7374 /* nfs_getattr() will check changed and purge caches */
7375 nfs_getattr(dnp
, NULL
, ctx
, NGA_CACHED
);
7377 nfs_dulookup_finish(&dul
, dnp
, ctx
);
7379 nfs_node_clear_busy2(dnp
, np
);
7382 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
7384 if (error
== ENOENT
) {
7389 * remove nfsnode from hash now so we can't accidentally find it
7390 * again if another object gets created with the same filehandle
7391 * before this vnode gets reclaimed
7393 lck_mtx_lock(nfs_node_hash_mutex
);
7394 if (np
->n_hflag
& NHHASHED
) {
7395 LIST_REMOVE(np
, n_hash
);
7396 np
->n_hflag
&= ~NHHASHED
;
7397 FSDBG(266, 0, np
, np
->n_flag
, 0xb1eb1e);
7399 lck_mtx_unlock(nfs_node_hash_mutex
);
7405 * NFSv4 Named Attributes
7407 * Both the extended attributes interface and the named streams interface
7408 * are backed by NFSv4 named attributes. The implementations for both use
7409 * a common set of routines in an attempt to reduce code duplication, to
7410 * increase efficiency, to increase caching of both names and data, and to
7411 * confine the complexity.
7413 * Each NFS node caches its named attribute directory's file handle.
7414 * The directory nodes for the named attribute directories are handled
7415 * exactly like regular directories (with a couple minor exceptions).
7416 * Named attribute nodes are also treated as much like regular files as
7419 * Most of the heavy lifting is done by nfs4_named_attr_get().
7423 * Get the given node's attribute directory node.
7424 * If !fetch, then only return a cached node.
7425 * Otherwise, we will attempt to fetch the node from the server.
7426 * (Note: the node should be marked busy.)
7429 nfs4_named_attr_dir_get(nfsnode_t np
, int fetch
, vfs_context_t ctx
)
7431 nfsnode_t adnp
= NULL
;
7432 struct nfsmount
*nmp
;
7433 int error
= 0, status
, numops
;
7434 struct nfsm_chain nmreq
, nmrep
;
7436 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
];
7438 struct nfs_vattr nvattr
;
7439 struct componentname cn
;
7440 struct nfsreq rq
, *req
= &rq
;
7441 struct nfsreq_secinfo_args si
;
7444 if (nfs_mount_gone(nmp
)) {
7447 if (np
->n_vattr
.nva_flags
& NFS_FFLAG_TRIGGER_REFERRAL
) {
7451 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
7452 NVATTR_INIT(&nvattr
);
7453 nfsm_chain_null(&nmreq
);
7454 nfsm_chain_null(&nmrep
);
7456 bzero(&cn
, sizeof(cn
));
7457 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER
, const, char *); /* "/..namedfork/" */
7458 cn
.cn_namelen
= strlen(_PATH_FORKSPECIFIER
);
7459 cn
.cn_nameiop
= LOOKUP
;
7461 if (np
->n_attrdirfh
) {
7462 // XXX can't set parent correctly (to np) yet
7463 error
= nfs_nget(nmp
->nm_mountp
, NULL
, &cn
, np
->n_attrdirfh
+ 1, *np
->n_attrdirfh
,
7464 NULL
, NULL
, RPCAUTH_UNKNOWN
, NG_NOCREATE
, &adnp
);
7474 // PUTFH, OPENATTR, GETATTR
7476 nfsm_chain_build_alloc_init(error
, &nmreq
, 22 * NFSX_UNSIGNED
);
7477 nfsm_chain_add_compound_header(error
, &nmreq
, "openattr", nmp
->nm_minor_vers
, numops
);
7479 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7480 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, np
->n_fhp
, np
->n_fhsize
);
7482 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPENATTR
);
7483 nfsm_chain_add_32(error
, &nmreq
, 0);
7485 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7486 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
7487 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
7488 nfsm_chain_add_bitmap_masked(error
, &nmreq
, bitmap
,
7489 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
7490 nfsm_chain_build_done(error
, &nmreq
);
7491 nfsm_assert(error
, (numops
== 0), EPROTO
);
7493 error
= nfs_request_async(np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
7494 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &si
, 0, NULL
, &req
);
7496 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
7499 nfsm_chain_skip_tag(error
, &nmrep
);
7500 nfsm_chain_get_32(error
, &nmrep
, numops
);
7501 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
7502 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPENATTR
);
7503 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7505 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
7507 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
) || !fh
.fh_len
) {
7511 if (!np
->n_attrdirfh
|| (*np
->n_attrdirfh
!= fh
.fh_len
)) {
7512 /* (re)allocate attrdir fh buffer */
7513 if (np
->n_attrdirfh
) {
7514 FREE(np
->n_attrdirfh
, M_TEMP
);
7516 MALLOC(np
->n_attrdirfh
, u_char
*, fh
.fh_len
+ 1, M_TEMP
, M_WAITOK
);
7518 if (!np
->n_attrdirfh
) {
7522 /* cache the attrdir fh in the node */
7523 *np
->n_attrdirfh
= fh
.fh_len
;
7524 bcopy(fh
.fh_data
, np
->n_attrdirfh
+ 1, fh
.fh_len
);
7525 /* create node for attrdir */
7526 // XXX can't set parent correctly (to np) yet
7527 error
= nfs_nget(NFSTOMP(np
), NULL
, &cn
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, 0, &adnp
);
7529 NVATTR_CLEANUP(&nvattr
);
7530 nfsm_chain_cleanup(&nmreq
);
7531 nfsm_chain_cleanup(&nmrep
);
7534 /* sanity check that this node is an attribute directory */
7535 if (adnp
->n_vattr
.nva_type
!= VDIR
) {
7538 if (!(adnp
->n_vattr
.nva_flags
& NFS_FFLAG_IS_ATTR
)) {
7541 nfs_node_unlock(adnp
);
7543 vnode_put(NFSTOV(adnp
));
7546 return error
? NULL
: adnp
;
7550 * Get the given node's named attribute node for the name given.
7552 * In an effort to increase the performance of named attribute access, we try
7553 * to reduce server requests by doing the following:
7555 * - cache the node's named attribute directory file handle in the node
7556 * - maintain a directory vnode for the attribute directory
7557 * - use name cache entries (positive and negative) to speed up lookups
7558 * - optionally open the named attribute (with the given accessMode) in the same RPC
7559 * - combine attribute directory retrieval with the lookup/open RPC
7560 * - optionally prefetch the named attribute's first block of data in the same RPC
7562 * Also, in an attempt to reduce the number of copies/variations of this code,
7563 * parts of the RPC building/processing code are conditionalized on what is
7564 * needed for any particular request (openattr, lookup vs. open, read).
7566 * Note that because we may not have the attribute directory node when we start
7567 * the lookup/open, we lock both the node and the attribute directory node.
7570 #define NFS_GET_NAMED_ATTR_CREATE 0x1
7571 #define NFS_GET_NAMED_ATTR_CREATE_GUARDED 0x2
7572 #define NFS_GET_NAMED_ATTR_TRUNCATE 0x4
7573 #define NFS_GET_NAMED_ATTR_PREFETCH 0x8
7576 nfs4_named_attr_get(
7578 struct componentname
*cnp
,
7579 uint32_t accessMode
,
7583 struct nfs_open_file
**nofpp
)
7585 struct nfsmount
*nmp
;
7586 int error
= 0, open_error
= EIO
;
7587 int inuse
= 0, adlockerror
= ENOENT
, busyerror
= ENOENT
, adbusyerror
= ENOENT
, nofpbusyerror
= ENOENT
;
7588 int create
, guarded
, prefetch
, truncate
, noopbusy
= 0;
7589 int open
, status
, numops
, hadattrdir
, negnamecache
;
7590 struct nfs_vattr nvattr
;
7591 struct vnode_attr vattr
;
7592 nfsnode_t adnp
= NULL
, anp
= NULL
;
7594 u_int64_t xid
, savedxid
= 0;
7595 struct nfsm_chain nmreq
, nmrep
;
7596 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
;
7597 uint32_t denyMode
, rflags
, delegation
, recall
, eof
, rlen
, retlen
;
7598 nfs_stateid stateid
, dstateid
;
7600 struct nfs_open_owner
*noop
= NULL
;
7601 struct nfs_open_file
*newnofp
= NULL
, *nofp
= NULL
;
7602 struct vnop_access_args naa
;
7607 uint32_t ace_type
, ace_flags
, ace_mask
, len
, slen
;
7608 struct kauth_ace ace
;
7609 struct nfsreq rq
, *req
= &rq
;
7610 struct nfsreq_secinfo_args si
;
7614 rflags
= delegation
= recall
= eof
= rlen
= retlen
= 0;
7617 slen
= sizeof(sbuf
);
7620 if (nfs_mount_gone(nmp
)) {
7623 NVATTR_INIT(&nvattr
);
7624 negnamecache
= !NMFLAG(nmp
, NONEGNAMECACHE
);
7625 thd
= vfs_context_thread(ctx
);
7626 cred
= vfs_context_ucred(ctx
);
7627 create
= (flags
& NFS_GET_NAMED_ATTR_CREATE
) ? NFS_OPEN_CREATE
: NFS_OPEN_NOCREATE
;
7628 guarded
= (flags
& NFS_GET_NAMED_ATTR_CREATE_GUARDED
) ? NFS_CREATE_GUARDED
: NFS_CREATE_UNCHECKED
;
7629 truncate
= (flags
& NFS_GET_NAMED_ATTR_TRUNCATE
);
7630 prefetch
= (flags
& NFS_GET_NAMED_ATTR_PREFETCH
);
7633 error
= nfs_getattr(np
, &nvattr
, ctx
, NGA_CACHED
);
7637 if (NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_NAMED_ATTR
) &&
7638 !(nvattr
.nva_flags
& NFS_FFLAG_HAS_NAMED_ATTRS
)) {
7641 } else if (accessMode
== NFS_OPEN_SHARE_ACCESS_NONE
) {
7642 /* shouldn't happen... but just be safe */
7643 printf("nfs4_named_attr_get: create with no access %s\n", cnp
->cn_nameptr
);
7644 accessMode
= NFS_OPEN_SHARE_ACCESS_READ
;
7646 open
= (accessMode
!= NFS_OPEN_SHARE_ACCESS_NONE
);
7649 * We're trying to open the file.
7650 * We'll create/open it with the given access mode,
7651 * and set NFS_OPEN_FILE_CREATE.
7653 denyMode
= NFS_OPEN_SHARE_DENY_NONE
;
7654 if (prefetch
&& guarded
) {
7655 prefetch
= 0; /* no sense prefetching data that can't be there */
7657 noop
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1);
7663 if ((error
= busyerror
= nfs_node_set_busy(np
, vfs_context_thread(ctx
)))) {
7667 adnp
= nfs4_named_attr_dir_get(np
, 0, ctx
);
7668 hadattrdir
= (adnp
!= NULL
);
7671 /* use the special state ID because we don't have a real one to send */
7672 stateid
.seqid
= stateid
.other
[0] = stateid
.other
[1] = stateid
.other
[2] = 0;
7673 rlen
= MIN(nmp
->nm_rsize
, nmp
->nm_biosize
);
7675 NFSREQ_SECINFO_SET(&si
, np
, NULL
, 0, NULL
, 0);
7676 nfsm_chain_null(&nmreq
);
7677 nfsm_chain_null(&nmrep
);
7680 if ((error
= adbusyerror
= nfs_node_set_busy(adnp
, vfs_context_thread(ctx
)))) {
7683 /* nfs_getattr() will check changed and purge caches */
7684 error
= nfs_getattr(adnp
, NULL
, ctx
, NGA_CACHED
);
7686 error
= cache_lookup(NFSTOV(adnp
), &avp
, cnp
);
7689 /* negative cache entry */
7693 /* try dir buf cache lookup */
7694 error
= nfs_dir_buf_cache_lookup(adnp
, &anp
, cnp
, ctx
, 0);
7695 if (!error
&& anp
) {
7696 /* dir buf cache hit */
7700 if (error
!= -1) { /* cache miss */
7705 /* cache hit, not really an error */
7706 OSAddAtomic64(1, &nfsstats
.lookupcache_hits
);
7708 *anpp
= anp
= VTONFS(avp
);
7711 nfs_node_clear_busy(adnp
);
7712 adbusyerror
= ENOENT
;
7714 /* check for directory access */
7715 naa
.a_desc
= &vnop_access_desc
;
7716 naa
.a_vp
= NFSTOV(adnp
);
7717 naa
.a_action
= KAUTH_VNODE_SEARCH
;
7718 naa
.a_context
= ctx
;
7720 /* compute actual success/failure based on accessibility */
7721 error
= nfs_vnop_access(&naa
);
7724 /* we either found it, or hit an error */
7725 if (!error
&& guarded
) {
7726 /* found cached entry but told not to use it */
7728 vnode_put(NFSTOV(anp
));
7731 /* we're done if error or we don't need to open */
7732 if (error
|| !open
) {
7735 /* no error and we need to open... */
7741 error
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
));
7743 nfs_open_owner_rele(noop
);
7749 /* grab an open file - possibly provisional/nodeless if cache_lookup() failed */
7750 error
= nfs_open_file_find(anp
, noop
, &newnofp
, 0, 0, 1);
7751 if (!error
&& (newnofp
->nof_flags
& NFS_OPEN_FILE_LOST
)) {
7752 printf("nfs4_named_attr_get: LOST %d %s\n", kauth_cred_getuid(noop
->noo_cred
), cnp
->cn_nameptr
);
7755 if (!error
&& (newnofp
->nof_flags
& NFS_OPEN_FILE_REOPEN
)) {
7756 nfs_mount_state_in_use_end(nmp
, 0);
7757 error
= nfs4_reopen(newnofp
, vfs_context_thread(ctx
));
7758 nfs_open_file_destroy(newnofp
);
7765 error
= nfs_open_file_set_busy(newnofp
, vfs_context_thread(ctx
));
7769 nfs_open_file_destroy(newnofp
);
7776 * We already have the node. So we just need to open
7777 * it - which we may be able to do with a delegation.
7779 open_error
= error
= nfs4_open(anp
, newnofp
, accessMode
, denyMode
, ctx
);
7781 /* open succeeded, so our open file is no longer temporary */
7794 * We either don't have the attrdir or we didn't find the attribute
7795 * in the name cache, so we need to talk to the server.
7797 * If we don't have the attrdir, we'll need to ask the server for that too.
7798 * If the caller is requesting that the attribute be created, we need to
7799 * make sure the attrdir is created.
7800 * The caller may also request that the first block of an existing attribute
7801 * be retrieved at the same time.
7805 /* need to mark the open owner busy during the RPC */
7806 if ((error
= nfs_open_owner_set_busy(noop
, thd
))) {
7813 * We'd like to get updated post-open/lookup attributes for the
7814 * directory and we may also want to prefetch some data via READ.
7815 * We'd like the READ results to be last so that we can leave the
7816 * data in the mbufs until the end.
7818 * At a minimum we're sending: PUTFH, LOOKUP/OPEN, GETATTR, PUTFH, GETATTR
7822 numops
+= 3; // also sending: OPENATTR, GETATTR, OPENATTR
7825 numops
+= 4; // also sending: SAVEFH, RESTOREFH, NVERIFY, READ
7827 nfsm_chain_build_alloc_init(error
, &nmreq
, 64 * NFSX_UNSIGNED
+ cnp
->cn_namelen
);
7828 nfsm_chain_add_compound_header(error
, &nmreq
, "getnamedattr", nmp
->nm_minor_vers
, numops
);
7831 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7832 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, adnp
->n_fhp
, adnp
->n_fhsize
);
7835 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7836 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, np
->n_fhp
, np
->n_fhsize
);
7838 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPENATTR
);
7839 nfsm_chain_add_32(error
, &nmreq
, create
? 1 : 0);
7841 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7842 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
7843 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
7844 nfsm_chain_add_bitmap_masked(error
, &nmreq
, bitmap
,
7845 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
7849 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPEN
);
7850 nfsm_chain_add_32(error
, &nmreq
, noop
->noo_seqid
);
7851 nfsm_chain_add_32(error
, &nmreq
, accessMode
);
7852 nfsm_chain_add_32(error
, &nmreq
, denyMode
);
7853 nfsm_chain_add_64(error
, &nmreq
, nmp
->nm_clientid
);
7854 nfsm_chain_add_32(error
, &nmreq
, NFSX_UNSIGNED
);
7855 nfsm_chain_add_32(error
, &nmreq
, kauth_cred_getuid(noop
->noo_cred
));
7856 nfsm_chain_add_32(error
, &nmreq
, create
);
7858 nfsm_chain_add_32(error
, &nmreq
, guarded
);
7861 VATTR_SET(&vattr
, va_data_size
, 0);
7863 nfsm_chain_add_fattr4(error
, &nmreq
, &vattr
, nmp
);
7865 nfsm_chain_add_32(error
, &nmreq
, NFS_CLAIM_NULL
);
7866 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
7869 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_LOOKUP
);
7870 nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
);
7873 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7874 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap
, bitmap
);
7875 NFS_BITMAP_SET(bitmap
, NFS_FATTR_FILEHANDLE
);
7876 nfsm_chain_add_bitmap_masked(error
, &nmreq
, bitmap
,
7877 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
7880 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_SAVEFH
);
7884 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7885 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, adnp
->n_fhp
, adnp
->n_fhsize
);
7888 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_PUTFH
);
7889 nfsm_chain_add_fh(error
, &nmreq
, nmp
->nm_vers
, np
->n_fhp
, np
->n_fhsize
);
7891 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_OPENATTR
);
7892 nfsm_chain_add_32(error
, &nmreq
, 0);
7895 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_GETATTR
);
7896 nfsm_chain_add_bitmap_masked(error
, &nmreq
, nfs_getattr_bitmap
,
7897 NFS_ATTR_BITMAP_LEN
, nmp
->nm_fsattr
.nfsa_supp_attr
);
7900 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_RESTOREFH
);
7902 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_NVERIFY
);
7904 VATTR_SET(&vattr
, va_data_size
, 0);
7905 nfsm_chain_add_fattr4(error
, &nmreq
, &vattr
, nmp
);
7907 nfsm_chain_add_32(error
, &nmreq
, NFS_OP_READ
);
7908 nfsm_chain_add_stateid(error
, &nmreq
, &stateid
);
7909 nfsm_chain_add_64(error
, &nmreq
, 0);
7910 nfsm_chain_add_32(error
, &nmreq
, rlen
);
7912 nfsm_chain_build_done(error
, &nmreq
);
7913 nfsm_assert(error
, (numops
== 0), EPROTO
);
7915 error
= nfs_request_async(hadattrdir
? adnp
: np
, NULL
, &nmreq
, NFSPROC4_COMPOUND
,
7916 vfs_context_thread(ctx
), vfs_context_ucred(ctx
), &si
, open
? R_NOINTR
: 0, NULL
, &req
);
7918 error
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
);
7921 if (hadattrdir
&& ((adlockerror
= nfs_node_lock(adnp
)))) {
7922 error
= adlockerror
;
7925 nfsm_chain_skip_tag(error
, &nmrep
);
7926 nfsm_chain_get_32(error
, &nmrep
, numops
);
7927 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
7929 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPENATTR
);
7930 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
7932 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
7934 if (NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
) && fh
.fh_len
) {
7935 if (!np
->n_attrdirfh
|| (*np
->n_attrdirfh
!= fh
.fh_len
)) {
7936 /* (re)allocate attrdir fh buffer */
7937 if (np
->n_attrdirfh
) {
7938 FREE(np
->n_attrdirfh
, M_TEMP
);
7940 MALLOC(np
->n_attrdirfh
, u_char
*, fh
.fh_len
+ 1, M_TEMP
, M_WAITOK
);
7942 if (np
->n_attrdirfh
) {
7943 /* remember the attrdir fh in the node */
7944 *np
->n_attrdirfh
= fh
.fh_len
;
7945 bcopy(fh
.fh_data
, np
->n_attrdirfh
+ 1, fh
.fh_len
);
7946 /* create busied node for attrdir */
7947 struct componentname cn
;
7948 bzero(&cn
, sizeof(cn
));
7949 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER
, const, char *); /* "/..namedfork/" */
7950 cn
.cn_namelen
= strlen(_PATH_FORKSPECIFIER
);
7951 cn
.cn_nameiop
= LOOKUP
;
7952 // XXX can't set parent correctly (to np) yet
7953 error
= nfs_nget(NFSTOMP(np
), NULL
, &cn
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, 0, &adnp
);
7956 /* set the node busy */
7957 SET(adnp
->n_flag
, NBUSY
);
7960 /* if no adnp, oh well... */
7964 NVATTR_CLEANUP(&nvattr
);
7968 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPEN
);
7969 nfs_owner_seqid_increment(noop
, NULL
, error
);
7970 nfsm_chain_get_stateid(error
, &nmrep
, &newnofp
->nof_stateid
);
7971 nfsm_chain_check_change_info(error
, &nmrep
, adnp
);
7972 nfsm_chain_get_32(error
, &nmrep
, rflags
);
7973 bmlen
= NFS_ATTR_BITMAP_LEN
;
7974 nfsm_chain_get_bitmap(error
, &nmrep
, bitmap
, bmlen
);
7975 nfsm_chain_get_32(error
, &nmrep
, delegation
);
7977 switch (delegation
) {
7978 case NFS_OPEN_DELEGATE_NONE
:
7980 case NFS_OPEN_DELEGATE_READ
:
7981 case NFS_OPEN_DELEGATE_WRITE
:
7982 nfsm_chain_get_stateid(error
, &nmrep
, &dstateid
);
7983 nfsm_chain_get_32(error
, &nmrep
, recall
);
7984 if (delegation
== NFS_OPEN_DELEGATE_WRITE
) { // space (skip) XXX
7985 nfsm_chain_adv(error
, &nmrep
, 3 * NFSX_UNSIGNED
);
7987 /* if we have any trouble accepting the ACE, just invalidate it */
7988 ace_type
= ace_flags
= ace_mask
= len
= 0;
7989 nfsm_chain_get_32(error
, &nmrep
, ace_type
);
7990 nfsm_chain_get_32(error
, &nmrep
, ace_flags
);
7991 nfsm_chain_get_32(error
, &nmrep
, ace_mask
);
7992 nfsm_chain_get_32(error
, &nmrep
, len
);
7993 ace
.ace_flags
= nfs4_ace_nfstype_to_vfstype(ace_type
, &error
);
7994 ace
.ace_flags
|= nfs4_ace_nfsflags_to_vfsflags(ace_flags
);
7995 ace
.ace_rights
= nfs4_ace_nfsmask_to_vfsrights(ace_mask
);
7996 if (!error
&& (len
>= slen
)) {
7997 MALLOC(s
, char*, len
+ 1, M_TEMP
, M_WAITOK
);
8005 nfsm_chain_get_opaque(error
, &nmrep
, len
, s
);
8007 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(len
));
8011 if (nfs4_id2guid(s
, &ace
.ace_applicable
, (ace_flags
& NFS_ACE_IDENTIFIER_GROUP
))) {
8018 if (s
&& (s
!= sbuf
)) {
8027 /* At this point if we have no error, the object was created/opened. */
8030 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_LOOKUP
);
8032 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
8034 error
= nfs4_parsefattr(&nmrep
, NULL
, &nvattr
, &fh
, NULL
, NULL
);
8036 if (!NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_FILEHANDLE
) || !fh
.fh_len
) {
8041 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_SAVEFH
);
8043 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_PUTFH
);
8045 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_OPENATTR
);
8047 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_GETATTR
);
8050 nfsm_chain_loadattr(error
, &nmrep
, adnp
, nmp
->nm_vers
, &xid
);
8054 if (rflags
& NFS_OPEN_RESULT_LOCKTYPE_POSIX
) {
8055 newnofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
8057 if (rflags
& NFS_OPEN_RESULT_CONFIRM
) {
8059 nfs_node_unlock(adnp
);
8060 adlockerror
= ENOENT
;
8062 NVATTR_CLEANUP(&nvattr
);
8063 error
= nfs4_open_confirm_rpc(nmp
, adnp
? adnp
: np
, fh
.fh_data
, fh
.fh_len
, noop
, &newnofp
->nof_stateid
, thd
, cred
, &nvattr
, &xid
);
8066 if ((adlockerror
= nfs_node_lock(adnp
))) {
8067 error
= adlockerror
;
8073 if (open
&& adnp
&& !adlockerror
) {
8074 if (!open_error
&& (adnp
->n_flag
& NNEGNCENTRIES
)) {
8075 adnp
->n_flag
&= ~NNEGNCENTRIES
;
8076 cache_purge_negatives(NFSTOV(adnp
));
8078 adnp
->n_flag
|= NMODIFIED
;
8079 nfs_node_unlock(adnp
);
8080 adlockerror
= ENOENT
;
8081 nfs_getattr(adnp
, NULL
, ctx
, NGA_CACHED
);
8083 if (adnp
&& !adlockerror
&& (error
== ENOENT
) &&
8084 (cnp
->cn_flags
& MAKEENTRY
) && (cnp
->cn_nameiop
!= CREATE
) && negnamecache
) {
8085 /* add a negative entry in the name cache */
8086 cache_enter(NFSTOV(adnp
), NULL
, cnp
);
8087 adnp
->n_flag
|= NNEGNCENTRIES
;
8089 if (adnp
&& !adlockerror
) {
8090 nfs_node_unlock(adnp
);
8091 adlockerror
= ENOENT
;
8093 if (!error
&& !anp
&& fh
.fh_len
) {
8094 /* create the vnode with the filehandle and attributes */
8096 error
= nfs_nget(NFSTOMP(np
), adnp
, cnp
, fh
.fh_data
, fh
.fh_len
, &nvattr
, &xid
, rq
.r_auth
, NG_MAKEENTRY
, &anp
);
8099 nfs_node_unlock(anp
);
8101 if (!error
&& open
) {
8102 nfs_open_file_add_open(newnofp
, accessMode
, denyMode
, 0);
8103 /* After we have a node, add our open file struct to the node */
8105 error
= nfs_open_file_find_internal(anp
, noop
, &nofp
, 0, 0, 0);
8107 /* This shouldn't happen, because we passed in a new nofp to use. */
8108 printf("nfs_open_file_find_internal failed! %d\n", error
);
8110 } else if (nofp
!= newnofp
) {
8112 * Hmm... an open file struct already exists.
8113 * Mark the existing one busy and merge our open into it.
8114 * Then destroy the one we created.
8115 * Note: there's no chance of an open confict because the
8116 * open has already been granted.
8118 nofpbusyerror
= nfs_open_file_set_busy(nofp
, NULL
);
8119 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, 0);
8120 nofp
->nof_stateid
= newnofp
->nof_stateid
;
8121 if (newnofp
->nof_flags
& NFS_OPEN_FILE_POSIXLOCK
) {
8122 nofp
->nof_flags
|= NFS_OPEN_FILE_POSIXLOCK
;
8124 nfs_open_file_clear_busy(newnofp
);
8125 nfs_open_file_destroy(newnofp
);
8131 /* mark the node as holding a create-initiated open */
8132 nofp
->nof_flags
|= NFS_OPEN_FILE_CREATE
;
8133 nofp
->nof_creator
= current_thread();
8140 NVATTR_CLEANUP(&nvattr
);
8141 if (open
&& ((delegation
== NFS_OPEN_DELEGATE_READ
) || (delegation
== NFS_OPEN_DELEGATE_WRITE
))) {
8142 if (!error
&& anp
&& !recall
) {
8143 /* stuff the delegation state in the node */
8144 lck_mtx_lock(&anp
->n_openlock
);
8145 anp
->n_openflags
&= ~N_DELEG_MASK
;
8146 anp
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
8147 anp
->n_dstateid
= dstateid
;
8149 if (anp
->n_dlink
.tqe_next
== NFSNOLIST
) {
8150 lck_mtx_lock(&nmp
->nm_lock
);
8151 if (anp
->n_dlink
.tqe_next
== NFSNOLIST
) {
8152 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, anp
, n_dlink
);
8154 lck_mtx_unlock(&nmp
->nm_lock
);
8156 lck_mtx_unlock(&anp
->n_openlock
);
8158 /* give the delegation back */
8160 if (NFS_CMPFH(anp
, fh
.fh_data
, fh
.fh_len
)) {
8161 /* update delegation state and return it */
8162 lck_mtx_lock(&anp
->n_openlock
);
8163 anp
->n_openflags
&= ~N_DELEG_MASK
;
8164 anp
->n_openflags
|= ((delegation
== NFS_OPEN_DELEGATE_READ
) ? N_DELEG_READ
: N_DELEG_WRITE
);
8165 anp
->n_dstateid
= dstateid
;
8167 if (anp
->n_dlink
.tqe_next
== NFSNOLIST
) {
8168 lck_mtx_lock(&nmp
->nm_lock
);
8169 if (anp
->n_dlink
.tqe_next
== NFSNOLIST
) {
8170 TAILQ_INSERT_TAIL(&nmp
->nm_delegations
, anp
, n_dlink
);
8172 lck_mtx_unlock(&nmp
->nm_lock
);
8174 lck_mtx_unlock(&anp
->n_openlock
);
8175 /* don't need to send a separate delegreturn for fh */
8178 /* return anp's current delegation */
8179 nfs4_delegation_return(anp
, 0, thd
, cred
);
8181 if (fh
.fh_len
) { /* return fh's delegation if it wasn't for anp */
8182 nfs4_delegreturn_rpc(nmp
, fh
.fh_data
, fh
.fh_len
, &dstateid
, 0, thd
, cred
);
8188 /* need to cleanup our temporary nofp */
8189 nfs_open_file_clear_busy(newnofp
);
8190 nfs_open_file_destroy(newnofp
);
8192 } else if (nofp
&& !nofpbusyerror
) {
8193 nfs_open_file_clear_busy(nofp
);
8194 nofpbusyerror
= ENOENT
;
8196 if (inuse
&& nfs_mount_state_in_use_end(nmp
, error
)) {
8198 nofp
= newnofp
= NULL
;
8199 rflags
= delegation
= recall
= eof
= rlen
= retlen
= 0;
8202 slen
= sizeof(sbuf
);
8203 nfsm_chain_cleanup(&nmreq
);
8204 nfsm_chain_cleanup(&nmrep
);
8206 vnode_put(NFSTOV(anp
));
8209 hadattrdir
= (adnp
!= NULL
);
8211 nfs_open_owner_clear_busy(noop
);
8218 nfs_open_owner_clear_busy(noop
);
8221 nfs_open_owner_rele(noop
);
8224 if (!error
&& prefetch
&& nmrep
.nmc_mhead
) {
8225 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_RESTOREFH
);
8226 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_NVERIFY
);
8227 nfsm_chain_op_check(error
, &nmrep
, NFS_OP_READ
);
8228 nfsm_chain_get_32(error
, &nmrep
, eof
);
8229 nfsm_chain_get_32(error
, &nmrep
, retlen
);
8230 if (!error
&& anp
) {
8232 * There can be one problem with doing the prefetch.
8233 * Because we don't have the node before we start the RPC, we
8234 * can't have the buffer busy while the READ is performed.
8235 * So there is a chance that other I/O occured on the same
8236 * range of data while we were performing this RPC. If that
8237 * happens, then it's possible the data we have in the READ
8238 * response is no longer up to date.
8239 * Once we have the node and the buffer, we need to make sure
8240 * that there's no chance we could be putting stale data in
8242 * So, we check if the range read is dirty or if any I/O may
8243 * have occured on it while we were performing our RPC.
8245 struct nfsbuf
*bp
= NULL
;
8249 retlen
= MIN(retlen
, rlen
);
8251 /* check if node needs size update or invalidation */
8252 if (ISSET(anp
->n_flag
, NUPDATESIZE
)) {
8253 nfs_data_update_size(anp
, 0);
8255 if (!(error
= nfs_node_lock(anp
))) {
8256 if (anp
->n_flag
& NNEEDINVALIDATE
) {
8257 anp
->n_flag
&= ~NNEEDINVALIDATE
;
8258 nfs_node_unlock(anp
);
8259 error
= nfs_vinvalbuf(NFSTOV(anp
), V_SAVE
| V_IGNORE_WRITEERR
, ctx
, 1);
8260 if (!error
) { /* lets play it safe and just drop the data */
8264 nfs_node_unlock(anp
);
8268 /* calculate page mask for the range of data read */
8269 lastpg
= (trunc_page_32(retlen
) - 1) / PAGE_SIZE
;
8270 pagemask
= ((1 << (lastpg
+ 1)) - 1);
8273 error
= nfs_buf_get(anp
, 0, nmp
->nm_biosize
, thd
, NBLK_READ
| NBLK_NOWAIT
, &bp
);
8275 /* don't save the data if dirty or potential I/O conflict */
8276 if (!error
&& bp
&& !bp
->nb_dirtyoff
&& !(bp
->nb_dirty
& pagemask
) &&
8277 timevalcmp(&anp
->n_lastio
, &now
, <)) {
8278 OSAddAtomic64(1, &nfsstats
.read_bios
);
8279 CLR(bp
->nb_flags
, (NB_DONE
| NB_ASYNC
));
8280 SET(bp
->nb_flags
, NB_READ
);
8282 nfsm_chain_get_opaque(error
, &nmrep
, retlen
, bp
->nb_data
);
8284 bp
->nb_error
= error
;
8285 SET(bp
->nb_flags
, NB_ERROR
);
8288 bp
->nb_endio
= rlen
;
8289 if ((retlen
> 0) && (bp
->nb_endio
< (int)retlen
)) {
8290 bp
->nb_endio
= retlen
;
8292 if (eof
|| (retlen
== 0)) {
8293 /* zero out the remaining data (up to EOF) */
8294 off_t rpcrem
, eofrem
, rem
;
8295 rpcrem
= (rlen
- retlen
);
8296 eofrem
= anp
->n_size
- (NBOFF(bp
) + retlen
);
8297 rem
= (rpcrem
< eofrem
) ? rpcrem
: eofrem
;
8299 bzero(bp
->nb_data
+ retlen
, rem
);
8301 } else if ((retlen
< rlen
) && !ISSET(bp
->nb_flags
, NB_ERROR
)) {
8302 /* ugh... short read ... just invalidate for now... */
8303 SET(bp
->nb_flags
, NB_INVAL
);
8306 nfs_buf_read_finish(bp
);
8307 microuptime(&anp
->n_lastio
);
8310 nfs_buf_release(bp
, 1);
8313 error
= 0; /* ignore any transient error in processing the prefetch */
8315 if (adnp
&& !adbusyerror
) {
8316 nfs_node_clear_busy(adnp
);
8317 adbusyerror
= ENOENT
;
8320 nfs_node_clear_busy(np
);
8324 vnode_put(NFSTOV(adnp
));
8326 if (error
&& *anpp
) {
8327 vnode_put(NFSTOV(*anpp
));
8330 nfsm_chain_cleanup(&nmreq
);
8331 nfsm_chain_cleanup(&nmrep
);
8336 * Remove a named attribute.
/*
 * nfs4_named_attr_remove
 *
 * Remove the named attribute "name" from node np.  If the caller already
 * holds the attribute node (anp) it is used directly; otherwise it is
 * looked up via nfs4_named_attr_get() first.  The actual removal is done
 * by issuing a VNOP_REMOVE against the attribute directory (adnp).
 *
 * NOTE(review): this excerpt appears to have lines elided by extraction
 * (the embedded original line numbers are discontinuous), so branch
 * bodies / returns between statements are not visible here.
 */
8339 nfs4_named_attr_remove(nfsnode_t np
, nfsnode_t anp
, const char *name
, vfs_context_t ctx
)
8341 nfsnode_t adnp
= NULL
;
8342 struct nfsmount
*nmp
;
8343 struct componentname cn
;
8344 struct vnop_remove_args vra
;
8345 int error
, putanp
= 0;
/* bail out if the mount has been forcibly unmounted */
8348 if (nfs_mount_gone(nmp
)) {
/* build a DELETE componentname for the attribute's name */
8352 bzero(&cn
, sizeof(cn
));
8353 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(name
, const, char *);
8354 cn
.cn_namelen
= strlen(name
);
8355 cn
.cn_nameiop
= DELETE
;
/* no attribute node given: look it up (no open access requested) */
8359 error
= nfs4_named_attr_get(np
, &cn
, NFS_OPEN_SHARE_ACCESS_NONE
,
8360 0, ctx
, &anp
, NULL
);
8361 if ((!error
&& !anp
) || (error
== ENOATTR
)) {
8366 vnode_put(NFSTOV(anp
));
/* fetch the named-attribute directory while np is marked busy */
8374 if ((error
= nfs_node_set_busy(np
, vfs_context_thread(ctx
)))) {
8377 adnp
= nfs4_named_attr_dir_get(np
, 1, ctx
);
8378 nfs_node_clear_busy(np
);
/* issue a VNOP_REMOVE of the attribute within the attr directory */
8384 vra
.a_desc
= &vnop_remove_desc
;
8385 vra
.a_dvp
= NFSTOV(adnp
);
8386 vra
.a_vp
= NFSTOV(anp
);
8389 vra
.a_context
= ctx
;
8390 error
= nfs_vnop_remove(&vra
);
/* drop the vnode references taken above (adnp, and anp if we got it) */
8393 vnode_put(NFSTOV(adnp
));
8396 vnode_put(NFSTOV(anp
));
8403 struct vnop_getxattr_args
/* {
8404 * struct vnodeop_desc *a_desc;
8406 * const char * a_name;
8410 * vfs_context_t a_context;
8413 vfs_context_t ctx
= ap
->a_context
;
8414 struct nfsmount
*nmp
;
8415 struct nfs_vattr nvattr
;
8416 struct componentname cn
;
8418 int error
= 0, isrsrcfork
;
/*
 * NFSv4 getxattr: look up the named-attribute node for a_name and read
 * its data (or just its size) into the caller's uio.
 * NOTE(review): the function name line and several interior lines are
 * elided in this excerpt; this is presumably nfs4_vnop_getxattr.
 */
8420 nmp
= VTONMP(ap
->a_vp
);
8421 if (nfs_mount_gone(nmp
)) {
/* server must support named attributes for xattrs to work */
8425 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
)) {
8428 error
= nfs_getattr(VTONFS(ap
->a_vp
), &nvattr
, ctx
, NGA_CACHED
);
/* fast path: attributes say this file has no named attrs at all */
8432 if (NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_NAMED_ATTR
) &&
8433 !(nvattr
.nva_flags
& NFS_FFLAG_HAS_NAMED_ATTRS
)) {
/* set up a LOOKUP componentname for the attribute name */
8437 bzero(&cn
, sizeof(cn
));
8438 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(ap
->a_name
, const, char *);
8439 cn
.cn_namelen
= strlen(ap
->a_name
);
8440 cn
.cn_nameiop
= LOOKUP
;
8441 cn
.cn_flags
= MAKEENTRY
;
8443 /* we'll normally try to prefetch data for xattrs... the resource fork is really a stream */
8444 isrsrcfork
= (bcmp(ap
->a_name
, XATTR_RESOURCEFORK_NAME
, sizeof(XATTR_RESOURCEFORK_NAME
)) == 0);
/* get the attribute node; prefetch data unless it's the resource fork */
8446 error
= nfs4_named_attr_get(VTONFS(ap
->a_vp
), &cn
, NFS_OPEN_SHARE_ACCESS_NONE
,
8447 !isrsrcfork
? NFS_GET_NAMED_ATTR_PREFETCH
: 0, ctx
, &anp
, NULL
);
8448 if ((!error
&& !anp
) || (error
== ENOENT
)) {
/* read the attribute's data via the buffer cache into a_uio */
8453 error
= nfs_bioread(anp
, ap
->a_uio
, 0, ctx
);
/* size-only query: report the attribute node's current size */
8455 *ap
->a_size
= anp
->n_size
;
8459 vnode_put(NFSTOV(anp
));
8466 struct vnop_setxattr_args
/* {
8467 * struct vnodeop_desc *a_desc;
8469 * const char * a_name;
8472 * vfs_context_t a_context;
8475 vfs_context_t ctx
= ap
->a_context
;
8476 int options
= ap
->a_options
;
8477 uio_t uio
= ap
->a_uio
;
8478 const char *name
= ap
->a_name
;
8479 struct nfsmount
*nmp
;
8480 struct componentname cn
;
8481 nfsnode_t anp
= NULL
;
8482 int error
= 0, closeerror
= 0, flags
, isrsrcfork
, isfinderinfo
, empty
= 0, i
;
8483 #define FINDERINFOSIZE 32
8484 uint8_t finfo
[FINDERINFOSIZE
];
8486 struct nfs_open_file
*nofp
= NULL
;
8487 char uio_buf
[UIO_SIZEOF(1)];
8489 struct vnop_write_args vwa
;
/*
 * NFSv4 setxattr: create/open the named-attribute node, write the
 * caller's data to it, flush, and close.  FinderInfo gets special
 * handling (fixed 32-byte payload; all-zeroes means "remove").
 * NOTE(review): the function name line and many interior lines are
 * elided in this excerpt; this is presumably nfs4_vnop_setxattr.
 */
8491 nmp
= VTONMP(ap
->a_vp
);
8492 if (nfs_mount_gone(nmp
)) {
8496 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
)) {
/* XATTR_CREATE and XATTR_REPLACE are mutually exclusive */
8500 if ((options
& XATTR_CREATE
) && (options
& XATTR_REPLACE
)) {
8504 /* XXX limitation based on need to back up uio on short write */
8505 if (uio_iovcnt(uio
) > 1) {
8506 printf("nfs4_vnop_setxattr: iovcnt > 1\n");
/* set up a CREATE componentname for the attribute name */
8510 bzero(&cn
, sizeof(cn
));
8511 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(name
, const, char *);
8512 cn
.cn_namelen
= strlen(name
);
8513 cn
.cn_nameiop
= CREATE
;
8514 cn
.cn_flags
= MAKEENTRY
;
/* classify the attribute: FinderInfo vs. resource fork vs. ordinary */
8516 isfinderinfo
= (bcmp(name
, XATTR_FINDERINFO_NAME
, sizeof(XATTR_FINDERINFO_NAME
)) == 0);
8517 isrsrcfork
= isfinderinfo
? 0 : (bcmp(name
, XATTR_RESOURCEFORK_NAME
, sizeof(XATTR_RESOURCEFORK_NAME
)) == 0);
8519 uio_setoffset(uio
, 0);
/* FinderInfo must be exactly FINDERINFOSIZE bytes; copy it in locally */
8522 if (uio_resid(uio
) != sizeof(finfo
)) {
8525 error
= uiomove((char*)&finfo
, sizeof(finfo
), uio
);
8529 /* setting a FinderInfo of all zeroes means remove the FinderInfo */
8531 for (i
= 0, finfop
= (uint32_t*)&finfo
; i
< (int)(sizeof(finfo
) / sizeof(uint32_t)); i
++) {
/* empty FinderInfo and no create/replace constraint: just remove it */
8537 if (empty
&& !(options
& (XATTR_CREATE
| XATTR_REPLACE
))) {
8538 error
= nfs4_named_attr_remove(VTONFS(ap
->a_vp
), anp
, name
, ctx
);
8539 if (error
== ENOENT
) {
8544 /* first, let's see if we get a create/replace error */
8548 * create/open the xattr
8550 * We need to make sure not to create it if XATTR_REPLACE.
8551 * For all xattrs except the resource fork, we also want to
8552 * truncate the xattr to remove any current data. We'll do
8553 * that by setting the size to 0 on create/open.
8556 if (!(options
& XATTR_REPLACE
)) {
8557 flags
|= NFS_GET_NAMED_ATTR_CREATE
;
8559 if (options
& XATTR_CREATE
) {
8560 flags
|= NFS_GET_NAMED_ATTR_CREATE_GUARDED
;
8563 flags
|= NFS_GET_NAMED_ATTR_TRUNCATE
;
/* create/open the attribute node with read/write access */
8566 error
= nfs4_named_attr_get(VTONFS(ap
->a_vp
), &cn
, NFS_OPEN_SHARE_ACCESS_BOTH
,
8567 flags
, ctx
, &anp
, &nofp
);
8568 if (!error
&& !anp
) {
8574 /* grab the open state from the get/create/open */
8575 if (nofp
&& !(error
= nfs_open_file_set_busy(nofp
, NULL
))) {
8576 nofp
->nof_flags
&= ~NFS_OPEN_FILE_CREATE
;
8577 nofp
->nof_creator
= NULL
;
8578 nfs_open_file_clear_busy(nofp
);
8581 /* Setting an empty FinderInfo really means remove it, skip to the close/remove */
8582 if (isfinderinfo
&& empty
) {
8587 * Write the data out and flush.
8589 * For FinderInfo, we've already copied the data to finfo, so do I/O from there.
8591 vwa
.a_desc
= &vnop_write_desc
;
8592 vwa
.a_vp
= NFSTOV(anp
);
8595 vwa
.a_context
= ctx
;
/* FinderInfo path: wrap the local finfo buffer in a one-iov uio */
8597 auio
= uio_createwithbuffer(1, 0, UIO_SYSSPACE
, UIO_WRITE
, &uio_buf
, sizeof(uio_buf
));
8598 uio_addiov(auio
, (uintptr_t)&finfo
, sizeof(finfo
));
8600 } else if (uio_resid(uio
) > 0) {
/* perform the write, then flush it to the server synchronously */
8604 error
= nfs_vnop_write(&vwa
);
8606 error
= nfs_flush(anp
, MNT_WAIT
, vfs_context_thread(ctx
), 0);
8610 /* Close the xattr. */
8612 int busyerror
= nfs_open_file_set_busy(nofp
, NULL
);
8613 closeerror
= nfs_close(anp
, nofp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_NONE
, ctx
);
8615 nfs_open_file_clear_busy(nofp
);
8618 if (!error
&& isfinderinfo
&& empty
) { /* Setting an empty FinderInfo really means remove it */
8619 error
= nfs4_named_attr_remove(VTONFS(ap
->a_vp
), anp
, name
, ctx
);
8620 if (error
== ENOENT
) {
8629 vnode_put(NFSTOV(anp
));
/* map "no such attr dir/entry" to success-ish semantics for setxattr */
8631 if (error
== ENOENT
) {
/*
 * nfs4_vnop_removexattr
 *
 * Remove extended attribute a_name from a_vp by delegating to
 * nfs4_named_attr_remove().  ENOENT from the lookup is remapped
 * (presumably to ENOATTR -- the remap target line is elided here).
 */
8638 nfs4_vnop_removexattr(
8639 struct vnop_removexattr_args
/* {
8640 * struct vnodeop_desc *a_desc;
8642 * const char * a_name;
8644 * vfs_context_t a_context;
8647 struct nfsmount
*nmp
= VTONMP(ap
->a_vp
);
8650 if (nfs_mount_gone(nmp
)) {
/* server must advertise named-attribute support */
8653 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
)) {
8657 error
= nfs4_named_attr_remove(VTONFS(ap
->a_vp
), NULL
, ap
->a_name
, ap
->a_context
);
8658 if (error
== ENOENT
) {
/*
 * nfs4_vnop_listxattr
 *
 * List the extended attribute names of np by reading the directory
 * entries of its named-attribute directory (adnp) through the NFS
 * directory buffer cache, copying each non-protected name (plus its
 * NUL terminator) into the caller's uio, or just summing lengths into
 * *a_size when uio is NULL.
 * NOTE(review): interior lines (returns, some branch bodies) are elided
 * in this excerpt; comments below are hedged accordingly.
 */
8665 nfs4_vnop_listxattr(
8666 struct vnop_listxattr_args
/* {
8667 * struct vnodeop_desc *a_desc;
8672 * vfs_context_t a_context;
8675 vfs_context_t ctx
= ap
->a_context
;
8676 nfsnode_t np
= VTONFS(ap
->a_vp
);
8677 uio_t uio
= ap
->a_uio
;
8678 nfsnode_t adnp
= NULL
;
8679 struct nfsmount
*nmp
;
8681 struct nfs_vattr nvattr
;
8682 uint64_t cookie
, nextcookie
, lbn
= 0;
8683 struct nfsbuf
*bp
= NULL
;
8684 struct nfs_dir_buf_header
*ndbhp
;
8685 struct direntry
*dp
;
8687 nmp
= VTONMP(ap
->a_vp
);
8688 if (nfs_mount_gone(nmp
)) {
8692 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
)) {
8696 error
= nfs_getattr(np
, &nvattr
, ctx
, NGA_CACHED
);
/* fast path: the file is known to have no named attributes */
8700 if (NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_NAMED_ATTR
) &&
8701 !(nvattr
.nva_flags
& NFS_FFLAG_HAS_NAMED_ATTRS
)) {
/* fetch the named-attribute directory while np is busy */
8705 if ((error
= nfs_node_set_busy(np
, vfs_context_thread(ctx
)))) {
8708 adnp
= nfs4_named_attr_dir_get(np
, 1, ctx
);
8709 nfs_node_clear_busy(np
);
8714 if ((error
= nfs_node_lock(adnp
))) {
/* invalidate the attr dir's buffers if it was flagged for it */
8718 if (adnp
->n_flag
& NNEEDINVALIDATE
) {
8719 adnp
->n_flag
&= ~NNEEDINVALIDATE
;
8721 nfs_node_unlock(adnp
);
8722 error
= nfs_vinvalbuf(NFSTOV(adnp
), 0, ctx
, 1);
8724 error
= nfs_node_lock(adnp
);
8732 * check for need to invalidate when (re)starting at beginning
8734 if (adnp
->n_flag
& NMODIFIED
) {
8736 nfs_node_unlock(adnp
);
8737 if ((error
= nfs_vinvalbuf(NFSTOV(adnp
), 0, ctx
, 1))) {
8741 nfs_node_unlock(adnp
);
8743 /* nfs_getattr() will check changed and purge caches */
8744 if ((error
= nfs_getattr(adnp
, &nvattr
, ctx
, NGA_UNCACHED
))) {
/* caller gave a zero-length buffer: nothing to copy */
8748 if (uio
&& (uio_resid(uio
) == 0)) {
8753 nextcookie
= lbn
= 0;
/* walk the attr directory one buffer (NFS_DIRBLKSIZ) at a time */
8755 while (!error
&& !done
) {
8756 OSAddAtomic64(1, &nfsstats
.biocache_readdirs
);
8757 cookie
= nextcookie
;
8759 error
= nfs_buf_get(adnp
, lbn
, NFS_DIRBLKSIZ
, vfs_context_thread(ctx
), NBLK_READ
, &bp
);
8763 ndbhp
= (struct nfs_dir_buf_header
*)bp
->nb_data
;
/* buffer not cached or not fully filled: (re)read it from the server */
8764 if (!ISSET(bp
->nb_flags
, NB_CACHE
) || !ISSET(ndbhp
->ndbh_flags
, NDB_FULL
)) {
8765 if (!ISSET(bp
->nb_flags
, NB_CACHE
)) { /* initialize the buffer */
8766 ndbhp
->ndbh_flags
= 0;
8767 ndbhp
->ndbh_count
= 0;
8768 ndbhp
->ndbh_entry_end
= sizeof(*ndbhp
);
8769 ndbhp
->ndbh_ncgen
= adnp
->n_ncgen
;
8771 error
= nfs_buf_readdir(bp
, ctx
);
8772 if (error
== NFSERR_DIRBUFDROPPED
) {
8776 nfs_buf_release(bp
, 1);
/* on a hard readdir error, drop cached state for the attr dir */
8778 if (error
&& (error
!= ENXIO
) && (error
!= ETIMEDOUT
) && (error
!= EINTR
) && (error
!= ERESTART
)) {
8779 if (!nfs_node_lock(adnp
)) {
8781 nfs_node_unlock(adnp
);
8783 nfs_vinvalbuf(NFSTOV(adnp
), 0, ctx
, 1);
8784 if (error
== NFSERR_BAD_COOKIE
) {
8793 /* go through all the entries copying/counting */
8794 dp
= NFS_DIR_BUF_FIRST_DIRENTRY(bp
);
8795 for (i
= 0; i
< ndbhp
->ndbh_count
; i
++) {
8796 if (!xattr_protected(dp
->d_name
)) {
/* size-only query: account for the name plus its NUL */
8798 *ap
->a_size
+= dp
->d_namlen
+ 1;
8799 } else if (uio_resid(uio
) < (dp
->d_namlen
+ 1)) {
8802 error
= uiomove(dp
->d_name
, dp
->d_namlen
+ 1, uio
);
8803 if (error
&& (error
!= EFAULT
)) {
8808 nextcookie
= dp
->d_seekoff
;
8809 dp
= NFS_DIRENTRY_NEXT(dp
);
8812 if (i
== ndbhp
->ndbh_count
) {
8813 /* hit end of buffer, move to next buffer */
8815 /* if we also hit EOF, we're done */
8816 if (ISSET(ndbhp
->ndbh_flags
, NDB_EOF
)) {
/* sanity check: a non-advancing cookie would loop forever */
8820 if (!error
&& !done
&& (nextcookie
== cookie
)) {
8821 printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie
, i
, ndbhp
->ndbh_count
);
8824 nfs_buf_release(bp
, 1);
8828 vnode_put(NFSTOV(adnp
));
/*
 * nfs4_vnop_getnamedstream
 *
 * Look up the named stream a_name of a_vp as an NFSv4 named attribute
 * and return its vnode in *a_svpp.  No open access is requested; the
 * stream node keeps a vnode reference handed to the caller on success.
 * NOTE(review): interior lines are elided in this excerpt.
 */
8835 nfs4_vnop_getnamedstream(
8836 struct vnop_getnamedstream_args
/* {
8837 * struct vnodeop_desc *a_desc;
8840 * const char *a_name;
8841 * enum nsoperation a_operation;
8843 * vfs_context_t a_context;
8846 vfs_context_t ctx
= ap
->a_context
;
8847 struct nfsmount
*nmp
;
8848 struct nfs_vattr nvattr
;
8849 struct componentname cn
;
8853 nmp
= VTONMP(ap
->a_vp
);
8854 if (nfs_mount_gone(nmp
)) {
8858 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
)) {
8861 error
= nfs_getattr(VTONFS(ap
->a_vp
), &nvattr
, ctx
, NGA_CACHED
);
/* fast path: file is known to have no named attributes */
8865 if (NFS_BITMAP_ISSET(nvattr
.nva_bitmap
, NFS_FATTR_NAMED_ATTR
) &&
8866 !(nvattr
.nva_flags
& NFS_FFLAG_HAS_NAMED_ATTRS
)) {
/* set up a LOOKUP componentname for the stream name */
8870 bzero(&cn
, sizeof(cn
));
8871 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(ap
->a_name
, const, char *);
8872 cn
.cn_namelen
= strlen(ap
->a_name
);
8873 cn
.cn_nameiop
= LOOKUP
;
8874 cn
.cn_flags
= MAKEENTRY
;
8876 error
= nfs4_named_attr_get(VTONFS(ap
->a_vp
), &cn
, NFS_OPEN_SHARE_ACCESS_NONE
,
8877 0, ctx
, &anp
, NULL
);
8878 if ((!error
&& !anp
) || (error
== ENOENT
)) {
/* success: hand the stream vnode (with its reference) to the caller */
8881 if (!error
&& anp
) {
8882 *ap
->a_svpp
= NFSTOV(anp
);
8884 vnode_put(NFSTOV(anp
));
/*
 * nfs4_vnop_makenamedstream
 *
 * Create (or open) the named stream a_name of a_vp as an NFSv4 named
 * attribute with read/write access, returning its vnode in *a_svpp.
 * NOTE(review): interior lines are elided in this excerpt.
 */
8890 nfs4_vnop_makenamedstream(
8891 struct vnop_makenamedstream_args
/* {
8892 * struct vnodeop_desc *a_desc;
8895 * const char *a_name;
8897 * vfs_context_t a_context;
8900 vfs_context_t ctx
= ap
->a_context
;
8901 struct nfsmount
*nmp
;
8902 struct componentname cn
;
8906 nmp
= VTONMP(ap
->a_vp
);
8907 if (nfs_mount_gone(nmp
)) {
8911 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
)) {
/* set up a CREATE componentname for the stream name */
8915 bzero(&cn
, sizeof(cn
));
8916 cn
.cn_nameptr
= __CAST_AWAY_QUALIFIER(ap
->a_name
, const, char *);
8917 cn
.cn_namelen
= strlen(ap
->a_name
);
8918 cn
.cn_nameiop
= CREATE
;
8919 cn
.cn_flags
= MAKEENTRY
;
/* create/open with both read and write access */
8921 error
= nfs4_named_attr_get(VTONFS(ap
->a_vp
), &cn
, NFS_OPEN_SHARE_ACCESS_BOTH
,
8922 NFS_GET_NAMED_ATTR_CREATE
, ctx
, &anp
, NULL
);
8923 if ((!error
&& !anp
) || (error
== ENOENT
)) {
/* success: hand the new stream vnode (with its reference) to caller */
8926 if (!error
&& anp
) {
8927 *ap
->a_svpp
= NFSTOV(anp
);
8929 vnode_put(NFSTOV(anp
));
/*
 * nfs4_vnop_removenamedstream
 *
 * Remove the named stream a_svp (named a_name) from a_vp by delegating
 * to nfs4_named_attr_remove().  Caller retains its references to the
 * passed-in vnodes; this routine takes none of its own.
 */
8935 nfs4_vnop_removenamedstream(
8936 struct vnop_removenamedstream_args
/* {
8937 * struct vnodeop_desc *a_desc;
8940 * const char *a_name;
8942 * vfs_context_t a_context;
8945 struct nfsmount
*nmp
= VTONMP(ap
->a_vp
);
8946 nfsnode_t np
= ap
->a_vp
? VTONFS(ap
->a_vp
) : NULL
;
8947 nfsnode_t anp
= ap
->a_svp
? VTONFS(ap
->a_svp
) : NULL
;
8949 if (nfs_mount_gone(nmp
)) {
8954 * Given that a_svp is a named stream, checking for
8955 * named attribute support is kinda pointless.
8957 if (!(nmp
->nm_fsattr
.nfsa_flags
& NFS_FSFLAG_NAMED_ATTR
)) {
8961 return nfs4_named_attr_remove(np
, anp
, ap
->a_name
, ap
->a_context
);
8965 #endif /* CONFIG_NFS4 */
8967 #endif /* CONFIG_NFS_CLIENT */