/*
 * Copyright (c) 2006-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <nfs/nfs_conf.h>
#if CONFIG_NFS_CLIENT

/*
 * vnode op calls for NFS version 4
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount_internal.h>
#include <sys/malloc.h>
#include <sys/kpi_mbuf.h>
#include <sys/conf.h>
#include <sys/vnode_internal.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/ubc_internal.h>
#include <sys/attr.h>
#include <sys/signalvar.h>
#include <sys/uio_internal.h>
#include <sys/xattr.h>
#include <sys/paths.h>

#include <vfs/vfs_support.h>

#include <sys/vm.h>

#include <sys/time.h>
#include <kern/clock.h>
#include <libkern/OSAtomic.h>

#include <miscfs/fifofs/fifo.h>
#include <miscfs/specfs/specdev.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_gss.h>
#include <nfs/nfsmount.h>
#include <nfs/nfs_lock.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <vm/vm_kern.h>

#include <kern/task.h>
#include <kern/sched_prim.h>

#if CONFIG_NFS4
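/*
 * Perform an NFSv4 ACCESS RPC for the given node and update the node's
 * cached access results for the calling credential.  The compound sent is
 * PUTFH + ACCESS + GETATTR; the access bits actually granted are passed
 * back through *access.
 */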
int
nfs4_access_rpc(nfsnode_t np, u_int32_t *access, int rpcflags, vfs_context_t ctx)
{
	int error = 0, lockerror = ENOENT, status, numops, slot;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct timeval now;
	uint32_t access_result = 0, supported = 0, missing;
	struct nfsmount *nmp = NFSTONMP(np);
	int nfsvers = nmp->nm_vers;
	uid_t uid;
	struct nfsreq_secinfo_args si;

	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return 0;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, ACCESS, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "access", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_ACCESS);
	nfsm_chain_add_32(error, &nmreq, *access);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx),
	    &si, rpcflags, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_ACCESS);
	nfsm_chain_get_32(error, &nmrep, supported);
	nfsm_chain_get_32(error, &nmrep, access_result);
	nfsmout_if(error);
	if ((missing = (*access & ~supported))) {
		/* missing support for something(s) we wanted */
		if (missing & NFS_ACCESS_DELETE) {
			/*
			 * If the server doesn't report DELETE (possible
			 * on UNIX systems), we'll assume that it is OK
			 * and just let any subsequent delete action fail
			 * if it really isn't deletable.
			 */
			access_result |= NFS_ACCESS_DELETE;
		}
	}
	/* ".zfs" subdirectories may erroneously give a denied answer for modify/delete */
	if (nfs_access_dotzfs) {
		vnode_t dvp = NULLVP;
		if (np->n_flag & NISDOTZFSCHILD) { /* may be able to create/delete snapshot dirs */
			access_result |= (NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE);
		} else if (((dvp = vnode_getparent(NFSTOV(np))) != NULLVP) && (VTONFS(dvp)->n_flag & NISDOTZFSCHILD)) {
			access_result |= NFS_ACCESS_DELETE; /* may be able to delete snapshot dirs */
		}
		if (dvp != NULLVP) {
			vnode_put(dvp);
		}
	}
	/* Some servers report DELETE support but erroneously give a denied answer. */
	if (nfs_access_delete && (*access & NFS_ACCESS_DELETE) && !(access_result & NFS_ACCESS_DELETE)) {
		access_result |= NFS_ACCESS_DELETE;
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	nfsmout_if(error);

	if (nfs_mount_gone(nmp)) {
		error = ENXIO;
	}
	nfsmout_if(error);

	if (auth_is_kerberized(np->n_auth) || auth_is_kerberized(nmp->nm_auth)) {
		uid = nfs_cred_getasid2uid(vfs_context_ucred(ctx));
	} else {
		uid = kauth_cred_getuid(vfs_context_ucred(ctx));
	}
	slot = nfs_node_access_slot(np, uid, 1);
	np->n_accessuid[slot] = uid;
	microuptime(&now);
	np->n_accessstamp[slot] = now.tv_sec;
	np->n_access[slot] = access_result;

	/* pass back the access returned with this request */
	*access = np->n_access[slot];
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

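/*
 * Fetch attributes for a file handle with an NFSv4 PUTFH + GETATTR compound.
 * NGA_MONITOR and NGA_SOFT adjust the RPC flags, and NGA_ACL additionally
 * requests the ACL when the file system advertises ACL support.
 */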
int
nfs4_getattr_rpc(
	nfsnode_t np,
	mount_t mp,
	u_char *fhp,
	size_t fhsize,
	int flags,
	vfs_context_t ctx,
	struct nfs_vattr *nvap,
	u_int64_t *xidp)
{
	struct nfsmount *nmp = mp ? VFSTONFS(mp) : NFSTONMP(np);
	int error = 0, status, nfsvers, numops, rpcflags = 0, acls;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	if (np && (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)) {
		nfs4_default_attrs_for_referral_trigger(VTONFS(np->n_parent), NULL, 0, nvap, NULL);
		return 0;
	}

	if (flags & NGA_MONITOR) { /* vnode monitor requests should be soft */
		rpcflags = R_RECOVER;
	}

	if (flags & NGA_SOFT) { /* Return ETIMEDOUT if server not responding */
		rpcflags |= R_SOFT;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "getattr", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fhp, fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	if ((flags & NGA_ACL) && acls) {
		NFS_BITMAP_SET(bitmap, NFS_FATTR_ACL);
	}
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, mp, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx),
	    NULL, rpcflags, &nmrep, xidp, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((flags & NGA_ACL) && acls && !NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL)) {
		/* we asked for the ACL but didn't get one... assume there isn't one */
		NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_ACL);
		nvap->nva_acl = NULL;
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

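/*
 * Read the contents of a symbolic link via PUTFH + GETATTR + READLINK.
 * The link target is copied into buf and *buflenp is updated with the
 * number of bytes returned (truncated to fit the caller's buffer).
 */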
int
nfs4_readlink_rpc(nfsnode_t np, char *buf, size_t *buflenp, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, status, numops;
	size_t len = 0;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, READLINK
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "readlink", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READLINK);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READLINK);
	nfsm_chain_get_32(error, &nmrep, len);
	nfsmout_if(error);
	if (len >= *buflenp) {
		if (np->n_size && (np->n_size < *buflenp)) {
			len = np->n_size;
		} else {
			len = *buflenp - 1;
		}
	}
	nfsm_chain_get_opaque(error, &nmrep, len, buf);
	if (!error) {
		*buflenp = len;
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

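/*
 * Start an asynchronous NFSv4 READ.  The compound is PUTFH + READ using the
 * node's current open/lock stateid; the pending request is returned through
 * *reqp so the caller can finish (or cancel) it later.
 */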
int
nfs4_read_rpc_async(
	nfsnode_t np,
	off_t offset,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, READ
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "read", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, len);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}

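/*
 * Finish an asynchronous NFSv4 READ: parse the reply, copy the returned
 * data into the caller's uio, and report the byte count and EOF state.
 */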
int
nfs4_read_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	uio_t uio,
	size_t *lenp,
	int *eofp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, nfsvers, numops, status, eof = 0;
	size_t retlen = 0;
	u_int64_t xid;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		nfs_request_async_cancel(req);
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) { /* async request restarted */
		return error;
	}

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
	nfsm_chain_get_32(error, &nmrep, eof);
	nfsm_chain_get_32(error, &nmrep, retlen);
	if (!error) {
		*lenp = MIN(retlen, *lenp);
		error = nfsm_chain_get_uio(&nmrep, *lenp, uio);
	}
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	if (eofp) {
		if (!eof && !retlen) {
			eof = 1;
		}
		*eofp = eof;
	}
	nfsm_chain_cleanup(&nmrep);
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) {
		microuptime(&np->n_lastio);
	}
	return error;
}

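/*
 * Start an asynchronous NFSv4 WRITE (PUTFH + WRITE + GETATTR).  Data is
 * pulled from the caller's uio; on async mounts, stable (sync) writes are
 * demoted to UNSTABLE.
 */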
int
nfs4_write_rpc_async(
	nfsnode_t np,
	uio_t uio,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	int iomode,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	mount_t mp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	/* for async mounts, don't bother sending sync write requests */
	if ((iomode != NFS_WRITE_UNSTABLE) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) {
		iomode = NFS_WRITE_UNSTABLE;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, WRITE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 25 * NFSX_UNSIGNED + len);
	nfsm_chain_add_compound_header(error, &nmreq, "write", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_WRITE);
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, uio_offset(uio));
	nfsm_chain_add_32(error, &nmreq, iomode);
	nfsm_chain_add_32(error, &nmreq, len);
	if (!error) {
		error = nfsm_chain_add_uio(&nmreq, uio, len);
	}
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs4_getattr_write_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}

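/*
 * Finish an asynchronous NFSv4 WRITE: parse the bytes-written count, the
 * commit level, and the write verifier (recording a changed verifier on
 * the mount), then load the partial post-write attributes.
 */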
int
nfs4_write_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	int *iomodep,
	size_t *rlenp,
	uint64_t *wverfp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, nfsvers, numops, status;
	int committed = NFS_WRITE_FILESYNC;
	size_t rlen = 0;
	u_int64_t xid, wverf;
	mount_t mp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		nfs_request_async_cancel(req);
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) { /* async request restarted */
		return error;
	}
	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		error = ENXIO;
	}
	if (!error && (lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_WRITE);
	nfsm_chain_get_32(error, &nmrep, rlen);
	nfsmout_if(error);
	*rlenp = rlen;
	if (rlen <= 0) {
		error = NFSERR_IO;
	}
	nfsm_chain_get_32(error, &nmrep, committed);
	nfsm_chain_get_64(error, &nmrep, wverf);
	nfsmout_if(error);
	if (wverfp) {
		*wverfp = wverf;
	}
	lck_mtx_lock(&nmp->nm_lock);
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
		nmp->nm_verf = wverf;
		nmp->nm_state |= NFSSTA_HASWRITEVERF;
	} else if (nmp->nm_verf != wverf) {
		nmp->nm_verf = wverf;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);

	/*
	 * NFSv4 WRITE RPCs carry a partial GETATTR request - only type, change,
	 * size, metadata time, and modify time are requested.  In that case we
	 * update just the requested attributes and do not update the time stamp.
	 */
	np->n_vattr.nva_flags |= NFS_FFLAG_PARTIAL_WRITE;
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	np->n_vattr.nva_flags &= ~NFS_FFLAG_PARTIAL_WRITE;

nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmrep);
	if ((committed != NFS_WRITE_FILESYNC) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) {
		committed = NFS_WRITE_FILESYNC;
	}
	*iomodep = committed;
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) {
		microuptime(&np->n_lastio);
	}
	return error;
}

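/*
 * Remove a directory entry with PUTFH + REMOVE + GETATTR, retrying while
 * the server reports NFSERR_GRACE.  The parent's attributes are reloaded
 * (or invalidated) and the REMOVE operation's status is returned.
 */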
int
nfs4_remove_rpc(
	nfsnode_t dnp,
	char *name,
	int namelen,
	thread_t thd,
	kauth_cred_t cred)
{
	int error = 0, lockerror = ENOENT, remove_error = 0, status;
	struct nfsmount *nmp;
	int nfsvers, numops;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
restart:
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, REMOVE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "remove", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_REMOVE);
	nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_REMOVE);
	remove_error = error;
	nfsm_chain_check_change_info(error, &nmrep, dnp);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
	if (error && !lockerror) {
		NATTRINVALIDATE(dnp);
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	if (!lockerror) {
		dnp->n_flag |= NMODIFIED;
		nfs_node_unlock(dnp);
	}
	if (error == NFSERR_GRACE) {
		tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
		goto restart;
	}

	return remove_error;
}

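/*
 * Rename an entry between two directories with a single compound:
 * PUTFH(from), SAVEFH, PUTFH(to), RENAME, GETATTR(to), RESTOREFH,
 * GETATTR(from).  Both directories' cached attributes are refreshed
 * or invalidated as needed.
 */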
int
nfs4_rename_rpc(
	nfsnode_t fdnp,
	char *fnameptr,
	int fnamelen,
	nfsnode_t tdnp,
	char *tnameptr,
	int tnamelen,
	vfs_context_t ctx)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsmount *nmp;
	u_int64_t xid, savedxid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(fdnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (fdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, fdnp, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM)
	numops = 7;
	nfsm_chain_build_alloc_init(error, &nmreq, 30 * NFSX_UNSIGNED + fnamelen + tnamelen);
	nfsm_chain_add_compound_header(error, &nmreq, "rename", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fdnp->n_fhp, fdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RENAME);
	nfsm_chain_add_name(error, &nmreq, fnameptr, fnamelen, nmp);
	nfsm_chain_add_name(error, &nmreq, tnameptr, tnamelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, fdnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request(fdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock2(fdnp, tdnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RENAME);
	nfsm_chain_check_change_info(error, &nmrep, fdnp);
	nfsm_chain_check_change_info(error, &nmrep, tdnp);
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	savedxid = xid;
	nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
	if (error && !lockerror) {
		NATTRINVALIDATE(tdnp);
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	xid = savedxid;
	nfsm_chain_loadattr(error, &nmrep, fdnp, nfsvers, &xid);
	if (error && !lockerror) {
		NATTRINVALIDATE(fdnp);
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if (!lockerror) {
		fdnp->n_flag |= NMODIFIED;
		tdnp->n_flag |= NMODIFIED;
		nfs_node_unlock2(fdnp, tdnp);
	}
	return error;
}

/*
 * NFS V4 readdir RPC.
 */
int
nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, nfsvers, namedattr, rdirplus, bigcookies, numops;
	int i, status, more_entries = 1, eof, bp_dropped = 0;
	uint16_t namlen, reclen;
	uint32_t nmreaddirsize, nmrsize;
	uint32_t namlen32, skiplen, fhlen, xlen, attrlen;
	uint64_t padlen, cookie, lastcookie, xid, savedxid, space_free, space_needed;
	struct nfsm_chain nmreq, nmrep, nmrepsave;
	fhandle_t *fh;
	struct nfs_vattr *nvattr, *nvattrp;
	struct nfs_dir_buf_header *ndbhp;
	struct direntry *dp;
	char *padstart;
	const char *tag;
	uint32_t entry_attrs[NFS_ATTR_BITMAP_LEN];
	struct timeval now;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	nmreaddirsize = nmp->nm_readdirsize;
	nmrsize = nmp->nm_rsize;
	bigcookies = nmp->nm_state & NFSSTA_BIGCOOKIES;
	namedattr = (dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) ? 1 : 0;
	rdirplus = (NMFLAG(nmp, RDIRPLUS) || namedattr) ? 1 : 0;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);

	/*
	 * Set up attribute request for entries.
	 * For READDIRPLUS functionality, get everything.
	 * Otherwise, just get what we need for struct direntry.
	 */
	if (rdirplus) {
		tag = "readdirplus";
		NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, entry_attrs);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEHANDLE);
	} else {
		tag = "readdir";
		NFS_CLEAR_ATTRIBUTES(entry_attrs);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_TYPE);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEID);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_MOUNTED_ON_FILEID);
	}
	NFS_BITMAP_SET(entry_attrs, NFS_FATTR_RDATTR_ERROR);

	/* lock to protect access to cookie verifier */
	if ((lockerror = nfs_node_lock(dnp))) {
		return lockerror;
	}

	fh = zalloc(nfs_fhandle_zone);
	MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);

	/* determine cookie to use, and move dp to the right offset */
	ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
	dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
	if (ndbhp->ndbh_count) {
		for (i = 0; i < ndbhp->ndbh_count - 1; i++) {
			dp = NFS_DIRENTRY_NEXT(dp);
		}
		cookie = dp->d_seekoff;
		dp = NFS_DIRENTRY_NEXT(dp);
	} else {
		cookie = bp->nb_lblkno;
		/* increment with every buffer read */
		OSAddAtomic64(1, &nfsstats.readdir_bios);
	}
	lastcookie = cookie;

	/*
	 * The NFS client is responsible for the "." and ".." entries in the
	 * directory. So, we put them at the start of the first buffer.
	 * Don't bother for attribute directories.
	 */
	if (((bp->nb_lblkno == 0) && (ndbhp->ndbh_count == 0)) &&
	    !(dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
		fh->fh_len = 0;
		fhlen = rdirplus ? fh->fh_len + 1 : 0;
		xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
		/* "." */
		namlen = 1;
		reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
		if (xlen) {
			bzero(&dp->d_name[namlen + 1], xlen);
		}
		dp->d_namlen = namlen;
		strlcpy(dp->d_name, ".", namlen + 1);
		dp->d_fileno = dnp->n_vattr.nva_fileid;
		dp->d_type = DT_DIR;
		dp->d_reclen = reclen;
		dp->d_seekoff = 1;
		padstart = dp->d_name + dp->d_namlen + 1 + xlen;
		dp = NFS_DIRENTRY_NEXT(dp);
		padlen = (char*)dp - padstart;
		if (padlen > 0) {
			bzero(padstart, padlen);
		}
		if (rdirplus) { /* zero out attributes */
			bzero(NFS_DIR_BUF_NVATTR(bp, 0), sizeof(struct nfs_vattr));
		}

		/* ".." */
		namlen = 2;
		reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
		if (xlen) {
			bzero(&dp->d_name[namlen + 1], xlen);
		}
		dp->d_namlen = namlen;
		strlcpy(dp->d_name, "..", namlen + 1);
		if (dnp->n_parent) {
			dp->d_fileno = VTONFS(dnp->n_parent)->n_vattr.nva_fileid;
		} else {
			dp->d_fileno = dnp->n_vattr.nva_fileid;
		}
		dp->d_type = DT_DIR;
		dp->d_reclen = reclen;
		dp->d_seekoff = 2;
		padstart = dp->d_name + dp->d_namlen + 1 + xlen;
		dp = NFS_DIRENTRY_NEXT(dp);
		padlen = (char*)dp - padstart;
		if (padlen > 0) {
			bzero(padstart, padlen);
		}
		if (rdirplus) { /* zero out attributes */
			bzero(NFS_DIR_BUF_NVATTR(bp, 1), sizeof(struct nfs_vattr));
		}

		ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
		ndbhp->ndbh_count = 2;
	}

	/*
	 * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
	 * the buffer is full (or we hit EOF). Then put the remainder of the
	 * results in the next buffer(s).
	 */
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);
	while (nfs_dir_buf_freespace(bp, rdirplus) && !(ndbhp->ndbh_flags & NDB_FULL)) {
		// PUTFH, GETATTR, READDIR
		numops = 3;
		nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
		nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
		nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
		nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_READDIR);
		nfsm_chain_add_64(error, &nmreq, (cookie <= 2) ? 0 : cookie);
		nfsm_chain_add_64(error, &nmreq, dnp->n_cookieverf);
		nfsm_chain_add_32(error, &nmreq, nmreaddirsize);
		nfsm_chain_add_32(error, &nmreq, nmrsize);
		nfsm_chain_add_bitmap_supported(error, &nmreq, entry_attrs, nmp, dnp);
		nfsm_chain_build_done(error, &nmreq);
		nfsm_assert(error, (numops == 0), EPROTO);
		nfs_node_unlock(dnp);
		nfsmout_if(error);
		error = nfs_request(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

		if ((lockerror = nfs_node_lock(dnp))) {
			error = lockerror;
		}

		savedxid = xid;
		nfsm_chain_skip_tag(error, &nmrep);
		nfsm_chain_get_32(error, &nmrep, numops);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
		nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_READDIR);
		nfsm_chain_get_64(error, &nmrep, dnp->n_cookieverf);
		nfsm_chain_get_32(error, &nmrep, more_entries);

		if (!lockerror) {
			nfs_node_unlock(dnp);
			lockerror = ENOENT;
		}
		nfsmout_if(error);

		if (rdirplus) {
			microuptime(&now);
			if (lastcookie == 0) {
				dnp->n_rdirplusstamp_sof = now.tv_sec;
				dnp->n_rdirplusstamp_eof = 0;
			}
		}

		/* loop through the entries packing them into the buffer */
		while (more_entries) {
			/* Entry: COOKIE, NAME, FATTR */
			nfsm_chain_get_64(error, &nmrep, cookie);
			nfsm_chain_get_32(error, &nmrep, namlen32);
			if (namlen32 > UINT16_MAX) {
				error = EBADRPC;
				goto nfsmout;
			}
			namlen = (uint16_t)namlen32;
			nfsmout_if(error);
			if (!bigcookies && (cookie >> 32) && (nmp == NFSTONMP(dnp))) {
				/* we've got a big cookie, make sure flag is set */
				lck_mtx_lock(&nmp->nm_lock);
				nmp->nm_state |= NFSSTA_BIGCOOKIES;
				lck_mtx_unlock(&nmp->nm_lock);
				bigcookies = 1;
			}
			/* just truncate names that don't fit in direntry.d_name */
			if (namlen <= 0) {
				error = EBADRPC;
				goto nfsmout;
			}
			if (namlen > (sizeof(dp->d_name) - 1)) {
				skiplen = namlen - sizeof(dp->d_name) + 1;
				namlen = sizeof(dp->d_name) - 1;
			} else {
				skiplen = 0;
			}
			/* guess that fh size will be same as parent */
			fhlen = rdirplus ? (1 + dnp->n_fhsize) : 0;
			xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
			attrlen = rdirplus ? sizeof(struct nfs_vattr) : 0;
			reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
			space_needed = reclen + attrlen;
			space_free = nfs_dir_buf_freespace(bp, rdirplus);
			if (space_needed > space_free) {
				/*
				 * We still have entries to pack, but we've
				 * run out of room in the current buffer.
				 * So we need to move to the next buffer.
				 * The block# for the next buffer is the
				 * last cookie in the current buffer.
				 */
nextbuffer:
				ndbhp->ndbh_flags |= NDB_FULL;
				nfs_buf_release(bp, 0);
				bp_dropped = 1;
				bp = NULL;
				error = nfs_buf_get(dnp, lastcookie, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
				nfsmout_if(error);
				/* initialize buffer */
				ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
				ndbhp->ndbh_flags = 0;
				ndbhp->ndbh_count = 0;
				ndbhp->ndbh_entry_end = sizeof(*ndbhp);
				ndbhp->ndbh_ncgen = dnp->n_ncgen;
				space_free = nfs_dir_buf_freespace(bp, rdirplus);
				dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
				/* increment with every buffer read */
				OSAddAtomic64(1, &nfsstats.readdir_bios);
			}
			nmrepsave = nmrep;
			dp->d_fileno = cookie; /* placeholder */
			dp->d_seekoff = cookie;
			dp->d_namlen = namlen;
			dp->d_reclen = reclen;
			dp->d_type = DT_UNKNOWN;
			nfsm_chain_get_opaque(error, &nmrep, namlen, dp->d_name);
			nfsmout_if(error);
			dp->d_name[namlen] = '\0';
			if (skiplen) {
				nfsm_chain_adv(error, &nmrep,
				    nfsm_rndup(namlen + skiplen) - nfsm_rndup(namlen));
			}
			nfsmout_if(error);
			nvattrp = rdirplus ? NFS_DIR_BUF_NVATTR(bp, ndbhp->ndbh_count) : nvattr;
			error = nfs4_parsefattr(&nmrep, NULL, nvattrp, fh, NULL, NULL);
			if (!error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_ACL)) {
				/* we do NOT want ACLs returned to us here */
				NFS_BITMAP_CLR(nvattrp->nva_bitmap, NFS_FATTR_ACL);
				if (nvattrp->nva_acl) {
					kauth_acl_free(nvattrp->nva_acl);
					nvattrp->nva_acl = NULL;
				}
			}
			if (error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_RDATTR_ERROR)) {
				/* OK, we may not have gotten all of the attributes but we will use what we can. */
				if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
					/* set this up to look like a referral trigger */
					nfs4_default_attrs_for_referral_trigger(dnp, dp->d_name, namlen, nvattrp, fh);
				}
				error = 0;
			}
			/* check for more entries after this one */
			nfsm_chain_get_32(error, &nmrep, more_entries);
			nfsmout_if(error);

			/* Skip any "." and ".." entries returned from server. */
			/* Also skip any bothersome named attribute entries. */
			if (((dp->d_name[0] == '.') && ((namlen == 1) || ((namlen == 2) && (dp->d_name[1] == '.')))) ||
			    (namedattr && (namlen == 11) && (!strcmp(dp->d_name, "SUNWattr_ro") || !strcmp(dp->d_name, "SUNWattr_rw")))) {
				lastcookie = cookie;
				continue;
			}

			if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_TYPE)) {
				dp->d_type = IFTODT(VTTOIF(nvattrp->nva_type));
			}
			if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEID)) {
				dp->d_fileno = nvattrp->nva_fileid;
			}
			if (rdirplus) {
				/* fileid is already in d_fileno, so stash xid in attrs */
				nvattrp->nva_fileid = savedxid;
				nvattrp->nva_flags |= NFS_FFLAG_FILEID_CONTAINS_XID;
				if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
					fhlen = fh->fh_len + 1;
					xlen = fhlen + sizeof(time_t);
					reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
					space_needed = reclen + attrlen;
					if (space_needed > space_free) {
						/* didn't actually have the room... move on to next buffer */
						nmrep = nmrepsave;
						goto nextbuffer;
					}
					/* pack the file handle into the record */
					dp->d_name[dp->d_namlen + 1] = (unsigned char)fh->fh_len; /* No truncation because fh_len's value is checked during nfs4_parsefattr() */
					bcopy(fh->fh_data, &dp->d_name[dp->d_namlen + 2], fh->fh_len);
				} else {
					/* mark the file handle invalid */
					fh->fh_len = 0;
					fhlen = fh->fh_len + 1;
					xlen = fhlen + sizeof(time_t);
					reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
					bzero(&dp->d_name[dp->d_namlen + 1], fhlen);
				}
				*(time_t*)(&dp->d_name[dp->d_namlen + 1 + fhlen]) = now.tv_sec;
				dp->d_reclen = reclen;
				nfs_rdirplus_update_node_attrs(dnp, dp, fh, nvattrp, &savedxid);
			}
			padstart = dp->d_name + dp->d_namlen + 1 + xlen;
			ndbhp->ndbh_count++;
			lastcookie = cookie;

			/* advance to next direntry in buffer */
			dp = NFS_DIRENTRY_NEXT(dp);
			ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
			/* zero out the pad bytes */
			padlen = (char*)dp - padstart;
			if (padlen > 0) {
				bzero(padstart, padlen);
			}
		}
		/* Finally, get the eof boolean */
		nfsm_chain_get_32(error, &nmrep, eof);
		nfsmout_if(error);
		if (eof) {
			ndbhp->ndbh_flags |= (NDB_FULL | NDB_EOF);
			nfs_node_lock_force(dnp);
			dnp->n_eofcookie = lastcookie;
			if (rdirplus) {
				dnp->n_rdirplusstamp_eof = now.tv_sec;
			}
			nfs_node_unlock(dnp);
		} else {
			more_entries = 1;
		}
		if (bp_dropped) {
			nfs_buf_release(bp, 0);
			bp = NULL;
			break;
		}
		if ((lockerror = nfs_node_lock(dnp))) {
			error = lockerror;
		}
		nfsmout_if(error);
		nfsm_chain_cleanup(&nmrep);
		nfsm_chain_null(&nmreq);
	}
nfsmout:
	if (bp_dropped && bp) {
		nfs_buf_release(bp, 0);
	}
	if (!lockerror) {
		nfs_node_unlock(dnp);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	NFS_ZFREE(nfs_fhandle_zone, fh);
	FREE(nvattr, M_TEMP);
	return bp_dropped ? NFSERR_DIRBUFDROPPED : error;
}

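/*
 * Start an asynchronous NFSv4 LOOKUP (or LOOKUPP for "..") as a
 * PUTFH + GETATTR + LOOKUP(P) + GETFH + GETATTR compound, working around
 * ".zfs" directories that can't handle the named-attribute query.
 */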
int
nfs4_lookup_rpc_async(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq **reqp)
{
	int error = 0, isdotdot = 0, nfsvers, numops;
	struct nfsm_chain nmreq;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsmount *nmp;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
		isdotdot = 1;
		NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
	} else {
		NFSREQ_SECINFO_SET(&si, dnp, dnp->n_fhp, dnp->n_fhsize, name, namelen);
	}

	nfsm_chain_null(&nmreq);

	// PUTFH, GETATTR, LOOKUP(P), GETFH, GETATTR (FH)
	numops = 5;
	nfsm_chain_build_alloc_init(error, &nmreq, 20 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "lookup", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	numops--;
	if (isdotdot) {
		nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUPP);
	} else {
		nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
		nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	}
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	/* some ".zfs" directories can't handle being asked for some attributes */
	if ((dnp->n_flag & NISDOTZFS) && !isdotdot) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	if ((dnp->n_flag & NISDOTZFSCHILD) && isdotdot) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	if (((namelen == 4) && (name[0] == '.') && (name[1] == 'z') && (name[2] == 'f') && (name[3] == 's'))) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}

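/*
 * Finish an asynchronous NFSv4 LOOKUP: parse the directory attributes, the
 * new file handle, and the target's attributes (treating NFSERR_MOVED and
 * NFSERR_INVAL as a referral trigger), and fetch SECINFO to set the mount's
 * default security flavor if that is still needed.
 */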
int
nfs4_lookup_rpc_async_finish(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq *req,
	u_int64_t *xidp,
	fhandle_t *fhp,
	struct nfs_vattr *nvap)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops, isdotdot = 0;
	uint32_t op = NFS_OP_LOOKUP;
	u_int64_t xid;
	struct nfsmount *nmp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(dnp);
	if (nmp == NULL) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
		isdotdot = 1;
	}

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	if (xidp) {
		*xidp = xid;
	}
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);

	nfsm_chain_op_check(error, &nmrep, (isdotdot ? NFS_OP_LOOKUPP : NFS_OP_LOOKUP));
	nfsmout_if(error || !fhp || !nvap);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETFH);
	nfsm_chain_get_32(error, &nmrep, fhp->fh_len);
	if (error == 0 && fhp->fh_len > sizeof(fhp->fh_data)) {
		error = EBADRPC;
	}
	nfsmout_if(error);
	nfsm_chain_get_opaque(error, &nmrep, fhp->fh_len, fhp->fh_data);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
		/* set this up to look like a referral trigger */
		nfs4_default_attrs_for_referral_trigger(dnp, name, namelen, nvap, fhp);
		error = 0;
	} else {
		nfsmout_if(error);
		error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(dnp);
	}
	nfsm_chain_cleanup(&nmrep);
	if (!error && (op == NFS_OP_LOOKUP) && (nmp->nm_state & NFSSTA_NEEDSECINFO)) {
		/* We still need to get SECINFO to set default for mount. */
		/* Do so for the first LOOKUP that returns successfully. */
		struct nfs_sec sec;

		sec.count = NX_MAX_SEC_FLAVORS;
		error = nfs4_secinfo_rpc(nmp, &req->r_secinfo, vfs_context_ucred(ctx), sec.flavors, &sec.count);
		/* [sigh] some implementations return "illegal" error for unsupported ops */
		if (error == NFSERR_OP_ILLEGAL) {
			error = 0;
		}
		if (!error) {
			/* set our default security flavor to the first in the list */
			lck_mtx_lock(&nmp->nm_lock);
			if (sec.count) {
				nmp->nm_auth = sec.flavors[0];
			}
			nmp->nm_state &= ~NFSSTA_NEEDSECINFO;
			lck_mtx_unlock(&nmp->nm_lock);
		}
	}
	return error;
}

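/*
 * Send an NFSv4 COMMIT (PUTFH + COMMIT + GETATTR) for the given range and
 * compare the returned write verifier with the one the caller wrote with;
 * a mismatch is reported as NFSERR_STALEWRITEVERF.
 */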
int
nfs4_commit_rpc(
	nfsnode_t np,
	uint64_t offset,
	uint64_t count,
	kauth_cred_t cred,
	uint64_t wverf)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, status, nfsvers, numops;
	u_int64_t xid, newwverf;
	uint32_t count32;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	FSDBG(521, np, offset, count, nmp ? nmp->nm_state : 0);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
		return 0;
	}
	nfsvers = nmp->nm_vers;
	count32 = count > UINT32_MAX ? 0 : (uint32_t)count;

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, COMMIT, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 19 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "commit", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_COMMIT);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, count32);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    current_thread(), cred, &si, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_COMMIT);
	nfsm_chain_get_64(error, &nmrep, newwverf);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsmout_if(error);
	lck_mtx_lock(&nmp->nm_lock);
	if (nmp->nm_verf != newwverf) {
		nmp->nm_verf = newwverf;
	}
	if (wverf != newwverf) {
		error = NFSERR_STALEWRITEVERF;
	}
	lck_mtx_unlock(&nmp->nm_lock);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

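/*
 * Fetch pathconf-style limits for a node by asking GETATTR for maxlink,
 * maxname, no_trunc, chown_restricted, and case sensitivity/preservation,
 * filling in the caller's nfs_fsattr and refreshing the node's attributes.
 */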
int
nfs4_pathconf_rpc(
	nfsnode_t np,
	struct nfs_fsattr *nfsap,
	vfs_context_t ctx)
{
	u_int64_t xid;
	int error = 0, lockerror, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	struct nfsmount *nmp = NFSTONMP(np);
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfs_vattr *nvattr;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
	NVATTR_INIT(nvattr);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/* NFSv4: fetch "pathconf" info for this node */
	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "pathconf", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXLINK);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXNAME);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_NO_TRUNC);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CHOWN_RESTRICTED);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_INSENSITIVE);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_PRESERVING);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, nfsap, nvattr, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	if (!error) {
		nfs_loadattrcache(np, nvattr, &xid, 0);
	}
	if (!lockerror) {
		nfs_node_unlock(np);
	}
nfsmout:
	NVATTR_CLEANUP(nvattr);
	FREE(nvattr, M_TEMP);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

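/*
 * VNOP_GETATTR for NFSv4: fetch (possibly cached) attributes for the vnode
 * and translate them into the VFS vnode_attr structure, returning ACL, mode,
 * flags, and timestamp fields only when the server actually supplied them.
 */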
int
nfs4_vnop_getattr(
	struct vnop_getattr_args /* {
	                          *  struct vnodeop_desc *a_desc;
	                          *  vnode_t a_vp;
	                          *  struct vnode_attr *a_vap;
	                          *  vfs_context_t a_context;
	                          *  } */*ap)
{
	struct vnode_attr *vap = ap->a_vap;
	struct nfsmount *nmp;
	struct nfs_vattr *nva;
	int error, acls, ngaflags;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	ngaflags = NGA_CACHED;
	if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
		ngaflags |= NGA_ACL;
	}
	MALLOC(nva, struct nfs_vattr *, sizeof(*nva), M_TEMP, M_WAITOK);
	error = nfs_getattr(VTONFS(ap->a_vp), nva, ap->a_context, ngaflags);
	if (error) {
		goto out;
	}

	/* copy what we have in nva to *a_vap */
	if (VATTR_IS_ACTIVE(vap, va_rdev) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_RAWDEV)) {
		dev_t rdev = makedev(nva->nva_rawdev.specdata1, nva->nva_rawdev.specdata2);
		VATTR_RETURN(vap, va_rdev, rdev);
	}
	if (VATTR_IS_ACTIVE(vap, va_nlink) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_NUMLINKS)) {
		VATTR_RETURN(vap, va_nlink, nva->nva_nlink);
	}
	if (VATTR_IS_ACTIVE(vap, va_data_size) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_SIZE)) {
		VATTR_RETURN(vap, va_data_size, nva->nva_size);
	}
	// VATTR_RETURN(vap, va_data_alloc, ???);
	// VATTR_RETURN(vap, va_total_size, ???);
	if (VATTR_IS_ACTIVE(vap, va_total_alloc) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_SPACE_USED)) {
		VATTR_RETURN(vap, va_total_alloc, nva->nva_bytes);
	}
	if (VATTR_IS_ACTIVE(vap, va_uid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_OWNER)) {
		VATTR_RETURN(vap, va_uid, nva->nva_uid);
	}
	if (VATTR_IS_ACTIVE(vap, va_uuuid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_OWNER)) {
		VATTR_RETURN(vap, va_uuuid, nva->nva_uuuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_gid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_OWNER_GROUP)) {
		VATTR_RETURN(vap, va_gid, nva->nva_gid);
	}
	if (VATTR_IS_ACTIVE(vap, va_guuid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_OWNER_GROUP)) {
		VATTR_RETURN(vap, va_guuid, nva->nva_guuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_mode)) {
		if (NMFLAG(nmp, ACLONLY) || !NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_MODE)) {
			VATTR_RETURN(vap, va_mode, ACCESSPERMS);
		} else {
			VATTR_RETURN(vap, va_mode, nva->nva_mode);
		}
	}
	if (VATTR_IS_ACTIVE(vap, va_flags) &&
	    (NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_ARCHIVE) ||
	    NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_HIDDEN) ||
	    (nva->nva_flags & NFS_FFLAG_TRIGGER))) {
		uint32_t flags = 0;
		if (NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_ARCHIVE) &&
		    (nva->nva_flags & NFS_FFLAG_ARCHIVED)) {
			flags |= SF_ARCHIVED;
		}
		if (NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_HIDDEN) &&
		    (nva->nva_flags & NFS_FFLAG_HIDDEN)) {
			flags |= UF_HIDDEN;
		}
		VATTR_RETURN(vap, va_flags, flags);
	}
	if (VATTR_IS_ACTIVE(vap, va_create_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_CREATE)) {
		vap->va_create_time.tv_sec = nva->nva_timesec[NFSTIME_CREATE];
		vap->va_create_time.tv_nsec = nva->nva_timensec[NFSTIME_CREATE];
		VATTR_SET_SUPPORTED(vap, va_create_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_access_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_ACCESS)) {
		vap->va_access_time.tv_sec = nva->nva_timesec[NFSTIME_ACCESS];
		vap->va_access_time.tv_nsec = nva->nva_timensec[NFSTIME_ACCESS];
		VATTR_SET_SUPPORTED(vap, va_access_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_modify_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_MODIFY)) {
		vap->va_modify_time.tv_sec = nva->nva_timesec[NFSTIME_MODIFY];
		vap->va_modify_time.tv_nsec = nva->nva_timensec[NFSTIME_MODIFY];
		VATTR_SET_SUPPORTED(vap, va_modify_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_change_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_METADATA)) {
1591 vap->va_change_time.tv_sec = nva->nva_timesec[NFSTIME_CHANGE];
1592 vap->va_change_time.tv_nsec = nva->nva_timensec[NFSTIME_CHANGE];
2d21ac55
A
1593 VATTR_SET_SUPPORTED(vap, va_change_time);
1594 }
f427ee49
A
1595 if (VATTR_IS_ACTIVE(vap, va_backup_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_BACKUP)) {
1596 vap->va_backup_time.tv_sec = nva->nva_timesec[NFSTIME_BACKUP];
1597 vap->va_backup_time.tv_nsec = nva->nva_timensec[NFSTIME_BACKUP];
2d21ac55
A
1598 VATTR_SET_SUPPORTED(vap, va_backup_time);
1599 }
f427ee49
A
1600 if (VATTR_IS_ACTIVE(vap, va_fileid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_FILEID)) {
1601 VATTR_RETURN(vap, va_fileid, nva->nva_fileid);
0a7de745 1602 }
f427ee49
A
1603 if (VATTR_IS_ACTIVE(vap, va_type) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TYPE)) {
1604 VATTR_RETURN(vap, va_type, nva->nva_type);
0a7de745 1605 }
f427ee49
A
1606 if (VATTR_IS_ACTIVE(vap, va_filerev) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_CHANGE)) {
1607 VATTR_RETURN(vap, va_filerev, nva->nva_change);
0a7de745 1608 }
2d21ac55 1609
6d2010ae 1610 if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
f427ee49
A
1611 VATTR_RETURN(vap, va_acl, nva->nva_acl);
1612 nva->nva_acl = NULL;
6d2010ae
A
1613 }
1614
2d21ac55
A
1615 // other attrs we might support someday:
1616 // VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? */);
2d21ac55 1617
f427ee49
A
1618 NVATTR_CLEANUP(nva);
1619out:
1620 FREE(nva, M_TEMP);
0a7de745 1621 return error;
2d21ac55
A
1622}
1623
1624int
1625nfs4_setattr_rpc(
1626 nfsnode_t np,
1627 struct vnode_attr *vap,
b0d623f7 1628 vfs_context_t ctx)
2d21ac55
A
1629{
1630 struct nfsmount *nmp = NFSTONMP(np);
6d2010ae 1631 int error = 0, setattr_error = 0, lockerror = ENOENT, status, nfsvers, numops;
b0d623f7 1632 u_int64_t xid, nextxid;
2d21ac55 1633 struct nfsm_chain nmreq, nmrep;
b0d623f7 1634 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6d2010ae
A
1635 uint32_t getbitmap[NFS_ATTR_BITMAP_LEN];
1636 uint32_t setbitmap[NFS_ATTR_BITMAP_LEN];
b0d623f7 1637 nfs_stateid stateid;
6d2010ae 1638 struct nfsreq_secinfo_args si;
2d21ac55 1639
0a7de745
A
1640 if (nfs_mount_gone(nmp)) {
1641 return ENXIO;
1642 }
2d21ac55 1643 nfsvers = nmp->nm_vers;
0a7de745
A
1644 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
1645 return EINVAL;
1646 }
2d21ac55 1647
0a7de745 1648 if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & ~(SF_ARCHIVED | UF_HIDDEN))) {
2d21ac55 1649 /* we don't support setting unsupported flags (duh!) */
0a7de745
A
1650 if (vap->va_active & ~VNODE_ATTR_va_flags) {
1651 return EINVAL; /* return EINVAL if other attributes also set */
1652 } else {
1653 return ENOTSUP; /* return ENOTSUP for chflags(2) */
1654 }
2d21ac55
A
1655 }
1656
6d2010ae 1657 /* don't bother requesting some changes if they don't look like they are changing */
0a7de745 1658 if (VATTR_IS_ACTIVE(vap, va_uid) && (vap->va_uid == np->n_vattr.nva_uid)) {
6d2010ae 1659 VATTR_CLEAR_ACTIVE(vap, va_uid);
0a7de745
A
1660 }
1661 if (VATTR_IS_ACTIVE(vap, va_gid) && (vap->va_gid == np->n_vattr.nva_gid)) {
6d2010ae 1662 VATTR_CLEAR_ACTIVE(vap, va_gid);
0a7de745
A
1663 }
1664 if (VATTR_IS_ACTIVE(vap, va_uuuid) && kauth_guid_equal(&vap->va_uuuid, &np->n_vattr.nva_uuuid)) {
6d2010ae 1665 VATTR_CLEAR_ACTIVE(vap, va_uuuid);
0a7de745
A
1666 }
1667 if (VATTR_IS_ACTIVE(vap, va_guuid) && kauth_guid_equal(&vap->va_guuid, &np->n_vattr.nva_guuid)) {
6d2010ae 1668 VATTR_CLEAR_ACTIVE(vap, va_guuid);
0a7de745 1669 }
6d2010ae
A
1670
1671tryagain:
1672 /* do nothing if no attributes will be sent */
1673 nfs_vattr_set_bitmap(nmp, bitmap, vap);
0a7de745
A
1674 if (!bitmap[0] && !bitmap[1]) {
1675 return 0;
1676 }
6d2010ae
A
1677
1678 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
2d21ac55
A
1679 nfsm_chain_null(&nmreq);
1680 nfsm_chain_null(&nmrep);
1681
6d2010ae
A
1682 /*
1683 * Prepare GETATTR bitmap: if we are setting the ACL or mode, we
1684 * need to invalidate any cached ACL. And if we had an ACL cached,
1685 * we might as well also fetch the new value.
1686 */
1687 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, getbitmap);
1688 if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_ACL) ||
1689 NFS_BITMAP_ISSET(bitmap, NFS_FATTR_MODE)) {
0a7de745 1690 if (NACLVALID(np)) {
6d2010ae 1691 NFS_BITMAP_SET(getbitmap, NFS_FATTR_ACL);
0a7de745 1692 }
6d2010ae
A
1693 NACLINVALIDATE(np);
1694 }
1695
2d21ac55
A
1696 // PUTFH, SETATTR, GETATTR
1697 numops = 3;
1698 nfsm_chain_build_alloc_init(error, &nmreq, 40 * NFSX_UNSIGNED);
3e170ce0 1699 nfsm_chain_add_compound_header(error, &nmreq, "setattr", nmp->nm_minor_vers, numops);
2d21ac55
A
1700 numops--;
1701 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
1702 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
1703 numops--;
1704 nfsm_chain_add_32(error, &nmreq, NFS_OP_SETATTR);
0a7de745 1705 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
b0d623f7 1706 nfs_get_stateid(np, vfs_context_thread(ctx), vfs_context_ucred(ctx), &stateid);
0a7de745 1707 } else {
b0d623f7 1708 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
0a7de745 1709 }
b0d623f7 1710 nfsm_chain_add_stateid(error, &nmreq, &stateid);
2d21ac55
A
1711 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
1712 numops--;
1713 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 1714 nfsm_chain_add_bitmap_supported(error, &nmreq, getbitmap, nmp, np);
2d21ac55
A
1715 nfsm_chain_build_done(error, &nmreq);
1716 nfsm_assert(error, (numops == 0), EPROTO);
1717 nfsmout_if(error);
6d2010ae 1718 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
2d21ac55 1719
0a7de745 1720 if ((lockerror = nfs_node_lock(np))) {
2d21ac55 1721 error = lockerror;
0a7de745 1722 }
2d21ac55
A
1723 nfsm_chain_skip_tag(error, &nmrep);
1724 nfsm_chain_get_32(error, &nmrep, numops);
1725 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6d2010ae 1726 nfsmout_if(error);
2d21ac55 1727 nfsm_chain_op_check(error, &nmrep, NFS_OP_SETATTR);
6d2010ae
A
1728 nfsmout_if(error == EBADRPC);
1729 setattr_error = error;
1730 error = 0;
2d21ac55 1731 bmlen = NFS_ATTR_BITMAP_LEN;
6d2010ae
A
1732 nfsm_chain_get_bitmap(error, &nmrep, setbitmap, bmlen);
1733 if (!error) {
0a7de745 1734 if (VATTR_IS_ACTIVE(vap, va_data_size) && (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
6d2010ae 1735 microuptime(&np->n_lastio);
0a7de745 1736 }
6d2010ae
A
1737 nfs_vattr_set_supported(setbitmap, vap);
1738 error = setattr_error;
1739 }
2d21ac55 1740 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 1741 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
0a7de745 1742 if (error) {
2d21ac55 1743 NATTRINVALIDATE(np);
0a7de745 1744 }
b0d623f7
A
1745 /*
1746 * We just changed the attributes and we want to make sure that we
1747 * see the latest attributes. Get the next XID. If it's not the
1748 * next XID after the SETATTR XID, then it's possible that another
1749 * RPC was in flight at the same time and it might put stale attributes
1750 * in the cache. In that case, we invalidate the attributes and set
1751 * the attribute cache XID to guarantee that newer attributes will
1752 * get loaded next.
1753 */
1754 nextxid = 0;
1755 nfs_get_xid(&nextxid);
1756 if (nextxid != (xid + 1)) {
1757 np->n_xid = nextxid;
1758 NATTRINVALIDATE(np);
1759 }
2d21ac55 1760nfsmout:
0a7de745 1761 if (!lockerror) {
b0d623f7 1762 nfs_node_unlock(np);
0a7de745 1763 }
2d21ac55
A
1764 nfsm_chain_cleanup(&nmreq);
1765 nfsm_chain_cleanup(&nmrep);
6d2010ae
A
1766 if ((setattr_error == EINVAL) && VATTR_IS_ACTIVE(vap, va_acl) && VATTR_IS_ACTIVE(vap, va_mode) && !NMFLAG(nmp, ACLONLY)) {
1767 /*
1768 * Some servers may not like the ACL/mode combos that get sent.
1769 * If it looks like that's what the server choked on, try setting
1770 * just the ACL and not the mode (unless it looks like everything
1771 * but mode was already successfully set).
1772 */
1773 if (((bitmap[0] & setbitmap[0]) != bitmap[0]) ||
0a7de745 1774 ((bitmap[1] & (setbitmap[1] | NFS_FATTR_MODE)) != bitmap[1])) {
6d2010ae
A
1775 VATTR_CLEAR_ACTIVE(vap, va_mode);
1776 error = 0;
1777 goto tryagain;
1778 }
1779 }
0a7de745 1780 return error;
2d21ac55 1781}
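/*
 * Illustrative usage sketch (hypothetical, for clarity only): one way a
 * caller might drive the SETATTR path above, e.g. to change the mode bits
 * on a node it already references.  "np" and "ctx" are assumed to be a
 * valid nfsnode and vfs_context supplied by the caller.
 */
#if 0	/* example only */
	struct vnode_attr va;
	int error;

	VATTR_INIT(&va);
	VATTR_SET(&va, va_mode, 0644);		/* request a mode change only */
	error = nfs4_setattr_rpc(np, &va, ctx);
#endif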
cb323159 1782#endif /* CONFIG_NFS4 */
2d21ac55 1783
b0d623f7
A
1784/*
1785 * Wait for any pending recovery to complete.
1786 */
2d21ac55 1787int
b0d623f7 1788nfs_mount_state_wait_for_recovery(struct nfsmount *nmp)
2d21ac55 1789{
cb323159 1790 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
6d2010ae 1791 int error = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
b0d623f7
A
1792
1793 lck_mtx_lock(&nmp->nm_lock);
1794 while (nmp->nm_state & NFSSTA_RECOVER) {
0a7de745 1795 if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1))) {
b0d623f7 1796 break;
0a7de745 1797 }
b0d623f7 1798 nfs_mount_sock_thread_wake(nmp);
0a7de745 1799 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsrecoverwait", &ts);
6d2010ae 1800 slpflag = 0;
b0d623f7
A
1801 }
1802 lck_mtx_unlock(&nmp->nm_lock);
1803
0a7de745 1804 return error;
2d21ac55
A
1805}
1806
b0d623f7
A
1807/*
1808 * We're about to use/manipulate NFS mount's open/lock state.
1809 * Wait for any pending state recovery to complete, then
1810 * mark the state as being in use (which will hold off
1811 * the recovery thread until we're done).
1812 */
2d21ac55 1813int
6d2010ae 1814nfs_mount_state_in_use_start(struct nfsmount *nmp, thread_t thd)
2d21ac55 1815{
cb323159 1816 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
6d2010ae 1817 int error = 0, slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
b0d623f7 1818
0a7de745
A
1819 if (nfs_mount_gone(nmp)) {
1820 return ENXIO;
1821 }
b0d623f7 1822 lck_mtx_lock(&nmp->nm_lock);
0a7de745 1823 if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
6d2010ae 1824 lck_mtx_unlock(&nmp->nm_lock);
0a7de745 1825 return ENXIO;
6d2010ae 1826 }
b0d623f7 1827 while (nmp->nm_state & NFSSTA_RECOVER) {
0a7de745 1828 if ((error = nfs_sigintr(nmp, NULL, thd, 1))) {
b0d623f7 1829 break;
0a7de745 1830 }
b0d623f7 1831 nfs_mount_sock_thread_wake(nmp);
0a7de745 1832 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsrecoverwait", &ts);
6d2010ae 1833 slpflag = 0;
b0d623f7 1834 }
0a7de745 1835 if (!error) {
b0d623f7 1836 nmp->nm_stateinuse++;
0a7de745 1837 }
b0d623f7
A
1838 lck_mtx_unlock(&nmp->nm_lock);
1839
0a7de745 1840 return error;
2d21ac55
A
1841}
1842
b0d623f7
A
1843/*
1844 * We're done using/manipulating the NFS mount's open/lock
1845 * state. If the given error indicates that recovery should
1846 * be performed, we'll initiate recovery.
1847 */
2d21ac55 1848int
b0d623f7 1849nfs_mount_state_in_use_end(struct nfsmount *nmp, int error)
2d21ac55 1850{
b0d623f7
A
1851 int restart = nfs_mount_state_error_should_restart(error);
1852
0a7de745 1853 if (nfs_mount_gone(nmp)) {
f427ee49 1854 return ENXIO;
0a7de745 1855 }
b0d623f7
A
1856 lck_mtx_lock(&nmp->nm_lock);
1857 if (restart && (error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE)) {
6d2010ae 1858 printf("nfs_mount_state_in_use_end: error %d, initiating recovery for %s, 0x%x\n",
0a7de745 1859 error, vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid);
6d2010ae 1860 nfs_need_recover(nmp, error);
b0d623f7 1861 }
0a7de745 1862 if (nmp->nm_stateinuse > 0) {
b0d623f7 1863 nmp->nm_stateinuse--;
0a7de745 1864 } else {
b0d623f7 1865 panic("NFS mount state in use count underrun");
0a7de745
A
1866 }
1867 if (!nmp->nm_stateinuse && (nmp->nm_state & NFSSTA_RECOVER)) {
b0d623f7 1868 wakeup(&nmp->nm_stateinuse);
0a7de745 1869 }
b0d623f7 1870 lck_mtx_unlock(&nmp->nm_lock);
0a7de745
A
1871 if (error == NFSERR_GRACE) {
1872 tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
1873 }
b0d623f7 1874
0a7de745 1875 return restart;
2d21ac55
A
1876}
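/*
 * Illustrative usage sketch (hypothetical, for clarity only): the typical
 * way a state-related operation is bracketed by the helpers above.
 * "do_state_op" is a hypothetical stand-in for an OPEN/CLOSE/LOCK RPC;
 * "nmp", "np" and "ctx" are assumed to be valid.
 */
#if 0	/* example only */
	int error;
restart:
	/* wait out any recovery and hold off the recovery thread */
	if ((error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx)))) {
		return error;
	}

	error = do_state_op(np, ctx);		/* hypothetical state-related RPC */

	/* if the error indicates stale state, this initiates recovery and asks us to redo the op */
	if (nfs_mount_state_in_use_end(nmp, error)) {
		goto restart;
	}
#endif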
1877
1878/*
b0d623f7 1879 * Does the error mean we should restart/redo a state-related operation?
2d21ac55
A
1880 */
1881int
b0d623f7 1882nfs_mount_state_error_should_restart(int error)
2d21ac55 1883{
b0d623f7
A
1884 switch (error) {
1885 case NFSERR_STALE_STATEID:
1886 case NFSERR_STALE_CLIENTID:
1887 case NFSERR_ADMIN_REVOKED:
1888 case NFSERR_EXPIRED:
1889 case NFSERR_OLD_STATEID:
1890 case NFSERR_BAD_STATEID:
1891 case NFSERR_GRACE:
0a7de745 1892 return 1;
b0d623f7 1893 }
0a7de745 1894 return 0;
b0d623f7 1895}
2d21ac55 1896
b0d623f7
A
1897/*
1898 * In some cases we may want to limit how many times we restart a
1899 * state-related operation - e.g., when we're repeatedly getting NFSERR_GRACE.
1900 * Base the limit on the lease (as long as it's not too short).
1901 */
1902uint
1903nfs_mount_state_max_restarts(struct nfsmount *nmp)
1904{
0a7de745 1905 return MAX(nmp->nm_fsattr.nfsa_lease, 60);
b0d623f7 1906}
2d21ac55 1907
6d2010ae
A
1908/*
1909 * Does the error mean we probably lost a delegation?
1910 */
1911int
1912nfs_mount_state_error_delegation_lost(int error)
1913{
1914 switch (error) {
1915 case NFSERR_STALE_STATEID:
1916 case NFSERR_ADMIN_REVOKED:
1917 case NFSERR_EXPIRED:
1918 case NFSERR_OLD_STATEID:
1919 case NFSERR_BAD_STATEID:
1920 case NFSERR_GRACE: /* ugh! (stupid) RFC 3530 specifically disallows CLAIM_DELEGATE_CUR during grace period? */
0a7de745 1921 return 1;
6d2010ae 1922 }
0a7de745 1923 return 0;
6d2010ae
A
1924}
1925
b0d623f7
A
1926
1927/*
1928 * Mark an NFS node's open state as busy.
1929 */
1930int
6d2010ae 1931nfs_open_state_set_busy(nfsnode_t np, thread_t thd)
b0d623f7
A
1932{
1933 struct nfsmount *nmp;
cb323159 1934 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
b0d623f7
A
1935 int error = 0, slpflag;
1936
1937 nmp = NFSTONMP(np);
0a7de745
A
1938 if (nfs_mount_gone(nmp)) {
1939 return ENXIO;
1940 }
6d2010ae 1941 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
2d21ac55 1942
b0d623f7
A
1943 lck_mtx_lock(&np->n_openlock);
1944 while (np->n_openflags & N_OPENBUSY) {
0a7de745 1945 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
b0d623f7 1946 break;
0a7de745 1947 }
b0d623f7
A
1948 np->n_openflags |= N_OPENWANT;
1949 msleep(&np->n_openflags, &np->n_openlock, slpflag, "nfs_open_state_set_busy", &ts);
6d2010ae 1950 slpflag = 0;
b0d623f7 1951 }
0a7de745 1952 if (!error) {
b0d623f7 1953 np->n_openflags |= N_OPENBUSY;
0a7de745 1954 }
b0d623f7 1955 lck_mtx_unlock(&np->n_openlock);
2d21ac55 1956
0a7de745 1957 return error;
b0d623f7 1958}
2d21ac55 1959
b0d623f7
A
1960/*
1961 * Clear an NFS node's open state busy flag and wake up
1962 * anyone wanting it.
1963 */
1964void
1965nfs_open_state_clear_busy(nfsnode_t np)
1966{
1967 int wanted;
1968
1969 lck_mtx_lock(&np->n_openlock);
0a7de745 1970 if (!(np->n_openflags & N_OPENBUSY)) {
b0d623f7 1971 panic("nfs_open_state_clear_busy");
0a7de745 1972 }
b0d623f7 1973 wanted = (np->n_openflags & N_OPENWANT);
0a7de745 1974 np->n_openflags &= ~(N_OPENBUSY | N_OPENWANT);
b0d623f7 1975 lck_mtx_unlock(&np->n_openlock);
0a7de745 1976 if (wanted) {
b0d623f7 1977 wakeup(&np->n_openflags);
0a7de745 1978 }
b0d623f7 1979}
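/*
 * Illustrative usage sketch (hypothetical, for clarity only): N_OPENBUSY
 * acts as a sleep lock on the node's open state, so callers bracket their
 * updates with the set/clear pair above.  "np" and "ctx" are assumed valid;
 * the actual update is elided.
 */
#if 0	/* example only */
	int error;

	if (!(error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) {
		/* ... update the node's open state (np->n_openflags, stateids, ...) ... */
		nfs_open_state_clear_busy(np);	/* wakes anyone blocked in set_busy() */
	}
#endif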
2d21ac55 1980
b0d623f7
A
1981/*
1982 * Search a mount's open owner list for the owner for this credential.
1983 * If not found and "alloc" is set, then allocate a new one.
1984 */
1985struct nfs_open_owner *
1986nfs_open_owner_find(struct nfsmount *nmp, kauth_cred_t cred, int alloc)
1987{
1988 uid_t uid = kauth_cred_getuid(cred);
1989 struct nfs_open_owner *noop, *newnoop = NULL;
2d21ac55 1990
b0d623f7
A
1991tryagain:
1992 lck_mtx_lock(&nmp->nm_lock);
1993 TAILQ_FOREACH(noop, &nmp->nm_open_owners, noo_link) {
0a7de745 1994 if (kauth_cred_getuid(noop->noo_cred) == uid) {
b0d623f7 1995 break;
0a7de745 1996 }
2d21ac55 1997 }
2d21ac55 1998
b0d623f7
A
1999 if (!noop && !newnoop && alloc) {
2000 lck_mtx_unlock(&nmp->nm_lock);
2001 MALLOC(newnoop, struct nfs_open_owner *, sizeof(struct nfs_open_owner), M_TEMP, M_WAITOK);
0a7de745
A
2002 if (!newnoop) {
2003 return NULL;
2004 }
b0d623f7 2005 bzero(newnoop, sizeof(*newnoop));
c3c9b80d 2006 lck_mtx_init(&newnoop->noo_lock, &nfs_open_grp, LCK_ATTR_NULL);
b0d623f7
A
2007 newnoop->noo_mount = nmp;
2008 kauth_cred_ref(cred);
2009 newnoop->noo_cred = cred;
2010 newnoop->noo_name = OSAddAtomic(1, &nfs_open_owner_seqnum);
2011 TAILQ_INIT(&newnoop->noo_opens);
2012 goto tryagain;
2013 }
2014 if (!noop && newnoop) {
2015 newnoop->noo_flags |= NFS_OPEN_OWNER_LINK;
0a7de745 2016 os_ref_init(&newnoop->noo_refcnt, NULL);
b0d623f7
A
2017 TAILQ_INSERT_HEAD(&nmp->nm_open_owners, newnoop, noo_link);
2018 noop = newnoop;
2019 }
2020 lck_mtx_unlock(&nmp->nm_lock);
2021
0a7de745 2022 if (newnoop && (noop != newnoop)) {
b0d623f7 2023 nfs_open_owner_destroy(newnoop);
0a7de745 2024 }
b0d623f7 2025
0a7de745 2026 if (noop) {
b0d623f7 2027 nfs_open_owner_ref(noop);
0a7de745 2028 }
b0d623f7 2029
0a7de745 2030 return noop;
b0d623f7
A
2031}
2032
2033/*
2034 * destroy an open owner that's no longer needed
2035 */
2036void
2037nfs_open_owner_destroy(struct nfs_open_owner *noop)
2038{
0a7de745 2039 if (noop->noo_cred) {
b0d623f7 2040 kauth_cred_unref(&noop->noo_cred);
0a7de745 2041 }
c3c9b80d 2042 lck_mtx_destroy(&noop->noo_lock, &nfs_open_grp);
b0d623f7
A
2043 FREE(noop, M_TEMP);
2044}
2045
2046/*
2047 * acquire a reference count on an open owner
2048 */
2049void
2050nfs_open_owner_ref(struct nfs_open_owner *noop)
2051{
2052 lck_mtx_lock(&noop->noo_lock);
0a7de745 2053 os_ref_retain_locked(&noop->noo_refcnt);
b0d623f7
A
2054 lck_mtx_unlock(&noop->noo_lock);
2055}
2056
2057/*
2058 * drop a reference count on an open owner and destroy it if
2059 * it is no longer referenced and no longer on the mount's list.
2060 */
2061void
2062nfs_open_owner_rele(struct nfs_open_owner *noop)
2063{
0a7de745
A
2064 os_ref_count_t newcount;
2065
b0d623f7 2066 lck_mtx_lock(&noop->noo_lock);
0a7de745 2067 if (os_ref_get_count(&noop->noo_refcnt) < 1) {
b0d623f7 2068 panic("nfs_open_owner_rele: no refcnt");
0a7de745
A
2069 }
2070 newcount = os_ref_release_locked(&noop->noo_refcnt);
2071 if (!newcount && (noop->noo_flags & NFS_OPEN_OWNER_BUSY)) {
b0d623f7 2072 panic("nfs_open_owner_rele: busy");
0a7de745 2073 }
b0d623f7 2074 /* XXX we may potentially want to clean up idle/unused open owner structures */
0a7de745 2075 if (newcount || (noop->noo_flags & NFS_OPEN_OWNER_LINK)) {
b0d623f7
A
2076 lck_mtx_unlock(&noop->noo_lock);
2077 return;
2078 }
2079 /* owner is no longer referenced or linked to mount, so destroy it */
2080 lck_mtx_unlock(&noop->noo_lock);
2081 nfs_open_owner_destroy(noop);
2082}
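/*
 * Illustrative usage sketch: the usual open owner lifecycle.  With "alloc"
 * set, nfs_open_owner_find() returns a referenced (possibly newly created)
 * owner for the caller's credential; the reference must be dropped with
 * nfs_open_owner_rele() when the caller is done.  "nmp" and "ctx" are
 * assumed to be a valid mount and vfs_context.
 */
#if 0	/* example only */
	struct nfs_open_owner *noop;

	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1 /* alloc */);
	if (!noop) {
		return ENOMEM;
	}
	/* ... use noop (sequence ID, list of opens, ...) ... */
	nfs_open_owner_rele(noop);
#endif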
2083
2084/*
2085 * Mark an open owner as busy because we are about to
2086 * start an operation that uses and updates open owner state.
2087 */
2088int
2089nfs_open_owner_set_busy(struct nfs_open_owner *noop, thread_t thd)
2090{
2091 struct nfsmount *nmp;
cb323159 2092 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
b0d623f7
A
2093 int error = 0, slpflag;
2094
2095 nmp = noop->noo_mount;
0a7de745
A
2096 if (nfs_mount_gone(nmp)) {
2097 return ENXIO;
2098 }
6d2010ae 2099 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
b0d623f7
A
2100
2101 lck_mtx_lock(&noop->noo_lock);
2102 while (noop->noo_flags & NFS_OPEN_OWNER_BUSY) {
0a7de745 2103 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
b0d623f7 2104 break;
0a7de745 2105 }
b0d623f7
A
2106 noop->noo_flags |= NFS_OPEN_OWNER_WANT;
2107 msleep(noop, &noop->noo_lock, slpflag, "nfs_open_owner_set_busy", &ts);
6d2010ae 2108 slpflag = 0;
b0d623f7 2109 }
0a7de745 2110 if (!error) {
b0d623f7 2111 noop->noo_flags |= NFS_OPEN_OWNER_BUSY;
0a7de745 2112 }
b0d623f7
A
2113 lck_mtx_unlock(&noop->noo_lock);
2114
0a7de745 2115 return error;
b0d623f7
A
2116}
2117
2118/*
2119 * Clear the busy flag on an open owner and wake up anyone waiting
2120 * to mark it busy.
2121 */
2122void
2123nfs_open_owner_clear_busy(struct nfs_open_owner *noop)
2124{
2125 int wanted;
2126
2127 lck_mtx_lock(&noop->noo_lock);
0a7de745 2128 if (!(noop->noo_flags & NFS_OPEN_OWNER_BUSY)) {
b0d623f7 2129 panic("nfs_open_owner_clear_busy");
0a7de745 2130 }
b0d623f7 2131 wanted = (noop->noo_flags & NFS_OPEN_OWNER_WANT);
0a7de745 2132 noop->noo_flags &= ~(NFS_OPEN_OWNER_BUSY | NFS_OPEN_OWNER_WANT);
b0d623f7 2133 lck_mtx_unlock(&noop->noo_lock);
0a7de745 2134 if (wanted) {
b0d623f7 2135 wakeup(noop);
0a7de745 2136 }
b0d623f7
A
2137}
2138
2139/*
2140 * Given an open/lock owner and an error code, increment the
2141 * sequence ID if appropriate.
2142 */
2143void
2144nfs_owner_seqid_increment(struct nfs_open_owner *noop, struct nfs_lock_owner *nlop, int error)
2145{
2146 switch (error) {
2147 case NFSERR_STALE_CLIENTID:
2148 case NFSERR_STALE_STATEID:
2149 case NFSERR_OLD_STATEID:
2150 case NFSERR_BAD_STATEID:
2151 case NFSERR_BAD_SEQID:
2152 case NFSERR_BADXDR:
2153 case NFSERR_RESOURCE:
2154 case NFSERR_NOFILEHANDLE:
2155 /* do not increment the open seqid on these errors */
2156 return;
2157 }
0a7de745 2158 if (noop) {
b0d623f7 2159 noop->noo_seqid++;
0a7de745
A
2160 }
2161 if (nlop) {
b0d623f7 2162 nlop->nlo_seqid++;
0a7de745 2163 }
b0d623f7
A
2164}
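/*
 * Illustrative usage sketch (hypothetical, for clarity only): reply handlers
 * for seqid-bearing operations (OPEN, OPEN_DOWNGRADE, CLOSE, LOCK, ...)
 * typically call this right after checking the operation's status, so the
 * sequence ID advances except for the errors excluded above.  "error",
 * "nmrep" and "noop" are assumed to come from the surrounding reply parsing.
 */
#if 0	/* example only */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
	nfs_owner_seqid_increment(noop, NULL, error);
#endif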
2165
2166/*
2167 * Search a node's open file list for any conflicts with this request.
2168 * Also find this open owner's open file structure.
2169 * If not found and "alloc" is set, then allocate one.
2170 */
2171int
2172nfs_open_file_find(
2173 nfsnode_t np,
2174 struct nfs_open_owner *noop,
2175 struct nfs_open_file **nofpp,
2176 uint32_t accessMode,
2177 uint32_t denyMode,
2178 int alloc)
6d2010ae
A
2179{
2180 *nofpp = NULL;
2181 return nfs_open_file_find_internal(np, noop, nofpp, accessMode, denyMode, alloc);
2182}
2183
2184/*
2185 * Internally, allow using a provisional nodeless nofp (passed in via *nofpp)
2186 * if an existing one is not found. This is used in "create" scenarios to
2187 * officially add the provisional nofp to the node once the node is created.
2188 */
2189int
2190nfs_open_file_find_internal(
2191 nfsnode_t np,
2192 struct nfs_open_owner *noop,
2193 struct nfs_open_file **nofpp,
2194 uint32_t accessMode,
2195 uint32_t denyMode,
2196 int alloc)
b0d623f7
A
2197{
2198 struct nfs_open_file *nofp = NULL, *nofp2, *newnofp = NULL;
2199
0a7de745 2200 if (!np) {
b0d623f7 2201 goto alloc;
0a7de745 2202 }
b0d623f7
A
2203tryagain:
2204 lck_mtx_lock(&np->n_openlock);
2205 TAILQ_FOREACH(nofp2, &np->n_opens, nof_link) {
2206 if (nofp2->nof_owner == noop) {
2207 nofp = nofp2;
0a7de745 2208 if (!accessMode) {
b0d623f7 2209 break;
0a7de745 2210 }
b0d623f7
A
2211 }
2212 if ((accessMode & nofp2->nof_deny) || (denyMode & nofp2->nof_access)) {
2213 /* This request conflicts with an existing open on this client. */
2214 lck_mtx_unlock(&np->n_openlock);
0a7de745 2215 return EACCES;
b0d623f7
A
2216 }
2217 }
2218
2219 /*
2220 * If this open owner doesn't have an open
2221 * file structure yet, we create one for it.
2222 */
6d2010ae 2223 if (!nofp && !*nofpp && !newnofp && alloc) {
b0d623f7
A
2224 lck_mtx_unlock(&np->n_openlock);
2225alloc:
2226 MALLOC(newnofp, struct nfs_open_file *, sizeof(struct nfs_open_file), M_TEMP, M_WAITOK);
0a7de745
A
2227 if (!newnofp) {
2228 return ENOMEM;
2229 }
b0d623f7 2230 bzero(newnofp, sizeof(*newnofp));
c3c9b80d 2231 lck_mtx_init(&newnofp->nof_lock, &nfs_open_grp, LCK_ATTR_NULL);
b0d623f7
A
2232 newnofp->nof_owner = noop;
2233 nfs_open_owner_ref(noop);
2234 newnofp->nof_np = np;
2235 lck_mtx_lock(&noop->noo_lock);
2236 TAILQ_INSERT_HEAD(&noop->noo_opens, newnofp, nof_oolink);
2237 lck_mtx_unlock(&noop->noo_lock);
0a7de745 2238 if (np) {
b0d623f7 2239 goto tryagain;
0a7de745 2240 }
b0d623f7 2241 }
6d2010ae
A
2242 if (!nofp) {
2243 if (*nofpp) {
2244 (*nofpp)->nof_np = np;
2245 nofp = *nofpp;
2246 } else {
2247 nofp = newnofp;
2248 }
0a7de745 2249 if (nofp && np) {
6d2010ae 2250 TAILQ_INSERT_HEAD(&np->n_opens, nofp, nof_link);
0a7de745 2251 }
b0d623f7 2252 }
0a7de745 2253 if (np) {
b0d623f7 2254 lck_mtx_unlock(&np->n_openlock);
0a7de745 2255 }
b0d623f7 2256
0a7de745 2257 if (alloc && newnofp && (nofp != newnofp)) {
b0d623f7 2258 nfs_open_file_destroy(newnofp);
0a7de745 2259 }
b0d623f7
A
2260
2261 *nofpp = nofp;
0a7de745 2262 return nofp ? 0 : ESRCH;
b0d623f7
A
2263}
2264
2265/*
2266 * Destroy an open file structure.
2267 */
2268void
2269nfs_open_file_destroy(struct nfs_open_file *nofp)
2270{
2271 lck_mtx_lock(&nofp->nof_owner->noo_lock);
2272 TAILQ_REMOVE(&nofp->nof_owner->noo_opens, nofp, nof_oolink);
2273 lck_mtx_unlock(&nofp->nof_owner->noo_lock);
2274 nfs_open_owner_rele(nofp->nof_owner);
c3c9b80d 2275 lck_mtx_destroy(&nofp->nof_lock, &nfs_open_grp);
b0d623f7
A
2276 FREE(nofp, M_TEMP);
2277}
2278
2279/*
2280 * Mark an open file as busy because we are about to
2281 * start an operation that uses and updates open file state.
2282 */
2283int
2284nfs_open_file_set_busy(struct nfs_open_file *nofp, thread_t thd)
2285{
2286 struct nfsmount *nmp;
cb323159 2287 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
b0d623f7
A
2288 int error = 0, slpflag;
2289
2290 nmp = nofp->nof_owner->noo_mount;
0a7de745
A
2291 if (nfs_mount_gone(nmp)) {
2292 return ENXIO;
2293 }
6d2010ae 2294 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
b0d623f7
A
2295
2296 lck_mtx_lock(&nofp->nof_lock);
2297 while (nofp->nof_flags & NFS_OPEN_FILE_BUSY) {
0a7de745 2298 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
b0d623f7 2299 break;
0a7de745 2300 }
b0d623f7
A
2301 nofp->nof_flags |= NFS_OPEN_FILE_WANT;
2302 msleep(nofp, &nofp->nof_lock, slpflag, "nfs_open_file_set_busy", &ts);
6d2010ae 2303 slpflag = 0;
b0d623f7 2304 }
0a7de745 2305 if (!error) {
b0d623f7 2306 nofp->nof_flags |= NFS_OPEN_FILE_BUSY;
0a7de745 2307 }
b0d623f7
A
2308 lck_mtx_unlock(&nofp->nof_lock);
2309
0a7de745 2310 return error;
b0d623f7
A
2311}
2312
2313/*
2314 * Clear the busy flag on an open file and wake up anyone waiting
2315 * to mark it busy.
2316 */
2317void
2318nfs_open_file_clear_busy(struct nfs_open_file *nofp)
2319{
2320 int wanted;
2321
2322 lck_mtx_lock(&nofp->nof_lock);
0a7de745 2323 if (!(nofp->nof_flags & NFS_OPEN_FILE_BUSY)) {
b0d623f7 2324 panic("nfs_open_file_clear_busy");
0a7de745 2325 }
b0d623f7 2326 wanted = (nofp->nof_flags & NFS_OPEN_FILE_WANT);
0a7de745 2327 nofp->nof_flags &= ~(NFS_OPEN_FILE_BUSY | NFS_OPEN_FILE_WANT);
b0d623f7 2328 lck_mtx_unlock(&nofp->nof_lock);
0a7de745 2329 if (wanted) {
b0d623f7 2330 wakeup(nofp);
0a7de745 2331 }
b0d623f7
A
2332}
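/*
 * Illustrative usage sketch (hypothetical, for clarity only): locating this
 * owner's open file on a node and marking it busy before touching its open
 * counts, mirroring the pattern used by the vnode ops below.  "np" and
 * "noop" are assumed valid; the actual update is elided.
 */
#if 0	/* example only */
	struct nfs_open_file *nofp = NULL;
	int error;

	error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1 /* alloc */);
	if (!error) {
		error = nfs_open_file_set_busy(nofp, NULL);
	}
	if (!error) {
		/* ... adjust the open file's counts / flags ... */
		nfs_open_file_clear_busy(nofp);
	}
#endif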
2333
2334/*
6d2010ae 2335 * Add the open state for the given access/deny modes to this open file.
b0d623f7
A
2336 */
2337void
6d2010ae 2338nfs_open_file_add_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode, int delegated)
b0d623f7 2339{
6d2010ae
A
2340 lck_mtx_lock(&nofp->nof_lock);
2341 nofp->nof_access |= accessMode;
2342 nofp->nof_deny |= denyMode;
b0d623f7 2343
6d2010ae
A
2344 if (delegated) {
2345 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
0a7de745 2346 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2347 nofp->nof_d_r++;
0a7de745 2348 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2349 nofp->nof_d_w++;
0a7de745 2350 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2351 nofp->nof_d_rw++;
0a7de745 2352 }
6d2010ae 2353 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
0a7de745 2354 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2355 nofp->nof_d_r_dw++;
0a7de745 2356 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2357 nofp->nof_d_w_dw++;
0a7de745 2358 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2359 nofp->nof_d_rw_dw++;
0a7de745 2360 }
6d2010ae 2361 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
0a7de745 2362 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2363 nofp->nof_d_r_drw++;
0a7de745 2364 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2365 nofp->nof_d_w_drw++;
0a7de745 2366 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2367 nofp->nof_d_rw_drw++;
0a7de745 2368 }
6d2010ae 2369 }
b0d623f7 2370 } else {
6d2010ae 2371 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
0a7de745 2372 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2373 nofp->nof_r++;
0a7de745 2374 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2375 nofp->nof_w++;
0a7de745 2376 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2377 nofp->nof_rw++;
0a7de745 2378 }
6d2010ae 2379 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
0a7de745 2380 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2381 nofp->nof_r_dw++;
0a7de745 2382 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2383 nofp->nof_w_dw++;
0a7de745 2384 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2385 nofp->nof_rw_dw++;
0a7de745 2386 }
6d2010ae 2387 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
0a7de745 2388 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2389 nofp->nof_r_drw++;
0a7de745 2390 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2391 nofp->nof_w_drw++;
0a7de745 2392 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2393 nofp->nof_rw_drw++;
0a7de745 2394 }
6d2010ae 2395 }
b0d623f7 2396 }
6d2010ae
A
2397
2398 nofp->nof_opencnt++;
2399 lck_mtx_unlock(&nofp->nof_lock);
b0d623f7
A
2400}
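/*
 * Illustrative sketch: the counters above form a 3x3 matrix of access
 * (R, W, RW) by deny (NONE, DENY_WRITE, DENY_BOTH), with a parallel
 * nof_d_* matrix for delegated opens.  For example, a non-delegated
 * read/write, deny-none open:
 */
#if 0	/* example only */
	nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_BOTH,
	    NFS_OPEN_SHARE_DENY_NONE, 0 /* not delegated */);
	/* bumps nofp->nof_rw and nofp->nof_opencnt, and ORs RW into nof_access */
#endif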
2401
2402/*
6d2010ae
A
2403 * Find which particular open combo will be closed and report what
2404 * the new modes will be and whether the open was delegated.
b0d623f7 2405 */
6d2010ae
A
2406void
2407nfs_open_file_remove_open_find(
b0d623f7
A
2408 struct nfs_open_file *nofp,
2409 uint32_t accessMode,
2410 uint32_t denyMode,
f427ee49
A
2411 uint8_t *newAccessMode,
2412 uint8_t *newDenyMode,
6d2010ae 2413 int *delegated)
b0d623f7 2414{
6d2010ae
A
2415 /*
2416 * Calculate new modes: a mode bit gets removed when there's only
2417 * one count in all the corresponding counts
2418 */
2419 *newAccessMode = nofp->nof_access;
2420 *newDenyMode = nofp->nof_deny;
b0d623f7 2421
6d2010ae
A
2422 if ((accessMode & NFS_OPEN_SHARE_ACCESS_READ) &&
2423 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) &&
2424 ((nofp->nof_r + nofp->nof_d_r +
0a7de745
A
2425 nofp->nof_rw + nofp->nof_d_rw +
2426 nofp->nof_r_dw + nofp->nof_d_r_dw +
2427 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2428 nofp->nof_r_drw + nofp->nof_d_r_drw +
2429 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1)) {
6d2010ae 2430 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
0a7de745 2431 }
6d2010ae
A
2432 if ((accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2433 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2434 ((nofp->nof_w + nofp->nof_d_w +
0a7de745
A
2435 nofp->nof_rw + nofp->nof_d_rw +
2436 nofp->nof_w_dw + nofp->nof_d_w_dw +
2437 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2438 nofp->nof_w_drw + nofp->nof_d_w_drw +
2439 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1)) {
6d2010ae 2440 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_WRITE;
0a7de745 2441 }
6d2010ae
A
2442 if ((denyMode & NFS_OPEN_SHARE_DENY_READ) &&
2443 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) &&
2444 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
0a7de745
A
2445 nofp->nof_w_drw + nofp->nof_d_w_drw +
2446 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1)) {
6d2010ae 2447 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_READ;
0a7de745 2448 }
6d2010ae
A
2449 if ((denyMode & NFS_OPEN_SHARE_DENY_WRITE) &&
2450 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_WRITE) &&
2451 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
0a7de745
A
2452 nofp->nof_w_drw + nofp->nof_d_w_drw +
2453 nofp->nof_rw_drw + nofp->nof_d_rw_drw +
2454 nofp->nof_r_dw + nofp->nof_d_r_dw +
2455 nofp->nof_w_dw + nofp->nof_d_w_dw +
2456 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) {
6d2010ae 2457 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 2458 }
6d2010ae
A
2459
2460 /* Find the corresponding open access/deny mode counter. */
b0d623f7 2461 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
0a7de745 2462 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2463 *delegated = (nofp->nof_d_r != 0);
0a7de745 2464 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2465 *delegated = (nofp->nof_d_w != 0);
0a7de745 2466 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2467 *delegated = (nofp->nof_d_rw != 0);
0a7de745 2468 } else {
6d2010ae 2469 *delegated = 0;
0a7de745 2470 }
b0d623f7 2471 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
0a7de745 2472 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2473 *delegated = (nofp->nof_d_r_dw != 0);
0a7de745 2474 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2475 *delegated = (nofp->nof_d_w_dw != 0);
0a7de745 2476 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2477 *delegated = (nofp->nof_d_rw_dw != 0);
0a7de745 2478 } else {
6d2010ae 2479 *delegated = 0;
0a7de745 2480 }
b0d623f7 2481 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
0a7de745 2482 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2483 *delegated = (nofp->nof_d_r_drw != 0);
0a7de745 2484 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2485 *delegated = (nofp->nof_d_w_drw != 0);
0a7de745 2486 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2487 *delegated = (nofp->nof_d_rw_drw != 0);
0a7de745 2488 } else {
6d2010ae 2489 *delegated = 0;
0a7de745 2490 }
b0d623f7 2491 }
6d2010ae
A
2492}
2493
2494/*
2495 * Remove the open state for the given access/deny modes from this open file.
2496 */
2497void
2498nfs_open_file_remove_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode)
2499{
f427ee49 2500 uint8_t newAccessMode, newDenyMode;
6d2010ae
A
2501 int delegated = 0;
2502
2503 lck_mtx_lock(&nofp->nof_lock);
2504 nfs_open_file_remove_open_find(nofp, accessMode, denyMode, &newAccessMode, &newDenyMode, &delegated);
2505
2506 /* Decrement the corresponding open access/deny mode counter. */
2507 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2508 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2509 if (delegated) {
0a7de745 2510 if (nofp->nof_d_r == 0) {
6d2010ae 2511 NP(nofp->nof_np, "nfs: open(R) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2512 } else {
6d2010ae 2513 nofp->nof_d_r--;
0a7de745 2514 }
6d2010ae 2515 } else {
0a7de745 2516 if (nofp->nof_r == 0) {
6d2010ae 2517 NP(nofp->nof_np, "nfs: open(R) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2518 } else {
6d2010ae 2519 nofp->nof_r--;
0a7de745 2520 }
6d2010ae
A
2521 }
2522 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2523 if (delegated) {
0a7de745 2524 if (nofp->nof_d_w == 0) {
6d2010ae 2525 NP(nofp->nof_np, "nfs: open(W) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2526 } else {
6d2010ae 2527 nofp->nof_d_w--;
0a7de745 2528 }
6d2010ae 2529 } else {
0a7de745 2530 if (nofp->nof_w == 0) {
6d2010ae 2531 NP(nofp->nof_np, "nfs: open(W) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2532 } else {
6d2010ae 2533 nofp->nof_w--;
0a7de745 2534 }
6d2010ae
A
2535 }
2536 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2537 if (delegated) {
0a7de745 2538 if (nofp->nof_d_rw == 0) {
6d2010ae 2539 NP(nofp->nof_np, "nfs: open(RW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2540 } else {
6d2010ae 2541 nofp->nof_d_rw--;
0a7de745 2542 }
6d2010ae 2543 } else {
0a7de745 2544 if (nofp->nof_rw == 0) {
6d2010ae 2545 NP(nofp->nof_np, "nfs: open(RW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2546 } else {
6d2010ae 2547 nofp->nof_rw--;
0a7de745 2548 }
6d2010ae
A
2549 }
2550 }
2551 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2552 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2553 if (delegated) {
0a7de745 2554 if (nofp->nof_d_r_dw == 0) {
6d2010ae 2555 NP(nofp->nof_np, "nfs: open(R,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2556 } else {
6d2010ae 2557 nofp->nof_d_r_dw--;
0a7de745 2558 }
6d2010ae 2559 } else {
0a7de745 2560 if (nofp->nof_r_dw == 0) {
6d2010ae 2561 NP(nofp->nof_np, "nfs: open(R,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2562 } else {
6d2010ae 2563 nofp->nof_r_dw--;
0a7de745 2564 }
6d2010ae
A
2565 }
2566 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2567 if (delegated) {
0a7de745 2568 if (nofp->nof_d_w_dw == 0) {
6d2010ae 2569 NP(nofp->nof_np, "nfs: open(W,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2570 } else {
6d2010ae 2571 nofp->nof_d_w_dw--;
0a7de745 2572 }
6d2010ae 2573 } else {
0a7de745 2574 if (nofp->nof_w_dw == 0) {
6d2010ae 2575 NP(nofp->nof_np, "nfs: open(W,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2576 } else {
6d2010ae 2577 nofp->nof_w_dw--;
0a7de745 2578 }
6d2010ae
A
2579 }
2580 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2581 if (delegated) {
0a7de745 2582 if (nofp->nof_d_rw_dw == 0) {
6d2010ae 2583 NP(nofp->nof_np, "nfs: open(RW,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2584 } else {
6d2010ae 2585 nofp->nof_d_rw_dw--;
0a7de745 2586 }
6d2010ae 2587 } else {
0a7de745 2588 if (nofp->nof_rw_dw == 0) {
6d2010ae 2589 NP(nofp->nof_np, "nfs: open(RW,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2590 } else {
6d2010ae 2591 nofp->nof_rw_dw--;
0a7de745 2592 }
6d2010ae
A
2593 }
2594 }
2595 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2596 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2597 if (delegated) {
0a7de745 2598 if (nofp->nof_d_r_drw == 0) {
6d2010ae 2599 NP(nofp->nof_np, "nfs: open(R,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2600 } else {
6d2010ae 2601 nofp->nof_d_r_drw--;
0a7de745 2602 }
6d2010ae 2603 } else {
0a7de745 2604 if (nofp->nof_r_drw == 0) {
6d2010ae 2605 NP(nofp->nof_np, "nfs: open(R,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2606 } else {
6d2010ae 2607 nofp->nof_r_drw--;
0a7de745 2608 }
6d2010ae
A
2609 }
2610 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2611 if (delegated) {
0a7de745 2612 if (nofp->nof_d_w_drw == 0) {
6d2010ae 2613 NP(nofp->nof_np, "nfs: open(W,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2614 } else {
6d2010ae 2615 nofp->nof_d_w_drw--;
0a7de745 2616 }
6d2010ae 2617 } else {
0a7de745 2618 if (nofp->nof_w_drw == 0) {
6d2010ae 2619 NP(nofp->nof_np, "nfs: open(W,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2620 } else {
6d2010ae 2621 nofp->nof_w_drw--;
0a7de745 2622 }
6d2010ae
A
2623 }
2624 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2625 if (delegated) {
0a7de745 2626 if (nofp->nof_d_rw_drw == 0) {
6d2010ae 2627 NP(nofp->nof_np, "nfs: open(RW,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2628 } else {
6d2010ae 2629 nofp->nof_d_rw_drw--;
0a7de745 2630 }
6d2010ae 2631 } else {
0a7de745 2632 if (nofp->nof_rw_drw == 0) {
6d2010ae 2633 NP(nofp->nof_np, "nfs: open(RW,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2634 } else {
6d2010ae 2635 nofp->nof_rw_drw--;
0a7de745 2636 }
6d2010ae
A
2637 }
2638 }
2639 }
2640
2641 /* update the modes */
2642 nofp->nof_access = newAccessMode;
2643 nofp->nof_deny = newDenyMode;
2644 nofp->nof_opencnt--;
2645 lck_mtx_unlock(&nofp->nof_lock);
2646}
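/*
 * Illustrative sketch: worked example of the "last count clears the mode
 * bit" rule computed above.  Assume the only open on the file is a single
 * non-delegated read/write, deny-none open (nofp->nof_rw == 1):
 */
#if 0	/* example only */
	nfs_open_file_remove_open(nofp, NFS_OPEN_SHARE_ACCESS_BOTH,
	    NFS_OPEN_SHARE_DENY_NONE);
	/*
	 * nof_rw drops to 0; since no other counter still grants READ or
	 * WRITE, both access bits are cleared and nof_opencnt goes to 0.
	 */
#endif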
2647
cb323159 2648#if CONFIG_NFS4
6d2010ae
A
2649/*
2650 * Get the current (delegation, lock, open, default) stateid for this node.
2651 * If node has a delegation, use that stateid.
2652 * If pid has a lock, use the lockowner's stateid.
2653 * Or use the open file's stateid.
2654 * If no open file, use a default stateid of all ones.
2655 */
2656void
2657nfs_get_stateid(nfsnode_t np, thread_t thd, kauth_cred_t cred, nfs_stateid *sid)
2658{
2659 struct nfsmount *nmp = NFSTONMP(np);
2660 proc_t p = thd ? get_bsdthreadtask_info(thd) : current_proc(); // XXX async I/O requests don't have a thread
2661 struct nfs_open_owner *noop = NULL;
2662 struct nfs_open_file *nofp = NULL;
2663 struct nfs_lock_owner *nlop = NULL;
2664 nfs_stateid *s = NULL;
2665
2666 if (np->n_openflags & N_DELEG_MASK) {
2667 s = &np->n_dstateid;
2668 } else {
0a7de745 2669 if (p) {
6d2010ae 2670 nlop = nfs_lock_owner_find(np, p, 0);
0a7de745 2671 }
6d2010ae
A
2672 if (nlop && !TAILQ_EMPTY(&nlop->nlo_locks)) {
2673 /* we hold locks, use lock stateid */
2674 s = &nlop->nlo_stateid;
2675 } else if (((noop = nfs_open_owner_find(nmp, cred, 0))) &&
0a7de745
A
2676 (nfs_open_file_find(np, noop, &nofp, 0, 0, 0) == 0) &&
2677 !(nofp->nof_flags & NFS_OPEN_FILE_LOST) &&
2678 nofp->nof_access) {
6d2010ae 2679 /* we (should) have the file open, use open stateid */
0a7de745 2680 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
6d2010ae 2681 nfs4_reopen(nofp, thd);
0a7de745
A
2682 }
2683 if (!(nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6d2010ae 2684 s = &nofp->nof_stateid;
0a7de745 2685 }
6d2010ae
A
2686 }
2687 }
2688
2689 if (s) {
2690 sid->seqid = s->seqid;
2691 sid->other[0] = s->other[0];
2692 sid->other[1] = s->other[1];
2693 sid->other[2] = s->other[2];
2694 } else {
2695 /* named attributes may not have a stateid for reads, so don't complain for them */
0a7de745 2696 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
6d2010ae 2697 NP(np, "nfs_get_stateid: no stateid");
0a7de745 2698 }
6d2010ae
A
2699 sid->seqid = sid->other[0] = sid->other[1] = sid->other[2] = 0xffffffff;
2700 }
0a7de745 2701 if (nlop) {
6d2010ae 2702 nfs_lock_owner_rele(nlop);
0a7de745
A
2703 }
2704 if (noop) {
6d2010ae 2705 nfs_open_owner_rele(noop);
0a7de745 2706 }
6d2010ae
A
2707}
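/*
 * Illustrative usage sketch: an I/O request typically fills its stateid just
 * before building the RPC, letting the priority order above (delegation,
 * then lock, then open, then all-ones) pick the right one for the calling
 * thread and credential.  "error" and "nmreq" are assumed to come from the
 * surrounding request-building code.
 */
#if 0	/* example only */
	nfs_stateid stateid;

	nfs_get_stateid(np, vfs_context_thread(ctx), vfs_context_ucred(ctx), &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
#endif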
2708
2709
2710/*
2711 * When we have a delegation, we may be able to perform the OPEN locally.
2712 * Perform the OPEN by checking the delegation ACE and/or checking via ACCESS.
2713 */
2714int
2715nfs4_open_delegated(
2716 nfsnode_t np,
2717 struct nfs_open_file *nofp,
2718 uint32_t accessMode,
2719 uint32_t denyMode,
2720 vfs_context_t ctx)
2721{
2722 int error = 0, ismember, readtoo = 0, authorized = 0;
2723 uint32_t action;
2724 struct kauth_acl_eval eval;
2725 kauth_cred_t cred = vfs_context_ucred(ctx);
2726
2727 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2728 /*
2729 * Try to open it for read access too,
2730 * so the buffer cache can read data.
2731 */
2732 readtoo = 1;
2733 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2734 }
2735
2736tryagain:
2737 action = 0;
0a7de745 2738 if (accessMode & NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2739 action |= KAUTH_VNODE_READ_DATA;
0a7de745
A
2740 }
2741 if (accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2742 action |= KAUTH_VNODE_WRITE_DATA;
0a7de745 2743 }
6d2010ae
A
2744
2745 /* evaluate ACE (if we have one) */
2746 if (np->n_dace.ace_flags) {
2747 eval.ae_requested = action;
2748 eval.ae_acl = &np->n_dace;
2749 eval.ae_count = 1;
2750 eval.ae_options = 0;
0a7de745 2751 if (np->n_vattr.nva_uid == kauth_cred_getuid(cred)) {
6d2010ae 2752 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
0a7de745 2753 }
6d2010ae 2754 error = kauth_cred_ismember_gid(cred, np->n_vattr.nva_gid, &ismember);
0a7de745 2755 if (!error && ismember) {
6d2010ae 2756 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
0a7de745 2757 }
6d2010ae
A
2758
2759 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
2760 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
2761 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
2762 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
2763
2764 error = kauth_acl_evaluate(cred, &eval);
2765
0a7de745 2766 if (!error && (eval.ae_result == KAUTH_RESULT_ALLOW)) {
6d2010ae 2767 authorized = 1;
0a7de745 2768 }
6d2010ae
A
2769 }
2770
2771 if (!authorized) {
2772 /* need to ask the server via ACCESS */
2773 struct vnop_access_args naa;
2774 naa.a_desc = &vnop_access_desc;
2775 naa.a_vp = NFSTOV(np);
2776 naa.a_action = action;
2777 naa.a_context = ctx;
0a7de745 2778 if (!(error = nfs_vnop_access(&naa))) {
6d2010ae 2779 authorized = 1;
0a7de745 2780 }
6d2010ae
A
2781 }
2782
2783 if (!authorized) {
2784 if (readtoo) {
2785 /* try again without the extra read access */
2786 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2787 readtoo = 0;
2788 goto tryagain;
2789 }
0a7de745 2790 return error ? error : EACCES;
6d2010ae
A
2791 }
2792
2793 nfs_open_file_add_open(nofp, accessMode, denyMode, 1);
2794
0a7de745 2795 return 0;
6d2010ae
A
2796}
2797
2798
2799/*
2800 * Open a file with the given access/deny modes.
2801 *
2802 * If we have a delegation, we may be able to handle the open locally.
2803 * Otherwise, we will always send the open RPC even if this open's mode is
2804 * a subset of all the existing opens. This makes sure that we will always
2805 * be able to do a downgrade to any of the open modes.
2806 *
2807 * Note: local conflicts should have already been checked in nfs_open_file_find().
2808 */
2809int
2810nfs4_open(
2811 nfsnode_t np,
2812 struct nfs_open_file *nofp,
2813 uint32_t accessMode,
2814 uint32_t denyMode,
2815 vfs_context_t ctx)
2816{
2817 vnode_t vp = NFSTOV(np);
2818 vnode_t dvp = NULL;
2819 struct componentname cn;
2820 const char *vname = NULL;
f427ee49 2821 uint32_t namelen;
6d2010ae
A
2822 char smallname[128];
2823 char *filename = NULL;
2824 int error = 0, readtoo = 0;
2825
2826 /*
2827 * We can handle the OPEN ourselves if we have a delegation,
2828 * unless it's a read delegation and the open is asking for
2829 * either write access or deny read. We also don't bother to
2830 * use the delegation if it's being returned.
2831 */
2832 if (np->n_openflags & N_DELEG_MASK) {
0a7de745
A
2833 if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) {
2834 return error;
2835 }
6d2010ae
A
2836 if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN) &&
2837 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ||
0a7de745 2838 (!(accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) && !(denyMode & NFS_OPEN_SHARE_DENY_READ)))) {
6d2010ae
A
2839 error = nfs4_open_delegated(np, nofp, accessMode, denyMode, ctx);
2840 nfs_open_state_clear_busy(np);
0a7de745 2841 return error;
6d2010ae
A
2842 }
2843 nfs_open_state_clear_busy(np);
2844 }
2845
2846 /*
2847 * [sigh] We can't trust VFS to get the parent right for named
2848 * attribute nodes. (It likes to reparent the nodes after we've
2849 * created them.) Luckily we can probably get the right parent
2850 * from the n_parent we have stashed away.
2851 */
2852 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
0a7de745 2853 (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
6d2010ae 2854 dvp = NULL;
0a7de745
A
2855 }
2856 if (!dvp) {
6d2010ae 2857 dvp = vnode_getparent(vp);
0a7de745 2858 }
6d2010ae
A
2859 vname = vnode_getname(vp);
2860 if (!dvp || !vname) {
0a7de745 2861 if (!error) {
6d2010ae 2862 error = EIO;
0a7de745 2863 }
6d2010ae
A
2864 goto out;
2865 }
2866 filename = &smallname[0];
2867 namelen = snprintf(filename, sizeof(smallname), "%s", vname);
2868 if (namelen >= sizeof(smallname)) {
0a7de745 2869 MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
6d2010ae
A
2870 if (!filename) {
2871 error = ENOMEM;
2872 goto out;
2873 }
0a7de745 2874 snprintf(filename, namelen + 1, "%s", vname);
6d2010ae
A
2875 }
2876 bzero(&cn, sizeof(cn));
2877 cn.cn_nameptr = filename;
2878 cn.cn_namelen = namelen;
2879
2880 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2881 /*
2882 * Try to open it for read access too,
2883 * so the buffer cache can read data.
2884 */
2885 readtoo = 1;
2886 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2887 }
2888tryagain:
2889 error = nfs4_open_rpc(nofp, ctx, &cn, NULL, dvp, &vp, NFS_OPEN_NOCREATE, accessMode, denyMode);
2890 if (error) {
2891 if (!nfs_mount_state_error_should_restart(error) &&
2892 (error != EINTR) && (error != ERESTART) && readtoo) {
2893 /* try again without the extra read access */
2894 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2895 readtoo = 0;
2896 goto tryagain;
2897 }
2898 goto out;
2899 }
2900 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
b0d623f7 2901out:
0a7de745 2902 if (filename && (filename != &smallname[0])) {
b0d623f7 2903 FREE(filename, M_TEMP);
0a7de745
A
2904 }
2905 if (vname) {
b0d623f7 2906 vnode_putname(vname);
0a7de745
A
2907 }
2908 if (dvp != NULLVP) {
b0d623f7 2909 vnode_put(dvp);
0a7de745
A
2910 }
2911 return error;
b0d623f7 2912}
cb323159 2913#endif /* CONFIG_NFS4 */
b0d623f7 2914
b0d623f7 2915int
6d2010ae
A
2916nfs_vnop_mmap(
2917 struct vnop_mmap_args /* {
0a7de745
A
2918 * struct vnodeop_desc *a_desc;
2919 * vnode_t a_vp;
2920 * int a_fflags;
2921 * vfs_context_t a_context;
2922 * } */*ap)
b0d623f7
A
2923{
2924 vfs_context_t ctx = ap->a_context;
2925 vnode_t vp = ap->a_vp;
2926 nfsnode_t np = VTONFS(vp);
f427ee49
A
2927 int error = 0, delegated = 0;
2928 uint8_t accessMode, denyMode;
b0d623f7 2929 struct nfsmount *nmp;
b0d623f7
A
2930 struct nfs_open_owner *noop = NULL;
2931 struct nfs_open_file *nofp = NULL;
2932
b0d623f7 2933 nmp = VTONMP(vp);
0a7de745
A
2934 if (nfs_mount_gone(nmp)) {
2935 return ENXIO;
2936 }
b0d623f7 2937
0a7de745
A
2938 if (!vnode_isreg(vp) || !(ap->a_fflags & (PROT_READ | PROT_WRITE))) {
2939 return EINVAL;
2940 }
2941 if (np->n_flag & NREVOKE) {
2942 return EIO;
2943 }
b0d623f7 2944
6d2010ae
A
2945 /*
2946 * fflags contains some combination of: PROT_READ, PROT_WRITE
2947 * Since it's not possible to mmap() without having the file open for reading,
2948 * read access is always there (even if PROT_READ is not set).
2949 */
2950 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
0a7de745 2951 if (ap->a_fflags & PROT_WRITE) {
b0d623f7 2952 accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE;
0a7de745 2953 }
6d2010ae 2954 denyMode = NFS_OPEN_SHARE_DENY_NONE;
b0d623f7
A
2955
2956 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
0a7de745
A
2957 if (!noop) {
2958 return ENOMEM;
2959 }
b0d623f7
A
2960
2961restart:
6d2010ae 2962 error = nfs_mount_state_in_use_start(nmp, NULL);
b0d623f7
A
2963 if (error) {
2964 nfs_open_owner_rele(noop);
0a7de745 2965 return error;
b0d623f7 2966 }
6d2010ae 2967 if (np->n_flag & NREVOKE) {
b0d623f7 2968 error = EIO;
6d2010ae
A
2969 nfs_mount_state_in_use_end(nmp, 0);
2970 nfs_open_owner_rele(noop);
0a7de745 2971 return error;
6d2010ae
A
2972 }
2973
2974 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
2975 if (error || (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST))) {
2976 NP(np, "nfs_vnop_mmap: no open file for owner, error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
2977 error = EPERM;
b0d623f7 2978 }
cb323159 2979#if CONFIG_NFS4
b0d623f7 2980 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6d2010ae 2981 error = nfs4_reopen(nofp, NULL);
b0d623f7 2982 nofp = NULL;
0a7de745 2983 if (!error) {
f427ee49 2984 nfs_mount_state_in_use_end(nmp, 0);
6d2010ae 2985 goto restart;
0a7de745 2986 }
b0d623f7 2987 }
cb323159 2988#endif
0a7de745 2989 if (!error) {
6d2010ae 2990 error = nfs_open_file_set_busy(nofp, NULL);
0a7de745 2991 }
b0d623f7
A
2992 if (error) {
2993 nofp = NULL;
2994 goto out;
2995 }
2996
2997 /*
6d2010ae
A
2998 * The open reference for mmap must mirror an existing open because
2999 * we may need to reclaim it after the file is closed.
3000 * So grab another open count matching the accessMode passed in.
3001 * If we already had an mmap open, prefer read/write without deny mode.
3002 * This means we may have to drop the current mmap open first.
3e170ce0
A
3003 *
3004 * N.B. We should have an open for the mmap, because mmap was
3005 * called on an open descriptor, or we've created an open for read
3006 * as a result of reading the first page for execve. However, if we
3007 * piggybacked on an existing NFS_OPEN_SHARE_ACCESS_READ/NFS_OPEN_SHARE_DENY_NONE
3008 * open, that open may have since been closed.
b0d623f7 3009 */
6d2010ae 3010
3e170ce0
A
3011 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
3012 if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
3013 /* We shouldn't get here. We've already opened the file for execve. */
3014 NP(np, "nfs_vnop_mmap: File already needs close access: 0x%x, cred: %d thread: %lld",
0a7de745 3015 nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
3e170ce0
A
3016 }
3017 /*
3018 * Mappings for execve are just for read. Get out with EPERM if the accessMode is not ACCESS_READ
3019 * or the access would be denied. Other accesses should have an open descriptor for the mapping.
3020 */
3021 if (accessMode != NFS_OPEN_SHARE_ACCESS_READ || (accessMode & nofp->nof_deny)) {
6d2010ae
A
3022 /* not asking for just read access -> fail */
3023 error = EPERM;
3024 goto out;
3025 }
3026 /* we don't have the file open, so open it for read access */
3027 if (nmp->nm_vers < NFS_VER4) {
3028 /* NFS v2/v3 opens are always allowed - so just add it. */
3029 nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
b0d623f7 3030 error = 0;
cb323159
A
3031 }
3032#if CONFIG_NFS4
3033 else {
6d2010ae 3034 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
b0d623f7 3035 }
cb323159 3036#endif
0a7de745 3037 if (!error) {
6d2010ae 3038 nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
0a7de745
A
3039 }
3040 if (error) {
b0d623f7 3041 goto out;
0a7de745 3042 }
6d2010ae
A
3043 }
3044
3045 /* determine deny mode for open */
3046 if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
3047 if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
3048 delegated = 1;
0a7de745 3049 if (nofp->nof_d_rw) {
6d2010ae 3050 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 3051 } else if (nofp->nof_d_rw_dw) {
6d2010ae 3052 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 3053 } else if (nofp->nof_d_rw_drw) {
6d2010ae 3054 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
0a7de745 3055 }
6d2010ae
A
3056 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
3057 delegated = 0;
0a7de745 3058 if (nofp->nof_rw) {
6d2010ae 3059 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 3060 } else if (nofp->nof_rw_dw) {
6d2010ae 3061 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 3062 } else if (nofp->nof_rw_drw) {
6d2010ae 3063 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
0a7de745 3064 }
6d2010ae
A
3065 } else {
3066 error = EPERM;
3067 }
3068 } else { /* NFS_OPEN_SHARE_ACCESS_READ */
3069 if (nofp->nof_d_r || nofp->nof_d_r_dw || nofp->nof_d_r_drw) {
3070 delegated = 1;
0a7de745 3071 if (nofp->nof_d_r) {
6d2010ae 3072 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 3073 } else if (nofp->nof_d_r_dw) {
6d2010ae 3074 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 3075 } else if (nofp->nof_d_r_drw) {
6d2010ae 3076 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
0a7de745 3077 }
6d2010ae
A
3078 } else if (nofp->nof_r || nofp->nof_r_dw || nofp->nof_r_drw) {
3079 delegated = 0;
0a7de745 3080 if (nofp->nof_r) {
6d2010ae 3081 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 3082 } else if (nofp->nof_r_dw) {
6d2010ae 3083 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 3084 } else if (nofp->nof_r_drw) {
6d2010ae 3085 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
0a7de745 3086 }
3e170ce0
A
3087 } else if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
3088 /*
3089 * This clause and the one below are to co-opt a read/write access
3090 * for a read-only mmapping. We probably got here because an
3091 * existing read/write open for an executable file already exists.
3092 */
3093 delegated = 1;
3094 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
0a7de745 3095 if (nofp->nof_d_rw) {
3e170ce0 3096 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 3097 } else if (nofp->nof_d_rw_dw) {
3e170ce0 3098 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 3099 } else if (nofp->nof_d_rw_drw) {
3e170ce0 3100 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
0a7de745 3101 }
3e170ce0
A
3102 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
3103 delegated = 0;
3104 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
0a7de745 3105 if (nofp->nof_rw) {
3e170ce0 3106 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 3107 } else if (nofp->nof_rw_dw) {
3e170ce0 3108 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 3109 } else if (nofp->nof_rw_drw) {
3e170ce0 3110 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
0a7de745 3111 }
6d2010ae
A
3112 } else {
3113 error = EPERM;
3114 }
3115 }
0a7de745 3116 if (error) { /* mmap mode without proper open mode */
6d2010ae 3117 goto out;
0a7de745 3118 }
6d2010ae
A
3119
3120 /*
3121 * If the existing mmap access is more than the new access OR the
3122 * existing access is the same and the existing deny mode is less,
3123 * then we'll stick with the existing mmap open mode.
3124 */
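 /*
  * E.g. an existing read/write mmap open with DENY_NONE already covers a
  * later read-only request, so we keep it and bail out here; only a request
  * for more access, or the same access with a less restrictive deny mode,
  * replaces the recorded mmap mode below.
  */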
3125 if ((nofp->nof_mmap_access > accessMode) ||
0a7de745 3126 ((nofp->nof_mmap_access == accessMode) && (nofp->nof_mmap_deny <= denyMode))) {
6d2010ae 3127 goto out;
0a7de745 3128 }
6d2010ae
A
3129
3130 /* update mmap open mode */
3131 if (nofp->nof_mmap_access) {
3132 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
3133 if (error) {
0a7de745 3134 if (!nfs_mount_state_error_should_restart(error)) {
6d2010ae 3135 NP(np, "nfs_vnop_mmap: close of previous mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 3136 }
6d2010ae
A
3137 NP(np, "nfs_vnop_mmap: update, close error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
3138 goto out;
b0d623f7 3139 }
6d2010ae 3140 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
b0d623f7
A
3141 }
3142
6d2010ae
A
3143 nfs_open_file_add_open(nofp, accessMode, denyMode, delegated);
3144 nofp->nof_mmap_access = accessMode;
3145 nofp->nof_mmap_deny = denyMode;
3146
b0d623f7 3147out:
0a7de745 3148 if (nofp) {
b0d623f7 3149 nfs_open_file_clear_busy(nofp);
0a7de745 3150 }
b0d623f7
A
3151 if (nfs_mount_state_in_use_end(nmp, error)) {
3152 nofp = NULL;
3153 goto restart;
3154 }
0a7de745 3155 if (noop) {
b0d623f7 3156 nfs_open_owner_rele(noop);
0a7de745 3157 }
316670eb
A
3158
3159 if (!error) {
3160 int ismapped = 0;
3161 nfs_node_lock_force(np);
3162 if ((np->n_flag & NISMAPPED) == 0) {
3163 np->n_flag |= NISMAPPED;
3164 ismapped = 1;
3165 }
3166 nfs_node_unlock(np);
3167 if (ismapped) {
3168 lck_mtx_lock(&nmp->nm_lock);
3169 nmp->nm_state &= ~NFSSTA_SQUISHY;
3170 nmp->nm_curdeadtimeout = nmp->nm_deadtimeout;
0a7de745 3171 if (nmp->nm_curdeadtimeout <= 0) {
316670eb 3172 nmp->nm_deadto_start = 0;
0a7de745 3173 }
316670eb
A
3174 nmp->nm_mappers++;
3175 lck_mtx_unlock(&nmp->nm_lock);
3176 }
3177 }
3178
0a7de745 3179 return error;
b0d623f7
A
3180}
3181
f427ee49
A
3182int
3183nfs_vnop_mmap_check(
3184 struct vnop_mmap_check_args /* {
3185 * struct vnodeop_desc *a_desc;
3186 * vnode_t a_vp;
3187 * int a_flags;
3188 * vfs_context_t a_context;
3189 * } */*ap)
3190{
3191 vfs_context_t ctx = ap->a_context;
3192 vnode_t vp = ap->a_vp;
3193 struct nfsmount *nmp = VTONMP(vp);
3194 struct vnop_access_args naa;
3195 int error = 0;
3196
3197 if (nfs_mount_gone(nmp)) {
3198 return ENXIO;
3199 }
3200
3201 if (vnode_isreg(vp)) {
3202 /*
3203 * We only need to ensure that a page-in will be
3204 * possible with these credentials. Everything
3205 * else has been checked at other layers.
3206 */
3207 naa.a_desc = &vnop_access_desc;
3208 naa.a_vp = vp;
3209 naa.a_action = KAUTH_VNODE_READ_DATA;
3210 naa.a_context = ctx;
3211
3212 /* compute actual success/failure based on accessibility */
3213 error = nfs_vnop_access(&naa);
3214 }
3215
3216 return error;
3217}
b0d623f7
A
3218
3219int
6d2010ae
A
3220nfs_vnop_mnomap(
3221 struct vnop_mnomap_args /* {
0a7de745
A
3222 * struct vnodeop_desc *a_desc;
3223 * vnode_t a_vp;
3224 * vfs_context_t a_context;
3225 * } */*ap)
b0d623f7
A
3226{
3227 vfs_context_t ctx = ap->a_context;
3228 vnode_t vp = ap->a_vp;
b0d623f7
A
3229 nfsnode_t np = VTONFS(vp);
3230 struct nfsmount *nmp;
b0d623f7 3231 struct nfs_open_file *nofp = NULL;
6d2010ae
A
3232 off_t size;
3233 int error;
316670eb 3234 int is_mapped_flag = 0;
0a7de745 3235
b0d623f7 3236 nmp = VTONMP(vp);
0a7de745
A
3237 if (nfs_mount_gone(nmp)) {
3238 return ENXIO;
3239 }
b0d623f7 3240
316670eb
A
3241 nfs_node_lock_force(np);
3242 if (np->n_flag & NISMAPPED) {
3243 is_mapped_flag = 1;
3244 np->n_flag &= ~NISMAPPED;
3245 }
3246 nfs_node_unlock(np);
3247 if (is_mapped_flag) {
3248 lck_mtx_lock(&nmp->nm_lock);
0a7de745 3249 if (nmp->nm_mappers) {
316670eb 3250 nmp->nm_mappers--;
0a7de745 3251 } else {
316670eb 3252 NP(np, "nfs_vnop_mnomap: removing mmap reference from mount, but mount has no files mmapped");
0a7de745 3253 }
316670eb
A
3254 lck_mtx_unlock(&nmp->nm_lock);
3255 }
3256
6d2010ae
A
3257 /* flush buffers/ubc before we drop the open (in case it's our last open) */
3258 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
0a7de745 3259 if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp))) {
6d2010ae 3260 ubc_msync(vp, 0, size, NULL, UBC_PUSHALL | UBC_SYNC);
0a7de745 3261 }
b0d623f7 3262
6d2010ae
A
3263 /* walk all open files and close all mmap opens */
3264loop:
3265 error = nfs_mount_state_in_use_start(nmp, NULL);
0a7de745
A
3266 if (error) {
3267 return error;
3268 }
6d2010ae
A
3269 lck_mtx_lock(&np->n_openlock);
3270 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
0a7de745 3271 if (!nofp->nof_mmap_access) {
6d2010ae 3272 continue;
0a7de745 3273 }
b0d623f7 3274 lck_mtx_unlock(&np->n_openlock);
cb323159 3275#if CONFIG_NFS4
6d2010ae 3276 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
6d2010ae 3277 error = nfs4_reopen(nofp, NULL);
0a7de745 3278 if (!error) {
f427ee49 3279 nfs_mount_state_in_use_end(nmp, 0);
6d2010ae 3280 goto loop;
0a7de745 3281 }
6d2010ae 3282 }
cb323159 3283#endif
0a7de745 3284 if (!error) {
6d2010ae 3285 error = nfs_open_file_set_busy(nofp, NULL);
0a7de745 3286 }
6d2010ae
A
3287 if (error) {
3288 lck_mtx_lock(&np->n_openlock);
3289 break;
3290 }
3291 if (nofp->nof_mmap_access) {
3292 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
3293 if (!nfs_mount_state_error_should_restart(error)) {
0a7de745 3294 if (error) { /* not a state-operation-restarting error, so just clear the access */
6d2010ae 3295 NP(np, "nfs_vnop_mnomap: close of mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 3296 }
6d2010ae
A
3297 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
3298 }
0a7de745 3299 if (error) {
6d2010ae 3300 NP(np, "nfs_vnop_mnomap: error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 3301 }
6d2010ae
A
3302 }
3303 nfs_open_file_clear_busy(nofp);
3304 nfs_mount_state_in_use_end(nmp, error);
3305 goto loop;
b0d623f7 3306 }
6d2010ae
A
3307 lck_mtx_unlock(&np->n_openlock);
3308 nfs_mount_state_in_use_end(nmp, error);
0a7de745 3309 return error;
6d2010ae 3310}
b0d623f7 3311
6d2010ae
A
3312/*
3313 * Search a node's lock owner list for the owner for this process.
3314 * If not found and "alloc" is set, then allocate a new one.
3315 */
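 /*
  * A lock owner is identified by (pid, process start time), so a recycled
  * pid from a newer process never matches a stale entry; a stale entry is
  * either reset and reused in place or, if still referenced, unlinked and
  * the search retried.
  */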
3316struct nfs_lock_owner *
3317nfs_lock_owner_find(nfsnode_t np, proc_t p, int alloc)
3318{
3319 pid_t pid = proc_pid(p);
3320 struct nfs_lock_owner *nlop, *newnlop = NULL;
b0d623f7 3321
6d2010ae
A
3322tryagain:
3323 lck_mtx_lock(&np->n_openlock);
3324 TAILQ_FOREACH(nlop, &np->n_lock_owners, nlo_link) {
0a7de745
A
3325 os_ref_count_t newcount;
3326
3327 if (nlop->nlo_pid != pid) {
6d2010ae 3328 continue;
0a7de745
A
3329 }
3330 if (timevalcmp(&nlop->nlo_pid_start, &p->p_start, ==)) {
6d2010ae 3331 break;
0a7de745 3332 }
6d2010ae 3333 /* stale lock owner... reuse it if we can */
0a7de745 3334 if (os_ref_get_count(&nlop->nlo_refcnt)) {
6d2010ae
A
3335 TAILQ_REMOVE(&np->n_lock_owners, nlop, nlo_link);
3336 nlop->nlo_flags &= ~NFS_LOCK_OWNER_LINK;
0a7de745 3337 newcount = os_ref_release_locked(&nlop->nlo_refcnt);
6d2010ae
A
3338 lck_mtx_unlock(&np->n_openlock);
3339 goto tryagain;
3340 }
3341 nlop->nlo_pid_start = p->p_start;
3342 nlop->nlo_seqid = 0;
3343 nlop->nlo_stategenid = 0;
3344 break;
b0d623f7
A
3345 }
3346
6d2010ae
A
3347 if (!nlop && !newnlop && alloc) {
3348 lck_mtx_unlock(&np->n_openlock);
3349 MALLOC(newnlop, struct nfs_lock_owner *, sizeof(struct nfs_lock_owner), M_TEMP, M_WAITOK);
0a7de745
A
3350 if (!newnlop) {
3351 return NULL;
3352 }
6d2010ae 3353 bzero(newnlop, sizeof(*newnlop));
c3c9b80d 3354 lck_mtx_init(&newnlop->nlo_lock, &nfs_open_grp, LCK_ATTR_NULL);
6d2010ae
A
3355 newnlop->nlo_pid = pid;
3356 newnlop->nlo_pid_start = p->p_start;
3357 newnlop->nlo_name = OSAddAtomic(1, &nfs_lock_owner_seqnum);
3358 TAILQ_INIT(&newnlop->nlo_locks);
3359 goto tryagain;
b0d623f7 3360 }
6d2010ae
A
3361 if (!nlop && newnlop) {
3362 newnlop->nlo_flags |= NFS_LOCK_OWNER_LINK;
0a7de745 3363 os_ref_init(&newnlop->nlo_refcnt, NULL);
6d2010ae
A
3364 TAILQ_INSERT_HEAD(&np->n_lock_owners, newnlop, nlo_link);
3365 nlop = newnlop;
b0d623f7 3366 }
6d2010ae 3367 lck_mtx_unlock(&np->n_openlock);
b0d623f7 3368
0a7de745 3369 if (newnlop && (nlop != newnlop)) {
6d2010ae 3370 nfs_lock_owner_destroy(newnlop);
0a7de745 3371 }
b0d623f7 3372
0a7de745 3373 if (nlop) {
6d2010ae 3374 nfs_lock_owner_ref(nlop);
0a7de745 3375 }
b0d623f7 3376
0a7de745 3377 return nlop;
6d2010ae 3378}
b0d623f7
A
3379
3380/*
3381 * destroy a lock owner that's no longer needed
3382 */
3383void
3384nfs_lock_owner_destroy(struct nfs_lock_owner *nlop)
3385{
3386 if (nlop->nlo_open_owner) {
3387 nfs_open_owner_rele(nlop->nlo_open_owner);
3388 nlop->nlo_open_owner = NULL;
3389 }
c3c9b80d 3390 lck_mtx_destroy(&nlop->nlo_lock, &nfs_open_grp);
b0d623f7
A
3391 FREE(nlop, M_TEMP);
3392}
3393
3394/*
3395 * acquire a reference count on a lock owner
3396 */
3397void
3398nfs_lock_owner_ref(struct nfs_lock_owner *nlop)
3399{
3400 lck_mtx_lock(&nlop->nlo_lock);
0a7de745 3401 os_ref_retain_locked(&nlop->nlo_refcnt);
b0d623f7
A
3402 lck_mtx_unlock(&nlop->nlo_lock);
3403}
3404
3405/*
3406 * drop a reference count on a lock owner and destroy it if
3407 * it is no longer referenced and no longer on the mount's list.
3408 */
3409void
3410nfs_lock_owner_rele(struct nfs_lock_owner *nlop)
3411{
0a7de745
A
3412 os_ref_count_t newcount;
3413
b0d623f7 3414 lck_mtx_lock(&nlop->nlo_lock);
0a7de745 3415 if (os_ref_get_count(&nlop->nlo_refcnt) < 1) {
b0d623f7 3416 panic("nfs_lock_owner_rele: no refcnt");
0a7de745
A
3417 }
3418 newcount = os_ref_release_locked(&nlop->nlo_refcnt);
3419 if (!newcount && (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY)) {
b0d623f7 3420 panic("nfs_lock_owner_rele: busy");
0a7de745 3421 }
b0d623f7 3422 /* XXX we may potentially want to clean up idle/unused lock owner structures */
0a7de745 3423 if (newcount || (nlop->nlo_flags & NFS_LOCK_OWNER_LINK)) {
b0d623f7
A
3424 lck_mtx_unlock(&nlop->nlo_lock);
3425 return;
3426 }
3427 /* owner is no longer referenced or linked to mount, so destroy it */
3428 lck_mtx_unlock(&nlop->nlo_lock);
3429 nfs_lock_owner_destroy(nlop);
3430}
3431
3432/*
3433 * Mark a lock owner as busy because we are about to
3434 * start an operation that uses and updates lock owner state.
3435 */
3436int
3437nfs_lock_owner_set_busy(struct nfs_lock_owner *nlop, thread_t thd)
3438{
3439 struct nfsmount *nmp;
cb323159 3440 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
b0d623f7
A
3441 int error = 0, slpflag;
3442
3443 nmp = nlop->nlo_open_owner->noo_mount;
0a7de745
A
3444 if (nfs_mount_gone(nmp)) {
3445 return ENXIO;
3446 }
6d2010ae 3447 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
b0d623f7
A
3448
3449 lck_mtx_lock(&nlop->nlo_lock);
3450 while (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY) {
0a7de745 3451 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
b0d623f7 3452 break;
0a7de745 3453 }
b0d623f7
A
3454 nlop->nlo_flags |= NFS_LOCK_OWNER_WANT;
3455 msleep(nlop, &nlop->nlo_lock, slpflag, "nfs_lock_owner_set_busy", &ts);
6d2010ae 3456 slpflag = 0;
b0d623f7 3457 }
0a7de745 3458 if (!error) {
b0d623f7 3459 nlop->nlo_flags |= NFS_LOCK_OWNER_BUSY;
0a7de745 3460 }
b0d623f7
A
3461 lck_mtx_unlock(&nlop->nlo_lock);
3462
0a7de745 3463 return error;
b0d623f7
A
3464}
3465
3466/*
3467 * Clear the busy flag on a lock owner and wake up anyone waiting
3468 * to mark it busy.
3469 */
3470void
3471nfs_lock_owner_clear_busy(struct nfs_lock_owner *nlop)
3472{
3473 int wanted;
3474
3475 lck_mtx_lock(&nlop->nlo_lock);
0a7de745 3476 if (!(nlop->nlo_flags & NFS_LOCK_OWNER_BUSY)) {
b0d623f7 3477 panic("nfs_lock_owner_clear_busy");
0a7de745 3478 }
b0d623f7 3479 wanted = (nlop->nlo_flags & NFS_LOCK_OWNER_WANT);
0a7de745 3480 nlop->nlo_flags &= ~(NFS_LOCK_OWNER_BUSY | NFS_LOCK_OWNER_WANT);
b0d623f7 3481 lck_mtx_unlock(&nlop->nlo_lock);
0a7de745 3482 if (wanted) {
b0d623f7 3483 wakeup(nlop);
0a7de745 3484 }
b0d623f7
A
3485}
3486
3487/*
3488 * Insert a held lock into a lock owner's sorted list.
3489 * (flock locks are always inserted at the head of the list)
3490 */
3491void
3492nfs_lock_owner_insert_held_lock(struct nfs_lock_owner *nlop, struct nfs_file_lock *newnflp)
3493{
3494 struct nfs_file_lock *nflp;
3495
3496 /* insert new lock in lock owner's held lock list */
3497 lck_mtx_lock(&nlop->nlo_lock);
3498 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) {
3499 TAILQ_INSERT_HEAD(&nlop->nlo_locks, newnflp, nfl_lolink);
3500 } else {
3501 TAILQ_FOREACH(nflp, &nlop->nlo_locks, nfl_lolink) {
0a7de745 3502 if (newnflp->nfl_start < nflp->nfl_start) {
b0d623f7 3503 break;
0a7de745 3504 }
b0d623f7 3505 }
0a7de745 3506 if (nflp) {
b0d623f7 3507 TAILQ_INSERT_BEFORE(nflp, newnflp, nfl_lolink);
0a7de745 3508 } else {
b0d623f7 3509 TAILQ_INSERT_TAIL(&nlop->nlo_locks, newnflp, nfl_lolink);
0a7de745 3510 }
b0d623f7
A
3511 }
3512 lck_mtx_unlock(&nlop->nlo_lock);
3513}
3514
3515/*
3516 * Get a file lock structure for this lock owner.
3517 */
3518struct nfs_file_lock *
3519nfs_file_lock_alloc(struct nfs_lock_owner *nlop)
3520{
3521 struct nfs_file_lock *nflp = NULL;
3522
3523 lck_mtx_lock(&nlop->nlo_lock);
3524 if (!nlop->nlo_alock.nfl_owner) {
3525 nflp = &nlop->nlo_alock;
3526 nflp->nfl_owner = nlop;
3527 }
3528 lck_mtx_unlock(&nlop->nlo_lock);
3529 if (!nflp) {
3530 MALLOC(nflp, struct nfs_file_lock *, sizeof(struct nfs_file_lock), M_TEMP, M_WAITOK);
0a7de745
A
3531 if (!nflp) {
3532 return NULL;
3533 }
b0d623f7
A
3534 bzero(nflp, sizeof(*nflp));
3535 nflp->nfl_flags |= NFS_FILE_LOCK_ALLOC;
3536 nflp->nfl_owner = nlop;
3537 }
3538 nfs_lock_owner_ref(nlop);
0a7de745 3539 return nflp;
b0d623f7
A
3540}
3541
3542/*
3543 * destroy the given NFS file lock structure
3544 */
3545void
3546nfs_file_lock_destroy(struct nfs_file_lock *nflp)
3547{
3548 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3549
3550 if (nflp->nfl_flags & NFS_FILE_LOCK_ALLOC) {
3551 nflp->nfl_owner = NULL;
3552 FREE(nflp, M_TEMP);
3553 } else {
3554 lck_mtx_lock(&nlop->nlo_lock);
3e170ce0 3555 bzero(nflp, sizeof(*nflp));
b0d623f7
A
3556 lck_mtx_unlock(&nlop->nlo_lock);
3557 }
3558 nfs_lock_owner_rele(nlop);
3559}
3560
3561/*
3562 * Check if one file lock conflicts with another.
3563 * (nflp1 is the new lock. nflp2 is the existing lock.)
3564 */
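 /*
  * Locks conflict only if neither is dead, they are not the same owner's
  * locks of the same style, their byte ranges overlap, and at least one of
  * them is exclusive (F_WRLCK). E.g. two read locks over [0,99] never
  * conflict, while another owner's write lock over [50,149] conflicts with
  * either. "*willsplit" flags the case where our own lock of a different
  * type lands strictly inside an existing one and will have to be split.
  */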
3565int
3566nfs_file_lock_conflict(struct nfs_file_lock *nflp1, struct nfs_file_lock *nflp2, int *willsplit)
3567{
3568 /* no conflict if lock is dead */
0a7de745
A
3569 if ((nflp1->nfl_flags & NFS_FILE_LOCK_DEAD) || (nflp2->nfl_flags & NFS_FILE_LOCK_DEAD)) {
3570 return 0;
3571 }
b0d623f7
A
3572 /* no conflict if it's ours - unless the lock style doesn't match */
3573 if ((nflp1->nfl_owner == nflp2->nfl_owner) &&
3574 ((nflp1->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == (nflp2->nfl_flags & NFS_FILE_LOCK_STYLE_MASK))) {
3575 if (willsplit && (nflp1->nfl_type != nflp2->nfl_type) &&
3576 (nflp1->nfl_start > nflp2->nfl_start) &&
0a7de745 3577 (nflp1->nfl_end < nflp2->nfl_end)) {
b0d623f7 3578 *willsplit = 1;
0a7de745
A
3579 }
3580 return 0;
b0d623f7
A
3581 }
3582 /* no conflict if ranges don't overlap */
0a7de745
A
3583 if ((nflp1->nfl_start > nflp2->nfl_end) || (nflp1->nfl_end < nflp2->nfl_start)) {
3584 return 0;
3585 }
b0d623f7 3586 /* no conflict if neither lock is exclusive */
0a7de745
A
3587 if ((nflp1->nfl_type != F_WRLCK) && (nflp2->nfl_type != F_WRLCK)) {
3588 return 0;
3589 }
b0d623f7 3590 /* conflict */
0a7de745 3591 return 1;
b0d623f7
A
3592}
3593
cb323159 3594#if CONFIG_NFS4
b0d623f7
A
3595/*
3596 * Send an NFSv4 LOCK RPC to the server.
3597 */
3598int
6d2010ae 3599nfs4_setlock_rpc(
b0d623f7
A
3600 nfsnode_t np,
3601 struct nfs_open_file *nofp,
3602 struct nfs_file_lock *nflp,
3603 int reclaim,
6d2010ae 3604 int flags,
b0d623f7
A
3605 thread_t thd,
3606 kauth_cred_t cred)
3607{
3608 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3609 struct nfsmount *nmp;
3610 struct nfsm_chain nmreq, nmrep;
3611 uint64_t xid;
3612 uint32_t locktype;
3613 int error = 0, lockerror = ENOENT, newlocker, numops, status;
6d2010ae 3614 struct nfsreq_secinfo_args si;
b0d623f7
A
3615
3616 nmp = NFSTONMP(np);
0a7de745
A
3617 if (nfs_mount_gone(nmp)) {
3618 return ENXIO;
3619 }
3620 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
3621 return EINVAL;
3622 }
b0d623f7
A
3623
3624 newlocker = (nlop->nlo_stategenid != nmp->nm_stategenid);
3625 locktype = (nflp->nfl_flags & NFS_FILE_LOCK_WAIT) ?
0a7de745
A
3626 ((nflp->nfl_type == F_WRLCK) ?
3627 NFS_LOCK_TYPE_WRITEW :
3628 NFS_LOCK_TYPE_READW) :
3629 ((nflp->nfl_type == F_WRLCK) ?
3630 NFS_LOCK_TYPE_WRITE :
3631 NFS_LOCK_TYPE_READ);
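 /*
  * F_WRLCK maps to NFS_LOCK_TYPE_WRITE and F_RDLCK to NFS_LOCK_TYPE_READ;
  * the WRITEW/READW variants are used when the caller is willing to wait
  * (NFS_FILE_LOCK_WAIT), hinting to the server that this is a blocking
  * lock request.
  */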
b0d623f7
A
3632 if (newlocker) {
3633 error = nfs_open_file_set_busy(nofp, thd);
0a7de745
A
3634 if (error) {
3635 return error;
3636 }
b0d623f7
A
3637 error = nfs_open_owner_set_busy(nofp->nof_owner, thd);
3638 if (error) {
3639 nfs_open_file_clear_busy(nofp);
0a7de745 3640 return error;
b0d623f7
A
3641 }
3642 if (!nlop->nlo_open_owner) {
3643 nfs_open_owner_ref(nofp->nof_owner);
3644 nlop->nlo_open_owner = nofp->nof_owner;
3645 }
3646 }
3647 error = nfs_lock_owner_set_busy(nlop, thd);
3648 if (error) {
3649 if (newlocker) {
3650 nfs_open_owner_clear_busy(nofp->nof_owner);
3651 nfs_open_file_clear_busy(nofp);
3652 }
0a7de745 3653 return error;
b0d623f7
A
3654 }
3655
6d2010ae 3656 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
b0d623f7
A
3657 nfsm_chain_null(&nmreq);
3658 nfsm_chain_null(&nmrep);
3659
3660 // PUTFH, GETATTR, LOCK
3661 numops = 3;
3662 nfsm_chain_build_alloc_init(error, &nmreq, 33 * NFSX_UNSIGNED);
3e170ce0 3663 nfsm_chain_add_compound_header(error, &nmreq, "lock", nmp->nm_minor_vers, numops);
b0d623f7
A
3664 numops--;
3665 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3666 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3667 numops--;
3668 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 3669 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
b0d623f7
A
3670 numops--;
3671 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCK);
3672 nfsm_chain_add_32(error, &nmreq, locktype);
3673 nfsm_chain_add_32(error, &nmreq, reclaim);
3674 nfsm_chain_add_64(error, &nmreq, nflp->nfl_start);
3675 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(nflp->nfl_start, nflp->nfl_end));
3676 nfsm_chain_add_32(error, &nmreq, newlocker);
3677 if (newlocker) {
3678 nfsm_chain_add_32(error, &nmreq, nofp->nof_owner->noo_seqid);
3679 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
3680 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3681 nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
3682 } else {
3683 nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
3684 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3685 }
3686 nfsm_chain_build_done(error, &nmreq);
3687 nfsm_assert(error, (numops == 0), EPROTO);
3688 nfsmout_if(error);
3689
0a7de745 3690 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
b0d623f7 3691
0a7de745 3692 if ((lockerror = nfs_node_lock(np))) {
b0d623f7 3693 error = lockerror;
0a7de745 3694 }
b0d623f7
A
3695 nfsm_chain_skip_tag(error, &nmrep);
3696 nfsm_chain_get_32(error, &nmrep, numops);
3697 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3698 nfsmout_if(error);
3699 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 3700 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
b0d623f7
A
3701 nfsmout_if(error);
3702 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCK);
3703 nfs_owner_seqid_increment(newlocker ? nofp->nof_owner : NULL, nlop, error);
3704 nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
3705
3706 /* Update the lock owner's stategenid once it appears the server has state for it. */
3707 /* We determine this by noting the request was successful (we got a stateid). */
0a7de745 3708 if (newlocker && !error) {
b0d623f7 3709 nlop->nlo_stategenid = nmp->nm_stategenid;
0a7de745 3710 }
b0d623f7 3711nfsmout:
0a7de745 3712 if (!lockerror) {
b0d623f7 3713 nfs_node_unlock(np);
0a7de745 3714 }
b0d623f7
A
3715 nfs_lock_owner_clear_busy(nlop);
3716 if (newlocker) {
3717 nfs_open_owner_clear_busy(nofp->nof_owner);
3718 nfs_open_file_clear_busy(nofp);
3719 }
3720 nfsm_chain_cleanup(&nmreq);
3721 nfsm_chain_cleanup(&nmrep);
0a7de745 3722 return error;
b0d623f7
A
3723}
3724
3725/*
3726 * Send an NFSv4 LOCKU RPC to the server.
3727 */
3728int
3729nfs4_unlock_rpc(
3730 nfsnode_t np,
3731 struct nfs_lock_owner *nlop,
3732 int type,
3733 uint64_t start,
3734 uint64_t end,
6d2010ae
A
3735 int flags,
3736 thread_t thd,
3737 kauth_cred_t cred)
b0d623f7
A
3738{
3739 struct nfsmount *nmp;
3740 struct nfsm_chain nmreq, nmrep;
3741 uint64_t xid;
3742 int error = 0, lockerror = ENOENT, numops, status;
6d2010ae 3743 struct nfsreq_secinfo_args si;
b0d623f7
A
3744
3745 nmp = NFSTONMP(np);
0a7de745
A
3746 if (nfs_mount_gone(nmp)) {
3747 return ENXIO;
3748 }
3749 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
3750 return EINVAL;
3751 }
b0d623f7 3752
6d2010ae 3753 error = nfs_lock_owner_set_busy(nlop, NULL);
0a7de745
A
3754 if (error) {
3755 return error;
3756 }
b0d623f7 3757
6d2010ae 3758 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
b0d623f7
A
3759 nfsm_chain_null(&nmreq);
3760 nfsm_chain_null(&nmrep);
3761
3762 // PUTFH, GETATTR, LOCKU
3763 numops = 3;
3764 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
3e170ce0 3765 nfsm_chain_add_compound_header(error, &nmreq, "unlock", nmp->nm_minor_vers, numops);
b0d623f7
A
3766 numops--;
3767 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3768 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3769 numops--;
3770 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 3771 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
b0d623f7
A
3772 numops--;
3773 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKU);
3774 nfsm_chain_add_32(error, &nmreq, (type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
3775 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3776 nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
3777 nfsm_chain_add_64(error, &nmreq, start);
3778 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
3779 nfsm_chain_build_done(error, &nmreq);
3780 nfsm_assert(error, (numops == 0), EPROTO);
3781 nfsmout_if(error);
3782
0a7de745 3783 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
b0d623f7 3784
0a7de745 3785 if ((lockerror = nfs_node_lock(np))) {
b0d623f7 3786 error = lockerror;
0a7de745 3787 }
b0d623f7
A
3788 nfsm_chain_skip_tag(error, &nmrep);
3789 nfsm_chain_get_32(error, &nmrep, numops);
3790 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3791 nfsmout_if(error);
3792 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 3793 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
b0d623f7
A
3794 nfsmout_if(error);
3795 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKU);
3796 nfs_owner_seqid_increment(NULL, nlop, error);
3797 nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
3798nfsmout:
0a7de745 3799 if (!lockerror) {
b0d623f7 3800 nfs_node_unlock(np);
0a7de745 3801 }
b0d623f7
A
3802 nfs_lock_owner_clear_busy(nlop);
3803 nfsm_chain_cleanup(&nmreq);
3804 nfsm_chain_cleanup(&nmrep);
0a7de745 3805 return error;
b0d623f7
A
3806}
3807
3808/*
6d2010ae 3809 * Send an NFSv4 LOCKT RPC to the server.
b0d623f7
A
3810 */
3811int
6d2010ae 3812nfs4_getlock_rpc(
b0d623f7
A
3813 nfsnode_t np,
3814 struct nfs_lock_owner *nlop,
3815 struct flock *fl,
3816 uint64_t start,
3817 uint64_t end,
3818 vfs_context_t ctx)
3819{
3820 struct nfsmount *nmp;
b0d623f7
A
3821 struct nfsm_chain nmreq, nmrep;
3822 uint64_t xid, val64 = 0;
3823 uint32_t val = 0;
6d2010ae
A
3824 int error = 0, lockerror, numops, status;
3825 struct nfsreq_secinfo_args si;
b0d623f7
A
3826
3827 nmp = NFSTONMP(np);
0a7de745
A
3828 if (nfs_mount_gone(nmp)) {
3829 return ENXIO;
3830 }
3831 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
3832 return EINVAL;
3833 }
b0d623f7 3834
6d2010ae
A
3835 lockerror = ENOENT;
3836 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
b0d623f7
A
3837 nfsm_chain_null(&nmreq);
3838 nfsm_chain_null(&nmrep);
3839
3840 // PUTFH, GETATTR, LOCKT
3841 numops = 3;
3842 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
3e170ce0 3843 nfsm_chain_add_compound_header(error, &nmreq, "locktest", nmp->nm_minor_vers, numops);
b0d623f7
A
3844 numops--;
3845 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3846 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3847 numops--;
3848 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 3849 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
b0d623f7
A
3850 numops--;
3851 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKT);
3852 nfsm_chain_add_32(error, &nmreq, (fl->l_type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
3853 nfsm_chain_add_64(error, &nmreq, start);
3854 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
3855 nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
3856 nfsm_chain_build_done(error, &nmreq);
3857 nfsm_assert(error, (numops == 0), EPROTO);
3858 nfsmout_if(error);
3859
6d2010ae 3860 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
b0d623f7 3861
0a7de745 3862 if ((lockerror = nfs_node_lock(np))) {
b0d623f7 3863 error = lockerror;
0a7de745 3864 }
b0d623f7
A
3865 nfsm_chain_skip_tag(error, &nmrep);
3866 nfsm_chain_get_32(error, &nmrep, numops);
3867 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3868 nfsmout_if(error);
3869 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 3870 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
b0d623f7
A
3871 nfsmout_if(error);
3872 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKT);
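 /*
  * For LOCKT, NFSERR_DENIED is the "conflict found" answer rather than a
  * failure: the reply carries the conflicting lock's offset, length, and
  * type, which are copied into the caller's struct flock below (a length
  * of UINT64_MAX means to end-of-file, i.e. l_len = 0).
  */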
3873 if (error == NFSERR_DENIED) {
3874 error = 0;
3875 nfsm_chain_get_64(error, &nmrep, fl->l_start);
3876 nfsm_chain_get_64(error, &nmrep, val64);
3877 fl->l_len = (val64 == UINT64_MAX) ? 0 : val64;
3878 nfsm_chain_get_32(error, &nmrep, val);
3879 fl->l_type = (val == NFS_LOCK_TYPE_WRITE) ? F_WRLCK : F_RDLCK;
3880 fl->l_pid = 0;
3881 fl->l_whence = SEEK_SET;
3882 } else if (!error) {
3883 fl->l_type = F_UNLCK;
3884 }
3885nfsmout:
0a7de745 3886 if (!lockerror) {
b0d623f7 3887 nfs_node_unlock(np);
0a7de745 3888 }
b0d623f7
A
3889 nfsm_chain_cleanup(&nmreq);
3890 nfsm_chain_cleanup(&nmrep);
0a7de745 3891 return error;
b0d623f7 3892}
cb323159 3893#endif /* CONFIG_NFS4 */
6d2010ae
A
3894
3895/*
3896 * Check for any conflicts with the given lock.
3897 *
3898 * Checking for a lock doesn't require the file to be opened.
3899 * So we skip all the open owner, open file, lock owner work
3900 * and just check for a conflicting lock.
3901 */
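 /*
  * Three steps below: scan the locally known locks for an overlapping
  * conflict, short-circuit to "no conflict" if we hold a write delegation
  * (the server can have no other locks then), and otherwise ask the server
  * through the version-specific getlock RPC (the NFSv4 LOCKT request above).
  */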
3902int
3903nfs_advlock_getlock(
3904 nfsnode_t np,
3905 struct nfs_lock_owner *nlop,
3906 struct flock *fl,
3907 uint64_t start,
3908 uint64_t end,
3909 vfs_context_t ctx)
3910{
3911 struct nfsmount *nmp;
3912 struct nfs_file_lock *nflp;
3913 int error = 0, answered = 0;
3914
3915 nmp = NFSTONMP(np);
0a7de745
A
3916 if (nfs_mount_gone(nmp)) {
3917 return ENXIO;
3918 }
6d2010ae
A
3919
3920restart:
0a7de745
A
3921 if ((error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx)))) {
3922 return error;
3923 }
6d2010ae
A
3924
3925 lck_mtx_lock(&np->n_openlock);
3926 /* scan currently held locks for conflict */
3927 TAILQ_FOREACH(nflp, &np->n_locks, nfl_link) {
0a7de745 3928 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
6d2010ae 3929 continue;
0a7de745 3930 }
6d2010ae 3931 if ((start <= nflp->nfl_end) && (end >= nflp->nfl_start) &&
0a7de745 3932 ((fl->l_type == F_WRLCK) || (nflp->nfl_type == F_WRLCK))) {
6d2010ae 3933 break;
0a7de745 3934 }
6d2010ae
A
3935 }
3936 if (nflp) {
3937 /* found a conflicting lock */
3938 fl->l_type = nflp->nfl_type;
3939 fl->l_pid = (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_FLOCK) ? -1 : nflp->nfl_owner->nlo_pid;
3940 fl->l_start = nflp->nfl_start;
3941 fl->l_len = NFS_FLOCK_LENGTH(nflp->nfl_start, nflp->nfl_end);
3942 fl->l_whence = SEEK_SET;
3943 answered = 1;
3944 } else if ((np->n_openflags & N_DELEG_WRITE) && !(np->n_openflags & N_DELEG_RETURN)) {
3945 /*
3946 * If we have a write delegation, we know there can't be other
3947 * locks on the server. So the answer is no conflicting lock found.
3948 */
3949 fl->l_type = F_UNLCK;
3950 answered = 1;
3951 }
3952 lck_mtx_unlock(&np->n_openlock);
3953 if (answered) {
3954 nfs_mount_state_in_use_end(nmp, 0);
0a7de745 3955 return 0;
6d2010ae
A
3956 }
3957
3958 /* no conflict found locally, so ask the server */
3959 error = nmp->nm_funcs->nf_getlock_rpc(np, nlop, fl, start, end, ctx);
3960
0a7de745 3961 if (nfs_mount_state_in_use_end(nmp, error)) {
6d2010ae 3962 goto restart;
0a7de745
A
3963 }
3964 return error;
6d2010ae
A
3965}
3966
b0d623f7
A
3967/*
3968 * Acquire a file lock for the given range.
3969 *
3970 * Add the lock (request) to the lock queue.
3971 * Scan the lock queue for any conflicting locks.
3972 * If a conflict is found, block or return an error.
3973 * Once end of queue is reached, send request to the server.
3974 * If the server grants the lock, scan the lock queue and
3975 * update any existing locks. Then (optionally) scan the
3976 * queue again to coalesce any locks adjacent to the new one.
3977 */
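 /*
  * E.g. a blocking F_SETLKW request that conflicts with another owner's
  * lock is queued with NFS_FILE_LOCK_BLOCKED, sleeps until the holder drops
  * or shrinks the conflicting range, and only then is sent to the server;
  * a non-blocking F_SETLK request fails immediately with EAGAIN instead.
  */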
3978int
6d2010ae 3979nfs_advlock_setlock(
b0d623f7
A
3980 nfsnode_t np,
3981 struct nfs_open_file *nofp,
3982 struct nfs_lock_owner *nlop,
3983 int op,
3984 uint64_t start,
3985 uint64_t end,
3986 int style,
3987 short type,
3988 vfs_context_t ctx)
3989{
3990 struct nfsmount *nmp;
3991 struct nfs_file_lock *newnflp, *nflp, *nflp2 = NULL, *nextnflp, *flocknflp = NULL;
3992 struct nfs_file_lock *coalnflp;
3993 int error = 0, error2, willsplit = 0, delay, slpflag, busy = 0, inuse = 0, restart, inqueue = 0;
cb323159 3994 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
b0d623f7
A
3995
3996 nmp = NFSTONMP(np);
0a7de745
A
3997 if (nfs_mount_gone(nmp)) {
3998 return ENXIO;
3999 }
6d2010ae
A
4000 slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
4001
0a7de745
A
4002 if ((type != F_RDLCK) && (type != F_WRLCK)) {
4003 return EINVAL;
4004 }
b0d623f7
A
4005
4006 /* allocate a new lock */
4007 newnflp = nfs_file_lock_alloc(nlop);
0a7de745
A
4008 if (!newnflp) {
4009 return ENOLCK;
4010 }
b0d623f7
A
4011 newnflp->nfl_start = start;
4012 newnflp->nfl_end = end;
4013 newnflp->nfl_type = type;
0a7de745 4014 if (op == F_SETLKW) {
b0d623f7 4015 newnflp->nfl_flags |= NFS_FILE_LOCK_WAIT;
0a7de745 4016 }
b0d623f7
A
4017 newnflp->nfl_flags |= style;
4018 newnflp->nfl_flags |= NFS_FILE_LOCK_BLOCKED;
4019
4020 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && (type == F_WRLCK)) {
4021 /*
4022 * For exclusive flock-style locks, if we block waiting for the
4023 * lock, we need to first release any currently held shared
4024 * flock-style lock. So, the first thing we do is check if we
4025 * have a shared flock-style lock.
4026 */
4027 nflp = TAILQ_FIRST(&nlop->nlo_locks);
0a7de745 4028 if (nflp && ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_FLOCK)) {
b0d623f7 4029 nflp = NULL;
0a7de745
A
4030 }
4031 if (nflp && (nflp->nfl_type != F_RDLCK)) {
b0d623f7 4032 nflp = NULL;
0a7de745 4033 }
b0d623f7
A
4034 flocknflp = nflp;
4035 }
4036
4037restart:
4038 restart = 0;
6d2010ae 4039 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
0a7de745 4040 if (error) {
b0d623f7 4041 goto error_out;
0a7de745 4042 }
b0d623f7 4043 inuse = 1;
6d2010ae
A
4044 if (np->n_flag & NREVOKE) {
4045 error = EIO;
4046 nfs_mount_state_in_use_end(nmp, 0);
4047 inuse = 0;
4048 goto error_out;
4049 }
cb323159 4050#if CONFIG_NFS4
b0d623f7
A
4051 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
4052 nfs_mount_state_in_use_end(nmp, 0);
4053 inuse = 0;
6d2010ae 4054 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
0a7de745 4055 if (error) {
6d2010ae 4056 goto error_out;
0a7de745 4057 }
b0d623f7
A
4058 goto restart;
4059 }
cb323159 4060#endif
b0d623f7
A
4061
4062 lck_mtx_lock(&np->n_openlock);
4063 if (!inqueue) {
4064 /* insert new lock at beginning of list */
4065 TAILQ_INSERT_HEAD(&np->n_locks, newnflp, nfl_link);
4066 inqueue = 1;
4067 }
4068
4069 /* scan current list of locks (held and pending) for conflicts */
6d2010ae
A
4070 for (nflp = TAILQ_NEXT(newnflp, nfl_link); nflp; nflp = nextnflp) {
4071 nextnflp = TAILQ_NEXT(nflp, nfl_link);
0a7de745 4072 if (!nfs_file_lock_conflict(newnflp, nflp, &willsplit)) {
b0d623f7 4073 continue;
0a7de745 4074 }
b0d623f7
A
4075 /* Conflict */
4076 if (!(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
4077 error = EAGAIN;
4078 break;
4079 }
4080 /* Block until this lock is no longer held. */
4081 if (nflp->nfl_blockcnt == UINT_MAX) {
4082 error = ENOLCK;
4083 break;
4084 }
4085 nflp->nfl_blockcnt++;
4086 do {
4087 if (flocknflp) {
4088 /* release any currently held shared lock before sleeping */
4089 lck_mtx_unlock(&np->n_openlock);
4090 nfs_mount_state_in_use_end(nmp, 0);
4091 inuse = 0;
6d2010ae 4092 error = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
b0d623f7 4093 flocknflp = NULL;
0a7de745 4094 if (!error) {
6d2010ae 4095 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
0a7de745 4096 }
b0d623f7
A
4097 if (error) {
4098 lck_mtx_lock(&np->n_openlock);
4099 break;
4100 }
4101 inuse = 1;
4102 lck_mtx_lock(&np->n_openlock);
4103 /* no need to block/sleep if the conflict is gone */
0a7de745 4104 if (!nfs_file_lock_conflict(newnflp, nflp, NULL)) {
b0d623f7 4105 break;
0a7de745 4106 }
b0d623f7 4107 }
6d2010ae
A
4108 msleep(nflp, &np->n_openlock, slpflag, "nfs_advlock_setlock_blocked", &ts);
4109 slpflag = 0;
b0d623f7
A
4110 error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
4111 if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
4112 /* looks like we have a recover pending... restart */
4113 restart = 1;
4114 lck_mtx_unlock(&np->n_openlock);
4115 nfs_mount_state_in_use_end(nmp, 0);
4116 inuse = 0;
4117 lck_mtx_lock(&np->n_openlock);
4118 break;
4119 }
0a7de745 4120 if (!error && (np->n_flag & NREVOKE)) {
6d2010ae 4121 error = EIO;
0a7de745 4122 }
b0d623f7
A
4123 } while (!error && nfs_file_lock_conflict(newnflp, nflp, NULL));
4124 nflp->nfl_blockcnt--;
4125 if ((nflp->nfl_flags & NFS_FILE_LOCK_DEAD) && !nflp->nfl_blockcnt) {
4126 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4127 nfs_file_lock_destroy(nflp);
4128 }
0a7de745 4129 if (error || restart) {
b0d623f7 4130 break;
0a7de745 4131 }
6d2010ae
A
4132 /* We have released n_openlock and we can't trust that nextnflp is still valid. */
4133 /* So, start this lock-scanning loop over from where it started. */
4134 nextnflp = TAILQ_NEXT(newnflp, nfl_link);
b0d623f7
A
4135 }
4136 lck_mtx_unlock(&np->n_openlock);
0a7de745 4137 if (restart) {
b0d623f7 4138 goto restart;
0a7de745
A
4139 }
4140 if (error) {
b0d623f7 4141 goto error_out;
0a7de745 4142 }
b0d623f7
A
4143
4144 if (willsplit) {
4145 /*
4146 * It looks like this operation is splitting a lock.
4147 * We allocate a new lock now so we don't have to worry
4148 * about the allocation failing after we've updated some state.
4149 */
4150 nflp2 = nfs_file_lock_alloc(nlop);
4151 if (!nflp2) {
4152 error = ENOLCK;
4153 goto error_out;
4154 }
4155 }
4156
4157 /* once scan for local conflicts is clear, send request to server */
0a7de745 4158 if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) {
b0d623f7 4159 goto error_out;
0a7de745 4160 }
b0d623f7
A
4161 busy = 1;
4162 delay = 0;
4163 do {
cb323159 4164#if CONFIG_NFS4
6d2010ae
A
4165 /* do we have a delegation? (that we're not returning?) */
4166 if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN)) {
4167 if (np->n_openflags & N_DELEG_WRITE) {
4168 /* with a write delegation, just take the lock delegated */
4169 newnflp->nfl_flags |= NFS_FILE_LOCK_DELEGATED;
4170 error = 0;
4171 /* make sure the lock owner knows its open owner */
4172 if (!nlop->nlo_open_owner) {
4173 nfs_open_owner_ref(nofp->nof_owner);
4174 nlop->nlo_open_owner = nofp->nof_owner;
4175 }
4176 break;
4177 } else {
4178 /*
4179 * If we don't have any non-delegated opens but we do have
4180 * delegated opens, then we need to first claim the delegated
4181 * opens so that the lock request on the server can be associated
4182 * with an open it knows about.
4183 */
4184 if ((!nofp->nof_rw_drw && !nofp->nof_w_drw && !nofp->nof_r_drw &&
0a7de745
A
4185 !nofp->nof_rw_dw && !nofp->nof_w_dw && !nofp->nof_r_dw &&
4186 !nofp->nof_rw && !nofp->nof_w && !nofp->nof_r) &&
6d2010ae 4187 (nofp->nof_d_rw_drw || nofp->nof_d_w_drw || nofp->nof_d_r_drw ||
0a7de745
A
4188 nofp->nof_d_rw_dw || nofp->nof_d_w_dw || nofp->nof_d_r_dw ||
4189 nofp->nof_d_rw || nofp->nof_d_w || nofp->nof_d_r)) {
6d2010ae 4190 error = nfs4_claim_delegated_state_for_open_file(nofp, 0);
0a7de745 4191 if (error) {
6d2010ae 4192 break;
0a7de745 4193 }
6d2010ae
A
4194 }
4195 }
4196 }
cb323159 4197#endif
0a7de745 4198 if (np->n_flag & NREVOKE) {
6d2010ae 4199 error = EIO;
0a7de745
A
4200 }
4201 if (!error) {
c3c9b80d
A
4202 if (busy) {
4203 nfs_open_state_clear_busy(np);
4204 busy = 0;
4205 }
6d2010ae 4206 error = nmp->nm_funcs->nf_setlock_rpc(np, nofp, newnflp, 0, 0, vfs_context_thread(ctx), vfs_context_ucred(ctx));
c3c9b80d
A
4207 if (!busy && !nfs_open_state_set_busy(np, vfs_context_thread(ctx))) {
4208 busy = 1;
4209 }
0a7de745
A
4210 }
4211 if (!error || ((error != NFSERR_DENIED) && (error != NFSERR_GRACE))) {
b0d623f7 4212 break;
0a7de745 4213 }
b0d623f7 4214 /* request was denied due to either conflict or grace period */
6d2010ae 4215 if ((error == NFSERR_DENIED) && !(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
b0d623f7
A
4216 error = EAGAIN;
4217 break;
4218 }
4219 if (flocknflp) {
4220 /* release any currently held shared lock before sleeping */
4221 nfs_open_state_clear_busy(np);
4222 busy = 0;
f427ee49
A
4223 if (inuse) {
4224 nfs_mount_state_in_use_end(nmp, 0);
4225 inuse = 0;
4226 }
6d2010ae 4227 error2 = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
b0d623f7 4228 flocknflp = NULL;
0a7de745 4229 if (!error2) {
6d2010ae 4230 error2 = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
0a7de745 4231 }
b0d623f7
A
4232 if (!error2) {
4233 inuse = 1;
6d2010ae 4234 error2 = nfs_open_state_set_busy(np, vfs_context_thread(ctx));
b0d623f7
A
4235 }
4236 if (error2) {
4237 error = error2;
4238 break;
4239 }
4240 busy = 1;
4241 }
6d2010ae
A
4242 /*
4243 * Wait a little bit and send the request again.
4244 * Except for retries of a blocked v2/v3 request, where we've already waited a bit.
4245 */
4246 if ((nmp->nm_vers >= NFS_VER4) || (error == NFSERR_GRACE)) {
0a7de745 4247 if (error == NFSERR_GRACE) {
6d2010ae 4248 delay = 4;
0a7de745
A
4249 }
4250 if (delay < 4) {
6d2010ae 4251 delay++;
0a7de745
A
4252 }
4253 tsleep(newnflp, slpflag, "nfs_advlock_setlock_delay", delay * (hz / 2));
6d2010ae
A
4254 slpflag = 0;
4255 }
b0d623f7
A
4256 error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
4257 if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
4258 /* looks like we have a recover pending... restart */
4259 nfs_open_state_clear_busy(np);
4260 busy = 0;
f427ee49
A
4261 if (inuse) {
4262 nfs_mount_state_in_use_end(nmp, 0);
4263 inuse = 0;
4264 }
b0d623f7
A
4265 goto restart;
4266 }
0a7de745 4267 if (!error && (np->n_flag & NREVOKE)) {
6d2010ae 4268 error = EIO;
0a7de745 4269 }
b0d623f7
A
4270 } while (!error);
4271
4272error_out:
4273 if (nfs_mount_state_error_should_restart(error)) {
4274 /* looks like we need to restart this operation */
4275 if (busy) {
4276 nfs_open_state_clear_busy(np);
4277 busy = 0;
4278 }
4279 if (inuse) {
4280 nfs_mount_state_in_use_end(nmp, error);
4281 inuse = 0;
4282 }
4283 goto restart;
4284 }
4285 lck_mtx_lock(&np->n_openlock);
4286 newnflp->nfl_flags &= ~NFS_FILE_LOCK_BLOCKED;
4287 if (error) {
4288 newnflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4289 if (newnflp->nfl_blockcnt) {
4290 /* wake up anyone blocked on this lock */
4291 wakeup(newnflp);
4292 } else {
4293 /* remove newnflp from lock list and destroy */
0a7de745 4294 if (inqueue) {
316670eb 4295 TAILQ_REMOVE(&np->n_locks, newnflp, nfl_link);
0a7de745 4296 }
b0d623f7
A
4297 nfs_file_lock_destroy(newnflp);
4298 }
4299 lck_mtx_unlock(&np->n_openlock);
0a7de745 4300 if (busy) {
b0d623f7 4301 nfs_open_state_clear_busy(np);
0a7de745
A
4302 }
4303 if (inuse) {
b0d623f7 4304 nfs_mount_state_in_use_end(nmp, error);
0a7de745
A
4305 }
4306 if (nflp2) {
b0d623f7 4307 nfs_file_lock_destroy(nflp2);
0a7de745
A
4308 }
4309 return error;
b0d623f7
A
4310 }
4311
4312 /* server granted the lock */
4313
4314 /*
4315 * Scan for locks to update.
4316 *
4317 * Locks completely covered are killed.
4318 * At most two locks may need to be clipped.
4319 * It's possible that a single lock may need to be split.
4320 */
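 /*
  * Four cases per overlapping lock of ours: a fully covered lock is marked
  * DEAD and dropped; if the new range lands strictly inside an old lock,
  * the old lock is split using the preallocated nflp2 (e.g. old [0,99] with
  * new [40,59] becomes [0,39] and [60,99]); otherwise just the overlapping
  * end or start of the old lock is clipped off.
  */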
4321 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
0a7de745 4322 if (nflp == newnflp) {
b0d623f7 4323 continue;
0a7de745
A
4324 }
4325 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
b0d623f7 4326 continue;
0a7de745
A
4327 }
4328 if (nflp->nfl_owner != nlop) {
b0d623f7 4329 continue;
0a7de745
A
4330 }
4331 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK)) {
b0d623f7 4332 continue;
0a7de745
A
4333 }
4334 if ((newnflp->nfl_start > nflp->nfl_end) || (newnflp->nfl_end < nflp->nfl_start)) {
b0d623f7 4335 continue;
0a7de745 4336 }
b0d623f7
A
4337 /* here's one to update */
4338 if ((newnflp->nfl_start <= nflp->nfl_start) && (newnflp->nfl_end >= nflp->nfl_end)) {
4339 /* The entire lock is being replaced. */
4340 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4341 lck_mtx_lock(&nlop->nlo_lock);
4342 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4343 lck_mtx_unlock(&nlop->nlo_lock);
4344 /* lock will be destroyed below, if no waiters */
4345 } else if ((newnflp->nfl_start > nflp->nfl_start) && (newnflp->nfl_end < nflp->nfl_end)) {
4346 /* We're replacing a range in the middle of a lock. */
4347 /* The current lock will be split into two locks. */
4348 /* Update locks and insert new lock after current lock. */
0a7de745 4349 nflp2->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK | NFS_FILE_LOCK_DELEGATED));
b0d623f7
A
4350 nflp2->nfl_type = nflp->nfl_type;
4351 nflp2->nfl_start = newnflp->nfl_end + 1;
4352 nflp2->nfl_end = nflp->nfl_end;
4353 nflp->nfl_end = newnflp->nfl_start - 1;
4354 TAILQ_INSERT_AFTER(&np->n_locks, nflp, nflp2, nfl_link);
4355 nfs_lock_owner_insert_held_lock(nlop, nflp2);
4356 nextnflp = nflp2;
4357 nflp2 = NULL;
4358 } else if (newnflp->nfl_start > nflp->nfl_start) {
4359 /* We're replacing the end of a lock. */
4360 nflp->nfl_end = newnflp->nfl_start - 1;
4361 } else if (newnflp->nfl_end < nflp->nfl_end) {
4362 /* We're replacing the start of a lock. */
4363 nflp->nfl_start = newnflp->nfl_end + 1;
4364 }
4365 if (nflp->nfl_blockcnt) {
4366 /* wake up anyone blocked on this lock */
4367 wakeup(nflp);
4368 } else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
4369 /* remove nflp from lock list and destroy */
4370 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4371 nfs_file_lock_destroy(nflp);
4372 }
4373 }
4374
4375 nfs_lock_owner_insert_held_lock(nlop, newnflp);
4376
4377 /*
4378 * POSIX locks should be coalesced when possible.
4379 */
4380 if ((style == NFS_FILE_LOCK_STYLE_POSIX) && (nofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)) {
4381 /*
4382 * Walk through the lock queue and check each of our held locks with
4383 * the previous and next locks in the lock owner's "held lock list".
4384 * If the two locks can be coalesced, we merge the current lock into
4385 * the other (previous or next) lock. Merging this way makes sure that
4386 * lock ranges are always merged forward in the lock queue. This is
4387 * important because anyone blocked on the lock being "merged away"
4388 * will still need to block on that range and it will simply continue
4389 * checking locks that are further down the list.
4390 */
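 /*
  * E.g. locking [0,49] and then [50,99] with the same type leaves two
  * adjacent entries; the pass below folds them into a single [0,99] lock,
  * with the earlier entry absorbing the later one.
  */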
4391 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
0a7de745 4392 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
b0d623f7 4393 continue;
0a7de745
A
4394 }
4395 if (nflp->nfl_owner != nlop) {
b0d623f7 4396 continue;
0a7de745
A
4397 }
4398 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_POSIX) {
b0d623f7 4399 continue;
0a7de745 4400 }
b0d623f7
A
4401 if (((coalnflp = TAILQ_PREV(nflp, nfs_file_lock_queue, nfl_lolink))) &&
4402 ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
4403 (coalnflp->nfl_type == nflp->nfl_type) &&
4404 (coalnflp->nfl_end == (nflp->nfl_start - 1))) {
4405 coalnflp->nfl_end = nflp->nfl_end;
4406 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4407 lck_mtx_lock(&nlop->nlo_lock);
4408 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4409 lck_mtx_unlock(&nlop->nlo_lock);
4410 } else if (((coalnflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
4411 ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
4412 (coalnflp->nfl_type == nflp->nfl_type) &&
4413 (coalnflp->nfl_start == (nflp->nfl_end + 1))) {
4414 coalnflp->nfl_start = nflp->nfl_start;
4415 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4416 lck_mtx_lock(&nlop->nlo_lock);
4417 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4418 lck_mtx_unlock(&nlop->nlo_lock);
4419 }
0a7de745 4420 if (!(nflp->nfl_flags & NFS_FILE_LOCK_DEAD)) {
b0d623f7 4421 continue;
0a7de745 4422 }
b0d623f7
A
4423 if (nflp->nfl_blockcnt) {
4424 /* wake up anyone blocked on this lock */
4425 wakeup(nflp);
4426 } else {
4427 /* remove nflp from lock list and destroy */
4428 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4429 nfs_file_lock_destroy(nflp);
4430 }
4431 }
4432 }
4433
4434 lck_mtx_unlock(&np->n_openlock);
4435 nfs_open_state_clear_busy(np);
b0d623f7 4436
f427ee49
A
4437 if (inuse) {
4438 nfs_mount_state_in_use_end(nmp, error);
4439 }
0a7de745 4440 if (nflp2) {
b0d623f7 4441 nfs_file_lock_destroy(nflp2);
0a7de745
A
4442 }
4443 return error;
b0d623f7
A
4444}
4445
6d2010ae
A
4446/*
4447 * Release all (same style) locks within the given range.
4448 */
b0d623f7 4449int
6d2010ae 4450nfs_advlock_unlock(
b0d623f7 4451 nfsnode_t np,
cb323159
A
4452 struct nfs_open_file *nofp
4453#if !CONFIG_NFS4
4454 __unused
4455#endif
4456 ,
b0d623f7
A
4457 struct nfs_lock_owner *nlop,
4458 uint64_t start,
4459 uint64_t end,
4460 int style,
4461 vfs_context_t ctx)
4462{
4463 struct nfsmount *nmp;
4464 struct nfs_file_lock *nflp, *nextnflp, *newnflp = NULL;
4465 int error = 0, willsplit = 0, send_unlock_rpcs = 1;
4466
4467 nmp = NFSTONMP(np);
0a7de745
A
4468 if (nfs_mount_gone(nmp)) {
4469 return ENXIO;
4470 }
b0d623f7
A
4471
4472restart:
0a7de745
A
4473 if ((error = nfs_mount_state_in_use_start(nmp, NULL))) {
4474 return error;
4475 }
cb323159 4476#if CONFIG_NFS4
b0d623f7
A
4477 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
4478 nfs_mount_state_in_use_end(nmp, 0);
6d2010ae 4479 error = nfs4_reopen(nofp, NULL);
0a7de745
A
4480 if (error) {
4481 return error;
4482 }
b0d623f7
A
4483 goto restart;
4484 }
cb323159 4485#endif
6d2010ae 4486 if ((error = nfs_open_state_set_busy(np, NULL))) {
b0d623f7 4487 nfs_mount_state_in_use_end(nmp, error);
0a7de745 4488 return error;
b0d623f7
A
4489 }
4490
4491 lck_mtx_lock(&np->n_openlock);
4492 if ((start > 0) && (end < UINT64_MAX) && !willsplit) {
4493 /*
4494 * We may need to allocate a new lock if an existing lock gets split.
4495 * So, we first scan the list to check for a split, and if there's
4496 * going to be one, we'll allocate one now.
4497 */
4498 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
0a7de745 4499 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
b0d623f7 4500 continue;
0a7de745
A
4501 }
4502 if (nflp->nfl_owner != nlop) {
b0d623f7 4503 continue;
0a7de745
A
4504 }
4505 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style) {
b0d623f7 4506 continue;
0a7de745
A
4507 }
4508 if ((start > nflp->nfl_end) || (end < nflp->nfl_start)) {
b0d623f7 4509 continue;
0a7de745 4510 }
b0d623f7
A
4511 if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
4512 willsplit = 1;
4513 break;
4514 }
4515 }
4516 if (willsplit) {
4517 lck_mtx_unlock(&np->n_openlock);
4518 nfs_open_state_clear_busy(np);
4519 nfs_mount_state_in_use_end(nmp, 0);
4520 newnflp = nfs_file_lock_alloc(nlop);
0a7de745
A
4521 if (!newnflp) {
4522 return ENOMEM;
4523 }
b0d623f7
A
4524 goto restart;
4525 }
4526 }
4527
4528 /*
4529 * Free all of our locks in the given range.
4530 *
4531 * Note that this process requires sending requests to the server.
0a7de745 4532 * Because of this, we will release the n_openlock while performing
b0d623f7
A
4533 * the unlock RPCs. The N_OPENBUSY state keeps the state of *held*
4534 * locks from changing underneath us. However, other entries in the
4535 * list may be removed. So we need to be careful walking the list.
4536 */
4537
4538 /*
4539 * Don't unlock ranges that are held by other-style locks.
4540 * If style is posix, don't send any unlock rpcs if flock is held.
4541 * If we unlock a flock, don't send unlock rpcs for any posix-style
4542 * ranges held - instead send unlocks for the ranges not held.
4543 */
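 /*
  * E.g. while a whole-file flock(2) lock is held, a POSIX unlock only
  * updates the local lock lists (send_unlock_rpcs stays 0); dropping the
  * flock lock while POSIX ranges remain instead sends unlock RPCs only for
  * the ranges not covered by those held POSIX locks.
  */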
4544 if ((style == NFS_FILE_LOCK_STYLE_POSIX) &&
4545 ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
0a7de745 4546 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK)) {
b0d623f7 4547 send_unlock_rpcs = 0;
0a7de745 4548 }
b0d623f7
A
4549 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) &&
4550 ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
4551 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) &&
4552 ((nflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
4553 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX)) {
4554 uint64_t s = 0;
4555 int type = TAILQ_FIRST(&nlop->nlo_locks)->nfl_type;
6d2010ae
A
4556 int delegated = (TAILQ_FIRST(&nlop->nlo_locks)->nfl_flags & NFS_FILE_LOCK_DELEGATED);
4557 while (!delegated && nflp) {
b0d623f7
A
4558 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) {
4559 /* unlock the range preceding this lock */
4560 lck_mtx_unlock(&np->n_openlock);
0a7de745
A
4561 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, nflp->nfl_start - 1, 0,
4562 vfs_context_thread(ctx), vfs_context_ucred(ctx));
b0d623f7
A
4563 if (nfs_mount_state_error_should_restart(error)) {
4564 nfs_open_state_clear_busy(np);
4565 nfs_mount_state_in_use_end(nmp, error);
4566 goto restart;
4567 }
4568 lck_mtx_lock(&np->n_openlock);
0a7de745 4569 if (error) {
b0d623f7 4570 goto out;
0a7de745
A
4571 }
4572 s = nflp->nfl_end + 1;
b0d623f7
A
4573 }
4574 nflp = TAILQ_NEXT(nflp, nfl_lolink);
4575 }
6d2010ae
A
4576 if (!delegated) {
4577 lck_mtx_unlock(&np->n_openlock);
4578 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, end, 0,
0a7de745 4579 vfs_context_thread(ctx), vfs_context_ucred(ctx));
6d2010ae
A
4580 if (nfs_mount_state_error_should_restart(error)) {
4581 nfs_open_state_clear_busy(np);
4582 nfs_mount_state_in_use_end(nmp, error);
4583 goto restart;
4584 }
4585 lck_mtx_lock(&np->n_openlock);
0a7de745 4586 if (error) {
6d2010ae 4587 goto out;
0a7de745 4588 }
b0d623f7 4589 }
b0d623f7
A
4590 send_unlock_rpcs = 0;
4591 }
4592
4593 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
0a7de745 4594 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
b0d623f7 4595 continue;
0a7de745
A
4596 }
4597 if (nflp->nfl_owner != nlop) {
b0d623f7 4598 continue;
0a7de745
A
4599 }
4600 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style) {
b0d623f7 4601 continue;
0a7de745
A
4602 }
4603 if ((start > nflp->nfl_end) || (end < nflp->nfl_start)) {
b0d623f7 4604 continue;
0a7de745 4605 }
b0d623f7
A
4606 /* here's one to unlock */
4607 if ((start <= nflp->nfl_start) && (end >= nflp->nfl_end)) {
4608 /* The entire lock is being unlocked. */
6d2010ae 4609 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
b0d623f7 4610 lck_mtx_unlock(&np->n_openlock);
6d2010ae 4611 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, nflp->nfl_end, 0,
0a7de745 4612 vfs_context_thread(ctx), vfs_context_ucred(ctx));
b0d623f7
A
4613 if (nfs_mount_state_error_should_restart(error)) {
4614 nfs_open_state_clear_busy(np);
4615 nfs_mount_state_in_use_end(nmp, error);
4616 goto restart;
4617 }
4618 lck_mtx_lock(&np->n_openlock);
4619 }
4620 nextnflp = TAILQ_NEXT(nflp, nfl_link);
0a7de745 4621 if (error) {
b0d623f7 4622 break;
0a7de745 4623 }
b0d623f7
A
4624 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4625 lck_mtx_lock(&nlop->nlo_lock);
4626 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4627 lck_mtx_unlock(&nlop->nlo_lock);
4628 /* lock will be destroyed below, if no waiters */
4629 } else if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
4630 /* We're unlocking a range in the middle of a lock. */
4631 /* The current lock will be split into two locks. */
6d2010ae 4632 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
b0d623f7 4633 lck_mtx_unlock(&np->n_openlock);
6d2010ae 4634 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, end, 0,
0a7de745 4635 vfs_context_thread(ctx), vfs_context_ucred(ctx));
b0d623f7
A
4636 if (nfs_mount_state_error_should_restart(error)) {
4637 nfs_open_state_clear_busy(np);
4638 nfs_mount_state_in_use_end(nmp, error);
4639 goto restart;
4640 }
4641 lck_mtx_lock(&np->n_openlock);
4642 }
0a7de745 4643 if (error) {
b0d623f7 4644 break;
0a7de745 4645 }
b0d623f7 4646 /* update locks and insert new lock after current lock */
0a7de745 4647 newnflp->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK | NFS_FILE_LOCK_DELEGATED));
b0d623f7
A
4648 newnflp->nfl_type = nflp->nfl_type;
4649 newnflp->nfl_start = end + 1;
4650 newnflp->nfl_end = nflp->nfl_end;
4651 nflp->nfl_end = start - 1;
4652 TAILQ_INSERT_AFTER(&np->n_locks, nflp, newnflp, nfl_link);
4653 nfs_lock_owner_insert_held_lock(nlop, newnflp);
4654 nextnflp = newnflp;
4655 newnflp = NULL;
4656 } else if (start > nflp->nfl_start) {
4657 /* We're unlocking the end of a lock. */
6d2010ae 4658 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
b0d623f7 4659 lck_mtx_unlock(&np->n_openlock);
6d2010ae 4660 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, nflp->nfl_end, 0,
0a7de745 4661 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4662 if (nfs_mount_state_error_should_restart(error)) {
4663 nfs_open_state_clear_busy(np);
4664 nfs_mount_state_in_use_end(nmp, error);
4665 goto restart;
4666 }
4667 lck_mtx_lock(&np->n_openlock);
4668 }
4669 nextnflp = TAILQ_NEXT(nflp, nfl_link);
0a7de745 4670 if (error) {
b0d623f7 4671 break;
0a7de745 4672 }
4673 nflp->nfl_end = start - 1;
4674 } else if (end < nflp->nfl_end) {
4675 /* We're unlocking the start of a lock. */
6d2010ae 4676 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
b0d623f7 4677 lck_mtx_unlock(&np->n_openlock);
6d2010ae 4678 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, end, 0,
0a7de745 4679 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4680 if (nfs_mount_state_error_should_restart(error)) {
4681 nfs_open_state_clear_busy(np);
4682 nfs_mount_state_in_use_end(nmp, error);
4683 goto restart;
4684 }
4685 lck_mtx_lock(&np->n_openlock);
4686 }
4687 nextnflp = TAILQ_NEXT(nflp, nfl_link);
0a7de745 4688 if (error) {
b0d623f7 4689 break;
0a7de745 4690 }
4691 nflp->nfl_start = end + 1;
4692 }
4693 if (nflp->nfl_blockcnt) {
4694 /* wake up anyone blocked on this lock */
4695 wakeup(nflp);
4696 } else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
4697 /* remove nflp from lock list and destroy */
4698 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4699 nfs_file_lock_destroy(nflp);
4700 }
4701 }
4702 out:
4703 lck_mtx_unlock(&np->n_openlock);
4704 nfs_open_state_clear_busy(np);
4705 nfs_mount_state_in_use_end(nmp, 0);
4706
0a7de745 4707 if (newnflp) {
b0d623f7 4708 nfs_file_lock_destroy(newnflp);
4709 }
4710 return error;
4711}
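/*
 * Editor's note (illustrative, not part of the original source): the unlock
 * loop above resolves a requested unlock range [start, end] against each
 * matching held lock [nfl_start, nfl_end] in one of four ways:
 *
 *   request covers the whole lock  -> lock is marked DEAD and removed
 *   request is strictly inside     -> lock is split: the original keeps
 *                                     [nfl_start, start - 1] and a new lock
 *                                     (newnflp) takes [end + 1, nfl_end]
 *   request overlaps the tail      -> lock is trimmed to [nfl_start, start - 1]
 *   request overlaps the head      -> lock is trimmed to [end + 1, nfl_end]
 *
 * For example, unlocking [100, 199] against a held lock on [0, 499] leaves the
 * original lock as [0, 99] and inserts a new lock covering [200, 499].
 */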
4712
4713/*
4714 * NFSv4 advisory file locking
4715 */
4716int
6d2010ae 4717 nfs_vnop_advlock(
b0d623f7 4718 struct vnop_advlock_args /* {
4719 * struct vnodeop_desc *a_desc;
4720 * vnode_t a_vp;
4721 * caddr_t a_id;
4722 * int a_op;
4723 * struct flock *a_fl;
4724 * int a_flags;
4725 * vfs_context_t a_context;
4726 * } */*ap)
4727{
4728 vnode_t vp = ap->a_vp;
4729 nfsnode_t np = VTONFS(ap->a_vp);
4730 struct flock *fl = ap->a_fl;
4731 int op = ap->a_op;
4732 int flags = ap->a_flags;
4733 vfs_context_t ctx = ap->a_context;
4734 struct nfsmount *nmp;
4735 struct nfs_open_owner *noop = NULL;
4736 struct nfs_open_file *nofp = NULL;
4737 struct nfs_lock_owner *nlop = NULL;
4738 off_t lstart;
4739 uint64_t start, end;
4740 int error = 0, modified, style;
6d2010ae 4741 enum vtype vtype;
4742 #define OFF_MAX QUAD_MAX
4743
4744 nmp = VTONMP(ap->a_vp);
4745 if (nfs_mount_gone(nmp)) {
4746 return ENXIO;
4747 }
4748 lck_mtx_lock(&nmp->nm_lock);
4749 if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED)) {
4750 lck_mtx_unlock(&nmp->nm_lock);
0a7de745 4751 return ENOTSUP;
4752 }
4753 lck_mtx_unlock(&nmp->nm_lock);
b0d623f7 4754
4755 if (np->n_flag & NREVOKE) {
4756 return EIO;
4757 }
6d2010ae 4758 vtype = vnode_vtype(ap->a_vp);
4759 if (vtype == VDIR) { /* ignore lock requests on directories */
4760 return 0;
4761 }
4762 if (vtype != VREG) { /* anything other than regular files is invalid */
4763 return EINVAL;
4764 }
4765
4766 /* Convert the flock structure into a start and end. */
4767 switch (fl->l_whence) {
4768 case SEEK_SET:
4769 case SEEK_CUR:
4770 /*
4771 * Caller is responsible for adding any necessary offset
4772 * to fl->l_start when SEEK_CUR is used.
4773 */
4774 lstart = fl->l_start;
4775 break;
4776 case SEEK_END:
4777 /* need to flush and refetch attributes to make */
4778 /* sure we have the correct end-of-file offset */
4779 if ((error = nfs_node_lock(np))) {
4780 return error;
4781 }
4782 modified = (np->n_flag & NMODIFIED);
4783 nfs_node_unlock(np);
4784 if (modified && ((error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1)))) {
4785 return error;
4786 }
4787 if ((error = nfs_getattr(np, NULL, ctx, NGA_UNCACHED))) {
4788 return error;
4789 }
4790 nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
4791 if ((np->n_size > OFF_MAX) ||
0a7de745 4792 ((fl->l_start > 0) && (np->n_size > (u_quad_t)(OFF_MAX - fl->l_start)))) {
b0d623f7 4793 error = EOVERFLOW;
0a7de745 4794 }
4795 lstart = np->n_size + fl->l_start;
4796 nfs_data_unlock(np);
4797 if (error) {
4798 return error;
4799 }
4800 break;
4801 default:
4802 return EINVAL;
4803 }
4804 if (lstart < 0) {
4805 return EINVAL;
b0d623f7 4806 }
4807 start = lstart;
4808 if (fl->l_len == 0) {
4809 end = UINT64_MAX;
4810 } else if (fl->l_len > 0) {
4811 if ((fl->l_len - 1) > (OFF_MAX - lstart)) {
4812 return EOVERFLOW;
4813 }
4814 end = start - 1 + fl->l_len;
4815 } else { /* l_len is negative */
4816 if ((lstart + fl->l_len) < 0) {
4817 return EINVAL;
4818 }
4819 end = start - 1;
4820 start += fl->l_len;
4821 }
4822 if ((nmp->nm_vers == NFS_VER2) && ((start > INT32_MAX) || (fl->l_len && (end > INT32_MAX)))) {
4823 return EINVAL;
4824 }
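/*
 * Editor's note (illustrative, not part of the original source): examples of
 * the struct flock -> [start, end] conversion performed above:
 *
 *   l_whence=SEEK_SET, l_start=100, l_len=50  -> start=100, end=149
 *   l_whence=SEEK_SET, l_start=100, l_len=0   -> start=100, end=UINT64_MAX
 *                                                (lock to end of file)
 *   l_whence=SEEK_SET, l_start=200, l_len=-50 -> start=150, end=199
 *   l_whence=SEEK_END                         -> l_start is first biased by the
 *                                                freshly fetched file size
 *
 * NFSv2 additionally rejects ranges whose offsets do not fit in 32 bits.
 */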
4825
4826 style = (flags & F_FLOCK) ? NFS_FILE_LOCK_STYLE_FLOCK : NFS_FILE_LOCK_STYLE_POSIX;
4827 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && ((start != 0) || (end != UINT64_MAX))) {
4828 return EINVAL;
4829 }
4830
4831 /* find the lock owner, allocating one unless this is an unlock request */
4832 nlop = nfs_lock_owner_find(np, vfs_context_proc(ctx), (op != F_UNLCK));
4833 if (!nlop) {
4834 error = (op == F_UNLCK) ? 0 : ENOMEM;
0a7de745 4835 if (error) {
6d2010ae 4836 NP(np, "nfs_vnop_advlock: no lock owner, error %d", error);
0a7de745 4837 }
4838 goto out;
4839 }
4840
4841 if (op == F_GETLK) {
6d2010ae 4842 error = nfs_advlock_getlock(np, nlop, fl, start, end, ctx);
4843 } else {
4844 /* find the open owner */
4845 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 0);
4846 if (!noop) {
6d2010ae 4847 NP(np, "nfs_vnop_advlock: no open owner %d", kauth_cred_getuid(vfs_context_ucred(ctx)));
4848 error = EPERM;
4849 goto out;
4850 }
4851 /* find the open file */
cb323159 4852#if CONFIG_NFS4
b0d623f7 4853 restart:
cb323159 4854#endif
b0d623f7 4855 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 0);
0a7de745 4856 if (error) {
b0d623f7 4857 error = EBADF;
0a7de745 4858 }
b0d623f7 4859 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6d2010ae 4860 NP(np, "nfs_vnop_advlock: LOST %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
4861 error = EIO;
4862 }
cb323159 4863#if CONFIG_NFS4
b0d623f7 4864 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6d2010ae 4865 error = nfs4_reopen(nofp, ((op == F_UNLCK) ? NULL : vfs_context_thread(ctx)));
b0d623f7 4866 nofp = NULL;
0a7de745 4867 if (!error) {
6d2010ae 4868 goto restart;
0a7de745 4869 }
b0d623f7 4870 }
cb323159 4871#endif
b0d623f7 4872 if (error) {
6d2010ae 4873 NP(np, "nfs_vnop_advlock: no open file %d, %d", error, kauth_cred_getuid(noop->noo_cred));
4874 goto out;
4875 }
4876 if (op == F_UNLCK) {
6d2010ae 4877 error = nfs_advlock_unlock(np, nofp, nlop, start, end, style, ctx);
b0d623f7 4878 } else if ((op == F_SETLK) || (op == F_SETLKW)) {
0a7de745 4879 if ((op == F_SETLK) && (flags & F_WAIT)) {
b0d623f7 4880 op = F_SETLKW;
0a7de745 4881 }
6d2010ae 4882 error = nfs_advlock_setlock(np, nofp, nlop, op, start, end, style, fl->l_type, ctx);
4883 } else {
4884 /* not getlk, unlock or lock? */
4885 error = EINVAL;
4886 }
4887 }
4888
4889out:
0a7de745 4890 if (nlop) {
b0d623f7 4891 nfs_lock_owner_rele(nlop);
4892 }
4893 if (noop) {
b0d623f7 4894 nfs_open_owner_rele(noop);
4895 }
4896 return error;
4897}
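/*
 * Usage sketch (editor's addition, not part of the original source): advisory
 * locks taken by an application on an NFS file arrive at this vnop through the
 * generic fcntl(2)/flock(2) paths, e.g.:
 *
 *      struct flock fl = {
 *              .l_type   = F_WRLCK,
 *              .l_whence = SEEK_SET,
 *              .l_start  = 0,
 *              .l_len    = 4096,       // lock only the first 4KB
 *      };
 *      fcntl(fd, F_SETLKW, &fl);       // POSIX-style byte-range lock
 *      flock(fd, LOCK_EX);             // whole-file lock; arrives with F_FLOCK
 *                                      // set, i.e. start=0, end=UINT64_MAX
 *
 * F_GETLK is handled by nfs_advlock_getlock(), F_UNLCK by nfs_advlock_unlock(),
 * and F_SETLK/F_SETLKW by nfs_advlock_setlock().
 */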
4898
4899/*
4900 * Check if an open owner holds any locks on a file.
4901 */
4902int
6d2010ae 4903 nfs_check_for_locks(struct nfs_open_owner *noop, struct nfs_open_file *nofp)
4904{
4905 struct nfs_lock_owner *nlop;
4906
4907 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
0a7de745 4908 if (nlop->nlo_open_owner != noop) {
b0d623f7 4909 continue;
4910 }
4911 if (!TAILQ_EMPTY(&nlop->nlo_locks)) {
b0d623f7 4912 break;
0a7de745 4913 }
b0d623f7 4914 }
0a7de745 4915 return nlop ? 1 : 0;
4916}
4917
cb323159 4918#if CONFIG_NFS4
4919/*
4920 * Reopen simple (no deny, no locks) open state that was lost.
4921 */
6d2010ae 4922int
4923 nfs4_reopen(struct nfs_open_file *nofp, thread_t thd)
4924{
4925 struct nfs_open_owner *noop = nofp->nof_owner;
4926 struct nfsmount *nmp = NFSTONMP(nofp->nof_np);
4927 nfsnode_t np = nofp->nof_np;
4928 vnode_t vp = NFSTOV(np);
4929 vnode_t dvp = NULL;
4930 struct componentname cn;
4931 const char *vname = NULL;
6d2010ae 4932 const char *name = NULL;
f427ee49 4933 uint32_t namelen;
4934 char smallname[128];
4935 char *filename = NULL;
6d2010ae 4936 int error = 0, done = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
cb323159 4937 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
4938
4939 lck_mtx_lock(&nofp->nof_lock);
4940 while (nofp->nof_flags & NFS_OPEN_FILE_REOPENING) {
0a7de745 4941 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
b0d623f7 4942 break;
4943 }
4944 msleep(&nofp->nof_flags, &nofp->nof_lock, slpflag | (PZERO - 1), "nfsreopenwait", &ts);
6d2010ae 4945 slpflag = 0;
b0d623f7 4946 }
6d2010ae 4947 if (error || !(nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
b0d623f7 4948 lck_mtx_unlock(&nofp->nof_lock);
0a7de745 4949 return error;
4950 }
4951 nofp->nof_flags |= NFS_OPEN_FILE_REOPENING;
4952 lck_mtx_unlock(&nofp->nof_lock);
4953
4954 nfs_node_lock_force(np);
4955 if ((vnode_vtype(vp) != VDIR) && np->n_sillyrename) {
4956 /*
4957 * The node's been sillyrenamed, so we need to use
4958 * the sillyrename directory/name to do the open.
4959 */
4960 struct nfs_sillyrename *nsp = np->n_sillyrename;
4961 dvp = NFSTOV(nsp->nsr_dnp);
4962 if ((error = vnode_get(dvp))) {
cb323159 4963 dvp = NULLVP;
4964 nfs_node_unlock(np);
4965 goto out;
4966 }
4967 name = nsp->nsr_name;
4968 } else {
4969 /*
4970 * [sigh] We can't trust VFS to get the parent right for named
4971 * attribute nodes. (It likes to reparent the nodes after we've
4972 * created them.) Luckily we can probably get the right parent
4973 * from the n_parent we have stashed away.
4974 */
4975 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
0a7de745 4976 (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
6d2010ae 4977 dvp = NULL;
4978 }
4979 if (!dvp) {
6d2010ae 4980 dvp = vnode_getparent(vp);
0a7de745 4981 }
4982 vname = vnode_getname(vp);
4983 if (!dvp || !vname) {
0a7de745 4984 if (!error) {
6d2010ae 4985 error = EIO;
0a7de745 4986 }
4987 nfs_node_unlock(np);
4988 goto out;
4989 }
4990 name = vname;
4991 }
4992 filename = &smallname[0];
6d2010ae 4993 namelen = snprintf(filename, sizeof(smallname), "%s", name);
b0d623f7 4994 if (namelen >= sizeof(smallname)) {
0a7de745 4995 MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
4996 if (!filename) {
4997 error = ENOMEM;
4998 goto out;
4999 }
0a7de745 5000 snprintf(filename, namelen + 1, "%s", name);
b0d623f7 5001 }
6d2010ae 5002 nfs_node_unlock(np);
5003 bzero(&cn, sizeof(cn));
5004 cn.cn_nameptr = filename;
5005 cn.cn_namelen = namelen;
5006
5007 restart:
5008 done = 0;
0a7de745 5009 if ((error = nfs_mount_state_in_use_start(nmp, thd))) {
b0d623f7 5010 goto out;
0a7de745 5011 }
b0d623f7 5012
0a7de745 5013 if (nofp->nof_rw) {
b0d623f7 5014 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE);
5015 }
5016 if (!error && nofp->nof_w) {
b0d623f7 5017 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE);
5018 }
5019 if (!error && nofp->nof_r) {
b0d623f7 5020 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE);
0a7de745 5021 }
5022
5023 if (nfs_mount_state_in_use_end(nmp, error)) {
0a7de745 5024 if (error == NFSERR_GRACE) {
b0d623f7 5025 goto restart;
0a7de745 5026 }
6d2010ae 5027 printf("nfs4_reopen: RPC failed, error %d, lost %d, %s\n", error,
0a7de745 5028 (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
5029 error = 0;
5030 goto out;
5031 }
5032 done = 1;
5033out:
0a7de745 5034 if (error && (error != EINTR) && (error != ERESTART)) {
6d2010ae 5035 nfs_revoke_open_state_for_node(np);
0a7de745 5036 }
5037 lck_mtx_lock(&nofp->nof_lock);
5038 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPENING;
0a7de745 5039 if (done) {
b0d623f7 5040 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
0a7de745 5041 } else if (error) {
6d2010ae 5042 printf("nfs4_reopen: failed, error %d, lost %d, %s\n", error,
5043 (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
5044 }
b0d623f7 5045 lck_mtx_unlock(&nofp->nof_lock);
0a7de745 5046 if (filename && (filename != &smallname[0])) {
b0d623f7 5047 FREE(filename, M_TEMP);
5048 }
5049 if (vname) {
b0d623f7 5050 vnode_putname(vname);
5051 }
5052 if (dvp != NULLVP) {
b0d623f7 5053 vnode_put(dvp);
5054 }
5055 return error;
5056}
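/*
 * Editor's note (descriptive summary, not part of the original source):
 * nfs4_reopen() recovers open state the server no longer recognizes
 * (NFS_OPEN_FILE_REOPEN).  It rebuilds a directory/name pair -- preferring the
 * sillyrename parent, otherwise the cached parent and name -- and re-sends one
 * OPEN per access mode still held (read/write, write-only, read-only, all with
 * deny-none).  NFSERR_GRACE causes the whole sequence to be retried; any other
 * failure (other than EINTR/ERESTART) revokes the node's open state via
 * nfs_revoke_open_state_for_node().
 */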
5057
5058/*
5059 * Send a normal OPEN RPC to open/create a file.
5060 */
5061int
5062nfs4_open_rpc(
5063 struct nfs_open_file *nofp,
5064 vfs_context_t ctx,
5065 struct componentname *cnp,
5066 struct vnode_attr *vap,
5067 vnode_t dvp,
5068 vnode_t *vpp,
5069 int create,
5070 int share_access,
5071 int share_deny)
5072{
5073 return nfs4_open_rpc_internal(nofp, ctx, vfs_context_thread(ctx), vfs_context_ucred(ctx),
5074 cnp, vap, dvp, vpp, create, share_access, share_deny);
5075}
5076
5077/*
5078 * Send an OPEN RPC to reopen a file.
5079 */
5080int
5081nfs4_open_reopen_rpc(
5082 struct nfs_open_file *nofp,
5083 thread_t thd,
5084 kauth_cred_t cred,
5085 struct componentname *cnp,
5086 vnode_t dvp,
5087 vnode_t *vpp,
5088 int share_access,
5089 int share_deny)
5090{
0a7de745 5091 return nfs4_open_rpc_internal(nofp, NULL, thd, cred, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, share_access, share_deny);
5092}
5093
5094/*
5095 * Send an OPEN_CONFIRM RPC to confirm an OPEN.
5096 */
5097int
5098nfs4_open_confirm_rpc(
5099 struct nfsmount *nmp,
5100 nfsnode_t dnp,
5101 u_char *fhp,
5102 int fhlen,
5103 struct nfs_open_owner *noop,
5104 nfs_stateid *sid,
5105 thread_t thd,
5106 kauth_cred_t cred,
5107 struct nfs_vattr *nvap,
5108 uint64_t *xidp)
5109{
5110 struct nfsm_chain nmreq, nmrep;
5111 int error = 0, status, numops;
5112 struct nfsreq_secinfo_args si;
5113
5114 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
5115 nfsm_chain_null(&nmreq);
5116 nfsm_chain_null(&nmrep);
5117
5118 // PUTFH, OPEN_CONFIRM, GETATTR
5119 numops = 3;
5120 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
3e170ce0 5121 nfsm_chain_add_compound_header(error, &nmreq, "open_confirm", nmp->nm_minor_vers, numops);
5122 numops--;
5123 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5124 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
5125 numops--;
5126 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_CONFIRM);
5127 nfsm_chain_add_stateid(error, &nmreq, sid);
5128 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5129 numops--;
5130 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5131 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
5132 nfsm_chain_build_done(error, &nmreq);
5133 nfsm_assert(error, (numops == 0), EPROTO);
5134 nfsmout_if(error);
5135 error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, &nmrep, xidp, &status);
5136
5137 nfsm_chain_skip_tag(error, &nmrep);
5138 nfsm_chain_get_32(error, &nmrep, numops);
5139 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5140 nfsmout_if(error);
5141 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_CONFIRM);
5142 nfs_owner_seqid_increment(noop, NULL, error);
5143 nfsm_chain_get_stateid(error, &nmrep, sid);
5144 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5145 nfsmout_if(error);
5146 error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
5147 nfsmout:
5148 nfsm_chain_cleanup(&nmreq);
5149 nfsm_chain_cleanup(&nmrep);
0a7de745 5150 return error;
5151}
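/*
 * Editor's note (not part of the original source): OPEN_CONFIRM is only issued
 * when the server sets NFS_OPEN_RESULT_CONFIRM in the OPEN result flags (see
 * the rflags handling in nfs4_open_rpc_internal() below).  The client echoes
 * the open stateid along with the next open-owner seqid and adopts the
 * confirmed stateid returned by the server.
 */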
5152
5153/*
5154 * common OPEN RPC code
5155 *
5156 * If create is set, ctx must be passed in.
6d2010ae 5157 * Returns a node on success if no node passed in.
5158 */
5159int
5160nfs4_open_rpc_internal(
5161 struct nfs_open_file *nofp,
5162 vfs_context_t ctx,
5163 thread_t thd,
5164 kauth_cred_t cred,
5165 struct componentname *cnp,
5166 struct vnode_attr *vap,
5167 vnode_t dvp,
5168 vnode_t *vpp,
5169 int create,
5170 int share_access,
5171 int share_deny)
5172{
5173 struct nfsmount *nmp;
5174 struct nfs_open_owner *noop = nofp->nof_owner;
f427ee49 5175 struct nfs_vattr *nvattr;
b0d623f7 5176 int error = 0, open_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
6d2010ae 5177 int nfsvers, namedattrs, numops, exclusive = 0, gotuid, gotgid;
5178 u_int64_t xid, savedxid = 0;
5179 nfsnode_t dnp = VTONFS(dvp);
5180 nfsnode_t np, newnp = NULL;
5181 vnode_t newvp = NULL;
5182 struct nfsm_chain nmreq, nmrep;
5183 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6d2010ae 5184 uint32_t rflags, delegation, recall;
b0d623f7 5185 struct nfs_stateid stateid, dstateid, *sid;
5186 fhandle_t *fh;
5187 struct nfsreq *req;
5188 struct nfs_dulookup *dul;
5189 char sbuf[64], *s;
5190 uint32_t ace_type, ace_flags, ace_mask, len, slen;
5191 struct kauth_ace ace;
5192 struct nfsreq_secinfo_args si;
b0d623f7 5193
5194 if (create && !ctx) {
5195 return EINVAL;
5196 }
5197
5198 nmp = VTONMP(dvp);
5199 if (nfs_mount_gone(nmp)) {
5200 return ENXIO;
5201 }
b0d623f7 5202 nfsvers = nmp->nm_vers;
6d2010ae 5203 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
f427ee49 5204 bzero(&dstateid, sizeof(dstateid));
5205 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
5206 return EINVAL;
5207 }
5208
5209 np = *vpp ? VTONFS(*vpp) : NULL;
5210 if (create && vap) {
5211 exclusive = (vap->va_vaflags & VA_EXCLUSIVE);
5212 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
5213 gotuid = VATTR_IS_ACTIVE(vap, va_uid);
5214 gotgid = VATTR_IS_ACTIVE(vap, va_gid);
0a7de745 5215 if (exclusive && (!VATTR_IS_ACTIVE(vap, va_access_time) || !VATTR_IS_ACTIVE(vap, va_modify_time))) {
6d2010ae 5216 vap->va_vaflags |= VA_UTIMES_NULL;
0a7de745 5217 }
5218 } else {
5219 exclusive = gotuid = gotgid = 0;
5220 }
5221 if (nofp) {
5222 sid = &nofp->nof_stateid;
5223 } else {
5224 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
5225 sid = &stateid;
5226 }
5227
5228 if ((error = nfs_open_owner_set_busy(noop, thd))) {
5229 return error;
5230 }
5231
5232 fh = zalloc(nfs_fhandle_zone);
5233 req = zalloc(nfs_req_zone);
5234 MALLOC(dul, struct nfs_dulookup *, sizeof(*dul), M_TEMP, M_WAITOK);
5235 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
5236
b0d623f7 5237 again:
5238 rflags = delegation = recall = 0;
5239 ace.ace_flags = 0;
5240 s = sbuf;
5241 slen = sizeof(sbuf);
f427ee49 5242 NVATTR_INIT(nvattr);
6d2010ae 5243 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, cnp->cn_nameptr, cnp->cn_namelen);
5244
5245 nfsm_chain_null(&nmreq);
5246 nfsm_chain_null(&nmrep);
5247
5248 // PUTFH, SAVEFH, OPEN(CREATE?), GETATTR(FH), RESTOREFH, GETATTR
5249 numops = 6;
5250 nfsm_chain_build_alloc_init(error, &nmreq, 53 * NFSX_UNSIGNED + cnp->cn_namelen);
3e170ce0 5251 nfsm_chain_add_compound_header(error, &nmreq, create ? "create" : "open", nmp->nm_minor_vers, numops);
5252 numops--;
5253 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5254 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
5255 numops--;
5256 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
5257 numops--;
5258 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5259 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5260 nfsm_chain_add_32(error, &nmreq, share_access);
5261 nfsm_chain_add_32(error, &nmreq, share_deny);
6d2010ae 5262 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
b0d623f7 5263 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
6d2010ae 5264 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
5265 nfsm_chain_add_32(error, &nmreq, create);
5266 if (create) {
5267 if (exclusive) {
5268 static uint32_t create_verf; // XXX need a better verifier
5269 create_verf++;
5270 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_EXCLUSIVE);
5271 /* insert 64 bit verifier */
5272 nfsm_chain_add_32(error, &nmreq, create_verf);
5273 nfsm_chain_add_32(error, &nmreq, create_verf);
5274 } else {
5275 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_UNCHECKED);
5276 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
5277 }
5278 }
b0d623f7 5279 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
6d2010ae 5280 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
5281 numops--;
5282 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5283 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5284 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
6d2010ae 5285 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5286 numops--;
5287 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
5288 numops--;
5289 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 5290 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
5291 nfsm_chain_build_done(error, &nmreq);
5292 nfsm_assert(error, (numops == 0), EPROTO);
0a7de745 5293 if (!error) {
b0d623f7 5294 error = busyerror = nfs_node_set_busy(dnp, thd);
0a7de745 5295 }
5296 nfsmout_if(error);
5297
0a7de745 5298 if (create && !namedattrs) {
f427ee49 5299 nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
0a7de745 5300 }
b0d623f7 5301
6d2010ae 5302 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, NULL, &req);
b0d623f7 5303 if (!error) {
0a7de745 5304 if (create && !namedattrs) {
f427ee49 5305 nfs_dulookup_start(dul, dnp, ctx);
0a7de745 5306 }
5307 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
5308 savedxid = xid;
5309 }
5310
0a7de745 5311 if (create && !namedattrs) {
f427ee49 5312 nfs_dulookup_finish(dul, dnp, ctx);
0a7de745 5313 }
b0d623f7 5314
0a7de745 5315 if ((lockerror = nfs_node_lock(dnp))) {
b0d623f7 5316 error = lockerror;
0a7de745 5317 }
5318 nfsm_chain_skip_tag(error, &nmrep);
5319 nfsm_chain_get_32(error, &nmrep, numops);
5320 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5321 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
5322 nfsmout_if(error);
5323 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5324 nfs_owner_seqid_increment(noop, NULL, error);
5325 nfsm_chain_get_stateid(error, &nmrep, sid);
5326 nfsm_chain_check_change_info(error, &nmrep, dnp);
5327 nfsm_chain_get_32(error, &nmrep, rflags);
5328 bmlen = NFS_ATTR_BITMAP_LEN;
5329 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5330 nfsm_chain_get_32(error, &nmrep, delegation);
0a7de745 5331 if (!error) {
5332 switch (delegation) {
5333 case NFS_OPEN_DELEGATE_NONE:
5334 break;
5335 case NFS_OPEN_DELEGATE_READ:
5336 case NFS_OPEN_DELEGATE_WRITE:
5337 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5338 nfsm_chain_get_32(error, &nmrep, recall);
0a7de745 5339 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
6d2010ae 5340 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
0a7de745 5341 }
5342 /* if we have any trouble accepting the ACE, just invalidate it */
5343 ace_type = ace_flags = ace_mask = len = 0;
5344 nfsm_chain_get_32(error, &nmrep, ace_type);
5345 nfsm_chain_get_32(error, &nmrep, ace_flags);
5346 nfsm_chain_get_32(error, &nmrep, ace_mask);
5347 nfsm_chain_get_32(error, &nmrep, len);
5348 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5349 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5350 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5351 if (!error && (len >= slen)) {
5352 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
5353 if (s) {
5354 slen = len + 1;
5355 } else {
6d2010ae 5356 ace.ace_flags = 0;
0a7de745 5357 }
6d2010ae 5358 }
0a7de745 5359 if (s) {
6d2010ae 5360 nfsm_chain_get_opaque(error, &nmrep, len, s);
0a7de745 5361 } else {
6d2010ae 5362 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
0a7de745 5363 }
5364 if (!error && s) {
5365 s[len] = '\0';
0a7de745 5366 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
6d2010ae 5367 ace.ace_flags = 0;
0a7de745 5368 }
6d2010ae 5369 }
0a7de745 5370 if (error || !s) {
6d2010ae 5371 ace.ace_flags = 0;
5372 }
5373 if (s && (s != sbuf)) {
6d2010ae 5374 FREE(s, M_TEMP);
0a7de745 5375 }
5376 break;
5377 default:
5378 error = EBADRPC;
5379 break;
5380 }
0a7de745 5381 }
b0d623f7 5382 /* At this point if we have no error, the object was created/opened. */
5383 open_error = error;
5384 nfsmout_if(error);
0a7de745 5385 if (create && vap && !exclusive) {
b0d623f7 5386 nfs_vattr_set_supported(bitmap, vap);
0a7de745 5387 }
5388 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5389 nfsmout_if(error);
f427ee49 5390 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
b0d623f7 5391 nfsmout_if(error);
f427ee49 5392 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
6d2010ae 5393 printf("nfs: open/create didn't return filehandle? %s\n", cnp->cn_nameptr);
5394 error = EBADRPC;
5395 goto nfsmout;
5396 }
f427ee49 5397 if (!create && np && !NFS_CMPFH(np, fh->fh_data, fh->fh_len)) {
b0d623f7 5398 // XXX for the open case, what if fh doesn't match the vnode we think we're opening?
6d2010ae 5399 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
0a7de745 5400 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
6d2010ae 5401 NP(np, "nfs4_open_rpc: warning: file handle mismatch");
0a7de745 5402 }
b0d623f7 5403 }
5404 /* directory attributes: if we don't get them, make sure to invalidate */
5405 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
5406 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 5407 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
0a7de745 5408 if (error) {
2d21ac55 5409 NATTRINVALIDATE(dnp);
0a7de745 5410 }
5411 nfsmout_if(error);
5412
0a7de745 5413 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
b0d623f7 5414 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
0a7de745 5415 }
5416
5417 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
5418 nfs_node_unlock(dnp);
5419 lockerror = ENOENT;
5420 NVATTR_CLEANUP(nvattr);
5421 error = nfs4_open_confirm_rpc(nmp, dnp, fh->fh_data, fh->fh_len, noop, sid, thd, cred, nvattr, &xid);
5422 nfsmout_if(error);
5423 savedxid = xid;
0a7de745 5424 if ((lockerror = nfs_node_lock(dnp))) {
b0d623f7 5425 error = lockerror;
0a7de745 5426 }
5427 }
5428
5429 nfsmout:
5430 nfsm_chain_cleanup(&nmreq);
5431 nfsm_chain_cleanup(&nmrep);
5432
5433 if (!lockerror && create) {
5434 if (!open_error && (dnp->n_flag & NNEGNCENTRIES)) {
5435 dnp->n_flag &= ~NNEGNCENTRIES;
5436 cache_purge_negatives(dvp);
5437 }
5438 dnp->n_flag |= NMODIFIED;
5439 nfs_node_unlock(dnp);
5440 lockerror = ENOENT;
6d2010ae 5441 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
b0d623f7 5442 }
0a7de745 5443 if (!lockerror) {
b0d623f7 5444 nfs_node_unlock(dnp);
0a7de745 5445 }
f427ee49 5446 if (!error && !np && fh->fh_len) {
5447 /* create the vnode with the filehandle and attributes */
5448 xid = savedxid;
f427ee49 5449 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &newnp);
0a7de745 5450 if (!error) {
b0d623f7 5451 newvp = NFSTOV(newnp);
0a7de745 5452 }
b0d623f7 5453 }
f427ee49 5454 NVATTR_CLEANUP(nvattr);
0a7de745 5455 if (!busyerror) {
b0d623f7 5456 nfs_node_clear_busy(dnp);
0a7de745 5457 }
b0d623f7 5458 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
0a7de745 5459 if (!np) {
b0d623f7 5460 np = newnp;
0a7de745 5461 }
5462 if (!error && np && !recall) {
5463 /* stuff the delegation state in the node */
5464 lck_mtx_lock(&np->n_openlock);
5465 np->n_openflags &= ~N_DELEG_MASK;
5466 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5467 np->n_dstateid = dstateid;
5468 np->n_dace = ace;
5469 if (np->n_dlink.tqe_next == NFSNOLIST) {
5470 lck_mtx_lock(&nmp->nm_lock);
0a7de745 5471 if (np->n_dlink.tqe_next == NFSNOLIST) {
6d2010ae 5472 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
0a7de745 5473 }
5474 lck_mtx_unlock(&nmp->nm_lock);
5475 }
b0d623f7 5476 lck_mtx_unlock(&np->n_openlock);
5477 } else {
5478 /* give the delegation back */
b0d623f7 5479 if (np) {
f427ee49 5480 if (NFS_CMPFH(np, fh->fh_data, fh->fh_len)) {
5481 /* update delegation state and return it */
5482 lck_mtx_lock(&np->n_openlock);
5483 np->n_openflags &= ~N_DELEG_MASK;
5484 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5485 np->n_dstateid = dstateid;
5486 np->n_dace = ace;
5487 if (np->n_dlink.tqe_next == NFSNOLIST) {
5488 lck_mtx_lock(&nmp->nm_lock);
0a7de745 5489 if (np->n_dlink.tqe_next == NFSNOLIST) {
6d2010ae 5490 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
0a7de745 5491 }
5492 lck_mtx_unlock(&nmp->nm_lock);
5493 }
5494 lck_mtx_unlock(&np->n_openlock);
5495 /* don't need to send a separate delegreturn for fh */
f427ee49 5496 fh->fh_len = 0;
5497 }
5498 /* return np's current delegation */
5499 nfs4_delegation_return(np, 0, thd, cred);
b0d623f7 5500 }
5501 if (fh->fh_len) { /* return fh's delegation if it wasn't for np */
5502 nfs4_delegreturn_rpc(nmp, fh->fh_data, fh->fh_len, &dstateid, 0, thd, cred);
0a7de745 5503 }
5504 }
5505 }
5506 if (error) {
5507 if (exclusive && (error == NFSERR_NOTSUPP)) {
5508 exclusive = 0;
5509 goto again;
5510 }
5511 if (newvp) {
5512 nfs_node_unlock(newnp);
5513 vnode_put(newvp);
5514 }
5515 } else if (create) {
5516 nfs_node_unlock(newnp);
5517 if (exclusive) {
5518 error = nfs4_setattr_rpc(newnp, vap, ctx);
5519 if (error && (gotuid || gotgid)) {
5520 /* it's possible the server didn't like our attempt to set IDs. */
5521 /* so, let's try it again without those */
5522 VATTR_CLEAR_ACTIVE(vap, va_uid);
5523 VATTR_CLEAR_ACTIVE(vap, va_gid);
5524 error = nfs4_setattr_rpc(newnp, vap, ctx);
5525 }
5526 }
0a7de745 5527 if (error) {
b0d623f7 5528 vnode_put(newvp);
0a7de745 5529 } else {
b0d623f7 5530 *vpp = newvp;
0a7de745 5531 }
5532 }
5533 nfs_open_owner_clear_busy(noop);
5534 NFS_ZFREE(nfs_fhandle_zone, fh);
5535 NFS_ZFREE(nfs_req_zone, req);
5536 FREE(dul, M_TEMP);
5537 FREE(nvattr, M_TEMP);
0a7de745 5538 return error;
5539}
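/*
 * Editor's note (descriptive sketch, not part of the original source): once the
 * OPEN reply has been parsed above, any delegation granted by the server is
 * either recorded on the node (n_dstateid/n_dace, queued on the mount's
 * nm_delegations list) or handed straight back when it cannot be kept -- no
 * node, an immediate recall, or a file handle that does not match:
 *
 *      if (delegation was granted) {
 *              if (np && !error && !recall)
 *                      stash it on np;
 *              else
 *                      return it via nfs4_delegation_return() /
 *                          nfs4_delegreturn_rpc();
 *      }
 */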
5540
5541
5542/*
5543 * Send an OPEN RPC to claim a delegated open for a file
5544 */
5545int
5546nfs4_claim_delegated_open_rpc(
5547 struct nfs_open_file *nofp,
5548 int share_access,
5549 int share_deny,
5550 int flags)
5551{
5552 struct nfsmount *nmp;
5553 struct nfs_open_owner *noop = nofp->nof_owner;
f427ee49 5554 struct nfs_vattr *nvattr;
5555 int error = 0, lockerror = ENOENT, status;
5556 int nfsvers, numops;
5557 u_int64_t xid;
5558 nfsnode_t np = nofp->nof_np;
5559 struct nfsm_chain nmreq, nmrep;
5560 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5561 uint32_t rflags = 0, delegation, recall = 0;
f427ee49 5562 fhandle_t *fh;
5563 struct nfs_stateid dstateid;
5564 char sbuf[64], *s = sbuf;
5565 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5566 struct kauth_ace ace;
5567 vnode_t dvp = NULL;
5568 const char *vname = NULL;
5569 const char *name = NULL;
f427ee49 5570 uint32_t namelen;
5571 char smallname[128];
5572 char *filename = NULL;
5573 struct nfsreq_secinfo_args si;
5574
5575 nmp = NFSTONMP(np);
5576 if (nfs_mount_gone(nmp)) {
5577 return ENXIO;
5578 }
5579 fh = zalloc(nfs_fhandle_zone);
5580 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
5581 nfsvers = nmp->nm_vers;
5582
5583 nfs_node_lock_force(np);
5584 if ((vnode_vtype(NFSTOV(np)) != VDIR) && np->n_sillyrename) {
5585 /*
5586 * The node's been sillyrenamed, so we need to use
5587 * the sillyrename directory/name to do the open.
5588 */
5589 struct nfs_sillyrename *nsp = np->n_sillyrename;
5590 dvp = NFSTOV(nsp->nsr_dnp);
5591 if ((error = vnode_get(dvp))) {
cb323159 5592 dvp = NULLVP;
5593 nfs_node_unlock(np);
5594 goto out;
5595 }
5596 name = nsp->nsr_name;
5597 } else {
5598 /*
5599 * [sigh] We can't trust VFS to get the parent right for named
5600 * attribute nodes. (It likes to reparent the nodes after we've
5601 * created them.) Luckily we can probably get the right parent
5602 * from the n_parent we have stashed away.
5603 */
5604 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
0a7de745 5605 (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
6d2010ae 5606 dvp = NULL;
5607 }
5608 if (!dvp) {
6d2010ae 5609 dvp = vnode_getparent(NFSTOV(np));
0a7de745 5610 }
6d2010ae
A
5611 vname = vnode_getname(NFSTOV(np));
5612 if (!dvp || !vname) {
0a7de745 5613 if (!error) {
6d2010ae 5614 error = EIO;
0a7de745 5615 }
6d2010ae
A
5616 nfs_node_unlock(np);
5617 goto out;
5618 }
5619 name = vname;
5620 }
5621 filename = &smallname[0];
5622 namelen = snprintf(filename, sizeof(smallname), "%s", name);
5623 if (namelen >= sizeof(smallname)) {
0a7de745 5624 MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
6d2010ae
A
5625 if (!filename) {
5626 error = ENOMEM;
3e170ce0 5627 nfs_node_unlock(np);
6d2010ae
A
5628 goto out;
5629 }
0a7de745 5630 snprintf(filename, namelen + 1, "%s", name);
6d2010ae
A
5631 }
5632 nfs_node_unlock(np);
5633
0a7de745 5634 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
3e170ce0 5635 goto out;
0a7de745 5636 }
f427ee49 5637 NVATTR_INIT(nvattr);
6d2010ae
A
5638 delegation = NFS_OPEN_DELEGATE_NONE;
5639 dstateid = np->n_dstateid;
5640 NFSREQ_SECINFO_SET(&si, VTONFS(dvp), NULL, 0, filename, namelen);
5641
5642 nfsm_chain_null(&nmreq);
5643 nfsm_chain_null(&nmrep);
5644
5645 // PUTFH, OPEN, GETATTR(FH)
5646 numops = 3;
5647 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
3e170ce0 5648 nfsm_chain_add_compound_header(error, &nmreq, "open_claim_d", nmp->nm_minor_vers, numops);
6d2010ae
A
5649 numops--;
5650 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5651 nfsm_chain_add_fh(error, &nmreq, nfsvers, VTONFS(dvp)->n_fhp, VTONFS(dvp)->n_fhsize);
5652 numops--;
5653 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5654 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5655 nfsm_chain_add_32(error, &nmreq, share_access);
5656 nfsm_chain_add_32(error, &nmreq, share_deny);
5657 // open owner: clientid + uid
5658 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5659 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5660 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
5661 // openflag4
5662 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5663 // open_claim4
5664 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_DELEGATE_CUR);
5665 nfsm_chain_add_stateid(error, &nmreq, &np->n_dstateid);
5666 nfsm_chain_add_name(error, &nmreq, filename, namelen, nmp);
5667 numops--;
5668 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5669 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5670 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5671 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5672 nfsm_chain_build_done(error, &nmreq);
5673 nfsm_assert(error, (numops == 0), EPROTO);
5674 nfsmout_if(error);
5675
5676 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
0a7de745 5677 noop->noo_cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
6d2010ae 5678
0a7de745 5679 if ((lockerror = nfs_node_lock(np))) {
6d2010ae 5680 error = lockerror;
0a7de745 5681 }
6d2010ae
A
5682 nfsm_chain_skip_tag(error, &nmrep);
5683 nfsm_chain_get_32(error, &nmrep, numops);
5684 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5685 nfsmout_if(error);
5686 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5687 nfs_owner_seqid_increment(noop, NULL, error);
5688 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5689 nfsm_chain_check_change_info(error, &nmrep, np);
5690 nfsm_chain_get_32(error, &nmrep, rflags);
5691 bmlen = NFS_ATTR_BITMAP_LEN;
5692 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5693 nfsm_chain_get_32(error, &nmrep, delegation);
0a7de745 5694 if (!error) {
6d2010ae
A
5695 switch (delegation) {
5696 case NFS_OPEN_DELEGATE_NONE:
5697 // if (!(np->n_openflags & N_DELEG_RETURN)) /* don't warn if delegation is being returned */
0a7de745 5698 // printf("nfs: open delegated claim didn't return a delegation %s\n", filename ? filename : "???");
6d2010ae
A
5699 break;
5700 case NFS_OPEN_DELEGATE_READ:
5701 case NFS_OPEN_DELEGATE_WRITE:
5702 if ((((np->n_openflags & N_DELEG_MASK) == N_DELEG_READ) &&
0a7de745 5703 (delegation == NFS_OPEN_DELEGATE_WRITE)) ||
6d2010ae 5704 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) &&
0a7de745 5705 (delegation == NFS_OPEN_DELEGATE_READ))) {
6d2010ae 5706 printf("nfs: open delegated claim returned a different delegation type! have %s got %s %s\n",
0a7de745
A
5707 ((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ? "W" : "R",
5708 (delegation == NFS_OPEN_DELEGATE_WRITE) ? "W" : "R", filename ? filename : "???");
5709 }
6d2010ae
A
5710 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5711 nfsm_chain_get_32(error, &nmrep, recall);
0a7de745 5712 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
6d2010ae 5713 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
0a7de745 5714 }
6d2010ae
A
5715 /* if we have any trouble accepting the ACE, just invalidate it */
5716 ace_type = ace_flags = ace_mask = len = 0;
5717 nfsm_chain_get_32(error, &nmrep, ace_type);
5718 nfsm_chain_get_32(error, &nmrep, ace_flags);
5719 nfsm_chain_get_32(error, &nmrep, ace_mask);
5720 nfsm_chain_get_32(error, &nmrep, len);
5721 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5722 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5723 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5724 if (!error && (len >= slen)) {
0a7de745
A
5725 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
5726 if (s) {
5727 slen = len + 1;
5728 } else {
6d2010ae 5729 ace.ace_flags = 0;
0a7de745 5730 }
6d2010ae 5731 }
0a7de745 5732 if (s) {
6d2010ae 5733 nfsm_chain_get_opaque(error, &nmrep, len, s);
0a7de745 5734 } else {
6d2010ae 5735 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
0a7de745 5736 }
6d2010ae
A
5737 if (!error && s) {
5738 s[len] = '\0';
0a7de745 5739 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
6d2010ae 5740 ace.ace_flags = 0;
0a7de745 5741 }
6d2010ae 5742 }
0a7de745 5743 if (error || !s) {
6d2010ae 5744 ace.ace_flags = 0;
0a7de745
A
5745 }
5746 if (s && (s != sbuf)) {
6d2010ae 5747 FREE(s, M_TEMP);
0a7de745 5748 }
6d2010ae
A
5749 if (!error) {
5750 /* stuff the latest delegation state in the node */
5751 lck_mtx_lock(&np->n_openlock);
5752 np->n_openflags &= ~N_DELEG_MASK;
5753 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5754 np->n_dstateid = dstateid;
5755 np->n_dace = ace;
5756 if (np->n_dlink.tqe_next == NFSNOLIST) {
5757 lck_mtx_lock(&nmp->nm_lock);
0a7de745 5758 if (np->n_dlink.tqe_next == NFSNOLIST) {
6d2010ae 5759 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
0a7de745 5760 }
6d2010ae
A
5761 lck_mtx_unlock(&nmp->nm_lock);
5762 }
5763 lck_mtx_unlock(&np->n_openlock);
5764 }
5765 break;
5766 default:
5767 error = EBADRPC;
5768 break;
5769 }
0a7de745 5770 }
6d2010ae
A
5771 nfsmout_if(error);
5772 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
f427ee49 5773 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
6d2010ae 5774 nfsmout_if(error);
f427ee49 5775 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
6d2010ae
A
5776 printf("nfs: open reclaim didn't return filehandle? %s\n", filename ? filename : "???");
5777 error = EBADRPC;
5778 goto nfsmout;
5779 }
f427ee49 5780 if (!NFS_CMPFH(np, fh->fh_data, fh->fh_len)) {
6d2010ae
A
5781 // XXX what if fh doesn't match the vnode we think we're re-opening?
5782 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
0a7de745 5783 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
6d2010ae 5784 printf("nfs4_claim_delegated_open_rpc: warning: file handle mismatch %s\n", filename ? filename : "???");
0a7de745 5785 }
6d2010ae 5786 }
f427ee49 5787 error = nfs_loadattrcache(np, nvattr, &xid, 1);
6d2010ae 5788 nfsmout_if(error);
0a7de745 5789 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
6d2010ae 5790 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
0a7de745 5791 }
6d2010ae 5792nfsmout:
f427ee49
A
5793 NVATTR_CLEANUP(nvattr);
5794 FREE(nvattr, M_TEMP);
5795 NFS_ZFREE(nfs_fhandle_zone, fh);
6d2010ae
A
5796 nfsm_chain_cleanup(&nmreq);
5797 nfsm_chain_cleanup(&nmrep);
0a7de745 5798 if (!lockerror) {
6d2010ae 5799 nfs_node_unlock(np);
0a7de745 5800 }
6d2010ae
A
5801 nfs_open_owner_clear_busy(noop);
5802 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5803 if (recall) {
5804 /*
5805 * We're making a delegated claim.
5806 * Don't return the delegation here in case we have more to claim.
5807 * Just make sure it's queued up to be returned.
5808 */
5809 nfs4_delegation_return_enqueue(np);
5810 }
5811 }
5812out:
5813 // if (!error)
0a7de745
A
5814 // printf("nfs: open claim delegated (%d, %d) succeeded for %s\n", share_access, share_deny, filename ? filename : "???");
5815 if (filename && (filename != &smallname[0])) {
6d2010ae 5816 FREE(filename, M_TEMP);
0a7de745
A
5817 }
5818 if (vname) {
6d2010ae 5819 vnode_putname(vname);
0a7de745
A
5820 }
5821 if (dvp != NULLVP) {
6d2010ae 5822 vnode_put(dvp);
0a7de745
A
5823 }
5824 return error;
6d2010ae
A
5825}
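/*
 * Editor's note (not part of the original source): NFS_CLAIM_DELEGATE_CUR
 * converts opens performed locally under a delegation into real server-side
 * open state by presenting the delegation stateid together with the directory
 * file handle and name.  It is driven from
 * nfs4_claim_delegated_state_for_open_file() below, typically because the
 * delegation is about to be recalled or returned.
 */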
5826
b0d623f7
A
5827/*
5828 * Send an OPEN RPC to reclaim an open file.
5829 */
5830int
5831nfs4_open_reclaim_rpc(
5832 struct nfs_open_file *nofp,
5833 int share_access,
5834 int share_deny)
5835{
5836 struct nfsmount *nmp;
5837 struct nfs_open_owner *noop = nofp->nof_owner;
f427ee49 5838 struct nfs_vattr *nvattr;
b0d623f7
A
5839 int error = 0, lockerror = ENOENT, status;
5840 int nfsvers, numops;
5841 u_int64_t xid;
5842 nfsnode_t np = nofp->nof_np;
5843 struct nfsm_chain nmreq, nmrep;
5844 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6d2010ae 5845 uint32_t rflags = 0, delegation, recall = 0;
f427ee49 5846 fhandle_t *fh;
b0d623f7 5847 struct nfs_stateid dstateid;
6d2010ae
A
5848 char sbuf[64], *s = sbuf;
5849 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5850 struct kauth_ace ace;
5851 struct nfsreq_secinfo_args si;
b0d623f7
A
5852
5853 nmp = NFSTONMP(np);
0a7de745
A
5854 if (nfs_mount_gone(nmp)) {
5855 return ENXIO;
5856 }
b0d623f7
A
5857 nfsvers = nmp->nm_vers;
5858
0a7de745
A
5859 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
5860 return error;
5861 }
b0d623f7 5862
f427ee49
A
5863 fh = zalloc(nfs_fhandle_zone);
5864 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
5865 NVATTR_INIT(nvattr);
b0d623f7 5866 delegation = NFS_OPEN_DELEGATE_NONE;
6d2010ae
A
5867 dstateid = np->n_dstateid;
5868 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
b0d623f7
A
5869
5870 nfsm_chain_null(&nmreq);
5871 nfsm_chain_null(&nmrep);
5872
5873 // PUTFH, OPEN, GETATTR(FH)
5874 numops = 3;
5875 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
3e170ce0 5876 nfsm_chain_add_compound_header(error, &nmreq, "open_reclaim", nmp->nm_minor_vers, numops);
b0d623f7
A
5877 numops--;
5878 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5879 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5880 numops--;
5881 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5882 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5883 nfsm_chain_add_32(error, &nmreq, share_access);
5884 nfsm_chain_add_32(error, &nmreq, share_deny);
5885 // open owner: clientid + uid
5886 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5887 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5888 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
5889 // openflag4
5890 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5891 // open_claim4
5892 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_PREVIOUS);
5893 delegation = (np->n_openflags & N_DELEG_READ) ? NFS_OPEN_DELEGATE_READ :
0a7de745
A
5894 (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE :
5895 NFS_OPEN_DELEGATE_NONE;
b0d623f7
A
5896 nfsm_chain_add_32(error, &nmreq, delegation);
5897 delegation = NFS_OPEN_DELEGATE_NONE;
5898 numops--;
5899 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5900 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5901 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
6d2010ae 5902 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
b0d623f7
A
5903 nfsm_chain_build_done(error, &nmreq);
5904 nfsm_assert(error, (numops == 0), EPROTO);
5905 nfsmout_if(error);
5906
6d2010ae 5907 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
0a7de745 5908 noop->noo_cred, &si, R_RECOVER | R_NOINTR, &nmrep, &xid, &status);
b0d623f7 5909
0a7de745 5910 if ((lockerror = nfs_node_lock(np))) {
b0d623f7 5911 error = lockerror;
0a7de745 5912 }
b0d623f7
A
5913 nfsm_chain_skip_tag(error, &nmrep);
5914 nfsm_chain_get_32(error, &nmrep, numops);
5915 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5916 nfsmout_if(error);
5917 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5918 nfs_owner_seqid_increment(noop, NULL, error);
5919 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5920 nfsm_chain_check_change_info(error, &nmrep, np);
5921 nfsm_chain_get_32(error, &nmrep, rflags);
5922 bmlen = NFS_ATTR_BITMAP_LEN;
5923 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5924 nfsm_chain_get_32(error, &nmrep, delegation);
0a7de745 5925 if (!error) {
b0d623f7
A
5926 switch (delegation) {
5927 case NFS_OPEN_DELEGATE_NONE:
6d2010ae
A
5928 if (np->n_openflags & N_DELEG_MASK) {
5929 /*
5930 * Hey! We were supposed to get our delegation back even
5931 * if it was getting immediately recalled. Bad server!
5932 *
5933 * Just try to return the existing delegation.
5934 */
5935 // NP(np, "nfs: open reclaim didn't return delegation?");
5936 delegation = (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE : NFS_OPEN_DELEGATE_READ;
5937 recall = 1;
b0d623f7
A
5938 }
5939 break;
6d2010ae 5940 case NFS_OPEN_DELEGATE_READ:
b0d623f7
A
5941 case NFS_OPEN_DELEGATE_WRITE:
5942 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5943 nfsm_chain_get_32(error, &nmrep, recall);
0a7de745 5944 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
6d2010ae 5945 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
0a7de745 5946 }
6d2010ae
A
5947 /* if we have any trouble accepting the ACE, just invalidate it */
5948 ace_type = ace_flags = ace_mask = len = 0;
5949 nfsm_chain_get_32(error, &nmrep, ace_type);
5950 nfsm_chain_get_32(error, &nmrep, ace_flags);
5951 nfsm_chain_get_32(error, &nmrep, ace_mask);
5952 nfsm_chain_get_32(error, &nmrep, len);
5953 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5954 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5955 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5956 if (!error && (len >= slen)) {
0a7de745
A
5957 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
5958 if (s) {
5959 slen = len + 1;
5960 } else {
6d2010ae 5961 ace.ace_flags = 0;
0a7de745 5962 }
6d2010ae 5963 }
0a7de745 5964 if (s) {
6d2010ae 5965 nfsm_chain_get_opaque(error, &nmrep, len, s);
0a7de745 5966 } else {
6d2010ae 5967 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
0a7de745 5968 }
6d2010ae
A
5969 if (!error && s) {
5970 s[len] = '\0';
0a7de745 5971 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
6d2010ae 5972 ace.ace_flags = 0;
0a7de745 5973 }
6d2010ae 5974 }
0a7de745 5975 if (error || !s) {
6d2010ae 5976 ace.ace_flags = 0;
0a7de745
A
5977 }
5978 if (s && (s != sbuf)) {
6d2010ae 5979 FREE(s, M_TEMP);
0a7de745 5980 }
b0d623f7
A
5981 if (!error) {
5982 /* stuff the delegation state in the node */
5983 lck_mtx_lock(&np->n_openlock);
5984 np->n_openflags &= ~N_DELEG_MASK;
6d2010ae 5985 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
b0d623f7 5986 np->n_dstateid = dstateid;
6d2010ae
A
5987 np->n_dace = ace;
5988 if (np->n_dlink.tqe_next == NFSNOLIST) {
5989 lck_mtx_lock(&nmp->nm_lock);
0a7de745 5990 if (np->n_dlink.tqe_next == NFSNOLIST) {
6d2010ae 5991 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
0a7de745 5992 }
6d2010ae
A
5993 lck_mtx_unlock(&nmp->nm_lock);
5994 }
b0d623f7
A
5995 lck_mtx_unlock(&np->n_openlock);
5996 }
5997 break;
5998 default:
5999 error = EBADRPC;
6000 break;
6001 }
0a7de745 6002 }
b0d623f7
A
6003 nfsmout_if(error);
6004 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
f427ee49 6005 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
b0d623f7 6006 nfsmout_if(error);
f427ee49 6007 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
6d2010ae 6008 NP(np, "nfs: open reclaim didn't return filehandle?");
b0d623f7
A
6009 error = EBADRPC;
6010 goto nfsmout;
6011 }
f427ee49 6012 if (!NFS_CMPFH(np, fh->fh_data, fh->fh_len)) {
b0d623f7 6013 // XXX what if fh doesn't match the vnode we think we're re-opening?
6d2010ae
A
6014 // That should be pretty hard in this case, given that we are doing
6015 // the open reclaim using the file handle (and not a dir/name pair).
6016 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
0a7de745 6017 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
6d2010ae 6018 NP(np, "nfs4_open_reclaim_rpc: warning: file handle mismatch");
0a7de745 6019 }
b0d623f7 6020 }
f427ee49 6021 error = nfs_loadattrcache(np, nvattr, &xid, 1);
b0d623f7 6022 nfsmout_if(error);
0a7de745 6023 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
b0d623f7 6024 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
0a7de745 6025 }
b0d623f7 6026nfsmout:
6d2010ae 6027 // if (!error)
0a7de745 6028 // NP(np, "nfs: open reclaim (%d, %d) succeeded", share_access, share_deny);
f427ee49
A
6029 NVATTR_CLEANUP(nvattr);
6030 FREE(nvattr, M_TEMP);
6031 NFS_ZFREE(nfs_fhandle_zone, fh);
b0d623f7
A
6032 nfsm_chain_cleanup(&nmreq);
6033 nfsm_chain_cleanup(&nmrep);
0a7de745 6034 if (!lockerror) {
b0d623f7 6035 nfs_node_unlock(np);
0a7de745 6036 }
b0d623f7
A
6037 nfs_open_owner_clear_busy(noop);
6038 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
0a7de745 6039 if (recall) {
6d2010ae 6040 nfs4_delegation_return_enqueue(np);
0a7de745 6041 }
b0d623f7 6042 }
0a7de745 6043 return error;
b0d623f7 6044}
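/*
 * Editor's note (not part of the original source): NFS_CLAIM_PREVIOUS is the
 * post-server-reboot recovery path: during the server's grace period the client
 * reclaims each open it previously held using the file handle it already has
 * (no directory/name lookup) and asks for its previous delegation type back.
 * The request is sent with R_RECOVER|R_NOINTR so it can proceed while state
 * recovery is in progress and is not interruptible.
 */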
2d21ac55 6045
b0d623f7
A
6046int
6047nfs4_open_downgrade_rpc(
6048 nfsnode_t np,
6049 struct nfs_open_file *nofp,
6050 vfs_context_t ctx)
6051{
6052 struct nfs_open_owner *noop = nofp->nof_owner;
6053 struct nfsmount *nmp;
6054 int error, lockerror = ENOENT, status, nfsvers, numops;
6055 struct nfsm_chain nmreq, nmrep;
6056 u_int64_t xid;
6d2010ae 6057 struct nfsreq_secinfo_args si;
2d21ac55 6058
b0d623f7 6059 nmp = NFSTONMP(np);
0a7de745
A
6060 if (nfs_mount_gone(nmp)) {
6061 return ENXIO;
6062 }
b0d623f7
A
6063 nfsvers = nmp->nm_vers;
6064
0a7de745
A
6065 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
6066 return error;
6067 }
b0d623f7 6068
6d2010ae 6069 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
b0d623f7
A
6070 nfsm_chain_null(&nmreq);
6071 nfsm_chain_null(&nmrep);
6072
6073 // PUTFH, OPEN_DOWNGRADE, GETATTR
6074 numops = 3;
6075 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
3e170ce0 6076 nfsm_chain_add_compound_header(error, &nmreq, "open_downgrd", nmp->nm_minor_vers, numops);
b0d623f7
A
6077 numops--;
6078 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6079 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
6080 numops--;
6081 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_DOWNGRADE);
6082 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
6083 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
6084 nfsm_chain_add_32(error, &nmreq, nofp->nof_access);
6085 nfsm_chain_add_32(error, &nmreq, nofp->nof_deny);
6086 numops--;
6087 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 6088 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
b0d623f7
A
6089 nfsm_chain_build_done(error, &nmreq);
6090 nfsm_assert(error, (numops == 0), EPROTO);
6091 nfsmout_if(error);
6d2010ae 6092 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
0a7de745
A
6093 vfs_context_thread(ctx), vfs_context_ucred(ctx),
6094 &si, R_NOINTR, &nmrep, &xid, &status);
b0d623f7 6095
0a7de745 6096 if ((lockerror = nfs_node_lock(np))) {
b0d623f7 6097 error = lockerror;
0a7de745 6098 }
b0d623f7
A
6099 nfsm_chain_skip_tag(error, &nmrep);
6100 nfsm_chain_get_32(error, &nmrep, numops);
6101 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
2d21ac55 6102 nfsmout_if(error);
b0d623f7
A
6103 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_DOWNGRADE);
6104 nfs_owner_seqid_increment(noop, NULL, error);
6105 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
6106 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 6107 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
b0d623f7 6108nfsmout:
0a7de745 6109 if (!lockerror) {
b0d623f7 6110 nfs_node_unlock(np);
0a7de745 6111 }
b0d623f7 6112 nfs_open_owner_clear_busy(noop);
2d21ac55
A
6113 nfsm_chain_cleanup(&nmreq);
6114 nfsm_chain_cleanup(&nmrep);
0a7de745 6115 return error;
b0d623f7 6116}
2d21ac55 6117
b0d623f7
A
6118int
6119nfs4_close_rpc(
6120 nfsnode_t np,
6121 struct nfs_open_file *nofp,
6122 thread_t thd,
6123 kauth_cred_t cred,
6d2010ae 6124 int flags)
b0d623f7
A
6125{
6126 struct nfs_open_owner *noop = nofp->nof_owner;
6127 struct nfsmount *nmp;
6128 int error, lockerror = ENOENT, status, nfsvers, numops;
6129 struct nfsm_chain nmreq, nmrep;
6130 u_int64_t xid;
6d2010ae 6131 struct nfsreq_secinfo_args si;
b0d623f7
A
6132
6133 nmp = NFSTONMP(np);
0a7de745
A
6134 if (nfs_mount_gone(nmp)) {
6135 return ENXIO;
6136 }
b0d623f7
A
6137 nfsvers = nmp->nm_vers;
6138
0a7de745
A
6139 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
6140 return error;
6141 }
b0d623f7 6142
6d2010ae 6143 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
b0d623f7
A
6144 nfsm_chain_null(&nmreq);
6145 nfsm_chain_null(&nmrep);
6146
6d2010ae 6147 // PUTFH, CLOSE, GETATTR
b0d623f7
A
6148 numops = 3;
6149 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
3e170ce0 6150 nfsm_chain_add_compound_header(error, &nmreq, "close", nmp->nm_minor_vers, numops);
2d21ac55
A
6151 numops--;
6152 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
b0d623f7 6153 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
2d21ac55
A
6154 numops--;
6155 nfsm_chain_add_32(error, &nmreq, NFS_OP_CLOSE);
b0d623f7
A
6156 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
6157 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
6158 numops--;
6159 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 6160 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
2d21ac55
A
6161 nfsm_chain_build_done(error, &nmreq);
6162 nfsm_assert(error, (numops == 0), EPROTO);
6163 nfsmout_if(error);
0a7de745 6164 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
2d21ac55 6165
0a7de745 6166 if ((lockerror = nfs_node_lock(np))) {
b0d623f7 6167 error = lockerror;
0a7de745 6168 }
2d21ac55
A
6169 nfsm_chain_skip_tag(error, &nmrep);
6170 nfsm_chain_get_32(error, &nmrep, numops);
6171 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
b0d623f7 6172 nfsmout_if(error);
2d21ac55 6173 nfsm_chain_op_check(error, &nmrep, NFS_OP_CLOSE);
b0d623f7
A
6174 nfs_owner_seqid_increment(noop, NULL, error);
6175 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
6176 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 6177 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
b0d623f7 6178nfsmout:
0a7de745 6179 if (!lockerror) {
b0d623f7 6180 nfs_node_unlock(np);
0a7de745 6181 }
b0d623f7
A
6182 nfs_open_owner_clear_busy(noop);
6183 nfsm_chain_cleanup(&nmreq);
6184 nfsm_chain_cleanup(&nmrep);
0a7de745 6185 return error;
b0d623f7
A
6186}
6187
6188
b0d623f7 6189/*
6d2010ae 6190 * Claim the delegated open combinations this open file holds.
b0d623f7
A
6191 */
6192int
6d2010ae 6193nfs4_claim_delegated_state_for_open_file(struct nfs_open_file *nofp, int flags)
b0d623f7 6194{
6d2010ae
A
6195 struct nfs_open_owner *noop = nofp->nof_owner;
6196 struct nfs_lock_owner *nlop;
6197 struct nfs_file_lock *nflp, *nextnflp;
b0d623f7 6198 struct nfsmount *nmp;
6d2010ae 6199 int error = 0, reopen = 0;
b0d623f7 6200
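	/*
	 * Each delegated open count (nof_d_*) below is reclaimed with its own
	 * nfs4_claim_delegated_open_rpc() call, most restrictive deny modes
	 * first; on success the delegated count is folded into the matching
	 * confirmed open count.
	 */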
6d2010ae
A
6201 if (nofp->nof_d_rw_drw) {
6202 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_BOTH, flags);
6203 if (!error) {
6204 lck_mtx_lock(&nofp->nof_lock);
6205 nofp->nof_rw_drw += nofp->nof_d_rw_drw;
6206 nofp->nof_d_rw_drw = 0;
6207 lck_mtx_unlock(&nofp->nof_lock);
6208 }
b0d623f7 6209 }
6d2010ae
A
6210 if (!error && nofp->nof_d_w_drw) {
6211 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_BOTH, flags);
6212 if (!error) {
6213 lck_mtx_lock(&nofp->nof_lock);
6214 nofp->nof_w_drw += nofp->nof_d_w_drw;
6215 nofp->nof_d_w_drw = 0;
6216 lck_mtx_unlock(&nofp->nof_lock);
6217 }
b0d623f7 6218 }
6d2010ae
A
6219 if (!error && nofp->nof_d_r_drw) {
6220 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_BOTH, flags);
6221 if (!error) {
6222 lck_mtx_lock(&nofp->nof_lock);
6223 nofp->nof_r_drw += nofp->nof_d_r_drw;
6224 nofp->nof_d_r_drw = 0;
6225 lck_mtx_unlock(&nofp->nof_lock);
6226 }
6227 }
6228 if (!error && nofp->nof_d_rw_dw) {
6229 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_WRITE, flags);
6230 if (!error) {
6231 lck_mtx_lock(&nofp->nof_lock);
6232 nofp->nof_rw_dw += nofp->nof_d_rw_dw;
6233 nofp->nof_d_rw_dw = 0;
6234 lck_mtx_unlock(&nofp->nof_lock);
6235 }
6236 }
6237 if (!error && nofp->nof_d_w_dw) {
6238 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_WRITE, flags);
6239 if (!error) {
6240 lck_mtx_lock(&nofp->nof_lock);
6241 nofp->nof_w_dw += nofp->nof_d_w_dw;
6242 nofp->nof_d_w_dw = 0;
6243 lck_mtx_unlock(&nofp->nof_lock);
6244 }
6245 }
6246 if (!error && nofp->nof_d_r_dw) {
6247 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_WRITE, flags);
6248 if (!error) {
6249 lck_mtx_lock(&nofp->nof_lock);
6250 nofp->nof_r_dw += nofp->nof_d_r_dw;
6251 nofp->nof_d_r_dw = 0;
6252 lck_mtx_unlock(&nofp->nof_lock);
6253 }
6254 }
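	/*
	 * Note that the deny-mode claims above have no reopen fallback: an
	 * error there just aborts the sequence and is handled by the common
	 * error paths at the end of this function.
	 */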
6255 /* non-deny-mode opens may be reopened if no locks are held */
6256 if (!error && nofp->nof_d_rw) {
6257 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, flags);
6258 /* for some errors, we should just try reopening the file */
0a7de745 6259 if (nfs_mount_state_error_delegation_lost(error)) {
6d2010ae 6260 reopen = error;
0a7de745 6261 }
6d2010ae
A
6262 if (!error || reopen) {
6263 lck_mtx_lock(&nofp->nof_lock);
6264 nofp->nof_rw += nofp->nof_d_rw;
6265 nofp->nof_d_rw = 0;
6266 lck_mtx_unlock(&nofp->nof_lock);
6267 }
6268 }
 6269 /* if we've already set reopen, we should move these other two opens from delegated to non-delegated */
6270 if ((!error || reopen) && nofp->nof_d_w) {
6271 if (!error) {
6272 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE, flags);
6273 /* for some errors, we should just try reopening the file */
0a7de745 6274 if (nfs_mount_state_error_delegation_lost(error)) {
6d2010ae 6275 reopen = error;
0a7de745 6276 }
6d2010ae
A
6277 }
6278 if (!error || reopen) {
6279 lck_mtx_lock(&nofp->nof_lock);
6280 nofp->nof_w += nofp->nof_d_w;
6281 nofp->nof_d_w = 0;
6282 lck_mtx_unlock(&nofp->nof_lock);
6283 }
6284 }
6285 if ((!error || reopen) && nofp->nof_d_r) {
6286 if (!error) {
6287 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, flags);
6288 /* for some errors, we should just try reopening the file */
0a7de745 6289 if (nfs_mount_state_error_delegation_lost(error)) {
6d2010ae 6290 reopen = error;
0a7de745 6291 }
6d2010ae
A
6292 }
6293 if (!error || reopen) {
6294 lck_mtx_lock(&nofp->nof_lock);
6295 nofp->nof_r += nofp->nof_d_r;
6296 nofp->nof_d_r = 0;
6297 lck_mtx_unlock(&nofp->nof_lock);
6298 }
6299 }
6300
6301 if (reopen) {
6302 /*
 6303 * Any problems with the delegation probably indicate that we
6304 * should review/return all of our current delegation state.
6305 */
6306 if ((nmp = NFSTONMP(nofp->nof_np))) {
6307 nfs4_delegation_return_enqueue(nofp->nof_np);
6308 lck_mtx_lock(&nmp->nm_lock);
6309 nfs_need_recover(nmp, NFSERR_EXPIRED);
6310 lck_mtx_unlock(&nmp->nm_lock);
6311 }
6312 if (reopen && (nfs_check_for_locks(noop, nofp) == 0)) {
6313 /* just reopen the file on next access */
6314 NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, need reopen, %d",
0a7de745 6315 reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6d2010ae
A
6316 lck_mtx_lock(&nofp->nof_lock);
6317 nofp->nof_flags |= NFS_OPEN_FILE_REOPEN;
6318 lck_mtx_unlock(&nofp->nof_lock);
0a7de745 6319 return 0;
6d2010ae 6320 }
0a7de745 6321 if (reopen) {
6d2010ae 6322 NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, locks prevent reopen, %d",
0a7de745
A
6323 reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6324 }
6d2010ae
A
6325 }
6326
6327 if (!error && ((nmp = NFSTONMP(nofp->nof_np)))) {
6328 /* claim delegated locks */
6329 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
0a7de745 6330 if (nlop->nlo_open_owner != noop) {
6d2010ae 6331 continue;
0a7de745 6332 }
6d2010ae
A
6333 TAILQ_FOREACH_SAFE(nflp, &nlop->nlo_locks, nfl_lolink, nextnflp) {
6334 /* skip dead & blocked lock requests (shouldn't be any in the held lock list) */
0a7de745 6335 if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) {
6d2010ae 6336 continue;
0a7de745 6337 }
6d2010ae 6338 /* skip non-delegated locks */
0a7de745 6339 if (!(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
6d2010ae 6340 continue;
0a7de745 6341 }
6d2010ae
A
6342 error = nmp->nm_funcs->nf_setlock_rpc(nofp->nof_np, nofp, nflp, 0, flags, current_thread(), noop->noo_cred);
6343 if (error) {
6344 NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) failed %d, %d",
0a7de745 6345 nflp->nfl_start, nflp->nfl_end, error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6d2010ae
A
6346 break;
6347 }
6348 // else {
0a7de745
A
6349 // NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) succeeded, %d",
6350 // nflp->nfl_start, nflp->nfl_end, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6d2010ae
A
6351 // }
6352 }
0a7de745 6353 if (error) {
6d2010ae 6354 break;
0a7de745 6355 }
6d2010ae
A
6356 }
6357 }
6358
0a7de745
A
6359 if (!error) { /* all state claimed successfully! */
6360 return 0;
6361 }
6d2010ae
A
6362
 6363 /* restart if it looks like a bigger problem than just losing the delegation */
6364 if (!nfs_mount_state_error_delegation_lost(error) &&
6365 ((error == ETIMEDOUT) || nfs_mount_state_error_should_restart(error))) {
6366 NP(nofp->nof_np, "nfs delegated lock claim error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 6367 if ((error == ETIMEDOUT) && ((nmp = NFSTONMP(nofp->nof_np)))) {
6d2010ae 6368 nfs_need_reconnect(nmp);
0a7de745
A
6369 }
6370 return error;
b0d623f7 6371 }
6d2010ae 6372
0a7de745 6373 /* delegated state lost (once held but now not claimable) */
6d2010ae
A
6374 NP(nofp->nof_np, "nfs delegated state claim error %d, state lost, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6375
6376 /*
 6377 * Any problems with the delegation probably indicate that we
6378 * should review/return all of our current delegation state.
6379 */
6380 if ((nmp = NFSTONMP(nofp->nof_np))) {
6381 nfs4_delegation_return_enqueue(nofp->nof_np);
6382 lck_mtx_lock(&nmp->nm_lock);
6383 nfs_need_recover(nmp, NFSERR_EXPIRED);
6384 lck_mtx_unlock(&nmp->nm_lock);
6385 }
6386
6387 /* revoke all open file state */
6388 nfs_revoke_open_state_for_node(nofp->nof_np);
6389
0a7de745 6390 return error;
6d2010ae 6391}
cb323159 6392#endif /* CONFIG_NFS4 */
6d2010ae
A
6393
6394/*
6395 * Release all open state for the given node.
6396 */
6397void
6398nfs_release_open_state_for_node(nfsnode_t np, int force)
6399{
6400 struct nfsmount *nmp = NFSTONMP(np);
6401 struct nfs_open_file *nofp;
6402 struct nfs_file_lock *nflp, *nextnflp;
6403
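	/*
	 * Two passes below: first kill off any held file locks (sending unlock
	 * RPCs unless forced or the lock was only held under a delegation),
	 * then mark every open on the node as lost, sending a CLOSE for NFSv4
	 * opens when possible.
	 */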
6404 /* drop held locks */
6405 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
6406 /* skip dead & blocked lock requests */
0a7de745 6407 if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) {
6d2010ae 6408 continue;
0a7de745 6409 }
6d2010ae 6410 /* send an unlock if not a delegated lock */
0a7de745 6411 if (!force && nmp && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
6d2010ae 6412 nmp->nm_funcs->nf_unlock_rpc(np, nflp->nfl_owner, F_WRLCK, nflp->nfl_start, nflp->nfl_end, R_RECOVER,
0a7de745
A
6413 NULL, nflp->nfl_owner->nlo_open_owner->noo_cred);
6414 }
6d2010ae
A
6415 /* kill/remove the lock */
6416 lck_mtx_lock(&np->n_openlock);
6417 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
6418 lck_mtx_lock(&nflp->nfl_owner->nlo_lock);
6419 TAILQ_REMOVE(&nflp->nfl_owner->nlo_locks, nflp, nfl_lolink);
6420 lck_mtx_unlock(&nflp->nfl_owner->nlo_lock);
6421 if (nflp->nfl_blockcnt) {
6422 /* wake up anyone blocked on this lock */
6423 wakeup(nflp);
6424 } else {
6425 /* remove nflp from lock list and destroy */
6426 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
6427 nfs_file_lock_destroy(nflp);
2d21ac55 6428 }
6d2010ae
A
6429 lck_mtx_unlock(&np->n_openlock);
6430 }
6431
6432 lck_mtx_lock(&np->n_openlock);
6433
6434 /* drop all opens */
6435 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
0a7de745 6436 if (nofp->nof_flags & NFS_OPEN_FILE_LOST) {
6d2010ae 6437 continue;
0a7de745 6438 }
6d2010ae
A
6439 /* mark open state as lost */
6440 lck_mtx_lock(&nofp->nof_lock);
6441 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
6442 nofp->nof_flags |= NFS_OPEN_FILE_LOST;
0a7de745 6443
6d2010ae 6444 lck_mtx_unlock(&nofp->nof_lock);
cb323159 6445#if CONFIG_NFS4
0a7de745 6446 if (!force && nmp && (nmp->nm_vers >= NFS_VER4)) {
6d2010ae 6447 nfs4_close_rpc(np, nofp, NULL, nofp->nof_owner->noo_cred, R_RECOVER);
0a7de745 6448 }
cb323159 6449#endif
6d2010ae
A
6450 }
6451
6452 lck_mtx_unlock(&np->n_openlock);
6453}
6454
6455/*
6456 * State for a node has been lost, drop it, and revoke the node.
6457 * Attempt to return any state if possible in case the server
6458 * might somehow think we hold it.
6459 */
6460void
6461nfs_revoke_open_state_for_node(nfsnode_t np)
6462{
6463 struct nfsmount *nmp;
6464
6465 /* mark node as needing to be revoked */
6466 nfs_node_lock_force(np);
0a7de745 6467 if (np->n_flag & NREVOKE) { /* already revoked? */
6d2010ae
A
6468 NP(np, "nfs_revoke_open_state_for_node(): already revoked");
6469 nfs_node_unlock(np);
6470 return;
6471 }
6472 np->n_flag |= NREVOKE;
6473 nfs_node_unlock(np);
6474
6475 nfs_release_open_state_for_node(np, 0);
6476 NP(np, "nfs: state lost for %p 0x%x", np, np->n_flag);
6477
6478 /* mark mount as needing a revoke scan and have the socket thread do it. */
6479 if ((nmp = NFSTONMP(np))) {
6480 lck_mtx_lock(&nmp->nm_lock);
6481 nmp->nm_state |= NFSSTA_REVOKE;
6482 nfs_mount_sock_thread_wake(nmp);
6483 lck_mtx_unlock(&nmp->nm_lock);
6484 }
6485}
6486
cb323159 6487#if CONFIG_NFS4
6d2010ae
A
6488/*
 6489 * Claim the delegated open combinations that each of this node's open files holds.
6490 */
6491int
6492nfs4_claim_delegated_state_for_node(nfsnode_t np, int flags)
6493{
6494 struct nfs_open_file *nofp;
6495 int error = 0;
6496
6497 lck_mtx_lock(&np->n_openlock);
6498
6499 /* walk the open file list looking for opens with delegated state to claim */
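	/*
	 * n_openlock is dropped around each claim RPC, so the open list may
	 * change underneath us; after every successful claim the walk restarts
	 * from the head of the list.
	 */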
6500restart:
6501 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
6502 if (!nofp->nof_d_rw_drw && !nofp->nof_d_w_drw && !nofp->nof_d_r_drw &&
6503 !nofp->nof_d_rw_dw && !nofp->nof_d_w_dw && !nofp->nof_d_r_dw &&
0a7de745 6504 !nofp->nof_d_rw && !nofp->nof_d_w && !nofp->nof_d_r) {
6d2010ae 6505 continue;
0a7de745 6506 }
6d2010ae
A
6507 lck_mtx_unlock(&np->n_openlock);
6508 error = nfs4_claim_delegated_state_for_open_file(nofp, flags);
6509 lck_mtx_lock(&np->n_openlock);
0a7de745 6510 if (error) {
6d2010ae 6511 break;
0a7de745 6512 }
6d2010ae
A
6513 goto restart;
6514 }
6515
6516 lck_mtx_unlock(&np->n_openlock);
6517
0a7de745 6518 return error;
6d2010ae
A
6519}
6520
6521/*
 6522 * Mark a node as needing to have its delegation returned.
6523 * Queue it up on the delegation return queue.
6524 * Make sure the thread is running.
6525 */
6526void
6527nfs4_delegation_return_enqueue(nfsnode_t np)
6528{
6529 struct nfsmount *nmp;
6530
6531 nmp = NFSTONMP(np);
0a7de745 6532 if (nfs_mount_gone(nmp)) {
6d2010ae 6533 return;
0a7de745 6534 }
6d2010ae
A
6535
6536 lck_mtx_lock(&np->n_openlock);
6537 np->n_openflags |= N_DELEG_RETURN;
6538 lck_mtx_unlock(&np->n_openlock);
6539
6540 lck_mtx_lock(&nmp->nm_lock);
0a7de745 6541 if (np->n_dreturn.tqe_next == NFSNOLIST) {
6d2010ae 6542 TAILQ_INSERT_TAIL(&nmp->nm_dreturnq, np, n_dreturn);
0a7de745 6543 }
6d2010ae
A
6544 nfs_mount_sock_thread_wake(nmp);
6545 lck_mtx_unlock(&nmp->nm_lock);
6546}
6547
6548/*
6549 * return any delegation we may have for the given node
6550 */
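/*
 * The return is done in three steps: mark the node as returning and take the
 * open state busy lock, claim any delegated opens/locks into regular state,
 * then send DELEGRETURN and drop the node from the mount's delegation list.
 */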
6551int
6552nfs4_delegation_return(nfsnode_t np, int flags, thread_t thd, kauth_cred_t cred)
6553{
6554 struct nfsmount *nmp;
f427ee49 6555 fhandle_t *fh;
6d2010ae
A
6556 nfs_stateid dstateid;
6557 int error;
6558
6559 nmp = NFSTONMP(np);
0a7de745
A
6560 if (nfs_mount_gone(nmp)) {
6561 return ENXIO;
6562 }
6d2010ae 6563
f427ee49
A
6564 fh = zalloc(nfs_fhandle_zone);
6565
6d2010ae
A
6566 /* first, make sure the node's marked for delegation return */
6567 lck_mtx_lock(&np->n_openlock);
0a7de745 6568 np->n_openflags |= (N_DELEG_RETURN | N_DELEG_RETURNING);
6d2010ae
A
6569 lck_mtx_unlock(&np->n_openlock);
6570
6571 /* make sure nobody else is using the delegation state */
0a7de745 6572 if ((error = nfs_open_state_set_busy(np, NULL))) {
6d2010ae 6573 goto out;
0a7de745 6574 }
6d2010ae
A
6575
6576 /* claim any delegated state */
0a7de745 6577 if ((error = nfs4_claim_delegated_state_for_node(np, flags))) {
6d2010ae 6578 goto out;
0a7de745 6579 }
6d2010ae
A
6580
6581 /* return the delegation */
6582 lck_mtx_lock(&np->n_openlock);
6583 dstateid = np->n_dstateid;
f427ee49
A
6584 fh->fh_len = np->n_fhsize;
6585 bcopy(np->n_fhp, fh->fh_data, fh->fh_len);
6d2010ae 6586 lck_mtx_unlock(&np->n_openlock);
f427ee49 6587 error = nfs4_delegreturn_rpc(NFSTONMP(np), fh->fh_data, fh->fh_len, &dstateid, flags, thd, cred);
6d2010ae
A
6588 /* assume delegation is gone for all errors except ETIMEDOUT, NFSERR_*MOVED */
6589 if ((error != ETIMEDOUT) && (error != NFSERR_MOVED) && (error != NFSERR_LEASE_MOVED)) {
6590 lck_mtx_lock(&np->n_openlock);
6591 np->n_openflags &= ~N_DELEG_MASK;
6592 lck_mtx_lock(&nmp->nm_lock);
6593 if (np->n_dlink.tqe_next != NFSNOLIST) {
6594 TAILQ_REMOVE(&nmp->nm_delegations, np, n_dlink);
6595 np->n_dlink.tqe_next = NFSNOLIST;
6596 }
6597 lck_mtx_unlock(&nmp->nm_lock);
6598 lck_mtx_unlock(&np->n_openlock);
6599 }
6600
6601out:
6602 /* make sure it's no longer on the return queue and clear the return flags */
6603 lck_mtx_lock(&nmp->nm_lock);
6604 if (np->n_dreturn.tqe_next != NFSNOLIST) {
6605 TAILQ_REMOVE(&nmp->nm_dreturnq, np, n_dreturn);
6606 np->n_dreturn.tqe_next = NFSNOLIST;
6607 }
6608 lck_mtx_unlock(&nmp->nm_lock);
6609 lck_mtx_lock(&np->n_openlock);
0a7de745 6610 np->n_openflags &= ~(N_DELEG_RETURN | N_DELEG_RETURNING);
6d2010ae
A
6611 lck_mtx_unlock(&np->n_openlock);
6612
6613 if (error) {
6614 NP(np, "nfs4_delegation_return, error %d", error);
0a7de745 6615 if (error == ETIMEDOUT) {
6d2010ae 6616 nfs_need_reconnect(nmp);
0a7de745 6617 }
6d2010ae
A
6618 if (nfs_mount_state_error_should_restart(error)) {
6619 /* make sure recovery happens */
6620 lck_mtx_lock(&nmp->nm_lock);
6621 nfs_need_recover(nmp, nfs_mount_state_error_delegation_lost(error) ? NFSERR_EXPIRED : 0);
6622 lck_mtx_unlock(&nmp->nm_lock);
2d21ac55
A
6623 }
6624 }
6d2010ae
A
6625
6626 nfs_open_state_clear_busy(np);
f427ee49 6627 NFS_ZFREE(nfs_fhandle_zone, fh);
0a7de745 6628 return error;
b0d623f7 6629}
2d21ac55 6630
b0d623f7 6631/*
6d2010ae
A
6632 * RPC to return a delegation for a file handle
6633 */
6634int
6635nfs4_delegreturn_rpc(struct nfsmount *nmp, u_char *fhp, int fhlen, struct nfs_stateid *sid, int flags, thread_t thd, kauth_cred_t cred)
6636{
6637 int error = 0, status, numops;
6638 uint64_t xid;
6639 struct nfsm_chain nmreq, nmrep;
6640 struct nfsreq_secinfo_args si;
6641
6642 NFSREQ_SECINFO_SET(&si, NULL, fhp, fhlen, NULL, 0);
6643 nfsm_chain_null(&nmreq);
6644 nfsm_chain_null(&nmrep);
6645
6646 // PUTFH, DELEGRETURN
6647 numops = 2;
6648 nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
3e170ce0 6649 nfsm_chain_add_compound_header(error, &nmreq, "delegreturn", nmp->nm_minor_vers, numops);
6d2010ae
A
6650 numops--;
6651 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6652 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
6653 numops--;
6654 nfsm_chain_add_32(error, &nmreq, NFS_OP_DELEGRETURN);
6655 nfsm_chain_add_stateid(error, &nmreq, sid);
6656 nfsm_chain_build_done(error, &nmreq);
6657 nfsm_assert(error, (numops == 0), EPROTO);
6658 nfsmout_if(error);
6659 error = nfs_request2(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags, &nmrep, &xid, &status);
6660 nfsm_chain_skip_tag(error, &nmrep);
6661 nfsm_chain_get_32(error, &nmrep, numops);
6662 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6663 nfsm_chain_op_check(error, &nmrep, NFS_OP_DELEGRETURN);
6664nfsmout:
6665 nfsm_chain_cleanup(&nmreq);
6666 nfsm_chain_cleanup(&nmrep);
0a7de745 6667 return error;
6d2010ae 6668}
cb323159 6669#endif /* CONFIG_NFS4 */
6d2010ae
A
6670
6671/*
6672 * NFS read call.
6673 * Just call nfs_bioread() to do the work.
6674 *
6675 * Note: the exec code paths have a tendency to call VNOP_READ (and VNOP_MMAP)
6676 * without first calling VNOP_OPEN, so we make sure the file is open here.
6677 */
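/*
 * If read access isn't already held, the open is performed under the open
 * owner/open file busy protocol: NFSv2/3 simply record a local open, NFSv4
 * sends an OPEN RPC, and the open is marked NFS_OPEN_FILE_NEEDCLOSE so the
 * file gets closed later.
 */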
6678int
6679nfs_vnop_read(
6680 struct vnop_read_args /* {
0a7de745
A
6681 * struct vnodeop_desc *a_desc;
6682 * vnode_t a_vp;
6683 * struct uio *a_uio;
6684 * int a_ioflag;
6685 * vfs_context_t a_context;
6686 * } */*ap)
6d2010ae
A
6687{
6688 vnode_t vp = ap->a_vp;
6689 vfs_context_t ctx = ap->a_context;
6690 nfsnode_t np;
6691 struct nfsmount *nmp;
6692 struct nfs_open_owner *noop;
6693 struct nfs_open_file *nofp;
6694 int error;
6695
0a7de745 6696 if (vnode_vtype(ap->a_vp) != VREG) {
39236c6e 6697 return (vnode_vtype(vp) == VDIR) ? EISDIR : EPERM;
0a7de745 6698 }
6d2010ae
A
6699
6700 np = VTONFS(vp);
6701 nmp = NFSTONMP(np);
0a7de745
A
6702 if (nfs_mount_gone(nmp)) {
6703 return ENXIO;
6704 }
6705 if (np->n_flag & NREVOKE) {
6706 return EIO;
6707 }
6d2010ae
A
6708
6709 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
0a7de745
A
6710 if (!noop) {
6711 return ENOMEM;
6712 }
6d2010ae
A
6713restart:
6714 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
6715 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6716 NP(np, "nfs_vnop_read: LOST %d", kauth_cred_getuid(noop->noo_cred));
6717 error = EIO;
6718 }
cb323159 6719#if CONFIG_NFS4
6d2010ae
A
6720 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6721 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
6722 nofp = NULL;
0a7de745 6723 if (!error) {
6d2010ae 6724 goto restart;
0a7de745 6725 }
6d2010ae 6726 }
cb323159 6727#endif
6d2010ae
A
6728 if (error) {
6729 nfs_open_owner_rele(noop);
0a7de745 6730 return error;
6d2010ae 6731 }
3e170ce0
A
6732 /*
6733 * Since the read path is a hot path, if we already have
 6734 * read access, let's go and try to do the read, without
6735 * busying the mount and open file node for this open owner.
6736 *
6737 * N.B. This is inherently racy w.r.t. an execve using
6738 * an already open file, in that the read at the end of
6739 * this routine will be racing with a potential close.
6740 * The code below ultimately has the same problem. In practice
6741 * this does not seem to be an issue.
6742 */
6743 if (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) {
6744 nfs_open_owner_rele(noop);
6745 goto do_read;
6746 }
6747 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
6748 if (error) {
6749 nfs_open_owner_rele(noop);
0a7de745 6750 return error;
3e170ce0
A
6751 }
6752 /*
6753 * If we don't have a file already open with the access we need (read) then
6754 * we need to open one. Otherwise we just co-opt an open. We might not already
6755 * have access because we're trying to read the first page of the
6756 * file for execve.
6757 */
6758 error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
6759 if (error) {
6760 nfs_mount_state_in_use_end(nmp, 0);
6761 nfs_open_owner_rele(noop);
0a7de745 6762 return error;
3e170ce0
A
6763 }
6764 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
6765 /* we don't have the file open, so open it for read access if we're not denied */
6766 if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
6767 NP(np, "nfs_vnop_read: File already needs close access: 0x%x, cred: %d thread: %lld",
0a7de745 6768 nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
3e170ce0
A
6769 }
6770 if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) {
6771 nfs_open_file_clear_busy(nofp);
6772 nfs_mount_state_in_use_end(nmp, 0);
6d2010ae 6773 nfs_open_owner_rele(noop);
0a7de745 6774 return EPERM;
6d2010ae
A
6775 }
6776 if (np->n_flag & NREVOKE) {
6777 error = EIO;
3e170ce0 6778 nfs_open_file_clear_busy(nofp);
6d2010ae
A
6779 nfs_mount_state_in_use_end(nmp, 0);
6780 nfs_open_owner_rele(noop);
0a7de745 6781 return error;
6d2010ae 6782 }
3e170ce0
A
6783 if (nmp->nm_vers < NFS_VER4) {
6784 /* NFS v2/v3 opens are always allowed - so just add it. */
6785 nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
cb323159
A
6786 }
6787#if CONFIG_NFS4
6788 else {
3e170ce0 6789 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
6d2010ae 6790 }
cb323159 6791#endif
0a7de745 6792 if (!error) {
6d2010ae 6793 nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
0a7de745 6794 }
3e170ce0 6795 }
0a7de745 6796 if (nofp) {
3e170ce0 6797 nfs_open_file_clear_busy(nofp);
0a7de745 6798 }
3e170ce0
A
6799 if (nfs_mount_state_in_use_end(nmp, error)) {
6800 nofp = NULL;
6801 goto restart;
6d2010ae
A
6802 }
6803 nfs_open_owner_rele(noop);
0a7de745
A
6804 if (error) {
6805 return error;
6806 }
3e170ce0 6807do_read:
0a7de745 6808 return nfs_bioread(VTONFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_context);
6d2010ae
A
6809}
6810
cb323159 6811#if CONFIG_NFS4
6d2010ae
A
6812/*
6813 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6814 * Files are created using the NFSv4 OPEN RPC. So we must open the
6815 * file to create it and then close it.
6816 */
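/*
 * The create is done with a provisional, nodeless open file: nfs4_open_rpc()
 * is sent with NFS_OPEN_CREATE and read/write access, the resulting open is
 * then merged into (or installed as) the node's open file, and the open is
 * marked NFS_OPEN_FILE_CREATE so a later close can balance it.
 */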
6817int
6818nfs4_vnop_create(
6819 struct vnop_create_args /* {
0a7de745
A
6820 * struct vnodeop_desc *a_desc;
6821 * vnode_t a_dvp;
6822 * vnode_t *a_vpp;
6823 * struct componentname *a_cnp;
6824 * struct vnode_attr *a_vap;
6825 * vfs_context_t a_context;
6826 * } */*ap)
6d2010ae
A
6827{
6828 vfs_context_t ctx = ap->a_context;
6829 struct componentname *cnp = ap->a_cnp;
6830 struct vnode_attr *vap = ap->a_vap;
6831 vnode_t dvp = ap->a_dvp;
6832 vnode_t *vpp = ap->a_vpp;
6833 struct nfsmount *nmp;
6834 nfsnode_t np;
6835 int error = 0, busyerror = 0, accessMode, denyMode;
6836 struct nfs_open_owner *noop = NULL;
6837 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
6838
6839 nmp = VTONMP(dvp);
0a7de745
A
6840 if (nfs_mount_gone(nmp)) {
6841 return ENXIO;
6842 }
6d2010ae 6843
0a7de745 6844 if (vap) {
6d2010ae 6845 nfs_avoid_needless_id_setting_on_create(VTONFS(dvp), vap, ctx);
0a7de745 6846 }
6d2010ae
A
6847
6848 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
0a7de745
A
6849 if (!noop) {
6850 return ENOMEM;
6851 }
6d2010ae
A
6852
6853restart:
6854 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
6855 if (error) {
6856 nfs_open_owner_rele(noop);
0a7de745 6857 return error;
6d2010ae
A
6858 }
6859
6860 /* grab a provisional, nodeless open file */
6861 error = nfs_open_file_find(NULL, noop, &newnofp, 0, 0, 1);
6862 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6863 printf("nfs_vnop_create: LOST\n");
6864 error = EIO;
6865 }
6866 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6867 /* This shouldn't happen given that this is a new, nodeless nofp */
6d2010ae
A
6868 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
6869 nfs_open_file_destroy(newnofp);
6870 newnofp = NULL;
0a7de745 6871 if (!error) {
f427ee49 6872 nfs_mount_state_in_use_end(nmp, 0);
6d2010ae 6873 goto restart;
0a7de745 6874 }
6d2010ae 6875 }
0a7de745 6876 if (!error) {
6d2010ae 6877 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
0a7de745 6878 }
6d2010ae 6879 if (error) {
0a7de745 6880 if (newnofp) {
6d2010ae 6881 nfs_open_file_destroy(newnofp);
0a7de745 6882 }
6d2010ae
A
6883 newnofp = NULL;
6884 goto out;
6885 }
6886
6887 /*
6888 * We're just trying to create the file.
6889 * We'll create/open it RW, and set NFS_OPEN_FILE_CREATE.
6890 */
6891 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
6892 denyMode = NFS_OPEN_SHARE_DENY_NONE;
6893
6894 /* Do the open/create */
6895 error = nfs4_open_rpc(newnofp, ctx, cnp, vap, dvp, vpp, NFS_OPEN_CREATE, accessMode, denyMode);
6896 if ((error == EACCES) && vap && !(vap->va_vaflags & VA_EXCLUSIVE) &&
6897 VATTR_IS_ACTIVE(vap, va_mode) && !(vap->va_mode & S_IWUSR)) {
6898 /*
 6899 * Hmm... it looks like the request may have been retransmitted: we never got
 6900 * the first response, which successfully created/opened the file, and then the
 6901 * second time around the open was denied because the mode the file was
 6902 * created with doesn't allow write access.
6903 *
6904 * We'll try to work around this by temporarily updating the mode and
6905 * retrying the open.
6906 */
6907 struct vnode_attr vattr;
6908
6909 /* first make sure it's there */
6910 int error2 = nfs_lookitup(VTONFS(dvp), cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
6911 if (!error2 && np) {
6912 nfs_node_unlock(np);
6913 *vpp = NFSTOV(np);
6914 if (vnode_vtype(NFSTOV(np)) == VREG) {
6915 VATTR_INIT(&vattr);
6916 VATTR_SET(&vattr, va_mode, (vap->va_mode | S_IWUSR));
6917 if (!nfs4_setattr_rpc(np, &vattr, ctx)) {
6918 error2 = nfs4_open_rpc(newnofp, ctx, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, accessMode, denyMode);
6919 VATTR_INIT(&vattr);
6920 VATTR_SET(&vattr, va_mode, vap->va_mode);
6921 nfs4_setattr_rpc(np, &vattr, ctx);
0a7de745 6922 if (!error2) {
6d2010ae 6923 error = 0;
0a7de745 6924 }
6d2010ae
A
6925 }
6926 }
6927 if (error) {
6928 vnode_put(*vpp);
6929 *vpp = NULL;
6930 }
6931 }
6932 }
6933 if (!error && !*vpp) {
6934 printf("nfs4_open_rpc returned without a node?\n");
6935 /* Hmmm... with no node, we have no filehandle and can't close it */
6936 error = EIO;
6937 }
6938 if (error) {
6939 /* need to cleanup our temporary nofp */
6940 nfs_open_file_clear_busy(newnofp);
6941 nfs_open_file_destroy(newnofp);
6942 newnofp = NULL;
6943 goto out;
6944 }
6945 /* After we have a node, add our open file struct to the node */
6946 np = VTONFS(*vpp);
6947 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
6948 nofp = newnofp;
6949 error = nfs_open_file_find_internal(np, noop, &nofp, 0, 0, 0);
6950 if (error) {
6951 /* This shouldn't happen, because we passed in a new nofp to use. */
6952 printf("nfs_open_file_find_internal failed! %d\n", error);
6953 goto out;
6954 } else if (nofp != newnofp) {
6955 /*
6956 * Hmm... an open file struct already exists.
6957 * Mark the existing one busy and merge our open into it.
6958 * Then destroy the one we created.
 6959 * Note: there's no chance of an open conflict because the
6960 * open has already been granted.
6961 */
6962 busyerror = nfs_open_file_set_busy(nofp, NULL);
6963 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
6964 nofp->nof_stateid = newnofp->nof_stateid;
0a7de745 6965 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) {
6d2010ae 6966 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
0a7de745 6967 }
6d2010ae
A
6968 nfs_open_file_clear_busy(newnofp);
6969 nfs_open_file_destroy(newnofp);
6970 }
6971 newnofp = NULL;
6972 /* mark the node as holding a create-initiated open */
6973 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
6974 nofp->nof_creator = current_thread();
6975out:
0a7de745 6976 if (nofp && !busyerror) {
6d2010ae 6977 nfs_open_file_clear_busy(nofp);
0a7de745 6978 }
6d2010ae
A
6979 if (nfs_mount_state_in_use_end(nmp, error)) {
6980 nofp = newnofp = NULL;
6981 busyerror = 0;
6982 goto restart;
6983 }
0a7de745 6984 if (noop) {
6d2010ae 6985 nfs_open_owner_rele(noop);
0a7de745
A
6986 }
6987 return error;
6d2010ae
A
6988}
6989
6990/*
6991 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6992 */
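/*
 * The object is created with a single compound:
 * PUTFH(parent), SAVEFH, CREATE, GETATTR(new object, incl. filehandle),
 * RESTOREFH, GETATTR(parent), so the new object's handle and attributes and
 * the updated directory attributes all come back in one round trip.
 */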
6993int
6994nfs4_create_rpc(
6995 vfs_context_t ctx,
6996 nfsnode_t dnp,
6997 struct componentname *cnp,
6998 struct vnode_attr *vap,
6999 int type,
7000 char *link,
7001 nfsnode_t *npp)
7002{
7003 struct nfsmount *nmp;
f427ee49 7004 struct nfs_vattr *nvattr;
6d2010ae
A
7005 int error = 0, create_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
7006 int nfsvers, namedattrs, numops;
f427ee49 7007 u_int64_t xid = 0, savedxid = 0;
6d2010ae
A
7008 nfsnode_t np = NULL;
7009 vnode_t newvp = NULL;
7010 struct nfsm_chain nmreq, nmrep;
7011 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
7012 const char *tag;
7013 nfs_specdata sd;
f427ee49
A
7014 fhandle_t *fh;
7015 struct nfsreq *req;
7016 struct nfs_dulookup *dul;
6d2010ae
A
7017 struct nfsreq_secinfo_args si;
7018
7019 nmp = NFSTONMP(dnp);
0a7de745
A
7020 if (nfs_mount_gone(nmp)) {
7021 return ENXIO;
7022 }
6d2010ae
A
7023 nfsvers = nmp->nm_vers;
7024 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
0a7de745
A
7025 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
7026 return EINVAL;
7027 }
6d2010ae
A
7028
7029 sd.specdata1 = sd.specdata2 = 0;
7030
7031 switch (type) {
7032 case NFLNK:
7033 tag = "symlink";
7034 break;
7035 case NFBLK:
7036 case NFCHR:
7037 tag = "mknod";
0a7de745
A
7038 if (!VATTR_IS_ACTIVE(vap, va_rdev)) {
7039 return EINVAL;
7040 }
6d2010ae
A
7041 sd.specdata1 = major(vap->va_rdev);
7042 sd.specdata2 = minor(vap->va_rdev);
7043 break;
7044 case NFSOCK:
7045 case NFFIFO:
7046 tag = "mknod";
7047 break;
7048 case NFDIR:
7049 tag = "mkdir";
7050 break;
7051 default:
0a7de745 7052 return EINVAL;
6d2010ae
A
7053 }
7054
f427ee49
A
7055 fh = zalloc(nfs_fhandle_zone);
7056 req = zalloc(nfs_req_zone);
7057 MALLOC(dul, struct nfs_dulookup *, sizeof(*dul), M_TEMP, M_WAITOK);
7058 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
6d2010ae
A
7059 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
7060
7061 error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
0a7de745 7062 if (!namedattrs) {
f427ee49 7063 nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
0a7de745 7064 }
6d2010ae
A
7065
7066 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
f427ee49 7067 NVATTR_INIT(nvattr);
6d2010ae
A
7068 nfsm_chain_null(&nmreq);
7069 nfsm_chain_null(&nmrep);
7070
7071 // PUTFH, SAVEFH, CREATE, GETATTR(FH), RESTOREFH, GETATTR
7072 numops = 6;
7073 nfsm_chain_build_alloc_init(error, &nmreq, 66 * NFSX_UNSIGNED);
3e170ce0 7074 nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
6d2010ae
A
7075 numops--;
7076 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7077 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
7078 numops--;
7079 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
7080 numops--;
7081 nfsm_chain_add_32(error, &nmreq, NFS_OP_CREATE);
7082 nfsm_chain_add_32(error, &nmreq, type);
7083 if (type == NFLNK) {
7084 nfsm_chain_add_name(error, &nmreq, link, strlen(link), nmp);
7085 } else if ((type == NFBLK) || (type == NFCHR)) {
7086 nfsm_chain_add_32(error, &nmreq, sd.specdata1);
7087 nfsm_chain_add_32(error, &nmreq, sd.specdata2);
7088 }
7089 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7090 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
7091 numops--;
7092 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7093 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7094 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7095 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
7096 numops--;
7097 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
7098 numops--;
7099 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7100 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
7101 nfsm_chain_build_done(error, &nmreq);
7102 nfsm_assert(error, (numops == 0), EPROTO);
7103 nfsmout_if(error);
7104
7105 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
0a7de745 7106 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
6d2010ae 7107 if (!error) {
0a7de745 7108 if (!namedattrs) {
f427ee49 7109 nfs_dulookup_start(dul, dnp, ctx);
0a7de745 7110 }
6d2010ae
A
7111 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
7112 }
7113
0a7de745 7114 if ((lockerror = nfs_node_lock(dnp))) {
6d2010ae 7115 error = lockerror;
0a7de745 7116 }
6d2010ae
A
7117 nfsm_chain_skip_tag(error, &nmrep);
7118 nfsm_chain_get_32(error, &nmrep, numops);
7119 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7120 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
7121 nfsmout_if(error);
7122 nfsm_chain_op_check(error, &nmrep, NFS_OP_CREATE);
7123 nfsm_chain_check_change_info(error, &nmrep, dnp);
7124 bmlen = NFS_ATTR_BITMAP_LEN;
7125 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
7126 /* At this point if we have no error, the object was created. */
7127 /* if we don't get attributes, then we should lookitup. */
7128 create_error = error;
7129 nfsmout_if(error);
7130 nfs_vattr_set_supported(bitmap, vap);
7131 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7132 nfsmout_if(error);
f427ee49 7133 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
6d2010ae 7134 nfsmout_if(error);
f427ee49 7135 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
6d2010ae
A
7136 printf("nfs: create/%s didn't return filehandle? %s\n", tag, cnp->cn_nameptr);
7137 error = EBADRPC;
7138 goto nfsmout;
7139 }
7140 /* directory attributes: if we don't get them, make sure to invalidate */
7141 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
7142 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7143 savedxid = xid;
7144 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
0a7de745 7145 if (error) {
6d2010ae 7146 NATTRINVALIDATE(dnp);
0a7de745 7147 }
6d2010ae
A
7148
7149nfsmout:
7150 nfsm_chain_cleanup(&nmreq);
7151 nfsm_chain_cleanup(&nmrep);
7152
7153 if (!lockerror) {
7154 if (!create_error && (dnp->n_flag & NNEGNCENTRIES)) {
7155 dnp->n_flag &= ~NNEGNCENTRIES;
7156 cache_purge_negatives(NFSTOV(dnp));
7157 }
7158 dnp->n_flag |= NMODIFIED;
7159 nfs_node_unlock(dnp);
7160 /* nfs_getattr() will check changed and purge caches */
7161 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
7162 }
7163
f427ee49 7164 if (!error && fh->fh_len) {
6d2010ae
A
7165 /* create the vnode with the filehandle and attributes */
7166 xid = savedxid;
f427ee49 7167 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &np);
0a7de745 7168 if (!error) {
6d2010ae 7169 newvp = NFSTOV(np);
0a7de745 7170 }
6d2010ae 7171 }
6d2010ae 7172
0a7de745 7173 if (!namedattrs) {
f427ee49 7174 nfs_dulookup_finish(dul, dnp, ctx);
0a7de745 7175 }
6d2010ae 7176
f427ee49
A
7177 NVATTR_CLEANUP(nvattr);
7178 NFS_ZFREE(nfs_fhandle_zone, fh);
7179 NFS_ZFREE(nfs_req_zone, req);
7180 FREE(dul, M_TEMP);
7181 FREE(nvattr, M_TEMP);
7182
6d2010ae
A
7183 /*
 7184 * Kludge: Map EEXIST => 0, assuming it is the reply to a retransmitted request,
 7185 * provided we can succeed in looking up the object.
7186 */
7187 if ((create_error == EEXIST) || (!create_error && !newvp)) {
7188 error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
7189 if (!error) {
7190 newvp = NFSTOV(np);
0a7de745 7191 if (vnode_vtype(newvp) != nfstov_type(type, nfsvers)) {
6d2010ae 7192 error = EEXIST;
0a7de745 7193 }
6d2010ae
A
7194 }
7195 }
0a7de745 7196 if (!busyerror) {
6d2010ae 7197 nfs_node_clear_busy(dnp);
0a7de745 7198 }
6d2010ae
A
7199 if (error) {
7200 if (newvp) {
7201 nfs_node_unlock(np);
7202 vnode_put(newvp);
7203 }
7204 } else {
7205 nfs_node_unlock(np);
7206 *npp = np;
7207 }
0a7de745 7208 return error;
6d2010ae
A
7209}
7210
7211int
7212nfs4_vnop_mknod(
7213 struct vnop_mknod_args /* {
0a7de745
A
7214 * struct vnodeop_desc *a_desc;
7215 * vnode_t a_dvp;
7216 * vnode_t *a_vpp;
7217 * struct componentname *a_cnp;
7218 * struct vnode_attr *a_vap;
7219 * vfs_context_t a_context;
7220 * } */*ap)
6d2010ae
A
7221{
7222 nfsnode_t np = NULL;
7223 struct nfsmount *nmp;
7224 int error;
7225
7226 nmp = VTONMP(ap->a_dvp);
0a7de745
A
7227 if (nfs_mount_gone(nmp)) {
7228 return ENXIO;
7229 }
6d2010ae 7230
0a7de745
A
7231 if (!VATTR_IS_ACTIVE(ap->a_vap, va_type)) {
7232 return EINVAL;
7233 }
6d2010ae
A
7234 switch (ap->a_vap->va_type) {
7235 case VBLK:
7236 case VCHR:
7237 case VFIFO:
7238 case VSOCK:
7239 break;
7240 default:
0a7de745 7241 return ENOTSUP;
6d2010ae
A
7242 }
7243
7244 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
0a7de745
A
7245 vtonfs_type(ap->a_vap->va_type, nmp->nm_vers), NULL, &np);
7246 if (!error) {
6d2010ae 7247 *ap->a_vpp = NFSTOV(np);
0a7de745
A
7248 }
7249 return error;
6d2010ae
A
7250}
7251
7252int
7253nfs4_vnop_mkdir(
7254 struct vnop_mkdir_args /* {
0a7de745
A
7255 * struct vnodeop_desc *a_desc;
7256 * vnode_t a_dvp;
7257 * vnode_t *a_vpp;
7258 * struct componentname *a_cnp;
7259 * struct vnode_attr *a_vap;
7260 * vfs_context_t a_context;
7261 * } */*ap)
6d2010ae
A
7262{
7263 nfsnode_t np = NULL;
7264 int error;
7265
7266 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
0a7de745
A
7267 NFDIR, NULL, &np);
7268 if (!error) {
6d2010ae 7269 *ap->a_vpp = NFSTOV(np);
0a7de745
A
7270 }
7271 return error;
6d2010ae
A
7272}
7273
7274int
7275nfs4_vnop_symlink(
7276 struct vnop_symlink_args /* {
0a7de745
A
7277 * struct vnodeop_desc *a_desc;
7278 * vnode_t a_dvp;
7279 * vnode_t *a_vpp;
7280 * struct componentname *a_cnp;
7281 * struct vnode_attr *a_vap;
7282 * char *a_target;
7283 * vfs_context_t a_context;
7284 * } */*ap)
6d2010ae
A
7285{
7286 nfsnode_t np = NULL;
7287 int error;
7288
7289 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
0a7de745
A
7290 NFLNK, ap->a_target, &np);
7291 if (!error) {
6d2010ae 7292 *ap->a_vpp = NFSTOV(np);
0a7de745
A
7293 }
7294 return error;
6d2010ae
A
7295}
7296
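/*
 * NFSv4 hard link.  The LINK is sent in one compound against the target
 * directory; attributes for both the directory and the linked node are
 * refreshed from the reply (or invalidated if they don't come back).
 */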
7297int
7298nfs4_vnop_link(
7299 struct vnop_link_args /* {
0a7de745
A
7300 * struct vnodeop_desc *a_desc;
7301 * vnode_t a_vp;
7302 * vnode_t a_tdvp;
7303 * struct componentname *a_cnp;
7304 * vfs_context_t a_context;
7305 * } */*ap)
6d2010ae
A
7306{
7307 vfs_context_t ctx = ap->a_context;
7308 vnode_t vp = ap->a_vp;
7309 vnode_t tdvp = ap->a_tdvp;
7310 struct componentname *cnp = ap->a_cnp;
7311 int error = 0, lockerror = ENOENT, status;
7312 struct nfsmount *nmp;
7313 nfsnode_t np = VTONFS(vp);
7314 nfsnode_t tdnp = VTONFS(tdvp);
7315 int nfsvers, numops;
7316 u_int64_t xid, savedxid;
7317 struct nfsm_chain nmreq, nmrep;
7318 struct nfsreq_secinfo_args si;
7319
0a7de745
A
7320 if (vnode_mount(vp) != vnode_mount(tdvp)) {
7321 return EXDEV;
7322 }
6d2010ae
A
7323
7324 nmp = VTONMP(vp);
0a7de745
A
7325 if (nfs_mount_gone(nmp)) {
7326 return ENXIO;
7327 }
6d2010ae 7328 nfsvers = nmp->nm_vers;
0a7de745
A
7329 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
7330 return EINVAL;
7331 }
7332 if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
7333 return EINVAL;
7334 }
6d2010ae
A
7335
7336 /*
7337 * Push all writes to the server, so that the attribute cache
7338 * doesn't get "out of sync" with the server.
7339 * XXX There should be a better way!
7340 */
7341 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
7342
0a7de745
A
7343 if ((error = nfs_node_set_busy2(tdnp, np, vfs_context_thread(ctx)))) {
7344 return error;
7345 }
6d2010ae
A
7346
7347 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
7348 nfsm_chain_null(&nmreq);
7349 nfsm_chain_null(&nmrep);
7350
7351 // PUTFH(SOURCE), SAVEFH, PUTFH(DIR), LINK, GETATTR(DIR), RESTOREFH, GETATTR
7352 numops = 7;
7353 nfsm_chain_build_alloc_init(error, &nmreq, 29 * NFSX_UNSIGNED + cnp->cn_namelen);
3e170ce0 7354 nfsm_chain_add_compound_header(error, &nmreq, "link", nmp->nm_minor_vers, numops);
6d2010ae
A
7355 numops--;
7356 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7357 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
7358 numops--;
7359 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
7360 numops--;
7361 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7362 nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
7363 numops--;
7364 nfsm_chain_add_32(error, &nmreq, NFS_OP_LINK);
7365 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7366 numops--;
7367 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7368 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
7369 numops--;
7370 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
7371 numops--;
7372 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7373 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
7374 nfsm_chain_build_done(error, &nmreq);
7375 nfsm_assert(error, (numops == 0), EPROTO);
7376 nfsmout_if(error);
7377 error = nfs_request(tdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
7378
7379 if ((lockerror = nfs_node_lock2(tdnp, np))) {
7380 error = lockerror;
7381 goto nfsmout;
7382 }
7383 nfsm_chain_skip_tag(error, &nmrep);
7384 nfsm_chain_get_32(error, &nmrep, numops);
7385 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7386 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
7387 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7388 nfsm_chain_op_check(error, &nmrep, NFS_OP_LINK);
7389 nfsm_chain_check_change_info(error, &nmrep, tdnp);
7390 /* directory attributes: if we don't get them, make sure to invalidate */
7391 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7392 savedxid = xid;
7393 nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
0a7de745 7394 if (error) {
6d2010ae 7395 NATTRINVALIDATE(tdnp);
0a7de745 7396 }
6d2010ae
A
7397 /* link attributes: if we don't get them, make sure to invalidate */
7398 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
7399 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7400 xid = savedxid;
7401 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
0a7de745 7402 if (error) {
6d2010ae 7403 NATTRINVALIDATE(np);
0a7de745 7404 }
6d2010ae
A
7405nfsmout:
7406 nfsm_chain_cleanup(&nmreq);
7407 nfsm_chain_cleanup(&nmrep);
0a7de745 7408 if (!lockerror) {
6d2010ae 7409 tdnp->n_flag |= NMODIFIED;
0a7de745 7410 }
6d2010ae 7411 /* Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. */
0a7de745 7412 if (error == EEXIST) {
6d2010ae 7413 error = 0;
0a7de745 7414 }
6d2010ae
A
7415 if (!error && (tdnp->n_flag & NNEGNCENTRIES)) {
7416 tdnp->n_flag &= ~NNEGNCENTRIES;
7417 cache_purge_negatives(tdvp);
7418 }
0a7de745 7419 if (!lockerror) {
6d2010ae 7420 nfs_node_unlock2(tdnp, np);
0a7de745 7421 }
6d2010ae 7422 nfs_node_clear_busy2(tdnp, np);
0a7de745 7423 return error;
6d2010ae
A
7424}
7425
7426int
7427nfs4_vnop_rmdir(
7428 struct vnop_rmdir_args /* {
0a7de745
A
7429 * struct vnodeop_desc *a_desc;
7430 * vnode_t a_dvp;
7431 * vnode_t a_vp;
7432 * struct componentname *a_cnp;
7433 * vfs_context_t a_context;
7434 * } */*ap)
6d2010ae
A
7435{
7436 vfs_context_t ctx = ap->a_context;
7437 vnode_t vp = ap->a_vp;
7438 vnode_t dvp = ap->a_dvp;
7439 struct componentname *cnp = ap->a_cnp;
7440 struct nfsmount *nmp;
7441 int error = 0, namedattrs;
7442 nfsnode_t np = VTONFS(vp);
7443 nfsnode_t dnp = VTONFS(dvp);
f427ee49 7444 struct nfs_dulookup *dul;
6d2010ae 7445
0a7de745
A
7446 if (vnode_vtype(vp) != VDIR) {
7447 return EINVAL;
7448 }
6d2010ae
A
7449
7450 nmp = NFSTONMP(dnp);
0a7de745
A
7451 if (nfs_mount_gone(nmp)) {
7452 return ENXIO;
7453 }
6d2010ae
A
7454 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
7455
0a7de745
A
7456 if ((error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx)))) {
7457 return error;
7458 }
6d2010ae 7459
f427ee49 7460 MALLOC(dul, struct nfs_dulookup *, sizeof(*dul), M_TEMP, M_WAITOK);
6d2010ae 7461 if (!namedattrs) {
f427ee49
A
7462 nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
7463 nfs_dulookup_start(dul, dnp, ctx);
6d2010ae
A
7464 }
7465
7466 error = nfs4_remove_rpc(dnp, cnp->cn_nameptr, cnp->cn_namelen,
0a7de745 7467 vfs_context_thread(ctx), vfs_context_ucred(ctx));
6d2010ae
A
7468
7469 nfs_name_cache_purge(dnp, np, cnp, ctx);
7470 /* nfs_getattr() will check changed and purge caches */
7471 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
0a7de745 7472 if (!namedattrs) {
f427ee49 7473 nfs_dulookup_finish(dul, dnp, ctx);
0a7de745 7474 }
6d2010ae
A
7475 nfs_node_clear_busy2(dnp, np);
7476
7477 /*
7478 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
7479 */
0a7de745 7480 if (error == ENOENT) {
6d2010ae 7481 error = 0;
0a7de745 7482 }
6d2010ae
A
7483 if (!error) {
7484 /*
7485 * remove nfsnode from hash now so we can't accidentally find it
7486 * again if another object gets created with the same filehandle
7487 * before this vnode gets reclaimed
7488 */
c3c9b80d 7489 lck_mtx_lock(&nfs_node_hash_mutex);
6d2010ae
A
7490 if (np->n_hflag & NHHASHED) {
7491 LIST_REMOVE(np, n_hash);
7492 np->n_hflag &= ~NHHASHED;
7493 FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
7494 }
c3c9b80d 7495 lck_mtx_unlock(&nfs_node_hash_mutex);
6d2010ae 7496 }
f427ee49 7497 FREE(dul, M_TEMP);
0a7de745 7498 return error;
6d2010ae
A
7499}
7500
7501/*
7502 * NFSv4 Named Attributes
7503 *
7504 * Both the extended attributes interface and the named streams interface
7505 * are backed by NFSv4 named attributes. The implementations for both use
7506 * a common set of routines in an attempt to reduce code duplication, to
7507 * increase efficiency, to increase caching of both names and data, and to
7508 * confine the complexity.
7509 *
7510 * Each NFS node caches its named attribute directory's file handle.
7511 * The directory nodes for the named attribute directories are handled
7512 * exactly like regular directories (with a couple minor exceptions).
7513 * Named attribute nodes are also treated as much like regular files as
7514 * possible.
7515 *
7516 * Most of the heavy lifting is done by nfs4_named_attr_get().
7517 */
7518
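/*
 * Illustrative sketch (not part of the original source): a simple lookup of
 * a named attribute "name" on np, with no open and no create, would boil
 * down to something like:
 *
 *	struct componentname cn;
 *	bzero(&cn, sizeof(cn));
 *	cn.cn_nameptr = name;
 *	cn.cn_namelen = namelen;
 *	cn.cn_nameiop = LOOKUP;
 *	error = nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_NONE, 0, ctx, &anp, NULL);
 *
 * The xattr/named-stream entry points layer on top of this by also passing
 * an access mode and NFS_GET_NAMED_ATTR_* flags (create, truncate, prefetch)
 * as appropriate for the operation.
 */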
7519/*
7520 * Get the given node's attribute directory node.
7521 * If !fetch, then only return a cached node.
7522 * Otherwise, we will attempt to fetch the node from the server.
7523 * (Note: the node should be marked busy.)
b0d623f7 7524 */
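/*
 * The attribute directory's file handle is obtained with a
 * PUTFH/OPENATTR/GETATTR compound and cached (length-prefixed) in
 * np->n_attrdirfh, so subsequent calls can rebuild the directory node
 * without another OPENATTR.
 */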
6d2010ae
A
7525nfsnode_t
7526nfs4_named_attr_dir_get(nfsnode_t np, int fetch, vfs_context_t ctx)
b0d623f7 7527{
6d2010ae 7528 nfsnode_t adnp = NULL;
b0d623f7 7529 struct nfsmount *nmp;
6d2010ae
A
7530 int error = 0, status, numops;
7531 struct nfsm_chain nmreq, nmrep;
7532 u_int64_t xid;
7533 uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
f427ee49
A
7534 fhandle_t *fh;
7535 struct nfs_vattr *nvattr;
6d2010ae 7536 struct componentname cn;
f427ee49 7537 struct nfsreq *req;
6d2010ae 7538 struct nfsreq_secinfo_args si;
b0d623f7 7539
6d2010ae 7540 nmp = NFSTONMP(np);
0a7de745
A
7541 if (nfs_mount_gone(nmp)) {
7542 return NULL;
7543 }
7544 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
7545 return NULL;
7546 }
b0d623f7 7547
6d2010ae 7548 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
f427ee49
A
7549 fh = zalloc(nfs_fhandle_zone);
7550 req = zalloc(nfs_req_zone);
7551 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
7552 NVATTR_INIT(nvattr);
6d2010ae
A
7553 nfsm_chain_null(&nmreq);
7554 nfsm_chain_null(&nmrep);
b0d623f7 7555
6d2010ae
A
7556 bzero(&cn, sizeof(cn));
7557 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
f427ee49 7558 cn.cn_namelen = NFS_STRLEN_INT(_PATH_FORKSPECIFIER);
6d2010ae
A
7559 cn.cn_nameiop = LOOKUP;
7560
7561 if (np->n_attrdirfh) {
7562 // XXX can't set parent correctly (to np) yet
0a7de745
A
7563 error = nfs_nget(nmp->nm_mountp, NULL, &cn, np->n_attrdirfh + 1, *np->n_attrdirfh,
7564 NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &adnp);
7565 if (adnp) {
6d2010ae 7566 goto nfsmout;
0a7de745 7567 }
6d2010ae
A
7568 }
7569 if (!fetch) {
7570 error = ENOENT;
7571 goto nfsmout;
2d21ac55
A
7572 }
7573
6d2010ae
A
7574 // PUTFH, OPENATTR, GETATTR
7575 numops = 3;
7576 nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
3e170ce0 7577 nfsm_chain_add_compound_header(error, &nmreq, "openattr", nmp->nm_minor_vers, numops);
6d2010ae
A
7578 numops--;
7579 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7580 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7581 numops--;
7582 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7583 nfsm_chain_add_32(error, &nmreq, 0);
7584 numops--;
7585 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7586 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7587 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7588 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
0a7de745 7589 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
6d2010ae
A
7590 nfsm_chain_build_done(error, &nmreq);
7591 nfsm_assert(error, (numops == 0), EPROTO);
7592 nfsmout_if(error);
7593 error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND,
0a7de745
A
7594 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
7595 if (!error) {
6d2010ae 7596 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
0a7de745 7597 }
b0d623f7 7598
6d2010ae
A
7599 nfsm_chain_skip_tag(error, &nmrep);
7600 nfsm_chain_get_32(error, &nmrep, numops);
7601 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7602 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
7603 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7604 nfsmout_if(error);
f427ee49 7605 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
6d2010ae 7606 nfsmout_if(error);
f427ee49 7607 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh->fh_len) {
6d2010ae
A
7608 error = ENOENT;
7609 goto nfsmout;
2d21ac55 7610 }
f427ee49 7611 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh->fh_len)) {
6d2010ae 7612 /* (re)allocate attrdir fh buffer */
0a7de745 7613 if (np->n_attrdirfh) {
6d2010ae 7614 FREE(np->n_attrdirfh, M_TEMP);
0a7de745 7615 }
f427ee49 7616 MALLOC(np->n_attrdirfh, u_char*, fh->fh_len + 1, M_TEMP, M_WAITOK);
2d21ac55 7617 }
6d2010ae
A
7618 if (!np->n_attrdirfh) {
7619 error = ENOMEM;
7620 goto nfsmout;
b0d623f7 7621 }
6d2010ae 7622 /* cache the attrdir fh in the node */
f427ee49
A
7623 *np->n_attrdirfh = (unsigned char)fh->fh_len; /* No truncation because fh_len's value is checked during nfs4_parsefattr() */
7624 bcopy(fh->fh_data, np->n_attrdirfh + 1, fh->fh_len);
6d2010ae
A
7625 /* create node for attrdir */
7626 // XXX can't set parent correctly (to np) yet
f427ee49 7627 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, 0, &adnp);
6d2010ae 7628nfsmout:
f427ee49
A
7629 NVATTR_CLEANUP(nvattr);
7630 NFS_ZFREE(nfs_fhandle_zone, fh);
7631 NFS_ZFREE(nfs_req_zone, req);
7632 FREE(nvattr, M_TEMP);
6d2010ae
A
7633 nfsm_chain_cleanup(&nmreq);
7634 nfsm_chain_cleanup(&nmrep);
2d21ac55 7635
6d2010ae
A
7636 if (adnp) {
7637 /* sanity check that this node is an attribute directory */
0a7de745 7638 if (adnp->n_vattr.nva_type != VDIR) {
6d2010ae 7639 error = EINVAL;
0a7de745
A
7640 }
7641 if (!(adnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
6d2010ae 7642 error = EINVAL;
0a7de745 7643 }
6d2010ae 7644 nfs_node_unlock(adnp);
0a7de745 7645 if (error) {
6d2010ae 7646 vnode_put(NFSTOV(adnp));
0a7de745 7647 }
b0d623f7 7648 }
0a7de745 7649 return error ? NULL : adnp;
b0d623f7
A
7650}
7651
2d21ac55 7652/*
6d2010ae
A
7653 * Get the given node's named attribute node for the name given.
7654 *
7655 * In an effort to increase the performance of named attribute access, we try
7656 * to reduce server requests by doing the following:
7657 *
7658 * - cache the node's named attribute directory file handle in the node
7659 * - maintain a directory vnode for the attribute directory
7660 * - use name cache entries (positive and negative) to speed up lookups
7661 * - optionally open the named attribute (with the given accessMode) in the same RPC
7662 * - combine attribute directory retrieval with the lookup/open RPC
7663 * - optionally prefetch the named attribute's first block of data in the same RPC
7664 *
7665 * Also, in an attempt to reduce the number of copies/variations of this code,
7666 * parts of the RPC building/processing code are conditionalized on what is
7667 * needed for any particular request (openattr, lookup vs. open, read).
7668 *
7669 * Note that because we may not have the attribute directory node when we start
7670 * the lookup/open, we lock both the node and the attribute directory node.
2d21ac55 7671 */
6d2010ae 7672
0a7de745
A
7673#define NFS_GET_NAMED_ATTR_CREATE 0x1
7674#define NFS_GET_NAMED_ATTR_CREATE_GUARDED 0x2
7675#define NFS_GET_NAMED_ATTR_TRUNCATE 0x4
7676#define NFS_GET_NAMED_ATTR_PREFETCH 0x8
6d2010ae 7677
b0d623f7 7678int
6d2010ae
A
7679nfs4_named_attr_get(
7680 nfsnode_t np,
2d21ac55 7681 struct componentname *cnp,
6d2010ae
A
7682 uint32_t accessMode,
7683 int flags,
7684 vfs_context_t ctx,
7685 nfsnode_t *anpp,
7686 struct nfs_open_file **nofpp)
2d21ac55
A
7687{
7688 struct nfsmount *nmp;
6d2010ae
A
7689 int error = 0, open_error = EIO;
7690 int inuse = 0, adlockerror = ENOENT, busyerror = ENOENT, adbusyerror = ENOENT, nofpbusyerror = ENOENT;
7691 int create, guarded, prefetch, truncate, noopbusy = 0;
7692 int open, status, numops, hadattrdir, negnamecache;
f427ee49 7693 struct nfs_vattr *nvattr;
6d2010ae
A
7694 struct vnode_attr vattr;
7695 nfsnode_t adnp = NULL, anp = NULL;
7696 vnode_t avp = NULL;
f427ee49 7697 u_int64_t xid = 0, savedxid = 0;
2d21ac55
A
7698 struct nfsm_chain nmreq, nmrep;
7699 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
f427ee49 7700 uint32_t denyMode = 0, rflags, delegation, recall, eof, rlen, retlen;
6d2010ae 7701 nfs_stateid stateid, dstateid;
f427ee49 7702 fhandle_t *fh;
6d2010ae
A
7703 struct nfs_open_owner *noop = NULL;
7704 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
7705 struct vnop_access_args naa;
7706 thread_t thd;
7707 kauth_cred_t cred;
7708 struct timeval now;
7709 char sbuf[64], *s;
7710 uint32_t ace_type, ace_flags, ace_mask, len, slen;
7711 struct kauth_ace ace;
f427ee49 7712 struct nfsreq *req;
6d2010ae
A
7713 struct nfsreq_secinfo_args si;
7714
7715 *anpp = NULL;
6d2010ae
A
7716 rflags = delegation = recall = eof = rlen = retlen = 0;
7717 ace.ace_flags = 0;
7718 s = sbuf;
7719 slen = sizeof(sbuf);
2d21ac55 7720
6d2010ae 7721 nmp = NFSTONMP(np);
0a7de745
A
7722 if (nfs_mount_gone(nmp)) {
7723 return ENXIO;
7724 }
f427ee49
A
7725 fh = zalloc(nfs_fhandle_zone);
7726 req = zalloc(nfs_req_zone);
7727 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
7728 NVATTR_INIT(nvattr);
7729 fh->fh_len = 0;
7730 bzero(&dstateid, sizeof(dstateid));
6d2010ae
A
7731 negnamecache = !NMFLAG(nmp, NONEGNAMECACHE);
7732 thd = vfs_context_thread(ctx);
7733 cred = vfs_context_ucred(ctx);
7734 create = (flags & NFS_GET_NAMED_ATTR_CREATE) ? NFS_OPEN_CREATE : NFS_OPEN_NOCREATE;
7735 guarded = (flags & NFS_GET_NAMED_ATTR_CREATE_GUARDED) ? NFS_CREATE_GUARDED : NFS_CREATE_UNCHECKED;
7736 truncate = (flags & NFS_GET_NAMED_ATTR_TRUNCATE);
7737 prefetch = (flags & NFS_GET_NAMED_ATTR_PREFETCH);
7738
7739 if (!create) {
f427ee49 7740 error = nfs_getattr(np, nvattr, ctx, NGA_CACHED);
0a7de745 7741 if (error) {
f427ee49 7742 goto out_free;
0a7de745 7743 }
f427ee49
A
7744 if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
7745 !(nvattr->nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
7746 error = ENOATTR;
7747 goto out_free;
0a7de745 7748 }
6d2010ae
A
7749 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_NONE) {
7750 /* shouldn't happen... but just be safe */
7751 printf("nfs4_named_attr_get: create with no access %s\n", cnp->cn_nameptr);
7752 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
7753 }
7754 open = (accessMode != NFS_OPEN_SHARE_ACCESS_NONE);
7755 if (open) {
7756 /*
7757 * We're trying to open the file.
7758 * We'll create/open it with the given access mode,
7759 * and set NFS_OPEN_FILE_CREATE.
7760 */
7761 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 7762 if (prefetch && guarded) {
6d2010ae 7763 prefetch = 0; /* no sense prefetching data that can't be there */
0a7de745 7764 }
6d2010ae 7765 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
0a7de745 7766 if (!noop) {
f427ee49
A
7767 error = ENOMEM;
7768 goto out_free;
0a7de745 7769 }
2d21ac55
A
7770 }
7771
0a7de745 7772 if ((error = busyerror = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
f427ee49 7773 goto out_free;
0a7de745 7774 }
2d21ac55 7775
6d2010ae
A
7776 adnp = nfs4_named_attr_dir_get(np, 0, ctx);
7777 hadattrdir = (adnp != NULL);
7778 if (prefetch) {
7779 microuptime(&now);
7780 /* use the special state ID because we don't have a real one to send */
7781 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
7782 rlen = MIN(nmp->nm_rsize, nmp->nm_biosize);
7783 }
7784 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
2d21ac55
A
7785 nfsm_chain_null(&nmreq);
7786 nfsm_chain_null(&nmrep);
7787
6d2010ae 7788 if (hadattrdir) {
0a7de745 7789 if ((error = adbusyerror = nfs_node_set_busy(adnp, vfs_context_thread(ctx)))) {
6d2010ae 7790 goto nfsmout;
0a7de745 7791 }
6d2010ae
A
7792 /* nfs_getattr() will check changed and purge caches */
7793 error = nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
7794 nfsmout_if(error);
7795 error = cache_lookup(NFSTOV(adnp), &avp, cnp);
7796 switch (error) {
7797 case ENOENT:
7798 /* negative cache entry */
7799 goto nfsmout;
7800 case 0:
7801 /* cache miss */
7802 /* try dir buf cache lookup */
f427ee49 7803 error = nfs_dir_buf_cache_lookup(adnp, &anp, cnp, ctx, 0, NULL);
6d2010ae
A
7804 if (!error && anp) {
7805 /* dir buf cache hit */
7806 *anpp = anp;
7807 error = -1;
7808 }
0a7de745 7809 if (error != -1) { /* cache miss */
6d2010ae 7810 break;
0a7de745 7811 }
f427ee49 7812 OS_FALLTHROUGH;
6d2010ae
A
7813 case -1:
7814 /* cache hit, not really an error */
316670eb 7815 OSAddAtomic64(1, &nfsstats.lookupcache_hits);
0a7de745 7816 if (!anp && avp) {
6d2010ae 7817 *anpp = anp = VTONFS(avp);
0a7de745 7818 }
6d2010ae
A
7819
7820 nfs_node_clear_busy(adnp);
7821 adbusyerror = ENOENT;
7822
7823 /* check for directory access */
7824 naa.a_desc = &vnop_access_desc;
7825 naa.a_vp = NFSTOV(adnp);
7826 naa.a_action = KAUTH_VNODE_SEARCH;
7827 naa.a_context = ctx;
7828
7829 /* compute actual success/failure based on accessibility */
7830 error = nfs_vnop_access(&naa);
f427ee49 7831 OS_FALLTHROUGH;
6d2010ae
A
7832 default:
7833 /* we either found it, or hit an error */
7834 if (!error && guarded) {
7835 /* found cached entry but told not to use it */
7836 error = EEXIST;
7837 vnode_put(NFSTOV(anp));
7838 *anpp = anp = NULL;
7839 }
7840 /* we're done if error or we don't need to open */
0a7de745 7841 if (error || !open) {
6d2010ae 7842 goto nfsmout;
0a7de745 7843 }
6d2010ae
A
7844 /* no error and we need to open... */
7845 }
7846 }
7847
7848 if (open) {
7849restart:
7850 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
7851 if (error) {
7852 nfs_open_owner_rele(noop);
7853 noop = NULL;
7854 goto nfsmout;
7855 }
7856 inuse = 1;
7857
7858 /* grab an open file - possibly provisional/nodeless if cache_lookup() failed */
7859 error = nfs_open_file_find(anp, noop, &newnofp, 0, 0, 1);
7860 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
7861 printf("nfs4_named_attr_get: LOST %d %s\n", kauth_cred_getuid(noop->noo_cred), cnp->cn_nameptr);
7862 error = EIO;
7863 }
7864 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6d2010ae
A
7865 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
7866 nfs_open_file_destroy(newnofp);
7867 newnofp = NULL;
0a7de745 7868 if (!error) {
f427ee49
A
7869 nfs_mount_state_in_use_end(nmp, 0);
7870 inuse = 0;
6d2010ae 7871 goto restart;
0a7de745 7872 }
6d2010ae 7873 }
0a7de745 7874 if (!error) {
6d2010ae 7875 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
0a7de745 7876 }
6d2010ae 7877 if (error) {
0a7de745 7878 if (newnofp) {
6d2010ae 7879 nfs_open_file_destroy(newnofp);
0a7de745 7880 }
6d2010ae
A
7881 newnofp = NULL;
7882 goto nfsmout;
7883 }
7884 if (anp) {
7885 /*
7886 * We already have the node. So we just need to open
7887 * it - which we may be able to do with a delegation.
7888 */
7889 open_error = error = nfs4_open(anp, newnofp, accessMode, denyMode, ctx);
7890 if (!error) {
7891 /* open succeeded, so our open file is no longer temporary */
7892 nofp = newnofp;
7893 nofpbusyerror = 0;
7894 newnofp = NULL;
0a7de745 7895 if (nofpp) {
6d2010ae 7896 *nofpp = nofp;
0a7de745 7897 }
6d2010ae
A
7898 }
7899 goto nfsmout;
7900 }
7901 }
7902
7903 /*
7904 * We either don't have the attrdir or we didn't find the attribute
7905 * in the name cache, so we need to talk to the server.
7906 *
7907 * If we don't have the attrdir, we'll need to ask the server for that too.
7908 * If the caller is requesting that the attribute be created, we need to
7909 * make sure the attrdir is created.
7910 * The caller may also request that the first block of an existing attribute
7911 * be retrieved at the same time.
7912 */
7913
7914 if (open) {
7915 /* need to mark the open owner busy during the RPC */
0a7de745 7916 if ((error = nfs_open_owner_set_busy(noop, thd))) {
6d2010ae 7917 goto nfsmout;
0a7de745 7918 }
6d2010ae
A
7919 noopbusy = 1;
7920 }
7921
7922 /*
7923 * We'd like to get updated post-open/lookup attributes for the
7924 * directory and we may also want to prefetch some data via READ.
7925 * We'd like the READ results to be last so that we can leave the
7926 * data in the mbufs until the end.
7927 *
7928 * At a minimum we're sending: PUTFH, LOOKUP/OPEN, GETATTR, PUTFH, GETATTR
7929 */
7930 numops = 5;
0a7de745
A
7931 if (!hadattrdir) {
7932 numops += 3; // also sending: OPENATTR, GETATTR, OPENATTR
7933 }
7934 if (prefetch) {
7935 numops += 4; // also sending: SAVEFH, RESTOREFH, NVERIFY, READ
7936 }
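/*
 * For the fully loaded case (no cached attrdir, open requested, prefetch wanted)
 * the compound built below is, roughly:
 *   PUTFH(np), OPENATTR(create), GETATTR, OPEN (or LOOKUP), GETATTR, SAVEFH,
 *   PUTFH(np), OPENATTR, GETATTR, RESTOREFH, NVERIFY(size == 0), READ
 */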
6d2010ae 7937 nfsm_chain_build_alloc_init(error, &nmreq, 64 * NFSX_UNSIGNED + cnp->cn_namelen);
3e170ce0 7938 nfsm_chain_add_compound_header(error, &nmreq, "getnamedattr", nmp->nm_minor_vers, numops);
6d2010ae
A
7939 if (hadattrdir) {
7940 numops--;
7941 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7942 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7943 } else {
7944 numops--;
7945 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7946 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7947 numops--;
7948 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7949 nfsm_chain_add_32(error, &nmreq, create ? 1 : 0);
7950 numops--;
7951 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7952 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7953 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7954 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
0a7de745 7955 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
6d2010ae
A
7956 }
7957 if (open) {
7958 numops--;
7959 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
7960 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
7961 nfsm_chain_add_32(error, &nmreq, accessMode);
7962 nfsm_chain_add_32(error, &nmreq, denyMode);
7963 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
7964 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
7965 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
7966 nfsm_chain_add_32(error, &nmreq, create);
7967 if (create) {
7968 nfsm_chain_add_32(error, &nmreq, guarded);
7969 VATTR_INIT(&vattr);
0a7de745 7970 if (truncate) {
6d2010ae 7971 VATTR_SET(&vattr, va_data_size, 0);
0a7de745 7972 }
6d2010ae
A
7973 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
7974 }
7975 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
7976 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7977 } else {
7978 numops--;
7979 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
7980 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
2d21ac55 7981 }
2d21ac55
A
7982 numops--;
7983 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7984 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7985 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7986 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
0a7de745 7987 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
6d2010ae
A
7988 if (prefetch) {
7989 numops--;
7990 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
7991 }
7992 if (hadattrdir) {
7993 numops--;
7994 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7995 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7996 } else {
7997 numops--;
7998 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7999 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
8000 numops--;
8001 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
8002 nfsm_chain_add_32(error, &nmreq, 0);
8003 }
2d21ac55
A
8004 numops--;
8005 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
8006 nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
0a7de745 8007 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
6d2010ae
A
8008 if (prefetch) {
8009 numops--;
8010 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
8011 numops--;
8012 nfsm_chain_add_32(error, &nmreq, NFS_OP_NVERIFY);
8013 VATTR_INIT(&vattr);
8014 VATTR_SET(&vattr, va_data_size, 0);
8015 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
8016 numops--;
8017 nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
8018 nfsm_chain_add_stateid(error, &nmreq, &stateid);
8019 nfsm_chain_add_64(error, &nmreq, 0);
8020 nfsm_chain_add_32(error, &nmreq, rlen);
8021 }
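/*
 * The NVERIFY of va_data_size == 0 gates the prefetch: if the attribute is
 * empty, the server stops the compound with NFS4ERR_SAME before the READ, so
 * data is only prefetched for non-empty attributes.  The READ uses the
 * all-zeros "special" stateid set up above, since no open stateid exists yet.
 */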
2d21ac55
A
8022 nfsm_chain_build_done(error, &nmreq);
8023 nfsm_assert(error, (numops == 0), EPROTO);
8024 nfsmout_if(error);
6d2010ae 8025 error = nfs_request_async(hadattrdir ? adnp : np, NULL, &nmreq, NFSPROC4_COMPOUND,
0a7de745
A
8026 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, open ? R_NOINTR: 0, NULL, &req);
8027 if (!error) {
2d21ac55 8028 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
0a7de745 8029 }
2d21ac55 8030
0a7de745 8031 if (hadattrdir && ((adlockerror = nfs_node_lock(adnp)))) {
6d2010ae 8032 error = adlockerror;
0a7de745 8033 }
6d2010ae 8034 savedxid = xid;
2d21ac55
A
8035 nfsm_chain_skip_tag(error, &nmrep);
8036 nfsm_chain_get_32(error, &nmrep, numops);
8037 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6d2010ae
A
8038 if (!hadattrdir) {
8039 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
8040 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
8041 nfsmout_if(error);
f427ee49 8042 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
6d2010ae 8043 nfsmout_if(error);
f427ee49
A
8044 if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE) && fh->fh_len) {
8045 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh->fh_len)) {
6d2010ae 8046 /* (re)allocate attrdir fh buffer */
0a7de745 8047 if (np->n_attrdirfh) {
6d2010ae 8048 FREE(np->n_attrdirfh, M_TEMP);
0a7de745 8049 }
f427ee49 8050 MALLOC(np->n_attrdirfh, u_char*, fh->fh_len + 1, M_TEMP, M_WAITOK);
6d2010ae
A
8051 }
8052 if (np->n_attrdirfh) {
8053 /* remember the attrdir fh in the node */
f427ee49
A
8054 *np->n_attrdirfh = (unsigned char)fh->fh_len; /* No truncation because fh_len's value is checked during nfs4_parsefattr() */
8055 bcopy(fh->fh_data, np->n_attrdirfh + 1, fh->fh_len);
6d2010ae
A
8056 /* create busied node for attrdir */
8057 struct componentname cn;
8058 bzero(&cn, sizeof(cn));
8059 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
f427ee49 8060 cn.cn_namelen = NFS_STRLEN_INT(_PATH_FORKSPECIFIER);
6d2010ae
A
8061 cn.cn_nameiop = LOOKUP;
8062 // XXX can't set parent correctly (to np) yet
f427ee49 8063 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, 0, &adnp);
6d2010ae
A
8064 if (!error) {
8065 adlockerror = 0;
8066 /* set the node busy */
8067 SET(adnp->n_flag, NBUSY);
8068 adbusyerror = 0;
8069 }
8070 /* if no adnp, oh well... */
8071 error = 0;
8072 }
8073 }
f427ee49
A
8074 NVATTR_CLEANUP(nvattr);
8075 fh->fh_len = 0;
6d2010ae
A
8076 }
8077 if (open) {
8078 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
8079 nfs_owner_seqid_increment(noop, NULL, error);
8080 nfsm_chain_get_stateid(error, &nmrep, &newnofp->nof_stateid);
8081 nfsm_chain_check_change_info(error, &nmrep, adnp);
8082 nfsm_chain_get_32(error, &nmrep, rflags);
8083 bmlen = NFS_ATTR_BITMAP_LEN;
8084 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
8085 nfsm_chain_get_32(error, &nmrep, delegation);
0a7de745 8086 if (!error) {
6d2010ae
A
8087 switch (delegation) {
8088 case NFS_OPEN_DELEGATE_NONE:
8089 break;
8090 case NFS_OPEN_DELEGATE_READ:
8091 case NFS_OPEN_DELEGATE_WRITE:
8092 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
8093 nfsm_chain_get_32(error, &nmrep, recall);
0a7de745 8094 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // skip the write delegation's space limit (3 words) XXX
6d2010ae 8095 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
0a7de745 8096 }
6d2010ae
A
8097 /* if we have any trouble accepting the ACE, just invalidate it */
8098 ace_type = ace_flags = ace_mask = len = 0;
8099 nfsm_chain_get_32(error, &nmrep, ace_type);
8100 nfsm_chain_get_32(error, &nmrep, ace_flags);
8101 nfsm_chain_get_32(error, &nmrep, ace_mask);
8102 nfsm_chain_get_32(error, &nmrep, len);
8103 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
8104 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
8105 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
8106 if (!error && (len >= slen)) {
0a7de745
A
8107 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
8108 if (s) {
8109 slen = len + 1;
8110 } else {
6d2010ae 8111 ace.ace_flags = 0;
0a7de745 8112 }
6d2010ae 8113 }
0a7de745 8114 if (s) {
6d2010ae 8115 nfsm_chain_get_opaque(error, &nmrep, len, s);
0a7de745 8116 } else {
6d2010ae 8117 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
0a7de745 8118 }
6d2010ae
A
8119 if (!error && s) {
8120 s[len] = '\0';
0a7de745 8121 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
6d2010ae 8122 ace.ace_flags = 0;
0a7de745 8123 }
6d2010ae 8124 }
0a7de745 8125 if (error || !s) {
6d2010ae 8126 ace.ace_flags = 0;
0a7de745
A
8127 }
8128 if (s && (s != sbuf)) {
6d2010ae 8129 FREE(s, M_TEMP);
0a7de745 8130 }
6d2010ae
A
8131 break;
8132 default:
8133 error = EBADRPC;
8134 break;
8135 }
0a7de745 8136 }
6d2010ae
A
8137 /* At this point if we have no error, the object was created/opened. */
8138 open_error = error;
8139 } else {
8140 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOOKUP);
8141 }
2d21ac55
A
8142 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
8143 nfsmout_if(error);
f427ee49 8144 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
2d21ac55 8145 nfsmout_if(error);
f427ee49 8146 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh->fh_len) {
6d2010ae 8147 error = EIO;
2d21ac55
A
8148 goto nfsmout;
8149 }
0a7de745 8150 if (prefetch) {
6d2010ae 8151 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
0a7de745 8152 }
6d2010ae 8153 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
0a7de745 8154 if (!hadattrdir) {
6d2010ae 8155 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
0a7de745 8156 }
2d21ac55 8157 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae
A
8158 nfsmout_if(error);
8159 xid = savedxid;
8160 nfsm_chain_loadattr(error, &nmrep, adnp, nmp->nm_vers, &xid);
8161 nfsmout_if(error);
2d21ac55 8162
6d2010ae 8163 if (open) {
0a7de745 8164 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
6d2010ae 8165 newnofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
0a7de745 8166 }
6d2010ae
A
8167 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
8168 if (adnp) {
8169 nfs_node_unlock(adnp);
8170 adlockerror = ENOENT;
8171 }
f427ee49
A
8172 NVATTR_CLEANUP(nvattr);
8173 error = nfs4_open_confirm_rpc(nmp, adnp ? adnp : np, fh->fh_data, fh->fh_len, noop, &newnofp->nof_stateid, thd, cred, nvattr, &xid);
6d2010ae
A
8174 nfsmout_if(error);
8175 savedxid = xid;
0a7de745 8176 if ((adlockerror = nfs_node_lock(adnp))) {
6d2010ae 8177 error = adlockerror;
0a7de745 8178 }
2d21ac55 8179 }
2d21ac55
A
8180 }
8181
6d2010ae
A
8182nfsmout:
8183 if (open && adnp && !adlockerror) {
8184 if (!open_error && (adnp->n_flag & NNEGNCENTRIES)) {
8185 adnp->n_flag &= ~NNEGNCENTRIES;
8186 cache_purge_negatives(NFSTOV(adnp));
8187 }
8188 adnp->n_flag |= NMODIFIED;
8189 nfs_node_unlock(adnp);
8190 adlockerror = ENOENT;
8191 nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
8192 }
8193 if (adnp && !adlockerror && (error == ENOENT) &&
8194 (cnp->cn_flags & MAKEENTRY) && (cnp->cn_nameiop != CREATE) && negnamecache) {
8195 /* add a negative entry in the name cache */
8196 cache_enter(NFSTOV(adnp), NULL, cnp);
8197 adnp->n_flag |= NNEGNCENTRIES;
8198 }
8199 if (adnp && !adlockerror) {
8200 nfs_node_unlock(adnp);
8201 adlockerror = ENOENT;
8202 }
f427ee49 8203 if (!error && !anp && fh->fh_len) {
2d21ac55
A
8204 /* create the vnode with the filehandle and attributes */
8205 xid = savedxid;
f427ee49 8206 error = nfs_nget(NFSTOMP(np), adnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &anp);
6d2010ae
A
8207 if (!error) {
8208 *anpp = anp;
8209 nfs_node_unlock(anp);
8210 }
8211 if (!error && open) {
8212 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
8213 /* After we have a node, add our open file struct to the node */
8214 nofp = newnofp;
8215 error = nfs_open_file_find_internal(anp, noop, &nofp, 0, 0, 0);
8216 if (error) {
8217 /* This shouldn't happen, because we passed in a new nofp to use. */
8218 printf("nfs_open_file_find_internal failed! %d\n", error);
8219 nofp = NULL;
8220 } else if (nofp != newnofp) {
8221 /*
8222 * Hmm... an open file struct already exists.
8223 * Mark the existing one busy and merge our open into it.
8224 * Then destroy the one we created.
8225 * Note: there's no chance of an open conflict because the
8226 * open has already been granted.
8227 */
8228 nofpbusyerror = nfs_open_file_set_busy(nofp, NULL);
8229 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
8230 nofp->nof_stateid = newnofp->nof_stateid;
0a7de745 8231 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) {
6d2010ae 8232 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
0a7de745 8233 }
6d2010ae
A
8234 nfs_open_file_clear_busy(newnofp);
8235 nfs_open_file_destroy(newnofp);
8236 newnofp = NULL;
8237 }
8238 if (!error) {
8239 newnofp = NULL;
8240 nofpbusyerror = 0;
8241 /* mark the node as holding a create-initiated open */
8242 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
8243 nofp->nof_creator = current_thread();
0a7de745 8244 if (nofpp) {
6d2010ae 8245 *nofpp = nofp;
0a7de745 8246 }
6d2010ae
A
8247 }
8248 }
2d21ac55 8249 }
f427ee49 8250 NVATTR_CLEANUP(nvattr);
6d2010ae
A
8251 if (open && ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE))) {
8252 if (!error && anp && !recall) {
8253 /* stuff the delegation state in the node */
8254 lck_mtx_lock(&anp->n_openlock);
8255 anp->n_openflags &= ~N_DELEG_MASK;
8256 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
8257 anp->n_dstateid = dstateid;
8258 anp->n_dace = ace;
8259 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8260 lck_mtx_lock(&nmp->nm_lock);
0a7de745 8261 if (anp->n_dlink.tqe_next == NFSNOLIST) {
6d2010ae 8262 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
0a7de745 8263 }
6d2010ae
A
8264 lck_mtx_unlock(&nmp->nm_lock);
8265 }
8266 lck_mtx_unlock(&anp->n_openlock);
8267 } else {
8268 /* give the delegation back */
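 /*
  * (we get here when the open failed, we never got a node for the
  *  attribute, or the server flagged the delegation for immediate recall)
  */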
8269 if (anp) {
f427ee49 8270 if (NFS_CMPFH(anp, fh->fh_data, fh->fh_len)) {
6d2010ae
A
8271 /* update delegation state and return it */
8272 lck_mtx_lock(&anp->n_openlock);
8273 anp->n_openflags &= ~N_DELEG_MASK;
8274 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
8275 anp->n_dstateid = dstateid;
8276 anp->n_dace = ace;
8277 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8278 lck_mtx_lock(&nmp->nm_lock);
0a7de745 8279 if (anp->n_dlink.tqe_next == NFSNOLIST) {
6d2010ae 8280 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
0a7de745 8281 }
6d2010ae
A
8282 lck_mtx_unlock(&nmp->nm_lock);
8283 }
8284 lck_mtx_unlock(&anp->n_openlock);
8285 /* don't need to send a separate delegreturn for fh */
f427ee49 8286 fh->fh_len = 0;
6d2010ae
A
8287 }
8288 /* return anp's current delegation */
8289 nfs4_delegation_return(anp, 0, thd, cred);
8290 }
f427ee49
A
8291 if (fh->fh_len) { /* return fh's delegation if it wasn't for anp */
8292 nfs4_delegreturn_rpc(nmp, fh->fh_data, fh->fh_len, &dstateid, 0, thd, cred);
0a7de745 8293 }
6d2010ae
A
8294 }
8295 }
8296 if (open) {
8297 if (newnofp) {
8298 /* need to cleanup our temporary nofp */
8299 nfs_open_file_clear_busy(newnofp);
8300 nfs_open_file_destroy(newnofp);
8301 newnofp = NULL;
8302 } else if (nofp && !nofpbusyerror) {
8303 nfs_open_file_clear_busy(nofp);
8304 nofpbusyerror = ENOENT;
8305 }
8306 if (inuse && nfs_mount_state_in_use_end(nmp, error)) {
8307 inuse = 0;
8308 nofp = newnofp = NULL;
8309 rflags = delegation = recall = eof = rlen = retlen = 0;
8310 ace.ace_flags = 0;
8311 s = sbuf;
8312 slen = sizeof(sbuf);
8313 nfsm_chain_cleanup(&nmreq);
8314 nfsm_chain_cleanup(&nmrep);
8315 if (anp) {
8316 vnode_put(NFSTOV(anp));
8317 *anpp = anp = NULL;
8318 }
8319 hadattrdir = (adnp != NULL);
8320 if (noopbusy) {
8321 nfs_open_owner_clear_busy(noop);
8322 noopbusy = 0;
8323 }
8324 goto restart;
8325 }
f427ee49 8326 inuse = 0;
6d2010ae
A
8327 if (noop) {
8328 if (noopbusy) {
8329 nfs_open_owner_clear_busy(noop);
8330 noopbusy = 0;
8331 }
8332 nfs_open_owner_rele(noop);
8333 }
8334 }
8335 if (!error && prefetch && nmrep.nmc_mhead) {
8336 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
8337 nfsm_chain_op_check(error, &nmrep, NFS_OP_NVERIFY);
8338 nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
8339 nfsm_chain_get_32(error, &nmrep, eof);
8340 nfsm_chain_get_32(error, &nmrep, retlen);
8341 if (!error && anp) {
8342 /*
8343 * There can be one problem with doing the prefetch.
8344 * Because we don't have the node before we start the RPC, we
8345 * can't have the buffer busy while the READ is performed.
8346 * So there is a chance that other I/O occurred on the same
8347 * range of data while we were performing this RPC. If that
8348 * happens, then it's possible the data we have in the READ
8349 * response is no longer up to date.
8350 * Once we have the node and the buffer, we need to make sure
8351 * that there's no chance we could be putting stale data in
8352 * the buffer.
8353 * So, we check if the range read is dirty or if any I/O may
8354 * have occurred on it while we were performing our RPC.
8355 */
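/*
 * Concretely, the read data is only copied into the buffer below when the
 * affected pages are clean (no nb_dirtyoff and no dirty bits in the page
 * mask for the range read) and no other I/O has touched the node since
 * "now" was sampled just before the RPC (anp->n_lastio < now).
 */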
8356 struct nfsbuf *bp = NULL;
8357 int lastpg;
f427ee49 8358 nfsbufpgs pagemask, pagemaskand;
6d2010ae
A
8359
8360 retlen = MIN(retlen, rlen);
8361
8362 /* check if node needs size update or invalidation */
0a7de745 8363 if (ISSET(anp->n_flag, NUPDATESIZE)) {
6d2010ae 8364 nfs_data_update_size(anp, 0);
0a7de745 8365 }
6d2010ae
A
8366 if (!(error = nfs_node_lock(anp))) {
8367 if (anp->n_flag & NNEEDINVALIDATE) {
8368 anp->n_flag &= ~NNEEDINVALIDATE;
8369 nfs_node_unlock(anp);
0a7de745
A
8370 error = nfs_vinvalbuf(NFSTOV(anp), V_SAVE | V_IGNORE_WRITEERR, ctx, 1);
8371 if (!error) { /* let's play it safe and just drop the data */
6d2010ae 8372 error = EIO;
0a7de745 8373 }
6d2010ae
A
8374 } else {
8375 nfs_node_unlock(anp);
8376 }
8377 }
2d21ac55 8378
6d2010ae 8379 /* calculate page mask for the range of data read */
f427ee49
A
8380 lastpg = (retlen - 1) / PAGE_SIZE;
8381 nfs_buf_pgs_get_page_mask(&pagemask, lastpg + 1);
6d2010ae 8382
0a7de745
A
8383 if (!error) {
8384 error = nfs_buf_get(anp, 0, nmp->nm_biosize, thd, NBLK_READ | NBLK_NOWAIT, &bp);
8385 }
6d2010ae 8386 /* don't save the data if dirty or potential I/O conflict */
f427ee49
A
8387 if (bp) { nfs_buf_pgs_bit_and(&bp->nb_dirty, &pagemask, &pagemaskand); } /* bp can be NULL here: NBLK_NOWAIT, or an earlier error skipped nfs_buf_get() */
8388 if (!error && bp && !bp->nb_dirtyoff && !nfs_buf_pgs_is_set(&pagemaskand) &&
6d2010ae 8389 timevalcmp(&anp->n_lastio, &now, <)) {
316670eb 8390 OSAddAtomic64(1, &nfsstats.read_bios);
0a7de745 8391 CLR(bp->nb_flags, (NB_DONE | NB_ASYNC));
6d2010ae
A
8392 SET(bp->nb_flags, NB_READ);
8393 NFS_BUF_MAP(bp);
8394 nfsm_chain_get_opaque(error, &nmrep, retlen, bp->nb_data);
8395 if (error) {
8396 bp->nb_error = error;
8397 SET(bp->nb_flags, NB_ERROR);
8398 } else {
8399 bp->nb_offio = 0;
8400 bp->nb_endio = rlen;
0a7de745 8401 if ((retlen > 0) && (bp->nb_endio < (int)retlen)) {
6d2010ae 8402 bp->nb_endio = retlen;
0a7de745 8403 }
6d2010ae
A
8404 if (eof || (retlen == 0)) {
8405 /* zero out the remaining data (up to EOF) */
8406 off_t rpcrem, eofrem, rem;
8407 rpcrem = (rlen - retlen);
8408 eofrem = anp->n_size - (NBOFF(bp) + retlen);
8409 rem = (rpcrem < eofrem) ? rpcrem : eofrem;
0a7de745 8410 if (rem > 0) {
6d2010ae 8411 bzero(bp->nb_data + retlen, rem);
0a7de745 8412 }
6d2010ae
A
8413 } else if ((retlen < rlen) && !ISSET(bp->nb_flags, NB_ERROR)) {
8414 /* ugh... short read ... just invalidate for now... */
8415 SET(bp->nb_flags, NB_INVAL);
8416 }
8417 }
8418 nfs_buf_read_finish(bp);
8419 microuptime(&anp->n_lastio);
8420 }
0a7de745 8421 if (bp) {
6d2010ae 8422 nfs_buf_release(bp, 1);
0a7de745 8423 }
2d21ac55 8424 }
6d2010ae 8425 error = 0; /* ignore any transient error in processing the prefetch */
2d21ac55 8426 }
6d2010ae
A
8427 if (adnp && !adbusyerror) {
8428 nfs_node_clear_busy(adnp);
8429 adbusyerror = ENOENT;
8430 }
8431 if (!busyerror) {
8432 nfs_node_clear_busy(np);
8433 busyerror = ENOENT;
8434 }
0a7de745 8435 if (adnp) {
6d2010ae 8436 vnode_put(NFSTOV(adnp));
0a7de745 8437 }
f427ee49
A
8438 if (inuse) {
8439 nfs_mount_state_in_use_end(nmp, error);
8440 }
6d2010ae
A
8441 if (error && *anpp) {
8442 vnode_put(NFSTOV(*anpp));
8443 *anpp = NULL;
8444 }
8445 nfsm_chain_cleanup(&nmreq);
8446 nfsm_chain_cleanup(&nmrep);
f427ee49
A
8447out_free:
8448 NFS_ZFREE(nfs_fhandle_zone, fh);
8449 NFS_ZFREE(nfs_req_zone, req);
8450 FREE(nvattr, M_TEMP);
0a7de745 8451 return error;
6d2010ae
A
8452}
8453
8454/*
8455 * Remove a named attribute.
8456 */
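/*
 * If no attribute node is passed in, it is looked up first (via
 * nfs4_named_attr_get() with no open), then the removal is performed as an
 * ordinary VNOP_REMOVE against the named attribute directory.
 */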
8457int
8458nfs4_named_attr_remove(nfsnode_t np, nfsnode_t anp, const char *name, vfs_context_t ctx)
8459{
8460 nfsnode_t adnp = NULL;
8461 struct nfsmount *nmp;
8462 struct componentname cn;
8463 struct vnop_remove_args vra;
8464 int error, putanp = 0;
8465
8466 nmp = NFSTONMP(np);
0a7de745
A
8467 if (nfs_mount_gone(nmp)) {
8468 return ENXIO;
8469 }
6d2010ae
A
8470
8471 bzero(&cn, sizeof(cn));
8472 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
f427ee49 8473 cn.cn_namelen = NFS_STRLEN_INT(name);
6d2010ae
A
8474 cn.cn_nameiop = DELETE;
8475 cn.cn_flags = 0;
8476
8477 if (!anp) {
8478 error = nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_NONE,
0a7de745
A
8479 0, ctx, &anp, NULL);
8480 if ((!error && !anp) || (error == ENOATTR)) {
6d2010ae 8481 error = ENOENT;
0a7de745 8482 }
6d2010ae
A
8483 if (error) {
8484 if (anp) {
8485 vnode_put(NFSTOV(anp));
8486 anp = NULL;
8487 }
8488 goto out;
2d21ac55 8489 }
6d2010ae
A
8490 putanp = 1;
8491 }
8492
0a7de745 8493 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
6d2010ae 8494 goto out;
0a7de745 8495 }
6d2010ae
A
8496 adnp = nfs4_named_attr_dir_get(np, 1, ctx);
8497 nfs_node_clear_busy(np);
8498 if (!adnp) {
8499 error = ENOENT;
8500 goto out;
2d21ac55 8501 }
6d2010ae
A
8502
8503 vra.a_desc = &vnop_remove_desc;
8504 vra.a_dvp = NFSTOV(adnp);
8505 vra.a_vp = NFSTOV(anp);
8506 vra.a_cnp = &cn;
8507 vra.a_flags = 0;
8508 vra.a_context = ctx;
8509 error = nfs_vnop_remove(&vra);
8510out:
0a7de745 8511 if (adnp) {
6d2010ae 8512 vnode_put(NFSTOV(adnp));
0a7de745
A
8513 }
8514 if (putanp) {
6d2010ae 8515 vnode_put(NFSTOV(anp));
0a7de745
A
8516 }
8517 return error;
2d21ac55
A
8518}
8519
8520int
6d2010ae
A
8521nfs4_vnop_getxattr(
8522 struct vnop_getxattr_args /* {
0a7de745
A
8523 * struct vnodeop_desc *a_desc;
8524 * vnode_t a_vp;
8525 * const char * a_name;
8526 * uio_t a_uio;
8527 * size_t *a_size;
8528 * int a_options;
8529 * vfs_context_t a_context;
8530 * } */*ap)
2d21ac55 8531{
6d2010ae 8532 vfs_context_t ctx = ap->a_context;
2d21ac55 8533 struct nfsmount *nmp;
f427ee49 8534 struct nfs_vattr *nvattr;
6d2010ae
A
8535 struct componentname cn;
8536 nfsnode_t anp;
8537 int error = 0, isrsrcfork;
2d21ac55 8538
6d2010ae 8539 nmp = VTONMP(ap->a_vp);
0a7de745
A
8540 if (nfs_mount_gone(nmp)) {
8541 return ENXIO;
8542 }
2d21ac55 8543
0a7de745
A
8544 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8545 return ENOTSUP;
8546 }
f427ee49
A
8547
8548 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
8549 error = nfs_getattr(VTONFS(ap->a_vp), nvattr, ctx, NGA_CACHED);
0a7de745 8550 if (error) {
f427ee49 8551 goto out;
0a7de745 8552 }
f427ee49
A
8553 if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8554 !(nvattr->nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
8555 error = ENOATTR;
8556 goto out;
0a7de745 8557 }
6d2010ae
A
8558
8559 bzero(&cn, sizeof(cn));
8560 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
f427ee49 8561 cn.cn_namelen = NFS_STRLEN_INT(ap->a_name);
6d2010ae
A
8562 cn.cn_nameiop = LOOKUP;
8563 cn.cn_flags = MAKEENTRY;
8564
8565 /* we'll normally try to prefetch data for xattrs... the resource fork is really a stream */
8566 isrsrcfork = (bcmp(ap->a_name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
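 /*
  * (i.e. the resource fork can be large and is treated as a stream of data,
  *  so only the other, typically small, xattrs get the first-block prefetch)
  */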
8567
8568 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
0a7de745
A
8569 !isrsrcfork ? NFS_GET_NAMED_ATTR_PREFETCH : 0, ctx, &anp, NULL);
8570 if ((!error && !anp) || (error == ENOENT)) {
6d2010ae 8571 error = ENOATTR;
0a7de745 8572 }
6d2010ae 8573 if (!error) {
0a7de745 8574 if (ap->a_uio) {
6d2010ae 8575 error = nfs_bioread(anp, ap->a_uio, 0, ctx);
0a7de745 8576 } else {
6d2010ae 8577 *ap->a_size = anp->n_size;
0a7de745 8578 }
2d21ac55 8579 }
0a7de745 8580 if (anp) {
6d2010ae 8581 vnode_put(NFSTOV(anp));
0a7de745 8582 }
f427ee49
A
8583out:
8584 FREE(nvattr, M_TEMP);
0a7de745 8585 return error;
6d2010ae 8586}
2d21ac55 8587
6d2010ae
A
8588int
8589nfs4_vnop_setxattr(
8590 struct vnop_setxattr_args /* {
0a7de745
A
8591 * struct vnodeop_desc *a_desc;
8592 * vnode_t a_vp;
8593 * const char * a_name;
8594 * uio_t a_uio;
8595 * int a_options;
8596 * vfs_context_t a_context;
8597 * } */*ap)
6d2010ae
A
8598{
8599 vfs_context_t ctx = ap->a_context;
8600 int options = ap->a_options;
8601 uio_t uio = ap->a_uio;
8602 const char *name = ap->a_name;
8603 struct nfsmount *nmp;
8604 struct componentname cn;
8605 nfsnode_t anp = NULL;
8606 int error = 0, closeerror = 0, flags, isrsrcfork, isfinderinfo, empty = 0, i;
8607#define FINDERINFOSIZE 32
8608 uint8_t finfo[FINDERINFOSIZE];
8609 uint32_t *finfop;
8610 struct nfs_open_file *nofp = NULL;
0a7de745 8611 char uio_buf[UIO_SIZEOF(1)];
6d2010ae
A
8612 uio_t auio;
8613 struct vnop_write_args vwa;
8614
8615 nmp = VTONMP(ap->a_vp);
0a7de745
A
8616 if (nfs_mount_gone(nmp)) {
8617 return ENXIO;
8618 }
6d2010ae 8619
0a7de745
A
8620 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8621 return ENOTSUP;
8622 }
6d2010ae 8623
0a7de745
A
8624 if ((options & XATTR_CREATE) && (options & XATTR_REPLACE)) {
8625 return EINVAL;
8626 }
6d2010ae
A
8627
8628 /* XXX limitation based on need to back up uio on short write */
8629 if (uio_iovcnt(uio) > 1) {
8630 printf("nfs4_vnop_setxattr: iovcnt > 1\n");
0a7de745 8631 return EINVAL;
6d2010ae
A
8632 }
8633
8634 bzero(&cn, sizeof(cn));
8635 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
f427ee49 8636 cn.cn_namelen = NFS_STRLEN_INT(name);
6d2010ae
A
8637 cn.cn_nameiop = CREATE;
8638 cn.cn_flags = MAKEENTRY;
8639
8640 isfinderinfo = (bcmp(name, XATTR_FINDERINFO_NAME, sizeof(XATTR_FINDERINFO_NAME)) == 0);
8641 isrsrcfork = isfinderinfo ? 0 : (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
0a7de745 8642 if (!isrsrcfork) {
6d2010ae 8643 uio_setoffset(uio, 0);
0a7de745 8644 }
6d2010ae 8645 if (isfinderinfo) {
0a7de745
A
8646 if (uio_resid(uio) != sizeof(finfo)) {
8647 return ERANGE;
8648 }
6d2010ae 8649 error = uiomove((char*)&finfo, sizeof(finfo), uio);
0a7de745
A
8650 if (error) {
8651 return error;
8652 }
6d2010ae
A
8653 /* setting a FinderInfo of all zeroes means remove the FinderInfo */
8654 empty = 1;
0a7de745 8655 for (i = 0, finfop = (uint32_t*)&finfo; i < (int)(sizeof(finfo) / sizeof(uint32_t)); i++) {
6d2010ae
A
8656 if (finfop[i]) {
8657 empty = 0;
8658 break;
8659 }
0a7de745
A
8660 }
8661 if (empty && !(options & (XATTR_CREATE | XATTR_REPLACE))) {
6d2010ae 8662 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
0a7de745 8663 if (error == ENOENT) {
6d2010ae 8664 error = 0;
0a7de745
A
8665 }
8666 return error;
6d2010ae
A
8667 }
8668 /* first, let's see if we get a create/replace error */
8669 }
8670
8671 /*
8672 * create/open the xattr
8673 *
8674 * We need to make sure not to create it if XATTR_REPLACE.
8675 * For all xattrs except the resource fork, we also want to
8676 * truncate the xattr to remove any current data. We'll do
8677 * that by setting the size to 0 on create/open.
8678 */
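 /*
  * In other words, the option-to-flag mapping used below is roughly:
  *   (default)       -> CREATE | TRUNCATE
  *   XATTR_CREATE    -> CREATE | CREATE_GUARDED | TRUNCATE
  *   XATTR_REPLACE   -> TRUNCATE only (no CREATE, so a missing attr fails the open)
  * with TRUNCATE dropped for the resource fork in every case.
  */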
8679 flags = 0;
0a7de745 8680 if (!(options & XATTR_REPLACE)) {
6d2010ae 8681 flags |= NFS_GET_NAMED_ATTR_CREATE;
0a7de745
A
8682 }
8683 if (options & XATTR_CREATE) {
6d2010ae 8684 flags |= NFS_GET_NAMED_ATTR_CREATE_GUARDED;
0a7de745
A
8685 }
8686 if (!isrsrcfork) {
6d2010ae 8687 flags |= NFS_GET_NAMED_ATTR_TRUNCATE;
0a7de745 8688 }
6d2010ae
A
8689
8690 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
0a7de745
A
8691 flags, ctx, &anp, &nofp);
8692 if (!error && !anp) {
6d2010ae 8693 error = ENOATTR;
0a7de745
A
8694 }
8695 if (error) {
6d2010ae 8696 goto out;
0a7de745 8697 }
6d2010ae
A
8698 /* grab the open state from the get/create/open */
8699 if (nofp && !(error = nfs_open_file_set_busy(nofp, NULL))) {
8700 nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
8701 nofp->nof_creator = NULL;
8702 nfs_open_file_clear_busy(nofp);
8703 }
8704
8705 /* Setting an empty FinderInfo really means remove it, skip to the close/remove */
0a7de745 8706 if (isfinderinfo && empty) {
6d2010ae 8707 goto doclose;
0a7de745 8708 }
6d2010ae
A
8709
8710 /*
8711 * Write the data out and flush.
8712 *
8713 * For FinderInfo, we've already copied the data to finfo, so do I/O from there.
8714 */
8715 vwa.a_desc = &vnop_write_desc;
8716 vwa.a_vp = NFSTOV(anp);
8717 vwa.a_uio = NULL;
8718 vwa.a_ioflag = 0;
8719 vwa.a_context = ctx;
8720 if (isfinderinfo) {
8721 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE, &uio_buf, sizeof(uio_buf));
8722 uio_addiov(auio, (uintptr_t)&finfo, sizeof(finfo));
8723 vwa.a_uio = auio;
8724 } else if (uio_resid(uio) > 0) {
8725 vwa.a_uio = uio;
8726 }
8727 if (vwa.a_uio) {
8728 error = nfs_vnop_write(&vwa);
0a7de745 8729 if (!error) {
6d2010ae 8730 error = nfs_flush(anp, MNT_WAIT, vfs_context_thread(ctx), 0);
0a7de745 8731 }
6d2010ae
A
8732 }
8733doclose:
8734 /* Close the xattr. */
8735 if (nofp) {
8736 int busyerror = nfs_open_file_set_busy(nofp, NULL);
8737 closeerror = nfs_close(anp, nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, ctx);
0a7de745 8738 if (!busyerror) {
6d2010ae 8739 nfs_open_file_clear_busy(nofp);
0a7de745 8740 }
6d2010ae 8741 }
0a7de745 8742 if (!error && isfinderinfo && empty) { /* Setting an empty FinderInfo really means remove it */
6d2010ae 8743 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
0a7de745 8744 if (error == ENOENT) {
6d2010ae 8745 error = 0;
0a7de745 8746 }
6d2010ae 8747 }
0a7de745 8748 if (!error) {
6d2010ae 8749 error = closeerror;
0a7de745 8750 }
6d2010ae 8751out:
0a7de745 8752 if (anp) {
6d2010ae 8753 vnode_put(NFSTOV(anp));
0a7de745
A
8754 }
8755 if (error == ENOENT) {
6d2010ae 8756 error = ENOATTR;
0a7de745
A
8757 }
8758 return error;
2d21ac55
A
8759}
8760
8761int
6d2010ae
A
8762nfs4_vnop_removexattr(
8763 struct vnop_removexattr_args /* {
0a7de745
A
8764 * struct vnodeop_desc *a_desc;
8765 * vnode_t a_vp;
8766 * const char * a_name;
8767 * int a_options;
8768 * vfs_context_t a_context;
8769 * } */*ap)
2d21ac55 8770{
6d2010ae 8771 struct nfsmount *nmp = VTONMP(ap->a_vp);
2d21ac55
A
8772 int error;
8773
0a7de745
A
8774 if (nfs_mount_gone(nmp)) {
8775 return ENXIO;
8776 }
8777 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8778 return ENOTSUP;
8779 }
6d2010ae
A
8780
8781 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), NULL, ap->a_name, ap->a_context);
0a7de745 8782 if (error == ENOENT) {
6d2010ae 8783 error = ENOATTR;
0a7de745
A
8784 }
8785 return error;
2d21ac55
A
8786}
8787
8788int
6d2010ae
A
8789nfs4_vnop_listxattr(
8790 struct vnop_listxattr_args /* {
0a7de745
A
8791 * struct vnodeop_desc *a_desc;
8792 * vnode_t a_vp;
8793 * uio_t a_uio;
8794 * size_t *a_size;
8795 * int a_options;
8796 * vfs_context_t a_context;
8797 * } */*ap)
2d21ac55 8798{
6d2010ae
A
8799 vfs_context_t ctx = ap->a_context;
8800 nfsnode_t np = VTONFS(ap->a_vp);
8801 uio_t uio = ap->a_uio;
8802 nfsnode_t adnp = NULL;
8803 struct nfsmount *nmp;
8804 int error, done, i;
f427ee49 8805 struct nfs_vattr *nvattr;
6d2010ae
A
8806 uint64_t cookie, nextcookie, lbn = 0;
8807 struct nfsbuf *bp = NULL;
8808 struct nfs_dir_buf_header *ndbhp;
8809 struct direntry *dp;
2d21ac55 8810
6d2010ae 8811 nmp = VTONMP(ap->a_vp);
0a7de745
A
8812 if (nfs_mount_gone(nmp)) {
8813 return ENXIO;
8814 }
6d2010ae 8815
0a7de745
A
8816 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8817 return ENOTSUP;
8818 }
6d2010ae 8819
f427ee49
A
8820 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
8821 error = nfs_getattr(np, nvattr, ctx, NGA_CACHED);
0a7de745 8822 if (error) {
f427ee49 8823 goto out_free;
0a7de745 8824 }
f427ee49
A
8825 if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8826 !(nvattr->nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
8827 error = 0;
8828 goto out_free;
0a7de745 8829 }
6d2010ae 8830
0a7de745 8831 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
f427ee49 8832 goto out_free;
0a7de745 8833 }
6d2010ae
A
8834 adnp = nfs4_named_attr_dir_get(np, 1, ctx);
8835 nfs_node_clear_busy(np);
0a7de745 8836 if (!adnp) {
6d2010ae 8837 goto out;
0a7de745 8838 }
6d2010ae 8839
0a7de745 8840 if ((error = nfs_node_lock(adnp))) {
6d2010ae 8841 goto out;
0a7de745 8842 }
6d2010ae
A
8843
8844 if (adnp->n_flag & NNEEDINVALIDATE) {
8845 adnp->n_flag &= ~NNEEDINVALIDATE;
8846 nfs_invaldir(adnp);
8847 nfs_node_unlock(adnp);
8848 error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
0a7de745 8849 if (!error) {
6d2010ae 8850 error = nfs_node_lock(adnp);
0a7de745
A
8851 }
8852 if (error) {
6d2010ae 8853 goto out;
0a7de745 8854 }
6d2010ae
A
8855 }
8856
8857 /*
8858 * check for need to invalidate when (re)starting at beginning
8859 */
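 /*
  * (NMODIFIED on the attribute directory means it was changed locally --
  *  e.g. an attribute was added or removed -- so dump the cached readdir
  *  buffers and refetch the listing from the server)
  */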
8860 if (adnp->n_flag & NMODIFIED) {
8861 nfs_invaldir(adnp);
8862 nfs_node_unlock(adnp);
0a7de745 8863 if ((error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1))) {
6d2010ae 8864 goto out;
0a7de745 8865 }
6d2010ae
A
8866 } else {
8867 nfs_node_unlock(adnp);
8868 }
8869 /* nfs_getattr() will check changed and purge caches */
f427ee49 8870 if ((error = nfs_getattr(adnp, nvattr, ctx, NGA_UNCACHED))) {
6d2010ae 8871 goto out;
0a7de745 8872 }
6d2010ae 8873
0a7de745 8874 if (uio && (uio_resid(uio) == 0)) {
6d2010ae 8875 goto out;
0a7de745 8876 }
6d2010ae
A
8877
8878 done = 0;
8879 nextcookie = lbn = 0;
8880
8881 while (!error && !done) {
316670eb 8882 OSAddAtomic64(1, &nfsstats.biocache_readdirs);
6d2010ae
A
8883 cookie = nextcookie;
8884getbuffer:
8885 error = nfs_buf_get(adnp, lbn, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
0a7de745 8886 if (error) {
6d2010ae 8887 goto out;
0a7de745 8888 }
6d2010ae
A
8889 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
8890 if (!ISSET(bp->nb_flags, NB_CACHE) || !ISSET(ndbhp->ndbh_flags, NDB_FULL)) {
8891 if (!ISSET(bp->nb_flags, NB_CACHE)) { /* initialize the buffer */
8892 ndbhp->ndbh_flags = 0;
8893 ndbhp->ndbh_count = 0;
8894 ndbhp->ndbh_entry_end = sizeof(*ndbhp);
8895 ndbhp->ndbh_ncgen = adnp->n_ncgen;
8896 }
8897 error = nfs_buf_readdir(bp, ctx);
0a7de745 8898 if (error == NFSERR_DIRBUFDROPPED) {
6d2010ae 8899 goto getbuffer;
0a7de745
A
8900 }
8901 if (error) {
6d2010ae 8902 nfs_buf_release(bp, 1);
0a7de745 8903 }
6d2010ae
A
8904 if (error && (error != ENXIO) && (error != ETIMEDOUT) && (error != EINTR) && (error != ERESTART)) {
8905 if (!nfs_node_lock(adnp)) {
8906 nfs_invaldir(adnp);
8907 nfs_node_unlock(adnp);
8908 }
8909 nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
0a7de745 8910 if (error == NFSERR_BAD_COOKIE) {
6d2010ae 8911 error = ENOENT;
0a7de745 8912 }
6d2010ae 8913 }
0a7de745 8914 if (error) {
6d2010ae 8915 goto out;
0a7de745 8916 }
6d2010ae
A
8917 }
8918
8919 /* go through all the entries copying/counting */
8920 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
0a7de745 8921 for (i = 0; i < ndbhp->ndbh_count; i++) {
6d2010ae
A
8922 if (!xattr_protected(dp->d_name)) {
8923 if (uio == NULL) {
8924 *ap->a_size += dp->d_namlen + 1;
8925 } else if (uio_resid(uio) < (dp->d_namlen + 1)) {
8926 error = ERANGE;
8927 } else {
0a7de745
A
8928 error = uiomove(dp->d_name, dp->d_namlen + 1, uio);
8929 if (error && (error != EFAULT)) {
6d2010ae 8930 error = ERANGE;
0a7de745 8931 }
6d2010ae
A
8932 }
8933 }
8934 nextcookie = dp->d_seekoff;
8935 dp = NFS_DIRENTRY_NEXT(dp);
8936 }
8937
8938 if (i == ndbhp->ndbh_count) {
8939 /* hit end of buffer, move to next buffer */
8940 lbn = nextcookie;
8941 /* if we also hit EOF, we're done */
0a7de745 8942 if (ISSET(ndbhp->ndbh_flags, NDB_EOF)) {
6d2010ae 8943 done = 1;
0a7de745 8944 }
6d2010ae
A
8945 }
8946 if (!error && !done && (nextcookie == cookie)) {
8947 printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie, i, ndbhp->ndbh_count);
8948 error = EIO;
8949 }
8950 nfs_buf_release(bp, 1);
8951 }
8952out:
0a7de745 8953 if (adnp) {
6d2010ae 8954 vnode_put(NFSTOV(adnp));
0a7de745 8955 }
f427ee49
A
8956out_free:
8957 FREE(nvattr, M_TEMP);
0a7de745 8958 return error;
2d21ac55
A
8959}
8960
6d2010ae 8961#if NAMEDSTREAMS
2d21ac55 8962int
6d2010ae
A
8963nfs4_vnop_getnamedstream(
8964 struct vnop_getnamedstream_args /* {
0a7de745
A
8965 * struct vnodeop_desc *a_desc;
8966 * vnode_t a_vp;
8967 * vnode_t *a_svpp;
8968 * const char *a_name;
8969 * enum nsoperation a_operation;
8970 * int a_flags;
8971 * vfs_context_t a_context;
8972 * } */*ap)
2d21ac55
A
8973{
8974 vfs_context_t ctx = ap->a_context;
2d21ac55 8975 struct nfsmount *nmp;
f427ee49 8976 struct nfs_vattr *nvattr;
6d2010ae
A
8977 struct componentname cn;
8978 nfsnode_t anp;
8979 int error = 0;
2d21ac55 8980
6d2010ae 8981 nmp = VTONMP(ap->a_vp);
0a7de745
A
8982 if (nfs_mount_gone(nmp)) {
8983 return ENXIO;
8984 }
2d21ac55 8985
0a7de745
A
8986 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8987 return ENOTSUP;
8988 }
f427ee49
A
8989
8990 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
8991 error = nfs_getattr(VTONFS(ap->a_vp), nvattr, ctx, NGA_CACHED);
0a7de745 8992 if (error) {
f427ee49 8993 goto out;
0a7de745 8994 }
f427ee49
A
8995 if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8996 !(nvattr->nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
8997 error = ENOATTR;
8998 goto out;
0a7de745 8999 }
2d21ac55 9000
6d2010ae
A
9001 bzero(&cn, sizeof(cn));
9002 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
f427ee49 9003 cn.cn_namelen = NFS_STRLEN_INT(ap->a_name);
6d2010ae
A
9004 cn.cn_nameiop = LOOKUP;
9005 cn.cn_flags = MAKEENTRY;
9006
9007 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
0a7de745
A
9008 0, ctx, &anp, NULL);
9009 if ((!error && !anp) || (error == ENOENT)) {
6d2010ae 9010 error = ENOATTR;
0a7de745
A
9011 }
9012 if (!error && anp) {
6d2010ae 9013 *ap->a_svpp = NFSTOV(anp);
0a7de745 9014 } else if (anp) {
6d2010ae 9015 vnode_put(NFSTOV(anp));
0a7de745 9016 }
f427ee49
A
9017out:
9018 FREE(nvattr, M_TEMP);
0a7de745 9019 return error;
2d21ac55
A
9020}
9021
9022int
6d2010ae
A
9023nfs4_vnop_makenamedstream(
9024 struct vnop_makenamedstream_args /* {
0a7de745
A
9025 * struct vnodeop_desc *a_desc;
9026 * vnode_t *a_svpp;
9027 * vnode_t a_vp;
9028 * const char *a_name;
9029 * int a_flags;
9030 * vfs_context_t a_context;
9031 * } */*ap)
2d21ac55
A
9032{
9033 vfs_context_t ctx = ap->a_context;
6d2010ae
A
9034 struct nfsmount *nmp;
9035 struct componentname cn;
9036 nfsnode_t anp;
2d21ac55 9037 int error = 0;
2d21ac55 9038
6d2010ae 9039 nmp = VTONMP(ap->a_vp);
0a7de745
A
9040 if (nfs_mount_gone(nmp)) {
9041 return ENXIO;
9042 }
2d21ac55 9043
0a7de745
A
9044 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
9045 return ENOTSUP;
9046 }
2d21ac55 9047
6d2010ae
A
9048 bzero(&cn, sizeof(cn));
9049 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
f427ee49 9050 cn.cn_namelen = NFS_STRLEN_INT(ap->a_name);
6d2010ae
A
9051 cn.cn_nameiop = CREATE;
9052 cn.cn_flags = MAKEENTRY;
9053
9054 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
0a7de745
A
9055 NFS_GET_NAMED_ATTR_CREATE, ctx, &anp, NULL);
9056 if ((!error && !anp) || (error == ENOENT)) {
6d2010ae 9057 error = ENOATTR;
0a7de745
A
9058 }
9059 if (!error && anp) {
6d2010ae 9060 *ap->a_svpp = NFSTOV(anp);
0a7de745 9061 } else if (anp) {
6d2010ae 9062 vnode_put(NFSTOV(anp));
0a7de745
A
9063 }
9064 return error;
6d2010ae 9065}
2d21ac55 9066
6d2010ae
A
9067int
9068nfs4_vnop_removenamedstream(
9069 struct vnop_removenamedstream_args /* {
0a7de745
A
9070 * struct vnodeop_desc *a_desc;
9071 * vnode_t a_vp;
9072 * vnode_t a_svp;
9073 * const char *a_name;
9074 * int a_flags;
9075 * vfs_context_t a_context;
9076 * } */*ap)
6d2010ae
A
9077{
9078 struct nfsmount *nmp = VTONMP(ap->a_vp);
9079 nfsnode_t np = ap->a_vp ? VTONFS(ap->a_vp) : NULL;
9080 nfsnode_t anp = ap->a_svp ? VTONFS(ap->a_svp) : NULL;
2d21ac55 9081
0a7de745
A
9082 if (nfs_mount_gone(nmp)) {
9083 return ENXIO;
9084 }
2d21ac55
A
9085
9086 /*
6d2010ae
A
9087 * Given that a_svp is a named stream, checking for
9088 * named attribute support is kinda pointless.
2d21ac55 9089 */
0a7de745
A
9090 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
9091 return ENOTSUP;
9092 }
6d2010ae 9093
0a7de745 9094 return nfs4_named_attr_remove(np, anp, ap->a_name, ap->a_context);
2d21ac55
A
9095}
9096
6d2010ae 9097#endif
cb323159 9098#endif /* CONFIG_NFS4 */
ea3f0419
A
9099
9100#endif /* CONFIG_NFS_CLIENT */