/*
 * Copyright (c) 2006-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <nfs/nfs_conf.h>
#if CONFIG_NFS_CLIENT

/*
 * vnode op calls for NFS version 4
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount_internal.h>
#include <sys/malloc.h>
#include <sys/kpi_mbuf.h>
#include <sys/conf.h>
#include <sys/vnode_internal.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/ubc_internal.h>
#include <sys/attr.h>
#include <sys/signalvar.h>
#include <sys/uio_internal.h>
#include <sys/xattr.h>
#include <sys/paths.h>

#include <vfs/vfs_support.h>

#include <sys/vm.h>

#include <sys/time.h>
#include <kern/clock.h>
#include <libkern/OSAtomic.h>

#include <miscfs/fifofs/fifo.h>
#include <miscfs/specfs/specdev.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_gss.h>
#include <nfs/nfsmount.h>
#include <nfs/nfs_lock.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <vm/vm_kern.h>

#include <kern/task.h>
#include <kern/sched_prim.h>

#if CONFIG_NFS4
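/*
 * Perform an NFSv4 ACCESS request (PUTFH, ACCESS, GETATTR compound) and
 * cache the returned access rights in the node's access cache slot for
 * the caller's uid.
 */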
int
nfs4_access_rpc(nfsnode_t np, u_int32_t *access, int rpcflags, vfs_context_t ctx)
{
	int error = 0, lockerror = ENOENT, status, numops, slot;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct timeval now;
	uint32_t access_result = 0, supported = 0, missing;
	struct nfsmount *nmp = NFSTONMP(np);
	int nfsvers = nmp->nm_vers;
	uid_t uid;
	struct nfsreq_secinfo_args si;

	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return 0;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, ACCESS, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "access", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_ACCESS);
	nfsm_chain_add_32(error, &nmreq, *access);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx),
	    &si, rpcflags, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_ACCESS);
	nfsm_chain_get_32(error, &nmrep, supported);
	nfsm_chain_get_32(error, &nmrep, access_result);
	nfsmout_if(error);
	if ((missing = (*access & ~supported))) {
		/* missing support for something(s) we wanted */
		if (missing & NFS_ACCESS_DELETE) {
			/*
			 * If the server doesn't report DELETE (possible
			 * on UNIX systems), we'll assume that it is OK
			 * and just let any subsequent delete action fail
			 * if it really isn't deletable.
			 */
			access_result |= NFS_ACCESS_DELETE;
		}
	}
	/* ".zfs" subdirectories may erroneously give a denied answer for modify/delete */
	if (nfs_access_dotzfs) {
		vnode_t dvp = NULLVP;
		if (np->n_flag & NISDOTZFSCHILD) { /* may be able to create/delete snapshot dirs */
			access_result |= (NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE);
		} else if (((dvp = vnode_getparent(NFSTOV(np))) != NULLVP) && (VTONFS(dvp)->n_flag & NISDOTZFSCHILD)) {
			access_result |= NFS_ACCESS_DELETE; /* may be able to delete snapshot dirs */
		}
		if (dvp != NULLVP) {
			vnode_put(dvp);
		}
	}
	/* Some servers report DELETE support but erroneously give a denied answer. */
	if (nfs_access_delete && (*access & NFS_ACCESS_DELETE) && !(access_result & NFS_ACCESS_DELETE)) {
		access_result |= NFS_ACCESS_DELETE;
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	nfsmout_if(error);

	if (nfs_mount_gone(nmp)) {
		error = ENXIO;
	}
	nfsmout_if(error);

	if (auth_is_kerberized(np->n_auth) || auth_is_kerberized(nmp->nm_auth)) {
		uid = nfs_cred_getasid2uid(vfs_context_ucred(ctx));
	} else {
		uid = kauth_cred_getuid(vfs_context_ucred(ctx));
	}
	slot = nfs_node_access_slot(np, uid, 1);
	np->n_accessuid[slot] = uid;
	microuptime(&now);
	np->n_accessstamp[slot] = now.tv_sec;
	np->n_access[slot] = access_result;

	/* pass back the access returned with this request */
	*access = np->n_access[slot];
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

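/*
 * Fetch attributes for the given file handle via a PUTFH/GETATTR compound,
 * optionally asking for the ACL when NGA_ACL is set and the server supports ACLs.
 */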
int
nfs4_getattr_rpc(
	nfsnode_t np,
	mount_t mp,
	u_char *fhp,
	size_t fhsize,
	int flags,
	vfs_context_t ctx,
	struct nfs_vattr *nvap,
	u_int64_t *xidp)
{
	struct nfsmount *nmp = mp ? VFSTONFS(mp) : NFSTONMP(np);
	int error = 0, status, nfsvers, numops, rpcflags = 0, acls;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	if (np && (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)) {
		nfs4_default_attrs_for_referral_trigger(VTONFS(np->n_parent), NULL, 0, nvap, NULL);
		return 0;
	}

	if (flags & NGA_MONITOR) { /* vnode monitor requests should be soft */
		rpcflags = R_RECOVER;
	}

	if (flags & NGA_SOFT) { /* Return ETIMEDOUT if server not responding */
		rpcflags |= R_SOFT;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "getattr", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fhp, fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	if ((flags & NGA_ACL) && acls) {
		NFS_BITMAP_SET(bitmap, NFS_FATTR_ACL);
	}
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, mp, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx),
	    NULL, rpcflags, &nmrep, xidp, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((flags & NGA_ACL) && acls && !NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL)) {
		/* we asked for the ACL but didn't get one... assume there isn't one */
		NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_ACL);
		nvap->nva_acl = NULL;
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

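/*
 * Read the contents of a symbolic link via a PUTFH/GETATTR/READLINK compound,
 * truncating the result to fit the caller's buffer.
 */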
int
nfs4_readlink_rpc(nfsnode_t np, char *buf, size_t *buflenp, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, status, numops;
	size_t len = 0;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, READLINK
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "readlink", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READLINK);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READLINK);
	nfsm_chain_get_32(error, &nmrep, len);
	nfsmout_if(error);
	if (len >= *buflenp) {
		if (np->n_size && (np->n_size < *buflenp)) {
			len = np->n_size;
		} else {
			len = *buflenp - 1;
		}
	}
	nfsm_chain_get_opaque(error, &nmrep, len, buf);
	if (!error) {
		*buflenp = len;
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

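/*
 * Start an asynchronous NFSv4 READ request (PUTFH, READ) using the node's
 * current open/lock stateid.
 */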
int
nfs4_read_rpc_async(
	nfsnode_t np,
	off_t offset,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, READ
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "read", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, len);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}

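/*
 * Complete an asynchronous READ request: parse the reply, copy the data
 * into the caller's uio, and report EOF.
 */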
int
nfs4_read_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	uio_t uio,
	size_t *lenp,
	int *eofp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, nfsvers, numops, status, eof = 0;
	size_t retlen = 0;
	u_int64_t xid;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		nfs_request_async_cancel(req);
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) { /* async request restarted */
		return error;
	}

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
	nfsm_chain_get_32(error, &nmrep, eof);
	nfsm_chain_get_32(error, &nmrep, retlen);
	if (!error) {
		*lenp = MIN(retlen, *lenp);
		error = nfsm_chain_get_uio(&nmrep, *lenp, uio);
	}
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	if (eofp) {
		if (!eof && !retlen) {
			eof = 1;
		}
		*eofp = eof;
	}
	nfsm_chain_cleanup(&nmrep);
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) {
		microuptime(&np->n_lastio);
	}
	return error;
}

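/*
 * Start an asynchronous NFSv4 WRITE request (PUTFH, WRITE, GETATTR),
 * downgrading to an unstable write on async mounts.
 */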
int
nfs4_write_rpc_async(
	nfsnode_t np,
	uio_t uio,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	int iomode,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	mount_t mp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	/* for async mounts, don't bother sending sync write requests */
	if ((iomode != NFS_WRITE_UNSTABLE) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) {
		iomode = NFS_WRITE_UNSTABLE;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, WRITE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 25 * NFSX_UNSIGNED + len);
	nfsm_chain_add_compound_header(error, &nmreq, "write", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_WRITE);
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, uio_offset(uio));
	nfsm_chain_add_32(error, &nmreq, iomode);
	nfsm_chain_add_32(error, &nmreq, len);
	if (!error) {
		error = nfsm_chain_add_uio(&nmreq, uio, len);
	}
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs4_getattr_write_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}

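/*
 * Complete an asynchronous WRITE request: parse the reply, track the
 * server's write verifier, and load the partial post-write attributes.
 */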
int
nfs4_write_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	int *iomodep,
	size_t *rlenp,
	uint64_t *wverfp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, nfsvers, numops, status;
	int committed = NFS_WRITE_FILESYNC;
	size_t rlen = 0;
	u_int64_t xid, wverf;
	mount_t mp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		nfs_request_async_cancel(req);
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) { /* async request restarted */
		return error;
	}
	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		error = ENXIO;
	}
	if (!error && (lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_WRITE);
	nfsm_chain_get_32(error, &nmrep, rlen);
	nfsmout_if(error);
	*rlenp = rlen;
	if (rlen <= 0) {
		error = NFSERR_IO;
	}
	nfsm_chain_get_32(error, &nmrep, committed);
	nfsm_chain_get_64(error, &nmrep, wverf);
	nfsmout_if(error);
	if (wverfp) {
		*wverfp = wverf;
	}
	lck_mtx_lock(&nmp->nm_lock);
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
		nmp->nm_verf = wverf;
		nmp->nm_state |= NFSSTA_HASWRITEVERF;
	} else if (nmp->nm_verf != wverf) {
		nmp->nm_verf = wverf;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);

	/*
	 * NFSv4 WRITE RPCs contain partial GETATTR requests - only type, change, size, metadatatime and modifytime are requested.
	 * In such cases, we do not update the time stamp - but the requested attributes.
	 */
	np->n_vattr.nva_flags |= NFS_FFLAG_PARTIAL_WRITE;
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	np->n_vattr.nva_flags &= ~NFS_FFLAG_PARTIAL_WRITE;

nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmrep);
	if ((committed != NFS_WRITE_FILESYNC) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) {
		committed = NFS_WRITE_FILESYNC;
	}
	*iomodep = committed;
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) {
		microuptime(&np->n_lastio);
	}
	return error;
}

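/*
 * Remove a directory entry via a PUTFH/REMOVE/GETATTR compound,
 * retrying if the server is in its grace period.
 */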
int
nfs4_remove_rpc(
	nfsnode_t dnp,
	char *name,
	int namelen,
	thread_t thd,
	kauth_cred_t cred)
{
	int error = 0, lockerror = ENOENT, remove_error = 0, status;
	struct nfsmount *nmp;
	int nfsvers, numops;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
restart:
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, REMOVE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "remove", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_REMOVE);
	nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_REMOVE);
	remove_error = error;
	nfsm_chain_check_change_info(error, &nmrep, dnp);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
	if (error && !lockerror) {
		NATTRINVALIDATE(dnp);
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	if (!lockerror) {
		dnp->n_flag |= NMODIFIED;
		nfs_node_unlock(dnp);
	}
	if (error == NFSERR_GRACE) {
		tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
		goto restart;
	}

	return remove_error;
}

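/*
 * Rename an entry between directories with a single compound:
 * PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM).
 */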
int
nfs4_rename_rpc(
	nfsnode_t fdnp,
	char *fnameptr,
	int fnamelen,
	nfsnode_t tdnp,
	char *tnameptr,
	int tnamelen,
	vfs_context_t ctx)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsmount *nmp;
	u_int64_t xid, savedxid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(fdnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (fdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, fdnp, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM)
	numops = 7;
	nfsm_chain_build_alloc_init(error, &nmreq, 30 * NFSX_UNSIGNED + fnamelen + tnamelen);
	nfsm_chain_add_compound_header(error, &nmreq, "rename", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fdnp->n_fhp, fdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RENAME);
	nfsm_chain_add_name(error, &nmreq, fnameptr, fnamelen, nmp);
	nfsm_chain_add_name(error, &nmreq, tnameptr, tnamelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, fdnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request(fdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock2(fdnp, tdnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RENAME);
	nfsm_chain_check_change_info(error, &nmrep, fdnp);
	nfsm_chain_check_change_info(error, &nmrep, tdnp);
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	savedxid = xid;
	nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
	if (error && !lockerror) {
		NATTRINVALIDATE(tdnp);
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	xid = savedxid;
	nfsm_chain_loadattr(error, &nmrep, fdnp, nfsvers, &xid);
	if (error && !lockerror) {
		NATTRINVALIDATE(fdnp);
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if (!lockerror) {
		fdnp->n_flag |= NMODIFIED;
		tdnp->n_flag |= NMODIFIED;
		nfs_node_unlock2(fdnp, tdnp);
	}
	return error;
}

/*
 * NFS V4 readdir RPC.
 */
int
nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, nfsvers, namedattr, rdirplus, bigcookies, numops;
	int i, status, more_entries = 1, eof, bp_dropped = 0;
	uint16_t namlen, reclen;
	uint32_t nmreaddirsize, nmrsize;
	uint32_t namlen32, skiplen, fhlen, xlen, attrlen;
	uint64_t padlen, cookie, lastcookie, xid, savedxid, space_free, space_needed;
	struct nfsm_chain nmreq, nmrep, nmrepsave;
	fhandle_t *fh;
	struct nfs_vattr *nvattr, *nvattrp;
	struct nfs_dir_buf_header *ndbhp;
	struct direntry *dp;
	char *padstart;
	const char *tag;
	uint32_t entry_attrs[NFS_ATTR_BITMAP_LEN];
	struct timeval now;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	nmreaddirsize = nmp->nm_readdirsize;
	nmrsize = nmp->nm_rsize;
	bigcookies = nmp->nm_state & NFSSTA_BIGCOOKIES;
	namedattr = (dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) ? 1 : 0;
	rdirplus = (NMFLAG(nmp, RDIRPLUS) || namedattr) ? 1 : 0;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);

	/*
	 * Set up attribute request for entries.
	 * For READDIRPLUS functionality, get everything.
	 * Otherwise, just get what we need for struct direntry.
	 */
	if (rdirplus) {
		tag = "readdirplus";
		NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, entry_attrs);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEHANDLE);
	} else {
		tag = "readdir";
		NFS_CLEAR_ATTRIBUTES(entry_attrs);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_TYPE);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEID);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_MOUNTED_ON_FILEID);
	}
	NFS_BITMAP_SET(entry_attrs, NFS_FATTR_RDATTR_ERROR);

	/* lock to protect access to cookie verifier */
	if ((lockerror = nfs_node_lock(dnp))) {
		return lockerror;
	}

	fh = zalloc(nfs_fhandle_zone);
	MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);

	/* determine cookie to use, and move dp to the right offset */
	ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
	dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
	if (ndbhp->ndbh_count) {
		for (i = 0; i < ndbhp->ndbh_count - 1; i++) {
			dp = NFS_DIRENTRY_NEXT(dp);
		}
		cookie = dp->d_seekoff;
		dp = NFS_DIRENTRY_NEXT(dp);
	} else {
		cookie = bp->nb_lblkno;
		/* increment with every buffer read */
		OSAddAtomic64(1, &nfsstats.readdir_bios);
	}
	lastcookie = cookie;

	/*
	 * The NFS client is responsible for the "." and ".." entries in the
	 * directory. So, we put them at the start of the first buffer.
	 * Don't bother for attribute directories.
	 */
	if (((bp->nb_lblkno == 0) && (ndbhp->ndbh_count == 0)) &&
	    !(dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
		fh->fh_len = 0;
		fhlen = rdirplus ? fh->fh_len + 1 : 0;
		xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
		/* "." */
		namlen = 1;
		reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
		if (xlen) {
			bzero(&dp->d_name[namlen + 1], xlen);
		}
		dp->d_namlen = namlen;
		strlcpy(dp->d_name, ".", namlen + 1);
		dp->d_fileno = dnp->n_vattr.nva_fileid;
		dp->d_type = DT_DIR;
		dp->d_reclen = reclen;
		dp->d_seekoff = 1;
		padstart = dp->d_name + dp->d_namlen + 1 + xlen;
		dp = NFS_DIRENTRY_NEXT(dp);
		padlen = (char*)dp - padstart;
		if (padlen > 0) {
			bzero(padstart, padlen);
		}
		if (rdirplus) { /* zero out attributes */
			bzero(NFS_DIR_BUF_NVATTR(bp, 0), sizeof(struct nfs_vattr));
		}

		/* ".." */
		namlen = 2;
		reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
		if (xlen) {
			bzero(&dp->d_name[namlen + 1], xlen);
		}
		dp->d_namlen = namlen;
		strlcpy(dp->d_name, "..", namlen + 1);
		if (dnp->n_parent) {
			dp->d_fileno = VTONFS(dnp->n_parent)->n_vattr.nva_fileid;
		} else {
			dp->d_fileno = dnp->n_vattr.nva_fileid;
		}
		dp->d_type = DT_DIR;
		dp->d_reclen = reclen;
		dp->d_seekoff = 2;
		padstart = dp->d_name + dp->d_namlen + 1 + xlen;
		dp = NFS_DIRENTRY_NEXT(dp);
		padlen = (char*)dp - padstart;
		if (padlen > 0) {
			bzero(padstart, padlen);
		}
		if (rdirplus) { /* zero out attributes */
			bzero(NFS_DIR_BUF_NVATTR(bp, 1), sizeof(struct nfs_vattr));
		}

		ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
		ndbhp->ndbh_count = 2;
	}

	/*
	 * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
	 * the buffer is full (or we hit EOF). Then put the remainder of the
	 * results in the next buffer(s).
	 */
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);
	while (nfs_dir_buf_freespace(bp, rdirplus) && !(ndbhp->ndbh_flags & NDB_FULL)) {
		// PUTFH, GETATTR, READDIR
		numops = 3;
		nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
		nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
		nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
		nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_READDIR);
		nfsm_chain_add_64(error, &nmreq, (cookie <= 2) ? 0 : cookie);
		nfsm_chain_add_64(error, &nmreq, dnp->n_cookieverf);
		nfsm_chain_add_32(error, &nmreq, nmreaddirsize);
		nfsm_chain_add_32(error, &nmreq, nmrsize);
		nfsm_chain_add_bitmap_supported(error, &nmreq, entry_attrs, nmp, dnp);
		nfsm_chain_build_done(error, &nmreq);
		nfsm_assert(error, (numops == 0), EPROTO);
		nfs_node_unlock(dnp);
		nfsmout_if(error);
		error = nfs_request(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

		if ((lockerror = nfs_node_lock(dnp))) {
			error = lockerror;
		}

		savedxid = xid;
		nfsm_chain_skip_tag(error, &nmrep);
		nfsm_chain_get_32(error, &nmrep, numops);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
		nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_READDIR);
		nfsm_chain_get_64(error, &nmrep, dnp->n_cookieverf);
		nfsm_chain_get_32(error, &nmrep, more_entries);

		if (!lockerror) {
			nfs_node_unlock(dnp);
			lockerror = ENOENT;
		}
		nfsmout_if(error);

		if (rdirplus) {
			microuptime(&now);
			if (lastcookie == 0) {
				dnp->n_rdirplusstamp_sof = now.tv_sec;
				dnp->n_rdirplusstamp_eof = 0;
			}
		}

		/* loop through the entries packing them into the buffer */
		while (more_entries) {
			/* Entry: COOKIE, NAME, FATTR */
			nfsm_chain_get_64(error, &nmrep, cookie);
			nfsm_chain_get_32(error, &nmrep, namlen32);
			if (namlen32 > UINT16_MAX) {
				error = EBADRPC;
				goto nfsmout;
			}
			namlen = (uint16_t)namlen32;
			nfsmout_if(error);
			if (!bigcookies && (cookie >> 32) && (nmp == NFSTONMP(dnp))) {
				/* we've got a big cookie, make sure flag is set */
				lck_mtx_lock(&nmp->nm_lock);
				nmp->nm_state |= NFSSTA_BIGCOOKIES;
				lck_mtx_unlock(&nmp->nm_lock);
				bigcookies = 1;
			}
			/* just truncate names that don't fit in direntry.d_name */
			if (namlen <= 0) {
				error = EBADRPC;
				goto nfsmout;
			}
			if (namlen > (sizeof(dp->d_name) - 1)) {
				skiplen = namlen - sizeof(dp->d_name) + 1;
				namlen = sizeof(dp->d_name) - 1;
			} else {
				skiplen = 0;
			}
			/* guess that fh size will be same as parent */
			fhlen = rdirplus ? (1 + dnp->n_fhsize) : 0;
			xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
			attrlen = rdirplus ? sizeof(struct nfs_vattr) : 0;
			reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
			space_needed = reclen + attrlen;
			space_free = nfs_dir_buf_freespace(bp, rdirplus);
			if (space_needed > space_free) {
				/*
				 * We still have entries to pack, but we've
				 * run out of room in the current buffer.
				 * So we need to move to the next buffer.
				 * The block# for the next buffer is the
				 * last cookie in the current buffer.
				 */
nextbuffer:
				ndbhp->ndbh_flags |= NDB_FULL;
				nfs_buf_release(bp, 0);
				bp_dropped = 1;
				bp = NULL;
				error = nfs_buf_get(dnp, lastcookie, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
				nfsmout_if(error);
				/* initialize buffer */
				ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
				ndbhp->ndbh_flags = 0;
				ndbhp->ndbh_count = 0;
				ndbhp->ndbh_entry_end = sizeof(*ndbhp);
				ndbhp->ndbh_ncgen = dnp->n_ncgen;
				space_free = nfs_dir_buf_freespace(bp, rdirplus);
				dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
				/* increment with every buffer read */
				OSAddAtomic64(1, &nfsstats.readdir_bios);
			}
			nmrepsave = nmrep;
			dp->d_fileno = cookie; /* placeholder */
			dp->d_seekoff = cookie;
			dp->d_namlen = namlen;
			dp->d_reclen = reclen;
			dp->d_type = DT_UNKNOWN;
			nfsm_chain_get_opaque(error, &nmrep, namlen, dp->d_name);
			nfsmout_if(error);
			dp->d_name[namlen] = '\0';
			if (skiplen) {
				nfsm_chain_adv(error, &nmrep,
				    nfsm_rndup(namlen + skiplen) - nfsm_rndup(namlen));
			}
			nfsmout_if(error);
			nvattrp = rdirplus ? NFS_DIR_BUF_NVATTR(bp, ndbhp->ndbh_count) : nvattr;
			error = nfs4_parsefattr(&nmrep, NULL, nvattrp, fh, NULL, NULL);
			if (!error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_ACL)) {
				/* we do NOT want ACLs returned to us here */
				NFS_BITMAP_CLR(nvattrp->nva_bitmap, NFS_FATTR_ACL);
				if (nvattrp->nva_acl) {
					kauth_acl_free(nvattrp->nva_acl);
					nvattrp->nva_acl = NULL;
				}
			}
			if (error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_RDATTR_ERROR)) {
				/* OK, we may not have gotten all of the attributes but we will use what we can. */
				if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
					/* set this up to look like a referral trigger */
					nfs4_default_attrs_for_referral_trigger(dnp, dp->d_name, namlen, nvattrp, fh);
				}
				error = 0;
			}
			/* check for more entries after this one */
			nfsm_chain_get_32(error, &nmrep, more_entries);
			nfsmout_if(error);

			/* Skip any "." and ".." entries returned from server. */
			/* Also skip any bothersome named attribute entries. */
			if (((dp->d_name[0] == '.') && ((namlen == 1) || ((namlen == 2) && (dp->d_name[1] == '.')))) ||
			    (namedattr && (namlen == 11) && (!strcmp(dp->d_name, "SUNWattr_ro") || !strcmp(dp->d_name, "SUNWattr_rw")))) {
				lastcookie = cookie;
				continue;
			}

			if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_TYPE)) {
				dp->d_type = IFTODT(VTTOIF(nvattrp->nva_type));
			}
			if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEID)) {
				dp->d_fileno = nvattrp->nva_fileid;
			}
			if (rdirplus) {
				/* fileid is already in d_fileno, so stash xid in attrs */
				nvattrp->nva_fileid = savedxid;
				nvattrp->nva_flags |= NFS_FFLAG_FILEID_CONTAINS_XID;
				if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
					fhlen = fh->fh_len + 1;
					xlen = fhlen + sizeof(time_t);
					reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
					space_needed = reclen + attrlen;
					if (space_needed > space_free) {
						/* didn't actually have the room... move on to next buffer */
						nmrep = nmrepsave;
						goto nextbuffer;
					}
					/* pack the file handle into the record */
					dp->d_name[dp->d_namlen + 1] = (unsigned char)fh->fh_len; /* No truncation because fh_len's value is checked during nfs4_parsefattr() */
					bcopy(fh->fh_data, &dp->d_name[dp->d_namlen + 2], fh->fh_len);
				} else {
					/* mark the file handle invalid */
					fh->fh_len = 0;
					fhlen = fh->fh_len + 1;
					xlen = fhlen + sizeof(time_t);
					reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
					bzero(&dp->d_name[dp->d_namlen + 1], fhlen);
				}
				*(time_t*)(&dp->d_name[dp->d_namlen + 1 + fhlen]) = now.tv_sec;
				dp->d_reclen = reclen;
				nfs_rdirplus_update_node_attrs(dnp, dp, fh, nvattrp, &savedxid);
			}
			padstart = dp->d_name + dp->d_namlen + 1 + xlen;
			ndbhp->ndbh_count++;
			lastcookie = cookie;

			/* advance to next direntry in buffer */
			dp = NFS_DIRENTRY_NEXT(dp);
			ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
			/* zero out the pad bytes */
			padlen = (char*)dp - padstart;
			if (padlen > 0) {
				bzero(padstart, padlen);
			}
		}
		/* Finally, get the eof boolean */
		nfsm_chain_get_32(error, &nmrep, eof);
		nfsmout_if(error);
		if (eof) {
			ndbhp->ndbh_flags |= (NDB_FULL | NDB_EOF);
			nfs_node_lock_force(dnp);
			dnp->n_eofcookie = lastcookie;
			if (rdirplus) {
				dnp->n_rdirplusstamp_eof = now.tv_sec;
			}
			nfs_node_unlock(dnp);
		} else {
			more_entries = 1;
		}
		if (bp_dropped) {
			nfs_buf_release(bp, 0);
			bp = NULL;
			break;
		}
		if ((lockerror = nfs_node_lock(dnp))) {
			error = lockerror;
		}
		nfsmout_if(error);
		nfsm_chain_cleanup(&nmrep);
		nfsm_chain_null(&nmreq);
	}
nfsmout:
	if (bp_dropped && bp) {
		nfs_buf_release(bp, 0);
	}
	if (!lockerror) {
		nfs_node_unlock(dnp);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	NFS_ZFREE(nfs_fhandle_zone, fh);
	FREE(nvattr, M_TEMP);
	return bp_dropped ? NFSERR_DIRBUFDROPPED : error;
}

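/*
 * Start an asynchronous LOOKUP (or LOOKUPP for "..") compound that also
 * fetches the resulting file handle and attributes.
 */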
int
nfs4_lookup_rpc_async(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq **reqp)
{
	int error = 0, isdotdot = 0, nfsvers, numops;
	struct nfsm_chain nmreq;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsmount *nmp;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
		isdotdot = 1;
		NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
	} else {
		NFSREQ_SECINFO_SET(&si, dnp, dnp->n_fhp, dnp->n_fhsize, name, namelen);
	}

	nfsm_chain_null(&nmreq);

	// PUTFH, GETATTR, LOOKUP(P), GETFH, GETATTR (FH)
	numops = 5;
	nfsm_chain_build_alloc_init(error, &nmreq, 20 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "lookup", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	numops--;
	if (isdotdot) {
		nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUPP);
	} else {
		nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
		nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	}
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	/* some ".zfs" directories can't handle being asked for some attributes */
	if ((dnp->n_flag & NISDOTZFS) && !isdotdot) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	if ((dnp->n_flag & NISDOTZFSCHILD) && isdotdot) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	if (((namelen == 4) && (name[0] == '.') && (name[1] == 'z') && (name[2] == 'f') && (name[3] == 's'))) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}


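/*
 * Complete an asynchronous LOOKUP request: parse the returned file handle
 * and attributes, and fetch SECINFO for the mount if it is still needed.
 */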
int
nfs4_lookup_rpc_async_finish(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq *req,
	u_int64_t *xidp,
	fhandle_t *fhp,
	struct nfs_vattr *nvap)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops, isdotdot = 0;
	uint32_t op = NFS_OP_LOOKUP;
	u_int64_t xid;
	struct nfsmount *nmp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(dnp);
	if (nmp == NULL) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
		isdotdot = 1;
	}

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	if (xidp) {
		*xidp = xid;
	}
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);

	nfsm_chain_op_check(error, &nmrep, (isdotdot ? NFS_OP_LOOKUPP : NFS_OP_LOOKUP));
	nfsmout_if(error || !fhp || !nvap);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETFH);
	nfsm_chain_get_32(error, &nmrep, fhp->fh_len);
	if (error == 0 && fhp->fh_len > sizeof(fhp->fh_data)) {
		error = EBADRPC;
	}
	nfsmout_if(error);
	nfsm_chain_get_opaque(error, &nmrep, fhp->fh_len, fhp->fh_data);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
		/* set this up to look like a referral trigger */
		nfs4_default_attrs_for_referral_trigger(dnp, name, namelen, nvap, fhp);
		error = 0;
	} else {
		nfsmout_if(error);
		error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(dnp);
	}
	nfsm_chain_cleanup(&nmrep);
	if (!error && (op == NFS_OP_LOOKUP) && (nmp->nm_state & NFSSTA_NEEDSECINFO)) {
		/* We still need to get SECINFO to set default for mount. */
		/* Do so for the first LOOKUP that returns successfully. */
		struct nfs_sec sec;

		sec.count = NX_MAX_SEC_FLAVORS;
		error = nfs4_secinfo_rpc(nmp, &req->r_secinfo, vfs_context_ucred(ctx), sec.flavors, &sec.count);
		/* [sigh] some implementations return "illegal" error for unsupported ops */
		if (error == NFSERR_OP_ILLEGAL) {
			error = 0;
		}
		if (!error) {
			/* set our default security flavor to the first in the list */
			lck_mtx_lock(&nmp->nm_lock);
			if (sec.count) {
				nmp->nm_auth = sec.flavors[0];
			}
			nmp->nm_state &= ~NFSSTA_NEEDSECINFO;
			lck_mtx_unlock(&nmp->nm_lock);
		}
	}
	return error;
}

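/*
 * Commit unstable writes on the server via a PUTFH/COMMIT/GETATTR compound
 * and check the returned write verifier against the caller's.
 */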
int
nfs4_commit_rpc(
	nfsnode_t np,
	uint64_t offset,
	uint64_t count,
	kauth_cred_t cred,
	uint64_t wverf)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, status, nfsvers, numops;
	u_int64_t xid, newwverf;
	uint32_t count32;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	FSDBG(521, np, offset, count, nmp ? nmp->nm_state : 0);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
		return 0;
	}
	nfsvers = nmp->nm_vers;
	count32 = count > UINT32_MAX ? 0 : (uint32_t)count;

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, COMMIT, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 19 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "commit", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_COMMIT);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, count32);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    current_thread(), cred, &si, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_COMMIT);
	nfsm_chain_get_64(error, &nmrep, newwverf);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsmout_if(error);
	lck_mtx_lock(&nmp->nm_lock);
	if (nmp->nm_verf != newwverf) {
		nmp->nm_verf = newwverf;
	}
	if (wverf != newwverf) {
		error = NFSERR_STALEWRITEVERF;
	}
	lck_mtx_unlock(&nmp->nm_lock);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

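/*
 * Fetch pathconf-style attributes (max link, max name, case handling, etc.)
 * for this node with a PUTFH/GETATTR compound.
 */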
int
nfs4_pathconf_rpc(
	nfsnode_t np,
	struct nfs_fsattr *nfsap,
	vfs_context_t ctx)
{
	u_int64_t xid;
	int error = 0, lockerror, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	struct nfsmount *nmp = NFSTONMP(np);
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfs_vattr *nvattr;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
	NVATTR_INIT(nvattr);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/* NFSv4: fetch "pathconf" info for this node */
	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "pathconf", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXLINK);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXNAME);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_NO_TRUNC);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CHOWN_RESTRICTED);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_INSENSITIVE);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_PRESERVING);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, nfsap, nvattr, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	if (!error) {
		nfs_loadattrcache(np, nvattr, &xid, 0);
	}
	if (!lockerror) {
		nfs_node_unlock(np);
	}
nfsmout:
	NVATTR_CLEANUP(nvattr);
	FREE(nvattr, M_TEMP);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

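/*
 * NFSv4 getattr vnode op: get (possibly cached) attributes for the node
 * and translate them into the caller's vnode_attr structure.
 */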
1495int
1496nfs4_vnop_getattr(
1497 struct vnop_getattr_args /* {
0a7de745
A
1498 * struct vnodeop_desc *a_desc;
1499 * vnode_t a_vp;
1500 * struct vnode_attr *a_vap;
1501 * vfs_context_t a_context;
1502 * } */*ap)
2d21ac55
A
1503{
1504 struct vnode_attr *vap = ap->a_vap;
6d2010ae 1505 struct nfsmount *nmp;
f427ee49 1506 struct nfs_vattr *nva;
6d2010ae
A
1507 int error, acls, ngaflags;
1508
fe8ab488 1509 nmp = VTONMP(ap->a_vp);
0a7de745
A
1510 if (nfs_mount_gone(nmp)) {
1511 return ENXIO;
1512 }
6d2010ae 1513 acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);
2d21ac55 1514
6d2010ae 1515 ngaflags = NGA_CACHED;
0a7de745 1516 if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
6d2010ae 1517 ngaflags |= NGA_ACL;
0a7de745 1518 }
f427ee49
A
1519 MALLOC(nva, struct nfs_vattr *, sizeof(*nva), M_TEMP, M_WAITOK);
1520 error = nfs_getattr(VTONFS(ap->a_vp), nva, ap->a_context, ngaflags);
0a7de745 1521 if (error) {
f427ee49 1522 goto out;
0a7de745 1523 }
5ba3f43e 1524
2d21ac55 1525 /* copy what we have in nva to *a_vap */
f427ee49
A
1526 if (VATTR_IS_ACTIVE(vap, va_rdev) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_RAWDEV)) {
1527 dev_t rdev = makedev(nva->nva_rawdev.specdata1, nva->nva_rawdev.specdata2);
2d21ac55
A
1528 VATTR_RETURN(vap, va_rdev, rdev);
1529 }
f427ee49
A
1530 if (VATTR_IS_ACTIVE(vap, va_nlink) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_NUMLINKS)) {
1531 VATTR_RETURN(vap, va_nlink, nva->nva_nlink);
0a7de745 1532 }
f427ee49
A
1533 if (VATTR_IS_ACTIVE(vap, va_data_size) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_SIZE)) {
1534 VATTR_RETURN(vap, va_data_size, nva->nva_size);
0a7de745 1535 }
2d21ac55
A
1536 // VATTR_RETURN(vap, va_data_alloc, ???);
1537 // VATTR_RETURN(vap, va_total_size, ???);
f427ee49
A
1538 if (VATTR_IS_ACTIVE(vap, va_total_alloc) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_SPACE_USED)) {
1539 VATTR_RETURN(vap, va_total_alloc, nva->nva_bytes);
0a7de745 1540 }
f427ee49
A
1541 if (VATTR_IS_ACTIVE(vap, va_uid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_OWNER)) {
1542 VATTR_RETURN(vap, va_uid, nva->nva_uid);
0a7de745 1543 }
f427ee49
A
1544 if (VATTR_IS_ACTIVE(vap, va_uuuid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_OWNER)) {
1545 VATTR_RETURN(vap, va_uuuid, nva->nva_uuuid);
0a7de745 1546 }
f427ee49
A
1547 if (VATTR_IS_ACTIVE(vap, va_gid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_OWNER_GROUP)) {
1548 VATTR_RETURN(vap, va_gid, nva->nva_gid);
0a7de745 1549 }
f427ee49
A
1550 if (VATTR_IS_ACTIVE(vap, va_guuid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_OWNER_GROUP)) {
1551 VATTR_RETURN(vap, va_guuid, nva->nva_guuid);
0a7de745 1552 }
6d2010ae 1553 if (VATTR_IS_ACTIVE(vap, va_mode)) {
f427ee49
A
1554 if (NMFLAG(nmp, ACLONLY) || !NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_MODE)) {
1555 VATTR_RETURN(vap, va_mode, ACCESSPERMS);
0a7de745 1556 } else {
f427ee49 1557 VATTR_RETURN(vap, va_mode, nva->nva_mode);
0a7de745 1558 }
6d2010ae
A
1559 }
1560 if (VATTR_IS_ACTIVE(vap, va_flags) &&
f427ee49
A
1561 (NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_ARCHIVE) ||
1562 NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_HIDDEN) ||
1563 (nva->nva_flags & NFS_FFLAG_TRIGGER))) {
2d21ac55 1564 uint32_t flags = 0;
f427ee49
A
1565 if (NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_ARCHIVE) &&
1566 (nva->nva_flags & NFS_FFLAG_ARCHIVED)) {
2d21ac55 1567 flags |= SF_ARCHIVED;
0a7de745 1568 }
f427ee49
A
1569 if (NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_HIDDEN) &&
1570 (nva->nva_flags & NFS_FFLAG_HIDDEN)) {
2d21ac55 1571 flags |= UF_HIDDEN;
0a7de745 1572 }
2d21ac55
A
1573 VATTR_RETURN(vap, va_flags, flags);
1574 }
f427ee49
A
1575 if (VATTR_IS_ACTIVE(vap, va_create_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_CREATE)) {
1576 vap->va_create_time.tv_sec = nva->nva_timesec[NFSTIME_CREATE];
1577 vap->va_create_time.tv_nsec = nva->nva_timensec[NFSTIME_CREATE];
2d21ac55
A
1578 VATTR_SET_SUPPORTED(vap, va_create_time);
1579 }
f427ee49
A
1580 if (VATTR_IS_ACTIVE(vap, va_access_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_ACCESS)) {
1581 vap->va_access_time.tv_sec = nva->nva_timesec[NFSTIME_ACCESS];
1582 vap->va_access_time.tv_nsec = nva->nva_timensec[NFSTIME_ACCESS];
2d21ac55
A
1583 VATTR_SET_SUPPORTED(vap, va_access_time);
1584 }
f427ee49
A
1585 if (VATTR_IS_ACTIVE(vap, va_modify_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_MODIFY)) {
1586 vap->va_modify_time.tv_sec = nva->nva_timesec[NFSTIME_MODIFY];
1587 vap->va_modify_time.tv_nsec = nva->nva_timensec[NFSTIME_MODIFY];
2d21ac55
A
1588 VATTR_SET_SUPPORTED(vap, va_modify_time);
1589 }
f427ee49
A
1590 if (VATTR_IS_ACTIVE(vap, va_change_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_METADATA)) {
1591 vap->va_change_time.tv_sec = nva->nva_timesec[NFSTIME_CHANGE];
1592 vap->va_change_time.tv_nsec = nva->nva_timensec[NFSTIME_CHANGE];
2d21ac55
A
1593 VATTR_SET_SUPPORTED(vap, va_change_time);
1594 }
f427ee49
A
1595 if (VATTR_IS_ACTIVE(vap, va_backup_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_BACKUP)) {
1596 vap->va_backup_time.tv_sec = nva->nva_timesec[NFSTIME_BACKUP];
1597 vap->va_backup_time.tv_nsec = nva->nva_timensec[NFSTIME_BACKUP];
2d21ac55
A
1598 VATTR_SET_SUPPORTED(vap, va_backup_time);
1599 }
f427ee49
A
1600 if (VATTR_IS_ACTIVE(vap, va_fileid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_FILEID)) {
1601 VATTR_RETURN(vap, va_fileid, nva->nva_fileid);
0a7de745 1602 }
f427ee49
A
1603 if (VATTR_IS_ACTIVE(vap, va_type) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TYPE)) {
1604 VATTR_RETURN(vap, va_type, nva->nva_type);
0a7de745 1605 }
f427ee49
A
1606 if (VATTR_IS_ACTIVE(vap, va_filerev) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_CHANGE)) {
1607 VATTR_RETURN(vap, va_filerev, nva->nva_change);
0a7de745 1608 }
2d21ac55 1609
6d2010ae 1610 if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
f427ee49
A
1611 VATTR_RETURN(vap, va_acl, nva->nva_acl);
1612 nva->nva_acl = NULL;
6d2010ae
A
1613 }
1614
2d21ac55
A
1615 // other attrs we might support someday:
1616 // VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? */);
2d21ac55 1617
f427ee49
A
1618 NVATTR_CLEANUP(nva);
1619out:
1620 FREE(nva, M_TEMP);
0a7de745 1621 return error;
2d21ac55
A
1622}
1623
1624int
1625nfs4_setattr_rpc(
1626 nfsnode_t np,
1627 struct vnode_attr *vap,
b0d623f7 1628 vfs_context_t ctx)
2d21ac55
A
1629{
1630 struct nfsmount *nmp = NFSTONMP(np);
6d2010ae 1631 int error = 0, setattr_error = 0, lockerror = ENOENT, status, nfsvers, numops;
b0d623f7 1632 u_int64_t xid, nextxid;
2d21ac55 1633 struct nfsm_chain nmreq, nmrep;
b0d623f7 1634 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6d2010ae
A
1635 uint32_t getbitmap[NFS_ATTR_BITMAP_LEN];
1636 uint32_t setbitmap[NFS_ATTR_BITMAP_LEN];
b0d623f7 1637 nfs_stateid stateid;
6d2010ae 1638 struct nfsreq_secinfo_args si;
2d21ac55 1639
0a7de745
A
1640 if (nfs_mount_gone(nmp)) {
1641 return ENXIO;
1642 }
2d21ac55 1643 nfsvers = nmp->nm_vers;
0a7de745
A
1644 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
1645 return EINVAL;
1646 }
2d21ac55 1647
0a7de745 1648 if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & ~(SF_ARCHIVED | UF_HIDDEN))) {
2d21ac55 1649 /* we don't support setting unsupported flags (duh!) */
0a7de745
A
1650 if (vap->va_active & ~VNODE_ATTR_va_flags) {
1651 return EINVAL; /* return EINVAL if other attributes also set */
1652 } else {
1653 return ENOTSUP; /* return ENOTSUP for chflags(2) */
1654 }
2d21ac55
A
1655 }
1656
6d2010ae 1657 /* don't bother requesting some changes if they don't look like they are changing */
0a7de745 1658 if (VATTR_IS_ACTIVE(vap, va_uid) && (vap->va_uid == np->n_vattr.nva_uid)) {
6d2010ae 1659 VATTR_CLEAR_ACTIVE(vap, va_uid);
0a7de745
A
1660 }
1661 if (VATTR_IS_ACTIVE(vap, va_gid) && (vap->va_gid == np->n_vattr.nva_gid)) {
6d2010ae 1662 VATTR_CLEAR_ACTIVE(vap, va_gid);
0a7de745
A
1663 }
1664 if (VATTR_IS_ACTIVE(vap, va_uuuid) && kauth_guid_equal(&vap->va_uuuid, &np->n_vattr.nva_uuuid)) {
6d2010ae 1665 VATTR_CLEAR_ACTIVE(vap, va_uuuid);
0a7de745
A
1666 }
1667 if (VATTR_IS_ACTIVE(vap, va_guuid) && kauth_guid_equal(&vap->va_guuid, &np->n_vattr.nva_guuid)) {
6d2010ae 1668 VATTR_CLEAR_ACTIVE(vap, va_guuid);
0a7de745 1669 }
6d2010ae
A
1670
1671tryagain:
1672 /* do nothing if no attributes will be sent */
1673 nfs_vattr_set_bitmap(nmp, bitmap, vap);
0a7de745
A
1674 if (!bitmap[0] && !bitmap[1]) {
1675 return 0;
1676 }
6d2010ae
A
1677
1678 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
2d21ac55
A
1679 nfsm_chain_null(&nmreq);
1680 nfsm_chain_null(&nmrep);
1681
6d2010ae
A
1682 /*
1683 * Prepare GETATTR bitmap: if we are setting the ACL or mode, we
1684 * need to invalidate any cached ACL. And if we had an ACL cached,
1685 * we might as well also fetch the new value.
1686 */
1687 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, getbitmap);
1688 if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_ACL) ||
1689 NFS_BITMAP_ISSET(bitmap, NFS_FATTR_MODE)) {
0a7de745 1690 if (NACLVALID(np)) {
6d2010ae 1691 NFS_BITMAP_SET(getbitmap, NFS_FATTR_ACL);
0a7de745 1692 }
6d2010ae
A
1693 NACLINVALIDATE(np);
1694 }
1695
2d21ac55
A
1696 // PUTFH, SETATTR, GETATTR
1697 numops = 3;
1698 nfsm_chain_build_alloc_init(error, &nmreq, 40 * NFSX_UNSIGNED);
3e170ce0 1699 nfsm_chain_add_compound_header(error, &nmreq, "setattr", nmp->nm_minor_vers, numops);
2d21ac55
A
1700 numops--;
1701 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
1702 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
1703 numops--;
1704 nfsm_chain_add_32(error, &nmreq, NFS_OP_SETATTR);
0a7de745 1705 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
b0d623f7 1706 nfs_get_stateid(np, vfs_context_thread(ctx), vfs_context_ucred(ctx), &stateid);
0a7de745 1707 } else {
b0d623f7 1708 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
0a7de745 1709 }
b0d623f7 1710 nfsm_chain_add_stateid(error, &nmreq, &stateid);
2d21ac55
A
1711 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
1712 numops--;
1713 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 1714 nfsm_chain_add_bitmap_supported(error, &nmreq, getbitmap, nmp, np);
2d21ac55
A
1715 nfsm_chain_build_done(error, &nmreq);
1716 nfsm_assert(error, (numops == 0), EPROTO);
1717 nfsmout_if(error);
6d2010ae 1718 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
2d21ac55 1719
0a7de745 1720 if ((lockerror = nfs_node_lock(np))) {
2d21ac55 1721 error = lockerror;
0a7de745 1722 }
2d21ac55
A
1723 nfsm_chain_skip_tag(error, &nmrep);
1724 nfsm_chain_get_32(error, &nmrep, numops);
1725 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6d2010ae 1726 nfsmout_if(error);
2d21ac55 1727 nfsm_chain_op_check(error, &nmrep, NFS_OP_SETATTR);
6d2010ae
A
1728 nfsmout_if(error == EBADRPC);
1729 setattr_error = error;
1730 error = 0;
2d21ac55 1731 bmlen = NFS_ATTR_BITMAP_LEN;
6d2010ae
A
1732 nfsm_chain_get_bitmap(error, &nmrep, setbitmap, bmlen);
1733 if (!error) {
0a7de745 1734 if (VATTR_IS_ACTIVE(vap, va_data_size) && (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
6d2010ae 1735 microuptime(&np->n_lastio);
0a7de745 1736 }
6d2010ae
A
1737 nfs_vattr_set_supported(setbitmap, vap);
1738 error = setattr_error;
1739 }
2d21ac55 1740 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 1741 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
0a7de745 1742 if (error) {
2d21ac55 1743 NATTRINVALIDATE(np);
0a7de745 1744 }
b0d623f7
A
1745 /*
1746 * We just changed the attributes and we want to make sure that we
1747 * see the latest attributes. Get the next XID. If it's not the
1748 * next XID after the SETATTR XID, then it's possible that another
1749 * RPC was in flight at the same time and it might put stale attributes
1750 * in the cache. In that case, we invalidate the attributes and set
1751 * the attribute cache XID to guarantee that newer attributes will
1752 * get loaded next.
1753 */
1754 nextxid = 0;
1755 nfs_get_xid(&nextxid);
1756 if (nextxid != (xid + 1)) {
1757 np->n_xid = nextxid;
1758 NATTRINVALIDATE(np);
1759 }
2d21ac55 1760nfsmout:
0a7de745 1761 if (!lockerror) {
b0d623f7 1762 nfs_node_unlock(np);
0a7de745 1763 }
2d21ac55
A
1764 nfsm_chain_cleanup(&nmreq);
1765 nfsm_chain_cleanup(&nmrep);
6d2010ae
A
1766 if ((setattr_error == EINVAL) && VATTR_IS_ACTIVE(vap, va_acl) && VATTR_IS_ACTIVE(vap, va_mode) && !NMFLAG(nmp, ACLONLY)) {
1767 /*
1768 * Some servers may not like ACL/mode combos that get sent.
1769 * If it looks like that's what the server choked on, try setting
1770 * just the ACL and not the mode (unless it looks like everything
1771 * but mode was already successfully set).
1772 */
1773 if (((bitmap[0] & setbitmap[0]) != bitmap[0]) ||
0a7de745 1774 ((bitmap[1] & (setbitmap[1] | NFS_FATTR_MODE)) != bitmap[1])) {
6d2010ae
A
1775 VATTR_CLEAR_ACTIVE(vap, va_mode);
1776 error = 0;
1777 goto tryagain;
1778 }
1779 }
0a7de745 1780 return error;
2d21ac55 1781}
cb323159 1782#endif /* CONFIG_NFS4 */
2d21ac55 1783
b0d623f7
A
1784/*
1785 * Wait for any pending recovery to complete.
1786 */
2d21ac55 1787int
b0d623f7 1788nfs_mount_state_wait_for_recovery(struct nfsmount *nmp)
2d21ac55 1789{
cb323159 1790 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
6d2010ae 1791 int error = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
b0d623f7
A
1792
1793 lck_mtx_lock(&nmp->nm_lock);
1794 while (nmp->nm_state & NFSSTA_RECOVER) {
0a7de745 1795 if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1))) {
b0d623f7 1796 break;
0a7de745 1797 }
b0d623f7 1798 nfs_mount_sock_thread_wake(nmp);
0a7de745 1799 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsrecoverwait", &ts);
6d2010ae 1800 slpflag = 0;
b0d623f7
A
1801 }
1802 lck_mtx_unlock(&nmp->nm_lock);
1803
0a7de745 1804 return error;
2d21ac55
A
1805}
1806
b0d623f7
A
1807/*
1808 * We're about to use/manipulate NFS mount's open/lock state.
1809 * Wait for any pending state recovery to complete, then
1810 * mark the state as being in use (which will hold off
1811 * the recovery thread until we're done).
1812 */
2d21ac55 1813int
6d2010ae 1814nfs_mount_state_in_use_start(struct nfsmount *nmp, thread_t thd)
2d21ac55 1815{
cb323159 1816 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
6d2010ae 1817 int error = 0, slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
b0d623f7 1818
0a7de745
A
1819 if (nfs_mount_gone(nmp)) {
1820 return ENXIO;
1821 }
b0d623f7 1822 lck_mtx_lock(&nmp->nm_lock);
0a7de745 1823 if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
6d2010ae 1824 lck_mtx_unlock(&nmp->nm_lock);
0a7de745 1825 return ENXIO;
6d2010ae 1826 }
b0d623f7 1827 while (nmp->nm_state & NFSSTA_RECOVER) {
0a7de745 1828 if ((error = nfs_sigintr(nmp, NULL, thd, 1))) {
b0d623f7 1829 break;
0a7de745 1830 }
b0d623f7 1831 nfs_mount_sock_thread_wake(nmp);
0a7de745 1832 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsrecoverwait", &ts);
6d2010ae 1833 slpflag = 0;
b0d623f7 1834 }
0a7de745 1835 if (!error) {
b0d623f7 1836 nmp->nm_stateinuse++;
0a7de745 1837 }
b0d623f7
A
1838 lck_mtx_unlock(&nmp->nm_lock);
1839
0a7de745 1840 return error;
2d21ac55
A
1841}
1842
b0d623f7
A
1843/*
1844 * We're done using/manipulating the NFS mount's open/lock
1845 * state. If the given error indicates that recovery should
1846 * be performed, we'll initiate recovery.
1847 */
2d21ac55 1848int
b0d623f7 1849nfs_mount_state_in_use_end(struct nfsmount *nmp, int error)
2d21ac55 1850{
b0d623f7
A
1851 int restart = nfs_mount_state_error_should_restart(error);
1852
0a7de745 1853 if (nfs_mount_gone(nmp)) {
f427ee49 1854 return ENXIO;
0a7de745 1855 }
b0d623f7
A
1856 lck_mtx_lock(&nmp->nm_lock);
1857 if (restart && (error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE)) {
6d2010ae 1858 printf("nfs_mount_state_in_use_end: error %d, initiating recovery for %s, 0x%x\n",
0a7de745 1859 error, vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid);
6d2010ae 1860 nfs_need_recover(nmp, error);
b0d623f7 1861 }
0a7de745 1862 if (nmp->nm_stateinuse > 0) {
b0d623f7 1863 nmp->nm_stateinuse--;
0a7de745 1864 } else {
b0d623f7 1865 panic("NFS mount state in use count underrun");
0a7de745
A
1866 }
1867 if (!nmp->nm_stateinuse && (nmp->nm_state & NFSSTA_RECOVER)) {
b0d623f7 1868 wakeup(&nmp->nm_stateinuse);
0a7de745 1869 }
b0d623f7 1870 lck_mtx_unlock(&nmp->nm_lock);
0a7de745
A
1871 if (error == NFSERR_GRACE) {
1872 tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
1873 }
b0d623f7 1874
0a7de745 1875 return restart;
2d21ac55
A
1876}
1877
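As a hedged illustration of how these two routines are typically paired, here is a minimal sketch of a hypothetical caller; the function name and the elided RPC are assumptions, not code from this file:

/* Hypothetical caller: retry a state-related operation across recovery. */
static int
example_stateful_op(struct nfsmount *nmp, thread_t thd)
{
	int error;

restart:
	/* Wait out any recovery in progress and hold off the recovery thread. */
	if ((error = nfs_mount_state_in_use_start(nmp, thd)))
		return error;

	error = 0;	/* ... perform an OPEN/LOCK/CLOSE RPC here ... */

	/*
	 * If the error indicates lost state, this kicks off recovery and
	 * returns nonzero so the operation can be redone.
	 */
	if (nfs_mount_state_in_use_end(nmp, error))
		goto restart;
	return error;
}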
1878/*
b0d623f7 1879 * Does the error mean we should restart/redo a state-related operation?
2d21ac55
A
1880 */
1881int
b0d623f7 1882nfs_mount_state_error_should_restart(int error)
2d21ac55 1883{
b0d623f7
A
1884 switch (error) {
1885 case NFSERR_STALE_STATEID:
1886 case NFSERR_STALE_CLIENTID:
1887 case NFSERR_ADMIN_REVOKED:
1888 case NFSERR_EXPIRED:
1889 case NFSERR_OLD_STATEID:
1890 case NFSERR_BAD_STATEID:
1891 case NFSERR_GRACE:
0a7de745 1892 return 1;
b0d623f7 1893 }
0a7de745 1894 return 0;
b0d623f7 1895}
2d21ac55 1896
b0d623f7
A
1897/*
1898 * In some cases we may want to limit how many times we restart a
1899 * state-related operation - e.g. we're repeatedly getting NFSERR_GRACE.
1900 * Base the limit on the lease (as long as it's not too short).
1901 */
1902uint
1903nfs_mount_state_max_restarts(struct nfsmount *nmp)
1904{
0a7de745 1905 return MAX(nmp->nm_fsattr.nfsa_lease, 60);
b0d623f7 1906}
2d21ac55 1907
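A short, hedged sketch fragment of how the limit above might bound a retry loop; the counter variable and the surrounding loop are hypothetical, and error/nmp are assumed to be in scope:

	/* Hypothetical retry bound for a state-related operation. */
	uint restarts = 0;
restart:
	error = 0;	/* ... state-related RPC ... */
	if (nfs_mount_state_error_should_restart(error) &&
	    (++restarts <= nfs_mount_state_max_restarts(nmp)))
		goto restart;
	/* otherwise give up instead of looping on e.g. NFSERR_GRACE forever */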
6d2010ae
A
1908/*
1909 * Does the error mean we probably lost a delegation?
1910 */
1911int
1912nfs_mount_state_error_delegation_lost(int error)
1913{
1914 switch (error) {
1915 case NFSERR_STALE_STATEID:
1916 case NFSERR_ADMIN_REVOKED:
1917 case NFSERR_EXPIRED:
1918 case NFSERR_OLD_STATEID:
1919 case NFSERR_BAD_STATEID:
1920 case NFSERR_GRACE: /* ugh! (stupid) RFC 3530 specifically disallows CLAIM_DELEGATE_CUR during grace period? */
0a7de745 1921 return 1;
6d2010ae 1922 }
0a7de745 1923 return 0;
6d2010ae
A
1924}
1925
b0d623f7
A
1926
1927/*
1928 * Mark an NFS node's open state as busy.
1929 */
1930int
6d2010ae 1931nfs_open_state_set_busy(nfsnode_t np, thread_t thd)
b0d623f7
A
1932{
1933 struct nfsmount *nmp;
cb323159 1934 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
b0d623f7
A
1935 int error = 0, slpflag;
1936
1937 nmp = NFSTONMP(np);
0a7de745
A
1938 if (nfs_mount_gone(nmp)) {
1939 return ENXIO;
1940 }
6d2010ae 1941 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
2d21ac55 1942
b0d623f7
A
1943 lck_mtx_lock(&np->n_openlock);
1944 while (np->n_openflags & N_OPENBUSY) {
0a7de745 1945 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
b0d623f7 1946 break;
0a7de745 1947 }
b0d623f7
A
1948 np->n_openflags |= N_OPENWANT;
1949 msleep(&np->n_openflags, &np->n_openlock, slpflag, "nfs_open_state_set_busy", &ts);
6d2010ae 1950 slpflag = 0;
b0d623f7 1951 }
0a7de745 1952 if (!error) {
b0d623f7 1953 np->n_openflags |= N_OPENBUSY;
0a7de745 1954 }
b0d623f7 1955 lck_mtx_unlock(&np->n_openlock);
2d21ac55 1956
0a7de745 1957 return error;
b0d623f7 1958}
2d21ac55 1959
b0d623f7
A
1960/*
1961 * Clear an NFS node's open state busy flag and wake up
1962 * anyone wanting it.
1963 */
1964void
1965nfs_open_state_clear_busy(nfsnode_t np)
1966{
1967 int wanted;
1968
1969 lck_mtx_lock(&np->n_openlock);
0a7de745 1970 if (!(np->n_openflags & N_OPENBUSY)) {
b0d623f7 1971 panic("nfs_open_state_clear_busy");
0a7de745 1972 }
b0d623f7 1973 wanted = (np->n_openflags & N_OPENWANT);
0a7de745 1974 np->n_openflags &= ~(N_OPENBUSY | N_OPENWANT);
b0d623f7 1975 lck_mtx_unlock(&np->n_openlock);
0a7de745 1976 if (wanted) {
b0d623f7 1977 wakeup(&np->n_openflags);
0a7de745 1978 }
b0d623f7 1979}
2d21ac55 1980
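For readability, a minimal hedged sketch of the set/clear pairing expected of callers; the work in the middle is a placeholder and np/thd/error are assumed to be in scope:

	/* Hypothetical use: serialize an update of a node's open state. */
	if ((error = nfs_open_state_set_busy(np, thd)))
		return error;		/* interrupted, or mount gone */
	/* ... examine/update np->n_openflags, delegation state, etc. ... */
	nfs_open_state_clear_busy(np);	/* wakes any N_OPENWANT waiters */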
b0d623f7
A
1981/*
1982 * Search a mount's open owner list for the owner for this credential.
1983 * If not found and "alloc" is set, then allocate a new one.
1984 */
1985struct nfs_open_owner *
1986nfs_open_owner_find(struct nfsmount *nmp, kauth_cred_t cred, int alloc)
1987{
1988 uid_t uid = kauth_cred_getuid(cred);
1989 struct nfs_open_owner *noop, *newnoop = NULL;
2d21ac55 1990
b0d623f7
A
1991tryagain:
1992 lck_mtx_lock(&nmp->nm_lock);
1993 TAILQ_FOREACH(noop, &nmp->nm_open_owners, noo_link) {
0a7de745 1994 if (kauth_cred_getuid(noop->noo_cred) == uid) {
b0d623f7 1995 break;
0a7de745 1996 }
2d21ac55 1997 }
2d21ac55 1998
b0d623f7
A
1999 if (!noop && !newnoop && alloc) {
2000 lck_mtx_unlock(&nmp->nm_lock);
2001 MALLOC(newnoop, struct nfs_open_owner *, sizeof(struct nfs_open_owner), M_TEMP, M_WAITOK);
0a7de745
A
2002 if (!newnoop) {
2003 return NULL;
2004 }
b0d623f7
A
2005 bzero(newnoop, sizeof(*newnoop));
2006 lck_mtx_init(&newnoop->noo_lock, nfs_open_grp, LCK_ATTR_NULL);
2007 newnoop->noo_mount = nmp;
2008 kauth_cred_ref(cred);
2009 newnoop->noo_cred = cred;
2010 newnoop->noo_name = OSAddAtomic(1, &nfs_open_owner_seqnum);
2011 TAILQ_INIT(&newnoop->noo_opens);
2012 goto tryagain;
2013 }
2014 if (!noop && newnoop) {
2015 newnoop->noo_flags |= NFS_OPEN_OWNER_LINK;
0a7de745 2016 os_ref_init(&newnoop->noo_refcnt, NULL);
b0d623f7
A
2017 TAILQ_INSERT_HEAD(&nmp->nm_open_owners, newnoop, noo_link);
2018 noop = newnoop;
2019 }
2020 lck_mtx_unlock(&nmp->nm_lock);
2021
0a7de745 2022 if (newnoop && (noop != newnoop)) {
b0d623f7 2023 nfs_open_owner_destroy(newnoop);
0a7de745 2024 }
b0d623f7 2025
0a7de745 2026 if (noop) {
b0d623f7 2027 nfs_open_owner_ref(noop);
0a7de745 2028 }
b0d623f7 2029
0a7de745 2030 return noop;
b0d623f7
A
2031}
2032
2033/*
2034 * destroy an open owner that's no longer needed
2035 */
2036void
2037nfs_open_owner_destroy(struct nfs_open_owner *noop)
2038{
0a7de745 2039 if (noop->noo_cred) {
b0d623f7 2040 kauth_cred_unref(&noop->noo_cred);
0a7de745 2041 }
b0d623f7
A
2042 lck_mtx_destroy(&noop->noo_lock, nfs_open_grp);
2043 FREE(noop, M_TEMP);
2044}
2045
2046/*
2047 * acquire a reference count on an open owner
2048 */
2049void
2050nfs_open_owner_ref(struct nfs_open_owner *noop)
2051{
2052 lck_mtx_lock(&noop->noo_lock);
0a7de745 2053 os_ref_retain_locked(&noop->noo_refcnt);
b0d623f7
A
2054 lck_mtx_unlock(&noop->noo_lock);
2055}
2056
2057/*
2058 * drop a reference count on an open owner and destroy it if
2059 * it is no longer referenced and no longer on the mount's list.
2060 */
2061void
2062nfs_open_owner_rele(struct nfs_open_owner *noop)
2063{
0a7de745
A
2064 os_ref_count_t newcount;
2065
b0d623f7 2066 lck_mtx_lock(&noop->noo_lock);
0a7de745 2067 if (os_ref_get_count(&noop->noo_refcnt) < 1) {
b0d623f7 2068 panic("nfs_open_owner_rele: no refcnt");
0a7de745
A
2069 }
2070 newcount = os_ref_release_locked(&noop->noo_refcnt);
2071 if (!newcount && (noop->noo_flags & NFS_OPEN_OWNER_BUSY)) {
b0d623f7 2072 panic("nfs_open_owner_rele: busy");
0a7de745 2073 }
b0d623f7 2074 /* XXX we may potentially want to clean up idle/unused open owner structures */
0a7de745 2075 if (newcount || (noop->noo_flags & NFS_OPEN_OWNER_LINK)) {
b0d623f7
A
2076 lck_mtx_unlock(&noop->noo_lock);
2077 return;
2078 }
2079 /* owner is no longer referenced or linked to mount, so destroy it */
2080 lck_mtx_unlock(&noop->noo_lock);
2081 nfs_open_owner_destroy(noop);
2082}
2083
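A hedged sketch of the usual lookup/release pairing for open owners; the credential source and the work done while holding the reference are assumptions:

	/* Hypothetical use: look up (allocating if needed), use, then release. */
	struct nfs_open_owner *noop;

	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1 /* alloc */);
	if (!noop)
		return ENOMEM;
	/* ... e.g. nfs_open_file_find(np, noop, ...), OPEN RPC ... */
	nfs_open_owner_rele(noop);	/* drop the reference taken by _find() */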
2084/*
2085 * Mark an open owner as busy because we are about to
2086 * start an operation that uses and updates open owner state.
2087 */
2088int
2089nfs_open_owner_set_busy(struct nfs_open_owner *noop, thread_t thd)
2090{
2091 struct nfsmount *nmp;
cb323159 2092 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
b0d623f7
A
2093 int error = 0, slpflag;
2094
2095 nmp = noop->noo_mount;
0a7de745
A
2096 if (nfs_mount_gone(nmp)) {
2097 return ENXIO;
2098 }
6d2010ae 2099 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
b0d623f7
A
2100
2101 lck_mtx_lock(&noop->noo_lock);
2102 while (noop->noo_flags & NFS_OPEN_OWNER_BUSY) {
0a7de745 2103 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
b0d623f7 2104 break;
0a7de745 2105 }
b0d623f7
A
2106 noop->noo_flags |= NFS_OPEN_OWNER_WANT;
2107 msleep(noop, &noop->noo_lock, slpflag, "nfs_open_owner_set_busy", &ts);
6d2010ae 2108 slpflag = 0;
b0d623f7 2109 }
0a7de745 2110 if (!error) {
b0d623f7 2111 noop->noo_flags |= NFS_OPEN_OWNER_BUSY;
0a7de745 2112 }
b0d623f7
A
2113 lck_mtx_unlock(&noop->noo_lock);
2114
0a7de745 2115 return error;
b0d623f7
A
2116}
2117
2118/*
2119 * Clear the busy flag on an open owner and wake up anyone waiting
2120 * to mark it busy.
2121 */
2122void
2123nfs_open_owner_clear_busy(struct nfs_open_owner *noop)
2124{
2125 int wanted;
2126
2127 lck_mtx_lock(&noop->noo_lock);
0a7de745 2128 if (!(noop->noo_flags & NFS_OPEN_OWNER_BUSY)) {
b0d623f7 2129 panic("nfs_open_owner_clear_busy");
0a7de745 2130 }
b0d623f7 2131 wanted = (noop->noo_flags & NFS_OPEN_OWNER_WANT);
0a7de745 2132 noop->noo_flags &= ~(NFS_OPEN_OWNER_BUSY | NFS_OPEN_OWNER_WANT);
b0d623f7 2133 lck_mtx_unlock(&noop->noo_lock);
0a7de745 2134 if (wanted) {
b0d623f7 2135 wakeup(noop);
0a7de745 2136 }
b0d623f7
A
2137}
2138
2139/*
2140 * Given an open/lock owner and an error code, increment the
2141 * sequence ID if appropriate.
2142 */
2143void
2144nfs_owner_seqid_increment(struct nfs_open_owner *noop, struct nfs_lock_owner *nlop, int error)
2145{
2146 switch (error) {
2147 case NFSERR_STALE_CLIENTID:
2148 case NFSERR_STALE_STATEID:
2149 case NFSERR_OLD_STATEID:
2150 case NFSERR_BAD_STATEID:
2151 case NFSERR_BAD_SEQID:
2152 case NFSERR_BADXDR:
2153 case NFSERR_RESOURCE:
2154 case NFSERR_NOFILEHANDLE:
2155 /* do not increment the open seqid on these errors */
2156 return;
2157 }
0a7de745 2158 if (noop) {
b0d623f7 2159 noop->noo_seqid++;
0a7de745
A
2160 }
2161 if (nlop) {
b0d623f7 2162 nlop->nlo_seqid++;
0a7de745 2163 }
b0d623f7
A
2164}
2165
2166/*
2167 * Search a node's open file list for any conflicts with this request.
2168 * Also find this open owner's open file structure.
2169 * If not found and "alloc" is set, then allocate one.
2170 */
2171int
2172nfs_open_file_find(
2173 nfsnode_t np,
2174 struct nfs_open_owner *noop,
2175 struct nfs_open_file **nofpp,
2176 uint32_t accessMode,
2177 uint32_t denyMode,
2178 int alloc)
6d2010ae
A
2179{
2180 *nofpp = NULL;
2181 return nfs_open_file_find_internal(np, noop, nofpp, accessMode, denyMode, alloc);
2182}
2183
2184/*
2185 * Internally, allow using a provisional nodeless nofp (passed in via *nofpp)
2186 * if an existing one is not found. This is used in "create" scenarios to
2187 * officially add the provisional nofp to the node once the node is created.
2188 */
2189int
2190nfs_open_file_find_internal(
2191 nfsnode_t np,
2192 struct nfs_open_owner *noop,
2193 struct nfs_open_file **nofpp,
2194 uint32_t accessMode,
2195 uint32_t denyMode,
2196 int alloc)
b0d623f7
A
2197{
2198 struct nfs_open_file *nofp = NULL, *nofp2, *newnofp = NULL;
2199
0a7de745 2200 if (!np) {
b0d623f7 2201 goto alloc;
0a7de745 2202 }
b0d623f7
A
2203tryagain:
2204 lck_mtx_lock(&np->n_openlock);
2205 TAILQ_FOREACH(nofp2, &np->n_opens, nof_link) {
2206 if (nofp2->nof_owner == noop) {
2207 nofp = nofp2;
0a7de745 2208 if (!accessMode) {
b0d623f7 2209 break;
0a7de745 2210 }
b0d623f7
A
2211 }
2212 if ((accessMode & nofp2->nof_deny) || (denyMode & nofp2->nof_access)) {
2213 /* This request conflicts with an existing open on this client. */
2214 lck_mtx_unlock(&np->n_openlock);
0a7de745 2215 return EACCES;
b0d623f7
A
2216 }
2217 }
2218
2219 /*
2220 * If this open owner doesn't have an open
2221 * file structure yet, we create one for it.
2222 */
6d2010ae 2223 if (!nofp && !*nofpp && !newnofp && alloc) {
b0d623f7
A
2224 lck_mtx_unlock(&np->n_openlock);
2225alloc:
2226 MALLOC(newnofp, struct nfs_open_file *, sizeof(struct nfs_open_file), M_TEMP, M_WAITOK);
0a7de745
A
2227 if (!newnofp) {
2228 return ENOMEM;
2229 }
b0d623f7
A
2230 bzero(newnofp, sizeof(*newnofp));
2231 lck_mtx_init(&newnofp->nof_lock, nfs_open_grp, LCK_ATTR_NULL);
2232 newnofp->nof_owner = noop;
2233 nfs_open_owner_ref(noop);
2234 newnofp->nof_np = np;
2235 lck_mtx_lock(&noop->noo_lock);
2236 TAILQ_INSERT_HEAD(&noop->noo_opens, newnofp, nof_oolink);
2237 lck_mtx_unlock(&noop->noo_lock);
0a7de745 2238 if (np) {
b0d623f7 2239 goto tryagain;
0a7de745 2240 }
b0d623f7 2241 }
6d2010ae
A
2242 if (!nofp) {
2243 if (*nofpp) {
2244 (*nofpp)->nof_np = np;
2245 nofp = *nofpp;
2246 } else {
2247 nofp = newnofp;
2248 }
0a7de745 2249 if (nofp && np) {
6d2010ae 2250 TAILQ_INSERT_HEAD(&np->n_opens, nofp, nof_link);
0a7de745 2251 }
b0d623f7 2252 }
0a7de745 2253 if (np) {
b0d623f7 2254 lck_mtx_unlock(&np->n_openlock);
0a7de745 2255 }
b0d623f7 2256
0a7de745 2257 if (alloc && newnofp && (nofp != newnofp)) {
b0d623f7 2258 nfs_open_file_destroy(newnofp);
0a7de745 2259 }
b0d623f7
A
2260
2261 *nofpp = nofp;
0a7de745 2262 return nofp ? 0 : ESRCH;
b0d623f7
A
2263}
2264
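A brief, hedged sketch of calling the lookup above; the access/deny modes shown are just one possible combination, and np/noop/error are assumed to be in scope:

	/* Hypothetical use: find (or allocate) this owner's open file on np. */
	struct nfs_open_file *nofp = NULL;

	error = nfs_open_file_find(np, noop, &nofp,
	    NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 1 /* alloc */);
	if (error == EACCES) {
		/* the request conflicts with an existing open on this client */
	}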
2265/*
2266 * Destroy an open file structure.
2267 */
2268void
2269nfs_open_file_destroy(struct nfs_open_file *nofp)
2270{
2271 lck_mtx_lock(&nofp->nof_owner->noo_lock);
2272 TAILQ_REMOVE(&nofp->nof_owner->noo_opens, nofp, nof_oolink);
2273 lck_mtx_unlock(&nofp->nof_owner->noo_lock);
2274 nfs_open_owner_rele(nofp->nof_owner);
2275 lck_mtx_destroy(&nofp->nof_lock, nfs_open_grp);
2276 FREE(nofp, M_TEMP);
2277}
2278
2279/*
2280 * Mark an open file as busy because we are about to
2281 * start an operation that uses and updates open file state.
2282 */
2283int
2284nfs_open_file_set_busy(struct nfs_open_file *nofp, thread_t thd)
2285{
2286 struct nfsmount *nmp;
cb323159 2287 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
b0d623f7
A
2288 int error = 0, slpflag;
2289
2290 nmp = nofp->nof_owner->noo_mount;
0a7de745
A
2291 if (nfs_mount_gone(nmp)) {
2292 return ENXIO;
2293 }
6d2010ae 2294 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
b0d623f7
A
2295
2296 lck_mtx_lock(&nofp->nof_lock);
2297 while (nofp->nof_flags & NFS_OPEN_FILE_BUSY) {
0a7de745 2298 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
b0d623f7 2299 break;
0a7de745 2300 }
b0d623f7
A
2301 nofp->nof_flags |= NFS_OPEN_FILE_WANT;
2302 msleep(nofp, &nofp->nof_lock, slpflag, "nfs_open_file_set_busy", &ts);
6d2010ae 2303 slpflag = 0;
b0d623f7 2304 }
0a7de745 2305 if (!error) {
b0d623f7 2306 nofp->nof_flags |= NFS_OPEN_FILE_BUSY;
0a7de745 2307 }
b0d623f7
A
2308 lck_mtx_unlock(&nofp->nof_lock);
2309
0a7de745 2310 return error;
b0d623f7
A
2311}
2312
2313/*
2314 * Clear the busy flag on an open file and wake up anyone waiting
2315 * to mark it busy.
2316 */
2317void
2318nfs_open_file_clear_busy(struct nfs_open_file *nofp)
2319{
2320 int wanted;
2321
2322 lck_mtx_lock(&nofp->nof_lock);
0a7de745 2323 if (!(nofp->nof_flags & NFS_OPEN_FILE_BUSY)) {
b0d623f7 2324 panic("nfs_open_file_clear_busy");
0a7de745 2325 }
b0d623f7 2326 wanted = (nofp->nof_flags & NFS_OPEN_FILE_WANT);
0a7de745 2327 nofp->nof_flags &= ~(NFS_OPEN_FILE_BUSY | NFS_OPEN_FILE_WANT);
b0d623f7 2328 lck_mtx_unlock(&nofp->nof_lock);
0a7de745 2329 if (wanted) {
b0d623f7 2330 wakeup(nofp);
0a7de745 2331 }
b0d623f7
A
2332}
2333
2334/*
6d2010ae 2335 * Add the open state for the given access/deny modes to this open file.
b0d623f7
A
2336 */
2337void
6d2010ae 2338nfs_open_file_add_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode, int delegated)
b0d623f7 2339{
6d2010ae
A
2340 lck_mtx_lock(&nofp->nof_lock);
2341 nofp->nof_access |= accessMode;
2342 nofp->nof_deny |= denyMode;
b0d623f7 2343
6d2010ae
A
2344 if (delegated) {
2345 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
0a7de745 2346 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2347 nofp->nof_d_r++;
0a7de745 2348 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2349 nofp->nof_d_w++;
0a7de745 2350 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2351 nofp->nof_d_rw++;
0a7de745 2352 }
6d2010ae 2353 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
0a7de745 2354 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2355 nofp->nof_d_r_dw++;
0a7de745 2356 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2357 nofp->nof_d_w_dw++;
0a7de745 2358 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2359 nofp->nof_d_rw_dw++;
0a7de745 2360 }
6d2010ae 2361 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
0a7de745 2362 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2363 nofp->nof_d_r_drw++;
0a7de745 2364 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2365 nofp->nof_d_w_drw++;
0a7de745 2366 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2367 nofp->nof_d_rw_drw++;
0a7de745 2368 }
6d2010ae 2369 }
b0d623f7 2370 } else {
6d2010ae 2371 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
0a7de745 2372 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2373 nofp->nof_r++;
0a7de745 2374 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2375 nofp->nof_w++;
0a7de745 2376 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2377 nofp->nof_rw++;
0a7de745 2378 }
6d2010ae 2379 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
0a7de745 2380 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2381 nofp->nof_r_dw++;
0a7de745 2382 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2383 nofp->nof_w_dw++;
0a7de745 2384 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2385 nofp->nof_rw_dw++;
0a7de745 2386 }
6d2010ae 2387 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
0a7de745 2388 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2389 nofp->nof_r_drw++;
0a7de745 2390 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2391 nofp->nof_w_drw++;
0a7de745 2392 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2393 nofp->nof_rw_drw++;
0a7de745 2394 }
6d2010ae 2395 }
b0d623f7 2396 }
6d2010ae
A
2397
2398 nofp->nof_opencnt++;
2399 lck_mtx_unlock(&nofp->nof_lock);
b0d623f7
A
2400}
2401
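The per-combination counters used above follow a regular naming pattern; the summary below is an added reading aid, not text from the original file:

/*
 * Open counter naming (reader's note): nof_<access>[_<deny>], with a d_
 * prefix when the open was granted under a delegation.
 *   access: r (read), w (write), rw (both)
 *   deny:   (none) = DENY_NONE, dw = DENY_WRITE, drw = DENY_BOTH
 * e.g. nof_rw_dw counts read/write opens that deny write;
 *      nof_d_r counts delegated read opens with no denial.
 */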
2402/*
6d2010ae
A
2403 * Find which particular open combo will be closed and report what
2404 * the new modes will be and whether the open was delegated.
b0d623f7 2405 */
6d2010ae
A
2406void
2407nfs_open_file_remove_open_find(
b0d623f7
A
2408 struct nfs_open_file *nofp,
2409 uint32_t accessMode,
2410 uint32_t denyMode,
f427ee49
A
2411 uint8_t *newAccessMode,
2412 uint8_t *newDenyMode,
6d2010ae 2413 int *delegated)
b0d623f7 2414{
6d2010ae
A
2415 /*
2416 * Calculate new modes: a mode bit gets removed when there's only
2417 * one count in all the corresponding counts
2418 */
2419 *newAccessMode = nofp->nof_access;
2420 *newDenyMode = nofp->nof_deny;
b0d623f7 2421
6d2010ae
A
2422 if ((accessMode & NFS_OPEN_SHARE_ACCESS_READ) &&
2423 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) &&
2424 ((nofp->nof_r + nofp->nof_d_r +
0a7de745
A
2425 nofp->nof_rw + nofp->nof_d_rw +
2426 nofp->nof_r_dw + nofp->nof_d_r_dw +
2427 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2428 nofp->nof_r_drw + nofp->nof_d_r_drw +
2429 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) {
6d2010ae 2430 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
0a7de745 2431 }
6d2010ae
A
2432 if ((accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2433 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2434 ((nofp->nof_w + nofp->nof_d_w +
0a7de745
A
2435 nofp->nof_rw + nofp->nof_d_rw +
2436 nofp->nof_w_dw + nofp->nof_d_w_dw +
2437 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2438 nofp->nof_w_drw + nofp->nof_d_w_drw +
2439 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) {
6d2010ae 2440 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_WRITE;
0a7de745 2441 }
6d2010ae
A
2442 if ((denyMode & NFS_OPEN_SHARE_DENY_READ) &&
2443 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) &&
2444 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
0a7de745
A
2445 nofp->nof_w_drw + nofp->nof_d_w_drw +
2446 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1)) {
6d2010ae 2447 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_READ;
0a7de745 2448 }
6d2010ae
A
2449 if ((denyMode & NFS_OPEN_SHARE_DENY_WRITE) &&
2450 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_WRITE) &&
2451 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
0a7de745
A
2452 nofp->nof_w_drw + nofp->nof_d_w_drw +
2453 nofp->nof_rw_drw + nofp->nof_d_rw_drw +
2454 nofp->nof_r_dw + nofp->nof_d_r_dw +
2455 nofp->nof_w_dw + nofp->nof_d_w_dw +
2456 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) {
6d2010ae 2457 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 2458 }
6d2010ae
A
2459
2460 /* Find the corresponding open access/deny mode counter. */
b0d623f7 2461 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
0a7de745 2462 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2463 *delegated = (nofp->nof_d_r != 0);
0a7de745 2464 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2465 *delegated = (nofp->nof_d_w != 0);
0a7de745 2466 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2467 *delegated = (nofp->nof_d_rw != 0);
0a7de745 2468 } else {
6d2010ae 2469 *delegated = 0;
0a7de745 2470 }
b0d623f7 2471 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
0a7de745 2472 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2473 *delegated = (nofp->nof_d_r_dw != 0);
0a7de745 2474 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2475 *delegated = (nofp->nof_d_w_dw != 0);
0a7de745 2476 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2477 *delegated = (nofp->nof_d_rw_dw != 0);
0a7de745 2478 } else {
6d2010ae 2479 *delegated = 0;
0a7de745 2480 }
b0d623f7 2481 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
0a7de745 2482 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2483 *delegated = (nofp->nof_d_r_drw != 0);
0a7de745 2484 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2485 *delegated = (nofp->nof_d_w_drw != 0);
0a7de745 2486 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2487 *delegated = (nofp->nof_d_rw_drw != 0);
0a7de745 2488 } else {
6d2010ae 2489 *delegated = 0;
0a7de745 2490 }
b0d623f7 2491 }
6d2010ae
A
2492}
2493
2494/*
2495 * Remove the open state for the given access/deny modes from this open file.
2496 */
2497void
2498nfs_open_file_remove_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode)
2499{
f427ee49 2500 uint8_t newAccessMode, newDenyMode;
6d2010ae
A
2501 int delegated = 0;
2502
2503 lck_mtx_lock(&nofp->nof_lock);
2504 nfs_open_file_remove_open_find(nofp, accessMode, denyMode, &newAccessMode, &newDenyMode, &delegated);
2505
2506 /* Decrement the corresponding open access/deny mode counter. */
2507 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2508 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2509 if (delegated) {
0a7de745 2510 if (nofp->nof_d_r == 0) {
6d2010ae 2511 NP(nofp->nof_np, "nfs: open(R) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2512 } else {
6d2010ae 2513 nofp->nof_d_r--;
0a7de745 2514 }
6d2010ae 2515 } else {
0a7de745 2516 if (nofp->nof_r == 0) {
6d2010ae 2517 NP(nofp->nof_np, "nfs: open(R) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2518 } else {
6d2010ae 2519 nofp->nof_r--;
0a7de745 2520 }
6d2010ae
A
2521 }
2522 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2523 if (delegated) {
0a7de745 2524 if (nofp->nof_d_w == 0) {
6d2010ae 2525 NP(nofp->nof_np, "nfs: open(W) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2526 } else {
6d2010ae 2527 nofp->nof_d_w--;
0a7de745 2528 }
6d2010ae 2529 } else {
0a7de745 2530 if (nofp->nof_w == 0) {
6d2010ae 2531 NP(nofp->nof_np, "nfs: open(W) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2532 } else {
6d2010ae 2533 nofp->nof_w--;
0a7de745 2534 }
6d2010ae
A
2535 }
2536 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2537 if (delegated) {
0a7de745 2538 if (nofp->nof_d_rw == 0) {
6d2010ae 2539 NP(nofp->nof_np, "nfs: open(RW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2540 } else {
6d2010ae 2541 nofp->nof_d_rw--;
0a7de745 2542 }
6d2010ae 2543 } else {
0a7de745 2544 if (nofp->nof_rw == 0) {
6d2010ae 2545 NP(nofp->nof_np, "nfs: open(RW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2546 } else {
6d2010ae 2547 nofp->nof_rw--;
0a7de745 2548 }
6d2010ae
A
2549 }
2550 }
2551 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2552 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2553 if (delegated) {
0a7de745 2554 if (nofp->nof_d_r_dw == 0) {
6d2010ae 2555 NP(nofp->nof_np, "nfs: open(R,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2556 } else {
6d2010ae 2557 nofp->nof_d_r_dw--;
0a7de745 2558 }
6d2010ae 2559 } else {
0a7de745 2560 if (nofp->nof_r_dw == 0) {
6d2010ae 2561 NP(nofp->nof_np, "nfs: open(R,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2562 } else {
6d2010ae 2563 nofp->nof_r_dw--;
0a7de745 2564 }
6d2010ae
A
2565 }
2566 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2567 if (delegated) {
0a7de745 2568 if (nofp->nof_d_w_dw == 0) {
6d2010ae 2569 NP(nofp->nof_np, "nfs: open(W,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2570 } else {
6d2010ae 2571 nofp->nof_d_w_dw--;
0a7de745 2572 }
6d2010ae 2573 } else {
0a7de745 2574 if (nofp->nof_w_dw == 0) {
6d2010ae 2575 NP(nofp->nof_np, "nfs: open(W,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2576 } else {
6d2010ae 2577 nofp->nof_w_dw--;
0a7de745 2578 }
6d2010ae
A
2579 }
2580 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2581 if (delegated) {
0a7de745 2582 if (nofp->nof_d_rw_dw == 0) {
6d2010ae 2583 NP(nofp->nof_np, "nfs: open(RW,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2584 } else {
6d2010ae 2585 nofp->nof_d_rw_dw--;
0a7de745 2586 }
6d2010ae 2587 } else {
0a7de745 2588 if (nofp->nof_rw_dw == 0) {
6d2010ae 2589 NP(nofp->nof_np, "nfs: open(RW,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2590 } else {
6d2010ae 2591 nofp->nof_rw_dw--;
0a7de745 2592 }
6d2010ae
A
2593 }
2594 }
2595 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2596 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2597 if (delegated) {
0a7de745 2598 if (nofp->nof_d_r_drw == 0) {
6d2010ae 2599 NP(nofp->nof_np, "nfs: open(R,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2600 } else {
6d2010ae 2601 nofp->nof_d_r_drw--;
0a7de745 2602 }
6d2010ae 2603 } else {
0a7de745 2604 if (nofp->nof_r_drw == 0) {
6d2010ae 2605 NP(nofp->nof_np, "nfs: open(R,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2606 } else {
6d2010ae 2607 nofp->nof_r_drw--;
0a7de745 2608 }
6d2010ae
A
2609 }
2610 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2611 if (delegated) {
0a7de745 2612 if (nofp->nof_d_w_drw == 0) {
6d2010ae 2613 NP(nofp->nof_np, "nfs: open(W,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2614 } else {
6d2010ae 2615 nofp->nof_d_w_drw--;
0a7de745 2616 }
6d2010ae 2617 } else {
0a7de745 2618 if (nofp->nof_w_drw == 0) {
6d2010ae 2619 NP(nofp->nof_np, "nfs: open(W,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2620 } else {
6d2010ae 2621 nofp->nof_w_drw--;
0a7de745 2622 }
6d2010ae
A
2623 }
2624 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2625 if (delegated) {
0a7de745 2626 if (nofp->nof_d_rw_drw == 0) {
6d2010ae 2627 NP(nofp->nof_np, "nfs: open(RW,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2628 } else {
6d2010ae 2629 nofp->nof_d_rw_drw--;
0a7de745 2630 }
6d2010ae 2631 } else {
0a7de745 2632 if (nofp->nof_rw_drw == 0) {
6d2010ae 2633 NP(nofp->nof_np, "nfs: open(RW,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2634 } else {
6d2010ae 2635 nofp->nof_rw_drw--;
0a7de745 2636 }
6d2010ae
A
2637 }
2638 }
2639 }
2640
2641 /* update the modes */
2642 nofp->nof_access = newAccessMode;
2643 nofp->nof_deny = newDenyMode;
2644 nofp->nof_opencnt--;
2645 lck_mtx_unlock(&nofp->nof_lock);
2646}
2647
cb323159 2648#if CONFIG_NFS4
6d2010ae
A
2649/*
2650 * Get the current (delegation, lock, open, default) stateid for this node.
2651 * If the node has a delegation, use that stateid.
2652 * Otherwise, if this pid holds a lock, use the lock owner's stateid.
2653 * Otherwise, use the open file's stateid.
2654 * If no open file, use a default stateid of all ones.
2655 */
2656void
2657nfs_get_stateid(nfsnode_t np, thread_t thd, kauth_cred_t cred, nfs_stateid *sid)
2658{
2659 struct nfsmount *nmp = NFSTONMP(np);
2660 proc_t p = thd ? get_bsdthreadtask_info(thd) : current_proc(); // XXX async I/O requests don't have a thread
2661 struct nfs_open_owner *noop = NULL;
2662 struct nfs_open_file *nofp = NULL;
2663 struct nfs_lock_owner *nlop = NULL;
2664 nfs_stateid *s = NULL;
2665
2666 if (np->n_openflags & N_DELEG_MASK) {
2667 s = &np->n_dstateid;
2668 } else {
0a7de745 2669 if (p) {
6d2010ae 2670 nlop = nfs_lock_owner_find(np, p, 0);
0a7de745 2671 }
6d2010ae
A
2672 if (nlop && !TAILQ_EMPTY(&nlop->nlo_locks)) {
2673 /* we hold locks, use lock stateid */
2674 s = &nlop->nlo_stateid;
2675 } else if (((noop = nfs_open_owner_find(nmp, cred, 0))) &&
0a7de745
A
2676 (nfs_open_file_find(np, noop, &nofp, 0, 0, 0) == 0) &&
2677 !(nofp->nof_flags & NFS_OPEN_FILE_LOST) &&
2678 nofp->nof_access) {
6d2010ae 2679 /* we (should) have the file open, use open stateid */
0a7de745 2680 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
6d2010ae 2681 nfs4_reopen(nofp, thd);
0a7de745
A
2682 }
2683 if (!(nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6d2010ae 2684 s = &nofp->nof_stateid;
0a7de745 2685 }
6d2010ae
A
2686 }
2687 }
2688
2689 if (s) {
2690 sid->seqid = s->seqid;
2691 sid->other[0] = s->other[0];
2692 sid->other[1] = s->other[1];
2693 sid->other[2] = s->other[2];
2694 } else {
2695 /* named attributes may not have a stateid for reads, so don't complain for them */
0a7de745 2696 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
6d2010ae 2697 NP(np, "nfs_get_stateid: no stateid");
0a7de745 2698 }
6d2010ae
A
2699 sid->seqid = sid->other[0] = sid->other[1] = sid->other[2] = 0xffffffff;
2700 }
0a7de745 2701 if (nlop) {
6d2010ae 2702 nfs_lock_owner_rele(nlop);
0a7de745
A
2703 }
2704 if (noop) {
6d2010ae 2705 nfs_open_owner_rele(noop);
0a7de745 2706 }
6d2010ae
A
2707}
2708
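As an added reading aid (not original text), the selection order implemented above can be summarized as:

/*
 * Stateid preference, highest first:
 *   1. np->n_dstateid	   - node holds a delegation
 *   2. nlop->nlo_stateid  - this process owns byte-range locks on the node
 *   3. nofp->nof_stateid  - the file is open for this credential
 *   4. all-ones stateid   - fallback "special" stateid
 */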
2709
2710/*
2711 * When we have a delegation, we may be able to perform the OPEN locally.
2712 * Perform the OPEN by checking the delegation ACE and/or checking via ACCESS.
2713 */
2714int
2715nfs4_open_delegated(
2716 nfsnode_t np,
2717 struct nfs_open_file *nofp,
2718 uint32_t accessMode,
2719 uint32_t denyMode,
2720 vfs_context_t ctx)
2721{
2722 int error = 0, ismember, readtoo = 0, authorized = 0;
2723 uint32_t action;
2724 struct kauth_acl_eval eval;
2725 kauth_cred_t cred = vfs_context_ucred(ctx);
2726
2727 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2728 /*
2729 * Try to open it for read access too,
2730 * so the buffer cache can read data.
2731 */
2732 readtoo = 1;
2733 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2734 }
2735
2736tryagain:
2737 action = 0;
0a7de745 2738 if (accessMode & NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2739 action |= KAUTH_VNODE_READ_DATA;
0a7de745
A
2740 }
2741 if (accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2742 action |= KAUTH_VNODE_WRITE_DATA;
0a7de745 2743 }
6d2010ae
A
2744
2745 /* evaluate ACE (if we have one) */
2746 if (np->n_dace.ace_flags) {
2747 eval.ae_requested = action;
2748 eval.ae_acl = &np->n_dace;
2749 eval.ae_count = 1;
2750 eval.ae_options = 0;
0a7de745 2751 if (np->n_vattr.nva_uid == kauth_cred_getuid(cred)) {
6d2010ae 2752 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
0a7de745 2753 }
6d2010ae 2754 error = kauth_cred_ismember_gid(cred, np->n_vattr.nva_gid, &ismember);
0a7de745 2755 if (!error && ismember) {
6d2010ae 2756 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
0a7de745 2757 }
6d2010ae
A
2758
2759 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
2760 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
2761 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
2762 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
2763
2764 error = kauth_acl_evaluate(cred, &eval);
2765
0a7de745 2766 if (!error && (eval.ae_result == KAUTH_RESULT_ALLOW)) {
6d2010ae 2767 authorized = 1;
0a7de745 2768 }
6d2010ae
A
2769 }
2770
2771 if (!authorized) {
2772 /* need to ask the server via ACCESS */
2773 struct vnop_access_args naa;
2774 naa.a_desc = &vnop_access_desc;
2775 naa.a_vp = NFSTOV(np);
2776 naa.a_action = action;
2777 naa.a_context = ctx;
0a7de745 2778 if (!(error = nfs_vnop_access(&naa))) {
6d2010ae 2779 authorized = 1;
0a7de745 2780 }
6d2010ae
A
2781 }
2782
2783 if (!authorized) {
2784 if (readtoo) {
2785 /* try again without the extra read access */
2786 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2787 readtoo = 0;
2788 goto tryagain;
2789 }
0a7de745 2790 return error ? error : EACCES;
6d2010ae
A
2791 }
2792
2793 nfs_open_file_add_open(nofp, accessMode, denyMode, 1);
2794
0a7de745 2795 return 0;
6d2010ae
A
2796}
2797
2798
2799/*
2800 * Open a file with the given access/deny modes.
2801 *
2802 * If we have a delegation, we may be able to handle the open locally.
2803 * Otherwise, we will always send the open RPC even if this open's mode is
2804 * a subset of all the existing opens. This makes sure that we will always
2805 * be able to do a downgrade to any of the open modes.
2806 *
2807 * Note: local conflicts should have already been checked in nfs_open_file_find().
2808 */
2809int
2810nfs4_open(
2811 nfsnode_t np,
2812 struct nfs_open_file *nofp,
2813 uint32_t accessMode,
2814 uint32_t denyMode,
2815 vfs_context_t ctx)
2816{
2817 vnode_t vp = NFSTOV(np);
2818 vnode_t dvp = NULL;
2819 struct componentname cn;
2820 const char *vname = NULL;
f427ee49 2821 uint32_t namelen;
6d2010ae
A
2822 char smallname[128];
2823 char *filename = NULL;
2824 int error = 0, readtoo = 0;
2825
2826 /*
2827 * We can handle the OPEN ourselves if we have a delegation,
2828 * unless it's a read delegation and the open is asking for
2829 * either write access or deny read. We also don't bother to
2830 * use the delegation if it's being returned.
2831 */
2832 if (np->n_openflags & N_DELEG_MASK) {
0a7de745
A
2833 if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) {
2834 return error;
2835 }
6d2010ae
A
2836 if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN) &&
2837 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ||
0a7de745 2838 (!(accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) && !(denyMode & NFS_OPEN_SHARE_DENY_READ)))) {
6d2010ae
A
2839 error = nfs4_open_delegated(np, nofp, accessMode, denyMode, ctx);
2840 nfs_open_state_clear_busy(np);
0a7de745 2841 return error;
6d2010ae
A
2842 }
2843 nfs_open_state_clear_busy(np);
2844 }
2845
2846 /*
2847 * [sigh] We can't trust VFS to get the parent right for named
2848 * attribute nodes. (It likes to reparent the nodes after we've
2849 * created them.) Luckily we can probably get the right parent
2850 * from the n_parent we have stashed away.
2851 */
2852 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
0a7de745 2853 (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
6d2010ae 2854 dvp = NULL;
0a7de745
A
2855 }
2856 if (!dvp) {
6d2010ae 2857 dvp = vnode_getparent(vp);
0a7de745 2858 }
6d2010ae
A
2859 vname = vnode_getname(vp);
2860 if (!dvp || !vname) {
0a7de745 2861 if (!error) {
6d2010ae 2862 error = EIO;
0a7de745 2863 }
6d2010ae
A
2864 goto out;
2865 }
2866 filename = &smallname[0];
2867 namelen = snprintf(filename, sizeof(smallname), "%s", vname);
2868 if (namelen >= sizeof(smallname)) {
0a7de745 2869 MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
6d2010ae
A
2870 if (!filename) {
2871 error = ENOMEM;
2872 goto out;
2873 }
0a7de745 2874 snprintf(filename, namelen + 1, "%s", vname);
6d2010ae
A
2875 }
2876 bzero(&cn, sizeof(cn));
2877 cn.cn_nameptr = filename;
2878 cn.cn_namelen = namelen;
2879
2880 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2881 /*
2882 * Try to open it for read access too,
2883 * so the buffer cache can read data.
2884 */
2885 readtoo = 1;
2886 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2887 }
2888tryagain:
2889 error = nfs4_open_rpc(nofp, ctx, &cn, NULL, dvp, &vp, NFS_OPEN_NOCREATE, accessMode, denyMode);
2890 if (error) {
2891 if (!nfs_mount_state_error_should_restart(error) &&
2892 (error != EINTR) && (error != ERESTART) && readtoo) {
2893 /* try again without the extra read access */
2894 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2895 readtoo = 0;
2896 goto tryagain;
2897 }
2898 goto out;
2899 }
2900 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
b0d623f7 2901out:
0a7de745 2902 if (filename && (filename != &smallname[0])) {
b0d623f7 2903 FREE(filename, M_TEMP);
0a7de745
A
2904 }
2905 if (vname) {
b0d623f7 2906 vnode_putname(vname);
0a7de745
A
2907 }
2908 if (dvp != NULLVP) {
b0d623f7 2909 vnode_put(dvp);
0a7de745
A
2910 }
2911 return error;
b0d623f7 2912}
cb323159 2913#endif /* CONFIG_NFS4 */
b0d623f7 2914
b0d623f7 2915int
6d2010ae
A
2916nfs_vnop_mmap(
2917 struct vnop_mmap_args /* {
0a7de745
A
2918 * struct vnodeop_desc *a_desc;
2919 * vnode_t a_vp;
2920 * int a_fflags;
2921 * vfs_context_t a_context;
2922 * } */*ap)
b0d623f7
A
2923{
2924 vfs_context_t ctx = ap->a_context;
2925 vnode_t vp = ap->a_vp;
2926 nfsnode_t np = VTONFS(vp);
f427ee49
A
2927 int error = 0, delegated = 0;
2928 uint8_t accessMode, denyMode;
b0d623f7 2929 struct nfsmount *nmp;
b0d623f7
A
2930 struct nfs_open_owner *noop = NULL;
2931 struct nfs_open_file *nofp = NULL;
2932
b0d623f7 2933 nmp = VTONMP(vp);
0a7de745
A
2934 if (nfs_mount_gone(nmp)) {
2935 return ENXIO;
2936 }
b0d623f7 2937
0a7de745
A
2938 if (!vnode_isreg(vp) || !(ap->a_fflags & (PROT_READ | PROT_WRITE))) {
2939 return EINVAL;
2940 }
2941 if (np->n_flag & NREVOKE) {
2942 return EIO;
2943 }
b0d623f7 2944
6d2010ae
A
2945 /*
2946 * fflags contains some combination of: PROT_READ, PROT_WRITE
2947 * Since it's not possible to mmap() without having the file open for reading,
2948 * read access is always there (regardless of whether PROT_READ is set).
2949 */
2950 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
0a7de745 2951 if (ap->a_fflags & PROT_WRITE) {
b0d623f7 2952 accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE;
0a7de745 2953 }
6d2010ae 2954 denyMode = NFS_OPEN_SHARE_DENY_NONE;
b0d623f7
A
2955
2956 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
0a7de745
A
2957 if (!noop) {
2958 return ENOMEM;
2959 }
b0d623f7
A
2960
2961restart:
6d2010ae 2962 error = nfs_mount_state_in_use_start(nmp, NULL);
b0d623f7
A
2963 if (error) {
2964 nfs_open_owner_rele(noop);
0a7de745 2965 return error;
b0d623f7 2966 }
6d2010ae 2967 if (np->n_flag & NREVOKE) {
b0d623f7 2968 error = EIO;
6d2010ae
A
2969 nfs_mount_state_in_use_end(nmp, 0);
2970 nfs_open_owner_rele(noop);
0a7de745 2971 return error;
6d2010ae
A
2972 }
2973
2974 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
2975 if (error || (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST))) {
2976 NP(np, "nfs_vnop_mmap: no open file for owner, error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
2977 error = EPERM;
b0d623f7 2978 }
cb323159 2979#if CONFIG_NFS4
b0d623f7 2980 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6d2010ae 2981 error = nfs4_reopen(nofp, NULL);
b0d623f7 2982 nofp = NULL;
0a7de745 2983 if (!error) {
f427ee49 2984 nfs_mount_state_in_use_end(nmp, 0);
6d2010ae 2985 goto restart;
0a7de745 2986 }
b0d623f7 2987 }
cb323159 2988#endif
0a7de745 2989 if (!error) {
6d2010ae 2990 error = nfs_open_file_set_busy(nofp, NULL);
0a7de745 2991 }
b0d623f7
A
2992 if (error) {
2993 nofp = NULL;
2994 goto out;
2995 }
2996
2997 /*
6d2010ae
A
2998 * The open reference for mmap must mirror an existing open because
2999 * we may need to reclaim it after the file is closed.
3000 * So grab another open count matching the accessMode passed in.
3001 * If we already had an mmap open, prefer read/write without deny mode.
3002 * This means we may have to drop the current mmap open first.
3e170ce0
A
3003 *
3004 * N.B. We should have an open for the mmap, because either mmap was
3005 * called on an open descriptor, or we created an open for read
3006 * when reading the first page for execve. However, if we piggybacked
3007 * on an existing NFS_OPEN_SHARE_ACCESS_READ/NFS_OPEN_SHARE_DENY_NONE open,
3008 * that open may have closed.
b0d623f7 3009 */
6d2010ae 3010
3e170ce0
A
3011 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
3012 if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
3013 /* We shouldn't get here. We've already opened the file for execve */
3014 NP(np, "nfs_vnop_mmap: File already needs close access: 0x%x, cred: %d thread: %lld",
0a7de745 3015 nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
3e170ce0
A
3016 }
3017 /*
3018 * Mappings for execve are just for read. Get out with EPERM if the accessMode is not ACCESS_READ
3019 * or the access would be denied. Other accesses should have an open descriptor for the mapping.
3020 */
3021 if (accessMode != NFS_OPEN_SHARE_ACCESS_READ || (accessMode & nofp->nof_deny)) {
6d2010ae
A
3022 /* not asking for just read access -> fail */
3023 error = EPERM;
3024 goto out;
3025 }
3026 /* we don't have the file open, so open it for read access */
3027 if (nmp->nm_vers < NFS_VER4) {
3028 /* NFS v2/v3 opens are always allowed - so just add it. */
3029 nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
b0d623f7 3030 error = 0;
cb323159
A
3031 }
3032#if CONFIG_NFS4
3033 else {
6d2010ae 3034 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
b0d623f7 3035 }
cb323159 3036#endif
0a7de745 3037 if (!error) {
6d2010ae 3038 nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
0a7de745
A
3039 }
3040 if (error) {
b0d623f7 3041 goto out;
0a7de745 3042 }
6d2010ae
A
3043 }
3044
3045 /* determine deny mode for open */
3046 if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
3047 if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
3048 delegated = 1;
0a7de745 3049 if (nofp->nof_d_rw) {
6d2010ae 3050 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 3051 } else if (nofp->nof_d_rw_dw) {
6d2010ae 3052 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 3053 } else if (nofp->nof_d_rw_drw) {
6d2010ae 3054 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
0a7de745 3055 }
6d2010ae
A
3056 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
3057 delegated = 0;
0a7de745 3058 if (nofp->nof_rw) {
6d2010ae 3059 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 3060 } else if (nofp->nof_rw_dw) {
6d2010ae 3061 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 3062 } else if (nofp->nof_rw_drw) {
6d2010ae 3063 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
0a7de745 3064 }
6d2010ae
A
3065 } else {
3066 error = EPERM;
3067 }
3068 } else { /* NFS_OPEN_SHARE_ACCESS_READ */
3069 if (nofp->nof_d_r || nofp->nof_d_r_dw || nofp->nof_d_r_drw) {
3070 delegated = 1;
0a7de745 3071 if (nofp->nof_d_r) {
6d2010ae 3072 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 3073 } else if (nofp->nof_d_r_dw) {
6d2010ae 3074 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 3075 } else if (nofp->nof_d_r_drw) {
6d2010ae 3076 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
0a7de745 3077 }
6d2010ae
A
3078 } else if (nofp->nof_r || nofp->nof_r_dw || nofp->nof_r_drw) {
3079 delegated = 0;
0a7de745 3080 if (nofp->nof_r) {
6d2010ae 3081 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 3082 } else if (nofp->nof_r_dw) {
6d2010ae 3083 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 3084 } else if (nofp->nof_r_drw) {
6d2010ae 3085 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
0a7de745 3086 }
3e170ce0
A
3087 } else if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
3088 /*
3089 * This clause and the one below co-opt a read/write access
3090 * for a read-only mmapping. We probably got here because an
3091 * existing read/write open for an executable file already exists.
3092 */
3093 delegated = 1;
3094 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
0a7de745 3095 if (nofp->nof_d_rw) {
3e170ce0 3096 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 3097 } else if (nofp->nof_d_rw_dw) {
3e170ce0 3098 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 3099 } else if (nofp->nof_d_rw_drw) {
3e170ce0 3100 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
0a7de745 3101 }
3e170ce0
A
3102 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
3103 delegated = 0;
3104 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
0a7de745 3105 if (nofp->nof_rw) {
3e170ce0 3106 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 3107 } else if (nofp->nof_rw_dw) {
3e170ce0 3108 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 3109 } else if (nofp->nof_rw_drw) {
3e170ce0 3110 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
0a7de745 3111 }
6d2010ae
A
3112 } else {
3113 error = EPERM;
3114 }
3115 }
0a7de745 3116 if (error) { /* mmap mode without proper open mode */
6d2010ae 3117 goto out;
0a7de745 3118 }
6d2010ae
A
3119
3120 /*
3121 * If the existing mmap access is more than the new access OR the
3122 * existing access is the same and the existing deny mode is no more
3123 * restrictive, then we'll stick with the existing mmap open mode.
3124 */
3125 if ((nofp->nof_mmap_access > accessMode) ||
0a7de745 3126 ((nofp->nof_mmap_access == accessMode) && (nofp->nof_mmap_deny <= denyMode))) {
6d2010ae 3127 goto out;
0a7de745 3128 }
6d2010ae
A
3129
3130 /* update mmap open mode */
3131 if (nofp->nof_mmap_access) {
3132 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
3133 if (error) {
0a7de745 3134 if (!nfs_mount_state_error_should_restart(error)) {
6d2010ae 3135 NP(np, "nfs_vnop_mmap: close of previous mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 3136 }
6d2010ae
A
3137 NP(np, "nfs_vnop_mmap: update, close error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
3138 goto out;
b0d623f7 3139 }
6d2010ae 3140 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
b0d623f7
A
3141 }
3142
6d2010ae
A
3143 nfs_open_file_add_open(nofp, accessMode, denyMode, delegated);
3144 nofp->nof_mmap_access = accessMode;
3145 nofp->nof_mmap_deny = denyMode;
3146
b0d623f7 3147out:
0a7de745 3148 if (nofp) {
b0d623f7 3149 nfs_open_file_clear_busy(nofp);
0a7de745 3150 }
b0d623f7
A
3151 if (nfs_mount_state_in_use_end(nmp, error)) {
3152 nofp = NULL;
3153 goto restart;
3154 }
0a7de745 3155 if (noop) {
b0d623f7 3156 nfs_open_owner_rele(noop);
0a7de745 3157 }
316670eb
A
3158
3159 if (!error) {
3160 int ismapped = 0;
3161 nfs_node_lock_force(np);
3162 if ((np->n_flag & NISMAPPED) == 0) {
3163 np->n_flag |= NISMAPPED;
3164 ismapped = 1;
3165 }
3166 nfs_node_unlock(np);
3167 if (ismapped) {
3168 lck_mtx_lock(&nmp->nm_lock);
3169 nmp->nm_state &= ~NFSSTA_SQUISHY;
3170 nmp->nm_curdeadtimeout = nmp->nm_deadtimeout;
0a7de745 3171 if (nmp->nm_curdeadtimeout <= 0) {
316670eb 3172 nmp->nm_deadto_start = 0;
0a7de745 3173 }
316670eb
A
3174 nmp->nm_mappers++;
3175 lck_mtx_unlock(&nmp->nm_lock);
3176 }
3177 }
3178
0a7de745 3179 return error;
b0d623f7
A
3180}
3181
f427ee49
A
3182int
3183nfs_vnop_mmap_check(
3184 struct vnop_mmap_check_args /* {
3185 * struct vnodeop_desc *a_desc;
3186 * vnode_t a_vp;
3187 * int a_flags;
3188 * vfs_context_t a_context;
3189 * } */*ap)
3190{
3191 vfs_context_t ctx = ap->a_context;
3192 vnode_t vp = ap->a_vp;
3193 struct nfsmount *nmp = VTONMP(vp);
3194 struct vnop_access_args naa;
3195 int error = 0;
3196
3197 if (nfs_mount_gone(nmp)) {
3198 return ENXIO;
3199 }
3200
3201 if (vnode_isreg(vp)) {
3202 /*
3203 * We only need to ensure that a page-in will be
3204 * possible with these credentials. Everything
3205 * else has been checked at other layers.
3206 */
3207 naa.a_desc = &vnop_access_desc;
3208 naa.a_vp = vp;
3209 naa.a_action = KAUTH_VNODE_READ_DATA;
3210 naa.a_context = ctx;
3211
3212 /* compute actual success/failure based on accessibility */
3213 error = nfs_vnop_access(&naa);
3214 }
3215
3216 return error;
3217}
b0d623f7
A
3218
3219int
6d2010ae
A
3220nfs_vnop_mnomap(
3221 struct vnop_mnomap_args /* {
0a7de745
A
3222 * struct vnodeop_desc *a_desc;
3223 * vnode_t a_vp;
3224 * vfs_context_t a_context;
3225 * } */*ap)
b0d623f7
A
3226{
3227 vfs_context_t ctx = ap->a_context;
3228 vnode_t vp = ap->a_vp;
b0d623f7
A
3229 nfsnode_t np = VTONFS(vp);
3230 struct nfsmount *nmp;
b0d623f7 3231 struct nfs_open_file *nofp = NULL;
6d2010ae
A
3232 off_t size;
3233 int error;
316670eb 3234 int is_mapped_flag = 0;
0a7de745 3235
b0d623f7 3236 nmp = VTONMP(vp);
0a7de745
A
3237 if (nfs_mount_gone(nmp)) {
3238 return ENXIO;
3239 }
b0d623f7 3240
316670eb
A
3241 nfs_node_lock_force(np);
3242 if (np->n_flag & NISMAPPED) {
3243 is_mapped_flag = 1;
3244 np->n_flag &= ~NISMAPPED;
3245 }
3246 nfs_node_unlock(np);
3247 if (is_mapped_flag) {
3248 lck_mtx_lock(&nmp->nm_lock);
0a7de745 3249 if (nmp->nm_mappers) {
316670eb 3250 nmp->nm_mappers--;
0a7de745 3251 } else {
316670eb 3252 NP(np, "nfs_vnop_mnomap: removing mmap reference from mount, but mount has no files mmapped");
0a7de745 3253 }
316670eb
A
3254 lck_mtx_unlock(&nmp->nm_lock);
3255 }
3256
6d2010ae
A
3257 /* flush buffers/ubc before we drop the open (in case it's our last open) */
3258 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
0a7de745 3259 if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp))) {
6d2010ae 3260 ubc_msync(vp, 0, size, NULL, UBC_PUSHALL | UBC_SYNC);
0a7de745 3261 }
b0d623f7 3262
6d2010ae
A
3263 /* walk all open files and close all mmap opens */
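/*
 * Note: each close below drops n_openlock and the mount's in-use count,
 * so the open list can change underneath us; the loop restarts the scan
 * from the head after every close rather than trusting a saved iterator.
 */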
3264loop:
3265 error = nfs_mount_state_in_use_start(nmp, NULL);
0a7de745
A
3266 if (error) {
3267 return error;
3268 }
6d2010ae
A
3269 lck_mtx_lock(&np->n_openlock);
3270 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
0a7de745 3271 if (!nofp->nof_mmap_access) {
6d2010ae 3272 continue;
0a7de745 3273 }
b0d623f7 3274 lck_mtx_unlock(&np->n_openlock);
cb323159 3275#if CONFIG_NFS4
6d2010ae 3276 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
6d2010ae 3277 error = nfs4_reopen(nofp, NULL);
0a7de745 3278 if (!error) {
f427ee49 3279 nfs_mount_state_in_use_end(nmp, 0);
6d2010ae 3280 goto loop;
0a7de745 3281 }
6d2010ae 3282 }
cb323159 3283#endif
0a7de745 3284 if (!error) {
6d2010ae 3285 error = nfs_open_file_set_busy(nofp, NULL);
0a7de745 3286 }
6d2010ae
A
3287 if (error) {
3288 lck_mtx_lock(&np->n_openlock);
3289 break;
3290 }
3291 if (nofp->nof_mmap_access) {
3292 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
3293 if (!nfs_mount_state_error_should_restart(error)) {
0a7de745 3294 if (error) { /* not a state-operation-restarting error, so just clear the access */
6d2010ae 3295 NP(np, "nfs_vnop_mnomap: close of mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 3296 }
6d2010ae
A
3297 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
3298 }
0a7de745 3299 if (error) {
6d2010ae 3300 NP(np, "nfs_vnop_mnomap: error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 3301 }
6d2010ae
A
3302 }
3303 nfs_open_file_clear_busy(nofp);
3304 nfs_mount_state_in_use_end(nmp, error);
3305 goto loop;
b0d623f7 3306 }
6d2010ae
A
3307 lck_mtx_unlock(&np->n_openlock);
3308 nfs_mount_state_in_use_end(nmp, error);
0a7de745 3309 return error;
6d2010ae 3310}
b0d623f7 3311
6d2010ae
A
3312/*
3313 * Search a node's lock owner list for the owner for this process.
3314 * If not found and "alloc" is set, then allocate a new one.
3315 */
3316struct nfs_lock_owner *
3317nfs_lock_owner_find(nfsnode_t np, proc_t p, int alloc)
3318{
3319 pid_t pid = proc_pid(p);
3320 struct nfs_lock_owner *nlop, *newnlop = NULL;
b0d623f7 3321
6d2010ae
A
3322tryagain:
3323 lck_mtx_lock(&np->n_openlock);
3324 TAILQ_FOREACH(nlop, &np->n_lock_owners, nlo_link) {
0a7de745
A
3325 os_ref_count_t newcount;
3326
3327 if (nlop->nlo_pid != pid) {
6d2010ae 3328 continue;
0a7de745
A
3329 }
3330 if (timevalcmp(&nlop->nlo_pid_start, &p->p_start, ==)) {
6d2010ae 3331 break;
0a7de745 3332 }
6d2010ae 3333 /* stale lock owner... reuse it if we can */
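/*
 * (The pid has been reused by a new process, so the recorded start time no
 * longer matches. If the old owner is still referenced we can only unlink
 * it here and rescan; it will be torn down once its last reference is
 * dropped. Otherwise we reset its state in place for the new process.)
 */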
0a7de745 3334 if (os_ref_get_count(&nlop->nlo_refcnt)) {
6d2010ae
A
3335 TAILQ_REMOVE(&np->n_lock_owners, nlop, nlo_link);
3336 nlop->nlo_flags &= ~NFS_LOCK_OWNER_LINK;
0a7de745 3337 newcount = os_ref_release_locked(&nlop->nlo_refcnt);
6d2010ae
A
3338 lck_mtx_unlock(&np->n_openlock);
3339 goto tryagain;
3340 }
3341 nlop->nlo_pid_start = p->p_start;
3342 nlop->nlo_seqid = 0;
3343 nlop->nlo_stategenid = 0;
3344 break;
b0d623f7
A
3345 }
3346
6d2010ae
A
3347 if (!nlop && !newnlop && alloc) {
3348 lck_mtx_unlock(&np->n_openlock);
3349 MALLOC(newnlop, struct nfs_lock_owner *, sizeof(struct nfs_lock_owner), M_TEMP, M_WAITOK);
0a7de745
A
3350 if (!newnlop) {
3351 return NULL;
3352 }
6d2010ae
A
3353 bzero(newnlop, sizeof(*newnlop));
3354 lck_mtx_init(&newnlop->nlo_lock, nfs_open_grp, LCK_ATTR_NULL);
3355 newnlop->nlo_pid = pid;
3356 newnlop->nlo_pid_start = p->p_start;
3357 newnlop->nlo_name = OSAddAtomic(1, &nfs_lock_owner_seqnum);
3358 TAILQ_INIT(&newnlop->nlo_locks);
3359 goto tryagain;
b0d623f7 3360 }
6d2010ae
A
3361 if (!nlop && newnlop) {
3362 newnlop->nlo_flags |= NFS_LOCK_OWNER_LINK;
0a7de745 3363 os_ref_init(&newnlop->nlo_refcnt, NULL);
6d2010ae
A
3364 TAILQ_INSERT_HEAD(&np->n_lock_owners, newnlop, nlo_link);
3365 nlop = newnlop;
b0d623f7 3366 }
6d2010ae 3367 lck_mtx_unlock(&np->n_openlock);
b0d623f7 3368
0a7de745 3369 if (newnlop && (nlop != newnlop)) {
6d2010ae 3370 nfs_lock_owner_destroy(newnlop);
0a7de745 3371 }
b0d623f7 3372
0a7de745 3373 if (nlop) {
6d2010ae 3374 nfs_lock_owner_ref(nlop);
0a7de745 3375 }
b0d623f7 3376
0a7de745 3377 return nlop;
6d2010ae 3378}
b0d623f7
A
3379
3380/*
3381 * destroy a lock owner that's no longer needed
3382 */
3383void
3384nfs_lock_owner_destroy(struct nfs_lock_owner *nlop)
3385{
3386 if (nlop->nlo_open_owner) {
3387 nfs_open_owner_rele(nlop->nlo_open_owner);
3388 nlop->nlo_open_owner = NULL;
3389 }
3390 lck_mtx_destroy(&nlop->nlo_lock, nfs_open_grp);
3391 FREE(nlop, M_TEMP);
3392}
3393
3394/*
3395 * acquire a reference count on a lock owner
3396 */
3397void
3398nfs_lock_owner_ref(struct nfs_lock_owner *nlop)
3399{
3400 lck_mtx_lock(&nlop->nlo_lock);
0a7de745 3401 os_ref_retain_locked(&nlop->nlo_refcnt);
b0d623f7
A
3402 lck_mtx_unlock(&nlop->nlo_lock);
3403}
3404
3405/*
3406 * drop a reference count on a lock owner and destroy it if
3407 * it is no longer referenced and no longer on the mount's list.
3408 */
3409void
3410nfs_lock_owner_rele(struct nfs_lock_owner *nlop)
3411{
0a7de745
A
3412 os_ref_count_t newcount;
3413
b0d623f7 3414 lck_mtx_lock(&nlop->nlo_lock);
0a7de745 3415 if (os_ref_get_count(&nlop->nlo_refcnt) < 1) {
b0d623f7 3416 panic("nfs_lock_owner_rele: no refcnt");
0a7de745
A
3417 }
3418 newcount = os_ref_release_locked(&nlop->nlo_refcnt);
3419 if (!newcount && (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY)) {
b0d623f7 3420 panic("nfs_lock_owner_rele: busy");
0a7de745 3421 }
b0d623f7 3422 /* XXX we may potentially want to clean up idle/unused lock owner structures */
0a7de745 3423 if (newcount || (nlop->nlo_flags & NFS_LOCK_OWNER_LINK)) {
b0d623f7
A
3424 lck_mtx_unlock(&nlop->nlo_lock);
3425 return;
3426 }
3427 /* owner is no longer referenced or linked to mount, so destroy it */
3428 lck_mtx_unlock(&nlop->nlo_lock);
3429 nfs_lock_owner_destroy(nlop);
3430}
3431
3432/*
3433 * Mark a lock owner as busy because we are about to
3434 * start an operation that uses and updates lock owner state.
3435 */
3436int
3437nfs_lock_owner_set_busy(struct nfs_lock_owner *nlop, thread_t thd)
3438{
3439 struct nfsmount *nmp;
cb323159 3440 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
b0d623f7
A
3441 int error = 0, slpflag;
3442
3443 nmp = nlop->nlo_open_owner->noo_mount;
0a7de745
A
3444 if (nfs_mount_gone(nmp)) {
3445 return ENXIO;
3446 }
6d2010ae 3447 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
b0d623f7
A
3448
3449 lck_mtx_lock(&nlop->nlo_lock);
3450 while (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY) {
0a7de745 3451 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
b0d623f7 3452 break;
0a7de745 3453 }
b0d623f7
A
3454 nlop->nlo_flags |= NFS_LOCK_OWNER_WANT;
3455 msleep(nlop, &nlop->nlo_lock, slpflag, "nfs_lock_owner_set_busy", &ts);
6d2010ae 3456 slpflag = 0;
b0d623f7 3457 }
0a7de745 3458 if (!error) {
b0d623f7 3459 nlop->nlo_flags |= NFS_LOCK_OWNER_BUSY;
0a7de745 3460 }
b0d623f7
A
3461 lck_mtx_unlock(&nlop->nlo_lock);
3462
0a7de745 3463 return error;
b0d623f7
A
3464}
3465
3466/*
3467 * Clear the busy flag on a lock owner and wake up anyone waiting
3468 * to mark it busy.
3469 */
3470void
3471nfs_lock_owner_clear_busy(struct nfs_lock_owner *nlop)
3472{
3473 int wanted;
3474
3475 lck_mtx_lock(&nlop->nlo_lock);
0a7de745 3476 if (!(nlop->nlo_flags & NFS_LOCK_OWNER_BUSY)) {
b0d623f7 3477 panic("nfs_lock_owner_clear_busy");
0a7de745 3478 }
b0d623f7 3479 wanted = (nlop->nlo_flags & NFS_LOCK_OWNER_WANT);
0a7de745 3480 nlop->nlo_flags &= ~(NFS_LOCK_OWNER_BUSY | NFS_LOCK_OWNER_WANT);
b0d623f7 3481 lck_mtx_unlock(&nlop->nlo_lock);
0a7de745 3482 if (wanted) {
b0d623f7 3483 wakeup(nlop);
0a7de745 3484 }
b0d623f7
A
3485}
3486
3487/*
3488 * Insert a held lock into a lock owner's sorted list.
3489 * (flock locks are always inserted at the head of the list)
3490 */
3491void
3492nfs_lock_owner_insert_held_lock(struct nfs_lock_owner *nlop, struct nfs_file_lock *newnflp)
3493{
3494 struct nfs_file_lock *nflp;
3495
3496 /* insert new lock in lock owner's held lock list */
3497 lck_mtx_lock(&nlop->nlo_lock);
3498 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) {
3499 TAILQ_INSERT_HEAD(&nlop->nlo_locks, newnflp, nfl_lolink);
3500 } else {
3501 TAILQ_FOREACH(nflp, &nlop->nlo_locks, nfl_lolink) {
0a7de745 3502 if (newnflp->nfl_start < nflp->nfl_start) {
b0d623f7 3503 break;
0a7de745 3504 }
b0d623f7 3505 }
0a7de745 3506 if (nflp) {
b0d623f7 3507 TAILQ_INSERT_BEFORE(nflp, newnflp, nfl_lolink);
0a7de745 3508 } else {
b0d623f7 3509 TAILQ_INSERT_TAIL(&nlop->nlo_locks, newnflp, nfl_lolink);
0a7de745 3510 }
b0d623f7
A
3511 }
3512 lck_mtx_unlock(&nlop->nlo_lock);
3513}
3514
3515/*
3516 * Get a file lock structure for this lock owner.
3517 */
3518struct nfs_file_lock *
3519nfs_file_lock_alloc(struct nfs_lock_owner *nlop)
3520{
3521 struct nfs_file_lock *nflp = NULL;
3522
3523 lck_mtx_lock(&nlop->nlo_lock);
3524 if (!nlop->nlo_alock.nfl_owner) {
3525 nflp = &nlop->nlo_alock;
3526 nflp->nfl_owner = nlop;
3527 }
3528 lck_mtx_unlock(&nlop->nlo_lock);
3529 if (!nflp) {
3530 MALLOC(nflp, struct nfs_file_lock *, sizeof(struct nfs_file_lock), M_TEMP, M_WAITOK);
0a7de745
A
3531 if (!nflp) {
3532 return NULL;
3533 }
b0d623f7
A
3534 bzero(nflp, sizeof(*nflp));
3535 nflp->nfl_flags |= NFS_FILE_LOCK_ALLOC;
3536 nflp->nfl_owner = nlop;
3537 }
3538 nfs_lock_owner_ref(nlop);
0a7de745 3539 return nflp;
b0d623f7
A
3540}
3541
3542/*
3543 * destroy the given NFS file lock structure
3544 */
3545void
3546nfs_file_lock_destroy(struct nfs_file_lock *nflp)
3547{
3548 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3549
3550 if (nflp->nfl_flags & NFS_FILE_LOCK_ALLOC) {
3551 nflp->nfl_owner = NULL;
3552 FREE(nflp, M_TEMP);
3553 } else {
3554 lck_mtx_lock(&nlop->nlo_lock);
3e170ce0 3555 bzero(nflp, sizeof(*nflp));
b0d623f7
A
3556 lck_mtx_unlock(&nlop->nlo_lock);
3557 }
3558 nfs_lock_owner_rele(nlop);
3559}
3560
3561/*
3562 * Check if one file lock conflicts with another.
3563 * (nflp1 is the new lock. nflp2 is the existing lock.)
3564 */
3565int
3566nfs_file_lock_conflict(struct nfs_file_lock *nflp1, struct nfs_file_lock *nflp2, int *willsplit)
3567{
3568 /* no conflict if lock is dead */
0a7de745
A
3569 if ((nflp1->nfl_flags & NFS_FILE_LOCK_DEAD) || (nflp2->nfl_flags & NFS_FILE_LOCK_DEAD)) {
3570 return 0;
3571 }
b0d623f7
A
3572 /* no conflict if it's ours - unless the lock style doesn't match */
3573 if ((nflp1->nfl_owner == nflp2->nfl_owner) &&
3574 ((nflp1->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == (nflp2->nfl_flags & NFS_FILE_LOCK_STYLE_MASK))) {
3575 if (willsplit && (nflp1->nfl_type != nflp2->nfl_type) &&
3576 (nflp1->nfl_start > nflp2->nfl_start) &&
0a7de745 3577 (nflp1->nfl_end < nflp2->nfl_end)) {
b0d623f7 3578 *willsplit = 1;
0a7de745
A
3579 }
3580 return 0;
b0d623f7
A
3581 }
3582 /* no conflict if ranges don't overlap */
0a7de745
A
3583 if ((nflp1->nfl_start > nflp2->nfl_end) || (nflp1->nfl_end < nflp2->nfl_start)) {
3584 return 0;
3585 }
b0d623f7 3586 /* no conflict if neither lock is exclusive */
0a7de745
A
3587 if ((nflp1->nfl_type != F_WRLCK) && (nflp2->nfl_type != F_WRLCK)) {
3588 return 0;
3589 }
b0d623f7 3590 /* conflict */
0a7de745 3591 return 1;
b0d623f7
A
3592}
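/*
 * For example: if the same owner holds a POSIX write lock over bytes 0-99
 * and now requests a read lock over 10-19, this reports "no conflict" but
 * sets *willsplit, since granting the request leaves 0-9 and 20-99 as two
 * separate write-locked ranges.
 */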
3593
cb323159 3594#if CONFIG_NFS4
b0d623f7
A
3595/*
3596 * Send an NFSv4 LOCK RPC to the server.
3597 */
3598int
6d2010ae 3599nfs4_setlock_rpc(
b0d623f7
A
3600 nfsnode_t np,
3601 struct nfs_open_file *nofp,
3602 struct nfs_file_lock *nflp,
3603 int reclaim,
6d2010ae 3604 int flags,
b0d623f7
A
3605 thread_t thd,
3606 kauth_cred_t cred)
3607{
3608 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3609 struct nfsmount *nmp;
3610 struct nfsm_chain nmreq, nmrep;
3611 uint64_t xid;
3612 uint32_t locktype;
3613 int error = 0, lockerror = ENOENT, newlocker, numops, status;
6d2010ae 3614 struct nfsreq_secinfo_args si;
b0d623f7
A
3615
3616 nmp = NFSTONMP(np);
0a7de745
A
3617 if (nfs_mount_gone(nmp)) {
3618 return ENXIO;
3619 }
3620 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
3621 return EINVAL;
3622 }
b0d623f7
A
3623
3624 newlocker = (nlop->nlo_stategenid != nmp->nm_stategenid);
3625 locktype = (nflp->nfl_flags & NFS_FILE_LOCK_WAIT) ?
0a7de745
A
3626 ((nflp->nfl_type == F_WRLCK) ?
3627 NFS_LOCK_TYPE_WRITEW :
3628 NFS_LOCK_TYPE_READW) :
3629 ((nflp->nfl_type == F_WRLCK) ?
3630 NFS_LOCK_TYPE_WRITE :
3631 NFS_LOCK_TYPE_READ);
b0d623f7
A
3632 if (newlocker) {
3633 error = nfs_open_file_set_busy(nofp, thd);
0a7de745
A
3634 if (error) {
3635 return error;
3636 }
b0d623f7
A
3637 error = nfs_open_owner_set_busy(nofp->nof_owner, thd);
3638 if (error) {
3639 nfs_open_file_clear_busy(nofp);
0a7de745 3640 return error;
b0d623f7
A
3641 }
3642 if (!nlop->nlo_open_owner) {
3643 nfs_open_owner_ref(nofp->nof_owner);
3644 nlop->nlo_open_owner = nofp->nof_owner;
3645 }
3646 }
3647 error = nfs_lock_owner_set_busy(nlop, thd);
3648 if (error) {
3649 if (newlocker) {
3650 nfs_open_owner_clear_busy(nofp->nof_owner);
3651 nfs_open_file_clear_busy(nofp);
3652 }
0a7de745 3653 return error;
b0d623f7
A
3654 }
3655
6d2010ae 3656 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
b0d623f7
A
3657 nfsm_chain_null(&nmreq);
3658 nfsm_chain_null(&nmrep);
3659
3660 // PUTFH, GETATTR, LOCK
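// The LOCK op below carries either the "new lock owner" form (open seqid,
// open stateid, lock seqid, lock_owner4) when this owner has no lock state
// for the current state generation, or the existing lock stateid and seqid
// otherwise, matching the newlocker flag computed above.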
3661 numops = 3;
3662 nfsm_chain_build_alloc_init(error, &nmreq, 33 * NFSX_UNSIGNED);
3e170ce0 3663 nfsm_chain_add_compound_header(error, &nmreq, "lock", nmp->nm_minor_vers, numops);
b0d623f7
A
3664 numops--;
3665 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3666 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3667 numops--;
3668 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 3669 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
b0d623f7
A
3670 numops--;
3671 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCK);
3672 nfsm_chain_add_32(error, &nmreq, locktype);
3673 nfsm_chain_add_32(error, &nmreq, reclaim);
3674 nfsm_chain_add_64(error, &nmreq, nflp->nfl_start);
3675 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(nflp->nfl_start, nflp->nfl_end));
3676 nfsm_chain_add_32(error, &nmreq, newlocker);
3677 if (newlocker) {
3678 nfsm_chain_add_32(error, &nmreq, nofp->nof_owner->noo_seqid);
3679 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
3680 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3681 nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
3682 } else {
3683 nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
3684 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3685 }
3686 nfsm_chain_build_done(error, &nmreq);
3687 nfsm_assert(error, (numops == 0), EPROTO);
3688 nfsmout_if(error);
3689
0a7de745 3690 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
b0d623f7 3691
0a7de745 3692 if ((lockerror = nfs_node_lock(np))) {
b0d623f7 3693 error = lockerror;
0a7de745 3694 }
b0d623f7
A
3695 nfsm_chain_skip_tag(error, &nmrep);
3696 nfsm_chain_get_32(error, &nmrep, numops);
3697 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3698 nfsmout_if(error);
3699 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 3700 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
b0d623f7
A
3701 nfsmout_if(error);
3702 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCK);
3703 nfs_owner_seqid_increment(newlocker ? nofp->nof_owner : NULL, nlop, error);
3704 nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
3705
3706 /* Update the lock owner's stategenid once it appears the server has state for it. */
3707 /* We determine this by noting the request was successful (we got a stateid). */
0a7de745 3708 if (newlocker && !error) {
b0d623f7 3709 nlop->nlo_stategenid = nmp->nm_stategenid;
0a7de745 3710 }
b0d623f7 3711nfsmout:
0a7de745 3712 if (!lockerror) {
b0d623f7 3713 nfs_node_unlock(np);
0a7de745 3714 }
b0d623f7
A
3715 nfs_lock_owner_clear_busy(nlop);
3716 if (newlocker) {
3717 nfs_open_owner_clear_busy(nofp->nof_owner);
3718 nfs_open_file_clear_busy(nofp);
3719 }
3720 nfsm_chain_cleanup(&nmreq);
3721 nfsm_chain_cleanup(&nmrep);
0a7de745 3722 return error;
b0d623f7
A
3723}
3724
3725/*
3726 * Send an NFSv4 LOCKU RPC to the server.
3727 */
3728int
3729nfs4_unlock_rpc(
3730 nfsnode_t np,
3731 struct nfs_lock_owner *nlop,
3732 int type,
3733 uint64_t start,
3734 uint64_t end,
6d2010ae
A
3735 int flags,
3736 thread_t thd,
3737 kauth_cred_t cred)
b0d623f7
A
3738{
3739 struct nfsmount *nmp;
3740 struct nfsm_chain nmreq, nmrep;
3741 uint64_t xid;
3742 int error = 0, lockerror = ENOENT, numops, status;
6d2010ae 3743 struct nfsreq_secinfo_args si;
b0d623f7
A
3744
3745 nmp = NFSTONMP(np);
0a7de745
A
3746 if (nfs_mount_gone(nmp)) {
3747 return ENXIO;
3748 }
3749 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
3750 return EINVAL;
3751 }
b0d623f7 3752
6d2010ae 3753 error = nfs_lock_owner_set_busy(nlop, NULL);
0a7de745
A
3754 if (error) {
3755 return error;
3756 }
b0d623f7 3757
6d2010ae 3758 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
b0d623f7
A
3759 nfsm_chain_null(&nmreq);
3760 nfsm_chain_null(&nmrep);
3761
3762 // PUTFH, GETATTR, LOCKU
3763 numops = 3;
3764 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
3e170ce0 3765 nfsm_chain_add_compound_header(error, &nmreq, "unlock", nmp->nm_minor_vers, numops);
b0d623f7
A
3766 numops--;
3767 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3768 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3769 numops--;
3770 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 3771 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
b0d623f7
A
3772 numops--;
3773 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKU);
3774 nfsm_chain_add_32(error, &nmreq, (type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
3775 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3776 nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
3777 nfsm_chain_add_64(error, &nmreq, start);
3778 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
3779 nfsm_chain_build_done(error, &nmreq);
3780 nfsm_assert(error, (numops == 0), EPROTO);
3781 nfsmout_if(error);
3782
0a7de745 3783 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
b0d623f7 3784
0a7de745 3785 if ((lockerror = nfs_node_lock(np))) {
b0d623f7 3786 error = lockerror;
0a7de745 3787 }
b0d623f7
A
3788 nfsm_chain_skip_tag(error, &nmrep);
3789 nfsm_chain_get_32(error, &nmrep, numops);
3790 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3791 nfsmout_if(error);
3792 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 3793 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
b0d623f7
A
3794 nfsmout_if(error);
3795 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKU);
3796 nfs_owner_seqid_increment(NULL, nlop, error);
3797 nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
3798nfsmout:
0a7de745 3799 if (!lockerror) {
b0d623f7 3800 nfs_node_unlock(np);
0a7de745 3801 }
b0d623f7
A
3802 nfs_lock_owner_clear_busy(nlop);
3803 nfsm_chain_cleanup(&nmreq);
3804 nfsm_chain_cleanup(&nmrep);
0a7de745 3805 return error;
b0d623f7
A
3806}
3807
3808/*
6d2010ae 3809 * Send an NFSv4 LOCKT RPC to the server.
b0d623f7
A
3810 */
3811int
6d2010ae 3812nfs4_getlock_rpc(
b0d623f7
A
3813 nfsnode_t np,
3814 struct nfs_lock_owner *nlop,
3815 struct flock *fl,
3816 uint64_t start,
3817 uint64_t end,
3818 vfs_context_t ctx)
3819{
3820 struct nfsmount *nmp;
b0d623f7
A
3821 struct nfsm_chain nmreq, nmrep;
3822 uint64_t xid, val64 = 0;
3823 uint32_t val = 0;
6d2010ae
A
3824 int error = 0, lockerror, numops, status;
3825 struct nfsreq_secinfo_args si;
b0d623f7
A
3826
3827 nmp = NFSTONMP(np);
0a7de745
A
3828 if (nfs_mount_gone(nmp)) {
3829 return ENXIO;
3830 }
3831 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
3832 return EINVAL;
3833 }
b0d623f7 3834
6d2010ae
A
3835 lockerror = ENOENT;
3836 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
b0d623f7
A
3837 nfsm_chain_null(&nmreq);
3838 nfsm_chain_null(&nmrep);
3839
3840 // PUTFH, GETATTR, LOCKT
3841 numops = 3;
3842 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
3e170ce0 3843 nfsm_chain_add_compound_header(error, &nmreq, "locktest", nmp->nm_minor_vers, numops);
b0d623f7
A
3844 numops--;
3845 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3846 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3847 numops--;
3848 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 3849 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
b0d623f7
A
3850 numops--;
3851 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKT);
3852 nfsm_chain_add_32(error, &nmreq, (fl->l_type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
3853 nfsm_chain_add_64(error, &nmreq, start);
3854 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
3855 nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
3856 nfsm_chain_build_done(error, &nmreq);
3857 nfsm_assert(error, (numops == 0), EPROTO);
3858 nfsmout_if(error);
3859
6d2010ae 3860 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
b0d623f7 3861
0a7de745 3862 if ((lockerror = nfs_node_lock(np))) {
b0d623f7 3863 error = lockerror;
0a7de745 3864 }
b0d623f7
A
3865 nfsm_chain_skip_tag(error, &nmrep);
3866 nfsm_chain_get_32(error, &nmrep, numops);
3867 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3868 nfsmout_if(error);
3869 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 3870 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
b0d623f7
A
3871 nfsmout_if(error);
3872 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKT);
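/*
 * A DENIED reply carries the conflicting lock's offset, length, and type;
 * translate that into the caller's struct flock below. A length of
 * UINT64_MAX means "to end of file", which maps to l_len == 0.
 */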
3873 if (error == NFSERR_DENIED) {
3874 error = 0;
3875 nfsm_chain_get_64(error, &nmrep, fl->l_start);
3876 nfsm_chain_get_64(error, &nmrep, val64);
3877 fl->l_len = (val64 == UINT64_MAX) ? 0 : val64;
3878 nfsm_chain_get_32(error, &nmrep, val);
3879 fl->l_type = (val == NFS_LOCK_TYPE_WRITE) ? F_WRLCK : F_RDLCK;
3880 fl->l_pid = 0;
3881 fl->l_whence = SEEK_SET;
3882 } else if (!error) {
3883 fl->l_type = F_UNLCK;
3884 }
3885nfsmout:
0a7de745 3886 if (!lockerror) {
b0d623f7 3887 nfs_node_unlock(np);
0a7de745 3888 }
b0d623f7
A
3889 nfsm_chain_cleanup(&nmreq);
3890 nfsm_chain_cleanup(&nmrep);
0a7de745 3891 return error;
b0d623f7 3892}
cb323159 3893#endif /* CONFIG_NFS4 */
6d2010ae
A
3894
3895/*
3896 * Check for any conflicts with the given lock.
3897 *
3898 * Checking for a lock doesn't require the file to be opened.
3899 * So we skip all the open owner, open file, lock owner work
3900 * and just check for a conflicting lock.
3901 */
3902int
3903nfs_advlock_getlock(
3904 nfsnode_t np,
3905 struct nfs_lock_owner *nlop,
3906 struct flock *fl,
3907 uint64_t start,
3908 uint64_t end,
3909 vfs_context_t ctx)
3910{
3911 struct nfsmount *nmp;
3912 struct nfs_file_lock *nflp;
3913 int error = 0, answered = 0;
3914
3915 nmp = NFSTONMP(np);
0a7de745
A
3916 if (nfs_mount_gone(nmp)) {
3917 return ENXIO;
3918 }
6d2010ae
A
3919
3920restart:
0a7de745
A
3921 if ((error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx)))) {
3922 return error;
3923 }
6d2010ae
A
3924
3925 lck_mtx_lock(&np->n_openlock);
3926 /* scan currently held locks for conflict */
3927 TAILQ_FOREACH(nflp, &np->n_locks, nfl_link) {
0a7de745 3928 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
6d2010ae 3929 continue;
0a7de745 3930 }
6d2010ae 3931 if ((start <= nflp->nfl_end) && (end >= nflp->nfl_start) &&
0a7de745 3932 ((fl->l_type == F_WRLCK) || (nflp->nfl_type == F_WRLCK))) {
6d2010ae 3933 break;
0a7de745 3934 }
6d2010ae
A
3935 }
3936 if (nflp) {
3937 /* found a conflicting lock */
3938 fl->l_type = nflp->nfl_type;
3939 fl->l_pid = (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_FLOCK) ? -1 : nflp->nfl_owner->nlo_pid;
3940 fl->l_start = nflp->nfl_start;
3941 fl->l_len = NFS_FLOCK_LENGTH(nflp->nfl_start, nflp->nfl_end);
3942 fl->l_whence = SEEK_SET;
3943 answered = 1;
3944 } else if ((np->n_openflags & N_DELEG_WRITE) && !(np->n_openflags & N_DELEG_RETURN)) {
3945 /*
3946 * If we have a write delegation, we know there can't be other
3947 * locks on the server. So the answer is no conflicting lock found.
3948 */
3949 fl->l_type = F_UNLCK;
3950 answered = 1;
3951 }
3952 lck_mtx_unlock(&np->n_openlock);
3953 if (answered) {
3954 nfs_mount_state_in_use_end(nmp, 0);
0a7de745 3955 return 0;
6d2010ae
A
3956 }
3957
3958 /* no conflict found locally, so ask the server */
3959 error = nmp->nm_funcs->nf_getlock_rpc(np, nlop, fl, start, end, ctx);
3960
0a7de745 3961 if (nfs_mount_state_in_use_end(nmp, error)) {
6d2010ae 3962 goto restart;
0a7de745
A
3963 }
3964 return error;
6d2010ae
A
3965}
3966
b0d623f7
A
3967/*
3968 * Acquire a file lock for the given range.
3969 *
3970 * Add the lock (request) to the lock queue.
3971 * Scan the lock queue for any conflicting locks.
3972 * If a conflict is found, block or return an error.
3973 * Once end of queue is reached, send request to the server.
3974 * If the server grants the lock, scan the lock queue and
3975 * update any existing locks. Then (optionally) scan the
3976 * queue again to coalesce any locks adjacent to the new one.
3977 */
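/*
 * For example: granting a write lock over bytes 10-19 while this owner
 * already holds a read lock over 0-99 leaves the local list with three
 * entries after the update scan: read 0-9, write 10-19, read 20-99
 * (the middle of the old lock is replaced and the remainder is split off).
 */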
3978int
6d2010ae 3979nfs_advlock_setlock(
b0d623f7
A
3980 nfsnode_t np,
3981 struct nfs_open_file *nofp,
3982 struct nfs_lock_owner *nlop,
3983 int op,
3984 uint64_t start,
3985 uint64_t end,
3986 int style,
3987 short type,
3988 vfs_context_t ctx)
3989{
3990 struct nfsmount *nmp;
3991 struct nfs_file_lock *newnflp, *nflp, *nflp2 = NULL, *nextnflp, *flocknflp = NULL;
3992 struct nfs_file_lock *coalnflp;
3993 int error = 0, error2, willsplit = 0, delay, slpflag, busy = 0, inuse = 0, restart, inqueue = 0;
cb323159 3994 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
b0d623f7
A
3995
3996 nmp = NFSTONMP(np);
0a7de745
A
3997 if (nfs_mount_gone(nmp)) {
3998 return ENXIO;
3999 }
6d2010ae
A
4000 slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
4001
0a7de745
A
4002 if ((type != F_RDLCK) && (type != F_WRLCK)) {
4003 return EINVAL;
4004 }
b0d623f7
A
4005
4006 /* allocate a new lock */
4007 newnflp = nfs_file_lock_alloc(nlop);
0a7de745
A
4008 if (!newnflp) {
4009 return ENOLCK;
4010 }
b0d623f7
A
4011 newnflp->nfl_start = start;
4012 newnflp->nfl_end = end;
4013 newnflp->nfl_type = type;
0a7de745 4014 if (op == F_SETLKW) {
b0d623f7 4015 newnflp->nfl_flags |= NFS_FILE_LOCK_WAIT;
0a7de745 4016 }
b0d623f7
A
4017 newnflp->nfl_flags |= style;
4018 newnflp->nfl_flags |= NFS_FILE_LOCK_BLOCKED;
4019
4020 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && (type == F_WRLCK)) {
4021 /*
4022 * For exclusive flock-style locks, if we block waiting for the
4023 * lock, we need to first release any currently held shared
4024 * flock-style lock. So, the first thing we do is check if we
4025 * have a shared flock-style lock.
4026 */
4027 nflp = TAILQ_FIRST(&nlop->nlo_locks);
0a7de745 4028 if (nflp && ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_FLOCK)) {
b0d623f7 4029 nflp = NULL;
0a7de745
A
4030 }
4031 if (nflp && (nflp->nfl_type != F_RDLCK)) {
b0d623f7 4032 nflp = NULL;
0a7de745 4033 }
b0d623f7
A
4034 flocknflp = nflp;
4035 }
4036
4037restart:
4038 restart = 0;
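/*
 * Roughly: the mount's in-use count must be held across any RPC that
 * uses open/lock state; if recovery kicks in, the should-restart
 * checks drop it and send us back here to redo the whole operation.
 */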
6d2010ae 4039 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
0a7de745 4040 if (error) {
b0d623f7 4041 goto error_out;
0a7de745 4042 }
b0d623f7 4043 inuse = 1;
6d2010ae
A
4044 if (np->n_flag & NREVOKE) {
4045 error = EIO;
4046 nfs_mount_state_in_use_end(nmp, 0);
4047 inuse = 0;
4048 goto error_out;
4049 }
cb323159 4050#if CONFIG_NFS4
b0d623f7
A
4051 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
4052 nfs_mount_state_in_use_end(nmp, 0);
4053 inuse = 0;
6d2010ae 4054 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
0a7de745 4055 if (error) {
6d2010ae 4056 goto error_out;
0a7de745 4057 }
b0d623f7
A
4058 goto restart;
4059 }
cb323159 4060#endif
b0d623f7
A
4061
4062 lck_mtx_lock(&np->n_openlock);
4063 if (!inqueue) {
4064 /* insert new lock at beginning of list */
4065 TAILQ_INSERT_HEAD(&np->n_locks, newnflp, nfl_link);
4066 inqueue = 1;
4067 }
4068
4069 /* scan current list of locks (held and pending) for conflicts */
6d2010ae
A
4070 for (nflp = TAILQ_NEXT(newnflp, nfl_link); nflp; nflp = nextnflp) {
4071 nextnflp = TAILQ_NEXT(nflp, nfl_link);
0a7de745 4072 if (!nfs_file_lock_conflict(newnflp, nflp, &willsplit)) {
b0d623f7 4073 continue;
0a7de745 4074 }
b0d623f7
A
4075 /* Conflict */
4076 if (!(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
4077 error = EAGAIN;
4078 break;
4079 }
4080 /* Block until this lock is no longer held. */
4081 if (nflp->nfl_blockcnt == UINT_MAX) {
4082 error = ENOLCK;
4083 break;
4084 }
4085 nflp->nfl_blockcnt++;
4086 do {
4087 if (flocknflp) {
4088 /* release any currently held shared lock before sleeping */
4089 lck_mtx_unlock(&np->n_openlock);
4090 nfs_mount_state_in_use_end(nmp, 0);
4091 inuse = 0;
6d2010ae 4092 error = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
b0d623f7 4093 flocknflp = NULL;
0a7de745 4094 if (!error) {
6d2010ae 4095 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
0a7de745 4096 }
b0d623f7
A
4097 if (error) {
4098 lck_mtx_lock(&np->n_openlock);
4099 break;
4100 }
4101 inuse = 1;
4102 lck_mtx_lock(&np->n_openlock);
4103 /* no need to block/sleep if the conflict is gone */
0a7de745 4104 if (!nfs_file_lock_conflict(newnflp, nflp, NULL)) {
b0d623f7 4105 break;
0a7de745 4106 }
b0d623f7 4107 }
6d2010ae
A
4108 msleep(nflp, &np->n_openlock, slpflag, "nfs_advlock_setlock_blocked", &ts);
4109 slpflag = 0;
b0d623f7
A
4110 error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
4111 if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
4112 /* looks like we have a recover pending... restart */
4113 restart = 1;
4114 lck_mtx_unlock(&np->n_openlock);
4115 nfs_mount_state_in_use_end(nmp, 0);
4116 inuse = 0;
4117 lck_mtx_lock(&np->n_openlock);
4118 break;
4119 }
0a7de745 4120 if (!error && (np->n_flag & NREVOKE)) {
6d2010ae 4121 error = EIO;
0a7de745 4122 }
b0d623f7
A
4123 } while (!error && nfs_file_lock_conflict(newnflp, nflp, NULL));
4124 nflp->nfl_blockcnt--;
4125 if ((nflp->nfl_flags & NFS_FILE_LOCK_DEAD) && !nflp->nfl_blockcnt) {
4126 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4127 nfs_file_lock_destroy(nflp);
4128 }
0a7de745 4129 if (error || restart) {
b0d623f7 4130 break;
0a7de745 4131 }
6d2010ae
A
4132 /* We have released n_openlock and we can't trust that nextnflp is still valid. */
4133 /* So, start this lock-scanning loop over from where it started. */
4134 nextnflp = TAILQ_NEXT(newnflp, nfl_link);
b0d623f7
A
4135 }
4136 lck_mtx_unlock(&np->n_openlock);
0a7de745 4137 if (restart) {
b0d623f7 4138 goto restart;
0a7de745
A
4139 }
4140 if (error) {
b0d623f7 4141 goto error_out;
0a7de745 4142 }
b0d623f7
A
4143
4144 if (willsplit) {
4145 /*
4146 * It looks like this operation is splitting a lock.
4147 * We allocate a new lock now so we don't have to worry
4148 * about the allocation failing after we've updated some state.
4149 */
4150 nflp2 = nfs_file_lock_alloc(nlop);
4151 if (!nflp2) {
4152 error = ENOLCK;
4153 goto error_out;
4154 }
4155 }
4156
4157 /* once scan for local conflicts is clear, send request to server */
0a7de745 4158 if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) {
b0d623f7 4159 goto error_out;
0a7de745 4160 }
b0d623f7
A
4161 busy = 1;
4162 delay = 0;
4163 do {
cb323159 4164#if CONFIG_NFS4
6d2010ae
A
4165 /* do we have a delegation? (that we're not returning?) */
4166 if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN)) {
4167 if (np->n_openflags & N_DELEG_WRITE) {
4168 /* with a write delegation, just take the lock delegated */
4169 newnflp->nfl_flags |= NFS_FILE_LOCK_DELEGATED;
4170 error = 0;
4171 /* make sure the lock owner knows its open owner */
4172 if (!nlop->nlo_open_owner) {
4173 nfs_open_owner_ref(nofp->nof_owner);
4174 nlop->nlo_open_owner = nofp->nof_owner;
4175 }
4176 break;
4177 } else {
4178 /*
4179 * If we don't have any non-delegated opens but we do have
4180 * delegated opens, then we need to first claim the delegated
4181 * opens so that the lock request on the server can be associated
4182 * with an open it knows about.
4183 */
4184 if ((!nofp->nof_rw_drw && !nofp->nof_w_drw && !nofp->nof_r_drw &&
0a7de745
A
4185 !nofp->nof_rw_dw && !nofp->nof_w_dw && !nofp->nof_r_dw &&
4186 !nofp->nof_rw && !nofp->nof_w && !nofp->nof_r) &&
6d2010ae 4187 (nofp->nof_d_rw_drw || nofp->nof_d_w_drw || nofp->nof_d_r_drw ||
0a7de745
A
4188 nofp->nof_d_rw_dw || nofp->nof_d_w_dw || nofp->nof_d_r_dw ||
4189 nofp->nof_d_rw || nofp->nof_d_w || nofp->nof_d_r)) {
6d2010ae 4190 error = nfs4_claim_delegated_state_for_open_file(nofp, 0);
0a7de745 4191 if (error) {
6d2010ae 4192 break;
0a7de745 4193 }
6d2010ae
A
4194 }
4195 }
4196 }
cb323159 4197#endif
0a7de745 4198 if (np->n_flag & NREVOKE) {
6d2010ae 4199 error = EIO;
0a7de745
A
4200 }
4201 if (!error) {
6d2010ae 4202 error = nmp->nm_funcs->nf_setlock_rpc(np, nofp, newnflp, 0, 0, vfs_context_thread(ctx), vfs_context_ucred(ctx));
0a7de745
A
4203 }
4204 if (!error || ((error != NFSERR_DENIED) && (error != NFSERR_GRACE))) {
b0d623f7 4205 break;
0a7de745 4206 }
b0d623f7 4207 /* request was denied due to either conflict or grace period */
6d2010ae 4208 if ((error == NFSERR_DENIED) && !(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
b0d623f7
A
4209 error = EAGAIN;
4210 break;
4211 }
4212 if (flocknflp) {
4213 /* release any currently held shared lock before sleeping */
4214 nfs_open_state_clear_busy(np);
4215 busy = 0;
f427ee49
A
4216 if (inuse) {
4217 nfs_mount_state_in_use_end(nmp, 0);
4218 inuse = 0;
4219 }
6d2010ae 4220 error2 = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
b0d623f7 4221 flocknflp = NULL;
0a7de745 4222 if (!error2) {
6d2010ae 4223 error2 = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
0a7de745 4224 }
b0d623f7
A
4225 if (!error2) {
4226 inuse = 1;
6d2010ae 4227 error2 = nfs_open_state_set_busy(np, vfs_context_thread(ctx));
b0d623f7
A
4228 }
4229 if (error2) {
4230 error = error2;
4231 break;
4232 }
4233 busy = 1;
4234 }
6d2010ae
A
4235 /*
4236 * Wait a little bit and send the request again.
4237 * Except for retries of blocked v2/v3 requests, where we've already waited a bit.
4238 */
4239 if ((nmp->nm_vers >= NFS_VER4) || (error == NFSERR_GRACE)) {
0a7de745 4240 if (error == NFSERR_GRACE) {
6d2010ae 4241 delay = 4;
0a7de745
A
4242 }
4243 if (delay < 4) {
6d2010ae 4244 delay++;
0a7de745
A
4245 }
4246 tsleep(newnflp, slpflag, "nfs_advlock_setlock_delay", delay * (hz / 2));
6d2010ae
A
4247 slpflag = 0;
4248 }
b0d623f7
A
4249 error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
4250 if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
4251 /* looks like we have a recover pending... restart */
4252 nfs_open_state_clear_busy(np);
4253 busy = 0;
f427ee49
A
4254 if (inuse) {
4255 nfs_mount_state_in_use_end(nmp, 0);
4256 inuse = 0;
4257 }
b0d623f7
A
4258 goto restart;
4259 }
0a7de745 4260 if (!error && (np->n_flag & NREVOKE)) {
6d2010ae 4261 error = EIO;
0a7de745 4262 }
b0d623f7
A
4263 } while (!error);
4264
4265error_out:
4266 if (nfs_mount_state_error_should_restart(error)) {
4267 /* looks like we need to restart this operation */
4268 if (busy) {
4269 nfs_open_state_clear_busy(np);
4270 busy = 0;
4271 }
4272 if (inuse) {
4273 nfs_mount_state_in_use_end(nmp, error);
4274 inuse = 0;
4275 }
4276 goto restart;
4277 }
4278 lck_mtx_lock(&np->n_openlock);
4279 newnflp->nfl_flags &= ~NFS_FILE_LOCK_BLOCKED;
4280 if (error) {
4281 newnflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4282 if (newnflp->nfl_blockcnt) {
4283 /* wake up anyone blocked on this lock */
4284 wakeup(newnflp);
4285 } else {
4286 /* remove newnflp from lock list and destroy */
0a7de745 4287 if (inqueue) {
316670eb 4288 TAILQ_REMOVE(&np->n_locks, newnflp, nfl_link);
0a7de745 4289 }
b0d623f7
A
4290 nfs_file_lock_destroy(newnflp);
4291 }
4292 lck_mtx_unlock(&np->n_openlock);
0a7de745 4293 if (busy) {
b0d623f7 4294 nfs_open_state_clear_busy(np);
0a7de745
A
4295 }
4296 if (inuse) {
b0d623f7 4297 nfs_mount_state_in_use_end(nmp, error);
0a7de745
A
4298 }
4299 if (nflp2) {
b0d623f7 4300 nfs_file_lock_destroy(nflp2);
0a7de745
A
4301 }
4302 return error;
b0d623f7
A
4303 }
4304
4305 /* server granted the lock */
4306
4307 /*
4308 * Scan for locks to update.
4309 *
4310 * Locks completely covered are killed.
4311 * At most two locks may need to be clipped.
4312 * It's possible that a single lock may need to be split.
4313 */
4314 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
0a7de745 4315 if (nflp == newnflp) {
b0d623f7 4316 continue;
0a7de745
A
4317 }
4318 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
b0d623f7 4319 continue;
0a7de745
A
4320 }
4321 if (nflp->nfl_owner != nlop) {
b0d623f7 4322 continue;
0a7de745
A
4323 }
4324 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK)) {
b0d623f7 4325 continue;
0a7de745
A
4326 }
4327 if ((newnflp->nfl_start > nflp->nfl_end) || (newnflp->nfl_end < nflp->nfl_start)) {
b0d623f7 4328 continue;
0a7de745 4329 }
b0d623f7
A
4330 /* here's one to update */
4331 if ((newnflp->nfl_start <= nflp->nfl_start) && (newnflp->nfl_end >= nflp->nfl_end)) {
4332 /* The entire lock is being replaced. */
4333 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4334 lck_mtx_lock(&nlop->nlo_lock);
4335 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4336 lck_mtx_unlock(&nlop->nlo_lock);
4337 /* lock will be destroyed below, if no waiters */
4338 } else if ((newnflp->nfl_start > nflp->nfl_start) && (newnflp->nfl_end < nflp->nfl_end)) {
4339 /* We're replacing a range in the middle of a lock. */
4340 /* The current lock will be split into two locks. */
4341 /* Update locks and insert new lock after current lock. */
0a7de745 4342 nflp2->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK | NFS_FILE_LOCK_DELEGATED));
b0d623f7
A
4343 nflp2->nfl_type = nflp->nfl_type;
4344 nflp2->nfl_start = newnflp->nfl_end + 1;
4345 nflp2->nfl_end = nflp->nfl_end;
4346 nflp->nfl_end = newnflp->nfl_start - 1;
4347 TAILQ_INSERT_AFTER(&np->n_locks, nflp, nflp2, nfl_link);
4348 nfs_lock_owner_insert_held_lock(nlop, nflp2);
4349 nextnflp = nflp2;
4350 nflp2 = NULL;
4351 } else if (newnflp->nfl_start > nflp->nfl_start) {
4352 /* We're replacing the end of a lock. */
4353 nflp->nfl_end = newnflp->nfl_start - 1;
4354 } else if (newnflp->nfl_end < nflp->nfl_end) {
4355 /* We're replacing the start of a lock. */
4356 nflp->nfl_start = newnflp->nfl_end + 1;
4357 }
4358 if (nflp->nfl_blockcnt) {
4359 /* wake up anyone blocked on this lock */
4360 wakeup(nflp);
4361 } else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
4362 /* remove nflp from lock list and destroy */
4363 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4364 nfs_file_lock_destroy(nflp);
4365 }
4366 }
4367
4368 nfs_lock_owner_insert_held_lock(nlop, newnflp);
4369
4370 /*
4371 * POSIX locks should be coalesced when possible.
4372 */
4373 if ((style == NFS_FILE_LOCK_STYLE_POSIX) && (nofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)) {
4374 /*
4375 * Walk through the lock queue and check each of our held locks with
4376 * the previous and next locks in the lock owner's "held lock list".
4377 * If the two locks can be coalesced, we merge the current lock into
4378 * the other (previous or next) lock. Merging this way makes sure that
4379 * lock ranges are always merged forward in the lock queue. This is
4380 * important because anyone blocked on the lock being "merged away"
4381 * will still need to block on that range and it will simply continue
4382 * checking locks that are further down the list.
4383 */
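/*
 * For example: held POSIX read locks covering 0-49 and 50-99 are merged
 * into a single 0-99 entry; the absorbed lock is marked DEAD and freed
 * once nothing is blocked on it.
 */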
4384 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
0a7de745 4385 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
b0d623f7 4386 continue;
0a7de745
A
4387 }
4388 if (nflp->nfl_owner != nlop) {
b0d623f7 4389 continue;
0a7de745
A
4390 }
4391 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_POSIX) {
b0d623f7 4392 continue;
0a7de745 4393 }
b0d623f7
A
4394 if (((coalnflp = TAILQ_PREV(nflp, nfs_file_lock_queue, nfl_lolink))) &&
4395 ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
4396 (coalnflp->nfl_type == nflp->nfl_type) &&
4397 (coalnflp->nfl_end == (nflp->nfl_start - 1))) {
4398 coalnflp->nfl_end = nflp->nfl_end;
4399 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4400 lck_mtx_lock(&nlop->nlo_lock);
4401 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4402 lck_mtx_unlock(&nlop->nlo_lock);
4403 } else if (((coalnflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
4404 ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
4405 (coalnflp->nfl_type == nflp->nfl_type) &&
4406 (coalnflp->nfl_start == (nflp->nfl_end + 1))) {
4407 coalnflp->nfl_start = nflp->nfl_start;
4408 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4409 lck_mtx_lock(&nlop->nlo_lock);
4410 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4411 lck_mtx_unlock(&nlop->nlo_lock);
4412 }
0a7de745 4413 if (!(nflp->nfl_flags & NFS_FILE_LOCK_DEAD)) {
b0d623f7 4414 continue;
0a7de745 4415 }
b0d623f7
A
4416 if (nflp->nfl_blockcnt) {
4417 /* wake up anyone blocked on this lock */
4418 wakeup(nflp);
4419 } else {
4420 /* remove nflp from lock list and destroy */
4421 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4422 nfs_file_lock_destroy(nflp);
4423 }
4424 }
4425 }
4426
4427 lck_mtx_unlock(&np->n_openlock);
4428 nfs_open_state_clear_busy(np);
b0d623f7 4429
f427ee49
A
4430 if (inuse) {
4431 nfs_mount_state_in_use_end(nmp, error);
4432 }
0a7de745 4433 if (nflp2) {
b0d623f7 4434 nfs_file_lock_destroy(nflp2);
0a7de745
A
4435 }
4436 return error;
b0d623f7
A
4437}
4438
6d2010ae
A
4439/*
4440 * Release all (same style) locks within the given range.
4441 */
b0d623f7 4442int
6d2010ae 4443nfs_advlock_unlock(
b0d623f7 4444 nfsnode_t np,
cb323159
A
4445 struct nfs_open_file *nofp
4446#if !CONFIG_NFS4
4447 __unused
4448#endif
4449 ,
b0d623f7
A
4450 struct nfs_lock_owner *nlop,
4451 uint64_t start,
4452 uint64_t end,
4453 int style,
4454 vfs_context_t ctx)
4455{
4456 struct nfsmount *nmp;
4457 struct nfs_file_lock *nflp, *nextnflp, *newnflp = NULL;
4458 int error = 0, willsplit = 0, send_unlock_rpcs = 1;
4459
4460 nmp = NFSTONMP(np);
0a7de745
A
4461 if (nfs_mount_gone(nmp)) {
4462 return ENXIO;
4463 }
b0d623f7
A
4464
4465restart:
0a7de745
A
4466 if ((error = nfs_mount_state_in_use_start(nmp, NULL))) {
4467 return error;
4468 }
cb323159 4469#if CONFIG_NFS4
b0d623f7
A
4470 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
4471 nfs_mount_state_in_use_end(nmp, 0);
6d2010ae 4472 error = nfs4_reopen(nofp, NULL);
0a7de745
A
4473 if (error) {
4474 return error;
4475 }
b0d623f7
A
4476 goto restart;
4477 }
cb323159 4478#endif
6d2010ae 4479 if ((error = nfs_open_state_set_busy(np, NULL))) {
b0d623f7 4480 nfs_mount_state_in_use_end(nmp, error);
0a7de745 4481 return error;
b0d623f7
A
4482 }
4483
4484 lck_mtx_lock(&np->n_openlock);
4485 if ((start > 0) && (end < UINT64_MAX) && !willsplit) {
4486 /*
4487 * We may need to allocate a new lock if an existing lock gets split.
4488 * So, we first scan the list to check for a split, and if there's
4489 * going to be one, we'll allocate one now.
4490 */
4491 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
0a7de745 4492 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
b0d623f7 4493 continue;
0a7de745
A
4494 }
4495 if (nflp->nfl_owner != nlop) {
b0d623f7 4496 continue;
0a7de745
A
4497 }
4498 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style) {
b0d623f7 4499 continue;
0a7de745
A
4500 }
4501 if ((start > nflp->nfl_end) || (end < nflp->nfl_start)) {
b0d623f7 4502 continue;
0a7de745 4503 }
b0d623f7
A
4504 if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
4505 willsplit = 1;
4506 break;
4507 }
4508 }
4509 if (willsplit) {
4510 lck_mtx_unlock(&np->n_openlock);
4511 nfs_open_state_clear_busy(np);
4512 nfs_mount_state_in_use_end(nmp, 0);
4513 newnflp = nfs_file_lock_alloc(nlop);
0a7de745
A
4514 if (!newnflp) {
4515 return ENOMEM;
4516 }
b0d623f7
A
4517 goto restart;
4518 }
4519 }
4520
4521 /*
4522 * Free all of our locks in the given range.
4523 *
4524 * Note that this process requires sending requests to the server.
0a7de745 4525 * Because of this, we will release the n_openlock while performing
b0d623f7
A
4526 * the unlock RPCs. The N_OPENBUSY state keeps the state of *held*
4527 * locks from changing underneath us. However, other entries in the
4528 * list may be removed. So we need to be careful walking the list.
4529 */
4530
4531 /*
4532 * Don't unlock ranges that are held by other-style locks.
4533 * If style is posix, don't send any unlock rpcs if flock is held.
4534 * If we unlock a flock, don't send unlock rpcs for any posix-style
4535 * ranges held - instead send unlocks for the ranges not held.
4536 */
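/*
 * For example: dropping a whole-file flock(2) lock while a posix lock is
 * still held on 100-199 makes the loop below send LOCKU for 0-99 and for
 * 200 through the end of the range, leaving the held posix range untouched
 * on the server (unless a delegation lets us skip the RPCs entirely).
 */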
4537 if ((style == NFS_FILE_LOCK_STYLE_POSIX) &&
4538 ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
0a7de745 4539 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK)) {
b0d623f7 4540 send_unlock_rpcs = 0;
0a7de745 4541 }
b0d623f7
A
4542 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) &&
4543 ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
4544 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) &&
4545 ((nflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
4546 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX)) {
4547 uint64_t s = 0;
4548 int type = TAILQ_FIRST(&nlop->nlo_locks)->nfl_type;
6d2010ae
A
4549 int delegated = (TAILQ_FIRST(&nlop->nlo_locks)->nfl_flags & NFS_FILE_LOCK_DELEGATED);
4550 while (!delegated && nflp) {
b0d623f7
A
4551 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) {
4552 /* unlock the range preceding this lock */
4553 lck_mtx_unlock(&np->n_openlock);
0a7de745
A
4554 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, nflp->nfl_start - 1, 0,
4555 vfs_context_thread(ctx), vfs_context_ucred(ctx));
b0d623f7
A
4556 if (nfs_mount_state_error_should_restart(error)) {
4557 nfs_open_state_clear_busy(np);
4558 nfs_mount_state_in_use_end(nmp, error);
4559 goto restart;
4560 }
4561 lck_mtx_lock(&np->n_openlock);
0a7de745 4562 if (error) {
b0d623f7 4563 goto out;
0a7de745
A
4564 }
4565 s = nflp->nfl_end + 1;
b0d623f7
A
4566 }
4567 nflp = TAILQ_NEXT(nflp, nfl_lolink);
4568 }
6d2010ae
A
4569 if (!delegated) {
4570 lck_mtx_unlock(&np->n_openlock);
4571 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, end, 0,
0a7de745 4572 vfs_context_thread(ctx), vfs_context_ucred(ctx));
6d2010ae
A
4573 if (nfs_mount_state_error_should_restart(error)) {
4574 nfs_open_state_clear_busy(np);
4575 nfs_mount_state_in_use_end(nmp, error);
4576 goto restart;
4577 }
4578 lck_mtx_lock(&np->n_openlock);
0a7de745 4579 if (error) {
6d2010ae 4580 goto out;
0a7de745 4581 }
b0d623f7 4582 }
b0d623f7
A
4583 send_unlock_rpcs = 0;
4584 }
4585
4586 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
0a7de745 4587 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
b0d623f7 4588 continue;
0a7de745
A
4589 }
4590 if (nflp->nfl_owner != nlop) {
b0d623f7 4591 continue;
0a7de745
A
4592 }
4593 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style) {
b0d623f7 4594 continue;
0a7de745
A
4595 }
4596 if ((start > nflp->nfl_end) || (end < nflp->nfl_start)) {
b0d623f7 4597 continue;
0a7de745 4598 }
b0d623f7
A
4599 /* here's one to unlock */
4600 if ((start <= nflp->nfl_start) && (end >= nflp->nfl_end)) {
4601 /* The entire lock is being unlocked. */
6d2010ae 4602 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
b0d623f7 4603 lck_mtx_unlock(&np->n_openlock);
6d2010ae 4604 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, nflp->nfl_end, 0,
0a7de745 4605 vfs_context_thread(ctx), vfs_context_ucred(ctx));
b0d623f7
A
4606 if (nfs_mount_state_error_should_restart(error)) {
4607 nfs_open_state_clear_busy(np);
4608 nfs_mount_state_in_use_end(nmp, error);
4609 goto restart;
4610 }
4611 lck_mtx_lock(&np->n_openlock);
4612 }
4613 nextnflp = TAILQ_NEXT(nflp, nfl_link);
0a7de745 4614 if (error) {
b0d623f7 4615 break;
0a7de745 4616 }
b0d623f7
A
4617 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4618 lck_mtx_lock(&nlop->nlo_lock);
4619 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4620 lck_mtx_unlock(&nlop->nlo_lock);
4621 /* lock will be destroyed below, if no waiters */
4622 } else if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
4623 /* We're unlocking a range in the middle of a lock. */
4624 /* The current lock will be split into two locks. */
6d2010ae 4625 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
b0d623f7 4626 lck_mtx_unlock(&np->n_openlock);
6d2010ae 4627 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, end, 0,
0a7de745 4628 vfs_context_thread(ctx), vfs_context_ucred(ctx));
b0d623f7
A
4629 if (nfs_mount_state_error_should_restart(error)) {
4630 nfs_open_state_clear_busy(np);
4631 nfs_mount_state_in_use_end(nmp, error);
4632 goto restart;
4633 }
4634 lck_mtx_lock(&np->n_openlock);
4635 }
0a7de745 4636 if (error) {
b0d623f7 4637 break;
0a7de745 4638 }
b0d623f7 4639 /* update locks and insert new lock after current lock */
0a7de745 4640 newnflp->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK | NFS_FILE_LOCK_DELEGATED));
b0d623f7
A
4641 newnflp->nfl_type = nflp->nfl_type;
4642 newnflp->nfl_start = end + 1;
4643 newnflp->nfl_end = nflp->nfl_end;
4644 nflp->nfl_end = start - 1;
4645 TAILQ_INSERT_AFTER(&np->n_locks, nflp, newnflp, nfl_link);
4646 nfs_lock_owner_insert_held_lock(nlop, newnflp);
4647 nextnflp = newnflp;
4648 newnflp = NULL;
4649 } else if (start > nflp->nfl_start) {
4650 /* We're unlocking the end of a lock. */
6d2010ae 4651 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
b0d623f7 4652 lck_mtx_unlock(&np->n_openlock);
6d2010ae 4653 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, nflp->nfl_end, 0,
0a7de745 4654 vfs_context_thread(ctx), vfs_context_ucred(ctx));
b0d623f7
A
4655 if (nfs_mount_state_error_should_restart(error)) {
4656 nfs_open_state_clear_busy(np);
4657 nfs_mount_state_in_use_end(nmp, error);
4658 goto restart;
4659 }
4660 lck_mtx_lock(&np->n_openlock);
4661 }
4662 nextnflp = TAILQ_NEXT(nflp, nfl_link);
0a7de745 4663 if (error) {
b0d623f7 4664 break;
0a7de745 4665 }
b0d623f7
A
4666 nflp->nfl_end = start - 1;
4667 } else if (end < nflp->nfl_end) {
4668 /* We're unlocking the start of a lock. */
6d2010ae 4669 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
b0d623f7 4670 lck_mtx_unlock(&np->n_openlock);
6d2010ae 4671 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, end, 0,
0a7de745 4672 vfs_context_thread(ctx), vfs_context_ucred(ctx));
b0d623f7
A
4673 if (nfs_mount_state_error_should_restart(error)) {
4674 nfs_open_state_clear_busy(np);
4675 nfs_mount_state_in_use_end(nmp, error);
4676 goto restart;
4677 }
4678 lck_mtx_lock(&np->n_openlock);
4679 }
4680 nextnflp = TAILQ_NEXT(nflp, nfl_link);
0a7de745 4681 if (error) {
b0d623f7 4682 break;
0a7de745 4683 }
b0d623f7
A
4684 nflp->nfl_start = end + 1;
4685 }
4686 if (nflp->nfl_blockcnt) {
4687 /* wake up anyone blocked on this lock */
4688 wakeup(nflp);
4689 } else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
4690 /* remove nflp from lock list and destroy */
4691 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4692 nfs_file_lock_destroy(nflp);
4693 }
4694 }
4695out:
4696 lck_mtx_unlock(&np->n_openlock);
4697 nfs_open_state_clear_busy(np);
4698 nfs_mount_state_in_use_end(nmp, 0);
4699
0a7de745 4700 if (newnflp) {
b0d623f7 4701 nfs_file_lock_destroy(newnflp);
0a7de745
A
4702 }
4703 return error;
b0d623f7
A
4704}
4705
4706/*
4707 * NFSv4 advisory file locking
4708 */
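/*
 * nfs_vnop_advlock() below is the advisory-locking vnop for NFS mounts.
 * It validates the vnode type, converts the caller's struct flock into an
 * absolute byte range, finds (or creates) the lock owner for the calling
 * process, locates the open file for the open owner, and then dispatches
 * to nfs_advlock_getlock(), nfs_advlock_setlock() or nfs_advlock_unlock().
 * NFSv2/v3-specific restrictions (disabled lock mode, 32-bit offsets) are
 * checked inline.
 */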
4709int
6d2010ae 4710nfs_vnop_advlock(
b0d623f7 4711 struct vnop_advlock_args /* {
0a7de745
A
4712 * struct vnodeop_desc *a_desc;
4713 * vnode_t a_vp;
4714 * caddr_t a_id;
4715 * int a_op;
4716 * struct flock *a_fl;
4717 * int a_flags;
4718 * vfs_context_t a_context;
4719 * } */*ap)
b0d623f7
A
4720{
4721 vnode_t vp = ap->a_vp;
4722 nfsnode_t np = VTONFS(ap->a_vp);
4723 struct flock *fl = ap->a_fl;
4724 int op = ap->a_op;
4725 int flags = ap->a_flags;
4726 vfs_context_t ctx = ap->a_context;
4727 struct nfsmount *nmp;
b0d623f7
A
4728 struct nfs_open_owner *noop = NULL;
4729 struct nfs_open_file *nofp = NULL;
4730 struct nfs_lock_owner *nlop = NULL;
4731 off_t lstart;
4732 uint64_t start, end;
4733 int error = 0, modified, style;
6d2010ae 4734 enum vtype vtype;
b0d623f7
A
4735#define OFF_MAX QUAD_MAX
4736
4737 nmp = VTONMP(ap->a_vp);
0a7de745
A
4738 if (nfs_mount_gone(nmp)) {
4739 return ENXIO;
4740 }
6d2010ae
A
4741 lck_mtx_lock(&nmp->nm_lock);
4742 if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED)) {
4743 lck_mtx_unlock(&nmp->nm_lock);
0a7de745 4744 return ENOTSUP;
6d2010ae
A
4745 }
4746 lck_mtx_unlock(&nmp->nm_lock);
b0d623f7 4747
0a7de745
A
4748 if (np->n_flag & NREVOKE) {
4749 return EIO;
4750 }
6d2010ae 4751 vtype = vnode_vtype(ap->a_vp);
0a7de745
A
4752 if (vtype == VDIR) { /* ignore lock requests on directories */
4753 return 0;
4754 }
4755 if (vtype != VREG) { /* anything other than regular files is invalid */
4756 return EINVAL;
4757 }
6d2010ae
A
4758
4759 /* Convert the flock structure into a start and end. */
b0d623f7
A
4760 switch (fl->l_whence) {
4761 case SEEK_SET:
4762 case SEEK_CUR:
4763 /*
4764 * Caller is responsible for adding any necessary offset
4765 * to fl->l_start when SEEK_CUR is used.
4766 */
4767 lstart = fl->l_start;
4768 break;
4769 case SEEK_END:
4770 /* need to flush, and refetch attributes to make */
4771 /* sure we have the correct end of file offset */
0a7de745
A
4772 if ((error = nfs_node_lock(np))) {
4773 return error;
4774 }
b0d623f7
A
4775 modified = (np->n_flag & NMODIFIED);
4776 nfs_node_unlock(np);
0a7de745
A
4777 if (modified && ((error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1)))) {
4778 return error;
4779 }
4780 if ((error = nfs_getattr(np, NULL, ctx, NGA_UNCACHED))) {
4781 return error;
4782 }
b0d623f7
A
4783 nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
4784 if ((np->n_size > OFF_MAX) ||
0a7de745 4785 ((fl->l_start > 0) && (np->n_size > (u_quad_t)(OFF_MAX - fl->l_start)))) {
b0d623f7 4786 error = EOVERFLOW;
0a7de745 4787 }
b0d623f7
A
4788 lstart = np->n_size + fl->l_start;
4789 nfs_data_unlock(np);
0a7de745
A
4790 if (error) {
4791 return error;
4792 }
b0d623f7
A
4793 break;
4794 default:
0a7de745
A
4795 return EINVAL;
4796 }
4797 if (lstart < 0) {
4798 return EINVAL;
b0d623f7 4799 }
b0d623f7
A
4800 start = lstart;
4801 if (fl->l_len == 0) {
4802 end = UINT64_MAX;
4803 } else if (fl->l_len > 0) {
0a7de745
A
4804 if ((fl->l_len - 1) > (OFF_MAX - lstart)) {
4805 return EOVERFLOW;
4806 }
b0d623f7
A
4807 end = start - 1 + fl->l_len;
4808 } else { /* l_len is negative */
0a7de745
A
4809 if ((lstart + fl->l_len) < 0) {
4810 return EINVAL;
4811 }
b0d623f7
A
4812 end = start - 1;
4813 start += fl->l_len;
4814 }
0a7de745
A
4815 if ((nmp->nm_vers == NFS_VER2) && ((start > INT32_MAX) || (fl->l_len && (end > INT32_MAX)))) {
4816 return EINVAL;
4817 }
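	/*
	 * Examples of the conversion above (SEEK_SET, so lstart == l_start):
	 *   l_start = 100, l_len = 50   ->  start = 100, end = 149
	 *   l_start = 100, l_len = 0    ->  start = 100, end = UINT64_MAX (lock to end of file)
	 *   l_start = 100, l_len = -50  ->  start = 50,  end = 99
	 */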
b0d623f7
A
4818
4819 style = (flags & F_FLOCK) ? NFS_FILE_LOCK_STYLE_FLOCK : NFS_FILE_LOCK_STYLE_POSIX;
0a7de745
A
4820 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && ((start != 0) || (end != UINT64_MAX))) {
4821 return EINVAL;
4822 }
b0d623f7
A
4823
4824	/* find the lock owner; allocate a new one unless this is an unlock request */
4825 nlop = nfs_lock_owner_find(np, vfs_context_proc(ctx), (op != F_UNLCK));
4826 if (!nlop) {
4827 error = (op == F_UNLCK) ? 0 : ENOMEM;
0a7de745 4828 if (error) {
6d2010ae 4829 NP(np, "nfs_vnop_advlock: no lock owner, error %d", error);
0a7de745 4830 }
b0d623f7
A
4831 goto out;
4832 }
4833
4834 if (op == F_GETLK) {
6d2010ae 4835 error = nfs_advlock_getlock(np, nlop, fl, start, end, ctx);
b0d623f7
A
4836 } else {
4837 /* find the open owner */
4838 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 0);
4839 if (!noop) {
6d2010ae 4840 NP(np, "nfs_vnop_advlock: no open owner %d", kauth_cred_getuid(vfs_context_ucred(ctx)));
b0d623f7
A
4841 error = EPERM;
4842 goto out;
4843 }
4844 /* find the open file */
cb323159 4845#if CONFIG_NFS4
b0d623f7 4846restart:
cb323159 4847#endif
b0d623f7 4848 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 0);
0a7de745 4849 if (error) {
b0d623f7 4850 error = EBADF;
0a7de745 4851 }
b0d623f7 4852 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6d2010ae 4853 NP(np, "nfs_vnop_advlock: LOST %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
b0d623f7
A
4854 error = EIO;
4855 }
cb323159 4856#if CONFIG_NFS4
b0d623f7 4857 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6d2010ae 4858 error = nfs4_reopen(nofp, ((op == F_UNLCK) ? NULL : vfs_context_thread(ctx)));
b0d623f7 4859 nofp = NULL;
0a7de745 4860 if (!error) {
6d2010ae 4861 goto restart;
0a7de745 4862 }
b0d623f7 4863 }
cb323159 4864#endif
b0d623f7 4865 if (error) {
6d2010ae 4866 NP(np, "nfs_vnop_advlock: no open file %d, %d", error, kauth_cred_getuid(noop->noo_cred));
b0d623f7
A
4867 goto out;
4868 }
4869 if (op == F_UNLCK) {
6d2010ae 4870 error = nfs_advlock_unlock(np, nofp, nlop, start, end, style, ctx);
b0d623f7 4871 } else if ((op == F_SETLK) || (op == F_SETLKW)) {
0a7de745 4872 if ((op == F_SETLK) && (flags & F_WAIT)) {
b0d623f7 4873 op = F_SETLKW;
0a7de745 4874 }
6d2010ae 4875 error = nfs_advlock_setlock(np, nofp, nlop, op, start, end, style, fl->l_type, ctx);
b0d623f7
A
4876 } else {
4877 /* not getlk, unlock or lock? */
4878 error = EINVAL;
4879 }
4880 }
4881
4882out:
0a7de745 4883 if (nlop) {
b0d623f7 4884 nfs_lock_owner_rele(nlop);
0a7de745
A
4885 }
4886 if (noop) {
b0d623f7 4887 nfs_open_owner_rele(noop);
0a7de745
A
4888 }
4889 return error;
b0d623f7
A
4890}
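/*
 * Illustrative user-space sketch (not part of the kernel sources): on an
 * NFS mount, byte-range locks taken with fcntl(2) reach nfs_vnop_advlock()
 * as POSIX-style requests, while flock(2) arrives with F_FLOCK set and is
 * treated as a whole-file lock.  The snippet below is kept under "#if 0"
 * and only shows the shape of such a request; the function name is made up.
 */
#if 0	/* example only */
#include <fcntl.h>
#include <unistd.h>

static int
lock_first_kilobyte(const char *path)
{
	int fd = open(path, O_RDWR);	/* e.g. a file on an NFS mount */
	struct flock fl = {
		.l_type   = F_WRLCK,	/* exclusive (write) lock */
		.l_whence = SEEK_SET,	/* l_start is an absolute offset */
		.l_start  = 0,
		.l_len    = 1024,	/* bytes 0..1023 */
	};
	if (fd < 0) {
		return -1;
	}
	/* blocks until granted; reaches the vnop as F_SETLKW (or F_SETLK with F_WAIT) */
	return fcntl(fd, F_SETLKW, &fl);
}
#endif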
4891
4892/*
4893 * Check if an open owner holds any locks on a file.
4894 */
4895int
6d2010ae 4896nfs_check_for_locks(struct nfs_open_owner *noop, struct nfs_open_file *nofp)
b0d623f7
A
4897{
4898 struct nfs_lock_owner *nlop;
4899
4900 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
0a7de745 4901 if (nlop->nlo_open_owner != noop) {
b0d623f7 4902 continue;
0a7de745
A
4903 }
4904 if (!TAILQ_EMPTY(&nlop->nlo_locks)) {
b0d623f7 4905 break;
0a7de745 4906 }
b0d623f7 4907 }
0a7de745 4908 return nlop ? 1 : 0;
b0d623f7
A
4909}
4910
cb323159 4911#if CONFIG_NFS4
b0d623f7
A
4912/*
4913 * Reopen simple (no deny, no locks) open state that was lost.
4914 */
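/*
 * nfs4_reopen() below re-sends OPEN requests for the access modes recorded
 * in the open file: read/write, write-only and read-only opens are each
 * re-opened with NFS_OPEN_SHARE_DENY_NONE.  The name used for the OPEN is
 * taken from the sillyrename record if the node has been sillyrenamed,
 * otherwise from the vnode's parent and name.
 */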
6d2010ae 4915int
b0d623f7
A
4916nfs4_reopen(struct nfs_open_file *nofp, thread_t thd)
4917{
4918 struct nfs_open_owner *noop = nofp->nof_owner;
4919 struct nfsmount *nmp = NFSTONMP(nofp->nof_np);
6d2010ae
A
4920 nfsnode_t np = nofp->nof_np;
4921 vnode_t vp = NFSTOV(np);
b0d623f7
A
4922 vnode_t dvp = NULL;
4923 struct componentname cn;
4924 const char *vname = NULL;
6d2010ae 4925 const char *name = NULL;
f427ee49 4926 uint32_t namelen;
b0d623f7
A
4927 char smallname[128];
4928 char *filename = NULL;
6d2010ae 4929 int error = 0, done = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
cb323159 4930 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
b0d623f7
A
4931
4932 lck_mtx_lock(&nofp->nof_lock);
4933 while (nofp->nof_flags & NFS_OPEN_FILE_REOPENING) {
0a7de745 4934 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
b0d623f7 4935 break;
0a7de745
A
4936 }
4937 msleep(&nofp->nof_flags, &nofp->nof_lock, slpflag | (PZERO - 1), "nfsreopenwait", &ts);
6d2010ae 4938 slpflag = 0;
b0d623f7 4939 }
6d2010ae 4940 if (error || !(nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
b0d623f7 4941 lck_mtx_unlock(&nofp->nof_lock);
0a7de745 4942 return error;
b0d623f7
A
4943 }
4944 nofp->nof_flags |= NFS_OPEN_FILE_REOPENING;
4945 lck_mtx_unlock(&nofp->nof_lock);
4946
6d2010ae
A
4947 nfs_node_lock_force(np);
4948 if ((vnode_vtype(vp) != VDIR) && np->n_sillyrename) {
4949 /*
4950 * The node's been sillyrenamed, so we need to use
4951 * the sillyrename directory/name to do the open.
4952 */
4953 struct nfs_sillyrename *nsp = np->n_sillyrename;
4954 dvp = NFSTOV(nsp->nsr_dnp);
4955 if ((error = vnode_get(dvp))) {
cb323159 4956 dvp = NULLVP;
6d2010ae
A
4957 nfs_node_unlock(np);
4958 goto out;
4959 }
4960 name = nsp->nsr_name;
4961 } else {
4962 /*
4963 * [sigh] We can't trust VFS to get the parent right for named
4964 * attribute nodes. (It likes to reparent the nodes after we've
4965 * created them.) Luckily we can probably get the right parent
4966 * from the n_parent we have stashed away.
4967 */
4968 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
0a7de745 4969 (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
6d2010ae 4970 dvp = NULL;
0a7de745
A
4971 }
4972 if (!dvp) {
6d2010ae 4973 dvp = vnode_getparent(vp);
0a7de745 4974 }
6d2010ae
A
4975 vname = vnode_getname(vp);
4976 if (!dvp || !vname) {
0a7de745 4977 if (!error) {
6d2010ae 4978 error = EIO;
0a7de745 4979 }
6d2010ae
A
4980 nfs_node_unlock(np);
4981 goto out;
4982 }
4983 name = vname;
b0d623f7
A
4984 }
4985 filename = &smallname[0];
6d2010ae 4986 namelen = snprintf(filename, sizeof(smallname), "%s", name);
b0d623f7 4987 if (namelen >= sizeof(smallname)) {
0a7de745 4988 MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
b0d623f7
A
4989 if (!filename) {
4990 error = ENOMEM;
4991 goto out;
4992 }
0a7de745 4993 snprintf(filename, namelen + 1, "%s", name);
b0d623f7 4994 }
6d2010ae 4995 nfs_node_unlock(np);
b0d623f7
A
4996 bzero(&cn, sizeof(cn));
4997 cn.cn_nameptr = filename;
4998 cn.cn_namelen = namelen;
4999
5000restart:
5001 done = 0;
0a7de745 5002 if ((error = nfs_mount_state_in_use_start(nmp, thd))) {
b0d623f7 5003 goto out;
0a7de745 5004 }
b0d623f7 5005
0a7de745 5006 if (nofp->nof_rw) {
b0d623f7 5007 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE);
0a7de745
A
5008 }
5009 if (!error && nofp->nof_w) {
b0d623f7 5010 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE);
0a7de745
A
5011 }
5012 if (!error && nofp->nof_r) {
b0d623f7 5013 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE);
0a7de745 5014 }
b0d623f7
A
5015
5016 if (nfs_mount_state_in_use_end(nmp, error)) {
0a7de745 5017 if (error == NFSERR_GRACE) {
b0d623f7 5018 goto restart;
0a7de745 5019 }
6d2010ae 5020 printf("nfs4_reopen: RPC failed, error %d, lost %d, %s\n", error,
0a7de745 5021 (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
b0d623f7
A
5022 error = 0;
5023 goto out;
5024 }
5025 done = 1;
5026out:
0a7de745 5027 if (error && (error != EINTR) && (error != ERESTART)) {
6d2010ae 5028 nfs_revoke_open_state_for_node(np);
0a7de745 5029 }
b0d623f7
A
5030 lck_mtx_lock(&nofp->nof_lock);
5031 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPENING;
0a7de745 5032 if (done) {
b0d623f7 5033 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
0a7de745 5034 } else if (error) {
6d2010ae 5035 printf("nfs4_reopen: failed, error %d, lost %d, %s\n", error,
0a7de745
A
5036 (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
5037 }
b0d623f7 5038 lck_mtx_unlock(&nofp->nof_lock);
0a7de745 5039 if (filename && (filename != &smallname[0])) {
b0d623f7 5040 FREE(filename, M_TEMP);
0a7de745
A
5041 }
5042 if (vname) {
b0d623f7 5043 vnode_putname(vname);
0a7de745
A
5044 }
5045 if (dvp != NULLVP) {
b0d623f7 5046 vnode_put(dvp);
0a7de745
A
5047 }
5048 return error;
b0d623f7
A
5049}
5050
5051/*
5052 * Send a normal OPEN RPC to open/create a file.
5053 */
5054int
5055nfs4_open_rpc(
5056 struct nfs_open_file *nofp,
5057 vfs_context_t ctx,
5058 struct componentname *cnp,
5059 struct vnode_attr *vap,
5060 vnode_t dvp,
5061 vnode_t *vpp,
5062 int create,
5063 int share_access,
5064 int share_deny)
5065{
0a7de745
A
5066 return nfs4_open_rpc_internal(nofp, ctx, vfs_context_thread(ctx), vfs_context_ucred(ctx),
5067 cnp, vap, dvp, vpp, create, share_access, share_deny);
b0d623f7
A
5068}
5069
5070/*
5071 * Send an OPEN RPC to reopen a file.
5072 */
5073int
5074nfs4_open_reopen_rpc(
5075 struct nfs_open_file *nofp,
5076 thread_t thd,
5077 kauth_cred_t cred,
5078 struct componentname *cnp,
5079 vnode_t dvp,
5080 vnode_t *vpp,
5081 int share_access,
5082 int share_deny)
5083{
0a7de745 5084 return nfs4_open_rpc_internal(nofp, NULL, thd, cred, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, share_access, share_deny);
6d2010ae
A
5085}
5086
5087/*
5088 * Send an OPEN_CONFIRM RPC to confirm an OPEN.
5089 */
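/*
 * OPEN_CONFIRM is only sent when the server asks for it by setting
 * NFS_OPEN_RESULT_CONFIRM in the OPEN result flags; it echoes the open
 * stateid back along with the open owner's next sequence number and
 * returns the (confirmed) stateid to use from then on.
 */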
5090int
5091nfs4_open_confirm_rpc(
5092 struct nfsmount *nmp,
5093 nfsnode_t dnp,
5094 u_char *fhp,
5095 int fhlen,
5096 struct nfs_open_owner *noop,
5097 nfs_stateid *sid,
5098 thread_t thd,
5099 kauth_cred_t cred,
5100 struct nfs_vattr *nvap,
5101 uint64_t *xidp)
5102{
5103 struct nfsm_chain nmreq, nmrep;
5104 int error = 0, status, numops;
5105 struct nfsreq_secinfo_args si;
5106
5107 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
5108 nfsm_chain_null(&nmreq);
5109 nfsm_chain_null(&nmrep);
5110
5111 // PUTFH, OPEN_CONFIRM, GETATTR
5112 numops = 3;
5113 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
3e170ce0 5114 nfsm_chain_add_compound_header(error, &nmreq, "open_confirm", nmp->nm_minor_vers, numops);
6d2010ae
A
5115 numops--;
5116 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5117 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
5118 numops--;
5119 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_CONFIRM);
5120 nfsm_chain_add_stateid(error, &nmreq, sid);
5121 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5122 numops--;
5123 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5124 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
5125 nfsm_chain_build_done(error, &nmreq);
5126 nfsm_assert(error, (numops == 0), EPROTO);
5127 nfsmout_if(error);
5128 error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, &nmrep, xidp, &status);
5129
5130 nfsm_chain_skip_tag(error, &nmrep);
5131 nfsm_chain_get_32(error, &nmrep, numops);
5132 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5133 nfsmout_if(error);
5134 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_CONFIRM);
5135 nfs_owner_seqid_increment(noop, NULL, error);
5136 nfsm_chain_get_stateid(error, &nmrep, sid);
5137 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5138 nfsmout_if(error);
5139 error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
5140nfsmout:
5141 nfsm_chain_cleanup(&nmreq);
5142 nfsm_chain_cleanup(&nmrep);
0a7de745 5143 return error;
b0d623f7
A
5144}
5145
5146/*
5147 * common OPEN RPC code
5148 *
5149 * If create is set, ctx must be passed in.
6d2010ae 5150 * Returns a node on success if no node passed in.
b0d623f7
A
5151 */
5152int
5153nfs4_open_rpc_internal(
5154 struct nfs_open_file *nofp,
5155 vfs_context_t ctx,
5156 thread_t thd,
5157 kauth_cred_t cred,
5158 struct componentname *cnp,
5159 struct vnode_attr *vap,
5160 vnode_t dvp,
5161 vnode_t *vpp,
5162 int create,
5163 int share_access,
5164 int share_deny)
5165{
5166 struct nfsmount *nmp;
5167 struct nfs_open_owner *noop = nofp->nof_owner;
f427ee49 5168 struct nfs_vattr *nvattr;
b0d623f7 5169 int error = 0, open_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
6d2010ae 5170 int nfsvers, namedattrs, numops, exclusive = 0, gotuid, gotgid;
b0d623f7
A
5171 u_int64_t xid, savedxid = 0;
5172 nfsnode_t dnp = VTONFS(dvp);
5173 nfsnode_t np, newnp = NULL;
5174 vnode_t newvp = NULL;
5175 struct nfsm_chain nmreq, nmrep;
5176 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6d2010ae 5177 uint32_t rflags, delegation, recall;
b0d623f7 5178 struct nfs_stateid stateid, dstateid, *sid;
f427ee49
A
5179 fhandle_t *fh;
5180 struct nfsreq *req;
5181 struct nfs_dulookup *dul;
6d2010ae
A
5182 char sbuf[64], *s;
5183 uint32_t ace_type, ace_flags, ace_mask, len, slen;
5184 struct kauth_ace ace;
5185 struct nfsreq_secinfo_args si;
b0d623f7 5186
0a7de745
A
5187 if (create && !ctx) {
5188 return EINVAL;
5189 }
b0d623f7
A
5190
5191 nmp = VTONMP(dvp);
0a7de745
A
5192 if (nfs_mount_gone(nmp)) {
5193 return ENXIO;
5194 }
b0d623f7 5195 nfsvers = nmp->nm_vers;
6d2010ae 5196 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
f427ee49 5197 bzero(&dstateid, sizeof(dstateid));
0a7de745
A
5198 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
5199 return EINVAL;
5200 }
b0d623f7
A
5201
5202 np = *vpp ? VTONFS(*vpp) : NULL;
5203 if (create && vap) {
5204 exclusive = (vap->va_vaflags & VA_EXCLUSIVE);
5205 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
5206 gotuid = VATTR_IS_ACTIVE(vap, va_uid);
5207 gotgid = VATTR_IS_ACTIVE(vap, va_gid);
0a7de745 5208 if (exclusive && (!VATTR_IS_ACTIVE(vap, va_access_time) || !VATTR_IS_ACTIVE(vap, va_modify_time))) {
6d2010ae 5209 vap->va_vaflags |= VA_UTIMES_NULL;
0a7de745 5210 }
b0d623f7
A
5211 } else {
5212 exclusive = gotuid = gotgid = 0;
5213 }
5214 if (nofp) {
5215 sid = &nofp->nof_stateid;
5216 } else {
5217 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
5218 sid = &stateid;
5219 }
5220
0a7de745
A
5221 if ((error = nfs_open_owner_set_busy(noop, thd))) {
5222 return error;
5223 }
f427ee49
A
5224
5225 fh = zalloc(nfs_fhandle_zone);
5226 req = zalloc(nfs_req_zone);
5227 MALLOC(dul, struct nfs_dulookup *, sizeof(*dul), M_TEMP, M_WAITOK);
5228 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
5229
b0d623f7 5230again:
6d2010ae
A
5231 rflags = delegation = recall = 0;
5232 ace.ace_flags = 0;
5233 s = sbuf;
5234 slen = sizeof(sbuf);
f427ee49 5235 NVATTR_INIT(nvattr);
6d2010ae 5236 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, cnp->cn_nameptr, cnp->cn_namelen);
b0d623f7
A
5237
5238 nfsm_chain_null(&nmreq);
5239 nfsm_chain_null(&nmrep);
5240
5241 // PUTFH, SAVEFH, OPEN(CREATE?), GETATTR(FH), RESTOREFH, GETATTR
5242 numops = 6;
5243 nfsm_chain_build_alloc_init(error, &nmreq, 53 * NFSX_UNSIGNED + cnp->cn_namelen);
3e170ce0 5244 nfsm_chain_add_compound_header(error, &nmreq, create ? "create" : "open", nmp->nm_minor_vers, numops);
b0d623f7
A
5245 numops--;
5246 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5247 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
5248 numops--;
5249 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
5250 numops--;
5251 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5252 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5253 nfsm_chain_add_32(error, &nmreq, share_access);
5254 nfsm_chain_add_32(error, &nmreq, share_deny);
6d2010ae 5255 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
b0d623f7 5256 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
6d2010ae 5257 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
b0d623f7
A
5258 nfsm_chain_add_32(error, &nmreq, create);
5259 if (create) {
5260 if (exclusive) {
5261 static uint32_t create_verf; // XXX need a better verifier
5262 create_verf++;
5263 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_EXCLUSIVE);
5264 /* insert 64 bit verifier */
5265 nfsm_chain_add_32(error, &nmreq, create_verf);
5266 nfsm_chain_add_32(error, &nmreq, create_verf);
5267 } else {
5268 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_UNCHECKED);
5269 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
5270 }
5271 }
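	/*
	 * For NFS_CREATE_EXCLUSIVE only a 64-bit verifier is sent (no
	 * attributes); the requested attributes are applied afterwards with
	 * a separate SETATTR (see the exclusive-create handling after nfsmout).
	 */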
b0d623f7 5272 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
6d2010ae 5273 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
b0d623f7
A
5274 numops--;
5275 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5276 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5277 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
6d2010ae 5278 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
b0d623f7
A
5279 numops--;
5280 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
5281 numops--;
5282 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 5283 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
b0d623f7
A
5284 nfsm_chain_build_done(error, &nmreq);
5285 nfsm_assert(error, (numops == 0), EPROTO);
0a7de745 5286 if (!error) {
b0d623f7 5287 error = busyerror = nfs_node_set_busy(dnp, thd);
0a7de745 5288 }
b0d623f7
A
5289 nfsmout_if(error);
5290
0a7de745 5291 if (create && !namedattrs) {
f427ee49 5292 nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
0a7de745 5293 }
b0d623f7 5294
6d2010ae 5295 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, NULL, &req);
b0d623f7 5296 if (!error) {
0a7de745 5297 if (create && !namedattrs) {
f427ee49 5298 nfs_dulookup_start(dul, dnp, ctx);
0a7de745 5299 }
b0d623f7
A
5300 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
5301 savedxid = xid;
5302 }
5303
0a7de745 5304 if (create && !namedattrs) {
f427ee49 5305 nfs_dulookup_finish(dul, dnp, ctx);
0a7de745 5306 }
b0d623f7 5307
0a7de745 5308 if ((lockerror = nfs_node_lock(dnp))) {
b0d623f7 5309 error = lockerror;
0a7de745 5310 }
b0d623f7
A
5311 nfsm_chain_skip_tag(error, &nmrep);
5312 nfsm_chain_get_32(error, &nmrep, numops);
5313 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5314 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
5315 nfsmout_if(error);
5316 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5317 nfs_owner_seqid_increment(noop, NULL, error);
5318 nfsm_chain_get_stateid(error, &nmrep, sid);
5319 nfsm_chain_check_change_info(error, &nmrep, dnp);
5320 nfsm_chain_get_32(error, &nmrep, rflags);
5321 bmlen = NFS_ATTR_BITMAP_LEN;
5322 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5323 nfsm_chain_get_32(error, &nmrep, delegation);
0a7de745 5324 if (!error) {
b0d623f7
A
5325 switch (delegation) {
5326 case NFS_OPEN_DELEGATE_NONE:
5327 break;
5328 case NFS_OPEN_DELEGATE_READ:
b0d623f7
A
5329 case NFS_OPEN_DELEGATE_WRITE:
5330 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5331 nfsm_chain_get_32(error, &nmrep, recall);
0a7de745 5332 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
6d2010ae 5333 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
0a7de745 5334 }
6d2010ae
A
5335 /* if we have any trouble accepting the ACE, just invalidate it */
5336 ace_type = ace_flags = ace_mask = len = 0;
5337 nfsm_chain_get_32(error, &nmrep, ace_type);
5338 nfsm_chain_get_32(error, &nmrep, ace_flags);
5339 nfsm_chain_get_32(error, &nmrep, ace_mask);
5340 nfsm_chain_get_32(error, &nmrep, len);
5341 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5342 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5343 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5344 if (!error && (len >= slen)) {
0a7de745
A
5345 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
5346 if (s) {
5347 slen = len + 1;
5348 } else {
6d2010ae 5349 ace.ace_flags = 0;
0a7de745 5350 }
6d2010ae 5351 }
0a7de745 5352 if (s) {
6d2010ae 5353 nfsm_chain_get_opaque(error, &nmrep, len, s);
0a7de745 5354 } else {
6d2010ae 5355 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
0a7de745 5356 }
6d2010ae
A
5357 if (!error && s) {
5358 s[len] = '\0';
0a7de745 5359 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
6d2010ae 5360 ace.ace_flags = 0;
0a7de745 5361 }
6d2010ae 5362 }
0a7de745 5363 if (error || !s) {
6d2010ae 5364 ace.ace_flags = 0;
0a7de745
A
5365 }
5366 if (s && (s != sbuf)) {
6d2010ae 5367 FREE(s, M_TEMP);
0a7de745 5368 }
b0d623f7
A
5369 break;
5370 default:
5371 error = EBADRPC;
5372 break;
5373 }
0a7de745 5374 }
b0d623f7 5375 /* At this point if we have no error, the object was created/opened. */
b0d623f7
A
5376 open_error = error;
5377 nfsmout_if(error);
0a7de745 5378 if (create && vap && !exclusive) {
b0d623f7 5379 nfs_vattr_set_supported(bitmap, vap);
0a7de745 5380 }
b0d623f7
A
5381 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5382 nfsmout_if(error);
f427ee49 5383 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
b0d623f7 5384 nfsmout_if(error);
f427ee49 5385 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
6d2010ae 5386 printf("nfs: open/create didn't return filehandle? %s\n", cnp->cn_nameptr);
2d21ac55
A
5387 error = EBADRPC;
5388 goto nfsmout;
5389 }
f427ee49 5390 if (!create && np && !NFS_CMPFH(np, fh->fh_data, fh->fh_len)) {
b0d623f7 5391 // XXX for the open case, what if fh doesn't match the vnode we think we're opening?
6d2010ae 5392 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
0a7de745 5393 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
6d2010ae 5394 NP(np, "nfs4_open_rpc: warning: file handle mismatch");
0a7de745 5395 }
b0d623f7 5396 }
2d21ac55
A
5397 /* directory attributes: if we don't get them, make sure to invalidate */
5398 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
5399 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 5400 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
0a7de745 5401 if (error) {
2d21ac55 5402 NATTRINVALIDATE(dnp);
0a7de745 5403 }
b0d623f7
A
5404 nfsmout_if(error);
5405
0a7de745 5406 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
b0d623f7 5407 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
0a7de745 5408 }
b0d623f7
A
5409
5410 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
5411 nfs_node_unlock(dnp);
5412 lockerror = ENOENT;
f427ee49
A
5413 NVATTR_CLEANUP(nvattr);
5414 error = nfs4_open_confirm_rpc(nmp, dnp, fh->fh_data, fh->fh_len, noop, sid, thd, cred, nvattr, &xid);
b0d623f7
A
5415 nfsmout_if(error);
5416 savedxid = xid;
0a7de745 5417 if ((lockerror = nfs_node_lock(dnp))) {
b0d623f7 5418 error = lockerror;
0a7de745 5419 }
b0d623f7
A
5420 }
5421
5422nfsmout:
5423 nfsm_chain_cleanup(&nmreq);
5424 nfsm_chain_cleanup(&nmrep);
5425
5426 if (!lockerror && create) {
5427 if (!open_error && (dnp->n_flag & NNEGNCENTRIES)) {
5428 dnp->n_flag &= ~NNEGNCENTRIES;
5429 cache_purge_negatives(dvp);
5430 }
5431 dnp->n_flag |= NMODIFIED;
5432 nfs_node_unlock(dnp);
5433 lockerror = ENOENT;
6d2010ae 5434 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
b0d623f7 5435 }
0a7de745 5436 if (!lockerror) {
b0d623f7 5437 nfs_node_unlock(dnp);
0a7de745 5438 }
f427ee49 5439 if (!error && !np && fh->fh_len) {
b0d623f7
A
5440 /* create the vnode with the filehandle and attributes */
5441 xid = savedxid;
f427ee49 5442 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &newnp);
0a7de745 5443 if (!error) {
b0d623f7 5444 newvp = NFSTOV(newnp);
0a7de745 5445 }
b0d623f7 5446 }
f427ee49 5447 NVATTR_CLEANUP(nvattr);
0a7de745 5448 if (!busyerror) {
b0d623f7 5449 nfs_node_clear_busy(dnp);
0a7de745 5450 }
b0d623f7 5451 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
0a7de745 5452 if (!np) {
b0d623f7 5453 np = newnp;
0a7de745 5454 }
b0d623f7
A
5455 if (!error && np && !recall) {
5456 /* stuff the delegation state in the node */
5457 lck_mtx_lock(&np->n_openlock);
5458 np->n_openflags &= ~N_DELEG_MASK;
5459 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5460 np->n_dstateid = dstateid;
6d2010ae
A
5461 np->n_dace = ace;
5462 if (np->n_dlink.tqe_next == NFSNOLIST) {
5463 lck_mtx_lock(&nmp->nm_lock);
0a7de745 5464 if (np->n_dlink.tqe_next == NFSNOLIST) {
6d2010ae 5465 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
0a7de745 5466 }
6d2010ae
A
5467 lck_mtx_unlock(&nmp->nm_lock);
5468 }
b0d623f7 5469 lck_mtx_unlock(&np->n_openlock);
6d2010ae
A
5470 } else {
5471 /* give the delegation back */
b0d623f7 5472 if (np) {
f427ee49 5473 if (NFS_CMPFH(np, fh->fh_data, fh->fh_len)) {
6d2010ae
A
5474 /* update delegation state and return it */
5475 lck_mtx_lock(&np->n_openlock);
5476 np->n_openflags &= ~N_DELEG_MASK;
5477 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5478 np->n_dstateid = dstateid;
5479 np->n_dace = ace;
5480 if (np->n_dlink.tqe_next == NFSNOLIST) {
5481 lck_mtx_lock(&nmp->nm_lock);
0a7de745 5482 if (np->n_dlink.tqe_next == NFSNOLIST) {
6d2010ae 5483 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
0a7de745 5484 }
6d2010ae
A
5485 lck_mtx_unlock(&nmp->nm_lock);
5486 }
5487 lck_mtx_unlock(&np->n_openlock);
5488 /* don't need to send a separate delegreturn for fh */
f427ee49 5489 fh->fh_len = 0;
6d2010ae
A
5490 }
5491 /* return np's current delegation */
5492 nfs4_delegation_return(np, 0, thd, cred);
b0d623f7 5493 }
f427ee49
A
5494 if (fh->fh_len) { /* return fh's delegation if it wasn't for np */
5495 nfs4_delegreturn_rpc(nmp, fh->fh_data, fh->fh_len, &dstateid, 0, thd, cred);
0a7de745 5496 }
b0d623f7
A
5497 }
5498 }
5499 if (error) {
5500 if (exclusive && (error == NFSERR_NOTSUPP)) {
5501 exclusive = 0;
5502 goto again;
5503 }
5504 if (newvp) {
5505 nfs_node_unlock(newnp);
5506 vnode_put(newvp);
5507 }
5508 } else if (create) {
5509 nfs_node_unlock(newnp);
5510 if (exclusive) {
5511 error = nfs4_setattr_rpc(newnp, vap, ctx);
5512 if (error && (gotuid || gotgid)) {
5513 /* it's possible the server didn't like our attempt to set IDs. */
5514 /* so, let's try it again without those */
5515 VATTR_CLEAR_ACTIVE(vap, va_uid);
5516 VATTR_CLEAR_ACTIVE(vap, va_gid);
5517 error = nfs4_setattr_rpc(newnp, vap, ctx);
5518 }
5519 }
0a7de745 5520 if (error) {
b0d623f7 5521 vnode_put(newvp);
0a7de745 5522 } else {
b0d623f7 5523 *vpp = newvp;
0a7de745 5524 }
b0d623f7
A
5525 }
5526 nfs_open_owner_clear_busy(noop);
f427ee49
A
5527 NFS_ZFREE(nfs_fhandle_zone, fh);
5528 NFS_ZFREE(nfs_req_zone, req);
5529 FREE(dul, M_TEMP);
5530 FREE(nvattr, M_TEMP);
0a7de745 5531 return error;
b0d623f7
A
5532}
5533
6d2010ae
A
5534
5535/*
5536 * Send an OPEN RPC to claim a delegated open for a file
5537 */
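/*
 * While holding a delegation the client can perform opens locally; when
 * the delegation is being returned or recalled, each of those opens has
 * to be made visible to the server.  The OPEN below uses claim type
 * NFS_CLAIM_DELEGATE_CUR, naming the delegation stateid and the file's
 * name in its parent directory, so the server converts the delegated
 * open into a normal one and hands back a regular open stateid.
 */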
5538int
5539nfs4_claim_delegated_open_rpc(
5540 struct nfs_open_file *nofp,
5541 int share_access,
5542 int share_deny,
5543 int flags)
5544{
5545 struct nfsmount *nmp;
5546 struct nfs_open_owner *noop = nofp->nof_owner;
f427ee49 5547 struct nfs_vattr *nvattr;
6d2010ae
A
5548 int error = 0, lockerror = ENOENT, status;
5549 int nfsvers, numops;
5550 u_int64_t xid;
5551 nfsnode_t np = nofp->nof_np;
5552 struct nfsm_chain nmreq, nmrep;
5553 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5554 uint32_t rflags = 0, delegation, recall = 0;
f427ee49 5555 fhandle_t *fh;
6d2010ae
A
5556 struct nfs_stateid dstateid;
5557 char sbuf[64], *s = sbuf;
5558 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5559 struct kauth_ace ace;
5560 vnode_t dvp = NULL;
5561 const char *vname = NULL;
5562 const char *name = NULL;
f427ee49 5563 uint32_t namelen;
6d2010ae
A
5564 char smallname[128];
5565 char *filename = NULL;
5566 struct nfsreq_secinfo_args si;
5567
5568 nmp = NFSTONMP(np);
0a7de745
A
5569 if (nfs_mount_gone(nmp)) {
5570 return ENXIO;
5571 }
f427ee49
A
5572 fh = zalloc(nfs_fhandle_zone);
5573 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
6d2010ae
A
5574 nfsvers = nmp->nm_vers;
5575
5576 nfs_node_lock_force(np);
5577 if ((vnode_vtype(NFSTOV(np)) != VDIR) && np->n_sillyrename) {
5578 /*
5579 * The node's been sillyrenamed, so we need to use
5580 * the sillyrename directory/name to do the open.
5581 */
5582 struct nfs_sillyrename *nsp = np->n_sillyrename;
5583 dvp = NFSTOV(nsp->nsr_dnp);
5584 if ((error = vnode_get(dvp))) {
cb323159 5585 dvp = NULLVP;
6d2010ae
A
5586 nfs_node_unlock(np);
5587 goto out;
5588 }
5589 name = nsp->nsr_name;
5590 } else {
5591 /*
5592 * [sigh] We can't trust VFS to get the parent right for named
5593 * attribute nodes. (It likes to reparent the nodes after we've
5594 * created them.) Luckily we can probably get the right parent
5595 * from the n_parent we have stashed away.
5596 */
5597 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
0a7de745 5598 (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
6d2010ae 5599 dvp = NULL;
0a7de745
A
5600 }
5601 if (!dvp) {
6d2010ae 5602 dvp = vnode_getparent(NFSTOV(np));
0a7de745 5603 }
6d2010ae
A
5604 vname = vnode_getname(NFSTOV(np));
5605 if (!dvp || !vname) {
0a7de745 5606 if (!error) {
6d2010ae 5607 error = EIO;
0a7de745 5608 }
6d2010ae
A
5609 nfs_node_unlock(np);
5610 goto out;
5611 }
5612 name = vname;
5613 }
5614 filename = &smallname[0];
5615 namelen = snprintf(filename, sizeof(smallname), "%s", name);
5616 if (namelen >= sizeof(smallname)) {
0a7de745 5617 MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
6d2010ae
A
5618 if (!filename) {
5619 error = ENOMEM;
3e170ce0 5620 nfs_node_unlock(np);
6d2010ae
A
5621 goto out;
5622 }
0a7de745 5623 snprintf(filename, namelen + 1, "%s", name);
6d2010ae
A
5624 }
5625 nfs_node_unlock(np);
5626
0a7de745 5627 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
3e170ce0 5628 goto out;
0a7de745 5629 }
f427ee49 5630 NVATTR_INIT(nvattr);
6d2010ae
A
5631 delegation = NFS_OPEN_DELEGATE_NONE;
5632 dstateid = np->n_dstateid;
5633 NFSREQ_SECINFO_SET(&si, VTONFS(dvp), NULL, 0, filename, namelen);
5634
5635 nfsm_chain_null(&nmreq);
5636 nfsm_chain_null(&nmrep);
5637
5638 // PUTFH, OPEN, GETATTR(FH)
5639 numops = 3;
5640 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
3e170ce0 5641 nfsm_chain_add_compound_header(error, &nmreq, "open_claim_d", nmp->nm_minor_vers, numops);
6d2010ae
A
5642 numops--;
5643 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5644 nfsm_chain_add_fh(error, &nmreq, nfsvers, VTONFS(dvp)->n_fhp, VTONFS(dvp)->n_fhsize);
5645 numops--;
5646 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5647 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5648 nfsm_chain_add_32(error, &nmreq, share_access);
5649 nfsm_chain_add_32(error, &nmreq, share_deny);
5650 // open owner: clientid + uid
5651 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5652 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5653 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
5654 // openflag4
5655 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5656 // open_claim4
5657 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_DELEGATE_CUR);
5658 nfsm_chain_add_stateid(error, &nmreq, &np->n_dstateid);
5659 nfsm_chain_add_name(error, &nmreq, filename, namelen, nmp);
5660 numops--;
5661 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5662 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5663 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5664 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5665 nfsm_chain_build_done(error, &nmreq);
5666 nfsm_assert(error, (numops == 0), EPROTO);
5667 nfsmout_if(error);
5668
5669 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
0a7de745 5670 noop->noo_cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
6d2010ae 5671
0a7de745 5672 if ((lockerror = nfs_node_lock(np))) {
6d2010ae 5673 error = lockerror;
0a7de745 5674 }
6d2010ae
A
5675 nfsm_chain_skip_tag(error, &nmrep);
5676 nfsm_chain_get_32(error, &nmrep, numops);
5677 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5678 nfsmout_if(error);
5679 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5680 nfs_owner_seqid_increment(noop, NULL, error);
5681 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5682 nfsm_chain_check_change_info(error, &nmrep, np);
5683 nfsm_chain_get_32(error, &nmrep, rflags);
5684 bmlen = NFS_ATTR_BITMAP_LEN;
5685 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5686 nfsm_chain_get_32(error, &nmrep, delegation);
0a7de745 5687 if (!error) {
6d2010ae
A
5688 switch (delegation) {
5689 case NFS_OPEN_DELEGATE_NONE:
5690 // if (!(np->n_openflags & N_DELEG_RETURN)) /* don't warn if delegation is being returned */
0a7de745 5691 // printf("nfs: open delegated claim didn't return a delegation %s\n", filename ? filename : "???");
6d2010ae
A
5692 break;
5693 case NFS_OPEN_DELEGATE_READ:
5694 case NFS_OPEN_DELEGATE_WRITE:
5695 if ((((np->n_openflags & N_DELEG_MASK) == N_DELEG_READ) &&
0a7de745 5696 (delegation == NFS_OPEN_DELEGATE_WRITE)) ||
6d2010ae 5697 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) &&
0a7de745 5698 (delegation == NFS_OPEN_DELEGATE_READ))) {
6d2010ae 5699 printf("nfs: open delegated claim returned a different delegation type! have %s got %s %s\n",
0a7de745
A
5700 ((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ? "W" : "R",
5701 (delegation == NFS_OPEN_DELEGATE_WRITE) ? "W" : "R", filename ? filename : "???");
5702 }
6d2010ae
A
5703 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5704 nfsm_chain_get_32(error, &nmrep, recall);
0a7de745 5705 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
6d2010ae 5706 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
0a7de745 5707 }
6d2010ae
A
5708 /* if we have any trouble accepting the ACE, just invalidate it */
5709 ace_type = ace_flags = ace_mask = len = 0;
5710 nfsm_chain_get_32(error, &nmrep, ace_type);
5711 nfsm_chain_get_32(error, &nmrep, ace_flags);
5712 nfsm_chain_get_32(error, &nmrep, ace_mask);
5713 nfsm_chain_get_32(error, &nmrep, len);
5714 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5715 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5716 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5717 if (!error && (len >= slen)) {
0a7de745
A
5718 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
5719 if (s) {
5720 slen = len + 1;
5721 } else {
6d2010ae 5722 ace.ace_flags = 0;
0a7de745 5723 }
6d2010ae 5724 }
0a7de745 5725 if (s) {
6d2010ae 5726 nfsm_chain_get_opaque(error, &nmrep, len, s);
0a7de745 5727 } else {
6d2010ae 5728 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
0a7de745 5729 }
6d2010ae
A
5730 if (!error && s) {
5731 s[len] = '\0';
0a7de745 5732 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
6d2010ae 5733 ace.ace_flags = 0;
0a7de745 5734 }
6d2010ae 5735 }
0a7de745 5736 if (error || !s) {
6d2010ae 5737 ace.ace_flags = 0;
0a7de745
A
5738 }
5739 if (s && (s != sbuf)) {
6d2010ae 5740 FREE(s, M_TEMP);
0a7de745 5741 }
6d2010ae
A
5742 if (!error) {
5743 /* stuff the latest delegation state in the node */
5744 lck_mtx_lock(&np->n_openlock);
5745 np->n_openflags &= ~N_DELEG_MASK;
5746 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5747 np->n_dstateid = dstateid;
5748 np->n_dace = ace;
5749 if (np->n_dlink.tqe_next == NFSNOLIST) {
5750 lck_mtx_lock(&nmp->nm_lock);
0a7de745 5751 if (np->n_dlink.tqe_next == NFSNOLIST) {
6d2010ae 5752 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
0a7de745 5753 }
6d2010ae
A
5754 lck_mtx_unlock(&nmp->nm_lock);
5755 }
5756 lck_mtx_unlock(&np->n_openlock);
5757 }
5758 break;
5759 default:
5760 error = EBADRPC;
5761 break;
5762 }
0a7de745 5763 }
6d2010ae
A
5764 nfsmout_if(error);
5765 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
f427ee49 5766 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
6d2010ae 5767 nfsmout_if(error);
f427ee49 5768 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
6d2010ae
A
5769 printf("nfs: open reclaim didn't return filehandle? %s\n", filename ? filename : "???");
5770 error = EBADRPC;
5771 goto nfsmout;
5772 }
f427ee49 5773 if (!NFS_CMPFH(np, fh->fh_data, fh->fh_len)) {
6d2010ae
A
5774 // XXX what if fh doesn't match the vnode we think we're re-opening?
5775 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
0a7de745 5776 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
6d2010ae 5777 printf("nfs4_claim_delegated_open_rpc: warning: file handle mismatch %s\n", filename ? filename : "???");
0a7de745 5778 }
6d2010ae 5779 }
f427ee49 5780 error = nfs_loadattrcache(np, nvattr, &xid, 1);
6d2010ae 5781 nfsmout_if(error);
0a7de745 5782 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
6d2010ae 5783 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
0a7de745 5784 }
6d2010ae 5785nfsmout:
f427ee49
A
5786 NVATTR_CLEANUP(nvattr);
5787 FREE(nvattr, M_TEMP);
5788 NFS_ZFREE(nfs_fhandle_zone, fh);
6d2010ae
A
5789 nfsm_chain_cleanup(&nmreq);
5790 nfsm_chain_cleanup(&nmrep);
0a7de745 5791 if (!lockerror) {
6d2010ae 5792 nfs_node_unlock(np);
0a7de745 5793 }
6d2010ae
A
5794 nfs_open_owner_clear_busy(noop);
5795 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5796 if (recall) {
5797 /*
5798 * We're making a delegated claim.
5799 * Don't return the delegation here in case we have more to claim.
5800 * Just make sure it's queued up to be returned.
5801 */
5802 nfs4_delegation_return_enqueue(np);
5803 }
5804 }
5805out:
5806 // if (!error)
0a7de745
A
5807 // printf("nfs: open claim delegated (%d, %d) succeeded for %s\n", share_access, share_deny, filename ? filename : "???");
5808 if (filename && (filename != &smallname[0])) {
6d2010ae 5809 FREE(filename, M_TEMP);
0a7de745
A
5810 }
5811 if (vname) {
6d2010ae 5812 vnode_putname(vname);
0a7de745
A
5813 }
5814 if (dvp != NULLVP) {
6d2010ae 5815 vnode_put(dvp);
0a7de745
A
5816 }
5817 return error;
6d2010ae
A
5818}
5819
b0d623f7
A
5820/*
5821 * Send an OPEN RPC to reclaim an open file.
5822 */
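/*
 * This is used during state recovery after a server restart: the OPEN is
 * sent with claim type NFS_CLAIM_PREVIOUS (and R_RECOVER) during the
 * server's grace period to re-establish an open the client held before
 * the reboot, using the file handle rather than a directory/name pair.
 */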
5823int
5824nfs4_open_reclaim_rpc(
5825 struct nfs_open_file *nofp,
5826 int share_access,
5827 int share_deny)
5828{
5829 struct nfsmount *nmp;
5830 struct nfs_open_owner *noop = nofp->nof_owner;
f427ee49 5831 struct nfs_vattr *nvattr;
b0d623f7
A
5832 int error = 0, lockerror = ENOENT, status;
5833 int nfsvers, numops;
5834 u_int64_t xid;
5835 nfsnode_t np = nofp->nof_np;
5836 struct nfsm_chain nmreq, nmrep;
5837 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6d2010ae 5838 uint32_t rflags = 0, delegation, recall = 0;
f427ee49 5839 fhandle_t *fh;
b0d623f7 5840 struct nfs_stateid dstateid;
6d2010ae
A
5841 char sbuf[64], *s = sbuf;
5842 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5843 struct kauth_ace ace;
5844 struct nfsreq_secinfo_args si;
b0d623f7
A
5845
5846 nmp = NFSTONMP(np);
0a7de745
A
5847 if (nfs_mount_gone(nmp)) {
5848 return ENXIO;
5849 }
b0d623f7
A
5850 nfsvers = nmp->nm_vers;
5851
0a7de745
A
5852 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
5853 return error;
5854 }
b0d623f7 5855
f427ee49
A
5856 fh = zalloc(nfs_fhandle_zone);
5857 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
5858 NVATTR_INIT(nvattr);
b0d623f7 5859 delegation = NFS_OPEN_DELEGATE_NONE;
6d2010ae
A
5860 dstateid = np->n_dstateid;
5861 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
b0d623f7
A
5862
5863 nfsm_chain_null(&nmreq);
5864 nfsm_chain_null(&nmrep);
5865
5866 // PUTFH, OPEN, GETATTR(FH)
5867 numops = 3;
5868 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
3e170ce0 5869 nfsm_chain_add_compound_header(error, &nmreq, "open_reclaim", nmp->nm_minor_vers, numops);
b0d623f7
A
5870 numops--;
5871 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5872 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5873 numops--;
5874 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5875 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5876 nfsm_chain_add_32(error, &nmreq, share_access);
5877 nfsm_chain_add_32(error, &nmreq, share_deny);
5878 // open owner: clientid + uid
5879 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5880 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5881 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
5882 // openflag4
5883 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5884 // open_claim4
5885 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_PREVIOUS);
5886 delegation = (np->n_openflags & N_DELEG_READ) ? NFS_OPEN_DELEGATE_READ :
0a7de745
A
5887 (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE :
5888 NFS_OPEN_DELEGATE_NONE;
b0d623f7
A
5889 nfsm_chain_add_32(error, &nmreq, delegation);
5890 delegation = NFS_OPEN_DELEGATE_NONE;
5891 numops--;
5892 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5893 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5894 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
6d2010ae 5895 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
b0d623f7
A
5896 nfsm_chain_build_done(error, &nmreq);
5897 nfsm_assert(error, (numops == 0), EPROTO);
5898 nfsmout_if(error);
5899
6d2010ae 5900 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
0a7de745 5901 noop->noo_cred, &si, R_RECOVER | R_NOINTR, &nmrep, &xid, &status);
b0d623f7 5902
0a7de745 5903 if ((lockerror = nfs_node_lock(np))) {
b0d623f7 5904 error = lockerror;
0a7de745 5905 }
b0d623f7
A
5906 nfsm_chain_skip_tag(error, &nmrep);
5907 nfsm_chain_get_32(error, &nmrep, numops);
5908 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5909 nfsmout_if(error);
5910 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5911 nfs_owner_seqid_increment(noop, NULL, error);
5912 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5913 nfsm_chain_check_change_info(error, &nmrep, np);
5914 nfsm_chain_get_32(error, &nmrep, rflags);
5915 bmlen = NFS_ATTR_BITMAP_LEN;
5916 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5917 nfsm_chain_get_32(error, &nmrep, delegation);
0a7de745 5918 if (!error) {
b0d623f7
A
5919 switch (delegation) {
5920 case NFS_OPEN_DELEGATE_NONE:
6d2010ae
A
5921 if (np->n_openflags & N_DELEG_MASK) {
5922 /*
5923 * Hey! We were supposed to get our delegation back even
5924 * if it was getting immediately recalled. Bad server!
5925 *
5926 * Just try to return the existing delegation.
5927 */
5928 // NP(np, "nfs: open reclaim didn't return delegation?");
5929 delegation = (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE : NFS_OPEN_DELEGATE_READ;
5930 recall = 1;
b0d623f7
A
5931 }
5932 break;
6d2010ae 5933 case NFS_OPEN_DELEGATE_READ:
b0d623f7
A
5934 case NFS_OPEN_DELEGATE_WRITE:
5935 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5936 nfsm_chain_get_32(error, &nmrep, recall);
0a7de745 5937 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
6d2010ae 5938 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
0a7de745 5939 }
6d2010ae
A
5940 /* if we have any trouble accepting the ACE, just invalidate it */
5941 ace_type = ace_flags = ace_mask = len = 0;
5942 nfsm_chain_get_32(error, &nmrep, ace_type);
5943 nfsm_chain_get_32(error, &nmrep, ace_flags);
5944 nfsm_chain_get_32(error, &nmrep, ace_mask);
5945 nfsm_chain_get_32(error, &nmrep, len);
5946 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5947 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5948 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5949 if (!error && (len >= slen)) {
0a7de745
A
5950 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
5951 if (s) {
5952 slen = len + 1;
5953 } else {
6d2010ae 5954 ace.ace_flags = 0;
0a7de745 5955 }
6d2010ae 5956 }
0a7de745 5957 if (s) {
6d2010ae 5958 nfsm_chain_get_opaque(error, &nmrep, len, s);
0a7de745 5959 } else {
6d2010ae 5960 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
0a7de745 5961 }
6d2010ae
A
5962 if (!error && s) {
5963 s[len] = '\0';
0a7de745 5964 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
6d2010ae 5965 ace.ace_flags = 0;
0a7de745 5966 }
6d2010ae 5967 }
0a7de745 5968 if (error || !s) {
6d2010ae 5969 ace.ace_flags = 0;
0a7de745
A
5970 }
5971 if (s && (s != sbuf)) {
6d2010ae 5972 FREE(s, M_TEMP);
0a7de745 5973 }
b0d623f7
A
5974 if (!error) {
5975 /* stuff the delegation state in the node */
5976 lck_mtx_lock(&np->n_openlock);
5977 np->n_openflags &= ~N_DELEG_MASK;
6d2010ae 5978 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
b0d623f7 5979 np->n_dstateid = dstateid;
6d2010ae
A
5980 np->n_dace = ace;
5981 if (np->n_dlink.tqe_next == NFSNOLIST) {
5982 lck_mtx_lock(&nmp->nm_lock);
0a7de745 5983 if (np->n_dlink.tqe_next == NFSNOLIST) {
6d2010ae 5984 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
0a7de745 5985 }
6d2010ae
A
5986 lck_mtx_unlock(&nmp->nm_lock);
5987 }
b0d623f7
A
5988 lck_mtx_unlock(&np->n_openlock);
5989 }
5990 break;
5991 default:
5992 error = EBADRPC;
5993 break;
5994 }
0a7de745 5995 }
b0d623f7
A
5996 nfsmout_if(error);
5997 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
f427ee49 5998 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
b0d623f7 5999 nfsmout_if(error);
f427ee49 6000 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
6d2010ae 6001 NP(np, "nfs: open reclaim didn't return filehandle?");
b0d623f7
A
6002 error = EBADRPC;
6003 goto nfsmout;
6004 }
f427ee49 6005 if (!NFS_CMPFH(np, fh->fh_data, fh->fh_len)) {
b0d623f7 6006 // XXX what if fh doesn't match the vnode we think we're re-opening?
6d2010ae
A
6007 // That should be pretty hard in this case, given that we are doing
6008 // the open reclaim using the file handle (and not a dir/name pair).
6009 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
0a7de745 6010 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
6d2010ae 6011 NP(np, "nfs4_open_reclaim_rpc: warning: file handle mismatch");
0a7de745 6012 }
b0d623f7 6013 }
f427ee49 6014 error = nfs_loadattrcache(np, nvattr, &xid, 1);
b0d623f7 6015 nfsmout_if(error);
0a7de745 6016 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
b0d623f7 6017 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
0a7de745 6018 }
b0d623f7 6019nfsmout:
6d2010ae 6020 // if (!error)
0a7de745 6021 // NP(np, "nfs: open reclaim (%d, %d) succeeded", share_access, share_deny);
f427ee49
A
6022 NVATTR_CLEANUP(nvattr);
6023 FREE(nvattr, M_TEMP);
6024 NFS_ZFREE(nfs_fhandle_zone, fh);
b0d623f7
A
6025 nfsm_chain_cleanup(&nmreq);
6026 nfsm_chain_cleanup(&nmrep);
0a7de745 6027 if (!lockerror) {
b0d623f7 6028 nfs_node_unlock(np);
0a7de745 6029 }
b0d623f7
A
6030 nfs_open_owner_clear_busy(noop);
6031 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
0a7de745 6032 if (recall) {
6d2010ae 6033 nfs4_delegation_return_enqueue(np);
0a7de745 6034 }
b0d623f7 6035 }
0a7de745 6036 return error;
b0d623f7 6037}
2d21ac55 6038
b0d623f7
A
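/*
 * Send an OPEN_DOWNGRADE RPC to reduce an open's share access/deny modes
 * to the modes still in use (nofp->nof_access / nofp->nof_deny).
 */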
6039int
6040nfs4_open_downgrade_rpc(
6041 nfsnode_t np,
6042 struct nfs_open_file *nofp,
6043 vfs_context_t ctx)
6044{
6045 struct nfs_open_owner *noop = nofp->nof_owner;
6046 struct nfsmount *nmp;
6047 int error, lockerror = ENOENT, status, nfsvers, numops;
6048 struct nfsm_chain nmreq, nmrep;
6049 u_int64_t xid;
6d2010ae 6050 struct nfsreq_secinfo_args si;
2d21ac55 6051
b0d623f7 6052 nmp = NFSTONMP(np);
0a7de745
A
6053 if (nfs_mount_gone(nmp)) {
6054 return ENXIO;
6055 }
b0d623f7
A
6056 nfsvers = nmp->nm_vers;
6057
0a7de745
A
6058 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
6059 return error;
6060 }
b0d623f7 6061
6d2010ae 6062 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
b0d623f7
A
6063 nfsm_chain_null(&nmreq);
6064 nfsm_chain_null(&nmrep);
6065
6066 // PUTFH, OPEN_DOWNGRADE, GETATTR
6067 numops = 3;
6068 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
3e170ce0 6069 nfsm_chain_add_compound_header(error, &nmreq, "open_downgrd", nmp->nm_minor_vers, numops);
b0d623f7
A
6070 numops--;
6071 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6072 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
6073 numops--;
6074 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_DOWNGRADE);
6075 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
6076 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
6077 nfsm_chain_add_32(error, &nmreq, nofp->nof_access);
6078 nfsm_chain_add_32(error, &nmreq, nofp->nof_deny);
6079 numops--;
6080 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 6081 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
b0d623f7
A
6082 nfsm_chain_build_done(error, &nmreq);
6083 nfsm_assert(error, (numops == 0), EPROTO);
6084 nfsmout_if(error);
6d2010ae 6085 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
0a7de745
A
6086 vfs_context_thread(ctx), vfs_context_ucred(ctx),
6087 &si, R_NOINTR, &nmrep, &xid, &status);
b0d623f7 6088
0a7de745 6089 if ((lockerror = nfs_node_lock(np))) {
b0d623f7 6090 error = lockerror;
0a7de745 6091 }
b0d623f7
A
6092 nfsm_chain_skip_tag(error, &nmrep);
6093 nfsm_chain_get_32(error, &nmrep, numops);
6094 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
2d21ac55 6095 nfsmout_if(error);
b0d623f7
A
6096 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_DOWNGRADE);
6097 nfs_owner_seqid_increment(noop, NULL, error);
6098 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
6099 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 6100 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
b0d623f7 6101nfsmout:
0a7de745 6102 if (!lockerror) {
b0d623f7 6103 nfs_node_unlock(np);
0a7de745 6104 }
b0d623f7 6105 nfs_open_owner_clear_busy(noop);
2d21ac55
A
6106 nfsm_chain_cleanup(&nmreq);
6107 nfsm_chain_cleanup(&nmrep);
0a7de745 6108 return error;
b0d623f7 6109}
2d21ac55 6110
b0d623f7
A
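/*
 * Send a CLOSE RPC to release the open state held by this open file,
 * bumping the open owner's seqid and updating the stateid from the reply.
 */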
6111int
6112nfs4_close_rpc(
6113 nfsnode_t np,
6114 struct nfs_open_file *nofp,
6115 thread_t thd,
6116 kauth_cred_t cred,
6d2010ae 6117 int flags)
b0d623f7
A
6118{
6119 struct nfs_open_owner *noop = nofp->nof_owner;
6120 struct nfsmount *nmp;
6121 int error, lockerror = ENOENT, status, nfsvers, numops;
6122 struct nfsm_chain nmreq, nmrep;
6123 u_int64_t xid;
6d2010ae 6124 struct nfsreq_secinfo_args si;
b0d623f7
A
6125
6126 nmp = NFSTONMP(np);
0a7de745
A
6127 if (nfs_mount_gone(nmp)) {
6128 return ENXIO;
6129 }
b0d623f7
A
6130 nfsvers = nmp->nm_vers;
6131
0a7de745
A
6132 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
6133 return error;
6134 }
b0d623f7 6135
6d2010ae 6136 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
b0d623f7
A
6137 nfsm_chain_null(&nmreq);
6138 nfsm_chain_null(&nmrep);
6139
6d2010ae 6140 // PUTFH, CLOSE, GETATTR
b0d623f7
A
6141 numops = 3;
6142 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
3e170ce0 6143 nfsm_chain_add_compound_header(error, &nmreq, "close", nmp->nm_minor_vers, numops);
2d21ac55
A
6144 numops--;
6145 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
b0d623f7 6146 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
2d21ac55
A
6147 numops--;
6148 nfsm_chain_add_32(error, &nmreq, NFS_OP_CLOSE);
b0d623f7
A
6149 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
6150 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
6151 numops--;
6152 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 6153 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
2d21ac55
A
6154 nfsm_chain_build_done(error, &nmreq);
6155 nfsm_assert(error, (numops == 0), EPROTO);
6156 nfsmout_if(error);
0a7de745 6157 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
2d21ac55 6158
0a7de745 6159 if ((lockerror = nfs_node_lock(np))) {
b0d623f7 6160 error = lockerror;
0a7de745 6161 }
2d21ac55
A
6162 nfsm_chain_skip_tag(error, &nmrep);
6163 nfsm_chain_get_32(error, &nmrep, numops);
6164 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
b0d623f7 6165 nfsmout_if(error);
2d21ac55 6166 nfsm_chain_op_check(error, &nmrep, NFS_OP_CLOSE);
b0d623f7
A
6167 nfs_owner_seqid_increment(noop, NULL, error);
6168 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
6169 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 6170 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
b0d623f7 6171nfsmout:
0a7de745 6172 if (!lockerror) {
b0d623f7 6173 nfs_node_unlock(np);
0a7de745 6174 }
b0d623f7
A
6175 nfs_open_owner_clear_busy(noop);
6176 nfsm_chain_cleanup(&nmreq);
6177 nfsm_chain_cleanup(&nmrep);
0a7de745 6178 return error;
b0d623f7
A
6179}
6180
6181
b0d623f7 6182/*
6d2010ae 6183 * Claim the delegated open combinations this open file holds.
b0d623f7
A
6184 */
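/*
 * In outline: the deny-both combinations are claimed first, then the deny-write
 * ones, and finally the plain deny-none opens.  For the deny-none opens, a failed
 * claim may fall back to simply reopening the file (NFS_OPEN_FILE_REOPEN) as long
 * as no locks are held; a lost delegation also queues the node for delegation
 * return and kicks off state recovery.  Any delegated locks are then reclaimed
 * with real lock requests via nf_setlock_rpc().
 */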
6185int
6d2010ae 6186nfs4_claim_delegated_state_for_open_file(struct nfs_open_file *nofp, int flags)
b0d623f7 6187{
6d2010ae
A
6188 struct nfs_open_owner *noop = nofp->nof_owner;
6189 struct nfs_lock_owner *nlop;
6190 struct nfs_file_lock *nflp, *nextnflp;
b0d623f7 6191 struct nfsmount *nmp;
6d2010ae 6192 int error = 0, reopen = 0;
b0d623f7 6193
6d2010ae
A
6194 if (nofp->nof_d_rw_drw) {
6195 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_BOTH, flags);
6196 if (!error) {
6197 lck_mtx_lock(&nofp->nof_lock);
6198 nofp->nof_rw_drw += nofp->nof_d_rw_drw;
6199 nofp->nof_d_rw_drw = 0;
6200 lck_mtx_unlock(&nofp->nof_lock);
6201 }
b0d623f7 6202 }
6d2010ae
A
6203 if (!error && nofp->nof_d_w_drw) {
6204 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_BOTH, flags);
6205 if (!error) {
6206 lck_mtx_lock(&nofp->nof_lock);
6207 nofp->nof_w_drw += nofp->nof_d_w_drw;
6208 nofp->nof_d_w_drw = 0;
6209 lck_mtx_unlock(&nofp->nof_lock);
6210 }
b0d623f7 6211 }
6d2010ae
A
6212 if (!error && nofp->nof_d_r_drw) {
6213 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_BOTH, flags);
6214 if (!error) {
6215 lck_mtx_lock(&nofp->nof_lock);
6216 nofp->nof_r_drw += nofp->nof_d_r_drw;
6217 nofp->nof_d_r_drw = 0;
6218 lck_mtx_unlock(&nofp->nof_lock);
6219 }
6220 }
6221 if (!error && nofp->nof_d_rw_dw) {
6222 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_WRITE, flags);
6223 if (!error) {
6224 lck_mtx_lock(&nofp->nof_lock);
6225 nofp->nof_rw_dw += nofp->nof_d_rw_dw;
6226 nofp->nof_d_rw_dw = 0;
6227 lck_mtx_unlock(&nofp->nof_lock);
6228 }
6229 }
6230 if (!error && nofp->nof_d_w_dw) {
6231 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_WRITE, flags);
6232 if (!error) {
6233 lck_mtx_lock(&nofp->nof_lock);
6234 nofp->nof_w_dw += nofp->nof_d_w_dw;
6235 nofp->nof_d_w_dw = 0;
6236 lck_mtx_unlock(&nofp->nof_lock);
6237 }
6238 }
6239 if (!error && nofp->nof_d_r_dw) {
6240 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_WRITE, flags);
6241 if (!error) {
6242 lck_mtx_lock(&nofp->nof_lock);
6243 nofp->nof_r_dw += nofp->nof_d_r_dw;
6244 nofp->nof_d_r_dw = 0;
6245 lck_mtx_unlock(&nofp->nof_lock);
6246 }
6247 }
6248 /* non-deny-mode opens may be reopened if no locks are held */
6249 if (!error && nofp->nof_d_rw) {
6250 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, flags);
6251 /* for some errors, we should just try reopening the file */
0a7de745 6252 if (nfs_mount_state_error_delegation_lost(error)) {
6d2010ae 6253 reopen = error;
0a7de745 6254 }
6d2010ae
A
6255 if (!error || reopen) {
6256 lck_mtx_lock(&nofp->nof_lock);
6257 nofp->nof_rw += nofp->nof_d_rw;
6258 nofp->nof_d_rw = 0;
6259 lck_mtx_unlock(&nofp->nof_lock);
6260 }
6261 }
6262 /* if we've already set reopen, we should move these other two opens from delegated to not delegated */
6263 if ((!error || reopen) && nofp->nof_d_w) {
6264 if (!error) {
6265 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE, flags);
6266 /* for some errors, we should just try reopening the file */
0a7de745 6267 if (nfs_mount_state_error_delegation_lost(error)) {
6d2010ae 6268 reopen = error;
0a7de745 6269 }
6d2010ae
A
6270 }
6271 if (!error || reopen) {
6272 lck_mtx_lock(&nofp->nof_lock);
6273 nofp->nof_w += nofp->nof_d_w;
6274 nofp->nof_d_w = 0;
6275 lck_mtx_unlock(&nofp->nof_lock);
6276 }
6277 }
6278 if ((!error || reopen) && nofp->nof_d_r) {
6279 if (!error) {
6280 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, flags);
6281 /* for some errors, we should just try reopening the file */
0a7de745 6282 if (nfs_mount_state_error_delegation_lost(error)) {
6d2010ae 6283 reopen = error;
0a7de745 6284 }
6d2010ae
A
6285 }
6286 if (!error || reopen) {
6287 lck_mtx_lock(&nofp->nof_lock);
6288 nofp->nof_r += nofp->nof_d_r;
6289 nofp->nof_d_r = 0;
6290 lck_mtx_unlock(&nofp->nof_lock);
6291 }
6292 }
6293
6294 if (reopen) {
6295 /*
 6296 * Any problems with the delegation probably indicate that we
6297 * should review/return all of our current delegation state.
6298 */
6299 if ((nmp = NFSTONMP(nofp->nof_np))) {
6300 nfs4_delegation_return_enqueue(nofp->nof_np);
6301 lck_mtx_lock(&nmp->nm_lock);
6302 nfs_need_recover(nmp, NFSERR_EXPIRED);
6303 lck_mtx_unlock(&nmp->nm_lock);
6304 }
6305 if (reopen && (nfs_check_for_locks(noop, nofp) == 0)) {
6306 /* just reopen the file on next access */
6307 NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, need reopen, %d",
0a7de745 6308 reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6d2010ae
A
6309 lck_mtx_lock(&nofp->nof_lock);
6310 nofp->nof_flags |= NFS_OPEN_FILE_REOPEN;
6311 lck_mtx_unlock(&nofp->nof_lock);
0a7de745 6312 return 0;
6d2010ae 6313 }
0a7de745 6314 if (reopen) {
6d2010ae 6315 NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, locks prevent reopen, %d",
0a7de745
A
6316 reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6317 }
6d2010ae
A
6318 }
6319
6320 if (!error && ((nmp = NFSTONMP(nofp->nof_np)))) {
6321 /* claim delegated locks */
6322 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
0a7de745 6323 if (nlop->nlo_open_owner != noop) {
6d2010ae 6324 continue;
0a7de745 6325 }
6d2010ae
A
6326 TAILQ_FOREACH_SAFE(nflp, &nlop->nlo_locks, nfl_lolink, nextnflp) {
6327 /* skip dead & blocked lock requests (shouldn't be any in the held lock list) */
0a7de745 6328 if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) {
6d2010ae 6329 continue;
0a7de745 6330 }
6d2010ae 6331 /* skip non-delegated locks */
0a7de745 6332 if (!(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
6d2010ae 6333 continue;
0a7de745 6334 }
6d2010ae
A
6335 error = nmp->nm_funcs->nf_setlock_rpc(nofp->nof_np, nofp, nflp, 0, flags, current_thread(), noop->noo_cred);
6336 if (error) {
6337 NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) failed %d, %d",
0a7de745 6338 nflp->nfl_start, nflp->nfl_end, error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6d2010ae
A
6339 break;
6340 }
6341 // else {
0a7de745
A
6342 // NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) succeeded, %d",
6343 // nflp->nfl_start, nflp->nfl_end, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6d2010ae
A
6344 // }
6345 }
0a7de745 6346 if (error) {
6d2010ae 6347 break;
0a7de745 6348 }
6d2010ae
A
6349 }
6350 }
6351
0a7de745
A
6352 if (!error) { /* all state claimed successfully! */
6353 return 0;
6354 }
6d2010ae
A
6355
 6356 /* restart if it looks like more of a problem than just losing the delegation */
6357 if (!nfs_mount_state_error_delegation_lost(error) &&
6358 ((error == ETIMEDOUT) || nfs_mount_state_error_should_restart(error))) {
6359 NP(nofp->nof_np, "nfs delegated lock claim error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 6360 if ((error == ETIMEDOUT) && ((nmp = NFSTONMP(nofp->nof_np)))) {
6d2010ae 6361 nfs_need_reconnect(nmp);
0a7de745
A
6362 }
6363 return error;
b0d623f7 6364 }
6d2010ae 6365
0a7de745 6366 /* delegated state lost (once held but now not claimable) */
6d2010ae
A
6367 NP(nofp->nof_np, "nfs delegated state claim error %d, state lost, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6368
6369 /*
 6370 * Any problems with the delegation probably indicate that we
6371 * should review/return all of our current delegation state.
6372 */
6373 if ((nmp = NFSTONMP(nofp->nof_np))) {
6374 nfs4_delegation_return_enqueue(nofp->nof_np);
6375 lck_mtx_lock(&nmp->nm_lock);
6376 nfs_need_recover(nmp, NFSERR_EXPIRED);
6377 lck_mtx_unlock(&nmp->nm_lock);
6378 }
6379
6380 /* revoke all open file state */
6381 nfs_revoke_open_state_for_node(nofp->nof_np);
6382
0a7de745 6383 return error;
6d2010ae 6384}
cb323159 6385#endif /* CONFIG_NFS4*/
6d2010ae
A
6386
6387/*
6388 * Release all open state for the given node.
6389 */
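/*
 * Two passes over the node's state: first walk n_locks, sending an unlock for
 * each lock the server actually knows about (delegated locks are skipped) and
 * marking it dead; then walk n_opens, marking each open NFS_OPEN_FILE_LOST and,
 * for NFSv4 mounts that are still reachable, sending a best-effort CLOSE.
 */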
6390void
6391nfs_release_open_state_for_node(nfsnode_t np, int force)
6392{
6393 struct nfsmount *nmp = NFSTONMP(np);
6394 struct nfs_open_file *nofp;
6395 struct nfs_file_lock *nflp, *nextnflp;
6396
6397 /* drop held locks */
6398 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
6399 /* skip dead & blocked lock requests */
0a7de745 6400 if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) {
6d2010ae 6401 continue;
0a7de745 6402 }
6d2010ae 6403 /* send an unlock if not a delegated lock */
0a7de745 6404 if (!force && nmp && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
6d2010ae 6405 nmp->nm_funcs->nf_unlock_rpc(np, nflp->nfl_owner, F_WRLCK, nflp->nfl_start, nflp->nfl_end, R_RECOVER,
0a7de745
A
6406 NULL, nflp->nfl_owner->nlo_open_owner->noo_cred);
6407 }
6d2010ae
A
6408 /* kill/remove the lock */
6409 lck_mtx_lock(&np->n_openlock);
6410 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
6411 lck_mtx_lock(&nflp->nfl_owner->nlo_lock);
6412 TAILQ_REMOVE(&nflp->nfl_owner->nlo_locks, nflp, nfl_lolink);
6413 lck_mtx_unlock(&nflp->nfl_owner->nlo_lock);
6414 if (nflp->nfl_blockcnt) {
6415 /* wake up anyone blocked on this lock */
6416 wakeup(nflp);
6417 } else {
6418 /* remove nflp from lock list and destroy */
6419 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
6420 nfs_file_lock_destroy(nflp);
2d21ac55 6421 }
6d2010ae
A
6422 lck_mtx_unlock(&np->n_openlock);
6423 }
6424
6425 lck_mtx_lock(&np->n_openlock);
6426
6427 /* drop all opens */
6428 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
0a7de745 6429 if (nofp->nof_flags & NFS_OPEN_FILE_LOST) {
6d2010ae 6430 continue;
0a7de745 6431 }
6d2010ae
A
6432 /* mark open state as lost */
6433 lck_mtx_lock(&nofp->nof_lock);
6434 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
6435 nofp->nof_flags |= NFS_OPEN_FILE_LOST;
0a7de745 6436
6d2010ae 6437 lck_mtx_unlock(&nofp->nof_lock);
cb323159 6438#if CONFIG_NFS4
0a7de745 6439 if (!force && nmp && (nmp->nm_vers >= NFS_VER4)) {
6d2010ae 6440 nfs4_close_rpc(np, nofp, NULL, nofp->nof_owner->noo_cred, R_RECOVER);
0a7de745 6441 }
cb323159 6442#endif
6d2010ae
A
6443 }
6444
6445 lck_mtx_unlock(&np->n_openlock);
6446}
6447
6448/*
6449 * State for a node has been lost, drop it, and revoke the node.
6450 * Attempt to return any state if possible in case the server
6451 * might somehow think we hold it.
6452 */
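/*
 * Concretely: set NREVOKE on the node, release all of its open state, then flag
 * the mount with NFSSTA_REVOKE and wake the socket thread so it runs the revoke
 * scan.
 */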
6453void
6454nfs_revoke_open_state_for_node(nfsnode_t np)
6455{
6456 struct nfsmount *nmp;
6457
6458 /* mark node as needing to be revoked */
6459 nfs_node_lock_force(np);
0a7de745 6460 if (np->n_flag & NREVOKE) { /* already revoked? */
6d2010ae
A
6461 NP(np, "nfs_revoke_open_state_for_node(): already revoked");
6462 nfs_node_unlock(np);
6463 return;
6464 }
6465 np->n_flag |= NREVOKE;
6466 nfs_node_unlock(np);
6467
6468 nfs_release_open_state_for_node(np, 0);
6469 NP(np, "nfs: state lost for %p 0x%x", np, np->n_flag);
6470
6471 /* mark mount as needing a revoke scan and have the socket thread do it. */
6472 if ((nmp = NFSTONMP(np))) {
6473 lck_mtx_lock(&nmp->nm_lock);
6474 nmp->nm_state |= NFSSTA_REVOKE;
6475 nfs_mount_sock_thread_wake(nmp);
6476 lck_mtx_unlock(&nmp->nm_lock);
6477 }
6478}
6479
cb323159 6480#if CONFIG_NFS4
6d2010ae
A
6481/*
6482 * Claim the delegated open combinations that each of this node's open files hold.
6483 */
6484int
6485nfs4_claim_delegated_state_for_node(nfsnode_t np, int flags)
6486{
6487 struct nfs_open_file *nofp;
6488 int error = 0;
6489
6490 lck_mtx_lock(&np->n_openlock);
6491
6492 /* walk the open file list looking for opens with delegated state to claim */
6493restart:
6494 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
6495 if (!nofp->nof_d_rw_drw && !nofp->nof_d_w_drw && !nofp->nof_d_r_drw &&
6496 !nofp->nof_d_rw_dw && !nofp->nof_d_w_dw && !nofp->nof_d_r_dw &&
0a7de745 6497 !nofp->nof_d_rw && !nofp->nof_d_w && !nofp->nof_d_r) {
6d2010ae 6498 continue;
0a7de745 6499 }
6d2010ae
A
6500 lck_mtx_unlock(&np->n_openlock);
6501 error = nfs4_claim_delegated_state_for_open_file(nofp, flags);
6502 lck_mtx_lock(&np->n_openlock);
0a7de745 6503 if (error) {
6d2010ae 6504 break;
0a7de745 6505 }
6d2010ae
A
6506 goto restart;
6507 }
6508
6509 lck_mtx_unlock(&np->n_openlock);
6510
0a7de745 6511 return error;
6d2010ae
A
6512}
6513
6514/*
 6515 * Mark a node as needing to have its delegation returned.
6516 * Queue it up on the delegation return queue.
6517 * Make sure the thread is running.
6518 */
6519void
6520nfs4_delegation_return_enqueue(nfsnode_t np)
6521{
6522 struct nfsmount *nmp;
6523
6524 nmp = NFSTONMP(np);
0a7de745 6525 if (nfs_mount_gone(nmp)) {
6d2010ae 6526 return;
0a7de745 6527 }
6d2010ae
A
6528
6529 lck_mtx_lock(&np->n_openlock);
6530 np->n_openflags |= N_DELEG_RETURN;
6531 lck_mtx_unlock(&np->n_openlock);
6532
6533 lck_mtx_lock(&nmp->nm_lock);
0a7de745 6534 if (np->n_dreturn.tqe_next == NFSNOLIST) {
6d2010ae 6535 TAILQ_INSERT_TAIL(&nmp->nm_dreturnq, np, n_dreturn);
0a7de745 6536 }
6d2010ae
A
6537 nfs_mount_sock_thread_wake(nmp);
6538 lck_mtx_unlock(&nmp->nm_lock);
6539}
6540
6541/*
6542 * return any delegation we may have for the given node
6543 */
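/*
 * Rough flow: mark the node N_DELEG_RETURN|N_DELEG_RETURNING, busy its open
 * state, claim all delegated opens/locks back into regular state, send
 * DELEGRETURN with the delegation stateid, then clear the delegation flags and
 * take the node off the mount's delegation lists (unless the error suggests the
 * server may still think we hold the delegation: ETIMEDOUT or the *MOVED errors).
 */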
6544int
6545nfs4_delegation_return(nfsnode_t np, int flags, thread_t thd, kauth_cred_t cred)
6546{
6547 struct nfsmount *nmp;
f427ee49 6548 fhandle_t *fh;
6d2010ae
A
6549 nfs_stateid dstateid;
6550 int error;
6551
6552 nmp = NFSTONMP(np);
0a7de745
A
6553 if (nfs_mount_gone(nmp)) {
6554 return ENXIO;
6555 }
6d2010ae 6556
f427ee49
A
6557 fh = zalloc(nfs_fhandle_zone);
6558
6d2010ae
A
6559 /* first, make sure the node's marked for delegation return */
6560 lck_mtx_lock(&np->n_openlock);
0a7de745 6561 np->n_openflags |= (N_DELEG_RETURN | N_DELEG_RETURNING);
6d2010ae
A
6562 lck_mtx_unlock(&np->n_openlock);
6563
6564 /* make sure nobody else is using the delegation state */
0a7de745 6565 if ((error = nfs_open_state_set_busy(np, NULL))) {
6d2010ae 6566 goto out;
0a7de745 6567 }
6d2010ae
A
6568
6569 /* claim any delegated state */
0a7de745 6570 if ((error = nfs4_claim_delegated_state_for_node(np, flags))) {
6d2010ae 6571 goto out;
0a7de745 6572 }
6d2010ae
A
6573
6574 /* return the delegation */
6575 lck_mtx_lock(&np->n_openlock);
6576 dstateid = np->n_dstateid;
f427ee49
A
6577 fh->fh_len = np->n_fhsize;
6578 bcopy(np->n_fhp, fh->fh_data, fh->fh_len);
6d2010ae 6579 lck_mtx_unlock(&np->n_openlock);
f427ee49 6580 error = nfs4_delegreturn_rpc(NFSTONMP(np), fh->fh_data, fh->fh_len, &dstateid, flags, thd, cred);
6d2010ae
A
6581 /* assume delegation is gone for all errors except ETIMEDOUT, NFSERR_*MOVED */
6582 if ((error != ETIMEDOUT) && (error != NFSERR_MOVED) && (error != NFSERR_LEASE_MOVED)) {
6583 lck_mtx_lock(&np->n_openlock);
6584 np->n_openflags &= ~N_DELEG_MASK;
6585 lck_mtx_lock(&nmp->nm_lock);
6586 if (np->n_dlink.tqe_next != NFSNOLIST) {
6587 TAILQ_REMOVE(&nmp->nm_delegations, np, n_dlink);
6588 np->n_dlink.tqe_next = NFSNOLIST;
6589 }
6590 lck_mtx_unlock(&nmp->nm_lock);
6591 lck_mtx_unlock(&np->n_openlock);
6592 }
6593
6594out:
6595 /* make sure it's no longer on the return queue and clear the return flags */
6596 lck_mtx_lock(&nmp->nm_lock);
6597 if (np->n_dreturn.tqe_next != NFSNOLIST) {
6598 TAILQ_REMOVE(&nmp->nm_dreturnq, np, n_dreturn);
6599 np->n_dreturn.tqe_next = NFSNOLIST;
6600 }
6601 lck_mtx_unlock(&nmp->nm_lock);
6602 lck_mtx_lock(&np->n_openlock);
0a7de745 6603 np->n_openflags &= ~(N_DELEG_RETURN | N_DELEG_RETURNING);
6d2010ae
A
6604 lck_mtx_unlock(&np->n_openlock);
6605
6606 if (error) {
6607 NP(np, "nfs4_delegation_return, error %d", error);
0a7de745 6608 if (error == ETIMEDOUT) {
6d2010ae 6609 nfs_need_reconnect(nmp);
0a7de745 6610 }
6d2010ae
A
6611 if (nfs_mount_state_error_should_restart(error)) {
6612 /* make sure recovery happens */
6613 lck_mtx_lock(&nmp->nm_lock);
6614 nfs_need_recover(nmp, nfs_mount_state_error_delegation_lost(error) ? NFSERR_EXPIRED : 0);
6615 lck_mtx_unlock(&nmp->nm_lock);
2d21ac55
A
6616 }
6617 }
6d2010ae
A
6618
6619 nfs_open_state_clear_busy(np);
f427ee49 6620 NFS_ZFREE(nfs_fhandle_zone, fh);
0a7de745 6621 return error;
b0d623f7 6622}
2d21ac55 6623
b0d623f7 6624/*
6d2010ae
A
6625 * RPC to return a delegation for a file handle
6626 */
6627int
6628nfs4_delegreturn_rpc(struct nfsmount *nmp, u_char *fhp, int fhlen, struct nfs_stateid *sid, int flags, thread_t thd, kauth_cred_t cred)
6629{
6630 int error = 0, status, numops;
6631 uint64_t xid;
6632 struct nfsm_chain nmreq, nmrep;
6633 struct nfsreq_secinfo_args si;
6634
6635 NFSREQ_SECINFO_SET(&si, NULL, fhp, fhlen, NULL, 0);
6636 nfsm_chain_null(&nmreq);
6637 nfsm_chain_null(&nmrep);
6638
6639 // PUTFH, DELEGRETURN
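	/*
	 * Minimal two-op compound: PUTFH(fh) ; DELEGRETURN(delegation stateid).
	 * The reply is only checked for per-op status; no attributes come back.
	 */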
6640 numops = 2;
6641 nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
3e170ce0 6642 nfsm_chain_add_compound_header(error, &nmreq, "delegreturn", nmp->nm_minor_vers, numops);
6d2010ae
A
6643 numops--;
6644 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6645 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
6646 numops--;
6647 nfsm_chain_add_32(error, &nmreq, NFS_OP_DELEGRETURN);
6648 nfsm_chain_add_stateid(error, &nmreq, sid);
6649 nfsm_chain_build_done(error, &nmreq);
6650 nfsm_assert(error, (numops == 0), EPROTO);
6651 nfsmout_if(error);
6652 error = nfs_request2(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags, &nmrep, &xid, &status);
6653 nfsm_chain_skip_tag(error, &nmrep);
6654 nfsm_chain_get_32(error, &nmrep, numops);
6655 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6656 nfsm_chain_op_check(error, &nmrep, NFS_OP_DELEGRETURN);
6657nfsmout:
6658 nfsm_chain_cleanup(&nmreq);
6659 nfsm_chain_cleanup(&nmrep);
0a7de745 6660 return error;
6d2010ae 6661}
cb323159 6662#endif /* CONFIG_NFS4 */
6d2010ae
A
6663
6664/*
6665 * NFS read call.
6666 * Just call nfs_bioread() to do the work.
6667 *
6668 * Note: the exec code paths have a tendency to call VNOP_READ (and VNOP_MMAP)
6669 * without first calling VNOP_OPEN, so we make sure the file is open here.
6670 */
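/*
 * A condensed sketch of the open-ensuring logic below:
 *
 *	nofp = nfs_open_file_find(np, noop, ...);
 *	if (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)
 *		return nfs_bioread(...);	// fast path: already open for read
 *	// otherwise open it for read/deny-none (v2/v3 just record the open,
 *	// v4 issues an OPEN RPC) and set NFS_OPEN_FILE_NEEDCLOSE so the
 *	// open gets closed later.
 */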
6671int
6672nfs_vnop_read(
6673 struct vnop_read_args /* {
0a7de745
A
6674 * struct vnodeop_desc *a_desc;
6675 * vnode_t a_vp;
6676 * struct uio *a_uio;
6677 * int a_ioflag;
6678 * vfs_context_t a_context;
6679 * } */*ap)
6d2010ae
A
6680{
6681 vnode_t vp = ap->a_vp;
6682 vfs_context_t ctx = ap->a_context;
6683 nfsnode_t np;
6684 struct nfsmount *nmp;
6685 struct nfs_open_owner *noop;
6686 struct nfs_open_file *nofp;
6687 int error;
6688
0a7de745 6689 if (vnode_vtype(ap->a_vp) != VREG) {
39236c6e 6690 return (vnode_vtype(vp) == VDIR) ? EISDIR : EPERM;
0a7de745 6691 }
6d2010ae
A
6692
6693 np = VTONFS(vp);
6694 nmp = NFSTONMP(np);
0a7de745
A
6695 if (nfs_mount_gone(nmp)) {
6696 return ENXIO;
6697 }
6698 if (np->n_flag & NREVOKE) {
6699 return EIO;
6700 }
6d2010ae
A
6701
6702 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
0a7de745
A
6703 if (!noop) {
6704 return ENOMEM;
6705 }
6d2010ae
A
6706restart:
6707 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
6708 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6709 NP(np, "nfs_vnop_read: LOST %d", kauth_cred_getuid(noop->noo_cred));
6710 error = EIO;
6711 }
cb323159 6712#if CONFIG_NFS4
6d2010ae
A
6713 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6714 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
6715 nofp = NULL;
0a7de745 6716 if (!error) {
6d2010ae 6717 goto restart;
0a7de745 6718 }
6d2010ae 6719 }
cb323159 6720#endif
6d2010ae
A
6721 if (error) {
6722 nfs_open_owner_rele(noop);
0a7de745 6723 return error;
6d2010ae 6724 }
3e170ce0
A
6725 /*
6726 * Since the read path is a hot path, if we already have
 6727 * read access, let's go and try to do the read, without
6728 * busying the mount and open file node for this open owner.
6729 *
6730 * N.B. This is inherently racy w.r.t. an execve using
6731 * an already open file, in that the read at the end of
6732 * this routine will be racing with a potential close.
6733 * The code below ultimately has the same problem. In practice
6734 * this does not seem to be an issue.
6735 */
6736 if (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) {
6737 nfs_open_owner_rele(noop);
6738 goto do_read;
6739 }
6740 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
6741 if (error) {
6742 nfs_open_owner_rele(noop);
0a7de745 6743 return error;
3e170ce0
A
6744 }
6745 /*
6746 * If we don't have a file already open with the access we need (read) then
6747 * we need to open one. Otherwise we just co-opt an open. We might not already
6748 * have access because we're trying to read the first page of the
6749 * file for execve.
6750 */
6751 error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
6752 if (error) {
6753 nfs_mount_state_in_use_end(nmp, 0);
6754 nfs_open_owner_rele(noop);
0a7de745 6755 return error;
3e170ce0
A
6756 }
6757 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
6758 /* we don't have the file open, so open it for read access if we're not denied */
6759 if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
6760 NP(np, "nfs_vnop_read: File already needs close access: 0x%x, cred: %d thread: %lld",
0a7de745 6761 nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
3e170ce0
A
6762 }
6763 if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) {
6764 nfs_open_file_clear_busy(nofp);
6765 nfs_mount_state_in_use_end(nmp, 0);
6d2010ae 6766 nfs_open_owner_rele(noop);
0a7de745 6767 return EPERM;
6d2010ae
A
6768 }
6769 if (np->n_flag & NREVOKE) {
6770 error = EIO;
3e170ce0 6771 nfs_open_file_clear_busy(nofp);
6d2010ae
A
6772 nfs_mount_state_in_use_end(nmp, 0);
6773 nfs_open_owner_rele(noop);
0a7de745 6774 return error;
6d2010ae 6775 }
3e170ce0
A
6776 if (nmp->nm_vers < NFS_VER4) {
6777 /* NFS v2/v3 opens are always allowed - so just add it. */
6778 nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
cb323159
A
6779 }
6780#if CONFIG_NFS4
6781 else {
3e170ce0 6782 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
6d2010ae 6783 }
cb323159 6784#endif
0a7de745 6785 if (!error) {
6d2010ae 6786 nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
0a7de745 6787 }
3e170ce0 6788 }
0a7de745 6789 if (nofp) {
3e170ce0 6790 nfs_open_file_clear_busy(nofp);
0a7de745 6791 }
3e170ce0
A
6792 if (nfs_mount_state_in_use_end(nmp, error)) {
6793 nofp = NULL;
6794 goto restart;
6d2010ae
A
6795 }
6796 nfs_open_owner_rele(noop);
0a7de745
A
6797 if (error) {
6798 return error;
6799 }
3e170ce0 6800do_read:
0a7de745 6801 return nfs_bioread(VTONFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_context);
6d2010ae
A
6802}
6803
cb323159 6804#if CONFIG_NFS4
6d2010ae
A
6805/*
6806 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6807 * Files are created using the NFSv4 OPEN RPC. So we must open the
6808 * file to create it and then close it.
6809 */
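/*
 * In outline: grab a provisional open file for this open owner, issue
 * nfs4_open_rpc(..., NFS_OPEN_CREATE, ACCESS_BOTH, DENY_NONE), attach the
 * resulting open to the newly created node, and mark it NFS_OPEN_FILE_CREATE
 * so a later close knows the open was create-initiated.
 */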
6810int
6811nfs4_vnop_create(
6812 struct vnop_create_args /* {
0a7de745
A
6813 * struct vnodeop_desc *a_desc;
6814 * vnode_t a_dvp;
6815 * vnode_t *a_vpp;
6816 * struct componentname *a_cnp;
6817 * struct vnode_attr *a_vap;
6818 * vfs_context_t a_context;
6819 * } */*ap)
6d2010ae
A
6820{
6821 vfs_context_t ctx = ap->a_context;
6822 struct componentname *cnp = ap->a_cnp;
6823 struct vnode_attr *vap = ap->a_vap;
6824 vnode_t dvp = ap->a_dvp;
6825 vnode_t *vpp = ap->a_vpp;
6826 struct nfsmount *nmp;
6827 nfsnode_t np;
6828 int error = 0, busyerror = 0, accessMode, denyMode;
6829 struct nfs_open_owner *noop = NULL;
6830 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
6831
6832 nmp = VTONMP(dvp);
0a7de745
A
6833 if (nfs_mount_gone(nmp)) {
6834 return ENXIO;
6835 }
6d2010ae 6836
0a7de745 6837 if (vap) {
6d2010ae 6838 nfs_avoid_needless_id_setting_on_create(VTONFS(dvp), vap, ctx);
0a7de745 6839 }
6d2010ae
A
6840
6841 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
0a7de745
A
6842 if (!noop) {
6843 return ENOMEM;
6844 }
6d2010ae
A
6845
6846restart:
6847 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
6848 if (error) {
6849 nfs_open_owner_rele(noop);
0a7de745 6850 return error;
6d2010ae
A
6851 }
6852
6853 /* grab a provisional, nodeless open file */
6854 error = nfs_open_file_find(NULL, noop, &newnofp, 0, 0, 1);
6855 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6856 printf("nfs_vnop_create: LOST\n");
6857 error = EIO;
6858 }
6859 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6860 /* This shouldn't happen given that this is a new, nodeless nofp */
6d2010ae
A
6861 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
6862 nfs_open_file_destroy(newnofp);
6863 newnofp = NULL;
0a7de745 6864 if (!error) {
f427ee49 6865 nfs_mount_state_in_use_end(nmp, 0);
6d2010ae 6866 goto restart;
0a7de745 6867 }
6d2010ae 6868 }
0a7de745 6869 if (!error) {
6d2010ae 6870 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
0a7de745 6871 }
6d2010ae 6872 if (error) {
0a7de745 6873 if (newnofp) {
6d2010ae 6874 nfs_open_file_destroy(newnofp);
0a7de745 6875 }
6d2010ae
A
6876 newnofp = NULL;
6877 goto out;
6878 }
6879
6880 /*
6881 * We're just trying to create the file.
6882 * We'll create/open it RW, and set NFS_OPEN_FILE_CREATE.
6883 */
6884 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
6885 denyMode = NFS_OPEN_SHARE_DENY_NONE;
6886
6887 /* Do the open/create */
6888 error = nfs4_open_rpc(newnofp, ctx, cnp, vap, dvp, vpp, NFS_OPEN_CREATE, accessMode, denyMode);
6889 if ((error == EACCES) && vap && !(vap->va_vaflags & VA_EXCLUSIVE) &&
6890 VATTR_IS_ACTIVE(vap, va_mode) && !(vap->va_mode & S_IWUSR)) {
6891 /*
 6892 * Hmm... it looks like the OPEN request may have been retransmitted: we never
 6893 * saw the first response (which successfully created/opened the file), and the
 6894 * retry was then denied because the mode the file was created with doesn't
 6895 * allow write access.
6896 *
6897 * We'll try to work around this by temporarily updating the mode and
6898 * retrying the open.
6899 */
6900 struct vnode_attr vattr;
6901
6902 /* first make sure it's there */
6903 int error2 = nfs_lookitup(VTONFS(dvp), cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
6904 if (!error2 && np) {
6905 nfs_node_unlock(np);
6906 *vpp = NFSTOV(np);
6907 if (vnode_vtype(NFSTOV(np)) == VREG) {
6908 VATTR_INIT(&vattr);
6909 VATTR_SET(&vattr, va_mode, (vap->va_mode | S_IWUSR));
6910 if (!nfs4_setattr_rpc(np, &vattr, ctx)) {
6911 error2 = nfs4_open_rpc(newnofp, ctx, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, accessMode, denyMode);
6912 VATTR_INIT(&vattr);
6913 VATTR_SET(&vattr, va_mode, vap->va_mode);
6914 nfs4_setattr_rpc(np, &vattr, ctx);
0a7de745 6915 if (!error2) {
6d2010ae 6916 error = 0;
0a7de745 6917 }
6d2010ae
A
6918 }
6919 }
6920 if (error) {
6921 vnode_put(*vpp);
6922 *vpp = NULL;
6923 }
6924 }
6925 }
6926 if (!error && !*vpp) {
6927 printf("nfs4_open_rpc returned without a node?\n");
6928 /* Hmmm... with no node, we have no filehandle and can't close it */
6929 error = EIO;
6930 }
6931 if (error) {
6932 /* need to cleanup our temporary nofp */
6933 nfs_open_file_clear_busy(newnofp);
6934 nfs_open_file_destroy(newnofp);
6935 newnofp = NULL;
6936 goto out;
6937 }
6938 /* After we have a node, add our open file struct to the node */
6939 np = VTONFS(*vpp);
6940 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
6941 nofp = newnofp;
6942 error = nfs_open_file_find_internal(np, noop, &nofp, 0, 0, 0);
6943 if (error) {
6944 /* This shouldn't happen, because we passed in a new nofp to use. */
6945 printf("nfs_open_file_find_internal failed! %d\n", error);
6946 goto out;
6947 } else if (nofp != newnofp) {
6948 /*
6949 * Hmm... an open file struct already exists.
6950 * Mark the existing one busy and merge our open into it.
6951 * Then destroy the one we created.
 6952 * Note: there's no chance of an open conflict because the
6953 * open has already been granted.
6954 */
6955 busyerror = nfs_open_file_set_busy(nofp, NULL);
6956 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
6957 nofp->nof_stateid = newnofp->nof_stateid;
0a7de745 6958 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) {
6d2010ae 6959 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
0a7de745 6960 }
6d2010ae
A
6961 nfs_open_file_clear_busy(newnofp);
6962 nfs_open_file_destroy(newnofp);
6963 }
6964 newnofp = NULL;
6965 /* mark the node as holding a create-initiated open */
6966 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
6967 nofp->nof_creator = current_thread();
6968out:
0a7de745 6969 if (nofp && !busyerror) {
6d2010ae 6970 nfs_open_file_clear_busy(nofp);
0a7de745 6971 }
6d2010ae
A
6972 if (nfs_mount_state_in_use_end(nmp, error)) {
6973 nofp = newnofp = NULL;
6974 busyerror = 0;
6975 goto restart;
6976 }
0a7de745 6977 if (noop) {
6d2010ae 6978 nfs_open_owner_rele(noop);
0a7de745
A
6979 }
6980 return error;
6d2010ae
A
6981}
6982
6983/*
6984 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6985 */
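/*
 * The compound used is
 *     PUTFH(dir) ; SAVEFH ; CREATE(type, name, attrs) ; GETATTR(incl. FILEHANDLE) ;
 *     RESTOREFH ; GETATTR(dir)
 * so the new object's filehandle and attributes, plus the directory's post-op
 * attributes, all come back in a single round trip.
 */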
6986int
6987nfs4_create_rpc(
6988 vfs_context_t ctx,
6989 nfsnode_t dnp,
6990 struct componentname *cnp,
6991 struct vnode_attr *vap,
6992 int type,
6993 char *link,
6994 nfsnode_t *npp)
6995{
6996 struct nfsmount *nmp;
f427ee49 6997 struct nfs_vattr *nvattr;
6d2010ae
A
6998 int error = 0, create_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
6999 int nfsvers, namedattrs, numops;
f427ee49 7000 u_int64_t xid = 0, savedxid = 0;
6d2010ae
A
7001 nfsnode_t np = NULL;
7002 vnode_t newvp = NULL;
7003 struct nfsm_chain nmreq, nmrep;
7004 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
7005 const char *tag;
7006 nfs_specdata sd;
f427ee49
A
7007 fhandle_t *fh;
7008 struct nfsreq *req;
7009 struct nfs_dulookup *dul;
6d2010ae
A
7010 struct nfsreq_secinfo_args si;
7011
7012 nmp = NFSTONMP(dnp);
0a7de745
A
7013 if (nfs_mount_gone(nmp)) {
7014 return ENXIO;
7015 }
6d2010ae
A
7016 nfsvers = nmp->nm_vers;
7017 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
0a7de745
A
7018 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
7019 return EINVAL;
7020 }
6d2010ae
A
7021
7022 sd.specdata1 = sd.specdata2 = 0;
7023
7024 switch (type) {
7025 case NFLNK:
7026 tag = "symlink";
7027 break;
7028 case NFBLK:
7029 case NFCHR:
7030 tag = "mknod";
0a7de745
A
7031 if (!VATTR_IS_ACTIVE(vap, va_rdev)) {
7032 return EINVAL;
7033 }
6d2010ae
A
7034 sd.specdata1 = major(vap->va_rdev);
7035 sd.specdata2 = minor(vap->va_rdev);
7036 break;
7037 case NFSOCK:
7038 case NFFIFO:
7039 tag = "mknod";
7040 break;
7041 case NFDIR:
7042 tag = "mkdir";
7043 break;
7044 default:
0a7de745 7045 return EINVAL;
6d2010ae
A
7046 }
7047
f427ee49
A
7048 fh = zalloc(nfs_fhandle_zone);
7049 req = zalloc(nfs_req_zone);
7050 MALLOC(dul, struct nfs_dulookup *, sizeof(*dul), M_TEMP, M_WAITOK);
7051 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
6d2010ae
A
7052 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
7053
7054 error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
0a7de745 7055 if (!namedattrs) {
f427ee49 7056 nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
0a7de745 7057 }
6d2010ae
A
7058
7059 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
f427ee49 7060 NVATTR_INIT(nvattr);
6d2010ae
A
7061 nfsm_chain_null(&nmreq);
7062 nfsm_chain_null(&nmrep);
7063
7064 // PUTFH, SAVEFH, CREATE, GETATTR(FH), RESTOREFH, GETATTR
7065 numops = 6;
7066 nfsm_chain_build_alloc_init(error, &nmreq, 66 * NFSX_UNSIGNED);
3e170ce0 7067 nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
6d2010ae
A
7068 numops--;
7069 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7070 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
7071 numops--;
7072 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
7073 numops--;
7074 nfsm_chain_add_32(error, &nmreq, NFS_OP_CREATE);
7075 nfsm_chain_add_32(error, &nmreq, type);
7076 if (type == NFLNK) {
7077 nfsm_chain_add_name(error, &nmreq, link, strlen(link), nmp);
7078 } else if ((type == NFBLK) || (type == NFCHR)) {
7079 nfsm_chain_add_32(error, &nmreq, sd.specdata1);
7080 nfsm_chain_add_32(error, &nmreq, sd.specdata2);
7081 }
7082 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7083 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
7084 numops--;
7085 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7086 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7087 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7088 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
7089 numops--;
7090 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
7091 numops--;
7092 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7093 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
7094 nfsm_chain_build_done(error, &nmreq);
7095 nfsm_assert(error, (numops == 0), EPROTO);
7096 nfsmout_if(error);
7097
7098 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
0a7de745 7099 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
6d2010ae 7100 if (!error) {
0a7de745 7101 if (!namedattrs) {
f427ee49 7102 nfs_dulookup_start(dul, dnp, ctx);
0a7de745 7103 }
6d2010ae
A
7104 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
7105 }
7106
0a7de745 7107 if ((lockerror = nfs_node_lock(dnp))) {
6d2010ae 7108 error = lockerror;
0a7de745 7109 }
6d2010ae
A
7110 nfsm_chain_skip_tag(error, &nmrep);
7111 nfsm_chain_get_32(error, &nmrep, numops);
7112 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7113 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
7114 nfsmout_if(error);
7115 nfsm_chain_op_check(error, &nmrep, NFS_OP_CREATE);
7116 nfsm_chain_check_change_info(error, &nmrep, dnp);
7117 bmlen = NFS_ATTR_BITMAP_LEN;
7118 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
7119 /* At this point if we have no error, the object was created. */
7120 /* if we don't get attributes, then we should lookitup. */
7121 create_error = error;
7122 nfsmout_if(error);
7123 nfs_vattr_set_supported(bitmap, vap);
7124 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7125 nfsmout_if(error);
f427ee49 7126 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
6d2010ae 7127 nfsmout_if(error);
f427ee49 7128 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
6d2010ae
A
7129 printf("nfs: create/%s didn't return filehandle? %s\n", tag, cnp->cn_nameptr);
7130 error = EBADRPC;
7131 goto nfsmout;
7132 }
7133 /* directory attributes: if we don't get them, make sure to invalidate */
7134 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
7135 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7136 savedxid = xid;
7137 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
0a7de745 7138 if (error) {
6d2010ae 7139 NATTRINVALIDATE(dnp);
0a7de745 7140 }
6d2010ae
A
7141
7142nfsmout:
7143 nfsm_chain_cleanup(&nmreq);
7144 nfsm_chain_cleanup(&nmrep);
7145
7146 if (!lockerror) {
7147 if (!create_error && (dnp->n_flag & NNEGNCENTRIES)) {
7148 dnp->n_flag &= ~NNEGNCENTRIES;
7149 cache_purge_negatives(NFSTOV(dnp));
7150 }
7151 dnp->n_flag |= NMODIFIED;
7152 nfs_node_unlock(dnp);
7153 /* nfs_getattr() will check changed and purge caches */
7154 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
7155 }
7156
f427ee49 7157 if (!error && fh->fh_len) {
6d2010ae
A
7158 /* create the vnode with the filehandle and attributes */
7159 xid = savedxid;
f427ee49 7160 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &np);
0a7de745 7161 if (!error) {
6d2010ae 7162 newvp = NFSTOV(np);
0a7de745 7163 }
6d2010ae 7164 }
6d2010ae 7165
0a7de745 7166 if (!namedattrs) {
f427ee49 7167 nfs_dulookup_finish(dul, dnp, ctx);
0a7de745 7168 }
6d2010ae 7169
f427ee49
A
7170 NVATTR_CLEANUP(nvattr);
7171 NFS_ZFREE(nfs_fhandle_zone, fh);
7172 NFS_ZFREE(nfs_req_zone, req);
7173 FREE(dul, M_TEMP);
7174 FREE(nvattr, M_TEMP);
7175
6d2010ae
A
7176 /*
 7177 * Kludge: Map EEXIST => 0 on the assumption that it's a reply to a retry,
 7178 * provided we can successfully look up the object.
7179 */
7180 if ((create_error == EEXIST) || (!create_error && !newvp)) {
7181 error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
7182 if (!error) {
7183 newvp = NFSTOV(np);
0a7de745 7184 if (vnode_vtype(newvp) != nfstov_type(type, nfsvers)) {
6d2010ae 7185 error = EEXIST;
0a7de745 7186 }
6d2010ae
A
7187 }
7188 }
0a7de745 7189 if (!busyerror) {
6d2010ae 7190 nfs_node_clear_busy(dnp);
0a7de745 7191 }
6d2010ae
A
7192 if (error) {
7193 if (newvp) {
7194 nfs_node_unlock(np);
7195 vnode_put(newvp);
7196 }
7197 } else {
7198 nfs_node_unlock(np);
7199 *npp = np;
7200 }
0a7de745 7201 return error;
6d2010ae
A
7202}
7203
7204int
7205nfs4_vnop_mknod(
7206 struct vnop_mknod_args /* {
0a7de745
A
7207 * struct vnodeop_desc *a_desc;
7208 * vnode_t a_dvp;
7209 * vnode_t *a_vpp;
7210 * struct componentname *a_cnp;
7211 * struct vnode_attr *a_vap;
7212 * vfs_context_t a_context;
7213 * } */*ap)
6d2010ae
A
7214{
7215 nfsnode_t np = NULL;
7216 struct nfsmount *nmp;
7217 int error;
7218
7219 nmp = VTONMP(ap->a_dvp);
0a7de745
A
7220 if (nfs_mount_gone(nmp)) {
7221 return ENXIO;
7222 }
6d2010ae 7223
0a7de745
A
7224 if (!VATTR_IS_ACTIVE(ap->a_vap, va_type)) {
7225 return EINVAL;
7226 }
6d2010ae
A
7227 switch (ap->a_vap->va_type) {
7228 case VBLK:
7229 case VCHR:
7230 case VFIFO:
7231 case VSOCK:
7232 break;
7233 default:
0a7de745 7234 return ENOTSUP;
6d2010ae
A
7235 }
7236
7237 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
0a7de745
A
7238 vtonfs_type(ap->a_vap->va_type, nmp->nm_vers), NULL, &np);
7239 if (!error) {
6d2010ae 7240 *ap->a_vpp = NFSTOV(np);
0a7de745
A
7241 }
7242 return error;
6d2010ae
A
7243}
7244
7245int
7246nfs4_vnop_mkdir(
7247 struct vnop_mkdir_args /* {
0a7de745
A
7248 * struct vnodeop_desc *a_desc;
7249 * vnode_t a_dvp;
7250 * vnode_t *a_vpp;
7251 * struct componentname *a_cnp;
7252 * struct vnode_attr *a_vap;
7253 * vfs_context_t a_context;
7254 * } */*ap)
6d2010ae
A
7255{
7256 nfsnode_t np = NULL;
7257 int error;
7258
7259 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
0a7de745
A
7260 NFDIR, NULL, &np);
7261 if (!error) {
6d2010ae 7262 *ap->a_vpp = NFSTOV(np);
0a7de745
A
7263 }
7264 return error;
6d2010ae
A
7265}
7266
7267int
7268nfs4_vnop_symlink(
7269 struct vnop_symlink_args /* {
0a7de745
A
7270 * struct vnodeop_desc *a_desc;
7271 * vnode_t a_dvp;
7272 * vnode_t *a_vpp;
7273 * struct componentname *a_cnp;
7274 * struct vnode_attr *a_vap;
7275 * char *a_target;
7276 * vfs_context_t a_context;
7277 * } */*ap)
6d2010ae
A
7278{
7279 nfsnode_t np = NULL;
7280 int error;
7281
7282 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
0a7de745
A
7283 NFLNK, ap->a_target, &np);
7284 if (!error) {
6d2010ae 7285 *ap->a_vpp = NFSTOV(np);
0a7de745
A
7286 }
7287 return error;
6d2010ae
A
7288}
7289
7290int
7291nfs4_vnop_link(
7292 struct vnop_link_args /* {
0a7de745
A
7293 * struct vnodeop_desc *a_desc;
7294 * vnode_t a_vp;
7295 * vnode_t a_tdvp;
7296 * struct componentname *a_cnp;
7297 * vfs_context_t a_context;
7298 * } */*ap)
6d2010ae
A
7299{
7300 vfs_context_t ctx = ap->a_context;
7301 vnode_t vp = ap->a_vp;
7302 vnode_t tdvp = ap->a_tdvp;
7303 struct componentname *cnp = ap->a_cnp;
7304 int error = 0, lockerror = ENOENT, status;
7305 struct nfsmount *nmp;
7306 nfsnode_t np = VTONFS(vp);
7307 nfsnode_t tdnp = VTONFS(tdvp);
7308 int nfsvers, numops;
7309 u_int64_t xid, savedxid;
7310 struct nfsm_chain nmreq, nmrep;
7311 struct nfsreq_secinfo_args si;
7312
0a7de745
A
7313 if (vnode_mount(vp) != vnode_mount(tdvp)) {
7314 return EXDEV;
7315 }
6d2010ae
A
7316
7317 nmp = VTONMP(vp);
0a7de745
A
7318 if (nfs_mount_gone(nmp)) {
7319 return ENXIO;
7320 }
6d2010ae 7321 nfsvers = nmp->nm_vers;
0a7de745
A
7322 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
7323 return EINVAL;
7324 }
7325 if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
7326 return EINVAL;
7327 }
6d2010ae
A
7328
7329 /*
7330 * Push all writes to the server, so that the attribute cache
7331 * doesn't get "out of sync" with the server.
7332 * XXX There should be a better way!
7333 */
7334 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
7335
0a7de745
A
7336 if ((error = nfs_node_set_busy2(tdnp, np, vfs_context_thread(ctx)))) {
7337 return error;
7338 }
6d2010ae
A
7339
7340 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
7341 nfsm_chain_null(&nmreq);
7342 nfsm_chain_null(&nmrep);
7343
7344 // PUTFH(SOURCE), SAVEFH, PUTFH(DIR), LINK, GETATTR(DIR), RESTOREFH, GETATTR
7345 numops = 7;
7346 nfsm_chain_build_alloc_init(error, &nmreq, 29 * NFSX_UNSIGNED + cnp->cn_namelen);
3e170ce0 7347 nfsm_chain_add_compound_header(error, &nmreq, "link", nmp->nm_minor_vers, numops);
6d2010ae
A
7348 numops--;
7349 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7350 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
7351 numops--;
7352 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
7353 numops--;
7354 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7355 nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
7356 numops--;
7357 nfsm_chain_add_32(error, &nmreq, NFS_OP_LINK);
7358 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7359 numops--;
7360 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7361 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
7362 numops--;
7363 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
7364 numops--;
7365 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7366 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
7367 nfsm_chain_build_done(error, &nmreq);
7368 nfsm_assert(error, (numops == 0), EPROTO);
7369 nfsmout_if(error);
7370 error = nfs_request(tdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
7371
7372 if ((lockerror = nfs_node_lock2(tdnp, np))) {
7373 error = lockerror;
7374 goto nfsmout;
7375 }
7376 nfsm_chain_skip_tag(error, &nmrep);
7377 nfsm_chain_get_32(error, &nmrep, numops);
7378 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7379 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
7380 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7381 nfsm_chain_op_check(error, &nmrep, NFS_OP_LINK);
7382 nfsm_chain_check_change_info(error, &nmrep, tdnp);
7383 /* directory attributes: if we don't get them, make sure to invalidate */
7384 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7385 savedxid = xid;
7386 nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
0a7de745 7387 if (error) {
6d2010ae 7388 NATTRINVALIDATE(tdnp);
0a7de745 7389 }
6d2010ae
A
7390 /* link attributes: if we don't get them, make sure to invalidate */
7391 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
7392 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7393 xid = savedxid;
7394 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
0a7de745 7395 if (error) {
6d2010ae 7396 NATTRINVALIDATE(np);
0a7de745 7397 }
6d2010ae
A
7398nfsmout:
7399 nfsm_chain_cleanup(&nmreq);
7400 nfsm_chain_cleanup(&nmrep);
0a7de745 7401 if (!lockerror) {
6d2010ae 7402 tdnp->n_flag |= NMODIFIED;
0a7de745 7403 }
6d2010ae 7404 /* Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. */
0a7de745 7405 if (error == EEXIST) {
6d2010ae 7406 error = 0;
0a7de745 7407 }
6d2010ae
A
7408 if (!error && (tdnp->n_flag & NNEGNCENTRIES)) {
7409 tdnp->n_flag &= ~NNEGNCENTRIES;
7410 cache_purge_negatives(tdvp);
7411 }
0a7de745 7412 if (!lockerror) {
6d2010ae 7413 nfs_node_unlock2(tdnp, np);
0a7de745 7414 }
6d2010ae 7415 nfs_node_clear_busy2(tdnp, np);
0a7de745 7416 return error;
6d2010ae
A
7417}
7418
7419int
7420nfs4_vnop_rmdir(
7421 struct vnop_rmdir_args /* {
0a7de745
A
7422 * struct vnodeop_desc *a_desc;
7423 * vnode_t a_dvp;
7424 * vnode_t a_vp;
7425 * struct componentname *a_cnp;
7426 * vfs_context_t a_context;
7427 * } */*ap)
6d2010ae
A
7428{
7429 vfs_context_t ctx = ap->a_context;
7430 vnode_t vp = ap->a_vp;
7431 vnode_t dvp = ap->a_dvp;
7432 struct componentname *cnp = ap->a_cnp;
7433 struct nfsmount *nmp;
7434 int error = 0, namedattrs;
7435 nfsnode_t np = VTONFS(vp);
7436 nfsnode_t dnp = VTONFS(dvp);
f427ee49 7437 struct nfs_dulookup *dul;
6d2010ae 7438
0a7de745
A
7439 if (vnode_vtype(vp) != VDIR) {
7440 return EINVAL;
7441 }
6d2010ae
A
7442
7443 nmp = NFSTONMP(dnp);
0a7de745
A
7444 if (nfs_mount_gone(nmp)) {
7445 return ENXIO;
7446 }
6d2010ae
A
7447 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
7448
0a7de745
A
7449 if ((error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx)))) {
7450 return error;
7451 }
6d2010ae 7452
f427ee49 7453 MALLOC(dul, struct nfs_dulookup *, sizeof(*dul), M_TEMP, M_WAITOK);
6d2010ae 7454 if (!namedattrs) {
f427ee49
A
7455 nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
7456 nfs_dulookup_start(dul, dnp, ctx);
6d2010ae
A
7457 }
7458
7459 error = nfs4_remove_rpc(dnp, cnp->cn_nameptr, cnp->cn_namelen,
0a7de745 7460 vfs_context_thread(ctx), vfs_context_ucred(ctx));
6d2010ae
A
7461
7462 nfs_name_cache_purge(dnp, np, cnp, ctx);
7463 /* nfs_getattr() will check changed and purge caches */
7464 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
0a7de745 7465 if (!namedattrs) {
f427ee49 7466 nfs_dulookup_finish(dul, dnp, ctx);
0a7de745 7467 }
6d2010ae
A
7468 nfs_node_clear_busy2(dnp, np);
7469
7470 /*
 7471 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
7472 */
0a7de745 7473 if (error == ENOENT) {
6d2010ae 7474 error = 0;
0a7de745 7475 }
6d2010ae
A
7476 if (!error) {
7477 /*
7478 * remove nfsnode from hash now so we can't accidentally find it
7479 * again if another object gets created with the same filehandle
7480 * before this vnode gets reclaimed
7481 */
7482 lck_mtx_lock(nfs_node_hash_mutex);
7483 if (np->n_hflag & NHHASHED) {
7484 LIST_REMOVE(np, n_hash);
7485 np->n_hflag &= ~NHHASHED;
7486 FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
7487 }
7488 lck_mtx_unlock(nfs_node_hash_mutex);
7489 }
f427ee49 7490 FREE(dul, M_TEMP);
0a7de745 7491 return error;
6d2010ae
A
7492}
7493
7494/*
7495 * NFSv4 Named Attributes
7496 *
7497 * Both the extended attributes interface and the named streams interface
7498 * are backed by NFSv4 named attributes. The implementations for both use
7499 * a common set of routines in an attempt to reduce code duplication, to
7500 * increase efficiency, to increase caching of both names and data, and to
7501 * confine the complexity.
7502 *
7503 * Each NFS node caches its named attribute directory's file handle.
7504 * The directory nodes for the named attribute directories are handled
 7505 * exactly like regular directories (with a couple of minor exceptions).
7506 * Named attribute nodes are also treated as much like regular files as
7507 * possible.
7508 *
7509 * Most of the heavy lifting is done by nfs4_named_attr_get().
7510 */
7511
7512/*
7513 * Get the given node's attribute directory node.
7514 * If !fetch, then only return a cached node.
7515 * Otherwise, we will attempt to fetch the node from the server.
7516 * (Note: the node should be marked busy.)
b0d623f7 7517 */
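/*
 * The attribute directory is fetched with a PUTFH ; OPENATTR(no create) ;
 * GETATTR(incl. FILEHANDLE) compound.  Its filehandle is cached in
 * np->n_attrdirfh as a length byte followed by the handle data, so later
 * calls can rebuild the directory node without another RPC.
 */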
6d2010ae
A
7518nfsnode_t
7519nfs4_named_attr_dir_get(nfsnode_t np, int fetch, vfs_context_t ctx)
b0d623f7 7520{
6d2010ae 7521 nfsnode_t adnp = NULL;
b0d623f7 7522 struct nfsmount *nmp;
6d2010ae
A
7523 int error = 0, status, numops;
7524 struct nfsm_chain nmreq, nmrep;
7525 u_int64_t xid;
7526 uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
f427ee49
A
7527 fhandle_t *fh;
7528 struct nfs_vattr *nvattr;
6d2010ae 7529 struct componentname cn;
f427ee49 7530 struct nfsreq *req;
6d2010ae 7531 struct nfsreq_secinfo_args si;
b0d623f7 7532
6d2010ae 7533 nmp = NFSTONMP(np);
0a7de745
A
7534 if (nfs_mount_gone(nmp)) {
7535 return NULL;
7536 }
7537 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
7538 return NULL;
7539 }
b0d623f7 7540
6d2010ae 7541 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
f427ee49
A
7542 fh = zalloc(nfs_fhandle_zone);
7543 req = zalloc(nfs_req_zone);
7544 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
7545 NVATTR_INIT(nvattr);
6d2010ae
A
7546 nfsm_chain_null(&nmreq);
7547 nfsm_chain_null(&nmrep);
b0d623f7 7548
6d2010ae
A
7549 bzero(&cn, sizeof(cn));
7550 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
f427ee49 7551 cn.cn_namelen = NFS_STRLEN_INT(_PATH_FORKSPECIFIER);
6d2010ae
A
7552 cn.cn_nameiop = LOOKUP;
7553
7554 if (np->n_attrdirfh) {
7555 // XXX can't set parent correctly (to np) yet
0a7de745
A
7556 error = nfs_nget(nmp->nm_mountp, NULL, &cn, np->n_attrdirfh + 1, *np->n_attrdirfh,
7557 NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &adnp);
7558 if (adnp) {
6d2010ae 7559 goto nfsmout;
0a7de745 7560 }
6d2010ae
A
7561 }
7562 if (!fetch) {
7563 error = ENOENT;
7564 goto nfsmout;
2d21ac55
A
7565 }
7566
6d2010ae
A
7567 // PUTFH, OPENATTR, GETATTR
7568 numops = 3;
7569 nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
3e170ce0 7570 nfsm_chain_add_compound_header(error, &nmreq, "openattr", nmp->nm_minor_vers, numops);
6d2010ae
A
7571 numops--;
7572 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7573 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7574 numops--;
7575 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7576 nfsm_chain_add_32(error, &nmreq, 0);
7577 numops--;
7578 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7579 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7580 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7581 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
0a7de745 7582 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
6d2010ae
A
7583 nfsm_chain_build_done(error, &nmreq);
7584 nfsm_assert(error, (numops == 0), EPROTO);
7585 nfsmout_if(error);
7586 error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND,
0a7de745
A
7587 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
7588 if (!error) {
6d2010ae 7589 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
0a7de745 7590 }
b0d623f7 7591
6d2010ae
A
7592 nfsm_chain_skip_tag(error, &nmrep);
7593 nfsm_chain_get_32(error, &nmrep, numops);
7594 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7595 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
7596 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7597 nfsmout_if(error);
f427ee49 7598 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
6d2010ae 7599 nfsmout_if(error);
f427ee49 7600 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh->fh_len) {
6d2010ae
A
7601 error = ENOENT;
7602 goto nfsmout;
2d21ac55 7603 }
f427ee49 7604 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh->fh_len)) {
6d2010ae 7605 /* (re)allocate attrdir fh buffer */
0a7de745 7606 if (np->n_attrdirfh) {
6d2010ae 7607 FREE(np->n_attrdirfh, M_TEMP);
0a7de745 7608 }
f427ee49 7609 MALLOC(np->n_attrdirfh, u_char*, fh->fh_len + 1, M_TEMP, M_WAITOK);
2d21ac55 7610 }
6d2010ae
A
7611 if (!np->n_attrdirfh) {
7612 error = ENOMEM;
7613 goto nfsmout;
b0d623f7 7614 }
6d2010ae 7615 /* cache the attrdir fh in the node */
f427ee49
A
7616 *np->n_attrdirfh = (unsigned char)fh->fh_len; /* No truncation because fh_len's value is checked during nfs4_parsefattr() */
7617 bcopy(fh->fh_data, np->n_attrdirfh + 1, fh->fh_len);
6d2010ae
A
7618 /* create node for attrdir */
7619 // XXX can't set parent correctly (to np) yet
f427ee49 7620 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, 0, &adnp);
6d2010ae 7621nfsmout:
f427ee49
A
7622 NVATTR_CLEANUP(nvattr);
7623 NFS_ZFREE(nfs_fhandle_zone, fh);
7624 NFS_ZFREE(nfs_req_zone, req);
7625 FREE(nvattr, M_TEMP);
6d2010ae
A
7626 nfsm_chain_cleanup(&nmreq);
7627 nfsm_chain_cleanup(&nmrep);
2d21ac55 7628
6d2010ae
A
7629 if (adnp) {
7630 /* sanity check that this node is an attribute directory */
0a7de745 7631 if (adnp->n_vattr.nva_type != VDIR) {
6d2010ae 7632 error = EINVAL;
0a7de745
A
7633 }
7634 if (!(adnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
6d2010ae 7635 error = EINVAL;
0a7de745 7636 }
6d2010ae 7637 nfs_node_unlock(adnp);
0a7de745 7638 if (error) {
6d2010ae 7639 vnode_put(NFSTOV(adnp));
0a7de745 7640 }
b0d623f7 7641 }
0a7de745 7642 return error ? NULL : adnp;
b0d623f7
A
7643}
7644
2d21ac55 7645/*
6d2010ae
A
7646 * Get the given node's named attribute node for the name given.
7647 *
7648 * In an effort to increase the performance of named attribute access, we try
7649 * to reduce server requests by doing the following:
7650 *
7651 * - cache the node's named attribute directory file handle in the node
7652 * - maintain a directory vnode for the attribute directory
7653 * - use name cache entries (positive and negative) to speed up lookups
7654 * - optionally open the named attribute (with the given accessMode) in the same RPC
7655 * - combine attribute directory retrieval with the lookup/open RPC
7656 * - optionally prefetch the named attribute's first block of data in the same RPC
7657 *
7658 * Also, in an attempt to reduce the number of copies/variations of this code,
7659 * parts of the RPC building/processing code are conditionalized on what is
7660 * needed for any particular request (openattr, lookup vs. open, read).
7661 *
7662 * Note that because we may not have the attribute directory node when we start
7663 * the lookup/open, we lock both the node and the attribute directory node.
2d21ac55 7664 */
6d2010ae 7665
0a7de745
A
7666#define NFS_GET_NAMED_ATTR_CREATE 0x1
7667#define NFS_GET_NAMED_ATTR_CREATE_GUARDED 0x2
7668#define NFS_GET_NAMED_ATTR_TRUNCATE 0x4
7669#define NFS_GET_NAMED_ATTR_PREFETCH 0x8
6d2010ae 7670
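/*
 * A plausible mapping of callers to these flags (illustrative only; the actual
 * xattr/named-stream vnops are presumed to live elsewhere in this file): a
 * read-style lookup would pass NFS_OPEN_SHARE_ACCESS_READ with
 * NFS_GET_NAMED_ATTR_PREFETCH, while a create/replace would pass
 * NFS_OPEN_SHARE_ACCESS_BOTH with NFS_GET_NAMED_ATTR_CREATE and, as
 * appropriate, NFS_GET_NAMED_ATTR_CREATE_GUARDED or NFS_GET_NAMED_ATTR_TRUNCATE.
 */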
b0d623f7 7671int
6d2010ae
A
7672nfs4_named_attr_get(
7673 nfsnode_t np,
2d21ac55 7674 struct componentname *cnp,
6d2010ae
A
7675 uint32_t accessMode,
7676 int flags,
7677 vfs_context_t ctx,
7678 nfsnode_t *anpp,
7679 struct nfs_open_file **nofpp)
2d21ac55
A
7680{
7681 struct nfsmount *nmp;
6d2010ae
A
7682 int error = 0, open_error = EIO;
7683 int inuse = 0, adlockerror = ENOENT, busyerror = ENOENT, adbusyerror = ENOENT, nofpbusyerror = ENOENT;
7684 int create, guarded, prefetch, truncate, noopbusy = 0;
7685 int open, status, numops, hadattrdir, negnamecache;
f427ee49 7686 struct nfs_vattr *nvattr;
6d2010ae
A
7687 struct vnode_attr vattr;
7688 nfsnode_t adnp = NULL, anp = NULL;
7689 vnode_t avp = NULL;
f427ee49 7690 u_int64_t xid = 0, savedxid = 0;
2d21ac55
A
7691 struct nfsm_chain nmreq, nmrep;
7692 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
f427ee49 7693 uint32_t denyMode = 0, rflags, delegation, recall, eof, rlen, retlen;
6d2010ae 7694 nfs_stateid stateid, dstateid;
f427ee49 7695 fhandle_t *fh;
6d2010ae
A
7696 struct nfs_open_owner *noop = NULL;
7697 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
7698 struct vnop_access_args naa;
7699 thread_t thd;
7700 kauth_cred_t cred;
7701 struct timeval now;
7702 char sbuf[64], *s;
7703 uint32_t ace_type, ace_flags, ace_mask, len, slen;
7704 struct kauth_ace ace;
f427ee49 7705 struct nfsreq *req;
6d2010ae
A
7706 struct nfsreq_secinfo_args si;
7707
7708 *anpp = NULL;
6d2010ae
A
7709 rflags = delegation = recall = eof = rlen = retlen = 0;
7710 ace.ace_flags = 0;
7711 s = sbuf;
7712 slen = sizeof(sbuf);
2d21ac55 7713
6d2010ae 7714 nmp = NFSTONMP(np);
0a7de745
A
7715 if (nfs_mount_gone(nmp)) {
7716 return ENXIO;
7717 }
f427ee49
A
7718 fh = zalloc(nfs_fhandle_zone);
7719 req = zalloc(nfs_req_zone);
7720 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
7721 NVATTR_INIT(nvattr);
7722 fh->fh_len = 0;
7723 bzero(&dstateid, sizeof(dstateid));
6d2010ae
A
7724 negnamecache = !NMFLAG(nmp, NONEGNAMECACHE);
7725 thd = vfs_context_thread(ctx);
7726 cred = vfs_context_ucred(ctx);
7727 create = (flags & NFS_GET_NAMED_ATTR_CREATE) ? NFS_OPEN_CREATE : NFS_OPEN_NOCREATE;
7728 guarded = (flags & NFS_GET_NAMED_ATTR_CREATE_GUARDED) ? NFS_CREATE_GUARDED : NFS_CREATE_UNCHECKED;
7729 truncate = (flags & NFS_GET_NAMED_ATTR_TRUNCATE);
7730 prefetch = (flags & NFS_GET_NAMED_ATTR_PREFETCH);
7731
7732 if (!create) {
f427ee49 7733 error = nfs_getattr(np, nvattr, ctx, NGA_CACHED);
0a7de745 7734 if (error) {
f427ee49 7735 goto out_free;
0a7de745 7736 }
f427ee49
A
7737 if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
7738 !(nvattr->nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
7739 error = ENOATTR;
7740 goto out_free;
0a7de745 7741 }
6d2010ae
A
7742 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_NONE) {
7743 /* shouldn't happen... but just be safe */
7744 printf("nfs4_named_attr_get: create with no access %s\n", cnp->cn_nameptr);
7745 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
7746 }
7747 open = (accessMode != NFS_OPEN_SHARE_ACCESS_NONE);
7748 if (open) {
7749 /*
7750 * We're trying to open the file.
7751 * We'll create/open it with the given access mode,
7752 * and set NFS_OPEN_FILE_CREATE.
7753 */
7754 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 7755 if (prefetch && guarded) {
6d2010ae 7756 prefetch = 0; /* no sense prefetching data that can't be there */
0a7de745 7757 }
6d2010ae 7758 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
0a7de745 7759 if (!noop) {
f427ee49
A
7760 error = ENOMEM;
7761 goto out_free;
0a7de745 7762 }
2d21ac55
A
7763 }
7764
0a7de745 7765 if ((error = busyerror = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
f427ee49 7766 goto out_free;
0a7de745 7767 }
2d21ac55 7768
6d2010ae
A
7769 adnp = nfs4_named_attr_dir_get(np, 0, ctx);
7770 hadattrdir = (adnp != NULL);
7771 if (prefetch) {
7772 microuptime(&now);
7773 /* use the special state ID because we don't have a real one to send */
7774 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
7775 rlen = MIN(nmp->nm_rsize, nmp->nm_biosize);
7776 }
7777 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
2d21ac55
A
7778 nfsm_chain_null(&nmreq);
7779 nfsm_chain_null(&nmrep);
7780
6d2010ae 7781 if (hadattrdir) {
0a7de745 7782 if ((error = adbusyerror = nfs_node_set_busy(adnp, vfs_context_thread(ctx)))) {
6d2010ae 7783 goto nfsmout;
0a7de745 7784 }
6d2010ae
A
7785 /* nfs_getattr() will check changed and purge caches */
7786 error = nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
7787 nfsmout_if(error);
7788 error = cache_lookup(NFSTOV(adnp), &avp, cnp);
7789 switch (error) {
7790 case ENOENT:
7791 /* negative cache entry */
7792 goto nfsmout;
7793 case 0:
7794 /* cache miss */
7795 /* try dir buf cache lookup */
f427ee49 7796 error = nfs_dir_buf_cache_lookup(adnp, &anp, cnp, ctx, 0, NULL);
6d2010ae
A
7797 if (!error && anp) {
7798 /* dir buf cache hit */
7799 *anpp = anp;
7800 error = -1;
7801 }
0a7de745 7802 if (error != -1) { /* cache miss */
6d2010ae 7803 break;
0a7de745 7804 }
f427ee49 7805 OS_FALLTHROUGH;
6d2010ae
A
7806 case -1:
7807 /* cache hit, not really an error */
316670eb 7808 OSAddAtomic64(1, &nfsstats.lookupcache_hits);
0a7de745 7809 if (!anp && avp) {
6d2010ae 7810 *anpp = anp = VTONFS(avp);
0a7de745 7811 }
6d2010ae
A
7812
7813 nfs_node_clear_busy(adnp);
7814 adbusyerror = ENOENT;
7815
7816 /* check for directory access */
7817 naa.a_desc = &vnop_access_desc;
7818 naa.a_vp = NFSTOV(adnp);
7819 naa.a_action = KAUTH_VNODE_SEARCH;
7820 naa.a_context = ctx;
7821
7822 /* compute actual success/failure based on accessibility */
7823 error = nfs_vnop_access(&naa);
f427ee49 7824 OS_FALLTHROUGH;
6d2010ae
A
7825 default:
7826 /* we either found it, or hit an error */
7827 if (!error && guarded) {
7828 /* found cached entry but told not to use it */
7829 error = EEXIST;
7830 vnode_put(NFSTOV(anp));
7831 *anpp = anp = NULL;
7832 }
7833 /* we're done if error or we don't need to open */
0a7de745 7834 if (error || !open) {
6d2010ae 7835 goto nfsmout;
0a7de745 7836 }
6d2010ae
A
7837 /* no error and we need to open... */
7838 }
7839 }
7840
7841 if (open) {
7842restart:
7843 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
7844 if (error) {
7845 nfs_open_owner_rele(noop);
7846 noop = NULL;
7847 goto nfsmout;
7848 }
7849 inuse = 1;
7850
7851 /* grab an open file - possibly provisional/nodeless if cache_lookup() failed */
7852 error = nfs_open_file_find(anp, noop, &newnofp, 0, 0, 1);
7853 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
7854 printf("nfs4_named_attr_get: LOST %d %s\n", kauth_cred_getuid(noop->noo_cred), cnp->cn_nameptr);
7855 error = EIO;
7856 }
7857 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6d2010ae
A
7858 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
7859 nfs_open_file_destroy(newnofp);
7860 newnofp = NULL;
0a7de745 7861 if (!error) {
f427ee49
A
7862 nfs_mount_state_in_use_end(nmp, 0);
7863 inuse = 0;
6d2010ae 7864 goto restart;
0a7de745 7865 }
6d2010ae 7866 }
0a7de745 7867 if (!error) {
6d2010ae 7868 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
0a7de745 7869 }
6d2010ae 7870 if (error) {
0a7de745 7871 if (newnofp) {
6d2010ae 7872 nfs_open_file_destroy(newnofp);
0a7de745 7873 }
6d2010ae
A
7874 newnofp = NULL;
7875 goto nfsmout;
7876 }
7877 if (anp) {
7878 /*
7879 * We already have the node. So we just need to open
7880 * it - which we may be able to do with a delegation.
7881 */
7882 open_error = error = nfs4_open(anp, newnofp, accessMode, denyMode, ctx);
7883 if (!error) {
7884 /* open succeeded, so our open file is no longer temporary */
7885 nofp = newnofp;
7886 nofpbusyerror = 0;
7887 newnofp = NULL;
0a7de745 7888 if (nofpp) {
6d2010ae 7889 *nofpp = nofp;
0a7de745 7890 }
6d2010ae
A
7891 }
7892 goto nfsmout;
7893 }
7894 }
7895
7896 /*
7897 * We either don't have the attrdir or we didn't find the attribute
7898 * in the name cache, so we need to talk to the server.
7899 *
7900 * If we don't have the attrdir, we'll need to ask the server for that too.
7901 * If the caller is requesting that the attribute be created, we need to
7902 * make sure the attrdir is created.
7903 * The caller may also request that the first block of an existing attribute
7904 * be retrieved at the same time.
7905 */
7906
7907 if (open) {
7908 /* need to mark the open owner busy during the RPC */
0a7de745 7909 if ((error = nfs_open_owner_set_busy(noop, thd))) {
6d2010ae 7910 goto nfsmout;
0a7de745 7911 }
6d2010ae
A
7912 noopbusy = 1;
7913 }
7914
7915 /*
7916 * We'd like to get updated post-open/lookup attributes for the
7917 * directory and we may also want to prefetch some data via READ.
7918 * We'd like the READ results to be last so that we can leave the
7919 * data in the mbufs until the end.
7920 *
7921 * At a minimum we're sending: PUTFH, LOOKUP/OPEN, GETATTR, PUTFH, GETATTR
7922 */
7923 numops = 5;
0a7de745
A
7924 if (!hadattrdir) {
7925 numops += 3; // also sending: OPENATTR, GETATTR, OPENATTR
7926 }
7927 if (prefetch) {
7928 numops += 4; // also sending: SAVEFH, RESTOREFH, NVERIFY, READ
7929 }
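 /*
 * Rough shape of the compound built below (worst case: no attrdir yet,
 * doing an open, and prefetching):
 *   PUTFH(np), OPENATTR(create), GETATTR(+fh), OPEN|LOOKUP, GETATTR(+fh),
 *   SAVEFH, PUTFH(np), OPENATTR, GETATTR, RESTOREFH, NVERIFY(size==0), READ
 * When the attribute directory is already known, the OPENATTR/GETATTR ops
 * drop out and the PUTFHs use the attrdir's filehandle instead.
 */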
6d2010ae 7930 nfsm_chain_build_alloc_init(error, &nmreq, 64 * NFSX_UNSIGNED + cnp->cn_namelen);
3e170ce0 7931 nfsm_chain_add_compound_header(error, &nmreq, "getnamedattr", nmp->nm_minor_vers, numops);
6d2010ae
A
7932 if (hadattrdir) {
7933 numops--;
7934 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7935 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7936 } else {
7937 numops--;
7938 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7939 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7940 numops--;
7941 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7942 nfsm_chain_add_32(error, &nmreq, create ? 1 : 0);
7943 numops--;
7944 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7945 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7946 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7947 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
0a7de745 7948 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
6d2010ae
A
7949 }
7950 if (open) {
7951 numops--;
7952 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
7953 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
7954 nfsm_chain_add_32(error, &nmreq, accessMode);
7955 nfsm_chain_add_32(error, &nmreq, denyMode);
7956 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
7957 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
7958 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
7959 nfsm_chain_add_32(error, &nmreq, create);
7960 if (create) {
7961 nfsm_chain_add_32(error, &nmreq, guarded);
7962 VATTR_INIT(&vattr);
0a7de745 7963 if (truncate) {
6d2010ae 7964 VATTR_SET(&vattr, va_data_size, 0);
0a7de745 7965 }
6d2010ae
A
7966 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
7967 }
7968 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
7969 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7970 } else {
7971 numops--;
7972 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
7973 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
2d21ac55 7974 }
2d21ac55
A
7975 numops--;
7976 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7977 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7978 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7979 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
0a7de745 7980 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
6d2010ae
A
7981 if (prefetch) {
7982 numops--;
7983 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
7984 }
7985 if (hadattrdir) {
7986 numops--;
7987 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7988 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7989 } else {
7990 numops--;
7991 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7992 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7993 numops--;
7994 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7995 nfsm_chain_add_32(error, &nmreq, 0);
7996 }
2d21ac55
A
7997 numops--;
7998 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7999 nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
0a7de745 8000 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
6d2010ae
A
8001 if (prefetch) {
8002 numops--;
8003 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
8004 numops--;
8005 nfsm_chain_add_32(error, &nmreq, NFS_OP_NVERIFY);
8006 VATTR_INIT(&vattr);
8007 VATTR_SET(&vattr, va_data_size, 0);
8008 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
8009 numops--;
8010 nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
8011 nfsm_chain_add_stateid(error, &nmreq, &stateid);
8012 nfsm_chain_add_64(error, &nmreq, 0);
8013 nfsm_chain_add_32(error, &nmreq, rlen);
8014 }
2d21ac55
A
8015 nfsm_chain_build_done(error, &nmreq);
8016 nfsm_assert(error, (numops == 0), EPROTO);
8017 nfsmout_if(error);
6d2010ae 8018 error = nfs_request_async(hadattrdir ? adnp : np, NULL, &nmreq, NFSPROC4_COMPOUND,
0a7de745
A
8019 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, open ? R_NOINTR: 0, NULL, &req);
8020 if (!error) {
2d21ac55 8021 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
0a7de745 8022 }
2d21ac55 8023
0a7de745 8024 if (hadattrdir && ((adlockerror = nfs_node_lock(adnp)))) {
6d2010ae 8025 error = adlockerror;
0a7de745 8026 }
6d2010ae 8027 savedxid = xid;
2d21ac55
A
8028 nfsm_chain_skip_tag(error, &nmrep);
8029 nfsm_chain_get_32(error, &nmrep, numops);
8030 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6d2010ae
A
8031 if (!hadattrdir) {
8032 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
8033 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
8034 nfsmout_if(error);
f427ee49 8035 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
6d2010ae 8036 nfsmout_if(error);
f427ee49
A
8037 if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE) && fh->fh_len) {
8038 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh->fh_len)) {
6d2010ae 8039 /* (re)allocate attrdir fh buffer */
0a7de745 8040 if (np->n_attrdirfh) {
6d2010ae 8041 FREE(np->n_attrdirfh, M_TEMP);
0a7de745 8042 }
f427ee49 8043 MALLOC(np->n_attrdirfh, u_char*, fh->fh_len + 1, M_TEMP, M_WAITOK);
6d2010ae
A
8044 }
8045 if (np->n_attrdirfh) {
8046 /* remember the attrdir fh in the node */
f427ee49
A
8047 *np->n_attrdirfh = (unsigned char)fh->fh_len; /* No truncation because fh_len's value is checked during nfs4_parsefattr() */
8048 bcopy(fh->fh_data, np->n_attrdirfh + 1, fh->fh_len);
6d2010ae
A
8049 /* create busied node for attrdir */
8050 struct componentname cn;
8051 bzero(&cn, sizeof(cn));
8052 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
f427ee49 8053 cn.cn_namelen = NFS_STRLEN_INT(_PATH_FORKSPECIFIER);
6d2010ae
A
8054 cn.cn_nameiop = LOOKUP;
8055 // XXX can't set parent correctly (to np) yet
f427ee49 8056 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, 0, &adnp);
6d2010ae
A
8057 if (!error) {
8058 adlockerror = 0;
8059 /* set the node busy */
8060 SET(adnp->n_flag, NBUSY);
8061 adbusyerror = 0;
8062 }
8063 /* if no adnp, oh well... */
8064 error = 0;
8065 }
8066 }
f427ee49
A
8067 NVATTR_CLEANUP(nvattr);
8068 fh->fh_len = 0;
6d2010ae
A
8069 }
8070 if (open) {
8071 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
8072 nfs_owner_seqid_increment(noop, NULL, error);
8073 nfsm_chain_get_stateid(error, &nmrep, &newnofp->nof_stateid);
8074 nfsm_chain_check_change_info(error, &nmrep, adnp);
8075 nfsm_chain_get_32(error, &nmrep, rflags);
8076 bmlen = NFS_ATTR_BITMAP_LEN;
8077 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
8078 nfsm_chain_get_32(error, &nmrep, delegation);
0a7de745 8079 if (!error) {
6d2010ae
A
8080 switch (delegation) {
8081 case NFS_OPEN_DELEGATE_NONE:
8082 break;
8083 case NFS_OPEN_DELEGATE_READ:
8084 case NFS_OPEN_DELEGATE_WRITE:
8085 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
8086 nfsm_chain_get_32(error, &nmrep, recall);
0a7de745 8087 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
6d2010ae 8088 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
0a7de745 8089 }
6d2010ae
A
8090 /* if we have any trouble accepting the ACE, just invalidate it */
8091 ace_type = ace_flags = ace_mask = len = 0;
8092 nfsm_chain_get_32(error, &nmrep, ace_type);
8093 nfsm_chain_get_32(error, &nmrep, ace_flags);
8094 nfsm_chain_get_32(error, &nmrep, ace_mask);
8095 nfsm_chain_get_32(error, &nmrep, len);
8096 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
8097 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
8098 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
8099 if (!error && (len >= slen)) {
0a7de745
A
8100 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
8101 if (s) {
8102 slen = len + 1;
8103 } else {
6d2010ae 8104 ace.ace_flags = 0;
0a7de745 8105 }
6d2010ae 8106 }
0a7de745 8107 if (s) {
6d2010ae 8108 nfsm_chain_get_opaque(error, &nmrep, len, s);
0a7de745 8109 } else {
6d2010ae 8110 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
0a7de745 8111 }
6d2010ae
A
8112 if (!error && s) {
8113 s[len] = '\0';
0a7de745 8114 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
6d2010ae 8115 ace.ace_flags = 0;
0a7de745 8116 }
6d2010ae 8117 }
0a7de745 8118 if (error || !s) {
6d2010ae 8119 ace.ace_flags = 0;
0a7de745
A
8120 }
8121 if (s && (s != sbuf)) {
6d2010ae 8122 FREE(s, M_TEMP);
0a7de745 8123 }
6d2010ae
A
8124 break;
8125 default:
8126 error = EBADRPC;
8127 break;
8128 }
0a7de745 8129 }
6d2010ae
A
8130 /* At this point if we have no error, the object was created/opened. */
8131 open_error = error;
8132 } else {
8133 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOOKUP);
8134 }
2d21ac55
A
8135 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
8136 nfsmout_if(error);
f427ee49 8137 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
2d21ac55 8138 nfsmout_if(error);
f427ee49 8139 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh->fh_len) {
6d2010ae 8140 error = EIO;
2d21ac55
A
8141 goto nfsmout;
8142 }
0a7de745 8143 if (prefetch) {
6d2010ae 8144 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
0a7de745 8145 }
6d2010ae 8146 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
0a7de745 8147 if (!hadattrdir) {
6d2010ae 8148 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
0a7de745 8149 }
2d21ac55 8150 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae
A
8151 nfsmout_if(error);
8152 xid = savedxid;
8153 nfsm_chain_loadattr(error, &nmrep, adnp, nmp->nm_vers, &xid);
8154 nfsmout_if(error);
2d21ac55 8155
6d2010ae 8156 if (open) {
0a7de745 8157 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
6d2010ae 8158 newnofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
0a7de745 8159 }
6d2010ae
A
8160 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
8161 if (adnp) {
8162 nfs_node_unlock(adnp);
8163 adlockerror = ENOENT;
8164 }
f427ee49
A
8165 NVATTR_CLEANUP(nvattr);
8166 error = nfs4_open_confirm_rpc(nmp, adnp ? adnp : np, fh->fh_data, fh->fh_len, noop, &newnofp->nof_stateid, thd, cred, nvattr, &xid);
6d2010ae
A
8167 nfsmout_if(error);
8168 savedxid = xid;
0a7de745 8169 if ((adlockerror = nfs_node_lock(adnp))) {
6d2010ae 8170 error = adlockerror;
0a7de745 8171 }
2d21ac55 8172 }
2d21ac55
A
8173 }
8174
6d2010ae
A
8175nfsmout:
8176 if (open && adnp && !adlockerror) {
8177 if (!open_error && (adnp->n_flag & NNEGNCENTRIES)) {
8178 adnp->n_flag &= ~NNEGNCENTRIES;
8179 cache_purge_negatives(NFSTOV(adnp));
8180 }
8181 adnp->n_flag |= NMODIFIED;
8182 nfs_node_unlock(adnp);
8183 adlockerror = ENOENT;
8184 nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
8185 }
8186 if (adnp && !adlockerror && (error == ENOENT) &&
8187 (cnp->cn_flags & MAKEENTRY) && (cnp->cn_nameiop != CREATE) && negnamecache) {
8188 /* add a negative entry in the name cache */
8189 cache_enter(NFSTOV(adnp), NULL, cnp);
8190 adnp->n_flag |= NNEGNCENTRIES;
8191 }
8192 if (adnp && !adlockerror) {
8193 nfs_node_unlock(adnp);
8194 adlockerror = ENOENT;
8195 }
f427ee49 8196 if (!error && !anp && fh->fh_len) {
2d21ac55
A
8197 /* create the vnode with the filehandle and attributes */
8198 xid = savedxid;
f427ee49 8199 error = nfs_nget(NFSTOMP(np), adnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &anp);
6d2010ae
A
8200 if (!error) {
8201 *anpp = anp;
8202 nfs_node_unlock(anp);
8203 }
8204 if (!error && open) {
8205 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
8206 /* After we have a node, add our open file struct to the node */
8207 nofp = newnofp;
8208 error = nfs_open_file_find_internal(anp, noop, &nofp, 0, 0, 0);
8209 if (error) {
8210 /* This shouldn't happen, because we passed in a new nofp to use. */
8211 printf("nfs_open_file_find_internal failed! %d\n", error);
8212 nofp = NULL;
8213 } else if (nofp != newnofp) {
8214 /*
8215 * Hmm... an open file struct already exists.
8216 * Mark the existing one busy and merge our open into it.
8217 * Then destroy the one we created.
8218 * Note: there's no chance of an open conflict because the
8219 * open has already been granted.
8220 */
8221 nofpbusyerror = nfs_open_file_set_busy(nofp, NULL);
8222 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
8223 nofp->nof_stateid = newnofp->nof_stateid;
0a7de745 8224 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) {
6d2010ae 8225 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
0a7de745 8226 }
6d2010ae
A
8227 nfs_open_file_clear_busy(newnofp);
8228 nfs_open_file_destroy(newnofp);
8229 newnofp = NULL;
8230 }
8231 if (!error) {
8232 newnofp = NULL;
8233 nofpbusyerror = 0;
8234 /* mark the node as holding a create-initiated open */
8235 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
8236 nofp->nof_creator = current_thread();
0a7de745 8237 if (nofpp) {
6d2010ae 8238 *nofpp = nofp;
0a7de745 8239 }
6d2010ae
A
8240 }
8241 }
2d21ac55 8242 }
f427ee49 8243 NVATTR_CLEANUP(nvattr);
6d2010ae
A
8244 if (open && ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE))) {
8245 if (!error && anp && !recall) {
8246 /* stuff the delegation state in the node */
8247 lck_mtx_lock(&anp->n_openlock);
8248 anp->n_openflags &= ~N_DELEG_MASK;
8249 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
8250 anp->n_dstateid = dstateid;
8251 anp->n_dace = ace;
8252 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8253 lck_mtx_lock(&nmp->nm_lock);
0a7de745 8254 if (anp->n_dlink.tqe_next == NFSNOLIST) {
6d2010ae 8255 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
0a7de745 8256 }
6d2010ae
A
8257 lck_mtx_unlock(&nmp->nm_lock);
8258 }
8259 lck_mtx_unlock(&anp->n_openlock);
8260 } else {
8261 /* give the delegation back */
8262 if (anp) {
f427ee49 8263 if (NFS_CMPFH(anp, fh->fh_data, fh->fh_len)) {
6d2010ae
A
8264 /* update delegation state and return it */
8265 lck_mtx_lock(&anp->n_openlock);
8266 anp->n_openflags &= ~N_DELEG_MASK;
8267 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
8268 anp->n_dstateid = dstateid;
8269 anp->n_dace = ace;
8270 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8271 lck_mtx_lock(&nmp->nm_lock);
0a7de745 8272 if (anp->n_dlink.tqe_next == NFSNOLIST) {
6d2010ae 8273 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
0a7de745 8274 }
6d2010ae
A
8275 lck_mtx_unlock(&nmp->nm_lock);
8276 }
8277 lck_mtx_unlock(&anp->n_openlock);
8278 /* don't need to send a separate delegreturn for fh */
f427ee49 8279 fh->fh_len = 0;
6d2010ae
A
8280 }
8281 /* return anp's current delegation */
8282 nfs4_delegation_return(anp, 0, thd, cred);
8283 }
f427ee49
A
8284 if (fh->fh_len) { /* return fh's delegation if it wasn't for anp */
8285 nfs4_delegreturn_rpc(nmp, fh->fh_data, fh->fh_len, &dstateid, 0, thd, cred);
0a7de745 8286 }
6d2010ae
A
8287 }
8288 }
8289 if (open) {
8290 if (newnofp) {
8291 /* need to cleanup our temporary nofp */
8292 nfs_open_file_clear_busy(newnofp);
8293 nfs_open_file_destroy(newnofp);
8294 newnofp = NULL;
8295 } else if (nofp && !nofpbusyerror) {
8296 nfs_open_file_clear_busy(nofp);
8297 nofpbusyerror = ENOENT;
8298 }
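 /*
 * If nfs_mount_state_in_use_end() reports that the mount went through
 * (or needs) state recovery while we held it in use - presumably for
 * errors like a stale stateid or a server grace period - everything
 * built so far is dropped and the whole open is redone from "restart"
 * with fresh state.
 */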
8299 if (inuse && nfs_mount_state_in_use_end(nmp, error)) {
8300 inuse = 0;
8301 nofp = newnofp = NULL;
8302 rflags = delegation = recall = eof = rlen = retlen = 0;
8303 ace.ace_flags = 0;
8304 s = sbuf;
8305 slen = sizeof(sbuf);
8306 nfsm_chain_cleanup(&nmreq);
8307 nfsm_chain_cleanup(&nmrep);
8308 if (anp) {
8309 vnode_put(NFSTOV(anp));
8310 *anpp = anp = NULL;
8311 }
8312 hadattrdir = (adnp != NULL);
8313 if (noopbusy) {
8314 nfs_open_owner_clear_busy(noop);
8315 noopbusy = 0;
8316 }
8317 goto restart;
8318 }
f427ee49 8319 inuse = 0;
6d2010ae
A
8320 if (noop) {
8321 if (noopbusy) {
8322 nfs_open_owner_clear_busy(noop);
8323 noopbusy = 0;
8324 }
8325 nfs_open_owner_rele(noop);
8326 }
8327 }
8328 if (!error && prefetch && nmrep.nmc_mhead) {
8329 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
8330 nfsm_chain_op_check(error, &nmrep, NFS_OP_NVERIFY);
8331 nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
8332 nfsm_chain_get_32(error, &nmrep, eof);
8333 nfsm_chain_get_32(error, &nmrep, retlen);
8334 if (!error && anp) {
8335 /*
8336 * There can be one problem with doing the prefetch.
8337 * Because we don't have the node before we start the RPC, we
8338 * can't have the buffer busy while the READ is performed.
8339 * So there is a chance that other I/O occurred on the same
8340 * range of data while we were performing this RPC. If that
8341 * happens, then it's possible the data we have in the READ
8342 * response is no longer up to date.
8343 * Once we have the node and the buffer, we need to make sure
8344 * that there's no chance we could be putting stale data in
8345 * the buffer.
8346 * So, we check if the range read is dirty or if any I/O may
8347 * have occurred on it while we were performing our RPC.
8348 */
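 /*
 * Roughly, the checks below only keep the prefetched data when the buffer
 * has no dirty byte range, none of the pages covering the read range are
 * marked dirty, and the node's n_lastio is still older than the "now"
 * timestamp taken before the RPC was sent.
 */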
8349 struct nfsbuf *bp = NULL;
8350 int lastpg;
f427ee49 8351 nfsbufpgs pagemask, pagemaskand;
6d2010ae
A
8352
8353 retlen = MIN(retlen, rlen);
8354
8355 /* check if node needs size update or invalidation */
0a7de745 8356 if (ISSET(anp->n_flag, NUPDATESIZE)) {
6d2010ae 8357 nfs_data_update_size(anp, 0);
0a7de745 8358 }
6d2010ae
A
8359 if (!(error = nfs_node_lock(anp))) {
8360 if (anp->n_flag & NNEEDINVALIDATE) {
8361 anp->n_flag &= ~NNEEDINVALIDATE;
8362 nfs_node_unlock(anp);
0a7de745
A
8363 error = nfs_vinvalbuf(NFSTOV(anp), V_SAVE | V_IGNORE_WRITEERR, ctx, 1);
8364 if (!error) { /* let's play it safe and just drop the data */
6d2010ae 8365 error = EIO;
0a7de745 8366 }
6d2010ae
A
8367 } else {
8368 nfs_node_unlock(anp);
8369 }
8370 }
2d21ac55 8371
6d2010ae 8372 /* calculate page mask for the range of data read */
f427ee49
A
8373 lastpg = (retlen - 1) / PAGE_SIZE;
8374 nfs_buf_pgs_get_page_mask(&pagemask, lastpg + 1);
6d2010ae 8375
0a7de745
A
8376 if (!error) {
8377 error = nfs_buf_get(anp, 0, nmp->nm_biosize, thd, NBLK_READ | NBLK_NOWAIT, &bp);
8378 }
6d2010ae 8379 /* don't save the data if dirty or potential I/O conflict */
f427ee49
A
8380 if (bp) { /* guard: bp may still be NULL if nfs_buf_get() was skipped or failed */
 nfs_buf_pgs_bit_and(&bp->nb_dirty, &pagemask, &pagemaskand);
 }
8381 if (!error && bp && !bp->nb_dirtyoff && !nfs_buf_pgs_is_set(&pagemaskand) &&
6d2010ae 8382 timevalcmp(&anp->n_lastio, &now, <)) {
316670eb 8383 OSAddAtomic64(1, &nfsstats.read_bios);
0a7de745 8384 CLR(bp->nb_flags, (NB_DONE | NB_ASYNC));
6d2010ae
A
8385 SET(bp->nb_flags, NB_READ);
8386 NFS_BUF_MAP(bp);
8387 nfsm_chain_get_opaque(error, &nmrep, retlen, bp->nb_data);
8388 if (error) {
8389 bp->nb_error = error;
8390 SET(bp->nb_flags, NB_ERROR);
8391 } else {
8392 bp->nb_offio = 0;
8393 bp->nb_endio = rlen;
0a7de745 8394 if ((retlen > 0) && (bp->nb_endio < (int)retlen)) {
6d2010ae 8395 bp->nb_endio = retlen;
0a7de745 8396 }
6d2010ae
A
8397 if (eof || (retlen == 0)) {
8398 /* zero out the remaining data (up to EOF) */
8399 off_t rpcrem, eofrem, rem;
8400 rpcrem = (rlen - retlen);
8401 eofrem = anp->n_size - (NBOFF(bp) + retlen);
8402 rem = (rpcrem < eofrem) ? rpcrem : eofrem;
0a7de745 8403 if (rem > 0) {
6d2010ae 8404 bzero(bp->nb_data + retlen, rem);
0a7de745 8405 }
6d2010ae
A
8406 } else if ((retlen < rlen) && !ISSET(bp->nb_flags, NB_ERROR)) {
8407 /* ugh... short read ... just invalidate for now... */
8408 SET(bp->nb_flags, NB_INVAL);
8409 }
8410 }
8411 nfs_buf_read_finish(bp);
8412 microuptime(&anp->n_lastio);
8413 }
0a7de745 8414 if (bp) {
6d2010ae 8415 nfs_buf_release(bp, 1);
0a7de745 8416 }
2d21ac55 8417 }
6d2010ae 8418 error = 0; /* ignore any transient error in processing the prefetch */
2d21ac55 8419 }
6d2010ae
A
8420 if (adnp && !adbusyerror) {
8421 nfs_node_clear_busy(adnp);
8422 adbusyerror = ENOENT;
8423 }
8424 if (!busyerror) {
8425 nfs_node_clear_busy(np);
8426 busyerror = ENOENT;
8427 }
0a7de745 8428 if (adnp) {
6d2010ae 8429 vnode_put(NFSTOV(adnp));
0a7de745 8430 }
f427ee49
A
8431 if (inuse) {
8432 nfs_mount_state_in_use_end(nmp, error);
8433 }
6d2010ae
A
8434 if (error && *anpp) {
8435 vnode_put(NFSTOV(*anpp));
8436 *anpp = NULL;
8437 }
8438 nfsm_chain_cleanup(&nmreq);
8439 nfsm_chain_cleanup(&nmrep);
f427ee49
A
8440out_free:
8441 NFS_ZFREE(nfs_fhandle_zone, fh);
8442 NFS_ZFREE(nfs_req_zone, req);
8443 FREE(nvattr, M_TEMP);
0a7de745 8444 return error;
6d2010ae
A
8445}
8446
8447/*
8448 * Remove a named attribute.
8449 */
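/*
 * A sketch of the flow below: if the caller didn't pass in the attribute
 * node, look it up with nfs4_named_attr_get() (no open access needed);
 * then grab the attribute directory and remove the entry through a regular
 * nfs_vnop_remove() against that directory.
 */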
8450int
8451nfs4_named_attr_remove(nfsnode_t np, nfsnode_t anp, const char *name, vfs_context_t ctx)
8452{
8453 nfsnode_t adnp = NULL;
8454 struct nfsmount *nmp;
8455 struct componentname cn;
8456 struct vnop_remove_args vra;
8457 int error, putanp = 0;
8458
8459 nmp = NFSTONMP(np);
0a7de745
A
8460 if (nfs_mount_gone(nmp)) {
8461 return ENXIO;
8462 }
6d2010ae
A
8463
8464 bzero(&cn, sizeof(cn));
8465 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
f427ee49 8466 cn.cn_namelen = NFS_STRLEN_INT(name);
6d2010ae
A
8467 cn.cn_nameiop = DELETE;
8468 cn.cn_flags = 0;
8469
8470 if (!anp) {
8471 error = nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_NONE,
0a7de745
A
8472 0, ctx, &anp, NULL);
8473 if ((!error && !anp) || (error == ENOATTR)) {
6d2010ae 8474 error = ENOENT;
0a7de745 8475 }
6d2010ae
A
8476 if (error) {
8477 if (anp) {
8478 vnode_put(NFSTOV(anp));
8479 anp = NULL;
8480 }
8481 goto out;
2d21ac55 8482 }
6d2010ae
A
8483 putanp = 1;
8484 }
8485
0a7de745 8486 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
6d2010ae 8487 goto out;
0a7de745 8488 }
6d2010ae
A
8489 adnp = nfs4_named_attr_dir_get(np, 1, ctx);
8490 nfs_node_clear_busy(np);
8491 if (!adnp) {
8492 error = ENOENT;
8493 goto out;
2d21ac55 8494 }
6d2010ae
A
8495
8496 vra.a_desc = &vnop_remove_desc;
8497 vra.a_dvp = NFSTOV(adnp);
8498 vra.a_vp = NFSTOV(anp);
8499 vra.a_cnp = &cn;
8500 vra.a_flags = 0;
8501 vra.a_context = ctx;
8502 error = nfs_vnop_remove(&vra);
8503out:
0a7de745 8504 if (adnp) {
6d2010ae 8505 vnode_put(NFSTOV(adnp));
0a7de745
A
8506 }
8507 if (putanp) {
6d2010ae 8508 vnode_put(NFSTOV(anp));
0a7de745
A
8509 }
8510 return error;
2d21ac55
A
8511}
8512
8513int
6d2010ae
A
8514nfs4_vnop_getxattr(
8515 struct vnop_getxattr_args /* {
0a7de745
A
8516 * struct vnodeop_desc *a_desc;
8517 * vnode_t a_vp;
8518 * const char * a_name;
8519 * uio_t a_uio;
8520 * size_t *a_size;
8521 * int a_options;
8522 * vfs_context_t a_context;
8523 * } */*ap)
2d21ac55 8524{
6d2010ae 8525 vfs_context_t ctx = ap->a_context;
2d21ac55 8526 struct nfsmount *nmp;
f427ee49 8527 struct nfs_vattr *nvattr;
6d2010ae
A
8528 struct componentname cn;
8529 nfsnode_t anp;
8530 int error = 0, isrsrcfork;
2d21ac55 8531
6d2010ae 8532 nmp = VTONMP(ap->a_vp);
0a7de745
A
8533 if (nfs_mount_gone(nmp)) {
8534 return ENXIO;
8535 }
2d21ac55 8536
0a7de745
A
8537 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8538 return ENOTSUP;
8539 }
f427ee49
A
8540
8541 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
8542 error = nfs_getattr(VTONFS(ap->a_vp), nvattr, ctx, NGA_CACHED);
0a7de745 8543 if (error) {
f427ee49 8544 goto out;
0a7de745 8545 }
f427ee49
A
8546 if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8547 !(nvattr->nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
8548 error = ENOATTR;
8549 goto out;
0a7de745 8550 }
6d2010ae
A
8551
8552 bzero(&cn, sizeof(cn));
8553 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
f427ee49 8554 cn.cn_namelen = NFS_STRLEN_INT(ap->a_name);
6d2010ae
A
8555 cn.cn_nameiop = LOOKUP;
8556 cn.cn_flags = MAKEENTRY;
8557
8558 /* we'll normally try to prefetch data for xattrs... the resource fork is really a stream */
8559 isrsrcfork = (bcmp(ap->a_name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
8560
8561 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
0a7de745
A
8562 !isrsrcfork ? NFS_GET_NAMED_ATTR_PREFETCH : 0, ctx, &anp, NULL);
8563 if ((!error && !anp) || (error == ENOENT)) {
6d2010ae 8564 error = ENOATTR;
0a7de745 8565 }
6d2010ae 8566 if (!error) {
0a7de745 8567 if (ap->a_uio) {
6d2010ae 8568 error = nfs_bioread(anp, ap->a_uio, 0, ctx);
0a7de745 8569 } else {
6d2010ae 8570 *ap->a_size = anp->n_size;
0a7de745 8571 }
2d21ac55 8572 }
0a7de745 8573 if (anp) {
6d2010ae 8574 vnode_put(NFSTOV(anp));
0a7de745 8575 }
f427ee49
A
8576out:
8577 FREE(nvattr, M_TEMP);
0a7de745 8578 return error;
6d2010ae 8579}
2d21ac55 8580
6d2010ae
A
8581int
8582nfs4_vnop_setxattr(
8583 struct vnop_setxattr_args /* {
0a7de745
A
8584 * struct vnodeop_desc *a_desc;
8585 * vnode_t a_vp;
8586 * const char * a_name;
8587 * uio_t a_uio;
8588 * int a_options;
8589 * vfs_context_t a_context;
8590 * } */*ap)
6d2010ae
A
8591{
8592 vfs_context_t ctx = ap->a_context;
8593 int options = ap->a_options;
8594 uio_t uio = ap->a_uio;
8595 const char *name = ap->a_name;
8596 struct nfsmount *nmp;
8597 struct componentname cn;
8598 nfsnode_t anp = NULL;
8599 int error = 0, closeerror = 0, flags, isrsrcfork, isfinderinfo, empty = 0, i;
8600#define FINDERINFOSIZE 32
8601 uint8_t finfo[FINDERINFOSIZE];
8602 uint32_t *finfop;
8603 struct nfs_open_file *nofp = NULL;
0a7de745 8604 char uio_buf[UIO_SIZEOF(1)];
6d2010ae
A
8605 uio_t auio;
8606 struct vnop_write_args vwa;
8607
8608 nmp = VTONMP(ap->a_vp);
0a7de745
A
8609 if (nfs_mount_gone(nmp)) {
8610 return ENXIO;
8611 }
6d2010ae 8612
0a7de745
A
8613 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8614 return ENOTSUP;
8615 }
6d2010ae 8616
0a7de745
A
8617 if ((options & XATTR_CREATE) && (options & XATTR_REPLACE)) {
8618 return EINVAL;
8619 }
6d2010ae
A
8620
8621 /* XXX limitation based on need to back up uio on short write */
8622 if (uio_iovcnt(uio) > 1) {
8623 printf("nfs4_vnop_setxattr: iovcnt > 1\n");
0a7de745 8624 return EINVAL;
6d2010ae
A
8625 }
8626
8627 bzero(&cn, sizeof(cn));
8628 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
f427ee49 8629 cn.cn_namelen = NFS_STRLEN_INT(name);
6d2010ae
A
8630 cn.cn_nameiop = CREATE;
8631 cn.cn_flags = MAKEENTRY;
8632
8633 isfinderinfo = (bcmp(name, XATTR_FINDERINFO_NAME, sizeof(XATTR_FINDERINFO_NAME)) == 0);
8634 isrsrcfork = isfinderinfo ? 0 : (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
0a7de745 8635 if (!isrsrcfork) {
6d2010ae 8636 uio_setoffset(uio, 0);
0a7de745 8637 }
6d2010ae 8638 if (isfinderinfo) {
0a7de745
A
8639 if (uio_resid(uio) != sizeof(finfo)) {
8640 return ERANGE;
8641 }
6d2010ae 8642 error = uiomove((char*)&finfo, sizeof(finfo), uio);
0a7de745
A
8643 if (error) {
8644 return error;
8645 }
6d2010ae
A
8646 /* setting a FinderInfo of all zeroes means remove the FinderInfo */
8647 empty = 1;
0a7de745 8648 for (i = 0, finfop = (uint32_t*)&finfo; i < (int)(sizeof(finfo) / sizeof(uint32_t)); i++) {
6d2010ae
A
8649 if (finfop[i]) {
8650 empty = 0;
8651 break;
8652 }
0a7de745
A
8653 }
8654 if (empty && !(options & (XATTR_CREATE | XATTR_REPLACE))) {
6d2010ae 8655 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
0a7de745 8656 if (error == ENOENT) {
6d2010ae 8657 error = 0;
0a7de745
A
8658 }
8659 return error;
6d2010ae
A
8660 }
8661 /* otherwise go through the create/open below first, so any XATTR_CREATE/XATTR_REPLACE error is reported before we do the remove */
8662 }
8663
8664 /*
8665 * create/open the xattr
8666 *
8667 * We need to make sure not to create it if XATTR_REPLACE.
8668 * For all xattrs except the resource fork, we also want to
8669 * truncate the xattr to remove any current data. We'll do
8670 * that by setting the size to 0 on create/open.
8671 */
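 /*
 * Rough mapping of the setxattr options onto the flags used below:
 *   neither CREATE nor REPLACE  -> NFS_GET_NAMED_ATTR_CREATE (unchecked)
 *   XATTR_CREATE                -> CREATE + CREATE_GUARDED
 *   XATTR_REPLACE               -> no CREATE flag (the attr must already exist)
 *   any non-resource-fork attr  -> also NFS_GET_NAMED_ATTR_TRUNCATE
 */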
8672 flags = 0;
0a7de745 8673 if (!(options & XATTR_REPLACE)) {
6d2010ae 8674 flags |= NFS_GET_NAMED_ATTR_CREATE;
0a7de745
A
8675 }
8676 if (options & XATTR_CREATE) {
6d2010ae 8677 flags |= NFS_GET_NAMED_ATTR_CREATE_GUARDED;
0a7de745
A
8678 }
8679 if (!isrsrcfork) {
6d2010ae 8680 flags |= NFS_GET_NAMED_ATTR_TRUNCATE;
0a7de745 8681 }
6d2010ae
A
8682
8683 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
0a7de745
A
8684 flags, ctx, &anp, &nofp);
8685 if (!error && !anp) {
6d2010ae 8686 error = ENOATTR;
0a7de745
A
8687 }
8688 if (error) {
6d2010ae 8689 goto out;
0a7de745 8690 }
6d2010ae
A
8691 /* grab the open state from the get/create/open */
8692 if (nofp && !(error = nfs_open_file_set_busy(nofp, NULL))) {
8693 nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
8694 nofp->nof_creator = NULL;
8695 nfs_open_file_clear_busy(nofp);
8696 }
8697
8698 /* Setting an empty FinderInfo really means remove it, skip to the close/remove */
0a7de745 8699 if (isfinderinfo && empty) {
6d2010ae 8700 goto doclose;
0a7de745 8701 }
6d2010ae
A
8702
8703 /*
8704 * Write the data out and flush.
8705 *
8706 * For FinderInfo, we've already copied the data to finfo, so do I/O from there.
8707 */
8708 vwa.a_desc = &vnop_write_desc;
8709 vwa.a_vp = NFSTOV(anp);
8710 vwa.a_uio = NULL;
8711 vwa.a_ioflag = 0;
8712 vwa.a_context = ctx;
8713 if (isfinderinfo) {
8714 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE, &uio_buf, sizeof(uio_buf));
8715 uio_addiov(auio, (uintptr_t)&finfo, sizeof(finfo));
8716 vwa.a_uio = auio;
8717 } else if (uio_resid(uio) > 0) {
8718 vwa.a_uio = uio;
8719 }
8720 if (vwa.a_uio) {
8721 error = nfs_vnop_write(&vwa);
0a7de745 8722 if (!error) {
6d2010ae 8723 error = nfs_flush(anp, MNT_WAIT, vfs_context_thread(ctx), 0);
0a7de745 8724 }
6d2010ae
A
8725 }
8726doclose:
8727 /* Close the xattr. */
8728 if (nofp) {
8729 int busyerror = nfs_open_file_set_busy(nofp, NULL);
8730 closeerror = nfs_close(anp, nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, ctx);
0a7de745 8731 if (!busyerror) {
6d2010ae 8732 nfs_open_file_clear_busy(nofp);
0a7de745 8733 }
6d2010ae 8734 }
0a7de745 8735 if (!error && isfinderinfo && empty) { /* Setting an empty FinderInfo really means remove it */
6d2010ae 8736 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
0a7de745 8737 if (error == ENOENT) {
6d2010ae 8738 error = 0;
0a7de745 8739 }
6d2010ae 8740 }
0a7de745 8741 if (!error) {
6d2010ae 8742 error = closeerror;
0a7de745 8743 }
6d2010ae 8744out:
0a7de745 8745 if (anp) {
6d2010ae 8746 vnode_put(NFSTOV(anp));
0a7de745
A
8747 }
8748 if (error == ENOENT) {
6d2010ae 8749 error = ENOATTR;
0a7de745
A
8750 }
8751 return error;
2d21ac55
A
8752}
8753
8754int
6d2010ae
A
8755nfs4_vnop_removexattr(
8756 struct vnop_removexattr_args /* {
0a7de745
A
8757 * struct vnodeop_desc *a_desc;
8758 * vnode_t a_vp;
8759 * const char * a_name;
8760 * int a_options;
8761 * vfs_context_t a_context;
8762 * } */*ap)
2d21ac55 8763{
6d2010ae 8764 struct nfsmount *nmp = VTONMP(ap->a_vp);
2d21ac55
A
8765 int error;
8766
0a7de745
A
8767 if (nfs_mount_gone(nmp)) {
8768 return ENXIO;
8769 }
8770 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8771 return ENOTSUP;
8772 }
6d2010ae
A
8773
8774 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), NULL, ap->a_name, ap->a_context);
0a7de745 8775 if (error == ENOENT) {
6d2010ae 8776 error = ENOATTR;
0a7de745
A
8777 }
8778 return error;
2d21ac55
A
8779}
8780
8781int
6d2010ae
A
8782nfs4_vnop_listxattr(
8783 struct vnop_listxattr_args /* {
0a7de745
A
8784 * struct vnodeop_desc *a_desc;
8785 * vnode_t a_vp;
8786 * uio_t a_uio;
8787 * size_t *a_size;
8788 * int a_options;
8789 * vfs_context_t a_context;
8790 * } */*ap)
2d21ac55 8791{
6d2010ae
A
8792 vfs_context_t ctx = ap->a_context;
8793 nfsnode_t np = VTONFS(ap->a_vp);
8794 uio_t uio = ap->a_uio;
8795 nfsnode_t adnp = NULL;
8796 struct nfsmount *nmp;
8797 int error, done, i;
f427ee49 8798 struct nfs_vattr *nvattr;
6d2010ae
A
8799 uint64_t cookie, nextcookie, lbn = 0;
8800 struct nfsbuf *bp = NULL;
8801 struct nfs_dir_buf_header *ndbhp;
8802 struct direntry *dp;
2d21ac55 8803
6d2010ae 8804 nmp = VTONMP(ap->a_vp);
0a7de745
A
8805 if (nfs_mount_gone(nmp)) {
8806 return ENXIO;
8807 }
6d2010ae 8808
0a7de745
A
8809 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8810 return ENOTSUP;
8811 }
6d2010ae 8812
f427ee49
A
8813 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
8814 error = nfs_getattr(np, nvattr, ctx, NGA_CACHED);
0a7de745 8815 if (error) {
f427ee49 8816 goto out_free;
0a7de745 8817 }
f427ee49
A
8818 if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8819 !(nvattr->nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
8820 error = 0;
8821 goto out_free;
0a7de745 8822 }
6d2010ae 8823
0a7de745 8824 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
f427ee49 8825 goto out_free;
0a7de745 8826 }
6d2010ae
A
8827 adnp = nfs4_named_attr_dir_get(np, 1, ctx);
8828 nfs_node_clear_busy(np);
0a7de745 8829 if (!adnp) {
6d2010ae 8830 goto out;
0a7de745 8831 }
6d2010ae 8832
0a7de745 8833 if ((error = nfs_node_lock(adnp))) {
6d2010ae 8834 goto out;
0a7de745 8835 }
6d2010ae
A
8836
8837 if (adnp->n_flag & NNEEDINVALIDATE) {
8838 adnp->n_flag &= ~NNEEDINVALIDATE;
8839 nfs_invaldir(adnp);
8840 nfs_node_unlock(adnp);
8841 error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
0a7de745 8842 if (!error) {
6d2010ae 8843 error = nfs_node_lock(adnp);
0a7de745
A
8844 }
8845 if (error) {
6d2010ae 8846 goto out;
0a7de745 8847 }
6d2010ae
A
8848 }
8849
8850 /*
8851 * check for need to invalidate when (re)starting at beginning
8852 */
8853 if (adnp->n_flag & NMODIFIED) {
8854 nfs_invaldir(adnp);
8855 nfs_node_unlock(adnp);
0a7de745 8856 if ((error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1))) {
6d2010ae 8857 goto out;
0a7de745 8858 }
6d2010ae
A
8859 } else {
8860 nfs_node_unlock(adnp);
8861 }
8862 /* nfs_getattr() will check changed and purge caches */
f427ee49 8863 if ((error = nfs_getattr(adnp, nvattr, ctx, NGA_UNCACHED))) {
6d2010ae 8864 goto out;
0a7de745 8865 }
6d2010ae 8866
0a7de745 8867 if (uio && (uio_resid(uio) == 0)) {
6d2010ae 8868 goto out;
0a7de745 8869 }
6d2010ae
A
8870
8871 done = 0;
8872 nextcookie = lbn = 0;
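 /*
 * The loop below walks the attribute directory the same way readdir does:
 * each pass maps the current cookie to a directory buffer (lbn), fills it
 * via nfs_buf_readdir() if it isn't cached, then copies each entry name
 * into the caller's uio (or just accumulates lengths when uio is NULL),
 * skipping names that xattr_protected() filters out.
 */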
8873
8874 while (!error && !done) {
316670eb 8875 OSAddAtomic64(1, &nfsstats.biocache_readdirs);
6d2010ae
A
8876 cookie = nextcookie;
8877getbuffer:
8878 error = nfs_buf_get(adnp, lbn, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
0a7de745 8879 if (error) {
6d2010ae 8880 goto out;
0a7de745 8881 }
6d2010ae
A
8882 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
8883 if (!ISSET(bp->nb_flags, NB_CACHE) || !ISSET(ndbhp->ndbh_flags, NDB_FULL)) {
8884 if (!ISSET(bp->nb_flags, NB_CACHE)) { /* initialize the buffer */
8885 ndbhp->ndbh_flags = 0;
8886 ndbhp->ndbh_count = 0;
8887 ndbhp->ndbh_entry_end = sizeof(*ndbhp);
8888 ndbhp->ndbh_ncgen = adnp->n_ncgen;
8889 }
8890 error = nfs_buf_readdir(bp, ctx);
0a7de745 8891 if (error == NFSERR_DIRBUFDROPPED) {
6d2010ae 8892 goto getbuffer;
0a7de745
A
8893 }
8894 if (error) {
6d2010ae 8895 nfs_buf_release(bp, 1);
0a7de745 8896 }
6d2010ae
A
8897 if (error && (error != ENXIO) && (error != ETIMEDOUT) && (error != EINTR) && (error != ERESTART)) {
8898 if (!nfs_node_lock(adnp)) {
8899 nfs_invaldir(adnp);
8900 nfs_node_unlock(adnp);
8901 }
8902 nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
0a7de745 8903 if (error == NFSERR_BAD_COOKIE) {
6d2010ae 8904 error = ENOENT;
0a7de745 8905 }
6d2010ae 8906 }
0a7de745 8907 if (error) {
6d2010ae 8908 goto out;
0a7de745 8909 }
6d2010ae
A
8910 }
8911
8912 /* go through all the entries copying/counting */
8913 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
0a7de745 8914 for (i = 0; i < ndbhp->ndbh_count; i++) {
6d2010ae
A
8915 if (!xattr_protected(dp->d_name)) {
8916 if (uio == NULL) {
8917 *ap->a_size += dp->d_namlen + 1;
8918 } else if (uio_resid(uio) < (dp->d_namlen + 1)) {
8919 error = ERANGE;
8920 } else {
0a7de745
A
8921 error = uiomove(dp->d_name, dp->d_namlen + 1, uio);
8922 if (error && (error != EFAULT)) {
6d2010ae 8923 error = ERANGE;
0a7de745 8924 }
6d2010ae
A
8925 }
8926 }
8927 nextcookie = dp->d_seekoff;
8928 dp = NFS_DIRENTRY_NEXT(dp);
8929 }
8930
8931 if (i == ndbhp->ndbh_count) {
8932 /* hit end of buffer, move to next buffer */
8933 lbn = nextcookie;
8934 /* if we also hit EOF, we're done */
0a7de745 8935 if (ISSET(ndbhp->ndbh_flags, NDB_EOF)) {
6d2010ae 8936 done = 1;
0a7de745 8937 }
6d2010ae
A
8938 }
8939 if (!error && !done && (nextcookie == cookie)) {
8940 printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie, i, ndbhp->ndbh_count);
8941 error = EIO;
8942 }
8943 nfs_buf_release(bp, 1);
8944 }
8945out:
0a7de745 8946 if (adnp) {
6d2010ae 8947 vnode_put(NFSTOV(adnp));
0a7de745 8948 }
f427ee49
A
8949out_free:
8950 FREE(nvattr, M_TEMP);
0a7de745 8951 return error;
2d21ac55
A
8952}
8953
6d2010ae 8954#if NAMEDSTREAMS
2d21ac55 8955int
6d2010ae
A
8956nfs4_vnop_getnamedstream(
8957 struct vnop_getnamedstream_args /* {
0a7de745
A
8958 * struct vnodeop_desc *a_desc;
8959 * vnode_t a_vp;
8960 * vnode_t *a_svpp;
8961 * const char *a_name;
8962 * enum nsoperation a_operation;
8963 * int a_flags;
8964 * vfs_context_t a_context;
8965 * } */*ap)
2d21ac55
A
8966{
8967 vfs_context_t ctx = ap->a_context;
2d21ac55 8968 struct nfsmount *nmp;
f427ee49 8969 struct nfs_vattr *nvattr;
6d2010ae
A
8970 struct componentname cn;
8971 nfsnode_t anp;
8972 int error = 0;
2d21ac55 8973
6d2010ae 8974 nmp = VTONMP(ap->a_vp);
0a7de745
A
8975 if (nfs_mount_gone(nmp)) {
8976 return ENXIO;
8977 }
2d21ac55 8978
0a7de745
A
8979 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8980 return ENOTSUP;
8981 }
f427ee49
A
8982
8983 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
8984 error = nfs_getattr(VTONFS(ap->a_vp), nvattr, ctx, NGA_CACHED);
0a7de745 8985 if (error) {
f427ee49 8986 goto out;
0a7de745 8987 }
f427ee49
A
8988 if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8989 !(nvattr->nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
8990 error = ENOATTR;
8991 goto out;
0a7de745 8992 }
2d21ac55 8993
6d2010ae
A
8994 bzero(&cn, sizeof(cn));
8995 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
f427ee49 8996 cn.cn_namelen = NFS_STRLEN_INT(ap->a_name);
6d2010ae
A
8997 cn.cn_nameiop = LOOKUP;
8998 cn.cn_flags = MAKEENTRY;
8999
9000 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
0a7de745
A
9001 0, ctx, &anp, NULL);
9002 if ((!error && !anp) || (error == ENOENT)) {
6d2010ae 9003 error = ENOATTR;
0a7de745
A
9004 }
9005 if (!error && anp) {
6d2010ae 9006 *ap->a_svpp = NFSTOV(anp);
0a7de745 9007 } else if (anp) {
6d2010ae 9008 vnode_put(NFSTOV(anp));
0a7de745 9009 }
f427ee49
A
9010out:
9011 FREE(nvattr, M_TEMP);
0a7de745 9012 return error;
2d21ac55
A
9013}
9014
9015int
6d2010ae
A
9016nfs4_vnop_makenamedstream(
9017 struct vnop_makenamedstream_args /* {
0a7de745
A
9018 * struct vnodeop_desc *a_desc;
9019 * vnode_t *a_svpp;
9020 * vnode_t a_vp;
9021 * const char *a_name;
9022 * int a_flags;
9023 * vfs_context_t a_context;
9024 * } */*ap)
2d21ac55
A
9025{
9026 vfs_context_t ctx = ap->a_context;
6d2010ae
A
9027 struct nfsmount *nmp;
9028 struct componentname cn;
9029 nfsnode_t anp;
2d21ac55 9030 int error = 0;
2d21ac55 9031
6d2010ae 9032 nmp = VTONMP(ap->a_vp);
0a7de745
A
9033 if (nfs_mount_gone(nmp)) {
9034 return ENXIO;
9035 }
2d21ac55 9036
0a7de745
A
9037 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
9038 return ENOTSUP;
9039 }
2d21ac55 9040
6d2010ae
A
9041 bzero(&cn, sizeof(cn));
9042 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
f427ee49 9043 cn.cn_namelen = NFS_STRLEN_INT(ap->a_name);
6d2010ae
A
9044 cn.cn_nameiop = CREATE;
9045 cn.cn_flags = MAKEENTRY;
9046
9047 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
0a7de745
A
9048 NFS_GET_NAMED_ATTR_CREATE, ctx, &anp, NULL);
9049 if ((!error && !anp) || (error == ENOENT)) {
6d2010ae 9050 error = ENOATTR;
0a7de745
A
9051 }
9052 if (!error && anp) {
6d2010ae 9053 *ap->a_svpp = NFSTOV(anp);
0a7de745 9054 } else if (anp) {
6d2010ae 9055 vnode_put(NFSTOV(anp));
0a7de745
A
9056 }
9057 return error;
6d2010ae 9058}
2d21ac55 9059
6d2010ae
A
9060int
9061nfs4_vnop_removenamedstream(
9062 struct vnop_removenamedstream_args /* {
0a7de745
A
9063 * struct vnodeop_desc *a_desc;
9064 * vnode_t a_vp;
9065 * vnode_t a_svp;
9066 * const char *a_name;
9067 * int a_flags;
9068 * vfs_context_t a_context;
9069 * } */*ap)
6d2010ae
A
9070{
9071 struct nfsmount *nmp = VTONMP(ap->a_vp);
9072 nfsnode_t np = ap->a_vp ? VTONFS(ap->a_vp) : NULL;
9073 nfsnode_t anp = ap->a_svp ? VTONFS(ap->a_svp) : NULL;
2d21ac55 9074
0a7de745
A
9075 if (nfs_mount_gone(nmp)) {
9076 return ENXIO;
9077 }
2d21ac55
A
9078
9079 /*
6d2010ae
A
9080 * Given that a_svp is a named stream, checking for
9081 * named attribute support is kinda pointless.
2d21ac55 9082 */
0a7de745
A
9083 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
9084 return ENOTSUP;
9085 }
6d2010ae 9086
0a7de745 9087 return nfs4_named_attr_remove(np, anp, ap->a_name, ap->a_context);
2d21ac55
A
9088}
9089
6d2010ae 9090#endif
cb323159 9091#endif /* CONFIG_NFS4 */
ea3f0419
A
9092
9093#endif /* CONFIG_NFS_CLIENT */