]> git.saurik.com Git - apple/xnu.git/blame - bsd/nfs/nfs4_vnops.c
xnu-6153.141.1.tar.gz
[apple/xnu.git] / bsd / nfs / nfs4_vnops.c
CommitLineData
2d21ac55 1/*
cb323159 2 * Copyright (c) 2006-2019 Apple Inc. All rights reserved.
2d21ac55
A
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
2d21ac55
A
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
ea3f0419
A
29#include <nfs/nfs_conf.h>
30#if CONFIG_NFS_CLIENT
31
2d21ac55
A
32/*
33 * vnode op calls for NFS version 4
34 */
35#include <sys/param.h>
36#include <sys/kernel.h>
37#include <sys/systm.h>
38#include <sys/resourcevar.h>
39#include <sys/proc_internal.h>
40#include <sys/kauth.h>
41#include <sys/mount_internal.h>
42#include <sys/malloc.h>
43#include <sys/kpi_mbuf.h>
44#include <sys/conf.h>
45#include <sys/vnode_internal.h>
46#include <sys/dirent.h>
47#include <sys/fcntl.h>
48#include <sys/lockf.h>
49#include <sys/ubc_internal.h>
50#include <sys/attr.h>
51#include <sys/signalvar.h>
6d2010ae
A
52#include <sys/uio_internal.h>
53#include <sys/xattr.h>
54#include <sys/paths.h>
2d21ac55
A
55
56#include <vfs/vfs_support.h>
57
58#include <sys/vm.h>
59
60#include <sys/time.h>
61#include <kern/clock.h>
62#include <libkern/OSAtomic.h>
63
64#include <miscfs/fifofs/fifo.h>
65#include <miscfs/specfs/specdev.h>
66
67#include <nfs/rpcv2.h>
68#include <nfs/nfsproto.h>
69#include <nfs/nfs.h>
70#include <nfs/nfsnode.h>
71#include <nfs/nfs_gss.h>
72#include <nfs/nfsmount.h>
73#include <nfs/nfs_lock.h>
74#include <nfs/xdr_subs.h>
75#include <nfs/nfsm_subs.h>
76
77#include <net/if.h>
78#include <netinet/in.h>
79#include <netinet/in_var.h>
80#include <vm/vm_kern.h>
81
82#include <kern/task.h>
83#include <kern/sched_prim.h>
84
cb323159 85#if CONFIG_NFS4
2d21ac55 86int
fe8ab488 87nfs4_access_rpc(nfsnode_t np, u_int32_t *access, int rpcflags, vfs_context_t ctx)
2d21ac55 88{
b0d623f7 89 int error = 0, lockerror = ENOENT, status, numops, slot;
2d21ac55
A
90 u_int64_t xid;
91 struct nfsm_chain nmreq, nmrep;
92 struct timeval now;
6d2010ae 93 uint32_t access_result = 0, supported = 0, missing;
2d21ac55
A
94 struct nfsmount *nmp = NFSTONMP(np);
95 int nfsvers = nmp->nm_vers;
96 uid_t uid;
6d2010ae 97 struct nfsreq_secinfo_args si;
2d21ac55 98
0a7de745
A
99 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
100 return 0;
101 }
6d2010ae
A
102
103 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
2d21ac55
A
104 nfsm_chain_null(&nmreq);
105 nfsm_chain_null(&nmrep);
106
b0d623f7
A
107 // PUTFH, ACCESS, GETATTR
108 numops = 3;
2d21ac55 109 nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED);
3e170ce0 110 nfsm_chain_add_compound_header(error, &nmreq, "access", nmp->nm_minor_vers, numops);
2d21ac55
A
111 numops--;
112 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
113 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
114 numops--;
115 nfsm_chain_add_32(error, &nmreq, NFS_OP_ACCESS);
6d2010ae 116 nfsm_chain_add_32(error, &nmreq, *access);
2d21ac55
A
117 numops--;
118 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 119 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
2d21ac55
A
120 nfsm_chain_build_done(error, &nmreq);
121 nfsm_assert(error, (numops == 0), EPROTO);
122 nfsmout_if(error);
fe8ab488 123 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
0a7de745
A
124 vfs_context_thread(ctx), vfs_context_ucred(ctx),
125 &si, rpcflags, &nmrep, &xid, &status);
2d21ac55 126
0a7de745 127 if ((lockerror = nfs_node_lock(np))) {
b0d623f7 128 error = lockerror;
0a7de745 129 }
2d21ac55
A
130 nfsm_chain_skip_tag(error, &nmrep);
131 nfsm_chain_get_32(error, &nmrep, numops);
132 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
133 nfsm_chain_op_check(error, &nmrep, NFS_OP_ACCESS);
134 nfsm_chain_get_32(error, &nmrep, supported);
6d2010ae 135 nfsm_chain_get_32(error, &nmrep, access_result);
2d21ac55 136 nfsmout_if(error);
6d2010ae 137 if ((missing = (*access & ~supported))) {
2d21ac55
A
138 /* missing support for something(s) we wanted */
139 if (missing & NFS_ACCESS_DELETE) {
140 /*
141 * If the server doesn't report DELETE (possible
142 * on UNIX systems), we'll assume that it is OK
143 * and just let any subsequent delete action fail
144 * if it really isn't deletable.
145 */
6d2010ae 146 access_result |= NFS_ACCESS_DELETE;
2d21ac55
A
147 }
148 }
6d2010ae
A
149 /* ".zfs" subdirectories may erroneously give a denied answer for modify/delete */
150 if (nfs_access_dotzfs) {
151 vnode_t dvp = NULLVP;
0a7de745
A
152 if (np->n_flag & NISDOTZFSCHILD) { /* may be able to create/delete snapshot dirs */
153 access_result |= (NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE);
154 } else if (((dvp = vnode_getparent(NFSTOV(np))) != NULLVP) && (VTONFS(dvp)->n_flag & NISDOTZFSCHILD)) {
6d2010ae 155 access_result |= NFS_ACCESS_DELETE; /* may be able to delete snapshot dirs */
0a7de745
A
156 }
157 if (dvp != NULLVP) {
6d2010ae 158 vnode_put(dvp);
0a7de745 159 }
6d2010ae 160 }
b0d623f7 161 /* Some servers report DELETE support but erroneously give a denied answer. */
0a7de745 162 if (nfs_access_delete && (*access & NFS_ACCESS_DELETE) && !(access_result & NFS_ACCESS_DELETE)) {
6d2010ae 163 access_result |= NFS_ACCESS_DELETE;
0a7de745 164 }
2d21ac55 165 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 166 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
2d21ac55
A
167 nfsmout_if(error);
168
813fb2f6
A
169 if (nfs_mount_gone(nmp)) {
170 error = ENXIO;
171 }
172 nfsmout_if(error);
173
174 if (auth_is_kerberized(np->n_auth) || auth_is_kerberized(nmp->nm_auth)) {
175 uid = nfs_cred_getasid2uid(vfs_context_ucred(ctx));
176 } else {
177 uid = kauth_cred_getuid(vfs_context_ucred(ctx));
178 }
6d2010ae
A
179 slot = nfs_node_access_slot(np, uid, 1);
180 np->n_accessuid[slot] = uid;
2d21ac55 181 microuptime(&now);
6d2010ae
A
182 np->n_accessstamp[slot] = now.tv_sec;
183 np->n_access[slot] = access_result;
2d21ac55 184
6d2010ae
A
185 /* pass back the access returned with this request */
186 *access = np->n_access[slot];
2d21ac55 187nfsmout:
0a7de745 188 if (!lockerror) {
b0d623f7 189 nfs_node_unlock(np);
0a7de745 190 }
2d21ac55
A
191 nfsm_chain_cleanup(&nmreq);
192 nfsm_chain_cleanup(&nmrep);
0a7de745 193 return error;
2d21ac55
A
194}
195
/*
 * Perform an NFSv4 GETATTR RPC and parse the result into *nvap.
 *
 * The target may be identified by node (np) or by mount+filehandle
 * (mp/fhp/fhsize); the mount is taken from mp when given, else from np.
 * 'flags' (NGA_*) select soft/monitor RPC behavior and whether the ACL
 * attribute is requested.  *xidp returns the transaction id.
 *
 * Compound sent: PUTFH, GETATTR.  Returns 0 or an errno.
 */
int
nfs4_getattr_rpc(
	nfsnode_t np,
	mount_t mp,
	u_char *fhp,
	size_t fhsize,
	int flags,
	vfs_context_t ctx,
	struct nfs_vattr *nvap,
	u_int64_t *xidp)
{
	struct nfsmount *nmp = mp ? VFSTONFS(mp) : NFSTONMP(np);
	int error = 0, status, nfsvers, numops, rpcflags = 0, acls;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	/* does the file system claim ACL support? */
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	/* referral triggers get synthesized attributes instead of an RPC */
	if (np && (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)) {
		nfs4_default_attrs_for_referral_trigger(VTONFS(np->n_parent), NULL, 0, nvap, NULL);
		return 0;
	}

	if (flags & NGA_MONITOR) { /* vnode monitor requests should be soft */
		rpcflags = R_RECOVER;
	}

	if (flags & NGA_SOFT) { /* Return ETIMEDOUT if server not responding */
		rpcflags |= R_SOFT;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "getattr", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fhp, fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	/* only ask for the ACL if the caller wants it AND the server does ACLs */
	if ((flags & NGA_ACL) && acls) {
		NFS_BITMAP_SET(bitmap, NFS_FATTR_ACL);
	}
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, mp, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx),
	    NULL, rpcflags, &nmrep, xidp, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((flags & NGA_ACL) && acls && !NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL)) {
		/* we asked for the ACL but didn't get one... assume there isn't one */
		NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_ACL);
		nvap->nva_acl = NULL;
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
274
/*
 * Perform an NFSv4 READLINK RPC for symlink node 'np'.
 *
 * On entry *buflenp is the size of 'buf'; on success the link contents are
 * copied into 'buf' and *buflenp is updated to the length stored.  A result
 * longer than the buffer is truncated (clamped to n_size or *buflenp - 1).
 *
 * Compound sent: PUTFH, GETATTR, READLINK.  Returns 0 or an errno.
 */
int
nfs4_readlink_rpc(nfsnode_t np, char *buf, uint32_t *buflenp, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, status, numops;
	uint32_t len = 0;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	/* a referral trigger is not a readable symlink */
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, READLINK
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "readlink", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READLINK);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	/* lockerror stays ENOENT if the node lock was never taken */
	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READLINK);
	nfsm_chain_get_32(error, &nmrep, len);
	nfsmout_if(error);
	/* clamp an over-long result so the copy fits the caller's buffer */
	if (len >= *buflenp) {
		if (np->n_size && (np->n_size < *buflenp)) {
			len = np->n_size;
		} else {
			len = *buflenp - 1;
		}
	}
	nfsm_chain_get_opaque(error, &nmrep, len, buf);
	if (!error) {
		*buflenp = len;
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
343
/*
 * Start an asynchronous NFSv4 READ RPC: build the request and hand it to
 * nfs_request_async().  The completion callback info is in 'cb' and the
 * in-flight request is returned via *reqp; the reply is consumed later by
 * nfs4_read_rpc_async_finish().
 *
 * Compound sent: PUTFH, READ, GETATTR.  Returns 0 or an errno.
 */
int
nfs4_read_rpc_async(
	nfsnode_t np,
	off_t offset,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	/* referral triggers have no data to read */
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, READ, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "read", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
	/* use the open/lock stateid appropriate for this thread/cred */
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, len);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}
396
/*
 * Finish an asynchronous NFSv4 READ RPC started by nfs4_read_rpc_async():
 * wait for / collect the reply, copy the data into 'uio', and report the
 * byte count (*lenp, clamped to the request size) and EOF state (*eofp).
 *
 * Returns EINPROGRESS if the async request was restarted (caller must not
 * touch the reply), otherwise 0 or an errno.
 */
int
nfs4_read_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	uio_t uio,
	size_t *lenp,
	int *eofp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, nfsvers, numops, status, eof = 0;
	size_t retlen = 0;
	u_int64_t xid;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		/* mount is gone: abandon the in-flight request */
		nfs_request_async_cancel(req);
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) { /* async request restarted */
		return error;
	}

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
	nfsm_chain_get_32(error, &nmrep, eof);
	nfsm_chain_get_32(error, &nmrep, retlen);
	if (!error) {
		/* never report more than the caller asked for */
		*lenp = MIN(retlen, *lenp);
		error = nfsm_chain_get_uio(&nmrep, *lenp, uio);
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	if (eofp) {
		/* a zero-byte result with no EOF flag is still treated as EOF */
		if (!eof && !retlen) {
			eof = 1;
		}
		*eofp = eof;
	}
	nfsm_chain_cleanup(&nmrep);
	/* track last I/O time for named-attribute nodes */
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) {
		microuptime(&np->n_lastio);
	}
	return error;
}
455
/*
 * Start an asynchronous NFSv4 WRITE RPC: build the request (including 'len'
 * bytes of data taken from 'uio') and hand it to nfs_request_async().  The
 * reply is consumed later by nfs4_write_rpc_async_finish().
 *
 * 'iomode' is the requested stability (UNSTABLE/DATASYNC/FILESYNC); it is
 * downgraded to UNSTABLE on async mounts when nfs_allow_async is set.
 *
 * Compound sent: PUTFH, WRITE, GETATTR.  Returns 0 or an errno.
 */
int
nfs4_write_rpc_async(
	nfsnode_t np,
	uio_t uio,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	int iomode,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	mount_t mp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	/* referral triggers cannot be written */
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	/* for async mounts, don't bother sending sync write requests */
	if ((iomode != NFS_WRITE_UNSTABLE) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) {
		iomode = NFS_WRITE_UNSTABLE;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, WRITE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 25 * NFSX_UNSIGNED + len);
	nfsm_chain_add_compound_header(error, &nmreq, "write", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_WRITE);
	/* use the open/lock stateid appropriate for this thread/cred */
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, uio_offset(uio));
	nfsm_chain_add_32(error, &nmreq, iomode);
	nfsm_chain_add_32(error, &nmreq, len);
	if (!error) {
		/* copy the write data into the request */
		error = nfsm_chain_add_uio(&nmreq, uio, len);
	}
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}
521
/*
 * Finish an asynchronous NFSv4 WRITE RPC started by nfs4_write_rpc_async():
 * collect the reply and report the bytes written (*rlenp), the stability the
 * server actually committed to (*iomodep), and the write verifier (*wverfp).
 *
 * The mount's cached write verifier is updated under nm_lock; a verifier
 * change (server reboot) is recorded here — presumably callers compare
 * *wverfp against their saved verifier to decide on recommit (TODO confirm).
 *
 * Returns EINPROGRESS if the async request was restarted, else 0 or errno.
 */
int
nfs4_write_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	int *iomodep,
	size_t *rlenp,
	uint64_t *wverfp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, nfsvers, numops, status;
	int committed = NFS_WRITE_FILESYNC;
	size_t rlen = 0;
	u_int64_t xid, wverf;
	mount_t mp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		/* mount is gone: abandon the in-flight request */
		nfs_request_async_cancel(req);
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) { /* async request restarted */
		return error;
	}
	/* re-check: the mount may have died while we waited */
	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		error = ENXIO;
	}
	if (!error && (lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_WRITE);
	nfsm_chain_get_32(error, &nmrep, rlen);
	nfsmout_if(error);
	*rlenp = rlen;
	/* a successful write of zero bytes is a protocol-level I/O error */
	if (rlen <= 0) {
		error = NFSERR_IO;
	}
	nfsm_chain_get_32(error, &nmrep, committed);
	nfsm_chain_get_64(error, &nmrep, wverf);
	nfsmout_if(error);
	if (wverfp) {
		*wverfp = wverf;
	}
	/* cache the server's write verifier on the mount */
	lck_mtx_lock(&nmp->nm_lock);
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
		nmp->nm_verf = wverf;
		nmp->nm_state |= NFSSTA_HASWRITEVERF;
	} else if (nmp->nm_verf != wverf) {
		nmp->nm_verf = wverf;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmrep);
	/* on async mounts, pretend unstable writes were FILESYNC */
	if ((committed != NFS_WRITE_FILESYNC) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) {
		committed = NFS_WRITE_FILESYNC;
	}
	*iomodep = committed;
	/* track last I/O time for named-attribute nodes */
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) {
		microuptime(&np->n_lastio);
	}
	return error;
}
599
/*
 * Perform an NFSv4 REMOVE RPC: delete entry 'name' (length 'namelen') from
 * directory 'dnp'.
 *
 * Compound sent: PUTFH, REMOVE, GETATTR.  If the server is in its grace
 * period (NFSERR_GRACE) the whole request is rebuilt and retried after a
 * short sleep.
 *
 * Returns 'remove_error' — the status of the REMOVE operation itself — so
 * a failure while parsing the trailing GETATTR does not mask a successful
 * remove (the directory attributes are invalidated instead).
 */
int
nfs4_remove_rpc(
	nfsnode_t dnp,
	char *name,
	int namelen,
	thread_t thd,
	kauth_cred_t cred)
{
	int error = 0, lockerror = ENOENT, remove_error = 0, status;
	struct nfsmount *nmp;
	int nfsvers, numops;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	/* can't remove entries from a referral trigger directory */
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
restart:
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, REMOVE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "remove", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_REMOVE);
	nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_REMOVE);
	/* snapshot the REMOVE status before parsing anything further */
	remove_error = error;
	nfsm_chain_check_change_info(error, &nmrep, dnp);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
	if (error && !lockerror) {
		NATTRINVALIDATE(dnp);
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	if (!lockerror) {
		dnp->n_flag |= NMODIFIED;
		nfs_node_unlock(dnp);
	}
	/* server still in grace period: wait a couple seconds and retry */
	if (error == NFSERR_GRACE) {
		tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
		goto restart;
	}

	return remove_error;
}
676
/*
 * Perform an NFSv4 RENAME RPC: rename 'fnameptr' in directory 'fdnp' to
 * 'tnameptr' in directory 'tdnp'.
 *
 * Compound sent: PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO),
 * RESTOREFH, GETATTR(FROM).  Both directories' post-op attributes are
 * loaded; if either load fails, that directory's cached attributes are
 * invalidated instead.  Returns 0 or an errno.
 */
int
nfs4_rename_rpc(
	nfsnode_t fdnp,
	char *fnameptr,
	int fnamelen,
	nfsnode_t tdnp,
	char *tnameptr,
	int tnamelen,
	vfs_context_t ctx)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsmount *nmp;
	u_int64_t xid, savedxid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(fdnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	/* neither end may be a referral trigger directory */
	if (fdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, fdnp, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM)
	numops = 7;
	nfsm_chain_build_alloc_init(error, &nmreq, 30 * NFSX_UNSIGNED + fnamelen + tnamelen);
	nfsm_chain_add_compound_header(error, &nmreq, "rename", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fdnp->n_fhp, fdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RENAME);
	nfsm_chain_add_name(error, &nmreq, fnameptr, fnamelen, nmp);
	nfsm_chain_add_name(error, &nmreq, tnameptr, tnamelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, fdnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request(fdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	/* lock both directories together; lockerror stays ENOENT if not locked */
	if ((lockerror = nfs_node_lock2(fdnp, tdnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RENAME);
	nfsm_chain_check_change_info(error, &nmrep, fdnp);
	nfsm_chain_check_change_info(error, &nmrep, tdnp);
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	/* both GETATTRs share the same xid; save it so each load starts fresh */
	savedxid = xid;
	nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
	if (error && !lockerror) {
		NATTRINVALIDATE(tdnp);
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	xid = savedxid;
	nfsm_chain_loadattr(error, &nmrep, fdnp, nfsvers, &xid);
	if (error && !lockerror) {
		NATTRINVALIDATE(fdnp);
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if (!lockerror) {
		fdnp->n_flag |= NMODIFIED;
		tdnp->n_flag |= NMODIFIED;
		nfs_node_unlock2(fdnp, tdnp);
	}
	return error;
}
774
775/*
776 * NFS V4 readdir RPC.
777 */
2d21ac55 778int
b0d623f7
A
779nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx)
780{
2d21ac55 781 struct nfsmount *nmp;
6d2010ae 782 int error = 0, lockerror, nfsvers, namedattr, rdirplus, bigcookies, numops;
b0d623f7
A
783 int i, status, more_entries = 1, eof, bp_dropped = 0;
784 uint32_t nmreaddirsize, nmrsize;
785 uint32_t namlen, skiplen, fhlen, xlen, attrlen, reclen, space_free, space_needed;
786 uint64_t cookie, lastcookie, xid, savedxid;
787 struct nfsm_chain nmreq, nmrep, nmrepsave;
788 fhandle_t fh;
789 struct nfs_vattr nvattr, *nvattrp;
790 struct nfs_dir_buf_header *ndbhp;
791 struct direntry *dp;
792 char *padstart, padlen;
2d21ac55
A
793 const char *tag;
794 uint32_t entry_attrs[NFS_ATTR_BITMAP_LEN];
b0d623f7 795 struct timeval now;
6d2010ae 796 struct nfsreq_secinfo_args si;
2d21ac55 797
2d21ac55 798 nmp = NFSTONMP(dnp);
0a7de745
A
799 if (nfs_mount_gone(nmp)) {
800 return ENXIO;
801 }
2d21ac55
A
802 nfsvers = nmp->nm_vers;
803 nmreaddirsize = nmp->nm_readdirsize;
804 nmrsize = nmp->nm_rsize;
b0d623f7 805 bigcookies = nmp->nm_state & NFSSTA_BIGCOOKIES;
6d2010ae
A
806 namedattr = (dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) ? 1 : 0;
807 rdirplus = (NMFLAG(nmp, RDIRPLUS) || namedattr) ? 1 : 0;
0a7de745
A
808 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
809 return EINVAL;
810 }
6d2010ae 811 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
2d21ac55
A
812
813 /*
814 * Set up attribute request for entries.
815 * For READDIRPLUS functionality, get everything.
b0d623f7 816 * Otherwise, just get what we need for struct direntry.
2d21ac55
A
817 */
818 if (rdirplus) {
b0d623f7 819 tag = "readdirplus";
6d2010ae 820 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, entry_attrs);
2d21ac55
A
821 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEHANDLE);
822 } else {
b0d623f7 823 tag = "readdir";
2d21ac55
A
824 NFS_CLEAR_ATTRIBUTES(entry_attrs);
825 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_TYPE);
826 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEID);
6d2010ae 827 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_MOUNTED_ON_FILEID);
2d21ac55 828 }
2d21ac55
A
829 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_RDATTR_ERROR);
830
b0d623f7 831 /* lock to protect access to cookie verifier */
0a7de745
A
832 if ((lockerror = nfs_node_lock(dnp))) {
833 return lockerror;
834 }
2d21ac55 835
b0d623f7
A
836 /* determine cookie to use, and move dp to the right offset */
837 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
838 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
839 if (ndbhp->ndbh_count) {
0a7de745 840 for (i = 0; i < ndbhp->ndbh_count - 1; i++) {
b0d623f7 841 dp = NFS_DIRENTRY_NEXT(dp);
0a7de745 842 }
b0d623f7
A
843 cookie = dp->d_seekoff;
844 dp = NFS_DIRENTRY_NEXT(dp);
845 } else {
846 cookie = bp->nb_lblkno;
847 /* increment with every buffer read */
316670eb 848 OSAddAtomic64(1, &nfsstats.readdir_bios);
2d21ac55 849 }
b0d623f7 850 lastcookie = cookie;
2d21ac55
A
851
852 /*
b0d623f7
A
853 * The NFS client is responsible for the "." and ".." entries in the
854 * directory. So, we put them at the start of the first buffer.
6d2010ae 855 * Don't bother for attribute directories.
2d21ac55 856 */
6d2010ae
A
857 if (((bp->nb_lblkno == 0) && (ndbhp->ndbh_count == 0)) &&
858 !(dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
b0d623f7
A
859 fh.fh_len = 0;
860 fhlen = rdirplus ? fh.fh_len + 1 : 0;
861 xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
862 /* "." */
863 namlen = 1;
864 reclen = NFS_DIRENTRY_LEN(namlen + xlen);
0a7de745
A
865 if (xlen) {
866 bzero(&dp->d_name[namlen + 1], xlen);
867 }
b0d623f7 868 dp->d_namlen = namlen;
0a7de745 869 strlcpy(dp->d_name, ".", namlen + 1);
2d21ac55 870 dp->d_fileno = dnp->n_vattr.nva_fileid;
2d21ac55 871 dp->d_type = DT_DIR;
b0d623f7
A
872 dp->d_reclen = reclen;
873 dp->d_seekoff = 1;
874 padstart = dp->d_name + dp->d_namlen + 1 + xlen;
875 dp = NFS_DIRENTRY_NEXT(dp);
876 padlen = (char*)dp - padstart;
0a7de745 877 if (padlen > 0) {
b0d623f7 878 bzero(padstart, padlen);
0a7de745
A
879 }
880 if (rdirplus) { /* zero out attributes */
b0d623f7 881 bzero(NFS_DIR_BUF_NVATTR(bp, 0), sizeof(struct nfs_vattr));
0a7de745 882 }
b0d623f7
A
883
884 /* ".." */
885 namlen = 2;
886 reclen = NFS_DIRENTRY_LEN(namlen + xlen);
0a7de745
A
887 if (xlen) {
888 bzero(&dp->d_name[namlen + 1], xlen);
889 }
b0d623f7 890 dp->d_namlen = namlen;
0a7de745
A
891 strlcpy(dp->d_name, "..", namlen + 1);
892 if (dnp->n_parent) {
2d21ac55 893 dp->d_fileno = VTONFS(dnp->n_parent)->n_vattr.nva_fileid;
0a7de745 894 } else {
2d21ac55 895 dp->d_fileno = dnp->n_vattr.nva_fileid;
0a7de745 896 }
2d21ac55 897 dp->d_type = DT_DIR;
b0d623f7
A
898 dp->d_reclen = reclen;
899 dp->d_seekoff = 2;
900 padstart = dp->d_name + dp->d_namlen + 1 + xlen;
901 dp = NFS_DIRENTRY_NEXT(dp);
902 padlen = (char*)dp - padstart;
0a7de745 903 if (padlen > 0) {
b0d623f7 904 bzero(padstart, padlen);
0a7de745
A
905 }
906 if (rdirplus) { /* zero out attributes */
b0d623f7 907 bzero(NFS_DIR_BUF_NVATTR(bp, 1), sizeof(struct nfs_vattr));
0a7de745 908 }
b0d623f7
A
909
910 ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
911 ndbhp->ndbh_count = 2;
2d21ac55
A
912 }
913
914 /*
b0d623f7
A
915 * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
916 * the buffer is full (or we hit EOF). Then put the remainder of the
917 * results in the next buffer(s).
2d21ac55 918 */
b0d623f7
A
919 nfsm_chain_null(&nmreq);
920 nfsm_chain_null(&nmrep);
921 while (nfs_dir_buf_freespace(bp, rdirplus) && !(ndbhp->ndbh_flags & NDB_FULL)) {
b0d623f7
A
922 // PUTFH, GETATTR, READDIR
923 numops = 3;
2d21ac55 924 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
3e170ce0 925 nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
2d21ac55
A
926 numops--;
927 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
928 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
929 numops--;
930 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 931 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
2d21ac55
A
932 numops--;
933 nfsm_chain_add_32(error, &nmreq, NFS_OP_READDIR);
b0d623f7
A
934 nfsm_chain_add_64(error, &nmreq, (cookie <= 2) ? 0 : cookie);
935 nfsm_chain_add_64(error, &nmreq, dnp->n_cookieverf);
2d21ac55
A
936 nfsm_chain_add_32(error, &nmreq, nmreaddirsize);
937 nfsm_chain_add_32(error, &nmreq, nmrsize);
6d2010ae 938 nfsm_chain_add_bitmap_supported(error, &nmreq, entry_attrs, nmp, dnp);
2d21ac55
A
939 nfsm_chain_build_done(error, &nmreq);
940 nfsm_assert(error, (numops == 0), EPROTO);
b0d623f7 941 nfs_node_unlock(dnp);
2d21ac55 942 nfsmout_if(error);
6d2010ae 943 error = nfs_request(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
2d21ac55 944
0a7de745 945 if ((lockerror = nfs_node_lock(dnp))) {
2d21ac55 946 error = lockerror;
0a7de745 947 }
b0d623f7
A
948
949 savedxid = xid;
2d21ac55
A
950 nfsm_chain_skip_tag(error, &nmrep);
951 nfsm_chain_get_32(error, &nmrep, numops);
952 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
953 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 954 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
2d21ac55 955 nfsm_chain_op_check(error, &nmrep, NFS_OP_READDIR);
b0d623f7 956 nfsm_chain_get_64(error, &nmrep, dnp->n_cookieverf);
2d21ac55 957 nfsm_chain_get_32(error, &nmrep, more_entries);
b0d623f7
A
958
959 if (!lockerror) {
960 nfs_node_unlock(dnp);
961 lockerror = ENOENT;
962 }
2d21ac55
A
963 nfsmout_if(error);
964
0a7de745 965 if (rdirplus) {
b0d623f7 966 microuptime(&now);
ea3f0419
A
967 if (lastcookie == 0) {
968 dnp->n_rdirplusstamp_sof = now.tv_sec;
969 dnp->n_rdirplusstamp_eof = 0;
970 }
0a7de745 971 }
b0d623f7
A
972
973 /* loop through the entries packing them into the buffer */
974 while (more_entries) {
2d21ac55 975 /* Entry: COOKIE, NAME, FATTR */
b0d623f7
A
976 nfsm_chain_get_64(error, &nmrep, cookie);
977 nfsm_chain_get_32(error, &nmrep, namlen);
2d21ac55 978 nfsmout_if(error);
b0d623f7
A
979 if (!bigcookies && (cookie >> 32) && (nmp == NFSTONMP(dnp))) {
980 /* we've got a big cookie, make sure flag is set */
981 lck_mtx_lock(&nmp->nm_lock);
982 nmp->nm_state |= NFSSTA_BIGCOOKIES;
983 lck_mtx_unlock(&nmp->nm_lock);
984 bigcookies = 1;
985 }
986 /* just truncate names that don't fit in direntry.d_name */
987 if (namlen <= 0) {
2d21ac55
A
988 error = EBADRPC;
989 goto nfsmout;
990 }
0a7de745 991 if (namlen > (sizeof(dp->d_name) - 1)) {
b0d623f7
A
992 skiplen = namlen - sizeof(dp->d_name) + 1;
993 namlen = sizeof(dp->d_name) - 1;
2d21ac55
A
994 } else {
995 skiplen = 0;
996 }
b0d623f7
A
997 /* guess that fh size will be same as parent */
998 fhlen = rdirplus ? (1 + dnp->n_fhsize) : 0;
999 xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
1000 attrlen = rdirplus ? sizeof(struct nfs_vattr) : 0;
1001 reclen = NFS_DIRENTRY_LEN(namlen + xlen);
1002 space_needed = reclen + attrlen;
1003 space_free = nfs_dir_buf_freespace(bp, rdirplus);
1004 if (space_needed > space_free) {
1005 /*
1006 * We still have entries to pack, but we've
1007 * run out of room in the current buffer.
1008 * So we need to move to the next buffer.
1009 * The block# for the next buffer is the
1010 * last cookie in the current buffer.
1011 */
1012nextbuffer:
1013 ndbhp->ndbh_flags |= NDB_FULL;
1014 nfs_buf_release(bp, 0);
1015 bp_dropped = 1;
1016 bp = NULL;
1017 error = nfs_buf_get(dnp, lastcookie, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
1018 nfsmout_if(error);
1019 /* initialize buffer */
1020 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
1021 ndbhp->ndbh_flags = 0;
1022 ndbhp->ndbh_count = 0;
1023 ndbhp->ndbh_entry_end = sizeof(*ndbhp);
1024 ndbhp->ndbh_ncgen = dnp->n_ncgen;
1025 space_free = nfs_dir_buf_freespace(bp, rdirplus);
1026 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
1027 /* increment with every buffer read */
316670eb 1028 OSAddAtomic64(1, &nfsstats.readdir_bios);
2d21ac55 1029 }
b0d623f7
A
1030 nmrepsave = nmrep;
1031 dp->d_fileno = cookie; /* placeholder */
1032 dp->d_seekoff = cookie;
1033 dp->d_namlen = namlen;
1034 dp->d_reclen = reclen;
2d21ac55 1035 dp->d_type = DT_UNKNOWN;
b0d623f7
A
1036 nfsm_chain_get_opaque(error, &nmrep, namlen, dp->d_name);
1037 nfsmout_if(error);
1038 dp->d_name[namlen] = '\0';
0a7de745 1039 if (skiplen) {
2d21ac55 1040 nfsm_chain_adv(error, &nmrep,
0a7de745
A
1041 nfsm_rndup(namlen + skiplen) - nfsm_rndup(namlen));
1042 }
2d21ac55 1043 nfsmout_if(error);
b0d623f7 1044 nvattrp = rdirplus ? NFS_DIR_BUF_NVATTR(bp, ndbhp->ndbh_count) : &nvattr;
6d2010ae
A
1045 error = nfs4_parsefattr(&nmrep, NULL, nvattrp, &fh, NULL, NULL);
1046 if (!error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_ACL)) {
1047 /* we do NOT want ACLs returned to us here */
1048 NFS_BITMAP_CLR(nvattrp->nva_bitmap, NFS_FATTR_ACL);
1049 if (nvattrp->nva_acl) {
1050 kauth_acl_free(nvattrp->nva_acl);
1051 nvattrp->nva_acl = NULL;
1052 }
1053 }
b0d623f7 1054 if (error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_RDATTR_ERROR)) {
6d2010ae
A
1055 /* OK, we may not have gotten all of the attributes but we will use what we can. */
1056 if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
1057 /* set this up to look like a referral trigger */
1058 nfs4_default_attrs_for_referral_trigger(dnp, dp->d_name, namlen, nvattrp, &fh);
1059 }
2d21ac55
A
1060 error = 0;
1061 }
b0d623f7 1062 /* check for more entries after this one */
2d21ac55
A
1063 nfsm_chain_get_32(error, &nmrep, more_entries);
1064 nfsmout_if(error);
1065
b0d623f7 1066 /* Skip any "." and ".." entries returned from server. */
6d2010ae
A
1067 /* Also skip any bothersome named attribute entries. */
1068 if (((dp->d_name[0] == '.') && ((namlen == 1) || ((namlen == 2) && (dp->d_name[1] == '.')))) ||
1069 (namedattr && (namlen == 11) && (!strcmp(dp->d_name, "SUNWattr_ro") || !strcmp(dp->d_name, "SUNWattr_rw")))) {
b0d623f7 1070 lastcookie = cookie;
2d21ac55
A
1071 continue;
1072 }
1073
0a7de745 1074 if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_TYPE)) {
b0d623f7 1075 dp->d_type = IFTODT(VTTOIF(nvattrp->nva_type));
0a7de745
A
1076 }
1077 if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEID)) {
b0d623f7 1078 dp->d_fileno = nvattrp->nva_fileid;
0a7de745 1079 }
b0d623f7
A
1080 if (rdirplus) {
1081 /* fileid is already in d_fileno, so stash xid in attrs */
1082 nvattrp->nva_fileid = savedxid;
1083 if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
1084 fhlen = fh.fh_len + 1;
1085 xlen = fhlen + sizeof(time_t);
1086 reclen = NFS_DIRENTRY_LEN(namlen + xlen);
1087 space_needed = reclen + attrlen;
1088 if (space_needed > space_free) {
1089 /* didn't actually have the room... move on to next buffer */
1090 nmrep = nmrepsave;
1091 goto nextbuffer;
1092 }
1093 /* pack the file handle into the record */
0a7de745
A
1094 dp->d_name[dp->d_namlen + 1] = fh.fh_len;
1095 bcopy(fh.fh_data, &dp->d_name[dp->d_namlen + 2], fh.fh_len);
b0d623f7
A
1096 } else {
1097 /* mark the file handle invalid */
1098 fh.fh_len = 0;
1099 fhlen = fh.fh_len + 1;
1100 xlen = fhlen + sizeof(time_t);
1101 reclen = NFS_DIRENTRY_LEN(namlen + xlen);
0a7de745 1102 bzero(&dp->d_name[dp->d_namlen + 1], fhlen);
2d21ac55 1103 }
0a7de745 1104 *(time_t*)(&dp->d_name[dp->d_namlen + 1 + fhlen]) = now.tv_sec;
b0d623f7 1105 dp->d_reclen = reclen;
ea3f0419 1106 nfs_rdirplus_update_node_attrs(dnp, dp, &fh, nvattrp, &savedxid);
2d21ac55 1107 }
b0d623f7
A
1108 padstart = dp->d_name + dp->d_namlen + 1 + xlen;
1109 ndbhp->ndbh_count++;
1110 lastcookie = cookie;
1111
1112 /* advance to next direntry in buffer */
1113 dp = NFS_DIRENTRY_NEXT(dp);
1114 ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
1115 /* zero out the pad bytes */
1116 padlen = (char*)dp - padstart;
0a7de745 1117 if (padlen > 0) {
b0d623f7 1118 bzero(padstart, padlen);
0a7de745 1119 }
b0d623f7
A
1120 }
1121 /* Finally, get the eof boolean */
1122 nfsm_chain_get_32(error, &nmrep, eof);
1123 nfsmout_if(error);
1124 if (eof) {
0a7de745 1125 ndbhp->ndbh_flags |= (NDB_FULL | NDB_EOF);
b0d623f7
A
1126 nfs_node_lock_force(dnp);
1127 dnp->n_eofcookie = lastcookie;
ea3f0419
A
1128 if (rdirplus) {
1129 dnp->n_rdirplusstamp_eof = now.tv_sec;
1130 }
b0d623f7
A
1131 nfs_node_unlock(dnp);
1132 } else {
1133 more_entries = 1;
2d21ac55 1134 }
b0d623f7
A
1135 if (bp_dropped) {
1136 nfs_buf_release(bp, 0);
1137 bp = NULL;
1138 break;
2d21ac55 1139 }
0a7de745 1140 if ((lockerror = nfs_node_lock(dnp))) {
2d21ac55 1141 error = lockerror;
0a7de745 1142 }
2d21ac55
A
1143 nfsmout_if(error);
1144 nfsm_chain_cleanup(&nmrep);
b0d623f7 1145 nfsm_chain_null(&nmreq);
2d21ac55 1146 }
2d21ac55 1147nfsmout:
0a7de745 1148 if (bp_dropped && bp) {
b0d623f7 1149 nfs_buf_release(bp, 0);
0a7de745
A
1150 }
1151 if (!lockerror) {
b0d623f7 1152 nfs_node_unlock(dnp);
0a7de745 1153 }
2d21ac55
A
1154 nfsm_chain_cleanup(&nmreq);
1155 nfsm_chain_cleanup(&nmrep);
0a7de745 1156 return bp_dropped ? NFSERR_DIRBUFDROPPED : error;
2d21ac55
A
1157}
1158
/*
 * nfs4_lookup_rpc_async
 *
 * Build and fire off an asynchronous NFSv4 COMPOUND request to look up
 * `name` (length `namelen`) in directory `dnp`:
 *
 *	PUTFH(dnp), GETATTR(dir), LOOKUP(name)|LOOKUPP, GETFH, GETATTR(result)
 *
 * "..", spelled exactly as a two-character name, is translated to the
 * LOOKUPP operation (parent lookup) instead of LOOKUP.  The request is
 * submitted with nfs_request_async(); the caller receives the in-flight
 * request via *reqp and must complete it with nfs4_lookup_rpc_async_finish().
 *
 * Returns 0 on successful submission, or an errno:
 *	ENXIO  - the mount is gone
 *	EINVAL - dnp is a referral trigger node (no RPCs allowed on it)
 *	EPROTO - internal op-count mismatch while building the compound
 */
int
nfs4_lookup_rpc_async(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq **reqp)
{
	int error = 0, isdotdot = 0, nfsvers, numops;
	struct nfsm_chain nmreq;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsmount *nmp;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
		/* ".." -> LOOKUPP; SECINFO context names no specific component */
		isdotdot = 1;
		NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
	} else {
		NFSREQ_SECINFO_SET(&si, dnp, dnp->n_fhp, dnp->n_fhsize, name, namelen);
	}

	nfsm_chain_null(&nmreq);

	// PUTFH, GETATTR, LOOKUP(P), GETFH, GETATTR (FH)
	numops = 5;
	nfsm_chain_build_alloc_init(error, &nmreq, 20 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "lookup", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	numops--;
	if (isdotdot) {
		nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUPP);
	} else {
		nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
		nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	}
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	/* some ".zfs" directories can't handle being asked for some attributes */
	if ((dnp->n_flag & NISDOTZFS) && !isdotdot) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	if ((dnp->n_flag & NISDOTZFSCHILD) && isdotdot) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	if (((namelen == 4) && (name[0] == '.') && (name[1] == 'z') && (name[2] == 'f') && (name[3] == 's'))) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	/* hand the request off asynchronously; caller finishes it via *reqp */
	error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}
1233
6d2010ae 1234
2d21ac55
A
/*
 * nfs4_lookup_rpc_async_finish
 *
 * Complete an asynchronous lookup started by nfs4_lookup_rpc_async().
 * Parses the COMPOUND reply (PUTFH, GETATTR, LOOKUP/LOOKUPP, GETFH,
 * GETATTR), loading the directory's attributes into `dnp` and returning:
 *	*xidp - the reply transaction id (if xidp != NULL)
 *	*fhp  - the looked-up file handle (if fhp != NULL)
 *	*nvap - the looked-up node's attributes (if nvap != NULL)
 *
 * NFSERR_MOVED/NFSERR_INVAL from the final GETATTR are absorbed by
 * synthesizing referral-trigger attributes instead of failing.
 *
 * On the first successful plain LOOKUP while NFSSTA_NEEDSECINFO is set,
 * a SECINFO RPC is issued to establish the mount's default security
 * flavor.  (Note: `op` is initialized to NFS_OP_LOOKUP and never changed,
 * so that gate depends only on the mount-state flag — presumably
 * intentional; confirm against callers before changing.)
 */
int
nfs4_lookup_rpc_async_finish(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq *req,
	u_int64_t *xidp,
	fhandle_t *fhp,
	struct nfs_vattr *nvap)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops, isdotdot = 0;
	uint32_t op = NFS_OP_LOOKUP;
	u_int64_t xid;
	struct nfsmount *nmp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(dnp);
	if (nmp == NULL) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
		/* mirror the request side: ".." was sent as LOOKUPP */
		isdotdot = 1;
	}

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	if (xidp) {
		*xidp = xid;
	}
	/* fold the directory's fresh attributes into dnp's cache */
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);

	nfsm_chain_op_check(error, &nmrep, (isdotdot ? NFS_OP_LOOKUPP : NFS_OP_LOOKUP));
	nfsmout_if(error || !fhp || !nvap);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETFH);
	nfsm_chain_get_32(error, &nmrep, fhp->fh_len);
	/* reject file handles larger than our fixed fh_data buffer */
	if (error == 0 && fhp->fh_len > sizeof(fhp->fh_data)) {
		error = EBADRPC;
	}
	nfsmout_if(error);
	nfsm_chain_get_opaque(error, &nmrep, fhp->fh_len, fhp->fh_data);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
		/* set this up to look like a referral trigger */
		nfs4_default_attrs_for_referral_trigger(dnp, name, namelen, nvap, fhp);
		error = 0;
	} else {
		nfsmout_if(error);
		error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(dnp);
	}
	nfsm_chain_cleanup(&nmrep);
	if (!error && (op == NFS_OP_LOOKUP) && (nmp->nm_state & NFSSTA_NEEDSECINFO)) {
		/* We still need to get SECINFO to set default for mount. */
		/* Do so for the first LOOKUP that returns successfully. */
		struct nfs_sec sec;

		sec.count = NX_MAX_SEC_FLAVORS;
		error = nfs4_secinfo_rpc(nmp, &req->r_secinfo, vfs_context_ucred(ctx), sec.flavors, &sec.count);
		/* [sigh] some implementations return "illegal" error for unsupported ops */
		if (error == NFSERR_OP_ILLEGAL) {
			error = 0;
		}
		if (!error) {
			/* set our default security flavor to the first in the list */
			lck_mtx_lock(&nmp->nm_lock);
			if (sec.count) {
				nmp->nm_auth = sec.flavors[0];
			}
			nmp->nm_state &= ~NFSSTA_NEEDSECINFO;
			lck_mtx_unlock(&nmp->nm_lock);
		}
	}
	return error;
}
1323
/*
 * nfs4_commit_rpc
 *
 * Issue an NFSv4 COMMIT for `count` bytes at `offset` on node `np`
 * (PUTFH, COMMIT, GETATTR) using credential `cred`, then compare the
 * write verifier returned by the server against the caller-supplied
 * `wverf`.
 *
 * Returns:
 *	0                    - commit OK and verifier unchanged
 *	NFSERR_STALEWRITEVERF - server's verifier differs from `wverf`
 *	                        (caller must rewrite its dirty data)
 *	ENXIO/EINVAL/other    - mount gone / referral node / RPC error
 *
 * If the mount has never received a write verifier (NFSSTA_HASWRITEVERF
 * clear) there is nothing to compare, so the call is a no-op success.
 */
int
nfs4_commit_rpc(
	nfsnode_t np,
	uint64_t offset,
	uint64_t count,
	kauth_cred_t cred,
	uint64_t wverf)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, status, nfsvers, numops;
	u_int64_t xid, newwverf;
	uint32_t count32;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	FSDBG(521, np, offset, count, nmp ? nmp->nm_state : 0);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
		return 0;
	}
	nfsvers = nmp->nm_vers;

	/* COMMIT carries a 32-bit count; 0 means "commit to end of file" */
	if (count > UINT32_MAX) {
		count32 = 0;
	} else {
		count32 = count;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, COMMIT, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 19 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "commit", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_COMMIT);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, count32);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    current_thread(), cred, &si, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_COMMIT);
	nfsm_chain_get_64(error, &nmrep, newwverf);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsmout_if(error);
	/* record the server's latest verifier and flag staleness to the caller */
	lck_mtx_lock(&nmp->nm_lock);
	if (nmp->nm_verf != newwverf) {
		nmp->nm_verf = newwverf;
	}
	if (wverf != newwverf) {
		error = NFSERR_STALEWRITEVERF;
	}
	lck_mtx_unlock(&nmp->nm_lock);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
1409
/*
 * nfs4_pathconf_rpc
 *
 * Fetch "pathconf"-style filesystem attributes for node `np` via a
 * single NFSv4 COMPOUND (PUTFH, GETATTR) whose attribute bitmap adds
 * the pathconf-relevant bits (MAXLINK, MAXNAME, NO_TRUNC,
 * CHOWN_RESTRICTED, CASE_INSENSITIVE, CASE_PRESERVING) to the usual
 * getattr set.  The parsed fs attributes are returned in *nfsap and
 * the node attributes are folded into np's attribute cache.
 *
 * Returns 0 or an errno (ENXIO if the mount is gone, EINVAL for a
 * referral-trigger node, or an RPC/parse error).
 */
int
nfs4_pathconf_rpc(
	nfsnode_t np,
	struct nfs_fsattr *nfsap,
	vfs_context_t ctx)
{
	u_int64_t xid;
	int error = 0, lockerror, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	struct nfsmount *nmp = NFSTONMP(np);
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfs_vattr nvattr;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	NVATTR_INIT(&nvattr);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/* NFSv4: fetch "pathconf" info for this node */
	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "pathconf", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXLINK);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXNAME);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_NO_TRUNC);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CHOWN_RESTRICTED);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_INSENSITIVE);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_PRESERVING);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	/* nfsap receives the fs-level (pathconf) attributes; nvattr the node's */
	error = nfs4_parsefattr(&nmrep, nfsap, &nvattr, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	if (!error) {
		nfs_loadattrcache(np, &nvattr, &xid, 0);
	}
	if (!lockerror) {
		nfs_node_unlock(np);
	}
nfsmout:
	NVATTR_CLEANUP(&nvattr);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
1482
/*
 * nfs4_vnop_getattr
 *
 * NFSv4 VNOP_GETATTR entry point: fetch the node's attributes (cached
 * where possible, via nfs_getattr) and translate the NFS attribute set
 * into the VFS struct vnode_attr, marking each requested attribute
 * supported only when the corresponding NFS attribute bit was returned.
 *
 * ACLs are requested (NGA_ACL) only when the caller asked for va_acl
 * AND the server advertises ACL support; ownership of nva.nva_acl is
 * transferred to the caller through VATTR_RETURN in that case.
 */
int
nfs4_vnop_getattr(
	struct vnop_getattr_args /* {
	 *  struct vnodeop_desc *a_desc;
	 *  vnode_t a_vp;
	 *  struct vnode_attr *a_vap;
	 *  vfs_context_t a_context;
	 *  } */*ap)
{
	struct vnode_attr *vap = ap->a_vap;
	struct nfsmount *nmp;
	struct nfs_vattr nva;
	int error, acls, ngaflags;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	/* does the server support ACLs on this mount? */
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	ngaflags = NGA_CACHED;
	if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
		ngaflags |= NGA_ACL;
	}
	error = nfs_getattr(VTONFS(ap->a_vp), &nva, ap->a_context, ngaflags);
	if (error) {
		return error;
	}

	/* copy what we have in nva to *a_vap */
	if (VATTR_IS_ACTIVE(vap, va_rdev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_RAWDEV)) {
		dev_t rdev = makedev(nva.nva_rawdev.specdata1, nva.nva_rawdev.specdata2);
		VATTR_RETURN(vap, va_rdev, rdev);
	}
	if (VATTR_IS_ACTIVE(vap, va_nlink) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_NUMLINKS)) {
		VATTR_RETURN(vap, va_nlink, nva.nva_nlink);
	}
	if (VATTR_IS_ACTIVE(vap, va_data_size) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SIZE)) {
		VATTR_RETURN(vap, va_data_size, nva.nva_size);
	}
	// VATTR_RETURN(vap, va_data_alloc, ???);
	// VATTR_RETURN(vap, va_total_size, ???);
	if (VATTR_IS_ACTIVE(vap, va_total_alloc) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SPACE_USED)) {
		VATTR_RETURN(vap, va_total_alloc, nva.nva_bytes);
	}
	if (VATTR_IS_ACTIVE(vap, va_uid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER)) {
		VATTR_RETURN(vap, va_uid, nva.nva_uid);
	}
	if (VATTR_IS_ACTIVE(vap, va_uuuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER)) {
		VATTR_RETURN(vap, va_uuuid, nva.nva_uuuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_gid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP)) {
		VATTR_RETURN(vap, va_gid, nva.nva_gid);
	}
	if (VATTR_IS_ACTIVE(vap, va_guuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP)) {
		VATTR_RETURN(vap, va_guuid, nva.nva_guuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_mode)) {
		/* ACL-only mounts (or servers without the MODE attr) report wide-open mode */
		if (NMFLAG(nmp, ACLONLY) || !NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_MODE)) {
			VATTR_RETURN(vap, va_mode, 0777);
		} else {
			VATTR_RETURN(vap, va_mode, nva.nva_mode);
		}
	}
	if (VATTR_IS_ACTIVE(vap, va_flags) &&
	    (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) ||
	    NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) ||
	    (nva.nva_flags & NFS_FFLAG_TRIGGER))) {
		/* map NFS archive/hidden flags onto BSD file flags */
		uint32_t flags = 0;
		if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) &&
		    (nva.nva_flags & NFS_FFLAG_ARCHIVED)) {
			flags |= SF_ARCHIVED;
		}
		if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) &&
		    (nva.nva_flags & NFS_FFLAG_HIDDEN)) {
			flags |= UF_HIDDEN;
		}
		VATTR_RETURN(vap, va_flags, flags);
	}
	if (VATTR_IS_ACTIVE(vap, va_create_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_CREATE)) {
		vap->va_create_time.tv_sec = nva.nva_timesec[NFSTIME_CREATE];
		vap->va_create_time.tv_nsec = nva.nva_timensec[NFSTIME_CREATE];
		VATTR_SET_SUPPORTED(vap, va_create_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_access_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_ACCESS)) {
		vap->va_access_time.tv_sec = nva.nva_timesec[NFSTIME_ACCESS];
		vap->va_access_time.tv_nsec = nva.nva_timensec[NFSTIME_ACCESS];
		VATTR_SET_SUPPORTED(vap, va_access_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_modify_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_MODIFY)) {
		vap->va_modify_time.tv_sec = nva.nva_timesec[NFSTIME_MODIFY];
		vap->va_modify_time.tv_nsec = nva.nva_timensec[NFSTIME_MODIFY];
		VATTR_SET_SUPPORTED(vap, va_modify_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_change_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_METADATA)) {
		vap->va_change_time.tv_sec = nva.nva_timesec[NFSTIME_CHANGE];
		vap->va_change_time.tv_nsec = nva.nva_timensec[NFSTIME_CHANGE];
		VATTR_SET_SUPPORTED(vap, va_change_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_backup_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_BACKUP)) {
		vap->va_backup_time.tv_sec = nva.nva_timesec[NFSTIME_BACKUP];
		vap->va_backup_time.tv_nsec = nva.nva_timensec[NFSTIME_BACKUP];
		VATTR_SET_SUPPORTED(vap, va_backup_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_fileid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_FILEID)) {
		VATTR_RETURN(vap, va_fileid, nva.nva_fileid);
	}
	if (VATTR_IS_ACTIVE(vap, va_type) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TYPE)) {
		VATTR_RETURN(vap, va_type, nva.nva_type);
	}
	if (VATTR_IS_ACTIVE(vap, va_filerev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_CHANGE)) {
		VATTR_RETURN(vap, va_filerev, nva.nva_change);
	}

	if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
		/* hand the ACL to the caller; NULL it so NVATTR_CLEANUP won't free it */
		VATTR_RETURN(vap, va_acl, nva.nva_acl);
		nva.nva_acl = NULL;
	}

	// other attrs we might support someday:
	// VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? */);

	NVATTR_CLEANUP(&nva);
	return error;
}
1608
1609int
1610nfs4_setattr_rpc(
1611 nfsnode_t np,
1612 struct vnode_attr *vap,
b0d623f7 1613 vfs_context_t ctx)
2d21ac55
A
1614{
1615 struct nfsmount *nmp = NFSTONMP(np);
6d2010ae 1616 int error = 0, setattr_error = 0, lockerror = ENOENT, status, nfsvers, numops;
b0d623f7 1617 u_int64_t xid, nextxid;
2d21ac55 1618 struct nfsm_chain nmreq, nmrep;
b0d623f7 1619 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6d2010ae
A
1620 uint32_t getbitmap[NFS_ATTR_BITMAP_LEN];
1621 uint32_t setbitmap[NFS_ATTR_BITMAP_LEN];
b0d623f7 1622 nfs_stateid stateid;
6d2010ae 1623 struct nfsreq_secinfo_args si;
2d21ac55 1624
0a7de745
A
1625 if (nfs_mount_gone(nmp)) {
1626 return ENXIO;
1627 }
2d21ac55 1628 nfsvers = nmp->nm_vers;
0a7de745
A
1629 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
1630 return EINVAL;
1631 }
2d21ac55 1632
0a7de745 1633 if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & ~(SF_ARCHIVED | UF_HIDDEN))) {
2d21ac55 1634 /* we don't support setting unsupported flags (duh!) */
0a7de745
A
1635 if (vap->va_active & ~VNODE_ATTR_va_flags) {
1636 return EINVAL; /* return EINVAL if other attributes also set */
1637 } else {
1638 return ENOTSUP; /* return ENOTSUP for chflags(2) */
1639 }
2d21ac55
A
1640 }
1641
6d2010ae 1642 /* don't bother requesting some changes if they don't look like they are changing */
0a7de745 1643 if (VATTR_IS_ACTIVE(vap, va_uid) && (vap->va_uid == np->n_vattr.nva_uid)) {
6d2010ae 1644 VATTR_CLEAR_ACTIVE(vap, va_uid);
0a7de745
A
1645 }
1646 if (VATTR_IS_ACTIVE(vap, va_gid) && (vap->va_gid == np->n_vattr.nva_gid)) {
6d2010ae 1647 VATTR_CLEAR_ACTIVE(vap, va_gid);
0a7de745
A
1648 }
1649 if (VATTR_IS_ACTIVE(vap, va_uuuid) && kauth_guid_equal(&vap->va_uuuid, &np->n_vattr.nva_uuuid)) {
6d2010ae 1650 VATTR_CLEAR_ACTIVE(vap, va_uuuid);
0a7de745
A
1651 }
1652 if (VATTR_IS_ACTIVE(vap, va_guuid) && kauth_guid_equal(&vap->va_guuid, &np->n_vattr.nva_guuid)) {
6d2010ae 1653 VATTR_CLEAR_ACTIVE(vap, va_guuid);
0a7de745 1654 }
6d2010ae
A
1655
1656tryagain:
1657 /* do nothing if no attributes will be sent */
1658 nfs_vattr_set_bitmap(nmp, bitmap, vap);
0a7de745
A
1659 if (!bitmap[0] && !bitmap[1]) {
1660 return 0;
1661 }
6d2010ae
A
1662
1663 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
2d21ac55
A
1664 nfsm_chain_null(&nmreq);
1665 nfsm_chain_null(&nmrep);
1666
6d2010ae
A
1667 /*
1668 * Prepare GETATTR bitmap: if we are setting the ACL or mode, we
1669 * need to invalidate any cached ACL. And if we had an ACL cached,
1670 * we might as well also fetch the new value.
1671 */
1672 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, getbitmap);
1673 if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_ACL) ||
1674 NFS_BITMAP_ISSET(bitmap, NFS_FATTR_MODE)) {
0a7de745 1675 if (NACLVALID(np)) {
6d2010ae 1676 NFS_BITMAP_SET(getbitmap, NFS_FATTR_ACL);
0a7de745 1677 }
6d2010ae
A
1678 NACLINVALIDATE(np);
1679 }
1680
2d21ac55
A
1681 // PUTFH, SETATTR, GETATTR
1682 numops = 3;
1683 nfsm_chain_build_alloc_init(error, &nmreq, 40 * NFSX_UNSIGNED);
3e170ce0 1684 nfsm_chain_add_compound_header(error, &nmreq, "setattr", nmp->nm_minor_vers, numops);
2d21ac55
A
1685 numops--;
1686 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
1687 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
1688 numops--;
1689 nfsm_chain_add_32(error, &nmreq, NFS_OP_SETATTR);
0a7de745 1690 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
b0d623f7 1691 nfs_get_stateid(np, vfs_context_thread(ctx), vfs_context_ucred(ctx), &stateid);
0a7de745 1692 } else {
b0d623f7 1693 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
0a7de745 1694 }
b0d623f7 1695 nfsm_chain_add_stateid(error, &nmreq, &stateid);
2d21ac55
A
1696 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
1697 numops--;
1698 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 1699 nfsm_chain_add_bitmap_supported(error, &nmreq, getbitmap, nmp, np);
2d21ac55
A
1700 nfsm_chain_build_done(error, &nmreq);
1701 nfsm_assert(error, (numops == 0), EPROTO);
1702 nfsmout_if(error);
6d2010ae 1703 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
2d21ac55 1704
0a7de745 1705 if ((lockerror = nfs_node_lock(np))) {
2d21ac55 1706 error = lockerror;
0a7de745 1707 }
2d21ac55
A
1708 nfsm_chain_skip_tag(error, &nmrep);
1709 nfsm_chain_get_32(error, &nmrep, numops);
1710 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6d2010ae 1711 nfsmout_if(error);
2d21ac55 1712 nfsm_chain_op_check(error, &nmrep, NFS_OP_SETATTR);
6d2010ae
A
1713 nfsmout_if(error == EBADRPC);
1714 setattr_error = error;
1715 error = 0;
2d21ac55 1716 bmlen = NFS_ATTR_BITMAP_LEN;
6d2010ae
A
1717 nfsm_chain_get_bitmap(error, &nmrep, setbitmap, bmlen);
1718 if (!error) {
0a7de745 1719 if (VATTR_IS_ACTIVE(vap, va_data_size) && (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
6d2010ae 1720 microuptime(&np->n_lastio);
0a7de745 1721 }
6d2010ae
A
1722 nfs_vattr_set_supported(setbitmap, vap);
1723 error = setattr_error;
1724 }
2d21ac55 1725 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 1726 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
0a7de745 1727 if (error) {
2d21ac55 1728 NATTRINVALIDATE(np);
0a7de745 1729 }
b0d623f7
A
1730 /*
1731 * We just changed the attributes and we want to make sure that we
1732 * see the latest attributes. Get the next XID. If it's not the
1733 * next XID after the SETATTR XID, then it's possible that another
1734 * RPC was in flight at the same time and it might put stale attributes
1735 * in the cache. In that case, we invalidate the attributes and set
1736 * the attribute cache XID to guarantee that newer attributes will
1737 * get loaded next.
1738 */
1739 nextxid = 0;
1740 nfs_get_xid(&nextxid);
1741 if (nextxid != (xid + 1)) {
1742 np->n_xid = nextxid;
1743 NATTRINVALIDATE(np);
1744 }
2d21ac55 1745nfsmout:
0a7de745 1746 if (!lockerror) {
b0d623f7 1747 nfs_node_unlock(np);
0a7de745 1748 }
2d21ac55
A
1749 nfsm_chain_cleanup(&nmreq);
1750 nfsm_chain_cleanup(&nmrep);
6d2010ae
A
1751 if ((setattr_error == EINVAL) && VATTR_IS_ACTIVE(vap, va_acl) && VATTR_IS_ACTIVE(vap, va_mode) && !NMFLAG(nmp, ACLONLY)) {
1752 /*
1753 * Some server's may not like ACL/mode combos that get sent.
1754 * If it looks like that's what the server choked on, try setting
1755 * just the ACL and not the mode (unless it looks like everything
1756 * but mode was already successfully set).
1757 */
1758 if (((bitmap[0] & setbitmap[0]) != bitmap[0]) ||
0a7de745 1759 ((bitmap[1] & (setbitmap[1] | NFS_FATTR_MODE)) != bitmap[1])) {
6d2010ae
A
1760 VATTR_CLEAR_ACTIVE(vap, va_mode);
1761 error = 0;
1762 goto tryagain;
1763 }
1764 }
0a7de745 1765 return error;
2d21ac55 1766}
cb323159 1767#endif /* CONFIG_NFS4 */
2d21ac55 1768
b0d623f7
A
/*
 * Wait for any pending recovery to complete.
 *
 * Sleeps (interruptibly if the mount has the INTR option) until the
 * NFSSTA_RECOVER flag clears.  Returns 0 once recovery is done, or an
 * nfs_sigintr() error if the wait was interrupted or the mount died.
 */
int
nfs_mount_state_wait_for_recovery(struct nfsmount *nmp)
{
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };	/* re-check at least once a second */
	int error = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;

	lck_mtx_lock(&nmp->nm_lock);
	while (nmp->nm_state & NFSSTA_RECOVER) {
		/* give up if a signal is pending or the mount is being torn down */
		if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1))) {
			break;
		}
		/* poke the socket thread so it makes progress on recovery */
		nfs_mount_sock_thread_wake(nmp);
		msleep(&nmp->nm_state, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsrecoverwait", &ts);
		slpflag = 0;	/* only catch signals on the first sleep; nfs_sigintr() covers later passes */
	}
	lck_mtx_unlock(&nmp->nm_lock);

	return error;
}
1791
b0d623f7
A
/*
 * We're about to use/manipulate NFS mount's open/lock state.
 * Wait for any pending state recovery to complete, then
 * mark the state as being in use (which will hold off
 * the recovery thread until we're done).
 *
 * Returns 0 with nm_stateinuse bumped on success; ENXIO if the mount
 * is gone/forced/dead; or an nfs_sigintr() error if interrupted.
 * Every successful call must be balanced by nfs_mount_state_in_use_end().
 */
int
nfs_mount_state_in_use_start(struct nfsmount *nmp, thread_t thd)
{
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };	/* re-check at least once a second */
	int error = 0, slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	lck_mtx_lock(&nmp->nm_lock);
	if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
		lck_mtx_unlock(&nmp->nm_lock);
		return ENXIO;
	}
	while (nmp->nm_state & NFSSTA_RECOVER) {
		if ((error = nfs_sigintr(nmp, NULL, thd, 1))) {
			break;
		}
		nfs_mount_sock_thread_wake(nmp);
		msleep(&nmp->nm_state, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsrecoverwait", &ts);
		slpflag = 0;	/* only catch signals on the first sleep */
	}
	if (!error) {
		nmp->nm_stateinuse++;	/* holds off the recovery thread until _in_use_end() */
	}
	lck_mtx_unlock(&nmp->nm_lock);

	return error;
}
1827
b0d623f7
A
/*
 * We're done using/manipulating the NFS mount's open/lock
 * state.  If the given error indicates that recovery should
 * be performed, we'll initiate recovery.
 *
 * Drops the nm_stateinuse count taken by nfs_mount_state_in_use_start()
 * and returns nonzero if the caller should restart the operation.
 */
int
nfs_mount_state_in_use_end(struct nfsmount *nmp, int error)
{
	int restart = nfs_mount_state_error_should_restart(error);

	if (nfs_mount_gone(nmp)) {
		return restart;
	}
	lck_mtx_lock(&nmp->nm_lock);
	/*
	 * OLD_STATEID and GRACE are retryable without full recovery,
	 * so don't kick off the recovery thread for those.
	 */
	if (restart && (error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE)) {
		printf("nfs_mount_state_in_use_end: error %d, initiating recovery for %s, 0x%x\n",
		    error, vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid);
		nfs_need_recover(nmp, error);
	}
	if (nmp->nm_stateinuse > 0) {
		nmp->nm_stateinuse--;
	} else {
		panic("NFS mount state in use count underrun");
	}
	/* last user out wakes a recovery thread waiting for the state to go idle */
	if (!nmp->nm_stateinuse && (nmp->nm_state & NFSSTA_RECOVER)) {
		wakeup(&nmp->nm_stateinuse);
	}
	lck_mtx_unlock(&nmp->nm_lock);
	if (error == NFSERR_GRACE) {
		/* server is in its grace period; pause briefly before the caller retries */
		tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
	}

	return restart;
}
1862
1863/*
b0d623f7 1864 * Does the error mean we should restart/redo a state-related operation?
2d21ac55
A
1865 */
1866int
b0d623f7 1867nfs_mount_state_error_should_restart(int error)
2d21ac55 1868{
b0d623f7
A
1869 switch (error) {
1870 case NFSERR_STALE_STATEID:
1871 case NFSERR_STALE_CLIENTID:
1872 case NFSERR_ADMIN_REVOKED:
1873 case NFSERR_EXPIRED:
1874 case NFSERR_OLD_STATEID:
1875 case NFSERR_BAD_STATEID:
1876 case NFSERR_GRACE:
0a7de745 1877 return 1;
b0d623f7 1878 }
0a7de745 1879 return 0;
b0d623f7 1880}
2d21ac55 1881
b0d623f7
A
1882/*
1883 * In some cases we may want to limit how many times we restart a
1884 * state-related operation - e.g. we're repeatedly getting NFSERR_GRACE.
1885 * Base the limit on the lease (as long as it's not too short).
1886 */
1887uint
1888nfs_mount_state_max_restarts(struct nfsmount *nmp)
1889{
0a7de745 1890 return MAX(nmp->nm_fsattr.nfsa_lease, 60);
b0d623f7 1891}
2d21ac55 1892
6d2010ae
A
1893/*
1894 * Does the error mean we probably lost a delegation?
1895 */
1896int
1897nfs_mount_state_error_delegation_lost(int error)
1898{
1899 switch (error) {
1900 case NFSERR_STALE_STATEID:
1901 case NFSERR_ADMIN_REVOKED:
1902 case NFSERR_EXPIRED:
1903 case NFSERR_OLD_STATEID:
1904 case NFSERR_BAD_STATEID:
1905 case NFSERR_GRACE: /* ugh! (stupid) RFC 3530 specifically disallows CLAIM_DELEGATE_CUR during grace period? */
0a7de745 1906 return 1;
6d2010ae 1907 }
0a7de745 1908 return 0;
6d2010ae
A
1909}
1910
b0d623f7
A
1911
/*
 * Mark an NFS node's open state as busy.
 *
 * Sleeps until no other thread holds N_OPENBUSY, then claims it.
 * Returns 0 on success, ENXIO if the mount is gone, or an
 * nfs_sigintr() error if the wait was interrupted.
 * Must be balanced with nfs_open_state_clear_busy().
 */
int
nfs_open_state_set_busy(nfsnode_t np, thread_t thd)
{
	struct nfsmount *nmp;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };	/* bounded sleep; loop re-checks the flag */
	int error = 0, slpflag;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	/* only interruptible if the mount is INTR and we have a thread to check signals on */
	slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;

	lck_mtx_lock(&np->n_openlock);
	while (np->n_openflags & N_OPENBUSY) {
		if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
			break;
		}
		np->n_openflags |= N_OPENWANT;	/* ask the current holder to wake us */
		msleep(&np->n_openflags, &np->n_openlock, slpflag, "nfs_open_state_set_busy", &ts);
		slpflag = 0;	/* only catch signals on the first sleep */
	}
	if (!error) {
		np->n_openflags |= N_OPENBUSY;
	}
	lck_mtx_unlock(&np->n_openlock);

	return error;
}
2d21ac55 1944
b0d623f7
A
1945/*
1946 * Clear an NFS node's open state busy flag and wake up
1947 * anyone wanting it.
1948 */
1949void
1950nfs_open_state_clear_busy(nfsnode_t np)
1951{
1952 int wanted;
1953
1954 lck_mtx_lock(&np->n_openlock);
0a7de745 1955 if (!(np->n_openflags & N_OPENBUSY)) {
b0d623f7 1956 panic("nfs_open_state_clear_busy");
0a7de745 1957 }
b0d623f7 1958 wanted = (np->n_openflags & N_OPENWANT);
0a7de745 1959 np->n_openflags &= ~(N_OPENBUSY | N_OPENWANT);
b0d623f7 1960 lck_mtx_unlock(&np->n_openlock);
0a7de745 1961 if (wanted) {
b0d623f7 1962 wakeup(&np->n_openflags);
0a7de745 1963 }
b0d623f7 1964}
2d21ac55 1965
b0d623f7
A
/*
 * Search a mount's open owner list for the owner for this credential.
 * If not found and "alloc" is set, then allocate a new one.
 *
 * Owners are keyed by the credential's uid.  On success the returned
 * owner has a reference taken (caller must nfs_open_owner_rele()).
 * Returns NULL if not found (and !alloc) or if allocation failed.
 */
struct nfs_open_owner *
nfs_open_owner_find(struct nfsmount *nmp, kauth_cred_t cred, int alloc)
{
	uid_t uid = kauth_cred_getuid(cred);
	struct nfs_open_owner *noop, *newnoop = NULL;

tryagain:
	lck_mtx_lock(&nmp->nm_lock);
	TAILQ_FOREACH(noop, &nmp->nm_open_owners, noo_link) {
		if (kauth_cred_getuid(noop->noo_cred) == uid) {
			break;
		}
	}

	if (!noop && !newnoop && alloc) {
		/* drop the mount lock to allocate, then retry the search from scratch */
		lck_mtx_unlock(&nmp->nm_lock);
		MALLOC(newnoop, struct nfs_open_owner *, sizeof(struct nfs_open_owner), M_TEMP, M_WAITOK);
		if (!newnoop) {
			return NULL;
		}
		bzero(newnoop, sizeof(*newnoop));
		lck_mtx_init(&newnoop->noo_lock, nfs_open_grp, LCK_ATTR_NULL);
		newnoop->noo_mount = nmp;
		kauth_cred_ref(cred);	/* owner holds a cred reference until destroyed */
		newnoop->noo_cred = cred;
		newnoop->noo_name = OSAddAtomic(1, &nfs_open_owner_seqnum);
		TAILQ_INIT(&newnoop->noo_opens);
		goto tryagain;
	}
	if (!noop && newnoop) {
		/* still not found on retry: link our new owner into the mount's list */
		newnoop->noo_flags |= NFS_OPEN_OWNER_LINK;
		os_ref_init(&newnoop->noo_refcnt, NULL);
		TAILQ_INSERT_HEAD(&nmp->nm_open_owners, newnoop, noo_link);
		noop = newnoop;
	}
	lck_mtx_unlock(&nmp->nm_lock);

	/* if someone else raced in an owner for this uid, discard our unused allocation */
	if (newnoop && (noop != newnoop)) {
		nfs_open_owner_destroy(newnoop);
	}

	if (noop) {
		nfs_open_owner_ref(noop);
	}

	return noop;
}
2017
/*
 * destroy an open owner that's no longer needed
 *
 * Releases the owner's credential reference, its lock, and the
 * structure itself.  Caller must ensure no one else references it.
 */
void
nfs_open_owner_destroy(struct nfs_open_owner *noop)
{
	if (noop->noo_cred) {
		kauth_cred_unref(&noop->noo_cred);
	}
	lck_mtx_destroy(&noop->noo_lock, nfs_open_grp);
	FREE(noop, M_TEMP);
}
2030
/*
 * acquire a reference count on an open owner
 *
 * The owner's mutex guards the reference count.
 */
void
nfs_open_owner_ref(struct nfs_open_owner *noop)
{
	lck_mtx_lock(&noop->noo_lock);
	os_ref_retain_locked(&noop->noo_refcnt);
	lck_mtx_unlock(&noop->noo_lock);
}
2041
/*
 * drop a reference count on an open owner and destroy it if
 * it is no longer referenced and no longer on the mount's list.
 *
 * Panics on refcount underrun, or if the last reference is dropped
 * while the owner is still marked busy.
 */
void
nfs_open_owner_rele(struct nfs_open_owner *noop)
{
	os_ref_count_t newcount;

	lck_mtx_lock(&noop->noo_lock);
	if (os_ref_get_count(&noop->noo_refcnt) < 1) {
		panic("nfs_open_owner_rele: no refcnt");
	}
	newcount = os_ref_release_locked(&noop->noo_refcnt);
	if (!newcount && (noop->noo_flags & NFS_OPEN_OWNER_BUSY)) {
		panic("nfs_open_owner_rele: busy");
	}
	/* XXX we may potentially want to clean up idle/unused open owner structures */
	if (newcount || (noop->noo_flags & NFS_OPEN_OWNER_LINK)) {
		/* still referenced, or still on the mount's list: keep it */
		lck_mtx_unlock(&noop->noo_lock);
		return;
	}
	/* owner is no longer referenced or linked to mount, so destroy it */
	lck_mtx_unlock(&noop->noo_lock);
	nfs_open_owner_destroy(noop);
}
2068
/*
 * Mark an open owner as busy because we are about to
 * start an operation that uses and updates open owner state.
 *
 * Returns 0 on success, ENXIO if the mount is gone, or an
 * nfs_sigintr() error if the wait was interrupted.
 * Must be balanced with nfs_open_owner_clear_busy().
 */
int
nfs_open_owner_set_busy(struct nfs_open_owner *noop, thread_t thd)
{
	struct nfsmount *nmp;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };	/* bounded sleep; loop re-checks the flag */
	int error = 0, slpflag;

	nmp = noop->noo_mount;
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	/* only interruptible if the mount is INTR and we have a thread to check signals on */
	slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;

	lck_mtx_lock(&noop->noo_lock);
	while (noop->noo_flags & NFS_OPEN_OWNER_BUSY) {
		if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
			break;
		}
		noop->noo_flags |= NFS_OPEN_OWNER_WANT;	/* ask the current holder to wake us */
		msleep(noop, &noop->noo_lock, slpflag, "nfs_open_owner_set_busy", &ts);
		slpflag = 0;	/* only catch signals on the first sleep */
	}
	if (!error) {
		noop->noo_flags |= NFS_OPEN_OWNER_BUSY;
	}
	lck_mtx_unlock(&noop->noo_lock);

	return error;
}
2102
2103/*
2104 * Clear the busy flag on an open owner and wake up anyone waiting
2105 * to mark it busy.
2106 */
2107void
2108nfs_open_owner_clear_busy(struct nfs_open_owner *noop)
2109{
2110 int wanted;
2111
2112 lck_mtx_lock(&noop->noo_lock);
0a7de745 2113 if (!(noop->noo_flags & NFS_OPEN_OWNER_BUSY)) {
b0d623f7 2114 panic("nfs_open_owner_clear_busy");
0a7de745 2115 }
b0d623f7 2116 wanted = (noop->noo_flags & NFS_OPEN_OWNER_WANT);
0a7de745 2117 noop->noo_flags &= ~(NFS_OPEN_OWNER_BUSY | NFS_OPEN_OWNER_WANT);
b0d623f7 2118 lck_mtx_unlock(&noop->noo_lock);
0a7de745 2119 if (wanted) {
b0d623f7 2120 wakeup(noop);
0a7de745 2121 }
b0d623f7
A
2122}
2123
2124/*
2125 * Given an open/lock owner and an error code, increment the
2126 * sequence ID if appropriate.
2127 */
2128void
2129nfs_owner_seqid_increment(struct nfs_open_owner *noop, struct nfs_lock_owner *nlop, int error)
2130{
2131 switch (error) {
2132 case NFSERR_STALE_CLIENTID:
2133 case NFSERR_STALE_STATEID:
2134 case NFSERR_OLD_STATEID:
2135 case NFSERR_BAD_STATEID:
2136 case NFSERR_BAD_SEQID:
2137 case NFSERR_BADXDR:
2138 case NFSERR_RESOURCE:
2139 case NFSERR_NOFILEHANDLE:
2140 /* do not increment the open seqid on these errors */
2141 return;
2142 }
0a7de745 2143 if (noop) {
b0d623f7 2144 noop->noo_seqid++;
0a7de745
A
2145 }
2146 if (nlop) {
b0d623f7 2147 nlop->nlo_seqid++;
0a7de745 2148 }
b0d623f7
A
2149}
2150
/*
 * Search a node's open file list for any conflicts with this request.
 * Also find this open owner's open file structure.
 * If not found and "alloc" is set, then allocate one.
 *
 * Thin wrapper: clears *nofpp so the internal routine won't treat it
 * as a provisional (nodeless) open file.  See
 * nfs_open_file_find_internal() for return values.
 */
int
nfs_open_file_find(
	nfsnode_t np,
	struct nfs_open_owner *noop,
	struct nfs_open_file **nofpp,
	uint32_t accessMode,
	uint32_t denyMode,
	int alloc)
{
	*nofpp = NULL;
	return nfs_open_file_find_internal(np, noop, nofpp, accessMode, denyMode, alloc);
}
2168
/*
 * Internally, allow using a provisional nodeless nofp (passed in via *nofpp)
 * if an existing one is not found. This is used in "create" scenarios to
 * officially add the provisional nofp to the node once the node is created.
 *
 * Returns 0 with *nofpp set on success, EACCES if the request conflicts
 * with an existing open's deny/access modes, ENOMEM if allocation failed,
 * or ESRCH if no open file was found and none could be supplied.
 */
int
nfs_open_file_find_internal(
	nfsnode_t np,
	struct nfs_open_owner *noop,
	struct nfs_open_file **nofpp,
	uint32_t accessMode,
	uint32_t denyMode,
	int alloc)
{
	struct nfs_open_file *nofp = NULL, *nofp2, *newnofp = NULL;

	if (!np) {
		/* no node yet: skip the search and build a provisional nofp */
		goto alloc;
	}
tryagain:
	lck_mtx_lock(&np->n_openlock);
	TAILQ_FOREACH(nofp2, &np->n_opens, nof_link) {
		if (nofp2->nof_owner == noop) {
			nofp = nofp2;
			/* with no requested access mode there's nothing to conflict-check */
			if (!accessMode) {
				break;
			}
		}
		if ((accessMode & nofp2->nof_deny) || (denyMode & nofp2->nof_access)) {
			/* This request conflicts with an existing open on this client. */
			lck_mtx_unlock(&np->n_openlock);
			return EACCES;
		}
	}

	/*
	 * If this open owner doesn't have an open
	 * file structure yet, we create one for it.
	 */
	if (!nofp && !*nofpp && !newnofp && alloc) {
		/* drop the node lock to allocate, then retry the search from scratch */
		lck_mtx_unlock(&np->n_openlock);
alloc:
		MALLOC(newnofp, struct nfs_open_file *, sizeof(struct nfs_open_file), M_TEMP, M_WAITOK);
		if (!newnofp) {
			return ENOMEM;
		}
		bzero(newnofp, sizeof(*newnofp));
		lck_mtx_init(&newnofp->nof_lock, nfs_open_grp, LCK_ATTR_NULL);
		newnofp->nof_owner = noop;
		nfs_open_owner_ref(noop);	/* open file holds a reference on its owner */
		newnofp->nof_np = np;
		lck_mtx_lock(&noop->noo_lock);
		TAILQ_INSERT_HEAD(&noop->noo_opens, newnofp, nof_oolink);
		lck_mtx_unlock(&noop->noo_lock);
		if (np) {
			goto tryagain;
		}
	}
	if (!nofp) {
		if (*nofpp) {
			/* adopt the caller's provisional nofp for this node */
			(*nofpp)->nof_np = np;
			nofp = *nofpp;
		} else {
			nofp = newnofp;
		}
		if (nofp && np) {
			TAILQ_INSERT_HEAD(&np->n_opens, nofp, nof_link);
		}
	}
	if (np) {
		lck_mtx_unlock(&np->n_openlock);
	}

	/* if someone else raced in an open file for this owner, discard ours */
	if (alloc && newnofp && (nofp != newnofp)) {
		nfs_open_file_destroy(newnofp);
	}

	*nofpp = nofp;
	return nofp ? 0 : ESRCH;
}
2249
/*
 * Destroy an open file structure.
 *
 * Unlinks it from its owner's open list, drops the owner reference
 * taken at creation, and frees the structure.
 */
void
nfs_open_file_destroy(struct nfs_open_file *nofp)
{
	lck_mtx_lock(&nofp->nof_owner->noo_lock);
	TAILQ_REMOVE(&nofp->nof_owner->noo_opens, nofp, nof_oolink);
	lck_mtx_unlock(&nofp->nof_owner->noo_lock);
	nfs_open_owner_rele(nofp->nof_owner);
	lck_mtx_destroy(&nofp->nof_lock, nfs_open_grp);
	FREE(nofp, M_TEMP);
}
2263
/*
 * Mark an open file as busy because we are about to
 * start an operation that uses and updates open file state.
 *
 * Returns 0 on success, ENXIO if the mount is gone, or an
 * nfs_sigintr() error if the wait was interrupted.
 * Must be balanced with nfs_open_file_clear_busy().
 */
int
nfs_open_file_set_busy(struct nfs_open_file *nofp, thread_t thd)
{
	struct nfsmount *nmp;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };	/* bounded sleep; loop re-checks the flag */
	int error = 0, slpflag;

	nmp = nofp->nof_owner->noo_mount;
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	/* only interruptible if the mount is INTR and we have a thread to check signals on */
	slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;

	lck_mtx_lock(&nofp->nof_lock);
	while (nofp->nof_flags & NFS_OPEN_FILE_BUSY) {
		if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
			break;
		}
		nofp->nof_flags |= NFS_OPEN_FILE_WANT;	/* ask the current holder to wake us */
		msleep(nofp, &nofp->nof_lock, slpflag, "nfs_open_file_set_busy", &ts);
		slpflag = 0;	/* only catch signals on the first sleep */
	}
	if (!error) {
		nofp->nof_flags |= NFS_OPEN_FILE_BUSY;
	}
	lck_mtx_unlock(&nofp->nof_lock);

	return error;
}
2297
2298/*
2299 * Clear the busy flag on an open file and wake up anyone waiting
2300 * to mark it busy.
2301 */
2302void
2303nfs_open_file_clear_busy(struct nfs_open_file *nofp)
2304{
2305 int wanted;
2306
2307 lck_mtx_lock(&nofp->nof_lock);
0a7de745 2308 if (!(nofp->nof_flags & NFS_OPEN_FILE_BUSY)) {
b0d623f7 2309 panic("nfs_open_file_clear_busy");
0a7de745 2310 }
b0d623f7 2311 wanted = (nofp->nof_flags & NFS_OPEN_FILE_WANT);
0a7de745 2312 nofp->nof_flags &= ~(NFS_OPEN_FILE_BUSY | NFS_OPEN_FILE_WANT);
b0d623f7 2313 lck_mtx_unlock(&nofp->nof_lock);
0a7de745 2314 if (wanted) {
b0d623f7 2315 wakeup(nofp);
0a7de745 2316 }
b0d623f7
A
2317}
2318
/*
 * Add the open state for the given access/deny modes to this open file.
 *
 * There is one counter per (access, deny) combination, with a parallel
 * nof_d_* set for opens backed by a delegation.  Updates the aggregate
 * nof_access/nof_deny masks and bumps the matching counter plus the
 * total open count.
 */
void
nfs_open_file_add_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode, int delegated)
{
	lck_mtx_lock(&nofp->nof_lock);
	nofp->nof_access |= accessMode;
	nofp->nof_deny |= denyMode;

	if (delegated) {
		/* delegated opens: nof_d_* counters */
		if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
			if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
				nofp->nof_d_r++;
			} else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
				nofp->nof_d_w++;
			} else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
				nofp->nof_d_rw++;
			}
		} else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
			if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
				nofp->nof_d_r_dw++;
			} else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
				nofp->nof_d_w_dw++;
			} else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
				nofp->nof_d_rw_dw++;
			}
		} else { /* NFS_OPEN_SHARE_DENY_BOTH */
			if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
				nofp->nof_d_r_drw++;
			} else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
				nofp->nof_d_w_drw++;
			} else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
				nofp->nof_d_rw_drw++;
			}
		}
	} else {
		/* regular (server-confirmed) opens */
		if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
			if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
				nofp->nof_r++;
			} else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
				nofp->nof_w++;
			} else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
				nofp->nof_rw++;
			}
		} else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
			if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
				nofp->nof_r_dw++;
			} else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
				nofp->nof_w_dw++;
			} else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
				nofp->nof_rw_dw++;
			}
		} else { /* NFS_OPEN_SHARE_DENY_BOTH */
			if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
				nofp->nof_r_drw++;
			} else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
				nofp->nof_w_drw++;
			} else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
				nofp->nof_rw_drw++;
			}
		}
	}

	nofp->nof_opencnt++;
	lck_mtx_unlock(&nofp->nof_lock);
}
2386
2387/*
6d2010ae
A
2388 * Find which particular open combo will be closed and report what
2389 * the new modes will be and whether the open was delegated.
b0d623f7 2390 */
6d2010ae
A
2391void
2392nfs_open_file_remove_open_find(
b0d623f7
A
2393 struct nfs_open_file *nofp,
2394 uint32_t accessMode,
2395 uint32_t denyMode,
6d2010ae
A
2396 uint32_t *newAccessMode,
2397 uint32_t *newDenyMode,
2398 int *delegated)
b0d623f7 2399{
6d2010ae
A
2400 /*
2401 * Calculate new modes: a mode bit gets removed when there's only
2402 * one count in all the corresponding counts
2403 */
2404 *newAccessMode = nofp->nof_access;
2405 *newDenyMode = nofp->nof_deny;
b0d623f7 2406
6d2010ae
A
2407 if ((accessMode & NFS_OPEN_SHARE_ACCESS_READ) &&
2408 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) &&
2409 ((nofp->nof_r + nofp->nof_d_r +
0a7de745
A
2410 nofp->nof_rw + nofp->nof_d_rw +
2411 nofp->nof_r_dw + nofp->nof_d_r_dw +
2412 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2413 nofp->nof_r_drw + nofp->nof_d_r_drw +
2414 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) {
6d2010ae 2415 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
0a7de745 2416 }
6d2010ae
A
2417 if ((accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2418 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2419 ((nofp->nof_w + nofp->nof_d_w +
0a7de745
A
2420 nofp->nof_rw + nofp->nof_d_rw +
2421 nofp->nof_w_dw + nofp->nof_d_w_dw +
2422 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2423 nofp->nof_w_drw + nofp->nof_d_w_drw +
2424 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) {
6d2010ae 2425 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_WRITE;
0a7de745 2426 }
6d2010ae
A
2427 if ((denyMode & NFS_OPEN_SHARE_DENY_READ) &&
2428 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) &&
2429 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
0a7de745
A
2430 nofp->nof_w_drw + nofp->nof_d_w_drw +
2431 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1)) {
6d2010ae 2432 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_READ;
0a7de745 2433 }
6d2010ae
A
2434 if ((denyMode & NFS_OPEN_SHARE_DENY_WRITE) &&
2435 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_WRITE) &&
2436 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
0a7de745
A
2437 nofp->nof_w_drw + nofp->nof_d_w_drw +
2438 nofp->nof_rw_drw + nofp->nof_d_rw_drw +
2439 nofp->nof_r_dw + nofp->nof_d_r_dw +
2440 nofp->nof_w_dw + nofp->nof_d_w_dw +
2441 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) {
6d2010ae 2442 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 2443 }
6d2010ae
A
2444
2445 /* Find the corresponding open access/deny mode counter. */
b0d623f7 2446 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
0a7de745 2447 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2448 *delegated = (nofp->nof_d_r != 0);
0a7de745 2449 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2450 *delegated = (nofp->nof_d_w != 0);
0a7de745 2451 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2452 *delegated = (nofp->nof_d_rw != 0);
0a7de745 2453 } else {
6d2010ae 2454 *delegated = 0;
0a7de745 2455 }
b0d623f7 2456 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
0a7de745 2457 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2458 *delegated = (nofp->nof_d_r_dw != 0);
0a7de745 2459 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2460 *delegated = (nofp->nof_d_w_dw != 0);
0a7de745 2461 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2462 *delegated = (nofp->nof_d_rw_dw != 0);
0a7de745 2463 } else {
6d2010ae 2464 *delegated = 0;
0a7de745 2465 }
b0d623f7 2466 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
0a7de745 2467 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2468 *delegated = (nofp->nof_d_r_drw != 0);
0a7de745 2469 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2470 *delegated = (nofp->nof_d_w_drw != 0);
0a7de745 2471 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2472 *delegated = (nofp->nof_d_rw_drw != 0);
0a7de745 2473 } else {
6d2010ae 2474 *delegated = 0;
0a7de745 2475 }
b0d623f7 2476 }
6d2010ae
A
2477}
2478
2479/*
2480 * Remove the open state for the given access/deny modes to this open file.
2481 */
2482void
2483nfs_open_file_remove_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode)
2484{
2485 uint32_t newAccessMode, newDenyMode;
2486 int delegated = 0;
2487
2488 lck_mtx_lock(&nofp->nof_lock);
2489 nfs_open_file_remove_open_find(nofp, accessMode, denyMode, &newAccessMode, &newDenyMode, &delegated);
2490
2491 /* Decrement the corresponding open access/deny mode counter. */
2492 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2493 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2494 if (delegated) {
0a7de745 2495 if (nofp->nof_d_r == 0) {
6d2010ae 2496 NP(nofp->nof_np, "nfs: open(R) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2497 } else {
6d2010ae 2498 nofp->nof_d_r--;
0a7de745 2499 }
6d2010ae 2500 } else {
0a7de745 2501 if (nofp->nof_r == 0) {
6d2010ae 2502 NP(nofp->nof_np, "nfs: open(R) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2503 } else {
6d2010ae 2504 nofp->nof_r--;
0a7de745 2505 }
6d2010ae
A
2506 }
2507 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2508 if (delegated) {
0a7de745 2509 if (nofp->nof_d_w == 0) {
6d2010ae 2510 NP(nofp->nof_np, "nfs: open(W) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2511 } else {
6d2010ae 2512 nofp->nof_d_w--;
0a7de745 2513 }
6d2010ae 2514 } else {
0a7de745 2515 if (nofp->nof_w == 0) {
6d2010ae 2516 NP(nofp->nof_np, "nfs: open(W) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2517 } else {
6d2010ae 2518 nofp->nof_w--;
0a7de745 2519 }
6d2010ae
A
2520 }
2521 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2522 if (delegated) {
0a7de745 2523 if (nofp->nof_d_rw == 0) {
6d2010ae 2524 NP(nofp->nof_np, "nfs: open(RW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2525 } else {
6d2010ae 2526 nofp->nof_d_rw--;
0a7de745 2527 }
6d2010ae 2528 } else {
0a7de745 2529 if (nofp->nof_rw == 0) {
6d2010ae 2530 NP(nofp->nof_np, "nfs: open(RW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2531 } else {
6d2010ae 2532 nofp->nof_rw--;
0a7de745 2533 }
6d2010ae
A
2534 }
2535 }
2536 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2537 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2538 if (delegated) {
0a7de745 2539 if (nofp->nof_d_r_dw == 0) {
6d2010ae 2540 NP(nofp->nof_np, "nfs: open(R,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2541 } else {
6d2010ae 2542 nofp->nof_d_r_dw--;
0a7de745 2543 }
6d2010ae 2544 } else {
0a7de745 2545 if (nofp->nof_r_dw == 0) {
6d2010ae 2546 NP(nofp->nof_np, "nfs: open(R,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2547 } else {
6d2010ae 2548 nofp->nof_r_dw--;
0a7de745 2549 }
6d2010ae
A
2550 }
2551 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2552 if (delegated) {
0a7de745 2553 if (nofp->nof_d_w_dw == 0) {
6d2010ae 2554 NP(nofp->nof_np, "nfs: open(W,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2555 } else {
6d2010ae 2556 nofp->nof_d_w_dw--;
0a7de745 2557 }
6d2010ae 2558 } else {
0a7de745 2559 if (nofp->nof_w_dw == 0) {
6d2010ae 2560 NP(nofp->nof_np, "nfs: open(W,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2561 } else {
6d2010ae 2562 nofp->nof_w_dw--;
0a7de745 2563 }
6d2010ae
A
2564 }
2565 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2566 if (delegated) {
0a7de745 2567 if (nofp->nof_d_rw_dw == 0) {
6d2010ae 2568 NP(nofp->nof_np, "nfs: open(RW,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2569 } else {
6d2010ae 2570 nofp->nof_d_rw_dw--;
0a7de745 2571 }
6d2010ae 2572 } else {
0a7de745 2573 if (nofp->nof_rw_dw == 0) {
6d2010ae 2574 NP(nofp->nof_np, "nfs: open(RW,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2575 } else {
6d2010ae 2576 nofp->nof_rw_dw--;
0a7de745 2577 }
6d2010ae
A
2578 }
2579 }
2580 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2581 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2582 if (delegated) {
0a7de745 2583 if (nofp->nof_d_r_drw == 0) {
6d2010ae 2584 NP(nofp->nof_np, "nfs: open(R,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2585 } else {
6d2010ae 2586 nofp->nof_d_r_drw--;
0a7de745 2587 }
6d2010ae 2588 } else {
0a7de745 2589 if (nofp->nof_r_drw == 0) {
6d2010ae 2590 NP(nofp->nof_np, "nfs: open(R,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2591 } else {
6d2010ae 2592 nofp->nof_r_drw--;
0a7de745 2593 }
6d2010ae
A
2594 }
2595 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2596 if (delegated) {
0a7de745 2597 if (nofp->nof_d_w_drw == 0) {
6d2010ae 2598 NP(nofp->nof_np, "nfs: open(W,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2599 } else {
6d2010ae 2600 nofp->nof_d_w_drw--;
0a7de745 2601 }
6d2010ae 2602 } else {
0a7de745 2603 if (nofp->nof_w_drw == 0) {
6d2010ae 2604 NP(nofp->nof_np, "nfs: open(W,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2605 } else {
6d2010ae 2606 nofp->nof_w_drw--;
0a7de745 2607 }
6d2010ae
A
2608 }
2609 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2610 if (delegated) {
0a7de745 2611 if (nofp->nof_d_rw_drw == 0) {
6d2010ae 2612 NP(nofp->nof_np, "nfs: open(RW,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2613 } else {
6d2010ae 2614 nofp->nof_d_rw_drw--;
0a7de745 2615 }
6d2010ae 2616 } else {
0a7de745 2617 if (nofp->nof_rw_drw == 0) {
6d2010ae 2618 NP(nofp->nof_np, "nfs: open(RW,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 2619 } else {
6d2010ae 2620 nofp->nof_rw_drw--;
0a7de745 2621 }
6d2010ae
A
2622 }
2623 }
2624 }
2625
2626 /* update the modes */
2627 nofp->nof_access = newAccessMode;
2628 nofp->nof_deny = newDenyMode;
2629 nofp->nof_opencnt--;
2630 lck_mtx_unlock(&nofp->nof_lock);
2631}
2632
cb323159 2633#if CONFIG_NFS4
6d2010ae
A
2634/*
2635 * Get the current (delegation, lock, open, default) stateid for this node.
2636 * If node has a delegation, use that stateid.
2637 * If pid has a lock, use the lockowner's stateid.
2638 * Or use the open file's stateid.
2639 * If no open file, use a default stateid of all ones.
2640 */
2641void
2642nfs_get_stateid(nfsnode_t np, thread_t thd, kauth_cred_t cred, nfs_stateid *sid)
2643{
2644 struct nfsmount *nmp = NFSTONMP(np);
2645 proc_t p = thd ? get_bsdthreadtask_info(thd) : current_proc(); // XXX async I/O requests don't have a thread
2646 struct nfs_open_owner *noop = NULL;
2647 struct nfs_open_file *nofp = NULL;
2648 struct nfs_lock_owner *nlop = NULL;
2649 nfs_stateid *s = NULL;
2650
2651 if (np->n_openflags & N_DELEG_MASK) {
2652 s = &np->n_dstateid;
2653 } else {
0a7de745 2654 if (p) {
6d2010ae 2655 nlop = nfs_lock_owner_find(np, p, 0);
0a7de745 2656 }
6d2010ae
A
2657 if (nlop && !TAILQ_EMPTY(&nlop->nlo_locks)) {
2658 /* we hold locks, use lock stateid */
2659 s = &nlop->nlo_stateid;
2660 } else if (((noop = nfs_open_owner_find(nmp, cred, 0))) &&
0a7de745
A
2661 (nfs_open_file_find(np, noop, &nofp, 0, 0, 0) == 0) &&
2662 !(nofp->nof_flags & NFS_OPEN_FILE_LOST) &&
2663 nofp->nof_access) {
6d2010ae 2664 /* we (should) have the file open, use open stateid */
0a7de745 2665 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
6d2010ae 2666 nfs4_reopen(nofp, thd);
0a7de745
A
2667 }
2668 if (!(nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6d2010ae 2669 s = &nofp->nof_stateid;
0a7de745 2670 }
6d2010ae
A
2671 }
2672 }
2673
2674 if (s) {
2675 sid->seqid = s->seqid;
2676 sid->other[0] = s->other[0];
2677 sid->other[1] = s->other[1];
2678 sid->other[2] = s->other[2];
2679 } else {
2680 /* named attributes may not have a stateid for reads, so don't complain for them */
0a7de745 2681 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
6d2010ae 2682 NP(np, "nfs_get_stateid: no stateid");
0a7de745 2683 }
6d2010ae
A
2684 sid->seqid = sid->other[0] = sid->other[1] = sid->other[2] = 0xffffffff;
2685 }
0a7de745 2686 if (nlop) {
6d2010ae 2687 nfs_lock_owner_rele(nlop);
0a7de745
A
2688 }
2689 if (noop) {
6d2010ae 2690 nfs_open_owner_rele(noop);
0a7de745 2691 }
6d2010ae
A
2692}
2693
2694
2695/*
2696 * When we have a delegation, we may be able to perform the OPEN locally.
2697 * Perform the OPEN by checking the delegation ACE and/or checking via ACCESS.
2698 */
2699int
2700nfs4_open_delegated(
2701 nfsnode_t np,
2702 struct nfs_open_file *nofp,
2703 uint32_t accessMode,
2704 uint32_t denyMode,
2705 vfs_context_t ctx)
2706{
2707 int error = 0, ismember, readtoo = 0, authorized = 0;
2708 uint32_t action;
2709 struct kauth_acl_eval eval;
2710 kauth_cred_t cred = vfs_context_ucred(ctx);
2711
2712 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2713 /*
2714 * Try to open it for read access too,
2715 * so the buffer cache can read data.
2716 */
2717 readtoo = 1;
2718 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2719 }
2720
2721tryagain:
2722 action = 0;
0a7de745 2723 if (accessMode & NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2724 action |= KAUTH_VNODE_READ_DATA;
0a7de745
A
2725 }
2726 if (accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2727 action |= KAUTH_VNODE_WRITE_DATA;
0a7de745 2728 }
6d2010ae
A
2729
2730 /* evaluate ACE (if we have one) */
2731 if (np->n_dace.ace_flags) {
2732 eval.ae_requested = action;
2733 eval.ae_acl = &np->n_dace;
2734 eval.ae_count = 1;
2735 eval.ae_options = 0;
0a7de745 2736 if (np->n_vattr.nva_uid == kauth_cred_getuid(cred)) {
6d2010ae 2737 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
0a7de745 2738 }
6d2010ae 2739 error = kauth_cred_ismember_gid(cred, np->n_vattr.nva_gid, &ismember);
0a7de745 2740 if (!error && ismember) {
6d2010ae 2741 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
0a7de745 2742 }
6d2010ae
A
2743
2744 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
2745 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
2746 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
2747 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
2748
2749 error = kauth_acl_evaluate(cred, &eval);
2750
0a7de745 2751 if (!error && (eval.ae_result == KAUTH_RESULT_ALLOW)) {
6d2010ae 2752 authorized = 1;
0a7de745 2753 }
6d2010ae
A
2754 }
2755
2756 if (!authorized) {
2757 /* need to ask the server via ACCESS */
2758 struct vnop_access_args naa;
2759 naa.a_desc = &vnop_access_desc;
2760 naa.a_vp = NFSTOV(np);
2761 naa.a_action = action;
2762 naa.a_context = ctx;
0a7de745 2763 if (!(error = nfs_vnop_access(&naa))) {
6d2010ae 2764 authorized = 1;
0a7de745 2765 }
6d2010ae
A
2766 }
2767
2768 if (!authorized) {
2769 if (readtoo) {
2770 /* try again without the extra read access */
2771 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2772 readtoo = 0;
2773 goto tryagain;
2774 }
0a7de745 2775 return error ? error : EACCES;
6d2010ae
A
2776 }
2777
2778 nfs_open_file_add_open(nofp, accessMode, denyMode, 1);
2779
0a7de745 2780 return 0;
6d2010ae
A
2781}
2782
2783
2784/*
2785 * Open a file with the given access/deny modes.
2786 *
2787 * If we have a delegation, we may be able to handle the open locally.
2788 * Otherwise, we will always send the open RPC even if this open's mode is
2789 * a subset of all the existing opens. This makes sure that we will always
2790 * be able to do a downgrade to any of the open modes.
2791 *
2792 * Note: local conflicts should have already been checked in nfs_open_file_find().
2793 */
2794int
2795nfs4_open(
2796 nfsnode_t np,
2797 struct nfs_open_file *nofp,
2798 uint32_t accessMode,
2799 uint32_t denyMode,
2800 vfs_context_t ctx)
2801{
2802 vnode_t vp = NFSTOV(np);
2803 vnode_t dvp = NULL;
2804 struct componentname cn;
2805 const char *vname = NULL;
2806 size_t namelen;
2807 char smallname[128];
2808 char *filename = NULL;
2809 int error = 0, readtoo = 0;
2810
2811 /*
2812 * We can handle the OPEN ourselves if we have a delegation,
2813 * unless it's a read delegation and the open is asking for
2814 * either write access or deny read. We also don't bother to
2815 * use the delegation if it's being returned.
2816 */
2817 if (np->n_openflags & N_DELEG_MASK) {
0a7de745
A
2818 if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) {
2819 return error;
2820 }
6d2010ae
A
2821 if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN) &&
2822 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ||
0a7de745 2823 (!(accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) && !(denyMode & NFS_OPEN_SHARE_DENY_READ)))) {
6d2010ae
A
2824 error = nfs4_open_delegated(np, nofp, accessMode, denyMode, ctx);
2825 nfs_open_state_clear_busy(np);
0a7de745 2826 return error;
6d2010ae
A
2827 }
2828 nfs_open_state_clear_busy(np);
2829 }
2830
2831 /*
2832 * [sigh] We can't trust VFS to get the parent right for named
2833 * attribute nodes. (It likes to reparent the nodes after we've
2834 * created them.) Luckily we can probably get the right parent
2835 * from the n_parent we have stashed away.
2836 */
2837 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
0a7de745 2838 (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
6d2010ae 2839 dvp = NULL;
0a7de745
A
2840 }
2841 if (!dvp) {
6d2010ae 2842 dvp = vnode_getparent(vp);
0a7de745 2843 }
6d2010ae
A
2844 vname = vnode_getname(vp);
2845 if (!dvp || !vname) {
0a7de745 2846 if (!error) {
6d2010ae 2847 error = EIO;
0a7de745 2848 }
6d2010ae
A
2849 goto out;
2850 }
2851 filename = &smallname[0];
2852 namelen = snprintf(filename, sizeof(smallname), "%s", vname);
2853 if (namelen >= sizeof(smallname)) {
0a7de745 2854 MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
6d2010ae
A
2855 if (!filename) {
2856 error = ENOMEM;
2857 goto out;
2858 }
0a7de745 2859 snprintf(filename, namelen + 1, "%s", vname);
6d2010ae
A
2860 }
2861 bzero(&cn, sizeof(cn));
2862 cn.cn_nameptr = filename;
2863 cn.cn_namelen = namelen;
2864
2865 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2866 /*
2867 * Try to open it for read access too,
2868 * so the buffer cache can read data.
2869 */
2870 readtoo = 1;
2871 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2872 }
2873tryagain:
2874 error = nfs4_open_rpc(nofp, ctx, &cn, NULL, dvp, &vp, NFS_OPEN_NOCREATE, accessMode, denyMode);
2875 if (error) {
2876 if (!nfs_mount_state_error_should_restart(error) &&
2877 (error != EINTR) && (error != ERESTART) && readtoo) {
2878 /* try again without the extra read access */
2879 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2880 readtoo = 0;
2881 goto tryagain;
2882 }
2883 goto out;
2884 }
2885 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
b0d623f7 2886out:
0a7de745 2887 if (filename && (filename != &smallname[0])) {
b0d623f7 2888 FREE(filename, M_TEMP);
0a7de745
A
2889 }
2890 if (vname) {
b0d623f7 2891 vnode_putname(vname);
0a7de745
A
2892 }
2893 if (dvp != NULLVP) {
b0d623f7 2894 vnode_put(dvp);
0a7de745
A
2895 }
2896 return error;
b0d623f7 2897}
cb323159 2898#endif /* CONFIG_NFS4 */
b0d623f7 2899
b0d623f7 2900int
6d2010ae
A
2901nfs_vnop_mmap(
2902 struct vnop_mmap_args /* {
0a7de745
A
2903 * struct vnodeop_desc *a_desc;
2904 * vnode_t a_vp;
2905 * int a_fflags;
2906 * vfs_context_t a_context;
2907 * } */*ap)
b0d623f7
A
2908{
2909 vfs_context_t ctx = ap->a_context;
2910 vnode_t vp = ap->a_vp;
2911 nfsnode_t np = VTONFS(vp);
6d2010ae 2912 int error = 0, accessMode, denyMode, delegated;
b0d623f7 2913 struct nfsmount *nmp;
b0d623f7
A
2914 struct nfs_open_owner *noop = NULL;
2915 struct nfs_open_file *nofp = NULL;
2916
b0d623f7 2917 nmp = VTONMP(vp);
0a7de745
A
2918 if (nfs_mount_gone(nmp)) {
2919 return ENXIO;
2920 }
b0d623f7 2921
0a7de745
A
2922 if (!vnode_isreg(vp) || !(ap->a_fflags & (PROT_READ | PROT_WRITE))) {
2923 return EINVAL;
2924 }
2925 if (np->n_flag & NREVOKE) {
2926 return EIO;
2927 }
b0d623f7 2928
6d2010ae
A
2929 /*
2930 * fflags contains some combination of: PROT_READ, PROT_WRITE
2931 * Since it's not possible to mmap() without having the file open for reading,
2932 * read access is always there (regardless if PROT_READ is not set).
2933 */
2934 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
0a7de745 2935 if (ap->a_fflags & PROT_WRITE) {
b0d623f7 2936 accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE;
0a7de745 2937 }
6d2010ae 2938 denyMode = NFS_OPEN_SHARE_DENY_NONE;
b0d623f7
A
2939
2940 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
0a7de745
A
2941 if (!noop) {
2942 return ENOMEM;
2943 }
b0d623f7
A
2944
2945restart:
6d2010ae 2946 error = nfs_mount_state_in_use_start(nmp, NULL);
b0d623f7
A
2947 if (error) {
2948 nfs_open_owner_rele(noop);
0a7de745 2949 return error;
b0d623f7 2950 }
6d2010ae 2951 if (np->n_flag & NREVOKE) {
b0d623f7 2952 error = EIO;
6d2010ae
A
2953 nfs_mount_state_in_use_end(nmp, 0);
2954 nfs_open_owner_rele(noop);
0a7de745 2955 return error;
6d2010ae
A
2956 }
2957
2958 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
2959 if (error || (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST))) {
2960 NP(np, "nfs_vnop_mmap: no open file for owner, error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
2961 error = EPERM;
b0d623f7 2962 }
cb323159 2963#if CONFIG_NFS4
b0d623f7
A
2964 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
2965 nfs_mount_state_in_use_end(nmp, 0);
6d2010ae 2966 error = nfs4_reopen(nofp, NULL);
b0d623f7 2967 nofp = NULL;
0a7de745 2968 if (!error) {
6d2010ae 2969 goto restart;
0a7de745 2970 }
b0d623f7 2971 }
cb323159 2972#endif
0a7de745 2973 if (!error) {
6d2010ae 2974 error = nfs_open_file_set_busy(nofp, NULL);
0a7de745 2975 }
b0d623f7
A
2976 if (error) {
2977 nofp = NULL;
2978 goto out;
2979 }
2980
2981 /*
6d2010ae
A
2982 * The open reference for mmap must mirror an existing open because
2983 * we may need to reclaim it after the file is closed.
2984 * So grab another open count matching the accessMode passed in.
2985 * If we already had an mmap open, prefer read/write without deny mode.
2986 * This means we may have to drop the current mmap open first.
3e170ce0
A
2987 *
2988 * N.B. We should have an open for the mmap, because, mmap was
2989 * called on an open descriptor, or we've created an open for read
2990 * from reading the first page for execve. However, if we piggy
2991 * backed on an existing NFS_OPEN_SHARE_ACCESS_READ/NFS_OPEN_SHARE_DENY_NONE
2992 * that open may have closed.
b0d623f7 2993 */
6d2010ae 2994
3e170ce0
A
2995 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
2996 if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
2997 /* We shouldn't get here. We've already open the file for execve */
2998 NP(np, "nfs_vnop_mmap: File already needs close access: 0x%x, cred: %d thread: %lld",
0a7de745 2999 nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
3e170ce0
A
3000 }
3001 /*
3002 * mmapings for execve are just for read. Get out with EPERM if the accessMode is not ACCESS_READ
3003 * or the access would be denied. Other accesses should have an open descriptor for the mapping.
3004 */
3005 if (accessMode != NFS_OPEN_SHARE_ACCESS_READ || (accessMode & nofp->nof_deny)) {
6d2010ae
A
3006 /* not asking for just read access -> fail */
3007 error = EPERM;
3008 goto out;
3009 }
3010 /* we don't have the file open, so open it for read access */
3011 if (nmp->nm_vers < NFS_VER4) {
3012 /* NFS v2/v3 opens are always allowed - so just add it. */
3013 nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
b0d623f7 3014 error = 0;
cb323159
A
3015 }
3016#if CONFIG_NFS4
3017 else {
6d2010ae 3018 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
b0d623f7 3019 }
cb323159 3020#endif
0a7de745 3021 if (!error) {
6d2010ae 3022 nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
0a7de745
A
3023 }
3024 if (error) {
b0d623f7 3025 goto out;
0a7de745 3026 }
6d2010ae
A
3027 }
3028
3029 /* determine deny mode for open */
3030 if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
3031 if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
3032 delegated = 1;
0a7de745 3033 if (nofp->nof_d_rw) {
6d2010ae 3034 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 3035 } else if (nofp->nof_d_rw_dw) {
6d2010ae 3036 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 3037 } else if (nofp->nof_d_rw_drw) {
6d2010ae 3038 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
0a7de745 3039 }
6d2010ae
A
3040 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
3041 delegated = 0;
0a7de745 3042 if (nofp->nof_rw) {
6d2010ae 3043 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 3044 } else if (nofp->nof_rw_dw) {
6d2010ae 3045 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 3046 } else if (nofp->nof_rw_drw) {
6d2010ae 3047 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
0a7de745 3048 }
6d2010ae
A
3049 } else {
3050 error = EPERM;
3051 }
3052 } else { /* NFS_OPEN_SHARE_ACCESS_READ */
3053 if (nofp->nof_d_r || nofp->nof_d_r_dw || nofp->nof_d_r_drw) {
3054 delegated = 1;
0a7de745 3055 if (nofp->nof_d_r) {
6d2010ae 3056 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 3057 } else if (nofp->nof_d_r_dw) {
6d2010ae 3058 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 3059 } else if (nofp->nof_d_r_drw) {
6d2010ae 3060 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
0a7de745 3061 }
6d2010ae
A
3062 } else if (nofp->nof_r || nofp->nof_r_dw || nofp->nof_r_drw) {
3063 delegated = 0;
0a7de745 3064 if (nofp->nof_r) {
6d2010ae 3065 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 3066 } else if (nofp->nof_r_dw) {
6d2010ae 3067 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 3068 } else if (nofp->nof_r_drw) {
6d2010ae 3069 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
0a7de745 3070 }
3e170ce0
A
3071 } else if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
3072 /*
3073 * This clause and the one below is to co-opt a read write access
3074 * for a read only mmaping. We probably got here in that an
3075 * existing rw open for an executable file already exists.
3076 */
3077 delegated = 1;
3078 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
0a7de745 3079 if (nofp->nof_d_rw) {
3e170ce0 3080 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 3081 } else if (nofp->nof_d_rw_dw) {
3e170ce0 3082 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 3083 } else if (nofp->nof_d_rw_drw) {
3e170ce0 3084 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
0a7de745 3085 }
3e170ce0
A
3086 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
3087 delegated = 0;
3088 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
0a7de745 3089 if (nofp->nof_rw) {
3e170ce0 3090 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 3091 } else if (nofp->nof_rw_dw) {
3e170ce0 3092 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 3093 } else if (nofp->nof_rw_drw) {
3e170ce0 3094 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
0a7de745 3095 }
6d2010ae
A
3096 } else {
3097 error = EPERM;
3098 }
3099 }
0a7de745 3100 if (error) { /* mmap mode without proper open mode */
6d2010ae 3101 goto out;
0a7de745 3102 }
6d2010ae
A
3103
3104 /*
3105 * If the existing mmap access is more than the new access OR the
3106 * existing access is the same and the existing deny mode is less,
3107 * then we'll stick with the existing mmap open mode.
3108 */
3109 if ((nofp->nof_mmap_access > accessMode) ||
0a7de745 3110 ((nofp->nof_mmap_access == accessMode) && (nofp->nof_mmap_deny <= denyMode))) {
6d2010ae 3111 goto out;
0a7de745 3112 }
6d2010ae
A
3113
3114 /* update mmap open mode */
3115 if (nofp->nof_mmap_access) {
3116 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
3117 if (error) {
0a7de745 3118 if (!nfs_mount_state_error_should_restart(error)) {
6d2010ae 3119 NP(np, "nfs_vnop_mmap: close of previous mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 3120 }
6d2010ae
A
3121 NP(np, "nfs_vnop_mmap: update, close error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
3122 goto out;
b0d623f7 3123 }
6d2010ae 3124 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
b0d623f7
A
3125 }
3126
6d2010ae
A
3127 nfs_open_file_add_open(nofp, accessMode, denyMode, delegated);
3128 nofp->nof_mmap_access = accessMode;
3129 nofp->nof_mmap_deny = denyMode;
3130
b0d623f7 3131out:
0a7de745 3132 if (nofp) {
b0d623f7 3133 nfs_open_file_clear_busy(nofp);
0a7de745 3134 }
b0d623f7
A
3135 if (nfs_mount_state_in_use_end(nmp, error)) {
3136 nofp = NULL;
3137 goto restart;
3138 }
0a7de745 3139 if (noop) {
b0d623f7 3140 nfs_open_owner_rele(noop);
0a7de745 3141 }
316670eb
A
3142
3143 if (!error) {
3144 int ismapped = 0;
3145 nfs_node_lock_force(np);
3146 if ((np->n_flag & NISMAPPED) == 0) {
3147 np->n_flag |= NISMAPPED;
3148 ismapped = 1;
3149 }
3150 nfs_node_unlock(np);
3151 if (ismapped) {
3152 lck_mtx_lock(&nmp->nm_lock);
3153 nmp->nm_state &= ~NFSSTA_SQUISHY;
3154 nmp->nm_curdeadtimeout = nmp->nm_deadtimeout;
0a7de745 3155 if (nmp->nm_curdeadtimeout <= 0) {
316670eb 3156 nmp->nm_deadto_start = 0;
0a7de745 3157 }
316670eb
A
3158 nmp->nm_mappers++;
3159 lck_mtx_unlock(&nmp->nm_lock);
3160 }
3161 }
3162
0a7de745 3163 return error;
b0d623f7
A
3164}
3165
b0d623f7
A
3166
3167int
6d2010ae
A
3168nfs_vnop_mnomap(
3169 struct vnop_mnomap_args /* {
0a7de745
A
3170 * struct vnodeop_desc *a_desc;
3171 * vnode_t a_vp;
3172 * vfs_context_t a_context;
3173 * } */*ap)
b0d623f7
A
3174{
3175 vfs_context_t ctx = ap->a_context;
3176 vnode_t vp = ap->a_vp;
b0d623f7
A
3177 nfsnode_t np = VTONFS(vp);
3178 struct nfsmount *nmp;
b0d623f7 3179 struct nfs_open_file *nofp = NULL;
6d2010ae
A
3180 off_t size;
3181 int error;
316670eb 3182 int is_mapped_flag = 0;
0a7de745 3183
b0d623f7 3184 nmp = VTONMP(vp);
0a7de745
A
3185 if (nfs_mount_gone(nmp)) {
3186 return ENXIO;
3187 }
b0d623f7 3188
316670eb
A
3189 nfs_node_lock_force(np);
3190 if (np->n_flag & NISMAPPED) {
3191 is_mapped_flag = 1;
3192 np->n_flag &= ~NISMAPPED;
3193 }
3194 nfs_node_unlock(np);
3195 if (is_mapped_flag) {
3196 lck_mtx_lock(&nmp->nm_lock);
0a7de745 3197 if (nmp->nm_mappers) {
316670eb 3198 nmp->nm_mappers--;
0a7de745 3199 } else {
316670eb 3200 NP(np, "nfs_vnop_mnomap: removing mmap reference from mount, but mount has no files mmapped");
0a7de745 3201 }
316670eb
A
3202 lck_mtx_unlock(&nmp->nm_lock);
3203 }
3204
6d2010ae
A
3205 /* flush buffers/ubc before we drop the open (in case it's our last open) */
3206 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
0a7de745 3207 if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp))) {
6d2010ae 3208 ubc_msync(vp, 0, size, NULL, UBC_PUSHALL | UBC_SYNC);
0a7de745 3209 }
b0d623f7 3210
6d2010ae
A
3211 /* walk all open files and close all mmap opens */
3212loop:
3213 error = nfs_mount_state_in_use_start(nmp, NULL);
0a7de745
A
3214 if (error) {
3215 return error;
3216 }
6d2010ae
A
3217 lck_mtx_lock(&np->n_openlock);
3218 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
0a7de745 3219 if (!nofp->nof_mmap_access) {
6d2010ae 3220 continue;
0a7de745 3221 }
b0d623f7 3222 lck_mtx_unlock(&np->n_openlock);
cb323159 3223#if CONFIG_NFS4
6d2010ae
A
3224 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
3225 nfs_mount_state_in_use_end(nmp, 0);
3226 error = nfs4_reopen(nofp, NULL);
0a7de745 3227 if (!error) {
6d2010ae 3228 goto loop;
0a7de745 3229 }
6d2010ae 3230 }
cb323159 3231#endif
0a7de745 3232 if (!error) {
6d2010ae 3233 error = nfs_open_file_set_busy(nofp, NULL);
0a7de745 3234 }
6d2010ae
A
3235 if (error) {
3236 lck_mtx_lock(&np->n_openlock);
3237 break;
3238 }
3239 if (nofp->nof_mmap_access) {
3240 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
3241 if (!nfs_mount_state_error_should_restart(error)) {
0a7de745 3242 if (error) { /* not a state-operation-restarting error, so just clear the access */
6d2010ae 3243 NP(np, "nfs_vnop_mnomap: close of mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 3244 }
6d2010ae
A
3245 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
3246 }
0a7de745 3247 if (error) {
6d2010ae 3248 NP(np, "nfs_vnop_mnomap: error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 3249 }
6d2010ae
A
3250 }
3251 nfs_open_file_clear_busy(nofp);
3252 nfs_mount_state_in_use_end(nmp, error);
3253 goto loop;
b0d623f7 3254 }
6d2010ae
A
3255 lck_mtx_unlock(&np->n_openlock);
3256 nfs_mount_state_in_use_end(nmp, error);
0a7de745 3257 return error;
6d2010ae 3258}
b0d623f7 3259
6d2010ae
A
3260/*
3261 * Search a node's lock owner list for the owner for this process.
3262 * If not found and "alloc" is set, then allocate a new one.
3263 */
3264struct nfs_lock_owner *
3265nfs_lock_owner_find(nfsnode_t np, proc_t p, int alloc)
3266{
3267 pid_t pid = proc_pid(p);
3268 struct nfs_lock_owner *nlop, *newnlop = NULL;
b0d623f7 3269
6d2010ae
A
3270tryagain:
3271 lck_mtx_lock(&np->n_openlock);
3272 TAILQ_FOREACH(nlop, &np->n_lock_owners, nlo_link) {
0a7de745
A
3273 os_ref_count_t newcount;
3274
3275 if (nlop->nlo_pid != pid) {
6d2010ae 3276 continue;
0a7de745
A
3277 }
3278 if (timevalcmp(&nlop->nlo_pid_start, &p->p_start, ==)) {
6d2010ae 3279 break;
0a7de745 3280 }
6d2010ae 3281 /* stale lock owner... reuse it if we can */
0a7de745 3282 if (os_ref_get_count(&nlop->nlo_refcnt)) {
6d2010ae
A
3283 TAILQ_REMOVE(&np->n_lock_owners, nlop, nlo_link);
3284 nlop->nlo_flags &= ~NFS_LOCK_OWNER_LINK;
0a7de745 3285 newcount = os_ref_release_locked(&nlop->nlo_refcnt);
6d2010ae
A
3286 lck_mtx_unlock(&np->n_openlock);
3287 goto tryagain;
3288 }
3289 nlop->nlo_pid_start = p->p_start;
3290 nlop->nlo_seqid = 0;
3291 nlop->nlo_stategenid = 0;
3292 break;
b0d623f7
A
3293 }
3294
6d2010ae
A
3295 if (!nlop && !newnlop && alloc) {
3296 lck_mtx_unlock(&np->n_openlock);
3297 MALLOC(newnlop, struct nfs_lock_owner *, sizeof(struct nfs_lock_owner), M_TEMP, M_WAITOK);
0a7de745
A
3298 if (!newnlop) {
3299 return NULL;
3300 }
6d2010ae
A
3301 bzero(newnlop, sizeof(*newnlop));
3302 lck_mtx_init(&newnlop->nlo_lock, nfs_open_grp, LCK_ATTR_NULL);
3303 newnlop->nlo_pid = pid;
3304 newnlop->nlo_pid_start = p->p_start;
3305 newnlop->nlo_name = OSAddAtomic(1, &nfs_lock_owner_seqnum);
3306 TAILQ_INIT(&newnlop->nlo_locks);
3307 goto tryagain;
b0d623f7 3308 }
6d2010ae
A
3309 if (!nlop && newnlop) {
3310 newnlop->nlo_flags |= NFS_LOCK_OWNER_LINK;
0a7de745 3311 os_ref_init(&newnlop->nlo_refcnt, NULL);
6d2010ae
A
3312 TAILQ_INSERT_HEAD(&np->n_lock_owners, newnlop, nlo_link);
3313 nlop = newnlop;
b0d623f7 3314 }
6d2010ae 3315 lck_mtx_unlock(&np->n_openlock);
b0d623f7 3316
0a7de745 3317 if (newnlop && (nlop != newnlop)) {
6d2010ae 3318 nfs_lock_owner_destroy(newnlop);
0a7de745 3319 }
b0d623f7 3320
0a7de745 3321 if (nlop) {
6d2010ae 3322 nfs_lock_owner_ref(nlop);
0a7de745 3323 }
b0d623f7 3324
0a7de745 3325 return nlop;
6d2010ae 3326}
b0d623f7
A
3327
3328/*
3329 * destroy a lock owner that's no longer needed
3330 */
3331void
3332nfs_lock_owner_destroy(struct nfs_lock_owner *nlop)
3333{
3334 if (nlop->nlo_open_owner) {
3335 nfs_open_owner_rele(nlop->nlo_open_owner);
3336 nlop->nlo_open_owner = NULL;
3337 }
3338 lck_mtx_destroy(&nlop->nlo_lock, nfs_open_grp);
3339 FREE(nlop, M_TEMP);
3340}
3341
3342/*
3343 * acquire a reference count on a lock owner
3344 */
3345void
3346nfs_lock_owner_ref(struct nfs_lock_owner *nlop)
3347{
3348 lck_mtx_lock(&nlop->nlo_lock);
0a7de745 3349 os_ref_retain_locked(&nlop->nlo_refcnt);
b0d623f7
A
3350 lck_mtx_unlock(&nlop->nlo_lock);
3351}
3352
3353/*
3354 * drop a reference count on a lock owner and destroy it if
3355 * it is no longer referenced and no longer on the mount's list.
3356 */
3357void
3358nfs_lock_owner_rele(struct nfs_lock_owner *nlop)
3359{
0a7de745
A
3360 os_ref_count_t newcount;
3361
b0d623f7 3362 lck_mtx_lock(&nlop->nlo_lock);
0a7de745 3363 if (os_ref_get_count(&nlop->nlo_refcnt) < 1) {
b0d623f7 3364 panic("nfs_lock_owner_rele: no refcnt");
0a7de745
A
3365 }
3366 newcount = os_ref_release_locked(&nlop->nlo_refcnt);
3367 if (!newcount && (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY)) {
b0d623f7 3368 panic("nfs_lock_owner_rele: busy");
0a7de745 3369 }
b0d623f7 3370 /* XXX we may potentially want to clean up idle/unused lock owner structures */
0a7de745 3371 if (newcount || (nlop->nlo_flags & NFS_LOCK_OWNER_LINK)) {
b0d623f7
A
3372 lck_mtx_unlock(&nlop->nlo_lock);
3373 return;
3374 }
3375 /* owner is no longer referenced or linked to mount, so destroy it */
3376 lck_mtx_unlock(&nlop->nlo_lock);
3377 nfs_lock_owner_destroy(nlop);
3378}
3379
3380/*
3381 * Mark a lock owner as busy because we are about to
3382 * start an operation that uses and updates lock owner state.
3383 */
3384int
3385nfs_lock_owner_set_busy(struct nfs_lock_owner *nlop, thread_t thd)
3386{
3387 struct nfsmount *nmp;
cb323159 3388 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
b0d623f7
A
3389 int error = 0, slpflag;
3390
3391 nmp = nlop->nlo_open_owner->noo_mount;
0a7de745
A
3392 if (nfs_mount_gone(nmp)) {
3393 return ENXIO;
3394 }
6d2010ae 3395 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
b0d623f7
A
3396
3397 lck_mtx_lock(&nlop->nlo_lock);
3398 while (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY) {
0a7de745 3399 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
b0d623f7 3400 break;
0a7de745 3401 }
b0d623f7
A
3402 nlop->nlo_flags |= NFS_LOCK_OWNER_WANT;
3403 msleep(nlop, &nlop->nlo_lock, slpflag, "nfs_lock_owner_set_busy", &ts);
6d2010ae 3404 slpflag = 0;
b0d623f7 3405 }
0a7de745 3406 if (!error) {
b0d623f7 3407 nlop->nlo_flags |= NFS_LOCK_OWNER_BUSY;
0a7de745 3408 }
b0d623f7
A
3409 lck_mtx_unlock(&nlop->nlo_lock);
3410
0a7de745 3411 return error;
b0d623f7
A
3412}
3413
3414/*
3415 * Clear the busy flag on a lock owner and wake up anyone waiting
3416 * to mark it busy.
3417 */
3418void
3419nfs_lock_owner_clear_busy(struct nfs_lock_owner *nlop)
3420{
3421 int wanted;
3422
3423 lck_mtx_lock(&nlop->nlo_lock);
0a7de745 3424 if (!(nlop->nlo_flags & NFS_LOCK_OWNER_BUSY)) {
b0d623f7 3425 panic("nfs_lock_owner_clear_busy");
0a7de745 3426 }
b0d623f7 3427 wanted = (nlop->nlo_flags & NFS_LOCK_OWNER_WANT);
0a7de745 3428 nlop->nlo_flags &= ~(NFS_LOCK_OWNER_BUSY | NFS_LOCK_OWNER_WANT);
b0d623f7 3429 lck_mtx_unlock(&nlop->nlo_lock);
0a7de745 3430 if (wanted) {
b0d623f7 3431 wakeup(nlop);
0a7de745 3432 }
b0d623f7
A
3433}
3434
3435/*
3436 * Insert a held lock into a lock owner's sorted list.
3437 * (flock locks are always inserted at the head the list)
3438 */
3439void
3440nfs_lock_owner_insert_held_lock(struct nfs_lock_owner *nlop, struct nfs_file_lock *newnflp)
3441{
3442 struct nfs_file_lock *nflp;
3443
3444 /* insert new lock in lock owner's held lock list */
3445 lck_mtx_lock(&nlop->nlo_lock);
3446 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) {
3447 TAILQ_INSERT_HEAD(&nlop->nlo_locks, newnflp, nfl_lolink);
3448 } else {
3449 TAILQ_FOREACH(nflp, &nlop->nlo_locks, nfl_lolink) {
0a7de745 3450 if (newnflp->nfl_start < nflp->nfl_start) {
b0d623f7 3451 break;
0a7de745 3452 }
b0d623f7 3453 }
0a7de745 3454 if (nflp) {
b0d623f7 3455 TAILQ_INSERT_BEFORE(nflp, newnflp, nfl_lolink);
0a7de745 3456 } else {
b0d623f7 3457 TAILQ_INSERT_TAIL(&nlop->nlo_locks, newnflp, nfl_lolink);
0a7de745 3458 }
b0d623f7
A
3459 }
3460 lck_mtx_unlock(&nlop->nlo_lock);
3461}
3462
3463/*
3464 * Get a file lock structure for this lock owner.
3465 */
3466struct nfs_file_lock *
3467nfs_file_lock_alloc(struct nfs_lock_owner *nlop)
3468{
3469 struct nfs_file_lock *nflp = NULL;
3470
3471 lck_mtx_lock(&nlop->nlo_lock);
3472 if (!nlop->nlo_alock.nfl_owner) {
3473 nflp = &nlop->nlo_alock;
3474 nflp->nfl_owner = nlop;
3475 }
3476 lck_mtx_unlock(&nlop->nlo_lock);
3477 if (!nflp) {
3478 MALLOC(nflp, struct nfs_file_lock *, sizeof(struct nfs_file_lock), M_TEMP, M_WAITOK);
0a7de745
A
3479 if (!nflp) {
3480 return NULL;
3481 }
b0d623f7
A
3482 bzero(nflp, sizeof(*nflp));
3483 nflp->nfl_flags |= NFS_FILE_LOCK_ALLOC;
3484 nflp->nfl_owner = nlop;
3485 }
3486 nfs_lock_owner_ref(nlop);
0a7de745 3487 return nflp;
b0d623f7
A
3488}
3489
3490/*
3491 * destroy the given NFS file lock structure
3492 */
3493void
3494nfs_file_lock_destroy(struct nfs_file_lock *nflp)
3495{
3496 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3497
3498 if (nflp->nfl_flags & NFS_FILE_LOCK_ALLOC) {
3499 nflp->nfl_owner = NULL;
3500 FREE(nflp, M_TEMP);
3501 } else {
3502 lck_mtx_lock(&nlop->nlo_lock);
3e170ce0 3503 bzero(nflp, sizeof(*nflp));
b0d623f7
A
3504 lck_mtx_unlock(&nlop->nlo_lock);
3505 }
3506 nfs_lock_owner_rele(nlop);
3507}
3508
3509/*
3510 * Check if one file lock conflicts with another.
3511 * (nflp1 is the new lock. nflp2 is the existing lock.)
3512 */
3513int
3514nfs_file_lock_conflict(struct nfs_file_lock *nflp1, struct nfs_file_lock *nflp2, int *willsplit)
3515{
3516 /* no conflict if lock is dead */
0a7de745
A
3517 if ((nflp1->nfl_flags & NFS_FILE_LOCK_DEAD) || (nflp2->nfl_flags & NFS_FILE_LOCK_DEAD)) {
3518 return 0;
3519 }
b0d623f7
A
3520 /* no conflict if it's ours - unless the lock style doesn't match */
3521 if ((nflp1->nfl_owner == nflp2->nfl_owner) &&
3522 ((nflp1->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == (nflp2->nfl_flags & NFS_FILE_LOCK_STYLE_MASK))) {
3523 if (willsplit && (nflp1->nfl_type != nflp2->nfl_type) &&
3524 (nflp1->nfl_start > nflp2->nfl_start) &&
0a7de745 3525 (nflp1->nfl_end < nflp2->nfl_end)) {
b0d623f7 3526 *willsplit = 1;
0a7de745
A
3527 }
3528 return 0;
b0d623f7
A
3529 }
3530 /* no conflict if ranges don't overlap */
0a7de745
A
3531 if ((nflp1->nfl_start > nflp2->nfl_end) || (nflp1->nfl_end < nflp2->nfl_start)) {
3532 return 0;
3533 }
b0d623f7 3534 /* no conflict if neither lock is exclusive */
0a7de745
A
3535 if ((nflp1->nfl_type != F_WRLCK) && (nflp2->nfl_type != F_WRLCK)) {
3536 return 0;
3537 }
b0d623f7 3538 /* conflict */
0a7de745 3539 return 1;
b0d623f7
A
3540}
3541
cb323159 3542#if CONFIG_NFS4
b0d623f7
A
/*
 * Send an NFSv4 LOCK RPC to the server.
 *
 * np      - node the lock is taken on
 * nofp    - open file the lock is associated with
 * nflp    - the lock (range/type/flags) being requested
 * reclaim - nonzero when reclaiming state (e.g. after server reboot)
 * flags   - extra R_* request flags (R_NOINTR is always added)
 * thd/cred - thread and credential to issue the request under
 *
 * Returns 0 on success, or an errno / NFS error value.
 */
int
nfs4_setlock_rpc(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	struct nfs_file_lock *nflp,
	int reclaim,
	int flags,
	thread_t thd,
	kauth_cred_t cred)
{
	struct nfs_lock_owner *nlop = nflp->nfl_owner;
	struct nfsmount *nmp;
	struct nfsm_chain nmreq, nmrep;
	uint64_t xid;
	uint32_t locktype;
	int error = 0, lockerror = ENOENT, newlocker, numops, status;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	/* referral trigger nodes carry no lockable file state here */
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	/*
	 * A stategenid mismatch means this lock owner has no state
	 * established with the (current incarnation of the) server, so
	 * the LOCK must use the "new lock owner" form carrying the open
	 * owner's seqid and open stateid.
	 */
	newlocker = (nlop->nlo_stategenid != nmp->nm_stategenid);
	/* map (blocking?, read/write) onto the four NFSv4 lock types */
	locktype = (nflp->nfl_flags & NFS_FILE_LOCK_WAIT) ?
	    ((nflp->nfl_type == F_WRLCK) ?
	    NFS_LOCK_TYPE_WRITEW :
	    NFS_LOCK_TYPE_READW) :
	    ((nflp->nfl_type == F_WRLCK) ?
	    NFS_LOCK_TYPE_WRITE :
	    NFS_LOCK_TYPE_READ);
	if (newlocker) {
		/* busy the open file and open owner while their seqid/stateid are used */
		error = nfs_open_file_set_busy(nofp, thd);
		if (error) {
			return error;
		}
		error = nfs_open_owner_set_busy(nofp->nof_owner, thd);
		if (error) {
			nfs_open_file_clear_busy(nofp);
			return error;
		}
		if (!nlop->nlo_open_owner) {
			nfs_open_owner_ref(nofp->nof_owner);
			nlop->nlo_open_owner = nofp->nof_owner;
		}
	}
	error = nfs_lock_owner_set_busy(nlop, thd);
	if (error) {
		if (newlocker) {
			nfs_open_owner_clear_busy(nofp->nof_owner);
			nfs_open_file_clear_busy(nofp);
		}
		return error;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, LOCK
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 33 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "lock", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCK);
	nfsm_chain_add_32(error, &nmreq, locktype);
	nfsm_chain_add_32(error, &nmreq, reclaim);
	nfsm_chain_add_64(error, &nmreq, nflp->nfl_start);
	nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(nflp->nfl_start, nflp->nfl_end));
	nfsm_chain_add_32(error, &nmreq, newlocker);
	if (newlocker) {
		/* open_to_lock_owner4: establish a new lock owner from the open state */
		nfsm_chain_add_32(error, &nmreq, nofp->nof_owner->noo_seqid);
		nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
		nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
		nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
	} else {
		/* exist_lock_owner4: reuse the lock owner's existing stateid */
		nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
		nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
	}
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	/* R_NOINTR: a signal must not abandon the RPC mid-flight and desync lock state */
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCK);
	/* seqid update policy (including on error) is handled inside the helper */
	nfs_owner_seqid_increment(newlocker ? nofp->nof_owner : NULL, nlop, error);
	nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);

	/* Update the lock owner's stategenid once it appears the server has state for it. */
	/* We determine this by noting the request was successful (we got a stateid). */
	if (newlocker && !error) {
		nlop->nlo_stategenid = nmp->nm_stategenid;
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	/* drop busy states in the reverse order they were taken */
	nfs_lock_owner_clear_busy(nlop);
	if (newlocker) {
		nfs_open_owner_clear_busy(nofp->nof_owner);
		nfs_open_file_clear_busy(nofp);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
3672
/*
 * Send an NFSv4 LOCKU RPC to the server.
 *
 * Releases the byte range [start, end] of the given lock type held
 * by lock owner "nlop" on node "np".  The lock owner is marked busy
 * for the duration so its seqid/stateid updates stay serialized.
 *
 * Returns 0 on success, or an errno / NFS error value.
 */
int
nfs4_unlock_rpc(
	nfsnode_t np,
	struct nfs_lock_owner *nlop,
	int type,
	uint64_t start,
	uint64_t end,
	int flags,
	thread_t thd,
	kauth_cred_t cred)
{
	struct nfsmount *nmp;
	struct nfsm_chain nmreq, nmrep;
	uint64_t xid;
	int error = 0, lockerror = ENOENT, numops, status;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	/* referral trigger nodes carry no lockable file state here */
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	error = nfs_lock_owner_set_busy(nlop, NULL);
	if (error) {
		return error;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, LOCKU
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "unlock", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKU);
	nfsm_chain_add_32(error, &nmreq, (type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
	nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
	nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
	nfsm_chain_add_64(error, &nmreq, start);
	nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	/* R_NOINTR: a signal must not abandon the RPC and leave lock state inconsistent */
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKU);
	/* seqid update policy (including on error) is handled inside the helper */
	nfs_owner_seqid_increment(NULL, nlop, error);
	nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfs_lock_owner_clear_busy(nlop);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
3755
/*
 * Send an NFSv4 LOCKT RPC to the server.
 *
 * Tests whether the lock described by "fl" over [start, end] could be
 * granted to lock owner "nlop".  On a conflict (NFSERR_DENIED) the
 * conflicting lock's range/type are returned in "fl"; otherwise fl's
 * l_type is set to F_UNLCK.
 *
 * Returns 0 on success (answer in *fl), or an errno / NFS error value.
 */
int
nfs4_getlock_rpc(
	nfsnode_t np,
	struct nfs_lock_owner *nlop,
	struct flock *fl,
	uint64_t start,
	uint64_t end,
	vfs_context_t ctx)
{
	struct nfsmount *nmp;
	struct nfsm_chain nmreq, nmrep;
	uint64_t xid, val64 = 0;
	uint32_t val = 0;
	int error = 0, lockerror, numops, status;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	/* referral trigger nodes carry no lockable file state here */
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	/* non-zero until the node lock is successfully taken below */
	lockerror = ENOENT;
	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, LOCKT
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "locktest", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKT);
	nfsm_chain_add_32(error, &nmreq, (fl->l_type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
	nfsm_chain_add_64(error, &nmreq, start);
	nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
	nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKT);
	if (error == NFSERR_DENIED) {
		/* a conflicting lock was found - decode it into *fl */
		error = 0;
		nfsm_chain_get_64(error, &nmrep, fl->l_start);
		nfsm_chain_get_64(error, &nmrep, val64);
		/* an "all ones" length means to-end-of-file (l_len 0) */
		fl->l_len = (val64 == UINT64_MAX) ? 0 : val64;
		nfsm_chain_get_32(error, &nmrep, val);
		fl->l_type = (val == NFS_LOCK_TYPE_WRITE) ? F_WRLCK : F_RDLCK;
		/* holder's pid is not known from the server's lock owner */
		fl->l_pid = 0;
		fl->l_whence = SEEK_SET;
	} else if (!error) {
		/* no conflict */
		fl->l_type = F_UNLCK;
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
cb323159 3841#endif /* CONFIG_NFS4 */
6d2010ae
A
3842
/*
 * Check for any conflicts with the given lock.
 *
 * Checking for a lock doesn't require the file to be opened.
 * So we skip all the open owner, open file, lock owner work
 * and just check for a conflicting lock.
 *
 * Answers locally when possible (a known conflicting lock, or a
 * write delegation that guarantees no other lock holders exist);
 * otherwise asks the server via the version-specific getlock RPC.
 */
int
nfs_advlock_getlock(
	nfsnode_t np,
	struct nfs_lock_owner *nlop,
	struct flock *fl,
	uint64_t start,
	uint64_t end,
	vfs_context_t ctx)
{
	struct nfsmount *nmp;
	struct nfs_file_lock *nflp;
	int error = 0, answered = 0;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

restart:
	if ((error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx)))) {
		return error;
	}

	lck_mtx_lock(&np->n_openlock);
	/* scan currently held locks for conflict */
	TAILQ_FOREACH(nflp, &np->n_locks, nfl_link) {
		if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
			continue;
		}
		/* conflict = overlapping range with at least one side exclusive */
		if ((start <= nflp->nfl_end) && (end >= nflp->nfl_start) &&
		    ((fl->l_type == F_WRLCK) || (nflp->nfl_type == F_WRLCK))) {
			break;
		}
	}
	if (nflp) {
		/* found a conflicting lock */
		fl->l_type = nflp->nfl_type;
		/* flock-style locks have no meaningful pid to report */
		fl->l_pid = (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_FLOCK) ? -1 : nflp->nfl_owner->nlo_pid;
		fl->l_start = nflp->nfl_start;
		fl->l_len = NFS_FLOCK_LENGTH(nflp->nfl_start, nflp->nfl_end);
		fl->l_whence = SEEK_SET;
		answered = 1;
	} else if ((np->n_openflags & N_DELEG_WRITE) && !(np->n_openflags & N_DELEG_RETURN)) {
		/*
		 * If we have a write delegation, we know there can't be other
		 * locks on the server.  So the answer is no conflicting lock found.
		 */
		fl->l_type = F_UNLCK;
		answered = 1;
	}
	lck_mtx_unlock(&np->n_openlock);
	if (answered) {
		nfs_mount_state_in_use_end(nmp, 0);
		return 0;
	}

	/* no conflict found locally, so ask the server */
	error = nmp->nm_funcs->nf_getlock_rpc(np, nlop, fl, start, end, ctx);

	/* retry if state recovery forces the in-use section to restart */
	if (nfs_mount_state_in_use_end(nmp, error)) {
		goto restart;
	}
	return error;
}
3914
b0d623f7
A
/*
 * Acquire a file lock for the given range.
 *
 * Add the lock (request) to the lock queue.
 * Scan the lock queue for any conflicting locks.
 * If a conflict is found, block or return an error.
 * Once end of queue is reached, send request to the server.
 * If the server grants the lock, scan the lock queue and
 * update any existing locks.  Then (optionally) scan the
 * queue again to coalesce any locks adjacent to the new one.
 *
 * op    - F_SETLK or F_SETLKW (F_SETLKW requests wait for conflicts)
 * style - NFS_FILE_LOCK_STYLE_POSIX or _FLOCK
 * type  - F_RDLCK or F_WRLCK
 */
int
nfs_advlock_setlock(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	struct nfs_lock_owner *nlop,
	int op,
	uint64_t start,
	uint64_t end,
	int style,
	short type,
	vfs_context_t ctx)
{
	struct nfsmount *nmp;
	struct nfs_file_lock *newnflp, *nflp, *nflp2 = NULL, *nextnflp, *flocknflp = NULL;
	struct nfs_file_lock *coalnflp;
	int error = 0, error2, willsplit = 0, delay, slpflag, busy = 0, inuse = 0, restart, inqueue = 0;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;

	if ((type != F_RDLCK) && (type != F_WRLCK)) {
		return EINVAL;
	}

	/* allocate a new lock */
	newnflp = nfs_file_lock_alloc(nlop);
	if (!newnflp) {
		return ENOLCK;
	}
	newnflp->nfl_start = start;
	newnflp->nfl_end = end;
	newnflp->nfl_type = type;
	if (op == F_SETLKW) {
		newnflp->nfl_flags |= NFS_FILE_LOCK_WAIT;
	}
	newnflp->nfl_flags |= style;
	newnflp->nfl_flags |= NFS_FILE_LOCK_BLOCKED;

	if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && (type == F_WRLCK)) {
		/*
		 * For exclusive flock-style locks, if we block waiting for the
		 * lock, we need to first release any currently held shared
		 * flock-style lock.  So, the first thing we do is check if we
		 * have a shared flock-style lock.
		 */
		nflp = TAILQ_FIRST(&nlop->nlo_locks);
		if (nflp && ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_FLOCK)) {
			nflp = NULL;
		}
		if (nflp && (nflp->nfl_type != F_RDLCK)) {
			nflp = NULL;
		}
		flocknflp = nflp;
	}

restart:
	restart = 0;
	error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
	if (error) {
		goto error_out;
	}
	inuse = 1;
	if (np->n_flag & NREVOKE) {
		/* node's state has been revoked by the server */
		error = EIO;
		nfs_mount_state_in_use_end(nmp, 0);
		inuse = 0;
		goto error_out;
	}
#if CONFIG_NFS4
	if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
		/* the open needs to be reestablished before locking can proceed */
		nfs_mount_state_in_use_end(nmp, 0);
		inuse = 0;
		error = nfs4_reopen(nofp, vfs_context_thread(ctx));
		if (error) {
			goto error_out;
		}
		goto restart;
	}
#endif

	lck_mtx_lock(&np->n_openlock);
	if (!inqueue) {
		/* insert new lock at beginning of list */
		TAILQ_INSERT_HEAD(&np->n_locks, newnflp, nfl_link);
		inqueue = 1;
	}

	/* scan current list of locks (held and pending) for conflicts */
	for (nflp = TAILQ_NEXT(newnflp, nfl_link); nflp; nflp = nextnflp) {
		nextnflp = TAILQ_NEXT(nflp, nfl_link);
		if (!nfs_file_lock_conflict(newnflp, nflp, &willsplit)) {
			continue;
		}
		/* Conflict */
		if (!(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
			error = EAGAIN;
			break;
		}
		/* Block until this lock is no longer held. */
		if (nflp->nfl_blockcnt == UINT_MAX) {
			error = ENOLCK;
			break;
		}
		nflp->nfl_blockcnt++;
		do {
			if (flocknflp) {
				/* release any currently held shared lock before sleeping */
				lck_mtx_unlock(&np->n_openlock);
				nfs_mount_state_in_use_end(nmp, 0);
				inuse = 0;
				error = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
				flocknflp = NULL;
				if (!error) {
					error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
				}
				if (error) {
					lck_mtx_lock(&np->n_openlock);
					break;
				}
				inuse = 1;
				lck_mtx_lock(&np->n_openlock);
				/* no need to block/sleep if the conflict is gone */
				if (!nfs_file_lock_conflict(newnflp, nflp, NULL)) {
					break;
				}
			}
			/* sleep (up to 1 second at a time) waiting for the holder to wake us */
			msleep(nflp, &np->n_openlock, slpflag, "nfs_advlock_setlock_blocked", &ts);
			slpflag = 0;
			error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
			if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
				/* looks like we have a recover pending... restart */
				restart = 1;
				lck_mtx_unlock(&np->n_openlock);
				nfs_mount_state_in_use_end(nmp, 0);
				inuse = 0;
				lck_mtx_lock(&np->n_openlock);
				break;
			}
			if (!error && (np->n_flag & NREVOKE)) {
				error = EIO;
			}
		} while (!error && nfs_file_lock_conflict(newnflp, nflp, NULL));
		nflp->nfl_blockcnt--;
		if ((nflp->nfl_flags & NFS_FILE_LOCK_DEAD) && !nflp->nfl_blockcnt) {
			/* we were the last waiter on a dead lock - reap it */
			TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
			nfs_file_lock_destroy(nflp);
		}
		if (error || restart) {
			break;
		}
		/* We have released n_openlock and we can't trust that nextnflp is still valid. */
		/* So, start this lock-scanning loop over from where it started. */
		nextnflp = TAILQ_NEXT(newnflp, nfl_link);
	}
	lck_mtx_unlock(&np->n_openlock);
	if (restart) {
		goto restart;
	}
	if (error) {
		goto error_out;
	}

	if (willsplit) {
		/*
		 * It looks like this operation is splitting a lock.
		 * We allocate a new lock now so we don't have to worry
		 * about the allocation failing after we've updated some state.
		 */
		nflp2 = nfs_file_lock_alloc(nlop);
		if (!nflp2) {
			error = ENOLCK;
			goto error_out;
		}
	}

	/* once scan for local conflicts is clear, send request to server */
	if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) {
		goto error_out;
	}
	busy = 1;
	delay = 0;
	do {
#if CONFIG_NFS4
		/* do we have a delegation? (that we're not returning?) */
		if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN)) {
			if (np->n_openflags & N_DELEG_WRITE) {
				/* with a write delegation, just take the lock delegated */
				newnflp->nfl_flags |= NFS_FILE_LOCK_DELEGATED;
				error = 0;
				/* make sure the lock owner knows its open owner */
				if (!nlop->nlo_open_owner) {
					nfs_open_owner_ref(nofp->nof_owner);
					nlop->nlo_open_owner = nofp->nof_owner;
				}
				break;
			} else {
				/*
				 * If we don't have any non-delegated opens but we do have
				 * delegated opens, then we need to first claim the delegated
				 * opens so that the lock request on the server can be associated
				 * with an open it knows about.
				 */
				if ((!nofp->nof_rw_drw && !nofp->nof_w_drw && !nofp->nof_r_drw &&
				    !nofp->nof_rw_dw && !nofp->nof_w_dw && !nofp->nof_r_dw &&
				    !nofp->nof_rw && !nofp->nof_w && !nofp->nof_r) &&
				    (nofp->nof_d_rw_drw || nofp->nof_d_w_drw || nofp->nof_d_r_drw ||
				    nofp->nof_d_rw_dw || nofp->nof_d_w_dw || nofp->nof_d_r_dw ||
				    nofp->nof_d_rw || nofp->nof_d_w || nofp->nof_d_r)) {
					error = nfs4_claim_delegated_state_for_open_file(nofp, 0);
					if (error) {
						break;
					}
				}
			}
		}
#endif
		if (np->n_flag & NREVOKE) {
			error = EIO;
		}
		if (!error) {
			error = nmp->nm_funcs->nf_setlock_rpc(np, nofp, newnflp, 0, 0, vfs_context_thread(ctx), vfs_context_ucred(ctx));
		}
		if (!error || ((error != NFSERR_DENIED) && (error != NFSERR_GRACE))) {
			break;
		}
		/* request was denied due to either conflict or grace period */
		if ((error == NFSERR_DENIED) && !(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
			error = EAGAIN;
			break;
		}
		if (flocknflp) {
			/* release any currently held shared lock before sleeping */
			nfs_open_state_clear_busy(np);
			busy = 0;
			nfs_mount_state_in_use_end(nmp, 0);
			inuse = 0;
			error2 = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
			flocknflp = NULL;
			if (!error2) {
				error2 = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
			}
			if (!error2) {
				inuse = 1;
				error2 = nfs_open_state_set_busy(np, vfs_context_thread(ctx));
			}
			if (error2) {
				error = error2;
				break;
			}
			busy = 1;
		}
		/*
		 * Wait a little bit and send the request again.
		 * Except for retries of blocked v2/v3 request where we've already waited a bit.
		 */
		if ((nmp->nm_vers >= NFS_VER4) || (error == NFSERR_GRACE)) {
			if (error == NFSERR_GRACE) {
				delay = 4;
			}
			if (delay < 4) {
				delay++;
			}
			tsleep(newnflp, slpflag, "nfs_advlock_setlock_delay", delay * (hz / 2));
			slpflag = 0;
		}
		error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
		if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
			/* looks like we have a recover pending... restart */
			nfs_open_state_clear_busy(np);
			busy = 0;
			nfs_mount_state_in_use_end(nmp, 0);
			inuse = 0;
			goto restart;
		}
		if (!error && (np->n_flag & NREVOKE)) {
			error = EIO;
		}
	} while (!error);

error_out:
	if (nfs_mount_state_error_should_restart(error)) {
		/* looks like we need to restart this operation */
		if (busy) {
			nfs_open_state_clear_busy(np);
			busy = 0;
		}
		if (inuse) {
			nfs_mount_state_in_use_end(nmp, error);
			inuse = 0;
		}
		goto restart;
	}
	lck_mtx_lock(&np->n_openlock);
	newnflp->nfl_flags &= ~NFS_FILE_LOCK_BLOCKED;
	if (error) {
		newnflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
		if (newnflp->nfl_blockcnt) {
			/* wake up anyone blocked on this lock */
			wakeup(newnflp);
		} else {
			/* remove newnflp from lock list and destroy */
			if (inqueue) {
				TAILQ_REMOVE(&np->n_locks, newnflp, nfl_link);
			}
			nfs_file_lock_destroy(newnflp);
		}
		lck_mtx_unlock(&np->n_openlock);
		if (busy) {
			nfs_open_state_clear_busy(np);
		}
		if (inuse) {
			nfs_mount_state_in_use_end(nmp, error);
		}
		if (nflp2) {
			nfs_file_lock_destroy(nflp2);
		}
		return error;
	}

	/* server granted the lock */

	/*
	 * Scan for locks to update.
	 *
	 * Locks completely covered are killed.
	 * At most two locks may need to be clipped.
	 * It's possible that a single lock may need to be split.
	 */
	TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
		if (nflp == newnflp) {
			continue;
		}
		if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
			continue;
		}
		if (nflp->nfl_owner != nlop) {
			continue;
		}
		if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK)) {
			continue;
		}
		if ((newnflp->nfl_start > nflp->nfl_end) || (newnflp->nfl_end < nflp->nfl_start)) {
			continue;
		}
		/* here's one to update */
		if ((newnflp->nfl_start <= nflp->nfl_start) && (newnflp->nfl_end >= nflp->nfl_end)) {
			/* The entire lock is being replaced. */
			nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
			lck_mtx_lock(&nlop->nlo_lock);
			TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
			lck_mtx_unlock(&nlop->nlo_lock);
			/* lock will be destroyed below, if no waiters */
		} else if ((newnflp->nfl_start > nflp->nfl_start) && (newnflp->nfl_end < nflp->nfl_end)) {
			/* We're replacing a range in the middle of a lock. */
			/* The current lock will be split into two locks. */
			/* Update locks and insert new lock after current lock. */
			nflp2->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK | NFS_FILE_LOCK_DELEGATED));
			nflp2->nfl_type = nflp->nfl_type;
			nflp2->nfl_start = newnflp->nfl_end + 1;
			nflp2->nfl_end = nflp->nfl_end;
			nflp->nfl_end = newnflp->nfl_start - 1;
			TAILQ_INSERT_AFTER(&np->n_locks, nflp, nflp2, nfl_link);
			nfs_lock_owner_insert_held_lock(nlop, nflp2);
			nextnflp = nflp2;
			nflp2 = NULL;
		} else if (newnflp->nfl_start > nflp->nfl_start) {
			/* We're replacing the end of a lock. */
			nflp->nfl_end = newnflp->nfl_start - 1;
		} else if (newnflp->nfl_end < nflp->nfl_end) {
			/* We're replacing the start of a lock. */
			nflp->nfl_start = newnflp->nfl_end + 1;
		}
		if (nflp->nfl_blockcnt) {
			/* wake up anyone blocked on this lock */
			wakeup(nflp);
		} else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
			/* remove nflp from lock list and destroy */
			TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
			nfs_file_lock_destroy(nflp);
		}
	}

	nfs_lock_owner_insert_held_lock(nlop, newnflp);

	/*
	 * POSIX locks should be coalesced when possible.
	 */
	if ((style == NFS_FILE_LOCK_STYLE_POSIX) && (nofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)) {
		/*
		 * Walk through the lock queue and check each of our held locks with
		 * the previous and next locks in the lock owner's "held lock list".
		 * If the two locks can be coalesced, we merge the current lock into
		 * the other (previous or next) lock.  Merging this way makes sure that
		 * lock ranges are always merged forward in the lock queue.  This is
		 * important because anyone blocked on the lock being "merged away"
		 * will still need to block on that range and it will simply continue
		 * checking locks that are further down the list.
		 */
		TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
			if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
				continue;
			}
			if (nflp->nfl_owner != nlop) {
				continue;
			}
			if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_POSIX) {
				continue;
			}
			if (((coalnflp = TAILQ_PREV(nflp, nfs_file_lock_queue, nfl_lolink))) &&
			    ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
			    (coalnflp->nfl_type == nflp->nfl_type) &&
			    (coalnflp->nfl_end == (nflp->nfl_start - 1))) {
				/* adjacent to previous held lock of same type - merge backward */
				coalnflp->nfl_end = nflp->nfl_end;
				nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
				lck_mtx_lock(&nlop->nlo_lock);
				TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
				lck_mtx_unlock(&nlop->nlo_lock);
			} else if (((coalnflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
			    ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
			    (coalnflp->nfl_type == nflp->nfl_type) &&
			    (coalnflp->nfl_start == (nflp->nfl_end + 1))) {
				/* adjacent to next held lock of same type - merge forward */
				coalnflp->nfl_start = nflp->nfl_start;
				nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
				lck_mtx_lock(&nlop->nlo_lock);
				TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
				lck_mtx_unlock(&nlop->nlo_lock);
			}
			if (!(nflp->nfl_flags & NFS_FILE_LOCK_DEAD)) {
				continue;
			}
			if (nflp->nfl_blockcnt) {
				/* wake up anyone blocked on this lock */
				wakeup(nflp);
			} else {
				/* remove nflp from lock list and destroy */
				TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
				nfs_file_lock_destroy(nflp);
			}
		}
	}

	lck_mtx_unlock(&np->n_openlock);
	nfs_open_state_clear_busy(np);
	nfs_mount_state_in_use_end(nmp, error);

	if (nflp2) {
		nfs_file_lock_destroy(nflp2);
	}
	return error;
}
4380
6d2010ae
A
/*
 * Release all (same style) locks within the given range.
 *
 * Walks the node's lock list and, for each of this lock owner's locks of
 * the given style that overlap [start, end], sends an unlock RPC (unless
 * the lock is delegated or flock/posix interaction suppresses RPCs) and
 * then trims, splits, or removes the lock record.  The n_openlock mutex
 * is dropped around each RPC; recoverable mount-state errors restart the
 * whole operation from the top.
 *
 * Returns 0 on success or an errno-style error.
 */
int
nfs_advlock_unlock(
	nfsnode_t np,
	struct nfs_open_file *nofp
#if !CONFIG_NFS4
	__unused
#endif
	,
	struct nfs_lock_owner *nlop,
	uint64_t start,
	uint64_t end,
	int style,
	vfs_context_t ctx)
{
	struct nfsmount *nmp;
	struct nfs_file_lock *nflp, *nextnflp, *newnflp = NULL;
	int error = 0, willsplit = 0, send_unlock_rpcs = 1;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

restart:
	if ((error = nfs_mount_state_in_use_start(nmp, NULL))) {
		return error;
	}
#if CONFIG_NFS4
	/* If the open state was lost, reestablish it before unlocking. */
	if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
		nfs_mount_state_in_use_end(nmp, 0);
		error = nfs4_reopen(nofp, NULL);
		if (error) {
			return error;
		}
		goto restart;
	}
#endif
	if ((error = nfs_open_state_set_busy(np, NULL))) {
		nfs_mount_state_in_use_end(nmp, error);
		return error;
	}

	lck_mtx_lock(&np->n_openlock);
	if ((start > 0) && (end < UINT64_MAX) && !willsplit) {
		/*
		 * We may need to allocate a new lock if an existing lock gets split.
		 * So, we first scan the list to check for a split, and if there's
		 * going to be one, we'll allocate one now.
		 */
		TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
			if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
				continue;
			}
			if (nflp->nfl_owner != nlop) {
				continue;
			}
			if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style) {
				continue;
			}
			if ((start > nflp->nfl_end) || (end < nflp->nfl_start)) {
				continue;
			}
			/* unlock range strictly inside this lock => it will split in two */
			if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
				willsplit = 1;
				break;
			}
		}
		if (willsplit) {
			/* drop all state to allocate (may block), then start over */
			lck_mtx_unlock(&np->n_openlock);
			nfs_open_state_clear_busy(np);
			nfs_mount_state_in_use_end(nmp, 0);
			newnflp = nfs_file_lock_alloc(nlop);
			if (!newnflp) {
				return ENOMEM;
			}
			goto restart;
		}
	}

	/*
	 * Free all of our locks in the given range.
	 *
	 * Note that this process requires sending requests to the server.
	 * Because of this, we will release the n_openlock while performing
	 * the unlock RPCs.  The N_OPENBUSY state keeps the state of *held*
	 * locks from changing underneath us.  However, other entries in the
	 * list may be removed.  So we need to be careful walking the list.
	 */

	/*
	 * Don't unlock ranges that are held by other-style locks.
	 * If style is posix, don't send any unlock rpcs if flock is held.
	 * If we unlock an flock, don't send unlock rpcs for any posix-style
	 * ranges held - instead send unlocks for the ranges not held.
	 */
	if ((style == NFS_FILE_LOCK_STYLE_POSIX) &&
	    ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
	    ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK)) {
		send_unlock_rpcs = 0;
	}
	if ((style == NFS_FILE_LOCK_STYLE_FLOCK) &&
	    ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
	    ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) &&
	    ((nflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
	    ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX)) {
		uint64_t s = 0;
		int type = TAILQ_FIRST(&nlop->nlo_locks)->nfl_type;
		int delegated = (TAILQ_FIRST(&nlop->nlo_locks)->nfl_flags & NFS_FILE_LOCK_DELEGATED);
		/* send unlocks only for the gaps between held POSIX ranges */
		while (!delegated && nflp) {
			if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) {
				/* unlock the range preceding this lock */
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, nflp->nfl_start - 1, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
				if (error) {
					goto out;
				}
				s = nflp->nfl_end + 1;
			}
			nflp = TAILQ_NEXT(nflp, nfl_lolink);
		}
		if (!delegated) {
			/* unlock the trailing range after the last held lock */
			lck_mtx_unlock(&np->n_openlock);
			error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, end, 0,
			    vfs_context_thread(ctx), vfs_context_ucred(ctx));
			if (nfs_mount_state_error_should_restart(error)) {
				nfs_open_state_clear_busy(np);
				nfs_mount_state_in_use_end(nmp, error);
				goto restart;
			}
			lck_mtx_lock(&np->n_openlock);
			if (error) {
				goto out;
			}
		}
		send_unlock_rpcs = 0;
	}

	TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
		if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
			continue;
		}
		if (nflp->nfl_owner != nlop) {
			continue;
		}
		if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style) {
			continue;
		}
		if ((start > nflp->nfl_end) || (end < nflp->nfl_start)) {
			continue;
		}
		/* here's one to unlock */
		if ((start <= nflp->nfl_start) && (end >= nflp->nfl_end)) {
			/* The entire lock is being unlocked. */
			if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, nflp->nfl_end, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			/* refetch the successor: the list may have changed while unlocked */
			nextnflp = TAILQ_NEXT(nflp, nfl_link);
			if (error) {
				break;
			}
			nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
			lck_mtx_lock(&nlop->nlo_lock);
			TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
			lck_mtx_unlock(&nlop->nlo_lock);
			/* lock will be destroyed below, if no waiters */
		} else if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
			/* We're unlocking a range in the middle of a lock. */
			/* The current lock will be split into two locks. */
			if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, end, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			if (error) {
				break;
			}
			/* update locks and insert new lock after current lock */
			/* newnflp was preallocated in the willsplit scan above */
			newnflp->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK | NFS_FILE_LOCK_DELEGATED));
			newnflp->nfl_type = nflp->nfl_type;
			newnflp->nfl_start = end + 1;
			newnflp->nfl_end = nflp->nfl_end;
			nflp->nfl_end = start - 1;
			TAILQ_INSERT_AFTER(&np->n_locks, nflp, newnflp, nfl_link);
			nfs_lock_owner_insert_held_lock(nlop, newnflp);
			nextnflp = newnflp;
			newnflp = NULL;
		} else if (start > nflp->nfl_start) {
			/* We're unlocking the end of a lock. */
			if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, nflp->nfl_end, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			nextnflp = TAILQ_NEXT(nflp, nfl_link);
			if (error) {
				break;
			}
			nflp->nfl_end = start - 1;
		} else if (end < nflp->nfl_end) {
			/* We're unlocking the start of a lock. */
			if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, end, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			nextnflp = TAILQ_NEXT(nflp, nfl_link);
			if (error) {
				break;
			}
			nflp->nfl_start = end + 1;
		}
		if (nflp->nfl_blockcnt) {
			/* wake up anyone blocked on this lock */
			wakeup(nflp);
		} else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
			/* remove nflp from lock list and destroy */
			TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
			nfs_file_lock_destroy(nflp);
		}
	}
out:
	lck_mtx_unlock(&np->n_openlock);
	nfs_open_state_clear_busy(np);
	nfs_mount_state_in_use_end(nmp, 0);

	/* free the preallocated split lock if it ended up unused */
	if (newnflp) {
		nfs_file_lock_destroy(newnflp);
	}
	return error;
}
4647
/*
 * NFSv4 advisory file locking
 *
 * VNOP entry point for F_GETLK / F_SETLK / F_SETLKW / F_UNLCK requests.
 * Converts the caller's struct flock into an absolute [start, end] byte
 * range, locates (or creates) the per-process lock owner and the open
 * file, and dispatches to nfs_advlock_getlock/_setlock/_unlock.
 */
int
nfs_vnop_advlock(
	struct vnop_advlock_args /* {
                                  *  struct vnodeop_desc *a_desc;
                                  *  vnode_t a_vp;
                                  *  caddr_t a_id;
                                  *  int a_op;
                                  *  struct flock *a_fl;
                                  *  int a_flags;
                                  *  vfs_context_t a_context;
                                  *  } */*ap)
{
	vnode_t vp = ap->a_vp;
	nfsnode_t np = VTONFS(ap->a_vp);
	struct flock *fl = ap->a_fl;
	int op = ap->a_op;
	int flags = ap->a_flags;
	vfs_context_t ctx = ap->a_context;
	struct nfsmount *nmp;
	struct nfs_open_owner *noop = NULL;
	struct nfs_open_file *nofp = NULL;
	struct nfs_lock_owner *nlop = NULL;
	off_t lstart;
	uint64_t start, end;
	int error = 0, modified, style;
	enum vtype vtype;
#define OFF_MAX QUAD_MAX

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	/* v2/v3 mounts with locking disabled don't support advisory locks */
	lck_mtx_lock(&nmp->nm_lock);
	if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED)) {
		lck_mtx_unlock(&nmp->nm_lock);
		return ENOTSUP;
	}
	lck_mtx_unlock(&nmp->nm_lock);

	if (np->n_flag & NREVOKE) {
		return EIO;
	}
	vtype = vnode_vtype(ap->a_vp);
	if (vtype == VDIR) { /* ignore lock requests on directories */
		return 0;
	}
	if (vtype != VREG) { /* anything other than regular files is invalid */
		return EINVAL;
	}

	/* Convert the flock structure into a start and end. */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * to fl->l_start when SEEK_CUR is used.
		 */
		lstart = fl->l_start;
		break;
	case SEEK_END:
		/* need to flush, and refetch attributes to make */
		/* sure we have the correct end of file offset */
		if ((error = nfs_node_lock(np))) {
			return error;
		}
		modified = (np->n_flag & NMODIFIED);
		nfs_node_unlock(np);
		if (modified && ((error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1)))) {
			return error;
		}
		if ((error = nfs_getattr(np, NULL, ctx, NGA_UNCACHED))) {
			return error;
		}
		nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
		/* guard the n_size + l_start addition against off_t overflow */
		if ((np->n_size > OFF_MAX) ||
		    ((fl->l_start > 0) && (np->n_size > (u_quad_t)(OFF_MAX - fl->l_start)))) {
			error = EOVERFLOW;
		}
		lstart = np->n_size + fl->l_start;
		nfs_data_unlock(np);
		if (error) {
			return error;
		}
		break;
	default:
		return EINVAL;
	}
	if (lstart < 0) {
		return EINVAL;
	}
	start = lstart;
	if (fl->l_len == 0) {
		/* zero length means lock to end of file */
		end = UINT64_MAX;
	} else if (fl->l_len > 0) {
		if ((fl->l_len - 1) > (OFF_MAX - lstart)) {
			return EOVERFLOW;
		}
		end = start - 1 + fl->l_len;
	} else { /* l_len is negative */
		if ((lstart + fl->l_len) < 0) {
			return EINVAL;
		}
		end = start - 1;
		start += fl->l_len;
	}
	/* NFSv2 protocol limits offsets to 32 bits */
	if ((nmp->nm_vers == NFS_VER2) && ((start > INT32_MAX) || (fl->l_len && (end > INT32_MAX)))) {
		return EINVAL;
	}

	style = (flags & F_FLOCK) ? NFS_FILE_LOCK_STYLE_FLOCK : NFS_FILE_LOCK_STYLE_POSIX;
	/* flock(2) locks are always whole-file */
	if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && ((start != 0) || (end != UINT64_MAX))) {
		return EINVAL;
	}

	/* find the lock owner, alloc if not unlock */
	nlop = nfs_lock_owner_find(np, vfs_context_proc(ctx), (op != F_UNLCK));
	if (!nlop) {
		/* no owner found on unlock just means nothing to unlock */
		error = (op == F_UNLCK) ? 0 : ENOMEM;
		if (error) {
			NP(np, "nfs_vnop_advlock: no lock owner, error %d", error);
		}
		goto out;
	}

	if (op == F_GETLK) {
		error = nfs_advlock_getlock(np, nlop, fl, start, end, ctx);
	} else {
		/* find the open owner */
		noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 0);
		if (!noop) {
			NP(np, "nfs_vnop_advlock: no open owner %d", kauth_cred_getuid(vfs_context_ucred(ctx)));
			error = EPERM;
			goto out;
		}
		/* find the open file */
#if CONFIG_NFS4
restart:
#endif
		error = nfs_open_file_find(np, noop, &nofp, 0, 0, 0);
		if (error) {
			error = EBADF;
		}
		if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
			NP(np, "nfs_vnop_advlock: LOST %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
			error = EIO;
		}
#if CONFIG_NFS4
		/* reopen lost state; don't allow interruption when unlocking */
		if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
			error = nfs4_reopen(nofp, ((op == F_UNLCK) ? NULL : vfs_context_thread(ctx)));
			nofp = NULL;
			if (!error) {
				goto restart;
			}
		}
#endif
		if (error) {
			NP(np, "nfs_vnop_advlock: no open file %d, %d", error, kauth_cred_getuid(noop->noo_cred));
			goto out;
		}
		if (op == F_UNLCK) {
			error = nfs_advlock_unlock(np, nofp, nlop, start, end, style, ctx);
		} else if ((op == F_SETLK) || (op == F_SETLKW)) {
			if ((op == F_SETLK) && (flags & F_WAIT)) {
				op = F_SETLKW;
			}
			error = nfs_advlock_setlock(np, nofp, nlop, op, start, end, style, fl->l_type, ctx);
		} else {
			/* not getlk, unlock or lock? */
			error = EINVAL;
		}
	}

out:
	if (nlop) {
		nfs_lock_owner_rele(nlop);
	}
	if (noop) {
		nfs_open_owner_rele(noop);
	}
	return error;
}
4833
4834/*
4835 * Check if an open owner holds any locks on a file.
4836 */
4837int
6d2010ae 4838nfs_check_for_locks(struct nfs_open_owner *noop, struct nfs_open_file *nofp)
b0d623f7
A
4839{
4840 struct nfs_lock_owner *nlop;
4841
4842 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
0a7de745 4843 if (nlop->nlo_open_owner != noop) {
b0d623f7 4844 continue;
0a7de745
A
4845 }
4846 if (!TAILQ_EMPTY(&nlop->nlo_locks)) {
b0d623f7 4847 break;
0a7de745 4848 }
b0d623f7 4849 }
0a7de745 4850 return nlop ? 1 : 0;
b0d623f7
A
4851}
4852
cb323159 4853#if CONFIG_NFS4
b0d623f7
A
/*
 * Reopen simple (no deny, no locks) open state that was lost.
 *
 * Serializes with other reopeners via NFS_OPEN_FILE_REOPENING, works out
 * the parent directory and file name to open by (using the sillyrename
 * info if the node was sillyrenamed), then re-sends OPEN RPCs for each
 * access mode the open file currently holds (rw/w/r).  On unrecoverable
 * failure the node's open state is revoked.
 */
int
nfs4_reopen(struct nfs_open_file *nofp, thread_t thd)
{
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfsmount *nmp = NFSTONMP(nofp->nof_np);
	nfsnode_t np = nofp->nof_np;
	vnode_t vp = NFSTOV(np);
	vnode_t dvp = NULL;
	struct componentname cn;
	const char *vname = NULL;
	const char *name = NULL;
	size_t namelen;
	char smallname[128];
	char *filename = NULL;
	int error = 0, done = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	/* wait for any other thread already reopening this file */
	lck_mtx_lock(&nofp->nof_lock);
	while (nofp->nof_flags & NFS_OPEN_FILE_REOPENING) {
		if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
			break;
		}
		msleep(&nofp->nof_flags, &nofp->nof_lock, slpflag | (PZERO - 1), "nfsreopenwait", &ts);
		slpflag = 0;
	}
	/* nothing to do if another thread already completed the reopen */
	if (error || !(nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
		lck_mtx_unlock(&nofp->nof_lock);
		return error;
	}
	nofp->nof_flags |= NFS_OPEN_FILE_REOPENING;
	lck_mtx_unlock(&nofp->nof_lock);

	nfs_node_lock_force(np);
	if ((vnode_vtype(vp) != VDIR) && np->n_sillyrename) {
		/*
		 * The node's been sillyrenamed, so we need to use
		 * the sillyrename directory/name to do the open.
		 */
		struct nfs_sillyrename *nsp = np->n_sillyrename;
		dvp = NFSTOV(nsp->nsr_dnp);
		if ((error = vnode_get(dvp))) {
			dvp = NULLVP;
			nfs_node_unlock(np);
			goto out;
		}
		name = nsp->nsr_name;
	} else {
		/*
		 * [sigh] We can't trust VFS to get the parent right for named
		 * attribute nodes. (It likes to reparent the nodes after we've
		 * created them.) Luckily we can probably get the right parent
		 * from the n_parent we have stashed away.
		 */
		if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
		    (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
			dvp = NULL;
		}
		if (!dvp) {
			dvp = vnode_getparent(vp);
		}
		vname = vnode_getname(vp);
		if (!dvp || !vname) {
			if (!error) {
				error = EIO;
			}
			nfs_node_unlock(np);
			goto out;
		}
		name = vname;
	}
	/* copy the name; fall back to a heap buffer if it doesn't fit */
	filename = &smallname[0];
	namelen = snprintf(filename, sizeof(smallname), "%s", name);
	if (namelen >= sizeof(smallname)) {
		MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
		if (!filename) {
			error = ENOMEM;
			goto out;
		}
		snprintf(filename, namelen + 1, "%s", name);
	}
	nfs_node_unlock(np);
	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = filename;
	cn.cn_namelen = namelen;

restart:
	done = 0;
	if ((error = nfs_mount_state_in_use_start(nmp, thd))) {
		goto out;
	}

	/* re-send an OPEN for each share-access mode currently held */
	if (nofp->nof_rw) {
		error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE);
	}
	if (!error && nofp->nof_w) {
		error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE);
	}
	if (!error && nofp->nof_r) {
		error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE);
	}

	if (nfs_mount_state_in_use_end(nmp, error)) {
		/* retry if the server is still in its grace period */
		if (error == NFSERR_GRACE) {
			goto restart;
		}
		printf("nfs4_reopen: RPC failed, error %d, lost %d, %s\n", error,
		    (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
		error = 0;
		goto out;
	}
	done = 1;
out:
	if (error && (error != EINTR) && (error != ERESTART)) {
		nfs_revoke_open_state_for_node(np);
	}
	lck_mtx_lock(&nofp->nof_lock);
	nofp->nof_flags &= ~NFS_OPEN_FILE_REOPENING;
	if (done) {
		nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
	} else if (error) {
		printf("nfs4_reopen: failed, error %d, lost %d, %s\n", error,
		    (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
	}
	lck_mtx_unlock(&nofp->nof_lock);
	if (filename && (filename != &smallname[0])) {
		FREE(filename, M_TEMP);
	}
	if (vname) {
		vnode_putname(vname);
	}
	if (dvp != NULLVP) {
		vnode_put(dvp);
	}
	return error;
}
4992
4993/*
4994 * Send a normal OPEN RPC to open/create a file.
4995 */
4996int
4997nfs4_open_rpc(
4998 struct nfs_open_file *nofp,
4999 vfs_context_t ctx,
5000 struct componentname *cnp,
5001 struct vnode_attr *vap,
5002 vnode_t dvp,
5003 vnode_t *vpp,
5004 int create,
5005 int share_access,
5006 int share_deny)
5007{
0a7de745
A
5008 return nfs4_open_rpc_internal(nofp, ctx, vfs_context_thread(ctx), vfs_context_ucred(ctx),
5009 cnp, vap, dvp, vpp, create, share_access, share_deny);
b0d623f7
A
5010}
5011
5012/*
5013 * Send an OPEN RPC to reopen a file.
5014 */
5015int
5016nfs4_open_reopen_rpc(
5017 struct nfs_open_file *nofp,
5018 thread_t thd,
5019 kauth_cred_t cred,
5020 struct componentname *cnp,
5021 vnode_t dvp,
5022 vnode_t *vpp,
5023 int share_access,
5024 int share_deny)
5025{
0a7de745 5026 return nfs4_open_rpc_internal(nofp, NULL, thd, cred, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, share_access, share_deny);
6d2010ae
A
5027}
5028
/*
 * Send an OPEN_CONFIRM RPC to confirm an OPEN.
 *
 * Builds and sends a PUTFH + OPEN_CONFIRM + GETATTR compound for the
 * given file handle, updating the open stateid (*sid) from the reply
 * and parsing the returned attributes into *nvap.  The owner's seqid
 * is incremented as the OPEN_CONFIRM result is processed.
 *
 * Note: the nfsm_chain_* and nfsm_* macros thread the `error` variable
 * through every step, so each call is a no-op once an error is set.
 */
int
nfs4_open_confirm_rpc(
	struct nfsmount *nmp,
	nfsnode_t dnp,
	u_char *fhp,
	int fhlen,
	struct nfs_open_owner *noop,
	nfs_stateid *sid,
	thread_t thd,
	kauth_cred_t cred,
	struct nfs_vattr *nvap,
	uint64_t *xidp)
{
	struct nfsm_chain nmreq, nmrep;
	int error = 0, status, numops;
	struct nfsreq_secinfo_args si;

	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, OPEN_CONFIRM, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "open_confirm", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_CONFIRM);
	nfsm_chain_add_stateid(error, &nmreq, sid);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	nfsm_chain_build_done(error, &nmreq);
	/* every op must have been consumed or the request is malformed */
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, &nmrep, xidp, &status);

	/* parse the reply: check each op's status, then pull out the results */
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_CONFIRM);
	nfs_owner_seqid_increment(noop, NULL, error);
	nfsm_chain_get_stateid(error, &nmrep, sid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
5087
5088/*
5089 * common OPEN RPC code
5090 *
5091 * If create is set, ctx must be passed in.
6d2010ae 5092 * Returns a node on success if no node passed in.
b0d623f7
A
5093 */
5094int
5095nfs4_open_rpc_internal(
5096 struct nfs_open_file *nofp,
5097 vfs_context_t ctx,
5098 thread_t thd,
5099 kauth_cred_t cred,
5100 struct componentname *cnp,
5101 struct vnode_attr *vap,
5102 vnode_t dvp,
5103 vnode_t *vpp,
5104 int create,
5105 int share_access,
5106 int share_deny)
5107{
5108 struct nfsmount *nmp;
5109 struct nfs_open_owner *noop = nofp->nof_owner;
6d2010ae 5110 struct nfs_vattr nvattr;
b0d623f7 5111 int error = 0, open_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
6d2010ae 5112 int nfsvers, namedattrs, numops, exclusive = 0, gotuid, gotgid;
b0d623f7
A
5113 u_int64_t xid, savedxid = 0;
5114 nfsnode_t dnp = VTONFS(dvp);
5115 nfsnode_t np, newnp = NULL;
5116 vnode_t newvp = NULL;
5117 struct nfsm_chain nmreq, nmrep;
5118 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6d2010ae 5119 uint32_t rflags, delegation, recall;
b0d623f7
A
5120 struct nfs_stateid stateid, dstateid, *sid;
5121 fhandle_t fh;
6d2010ae 5122 struct nfsreq rq, *req = &rq;
b0d623f7 5123 struct nfs_dulookup dul;
6d2010ae
A
5124 char sbuf[64], *s;
5125 uint32_t ace_type, ace_flags, ace_mask, len, slen;
5126 struct kauth_ace ace;
5127 struct nfsreq_secinfo_args si;
b0d623f7 5128
0a7de745
A
5129 if (create && !ctx) {
5130 return EINVAL;
5131 }
b0d623f7
A
5132
5133 nmp = VTONMP(dvp);
0a7de745
A
5134 if (nfs_mount_gone(nmp)) {
5135 return ENXIO;
5136 }
b0d623f7 5137 nfsvers = nmp->nm_vers;
6d2010ae 5138 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
0a7de745
A
5139 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
5140 return EINVAL;
5141 }
b0d623f7
A
5142
5143 np = *vpp ? VTONFS(*vpp) : NULL;
5144 if (create && vap) {
5145 exclusive = (vap->va_vaflags & VA_EXCLUSIVE);
5146 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
5147 gotuid = VATTR_IS_ACTIVE(vap, va_uid);
5148 gotgid = VATTR_IS_ACTIVE(vap, va_gid);
0a7de745 5149 if (exclusive && (!VATTR_IS_ACTIVE(vap, va_access_time) || !VATTR_IS_ACTIVE(vap, va_modify_time))) {
6d2010ae 5150 vap->va_vaflags |= VA_UTIMES_NULL;
0a7de745 5151 }
b0d623f7
A
5152 } else {
5153 exclusive = gotuid = gotgid = 0;
5154 }
5155 if (nofp) {
5156 sid = &nofp->nof_stateid;
5157 } else {
5158 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
5159 sid = &stateid;
5160 }
5161
0a7de745
A
5162 if ((error = nfs_open_owner_set_busy(noop, thd))) {
5163 return error;
5164 }
b0d623f7 5165again:
6d2010ae
A
5166 rflags = delegation = recall = 0;
5167 ace.ace_flags = 0;
5168 s = sbuf;
5169 slen = sizeof(sbuf);
5170 NVATTR_INIT(&nvattr);
5171 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, cnp->cn_nameptr, cnp->cn_namelen);
b0d623f7
A
5172
5173 nfsm_chain_null(&nmreq);
5174 nfsm_chain_null(&nmrep);
5175
5176 // PUTFH, SAVEFH, OPEN(CREATE?), GETATTR(FH), RESTOREFH, GETATTR
5177 numops = 6;
5178 nfsm_chain_build_alloc_init(error, &nmreq, 53 * NFSX_UNSIGNED + cnp->cn_namelen);
3e170ce0 5179 nfsm_chain_add_compound_header(error, &nmreq, create ? "create" : "open", nmp->nm_minor_vers, numops);
b0d623f7
A
5180 numops--;
5181 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5182 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
5183 numops--;
5184 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
5185 numops--;
5186 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5187 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5188 nfsm_chain_add_32(error, &nmreq, share_access);
5189 nfsm_chain_add_32(error, &nmreq, share_deny);
6d2010ae 5190 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
b0d623f7 5191 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
6d2010ae 5192 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
b0d623f7
A
5193 nfsm_chain_add_32(error, &nmreq, create);
5194 if (create) {
5195 if (exclusive) {
5196 static uint32_t create_verf; // XXX need a better verifier
5197 create_verf++;
5198 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_EXCLUSIVE);
5199 /* insert 64 bit verifier */
5200 nfsm_chain_add_32(error, &nmreq, create_verf);
5201 nfsm_chain_add_32(error, &nmreq, create_verf);
5202 } else {
5203 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_UNCHECKED);
5204 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
5205 }
5206 }
b0d623f7 5207 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
6d2010ae 5208 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
b0d623f7
A
5209 numops--;
5210 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5211 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5212 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
6d2010ae 5213 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
b0d623f7
A
5214 numops--;
5215 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
5216 numops--;
5217 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 5218 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
b0d623f7
A
5219 nfsm_chain_build_done(error, &nmreq);
5220 nfsm_assert(error, (numops == 0), EPROTO);
0a7de745 5221 if (!error) {
b0d623f7 5222 error = busyerror = nfs_node_set_busy(dnp, thd);
0a7de745 5223 }
b0d623f7
A
5224 nfsmout_if(error);
5225
0a7de745 5226 if (create && !namedattrs) {
b0d623f7 5227 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
0a7de745 5228 }
b0d623f7 5229
6d2010ae 5230 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, NULL, &req);
b0d623f7 5231 if (!error) {
0a7de745 5232 if (create && !namedattrs) {
b0d623f7 5233 nfs_dulookup_start(&dul, dnp, ctx);
0a7de745 5234 }
b0d623f7
A
5235 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
5236 savedxid = xid;
5237 }
5238
0a7de745 5239 if (create && !namedattrs) {
b0d623f7 5240 nfs_dulookup_finish(&dul, dnp, ctx);
0a7de745 5241 }
b0d623f7 5242
0a7de745 5243 if ((lockerror = nfs_node_lock(dnp))) {
b0d623f7 5244 error = lockerror;
0a7de745 5245 }
b0d623f7
A
5246 nfsm_chain_skip_tag(error, &nmrep);
5247 nfsm_chain_get_32(error, &nmrep, numops);
5248 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5249 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
5250 nfsmout_if(error);
5251 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5252 nfs_owner_seqid_increment(noop, NULL, error);
5253 nfsm_chain_get_stateid(error, &nmrep, sid);
5254 nfsm_chain_check_change_info(error, &nmrep, dnp);
5255 nfsm_chain_get_32(error, &nmrep, rflags);
5256 bmlen = NFS_ATTR_BITMAP_LEN;
5257 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5258 nfsm_chain_get_32(error, &nmrep, delegation);
0a7de745 5259 if (!error) {
b0d623f7
A
5260 switch (delegation) {
5261 case NFS_OPEN_DELEGATE_NONE:
5262 break;
5263 case NFS_OPEN_DELEGATE_READ:
b0d623f7
A
5264 case NFS_OPEN_DELEGATE_WRITE:
5265 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5266 nfsm_chain_get_32(error, &nmrep, recall);
0a7de745 5267 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
6d2010ae 5268 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
0a7de745 5269 }
6d2010ae
A
5270 /* if we have any trouble accepting the ACE, just invalidate it */
5271 ace_type = ace_flags = ace_mask = len = 0;
5272 nfsm_chain_get_32(error, &nmrep, ace_type);
5273 nfsm_chain_get_32(error, &nmrep, ace_flags);
5274 nfsm_chain_get_32(error, &nmrep, ace_mask);
5275 nfsm_chain_get_32(error, &nmrep, len);
5276 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5277 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5278 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5279 if (!error && (len >= slen)) {
0a7de745
A
5280 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
5281 if (s) {
5282 slen = len + 1;
5283 } else {
6d2010ae 5284 ace.ace_flags = 0;
0a7de745 5285 }
6d2010ae 5286 }
0a7de745 5287 if (s) {
6d2010ae 5288 nfsm_chain_get_opaque(error, &nmrep, len, s);
0a7de745 5289 } else {
6d2010ae 5290 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
0a7de745 5291 }
6d2010ae
A
5292 if (!error && s) {
5293 s[len] = '\0';
0a7de745 5294 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
6d2010ae 5295 ace.ace_flags = 0;
0a7de745 5296 }
6d2010ae 5297 }
0a7de745 5298 if (error || !s) {
6d2010ae 5299 ace.ace_flags = 0;
0a7de745
A
5300 }
5301 if (s && (s != sbuf)) {
6d2010ae 5302 FREE(s, M_TEMP);
0a7de745 5303 }
b0d623f7
A
5304 break;
5305 default:
5306 error = EBADRPC;
5307 break;
5308 }
0a7de745 5309 }
b0d623f7 5310 /* At this point if we have no error, the object was created/opened. */
b0d623f7
A
5311 open_error = error;
5312 nfsmout_if(error);
0a7de745 5313 if (create && vap && !exclusive) {
b0d623f7 5314 nfs_vattr_set_supported(bitmap, vap);
0a7de745 5315 }
b0d623f7
A
5316 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5317 nfsmout_if(error);
6d2010ae 5318 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
b0d623f7
A
5319 nfsmout_if(error);
5320 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
6d2010ae 5321 printf("nfs: open/create didn't return filehandle? %s\n", cnp->cn_nameptr);
2d21ac55
A
5322 error = EBADRPC;
5323 goto nfsmout;
5324 }
b0d623f7
A
5325 if (!create && np && !NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
5326 // XXX for the open case, what if fh doesn't match the vnode we think we're opening?
6d2010ae 5327 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
0a7de745 5328 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
6d2010ae 5329 NP(np, "nfs4_open_rpc: warning: file handle mismatch");
0a7de745 5330 }
b0d623f7 5331 }
2d21ac55
A
5332 /* directory attributes: if we don't get them, make sure to invalidate */
5333 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
5334 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 5335 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
0a7de745 5336 if (error) {
2d21ac55 5337 NATTRINVALIDATE(dnp);
0a7de745 5338 }
b0d623f7
A
5339 nfsmout_if(error);
5340
0a7de745 5341 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
b0d623f7 5342 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
0a7de745 5343 }
b0d623f7
A
5344
5345 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
5346 nfs_node_unlock(dnp);
5347 lockerror = ENOENT;
6d2010ae
A
5348 NVATTR_CLEANUP(&nvattr);
5349 error = nfs4_open_confirm_rpc(nmp, dnp, fh.fh_data, fh.fh_len, noop, sid, thd, cred, &nvattr, &xid);
b0d623f7
A
5350 nfsmout_if(error);
5351 savedxid = xid;
0a7de745 5352 if ((lockerror = nfs_node_lock(dnp))) {
b0d623f7 5353 error = lockerror;
0a7de745 5354 }
b0d623f7
A
5355 }
5356
5357nfsmout:
5358 nfsm_chain_cleanup(&nmreq);
5359 nfsm_chain_cleanup(&nmrep);
5360
5361 if (!lockerror && create) {
5362 if (!open_error && (dnp->n_flag & NNEGNCENTRIES)) {
5363 dnp->n_flag &= ~NNEGNCENTRIES;
5364 cache_purge_negatives(dvp);
5365 }
5366 dnp->n_flag |= NMODIFIED;
5367 nfs_node_unlock(dnp);
5368 lockerror = ENOENT;
6d2010ae 5369 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
b0d623f7 5370 }
0a7de745 5371 if (!lockerror) {
b0d623f7 5372 nfs_node_unlock(dnp);
0a7de745 5373 }
6d2010ae 5374 if (!error && !np && fh.fh_len) {
b0d623f7
A
5375 /* create the vnode with the filehandle and attributes */
5376 xid = savedxid;
6d2010ae 5377 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &newnp);
0a7de745 5378 if (!error) {
b0d623f7 5379 newvp = NFSTOV(newnp);
0a7de745 5380 }
b0d623f7 5381 }
6d2010ae 5382 NVATTR_CLEANUP(&nvattr);
0a7de745 5383 if (!busyerror) {
b0d623f7 5384 nfs_node_clear_busy(dnp);
0a7de745 5385 }
b0d623f7 5386 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
0a7de745 5387 if (!np) {
b0d623f7 5388 np = newnp;
0a7de745 5389 }
b0d623f7
A
5390 if (!error && np && !recall) {
5391 /* stuff the delegation state in the node */
5392 lck_mtx_lock(&np->n_openlock);
5393 np->n_openflags &= ~N_DELEG_MASK;
5394 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5395 np->n_dstateid = dstateid;
6d2010ae
A
5396 np->n_dace = ace;
5397 if (np->n_dlink.tqe_next == NFSNOLIST) {
5398 lck_mtx_lock(&nmp->nm_lock);
0a7de745 5399 if (np->n_dlink.tqe_next == NFSNOLIST) {
6d2010ae 5400 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
0a7de745 5401 }
6d2010ae
A
5402 lck_mtx_unlock(&nmp->nm_lock);
5403 }
b0d623f7 5404 lck_mtx_unlock(&np->n_openlock);
6d2010ae
A
5405 } else {
5406 /* give the delegation back */
b0d623f7 5407 if (np) {
6d2010ae
A
5408 if (NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
5409 /* update delegation state and return it */
5410 lck_mtx_lock(&np->n_openlock);
5411 np->n_openflags &= ~N_DELEG_MASK;
5412 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5413 np->n_dstateid = dstateid;
5414 np->n_dace = ace;
5415 if (np->n_dlink.tqe_next == NFSNOLIST) {
5416 lck_mtx_lock(&nmp->nm_lock);
0a7de745 5417 if (np->n_dlink.tqe_next == NFSNOLIST) {
6d2010ae 5418 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
0a7de745 5419 }
6d2010ae
A
5420 lck_mtx_unlock(&nmp->nm_lock);
5421 }
5422 lck_mtx_unlock(&np->n_openlock);
5423 /* don't need to send a separate delegreturn for fh */
5424 fh.fh_len = 0;
5425 }
5426 /* return np's current delegation */
5427 nfs4_delegation_return(np, 0, thd, cred);
b0d623f7 5428 }
0a7de745 5429 if (fh.fh_len) { /* return fh's delegation if it wasn't for np */
6d2010ae 5430 nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred);
0a7de745 5431 }
b0d623f7
A
5432 }
5433 }
5434 if (error) {
5435 if (exclusive && (error == NFSERR_NOTSUPP)) {
5436 exclusive = 0;
5437 goto again;
5438 }
5439 if (newvp) {
5440 nfs_node_unlock(newnp);
5441 vnode_put(newvp);
5442 }
5443 } else if (create) {
5444 nfs_node_unlock(newnp);
5445 if (exclusive) {
5446 error = nfs4_setattr_rpc(newnp, vap, ctx);
5447 if (error && (gotuid || gotgid)) {
5448 /* it's possible the server didn't like our attempt to set IDs. */
5449 /* so, let's try it again without those */
5450 VATTR_CLEAR_ACTIVE(vap, va_uid);
5451 VATTR_CLEAR_ACTIVE(vap, va_gid);
5452 error = nfs4_setattr_rpc(newnp, vap, ctx);
5453 }
5454 }
0a7de745 5455 if (error) {
b0d623f7 5456 vnode_put(newvp);
0a7de745 5457 } else {
b0d623f7 5458 *vpp = newvp;
0a7de745 5459 }
b0d623f7
A
5460 }
5461 nfs_open_owner_clear_busy(noop);
0a7de745 5462 return error;
b0d623f7
A
5463}
5464
6d2010ae
A
5465
5466/*
5467 * Send an OPEN RPC to claim a delegated open for a file
5468 */
5469int
5470nfs4_claim_delegated_open_rpc(
5471 struct nfs_open_file *nofp,
5472 int share_access,
5473 int share_deny,
5474 int flags)
5475{
5476 struct nfsmount *nmp;
5477 struct nfs_open_owner *noop = nofp->nof_owner;
5478 struct nfs_vattr nvattr;
5479 int error = 0, lockerror = ENOENT, status;
5480 int nfsvers, numops;
5481 u_int64_t xid;
5482 nfsnode_t np = nofp->nof_np;
5483 struct nfsm_chain nmreq, nmrep;
5484 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5485 uint32_t rflags = 0, delegation, recall = 0;
5486 fhandle_t fh;
5487 struct nfs_stateid dstateid;
5488 char sbuf[64], *s = sbuf;
5489 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5490 struct kauth_ace ace;
5491 vnode_t dvp = NULL;
5492 const char *vname = NULL;
5493 const char *name = NULL;
5494 size_t namelen;
5495 char smallname[128];
5496 char *filename = NULL;
5497 struct nfsreq_secinfo_args si;
5498
5499 nmp = NFSTONMP(np);
0a7de745
A
5500 if (nfs_mount_gone(nmp)) {
5501 return ENXIO;
5502 }
6d2010ae
A
5503 nfsvers = nmp->nm_vers;
5504
5505 nfs_node_lock_force(np);
5506 if ((vnode_vtype(NFSTOV(np)) != VDIR) && np->n_sillyrename) {
5507 /*
5508 * The node's been sillyrenamed, so we need to use
5509 * the sillyrename directory/name to do the open.
5510 */
5511 struct nfs_sillyrename *nsp = np->n_sillyrename;
5512 dvp = NFSTOV(nsp->nsr_dnp);
5513 if ((error = vnode_get(dvp))) {
cb323159 5514 dvp = NULLVP;
6d2010ae
A
5515 nfs_node_unlock(np);
5516 goto out;
5517 }
5518 name = nsp->nsr_name;
5519 } else {
5520 /*
5521 * [sigh] We can't trust VFS to get the parent right for named
5522 * attribute nodes. (It likes to reparent the nodes after we've
5523 * created them.) Luckily we can probably get the right parent
5524 * from the n_parent we have stashed away.
5525 */
5526 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
0a7de745 5527 (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
6d2010ae 5528 dvp = NULL;
0a7de745
A
5529 }
5530 if (!dvp) {
6d2010ae 5531 dvp = vnode_getparent(NFSTOV(np));
0a7de745 5532 }
6d2010ae
A
5533 vname = vnode_getname(NFSTOV(np));
5534 if (!dvp || !vname) {
0a7de745 5535 if (!error) {
6d2010ae 5536 error = EIO;
0a7de745 5537 }
6d2010ae
A
5538 nfs_node_unlock(np);
5539 goto out;
5540 }
5541 name = vname;
5542 }
5543 filename = &smallname[0];
5544 namelen = snprintf(filename, sizeof(smallname), "%s", name);
5545 if (namelen >= sizeof(smallname)) {
0a7de745 5546 MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
6d2010ae
A
5547 if (!filename) {
5548 error = ENOMEM;
3e170ce0 5549 nfs_node_unlock(np);
6d2010ae
A
5550 goto out;
5551 }
0a7de745 5552 snprintf(filename, namelen + 1, "%s", name);
6d2010ae
A
5553 }
5554 nfs_node_unlock(np);
5555
0a7de745 5556 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
3e170ce0 5557 goto out;
0a7de745 5558 }
6d2010ae
A
5559 NVATTR_INIT(&nvattr);
5560 delegation = NFS_OPEN_DELEGATE_NONE;
5561 dstateid = np->n_dstateid;
5562 NFSREQ_SECINFO_SET(&si, VTONFS(dvp), NULL, 0, filename, namelen);
5563
5564 nfsm_chain_null(&nmreq);
5565 nfsm_chain_null(&nmrep);
5566
5567 // PUTFH, OPEN, GETATTR(FH)
5568 numops = 3;
5569 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
3e170ce0 5570 nfsm_chain_add_compound_header(error, &nmreq, "open_claim_d", nmp->nm_minor_vers, numops);
6d2010ae
A
5571 numops--;
5572 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5573 nfsm_chain_add_fh(error, &nmreq, nfsvers, VTONFS(dvp)->n_fhp, VTONFS(dvp)->n_fhsize);
5574 numops--;
5575 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5576 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5577 nfsm_chain_add_32(error, &nmreq, share_access);
5578 nfsm_chain_add_32(error, &nmreq, share_deny);
5579 // open owner: clientid + uid
5580 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5581 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5582 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
5583 // openflag4
5584 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5585 // open_claim4
5586 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_DELEGATE_CUR);
5587 nfsm_chain_add_stateid(error, &nmreq, &np->n_dstateid);
5588 nfsm_chain_add_name(error, &nmreq, filename, namelen, nmp);
5589 numops--;
5590 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5591 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5592 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5593 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5594 nfsm_chain_build_done(error, &nmreq);
5595 nfsm_assert(error, (numops == 0), EPROTO);
5596 nfsmout_if(error);
5597
5598 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
0a7de745 5599 noop->noo_cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
6d2010ae 5600
0a7de745 5601 if ((lockerror = nfs_node_lock(np))) {
6d2010ae 5602 error = lockerror;
0a7de745 5603 }
6d2010ae
A
5604 nfsm_chain_skip_tag(error, &nmrep);
5605 nfsm_chain_get_32(error, &nmrep, numops);
5606 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5607 nfsmout_if(error);
5608 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5609 nfs_owner_seqid_increment(noop, NULL, error);
5610 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5611 nfsm_chain_check_change_info(error, &nmrep, np);
5612 nfsm_chain_get_32(error, &nmrep, rflags);
5613 bmlen = NFS_ATTR_BITMAP_LEN;
5614 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5615 nfsm_chain_get_32(error, &nmrep, delegation);
0a7de745 5616 if (!error) {
6d2010ae
A
5617 switch (delegation) {
5618 case NFS_OPEN_DELEGATE_NONE:
5619 // if (!(np->n_openflags & N_DELEG_RETURN)) /* don't warn if delegation is being returned */
0a7de745 5620 // printf("nfs: open delegated claim didn't return a delegation %s\n", filename ? filename : "???");
6d2010ae
A
5621 break;
5622 case NFS_OPEN_DELEGATE_READ:
5623 case NFS_OPEN_DELEGATE_WRITE:
5624 if ((((np->n_openflags & N_DELEG_MASK) == N_DELEG_READ) &&
0a7de745 5625 (delegation == NFS_OPEN_DELEGATE_WRITE)) ||
6d2010ae 5626 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) &&
0a7de745 5627 (delegation == NFS_OPEN_DELEGATE_READ))) {
6d2010ae 5628 printf("nfs: open delegated claim returned a different delegation type! have %s got %s %s\n",
0a7de745
A
5629 ((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ? "W" : "R",
5630 (delegation == NFS_OPEN_DELEGATE_WRITE) ? "W" : "R", filename ? filename : "???");
5631 }
6d2010ae
A
5632 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5633 nfsm_chain_get_32(error, &nmrep, recall);
0a7de745 5634 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
6d2010ae 5635 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
0a7de745 5636 }
6d2010ae
A
5637 /* if we have any trouble accepting the ACE, just invalidate it */
5638 ace_type = ace_flags = ace_mask = len = 0;
5639 nfsm_chain_get_32(error, &nmrep, ace_type);
5640 nfsm_chain_get_32(error, &nmrep, ace_flags);
5641 nfsm_chain_get_32(error, &nmrep, ace_mask);
5642 nfsm_chain_get_32(error, &nmrep, len);
5643 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5644 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5645 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5646 if (!error && (len >= slen)) {
0a7de745
A
5647 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
5648 if (s) {
5649 slen = len + 1;
5650 } else {
6d2010ae 5651 ace.ace_flags = 0;
0a7de745 5652 }
6d2010ae 5653 }
0a7de745 5654 if (s) {
6d2010ae 5655 nfsm_chain_get_opaque(error, &nmrep, len, s);
0a7de745 5656 } else {
6d2010ae 5657 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
0a7de745 5658 }
6d2010ae
A
5659 if (!error && s) {
5660 s[len] = '\0';
0a7de745 5661 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
6d2010ae 5662 ace.ace_flags = 0;
0a7de745 5663 }
6d2010ae 5664 }
0a7de745 5665 if (error || !s) {
6d2010ae 5666 ace.ace_flags = 0;
0a7de745
A
5667 }
5668 if (s && (s != sbuf)) {
6d2010ae 5669 FREE(s, M_TEMP);
0a7de745 5670 }
6d2010ae
A
5671 if (!error) {
5672 /* stuff the latest delegation state in the node */
5673 lck_mtx_lock(&np->n_openlock);
5674 np->n_openflags &= ~N_DELEG_MASK;
5675 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5676 np->n_dstateid = dstateid;
5677 np->n_dace = ace;
5678 if (np->n_dlink.tqe_next == NFSNOLIST) {
5679 lck_mtx_lock(&nmp->nm_lock);
0a7de745 5680 if (np->n_dlink.tqe_next == NFSNOLIST) {
6d2010ae 5681 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
0a7de745 5682 }
6d2010ae
A
5683 lck_mtx_unlock(&nmp->nm_lock);
5684 }
5685 lck_mtx_unlock(&np->n_openlock);
5686 }
5687 break;
5688 default:
5689 error = EBADRPC;
5690 break;
5691 }
0a7de745 5692 }
6d2010ae
A
5693 nfsmout_if(error);
5694 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5695 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
5696 nfsmout_if(error);
5697 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
5698 printf("nfs: open reclaim didn't return filehandle? %s\n", filename ? filename : "???");
5699 error = EBADRPC;
5700 goto nfsmout;
5701 }
5702 if (!NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
5703 // XXX what if fh doesn't match the vnode we think we're re-opening?
5704 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
0a7de745 5705 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
6d2010ae 5706 printf("nfs4_claim_delegated_open_rpc: warning: file handle mismatch %s\n", filename ? filename : "???");
0a7de745 5707 }
6d2010ae
A
5708 }
5709 error = nfs_loadattrcache(np, &nvattr, &xid, 1);
5710 nfsmout_if(error);
0a7de745 5711 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
6d2010ae 5712 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
0a7de745 5713 }
6d2010ae
A
5714nfsmout:
5715 NVATTR_CLEANUP(&nvattr);
5716 nfsm_chain_cleanup(&nmreq);
5717 nfsm_chain_cleanup(&nmrep);
0a7de745 5718 if (!lockerror) {
6d2010ae 5719 nfs_node_unlock(np);
0a7de745 5720 }
6d2010ae
A
5721 nfs_open_owner_clear_busy(noop);
5722 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5723 if (recall) {
5724 /*
5725 * We're making a delegated claim.
5726 * Don't return the delegation here in case we have more to claim.
5727 * Just make sure it's queued up to be returned.
5728 */
5729 nfs4_delegation_return_enqueue(np);
5730 }
5731 }
5732out:
5733 // if (!error)
0a7de745
A
5734 // printf("nfs: open claim delegated (%d, %d) succeeded for %s\n", share_access, share_deny, filename ? filename : "???");
5735 if (filename && (filename != &smallname[0])) {
6d2010ae 5736 FREE(filename, M_TEMP);
0a7de745
A
5737 }
5738 if (vname) {
6d2010ae 5739 vnode_putname(vname);
0a7de745
A
5740 }
5741 if (dvp != NULLVP) {
6d2010ae 5742 vnode_put(dvp);
0a7de745
A
5743 }
5744 return error;
6d2010ae
A
5745}
5746
b0d623f7
A
/*
 * Send an OPEN RPC to reclaim an open file.
 *
 * Re-establishes this open file's share access/deny state on the server
 * with an OPEN(CLAIM_PREVIOUS) compound, sent with R_RECOVER | R_NOINTR
 * (i.e. as part of state recovery, uninterruptible).  The new open
 * stateid is stored in nofp->nof_stateid; any delegation the server
 * returns is restuffed into the node.  If we held a delegation but the
 * server failed to hand it back, the old delegation is queued for return.
 *
 * Returns 0 or an errno; ENXIO if the mount is gone.
 */
int
nfs4_open_reclaim_rpc(
	struct nfs_open_file *nofp,
	int share_access,
	int share_deny)
{
	struct nfsmount *nmp;
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfs_vattr nvattr;
	int error = 0, lockerror = ENOENT, status;
	int nfsvers, numops;
	u_int64_t xid;
	nfsnode_t np = nofp->nof_np;
	struct nfsm_chain nmreq, nmrep;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
	uint32_t rflags = 0, delegation, recall = 0;
	fhandle_t fh;
	struct nfs_stateid dstateid;
	char sbuf[64], *s = sbuf;
	uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
	struct kauth_ace ace;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	/* serialize against other users of this open owner */
	if ((error = nfs_open_owner_set_busy(noop, NULL))) {
		return error;
	}

	NVATTR_INIT(&nvattr);
	delegation = NFS_OPEN_DELEGATE_NONE;
	/* remember the delegation stateid we currently hold (reply may replace it) */
	dstateid = np->n_dstateid;
	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, OPEN, GETATTR(FH)
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "open_reclaim", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	nfsm_chain_add_32(error, &nmreq, share_access);
	nfsm_chain_add_32(error, &nmreq, share_deny);
	// open owner: clientid + uid
	nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
	nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
	nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
	// openflag4
	nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
	// open_claim4
	nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_PREVIOUS);
	/* advertise the delegation type (if any) we believe we currently hold */
	delegation = (np->n_openflags & N_DELEG_READ) ? NFS_OPEN_DELEGATE_READ :
	    (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE :
	    NFS_OPEN_DELEGATE_NONE;
	nfsm_chain_add_32(error, &nmreq, delegation);
	/* reset; will be refilled from the server's reply below */
	delegation = NFS_OPEN_DELEGATE_NONE;
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
	    noop->noo_cred, &si, R_RECOVER | R_NOINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
	nfs_owner_seqid_increment(noop, NULL, error);
	nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
	nfsm_chain_check_change_info(error, &nmrep, np);
	nfsm_chain_get_32(error, &nmrep, rflags);
	bmlen = NFS_ATTR_BITMAP_LEN;
	nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
	nfsm_chain_get_32(error, &nmrep, delegation);
	if (!error) {
		switch (delegation) {
		case NFS_OPEN_DELEGATE_NONE:
			if (np->n_openflags & N_DELEG_MASK) {
				/*
				 * Hey! We were supposed to get our delegation back even
				 * if it was getting immediately recalled. Bad server!
				 *
				 * Just try to return the existing delegation.
				 */
				// NP(np, "nfs: open reclaim didn't return delegation?");
				delegation = (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE : NFS_OPEN_DELEGATE_READ;
				recall = 1;
			}
			break;
		case NFS_OPEN_DELEGATE_READ:
		case NFS_OPEN_DELEGATE_WRITE:
			nfsm_chain_get_stateid(error, &nmrep, &dstateid);
			nfsm_chain_get_32(error, &nmrep, recall);
			if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
				nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
			}
			/* if we have any trouble accepting the ACE, just invalidate it */
			ace_type = ace_flags = ace_mask = len = 0;
			nfsm_chain_get_32(error, &nmrep, ace_type);
			nfsm_chain_get_32(error, &nmrep, ace_flags);
			nfsm_chain_get_32(error, &nmrep, ace_mask);
			nfsm_chain_get_32(error, &nmrep, len);
			ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
			ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
			ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
			if (!error && (len >= slen)) {
				/* name doesn't fit the stack buffer; switch to a heap buffer */
				MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
				if (s) {
					slen = len + 1;
				} else {
					ace.ace_flags = 0;
				}
			}
			if (s) {
				nfsm_chain_get_opaque(error, &nmrep, len, s);
			} else {
				nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
			}
			if (!error && s) {
				s[len] = '\0';
				if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
					ace.ace_flags = 0;
				}
			}
			if (error || !s) {
				ace.ace_flags = 0;
			}
			if (s && (s != sbuf)) {
				FREE(s, M_TEMP);
			}
			if (!error) {
				/* stuff the delegation state in the node */
				lck_mtx_lock(&np->n_openlock);
				np->n_openflags &= ~N_DELEG_MASK;
				np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
				np->n_dstateid = dstateid;
				np->n_dace = ace;
				if (np->n_dlink.tqe_next == NFSNOLIST) {
					/* re-check under nm_lock before linking onto the mount's delegation list */
					lck_mtx_lock(&nmp->nm_lock);
					if (np->n_dlink.tqe_next == NFSNOLIST) {
						TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
					}
					lck_mtx_unlock(&nmp->nm_lock);
				}
				lck_mtx_unlock(&np->n_openlock);
			}
			break;
		default:
			error = EBADRPC;
			break;
		}
	}
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
	nfsmout_if(error);
	if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
		NP(np, "nfs: open reclaim didn't return filehandle?");
		error = EBADRPC;
		goto nfsmout;
	}
	if (!NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
		// XXX what if fh doesn't match the vnode we think we're re-opening?
		// That should be pretty hard in this case, given that we are doing
		// the open reclaim using the file handle (and not a dir/name pair).
		// Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
		if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
			NP(np, "nfs4_open_reclaim_rpc: warning: file handle mismatch");
		}
	}
	error = nfs_loadattrcache(np, &nvattr, &xid, 1);
	nfsmout_if(error);
	if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
		nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
	}
nfsmout:
	// if (!error)
	//	NP(np, "nfs: open reclaim (%d, %d) succeeded", share_access, share_deny);
	NVATTR_CLEANUP(&nvattr);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfs_open_owner_clear_busy(noop);
	/* if the (re)offered delegation is being recalled, queue it for return */
	if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
		if (recall) {
			nfs4_delegation_return_enqueue(np);
		}
	}
	return error;
}
2d21ac55 5961
b0d623f7
A
5962int
5963nfs4_open_downgrade_rpc(
5964 nfsnode_t np,
5965 struct nfs_open_file *nofp,
5966 vfs_context_t ctx)
5967{
5968 struct nfs_open_owner *noop = nofp->nof_owner;
5969 struct nfsmount *nmp;
5970 int error, lockerror = ENOENT, status, nfsvers, numops;
5971 struct nfsm_chain nmreq, nmrep;
5972 u_int64_t xid;
6d2010ae 5973 struct nfsreq_secinfo_args si;
2d21ac55 5974
b0d623f7 5975 nmp = NFSTONMP(np);
0a7de745
A
5976 if (nfs_mount_gone(nmp)) {
5977 return ENXIO;
5978 }
b0d623f7
A
5979 nfsvers = nmp->nm_vers;
5980
0a7de745
A
5981 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
5982 return error;
5983 }
b0d623f7 5984
6d2010ae 5985 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
b0d623f7
A
5986 nfsm_chain_null(&nmreq);
5987 nfsm_chain_null(&nmrep);
5988
5989 // PUTFH, OPEN_DOWNGRADE, GETATTR
5990 numops = 3;
5991 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
3e170ce0 5992 nfsm_chain_add_compound_header(error, &nmreq, "open_downgrd", nmp->nm_minor_vers, numops);
b0d623f7
A
5993 numops--;
5994 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5995 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5996 numops--;
5997 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_DOWNGRADE);
5998 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
5999 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
6000 nfsm_chain_add_32(error, &nmreq, nofp->nof_access);
6001 nfsm_chain_add_32(error, &nmreq, nofp->nof_deny);
6002 numops--;
6003 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 6004 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
b0d623f7
A
6005 nfsm_chain_build_done(error, &nmreq);
6006 nfsm_assert(error, (numops == 0), EPROTO);
6007 nfsmout_if(error);
6d2010ae 6008 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
0a7de745
A
6009 vfs_context_thread(ctx), vfs_context_ucred(ctx),
6010 &si, R_NOINTR, &nmrep, &xid, &status);
b0d623f7 6011
0a7de745 6012 if ((lockerror = nfs_node_lock(np))) {
b0d623f7 6013 error = lockerror;
0a7de745 6014 }
b0d623f7
A
6015 nfsm_chain_skip_tag(error, &nmrep);
6016 nfsm_chain_get_32(error, &nmrep, numops);
6017 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
2d21ac55 6018 nfsmout_if(error);
b0d623f7
A
6019 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_DOWNGRADE);
6020 nfs_owner_seqid_increment(noop, NULL, error);
6021 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
6022 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 6023 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
b0d623f7 6024nfsmout:
0a7de745 6025 if (!lockerror) {
b0d623f7 6026 nfs_node_unlock(np);
0a7de745 6027 }
b0d623f7 6028 nfs_open_owner_clear_busy(noop);
2d21ac55
A
6029 nfsm_chain_cleanup(&nmreq);
6030 nfsm_chain_cleanup(&nmrep);
0a7de745 6031 return error;
b0d623f7 6032}
2d21ac55 6033
b0d623f7
A
6034int
6035nfs4_close_rpc(
6036 nfsnode_t np,
6037 struct nfs_open_file *nofp,
6038 thread_t thd,
6039 kauth_cred_t cred,
6d2010ae 6040 int flags)
b0d623f7
A
6041{
6042 struct nfs_open_owner *noop = nofp->nof_owner;
6043 struct nfsmount *nmp;
6044 int error, lockerror = ENOENT, status, nfsvers, numops;
6045 struct nfsm_chain nmreq, nmrep;
6046 u_int64_t xid;
6d2010ae 6047 struct nfsreq_secinfo_args si;
b0d623f7
A
6048
6049 nmp = NFSTONMP(np);
0a7de745
A
6050 if (nfs_mount_gone(nmp)) {
6051 return ENXIO;
6052 }
b0d623f7
A
6053 nfsvers = nmp->nm_vers;
6054
0a7de745
A
6055 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
6056 return error;
6057 }
b0d623f7 6058
6d2010ae 6059 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
b0d623f7
A
6060 nfsm_chain_null(&nmreq);
6061 nfsm_chain_null(&nmrep);
6062
6d2010ae 6063 // PUTFH, CLOSE, GETATTR
b0d623f7
A
6064 numops = 3;
6065 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
3e170ce0 6066 nfsm_chain_add_compound_header(error, &nmreq, "close", nmp->nm_minor_vers, numops);
2d21ac55
A
6067 numops--;
6068 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
b0d623f7 6069 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
2d21ac55
A
6070 numops--;
6071 nfsm_chain_add_32(error, &nmreq, NFS_OP_CLOSE);
b0d623f7
A
6072 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
6073 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
6074 numops--;
6075 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 6076 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
2d21ac55
A
6077 nfsm_chain_build_done(error, &nmreq);
6078 nfsm_assert(error, (numops == 0), EPROTO);
6079 nfsmout_if(error);
0a7de745 6080 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
2d21ac55 6081
0a7de745 6082 if ((lockerror = nfs_node_lock(np))) {
b0d623f7 6083 error = lockerror;
0a7de745 6084 }
2d21ac55
A
6085 nfsm_chain_skip_tag(error, &nmrep);
6086 nfsm_chain_get_32(error, &nmrep, numops);
6087 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
b0d623f7 6088 nfsmout_if(error);
2d21ac55 6089 nfsm_chain_op_check(error, &nmrep, NFS_OP_CLOSE);
b0d623f7
A
6090 nfs_owner_seqid_increment(noop, NULL, error);
6091 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
6092 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 6093 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
b0d623f7 6094nfsmout:
0a7de745 6095 if (!lockerror) {
b0d623f7 6096 nfs_node_unlock(np);
0a7de745 6097 }
b0d623f7
A
6098 nfs_open_owner_clear_busy(noop);
6099 nfsm_chain_cleanup(&nmreq);
6100 nfsm_chain_cleanup(&nmrep);
0a7de745 6101 return error;
b0d623f7
A
6102}
6103
6104
/*
 * Claim the delegated open combinations this open file holds.
 *
 * Each delegated open count on the open file (one per access/deny mode
 * combination) is converted into a server-confirmed open via a claim-delegated
 * open RPC, then any byte-range locks held under the delegation are reclaimed
 * with setlock RPCs.  If the delegation turns out to be lost, the file is
 * either flagged for lazy reopen (when no locks are held) or all open state
 * for the node is revoked.
 */
int
nfs4_claim_delegated_state_for_open_file(struct nfs_open_file *nofp, int flags)
{
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfs_lock_owner *nlop;
	struct nfs_file_lock *nflp, *nextnflp;
	struct nfsmount *nmp;
	int error = 0, reopen = 0;

	/*
	 * Claim each delegated deny-mode open combination.  On success the
	 * delegated count is folded into the matching confirmed-open count
	 * under the open file's lock.  Any error stops further deny-mode
	 * claims (each subsequent claim is gated on !error).
	 */
	if (nofp->nof_d_rw_drw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_BOTH, flags);
		if (!error) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_rw_drw += nofp->nof_d_rw_drw;
			nofp->nof_d_rw_drw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	if (!error && nofp->nof_d_w_drw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_BOTH, flags);
		if (!error) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_w_drw += nofp->nof_d_w_drw;
			nofp->nof_d_w_drw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	if (!error && nofp->nof_d_r_drw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_BOTH, flags);
		if (!error) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_r_drw += nofp->nof_d_r_drw;
			nofp->nof_d_r_drw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	if (!error && nofp->nof_d_rw_dw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_WRITE, flags);
		if (!error) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_rw_dw += nofp->nof_d_rw_dw;
			nofp->nof_d_rw_dw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	if (!error && nofp->nof_d_w_dw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_WRITE, flags);
		if (!error) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_w_dw += nofp->nof_d_w_dw;
			nofp->nof_d_w_dw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	if (!error && nofp->nof_d_r_dw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_WRITE, flags);
		if (!error) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_r_dw += nofp->nof_d_r_dw;
			nofp->nof_d_r_dw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	/* non-deny-mode opens may be reopened if no locks are held */
	if (!error && nofp->nof_d_rw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, flags);
		/* for some errors, we should just try reopening the file */
		if (nfs_mount_state_error_delegation_lost(error)) {
			reopen = error;
		}
		/* counts are moved out of the delegated column on success OR reopen */
		if (!error || reopen) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_rw += nofp->nof_d_rw;
			nofp->nof_d_rw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	/* if we've already set reopen, we should move these other two opens from delegated to not delegated */
	if ((!error || reopen) && nofp->nof_d_w) {
		if (!error) {
			error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE, flags);
			/* for some errors, we should just try reopening the file */
			if (nfs_mount_state_error_delegation_lost(error)) {
				reopen = error;
			}
		}
		if (!error || reopen) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_w += nofp->nof_d_w;
			nofp->nof_d_w = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	if ((!error || reopen) && nofp->nof_d_r) {
		if (!error) {
			error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, flags);
			/* for some errors, we should just try reopening the file */
			if (nfs_mount_state_error_delegation_lost(error)) {
				reopen = error;
			}
		}
		if (!error || reopen) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_r += nofp->nof_d_r;
			nofp->nof_d_r = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}

	if (reopen) {
		/*
		 * Any problems with the delegation probably indicates that we
		 * should review/return all of our current delegation state.
		 */
		if ((nmp = NFSTONMP(nofp->nof_np))) {
			nfs4_delegation_return_enqueue(nofp->nof_np);
			lck_mtx_lock(&nmp->nm_lock);
			nfs_need_recover(nmp, NFSERR_EXPIRED);
			lck_mtx_unlock(&nmp->nm_lock);
		}
		/* NOTE(review): the inner "reopen &&" tests are redundant inside this
		 * if (reopen) block, but preserved as-is. */
		if (reopen && (nfs_check_for_locks(noop, nofp) == 0)) {
			/* just reopen the file on next access */
			NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, need reopen, %d",
			    reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_flags |= NFS_OPEN_FILE_REOPEN;
			lck_mtx_unlock(&nofp->nof_lock);
			return 0;
		}
		if (reopen) {
			NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, locks prevent reopen, %d",
			    reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
		}
	}

	if (!error && ((nmp = NFSTONMP(nofp->nof_np)))) {
		/* claim delegated locks */
		TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
			/* only reclaim locks belonging to this open owner */
			if (nlop->nlo_open_owner != noop) {
				continue;
			}
			TAILQ_FOREACH_SAFE(nflp, &nlop->nlo_locks, nfl_lolink, nextnflp) {
				/* skip dead & blocked lock requests (shouldn't be any in the held lock list) */
				if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) {
					continue;
				}
				/* skip non-delegated locks */
				if (!(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
					continue;
				}
				error = nmp->nm_funcs->nf_setlock_rpc(nofp->nof_np, nofp, nflp, 0, flags, current_thread(), noop->noo_cred);
				if (error) {
					NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) failed %d, %d",
					    nflp->nfl_start, nflp->nfl_end, error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
					break;
				}
				// else {
				//	NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) succeeded, %d",
				//	    nflp->nfl_start, nflp->nfl_end, kauth_cred_getuid(nofp->nof_owner->noo_cred));
				// }
			}
			if (error) {
				break;
			}
		}
	}

	if (!error) { /* all state claimed successfully! */
		return 0;
	}

	/* restart if it looks like a problem more than just losing the delegation */
	if (!nfs_mount_state_error_delegation_lost(error) &&
	    ((error == ETIMEDOUT) || nfs_mount_state_error_should_restart(error))) {
		NP(nofp->nof_np, "nfs delegated lock claim error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
		if ((error == ETIMEDOUT) && ((nmp = NFSTONMP(nofp->nof_np)))) {
			nfs_need_reconnect(nmp);
		}
		return error;
	}

	/* delegated state lost (once held but now not claimable) */
	NP(nofp->nof_np, "nfs delegated state claim error %d, state lost, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));

	/*
	 * Any problems with the delegation probably indicates that we
	 * should review/return all of our current delegation state.
	 */
	if ((nmp = NFSTONMP(nofp->nof_np))) {
		nfs4_delegation_return_enqueue(nofp->nof_np);
		lck_mtx_lock(&nmp->nm_lock);
		nfs_need_recover(nmp, NFSERR_EXPIRED);
		lck_mtx_unlock(&nmp->nm_lock);
	}

	/* revoke all open file state */
	nfs_revoke_open_state_for_node(nofp->nof_np);

	return error;
}
cb323159 6308#endif /* CONFIG_NFS4*/
6d2010ae
A
6309
/*
 * Release all open state for the given node.
 *
 * Drops every held byte-range lock (sending unlock RPCs for non-delegated
 * locks unless "force" is set or the mount is gone) and then marks every
 * open on the node as LOST, sending close RPCs for NFSv4 opens when not
 * forced.  Used during revocation/recovery when the server-side state is
 * presumed unusable.
 */
void
nfs_release_open_state_for_node(nfsnode_t np, int force)
{
	struct nfsmount *nmp = NFSTONMP(np);
	struct nfs_open_file *nofp;
	struct nfs_file_lock *nflp, *nextnflp;

	/* drop held locks */
	TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
		/* skip dead & blocked lock requests */
		if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) {
			continue;
		}
		/* send an unlock if not a delegated lock */
		if (!force && nmp && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
			/* R_RECOVER: best-effort unlock during recovery; result is ignored */
			nmp->nm_funcs->nf_unlock_rpc(np, nflp->nfl_owner, F_WRLCK, nflp->nfl_start, nflp->nfl_end, R_RECOVER,
			    NULL, nflp->nfl_owner->nlo_open_owner->noo_cred);
		}
		/* kill/remove the lock */
		lck_mtx_lock(&np->n_openlock);
		nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
		/* lock-owner list manipulation is guarded by the owner's own lock */
		lck_mtx_lock(&nflp->nfl_owner->nlo_lock);
		TAILQ_REMOVE(&nflp->nfl_owner->nlo_locks, nflp, nfl_lolink);
		lck_mtx_unlock(&nflp->nfl_owner->nlo_lock);
		if (nflp->nfl_blockcnt) {
			/* wake up anyone blocked on this lock */
			wakeup(nflp);
		} else {
			/* remove nflp from lock list and destroy */
			TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
			nfs_file_lock_destroy(nflp);
		}
		lck_mtx_unlock(&np->n_openlock);
	}

	lck_mtx_lock(&np->n_openlock);

	/* drop all opens */
	TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
		/* already lost? nothing more to do for this open */
		if (nofp->nof_flags & NFS_OPEN_FILE_LOST) {
			continue;
		}
		/* mark open state as lost */
		lck_mtx_lock(&nofp->nof_lock);
		nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
		nofp->nof_flags |= NFS_OPEN_FILE_LOST;

		lck_mtx_unlock(&nofp->nof_lock);
#if CONFIG_NFS4
		/* NOTE(review): the close RPC is issued while holding n_openlock — confirm intended */
		if (!force && nmp && (nmp->nm_vers >= NFS_VER4)) {
			nfs4_close_rpc(np, nofp, NULL, nofp->nof_owner->noo_cred, R_RECOVER);
		}
#endif
	}

	lck_mtx_unlock(&np->n_openlock);
}
6370
6371/*
6372 * State for a node has been lost, drop it, and revoke the node.
6373 * Attempt to return any state if possible in case the server
6374 * might somehow think we hold it.
6375 */
6376void
6377nfs_revoke_open_state_for_node(nfsnode_t np)
6378{
6379 struct nfsmount *nmp;
6380
6381 /* mark node as needing to be revoked */
6382 nfs_node_lock_force(np);
0a7de745 6383 if (np->n_flag & NREVOKE) { /* already revoked? */
6d2010ae
A
6384 NP(np, "nfs_revoke_open_state_for_node(): already revoked");
6385 nfs_node_unlock(np);
6386 return;
6387 }
6388 np->n_flag |= NREVOKE;
6389 nfs_node_unlock(np);
6390
6391 nfs_release_open_state_for_node(np, 0);
6392 NP(np, "nfs: state lost for %p 0x%x", np, np->n_flag);
6393
6394 /* mark mount as needing a revoke scan and have the socket thread do it. */
6395 if ((nmp = NFSTONMP(np))) {
6396 lck_mtx_lock(&nmp->nm_lock);
6397 nmp->nm_state |= NFSSTA_REVOKE;
6398 nfs_mount_sock_thread_wake(nmp);
6399 lck_mtx_unlock(&nmp->nm_lock);
6400 }
6401}
6402
cb323159 6403#if CONFIG_NFS4
6d2010ae
A
6404/*
6405 * Claim the delegated open combinations that each of this node's open files hold.
6406 */
6407int
6408nfs4_claim_delegated_state_for_node(nfsnode_t np, int flags)
6409{
6410 struct nfs_open_file *nofp;
6411 int error = 0;
6412
6413 lck_mtx_lock(&np->n_openlock);
6414
6415 /* walk the open file list looking for opens with delegated state to claim */
6416restart:
6417 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
6418 if (!nofp->nof_d_rw_drw && !nofp->nof_d_w_drw && !nofp->nof_d_r_drw &&
6419 !nofp->nof_d_rw_dw && !nofp->nof_d_w_dw && !nofp->nof_d_r_dw &&
0a7de745 6420 !nofp->nof_d_rw && !nofp->nof_d_w && !nofp->nof_d_r) {
6d2010ae 6421 continue;
0a7de745 6422 }
6d2010ae
A
6423 lck_mtx_unlock(&np->n_openlock);
6424 error = nfs4_claim_delegated_state_for_open_file(nofp, flags);
6425 lck_mtx_lock(&np->n_openlock);
0a7de745 6426 if (error) {
6d2010ae 6427 break;
0a7de745 6428 }
6d2010ae
A
6429 goto restart;
6430 }
6431
6432 lck_mtx_unlock(&np->n_openlock);
6433
0a7de745 6434 return error;
6d2010ae
A
6435}
6436
6437/*
6438 * Mark a node as needed to have its delegation returned.
6439 * Queue it up on the delegation return queue.
6440 * Make sure the thread is running.
6441 */
6442void
6443nfs4_delegation_return_enqueue(nfsnode_t np)
6444{
6445 struct nfsmount *nmp;
6446
6447 nmp = NFSTONMP(np);
0a7de745 6448 if (nfs_mount_gone(nmp)) {
6d2010ae 6449 return;
0a7de745 6450 }
6d2010ae
A
6451
6452 lck_mtx_lock(&np->n_openlock);
6453 np->n_openflags |= N_DELEG_RETURN;
6454 lck_mtx_unlock(&np->n_openlock);
6455
6456 lck_mtx_lock(&nmp->nm_lock);
0a7de745 6457 if (np->n_dreturn.tqe_next == NFSNOLIST) {
6d2010ae 6458 TAILQ_INSERT_TAIL(&nmp->nm_dreturnq, np, n_dreturn);
0a7de745 6459 }
6d2010ae
A
6460 nfs_mount_sock_thread_wake(nmp);
6461 lck_mtx_unlock(&nmp->nm_lock);
6462}
6463
/*
 * return any delegation we may have for the given node
 *
 * Marks the node as returning, busies the open state, claims all delegated
 * opens/locks (converting them to confirmed state), then sends DELEGRETURN.
 * On any result other than ETIMEDOUT/NFSERR_MOVED/NFSERR_LEASE_MOVED the
 * delegation is assumed gone and the node is removed from the mount's
 * delegation list.  Errors may trigger reconnect/recovery.
 */
int
nfs4_delegation_return(nfsnode_t np, int flags, thread_t thd, kauth_cred_t cred)
{
	struct nfsmount *nmp;
	fhandle_t fh;
	nfs_stateid dstateid;
	int error;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	/* first, make sure the node's marked for delegation return */
	lck_mtx_lock(&np->n_openlock);
	np->n_openflags |= (N_DELEG_RETURN | N_DELEG_RETURNING);
	lck_mtx_unlock(&np->n_openlock);

	/* make sure nobody else is using the delegation state */
	if ((error = nfs_open_state_set_busy(np, NULL))) {
		/* NOTE(review): this goto reaches the unconditional
		 * nfs_open_state_clear_busy() below even though set_busy
		 * failed — confirm that clear-after-failed-set is safe here. */
		goto out;
	}

	/* claim any delegated state */
	if ((error = nfs4_claim_delegated_state_for_node(np, flags))) {
		goto out;
	}

	/* return the delegation */
	lck_mtx_lock(&np->n_openlock);
	/* snapshot stateid and filehandle under the open lock for the RPC */
	dstateid = np->n_dstateid;
	fh.fh_len = np->n_fhsize;
	bcopy(np->n_fhp, &fh.fh_data, fh.fh_len);
	lck_mtx_unlock(&np->n_openlock);
	error = nfs4_delegreturn_rpc(NFSTONMP(np), fh.fh_data, fh.fh_len, &dstateid, flags, thd, cred);
	/* assume delegation is gone for all errors except ETIMEDOUT, NFSERR_*MOVED */
	if ((error != ETIMEDOUT) && (error != NFSERR_MOVED) && (error != NFSERR_LEASE_MOVED)) {
		lck_mtx_lock(&np->n_openlock);
		np->n_openflags &= ~N_DELEG_MASK;
		/* also drop the node from the mount's delegation list */
		lck_mtx_lock(&nmp->nm_lock);
		if (np->n_dlink.tqe_next != NFSNOLIST) {
			TAILQ_REMOVE(&nmp->nm_delegations, np, n_dlink);
			np->n_dlink.tqe_next = NFSNOLIST;
		}
		lck_mtx_unlock(&nmp->nm_lock);
		lck_mtx_unlock(&np->n_openlock);
	}

out:
	/* make sure it's no longer on the return queue and clear the return flags */
	lck_mtx_lock(&nmp->nm_lock);
	if (np->n_dreturn.tqe_next != NFSNOLIST) {
		TAILQ_REMOVE(&nmp->nm_dreturnq, np, n_dreturn);
		np->n_dreturn.tqe_next = NFSNOLIST;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	lck_mtx_lock(&np->n_openlock);
	np->n_openflags &= ~(N_DELEG_RETURN | N_DELEG_RETURNING);
	lck_mtx_unlock(&np->n_openlock);

	if (error) {
		NP(np, "nfs4_delegation_return, error %d", error);
		if (error == ETIMEDOUT) {
			nfs_need_reconnect(nmp);
		}
		if (nfs_mount_state_error_should_restart(error)) {
			/* make sure recovery happens */
			lck_mtx_lock(&nmp->nm_lock);
			nfs_need_recover(nmp, nfs_mount_state_error_delegation_lost(error) ? NFSERR_EXPIRED : 0);
			lck_mtx_unlock(&nmp->nm_lock);
		}
	}

	nfs_open_state_clear_busy(np);

	return error;
}
2d21ac55 6544
/*
 * RPC to return a delegation for a file handle
 *
 * Builds and sends an NFSv4 COMPOUND of PUTFH + DELEGRETURN for the given
 * filehandle and delegation stateid.  "flags" are passed through to the
 * request (e.g. R_RECOVER).  Returns 0 on success or an errno/NFS error.
 *
 * Note: the nfsm_chain_* macros accumulate failures into "error" and the
 * nfsmout_if()/nfsmout label implement the standard early-bailout pattern,
 * so the statement sequence below mirrors the wire format exactly.
 */
int
nfs4_delegreturn_rpc(struct nfsmount *nmp, u_char *fhp, int fhlen, struct nfs_stateid *sid, int flags, thread_t thd, kauth_cred_t cred)
{
	int error = 0, status, numops;
	uint64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	NFSREQ_SECINFO_SET(&si, NULL, fhp, fhlen, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, DELEGRETURN
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "delegreturn", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_DELEGRETURN);
	nfsm_chain_add_stateid(error, &nmreq, sid);
	nfsm_chain_build_done(error, &nmreq);
	/* all ops must have been consumed or the request is malformed */
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags, &nmrep, &xid, &status);
	/* parse the reply: verify each op's status in order */
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_DELEGRETURN);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
cb323159 6583#endif /* CONFIG_NFS4 */
6d2010ae
A
6584
/*
 * NFS read call.
 * Just call nfs_bioread() to do the work.
 *
 * Note: the exec code paths have a tendency to call VNOP_READ (and VNOP_MMAP)
 * without first calling VNOP_OPEN, so we make sure the file is open here.
 */
int
nfs_vnop_read(
	struct vnop_read_args /* {
                               *  struct vnodeop_desc *a_desc;
                               *  vnode_t a_vp;
                               *  struct uio *a_uio;
                               *  int a_ioflag;
                               *  vfs_context_t a_context;
                               *  } */*ap)
{
	vnode_t vp = ap->a_vp;
	vfs_context_t ctx = ap->a_context;
	nfsnode_t np;
	struct nfsmount *nmp;
	struct nfs_open_owner *noop;
	struct nfs_open_file *nofp;
	int error;

	/* reads are only valid on regular files */
	if (vnode_vtype(ap->a_vp) != VREG) {
		return (vnode_vtype(vp) == VDIR) ? EISDIR : EPERM;
	}

	np = VTONFS(vp);
	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	/* node whose state has been revoked is unusable */
	if (np->n_flag & NREVOKE) {
		return EIO;
	}

	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
	if (!noop) {
		return ENOMEM;
	}
restart:
	error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
		NP(np, "nfs_vnop_read: LOST %d", kauth_cred_getuid(noop->noo_cred));
		error = EIO;
	}
#if CONFIG_NFS4
	/* a file flagged for reopen must be reopened before we can use it */
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
		error = nfs4_reopen(nofp, vfs_context_thread(ctx));
		nofp = NULL;
		if (!error) {
			goto restart;
		}
	}
#endif
	if (error) {
		nfs_open_owner_rele(noop);
		return error;
	}
	/*
	 * Since the read path is a hot path, if we already have
	 * read access, lets go and try and do the read, without
	 * busying the mount and open file node for this open owner.
	 *
	 * N.B. This is inherently racy w.r.t. an execve using
	 * an already open file, in that the read at the end of
	 * this routine will be racing with a potential close.
	 * The code below ultimately has the same problem. In practice
	 * this does not seem to be an issue.
	 */
	if (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) {
		nfs_open_owner_rele(noop);
		goto do_read;
	}
	error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
	if (error) {
		nfs_open_owner_rele(noop);
		return error;
	}
	/*
	 * If we don't have a file already open with the access we need (read) then
	 * we need to open one. Otherwise we just co-opt an open. We might not already
	 * have access because we're trying to read the first page of the
	 * file for execve.
	 */
	error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
	if (error) {
		nfs_mount_state_in_use_end(nmp, 0);
		nfs_open_owner_rele(noop);
		return error;
	}
	if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
		/* we don't have the file open, so open it for read access if we're not denied */
		if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
			NP(np, "nfs_vnop_read: File already needs close access: 0x%x, cred: %d thread: %lld",
			    nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
		}
		/* a deny-read open on this file blocks us */
		if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) {
			nfs_open_file_clear_busy(nofp);
			nfs_mount_state_in_use_end(nmp, 0);
			nfs_open_owner_rele(noop);
			return EPERM;
		}
		/* re-check revocation now that we hold the busy state */
		if (np->n_flag & NREVOKE) {
			error = EIO;
			nfs_open_file_clear_busy(nofp);
			nfs_mount_state_in_use_end(nmp, 0);
			nfs_open_owner_rele(noop);
			return error;
		}
		if (nmp->nm_vers < NFS_VER4) {
			/* NFS v2/v3 opens are always allowed - so just add it. */
			nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
		}
#if CONFIG_NFS4
		else {
			error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
		}
#endif
		/* remember this open was created internally and needs a close later */
		if (!error) {
			nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
		}
	}
	if (nofp) {
		nfs_open_file_clear_busy(nofp);
	}
	/* a nonzero return here means recovery ran: retry from the top */
	if (nfs_mount_state_in_use_end(nmp, error)) {
		nofp = NULL;
		goto restart;
	}
	nfs_open_owner_rele(noop);
	if (error) {
		return error;
	}
do_read:
	return nfs_bioread(VTONFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_context);
}
6724
cb323159 6725#if CONFIG_NFS4
6d2010ae
A
/*
 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
 * Files are created using the NFSv4 OPEN RPC. So we must open the
 * file to create it and then close it.
 *
 * A provisional "nodeless" open file struct is created first, the OPEN
 * (with create) RPC is issued through it, and once the new node exists the
 * provisional struct is attached to the node (or merged into a pre-existing
 * open file struct for the same owner).
 */
int
nfs4_vnop_create(
	struct vnop_create_args /* {
                                 *  struct vnodeop_desc *a_desc;
                                 *  vnode_t a_dvp;
                                 *  vnode_t *a_vpp;
                                 *  struct componentname *a_cnp;
                                 *  struct vnode_attr *a_vap;
                                 *  vfs_context_t a_context;
                                 *  } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	struct componentname *cnp = ap->a_cnp;
	struct vnode_attr *vap = ap->a_vap;
	vnode_t dvp = ap->a_dvp;
	vnode_t *vpp = ap->a_vpp;
	struct nfsmount *nmp;
	nfsnode_t np;
	int error = 0, busyerror = 0, accessMode, denyMode;
	struct nfs_open_owner *noop = NULL;
	struct nfs_open_file *newnofp = NULL, *nofp = NULL;

	nmp = VTONMP(dvp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	if (vap) {
		nfs_avoid_needless_id_setting_on_create(VTONFS(dvp), vap, ctx);
	}

	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
	if (!noop) {
		return ENOMEM;
	}

restart:
	error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
	if (error) {
		nfs_open_owner_rele(noop);
		return error;
	}

	/* grab a provisional, nodeless open file */
	error = nfs_open_file_find(NULL, noop, &newnofp, 0, 0, 1);
	if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
		printf("nfs_vnop_create: LOST\n");
		error = EIO;
	}
	if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
		/* This shouldn't happen given that this is a new, nodeless nofp */
		nfs_mount_state_in_use_end(nmp, 0);
		error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
		nfs_open_file_destroy(newnofp);
		newnofp = NULL;
		if (!error) {
			goto restart;
		}
	}
	if (!error) {
		error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
	}
	if (error) {
		if (newnofp) {
			nfs_open_file_destroy(newnofp);
		}
		newnofp = NULL;
		goto out;
	}

	/*
	 * We're just trying to create the file.
	 * We'll create/open it RW, and set NFS_OPEN_FILE_CREATE.
	 */
	accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
	denyMode = NFS_OPEN_SHARE_DENY_NONE;

	/* Do the open/create */
	error = nfs4_open_rpc(newnofp, ctx, cnp, vap, dvp, vpp, NFS_OPEN_CREATE, accessMode, denyMode);
	if ((error == EACCES) && vap && !(vap->va_vaflags & VA_EXCLUSIVE) &&
	    VATTR_IS_ACTIVE(vap, va_mode) && !(vap->va_mode & S_IWUSR)) {
		/*
		 * Hmm... it looks like we may have a situation where the request was
		 * retransmitted because we didn't get the first response which successfully
		 * created/opened the file and then the second time we were denied the open
		 * because the mode the file was created with doesn't allow write access.
		 *
		 * We'll try to work around this by temporarily updating the mode and
		 * retrying the open.
		 */
		struct vnode_attr vattr;

		/* first make sure it's there */
		int error2 = nfs_lookitup(VTONFS(dvp), cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
		if (!error2 && np) {
			nfs_node_unlock(np);
			*vpp = NFSTOV(np);
			if (vnode_vtype(NFSTOV(np)) == VREG) {
				/* temporarily grant owner-write, retry the open, then restore the mode */
				VATTR_INIT(&vattr);
				VATTR_SET(&vattr, va_mode, (vap->va_mode | S_IWUSR));
				if (!nfs4_setattr_rpc(np, &vattr, ctx)) {
					error2 = nfs4_open_rpc(newnofp, ctx, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, accessMode, denyMode);
					VATTR_INIT(&vattr);
					VATTR_SET(&vattr, va_mode, vap->va_mode);
					nfs4_setattr_rpc(np, &vattr, ctx);
					if (!error2) {
						error = 0;
					}
				}
			}
			if (error) {
				vnode_put(*vpp);
				*vpp = NULL;
			}
		}
	}
	if (!error && !*vpp) {
		printf("nfs4_open_rpc returned without a node?\n");
		/* Hmmm... with no node, we have no filehandle and can't close it */
		error = EIO;
	}
	if (error) {
		/* need to cleanup our temporary nofp */
		nfs_open_file_clear_busy(newnofp);
		nfs_open_file_destroy(newnofp);
		newnofp = NULL;
		goto out;
	}
	/* After we have a node, add our open file struct to the node */
	np = VTONFS(*vpp);
	nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
	nofp = newnofp;
	error = nfs_open_file_find_internal(np, noop, &nofp, 0, 0, 0);
	if (error) {
		/* This shouldn't happen, because we passed in a new nofp to use. */
		printf("nfs_open_file_find_internal failed! %d\n", error);
		goto out;
	} else if (nofp != newnofp) {
		/*
		 * Hmm... an open file struct already exists.
		 * Mark the existing one busy and merge our open into it.
		 * Then destroy the one we created.
		 * Note: there's no chance of an open confict because the
		 * open has already been granted.
		 */
		busyerror = nfs_open_file_set_busy(nofp, NULL);
		nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
		nofp->nof_stateid = newnofp->nof_stateid;
		if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) {
			nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
		}
		nfs_open_file_clear_busy(newnofp);
		nfs_open_file_destroy(newnofp);
	}
	newnofp = NULL;
	/* mark the node as holding a create-initiated open */
	nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
	nofp->nof_creator = current_thread();
out:
	if (nofp && !busyerror) {
		nfs_open_file_clear_busy(nofp);
	}
	/* a nonzero return here means recovery ran: retry the whole create */
	if (nfs_mount_state_in_use_end(nmp, error)) {
		nofp = newnofp = NULL;
		busyerror = 0;
		goto restart;
	}
	if (noop) {
		nfs_open_owner_rele(noop);
	}
	return error;
}
6903
6904/*
6905 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6906 */
6907int
6908nfs4_create_rpc(
6909 vfs_context_t ctx,
6910 nfsnode_t dnp,
6911 struct componentname *cnp,
6912 struct vnode_attr *vap,
6913 int type,
6914 char *link,
6915 nfsnode_t *npp)
6916{
6917 struct nfsmount *nmp;
6918 struct nfs_vattr nvattr;
6919 int error = 0, create_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
6920 int nfsvers, namedattrs, numops;
6921 u_int64_t xid, savedxid = 0;
6922 nfsnode_t np = NULL;
6923 vnode_t newvp = NULL;
6924 struct nfsm_chain nmreq, nmrep;
6925 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6926 const char *tag;
6927 nfs_specdata sd;
6928 fhandle_t fh;
6929 struct nfsreq rq, *req = &rq;
6930 struct nfs_dulookup dul;
6931 struct nfsreq_secinfo_args si;
6932
6933 nmp = NFSTONMP(dnp);
0a7de745
A
6934 if (nfs_mount_gone(nmp)) {
6935 return ENXIO;
6936 }
6d2010ae
A
6937 nfsvers = nmp->nm_vers;
6938 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
0a7de745
A
6939 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
6940 return EINVAL;
6941 }
6d2010ae
A
6942
6943 sd.specdata1 = sd.specdata2 = 0;
6944
6945 switch (type) {
6946 case NFLNK:
6947 tag = "symlink";
6948 break;
6949 case NFBLK:
6950 case NFCHR:
6951 tag = "mknod";
0a7de745
A
6952 if (!VATTR_IS_ACTIVE(vap, va_rdev)) {
6953 return EINVAL;
6954 }
6d2010ae
A
6955 sd.specdata1 = major(vap->va_rdev);
6956 sd.specdata2 = minor(vap->va_rdev);
6957 break;
6958 case NFSOCK:
6959 case NFFIFO:
6960 tag = "mknod";
6961 break;
6962 case NFDIR:
6963 tag = "mkdir";
6964 break;
6965 default:
0a7de745 6966 return EINVAL;
6d2010ae
A
6967 }
6968
6969 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
6970
6971 error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
0a7de745 6972 if (!namedattrs) {
6d2010ae 6973 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
0a7de745 6974 }
6d2010ae
A
6975
6976 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
6977 NVATTR_INIT(&nvattr);
6978 nfsm_chain_null(&nmreq);
6979 nfsm_chain_null(&nmrep);
6980
6981 // PUTFH, SAVEFH, CREATE, GETATTR(FH), RESTOREFH, GETATTR
6982 numops = 6;
6983 nfsm_chain_build_alloc_init(error, &nmreq, 66 * NFSX_UNSIGNED);
3e170ce0 6984 nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
6d2010ae
A
6985 numops--;
6986 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6987 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
6988 numops--;
6989 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
6990 numops--;
6991 nfsm_chain_add_32(error, &nmreq, NFS_OP_CREATE);
6992 nfsm_chain_add_32(error, &nmreq, type);
6993 if (type == NFLNK) {
6994 nfsm_chain_add_name(error, &nmreq, link, strlen(link), nmp);
6995 } else if ((type == NFBLK) || (type == NFCHR)) {
6996 nfsm_chain_add_32(error, &nmreq, sd.specdata1);
6997 nfsm_chain_add_32(error, &nmreq, sd.specdata2);
6998 }
6999 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7000 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
7001 numops--;
7002 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7003 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7004 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7005 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
7006 numops--;
7007 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
7008 numops--;
7009 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7010 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
7011 nfsm_chain_build_done(error, &nmreq);
7012 nfsm_assert(error, (numops == 0), EPROTO);
7013 nfsmout_if(error);
7014
7015 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
0a7de745 7016 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
6d2010ae 7017 if (!error) {
0a7de745 7018 if (!namedattrs) {
6d2010ae 7019 nfs_dulookup_start(&dul, dnp, ctx);
0a7de745 7020 }
6d2010ae
A
7021 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
7022 }
7023
0a7de745 7024 if ((lockerror = nfs_node_lock(dnp))) {
6d2010ae 7025 error = lockerror;
0a7de745 7026 }
6d2010ae
A
7027 nfsm_chain_skip_tag(error, &nmrep);
7028 nfsm_chain_get_32(error, &nmrep, numops);
7029 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7030 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
7031 nfsmout_if(error);
7032 nfsm_chain_op_check(error, &nmrep, NFS_OP_CREATE);
7033 nfsm_chain_check_change_info(error, &nmrep, dnp);
7034 bmlen = NFS_ATTR_BITMAP_LEN;
7035 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
7036 /* At this point if we have no error, the object was created. */
7037 /* if we don't get attributes, then we should lookitup. */
7038 create_error = error;
7039 nfsmout_if(error);
7040 nfs_vattr_set_supported(bitmap, vap);
7041 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7042 nfsmout_if(error);
7043 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
7044 nfsmout_if(error);
7045 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
7046 printf("nfs: create/%s didn't return filehandle? %s\n", tag, cnp->cn_nameptr);
7047 error = EBADRPC;
7048 goto nfsmout;
7049 }
7050 /* directory attributes: if we don't get them, make sure to invalidate */
7051 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
7052 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7053 savedxid = xid;
7054 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
0a7de745 7055 if (error) {
6d2010ae 7056 NATTRINVALIDATE(dnp);
0a7de745 7057 }
6d2010ae
A
7058
7059nfsmout:
7060 nfsm_chain_cleanup(&nmreq);
7061 nfsm_chain_cleanup(&nmrep);
7062
7063 if (!lockerror) {
7064 if (!create_error && (dnp->n_flag & NNEGNCENTRIES)) {
7065 dnp->n_flag &= ~NNEGNCENTRIES;
7066 cache_purge_negatives(NFSTOV(dnp));
7067 }
7068 dnp->n_flag |= NMODIFIED;
7069 nfs_node_unlock(dnp);
7070 /* nfs_getattr() will check changed and purge caches */
7071 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
7072 }
7073
7074 if (!error && fh.fh_len) {
7075 /* create the vnode with the filehandle and attributes */
7076 xid = savedxid;
7077 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np);
0a7de745 7078 if (!error) {
6d2010ae 7079 newvp = NFSTOV(np);
0a7de745 7080 }
6d2010ae
A
7081 }
7082 NVATTR_CLEANUP(&nvattr);
7083
0a7de745 7084 if (!namedattrs) {
6d2010ae 7085 nfs_dulookup_finish(&dul, dnp, ctx);
0a7de745 7086 }
6d2010ae
A
7087
7088 /*
7089 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
7090 * if we can succeed in looking up the object.
7091 */
7092 if ((create_error == EEXIST) || (!create_error && !newvp)) {
7093 error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
7094 if (!error) {
7095 newvp = NFSTOV(np);
0a7de745 7096 if (vnode_vtype(newvp) != nfstov_type(type, nfsvers)) {
6d2010ae 7097 error = EEXIST;
0a7de745 7098 }
6d2010ae
A
7099 }
7100 }
0a7de745 7101 if (!busyerror) {
6d2010ae 7102 nfs_node_clear_busy(dnp);
0a7de745 7103 }
6d2010ae
A
7104 if (error) {
7105 if (newvp) {
7106 nfs_node_unlock(np);
7107 vnode_put(newvp);
7108 }
7109 } else {
7110 nfs_node_unlock(np);
7111 *npp = np;
7112 }
0a7de745 7113 return error;
6d2010ae
A
7114}
7115
7116int
7117nfs4_vnop_mknod(
7118 struct vnop_mknod_args /* {
0a7de745
A
7119 * struct vnodeop_desc *a_desc;
7120 * vnode_t a_dvp;
7121 * vnode_t *a_vpp;
7122 * struct componentname *a_cnp;
7123 * struct vnode_attr *a_vap;
7124 * vfs_context_t a_context;
7125 * } */*ap)
6d2010ae
A
7126{
7127 nfsnode_t np = NULL;
7128 struct nfsmount *nmp;
7129 int error;
7130
7131 nmp = VTONMP(ap->a_dvp);
0a7de745
A
7132 if (nfs_mount_gone(nmp)) {
7133 return ENXIO;
7134 }
6d2010ae 7135
0a7de745
A
7136 if (!VATTR_IS_ACTIVE(ap->a_vap, va_type)) {
7137 return EINVAL;
7138 }
6d2010ae
A
7139 switch (ap->a_vap->va_type) {
7140 case VBLK:
7141 case VCHR:
7142 case VFIFO:
7143 case VSOCK:
7144 break;
7145 default:
0a7de745 7146 return ENOTSUP;
6d2010ae
A
7147 }
7148
7149 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
0a7de745
A
7150 vtonfs_type(ap->a_vap->va_type, nmp->nm_vers), NULL, &np);
7151 if (!error) {
6d2010ae 7152 *ap->a_vpp = NFSTOV(np);
0a7de745
A
7153 }
7154 return error;
6d2010ae
A
7155}
7156
7157int
7158nfs4_vnop_mkdir(
7159 struct vnop_mkdir_args /* {
0a7de745
A
7160 * struct vnodeop_desc *a_desc;
7161 * vnode_t a_dvp;
7162 * vnode_t *a_vpp;
7163 * struct componentname *a_cnp;
7164 * struct vnode_attr *a_vap;
7165 * vfs_context_t a_context;
7166 * } */*ap)
6d2010ae
A
7167{
7168 nfsnode_t np = NULL;
7169 int error;
7170
7171 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
0a7de745
A
7172 NFDIR, NULL, &np);
7173 if (!error) {
6d2010ae 7174 *ap->a_vpp = NFSTOV(np);
0a7de745
A
7175 }
7176 return error;
6d2010ae
A
7177}
7178
7179int
7180nfs4_vnop_symlink(
7181 struct vnop_symlink_args /* {
0a7de745
A
7182 * struct vnodeop_desc *a_desc;
7183 * vnode_t a_dvp;
7184 * vnode_t *a_vpp;
7185 * struct componentname *a_cnp;
7186 * struct vnode_attr *a_vap;
7187 * char *a_target;
7188 * vfs_context_t a_context;
7189 * } */*ap)
6d2010ae
A
7190{
7191 nfsnode_t np = NULL;
7192 int error;
7193
7194 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
0a7de745
A
7195 NFLNK, ap->a_target, &np);
7196 if (!error) {
6d2010ae 7197 *ap->a_vpp = NFSTOV(np);
0a7de745
A
7198 }
7199 return error;
6d2010ae
A
7200}
7201
/*
 * Create a hard link to a_vp named by a_cnp in directory a_tdvp,
 * using a single NFSv4 COMPOUND:
 *   PUTFH(file), SAVEFH, PUTFH(dir), LINK, GETATTR(dir), RESTOREFH, GETATTR(file)
 * Returns 0 on success or an errno; EEXIST from the server is mapped
 * to 0 on the assumption that it is a reply to a retransmitted request.
 */
int
nfs4_vnop_link(
	struct vnop_link_args /* {
	 *  struct vnodeop_desc *a_desc;
	 *  vnode_t a_vp;
	 *  vnode_t a_tdvp;
	 *  struct componentname *a_cnp;
	 *  vfs_context_t a_context;
	 *  } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	vnode_t vp = ap->a_vp;
	vnode_t tdvp = ap->a_tdvp;
	struct componentname *cnp = ap->a_cnp;
	int error = 0, lockerror = ENOENT, status;
	struct nfsmount *nmp;
	nfsnode_t np = VTONFS(vp);
	nfsnode_t tdnp = VTONFS(tdvp);
	int nfsvers, numops;
	u_int64_t xid, savedxid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	/* Hard links cannot cross mounts. */
	if (vnode_mount(vp) != vnode_mount(tdvp)) {
		return EXDEV;
	}

	nmp = VTONMP(vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	/* Refuse to operate on referral trigger nodes (either endpoint). */
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	/*
	 * Push all writes to the server, so that the attribute cache
	 * doesn't get "out of sync" with the server.
	 * XXX There should be a better way!
	 */
	nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);

	/* Mark both nodes busy for the duration of the RPC. */
	if ((error = nfs_node_set_busy2(tdnp, np, vfs_context_thread(ctx)))) {
		return error;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH(SOURCE), SAVEFH, PUTFH(DIR), LINK, GETATTR(DIR), RESTOREFH, GETATTR
	numops = 7;
	nfsm_chain_build_alloc_init(error, &nmreq, 29 * NFSX_UNSIGNED + cnp->cn_namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "link", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_LINK);
	nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(tdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	/* Lock both nodes before consuming the reply attributes. */
	if ((lockerror = nfs_node_lock2(tdnp, np))) {
		error = lockerror;
		goto nfsmout;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_LINK);
	nfsm_chain_check_change_info(error, &nmrep, tdnp);
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	savedxid = xid;
	nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
	if (error) {
		NATTRINVALIDATE(tdnp);
	}
	/* link attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	xid = savedxid;
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (error) {
		NATTRINVALIDATE(np);
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if (!lockerror) {
		tdnp->n_flag |= NMODIFIED;
	}
	/* Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. */
	if (error == EEXIST) {
		error = 0;
	}
	/* A new entry exists now, so drop any cached negative entries in the dir. */
	if (!error && (tdnp->n_flag & NNEGNCENTRIES)) {
		tdnp->n_flag &= ~NNEGNCENTRIES;
		cache_purge_negatives(tdvp);
	}
	if (!lockerror) {
		nfs_node_unlock2(tdnp, np);
	}
	nfs_node_clear_busy2(tdnp, np);
	return error;
}
7330
/*
 * Remove the directory a_vp (named a_cnp in parent a_dvp) via the
 * NFSv4 REMOVE RPC (nfs4_remove_rpc).  ENOENT from the server is
 * mapped to 0 on the assumption that it is a reply to a retransmitted
 * request.  On success the nfsnode is unhashed so a new object reusing
 * the same filehandle cannot be confused with it before reclaim.
 */
int
nfs4_vnop_rmdir(
	struct vnop_rmdir_args /* {
	 *  struct vnodeop_desc *a_desc;
	 *  vnode_t a_dvp;
	 *  vnode_t a_vp;
	 *  struct componentname *a_cnp;
	 *  vfs_context_t a_context;
	 *  } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	vnode_t vp = ap->a_vp;
	vnode_t dvp = ap->a_dvp;
	struct componentname *cnp = ap->a_cnp;
	struct nfsmount *nmp;
	int error = 0, namedattrs;
	nfsnode_t np = VTONFS(vp);
	nfsnode_t dnp = VTONFS(dvp);
	struct nfs_dulookup dul;

	if (vnode_vtype(vp) != VDIR) {
		return EINVAL;
	}

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);

	/* Mark parent and child busy for the duration of the operation. */
	if ((error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx)))) {
		return error;
	}

	/*
	 * Kick off a "._" sibling lookup in parallel with the remove
	 * (skipped on mounts with named-attribute support).
	 */
	if (!namedattrs) {
		nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
		nfs_dulookup_start(&dul, dnp, ctx);
	}

	error = nfs4_remove_rpc(dnp, cnp->cn_nameptr, cnp->cn_namelen,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx));

	nfs_name_cache_purge(dnp, np, cnp, ctx);
	/* nfs_getattr() will check changed and purge caches */
	nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
	if (!namedattrs) {
		nfs_dulookup_finish(&dul, dnp, ctx);
	}
	nfs_node_clear_busy2(dnp, np);

	/*
	 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
	 */
	if (error == ENOENT) {
		error = 0;
	}
	if (!error) {
		/*
		 * remove nfsnode from hash now so we can't accidentally find it
		 * again if another object gets created with the same filehandle
		 * before this vnode gets reclaimed
		 */
		lck_mtx_lock(nfs_node_hash_mutex);
		if (np->n_hflag & NHHASHED) {
			LIST_REMOVE(np, n_hash);
			np->n_hflag &= ~NHHASHED;
			FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
		}
		lck_mtx_unlock(nfs_node_hash_mutex);
	}
	return error;
}
7403
7404/*
7405 * NFSv4 Named Attributes
7406 *
7407 * Both the extended attributes interface and the named streams interface
7408 * are backed by NFSv4 named attributes. The implementations for both use
7409 * a common set of routines in an attempt to reduce code duplication, to
7410 * increase efficiency, to increase caching of both names and data, and to
7411 * confine the complexity.
7412 *
7413 * Each NFS node caches its named attribute directory's file handle.
7414 * The directory nodes for the named attribute directories are handled
7415 * exactly like regular directories (with a couple minor exceptions).
7416 * Named attribute nodes are also treated as much like regular files as
7417 * possible.
7418 *
7419 * Most of the heavy lifting is done by nfs4_named_attr_get().
7420 */
7421
7422/*
7423 * Get the given node's attribute directory node.
7424 * If !fetch, then only return a cached node.
7425 * Otherwise, we will attempt to fetch the node from the server.
7426 * (Note: the node should be marked busy.)
b0d623f7 7427 */
6d2010ae
A
nfsnode_t
nfs4_named_attr_dir_get(nfsnode_t np, int fetch, vfs_context_t ctx)
{
	/*
	 * Return np's named-attribute directory node, or NULL on failure.
	 * If the attrdir filehandle is already cached in np->n_attrdirfh we
	 * just nget the node; otherwise, when fetch is set, we ask the server
	 * with a PUTFH/OPENATTR/GETATTR compound and cache the result.
	 */
	nfsnode_t adnp = NULL;
	struct nfsmount *nmp;
	int error = 0, status, numops;
	struct nfsm_chain nmreq, nmrep;
	u_int64_t xid;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	fhandle_t fh;
	struct nfs_vattr nvattr;
	struct componentname cn;
	struct nfsreq rq, *req = &rq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return NULL;
	}
	/* No named attributes on referral trigger nodes. */
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return NULL;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	NVATTR_INIT(&nvattr);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/* Fake a component name for the attrdir node's name cache entry. */
	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
	cn.cn_namelen = strlen(_PATH_FORKSPECIFIER);
	cn.cn_nameiop = LOOKUP;

	/* Fast path: the attrdir fh is cached (length byte + fh data). */
	if (np->n_attrdirfh) {
		// XXX can't set parent correctly (to np) yet
		error = nfs_nget(nmp->nm_mountp, NULL, &cn, np->n_attrdirfh + 1, *np->n_attrdirfh,
		    NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &adnp);
		if (adnp) {
			goto nfsmout;
		}
	}
	/* Caller only wanted a cached node — don't go to the server. */
	if (!fetch) {
		error = ENOENT;
		goto nfsmout;
	}

	// PUTFH, OPENATTR, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "openattr", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
	nfsm_chain_add_32(error, &nmreq, 0); /* createdir = false */
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
	nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
	    NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
	if (!error) {
		error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	}

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
	nfsmout_if(error);
	/* We need the attrdir filehandle to proceed. */
	if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh.fh_len) {
		error = ENOENT;
		goto nfsmout;
	}
	if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) {
		/* (re)allocate attrdir fh buffer */
		if (np->n_attrdirfh) {
			FREE(np->n_attrdirfh, M_TEMP);
		}
		MALLOC(np->n_attrdirfh, u_char*, fh.fh_len + 1, M_TEMP, M_WAITOK);
	}
	if (!np->n_attrdirfh) {
		error = ENOMEM;
		goto nfsmout;
	}
	/* cache the attrdir fh in the node */
	*np->n_attrdirfh = fh.fh_len;
	bcopy(fh.fh_data, np->n_attrdirfh + 1, fh.fh_len);
	/* create node for attrdir */
	// XXX can't set parent correctly (to np) yet
	error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp);
nfsmout:
	NVATTR_CLEANUP(&nvattr);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	if (adnp) {
		/* sanity check that this node is an attribute directory */
		if (adnp->n_vattr.nva_type != VDIR) {
			error = EINVAL;
		}
		if (!(adnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
			error = EINVAL;
		}
		/* nfs_nget() returned the node locked; drop the lock now. */
		nfs_node_unlock(adnp);
		if (error) {
			vnode_put(NFSTOV(adnp));
		}
	}
	return error ? NULL : adnp;
}
7548
2d21ac55 7549/*
6d2010ae
A
7550 * Get the given node's named attribute node for the name given.
7551 *
7552 * In an effort to increase the performance of named attribute access, we try
7553 * to reduce server requests by doing the following:
7554 *
7555 * - cache the node's named attribute directory file handle in the node
7556 * - maintain a directory vnode for the attribute directory
7557 * - use name cache entries (positive and negative) to speed up lookups
7558 * - optionally open the named attribute (with the given accessMode) in the same RPC
7559 * - combine attribute directory retrieval with the lookup/open RPC
7560 * - optionally prefetch the named attribute's first block of data in the same RPC
7561 *
7562 * Also, in an attempt to reduce the number of copies/variations of this code,
7563 * parts of the RPC building/processing code are conditionalized on what is
7564 * needed for any particular request (openattr, lookup vs. open, read).
7565 *
7566 * Note that because we may not have the attribute directory node when we start
7567 * the lookup/open, we lock both the node and the attribute directory node.
2d21ac55 7568 */
6d2010ae 7569
0a7de745
A
7570#define NFS_GET_NAMED_ATTR_CREATE 0x1
7571#define NFS_GET_NAMED_ATTR_CREATE_GUARDED 0x2
7572#define NFS_GET_NAMED_ATTR_TRUNCATE 0x4
7573#define NFS_GET_NAMED_ATTR_PREFETCH 0x8
6d2010ae 7574
b0d623f7 7575int
6d2010ae
A
7576nfs4_named_attr_get(
7577 nfsnode_t np,
2d21ac55 7578 struct componentname *cnp,
6d2010ae
A
7579 uint32_t accessMode,
7580 int flags,
7581 vfs_context_t ctx,
7582 nfsnode_t *anpp,
7583 struct nfs_open_file **nofpp)
2d21ac55
A
7584{
7585 struct nfsmount *nmp;
6d2010ae
A
7586 int error = 0, open_error = EIO;
7587 int inuse = 0, adlockerror = ENOENT, busyerror = ENOENT, adbusyerror = ENOENT, nofpbusyerror = ENOENT;
7588 int create, guarded, prefetch, truncate, noopbusy = 0;
7589 int open, status, numops, hadattrdir, negnamecache;
7590 struct nfs_vattr nvattr;
7591 struct vnode_attr vattr;
7592 nfsnode_t adnp = NULL, anp = NULL;
7593 vnode_t avp = NULL;
2d21ac55 7594 u_int64_t xid, savedxid = 0;
2d21ac55
A
7595 struct nfsm_chain nmreq, nmrep;
7596 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6d2010ae
A
7597 uint32_t denyMode, rflags, delegation, recall, eof, rlen, retlen;
7598 nfs_stateid stateid, dstateid;
2d21ac55 7599 fhandle_t fh;
6d2010ae
A
7600 struct nfs_open_owner *noop = NULL;
7601 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
7602 struct vnop_access_args naa;
7603 thread_t thd;
7604 kauth_cred_t cred;
7605 struct timeval now;
7606 char sbuf[64], *s;
7607 uint32_t ace_type, ace_flags, ace_mask, len, slen;
7608 struct kauth_ace ace;
7609 struct nfsreq rq, *req = &rq;
7610 struct nfsreq_secinfo_args si;
7611
7612 *anpp = NULL;
7613 fh.fh_len = 0;
7614 rflags = delegation = recall = eof = rlen = retlen = 0;
7615 ace.ace_flags = 0;
7616 s = sbuf;
7617 slen = sizeof(sbuf);
2d21ac55 7618
6d2010ae 7619 nmp = NFSTONMP(np);
0a7de745
A
7620 if (nfs_mount_gone(nmp)) {
7621 return ENXIO;
7622 }
6d2010ae
A
7623 NVATTR_INIT(&nvattr);
7624 negnamecache = !NMFLAG(nmp, NONEGNAMECACHE);
7625 thd = vfs_context_thread(ctx);
7626 cred = vfs_context_ucred(ctx);
7627 create = (flags & NFS_GET_NAMED_ATTR_CREATE) ? NFS_OPEN_CREATE : NFS_OPEN_NOCREATE;
7628 guarded = (flags & NFS_GET_NAMED_ATTR_CREATE_GUARDED) ? NFS_CREATE_GUARDED : NFS_CREATE_UNCHECKED;
7629 truncate = (flags & NFS_GET_NAMED_ATTR_TRUNCATE);
7630 prefetch = (flags & NFS_GET_NAMED_ATTR_PREFETCH);
7631
7632 if (!create) {
7633 error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED);
0a7de745
A
7634 if (error) {
7635 return error;
7636 }
6d2010ae 7637 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
0a7de745
A
7638 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
7639 return ENOATTR;
7640 }
6d2010ae
A
7641 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_NONE) {
7642 /* shouldn't happen... but just be safe */
7643 printf("nfs4_named_attr_get: create with no access %s\n", cnp->cn_nameptr);
7644 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
7645 }
7646 open = (accessMode != NFS_OPEN_SHARE_ACCESS_NONE);
7647 if (open) {
7648 /*
7649 * We're trying to open the file.
7650 * We'll create/open it with the given access mode,
7651 * and set NFS_OPEN_FILE_CREATE.
7652 */
7653 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 7654 if (prefetch && guarded) {
6d2010ae 7655 prefetch = 0; /* no sense prefetching data that can't be there */
0a7de745 7656 }
6d2010ae 7657 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
0a7de745
A
7658 if (!noop) {
7659 return ENOMEM;
7660 }
2d21ac55
A
7661 }
7662
0a7de745
A
7663 if ((error = busyerror = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
7664 return error;
7665 }
2d21ac55 7666
6d2010ae
A
7667 adnp = nfs4_named_attr_dir_get(np, 0, ctx);
7668 hadattrdir = (adnp != NULL);
7669 if (prefetch) {
7670 microuptime(&now);
7671 /* use the special state ID because we don't have a real one to send */
7672 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
7673 rlen = MIN(nmp->nm_rsize, nmp->nm_biosize);
7674 }
7675 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
2d21ac55
A
7676 nfsm_chain_null(&nmreq);
7677 nfsm_chain_null(&nmrep);
7678
6d2010ae 7679 if (hadattrdir) {
0a7de745 7680 if ((error = adbusyerror = nfs_node_set_busy(adnp, vfs_context_thread(ctx)))) {
6d2010ae 7681 goto nfsmout;
0a7de745 7682 }
6d2010ae
A
7683 /* nfs_getattr() will check changed and purge caches */
7684 error = nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
7685 nfsmout_if(error);
7686 error = cache_lookup(NFSTOV(adnp), &avp, cnp);
7687 switch (error) {
7688 case ENOENT:
7689 /* negative cache entry */
7690 goto nfsmout;
7691 case 0:
7692 /* cache miss */
7693 /* try dir buf cache lookup */
7694 error = nfs_dir_buf_cache_lookup(adnp, &anp, cnp, ctx, 0);
7695 if (!error && anp) {
7696 /* dir buf cache hit */
7697 *anpp = anp;
7698 error = -1;
7699 }
0a7de745 7700 if (error != -1) { /* cache miss */
6d2010ae 7701 break;
0a7de745
A
7702 }
7703 /* FALLTHROUGH */
6d2010ae
A
7704 case -1:
7705 /* cache hit, not really an error */
316670eb 7706 OSAddAtomic64(1, &nfsstats.lookupcache_hits);
0a7de745 7707 if (!anp && avp) {
6d2010ae 7708 *anpp = anp = VTONFS(avp);
0a7de745 7709 }
6d2010ae
A
7710
7711 nfs_node_clear_busy(adnp);
7712 adbusyerror = ENOENT;
7713
7714 /* check for directory access */
7715 naa.a_desc = &vnop_access_desc;
7716 naa.a_vp = NFSTOV(adnp);
7717 naa.a_action = KAUTH_VNODE_SEARCH;
7718 naa.a_context = ctx;
7719
7720 /* compute actual success/failure based on accessibility */
7721 error = nfs_vnop_access(&naa);
0a7de745 7722 /* FALLTHROUGH */
6d2010ae
A
7723 default:
7724 /* we either found it, or hit an error */
7725 if (!error && guarded) {
7726 /* found cached entry but told not to use it */
7727 error = EEXIST;
7728 vnode_put(NFSTOV(anp));
7729 *anpp = anp = NULL;
7730 }
7731 /* we're done if error or we don't need to open */
0a7de745 7732 if (error || !open) {
6d2010ae 7733 goto nfsmout;
0a7de745 7734 }
6d2010ae
A
7735 /* no error and we need to open... */
7736 }
7737 }
7738
7739 if (open) {
7740restart:
7741 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
7742 if (error) {
7743 nfs_open_owner_rele(noop);
7744 noop = NULL;
7745 goto nfsmout;
7746 }
7747 inuse = 1;
7748
7749 /* grab an open file - possibly provisional/nodeless if cache_lookup() failed */
7750 error = nfs_open_file_find(anp, noop, &newnofp, 0, 0, 1);
7751 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
7752 printf("nfs4_named_attr_get: LOST %d %s\n", kauth_cred_getuid(noop->noo_cred), cnp->cn_nameptr);
7753 error = EIO;
7754 }
7755 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
7756 nfs_mount_state_in_use_end(nmp, 0);
7757 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
7758 nfs_open_file_destroy(newnofp);
7759 newnofp = NULL;
0a7de745 7760 if (!error) {
6d2010ae 7761 goto restart;
0a7de745 7762 }
6d2010ae 7763 }
0a7de745 7764 if (!error) {
6d2010ae 7765 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
0a7de745 7766 }
6d2010ae 7767 if (error) {
0a7de745 7768 if (newnofp) {
6d2010ae 7769 nfs_open_file_destroy(newnofp);
0a7de745 7770 }
6d2010ae
A
7771 newnofp = NULL;
7772 goto nfsmout;
7773 }
7774 if (anp) {
7775 /*
7776 * We already have the node. So we just need to open
7777 * it - which we may be able to do with a delegation.
7778 */
7779 open_error = error = nfs4_open(anp, newnofp, accessMode, denyMode, ctx);
7780 if (!error) {
7781 /* open succeeded, so our open file is no longer temporary */
7782 nofp = newnofp;
7783 nofpbusyerror = 0;
7784 newnofp = NULL;
0a7de745 7785 if (nofpp) {
6d2010ae 7786 *nofpp = nofp;
0a7de745 7787 }
6d2010ae
A
7788 }
7789 goto nfsmout;
7790 }
7791 }
7792
7793 /*
7794 * We either don't have the attrdir or we didn't find the attribute
7795 * in the name cache, so we need to talk to the server.
7796 *
7797 * If we don't have the attrdir, we'll need to ask the server for that too.
7798 * If the caller is requesting that the attribute be created, we need to
7799 * make sure the attrdir is created.
7800 * The caller may also request that the first block of an existing attribute
7801 * be retrieved at the same time.
7802 */
7803
7804 if (open) {
7805 /* need to mark the open owner busy during the RPC */
0a7de745 7806 if ((error = nfs_open_owner_set_busy(noop, thd))) {
6d2010ae 7807 goto nfsmout;
0a7de745 7808 }
6d2010ae
A
7809 noopbusy = 1;
7810 }
7811
7812 /*
7813 * We'd like to get updated post-open/lookup attributes for the
7814 * directory and we may also want to prefetch some data via READ.
7815 * We'd like the READ results to be last so that we can leave the
7816 * data in the mbufs until the end.
7817 *
7818 * At a minimum we're sending: PUTFH, LOOKUP/OPEN, GETATTR, PUTFH, GETATTR
7819 */
7820 numops = 5;
0a7de745
A
7821 if (!hadattrdir) {
7822 numops += 3; // also sending: OPENATTR, GETATTR, OPENATTR
7823 }
7824 if (prefetch) {
7825 numops += 4; // also sending: SAVEFH, RESTOREFH, NVERIFY, READ
7826 }
6d2010ae 7827 nfsm_chain_build_alloc_init(error, &nmreq, 64 * NFSX_UNSIGNED + cnp->cn_namelen);
3e170ce0 7828 nfsm_chain_add_compound_header(error, &nmreq, "getnamedattr", nmp->nm_minor_vers, numops);
6d2010ae
A
7829 if (hadattrdir) {
7830 numops--;
7831 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7832 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7833 } else {
7834 numops--;
7835 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7836 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7837 numops--;
7838 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7839 nfsm_chain_add_32(error, &nmreq, create ? 1 : 0);
7840 numops--;
7841 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7842 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7843 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7844 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
0a7de745 7845 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
6d2010ae
A
7846 }
7847 if (open) {
7848 numops--;
7849 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
7850 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
7851 nfsm_chain_add_32(error, &nmreq, accessMode);
7852 nfsm_chain_add_32(error, &nmreq, denyMode);
7853 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
7854 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
7855 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
7856 nfsm_chain_add_32(error, &nmreq, create);
7857 if (create) {
7858 nfsm_chain_add_32(error, &nmreq, guarded);
7859 VATTR_INIT(&vattr);
0a7de745 7860 if (truncate) {
6d2010ae 7861 VATTR_SET(&vattr, va_data_size, 0);
0a7de745 7862 }
6d2010ae
A
7863 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
7864 }
7865 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
7866 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7867 } else {
7868 numops--;
7869 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
7870 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
2d21ac55 7871 }
2d21ac55
A
7872 numops--;
7873 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7874 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7875 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7876 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
0a7de745 7877 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
6d2010ae
A
7878 if (prefetch) {
7879 numops--;
7880 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
7881 }
7882 if (hadattrdir) {
7883 numops--;
7884 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7885 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7886 } else {
7887 numops--;
7888 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7889 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7890 numops--;
7891 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7892 nfsm_chain_add_32(error, &nmreq, 0);
7893 }
2d21ac55
A
7894 numops--;
7895 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7896 nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
0a7de745 7897 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
6d2010ae
A
7898 if (prefetch) {
7899 numops--;
7900 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
7901 numops--;
7902 nfsm_chain_add_32(error, &nmreq, NFS_OP_NVERIFY);
7903 VATTR_INIT(&vattr);
7904 VATTR_SET(&vattr, va_data_size, 0);
7905 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
7906 numops--;
7907 nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
7908 nfsm_chain_add_stateid(error, &nmreq, &stateid);
7909 nfsm_chain_add_64(error, &nmreq, 0);
7910 nfsm_chain_add_32(error, &nmreq, rlen);
7911 }
2d21ac55
A
7912 nfsm_chain_build_done(error, &nmreq);
7913 nfsm_assert(error, (numops == 0), EPROTO);
7914 nfsmout_if(error);
6d2010ae 7915 error = nfs_request_async(hadattrdir ? adnp : np, NULL, &nmreq, NFSPROC4_COMPOUND,
0a7de745
A
7916 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, open ? R_NOINTR: 0, NULL, &req);
7917 if (!error) {
2d21ac55 7918 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
0a7de745 7919 }
2d21ac55 7920
0a7de745 7921 if (hadattrdir && ((adlockerror = nfs_node_lock(adnp)))) {
6d2010ae 7922 error = adlockerror;
0a7de745 7923 }
6d2010ae 7924 savedxid = xid;
2d21ac55
A
7925 nfsm_chain_skip_tag(error, &nmrep);
7926 nfsm_chain_get_32(error, &nmrep, numops);
7927 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6d2010ae
A
7928 if (!hadattrdir) {
7929 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
7930 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7931 nfsmout_if(error);
7932 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
7933 nfsmout_if(error);
7934 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) && fh.fh_len) {
7935 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) {
7936 /* (re)allocate attrdir fh buffer */
0a7de745 7937 if (np->n_attrdirfh) {
6d2010ae 7938 FREE(np->n_attrdirfh, M_TEMP);
0a7de745
A
7939 }
7940 MALLOC(np->n_attrdirfh, u_char*, fh.fh_len + 1, M_TEMP, M_WAITOK);
6d2010ae
A
7941 }
7942 if (np->n_attrdirfh) {
7943 /* remember the attrdir fh in the node */
7944 *np->n_attrdirfh = fh.fh_len;
0a7de745 7945 bcopy(fh.fh_data, np->n_attrdirfh + 1, fh.fh_len);
6d2010ae
A
7946 /* create busied node for attrdir */
7947 struct componentname cn;
7948 bzero(&cn, sizeof(cn));
7949 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
7950 cn.cn_namelen = strlen(_PATH_FORKSPECIFIER);
7951 cn.cn_nameiop = LOOKUP;
7952 // XXX can't set parent correctly (to np) yet
7953 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp);
7954 if (!error) {
7955 adlockerror = 0;
7956 /* set the node busy */
7957 SET(adnp->n_flag, NBUSY);
7958 adbusyerror = 0;
7959 }
7960 /* if no adnp, oh well... */
7961 error = 0;
7962 }
7963 }
7964 NVATTR_CLEANUP(&nvattr);
7965 fh.fh_len = 0;
7966 }
7967 if (open) {
7968 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
7969 nfs_owner_seqid_increment(noop, NULL, error);
7970 nfsm_chain_get_stateid(error, &nmrep, &newnofp->nof_stateid);
7971 nfsm_chain_check_change_info(error, &nmrep, adnp);
7972 nfsm_chain_get_32(error, &nmrep, rflags);
7973 bmlen = NFS_ATTR_BITMAP_LEN;
7974 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
7975 nfsm_chain_get_32(error, &nmrep, delegation);
0a7de745 7976 if (!error) {
6d2010ae
A
7977 switch (delegation) {
7978 case NFS_OPEN_DELEGATE_NONE:
7979 break;
7980 case NFS_OPEN_DELEGATE_READ:
7981 case NFS_OPEN_DELEGATE_WRITE:
7982 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
7983 nfsm_chain_get_32(error, &nmrep, recall);
0a7de745 7984 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
6d2010ae 7985 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
0a7de745 7986 }
6d2010ae
A
7987 /* if we have any trouble accepting the ACE, just invalidate it */
7988 ace_type = ace_flags = ace_mask = len = 0;
7989 nfsm_chain_get_32(error, &nmrep, ace_type);
7990 nfsm_chain_get_32(error, &nmrep, ace_flags);
7991 nfsm_chain_get_32(error, &nmrep, ace_mask);
7992 nfsm_chain_get_32(error, &nmrep, len);
7993 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
7994 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
7995 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
7996 if (!error && (len >= slen)) {
0a7de745
A
7997 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
7998 if (s) {
7999 slen = len + 1;
8000 } else {
6d2010ae 8001 ace.ace_flags = 0;
0a7de745 8002 }
6d2010ae 8003 }
0a7de745 8004 if (s) {
6d2010ae 8005 nfsm_chain_get_opaque(error, &nmrep, len, s);
0a7de745 8006 } else {
6d2010ae 8007 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
0a7de745 8008 }
6d2010ae
A
8009 if (!error && s) {
8010 s[len] = '\0';
0a7de745 8011 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
6d2010ae 8012 ace.ace_flags = 0;
0a7de745 8013 }
6d2010ae 8014 }
0a7de745 8015 if (error || !s) {
6d2010ae 8016 ace.ace_flags = 0;
0a7de745
A
8017 }
8018 if (s && (s != sbuf)) {
6d2010ae 8019 FREE(s, M_TEMP);
0a7de745 8020 }
6d2010ae
A
8021 break;
8022 default:
8023 error = EBADRPC;
8024 break;
8025 }
0a7de745 8026 }
6d2010ae
A
8027 /* At this point if we have no error, the object was created/opened. */
8028 open_error = error;
8029 } else {
8030 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOOKUP);
8031 }
2d21ac55
A
8032 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
8033 nfsmout_if(error);
6d2010ae 8034 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
2d21ac55 8035 nfsmout_if(error);
6d2010ae
A
8036 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh.fh_len) {
8037 error = EIO;
2d21ac55
A
8038 goto nfsmout;
8039 }
0a7de745 8040 if (prefetch) {
6d2010ae 8041 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
0a7de745 8042 }
6d2010ae 8043 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
0a7de745 8044 if (!hadattrdir) {
6d2010ae 8045 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
0a7de745 8046 }
2d21ac55 8047 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae
A
8048 nfsmout_if(error);
8049 xid = savedxid;
8050 nfsm_chain_loadattr(error, &nmrep, adnp, nmp->nm_vers, &xid);
8051 nfsmout_if(error);
2d21ac55 8052
6d2010ae 8053 if (open) {
0a7de745 8054 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
6d2010ae 8055 newnofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
0a7de745 8056 }
6d2010ae
A
8057 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
8058 if (adnp) {
8059 nfs_node_unlock(adnp);
8060 adlockerror = ENOENT;
8061 }
8062 NVATTR_CLEANUP(&nvattr);
8063 error = nfs4_open_confirm_rpc(nmp, adnp ? adnp : np, fh.fh_data, fh.fh_len, noop, &newnofp->nof_stateid, thd, cred, &nvattr, &xid);
8064 nfsmout_if(error);
8065 savedxid = xid;
0a7de745 8066 if ((adlockerror = nfs_node_lock(adnp))) {
6d2010ae 8067 error = adlockerror;
0a7de745 8068 }
2d21ac55 8069 }
2d21ac55
A
8070 }
8071
6d2010ae
A
8072nfsmout:
8073 if (open && adnp && !adlockerror) {
8074 if (!open_error && (adnp->n_flag & NNEGNCENTRIES)) {
8075 adnp->n_flag &= ~NNEGNCENTRIES;
8076 cache_purge_negatives(NFSTOV(adnp));
8077 }
8078 adnp->n_flag |= NMODIFIED;
8079 nfs_node_unlock(adnp);
8080 adlockerror = ENOENT;
8081 nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
8082 }
8083 if (adnp && !adlockerror && (error == ENOENT) &&
8084 (cnp->cn_flags & MAKEENTRY) && (cnp->cn_nameiop != CREATE) && negnamecache) {
8085 /* add a negative entry in the name cache */
8086 cache_enter(NFSTOV(adnp), NULL, cnp);
8087 adnp->n_flag |= NNEGNCENTRIES;
8088 }
8089 if (adnp && !adlockerror) {
8090 nfs_node_unlock(adnp);
8091 adlockerror = ENOENT;
8092 }
8093 if (!error && !anp && fh.fh_len) {
2d21ac55
A
8094 /* create the vnode with the filehandle and attributes */
8095 xid = savedxid;
6d2010ae
A
8096 error = nfs_nget(NFSTOMP(np), adnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &anp);
8097 if (!error) {
8098 *anpp = anp;
8099 nfs_node_unlock(anp);
8100 }
8101 if (!error && open) {
8102 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
8103 /* After we have a node, add our open file struct to the node */
8104 nofp = newnofp;
8105 error = nfs_open_file_find_internal(anp, noop, &nofp, 0, 0, 0);
8106 if (error) {
8107 /* This shouldn't happen, because we passed in a new nofp to use. */
8108 printf("nfs_open_file_find_internal failed! %d\n", error);
8109 nofp = NULL;
8110 } else if (nofp != newnofp) {
8111 /*
8112 * Hmm... an open file struct already exists.
8113 * Mark the existing one busy and merge our open into it.
8114 * Then destroy the one we created.
8115 * Note: there's no chance of an open confict because the
8116 * open has already been granted.
8117 */
8118 nofpbusyerror = nfs_open_file_set_busy(nofp, NULL);
8119 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
8120 nofp->nof_stateid = newnofp->nof_stateid;
0a7de745 8121 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) {
6d2010ae 8122 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
0a7de745 8123 }
6d2010ae
A
8124 nfs_open_file_clear_busy(newnofp);
8125 nfs_open_file_destroy(newnofp);
8126 newnofp = NULL;
8127 }
8128 if (!error) {
8129 newnofp = NULL;
8130 nofpbusyerror = 0;
8131 /* mark the node as holding a create-initiated open */
8132 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
8133 nofp->nof_creator = current_thread();
0a7de745 8134 if (nofpp) {
6d2010ae 8135 *nofpp = nofp;
0a7de745 8136 }
6d2010ae
A
8137 }
8138 }
2d21ac55 8139 }
6d2010ae
A
8140 NVATTR_CLEANUP(&nvattr);
8141 if (open && ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE))) {
8142 if (!error && anp && !recall) {
8143 /* stuff the delegation state in the node */
8144 lck_mtx_lock(&anp->n_openlock);
8145 anp->n_openflags &= ~N_DELEG_MASK;
8146 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
8147 anp->n_dstateid = dstateid;
8148 anp->n_dace = ace;
8149 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8150 lck_mtx_lock(&nmp->nm_lock);
0a7de745 8151 if (anp->n_dlink.tqe_next == NFSNOLIST) {
6d2010ae 8152 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
0a7de745 8153 }
6d2010ae
A
8154 lck_mtx_unlock(&nmp->nm_lock);
8155 }
8156 lck_mtx_unlock(&anp->n_openlock);
8157 } else {
8158 /* give the delegation back */
8159 if (anp) {
8160 if (NFS_CMPFH(anp, fh.fh_data, fh.fh_len)) {
8161 /* update delegation state and return it */
8162 lck_mtx_lock(&anp->n_openlock);
8163 anp->n_openflags &= ~N_DELEG_MASK;
8164 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
8165 anp->n_dstateid = dstateid;
8166 anp->n_dace = ace;
8167 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8168 lck_mtx_lock(&nmp->nm_lock);
0a7de745 8169 if (anp->n_dlink.tqe_next == NFSNOLIST) {
6d2010ae 8170 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
0a7de745 8171 }
6d2010ae
A
8172 lck_mtx_unlock(&nmp->nm_lock);
8173 }
8174 lck_mtx_unlock(&anp->n_openlock);
8175 /* don't need to send a separate delegreturn for fh */
8176 fh.fh_len = 0;
8177 }
8178 /* return anp's current delegation */
8179 nfs4_delegation_return(anp, 0, thd, cred);
8180 }
0a7de745 8181 if (fh.fh_len) { /* return fh's delegation if it wasn't for anp */
6d2010ae 8182 nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred);
0a7de745 8183 }
6d2010ae
A
8184 }
8185 }
8186 if (open) {
8187 if (newnofp) {
8188 /* need to cleanup our temporary nofp */
8189 nfs_open_file_clear_busy(newnofp);
8190 nfs_open_file_destroy(newnofp);
8191 newnofp = NULL;
8192 } else if (nofp && !nofpbusyerror) {
8193 nfs_open_file_clear_busy(nofp);
8194 nofpbusyerror = ENOENT;
8195 }
8196 if (inuse && nfs_mount_state_in_use_end(nmp, error)) {
8197 inuse = 0;
8198 nofp = newnofp = NULL;
8199 rflags = delegation = recall = eof = rlen = retlen = 0;
8200 ace.ace_flags = 0;
8201 s = sbuf;
8202 slen = sizeof(sbuf);
8203 nfsm_chain_cleanup(&nmreq);
8204 nfsm_chain_cleanup(&nmrep);
8205 if (anp) {
8206 vnode_put(NFSTOV(anp));
8207 *anpp = anp = NULL;
8208 }
8209 hadattrdir = (adnp != NULL);
8210 if (noopbusy) {
8211 nfs_open_owner_clear_busy(noop);
8212 noopbusy = 0;
8213 }
8214 goto restart;
8215 }
8216 if (noop) {
8217 if (noopbusy) {
8218 nfs_open_owner_clear_busy(noop);
8219 noopbusy = 0;
8220 }
8221 nfs_open_owner_rele(noop);
8222 }
8223 }
8224 if (!error && prefetch && nmrep.nmc_mhead) {
8225 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
8226 nfsm_chain_op_check(error, &nmrep, NFS_OP_NVERIFY);
8227 nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
8228 nfsm_chain_get_32(error, &nmrep, eof);
8229 nfsm_chain_get_32(error, &nmrep, retlen);
8230 if (!error && anp) {
8231 /*
8232 * There can be one problem with doing the prefetch.
8233 * Because we don't have the node before we start the RPC, we
8234 * can't have the buffer busy while the READ is performed.
8235 * So there is a chance that other I/O occured on the same
8236 * range of data while we were performing this RPC. If that
8237 * happens, then it's possible the data we have in the READ
8238 * response is no longer up to date.
8239 * Once we have the node and the buffer, we need to make sure
8240 * that there's no chance we could be putting stale data in
8241 * the buffer.
8242 * So, we check if the range read is dirty or if any I/O may
8243 * have occured on it while we were performing our RPC.
8244 */
8245 struct nfsbuf *bp = NULL;
8246 int lastpg;
8247 uint32_t pagemask;
8248
8249 retlen = MIN(retlen, rlen);
8250
8251 /* check if node needs size update or invalidation */
0a7de745 8252 if (ISSET(anp->n_flag, NUPDATESIZE)) {
6d2010ae 8253 nfs_data_update_size(anp, 0);
0a7de745 8254 }
6d2010ae
A
8255 if (!(error = nfs_node_lock(anp))) {
8256 if (anp->n_flag & NNEEDINVALIDATE) {
8257 anp->n_flag &= ~NNEEDINVALIDATE;
8258 nfs_node_unlock(anp);
0a7de745
A
8259 error = nfs_vinvalbuf(NFSTOV(anp), V_SAVE | V_IGNORE_WRITEERR, ctx, 1);
8260 if (!error) { /* lets play it safe and just drop the data */
6d2010ae 8261 error = EIO;
0a7de745 8262 }
6d2010ae
A
8263 } else {
8264 nfs_node_unlock(anp);
8265 }
8266 }
2d21ac55 8267
6d2010ae
A
8268 /* calculate page mask for the range of data read */
8269 lastpg = (trunc_page_32(retlen) - 1) / PAGE_SIZE;
8270 pagemask = ((1 << (lastpg + 1)) - 1);
8271
0a7de745
A
8272 if (!error) {
8273 error = nfs_buf_get(anp, 0, nmp->nm_biosize, thd, NBLK_READ | NBLK_NOWAIT, &bp);
8274 }
6d2010ae
A
8275 /* don't save the data if dirty or potential I/O conflict */
8276 if (!error && bp && !bp->nb_dirtyoff && !(bp->nb_dirty & pagemask) &&
8277 timevalcmp(&anp->n_lastio, &now, <)) {
316670eb 8278 OSAddAtomic64(1, &nfsstats.read_bios);
0a7de745 8279 CLR(bp->nb_flags, (NB_DONE | NB_ASYNC));
6d2010ae
A
8280 SET(bp->nb_flags, NB_READ);
8281 NFS_BUF_MAP(bp);
8282 nfsm_chain_get_opaque(error, &nmrep, retlen, bp->nb_data);
8283 if (error) {
8284 bp->nb_error = error;
8285 SET(bp->nb_flags, NB_ERROR);
8286 } else {
8287 bp->nb_offio = 0;
8288 bp->nb_endio = rlen;
0a7de745 8289 if ((retlen > 0) && (bp->nb_endio < (int)retlen)) {
6d2010ae 8290 bp->nb_endio = retlen;
0a7de745 8291 }
6d2010ae
A
8292 if (eof || (retlen == 0)) {
8293 /* zero out the remaining data (up to EOF) */
8294 off_t rpcrem, eofrem, rem;
8295 rpcrem = (rlen - retlen);
8296 eofrem = anp->n_size - (NBOFF(bp) + retlen);
8297 rem = (rpcrem < eofrem) ? rpcrem : eofrem;
0a7de745 8298 if (rem > 0) {
6d2010ae 8299 bzero(bp->nb_data + retlen, rem);
0a7de745 8300 }
6d2010ae
A
8301 } else if ((retlen < rlen) && !ISSET(bp->nb_flags, NB_ERROR)) {
8302 /* ugh... short read ... just invalidate for now... */
8303 SET(bp->nb_flags, NB_INVAL);
8304 }
8305 }
8306 nfs_buf_read_finish(bp);
8307 microuptime(&anp->n_lastio);
8308 }
0a7de745 8309 if (bp) {
6d2010ae 8310 nfs_buf_release(bp, 1);
0a7de745 8311 }
2d21ac55 8312 }
6d2010ae 8313 error = 0; /* ignore any transient error in processing the prefetch */
2d21ac55 8314 }
6d2010ae
A
8315 if (adnp && !adbusyerror) {
8316 nfs_node_clear_busy(adnp);
8317 adbusyerror = ENOENT;
8318 }
8319 if (!busyerror) {
8320 nfs_node_clear_busy(np);
8321 busyerror = ENOENT;
8322 }
0a7de745 8323 if (adnp) {
6d2010ae 8324 vnode_put(NFSTOV(adnp));
0a7de745 8325 }
6d2010ae
A
8326 if (error && *anpp) {
8327 vnode_put(NFSTOV(*anpp));
8328 *anpp = NULL;
8329 }
8330 nfsm_chain_cleanup(&nmreq);
8331 nfsm_chain_cleanup(&nmrep);
0a7de745 8332 return error;
6d2010ae
A
8333}
8334
8335/*
8336 * Remove a named attribute.
8337 */
int
nfs4_named_attr_remove(nfsnode_t np, nfsnode_t anp, const char *name, vfs_context_t ctx)
{
	nfsnode_t adnp = NULL;          /* named-attribute directory node for np */
	struct nfsmount *nmp;
	struct componentname cn;
	struct vnop_remove_args vra;
	int error, putanp = 0;          /* putanp: we obtained a ref on anp here and must drop it */

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	/* Build a DELETE componentname for the attribute's name. */
	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
	cn.cn_namelen = strlen(name);
	cn.cn_nameiop = DELETE;
	cn.cn_flags = 0;

	if (!anp) {
		/*
		 * Caller didn't hand us the attribute node — look it up
		 * without opening it (ACCESS_NONE, no flags).
		 */
		error = nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_NONE,
		    0, ctx, &anp, NULL);
		/* treat "not found" uniformly as ENOENT */
		if ((!error && !anp) || (error == ENOATTR)) {
			error = ENOENT;
		}
		if (error) {
			if (anp) {
				vnode_put(NFSTOV(anp));
				anp = NULL;
			}
			goto out;
		}
		putanp = 1;     /* we now own a reference on anp */
	}

	/* busy np only around fetching its attribute directory */
	if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
		goto out;
	}
	adnp = nfs4_named_attr_dir_get(np, 1, ctx);
	nfs_node_clear_busy(np);
	if (!adnp) {
		error = ENOENT;
		goto out;
	}

	/*
	 * The removal itself is an ordinary VNOP_REMOVE of the attribute
	 * "file" out of the (hidden) attribute directory.
	 */
	vra.a_desc = &vnop_remove_desc;
	vra.a_dvp = NFSTOV(adnp);
	vra.a_vp = NFSTOV(anp);
	vra.a_cnp = &cn;
	vra.a_flags = 0;
	vra.a_context = ctx;
	error = nfs_vnop_remove(&vra);
out:
	if (adnp) {
		vnode_put(NFSTOV(adnp));
	}
	if (putanp) {
		vnode_put(NFSTOV(anp));
	}
	return error;
}
8400
8401int
6d2010ae
A
8402nfs4_vnop_getxattr(
8403 struct vnop_getxattr_args /* {
0a7de745
A
8404 * struct vnodeop_desc *a_desc;
8405 * vnode_t a_vp;
8406 * const char * a_name;
8407 * uio_t a_uio;
8408 * size_t *a_size;
8409 * int a_options;
8410 * vfs_context_t a_context;
8411 * } */*ap)
2d21ac55 8412{
6d2010ae 8413 vfs_context_t ctx = ap->a_context;
2d21ac55 8414 struct nfsmount *nmp;
6d2010ae
A
8415 struct nfs_vattr nvattr;
8416 struct componentname cn;
8417 nfsnode_t anp;
8418 int error = 0, isrsrcfork;
2d21ac55 8419
6d2010ae 8420 nmp = VTONMP(ap->a_vp);
0a7de745
A
8421 if (nfs_mount_gone(nmp)) {
8422 return ENXIO;
8423 }
2d21ac55 8424
0a7de745
A
8425 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8426 return ENOTSUP;
8427 }
6d2010ae 8428 error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED);
0a7de745
A
8429 if (error) {
8430 return error;
8431 }
6d2010ae 8432 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
0a7de745
A
8433 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
8434 return ENOATTR;
8435 }
6d2010ae
A
8436
8437 bzero(&cn, sizeof(cn));
8438 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8439 cn.cn_namelen = strlen(ap->a_name);
8440 cn.cn_nameiop = LOOKUP;
8441 cn.cn_flags = MAKEENTRY;
8442
8443 /* we'll normally try to prefetch data for xattrs... the resource fork is really a stream */
8444 isrsrcfork = (bcmp(ap->a_name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
8445
8446 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
0a7de745
A
8447 !isrsrcfork ? NFS_GET_NAMED_ATTR_PREFETCH : 0, ctx, &anp, NULL);
8448 if ((!error && !anp) || (error == ENOENT)) {
6d2010ae 8449 error = ENOATTR;
0a7de745 8450 }
6d2010ae 8451 if (!error) {
0a7de745 8452 if (ap->a_uio) {
6d2010ae 8453 error = nfs_bioread(anp, ap->a_uio, 0, ctx);
0a7de745 8454 } else {
6d2010ae 8455 *ap->a_size = anp->n_size;
0a7de745 8456 }
2d21ac55 8457 }
0a7de745 8458 if (anp) {
6d2010ae 8459 vnode_put(NFSTOV(anp));
0a7de745
A
8460 }
8461 return error;
6d2010ae 8462}
2d21ac55 8463
6d2010ae
A
/*
 * NFSv4 setxattr: create/open the named attribute "file", write the data,
 * flush, then close.  FinderInfo is special-cased: an all-zero FinderInfo
 * really means "remove the FinderInfo attribute".
 */
int
nfs4_vnop_setxattr(
	struct vnop_setxattr_args /* {
	                           *  struct vnodeop_desc *a_desc;
	                           *  vnode_t a_vp;
	                           *  const char * a_name;
	                           *  uio_t a_uio;
	                           *  int a_options;
	                           *  vfs_context_t a_context;
	                           *  } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	int options = ap->a_options;
	uio_t uio = ap->a_uio;
	const char *name = ap->a_name;
	struct nfsmount *nmp;
	struct componentname cn;
	nfsnode_t anp = NULL;           /* the named attribute's node */
	int error = 0, closeerror = 0, flags, isrsrcfork, isfinderinfo, empty = 0, i;
#define FINDERINFOSIZE 32
	uint8_t finfo[FINDERINFOSIZE];  /* local copy of FinderInfo payload */
	uint32_t *finfop;
	struct nfs_open_file *nofp = NULL;      /* open state from the create/open */
	char uio_buf[UIO_SIZEOF(1)];
	uio_t auio;
	struct vnop_write_args vwa;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
		return ENOTSUP;
	}

	/* XATTR_CREATE and XATTR_REPLACE are mutually exclusive */
	if ((options & XATTR_CREATE) && (options & XATTR_REPLACE)) {
		return EINVAL;
	}

	/* XXX limitation based on need to back up uio on short write */
	if (uio_iovcnt(uio) > 1) {
		printf("nfs4_vnop_setxattr: iovcnt > 1\n");
		return EINVAL;
	}

	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
	cn.cn_namelen = strlen(name);
	cn.cn_nameiop = CREATE;
	cn.cn_flags = MAKEENTRY;

	isfinderinfo = (bcmp(name, XATTR_FINDERINFO_NAME, sizeof(XATTR_FINDERINFO_NAME)) == 0);
	isrsrcfork = isfinderinfo ? 0 : (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
	/* all xattrs except the resource fork are written from offset 0 */
	if (!isrsrcfork) {
		uio_setoffset(uio, 0);
	}
	if (isfinderinfo) {
		/* FinderInfo must be exactly FINDERINFOSIZE bytes */
		if (uio_resid(uio) != sizeof(finfo)) {
			return ERANGE;
		}
		error = uiomove((char*)&finfo, sizeof(finfo), uio);
		if (error) {
			return error;
		}
		/* setting a FinderInfo of all zeroes means remove the FinderInfo */
		empty = 1;
		for (i = 0, finfop = (uint32_t*)&finfo; i < (int)(sizeof(finfo) / sizeof(uint32_t)); i++) {
			if (finfop[i]) {
				empty = 0;
				break;
			}
		}
		if (empty && !(options & (XATTR_CREATE | XATTR_REPLACE))) {
			/* plain "set to zeroes" => remove; missing attr is not an error */
			error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
			if (error == ENOENT) {
				error = 0;
			}
			return error;
		}
		/* first, let's see if we get a create/replace error */
	}

	/*
	 * create/open the xattr
	 *
	 * We need to make sure not to create it if XATTR_REPLACE.
	 * For all xattrs except the resource fork, we also want to
	 * truncate the xattr to remove any current data.  We'll do
	 * that by setting the size to 0 on create/open.
	 */
	flags = 0;
	if (!(options & XATTR_REPLACE)) {
		flags |= NFS_GET_NAMED_ATTR_CREATE;
	}
	if (options & XATTR_CREATE) {
		flags |= NFS_GET_NAMED_ATTR_CREATE_GUARDED;
	}
	if (!isrsrcfork) {
		flags |= NFS_GET_NAMED_ATTR_TRUNCATE;
	}

	error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
	    flags, ctx, &anp, &nofp);
	if (!error && !anp) {
		error = ENOATTR;
	}
	if (error) {
		goto out;
	}
	/* grab the open state from the get/create/open */
	if (nofp && !(error = nfs_open_file_set_busy(nofp, NULL))) {
		nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
		nofp->nof_creator = NULL;
		nfs_open_file_clear_busy(nofp);
	}

	/* Setting an empty FinderInfo really means remove it, skip to the close/remove */
	if (isfinderinfo && empty) {
		goto doclose;
	}

	/*
	 * Write the data out and flush.
	 *
	 * For FinderInfo, we've already copied the data to finfo, so do I/O from there.
	 */
	vwa.a_desc = &vnop_write_desc;
	vwa.a_vp = NFSTOV(anp);
	vwa.a_uio = NULL;
	vwa.a_ioflag = 0;
	vwa.a_context = ctx;
	if (isfinderinfo) {
		auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE, &uio_buf, sizeof(uio_buf));
		uio_addiov(auio, (uintptr_t)&finfo, sizeof(finfo));
		vwa.a_uio = auio;
	} else if (uio_resid(uio) > 0) {
		vwa.a_uio = uio;
	}
	if (vwa.a_uio) {
		error = nfs_vnop_write(&vwa);
		if (!error) {
			error = nfs_flush(anp, MNT_WAIT, vfs_context_thread(ctx), 0);
		}
	}
doclose:
	/* Close the xattr. */
	if (nofp) {
		int busyerror = nfs_open_file_set_busy(nofp, NULL);
		closeerror = nfs_close(anp, nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, ctx);
		if (!busyerror) {
			nfs_open_file_clear_busy(nofp);
		}
	}
	if (!error && isfinderinfo && empty) { /* Setting an empty FinderInfo really means remove it */
		error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
		if (error == ENOENT) {
			error = 0;
		}
	}
	/* a write/remove error takes precedence over a close error */
	if (!error) {
		error = closeerror;
	}
out:
	if (anp) {
		vnode_put(NFSTOV(anp));
	}
	/* xattr API reports a missing attribute as ENOATTR, not ENOENT */
	if (error == ENOENT) {
		error = ENOATTR;
	}
	return error;
}
8636
8637int
6d2010ae
A
8638nfs4_vnop_removexattr(
8639 struct vnop_removexattr_args /* {
0a7de745
A
8640 * struct vnodeop_desc *a_desc;
8641 * vnode_t a_vp;
8642 * const char * a_name;
8643 * int a_options;
8644 * vfs_context_t a_context;
8645 * } */*ap)
2d21ac55 8646{
6d2010ae 8647 struct nfsmount *nmp = VTONMP(ap->a_vp);
2d21ac55
A
8648 int error;
8649
0a7de745
A
8650 if (nfs_mount_gone(nmp)) {
8651 return ENXIO;
8652 }
8653 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8654 return ENOTSUP;
8655 }
6d2010ae
A
8656
8657 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), NULL, ap->a_name, ap->a_context);
0a7de745 8658 if (error == ENOENT) {
6d2010ae 8659 error = ENOATTR;
0a7de745
A
8660 }
8661 return error;
2d21ac55
A
8662}
8663
/*
 * NFSv4 listxattr: enumerate the named attribute directory and copy out
 * (or just count) the attribute names as a NUL-separated list.
 */
int
nfs4_vnop_listxattr(
	struct vnop_listxattr_args /* {
	                            *  struct vnodeop_desc *a_desc;
	                            *  vnode_t a_vp;
	                            *  uio_t a_uio;
	                            *  size_t *a_size;
	                            *  int a_options;
	                            *  vfs_context_t a_context;
	                            *  } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	nfsnode_t np = VTONFS(ap->a_vp);
	uio_t uio = ap->a_uio;
	nfsnode_t adnp = NULL;          /* named-attribute directory node */
	struct nfsmount *nmp;
	int error, done, i;
	struct nfs_vattr nvattr;
	uint64_t cookie, nextcookie, lbn = 0;   /* readdir cookies / dir buffer block */
	struct nfsbuf *bp = NULL;
	struct nfs_dir_buf_header *ndbhp;
	struct direntry *dp;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
		return ENOTSUP;
	}

	/* if cached attributes say there are no named attrs, return an empty list */
	error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED);
	if (error) {
		return error;
	}
	if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
	    !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
		return 0;
	}

	/* busy np only while fetching its attribute directory */
	if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
		return error;
	}
	adnp = nfs4_named_attr_dir_get(np, 1, ctx);
	nfs_node_clear_busy(np);
	if (!adnp) {
		/* no attribute directory: nothing to list (error is 0 here) */
		goto out;
	}

	if ((error = nfs_node_lock(adnp))) {
		goto out;
	}

	/* invalidate the directory's buffers if it was flagged for invalidation */
	if (adnp->n_flag & NNEEDINVALIDATE) {
		adnp->n_flag &= ~NNEEDINVALIDATE;
		nfs_invaldir(adnp);
		nfs_node_unlock(adnp);
		error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
		if (!error) {
			error = nfs_node_lock(adnp);
		}
		if (error) {
			goto out;
		}
	}

	/*
	 * check for need to invalidate when (re)starting at beginning
	 */
	if (adnp->n_flag & NMODIFIED) {
		nfs_invaldir(adnp);
		nfs_node_unlock(adnp);
		if ((error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1))) {
			goto out;
		}
	} else {
		nfs_node_unlock(adnp);
	}
	/* nfs_getattr() will check changed and purge caches */
	if ((error = nfs_getattr(adnp, &nvattr, ctx, NGA_UNCACHED))) {
		goto out;
	}

	/* caller supplied a zero-length buffer: nothing to copy */
	if (uio && (uio_resid(uio) == 0)) {
		goto out;
	}

	done = 0;
	nextcookie = lbn = 0;

	/* walk the directory one buffer at a time until EOF or error */
	while (!error && !done) {
		OSAddAtomic64(1, &nfsstats.biocache_readdirs);
		cookie = nextcookie;
getbuffer:
		error = nfs_buf_get(adnp, lbn, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
		if (error) {
			goto out;
		}
		ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
		if (!ISSET(bp->nb_flags, NB_CACHE) || !ISSET(ndbhp->ndbh_flags, NDB_FULL)) {
			if (!ISSET(bp->nb_flags, NB_CACHE)) { /* initialize the buffer */
				ndbhp->ndbh_flags = 0;
				ndbhp->ndbh_count = 0;
				ndbhp->ndbh_entry_end = sizeof(*ndbhp);
				ndbhp->ndbh_ncgen = adnp->n_ncgen;
			}
			error = nfs_buf_readdir(bp, ctx);
			/* buffer was dropped while reading: retry with a fresh one */
			if (error == NFSERR_DIRBUFDROPPED) {
				goto getbuffer;
			}
			if (error) {
				nfs_buf_release(bp, 1);
			}
			/* on "hard" errors, invalidate our cached view of the directory */
			if (error && (error != ENXIO) && (error != ETIMEDOUT) && (error != EINTR) && (error != ERESTART)) {
				if (!nfs_node_lock(adnp)) {
					nfs_invaldir(adnp);
					nfs_node_unlock(adnp);
				}
				nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
				if (error == NFSERR_BAD_COOKIE) {
					error = ENOENT;
				}
			}
			if (error) {
				goto out;
			}
		}

		/* go through all the entries copying/counting */
		dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
		for (i = 0; i < ndbhp->ndbh_count; i++) {
			/* skip names the xattr layer hides (e.g. system-protected names) */
			if (!xattr_protected(dp->d_name)) {
				if (uio == NULL) {
					/* size-only query: account for name plus NUL */
					*ap->a_size += dp->d_namlen + 1;
				} else if (uio_resid(uio) < (dp->d_namlen + 1)) {
					error = ERANGE;
				} else {
					error = uiomove(dp->d_name, dp->d_namlen + 1, uio);
					if (error && (error != EFAULT)) {
						error = ERANGE;
					}
				}
			}
			nextcookie = dp->d_seekoff;
			dp = NFS_DIRENTRY_NEXT(dp);
		}

		if (i == ndbhp->ndbh_count) {
			/* hit end of buffer, move to next buffer */
			lbn = nextcookie;
			/* if we also hit EOF, we're done */
			if (ISSET(ndbhp->ndbh_flags, NDB_EOF)) {
				done = 1;
			}
		}
		/* a stuck cookie would loop forever — bail out instead */
		if (!error && !done && (nextcookie == cookie)) {
			printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie, i, ndbhp->ndbh_count);
			error = EIO;
		}
		nfs_buf_release(bp, 1);
	}
out:
	if (adnp) {
		vnode_put(NFSTOV(adnp));
	}
	return error;
}
8832
6d2010ae 8833#if NAMEDSTREAMS
2d21ac55 8834int
6d2010ae
A
8835nfs4_vnop_getnamedstream(
8836 struct vnop_getnamedstream_args /* {
0a7de745
A
8837 * struct vnodeop_desc *a_desc;
8838 * vnode_t a_vp;
8839 * vnode_t *a_svpp;
8840 * const char *a_name;
8841 * enum nsoperation a_operation;
8842 * int a_flags;
8843 * vfs_context_t a_context;
8844 * } */*ap)
2d21ac55
A
8845{
8846 vfs_context_t ctx = ap->a_context;
2d21ac55 8847 struct nfsmount *nmp;
6d2010ae
A
8848 struct nfs_vattr nvattr;
8849 struct componentname cn;
8850 nfsnode_t anp;
8851 int error = 0;
2d21ac55 8852
6d2010ae 8853 nmp = VTONMP(ap->a_vp);
0a7de745
A
8854 if (nfs_mount_gone(nmp)) {
8855 return ENXIO;
8856 }
2d21ac55 8857
0a7de745
A
8858 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8859 return ENOTSUP;
8860 }
6d2010ae 8861 error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED);
0a7de745
A
8862 if (error) {
8863 return error;
8864 }
6d2010ae 8865 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
0a7de745
A
8866 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
8867 return ENOATTR;
8868 }
2d21ac55 8869
6d2010ae
A
8870 bzero(&cn, sizeof(cn));
8871 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8872 cn.cn_namelen = strlen(ap->a_name);
8873 cn.cn_nameiop = LOOKUP;
8874 cn.cn_flags = MAKEENTRY;
8875
8876 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
0a7de745
A
8877 0, ctx, &anp, NULL);
8878 if ((!error && !anp) || (error == ENOENT)) {
6d2010ae 8879 error = ENOATTR;
0a7de745
A
8880 }
8881 if (!error && anp) {
6d2010ae 8882 *ap->a_svpp = NFSTOV(anp);
0a7de745 8883 } else if (anp) {
6d2010ae 8884 vnode_put(NFSTOV(anp));
0a7de745
A
8885 }
8886 return error;
2d21ac55
A
8887}
8888
8889int
6d2010ae
A
8890nfs4_vnop_makenamedstream(
8891 struct vnop_makenamedstream_args /* {
0a7de745
A
8892 * struct vnodeop_desc *a_desc;
8893 * vnode_t *a_svpp;
8894 * vnode_t a_vp;
8895 * const char *a_name;
8896 * int a_flags;
8897 * vfs_context_t a_context;
8898 * } */*ap)
2d21ac55
A
8899{
8900 vfs_context_t ctx = ap->a_context;
6d2010ae
A
8901 struct nfsmount *nmp;
8902 struct componentname cn;
8903 nfsnode_t anp;
2d21ac55 8904 int error = 0;
2d21ac55 8905
6d2010ae 8906 nmp = VTONMP(ap->a_vp);
0a7de745
A
8907 if (nfs_mount_gone(nmp)) {
8908 return ENXIO;
8909 }
2d21ac55 8910
0a7de745
A
8911 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8912 return ENOTSUP;
8913 }
2d21ac55 8914
6d2010ae
A
8915 bzero(&cn, sizeof(cn));
8916 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8917 cn.cn_namelen = strlen(ap->a_name);
8918 cn.cn_nameiop = CREATE;
8919 cn.cn_flags = MAKEENTRY;
8920
8921 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
0a7de745
A
8922 NFS_GET_NAMED_ATTR_CREATE, ctx, &anp, NULL);
8923 if ((!error && !anp) || (error == ENOENT)) {
6d2010ae 8924 error = ENOATTR;
0a7de745
A
8925 }
8926 if (!error && anp) {
6d2010ae 8927 *ap->a_svpp = NFSTOV(anp);
0a7de745 8928 } else if (anp) {
6d2010ae 8929 vnode_put(NFSTOV(anp));
0a7de745
A
8930 }
8931 return error;
6d2010ae 8932}
2d21ac55 8933
6d2010ae
A
8934int
8935nfs4_vnop_removenamedstream(
8936 struct vnop_removenamedstream_args /* {
0a7de745
A
8937 * struct vnodeop_desc *a_desc;
8938 * vnode_t a_vp;
8939 * vnode_t a_svp;
8940 * const char *a_name;
8941 * int a_flags;
8942 * vfs_context_t a_context;
8943 * } */*ap)
6d2010ae
A
8944{
8945 struct nfsmount *nmp = VTONMP(ap->a_vp);
8946 nfsnode_t np = ap->a_vp ? VTONFS(ap->a_vp) : NULL;
8947 nfsnode_t anp = ap->a_svp ? VTONFS(ap->a_svp) : NULL;
2d21ac55 8948
0a7de745
A
8949 if (nfs_mount_gone(nmp)) {
8950 return ENXIO;
8951 }
2d21ac55
A
8952
8953 /*
6d2010ae
A
8954 * Given that a_svp is a named stream, checking for
8955 * named attribute support is kinda pointless.
2d21ac55 8956 */
0a7de745
A
8957 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8958 return ENOTSUP;
8959 }
6d2010ae 8960
0a7de745 8961 return nfs4_named_attr_remove(np, anp, ap->a_name, ap->a_context);
2d21ac55
A
8962}
8963
6d2010ae 8964#endif
cb323159 8965#endif /* CONFIG_NFS4 */
ea3f0419
A
8966
8967#endif /* CONFIG_NFS_CLIENT */