]> git.saurik.com Git - apple/xnu.git/blame - bsd/nfs/nfs4_vnops.c
xnu-6153.81.5.tar.gz
[apple/xnu.git] / bsd / nfs / nfs4_vnops.c
CommitLineData
2d21ac55 1/*
cb323159 2 * Copyright (c) 2006-2019 Apple Inc. All rights reserved.
2d21ac55
A
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
2d21ac55
A
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/*
30 * vnode op calls for NFS version 4
31 */
32#include <sys/param.h>
33#include <sys/kernel.h>
34#include <sys/systm.h>
35#include <sys/resourcevar.h>
36#include <sys/proc_internal.h>
37#include <sys/kauth.h>
38#include <sys/mount_internal.h>
39#include <sys/malloc.h>
40#include <sys/kpi_mbuf.h>
41#include <sys/conf.h>
42#include <sys/vnode_internal.h>
43#include <sys/dirent.h>
44#include <sys/fcntl.h>
45#include <sys/lockf.h>
46#include <sys/ubc_internal.h>
47#include <sys/attr.h>
48#include <sys/signalvar.h>
6d2010ae
A
49#include <sys/uio_internal.h>
50#include <sys/xattr.h>
51#include <sys/paths.h>
2d21ac55
A
52
53#include <vfs/vfs_support.h>
54
55#include <sys/vm.h>
56
57#include <sys/time.h>
58#include <kern/clock.h>
59#include <libkern/OSAtomic.h>
60
61#include <miscfs/fifofs/fifo.h>
62#include <miscfs/specfs/specdev.h>
63
64#include <nfs/rpcv2.h>
65#include <nfs/nfsproto.h>
66#include <nfs/nfs.h>
67#include <nfs/nfsnode.h>
68#include <nfs/nfs_gss.h>
69#include <nfs/nfsmount.h>
70#include <nfs/nfs_lock.h>
71#include <nfs/xdr_subs.h>
72#include <nfs/nfsm_subs.h>
73
74#include <net/if.h>
75#include <netinet/in.h>
76#include <netinet/in_var.h>
77#include <vm/vm_kern.h>
78
79#include <kern/task.h>
80#include <kern/sched_prim.h>
81
cb323159 82#if CONFIG_NFS4
2d21ac55 83int
fe8ab488 84nfs4_access_rpc(nfsnode_t np, u_int32_t *access, int rpcflags, vfs_context_t ctx)
2d21ac55 85{
b0d623f7 86 int error = 0, lockerror = ENOENT, status, numops, slot;
2d21ac55
A
87 u_int64_t xid;
88 struct nfsm_chain nmreq, nmrep;
89 struct timeval now;
6d2010ae 90 uint32_t access_result = 0, supported = 0, missing;
2d21ac55
A
91 struct nfsmount *nmp = NFSTONMP(np);
92 int nfsvers = nmp->nm_vers;
93 uid_t uid;
6d2010ae 94 struct nfsreq_secinfo_args si;
2d21ac55 95
0a7de745
A
96 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
97 return 0;
98 }
6d2010ae
A
99
100 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
2d21ac55
A
101 nfsm_chain_null(&nmreq);
102 nfsm_chain_null(&nmrep);
103
b0d623f7
A
104 // PUTFH, ACCESS, GETATTR
105 numops = 3;
2d21ac55 106 nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED);
3e170ce0 107 nfsm_chain_add_compound_header(error, &nmreq, "access", nmp->nm_minor_vers, numops);
2d21ac55
A
108 numops--;
109 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
110 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
111 numops--;
112 nfsm_chain_add_32(error, &nmreq, NFS_OP_ACCESS);
6d2010ae 113 nfsm_chain_add_32(error, &nmreq, *access);
2d21ac55
A
114 numops--;
115 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 116 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
2d21ac55
A
117 nfsm_chain_build_done(error, &nmreq);
118 nfsm_assert(error, (numops == 0), EPROTO);
119 nfsmout_if(error);
fe8ab488 120 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
0a7de745
A
121 vfs_context_thread(ctx), vfs_context_ucred(ctx),
122 &si, rpcflags, &nmrep, &xid, &status);
2d21ac55 123
0a7de745 124 if ((lockerror = nfs_node_lock(np))) {
b0d623f7 125 error = lockerror;
0a7de745 126 }
2d21ac55
A
127 nfsm_chain_skip_tag(error, &nmrep);
128 nfsm_chain_get_32(error, &nmrep, numops);
129 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
130 nfsm_chain_op_check(error, &nmrep, NFS_OP_ACCESS);
131 nfsm_chain_get_32(error, &nmrep, supported);
6d2010ae 132 nfsm_chain_get_32(error, &nmrep, access_result);
2d21ac55 133 nfsmout_if(error);
6d2010ae 134 if ((missing = (*access & ~supported))) {
2d21ac55
A
135 /* missing support for something(s) we wanted */
136 if (missing & NFS_ACCESS_DELETE) {
137 /*
138 * If the server doesn't report DELETE (possible
139 * on UNIX systems), we'll assume that it is OK
140 * and just let any subsequent delete action fail
141 * if it really isn't deletable.
142 */
6d2010ae 143 access_result |= NFS_ACCESS_DELETE;
2d21ac55
A
144 }
145 }
6d2010ae
A
146 /* ".zfs" subdirectories may erroneously give a denied answer for modify/delete */
147 if (nfs_access_dotzfs) {
148 vnode_t dvp = NULLVP;
0a7de745
A
149 if (np->n_flag & NISDOTZFSCHILD) { /* may be able to create/delete snapshot dirs */
150 access_result |= (NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE);
151 } else if (((dvp = vnode_getparent(NFSTOV(np))) != NULLVP) && (VTONFS(dvp)->n_flag & NISDOTZFSCHILD)) {
6d2010ae 152 access_result |= NFS_ACCESS_DELETE; /* may be able to delete snapshot dirs */
0a7de745
A
153 }
154 if (dvp != NULLVP) {
6d2010ae 155 vnode_put(dvp);
0a7de745 156 }
6d2010ae 157 }
b0d623f7 158 /* Some servers report DELETE support but erroneously give a denied answer. */
0a7de745 159 if (nfs_access_delete && (*access & NFS_ACCESS_DELETE) && !(access_result & NFS_ACCESS_DELETE)) {
6d2010ae 160 access_result |= NFS_ACCESS_DELETE;
0a7de745 161 }
2d21ac55 162 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 163 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
2d21ac55
A
164 nfsmout_if(error);
165
813fb2f6
A
166 if (nfs_mount_gone(nmp)) {
167 error = ENXIO;
168 }
169 nfsmout_if(error);
170
171 if (auth_is_kerberized(np->n_auth) || auth_is_kerberized(nmp->nm_auth)) {
172 uid = nfs_cred_getasid2uid(vfs_context_ucred(ctx));
173 } else {
174 uid = kauth_cred_getuid(vfs_context_ucred(ctx));
175 }
6d2010ae
A
176 slot = nfs_node_access_slot(np, uid, 1);
177 np->n_accessuid[slot] = uid;
2d21ac55 178 microuptime(&now);
6d2010ae
A
179 np->n_accessstamp[slot] = now.tv_sec;
180 np->n_access[slot] = access_result;
2d21ac55 181
6d2010ae
A
182 /* pass back the access returned with this request */
183 *access = np->n_access[slot];
2d21ac55 184nfsmout:
0a7de745 185 if (!lockerror) {
b0d623f7 186 nfs_node_unlock(np);
0a7de745 187 }
2d21ac55
A
188 nfsm_chain_cleanup(&nmreq);
189 nfsm_chain_cleanup(&nmrep);
0a7de745 190 return error;
2d21ac55
A
191}
192
/*
 * NFSv4 getattr RPC: fetch attributes for the given file handle via a
 * PUTFH + GETATTR compound and parse them into *nvap.
 *
 * np may be NULL; then mp must identify the mount.  flags (NGA_*) select
 * soft/monitor RPC semantics and whether to also request the ACL.
 * *xidp returns the transaction ID of the reply.  Returns 0 or an errno.
 */
int
nfs4_getattr_rpc(
	nfsnode_t np,
	mount_t mp,
	u_char *fhp,
	size_t fhsize,
	int flags,
	vfs_context_t ctx,
	struct nfs_vattr *nvap,
	u_int64_t *xidp)
{
	struct nfsmount *nmp = mp ? VFSTONFS(mp) : NFSTONMP(np);
	int error = 0, status, nfsvers, numops, rpcflags = 0, acls;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	/* only bother asking for ACLs if the file system claims to support them */
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	if (np && (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)) {
		/* referral trigger: fabricate default attributes locally, no RPC */
		nfs4_default_attrs_for_referral_trigger(VTONFS(np->n_parent), NULL, 0, nvap, NULL);
		return 0;
	}

	if (flags & NGA_MONITOR) { /* vnode monitor requests should be soft */
		rpcflags = R_RECOVER;
	}

	if (flags & NGA_SOFT) { /* Return ETIMEDOUT if server not responding */
		rpcflags |= R_SOFT;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "getattr", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fhp, fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	if ((flags & NGA_ACL) && acls) {
		NFS_BITMAP_SET(bitmap, NFS_FATTR_ACL);
	}
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, mp, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx),
	    NULL, rpcflags, &nmrep, xidp, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((flags & NGA_ACL) && acls && !NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL)) {
		/* we asked for the ACL but didn't get one... assume there isn't one */
		NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_ACL);
		nvap->nva_acl = NULL;
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
271
/*
 * NFSv4 readlink RPC: fetch a symlink's target into buf via a
 * PUTFH + GETATTR + READLINK compound.
 *
 * On input *buflenp is the size of buf; on success it is updated to the
 * number of bytes stored (the target is clamped, not NUL-terminated by
 * the wire format).  Returns 0 or an errno.
 */
int
nfs4_readlink_rpc(nfsnode_t np, char *buf, uint32_t *buflenp, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, status, numops;
	uint32_t len = 0;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		/* referral triggers have no real link to read */
		return EINVAL;
	}
	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, READLINK
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "readlink", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READLINK);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	/* node lock protects the attribute/size fields consulted below */
	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READLINK);
	nfsm_chain_get_32(error, &nmrep, len);
	nfsmout_if(error);
	if (len >= *buflenp) {
		/* clamp an oversized reply to what the caller's buffer can hold */
		if (np->n_size && (np->n_size < *buflenp)) {
			len = np->n_size;
		} else {
			len = *buflenp - 1;
		}
	}
	nfsm_chain_get_opaque(error, &nmrep, len, buf);
	if (!error) {
		*buflenp = len;
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
340
341int
342nfs4_read_rpc_async(
343 nfsnode_t np,
344 off_t offset,
345 size_t len,
346 thread_t thd,
347 kauth_cred_t cred,
348 struct nfsreq_cbinfo *cb,
349 struct nfsreq **reqp)
350{
351 struct nfsmount *nmp;
352 int error = 0, nfsvers, numops;
b0d623f7 353 nfs_stateid stateid;
2d21ac55 354 struct nfsm_chain nmreq;
6d2010ae 355 struct nfsreq_secinfo_args si;
2d21ac55
A
356
357 nmp = NFSTONMP(np);
0a7de745
A
358 if (nfs_mount_gone(nmp)) {
359 return ENXIO;
360 }
2d21ac55 361 nfsvers = nmp->nm_vers;
0a7de745
A
362 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
363 return EINVAL;
364 }
2d21ac55 365
6d2010ae 366 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
2d21ac55
A
367 nfsm_chain_null(&nmreq);
368
b0d623f7 369 // PUTFH, READ, GETATTR
2d21ac55
A
370 numops = 3;
371 nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
3e170ce0 372 nfsm_chain_add_compound_header(error, &nmreq, "read", nmp->nm_minor_vers, numops);
2d21ac55
A
373 numops--;
374 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
375 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
376 numops--;
377 nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
b0d623f7
A
378 nfs_get_stateid(np, thd, cred, &stateid);
379 nfsm_chain_add_stateid(error, &nmreq, &stateid);
2d21ac55
A
380 nfsm_chain_add_64(error, &nmreq, offset);
381 nfsm_chain_add_32(error, &nmreq, len);
382 numops--;
383 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 384 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
2d21ac55
A
385 nfsm_chain_build_done(error, &nmreq);
386 nfsm_assert(error, (numops == 0), EPROTO);
387 nfsmout_if(error);
6d2010ae 388 error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
2d21ac55
A
389nfsmout:
390 nfsm_chain_cleanup(&nmreq);
0a7de745 391 return error;
2d21ac55
A
392}
393
/*
 * Collect the reply for an async NFSv4 READ started by
 * nfs4_read_rpc_async(): parse the compound, copy the data into uio,
 * and report length/EOF back to the caller.
 *
 * On input *lenp is the requested length; on return it holds the number
 * of bytes actually transferred.  *eofp (if non-NULL) reports EOF.
 * Returns 0, EINPROGRESS if the async request was restarted, or an errno.
 */
int
nfs4_read_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	uio_t uio,
	size_t *lenp,
	int *eofp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, nfsvers, numops, status, eof = 0;
	size_t retlen = 0;
	u_int64_t xid;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		/* mount is gone: drop the in-flight request before bailing */
		nfs_request_async_cancel(req);
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) { /* async request restarted */
		return error;
	}

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
	nfsm_chain_get_32(error, &nmrep, eof);
	nfsm_chain_get_32(error, &nmrep, retlen);
	if (!error) {
		/* never copy more than the caller asked for, even if the server sent more */
		*lenp = MIN(retlen, *lenp);
		error = nfsm_chain_get_uio(&nmrep, *lenp, uio);
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	if (eofp) {
		/* a zero-byte read with no EOF flag still means EOF to the caller */
		if (!eof && !retlen) {
			eof = 1;
		}
		*eofp = eof;
	}
	nfsm_chain_cleanup(&nmrep);
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) {
		/* track last I/O time on named-attribute nodes */
		microuptime(&np->n_lastio);
	}
	return error;
}
452
/*
 * Build and dispatch an asynchronous NFSv4 WRITE request
 * (PUTFH + WRITE + GETATTR compound) for len bytes taken from uio.
 *
 * iomode is the requested stability (NFS_WRITE_UNSTABLE/DATASYNC/FILESYNC);
 * it may be downgraded to UNSTABLE for MNT_ASYNC mounts.  The request
 * handle is returned through *reqp and completed later by
 * nfs4_write_rpc_async_finish().  Returns 0 or an errno.
 */
int
nfs4_write_rpc_async(
	nfsnode_t np,
	uio_t uio,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	int iomode,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	mount_t mp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		/* referral triggers have no real file to write */
		return EINVAL;
	}

	/* for async mounts, don't bother sending sync write requests */
	if ((iomode != NFS_WRITE_UNSTABLE) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) {
		iomode = NFS_WRITE_UNSTABLE;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, WRITE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 25 * NFSX_UNSIGNED + len);
	nfsm_chain_add_compound_header(error, &nmreq, "write", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_WRITE);
	/* pick the open/lock/delegation stateid appropriate for this I/O */
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, uio_offset(uio));
	nfsm_chain_add_32(error, &nmreq, iomode);
	nfsm_chain_add_32(error, &nmreq, len);
	if (!error) {
		/* append the write payload itself */
		error = nfsm_chain_add_uio(&nmreq, uio, len);
	}
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}
518
/*
 * Collect the reply for an async NFSv4 WRITE started by
 * nfs4_write_rpc_async(): parse the compound, record the write verifier,
 * and report the committed stability level and byte count.
 *
 * *rlenp returns the server-reported byte count, *iomodep the committed
 * stability level, *wverfp (if non-NULL) the write verifier.
 * Returns 0, EINPROGRESS if the async request was restarted, or an errno.
 */
int
nfs4_write_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	int *iomodep,
	size_t *rlenp,
	uint64_t *wverfp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, nfsvers, numops, status;
	int committed = NFS_WRITE_FILESYNC;
	size_t rlen = 0;
	u_int64_t xid, wverf;
	mount_t mp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		/* mount is gone: drop the in-flight request before bailing */
		nfs_request_async_cancel(req);
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) { /* async request restarted */
		return error;
	}
	/* re-check the mount: it may have died while we waited for the reply */
	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		error = ENXIO;
	}
	if (!error && (lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_WRITE);
	nfsm_chain_get_32(error, &nmrep, rlen);
	nfsmout_if(error);
	*rlenp = rlen;
	if (rlen <= 0) {
		/* a successful WRITE must move at least one byte */
		error = NFSERR_IO;
	}
	nfsm_chain_get_32(error, &nmrep, committed);
	nfsm_chain_get_64(error, &nmrep, wverf);
	nfsmout_if(error);
	if (wverfp) {
		*wverfp = wverf;
	}
	/* remember the verifier; a change means the server lost uncommitted writes */
	lck_mtx_lock(&nmp->nm_lock);
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
		nmp->nm_verf = wverf;
		nmp->nm_state |= NFSSTA_HASWRITEVERF;
	} else if (nmp->nm_verf != wverf) {
		nmp->nm_verf = wverf;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmrep);
	/* on async mounts, report unstable writes as already committed */
	if ((committed != NFS_WRITE_FILESYNC) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) {
		committed = NFS_WRITE_FILESYNC;
	}
	*iomodep = committed;
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) {
		/* track last I/O time on named-attribute nodes */
		microuptime(&np->n_lastio);
	}
	return error;
}
596
/*
 * NFSv4 remove RPC: delete the named entry from directory dnp via a
 * PUTFH + REMOVE + GETATTR compound.
 *
 * Retries while the server is in its grace period (NFSERR_GRACE).
 * Note that the status of the REMOVE op itself (remove_error) is what
 * is returned to the caller; later attribute-parsing errors only cause
 * the directory attributes to be invalidated.
 */
int
nfs4_remove_rpc(
	nfsnode_t dnp,
	char *name,
	int namelen,
	thread_t thd,
	kauth_cred_t cred)
{
	int error = 0, lockerror = ENOENT, remove_error = 0, status;
	struct nfsmount *nmp;
	int nfsvers, numops;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
restart:
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, REMOVE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "remove", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_REMOVE);
	nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_REMOVE);
	/* capture the REMOVE op status before parsing the rest of the reply */
	remove_error = error;
	nfsm_chain_check_change_info(error, &nmrep, dnp);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
	if (error && !lockerror) {
		/* couldn't refresh the directory attributes: force a re-fetch */
		NATTRINVALIDATE(dnp);
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	if (!lockerror) {
		dnp->n_flag |= NMODIFIED;
		nfs_node_unlock(dnp);
	}
	if (error == NFSERR_GRACE) {
		/* server in grace period: wait a couple seconds and retry */
		tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
		goto restart;
	}

	return remove_error;
}
673
/*
 * NFSv4 rename RPC: rename fname in fdnp to tname in tdnp using a single
 * PUTFH(FROM) + SAVEFH + PUTFH(TO) + RENAME + GETATTR(TO) + RESTOREFH +
 * GETATTR(FROM) compound, refreshing both directories' attributes from
 * the reply.  Returns 0 or an errno.
 */
int
nfs4_rename_rpc(
	nfsnode_t fdnp,
	char *fnameptr,
	int fnamelen,
	nfsnode_t tdnp,
	char *tnameptr,
	int tnamelen,
	vfs_context_t ctx)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsmount *nmp;
	u_int64_t xid, savedxid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(fdnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (fdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, fdnp, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM)
	numops = 7;
	nfsm_chain_build_alloc_init(error, &nmreq, 30 * NFSX_UNSIGNED + fnamelen + tnamelen);
	nfsm_chain_add_compound_header(error, &nmreq, "rename", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fdnp->n_fhp, fdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RENAME);
	nfsm_chain_add_name(error, &nmreq, fnameptr, fnamelen, nmp);
	nfsm_chain_add_name(error, &nmreq, tnameptr, tnamelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, fdnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request(fdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	/* lock both directories (deadlock-safe ordering handled by nfs_node_lock2) */
	if ((lockerror = nfs_node_lock2(fdnp, tdnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RENAME);
	nfsm_chain_check_change_info(error, &nmrep, fdnp);
	nfsm_chain_check_change_info(error, &nmrep, tdnp);
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	/* save the xid: both loadattr passes must see the same transaction ID */
	savedxid = xid;
	nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
	if (error && !lockerror) {
		NATTRINVALIDATE(tdnp);
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	xid = savedxid;
	nfsm_chain_loadattr(error, &nmrep, fdnp, nfsvers, &xid);
	if (error && !lockerror) {
		NATTRINVALIDATE(fdnp);
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if (!lockerror) {
		fdnp->n_flag |= NMODIFIED;
		tdnp->n_flag |= NMODIFIED;
		nfs_node_unlock2(fdnp, tdnp);
	}
	return error;
}
771
772/*
773 * NFS V4 readdir RPC.
774 */
2d21ac55 775int
b0d623f7
A
776nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx)
777{
2d21ac55 778 struct nfsmount *nmp;
6d2010ae 779 int error = 0, lockerror, nfsvers, namedattr, rdirplus, bigcookies, numops;
b0d623f7
A
780 int i, status, more_entries = 1, eof, bp_dropped = 0;
781 uint32_t nmreaddirsize, nmrsize;
782 uint32_t namlen, skiplen, fhlen, xlen, attrlen, reclen, space_free, space_needed;
783 uint64_t cookie, lastcookie, xid, savedxid;
784 struct nfsm_chain nmreq, nmrep, nmrepsave;
785 fhandle_t fh;
786 struct nfs_vattr nvattr, *nvattrp;
787 struct nfs_dir_buf_header *ndbhp;
788 struct direntry *dp;
789 char *padstart, padlen;
2d21ac55
A
790 const char *tag;
791 uint32_t entry_attrs[NFS_ATTR_BITMAP_LEN];
b0d623f7 792 struct timeval now;
6d2010ae 793 struct nfsreq_secinfo_args si;
2d21ac55 794
2d21ac55 795 nmp = NFSTONMP(dnp);
0a7de745
A
796 if (nfs_mount_gone(nmp)) {
797 return ENXIO;
798 }
2d21ac55
A
799 nfsvers = nmp->nm_vers;
800 nmreaddirsize = nmp->nm_readdirsize;
801 nmrsize = nmp->nm_rsize;
b0d623f7 802 bigcookies = nmp->nm_state & NFSSTA_BIGCOOKIES;
6d2010ae
A
803 namedattr = (dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) ? 1 : 0;
804 rdirplus = (NMFLAG(nmp, RDIRPLUS) || namedattr) ? 1 : 0;
0a7de745
A
805 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
806 return EINVAL;
807 }
6d2010ae 808 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
2d21ac55
A
809
810 /*
811 * Set up attribute request for entries.
812 * For READDIRPLUS functionality, get everything.
b0d623f7 813 * Otherwise, just get what we need for struct direntry.
2d21ac55
A
814 */
815 if (rdirplus) {
b0d623f7 816 tag = "readdirplus";
6d2010ae 817 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, entry_attrs);
2d21ac55
A
818 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEHANDLE);
819 } else {
b0d623f7 820 tag = "readdir";
2d21ac55
A
821 NFS_CLEAR_ATTRIBUTES(entry_attrs);
822 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_TYPE);
823 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEID);
6d2010ae 824 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_MOUNTED_ON_FILEID);
2d21ac55 825 }
2d21ac55
A
826 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_RDATTR_ERROR);
827
b0d623f7 828 /* lock to protect access to cookie verifier */
0a7de745
A
829 if ((lockerror = nfs_node_lock(dnp))) {
830 return lockerror;
831 }
2d21ac55 832
b0d623f7
A
833 /* determine cookie to use, and move dp to the right offset */
834 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
835 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
836 if (ndbhp->ndbh_count) {
0a7de745 837 for (i = 0; i < ndbhp->ndbh_count - 1; i++) {
b0d623f7 838 dp = NFS_DIRENTRY_NEXT(dp);
0a7de745 839 }
b0d623f7
A
840 cookie = dp->d_seekoff;
841 dp = NFS_DIRENTRY_NEXT(dp);
842 } else {
843 cookie = bp->nb_lblkno;
844 /* increment with every buffer read */
316670eb 845 OSAddAtomic64(1, &nfsstats.readdir_bios);
2d21ac55 846 }
b0d623f7 847 lastcookie = cookie;
2d21ac55
A
848
849 /*
b0d623f7
A
850 * The NFS client is responsible for the "." and ".." entries in the
851 * directory. So, we put them at the start of the first buffer.
6d2010ae 852 * Don't bother for attribute directories.
2d21ac55 853 */
6d2010ae
A
854 if (((bp->nb_lblkno == 0) && (ndbhp->ndbh_count == 0)) &&
855 !(dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
b0d623f7
A
856 fh.fh_len = 0;
857 fhlen = rdirplus ? fh.fh_len + 1 : 0;
858 xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
859 /* "." */
860 namlen = 1;
861 reclen = NFS_DIRENTRY_LEN(namlen + xlen);
0a7de745
A
862 if (xlen) {
863 bzero(&dp->d_name[namlen + 1], xlen);
864 }
b0d623f7 865 dp->d_namlen = namlen;
0a7de745 866 strlcpy(dp->d_name, ".", namlen + 1);
2d21ac55 867 dp->d_fileno = dnp->n_vattr.nva_fileid;
2d21ac55 868 dp->d_type = DT_DIR;
b0d623f7
A
869 dp->d_reclen = reclen;
870 dp->d_seekoff = 1;
871 padstart = dp->d_name + dp->d_namlen + 1 + xlen;
872 dp = NFS_DIRENTRY_NEXT(dp);
873 padlen = (char*)dp - padstart;
0a7de745 874 if (padlen > 0) {
b0d623f7 875 bzero(padstart, padlen);
0a7de745
A
876 }
877 if (rdirplus) { /* zero out attributes */
b0d623f7 878 bzero(NFS_DIR_BUF_NVATTR(bp, 0), sizeof(struct nfs_vattr));
0a7de745 879 }
b0d623f7
A
880
881 /* ".." */
882 namlen = 2;
883 reclen = NFS_DIRENTRY_LEN(namlen + xlen);
0a7de745
A
884 if (xlen) {
885 bzero(&dp->d_name[namlen + 1], xlen);
886 }
b0d623f7 887 dp->d_namlen = namlen;
0a7de745
A
888 strlcpy(dp->d_name, "..", namlen + 1);
889 if (dnp->n_parent) {
2d21ac55 890 dp->d_fileno = VTONFS(dnp->n_parent)->n_vattr.nva_fileid;
0a7de745 891 } else {
2d21ac55 892 dp->d_fileno = dnp->n_vattr.nva_fileid;
0a7de745 893 }
2d21ac55 894 dp->d_type = DT_DIR;
b0d623f7
A
895 dp->d_reclen = reclen;
896 dp->d_seekoff = 2;
897 padstart = dp->d_name + dp->d_namlen + 1 + xlen;
898 dp = NFS_DIRENTRY_NEXT(dp);
899 padlen = (char*)dp - padstart;
0a7de745 900 if (padlen > 0) {
b0d623f7 901 bzero(padstart, padlen);
0a7de745
A
902 }
903 if (rdirplus) { /* zero out attributes */
b0d623f7 904 bzero(NFS_DIR_BUF_NVATTR(bp, 1), sizeof(struct nfs_vattr));
0a7de745 905 }
b0d623f7
A
906
907 ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
908 ndbhp->ndbh_count = 2;
2d21ac55
A
909 }
910
911 /*
b0d623f7
A
912 * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
913 * the buffer is full (or we hit EOF). Then put the remainder of the
914 * results in the next buffer(s).
2d21ac55 915 */
b0d623f7
A
916 nfsm_chain_null(&nmreq);
917 nfsm_chain_null(&nmrep);
918 while (nfs_dir_buf_freespace(bp, rdirplus) && !(ndbhp->ndbh_flags & NDB_FULL)) {
b0d623f7
A
919 // PUTFH, GETATTR, READDIR
920 numops = 3;
2d21ac55 921 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
3e170ce0 922 nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
2d21ac55
A
923 numops--;
924 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
925 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
926 numops--;
927 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 928 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
2d21ac55
A
929 numops--;
930 nfsm_chain_add_32(error, &nmreq, NFS_OP_READDIR);
b0d623f7
A
931 nfsm_chain_add_64(error, &nmreq, (cookie <= 2) ? 0 : cookie);
932 nfsm_chain_add_64(error, &nmreq, dnp->n_cookieverf);
2d21ac55
A
933 nfsm_chain_add_32(error, &nmreq, nmreaddirsize);
934 nfsm_chain_add_32(error, &nmreq, nmrsize);
6d2010ae 935 nfsm_chain_add_bitmap_supported(error, &nmreq, entry_attrs, nmp, dnp);
2d21ac55
A
936 nfsm_chain_build_done(error, &nmreq);
937 nfsm_assert(error, (numops == 0), EPROTO);
b0d623f7 938 nfs_node_unlock(dnp);
2d21ac55 939 nfsmout_if(error);
6d2010ae 940 error = nfs_request(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
2d21ac55 941
0a7de745 942 if ((lockerror = nfs_node_lock(dnp))) {
2d21ac55 943 error = lockerror;
0a7de745 944 }
b0d623f7
A
945
946 savedxid = xid;
2d21ac55
A
947 nfsm_chain_skip_tag(error, &nmrep);
948 nfsm_chain_get_32(error, &nmrep, numops);
949 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
950 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 951 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
2d21ac55 952 nfsm_chain_op_check(error, &nmrep, NFS_OP_READDIR);
b0d623f7 953 nfsm_chain_get_64(error, &nmrep, dnp->n_cookieverf);
2d21ac55 954 nfsm_chain_get_32(error, &nmrep, more_entries);
b0d623f7
A
955
956 if (!lockerror) {
957 nfs_node_unlock(dnp);
958 lockerror = ENOENT;
959 }
2d21ac55
A
960 nfsmout_if(error);
961
0a7de745 962 if (rdirplus) {
b0d623f7 963 microuptime(&now);
0a7de745 964 }
b0d623f7
A
965
966 /* loop through the entries packing them into the buffer */
967 while (more_entries) {
2d21ac55 968 /* Entry: COOKIE, NAME, FATTR */
b0d623f7
A
969 nfsm_chain_get_64(error, &nmrep, cookie);
970 nfsm_chain_get_32(error, &nmrep, namlen);
2d21ac55 971 nfsmout_if(error);
b0d623f7
A
972 if (!bigcookies && (cookie >> 32) && (nmp == NFSTONMP(dnp))) {
973 /* we've got a big cookie, make sure flag is set */
974 lck_mtx_lock(&nmp->nm_lock);
975 nmp->nm_state |= NFSSTA_BIGCOOKIES;
976 lck_mtx_unlock(&nmp->nm_lock);
977 bigcookies = 1;
978 }
979 /* just truncate names that don't fit in direntry.d_name */
980 if (namlen <= 0) {
2d21ac55
A
981 error = EBADRPC;
982 goto nfsmout;
983 }
0a7de745 984 if (namlen > (sizeof(dp->d_name) - 1)) {
b0d623f7
A
985 skiplen = namlen - sizeof(dp->d_name) + 1;
986 namlen = sizeof(dp->d_name) - 1;
2d21ac55
A
987 } else {
988 skiplen = 0;
989 }
b0d623f7
A
990 /* guess that fh size will be same as parent */
991 fhlen = rdirplus ? (1 + dnp->n_fhsize) : 0;
992 xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
993 attrlen = rdirplus ? sizeof(struct nfs_vattr) : 0;
994 reclen = NFS_DIRENTRY_LEN(namlen + xlen);
995 space_needed = reclen + attrlen;
996 space_free = nfs_dir_buf_freespace(bp, rdirplus);
997 if (space_needed > space_free) {
998 /*
999 * We still have entries to pack, but we've
1000 * run out of room in the current buffer.
1001 * So we need to move to the next buffer.
1002 * The block# for the next buffer is the
1003 * last cookie in the current buffer.
1004 */
1005nextbuffer:
1006 ndbhp->ndbh_flags |= NDB_FULL;
1007 nfs_buf_release(bp, 0);
1008 bp_dropped = 1;
1009 bp = NULL;
1010 error = nfs_buf_get(dnp, lastcookie, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
1011 nfsmout_if(error);
1012 /* initialize buffer */
1013 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
1014 ndbhp->ndbh_flags = 0;
1015 ndbhp->ndbh_count = 0;
1016 ndbhp->ndbh_entry_end = sizeof(*ndbhp);
1017 ndbhp->ndbh_ncgen = dnp->n_ncgen;
1018 space_free = nfs_dir_buf_freespace(bp, rdirplus);
1019 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
1020 /* increment with every buffer read */
316670eb 1021 OSAddAtomic64(1, &nfsstats.readdir_bios);
2d21ac55 1022 }
b0d623f7
A
1023 nmrepsave = nmrep;
1024 dp->d_fileno = cookie; /* placeholder */
1025 dp->d_seekoff = cookie;
1026 dp->d_namlen = namlen;
1027 dp->d_reclen = reclen;
2d21ac55 1028 dp->d_type = DT_UNKNOWN;
b0d623f7
A
1029 nfsm_chain_get_opaque(error, &nmrep, namlen, dp->d_name);
1030 nfsmout_if(error);
1031 dp->d_name[namlen] = '\0';
0a7de745 1032 if (skiplen) {
2d21ac55 1033 nfsm_chain_adv(error, &nmrep,
0a7de745
A
1034 nfsm_rndup(namlen + skiplen) - nfsm_rndup(namlen));
1035 }
2d21ac55 1036 nfsmout_if(error);
b0d623f7 1037 nvattrp = rdirplus ? NFS_DIR_BUF_NVATTR(bp, ndbhp->ndbh_count) : &nvattr;
6d2010ae
A
1038 error = nfs4_parsefattr(&nmrep, NULL, nvattrp, &fh, NULL, NULL);
1039 if (!error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_ACL)) {
1040 /* we do NOT want ACLs returned to us here */
1041 NFS_BITMAP_CLR(nvattrp->nva_bitmap, NFS_FATTR_ACL);
1042 if (nvattrp->nva_acl) {
1043 kauth_acl_free(nvattrp->nva_acl);
1044 nvattrp->nva_acl = NULL;
1045 }
1046 }
b0d623f7 1047 if (error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_RDATTR_ERROR)) {
6d2010ae
A
1048 /* OK, we may not have gotten all of the attributes but we will use what we can. */
1049 if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
1050 /* set this up to look like a referral trigger */
1051 nfs4_default_attrs_for_referral_trigger(dnp, dp->d_name, namlen, nvattrp, &fh);
1052 }
2d21ac55
A
1053 error = 0;
1054 }
b0d623f7 1055 /* check for more entries after this one */
2d21ac55
A
1056 nfsm_chain_get_32(error, &nmrep, more_entries);
1057 nfsmout_if(error);
1058
b0d623f7 1059 /* Skip any "." and ".." entries returned from server. */
6d2010ae
A
1060 /* Also skip any bothersome named attribute entries. */
1061 if (((dp->d_name[0] == '.') && ((namlen == 1) || ((namlen == 2) && (dp->d_name[1] == '.')))) ||
1062 (namedattr && (namlen == 11) && (!strcmp(dp->d_name, "SUNWattr_ro") || !strcmp(dp->d_name, "SUNWattr_rw")))) {
b0d623f7 1063 lastcookie = cookie;
2d21ac55
A
1064 continue;
1065 }
1066
0a7de745 1067 if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_TYPE)) {
b0d623f7 1068 dp->d_type = IFTODT(VTTOIF(nvattrp->nva_type));
0a7de745
A
1069 }
1070 if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEID)) {
b0d623f7 1071 dp->d_fileno = nvattrp->nva_fileid;
0a7de745 1072 }
b0d623f7
A
1073 if (rdirplus) {
1074 /* fileid is already in d_fileno, so stash xid in attrs */
1075 nvattrp->nva_fileid = savedxid;
1076 if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
1077 fhlen = fh.fh_len + 1;
1078 xlen = fhlen + sizeof(time_t);
1079 reclen = NFS_DIRENTRY_LEN(namlen + xlen);
1080 space_needed = reclen + attrlen;
1081 if (space_needed > space_free) {
1082 /* didn't actually have the room... move on to next buffer */
1083 nmrep = nmrepsave;
1084 goto nextbuffer;
1085 }
1086 /* pack the file handle into the record */
0a7de745
A
1087 dp->d_name[dp->d_namlen + 1] = fh.fh_len;
1088 bcopy(fh.fh_data, &dp->d_name[dp->d_namlen + 2], fh.fh_len);
b0d623f7
A
1089 } else {
1090 /* mark the file handle invalid */
1091 fh.fh_len = 0;
1092 fhlen = fh.fh_len + 1;
1093 xlen = fhlen + sizeof(time_t);
1094 reclen = NFS_DIRENTRY_LEN(namlen + xlen);
0a7de745 1095 bzero(&dp->d_name[dp->d_namlen + 1], fhlen);
2d21ac55 1096 }
0a7de745 1097 *(time_t*)(&dp->d_name[dp->d_namlen + 1 + fhlen]) = now.tv_sec;
b0d623f7 1098 dp->d_reclen = reclen;
2d21ac55 1099 }
b0d623f7
A
1100 padstart = dp->d_name + dp->d_namlen + 1 + xlen;
1101 ndbhp->ndbh_count++;
1102 lastcookie = cookie;
1103
1104 /* advance to next direntry in buffer */
1105 dp = NFS_DIRENTRY_NEXT(dp);
1106 ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
1107 /* zero out the pad bytes */
1108 padlen = (char*)dp - padstart;
0a7de745 1109 if (padlen > 0) {
b0d623f7 1110 bzero(padstart, padlen);
0a7de745 1111 }
b0d623f7
A
1112 }
1113 /* Finally, get the eof boolean */
1114 nfsm_chain_get_32(error, &nmrep, eof);
1115 nfsmout_if(error);
1116 if (eof) {
0a7de745 1117 ndbhp->ndbh_flags |= (NDB_FULL | NDB_EOF);
b0d623f7
A
1118 nfs_node_lock_force(dnp);
1119 dnp->n_eofcookie = lastcookie;
1120 nfs_node_unlock(dnp);
1121 } else {
1122 more_entries = 1;
2d21ac55 1123 }
b0d623f7
A
1124 if (bp_dropped) {
1125 nfs_buf_release(bp, 0);
1126 bp = NULL;
1127 break;
2d21ac55 1128 }
0a7de745 1129 if ((lockerror = nfs_node_lock(dnp))) {
2d21ac55 1130 error = lockerror;
0a7de745 1131 }
2d21ac55
A
1132 nfsmout_if(error);
1133 nfsm_chain_cleanup(&nmrep);
b0d623f7 1134 nfsm_chain_null(&nmreq);
2d21ac55 1135 }
2d21ac55 1136nfsmout:
0a7de745 1137 if (bp_dropped && bp) {
b0d623f7 1138 nfs_buf_release(bp, 0);
0a7de745
A
1139 }
1140 if (!lockerror) {
b0d623f7 1141 nfs_node_unlock(dnp);
0a7de745 1142 }
2d21ac55
A
1143 nfsm_chain_cleanup(&nmreq);
1144 nfsm_chain_cleanup(&nmrep);
0a7de745 1145 return bp_dropped ? NFSERR_DIRBUFDROPPED : error;
2d21ac55
A
1146}
1147
/*
 * Start an asynchronous NFSv4 LOOKUP (or LOOKUPP for "..") RPC for
 * name in directory dnp.  Only builds and sends the request; the
 * reply is parsed by nfs4_lookup_rpc_async_finish() using *reqp.
 *
 * Compound sent: PUTFH, GETATTR(dir), LOOKUP(name)|LOOKUPP, GETFH, GETATTR.
 *
 * Returns 0 on success (with *reqp set), ENXIO if the mount is gone,
 * EINVAL for an un-resolved referral trigger node, or an error from
 * request construction/submission.
 */
int
nfs4_lookup_rpc_async(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq **reqp)
{
	int error = 0, isdotdot = 0, nfsvers, numops;
	struct nfsm_chain nmreq;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsmount *nmp;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	/* referral triggers must be resolved elsewhere; can't LOOKUP through them */
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
		/* ".." uses LOOKUPP, which takes no name argument */
		isdotdot = 1;
		NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
	} else {
		NFSREQ_SECINFO_SET(&si, dnp, dnp->n_fhp, dnp->n_fhsize, name, namelen);
	}

	nfsm_chain_null(&nmreq);

	// PUTFH, GETATTR, LOOKUP(P), GETFH, GETATTR (FH)
	numops = 5;
	nfsm_chain_build_alloc_init(error, &nmreq, 20 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "lookup", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	numops--;
	if (isdotdot) {
		nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUPP);
	} else {
		nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
		nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	}
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	/* some ".zfs" directories can't handle being asked for some attributes */
	if ((dnp->n_flag & NISDOTZFS) && !isdotdot) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	if ((dnp->n_flag & NISDOTZFSCHILD) && isdotdot) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	if (((namelen == 4) && (name[0] == '.') && (name[1] == 'z') && (name[2] == 'f') && (name[3] == 's'))) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	/* fire off the request; caller completes it via nfs4_lookup_rpc_async_finish() */
	error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}
1222
6d2010ae 1223
2d21ac55
A
/*
 * Finish an asynchronous NFSv4 LOOKUP RPC started by nfs4_lookup_rpc_async().
 *
 * Parses the COMPOUND reply (PUTFH, GETATTR, LOOKUP/LOOKUPP, GETFH, GETATTR),
 * loading the directory's attributes and, when fhp/nvap are supplied,
 * returning the looked-up node's file handle and attributes.  NFSERR_MOVED /
 * NFSERR_INVAL from the final GETATTR are converted into referral-trigger
 * default attributes rather than an error.
 *
 * On the first successful LOOKUP while NFSSTA_NEEDSECINFO is set, also issues
 * a SECINFO RPC to establish the mount's default security flavor.
 */
int
nfs4_lookup_rpc_async_finish(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq *req,
	u_int64_t *xidp,
	fhandle_t *fhp,
	struct nfs_vattr *nvap)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops, isdotdot = 0;
	/* NOTE(review): op is never reassigned, so the SECINFO block below runs
	 * for ".." (LOOKUPP) results as well — confirm that is intended. */
	uint32_t op = NFS_OP_LOOKUP;
	u_int64_t xid;
	struct nfsmount *nmp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(dnp);
	if (nmp == NULL) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
		isdotdot = 1;
	}

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	if (xidp) {
		*xidp = xid;
	}
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);

	nfsm_chain_op_check(error, &nmrep, (isdotdot ? NFS_OP_LOOKUPP : NFS_OP_LOOKUP));
	/* nothing more to parse if the caller doesn't want the target's fh/attrs */
	nfsmout_if(error || !fhp || !nvap);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETFH);
	nfsm_chain_get_32(error, &nmrep, fhp->fh_len);
	/* reject file handles too large for our buffer */
	if (error == 0 && fhp->fh_len > sizeof(fhp->fh_data)) {
		error = EBADRPC;
	}
	nfsmout_if(error);
	nfsm_chain_get_opaque(error, &nmrep, fhp->fh_len, fhp->fh_data);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
		/* set this up to look like a referral trigger */
		nfs4_default_attrs_for_referral_trigger(dnp, name, namelen, nvap, fhp);
		error = 0;
	} else {
		nfsmout_if(error);
		error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(dnp);
	}
	nfsm_chain_cleanup(&nmrep);
	if (!error && (op == NFS_OP_LOOKUP) && (nmp->nm_state & NFSSTA_NEEDSECINFO)) {
		/* We still need to get SECINFO to set default for mount. */
		/* Do so for the first LOOKUP that returns successfully. */
		struct nfs_sec sec;

		sec.count = NX_MAX_SEC_FLAVORS;
		error = nfs4_secinfo_rpc(nmp, &req->r_secinfo, vfs_context_ucred(ctx), sec.flavors, &sec.count);
		/* [sigh] some implementations return "illegal" error for unsupported ops */
		if (error == NFSERR_OP_ILLEGAL) {
			error = 0;
		}
		if (!error) {
			/* set our default security flavor to the first in the list */
			lck_mtx_lock(&nmp->nm_lock);
			if (sec.count) {
				nmp->nm_auth = sec.flavors[0];
			}
			nmp->nm_state &= ~NFSSTA_NEEDSECINFO;
			lck_mtx_unlock(&nmp->nm_lock);
		}
	}
	return error;
}
1312
/*
 * NFSv4 COMMIT RPC: ask the server to commit previously unstable-written
 * data in [offset, offset+count) for node np to stable storage.
 *
 * Compound sent: PUTFH, COMMIT, GETATTR.
 *
 * The server's returned write verifier is compared against the caller's
 * wverf: a mismatch means the server may have lost uncommitted writes,
 * so NFSERR_STALEWRITEVERF is returned and the mount's cached verifier
 * is updated.  Returns 0 immediately if no write verifier has been
 * established yet (nothing has been written unstably).
 */
int
nfs4_commit_rpc(
	nfsnode_t np,
	uint64_t offset,
	uint64_t count,
	kauth_cred_t cred,
	uint64_t wverf)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, status, nfsvers, numops;
	u_int64_t xid, newwverf;
	uint32_t count32;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	FSDBG(521, np, offset, count, nmp ? nmp->nm_state : 0);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
		return 0;
	}
	nfsvers = nmp->nm_vers;

	/* COMMIT carries a 32-bit count; 0 means "commit to end of file" */
	if (count > UINT32_MAX) {
		count32 = 0;
	} else {
		count32 = count;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, COMMIT, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 19 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "commit", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_COMMIT);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, count32);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    current_thread(), cred, &si, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_COMMIT);
	nfsm_chain_get_64(error, &nmrep, newwverf);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsmout_if(error);
	lck_mtx_lock(&nmp->nm_lock);
	/* remember the latest verifier the server handed back */
	if (nmp->nm_verf != newwverf) {
		nmp->nm_verf = newwverf;
	}
	/* verifier changed since the writes were sent: caller must rewrite */
	if (wverf != newwverf) {
		error = NFSERR_STALEWRITEVERF;
	}
	lck_mtx_unlock(&nmp->nm_lock);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
1398
/*
 * Fetch "pathconf"-style filesystem attributes for node np via an NFSv4
 * GETATTR (there is no separate PATHCONF operation in v4).
 *
 * Compound sent: PUTFH, GETATTR with the standard bitmap plus the
 * pathconf-relevant attributes (maxlink, maxname, no_trunc,
 * chown_restricted, case_insensitive, case_preserving).
 *
 * Results are parsed into *nfsap; the node's cached attributes are
 * refreshed from the same reply.
 */
int
nfs4_pathconf_rpc(
	nfsnode_t np,
	struct nfs_fsattr *nfsap,
	vfs_context_t ctx)
{
	u_int64_t xid;
	int error = 0, lockerror, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	struct nfsmount *nmp = NFSTONMP(np);
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfs_vattr nvattr;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	NVATTR_INIT(&nvattr);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/* NFSv4: fetch "pathconf" info for this node */
	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "pathconf", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXLINK);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXNAME);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_NO_TRUNC);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CHOWN_RESTRICTED);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_INSENSITIVE);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_PRESERVING);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	/* fills in both the fs attrs (nfsap) and the node attrs (nvattr) */
	error = nfs4_parsefattr(&nmrep, nfsap, &nvattr, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	if (!error) {
		/* refresh the node's attribute cache from the reply */
		nfs_loadattrcache(np, &nvattr, &xid, 0);
	}
	if (!lockerror) {
		nfs_node_unlock(np);
	}
nfsmout:
	NVATTR_CLEANUP(&nvattr);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
1471
/*
 * NFSv4 VNOP_GETATTR: fetch (possibly cached) attributes for a vnode and
 * translate them from the NFS attribute representation (struct nfs_vattr)
 * into the VFS representation (struct vnode_attr).
 *
 * Each va_* field is returned only when it is both requested by the caller
 * (VATTR_IS_ACTIVE) and actually present in the server's attribute bitmap
 * (NFS_BITMAP_ISSET); VATTR_RETURN also marks the field as supported.
 * ACLs are requested/returned only when the filesystem advertises ACL
 * support; ownership of nva_acl is transferred to the caller's vap.
 */
int
nfs4_vnop_getattr(
	struct vnop_getattr_args /* {
                                  *  struct vnodeop_desc *a_desc;
                                  *  vnode_t a_vp;
                                  *  struct vnode_attr *a_vap;
                                  *  vfs_context_t a_context;
                                  *  } */*ap)
{
	struct vnode_attr *vap = ap->a_vap;
	struct nfsmount *nmp;
	struct nfs_vattr nva;
	int error, acls, ngaflags;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	/* only ask for the ACL when the caller wants it and the fs supports it */
	ngaflags = NGA_CACHED;
	if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
		ngaflags |= NGA_ACL;
	}
	error = nfs_getattr(VTONFS(ap->a_vp), &nva, ap->a_context, ngaflags);
	if (error) {
		return error;
	}

	/* copy what we have in nva to *a_vap */
	if (VATTR_IS_ACTIVE(vap, va_rdev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_RAWDEV)) {
		dev_t rdev = makedev(nva.nva_rawdev.specdata1, nva.nva_rawdev.specdata2);
		VATTR_RETURN(vap, va_rdev, rdev);
	}
	if (VATTR_IS_ACTIVE(vap, va_nlink) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_NUMLINKS)) {
		VATTR_RETURN(vap, va_nlink, nva.nva_nlink);
	}
	if (VATTR_IS_ACTIVE(vap, va_data_size) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SIZE)) {
		VATTR_RETURN(vap, va_data_size, nva.nva_size);
	}
	// VATTR_RETURN(vap, va_data_alloc, ???);
	// VATTR_RETURN(vap, va_total_size, ???);
	if (VATTR_IS_ACTIVE(vap, va_total_alloc) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SPACE_USED)) {
		VATTR_RETURN(vap, va_total_alloc, nva.nva_bytes);
	}
	if (VATTR_IS_ACTIVE(vap, va_uid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER)) {
		VATTR_RETURN(vap, va_uid, nva.nva_uid);
	}
	if (VATTR_IS_ACTIVE(vap, va_uuuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER)) {
		VATTR_RETURN(vap, va_uuuid, nva.nva_uuuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_gid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP)) {
		VATTR_RETURN(vap, va_gid, nva.nva_gid);
	}
	if (VATTR_IS_ACTIVE(vap, va_guuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP)) {
		VATTR_RETURN(vap, va_guuid, nva.nva_guuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_mode)) {
		/* ACL-only mounts (or a missing mode attr) report wide-open mode */
		if (NMFLAG(nmp, ACLONLY) || !NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_MODE)) {
			VATTR_RETURN(vap, va_mode, 0777);
		} else {
			VATTR_RETURN(vap, va_mode, nva.nva_mode);
		}
	}
	if (VATTR_IS_ACTIVE(vap, va_flags) &&
	    (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) ||
	    NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) ||
	    (nva.nva_flags & NFS_FFLAG_TRIGGER))) {
		/* map NFS archive/hidden attributes onto BSD file flags */
		uint32_t flags = 0;
		if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) &&
		    (nva.nva_flags & NFS_FFLAG_ARCHIVED)) {
			flags |= SF_ARCHIVED;
		}
		if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) &&
		    (nva.nva_flags & NFS_FFLAG_HIDDEN)) {
			flags |= UF_HIDDEN;
		}
		VATTR_RETURN(vap, va_flags, flags);
	}
	if (VATTR_IS_ACTIVE(vap, va_create_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_CREATE)) {
		vap->va_create_time.tv_sec = nva.nva_timesec[NFSTIME_CREATE];
		vap->va_create_time.tv_nsec = nva.nva_timensec[NFSTIME_CREATE];
		VATTR_SET_SUPPORTED(vap, va_create_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_access_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_ACCESS)) {
		vap->va_access_time.tv_sec = nva.nva_timesec[NFSTIME_ACCESS];
		vap->va_access_time.tv_nsec = nva.nva_timensec[NFSTIME_ACCESS];
		VATTR_SET_SUPPORTED(vap, va_access_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_modify_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_MODIFY)) {
		vap->va_modify_time.tv_sec = nva.nva_timesec[NFSTIME_MODIFY];
		vap->va_modify_time.tv_nsec = nva.nva_timensec[NFSTIME_MODIFY];
		VATTR_SET_SUPPORTED(vap, va_modify_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_change_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_METADATA)) {
		vap->va_change_time.tv_sec = nva.nva_timesec[NFSTIME_CHANGE];
		vap->va_change_time.tv_nsec = nva.nva_timensec[NFSTIME_CHANGE];
		VATTR_SET_SUPPORTED(vap, va_change_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_backup_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_BACKUP)) {
		vap->va_backup_time.tv_sec = nva.nva_timesec[NFSTIME_BACKUP];
		vap->va_backup_time.tv_nsec = nva.nva_timensec[NFSTIME_BACKUP];
		VATTR_SET_SUPPORTED(vap, va_backup_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_fileid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_FILEID)) {
		VATTR_RETURN(vap, va_fileid, nva.nva_fileid);
	}
	if (VATTR_IS_ACTIVE(vap, va_type) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TYPE)) {
		VATTR_RETURN(vap, va_type, nva.nva_type);
	}
	if (VATTR_IS_ACTIVE(vap, va_filerev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_CHANGE)) {
		VATTR_RETURN(vap, va_filerev, nva.nva_change);
	}

	if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
		/* hand the ACL to the caller; NULL it so NVATTR_CLEANUP won't free it */
		VATTR_RETURN(vap, va_acl, nva.nva_acl);
		nva.nva_acl = NULL;
	}

	// other attrs we might support someday:
	// VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? */);

	NVATTR_CLEANUP(&nva);
	return error;
}
1597
1598int
1599nfs4_setattr_rpc(
1600 nfsnode_t np,
1601 struct vnode_attr *vap,
b0d623f7 1602 vfs_context_t ctx)
2d21ac55
A
1603{
1604 struct nfsmount *nmp = NFSTONMP(np);
6d2010ae 1605 int error = 0, setattr_error = 0, lockerror = ENOENT, status, nfsvers, numops;
b0d623f7 1606 u_int64_t xid, nextxid;
2d21ac55 1607 struct nfsm_chain nmreq, nmrep;
b0d623f7 1608 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6d2010ae
A
1609 uint32_t getbitmap[NFS_ATTR_BITMAP_LEN];
1610 uint32_t setbitmap[NFS_ATTR_BITMAP_LEN];
b0d623f7 1611 nfs_stateid stateid;
6d2010ae 1612 struct nfsreq_secinfo_args si;
2d21ac55 1613
0a7de745
A
1614 if (nfs_mount_gone(nmp)) {
1615 return ENXIO;
1616 }
2d21ac55 1617 nfsvers = nmp->nm_vers;
0a7de745
A
1618 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
1619 return EINVAL;
1620 }
2d21ac55 1621
0a7de745 1622 if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & ~(SF_ARCHIVED | UF_HIDDEN))) {
2d21ac55 1623 /* we don't support setting unsupported flags (duh!) */
0a7de745
A
1624 if (vap->va_active & ~VNODE_ATTR_va_flags) {
1625 return EINVAL; /* return EINVAL if other attributes also set */
1626 } else {
1627 return ENOTSUP; /* return ENOTSUP for chflags(2) */
1628 }
2d21ac55
A
1629 }
1630
6d2010ae 1631 /* don't bother requesting some changes if they don't look like they are changing */
0a7de745 1632 if (VATTR_IS_ACTIVE(vap, va_uid) && (vap->va_uid == np->n_vattr.nva_uid)) {
6d2010ae 1633 VATTR_CLEAR_ACTIVE(vap, va_uid);
0a7de745
A
1634 }
1635 if (VATTR_IS_ACTIVE(vap, va_gid) && (vap->va_gid == np->n_vattr.nva_gid)) {
6d2010ae 1636 VATTR_CLEAR_ACTIVE(vap, va_gid);
0a7de745
A
1637 }
1638 if (VATTR_IS_ACTIVE(vap, va_uuuid) && kauth_guid_equal(&vap->va_uuuid, &np->n_vattr.nva_uuuid)) {
6d2010ae 1639 VATTR_CLEAR_ACTIVE(vap, va_uuuid);
0a7de745
A
1640 }
1641 if (VATTR_IS_ACTIVE(vap, va_guuid) && kauth_guid_equal(&vap->va_guuid, &np->n_vattr.nva_guuid)) {
6d2010ae 1642 VATTR_CLEAR_ACTIVE(vap, va_guuid);
0a7de745 1643 }
6d2010ae
A
1644
1645tryagain:
1646 /* do nothing if no attributes will be sent */
1647 nfs_vattr_set_bitmap(nmp, bitmap, vap);
0a7de745
A
1648 if (!bitmap[0] && !bitmap[1]) {
1649 return 0;
1650 }
6d2010ae
A
1651
1652 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
2d21ac55
A
1653 nfsm_chain_null(&nmreq);
1654 nfsm_chain_null(&nmrep);
1655
6d2010ae
A
1656 /*
1657 * Prepare GETATTR bitmap: if we are setting the ACL or mode, we
1658 * need to invalidate any cached ACL. And if we had an ACL cached,
1659 * we might as well also fetch the new value.
1660 */
1661 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, getbitmap);
1662 if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_ACL) ||
1663 NFS_BITMAP_ISSET(bitmap, NFS_FATTR_MODE)) {
0a7de745 1664 if (NACLVALID(np)) {
6d2010ae 1665 NFS_BITMAP_SET(getbitmap, NFS_FATTR_ACL);
0a7de745 1666 }
6d2010ae
A
1667 NACLINVALIDATE(np);
1668 }
1669
2d21ac55
A
1670 // PUTFH, SETATTR, GETATTR
1671 numops = 3;
1672 nfsm_chain_build_alloc_init(error, &nmreq, 40 * NFSX_UNSIGNED);
3e170ce0 1673 nfsm_chain_add_compound_header(error, &nmreq, "setattr", nmp->nm_minor_vers, numops);
2d21ac55
A
1674 numops--;
1675 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
1676 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
1677 numops--;
1678 nfsm_chain_add_32(error, &nmreq, NFS_OP_SETATTR);
0a7de745 1679 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
b0d623f7 1680 nfs_get_stateid(np, vfs_context_thread(ctx), vfs_context_ucred(ctx), &stateid);
0a7de745 1681 } else {
b0d623f7 1682 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
0a7de745 1683 }
b0d623f7 1684 nfsm_chain_add_stateid(error, &nmreq, &stateid);
2d21ac55
A
1685 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
1686 numops--;
1687 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 1688 nfsm_chain_add_bitmap_supported(error, &nmreq, getbitmap, nmp, np);
2d21ac55
A
1689 nfsm_chain_build_done(error, &nmreq);
1690 nfsm_assert(error, (numops == 0), EPROTO);
1691 nfsmout_if(error);
6d2010ae 1692 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
2d21ac55 1693
0a7de745 1694 if ((lockerror = nfs_node_lock(np))) {
2d21ac55 1695 error = lockerror;
0a7de745 1696 }
2d21ac55
A
1697 nfsm_chain_skip_tag(error, &nmrep);
1698 nfsm_chain_get_32(error, &nmrep, numops);
1699 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6d2010ae 1700 nfsmout_if(error);
2d21ac55 1701 nfsm_chain_op_check(error, &nmrep, NFS_OP_SETATTR);
6d2010ae
A
1702 nfsmout_if(error == EBADRPC);
1703 setattr_error = error;
1704 error = 0;
2d21ac55 1705 bmlen = NFS_ATTR_BITMAP_LEN;
6d2010ae
A
1706 nfsm_chain_get_bitmap(error, &nmrep, setbitmap, bmlen);
1707 if (!error) {
0a7de745 1708 if (VATTR_IS_ACTIVE(vap, va_data_size) && (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
6d2010ae 1709 microuptime(&np->n_lastio);
0a7de745 1710 }
6d2010ae
A
1711 nfs_vattr_set_supported(setbitmap, vap);
1712 error = setattr_error;
1713 }
2d21ac55 1714 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 1715 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
0a7de745 1716 if (error) {
2d21ac55 1717 NATTRINVALIDATE(np);
0a7de745 1718 }
b0d623f7
A
1719 /*
1720 * We just changed the attributes and we want to make sure that we
1721 * see the latest attributes. Get the next XID. If it's not the
1722 * next XID after the SETATTR XID, then it's possible that another
1723 * RPC was in flight at the same time and it might put stale attributes
1724 * in the cache. In that case, we invalidate the attributes and set
1725 * the attribute cache XID to guarantee that newer attributes will
1726 * get loaded next.
1727 */
1728 nextxid = 0;
1729 nfs_get_xid(&nextxid);
1730 if (nextxid != (xid + 1)) {
1731 np->n_xid = nextxid;
1732 NATTRINVALIDATE(np);
1733 }
2d21ac55 1734nfsmout:
0a7de745 1735 if (!lockerror) {
b0d623f7 1736 nfs_node_unlock(np);
0a7de745 1737 }
2d21ac55
A
1738 nfsm_chain_cleanup(&nmreq);
1739 nfsm_chain_cleanup(&nmrep);
6d2010ae
A
1740 if ((setattr_error == EINVAL) && VATTR_IS_ACTIVE(vap, va_acl) && VATTR_IS_ACTIVE(vap, va_mode) && !NMFLAG(nmp, ACLONLY)) {
1741 /*
1742 * Some server's may not like ACL/mode combos that get sent.
1743 * If it looks like that's what the server choked on, try setting
1744 * just the ACL and not the mode (unless it looks like everything
1745 * but mode was already successfully set).
1746 */
1747 if (((bitmap[0] & setbitmap[0]) != bitmap[0]) ||
0a7de745 1748 ((bitmap[1] & (setbitmap[1] | NFS_FATTR_MODE)) != bitmap[1])) {
6d2010ae
A
1749 VATTR_CLEAR_ACTIVE(vap, va_mode);
1750 error = 0;
1751 goto tryagain;
1752 }
1753 }
0a7de745 1754 return error;
2d21ac55 1755}
cb323159 1756#endif /* CONFIG_NFS4 */
2d21ac55 1757
b0d623f7
A
1758/*
1759 * Wait for any pending recovery to complete.
1760 */
2d21ac55 1761int
b0d623f7 1762nfs_mount_state_wait_for_recovery(struct nfsmount *nmp)
2d21ac55 1763{
cb323159 1764 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
6d2010ae 1765 int error = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
b0d623f7
A
1766
1767 lck_mtx_lock(&nmp->nm_lock);
1768 while (nmp->nm_state & NFSSTA_RECOVER) {
0a7de745 1769 if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1))) {
b0d623f7 1770 break;
0a7de745 1771 }
b0d623f7 1772 nfs_mount_sock_thread_wake(nmp);
0a7de745 1773 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsrecoverwait", &ts);
6d2010ae 1774 slpflag = 0;
b0d623f7
A
1775 }
1776 lck_mtx_unlock(&nmp->nm_lock);
1777
0a7de745 1778 return error;
2d21ac55
A
1779}
1780
b0d623f7
A
1781/*
1782 * We're about to use/manipulate NFS mount's open/lock state.
1783 * Wait for any pending state recovery to complete, then
1784 * mark the state as being in use (which will hold off
1785 * the recovery thread until we're done).
1786 */
2d21ac55 1787int
6d2010ae 1788nfs_mount_state_in_use_start(struct nfsmount *nmp, thread_t thd)
2d21ac55 1789{
cb323159 1790 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
6d2010ae 1791 int error = 0, slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
b0d623f7 1792
0a7de745
A
1793 if (nfs_mount_gone(nmp)) {
1794 return ENXIO;
1795 }
b0d623f7 1796 lck_mtx_lock(&nmp->nm_lock);
0a7de745 1797 if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
6d2010ae 1798 lck_mtx_unlock(&nmp->nm_lock);
0a7de745 1799 return ENXIO;
6d2010ae 1800 }
b0d623f7 1801 while (nmp->nm_state & NFSSTA_RECOVER) {
0a7de745 1802 if ((error = nfs_sigintr(nmp, NULL, thd, 1))) {
b0d623f7 1803 break;
0a7de745 1804 }
b0d623f7 1805 nfs_mount_sock_thread_wake(nmp);
0a7de745 1806 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsrecoverwait", &ts);
6d2010ae 1807 slpflag = 0;
b0d623f7 1808 }
0a7de745 1809 if (!error) {
b0d623f7 1810 nmp->nm_stateinuse++;
0a7de745 1811 }
b0d623f7
A
1812 lck_mtx_unlock(&nmp->nm_lock);
1813
0a7de745 1814 return error;
2d21ac55
A
1815}
1816
b0d623f7
A
/*
 * We're done using/manipulating the NFS mount's open/lock
 * state.  If the given error indicates that recovery should
 * be performed, we'll initiate recovery.
 *
 * Returns nonzero if the caller should restart the operation.
 */
int
nfs_mount_state_in_use_end(struct nfsmount *nmp, int error)
{
	int restart = nfs_mount_state_error_should_restart(error);

	if (nfs_mount_gone(nmp)) {
		return restart;
	}
	lck_mtx_lock(&nmp->nm_lock);
	/*
	 * OLD_STATEID and GRACE are simply retried by the caller;
	 * any other restartable error means the mount's state is bad
	 * and full recovery must be kicked off.
	 */
	if (restart && (error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE)) {
		printf("nfs_mount_state_in_use_end: error %d, initiating recovery for %s, 0x%x\n",
		    error, vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid);
		nfs_need_recover(nmp, error);
	}
	if (nmp->nm_stateinuse > 0) {
		nmp->nm_stateinuse--;
	} else {
		panic("NFS mount state in use count underrun");
	}
	/* last user out wakes the recovery thread waiting on nm_stateinuse */
	if (!nmp->nm_stateinuse && (nmp->nm_state & NFSSTA_RECOVER)) {
		wakeup(&nmp->nm_stateinuse);
	}
	lck_mtx_unlock(&nmp->nm_lock);
	/* server is in its grace period: pause briefly before the caller retries */
	if (error == NFSERR_GRACE) {
		tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
	}

	return restart;
}
1851
1852/*
b0d623f7 1853 * Does the error mean we should restart/redo a state-related operation?
2d21ac55
A
1854 */
1855int
b0d623f7 1856nfs_mount_state_error_should_restart(int error)
2d21ac55 1857{
b0d623f7
A
1858 switch (error) {
1859 case NFSERR_STALE_STATEID:
1860 case NFSERR_STALE_CLIENTID:
1861 case NFSERR_ADMIN_REVOKED:
1862 case NFSERR_EXPIRED:
1863 case NFSERR_OLD_STATEID:
1864 case NFSERR_BAD_STATEID:
1865 case NFSERR_GRACE:
0a7de745 1866 return 1;
b0d623f7 1867 }
0a7de745 1868 return 0;
b0d623f7 1869}
2d21ac55 1870
b0d623f7
A
1871/*
1872 * In some cases we may want to limit how many times we restart a
1873 * state-related operation - e.g. we're repeatedly getting NFSERR_GRACE.
1874 * Base the limit on the lease (as long as it's not too short).
1875 */
1876uint
1877nfs_mount_state_max_restarts(struct nfsmount *nmp)
1878{
0a7de745 1879 return MAX(nmp->nm_fsattr.nfsa_lease, 60);
b0d623f7 1880}
2d21ac55 1881
6d2010ae
A
1882/*
1883 * Does the error mean we probably lost a delegation?
1884 */
1885int
1886nfs_mount_state_error_delegation_lost(int error)
1887{
1888 switch (error) {
1889 case NFSERR_STALE_STATEID:
1890 case NFSERR_ADMIN_REVOKED:
1891 case NFSERR_EXPIRED:
1892 case NFSERR_OLD_STATEID:
1893 case NFSERR_BAD_STATEID:
1894 case NFSERR_GRACE: /* ugh! (stupid) RFC 3530 specifically disallows CLAIM_DELEGATE_CUR during grace period? */
0a7de745 1895 return 1;
6d2010ae 1896 }
0a7de745 1897 return 0;
6d2010ae
A
1898}
1899
b0d623f7
A
1900
1901/*
1902 * Mark an NFS node's open state as busy.
1903 */
1904int
6d2010ae 1905nfs_open_state_set_busy(nfsnode_t np, thread_t thd)
b0d623f7
A
1906{
1907 struct nfsmount *nmp;
cb323159 1908 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
b0d623f7
A
1909 int error = 0, slpflag;
1910
1911 nmp = NFSTONMP(np);
0a7de745
A
1912 if (nfs_mount_gone(nmp)) {
1913 return ENXIO;
1914 }
6d2010ae 1915 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
2d21ac55 1916
b0d623f7
A
1917 lck_mtx_lock(&np->n_openlock);
1918 while (np->n_openflags & N_OPENBUSY) {
0a7de745 1919 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
b0d623f7 1920 break;
0a7de745 1921 }
b0d623f7
A
1922 np->n_openflags |= N_OPENWANT;
1923 msleep(&np->n_openflags, &np->n_openlock, slpflag, "nfs_open_state_set_busy", &ts);
6d2010ae 1924 slpflag = 0;
b0d623f7 1925 }
0a7de745 1926 if (!error) {
b0d623f7 1927 np->n_openflags |= N_OPENBUSY;
0a7de745 1928 }
b0d623f7 1929 lck_mtx_unlock(&np->n_openlock);
2d21ac55 1930
0a7de745 1931 return error;
b0d623f7 1932}
2d21ac55 1933
b0d623f7
A
1934/*
1935 * Clear an NFS node's open state busy flag and wake up
1936 * anyone wanting it.
1937 */
1938void
1939nfs_open_state_clear_busy(nfsnode_t np)
1940{
1941 int wanted;
1942
1943 lck_mtx_lock(&np->n_openlock);
0a7de745 1944 if (!(np->n_openflags & N_OPENBUSY)) {
b0d623f7 1945 panic("nfs_open_state_clear_busy");
0a7de745 1946 }
b0d623f7 1947 wanted = (np->n_openflags & N_OPENWANT);
0a7de745 1948 np->n_openflags &= ~(N_OPENBUSY | N_OPENWANT);
b0d623f7 1949 lck_mtx_unlock(&np->n_openlock);
0a7de745 1950 if (wanted) {
b0d623f7 1951 wakeup(&np->n_openflags);
0a7de745 1952 }
b0d623f7 1953}
2d21ac55 1954
b0d623f7
A
/*
 * Search a mount's open owner list for the owner for this credential.
 * If not found and "alloc" is set, then allocate a new one.
 *
 * Returns the owner with a reference taken, or NULL on allocation
 * failure / not found (when !alloc).
 */
struct nfs_open_owner *
nfs_open_owner_find(struct nfsmount *nmp, kauth_cred_t cred, int alloc)
{
	uid_t uid = kauth_cred_getuid(cred);
	struct nfs_open_owner *noop, *newnoop = NULL;

tryagain:
	lck_mtx_lock(&nmp->nm_lock);
	/* open owners are keyed by the credential's uid */
	TAILQ_FOREACH(noop, &nmp->nm_open_owners, noo_link) {
		if (kauth_cred_getuid(noop->noo_cred) == uid) {
			break;
		}
	}

	if (!noop && !newnoop && alloc) {
		/* not found: drop the mount lock for the blocking allocation, then rescan */
		lck_mtx_unlock(&nmp->nm_lock);
		MALLOC(newnoop, struct nfs_open_owner *, sizeof(struct nfs_open_owner), M_TEMP, M_WAITOK);
		if (!newnoop) {
			return NULL;
		}
		bzero(newnoop, sizeof(*newnoop));
		lck_mtx_init(&newnoop->noo_lock, nfs_open_grp, LCK_ATTR_NULL);
		newnoop->noo_mount = nmp;
		kauth_cred_ref(cred);
		newnoop->noo_cred = cred;
		/* unique owner name from a global sequence number */
		newnoop->noo_name = OSAddAtomic(1, &nfs_open_owner_seqnum);
		TAILQ_INIT(&newnoop->noo_opens);
		goto tryagain;
	}
	if (!noop && newnoop) {
		/* still not found on the second pass: link in the one we allocated */
		newnoop->noo_flags |= NFS_OPEN_OWNER_LINK;
		os_ref_init(&newnoop->noo_refcnt, NULL);
		TAILQ_INSERT_HEAD(&nmp->nm_open_owners, newnoop, noo_link);
		noop = newnoop;
	}
	lck_mtx_unlock(&nmp->nm_lock);

	/* lost the race while allocating: discard our unused copy */
	if (newnoop && (noop != newnoop)) {
		nfs_open_owner_destroy(newnoop);
	}

	if (noop) {
		nfs_open_owner_ref(noop);
	}

	return noop;
}
2006
2007/*
2008 * destroy an open owner that's no longer needed
2009 */
2010void
2011nfs_open_owner_destroy(struct nfs_open_owner *noop)
2012{
0a7de745 2013 if (noop->noo_cred) {
b0d623f7 2014 kauth_cred_unref(&noop->noo_cred);
0a7de745 2015 }
b0d623f7
A
2016 lck_mtx_destroy(&noop->noo_lock, nfs_open_grp);
2017 FREE(noop, M_TEMP);
2018}
2019
/*
 * acquire a reference count on an open owner
 */
void
nfs_open_owner_ref(struct nfs_open_owner *noop)
{
	lck_mtx_lock(&noop->noo_lock);
	/* refcount is guarded by the owner's mutex, hence the _locked variant */
	os_ref_retain_locked(&noop->noo_refcnt);
	lck_mtx_unlock(&noop->noo_lock);
}
2030
/*
 * drop a reference count on an open owner and destroy it if
 * it is no longer referenced and no longer on the mount's list.
 */
void
nfs_open_owner_rele(struct nfs_open_owner *noop)
{
	os_ref_count_t newcount;

	lck_mtx_lock(&noop->noo_lock);
	if (os_ref_get_count(&noop->noo_refcnt) < 1) {
		panic("nfs_open_owner_rele: no refcnt");
	}
	newcount = os_ref_release_locked(&noop->noo_refcnt);
	/* a busy owner must always be holding a reference */
	if (!newcount && (noop->noo_flags & NFS_OPEN_OWNER_BUSY)) {
		panic("nfs_open_owner_rele: busy");
	}
	/* XXX we may potentially want to clean up idle/unused open owner structures */
	if (newcount || (noop->noo_flags & NFS_OPEN_OWNER_LINK)) {
		/* still referenced, or still on the mount's list: keep it alive */
		lck_mtx_unlock(&noop->noo_lock);
		return;
	}
	/* owner is no longer referenced or linked to mount, so destroy it */
	lck_mtx_unlock(&noop->noo_lock);
	nfs_open_owner_destroy(noop);
}
2057
2058/*
2059 * Mark an open owner as busy because we are about to
2060 * start an operation that uses and updates open owner state.
2061 */
2062int
2063nfs_open_owner_set_busy(struct nfs_open_owner *noop, thread_t thd)
2064{
2065 struct nfsmount *nmp;
cb323159 2066 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
b0d623f7
A
2067 int error = 0, slpflag;
2068
2069 nmp = noop->noo_mount;
0a7de745
A
2070 if (nfs_mount_gone(nmp)) {
2071 return ENXIO;
2072 }
6d2010ae 2073 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
b0d623f7
A
2074
2075 lck_mtx_lock(&noop->noo_lock);
2076 while (noop->noo_flags & NFS_OPEN_OWNER_BUSY) {
0a7de745 2077 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
b0d623f7 2078 break;
0a7de745 2079 }
b0d623f7
A
2080 noop->noo_flags |= NFS_OPEN_OWNER_WANT;
2081 msleep(noop, &noop->noo_lock, slpflag, "nfs_open_owner_set_busy", &ts);
6d2010ae 2082 slpflag = 0;
b0d623f7 2083 }
0a7de745 2084 if (!error) {
b0d623f7 2085 noop->noo_flags |= NFS_OPEN_OWNER_BUSY;
0a7de745 2086 }
b0d623f7
A
2087 lck_mtx_unlock(&noop->noo_lock);
2088
0a7de745 2089 return error;
b0d623f7
A
2090}
2091
2092/*
2093 * Clear the busy flag on an open owner and wake up anyone waiting
2094 * to mark it busy.
2095 */
2096void
2097nfs_open_owner_clear_busy(struct nfs_open_owner *noop)
2098{
2099 int wanted;
2100
2101 lck_mtx_lock(&noop->noo_lock);
0a7de745 2102 if (!(noop->noo_flags & NFS_OPEN_OWNER_BUSY)) {
b0d623f7 2103 panic("nfs_open_owner_clear_busy");
0a7de745 2104 }
b0d623f7 2105 wanted = (noop->noo_flags & NFS_OPEN_OWNER_WANT);
0a7de745 2106 noop->noo_flags &= ~(NFS_OPEN_OWNER_BUSY | NFS_OPEN_OWNER_WANT);
b0d623f7 2107 lck_mtx_unlock(&noop->noo_lock);
0a7de745 2108 if (wanted) {
b0d623f7 2109 wakeup(noop);
0a7de745 2110 }
b0d623f7
A
2111}
2112
2113/*
2114 * Given an open/lock owner and an error code, increment the
2115 * sequence ID if appropriate.
2116 */
2117void
2118nfs_owner_seqid_increment(struct nfs_open_owner *noop, struct nfs_lock_owner *nlop, int error)
2119{
2120 switch (error) {
2121 case NFSERR_STALE_CLIENTID:
2122 case NFSERR_STALE_STATEID:
2123 case NFSERR_OLD_STATEID:
2124 case NFSERR_BAD_STATEID:
2125 case NFSERR_BAD_SEQID:
2126 case NFSERR_BADXDR:
2127 case NFSERR_RESOURCE:
2128 case NFSERR_NOFILEHANDLE:
2129 /* do not increment the open seqid on these errors */
2130 return;
2131 }
0a7de745 2132 if (noop) {
b0d623f7 2133 noop->noo_seqid++;
0a7de745
A
2134 }
2135 if (nlop) {
b0d623f7 2136 nlop->nlo_seqid++;
0a7de745 2137 }
b0d623f7
A
2138}
2139
/*
 * Search a node's open file list for any conflicts with this request.
 * Also find this open owner's open file structure.
 * If not found and "alloc" is set, then allocate one.
 */
int
nfs_open_file_find(
	nfsnode_t np,
	struct nfs_open_owner *noop,
	struct nfs_open_file **nofpp,
	uint32_t accessMode,
	uint32_t denyMode,
	int alloc)
{
	/* no provisional nofp being passed in; clear it before delegating */
	*nofpp = NULL;
	return nfs_open_file_find_internal(np, noop, nofpp, accessMode, denyMode, alloc);
}
2157
/*
 * Internally, allow using a provisional nodeless nofp (passed in via *nofpp)
 * if an existing one is not found. This is used in "create" scenarios to
 * officially add the provisional nofp to the node once the node is created.
 *
 * Returns 0 with *nofpp set, EACCES on a share-mode conflict,
 * ENOMEM on allocation failure, or ESRCH if no open file was found
 * and none could be created.
 */
int
nfs_open_file_find_internal(
	nfsnode_t np,
	struct nfs_open_owner *noop,
	struct nfs_open_file **nofpp,
	uint32_t accessMode,
	uint32_t denyMode,
	int alloc)
{
	struct nfs_open_file *nofp = NULL, *nofp2, *newnofp = NULL;

	/* nodeless case: skip the search and go straight to allocation */
	if (!np) {
		goto alloc;
	}
tryagain:
	lck_mtx_lock(&np->n_openlock);
	TAILQ_FOREACH(nofp2, &np->n_opens, nof_link) {
		if (nofp2->nof_owner == noop) {
			/* found this owner's open file */
			nofp = nofp2;
			if (!accessMode) {
				/* caller isn't opening, so no conflict scan needed */
				break;
			}
		}
		if ((accessMode & nofp2->nof_deny) || (denyMode & nofp2->nof_access)) {
			/* This request conflicts with an existing open on this client. */
			lck_mtx_unlock(&np->n_openlock);
			return EACCES;
		}
	}

	/*
	 * If this open owner doesn't have an open
	 * file structure yet, we create one for it.
	 */
	if (!nofp && !*nofpp && !newnofp && alloc) {
		/* drop the node lock across the blocking allocation, then rescan */
		lck_mtx_unlock(&np->n_openlock);
alloc:
		MALLOC(newnofp, struct nfs_open_file *, sizeof(struct nfs_open_file), M_TEMP, M_WAITOK);
		if (!newnofp) {
			return ENOMEM;
		}
		bzero(newnofp, sizeof(*newnofp));
		lck_mtx_init(&newnofp->nof_lock, nfs_open_grp, LCK_ATTR_NULL);
		newnofp->nof_owner = noop;
		nfs_open_owner_ref(noop);
		newnofp->nof_np = np;
		/* hang the new open file off its owner right away */
		lck_mtx_lock(&noop->noo_lock);
		TAILQ_INSERT_HEAD(&noop->noo_opens, newnofp, nof_oolink);
		lck_mtx_unlock(&noop->noo_lock);
		if (np) {
			goto tryagain;
		}
	}
	if (!nofp) {
		if (*nofpp) {
			/* attach the caller's provisional nofp to the node */
			(*nofpp)->nof_np = np;
			nofp = *nofpp;
		} else {
			nofp = newnofp;
		}
		if (nofp && np) {
			TAILQ_INSERT_HEAD(&np->n_opens, nofp, nof_link);
		}
	}
	if (np) {
		lck_mtx_unlock(&np->n_openlock);
	}

	/* lost the race while allocating: destroy the unused structure */
	if (alloc && newnofp && (nofp != newnofp)) {
		nfs_open_file_destroy(newnofp);
	}

	*nofpp = nofp;
	return nofp ? 0 : ESRCH;
}
2238
2239/*
2240 * Destroy an open file structure.
2241 */
2242void
2243nfs_open_file_destroy(struct nfs_open_file *nofp)
2244{
2245 lck_mtx_lock(&nofp->nof_owner->noo_lock);
2246 TAILQ_REMOVE(&nofp->nof_owner->noo_opens, nofp, nof_oolink);
2247 lck_mtx_unlock(&nofp->nof_owner->noo_lock);
2248 nfs_open_owner_rele(nofp->nof_owner);
2249 lck_mtx_destroy(&nofp->nof_lock, nfs_open_grp);
2250 FREE(nofp, M_TEMP);
2251}
2252
2253/*
2254 * Mark an open file as busy because we are about to
2255 * start an operation that uses and updates open file state.
2256 */
2257int
2258nfs_open_file_set_busy(struct nfs_open_file *nofp, thread_t thd)
2259{
2260 struct nfsmount *nmp;
cb323159 2261 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
b0d623f7
A
2262 int error = 0, slpflag;
2263
2264 nmp = nofp->nof_owner->noo_mount;
0a7de745
A
2265 if (nfs_mount_gone(nmp)) {
2266 return ENXIO;
2267 }
6d2010ae 2268 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
b0d623f7
A
2269
2270 lck_mtx_lock(&nofp->nof_lock);
2271 while (nofp->nof_flags & NFS_OPEN_FILE_BUSY) {
0a7de745 2272 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
b0d623f7 2273 break;
0a7de745 2274 }
b0d623f7
A
2275 nofp->nof_flags |= NFS_OPEN_FILE_WANT;
2276 msleep(nofp, &nofp->nof_lock, slpflag, "nfs_open_file_set_busy", &ts);
6d2010ae 2277 slpflag = 0;
b0d623f7 2278 }
0a7de745 2279 if (!error) {
b0d623f7 2280 nofp->nof_flags |= NFS_OPEN_FILE_BUSY;
0a7de745 2281 }
b0d623f7
A
2282 lck_mtx_unlock(&nofp->nof_lock);
2283
0a7de745 2284 return error;
b0d623f7
A
2285}
2286
2287/*
2288 * Clear the busy flag on an open file and wake up anyone waiting
2289 * to mark it busy.
2290 */
2291void
2292nfs_open_file_clear_busy(struct nfs_open_file *nofp)
2293{
2294 int wanted;
2295
2296 lck_mtx_lock(&nofp->nof_lock);
0a7de745 2297 if (!(nofp->nof_flags & NFS_OPEN_FILE_BUSY)) {
b0d623f7 2298 panic("nfs_open_file_clear_busy");
0a7de745 2299 }
b0d623f7 2300 wanted = (nofp->nof_flags & NFS_OPEN_FILE_WANT);
0a7de745 2301 nofp->nof_flags &= ~(NFS_OPEN_FILE_BUSY | NFS_OPEN_FILE_WANT);
b0d623f7 2302 lck_mtx_unlock(&nofp->nof_lock);
0a7de745 2303 if (wanted) {
b0d623f7 2304 wakeup(nofp);
0a7de745 2305 }
b0d623f7
A
2306}
2307
2308/*
6d2010ae 2309 * Add the open state for the given access/deny modes to this open file.
b0d623f7
A
2310 */
2311void
6d2010ae 2312nfs_open_file_add_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode, int delegated)
b0d623f7 2313{
6d2010ae
A
2314 lck_mtx_lock(&nofp->nof_lock);
2315 nofp->nof_access |= accessMode;
2316 nofp->nof_deny |= denyMode;
b0d623f7 2317
6d2010ae
A
2318 if (delegated) {
2319 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
0a7de745 2320 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2321 nofp->nof_d_r++;
0a7de745 2322 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2323 nofp->nof_d_w++;
0a7de745 2324 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2325 nofp->nof_d_rw++;
0a7de745 2326 }
6d2010ae 2327 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
0a7de745 2328 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2329 nofp->nof_d_r_dw++;
0a7de745 2330 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2331 nofp->nof_d_w_dw++;
0a7de745 2332 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2333 nofp->nof_d_rw_dw++;
0a7de745 2334 }
6d2010ae 2335 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
0a7de745 2336 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2337 nofp->nof_d_r_drw++;
0a7de745 2338 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2339 nofp->nof_d_w_drw++;
0a7de745 2340 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2341 nofp->nof_d_rw_drw++;
0a7de745 2342 }
6d2010ae 2343 }
b0d623f7 2344 } else {
6d2010ae 2345 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
0a7de745 2346 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2347 nofp->nof_r++;
0a7de745 2348 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2349 nofp->nof_w++;
0a7de745 2350 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2351 nofp->nof_rw++;
0a7de745 2352 }
6d2010ae 2353 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
0a7de745 2354 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2355 nofp->nof_r_dw++;
0a7de745 2356 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2357 nofp->nof_w_dw++;
0a7de745 2358 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2359 nofp->nof_rw_dw++;
0a7de745 2360 }
6d2010ae 2361 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
0a7de745 2362 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2363 nofp->nof_r_drw++;
0a7de745 2364 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2365 nofp->nof_w_drw++;
0a7de745 2366 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2367 nofp->nof_rw_drw++;
0a7de745 2368 }
6d2010ae 2369 }
b0d623f7 2370 }
6d2010ae
A
2371
2372 nofp->nof_opencnt++;
2373 lck_mtx_unlock(&nofp->nof_lock);
b0d623f7
A
2374}
2375
2376/*
6d2010ae
A
2377 * Find which particular open combo will be closed and report what
2378 * the new modes will be and whether the open was delegated.
b0d623f7 2379 */
6d2010ae
A
2380void
2381nfs_open_file_remove_open_find(
b0d623f7
A
2382 struct nfs_open_file *nofp,
2383 uint32_t accessMode,
2384 uint32_t denyMode,
6d2010ae
A
2385 uint32_t *newAccessMode,
2386 uint32_t *newDenyMode,
2387 int *delegated)
b0d623f7 2388{
6d2010ae
A
2389 /*
2390 * Calculate new modes: a mode bit gets removed when there's only
2391 * one count in all the corresponding counts
2392 */
2393 *newAccessMode = nofp->nof_access;
2394 *newDenyMode = nofp->nof_deny;
b0d623f7 2395
6d2010ae
A
2396 if ((accessMode & NFS_OPEN_SHARE_ACCESS_READ) &&
2397 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) &&
2398 ((nofp->nof_r + nofp->nof_d_r +
0a7de745
A
2399 nofp->nof_rw + nofp->nof_d_rw +
2400 nofp->nof_r_dw + nofp->nof_d_r_dw +
2401 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2402 nofp->nof_r_drw + nofp->nof_d_r_drw +
2403 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) {
6d2010ae 2404 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
0a7de745 2405 }
6d2010ae
A
2406 if ((accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2407 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2408 ((nofp->nof_w + nofp->nof_d_w +
0a7de745
A
2409 nofp->nof_rw + nofp->nof_d_rw +
2410 nofp->nof_w_dw + nofp->nof_d_w_dw +
2411 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2412 nofp->nof_w_drw + nofp->nof_d_w_drw +
2413 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) {
6d2010ae 2414 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_WRITE;
0a7de745 2415 }
6d2010ae
A
2416 if ((denyMode & NFS_OPEN_SHARE_DENY_READ) &&
2417 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) &&
2418 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
0a7de745
A
2419 nofp->nof_w_drw + nofp->nof_d_w_drw +
2420 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1)) {
6d2010ae 2421 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_READ;
0a7de745 2422 }
6d2010ae
A
2423 if ((denyMode & NFS_OPEN_SHARE_DENY_WRITE) &&
2424 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_WRITE) &&
2425 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
0a7de745
A
2426 nofp->nof_w_drw + nofp->nof_d_w_drw +
2427 nofp->nof_rw_drw + nofp->nof_d_rw_drw +
2428 nofp->nof_r_dw + nofp->nof_d_r_dw +
2429 nofp->nof_w_dw + nofp->nof_d_w_dw +
2430 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) {
6d2010ae 2431 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 2432 }
6d2010ae
A
2433
2434 /* Find the corresponding open access/deny mode counter. */
b0d623f7 2435 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
0a7de745 2436 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2437 *delegated = (nofp->nof_d_r != 0);
0a7de745 2438 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2439 *delegated = (nofp->nof_d_w != 0);
0a7de745 2440 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2441 *delegated = (nofp->nof_d_rw != 0);
0a7de745 2442 } else {
6d2010ae 2443 *delegated = 0;
0a7de745 2444 }
b0d623f7 2445 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
0a7de745 2446 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2447 *delegated = (nofp->nof_d_r_dw != 0);
0a7de745 2448 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2449 *delegated = (nofp->nof_d_w_dw != 0);
0a7de745 2450 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2451 *delegated = (nofp->nof_d_rw_dw != 0);
0a7de745 2452 } else {
6d2010ae 2453 *delegated = 0;
0a7de745 2454 }
b0d623f7 2455 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
0a7de745 2456 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2457 *delegated = (nofp->nof_d_r_drw != 0);
0a7de745 2458 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2459 *delegated = (nofp->nof_d_w_drw != 0);
0a7de745 2460 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
6d2010ae 2461 *delegated = (nofp->nof_d_rw_drw != 0);
0a7de745 2462 } else {
6d2010ae 2463 *delegated = 0;
0a7de745 2464 }
b0d623f7 2465 }
6d2010ae
A
2466}
2467
/*
 * Remove the open state for the given access/deny modes to this open file.
 *
 * Decrements exactly one (access, deny, delegated) counter — chosen by
 * nfs_open_file_remove_open_find() — and installs the recomputed
 * aggregate access/deny modes.  Underruns are logged rather than
 * wrapping the counter.
 */
void
nfs_open_file_remove_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode)
{
	uint32_t newAccessMode, newDenyMode;
	int delegated = 0;

	lck_mtx_lock(&nofp->nof_lock);
	/* compute the post-removal modes and whether this open was delegated */
	nfs_open_file_remove_open_find(nofp, accessMode, denyMode, &newAccessMode, &newDenyMode, &delegated);

	/* Decrement the corresponding open access/deny mode counter. */
	if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
		if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
			if (delegated) {
				if (nofp->nof_d_r == 0) {
					NP(nofp->nof_np, "nfs: open(R) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_r--;
				}
			} else {
				if (nofp->nof_r == 0) {
					NP(nofp->nof_np, "nfs: open(R) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_r--;
				}
			}
		} else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
			if (delegated) {
				if (nofp->nof_d_w == 0) {
					NP(nofp->nof_np, "nfs: open(W) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_w--;
				}
			} else {
				if (nofp->nof_w == 0) {
					NP(nofp->nof_np, "nfs: open(W) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_w--;
				}
			}
		} else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
			if (delegated) {
				if (nofp->nof_d_rw == 0) {
					NP(nofp->nof_np, "nfs: open(RW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_rw--;
				}
			} else {
				if (nofp->nof_rw == 0) {
					NP(nofp->nof_np, "nfs: open(RW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_rw--;
				}
			}
		}
	} else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
		if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
			if (delegated) {
				if (nofp->nof_d_r_dw == 0) {
					NP(nofp->nof_np, "nfs: open(R,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_r_dw--;
				}
			} else {
				if (nofp->nof_r_dw == 0) {
					NP(nofp->nof_np, "nfs: open(R,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_r_dw--;
				}
			}
		} else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
			if (delegated) {
				if (nofp->nof_d_w_dw == 0) {
					NP(nofp->nof_np, "nfs: open(W,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_w_dw--;
				}
			} else {
				if (nofp->nof_w_dw == 0) {
					NP(nofp->nof_np, "nfs: open(W,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_w_dw--;
				}
			}
		} else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
			if (delegated) {
				if (nofp->nof_d_rw_dw == 0) {
					NP(nofp->nof_np, "nfs: open(RW,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_rw_dw--;
				}
			} else {
				if (nofp->nof_rw_dw == 0) {
					NP(nofp->nof_np, "nfs: open(RW,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_rw_dw--;
				}
			}
		}
	} else { /* NFS_OPEN_SHARE_DENY_BOTH */
		if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
			if (delegated) {
				if (nofp->nof_d_r_drw == 0) {
					NP(nofp->nof_np, "nfs: open(R,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_r_drw--;
				}
			} else {
				if (nofp->nof_r_drw == 0) {
					NP(nofp->nof_np, "nfs: open(R,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_r_drw--;
				}
			}
		} else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
			if (delegated) {
				if (nofp->nof_d_w_drw == 0) {
					NP(nofp->nof_np, "nfs: open(W,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_w_drw--;
				}
			} else {
				if (nofp->nof_w_drw == 0) {
					NP(nofp->nof_np, "nfs: open(W,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_w_drw--;
				}
			}
		} else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
			if (delegated) {
				if (nofp->nof_d_rw_drw == 0) {
					NP(nofp->nof_np, "nfs: open(RW,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_rw_drw--;
				}
			} else {
				if (nofp->nof_rw_drw == 0) {
					NP(nofp->nof_np, "nfs: open(RW,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_rw_drw--;
				}
			}
		}
	}

	/* update the modes */
	nofp->nof_access = newAccessMode;
	nofp->nof_deny = newDenyMode;
	nofp->nof_opencnt--;
	lck_mtx_unlock(&nofp->nof_lock);
}
2621
cb323159 2622#if CONFIG_NFS4
6d2010ae
A
2623/*
2624 * Get the current (delegation, lock, open, default) stateid for this node.
2625 * If node has a delegation, use that stateid.
2626 * If pid has a lock, use the lockowner's stateid.
2627 * Or use the open file's stateid.
2628 * If no open file, use a default stateid of all ones.
2629 */
2630void
2631nfs_get_stateid(nfsnode_t np, thread_t thd, kauth_cred_t cred, nfs_stateid *sid)
2632{
2633 struct nfsmount *nmp = NFSTONMP(np);
2634 proc_t p = thd ? get_bsdthreadtask_info(thd) : current_proc(); // XXX async I/O requests don't have a thread
2635 struct nfs_open_owner *noop = NULL;
2636 struct nfs_open_file *nofp = NULL;
2637 struct nfs_lock_owner *nlop = NULL;
2638 nfs_stateid *s = NULL;
2639
2640 if (np->n_openflags & N_DELEG_MASK) {
2641 s = &np->n_dstateid;
2642 } else {
0a7de745 2643 if (p) {
6d2010ae 2644 nlop = nfs_lock_owner_find(np, p, 0);
0a7de745 2645 }
6d2010ae
A
2646 if (nlop && !TAILQ_EMPTY(&nlop->nlo_locks)) {
2647 /* we hold locks, use lock stateid */
2648 s = &nlop->nlo_stateid;
2649 } else if (((noop = nfs_open_owner_find(nmp, cred, 0))) &&
0a7de745
A
2650 (nfs_open_file_find(np, noop, &nofp, 0, 0, 0) == 0) &&
2651 !(nofp->nof_flags & NFS_OPEN_FILE_LOST) &&
2652 nofp->nof_access) {
6d2010ae 2653 /* we (should) have the file open, use open stateid */
0a7de745 2654 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
6d2010ae 2655 nfs4_reopen(nofp, thd);
0a7de745
A
2656 }
2657 if (!(nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6d2010ae 2658 s = &nofp->nof_stateid;
0a7de745 2659 }
6d2010ae
A
2660 }
2661 }
2662
2663 if (s) {
2664 sid->seqid = s->seqid;
2665 sid->other[0] = s->other[0];
2666 sid->other[1] = s->other[1];
2667 sid->other[2] = s->other[2];
2668 } else {
2669 /* named attributes may not have a stateid for reads, so don't complain for them */
0a7de745 2670 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
6d2010ae 2671 NP(np, "nfs_get_stateid: no stateid");
0a7de745 2672 }
6d2010ae
A
2673 sid->seqid = sid->other[0] = sid->other[1] = sid->other[2] = 0xffffffff;
2674 }
0a7de745 2675 if (nlop) {
6d2010ae 2676 nfs_lock_owner_rele(nlop);
0a7de745
A
2677 }
2678 if (noop) {
6d2010ae 2679 nfs_open_owner_rele(noop);
0a7de745 2680 }
6d2010ae
A
2681}
2682
2683
2684/*
2685 * When we have a delegation, we may be able to perform the OPEN locally.
2686 * Perform the OPEN by checking the delegation ACE and/or checking via ACCESS.
2687 */
2688int
2689nfs4_open_delegated(
2690 nfsnode_t np,
2691 struct nfs_open_file *nofp,
2692 uint32_t accessMode,
2693 uint32_t denyMode,
2694 vfs_context_t ctx)
2695{
2696 int error = 0, ismember, readtoo = 0, authorized = 0;
2697 uint32_t action;
2698 struct kauth_acl_eval eval;
2699 kauth_cred_t cred = vfs_context_ucred(ctx);
2700
2701 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2702 /*
2703 * Try to open it for read access too,
2704 * so the buffer cache can read data.
2705 */
2706 readtoo = 1;
2707 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2708 }
2709
2710tryagain:
2711 action = 0;
0a7de745 2712 if (accessMode & NFS_OPEN_SHARE_ACCESS_READ) {
6d2010ae 2713 action |= KAUTH_VNODE_READ_DATA;
0a7de745
A
2714 }
2715 if (accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) {
6d2010ae 2716 action |= KAUTH_VNODE_WRITE_DATA;
0a7de745 2717 }
6d2010ae
A
2718
2719 /* evaluate ACE (if we have one) */
2720 if (np->n_dace.ace_flags) {
2721 eval.ae_requested = action;
2722 eval.ae_acl = &np->n_dace;
2723 eval.ae_count = 1;
2724 eval.ae_options = 0;
0a7de745 2725 if (np->n_vattr.nva_uid == kauth_cred_getuid(cred)) {
6d2010ae 2726 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
0a7de745 2727 }
6d2010ae 2728 error = kauth_cred_ismember_gid(cred, np->n_vattr.nva_gid, &ismember);
0a7de745 2729 if (!error && ismember) {
6d2010ae 2730 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
0a7de745 2731 }
6d2010ae
A
2732
2733 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
2734 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
2735 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
2736 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
2737
2738 error = kauth_acl_evaluate(cred, &eval);
2739
0a7de745 2740 if (!error && (eval.ae_result == KAUTH_RESULT_ALLOW)) {
6d2010ae 2741 authorized = 1;
0a7de745 2742 }
6d2010ae
A
2743 }
2744
2745 if (!authorized) {
2746 /* need to ask the server via ACCESS */
2747 struct vnop_access_args naa;
2748 naa.a_desc = &vnop_access_desc;
2749 naa.a_vp = NFSTOV(np);
2750 naa.a_action = action;
2751 naa.a_context = ctx;
0a7de745 2752 if (!(error = nfs_vnop_access(&naa))) {
6d2010ae 2753 authorized = 1;
0a7de745 2754 }
6d2010ae
A
2755 }
2756
2757 if (!authorized) {
2758 if (readtoo) {
2759 /* try again without the extra read access */
2760 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2761 readtoo = 0;
2762 goto tryagain;
2763 }
0a7de745 2764 return error ? error : EACCES;
6d2010ae
A
2765 }
2766
2767 nfs_open_file_add_open(nofp, accessMode, denyMode, 1);
2768
0a7de745 2769 return 0;
6d2010ae
A
2770}
2771
2772
2773/*
2774 * Open a file with the given access/deny modes.
2775 *
2776 * If we have a delegation, we may be able to handle the open locally.
2777 * Otherwise, we will always send the open RPC even if this open's mode is
2778 * a subset of all the existing opens. This makes sure that we will always
2779 * be able to do a downgrade to any of the open modes.
2780 *
2781 * Note: local conflicts should have already been checked in nfs_open_file_find().
2782 */
2783int
2784nfs4_open(
2785 nfsnode_t np,
2786 struct nfs_open_file *nofp,
2787 uint32_t accessMode,
2788 uint32_t denyMode,
2789 vfs_context_t ctx)
2790{
2791 vnode_t vp = NFSTOV(np);
2792 vnode_t dvp = NULL;
2793 struct componentname cn;
2794 const char *vname = NULL;
2795 size_t namelen;
2796 char smallname[128];
2797 char *filename = NULL;
2798 int error = 0, readtoo = 0;
2799
2800 /*
2801 * We can handle the OPEN ourselves if we have a delegation,
2802 * unless it's a read delegation and the open is asking for
2803 * either write access or deny read. We also don't bother to
2804 * use the delegation if it's being returned.
2805 */
2806 if (np->n_openflags & N_DELEG_MASK) {
0a7de745
A
2807 if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) {
2808 return error;
2809 }
6d2010ae
A
2810 if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN) &&
2811 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ||
0a7de745 2812 (!(accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) && !(denyMode & NFS_OPEN_SHARE_DENY_READ)))) {
6d2010ae
A
2813 error = nfs4_open_delegated(np, nofp, accessMode, denyMode, ctx);
2814 nfs_open_state_clear_busy(np);
0a7de745 2815 return error;
6d2010ae
A
2816 }
2817 nfs_open_state_clear_busy(np);
2818 }
2819
2820 /*
2821 * [sigh] We can't trust VFS to get the parent right for named
2822 * attribute nodes. (It likes to reparent the nodes after we've
2823 * created them.) Luckily we can probably get the right parent
2824 * from the n_parent we have stashed away.
2825 */
2826 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
0a7de745 2827 (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
6d2010ae 2828 dvp = NULL;
0a7de745
A
2829 }
2830 if (!dvp) {
6d2010ae 2831 dvp = vnode_getparent(vp);
0a7de745 2832 }
6d2010ae
A
2833 vname = vnode_getname(vp);
2834 if (!dvp || !vname) {
0a7de745 2835 if (!error) {
6d2010ae 2836 error = EIO;
0a7de745 2837 }
6d2010ae
A
2838 goto out;
2839 }
2840 filename = &smallname[0];
2841 namelen = snprintf(filename, sizeof(smallname), "%s", vname);
2842 if (namelen >= sizeof(smallname)) {
0a7de745 2843 MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
6d2010ae
A
2844 if (!filename) {
2845 error = ENOMEM;
2846 goto out;
2847 }
0a7de745 2848 snprintf(filename, namelen + 1, "%s", vname);
6d2010ae
A
2849 }
2850 bzero(&cn, sizeof(cn));
2851 cn.cn_nameptr = filename;
2852 cn.cn_namelen = namelen;
2853
2854 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2855 /*
2856 * Try to open it for read access too,
2857 * so the buffer cache can read data.
2858 */
2859 readtoo = 1;
2860 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2861 }
2862tryagain:
2863 error = nfs4_open_rpc(nofp, ctx, &cn, NULL, dvp, &vp, NFS_OPEN_NOCREATE, accessMode, denyMode);
2864 if (error) {
2865 if (!nfs_mount_state_error_should_restart(error) &&
2866 (error != EINTR) && (error != ERESTART) && readtoo) {
2867 /* try again without the extra read access */
2868 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2869 readtoo = 0;
2870 goto tryagain;
2871 }
2872 goto out;
2873 }
2874 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
b0d623f7 2875out:
0a7de745 2876 if (filename && (filename != &smallname[0])) {
b0d623f7 2877 FREE(filename, M_TEMP);
0a7de745
A
2878 }
2879 if (vname) {
b0d623f7 2880 vnode_putname(vname);
0a7de745
A
2881 }
2882 if (dvp != NULLVP) {
b0d623f7 2883 vnode_put(dvp);
0a7de745
A
2884 }
2885 return error;
b0d623f7 2886}
cb323159 2887#endif /* CONFIG_NFS4 */
b0d623f7 2888
b0d623f7 2889int
6d2010ae
A
2890nfs_vnop_mmap(
2891 struct vnop_mmap_args /* {
0a7de745
A
2892 * struct vnodeop_desc *a_desc;
2893 * vnode_t a_vp;
2894 * int a_fflags;
2895 * vfs_context_t a_context;
2896 * } */*ap)
b0d623f7
A
2897{
2898 vfs_context_t ctx = ap->a_context;
2899 vnode_t vp = ap->a_vp;
2900 nfsnode_t np = VTONFS(vp);
6d2010ae 2901 int error = 0, accessMode, denyMode, delegated;
b0d623f7 2902 struct nfsmount *nmp;
b0d623f7
A
2903 struct nfs_open_owner *noop = NULL;
2904 struct nfs_open_file *nofp = NULL;
2905
b0d623f7 2906 nmp = VTONMP(vp);
0a7de745
A
2907 if (nfs_mount_gone(nmp)) {
2908 return ENXIO;
2909 }
b0d623f7 2910
0a7de745
A
2911 if (!vnode_isreg(vp) || !(ap->a_fflags & (PROT_READ | PROT_WRITE))) {
2912 return EINVAL;
2913 }
2914 if (np->n_flag & NREVOKE) {
2915 return EIO;
2916 }
b0d623f7 2917
6d2010ae
A
2918 /*
2919 * fflags contains some combination of: PROT_READ, PROT_WRITE
2920 * Since it's not possible to mmap() without having the file open for reading,
2921 * read access is always there (regardless if PROT_READ is not set).
2922 */
2923 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
0a7de745 2924 if (ap->a_fflags & PROT_WRITE) {
b0d623f7 2925 accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE;
0a7de745 2926 }
6d2010ae 2927 denyMode = NFS_OPEN_SHARE_DENY_NONE;
b0d623f7
A
2928
2929 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
0a7de745
A
2930 if (!noop) {
2931 return ENOMEM;
2932 }
b0d623f7
A
2933
2934restart:
6d2010ae 2935 error = nfs_mount_state_in_use_start(nmp, NULL);
b0d623f7
A
2936 if (error) {
2937 nfs_open_owner_rele(noop);
0a7de745 2938 return error;
b0d623f7 2939 }
6d2010ae 2940 if (np->n_flag & NREVOKE) {
b0d623f7 2941 error = EIO;
6d2010ae
A
2942 nfs_mount_state_in_use_end(nmp, 0);
2943 nfs_open_owner_rele(noop);
0a7de745 2944 return error;
6d2010ae
A
2945 }
2946
2947 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
2948 if (error || (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST))) {
2949 NP(np, "nfs_vnop_mmap: no open file for owner, error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
2950 error = EPERM;
b0d623f7 2951 }
cb323159 2952#if CONFIG_NFS4
b0d623f7
A
2953 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
2954 nfs_mount_state_in_use_end(nmp, 0);
6d2010ae 2955 error = nfs4_reopen(nofp, NULL);
b0d623f7 2956 nofp = NULL;
0a7de745 2957 if (!error) {
6d2010ae 2958 goto restart;
0a7de745 2959 }
b0d623f7 2960 }
cb323159 2961#endif
0a7de745 2962 if (!error) {
6d2010ae 2963 error = nfs_open_file_set_busy(nofp, NULL);
0a7de745 2964 }
b0d623f7
A
2965 if (error) {
2966 nofp = NULL;
2967 goto out;
2968 }
2969
2970 /*
6d2010ae
A
2971 * The open reference for mmap must mirror an existing open because
2972 * we may need to reclaim it after the file is closed.
2973 * So grab another open count matching the accessMode passed in.
2974 * If we already had an mmap open, prefer read/write without deny mode.
2975 * This means we may have to drop the current mmap open first.
3e170ce0
A
2976 *
2977 * N.B. We should have an open for the mmap, because, mmap was
2978 * called on an open descriptor, or we've created an open for read
2979 * from reading the first page for execve. However, if we piggy
2980 * backed on an existing NFS_OPEN_SHARE_ACCESS_READ/NFS_OPEN_SHARE_DENY_NONE
2981 * that open may have closed.
b0d623f7 2982 */
6d2010ae 2983
3e170ce0
A
2984 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
2985 if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
2986 /* We shouldn't get here. We've already open the file for execve */
2987 NP(np, "nfs_vnop_mmap: File already needs close access: 0x%x, cred: %d thread: %lld",
0a7de745 2988 nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
3e170ce0
A
2989 }
2990 /*
2991 * mmapings for execve are just for read. Get out with EPERM if the accessMode is not ACCESS_READ
2992 * or the access would be denied. Other accesses should have an open descriptor for the mapping.
2993 */
2994 if (accessMode != NFS_OPEN_SHARE_ACCESS_READ || (accessMode & nofp->nof_deny)) {
6d2010ae
A
2995 /* not asking for just read access -> fail */
2996 error = EPERM;
2997 goto out;
2998 }
2999 /* we don't have the file open, so open it for read access */
3000 if (nmp->nm_vers < NFS_VER4) {
3001 /* NFS v2/v3 opens are always allowed - so just add it. */
3002 nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
b0d623f7 3003 error = 0;
cb323159
A
3004 }
3005#if CONFIG_NFS4
3006 else {
6d2010ae 3007 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
b0d623f7 3008 }
cb323159 3009#endif
0a7de745 3010 if (!error) {
6d2010ae 3011 nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
0a7de745
A
3012 }
3013 if (error) {
b0d623f7 3014 goto out;
0a7de745 3015 }
6d2010ae
A
3016 }
3017
3018 /* determine deny mode for open */
3019 if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
3020 if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
3021 delegated = 1;
0a7de745 3022 if (nofp->nof_d_rw) {
6d2010ae 3023 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 3024 } else if (nofp->nof_d_rw_dw) {
6d2010ae 3025 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 3026 } else if (nofp->nof_d_rw_drw) {
6d2010ae 3027 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
0a7de745 3028 }
6d2010ae
A
3029 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
3030 delegated = 0;
0a7de745 3031 if (nofp->nof_rw) {
6d2010ae 3032 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 3033 } else if (nofp->nof_rw_dw) {
6d2010ae 3034 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 3035 } else if (nofp->nof_rw_drw) {
6d2010ae 3036 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
0a7de745 3037 }
6d2010ae
A
3038 } else {
3039 error = EPERM;
3040 }
3041 } else { /* NFS_OPEN_SHARE_ACCESS_READ */
3042 if (nofp->nof_d_r || nofp->nof_d_r_dw || nofp->nof_d_r_drw) {
3043 delegated = 1;
0a7de745 3044 if (nofp->nof_d_r) {
6d2010ae 3045 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 3046 } else if (nofp->nof_d_r_dw) {
6d2010ae 3047 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 3048 } else if (nofp->nof_d_r_drw) {
6d2010ae 3049 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
0a7de745 3050 }
6d2010ae
A
3051 } else if (nofp->nof_r || nofp->nof_r_dw || nofp->nof_r_drw) {
3052 delegated = 0;
0a7de745 3053 if (nofp->nof_r) {
6d2010ae 3054 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 3055 } else if (nofp->nof_r_dw) {
6d2010ae 3056 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 3057 } else if (nofp->nof_r_drw) {
6d2010ae 3058 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
0a7de745 3059 }
3e170ce0
A
3060 } else if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
3061 /*
3062 * This clause and the one below is to co-opt a read write access
3063 * for a read only mmaping. We probably got here in that an
3064 * existing rw open for an executable file already exists.
3065 */
3066 delegated = 1;
3067 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
0a7de745 3068 if (nofp->nof_d_rw) {
3e170ce0 3069 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 3070 } else if (nofp->nof_d_rw_dw) {
3e170ce0 3071 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 3072 } else if (nofp->nof_d_rw_drw) {
3e170ce0 3073 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
0a7de745 3074 }
3e170ce0
A
3075 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
3076 delegated = 0;
3077 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
0a7de745 3078 if (nofp->nof_rw) {
3e170ce0 3079 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 3080 } else if (nofp->nof_rw_dw) {
3e170ce0 3081 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
0a7de745 3082 } else if (nofp->nof_rw_drw) {
3e170ce0 3083 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
0a7de745 3084 }
6d2010ae
A
3085 } else {
3086 error = EPERM;
3087 }
3088 }
0a7de745 3089 if (error) { /* mmap mode without proper open mode */
6d2010ae 3090 goto out;
0a7de745 3091 }
6d2010ae
A
3092
3093 /*
3094 * If the existing mmap access is more than the new access OR the
3095 * existing access is the same and the existing deny mode is less,
3096 * then we'll stick with the existing mmap open mode.
3097 */
3098 if ((nofp->nof_mmap_access > accessMode) ||
0a7de745 3099 ((nofp->nof_mmap_access == accessMode) && (nofp->nof_mmap_deny <= denyMode))) {
6d2010ae 3100 goto out;
0a7de745 3101 }
6d2010ae
A
3102
3103 /* update mmap open mode */
3104 if (nofp->nof_mmap_access) {
3105 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
3106 if (error) {
0a7de745 3107 if (!nfs_mount_state_error_should_restart(error)) {
6d2010ae 3108 NP(np, "nfs_vnop_mmap: close of previous mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 3109 }
6d2010ae
A
3110 NP(np, "nfs_vnop_mmap: update, close error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
3111 goto out;
b0d623f7 3112 }
6d2010ae 3113 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
b0d623f7
A
3114 }
3115
6d2010ae
A
3116 nfs_open_file_add_open(nofp, accessMode, denyMode, delegated);
3117 nofp->nof_mmap_access = accessMode;
3118 nofp->nof_mmap_deny = denyMode;
3119
b0d623f7 3120out:
0a7de745 3121 if (nofp) {
b0d623f7 3122 nfs_open_file_clear_busy(nofp);
0a7de745 3123 }
b0d623f7
A
3124 if (nfs_mount_state_in_use_end(nmp, error)) {
3125 nofp = NULL;
3126 goto restart;
3127 }
0a7de745 3128 if (noop) {
b0d623f7 3129 nfs_open_owner_rele(noop);
0a7de745 3130 }
316670eb
A
3131
3132 if (!error) {
3133 int ismapped = 0;
3134 nfs_node_lock_force(np);
3135 if ((np->n_flag & NISMAPPED) == 0) {
3136 np->n_flag |= NISMAPPED;
3137 ismapped = 1;
3138 }
3139 nfs_node_unlock(np);
3140 if (ismapped) {
3141 lck_mtx_lock(&nmp->nm_lock);
3142 nmp->nm_state &= ~NFSSTA_SQUISHY;
3143 nmp->nm_curdeadtimeout = nmp->nm_deadtimeout;
0a7de745 3144 if (nmp->nm_curdeadtimeout <= 0) {
316670eb 3145 nmp->nm_deadto_start = 0;
0a7de745 3146 }
316670eb
A
3147 nmp->nm_mappers++;
3148 lck_mtx_unlock(&nmp->nm_lock);
3149 }
3150 }
3151
0a7de745 3152 return error;
b0d623f7
A
3153}
3154
b0d623f7
A
3155
3156int
6d2010ae
A
3157nfs_vnop_mnomap(
3158 struct vnop_mnomap_args /* {
0a7de745
A
3159 * struct vnodeop_desc *a_desc;
3160 * vnode_t a_vp;
3161 * vfs_context_t a_context;
3162 * } */*ap)
b0d623f7
A
3163{
3164 vfs_context_t ctx = ap->a_context;
3165 vnode_t vp = ap->a_vp;
b0d623f7
A
3166 nfsnode_t np = VTONFS(vp);
3167 struct nfsmount *nmp;
b0d623f7 3168 struct nfs_open_file *nofp = NULL;
6d2010ae
A
3169 off_t size;
3170 int error;
316670eb 3171 int is_mapped_flag = 0;
0a7de745 3172
b0d623f7 3173 nmp = VTONMP(vp);
0a7de745
A
3174 if (nfs_mount_gone(nmp)) {
3175 return ENXIO;
3176 }
b0d623f7 3177
316670eb
A
3178 nfs_node_lock_force(np);
3179 if (np->n_flag & NISMAPPED) {
3180 is_mapped_flag = 1;
3181 np->n_flag &= ~NISMAPPED;
3182 }
3183 nfs_node_unlock(np);
3184 if (is_mapped_flag) {
3185 lck_mtx_lock(&nmp->nm_lock);
0a7de745 3186 if (nmp->nm_mappers) {
316670eb 3187 nmp->nm_mappers--;
0a7de745 3188 } else {
316670eb 3189 NP(np, "nfs_vnop_mnomap: removing mmap reference from mount, but mount has no files mmapped");
0a7de745 3190 }
316670eb
A
3191 lck_mtx_unlock(&nmp->nm_lock);
3192 }
3193
6d2010ae
A
3194 /* flush buffers/ubc before we drop the open (in case it's our last open) */
3195 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
0a7de745 3196 if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp))) {
6d2010ae 3197 ubc_msync(vp, 0, size, NULL, UBC_PUSHALL | UBC_SYNC);
0a7de745 3198 }
b0d623f7 3199
6d2010ae
A
3200 /* walk all open files and close all mmap opens */
3201loop:
3202 error = nfs_mount_state_in_use_start(nmp, NULL);
0a7de745
A
3203 if (error) {
3204 return error;
3205 }
6d2010ae
A
3206 lck_mtx_lock(&np->n_openlock);
3207 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
0a7de745 3208 if (!nofp->nof_mmap_access) {
6d2010ae 3209 continue;
0a7de745 3210 }
b0d623f7 3211 lck_mtx_unlock(&np->n_openlock);
cb323159 3212#if CONFIG_NFS4
6d2010ae
A
3213 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
3214 nfs_mount_state_in_use_end(nmp, 0);
3215 error = nfs4_reopen(nofp, NULL);
0a7de745 3216 if (!error) {
6d2010ae 3217 goto loop;
0a7de745 3218 }
6d2010ae 3219 }
cb323159 3220#endif
0a7de745 3221 if (!error) {
6d2010ae 3222 error = nfs_open_file_set_busy(nofp, NULL);
0a7de745 3223 }
6d2010ae
A
3224 if (error) {
3225 lck_mtx_lock(&np->n_openlock);
3226 break;
3227 }
3228 if (nofp->nof_mmap_access) {
3229 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
3230 if (!nfs_mount_state_error_should_restart(error)) {
0a7de745 3231 if (error) { /* not a state-operation-restarting error, so just clear the access */
6d2010ae 3232 NP(np, "nfs_vnop_mnomap: close of mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 3233 }
6d2010ae
A
3234 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
3235 }
0a7de745 3236 if (error) {
6d2010ae 3237 NP(np, "nfs_vnop_mnomap: error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 3238 }
6d2010ae
A
3239 }
3240 nfs_open_file_clear_busy(nofp);
3241 nfs_mount_state_in_use_end(nmp, error);
3242 goto loop;
b0d623f7 3243 }
6d2010ae
A
3244 lck_mtx_unlock(&np->n_openlock);
3245 nfs_mount_state_in_use_end(nmp, error);
0a7de745 3246 return error;
6d2010ae 3247}
b0d623f7 3248
6d2010ae
A
3249/*
3250 * Search a node's lock owner list for the owner for this process.
3251 * If not found and "alloc" is set, then allocate a new one.
3252 */
3253struct nfs_lock_owner *
3254nfs_lock_owner_find(nfsnode_t np, proc_t p, int alloc)
3255{
3256 pid_t pid = proc_pid(p);
3257 struct nfs_lock_owner *nlop, *newnlop = NULL;
b0d623f7 3258
6d2010ae
A
3259tryagain:
3260 lck_mtx_lock(&np->n_openlock);
3261 TAILQ_FOREACH(nlop, &np->n_lock_owners, nlo_link) {
0a7de745
A
3262 os_ref_count_t newcount;
3263
3264 if (nlop->nlo_pid != pid) {
6d2010ae 3265 continue;
0a7de745
A
3266 }
3267 if (timevalcmp(&nlop->nlo_pid_start, &p->p_start, ==)) {
6d2010ae 3268 break;
0a7de745 3269 }
6d2010ae 3270 /* stale lock owner... reuse it if we can */
0a7de745 3271 if (os_ref_get_count(&nlop->nlo_refcnt)) {
6d2010ae
A
3272 TAILQ_REMOVE(&np->n_lock_owners, nlop, nlo_link);
3273 nlop->nlo_flags &= ~NFS_LOCK_OWNER_LINK;
0a7de745 3274 newcount = os_ref_release_locked(&nlop->nlo_refcnt);
6d2010ae
A
3275 lck_mtx_unlock(&np->n_openlock);
3276 goto tryagain;
3277 }
3278 nlop->nlo_pid_start = p->p_start;
3279 nlop->nlo_seqid = 0;
3280 nlop->nlo_stategenid = 0;
3281 break;
b0d623f7
A
3282 }
3283
6d2010ae
A
3284 if (!nlop && !newnlop && alloc) {
3285 lck_mtx_unlock(&np->n_openlock);
3286 MALLOC(newnlop, struct nfs_lock_owner *, sizeof(struct nfs_lock_owner), M_TEMP, M_WAITOK);
0a7de745
A
3287 if (!newnlop) {
3288 return NULL;
3289 }
6d2010ae
A
3290 bzero(newnlop, sizeof(*newnlop));
3291 lck_mtx_init(&newnlop->nlo_lock, nfs_open_grp, LCK_ATTR_NULL);
3292 newnlop->nlo_pid = pid;
3293 newnlop->nlo_pid_start = p->p_start;
3294 newnlop->nlo_name = OSAddAtomic(1, &nfs_lock_owner_seqnum);
3295 TAILQ_INIT(&newnlop->nlo_locks);
3296 goto tryagain;
b0d623f7 3297 }
6d2010ae
A
3298 if (!nlop && newnlop) {
3299 newnlop->nlo_flags |= NFS_LOCK_OWNER_LINK;
0a7de745 3300 os_ref_init(&newnlop->nlo_refcnt, NULL);
6d2010ae
A
3301 TAILQ_INSERT_HEAD(&np->n_lock_owners, newnlop, nlo_link);
3302 nlop = newnlop;
b0d623f7 3303 }
6d2010ae 3304 lck_mtx_unlock(&np->n_openlock);
b0d623f7 3305
0a7de745 3306 if (newnlop && (nlop != newnlop)) {
6d2010ae 3307 nfs_lock_owner_destroy(newnlop);
0a7de745 3308 }
b0d623f7 3309
0a7de745 3310 if (nlop) {
6d2010ae 3311 nfs_lock_owner_ref(nlop);
0a7de745 3312 }
b0d623f7 3313
0a7de745 3314 return nlop;
6d2010ae 3315}
b0d623f7
A
3316
3317/*
3318 * destroy a lock owner that's no longer needed
3319 */
3320void
3321nfs_lock_owner_destroy(struct nfs_lock_owner *nlop)
3322{
3323 if (nlop->nlo_open_owner) {
3324 nfs_open_owner_rele(nlop->nlo_open_owner);
3325 nlop->nlo_open_owner = NULL;
3326 }
3327 lck_mtx_destroy(&nlop->nlo_lock, nfs_open_grp);
3328 FREE(nlop, M_TEMP);
3329}
3330
3331/*
3332 * acquire a reference count on a lock owner
3333 */
3334void
3335nfs_lock_owner_ref(struct nfs_lock_owner *nlop)
3336{
3337 lck_mtx_lock(&nlop->nlo_lock);
0a7de745 3338 os_ref_retain_locked(&nlop->nlo_refcnt);
b0d623f7
A
3339 lck_mtx_unlock(&nlop->nlo_lock);
3340}
3341
3342/*
3343 * drop a reference count on a lock owner and destroy it if
3344 * it is no longer referenced and no longer on the mount's list.
3345 */
3346void
3347nfs_lock_owner_rele(struct nfs_lock_owner *nlop)
3348{
0a7de745
A
3349 os_ref_count_t newcount;
3350
b0d623f7 3351 lck_mtx_lock(&nlop->nlo_lock);
0a7de745 3352 if (os_ref_get_count(&nlop->nlo_refcnt) < 1) {
b0d623f7 3353 panic("nfs_lock_owner_rele: no refcnt");
0a7de745
A
3354 }
3355 newcount = os_ref_release_locked(&nlop->nlo_refcnt);
3356 if (!newcount && (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY)) {
b0d623f7 3357 panic("nfs_lock_owner_rele: busy");
0a7de745 3358 }
b0d623f7 3359 /* XXX we may potentially want to clean up idle/unused lock owner structures */
0a7de745 3360 if (newcount || (nlop->nlo_flags & NFS_LOCK_OWNER_LINK)) {
b0d623f7
A
3361 lck_mtx_unlock(&nlop->nlo_lock);
3362 return;
3363 }
3364 /* owner is no longer referenced or linked to mount, so destroy it */
3365 lck_mtx_unlock(&nlop->nlo_lock);
3366 nfs_lock_owner_destroy(nlop);
3367}
3368
3369/*
3370 * Mark a lock owner as busy because we are about to
3371 * start an operation that uses and updates lock owner state.
3372 */
3373int
3374nfs_lock_owner_set_busy(struct nfs_lock_owner *nlop, thread_t thd)
3375{
3376 struct nfsmount *nmp;
cb323159 3377 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
b0d623f7
A
3378 int error = 0, slpflag;
3379
3380 nmp = nlop->nlo_open_owner->noo_mount;
0a7de745
A
3381 if (nfs_mount_gone(nmp)) {
3382 return ENXIO;
3383 }
6d2010ae 3384 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
b0d623f7
A
3385
3386 lck_mtx_lock(&nlop->nlo_lock);
3387 while (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY) {
0a7de745 3388 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
b0d623f7 3389 break;
0a7de745 3390 }
b0d623f7
A
3391 nlop->nlo_flags |= NFS_LOCK_OWNER_WANT;
3392 msleep(nlop, &nlop->nlo_lock, slpflag, "nfs_lock_owner_set_busy", &ts);
6d2010ae 3393 slpflag = 0;
b0d623f7 3394 }
0a7de745 3395 if (!error) {
b0d623f7 3396 nlop->nlo_flags |= NFS_LOCK_OWNER_BUSY;
0a7de745 3397 }
b0d623f7
A
3398 lck_mtx_unlock(&nlop->nlo_lock);
3399
0a7de745 3400 return error;
b0d623f7
A
3401}
3402
3403/*
3404 * Clear the busy flag on a lock owner and wake up anyone waiting
3405 * to mark it busy.
3406 */
3407void
3408nfs_lock_owner_clear_busy(struct nfs_lock_owner *nlop)
3409{
3410 int wanted;
3411
3412 lck_mtx_lock(&nlop->nlo_lock);
0a7de745 3413 if (!(nlop->nlo_flags & NFS_LOCK_OWNER_BUSY)) {
b0d623f7 3414 panic("nfs_lock_owner_clear_busy");
0a7de745 3415 }
b0d623f7 3416 wanted = (nlop->nlo_flags & NFS_LOCK_OWNER_WANT);
0a7de745 3417 nlop->nlo_flags &= ~(NFS_LOCK_OWNER_BUSY | NFS_LOCK_OWNER_WANT);
b0d623f7 3418 lck_mtx_unlock(&nlop->nlo_lock);
0a7de745 3419 if (wanted) {
b0d623f7 3420 wakeup(nlop);
0a7de745 3421 }
b0d623f7
A
3422}
3423
3424/*
3425 * Insert a held lock into a lock owner's sorted list.
3426 * (flock locks are always inserted at the head the list)
3427 */
3428void
3429nfs_lock_owner_insert_held_lock(struct nfs_lock_owner *nlop, struct nfs_file_lock *newnflp)
3430{
3431 struct nfs_file_lock *nflp;
3432
3433 /* insert new lock in lock owner's held lock list */
3434 lck_mtx_lock(&nlop->nlo_lock);
3435 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) {
3436 TAILQ_INSERT_HEAD(&nlop->nlo_locks, newnflp, nfl_lolink);
3437 } else {
3438 TAILQ_FOREACH(nflp, &nlop->nlo_locks, nfl_lolink) {
0a7de745 3439 if (newnflp->nfl_start < nflp->nfl_start) {
b0d623f7 3440 break;
0a7de745 3441 }
b0d623f7 3442 }
0a7de745 3443 if (nflp) {
b0d623f7 3444 TAILQ_INSERT_BEFORE(nflp, newnflp, nfl_lolink);
0a7de745 3445 } else {
b0d623f7 3446 TAILQ_INSERT_TAIL(&nlop->nlo_locks, newnflp, nfl_lolink);
0a7de745 3447 }
b0d623f7
A
3448 }
3449 lck_mtx_unlock(&nlop->nlo_lock);
3450}
3451
3452/*
3453 * Get a file lock structure for this lock owner.
3454 */
3455struct nfs_file_lock *
3456nfs_file_lock_alloc(struct nfs_lock_owner *nlop)
3457{
3458 struct nfs_file_lock *nflp = NULL;
3459
3460 lck_mtx_lock(&nlop->nlo_lock);
3461 if (!nlop->nlo_alock.nfl_owner) {
3462 nflp = &nlop->nlo_alock;
3463 nflp->nfl_owner = nlop;
3464 }
3465 lck_mtx_unlock(&nlop->nlo_lock);
3466 if (!nflp) {
3467 MALLOC(nflp, struct nfs_file_lock *, sizeof(struct nfs_file_lock), M_TEMP, M_WAITOK);
0a7de745
A
3468 if (!nflp) {
3469 return NULL;
3470 }
b0d623f7
A
3471 bzero(nflp, sizeof(*nflp));
3472 nflp->nfl_flags |= NFS_FILE_LOCK_ALLOC;
3473 nflp->nfl_owner = nlop;
3474 }
3475 nfs_lock_owner_ref(nlop);
0a7de745 3476 return nflp;
b0d623f7
A
3477}
3478
3479/*
3480 * destroy the given NFS file lock structure
3481 */
3482void
3483nfs_file_lock_destroy(struct nfs_file_lock *nflp)
3484{
3485 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3486
3487 if (nflp->nfl_flags & NFS_FILE_LOCK_ALLOC) {
3488 nflp->nfl_owner = NULL;
3489 FREE(nflp, M_TEMP);
3490 } else {
3491 lck_mtx_lock(&nlop->nlo_lock);
3e170ce0 3492 bzero(nflp, sizeof(*nflp));
b0d623f7
A
3493 lck_mtx_unlock(&nlop->nlo_lock);
3494 }
3495 nfs_lock_owner_rele(nlop);
3496}
3497
3498/*
3499 * Check if one file lock conflicts with another.
3500 * (nflp1 is the new lock. nflp2 is the existing lock.)
3501 */
3502int
3503nfs_file_lock_conflict(struct nfs_file_lock *nflp1, struct nfs_file_lock *nflp2, int *willsplit)
3504{
3505 /* no conflict if lock is dead */
0a7de745
A
3506 if ((nflp1->nfl_flags & NFS_FILE_LOCK_DEAD) || (nflp2->nfl_flags & NFS_FILE_LOCK_DEAD)) {
3507 return 0;
3508 }
b0d623f7
A
3509 /* no conflict if it's ours - unless the lock style doesn't match */
3510 if ((nflp1->nfl_owner == nflp2->nfl_owner) &&
3511 ((nflp1->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == (nflp2->nfl_flags & NFS_FILE_LOCK_STYLE_MASK))) {
3512 if (willsplit && (nflp1->nfl_type != nflp2->nfl_type) &&
3513 (nflp1->nfl_start > nflp2->nfl_start) &&
0a7de745 3514 (nflp1->nfl_end < nflp2->nfl_end)) {
b0d623f7 3515 *willsplit = 1;
0a7de745
A
3516 }
3517 return 0;
b0d623f7
A
3518 }
3519 /* no conflict if ranges don't overlap */
0a7de745
A
3520 if ((nflp1->nfl_start > nflp2->nfl_end) || (nflp1->nfl_end < nflp2->nfl_start)) {
3521 return 0;
3522 }
b0d623f7 3523 /* no conflict if neither lock is exclusive */
0a7de745
A
3524 if ((nflp1->nfl_type != F_WRLCK) && (nflp2->nfl_type != F_WRLCK)) {
3525 return 0;
3526 }
b0d623f7 3527 /* conflict */
0a7de745 3528 return 1;
b0d623f7
A
3529}
3530
cb323159 3531#if CONFIG_NFS4
b0d623f7
A
/*
 * Send an NFSv4 LOCK RPC to the server.
 *
 * Builds and sends a PUTFH+GETATTR+LOCK compound for the byte range held in
 * nflp.  If this lock owner has no state known to the current server instance
 * (its stategenid doesn't match the mount's), the request is sent in
 * "new lock owner" form, which requires the open file/owner to be marked busy
 * and supplies the open stateid and open-owner seqid; otherwise the existing
 * lock stateid and lock-owner seqid are used.  On success the lock owner's
 * stateid (and, for a new locker, its stategenid) are updated from the reply.
 *
 * Returns 0 on success or an errno/NFS error; ENXIO if the mount is gone,
 * EINVAL for a trigger/referral node.
 */
int
nfs4_setlock_rpc(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	struct nfs_file_lock *nflp,
	int reclaim,
	int flags,
	thread_t thd,
	kauth_cred_t cred)
{
	struct nfs_lock_owner *nlop = nflp->nfl_owner;
	struct nfsmount *nmp;
	struct nfsm_chain nmreq, nmrep;
	uint64_t xid;
	uint32_t locktype;
	int error = 0, lockerror = ENOENT, newlocker, numops, status;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	/* new locker if the server hasn't seen lock state from this owner since the last state change */
	newlocker = (nlop->nlo_stategenid != nmp->nm_stategenid);
	/* pick the NFSv4 lock type: read vs write, blocking ("W") vs non-blocking */
	locktype = (nflp->nfl_flags & NFS_FILE_LOCK_WAIT) ?
	    ((nflp->nfl_type == F_WRLCK) ?
	    NFS_LOCK_TYPE_WRITEW :
	    NFS_LOCK_TYPE_READW) :
	    ((nflp->nfl_type == F_WRLCK) ?
	    NFS_LOCK_TYPE_WRITE :
	    NFS_LOCK_TYPE_READ);
	if (newlocker) {
		/* a new lock owner's seqids are tied to the open file/owner, so mark both busy */
		error = nfs_open_file_set_busy(nofp, thd);
		if (error) {
			return error;
		}
		error = nfs_open_owner_set_busy(nofp->nof_owner, thd);
		if (error) {
			nfs_open_file_clear_busy(nofp);
			return error;
		}
		/* make sure the lock owner holds a reference to its open owner */
		if (!nlop->nlo_open_owner) {
			nfs_open_owner_ref(nofp->nof_owner);
			nlop->nlo_open_owner = nofp->nof_owner;
		}
	}
	error = nfs_lock_owner_set_busy(nlop, thd);
	if (error) {
		if (newlocker) {
			nfs_open_owner_clear_busy(nofp->nof_owner);
			nfs_open_file_clear_busy(nofp);
		}
		return error;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, LOCK
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 33 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "lock", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCK);
	nfsm_chain_add_32(error, &nmreq, locktype);
	nfsm_chain_add_32(error, &nmreq, reclaim);
	nfsm_chain_add_64(error, &nmreq, nflp->nfl_start);
	nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(nflp->nfl_start, nflp->nfl_end));
	nfsm_chain_add_32(error, &nmreq, newlocker);
	if (newlocker) {
		/* open_to_lock_owner4: open-owner seqid + open stateid + new lock-owner */
		nfsm_chain_add_32(error, &nmreq, nofp->nof_owner->noo_seqid);
		nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
		nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
		nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
	} else {
		/* exist_lock_owner4: current lock stateid + lock-owner seqid */
		nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
		nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
	}
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCK);
	/* seqids must advance whether the LOCK succeeded or failed in certain ways */
	nfs_owner_seqid_increment(newlocker ? nofp->nof_owner : NULL, nlop, error);
	nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);

	/* Update the lock owner's stategenid once it appears the server has state for it. */
	/* We determine this by noting the request was successful (we got a stateid). */
	if (newlocker && !error) {
		nlop->nlo_stategenid = nmp->nm_stategenid;
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfs_lock_owner_clear_busy(nlop);
	if (newlocker) {
		nfs_open_owner_clear_busy(nofp->nof_owner);
		nfs_open_file_clear_busy(nofp);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
3661
/*
 * Send an NFSv4 LOCKU RPC to the server.
 *
 * Builds and sends a PUTFH+GETATTR+LOCKU compound releasing the byte range
 * [start, end] held by the given lock owner.  The lock owner is marked busy
 * for the duration; its seqid is advanced and its stateid refreshed from the
 * reply.  Returns 0 or an errno/NFS error; ENXIO if the mount is gone,
 * EINVAL for a trigger/referral node.
 */
int
nfs4_unlock_rpc(
	nfsnode_t np,
	struct nfs_lock_owner *nlop,
	int type,
	uint64_t start,
	uint64_t end,
	int flags,
	thread_t thd,
	kauth_cred_t cred)
{
	struct nfsmount *nmp;
	struct nfsm_chain nmreq, nmrep;
	uint64_t xid;
	int error = 0, lockerror = ENOENT, numops, status;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	/* serialize against other users of this lock owner's seqid/stateid */
	error = nfs_lock_owner_set_busy(nlop, NULL);
	if (error) {
		return error;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, LOCKU
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "unlock", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKU);
	nfsm_chain_add_32(error, &nmreq, (type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
	nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
	nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
	nfsm_chain_add_64(error, &nmreq, start);
	nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKU);
	/* advance the lock-owner seqid and pick up the new lock stateid */
	nfs_owner_seqid_increment(NULL, nlop, error);
	nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfs_lock_owner_clear_busy(nlop);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
3744
/*
 * Send an NFSv4 LOCKT RPC to the server.
 *
 * Tests whether the range [start, end] could be locked with the type given
 * in fl->l_type.  LOCKT does not create state, so no stateid or busy open
 * file is needed - just the lock owner identity.  If the server reports a
 * conflict (NFSERR_DENIED), the conflicting lock's range and type are copied
 * into *fl (with l_pid forced to 0 since the owner is a remote client);
 * otherwise fl->l_type is set to F_UNLCK.
 */
int
nfs4_getlock_rpc(
	nfsnode_t np,
	struct nfs_lock_owner *nlop,
	struct flock *fl,
	uint64_t start,
	uint64_t end,
	vfs_context_t ctx)
{
	struct nfsmount *nmp;
	struct nfsm_chain nmreq, nmrep;
	uint64_t xid, val64 = 0;
	uint32_t val = 0;
	int error = 0, lockerror, numops, status;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	/* start with lockerror nonzero so the nfsmout path won't unlock a node we never locked */
	lockerror = ENOENT;
	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, LOCKT
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "locktest", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKT);
	nfsm_chain_add_32(error, &nmreq, (fl->l_type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
	nfsm_chain_add_64(error, &nmreq, start);
	nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
	nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKT);
	if (error == NFSERR_DENIED) {
		/* conflict found: decode LOCK4denied (offset, length, locktype) into *fl */
		error = 0;
		nfsm_chain_get_64(error, &nmrep, fl->l_start);
		nfsm_chain_get_64(error, &nmrep, val64);
		fl->l_len = (val64 == UINT64_MAX) ? 0 : val64;	/* whole-file length maps to 0 */
		nfsm_chain_get_32(error, &nmrep, val);
		fl->l_type = (val == NFS_LOCK_TYPE_WRITE) ? F_WRLCK : F_RDLCK;
		fl->l_pid = 0;
		fl->l_whence = SEEK_SET;
	} else if (!error) {
		/* no conflict */
		fl->l_type = F_UNLCK;
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
cb323159 3830#endif /* CONFIG_NFS4 */
6d2010ae
A
3831
/*
 * Check for any conflicts with the given lock.
 *
 * Checking for a lock doesn't require the file to be opened.
 * So we skip all the open owner, open file, lock owner work
 * and just check for a conflicting lock.
 *
 * First scans the node's local lock list; if a conflicting held lock is
 * found, or if we hold a write delegation (so the server can't have other
 * locks), the answer is determined locally.  Otherwise the question is
 * forwarded to the server via the version-specific getlock RPC.  The whole
 * check runs inside a mount-state in-use window and restarts if that window
 * ends with a state error.
 */
int
nfs_advlock_getlock(
	nfsnode_t np,
	struct nfs_lock_owner *nlop,
	struct flock *fl,
	uint64_t start,
	uint64_t end,
	vfs_context_t ctx)
{
	struct nfsmount *nmp;
	struct nfs_file_lock *nflp;
	int error = 0, answered = 0;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

restart:
	if ((error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx)))) {
		return error;
	}

	lck_mtx_lock(&np->n_openlock);
	/* scan currently held locks for conflict */
	TAILQ_FOREACH(nflp, &np->n_locks, nfl_link) {
		if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
			continue;
		}
		/* conflict: ranges overlap and at least one side is a write lock */
		if ((start <= nflp->nfl_end) && (end >= nflp->nfl_start) &&
		    ((fl->l_type == F_WRLCK) || (nflp->nfl_type == F_WRLCK))) {
			break;
		}
	}
	if (nflp) {
		/* found a conflicting lock */
		fl->l_type = nflp->nfl_type;
		/* NOTE(review): tests the FLOCK style bit directly rather than comparing
		 * (flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK as done
		 * elsewhere in this file - presumably equivalent given the flag values; verify. */
		fl->l_pid = (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_FLOCK) ? -1 : nflp->nfl_owner->nlo_pid;
		fl->l_start = nflp->nfl_start;
		fl->l_len = NFS_FLOCK_LENGTH(nflp->nfl_start, nflp->nfl_end);
		fl->l_whence = SEEK_SET;
		answered = 1;
	} else if ((np->n_openflags & N_DELEG_WRITE) && !(np->n_openflags & N_DELEG_RETURN)) {
		/*
		 * If we have a write delegation, we know there can't be other
		 * locks on the server. So the answer is no conflicting lock found.
		 */
		fl->l_type = F_UNLCK;
		answered = 1;
	}
	lck_mtx_unlock(&np->n_openlock);
	if (answered) {
		nfs_mount_state_in_use_end(nmp, 0);
		return 0;
	}

	/* no conflict found locally, so ask the server */
	error = nmp->nm_funcs->nf_getlock_rpc(np, nlop, fl, start, end, ctx);

	if (nfs_mount_state_in_use_end(nmp, error)) {
		/* state error ended the in-use window - redo the whole check */
		goto restart;
	}
	return error;
}
3903
b0d623f7
A
/*
 * Acquire a file lock for the given range.
 *
 * Add the lock (request) to the lock queue.
 * Scan the lock queue for any conflicting locks.
 * If a conflict is found, block or return an error.
 * Once end of queue is reached, send request to the server.
 * If the server grants the lock, scan the lock queue and
 * update any existing locks. Then (optionally) scan the
 * queue again to coalesce any locks adjacent to the new one.
 *
 * np      - the NFS node being locked
 * nofp    - open file the lock is associated with
 * nlop    - lock owner making the request
 * op      - F_SETLK or F_SETLKW (F_SETLKW makes conflicts block)
 * start,end - inclusive byte range
 * style   - NFS_FILE_LOCK_STYLE_POSIX or _FLOCK
 * type    - F_RDLCK or F_WRLCK
 * ctx     - caller's VFS context (thread/cred)
 */
int
nfs_advlock_setlock(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	struct nfs_lock_owner *nlop,
	int op,
	uint64_t start,
	uint64_t end,
	int style,
	short type,
	vfs_context_t ctx)
{
	struct nfsmount *nmp;
	struct nfs_file_lock *newnflp, *nflp, *nflp2 = NULL, *nextnflp, *flocknflp = NULL;
	struct nfs_file_lock *coalnflp;
	int error = 0, error2, willsplit = 0, delay, slpflag, busy = 0, inuse = 0, restart, inqueue = 0;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;

	if ((type != F_RDLCK) && (type != F_WRLCK)) {
		return EINVAL;
	}

	/* allocate a new lock */
	newnflp = nfs_file_lock_alloc(nlop);
	if (!newnflp) {
		return ENOLCK;
	}
	newnflp->nfl_start = start;
	newnflp->nfl_end = end;
	newnflp->nfl_type = type;
	if (op == F_SETLKW) {
		newnflp->nfl_flags |= NFS_FILE_LOCK_WAIT;
	}
	newnflp->nfl_flags |= style;
	/* mark BLOCKED until the server actually grants it */
	newnflp->nfl_flags |= NFS_FILE_LOCK_BLOCKED;

	if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && (type == F_WRLCK)) {
		/*
		 * For exclusive flock-style locks, if we block waiting for the
		 * lock, we need to first release any currently held shared
		 * flock-style lock. So, the first thing we do is check if we
		 * have a shared flock-style lock.
		 */
		nflp = TAILQ_FIRST(&nlop->nlo_locks);
		if (nflp && ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_FLOCK)) {
			nflp = NULL;
		}
		if (nflp && (nflp->nfl_type != F_RDLCK)) {
			nflp = NULL;
		}
		flocknflp = nflp;
	}

restart:
	restart = 0;
	error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
	if (error) {
		goto error_out;
	}
	inuse = 1;
	if (np->n_flag & NREVOKE) {
		/* node's state has been revoked - give up */
		error = EIO;
		nfs_mount_state_in_use_end(nmp, 0);
		inuse = 0;
		goto error_out;
	}
#if CONFIG_NFS4
	if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
		/* open file needs reopening (e.g. after recovery) before we can lock */
		nfs_mount_state_in_use_end(nmp, 0);
		inuse = 0;
		error = nfs4_reopen(nofp, vfs_context_thread(ctx));
		if (error) {
			goto error_out;
		}
		goto restart;
	}
#endif

	lck_mtx_lock(&np->n_openlock);
	if (!inqueue) {
		/* insert new lock at beginning of list */
		TAILQ_INSERT_HEAD(&np->n_locks, newnflp, nfl_link);
		inqueue = 1;
	}

	/* scan current list of locks (held and pending) for conflicts */
	for (nflp = TAILQ_NEXT(newnflp, nfl_link); nflp; nflp = nextnflp) {
		nextnflp = TAILQ_NEXT(nflp, nfl_link);
		if (!nfs_file_lock_conflict(newnflp, nflp, &willsplit)) {
			continue;
		}
		/* Conflict */
		if (!(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
			/* non-blocking request: fail immediately */
			error = EAGAIN;
			break;
		}
		/* Block until this lock is no longer held. */
		if (nflp->nfl_blockcnt == UINT_MAX) {
			error = ENOLCK;
			break;
		}
		nflp->nfl_blockcnt++;
		do {
			if (flocknflp) {
				/* release any currently held shared lock before sleeping */
				lck_mtx_unlock(&np->n_openlock);
				nfs_mount_state_in_use_end(nmp, 0);
				inuse = 0;
				error = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
				flocknflp = NULL;
				if (!error) {
					error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
				}
				if (error) {
					lck_mtx_lock(&np->n_openlock);
					break;
				}
				inuse = 1;
				lck_mtx_lock(&np->n_openlock);
				/* no need to block/sleep if the conflict is gone */
				if (!nfs_file_lock_conflict(newnflp, nflp, NULL)) {
					break;
				}
			}
			/* sleep (up to 1s at a time) until woken by the conflicting lock's release */
			msleep(nflp, &np->n_openlock, slpflag, "nfs_advlock_setlock_blocked", &ts);
			slpflag = 0;
			error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
			if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
				/* looks like we have a recover pending... restart */
				restart = 1;
				lck_mtx_unlock(&np->n_openlock);
				nfs_mount_state_in_use_end(nmp, 0);
				inuse = 0;
				lck_mtx_lock(&np->n_openlock);
				break;
			}
			if (!error && (np->n_flag & NREVOKE)) {
				error = EIO;
			}
		} while (!error && nfs_file_lock_conflict(newnflp, nflp, NULL));
		nflp->nfl_blockcnt--;
		/* if we were the last waiter on a dead lock, clean it up */
		if ((nflp->nfl_flags & NFS_FILE_LOCK_DEAD) && !nflp->nfl_blockcnt) {
			TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
			nfs_file_lock_destroy(nflp);
		}
		if (error || restart) {
			break;
		}
		/* We have released n_openlock and we can't trust that nextnflp is still valid. */
		/* So, start this lock-scanning loop over from where it started. */
		nextnflp = TAILQ_NEXT(newnflp, nfl_link);
	}
	lck_mtx_unlock(&np->n_openlock);
	if (restart) {
		goto restart;
	}
	if (error) {
		goto error_out;
	}

	if (willsplit) {
		/*
		 * It looks like this operation is splitting a lock.
		 * We allocate a new lock now so we don't have to worry
		 * about the allocation failing after we've updated some state.
		 */
		nflp2 = nfs_file_lock_alloc(nlop);
		if (!nflp2) {
			error = ENOLCK;
			goto error_out;
		}
	}

	/* once scan for local conflicts is clear, send request to server */
	if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) {
		goto error_out;
	}
	busy = 1;
	delay = 0;
	do {
#if CONFIG_NFS4
		/* do we have a delegation? (that we're not returning?) */
		if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN)) {
			if (np->n_openflags & N_DELEG_WRITE) {
				/* with a write delegation, just take the lock delegated */
				newnflp->nfl_flags |= NFS_FILE_LOCK_DELEGATED;
				error = 0;
				/* make sure the lock owner knows its open owner */
				if (!nlop->nlo_open_owner) {
					nfs_open_owner_ref(nofp->nof_owner);
					nlop->nlo_open_owner = nofp->nof_owner;
				}
				break;
			} else {
				/*
				 * If we don't have any non-delegated opens but we do have
				 * delegated opens, then we need to first claim the delegated
				 * opens so that the lock request on the server can be associated
				 * with an open it knows about.
				 */
				if ((!nofp->nof_rw_drw && !nofp->nof_w_drw && !nofp->nof_r_drw &&
				    !nofp->nof_rw_dw && !nofp->nof_w_dw && !nofp->nof_r_dw &&
				    !nofp->nof_rw && !nofp->nof_w && !nofp->nof_r) &&
				    (nofp->nof_d_rw_drw || nofp->nof_d_w_drw || nofp->nof_d_r_drw ||
				    nofp->nof_d_rw_dw || nofp->nof_d_w_dw || nofp->nof_d_r_dw ||
				    nofp->nof_d_rw || nofp->nof_d_w || nofp->nof_d_r)) {
					error = nfs4_claim_delegated_state_for_open_file(nofp, 0);
					if (error) {
						break;
					}
				}
			}
		}
#endif
		if (np->n_flag & NREVOKE) {
			error = EIO;
		}
		if (!error) {
			error = nmp->nm_funcs->nf_setlock_rpc(np, nofp, newnflp, 0, 0, vfs_context_thread(ctx), vfs_context_ucred(ctx));
		}
		if (!error || ((error != NFSERR_DENIED) && (error != NFSERR_GRACE))) {
			break;
		}
		/* request was denied due to either conflict or grace period */
		if ((error == NFSERR_DENIED) && !(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
			error = EAGAIN;
			break;
		}
		if (flocknflp) {
			/* release any currently held shared lock before sleeping */
			nfs_open_state_clear_busy(np);
			busy = 0;
			nfs_mount_state_in_use_end(nmp, 0);
			inuse = 0;
			error2 = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
			flocknflp = NULL;
			if (!error2) {
				error2 = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
			}
			if (!error2) {
				inuse = 1;
				error2 = nfs_open_state_set_busy(np, vfs_context_thread(ctx));
			}
			if (error2) {
				error = error2;
				break;
			}
			busy = 1;
		}
		/*
		 * Wait a little bit and send the request again.
		 * Except for retries of blocked v2/v3 request where we've already waited a bit.
		 */
		if ((nmp->nm_vers >= NFS_VER4) || (error == NFSERR_GRACE)) {
			if (error == NFSERR_GRACE) {
				delay = 4;
			}
			if (delay < 4) {
				delay++;
			}
			tsleep(newnflp, slpflag, "nfs_advlock_setlock_delay", delay * (hz / 2));
			slpflag = 0;
		}
		error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
		if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
			/* looks like we have a recover pending... restart */
			nfs_open_state_clear_busy(np);
			busy = 0;
			nfs_mount_state_in_use_end(nmp, 0);
			inuse = 0;
			goto restart;
		}
		if (!error && (np->n_flag & NREVOKE)) {
			error = EIO;
		}
	} while (!error);

error_out:
	if (nfs_mount_state_error_should_restart(error)) {
		/* looks like we need to restart this operation */
		if (busy) {
			nfs_open_state_clear_busy(np);
			busy = 0;
		}
		if (inuse) {
			nfs_mount_state_in_use_end(nmp, error);
			inuse = 0;
		}
		goto restart;
	}
	lck_mtx_lock(&np->n_openlock);
	newnflp->nfl_flags &= ~NFS_FILE_LOCK_BLOCKED;
	if (error) {
		/* failed: mark the request dead and clean it up (or let the last waiter do so) */
		newnflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
		if (newnflp->nfl_blockcnt) {
			/* wake up anyone blocked on this lock */
			wakeup(newnflp);
		} else {
			/* remove newnflp from lock list and destroy */
			if (inqueue) {
				TAILQ_REMOVE(&np->n_locks, newnflp, nfl_link);
			}
			nfs_file_lock_destroy(newnflp);
		}
		lck_mtx_unlock(&np->n_openlock);
		if (busy) {
			nfs_open_state_clear_busy(np);
		}
		if (inuse) {
			nfs_mount_state_in_use_end(nmp, error);
		}
		if (nflp2) {
			nfs_file_lock_destroy(nflp2);
		}
		return error;
	}

	/* server granted the lock */

	/*
	 * Scan for locks to update.
	 *
	 * Locks completely covered are killed.
	 * At most two locks may need to be clipped.
	 * It's possible that a single lock may need to be split.
	 */
	TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
		if (nflp == newnflp) {
			continue;
		}
		if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
			continue;
		}
		if (nflp->nfl_owner != nlop) {
			continue;
		}
		if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK)) {
			continue;
		}
		if ((newnflp->nfl_start > nflp->nfl_end) || (newnflp->nfl_end < nflp->nfl_start)) {
			continue;
		}
		/* here's one to update */
		if ((newnflp->nfl_start <= nflp->nfl_start) && (newnflp->nfl_end >= nflp->nfl_end)) {
			/* The entire lock is being replaced. */
			nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
			lck_mtx_lock(&nlop->nlo_lock);
			TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
			lck_mtx_unlock(&nlop->nlo_lock);
			/* lock will be destroyed below, if no waiters */
		} else if ((newnflp->nfl_start > nflp->nfl_start) && (newnflp->nfl_end < nflp->nfl_end)) {
			/* We're replacing a range in the middle of a lock. */
			/* The current lock will be split into two locks. */
			/* Update locks and insert new lock after current lock. */
			nflp2->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK | NFS_FILE_LOCK_DELEGATED));
			nflp2->nfl_type = nflp->nfl_type;
			nflp2->nfl_start = newnflp->nfl_end + 1;
			nflp2->nfl_end = nflp->nfl_end;
			nflp->nfl_end = newnflp->nfl_start - 1;
			TAILQ_INSERT_AFTER(&np->n_locks, nflp, nflp2, nfl_link);
			nfs_lock_owner_insert_held_lock(nlop, nflp2);
			nextnflp = nflp2;
			nflp2 = NULL;
		} else if (newnflp->nfl_start > nflp->nfl_start) {
			/* We're replacing the end of a lock. */
			nflp->nfl_end = newnflp->nfl_start - 1;
		} else if (newnflp->nfl_end < nflp->nfl_end) {
			/* We're replacing the start of a lock. */
			nflp->nfl_start = newnflp->nfl_end + 1;
		}
		if (nflp->nfl_blockcnt) {
			/* wake up anyone blocked on this lock */
			wakeup(nflp);
		} else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
			/* remove nflp from lock list and destroy */
			TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
			nfs_file_lock_destroy(nflp);
		}
	}

	nfs_lock_owner_insert_held_lock(nlop, newnflp);

	/*
	 * POSIX locks should be coalesced when possible.
	 */
	if ((style == NFS_FILE_LOCK_STYLE_POSIX) && (nofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)) {
		/*
		 * Walk through the lock queue and check each of our held locks with
		 * the previous and next locks in the lock owner's "held lock list".
		 * If the two locks can be coalesced, we merge the current lock into
		 * the other (previous or next) lock. Merging this way makes sure that
		 * lock ranges are always merged forward in the lock queue. This is
		 * important because anyone blocked on the lock being "merged away"
		 * will still need to block on that range and it will simply continue
		 * checking locks that are further down the list.
		 */
		TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
			if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
				continue;
			}
			if (nflp->nfl_owner != nlop) {
				continue;
			}
			if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_POSIX) {
				continue;
			}
			if (((coalnflp = TAILQ_PREV(nflp, nfs_file_lock_queue, nfl_lolink))) &&
			    ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
			    (coalnflp->nfl_type == nflp->nfl_type) &&
			    (coalnflp->nfl_end == (nflp->nfl_start - 1))) {
				/* previous lock is adjacent: extend it forward and kill this one */
				coalnflp->nfl_end = nflp->nfl_end;
				nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
				lck_mtx_lock(&nlop->nlo_lock);
				TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
				lck_mtx_unlock(&nlop->nlo_lock);
			} else if (((coalnflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
			    ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
			    (coalnflp->nfl_type == nflp->nfl_type) &&
			    (coalnflp->nfl_start == (nflp->nfl_end + 1))) {
				/* next lock is adjacent: extend it backward and kill this one */
				coalnflp->nfl_start = nflp->nfl_start;
				nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
				lck_mtx_lock(&nlop->nlo_lock);
				TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
				lck_mtx_unlock(&nlop->nlo_lock);
			}
			if (!(nflp->nfl_flags & NFS_FILE_LOCK_DEAD)) {
				continue;
			}
			if (nflp->nfl_blockcnt) {
				/* wake up anyone blocked on this lock */
				wakeup(nflp);
			} else {
				/* remove nflp from lock list and destroy */
				TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
				nfs_file_lock_destroy(nflp);
			}
		}
	}

	lck_mtx_unlock(&np->n_openlock);
	nfs_open_state_clear_busy(np);
	nfs_mount_state_in_use_end(nmp, error);

	if (nflp2) {
		/* preallocated split lock wasn't needed after all */
		nfs_file_lock_destroy(nflp2);
	}
	return error;
}
4369
6d2010ae
A
/*
 * Release all (same style) locks within the given range.
 *
 * Walks np's lock list and, for each lock held by lock owner "nlop" of the
 * given style that overlaps [start, end], either kills it entirely, trims
 * one end, or splits it in two (allocating the extra nfs_file_lock up front,
 * since we can't sleep for memory with the open state busied).
 *
 * Unlock RPCs can block, so n_openlock is dropped around each nf_unlock_rpc
 * call; the N_OPENBUSY state (nfs_open_state_set_busy) keeps *held* locks
 * stable while the mutex is dropped.  Errors that indicate lost/recovering
 * state (nfs_mount_state_error_should_restart) re-drive the whole operation
 * from "restart:".
 *
 * Returns 0 on success or an errno (ENXIO if the mount is gone, ENOMEM if a
 * needed split lock can't be allocated, or the first RPC error hit).
 */
int
nfs_advlock_unlock(
	nfsnode_t np,
	struct nfs_open_file *nofp
#if !CONFIG_NFS4
	__unused
#endif
	,
	struct nfs_lock_owner *nlop,
	uint64_t start,
	uint64_t end,
	int style,
	vfs_context_t ctx)
{
	struct nfsmount *nmp;
	struct nfs_file_lock *nflp, *nextnflp, *newnflp = NULL;
	int error = 0, willsplit = 0, send_unlock_rpcs = 1;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

restart:
	if ((error = nfs_mount_state_in_use_start(nmp, NULL))) {
		return error;
	}
#if CONFIG_NFS4
	/* If the open state was lost, reopen it before touching locks. */
	if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
		nfs_mount_state_in_use_end(nmp, 0);
		error = nfs4_reopen(nofp, NULL);
		if (error) {
			return error;
		}
		goto restart;
	}
#endif
	if ((error = nfs_open_state_set_busy(np, NULL))) {
		nfs_mount_state_in_use_end(nmp, error);
		return error;
	}

	lck_mtx_lock(&np->n_openlock);
	if ((start > 0) && (end < UINT64_MAX) && !willsplit) {
		/*
		 * We may need to allocate a new lock if an existing lock gets split.
		 * So, we first scan the list to check for a split, and if there's
		 * going to be one, we'll allocate one now.
		 */
		TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
			if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
				continue;
			}
			if (nflp->nfl_owner != nlop) {
				continue;
			}
			if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style) {
				continue;
			}
			if ((start > nflp->nfl_end) || (end < nflp->nfl_start)) {
				continue;
			}
			/* unlock range strictly inside this lock => it will split */
			if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
				willsplit = 1;
				break;
			}
		}
		if (willsplit) {
			/* drop all state to allocate, then re-drive from the top */
			lck_mtx_unlock(&np->n_openlock);
			nfs_open_state_clear_busy(np);
			nfs_mount_state_in_use_end(nmp, 0);
			newnflp = nfs_file_lock_alloc(nlop);
			if (!newnflp) {
				return ENOMEM;
			}
			goto restart;
		}
	}

	/*
	 * Free all of our locks in the given range.
	 *
	 * Note that this process requires sending requests to the server.
	 * Because of this, we will release the n_openlock while performing
	 * the unlock RPCs. The N_OPENBUSY state keeps the state of *held*
	 * locks from changing underneath us. However, other entries in the
	 * list may be removed. So we need to be careful walking the list.
	 */

	/*
	 * Don't unlock ranges that are held by other-style locks.
	 * If style is posix, don't send any unlock rpcs if flock is held.
	 * If we unlock an flock, don't send unlock rpcs for any posix-style
	 * ranges held - instead send unlocks for the ranges not held.
	 */
	if ((style == NFS_FILE_LOCK_STYLE_POSIX) &&
	    ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
	    ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK)) {
		send_unlock_rpcs = 0;
	}
	if ((style == NFS_FILE_LOCK_STYLE_FLOCK) &&
	    ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
	    ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) &&
	    ((nflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
	    ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX)) {
		/*
		 * The flock covers the whole file on the server, but POSIX ranges
		 * are still held locally; walk them and unlock only the gaps
		 * between them.  (Skipped entirely if the lock was delegated -
		 * nothing was ever sent to the server.)
		 */
		uint64_t s = 0;
		int type = TAILQ_FIRST(&nlop->nlo_locks)->nfl_type;
		int delegated = (TAILQ_FIRST(&nlop->nlo_locks)->nfl_flags & NFS_FILE_LOCK_DELEGATED);
		while (!delegated && nflp) {
			if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) {
				/* unlock the range preceding this lock */
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, nflp->nfl_start - 1, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
				if (error) {
					goto out;
				}
				s = nflp->nfl_end + 1;
			}
			nflp = TAILQ_NEXT(nflp, nfl_lolink);
		}
		if (!delegated) {
			/* unlock the tail range after the last held POSIX lock */
			lck_mtx_unlock(&np->n_openlock);
			error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, end, 0,
			    vfs_context_thread(ctx), vfs_context_ucred(ctx));
			if (nfs_mount_state_error_should_restart(error)) {
				nfs_open_state_clear_busy(np);
				nfs_mount_state_in_use_end(nmp, error);
				goto restart;
			}
			lck_mtx_lock(&np->n_openlock);
			if (error) {
				goto out;
			}
		}
		send_unlock_rpcs = 0;
	}

	TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
		if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
			continue;
		}
		if (nflp->nfl_owner != nlop) {
			continue;
		}
		if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style) {
			continue;
		}
		if ((start > nflp->nfl_end) || (end < nflp->nfl_start)) {
			continue;
		}
		/* here's one to unlock */
		if ((start <= nflp->nfl_start) && (end >= nflp->nfl_end)) {
			/* The entire lock is being unlocked. */
			if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, nflp->nfl_end, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			/* refetch next pointer - the list may have changed while unlocked */
			nextnflp = TAILQ_NEXT(nflp, nfl_link);
			if (error) {
				break;
			}
			nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
			lck_mtx_lock(&nlop->nlo_lock);
			TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
			lck_mtx_unlock(&nlop->nlo_lock);
			/* lock will be destroyed below, if no waiters */
		} else if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
			/* We're unlocking a range in the middle of a lock. */
			/* The current lock will be split into two locks. */
			if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, end, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			if (error) {
				break;
			}
			/* update locks and insert new lock after current lock */
			newnflp->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK | NFS_FILE_LOCK_DELEGATED));
			newnflp->nfl_type = nflp->nfl_type;
			newnflp->nfl_start = end + 1;
			newnflp->nfl_end = nflp->nfl_end;
			nflp->nfl_end = start - 1;
			TAILQ_INSERT_AFTER(&np->n_locks, nflp, newnflp, nfl_link);
			nfs_lock_owner_insert_held_lock(nlop, newnflp);
			nextnflp = newnflp;
			newnflp = NULL;
		} else if (start > nflp->nfl_start) {
			/* We're unlocking the end of a lock. */
			if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, nflp->nfl_end, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			nextnflp = TAILQ_NEXT(nflp, nfl_link);
			if (error) {
				break;
			}
			nflp->nfl_end = start - 1;
		} else if (end < nflp->nfl_end) {
			/* We're unlocking the start of a lock. */
			if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, end, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			nextnflp = TAILQ_NEXT(nflp, nfl_link);
			if (error) {
				break;
			}
			nflp->nfl_start = end + 1;
		}
		if (nflp->nfl_blockcnt) {
			/* wake up anyone blocked on this lock */
			wakeup(nflp);
		} else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
			/* remove nflp from lock list and destroy */
			TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
			nfs_file_lock_destroy(nflp);
		}
	}
out:
	lck_mtx_unlock(&np->n_openlock);
	nfs_open_state_clear_busy(np);
	nfs_mount_state_in_use_end(nmp, 0);

	/* free the preallocated split lock if it ended up unused */
	if (newnflp) {
		nfs_file_lock_destroy(newnflp);
	}
	return error;
}
4636
/*
 * NFSv4 advisory file locking - VNOP entry point.
 *
 * Also reached for NFSv2/v3 mounts; those are rejected up front with
 * ENOTSUP when the mount's lock mode is disabled.
 *
 * Converts the caller's struct flock (l_whence/l_start/l_len) into an
 * absolute [start, end] byte range, looks up the per-process lock owner
 * and per-credential open owner/file, then dispatches to the getlock,
 * setlock, or unlock helper based on a_op.
 *
 * Returns 0 on success or an errno.  Lock requests on directories are
 * silently ignored (returns 0); any other non-regular file gets EINVAL.
 */
int
nfs_vnop_advlock(
	struct vnop_advlock_args /* {
                                  *  struct vnodeop_desc *a_desc;
                                  *  vnode_t a_vp;
                                  *  caddr_t a_id;
                                  *  int a_op;
                                  *  struct flock *a_fl;
                                  *  int a_flags;
                                  *  vfs_context_t a_context;
                                  *  } */*ap)
{
	vnode_t vp = ap->a_vp;
	nfsnode_t np = VTONFS(ap->a_vp);
	struct flock *fl = ap->a_fl;
	int op = ap->a_op;
	int flags = ap->a_flags;
	vfs_context_t ctx = ap->a_context;
	struct nfsmount *nmp;
	struct nfs_open_owner *noop = NULL;
	struct nfs_open_file *nofp = NULL;
	struct nfs_lock_owner *nlop = NULL;
	off_t lstart;
	uint64_t start, end;
	int error = 0, modified, style;
	enum vtype vtype;
#define OFF_MAX QUAD_MAX

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	lck_mtx_lock(&nmp->nm_lock);
	if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED)) {
		lck_mtx_unlock(&nmp->nm_lock);
		return ENOTSUP;
	}
	lck_mtx_unlock(&nmp->nm_lock);

	if (np->n_flag & NREVOKE) {
		return EIO;
	}
	vtype = vnode_vtype(ap->a_vp);
	if (vtype == VDIR) { /* ignore lock requests on directories */
		return 0;
	}
	if (vtype != VREG) { /* anything other than regular files is invalid */
		return EINVAL;
	}

	/* Convert the flock structure into a start and end. */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * to fl->l_start when SEEK_CUR is used.
		 */
		lstart = fl->l_start;
		break;
	case SEEK_END:
		/* need to flush, and refetch attributes to make */
		/* sure we have the correct end of file offset */
		if ((error = nfs_node_lock(np))) {
			return error;
		}
		modified = (np->n_flag & NMODIFIED);
		nfs_node_unlock(np);
		if (modified && ((error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1)))) {
			return error;
		}
		if ((error = nfs_getattr(np, NULL, ctx, NGA_UNCACHED))) {
			return error;
		}
		nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
		/* reject offsets that would overflow off_t */
		if ((np->n_size > OFF_MAX) ||
		    ((fl->l_start > 0) && (np->n_size > (u_quad_t)(OFF_MAX - fl->l_start)))) {
			error = EOVERFLOW;
		}
		lstart = np->n_size + fl->l_start;
		nfs_data_unlock(np);
		if (error) {
			return error;
		}
		break;
	default:
		return EINVAL;
	}
	if (lstart < 0) {
		return EINVAL;
	}
	start = lstart;
	if (fl->l_len == 0) {
		/* zero length means "to end of file" */
		end = UINT64_MAX;
	} else if (fl->l_len > 0) {
		if ((fl->l_len - 1) > (OFF_MAX - lstart)) {
			return EOVERFLOW;
		}
		end = start - 1 + fl->l_len;
	} else { /* l_len is negative */
		if ((lstart + fl->l_len) < 0) {
			return EINVAL;
		}
		end = start - 1;
		start += fl->l_len;
	}
	/* NFSv2 can only represent 32-bit offsets */
	if ((nmp->nm_vers == NFS_VER2) && ((start > INT32_MAX) || (fl->l_len && (end > INT32_MAX)))) {
		return EINVAL;
	}

	/* flock(2)-style locks must always cover the whole file */
	style = (flags & F_FLOCK) ? NFS_FILE_LOCK_STYLE_FLOCK : NFS_FILE_LOCK_STYLE_POSIX;
	if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && ((start != 0) || (end != UINT64_MAX))) {
		return EINVAL;
	}

	/* find the lock owner, alloc if not unlock */
	nlop = nfs_lock_owner_find(np, vfs_context_proc(ctx), (op != F_UNLCK));
	if (!nlop) {
		/* unlocking with no lock owner means there's nothing to unlock */
		error = (op == F_UNLCK) ? 0 : ENOMEM;
		if (error) {
			NP(np, "nfs_vnop_advlock: no lock owner, error %d", error);
		}
		goto out;
	}

	if (op == F_GETLK) {
		error = nfs_advlock_getlock(np, nlop, fl, start, end, ctx);
	} else {
		/* find the open owner */
		noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 0);
		if (!noop) {
			NP(np, "nfs_vnop_advlock: no open owner %d", kauth_cred_getuid(vfs_context_ucred(ctx)));
			error = EPERM;
			goto out;
		}
		/* find the open file */
#if CONFIG_NFS4
restart:
#endif
		error = nfs_open_file_find(np, noop, &nofp, 0, 0, 0);
		if (error) {
			error = EBADF;
		}
		if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
			NP(np, "nfs_vnop_advlock: LOST %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
			error = EIO;
		}
#if CONFIG_NFS4
		/* lost-but-recoverable open state must be reopened first */
		if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
			error = nfs4_reopen(nofp, ((op == F_UNLCK) ? NULL : vfs_context_thread(ctx)));
			nofp = NULL;
			if (!error) {
				goto restart;
			}
		}
#endif
		if (error) {
			NP(np, "nfs_vnop_advlock: no open file %d, %d", error, kauth_cred_getuid(noop->noo_cred));
			goto out;
		}
		if (op == F_UNLCK) {
			error = nfs_advlock_unlock(np, nofp, nlop, start, end, style, ctx);
		} else if ((op == F_SETLK) || (op == F_SETLKW)) {
			if ((op == F_SETLK) && (flags & F_WAIT)) {
				op = F_SETLKW;
			}
			error = nfs_advlock_setlock(np, nofp, nlop, op, start, end, style, fl->l_type, ctx);
		} else {
			/* not getlk, unlock or lock? */
			error = EINVAL;
		}
	}

out:
	/* drop the references taken by the find routines above */
	if (nlop) {
		nfs_lock_owner_rele(nlop);
	}
	if (noop) {
		nfs_open_owner_rele(noop);
	}
	return error;
}
4822
4823/*
4824 * Check if an open owner holds any locks on a file.
4825 */
4826int
6d2010ae 4827nfs_check_for_locks(struct nfs_open_owner *noop, struct nfs_open_file *nofp)
b0d623f7
A
4828{
4829 struct nfs_lock_owner *nlop;
4830
4831 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
0a7de745 4832 if (nlop->nlo_open_owner != noop) {
b0d623f7 4833 continue;
0a7de745
A
4834 }
4835 if (!TAILQ_EMPTY(&nlop->nlo_locks)) {
b0d623f7 4836 break;
0a7de745 4837 }
b0d623f7 4838 }
0a7de745 4839 return nlop ? 1 : 0;
b0d623f7
A
4840}
4841
cb323159 4842#if CONFIG_NFS4
b0d623f7
A
/*
 * Reopen simple (no deny, no locks) open state that was lost.
 *
 * Only one thread may reopen a given open file at a time: waiters sleep on
 * nof_flags until NFS_OPEN_FILE_REOPENING clears, then bail out if the
 * REOPEN request has already been handled.
 *
 * The directory vnode and file name needed for the OPEN are reconstructed
 * either from the node's sillyrename record or from the stashed parent
 * (named-attribute nodes can't trust VFS's parent) plus the vnode's name.
 * One reopen RPC is issued per access mode currently held (rw, w, r), each
 * with deny-none.  NFSERR_GRACE causes a retry; any other hard failure
 * (other than EINTR/ERESTART) revokes the node's open state entirely.
 *
 * Returns 0 on success or an errno.
 */
int
nfs4_reopen(struct nfs_open_file *nofp, thread_t thd)
{
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfsmount *nmp = NFSTONMP(nofp->nof_np);
	nfsnode_t np = nofp->nof_np;
	vnode_t vp = NFSTOV(np);
	vnode_t dvp = NULL;
	struct componentname cn;
	const char *vname = NULL;
	const char *name = NULL;
	size_t namelen;
	char smallname[128];
	char *filename = NULL;
	int error = 0, done = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	/* wait for any other reopener to finish; honor signals on intr mounts */
	lck_mtx_lock(&nofp->nof_lock);
	while (nofp->nof_flags & NFS_OPEN_FILE_REOPENING) {
		if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
			break;
		}
		msleep(&nofp->nof_flags, &nofp->nof_lock, slpflag | (PZERO - 1), "nfsreopenwait", &ts);
		slpflag = 0;
	}
	if (error || !(nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
		lck_mtx_unlock(&nofp->nof_lock);
		return error;
	}
	/* claim the reopen for this thread */
	nofp->nof_flags |= NFS_OPEN_FILE_REOPENING;
	lck_mtx_unlock(&nofp->nof_lock);

	nfs_node_lock_force(np);
	if ((vnode_vtype(vp) != VDIR) && np->n_sillyrename) {
		/*
		 * The node's been sillyrenamed, so we need to use
		 * the sillyrename directory/name to do the open.
		 */
		struct nfs_sillyrename *nsp = np->n_sillyrename;
		dvp = NFSTOV(nsp->nsr_dnp);
		if ((error = vnode_get(dvp))) {
			dvp = NULLVP;
			nfs_node_unlock(np);
			goto out;
		}
		name = nsp->nsr_name;
	} else {
		/*
		 * [sigh] We can't trust VFS to get the parent right for named
		 * attribute nodes. (It likes to reparent the nodes after we've
		 * created them.) Luckily we can probably get the right parent
		 * from the n_parent we have stashed away.
		 */
		if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
		    (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
			dvp = NULL;
		}
		if (!dvp) {
			dvp = vnode_getparent(vp);
		}
		vname = vnode_getname(vp);
		if (!dvp || !vname) {
			if (!error) {
				error = EIO;
			}
			nfs_node_unlock(np);
			goto out;
		}
		name = vname;
	}
	/* copy the name; fall back to a heap buffer if it doesn't fit */
	filename = &smallname[0];
	namelen = snprintf(filename, sizeof(smallname), "%s", name);
	if (namelen >= sizeof(smallname)) {
		MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
		if (!filename) {
			error = ENOMEM;
			goto out;
		}
		snprintf(filename, namelen + 1, "%s", name);
	}
	nfs_node_unlock(np);
	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = filename;
	cn.cn_namelen = namelen;

restart:
	done = 0;
	if ((error = nfs_mount_state_in_use_start(nmp, thd))) {
		goto out;
	}

	/* reissue an OPEN for each access mode this open file holds */
	if (nofp->nof_rw) {
		error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE);
	}
	if (!error && nofp->nof_w) {
		error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE);
	}
	if (!error && nofp->nof_r) {
		error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE);
	}

	if (nfs_mount_state_in_use_end(nmp, error)) {
		/* server is still in its grace period - just try again */
		if (error == NFSERR_GRACE) {
			goto restart;
		}
		printf("nfs4_reopen: RPC failed, error %d, lost %d, %s\n", error,
		    (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
		error = 0;
		goto out;
	}
	done = 1;
out:
	/* anything but an interruption means the open state is unrecoverable */
	if (error && (error != EINTR) && (error != ERESTART)) {
		nfs_revoke_open_state_for_node(np);
	}
	lck_mtx_lock(&nofp->nof_lock);
	nofp->nof_flags &= ~NFS_OPEN_FILE_REOPENING;
	if (done) {
		nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
	} else if (error) {
		printf("nfs4_reopen: failed, error %d, lost %d, %s\n", error,
		    (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
	}
	lck_mtx_unlock(&nofp->nof_lock);
	if (filename && (filename != &smallname[0])) {
		FREE(filename, M_TEMP);
	}
	if (vname) {
		vnode_putname(vname);
	}
	if (dvp != NULLVP) {
		vnode_put(dvp);
	}
	return error;
}
4981
4982/*
4983 * Send a normal OPEN RPC to open/create a file.
4984 */
4985int
4986nfs4_open_rpc(
4987 struct nfs_open_file *nofp,
4988 vfs_context_t ctx,
4989 struct componentname *cnp,
4990 struct vnode_attr *vap,
4991 vnode_t dvp,
4992 vnode_t *vpp,
4993 int create,
4994 int share_access,
4995 int share_deny)
4996{
0a7de745
A
4997 return nfs4_open_rpc_internal(nofp, ctx, vfs_context_thread(ctx), vfs_context_ucred(ctx),
4998 cnp, vap, dvp, vpp, create, share_access, share_deny);
b0d623f7
A
4999}
5000
5001/*
5002 * Send an OPEN RPC to reopen a file.
5003 */
5004int
5005nfs4_open_reopen_rpc(
5006 struct nfs_open_file *nofp,
5007 thread_t thd,
5008 kauth_cred_t cred,
5009 struct componentname *cnp,
5010 vnode_t dvp,
5011 vnode_t *vpp,
5012 int share_access,
5013 int share_deny)
5014{
0a7de745 5015 return nfs4_open_rpc_internal(nofp, NULL, thd, cred, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, share_access, share_deny);
6d2010ae
A
5016}
5017
/*
 * Send an OPEN_CONFIRM RPC to confirm an OPEN.
 *
 * Builds and sends a PUTFH + OPEN_CONFIRM + GETATTR compound for the file
 * handle (fhp/fhlen), using the open owner's current seqid and the stateid
 * in *sid.  On success the open owner's seqid is advanced, *sid is updated
 * with the confirmed stateid from the reply, *nvap receives the parsed
 * attributes, and *xidp the transaction id.
 *
 * Note: the nfsm_chain_* macros accumulate into "error" and become no-ops
 * once it is set, so the marshalling sequence below is order-sensitive.
 *
 * Returns 0 on success or an errno/NFS error.
 */
int
nfs4_open_confirm_rpc(
	struct nfsmount *nmp,
	nfsnode_t dnp,
	u_char *fhp,
	int fhlen,
	struct nfs_open_owner *noop,
	nfs_stateid *sid,
	thread_t thd,
	kauth_cred_t cred,
	struct nfs_vattr *nvap,
	uint64_t *xidp)
{
	struct nfsm_chain nmreq, nmrep;
	int error = 0, status, numops;
	struct nfsreq_secinfo_args si;

	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, OPEN_CONFIRM, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "open_confirm", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_CONFIRM);
	nfsm_chain_add_stateid(error, &nmreq, sid);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	/* R_NOINTR: a confirm must not be cut short by a signal */
	error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, &nmrep, xidp, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_CONFIRM);
	/* seqid advances whether the op succeeded or failed */
	nfs_owner_seqid_increment(noop, NULL, error);
	nfsm_chain_get_stateid(error, &nmrep, sid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
5076
5077/*
5078 * common OPEN RPC code
5079 *
5080 * If create is set, ctx must be passed in.
6d2010ae 5081 * Returns a node on success if no node passed in.
b0d623f7
A
5082 */
5083int
5084nfs4_open_rpc_internal(
5085 struct nfs_open_file *nofp,
5086 vfs_context_t ctx,
5087 thread_t thd,
5088 kauth_cred_t cred,
5089 struct componentname *cnp,
5090 struct vnode_attr *vap,
5091 vnode_t dvp,
5092 vnode_t *vpp,
5093 int create,
5094 int share_access,
5095 int share_deny)
5096{
5097 struct nfsmount *nmp;
5098 struct nfs_open_owner *noop = nofp->nof_owner;
6d2010ae 5099 struct nfs_vattr nvattr;
b0d623f7 5100 int error = 0, open_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
6d2010ae 5101 int nfsvers, namedattrs, numops, exclusive = 0, gotuid, gotgid;
b0d623f7
A
5102 u_int64_t xid, savedxid = 0;
5103 nfsnode_t dnp = VTONFS(dvp);
5104 nfsnode_t np, newnp = NULL;
5105 vnode_t newvp = NULL;
5106 struct nfsm_chain nmreq, nmrep;
5107 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6d2010ae 5108 uint32_t rflags, delegation, recall;
b0d623f7
A
5109 struct nfs_stateid stateid, dstateid, *sid;
5110 fhandle_t fh;
6d2010ae 5111 struct nfsreq rq, *req = &rq;
b0d623f7 5112 struct nfs_dulookup dul;
6d2010ae
A
5113 char sbuf[64], *s;
5114 uint32_t ace_type, ace_flags, ace_mask, len, slen;
5115 struct kauth_ace ace;
5116 struct nfsreq_secinfo_args si;
b0d623f7 5117
0a7de745
A
5118 if (create && !ctx) {
5119 return EINVAL;
5120 }
b0d623f7
A
5121
5122 nmp = VTONMP(dvp);
0a7de745
A
5123 if (nfs_mount_gone(nmp)) {
5124 return ENXIO;
5125 }
b0d623f7 5126 nfsvers = nmp->nm_vers;
6d2010ae 5127 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
0a7de745
A
5128 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
5129 return EINVAL;
5130 }
b0d623f7
A
5131
5132 np = *vpp ? VTONFS(*vpp) : NULL;
5133 if (create && vap) {
5134 exclusive = (vap->va_vaflags & VA_EXCLUSIVE);
5135 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
5136 gotuid = VATTR_IS_ACTIVE(vap, va_uid);
5137 gotgid = VATTR_IS_ACTIVE(vap, va_gid);
0a7de745 5138 if (exclusive && (!VATTR_IS_ACTIVE(vap, va_access_time) || !VATTR_IS_ACTIVE(vap, va_modify_time))) {
6d2010ae 5139 vap->va_vaflags |= VA_UTIMES_NULL;
0a7de745 5140 }
b0d623f7
A
5141 } else {
5142 exclusive = gotuid = gotgid = 0;
5143 }
5144 if (nofp) {
5145 sid = &nofp->nof_stateid;
5146 } else {
5147 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
5148 sid = &stateid;
5149 }
5150
0a7de745
A
5151 if ((error = nfs_open_owner_set_busy(noop, thd))) {
5152 return error;
5153 }
b0d623f7 5154again:
6d2010ae
A
5155 rflags = delegation = recall = 0;
5156 ace.ace_flags = 0;
5157 s = sbuf;
5158 slen = sizeof(sbuf);
5159 NVATTR_INIT(&nvattr);
5160 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, cnp->cn_nameptr, cnp->cn_namelen);
b0d623f7
A
5161
5162 nfsm_chain_null(&nmreq);
5163 nfsm_chain_null(&nmrep);
5164
5165 // PUTFH, SAVEFH, OPEN(CREATE?), GETATTR(FH), RESTOREFH, GETATTR
5166 numops = 6;
5167 nfsm_chain_build_alloc_init(error, &nmreq, 53 * NFSX_UNSIGNED + cnp->cn_namelen);
3e170ce0 5168 nfsm_chain_add_compound_header(error, &nmreq, create ? "create" : "open", nmp->nm_minor_vers, numops);
b0d623f7
A
5169 numops--;
5170 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5171 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
5172 numops--;
5173 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
5174 numops--;
5175 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5176 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5177 nfsm_chain_add_32(error, &nmreq, share_access);
5178 nfsm_chain_add_32(error, &nmreq, share_deny);
6d2010ae 5179 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
b0d623f7 5180 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
6d2010ae 5181 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
b0d623f7
A
5182 nfsm_chain_add_32(error, &nmreq, create);
5183 if (create) {
5184 if (exclusive) {
5185 static uint32_t create_verf; // XXX need a better verifier
5186 create_verf++;
5187 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_EXCLUSIVE);
5188 /* insert 64 bit verifier */
5189 nfsm_chain_add_32(error, &nmreq, create_verf);
5190 nfsm_chain_add_32(error, &nmreq, create_verf);
5191 } else {
5192 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_UNCHECKED);
5193 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
5194 }
5195 }
b0d623f7 5196 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
6d2010ae 5197 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
b0d623f7
A
5198 numops--;
5199 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5200 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5201 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
6d2010ae 5202 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
b0d623f7
A
5203 numops--;
5204 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
5205 numops--;
5206 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 5207 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
b0d623f7
A
5208 nfsm_chain_build_done(error, &nmreq);
5209 nfsm_assert(error, (numops == 0), EPROTO);
0a7de745 5210 if (!error) {
b0d623f7 5211 error = busyerror = nfs_node_set_busy(dnp, thd);
0a7de745 5212 }
b0d623f7
A
5213 nfsmout_if(error);
5214
0a7de745 5215 if (create && !namedattrs) {
b0d623f7 5216 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
0a7de745 5217 }
b0d623f7 5218
6d2010ae 5219 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, NULL, &req);
b0d623f7 5220 if (!error) {
0a7de745 5221 if (create && !namedattrs) {
b0d623f7 5222 nfs_dulookup_start(&dul, dnp, ctx);
0a7de745 5223 }
b0d623f7
A
5224 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
5225 savedxid = xid;
5226 }
5227
0a7de745 5228 if (create && !namedattrs) {
b0d623f7 5229 nfs_dulookup_finish(&dul, dnp, ctx);
0a7de745 5230 }
b0d623f7 5231
0a7de745 5232 if ((lockerror = nfs_node_lock(dnp))) {
b0d623f7 5233 error = lockerror;
0a7de745 5234 }
b0d623f7
A
5235 nfsm_chain_skip_tag(error, &nmrep);
5236 nfsm_chain_get_32(error, &nmrep, numops);
5237 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5238 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
5239 nfsmout_if(error);
5240 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5241 nfs_owner_seqid_increment(noop, NULL, error);
5242 nfsm_chain_get_stateid(error, &nmrep, sid);
5243 nfsm_chain_check_change_info(error, &nmrep, dnp);
5244 nfsm_chain_get_32(error, &nmrep, rflags);
5245 bmlen = NFS_ATTR_BITMAP_LEN;
5246 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5247 nfsm_chain_get_32(error, &nmrep, delegation);
0a7de745 5248 if (!error) {
b0d623f7
A
5249 switch (delegation) {
5250 case NFS_OPEN_DELEGATE_NONE:
5251 break;
5252 case NFS_OPEN_DELEGATE_READ:
b0d623f7
A
5253 case NFS_OPEN_DELEGATE_WRITE:
5254 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5255 nfsm_chain_get_32(error, &nmrep, recall);
0a7de745 5256 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
6d2010ae 5257 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
0a7de745 5258 }
6d2010ae
A
5259 /* if we have any trouble accepting the ACE, just invalidate it */
5260 ace_type = ace_flags = ace_mask = len = 0;
5261 nfsm_chain_get_32(error, &nmrep, ace_type);
5262 nfsm_chain_get_32(error, &nmrep, ace_flags);
5263 nfsm_chain_get_32(error, &nmrep, ace_mask);
5264 nfsm_chain_get_32(error, &nmrep, len);
5265 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5266 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5267 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5268 if (!error && (len >= slen)) {
0a7de745
A
5269 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
5270 if (s) {
5271 slen = len + 1;
5272 } else {
6d2010ae 5273 ace.ace_flags = 0;
0a7de745 5274 }
6d2010ae 5275 }
0a7de745 5276 if (s) {
6d2010ae 5277 nfsm_chain_get_opaque(error, &nmrep, len, s);
0a7de745 5278 } else {
6d2010ae 5279 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
0a7de745 5280 }
6d2010ae
A
5281 if (!error && s) {
5282 s[len] = '\0';
0a7de745 5283 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
6d2010ae 5284 ace.ace_flags = 0;
0a7de745 5285 }
6d2010ae 5286 }
0a7de745 5287 if (error || !s) {
6d2010ae 5288 ace.ace_flags = 0;
0a7de745
A
5289 }
5290 if (s && (s != sbuf)) {
6d2010ae 5291 FREE(s, M_TEMP);
0a7de745 5292 }
b0d623f7
A
5293 break;
5294 default:
5295 error = EBADRPC;
5296 break;
5297 }
0a7de745 5298 }
b0d623f7 5299 /* At this point if we have no error, the object was created/opened. */
b0d623f7
A
5300 open_error = error;
5301 nfsmout_if(error);
0a7de745 5302 if (create && vap && !exclusive) {
b0d623f7 5303 nfs_vattr_set_supported(bitmap, vap);
0a7de745 5304 }
b0d623f7
A
5305 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5306 nfsmout_if(error);
6d2010ae 5307 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
b0d623f7
A
5308 nfsmout_if(error);
5309 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
6d2010ae 5310 printf("nfs: open/create didn't return filehandle? %s\n", cnp->cn_nameptr);
2d21ac55
A
5311 error = EBADRPC;
5312 goto nfsmout;
5313 }
b0d623f7
A
5314 if (!create && np && !NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
5315 // XXX for the open case, what if fh doesn't match the vnode we think we're opening?
6d2010ae 5316 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
0a7de745 5317 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
6d2010ae 5318 NP(np, "nfs4_open_rpc: warning: file handle mismatch");
0a7de745 5319 }
b0d623f7 5320 }
2d21ac55
A
5321 /* directory attributes: if we don't get them, make sure to invalidate */
5322 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
5323 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 5324 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
0a7de745 5325 if (error) {
2d21ac55 5326 NATTRINVALIDATE(dnp);
0a7de745 5327 }
b0d623f7
A
5328 nfsmout_if(error);
5329
0a7de745 5330 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
b0d623f7 5331 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
0a7de745 5332 }
b0d623f7
A
5333
5334 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
5335 nfs_node_unlock(dnp);
5336 lockerror = ENOENT;
6d2010ae
A
5337 NVATTR_CLEANUP(&nvattr);
5338 error = nfs4_open_confirm_rpc(nmp, dnp, fh.fh_data, fh.fh_len, noop, sid, thd, cred, &nvattr, &xid);
b0d623f7
A
5339 nfsmout_if(error);
5340 savedxid = xid;
0a7de745 5341 if ((lockerror = nfs_node_lock(dnp))) {
b0d623f7 5342 error = lockerror;
0a7de745 5343 }
b0d623f7
A
5344 }
5345
5346nfsmout:
5347 nfsm_chain_cleanup(&nmreq);
5348 nfsm_chain_cleanup(&nmrep);
5349
5350 if (!lockerror && create) {
5351 if (!open_error && (dnp->n_flag & NNEGNCENTRIES)) {
5352 dnp->n_flag &= ~NNEGNCENTRIES;
5353 cache_purge_negatives(dvp);
5354 }
5355 dnp->n_flag |= NMODIFIED;
5356 nfs_node_unlock(dnp);
5357 lockerror = ENOENT;
6d2010ae 5358 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
b0d623f7 5359 }
0a7de745 5360 if (!lockerror) {
b0d623f7 5361 nfs_node_unlock(dnp);
0a7de745 5362 }
6d2010ae 5363 if (!error && !np && fh.fh_len) {
b0d623f7
A
5364 /* create the vnode with the filehandle and attributes */
5365 xid = savedxid;
6d2010ae 5366 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &newnp);
0a7de745 5367 if (!error) {
b0d623f7 5368 newvp = NFSTOV(newnp);
0a7de745 5369 }
b0d623f7 5370 }
6d2010ae 5371 NVATTR_CLEANUP(&nvattr);
0a7de745 5372 if (!busyerror) {
b0d623f7 5373 nfs_node_clear_busy(dnp);
0a7de745 5374 }
b0d623f7 5375 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
0a7de745 5376 if (!np) {
b0d623f7 5377 np = newnp;
0a7de745 5378 }
b0d623f7
A
5379 if (!error && np && !recall) {
5380 /* stuff the delegation state in the node */
5381 lck_mtx_lock(&np->n_openlock);
5382 np->n_openflags &= ~N_DELEG_MASK;
5383 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5384 np->n_dstateid = dstateid;
6d2010ae
A
5385 np->n_dace = ace;
5386 if (np->n_dlink.tqe_next == NFSNOLIST) {
5387 lck_mtx_lock(&nmp->nm_lock);
0a7de745 5388 if (np->n_dlink.tqe_next == NFSNOLIST) {
6d2010ae 5389 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
0a7de745 5390 }
6d2010ae
A
5391 lck_mtx_unlock(&nmp->nm_lock);
5392 }
b0d623f7 5393 lck_mtx_unlock(&np->n_openlock);
6d2010ae
A
5394 } else {
5395 /* give the delegation back */
b0d623f7 5396 if (np) {
6d2010ae
A
5397 if (NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
5398 /* update delegation state and return it */
5399 lck_mtx_lock(&np->n_openlock);
5400 np->n_openflags &= ~N_DELEG_MASK;
5401 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5402 np->n_dstateid = dstateid;
5403 np->n_dace = ace;
5404 if (np->n_dlink.tqe_next == NFSNOLIST) {
5405 lck_mtx_lock(&nmp->nm_lock);
0a7de745 5406 if (np->n_dlink.tqe_next == NFSNOLIST) {
6d2010ae 5407 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
0a7de745 5408 }
6d2010ae
A
5409 lck_mtx_unlock(&nmp->nm_lock);
5410 }
5411 lck_mtx_unlock(&np->n_openlock);
5412 /* don't need to send a separate delegreturn for fh */
5413 fh.fh_len = 0;
5414 }
5415 /* return np's current delegation */
5416 nfs4_delegation_return(np, 0, thd, cred);
b0d623f7 5417 }
0a7de745 5418 if (fh.fh_len) { /* return fh's delegation if it wasn't for np */
6d2010ae 5419 nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred);
0a7de745 5420 }
b0d623f7
A
5421 }
5422 }
5423 if (error) {
5424 if (exclusive && (error == NFSERR_NOTSUPP)) {
5425 exclusive = 0;
5426 goto again;
5427 }
5428 if (newvp) {
5429 nfs_node_unlock(newnp);
5430 vnode_put(newvp);
5431 }
5432 } else if (create) {
5433 nfs_node_unlock(newnp);
5434 if (exclusive) {
5435 error = nfs4_setattr_rpc(newnp, vap, ctx);
5436 if (error && (gotuid || gotgid)) {
5437 /* it's possible the server didn't like our attempt to set IDs. */
5438 /* so, let's try it again without those */
5439 VATTR_CLEAR_ACTIVE(vap, va_uid);
5440 VATTR_CLEAR_ACTIVE(vap, va_gid);
5441 error = nfs4_setattr_rpc(newnp, vap, ctx);
5442 }
5443 }
0a7de745 5444 if (error) {
b0d623f7 5445 vnode_put(newvp);
0a7de745 5446 } else {
b0d623f7 5447 *vpp = newvp;
0a7de745 5448 }
b0d623f7
A
5449 }
5450 nfs_open_owner_clear_busy(noop);
0a7de745 5451 return error;
b0d623f7
A
5452}
5453
6d2010ae
A
5454
5455/*
5456 * Send an OPEN RPC to claim a delegated open for a file
5457 */
5458int
5459nfs4_claim_delegated_open_rpc(
5460 struct nfs_open_file *nofp,
5461 int share_access,
5462 int share_deny,
5463 int flags)
5464{
5465 struct nfsmount *nmp;
5466 struct nfs_open_owner *noop = nofp->nof_owner;
5467 struct nfs_vattr nvattr;
5468 int error = 0, lockerror = ENOENT, status;
5469 int nfsvers, numops;
5470 u_int64_t xid;
5471 nfsnode_t np = nofp->nof_np;
5472 struct nfsm_chain nmreq, nmrep;
5473 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5474 uint32_t rflags = 0, delegation, recall = 0;
5475 fhandle_t fh;
5476 struct nfs_stateid dstateid;
5477 char sbuf[64], *s = sbuf;
5478 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5479 struct kauth_ace ace;
5480 vnode_t dvp = NULL;
5481 const char *vname = NULL;
5482 const char *name = NULL;
5483 size_t namelen;
5484 char smallname[128];
5485 char *filename = NULL;
5486 struct nfsreq_secinfo_args si;
5487
5488 nmp = NFSTONMP(np);
0a7de745
A
5489 if (nfs_mount_gone(nmp)) {
5490 return ENXIO;
5491 }
6d2010ae
A
5492 nfsvers = nmp->nm_vers;
5493
5494 nfs_node_lock_force(np);
5495 if ((vnode_vtype(NFSTOV(np)) != VDIR) && np->n_sillyrename) {
5496 /*
5497 * The node's been sillyrenamed, so we need to use
5498 * the sillyrename directory/name to do the open.
5499 */
5500 struct nfs_sillyrename *nsp = np->n_sillyrename;
5501 dvp = NFSTOV(nsp->nsr_dnp);
5502 if ((error = vnode_get(dvp))) {
cb323159 5503 dvp = NULLVP;
6d2010ae
A
5504 nfs_node_unlock(np);
5505 goto out;
5506 }
5507 name = nsp->nsr_name;
5508 } else {
5509 /*
5510 * [sigh] We can't trust VFS to get the parent right for named
5511 * attribute nodes. (It likes to reparent the nodes after we've
5512 * created them.) Luckily we can probably get the right parent
5513 * from the n_parent we have stashed away.
5514 */
5515 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
0a7de745 5516 (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
6d2010ae 5517 dvp = NULL;
0a7de745
A
5518 }
5519 if (!dvp) {
6d2010ae 5520 dvp = vnode_getparent(NFSTOV(np));
0a7de745 5521 }
6d2010ae
A
5522 vname = vnode_getname(NFSTOV(np));
5523 if (!dvp || !vname) {
0a7de745 5524 if (!error) {
6d2010ae 5525 error = EIO;
0a7de745 5526 }
6d2010ae
A
5527 nfs_node_unlock(np);
5528 goto out;
5529 }
5530 name = vname;
5531 }
5532 filename = &smallname[0];
5533 namelen = snprintf(filename, sizeof(smallname), "%s", name);
5534 if (namelen >= sizeof(smallname)) {
0a7de745 5535 MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
6d2010ae
A
5536 if (!filename) {
5537 error = ENOMEM;
3e170ce0 5538 nfs_node_unlock(np);
6d2010ae
A
5539 goto out;
5540 }
0a7de745 5541 snprintf(filename, namelen + 1, "%s", name);
6d2010ae
A
5542 }
5543 nfs_node_unlock(np);
5544
0a7de745 5545 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
3e170ce0 5546 goto out;
0a7de745 5547 }
6d2010ae
A
5548 NVATTR_INIT(&nvattr);
5549 delegation = NFS_OPEN_DELEGATE_NONE;
5550 dstateid = np->n_dstateid;
5551 NFSREQ_SECINFO_SET(&si, VTONFS(dvp), NULL, 0, filename, namelen);
5552
5553 nfsm_chain_null(&nmreq);
5554 nfsm_chain_null(&nmrep);
5555
5556 // PUTFH, OPEN, GETATTR(FH)
5557 numops = 3;
5558 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
3e170ce0 5559 nfsm_chain_add_compound_header(error, &nmreq, "open_claim_d", nmp->nm_minor_vers, numops);
6d2010ae
A
5560 numops--;
5561 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5562 nfsm_chain_add_fh(error, &nmreq, nfsvers, VTONFS(dvp)->n_fhp, VTONFS(dvp)->n_fhsize);
5563 numops--;
5564 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5565 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5566 nfsm_chain_add_32(error, &nmreq, share_access);
5567 nfsm_chain_add_32(error, &nmreq, share_deny);
5568 // open owner: clientid + uid
5569 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5570 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5571 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
5572 // openflag4
5573 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5574 // open_claim4
5575 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_DELEGATE_CUR);
5576 nfsm_chain_add_stateid(error, &nmreq, &np->n_dstateid);
5577 nfsm_chain_add_name(error, &nmreq, filename, namelen, nmp);
5578 numops--;
5579 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5580 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5581 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5582 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5583 nfsm_chain_build_done(error, &nmreq);
5584 nfsm_assert(error, (numops == 0), EPROTO);
5585 nfsmout_if(error);
5586
5587 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
0a7de745 5588 noop->noo_cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
6d2010ae 5589
0a7de745 5590 if ((lockerror = nfs_node_lock(np))) {
6d2010ae 5591 error = lockerror;
0a7de745 5592 }
6d2010ae
A
5593 nfsm_chain_skip_tag(error, &nmrep);
5594 nfsm_chain_get_32(error, &nmrep, numops);
5595 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5596 nfsmout_if(error);
5597 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5598 nfs_owner_seqid_increment(noop, NULL, error);
5599 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5600 nfsm_chain_check_change_info(error, &nmrep, np);
5601 nfsm_chain_get_32(error, &nmrep, rflags);
5602 bmlen = NFS_ATTR_BITMAP_LEN;
5603 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5604 nfsm_chain_get_32(error, &nmrep, delegation);
0a7de745 5605 if (!error) {
6d2010ae
A
5606 switch (delegation) {
5607 case NFS_OPEN_DELEGATE_NONE:
5608 // if (!(np->n_openflags & N_DELEG_RETURN)) /* don't warn if delegation is being returned */
0a7de745 5609 // printf("nfs: open delegated claim didn't return a delegation %s\n", filename ? filename : "???");
6d2010ae
A
5610 break;
5611 case NFS_OPEN_DELEGATE_READ:
5612 case NFS_OPEN_DELEGATE_WRITE:
5613 if ((((np->n_openflags & N_DELEG_MASK) == N_DELEG_READ) &&
0a7de745 5614 (delegation == NFS_OPEN_DELEGATE_WRITE)) ||
6d2010ae 5615 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) &&
0a7de745 5616 (delegation == NFS_OPEN_DELEGATE_READ))) {
6d2010ae 5617 printf("nfs: open delegated claim returned a different delegation type! have %s got %s %s\n",
0a7de745
A
5618 ((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ? "W" : "R",
5619 (delegation == NFS_OPEN_DELEGATE_WRITE) ? "W" : "R", filename ? filename : "???");
5620 }
6d2010ae
A
5621 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5622 nfsm_chain_get_32(error, &nmrep, recall);
0a7de745 5623 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
6d2010ae 5624 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
0a7de745 5625 }
6d2010ae
A
5626 /* if we have any trouble accepting the ACE, just invalidate it */
5627 ace_type = ace_flags = ace_mask = len = 0;
5628 nfsm_chain_get_32(error, &nmrep, ace_type);
5629 nfsm_chain_get_32(error, &nmrep, ace_flags);
5630 nfsm_chain_get_32(error, &nmrep, ace_mask);
5631 nfsm_chain_get_32(error, &nmrep, len);
5632 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5633 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5634 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5635 if (!error && (len >= slen)) {
0a7de745
A
5636 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
5637 if (s) {
5638 slen = len + 1;
5639 } else {
6d2010ae 5640 ace.ace_flags = 0;
0a7de745 5641 }
6d2010ae 5642 }
0a7de745 5643 if (s) {
6d2010ae 5644 nfsm_chain_get_opaque(error, &nmrep, len, s);
0a7de745 5645 } else {
6d2010ae 5646 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
0a7de745 5647 }
6d2010ae
A
5648 if (!error && s) {
5649 s[len] = '\0';
0a7de745 5650 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
6d2010ae 5651 ace.ace_flags = 0;
0a7de745 5652 }
6d2010ae 5653 }
0a7de745 5654 if (error || !s) {
6d2010ae 5655 ace.ace_flags = 0;
0a7de745
A
5656 }
5657 if (s && (s != sbuf)) {
6d2010ae 5658 FREE(s, M_TEMP);
0a7de745 5659 }
6d2010ae
A
5660 if (!error) {
5661 /* stuff the latest delegation state in the node */
5662 lck_mtx_lock(&np->n_openlock);
5663 np->n_openflags &= ~N_DELEG_MASK;
5664 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5665 np->n_dstateid = dstateid;
5666 np->n_dace = ace;
5667 if (np->n_dlink.tqe_next == NFSNOLIST) {
5668 lck_mtx_lock(&nmp->nm_lock);
0a7de745 5669 if (np->n_dlink.tqe_next == NFSNOLIST) {
6d2010ae 5670 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
0a7de745 5671 }
6d2010ae
A
5672 lck_mtx_unlock(&nmp->nm_lock);
5673 }
5674 lck_mtx_unlock(&np->n_openlock);
5675 }
5676 break;
5677 default:
5678 error = EBADRPC;
5679 break;
5680 }
0a7de745 5681 }
6d2010ae
A
5682 nfsmout_if(error);
5683 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5684 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
5685 nfsmout_if(error);
5686 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
5687 printf("nfs: open reclaim didn't return filehandle? %s\n", filename ? filename : "???");
5688 error = EBADRPC;
5689 goto nfsmout;
5690 }
5691 if (!NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
5692 // XXX what if fh doesn't match the vnode we think we're re-opening?
5693 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
0a7de745 5694 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
6d2010ae 5695 printf("nfs4_claim_delegated_open_rpc: warning: file handle mismatch %s\n", filename ? filename : "???");
0a7de745 5696 }
6d2010ae
A
5697 }
5698 error = nfs_loadattrcache(np, &nvattr, &xid, 1);
5699 nfsmout_if(error);
0a7de745 5700 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
6d2010ae 5701 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
0a7de745 5702 }
6d2010ae
A
5703nfsmout:
5704 NVATTR_CLEANUP(&nvattr);
5705 nfsm_chain_cleanup(&nmreq);
5706 nfsm_chain_cleanup(&nmrep);
0a7de745 5707 if (!lockerror) {
6d2010ae 5708 nfs_node_unlock(np);
0a7de745 5709 }
6d2010ae
A
5710 nfs_open_owner_clear_busy(noop);
5711 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5712 if (recall) {
5713 /*
5714 * We're making a delegated claim.
5715 * Don't return the delegation here in case we have more to claim.
5716 * Just make sure it's queued up to be returned.
5717 */
5718 nfs4_delegation_return_enqueue(np);
5719 }
5720 }
5721out:
5722 // if (!error)
0a7de745
A
5723 // printf("nfs: open claim delegated (%d, %d) succeeded for %s\n", share_access, share_deny, filename ? filename : "???");
5724 if (filename && (filename != &smallname[0])) {
6d2010ae 5725 FREE(filename, M_TEMP);
0a7de745
A
5726 }
5727 if (vname) {
6d2010ae 5728 vnode_putname(vname);
0a7de745
A
5729 }
5730 if (dvp != NULLVP) {
6d2010ae 5731 vnode_put(dvp);
0a7de745
A
5732 }
5733 return error;
6d2010ae
A
5734}
5735
b0d623f7
A
5736/*
5737 * Send an OPEN RPC to reclaim an open file.
5738 */
5739int
5740nfs4_open_reclaim_rpc(
5741 struct nfs_open_file *nofp,
5742 int share_access,
5743 int share_deny)
5744{
5745 struct nfsmount *nmp;
5746 struct nfs_open_owner *noop = nofp->nof_owner;
5747 struct nfs_vattr nvattr;
5748 int error = 0, lockerror = ENOENT, status;
5749 int nfsvers, numops;
5750 u_int64_t xid;
5751 nfsnode_t np = nofp->nof_np;
5752 struct nfsm_chain nmreq, nmrep;
5753 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6d2010ae 5754 uint32_t rflags = 0, delegation, recall = 0;
b0d623f7
A
5755 fhandle_t fh;
5756 struct nfs_stateid dstateid;
6d2010ae
A
5757 char sbuf[64], *s = sbuf;
5758 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5759 struct kauth_ace ace;
5760 struct nfsreq_secinfo_args si;
b0d623f7
A
5761
5762 nmp = NFSTONMP(np);
0a7de745
A
5763 if (nfs_mount_gone(nmp)) {
5764 return ENXIO;
5765 }
b0d623f7
A
5766 nfsvers = nmp->nm_vers;
5767
0a7de745
A
5768 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
5769 return error;
5770 }
b0d623f7 5771
6d2010ae 5772 NVATTR_INIT(&nvattr);
b0d623f7 5773 delegation = NFS_OPEN_DELEGATE_NONE;
6d2010ae
A
5774 dstateid = np->n_dstateid;
5775 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
b0d623f7
A
5776
5777 nfsm_chain_null(&nmreq);
5778 nfsm_chain_null(&nmrep);
5779
5780 // PUTFH, OPEN, GETATTR(FH)
5781 numops = 3;
5782 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
3e170ce0 5783 nfsm_chain_add_compound_header(error, &nmreq, "open_reclaim", nmp->nm_minor_vers, numops);
b0d623f7
A
5784 numops--;
5785 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5786 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5787 numops--;
5788 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5789 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5790 nfsm_chain_add_32(error, &nmreq, share_access);
5791 nfsm_chain_add_32(error, &nmreq, share_deny);
5792 // open owner: clientid + uid
5793 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5794 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5795 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
5796 // openflag4
5797 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5798 // open_claim4
5799 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_PREVIOUS);
5800 delegation = (np->n_openflags & N_DELEG_READ) ? NFS_OPEN_DELEGATE_READ :
0a7de745
A
5801 (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE :
5802 NFS_OPEN_DELEGATE_NONE;
b0d623f7
A
5803 nfsm_chain_add_32(error, &nmreq, delegation);
5804 delegation = NFS_OPEN_DELEGATE_NONE;
5805 numops--;
5806 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5807 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5808 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
6d2010ae 5809 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
b0d623f7
A
5810 nfsm_chain_build_done(error, &nmreq);
5811 nfsm_assert(error, (numops == 0), EPROTO);
5812 nfsmout_if(error);
5813
6d2010ae 5814 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
0a7de745 5815 noop->noo_cred, &si, R_RECOVER | R_NOINTR, &nmrep, &xid, &status);
b0d623f7 5816
0a7de745 5817 if ((lockerror = nfs_node_lock(np))) {
b0d623f7 5818 error = lockerror;
0a7de745 5819 }
b0d623f7
A
5820 nfsm_chain_skip_tag(error, &nmrep);
5821 nfsm_chain_get_32(error, &nmrep, numops);
5822 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5823 nfsmout_if(error);
5824 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5825 nfs_owner_seqid_increment(noop, NULL, error);
5826 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5827 nfsm_chain_check_change_info(error, &nmrep, np);
5828 nfsm_chain_get_32(error, &nmrep, rflags);
5829 bmlen = NFS_ATTR_BITMAP_LEN;
5830 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5831 nfsm_chain_get_32(error, &nmrep, delegation);
0a7de745 5832 if (!error) {
b0d623f7
A
5833 switch (delegation) {
5834 case NFS_OPEN_DELEGATE_NONE:
6d2010ae
A
5835 if (np->n_openflags & N_DELEG_MASK) {
5836 /*
5837 * Hey! We were supposed to get our delegation back even
5838 * if it was getting immediately recalled. Bad server!
5839 *
5840 * Just try to return the existing delegation.
5841 */
5842 // NP(np, "nfs: open reclaim didn't return delegation?");
5843 delegation = (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE : NFS_OPEN_DELEGATE_READ;
5844 recall = 1;
b0d623f7
A
5845 }
5846 break;
6d2010ae 5847 case NFS_OPEN_DELEGATE_READ:
b0d623f7
A
5848 case NFS_OPEN_DELEGATE_WRITE:
5849 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5850 nfsm_chain_get_32(error, &nmrep, recall);
0a7de745 5851 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
6d2010ae 5852 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
0a7de745 5853 }
6d2010ae
A
5854 /* if we have any trouble accepting the ACE, just invalidate it */
5855 ace_type = ace_flags = ace_mask = len = 0;
5856 nfsm_chain_get_32(error, &nmrep, ace_type);
5857 nfsm_chain_get_32(error, &nmrep, ace_flags);
5858 nfsm_chain_get_32(error, &nmrep, ace_mask);
5859 nfsm_chain_get_32(error, &nmrep, len);
5860 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5861 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5862 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5863 if (!error && (len >= slen)) {
0a7de745
A
5864 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
5865 if (s) {
5866 slen = len + 1;
5867 } else {
6d2010ae 5868 ace.ace_flags = 0;
0a7de745 5869 }
6d2010ae 5870 }
0a7de745 5871 if (s) {
6d2010ae 5872 nfsm_chain_get_opaque(error, &nmrep, len, s);
0a7de745 5873 } else {
6d2010ae 5874 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
0a7de745 5875 }
6d2010ae
A
5876 if (!error && s) {
5877 s[len] = '\0';
0a7de745 5878 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
6d2010ae 5879 ace.ace_flags = 0;
0a7de745 5880 }
6d2010ae 5881 }
0a7de745 5882 if (error || !s) {
6d2010ae 5883 ace.ace_flags = 0;
0a7de745
A
5884 }
5885 if (s && (s != sbuf)) {
6d2010ae 5886 FREE(s, M_TEMP);
0a7de745 5887 }
b0d623f7
A
5888 if (!error) {
5889 /* stuff the delegation state in the node */
5890 lck_mtx_lock(&np->n_openlock);
5891 np->n_openflags &= ~N_DELEG_MASK;
6d2010ae 5892 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
b0d623f7 5893 np->n_dstateid = dstateid;
6d2010ae
A
5894 np->n_dace = ace;
5895 if (np->n_dlink.tqe_next == NFSNOLIST) {
5896 lck_mtx_lock(&nmp->nm_lock);
0a7de745 5897 if (np->n_dlink.tqe_next == NFSNOLIST) {
6d2010ae 5898 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
0a7de745 5899 }
6d2010ae
A
5900 lck_mtx_unlock(&nmp->nm_lock);
5901 }
b0d623f7
A
5902 lck_mtx_unlock(&np->n_openlock);
5903 }
5904 break;
5905 default:
5906 error = EBADRPC;
5907 break;
5908 }
0a7de745 5909 }
b0d623f7
A
5910 nfsmout_if(error);
5911 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 5912 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
b0d623f7
A
5913 nfsmout_if(error);
5914 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
6d2010ae 5915 NP(np, "nfs: open reclaim didn't return filehandle?");
b0d623f7
A
5916 error = EBADRPC;
5917 goto nfsmout;
5918 }
5919 if (!NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
5920 // XXX what if fh doesn't match the vnode we think we're re-opening?
6d2010ae
A
5921 // That should be pretty hard in this case, given that we are doing
5922 // the open reclaim using the file handle (and not a dir/name pair).
5923 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
0a7de745 5924 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
6d2010ae 5925 NP(np, "nfs4_open_reclaim_rpc: warning: file handle mismatch");
0a7de745 5926 }
b0d623f7
A
5927 }
5928 error = nfs_loadattrcache(np, &nvattr, &xid, 1);
5929 nfsmout_if(error);
0a7de745 5930 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
b0d623f7 5931 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
0a7de745 5932 }
b0d623f7 5933nfsmout:
6d2010ae 5934 // if (!error)
0a7de745 5935 // NP(np, "nfs: open reclaim (%d, %d) succeeded", share_access, share_deny);
6d2010ae 5936 NVATTR_CLEANUP(&nvattr);
b0d623f7
A
5937 nfsm_chain_cleanup(&nmreq);
5938 nfsm_chain_cleanup(&nmrep);
0a7de745 5939 if (!lockerror) {
b0d623f7 5940 nfs_node_unlock(np);
0a7de745 5941 }
b0d623f7
A
5942 nfs_open_owner_clear_busy(noop);
5943 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
0a7de745 5944 if (recall) {
6d2010ae 5945 nfs4_delegation_return_enqueue(np);
0a7de745 5946 }
b0d623f7 5947 }
0a7de745 5948 return error;
b0d623f7 5949}
2d21ac55 5950
b0d623f7
A
/*
 * Send an OPEN_DOWNGRADE RPC to reduce the share access/deny modes of an
 * open file to the values currently recorded in nofp->nof_access/nof_deny.
 *
 * The compound sent is PUTFH, OPEN_DOWNGRADE, GETATTR.  The open owner is
 * marked busy for the duration (seqid protocol), the owner's seqid is
 * incremented on reply, and the (possibly updated) open stateid is stored
 * back into nofp->nof_stateid.
 *
 * Returns 0 on success or an errno (ENXIO if the mount is gone).
 */
int
nfs4_open_downgrade_rpc(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	vfs_context_t ctx)
{
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfsmount *nmp;
	int error, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	u_int64_t xid;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	/* serialize with other users of this open owner's seqid */
	if ((error = nfs_open_owner_set_busy(noop, NULL))) {
		return error;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, OPEN_DOWNGRADE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "open_downgrd", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_DOWNGRADE);
	nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	nfsm_chain_add_32(error, &nmreq, nofp->nof_access);
	nfsm_chain_add_32(error, &nmreq, nofp->nof_deny);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx),
	    &si, R_NOINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	/* parse reply; the seqid is incremented even on OPEN_DOWNGRADE failure */
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_DOWNGRADE);
	nfs_owner_seqid_increment(noop, NULL, error);
	nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfs_open_owner_clear_busy(noop);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
2d21ac55 6022
b0d623f7
A
/*
 * Send a CLOSE RPC to close an open file on the server.
 *
 * The compound sent is PUTFH, CLOSE, GETATTR.  The open owner is marked
 * busy for the duration (seqid protocol), the owner's seqid is incremented
 * on reply, and the stateid returned by CLOSE is stored in
 * nofp->nof_stateid.  The caller-supplied flags are ORed with R_NOINTR
 * when issuing the request.
 *
 * Returns 0 on success or an errno (ENXIO if the mount is gone).
 */
int
nfs4_close_rpc(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	thread_t thd,
	kauth_cred_t cred,
	int flags)
{
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfsmount *nmp;
	int error, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	u_int64_t xid;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	/* serialize with other users of this open owner's seqid */
	if ((error = nfs_open_owner_set_busy(noop, NULL))) {
		return error;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, CLOSE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "close", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_CLOSE);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	/* parse reply; the seqid is incremented even on CLOSE failure */
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_CLOSE);
	nfs_owner_seqid_increment(noop, NULL, error);
	nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfs_open_owner_clear_busy(noop);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
6092
6093
b0d623f7 6094/*
6d2010ae 6095 * Claim the delegated open combinations this open file holds.
b0d623f7
A
6096 */
6097int
6d2010ae 6098nfs4_claim_delegated_state_for_open_file(struct nfs_open_file *nofp, int flags)
b0d623f7 6099{
6d2010ae
A
6100 struct nfs_open_owner *noop = nofp->nof_owner;
6101 struct nfs_lock_owner *nlop;
6102 struct nfs_file_lock *nflp, *nextnflp;
b0d623f7 6103 struct nfsmount *nmp;
6d2010ae 6104 int error = 0, reopen = 0;
b0d623f7 6105
6d2010ae
A
6106 if (nofp->nof_d_rw_drw) {
6107 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_BOTH, flags);
6108 if (!error) {
6109 lck_mtx_lock(&nofp->nof_lock);
6110 nofp->nof_rw_drw += nofp->nof_d_rw_drw;
6111 nofp->nof_d_rw_drw = 0;
6112 lck_mtx_unlock(&nofp->nof_lock);
6113 }
b0d623f7 6114 }
6d2010ae
A
6115 if (!error && nofp->nof_d_w_drw) {
6116 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_BOTH, flags);
6117 if (!error) {
6118 lck_mtx_lock(&nofp->nof_lock);
6119 nofp->nof_w_drw += nofp->nof_d_w_drw;
6120 nofp->nof_d_w_drw = 0;
6121 lck_mtx_unlock(&nofp->nof_lock);
6122 }
b0d623f7 6123 }
6d2010ae
A
6124 if (!error && nofp->nof_d_r_drw) {
6125 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_BOTH, flags);
6126 if (!error) {
6127 lck_mtx_lock(&nofp->nof_lock);
6128 nofp->nof_r_drw += nofp->nof_d_r_drw;
6129 nofp->nof_d_r_drw = 0;
6130 lck_mtx_unlock(&nofp->nof_lock);
6131 }
6132 }
6133 if (!error && nofp->nof_d_rw_dw) {
6134 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_WRITE, flags);
6135 if (!error) {
6136 lck_mtx_lock(&nofp->nof_lock);
6137 nofp->nof_rw_dw += nofp->nof_d_rw_dw;
6138 nofp->nof_d_rw_dw = 0;
6139 lck_mtx_unlock(&nofp->nof_lock);
6140 }
6141 }
6142 if (!error && nofp->nof_d_w_dw) {
6143 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_WRITE, flags);
6144 if (!error) {
6145 lck_mtx_lock(&nofp->nof_lock);
6146 nofp->nof_w_dw += nofp->nof_d_w_dw;
6147 nofp->nof_d_w_dw = 0;
6148 lck_mtx_unlock(&nofp->nof_lock);
6149 }
6150 }
6151 if (!error && nofp->nof_d_r_dw) {
6152 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_WRITE, flags);
6153 if (!error) {
6154 lck_mtx_lock(&nofp->nof_lock);
6155 nofp->nof_r_dw += nofp->nof_d_r_dw;
6156 nofp->nof_d_r_dw = 0;
6157 lck_mtx_unlock(&nofp->nof_lock);
6158 }
6159 }
6160 /* non-deny-mode opens may be reopened if no locks are held */
6161 if (!error && nofp->nof_d_rw) {
6162 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, flags);
6163 /* for some errors, we should just try reopening the file */
0a7de745 6164 if (nfs_mount_state_error_delegation_lost(error)) {
6d2010ae 6165 reopen = error;
0a7de745 6166 }
6d2010ae
A
6167 if (!error || reopen) {
6168 lck_mtx_lock(&nofp->nof_lock);
6169 nofp->nof_rw += nofp->nof_d_rw;
6170 nofp->nof_d_rw = 0;
6171 lck_mtx_unlock(&nofp->nof_lock);
6172 }
6173 }
6174 /* if we've already set reopen, we should move these other two opens from delegated to not delegated */
6175 if ((!error || reopen) && nofp->nof_d_w) {
6176 if (!error) {
6177 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE, flags);
6178 /* for some errors, we should just try reopening the file */
0a7de745 6179 if (nfs_mount_state_error_delegation_lost(error)) {
6d2010ae 6180 reopen = error;
0a7de745 6181 }
6d2010ae
A
6182 }
6183 if (!error || reopen) {
6184 lck_mtx_lock(&nofp->nof_lock);
6185 nofp->nof_w += nofp->nof_d_w;
6186 nofp->nof_d_w = 0;
6187 lck_mtx_unlock(&nofp->nof_lock);
6188 }
6189 }
6190 if ((!error || reopen) && nofp->nof_d_r) {
6191 if (!error) {
6192 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, flags);
6193 /* for some errors, we should just try reopening the file */
0a7de745 6194 if (nfs_mount_state_error_delegation_lost(error)) {
6d2010ae 6195 reopen = error;
0a7de745 6196 }
6d2010ae
A
6197 }
6198 if (!error || reopen) {
6199 lck_mtx_lock(&nofp->nof_lock);
6200 nofp->nof_r += nofp->nof_d_r;
6201 nofp->nof_d_r = 0;
6202 lck_mtx_unlock(&nofp->nof_lock);
6203 }
6204 }
6205
6206 if (reopen) {
6207 /*
6208 * Any problems with the delegation probably indicates that we
6209 * should review/return all of our current delegation state.
6210 */
6211 if ((nmp = NFSTONMP(nofp->nof_np))) {
6212 nfs4_delegation_return_enqueue(nofp->nof_np);
6213 lck_mtx_lock(&nmp->nm_lock);
6214 nfs_need_recover(nmp, NFSERR_EXPIRED);
6215 lck_mtx_unlock(&nmp->nm_lock);
6216 }
6217 if (reopen && (nfs_check_for_locks(noop, nofp) == 0)) {
6218 /* just reopen the file on next access */
6219 NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, need reopen, %d",
0a7de745 6220 reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6d2010ae
A
6221 lck_mtx_lock(&nofp->nof_lock);
6222 nofp->nof_flags |= NFS_OPEN_FILE_REOPEN;
6223 lck_mtx_unlock(&nofp->nof_lock);
0a7de745 6224 return 0;
6d2010ae 6225 }
0a7de745 6226 if (reopen) {
6d2010ae 6227 NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, locks prevent reopen, %d",
0a7de745
A
6228 reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6229 }
6d2010ae
A
6230 }
6231
6232 if (!error && ((nmp = NFSTONMP(nofp->nof_np)))) {
6233 /* claim delegated locks */
6234 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
0a7de745 6235 if (nlop->nlo_open_owner != noop) {
6d2010ae 6236 continue;
0a7de745 6237 }
6d2010ae
A
6238 TAILQ_FOREACH_SAFE(nflp, &nlop->nlo_locks, nfl_lolink, nextnflp) {
6239 /* skip dead & blocked lock requests (shouldn't be any in the held lock list) */
0a7de745 6240 if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) {
6d2010ae 6241 continue;
0a7de745 6242 }
6d2010ae 6243 /* skip non-delegated locks */
0a7de745 6244 if (!(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
6d2010ae 6245 continue;
0a7de745 6246 }
6d2010ae
A
6247 error = nmp->nm_funcs->nf_setlock_rpc(nofp->nof_np, nofp, nflp, 0, flags, current_thread(), noop->noo_cred);
6248 if (error) {
6249 NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) failed %d, %d",
0a7de745 6250 nflp->nfl_start, nflp->nfl_end, error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6d2010ae
A
6251 break;
6252 }
6253 // else {
0a7de745
A
6254 // NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) succeeded, %d",
6255 // nflp->nfl_start, nflp->nfl_end, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6d2010ae
A
6256 // }
6257 }
0a7de745 6258 if (error) {
6d2010ae 6259 break;
0a7de745 6260 }
6d2010ae
A
6261 }
6262 }
6263
0a7de745
A
6264 if (!error) { /* all state claimed successfully! */
6265 return 0;
6266 }
6d2010ae
A
6267
6268 /* restart if it looks like a problem more than just losing the delegation */
6269 if (!nfs_mount_state_error_delegation_lost(error) &&
6270 ((error == ETIMEDOUT) || nfs_mount_state_error_should_restart(error))) {
6271 NP(nofp->nof_np, "nfs delegated lock claim error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
0a7de745 6272 if ((error == ETIMEDOUT) && ((nmp = NFSTONMP(nofp->nof_np)))) {
6d2010ae 6273 nfs_need_reconnect(nmp);
0a7de745
A
6274 }
6275 return error;
b0d623f7 6276 }
6d2010ae 6277
0a7de745 6278 /* delegated state lost (once held but now not claimable) */
6d2010ae
A
6279 NP(nofp->nof_np, "nfs delegated state claim error %d, state lost, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6280
6281 /*
6282 * Any problems with the delegation probably indicates that we
6283 * should review/return all of our current delegation state.
6284 */
6285 if ((nmp = NFSTONMP(nofp->nof_np))) {
6286 nfs4_delegation_return_enqueue(nofp->nof_np);
6287 lck_mtx_lock(&nmp->nm_lock);
6288 nfs_need_recover(nmp, NFSERR_EXPIRED);
6289 lck_mtx_unlock(&nmp->nm_lock);
6290 }
6291
6292 /* revoke all open file state */
6293 nfs_revoke_open_state_for_node(nofp->nof_np);
6294
0a7de745 6295 return error;
6d2010ae 6296}
cb323159 6297#endif /* CONFIG_NFS4*/
6d2010ae
A
6298
6299/*
6300 * Release all open state for the given node.
6301 */
6302void
6303nfs_release_open_state_for_node(nfsnode_t np, int force)
6304{
6305 struct nfsmount *nmp = NFSTONMP(np);
6306 struct nfs_open_file *nofp;
6307 struct nfs_file_lock *nflp, *nextnflp;
6308
6309 /* drop held locks */
6310 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
6311 /* skip dead & blocked lock requests */
0a7de745 6312 if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) {
6d2010ae 6313 continue;
0a7de745 6314 }
6d2010ae 6315 /* send an unlock if not a delegated lock */
0a7de745 6316 if (!force && nmp && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
6d2010ae 6317 nmp->nm_funcs->nf_unlock_rpc(np, nflp->nfl_owner, F_WRLCK, nflp->nfl_start, nflp->nfl_end, R_RECOVER,
0a7de745
A
6318 NULL, nflp->nfl_owner->nlo_open_owner->noo_cred);
6319 }
6d2010ae
A
6320 /* kill/remove the lock */
6321 lck_mtx_lock(&np->n_openlock);
6322 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
6323 lck_mtx_lock(&nflp->nfl_owner->nlo_lock);
6324 TAILQ_REMOVE(&nflp->nfl_owner->nlo_locks, nflp, nfl_lolink);
6325 lck_mtx_unlock(&nflp->nfl_owner->nlo_lock);
6326 if (nflp->nfl_blockcnt) {
6327 /* wake up anyone blocked on this lock */
6328 wakeup(nflp);
6329 } else {
6330 /* remove nflp from lock list and destroy */
6331 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
6332 nfs_file_lock_destroy(nflp);
2d21ac55 6333 }
6d2010ae
A
6334 lck_mtx_unlock(&np->n_openlock);
6335 }
6336
6337 lck_mtx_lock(&np->n_openlock);
6338
6339 /* drop all opens */
6340 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
0a7de745 6341 if (nofp->nof_flags & NFS_OPEN_FILE_LOST) {
6d2010ae 6342 continue;
0a7de745 6343 }
6d2010ae
A
6344 /* mark open state as lost */
6345 lck_mtx_lock(&nofp->nof_lock);
6346 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
6347 nofp->nof_flags |= NFS_OPEN_FILE_LOST;
0a7de745 6348
6d2010ae 6349 lck_mtx_unlock(&nofp->nof_lock);
cb323159 6350#if CONFIG_NFS4
0a7de745 6351 if (!force && nmp && (nmp->nm_vers >= NFS_VER4)) {
6d2010ae 6352 nfs4_close_rpc(np, nofp, NULL, nofp->nof_owner->noo_cred, R_RECOVER);
0a7de745 6353 }
cb323159 6354#endif
6d2010ae
A
6355 }
6356
6357 lck_mtx_unlock(&np->n_openlock);
6358}
6359
6360/*
6361 * State for a node has been lost, drop it, and revoke the node.
6362 * Attempt to return any state if possible in case the server
6363 * might somehow think we hold it.
6364 */
6365void
6366nfs_revoke_open_state_for_node(nfsnode_t np)
6367{
6368 struct nfsmount *nmp;
6369
6370 /* mark node as needing to be revoked */
6371 nfs_node_lock_force(np);
0a7de745 6372 if (np->n_flag & NREVOKE) { /* already revoked? */
6d2010ae
A
6373 NP(np, "nfs_revoke_open_state_for_node(): already revoked");
6374 nfs_node_unlock(np);
6375 return;
6376 }
6377 np->n_flag |= NREVOKE;
6378 nfs_node_unlock(np);
6379
6380 nfs_release_open_state_for_node(np, 0);
6381 NP(np, "nfs: state lost for %p 0x%x", np, np->n_flag);
6382
6383 /* mark mount as needing a revoke scan and have the socket thread do it. */
6384 if ((nmp = NFSTONMP(np))) {
6385 lck_mtx_lock(&nmp->nm_lock);
6386 nmp->nm_state |= NFSSTA_REVOKE;
6387 nfs_mount_sock_thread_wake(nmp);
6388 lck_mtx_unlock(&nmp->nm_lock);
6389 }
6390}
6391
cb323159 6392#if CONFIG_NFS4
6d2010ae
A
6393/*
6394 * Claim the delegated open combinations that each of this node's open files hold.
6395 */
6396int
6397nfs4_claim_delegated_state_for_node(nfsnode_t np, int flags)
6398{
6399 struct nfs_open_file *nofp;
6400 int error = 0;
6401
6402 lck_mtx_lock(&np->n_openlock);
6403
6404 /* walk the open file list looking for opens with delegated state to claim */
6405restart:
6406 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
6407 if (!nofp->nof_d_rw_drw && !nofp->nof_d_w_drw && !nofp->nof_d_r_drw &&
6408 !nofp->nof_d_rw_dw && !nofp->nof_d_w_dw && !nofp->nof_d_r_dw &&
0a7de745 6409 !nofp->nof_d_rw && !nofp->nof_d_w && !nofp->nof_d_r) {
6d2010ae 6410 continue;
0a7de745 6411 }
6d2010ae
A
6412 lck_mtx_unlock(&np->n_openlock);
6413 error = nfs4_claim_delegated_state_for_open_file(nofp, flags);
6414 lck_mtx_lock(&np->n_openlock);
0a7de745 6415 if (error) {
6d2010ae 6416 break;
0a7de745 6417 }
6d2010ae
A
6418 goto restart;
6419 }
6420
6421 lck_mtx_unlock(&np->n_openlock);
6422
0a7de745 6423 return error;
6d2010ae
A
6424}
6425
6426/*
6427 * Mark a node as needed to have its delegation returned.
6428 * Queue it up on the delegation return queue.
6429 * Make sure the thread is running.
6430 */
6431void
6432nfs4_delegation_return_enqueue(nfsnode_t np)
6433{
6434 struct nfsmount *nmp;
6435
6436 nmp = NFSTONMP(np);
0a7de745 6437 if (nfs_mount_gone(nmp)) {
6d2010ae 6438 return;
0a7de745 6439 }
6d2010ae
A
6440
6441 lck_mtx_lock(&np->n_openlock);
6442 np->n_openflags |= N_DELEG_RETURN;
6443 lck_mtx_unlock(&np->n_openlock);
6444
6445 lck_mtx_lock(&nmp->nm_lock);
0a7de745 6446 if (np->n_dreturn.tqe_next == NFSNOLIST) {
6d2010ae 6447 TAILQ_INSERT_TAIL(&nmp->nm_dreturnq, np, n_dreturn);
0a7de745 6448 }
6d2010ae
A
6449 nfs_mount_sock_thread_wake(nmp);
6450 lck_mtx_unlock(&nmp->nm_lock);
6451}
6452
6453/*
6454 * return any delegation we may have for the given node
6455 */
6456int
6457nfs4_delegation_return(nfsnode_t np, int flags, thread_t thd, kauth_cred_t cred)
6458{
6459 struct nfsmount *nmp;
6460 fhandle_t fh;
6461 nfs_stateid dstateid;
6462 int error;
6463
6464 nmp = NFSTONMP(np);
0a7de745
A
6465 if (nfs_mount_gone(nmp)) {
6466 return ENXIO;
6467 }
6d2010ae
A
6468
6469 /* first, make sure the node's marked for delegation return */
6470 lck_mtx_lock(&np->n_openlock);
0a7de745 6471 np->n_openflags |= (N_DELEG_RETURN | N_DELEG_RETURNING);
6d2010ae
A
6472 lck_mtx_unlock(&np->n_openlock);
6473
6474 /* make sure nobody else is using the delegation state */
0a7de745 6475 if ((error = nfs_open_state_set_busy(np, NULL))) {
6d2010ae 6476 goto out;
0a7de745 6477 }
6d2010ae
A
6478
6479 /* claim any delegated state */
0a7de745 6480 if ((error = nfs4_claim_delegated_state_for_node(np, flags))) {
6d2010ae 6481 goto out;
0a7de745 6482 }
6d2010ae
A
6483
6484 /* return the delegation */
6485 lck_mtx_lock(&np->n_openlock);
6486 dstateid = np->n_dstateid;
6487 fh.fh_len = np->n_fhsize;
6488 bcopy(np->n_fhp, &fh.fh_data, fh.fh_len);
6489 lck_mtx_unlock(&np->n_openlock);
6490 error = nfs4_delegreturn_rpc(NFSTONMP(np), fh.fh_data, fh.fh_len, &dstateid, flags, thd, cred);
6491 /* assume delegation is gone for all errors except ETIMEDOUT, NFSERR_*MOVED */
6492 if ((error != ETIMEDOUT) && (error != NFSERR_MOVED) && (error != NFSERR_LEASE_MOVED)) {
6493 lck_mtx_lock(&np->n_openlock);
6494 np->n_openflags &= ~N_DELEG_MASK;
6495 lck_mtx_lock(&nmp->nm_lock);
6496 if (np->n_dlink.tqe_next != NFSNOLIST) {
6497 TAILQ_REMOVE(&nmp->nm_delegations, np, n_dlink);
6498 np->n_dlink.tqe_next = NFSNOLIST;
6499 }
6500 lck_mtx_unlock(&nmp->nm_lock);
6501 lck_mtx_unlock(&np->n_openlock);
6502 }
6503
6504out:
6505 /* make sure it's no longer on the return queue and clear the return flags */
6506 lck_mtx_lock(&nmp->nm_lock);
6507 if (np->n_dreturn.tqe_next != NFSNOLIST) {
6508 TAILQ_REMOVE(&nmp->nm_dreturnq, np, n_dreturn);
6509 np->n_dreturn.tqe_next = NFSNOLIST;
6510 }
6511 lck_mtx_unlock(&nmp->nm_lock);
6512 lck_mtx_lock(&np->n_openlock);
0a7de745 6513 np->n_openflags &= ~(N_DELEG_RETURN | N_DELEG_RETURNING);
6d2010ae
A
6514 lck_mtx_unlock(&np->n_openlock);
6515
6516 if (error) {
6517 NP(np, "nfs4_delegation_return, error %d", error);
0a7de745 6518 if (error == ETIMEDOUT) {
6d2010ae 6519 nfs_need_reconnect(nmp);
0a7de745 6520 }
6d2010ae
A
6521 if (nfs_mount_state_error_should_restart(error)) {
6522 /* make sure recovery happens */
6523 lck_mtx_lock(&nmp->nm_lock);
6524 nfs_need_recover(nmp, nfs_mount_state_error_delegation_lost(error) ? NFSERR_EXPIRED : 0);
6525 lck_mtx_unlock(&nmp->nm_lock);
2d21ac55
A
6526 }
6527 }
6d2010ae
A
6528
6529 nfs_open_state_clear_busy(np);
6530
0a7de745 6531 return error;
b0d623f7 6532}
2d21ac55 6533
b0d623f7 6534/*
6d2010ae
A
6535 * RPC to return a delegation for a file handle
6536 */
6537int
6538nfs4_delegreturn_rpc(struct nfsmount *nmp, u_char *fhp, int fhlen, struct nfs_stateid *sid, int flags, thread_t thd, kauth_cred_t cred)
6539{
6540 int error = 0, status, numops;
6541 uint64_t xid;
6542 struct nfsm_chain nmreq, nmrep;
6543 struct nfsreq_secinfo_args si;
6544
6545 NFSREQ_SECINFO_SET(&si, NULL, fhp, fhlen, NULL, 0);
6546 nfsm_chain_null(&nmreq);
6547 nfsm_chain_null(&nmrep);
6548
6549 // PUTFH, DELEGRETURN
6550 numops = 2;
6551 nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
3e170ce0 6552 nfsm_chain_add_compound_header(error, &nmreq, "delegreturn", nmp->nm_minor_vers, numops);
6d2010ae
A
6553 numops--;
6554 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6555 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
6556 numops--;
6557 nfsm_chain_add_32(error, &nmreq, NFS_OP_DELEGRETURN);
6558 nfsm_chain_add_stateid(error, &nmreq, sid);
6559 nfsm_chain_build_done(error, &nmreq);
6560 nfsm_assert(error, (numops == 0), EPROTO);
6561 nfsmout_if(error);
6562 error = nfs_request2(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags, &nmrep, &xid, &status);
6563 nfsm_chain_skip_tag(error, &nmrep);
6564 nfsm_chain_get_32(error, &nmrep, numops);
6565 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6566 nfsm_chain_op_check(error, &nmrep, NFS_OP_DELEGRETURN);
6567nfsmout:
6568 nfsm_chain_cleanup(&nmreq);
6569 nfsm_chain_cleanup(&nmrep);
0a7de745 6570 return error;
6d2010ae 6571}
cb323159 6572#endif /* CONFIG_NFS4 */
6d2010ae
A
6573
6574/*
6575 * NFS read call.
6576 * Just call nfs_bioread() to do the work.
6577 *
6578 * Note: the exec code paths have a tendency to call VNOP_READ (and VNOP_MMAP)
6579 * without first calling VNOP_OPEN, so we make sure the file is open here.
6580 */
6581int
6582nfs_vnop_read(
6583 struct vnop_read_args /* {
0a7de745
A
6584 * struct vnodeop_desc *a_desc;
6585 * vnode_t a_vp;
6586 * struct uio *a_uio;
6587 * int a_ioflag;
6588 * vfs_context_t a_context;
6589 * } */*ap)
6d2010ae
A
6590{
6591 vnode_t vp = ap->a_vp;
6592 vfs_context_t ctx = ap->a_context;
6593 nfsnode_t np;
6594 struct nfsmount *nmp;
6595 struct nfs_open_owner *noop;
6596 struct nfs_open_file *nofp;
6597 int error;
6598
0a7de745 6599 if (vnode_vtype(ap->a_vp) != VREG) {
39236c6e 6600 return (vnode_vtype(vp) == VDIR) ? EISDIR : EPERM;
0a7de745 6601 }
6d2010ae
A
6602
6603 np = VTONFS(vp);
6604 nmp = NFSTONMP(np);
0a7de745
A
6605 if (nfs_mount_gone(nmp)) {
6606 return ENXIO;
6607 }
6608 if (np->n_flag & NREVOKE) {
6609 return EIO;
6610 }
6d2010ae
A
6611
6612 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
0a7de745
A
6613 if (!noop) {
6614 return ENOMEM;
6615 }
6d2010ae
A
6616restart:
6617 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
6618 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6619 NP(np, "nfs_vnop_read: LOST %d", kauth_cred_getuid(noop->noo_cred));
6620 error = EIO;
6621 }
cb323159 6622#if CONFIG_NFS4
6d2010ae
A
6623 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6624 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
6625 nofp = NULL;
0a7de745 6626 if (!error) {
6d2010ae 6627 goto restart;
0a7de745 6628 }
6d2010ae 6629 }
cb323159 6630#endif
6d2010ae
A
6631 if (error) {
6632 nfs_open_owner_rele(noop);
0a7de745 6633 return error;
6d2010ae 6634 }
3e170ce0
A
6635 /*
6636 * Since the read path is a hot path, if we already have
6637 * read access, lets go and try and do the read, without
6638 * busying the mount and open file node for this open owner.
6639 *
6640 * N.B. This is inherently racy w.r.t. an execve using
6641 * an already open file, in that the read at the end of
6642 * this routine will be racing with a potential close.
6643 * The code below ultimately has the same problem. In practice
6644 * this does not seem to be an issue.
6645 */
6646 if (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) {
6647 nfs_open_owner_rele(noop);
6648 goto do_read;
6649 }
6650 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
6651 if (error) {
6652 nfs_open_owner_rele(noop);
0a7de745 6653 return error;
3e170ce0
A
6654 }
6655 /*
6656 * If we don't have a file already open with the access we need (read) then
6657 * we need to open one. Otherwise we just co-opt an open. We might not already
6658 * have access because we're trying to read the first page of the
6659 * file for execve.
6660 */
6661 error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
6662 if (error) {
6663 nfs_mount_state_in_use_end(nmp, 0);
6664 nfs_open_owner_rele(noop);
0a7de745 6665 return error;
3e170ce0
A
6666 }
6667 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
6668 /* we don't have the file open, so open it for read access if we're not denied */
6669 if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
6670 NP(np, "nfs_vnop_read: File already needs close access: 0x%x, cred: %d thread: %lld",
0a7de745 6671 nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
3e170ce0
A
6672 }
6673 if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) {
6674 nfs_open_file_clear_busy(nofp);
6675 nfs_mount_state_in_use_end(nmp, 0);
6d2010ae 6676 nfs_open_owner_rele(noop);
0a7de745 6677 return EPERM;
6d2010ae
A
6678 }
6679 if (np->n_flag & NREVOKE) {
6680 error = EIO;
3e170ce0 6681 nfs_open_file_clear_busy(nofp);
6d2010ae
A
6682 nfs_mount_state_in_use_end(nmp, 0);
6683 nfs_open_owner_rele(noop);
0a7de745 6684 return error;
6d2010ae 6685 }
3e170ce0
A
6686 if (nmp->nm_vers < NFS_VER4) {
6687 /* NFS v2/v3 opens are always allowed - so just add it. */
6688 nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
cb323159
A
6689 }
6690#if CONFIG_NFS4
6691 else {
3e170ce0 6692 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
6d2010ae 6693 }
cb323159 6694#endif
0a7de745 6695 if (!error) {
6d2010ae 6696 nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
0a7de745 6697 }
3e170ce0 6698 }
0a7de745 6699 if (nofp) {
3e170ce0 6700 nfs_open_file_clear_busy(nofp);
0a7de745 6701 }
3e170ce0
A
6702 if (nfs_mount_state_in_use_end(nmp, error)) {
6703 nofp = NULL;
6704 goto restart;
6d2010ae
A
6705 }
6706 nfs_open_owner_rele(noop);
0a7de745
A
6707 if (error) {
6708 return error;
6709 }
3e170ce0 6710do_read:
0a7de745 6711 return nfs_bioread(VTONFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_context);
6d2010ae
A
6712}
6713
cb323159 6714#if CONFIG_NFS4
6d2010ae
A
6715/*
6716 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6717 * Files are created using the NFSv4 OPEN RPC. So we must open the
6718 * file to create it and then close it.
6719 */
6720int
6721nfs4_vnop_create(
6722 struct vnop_create_args /* {
0a7de745
A
6723 * struct vnodeop_desc *a_desc;
6724 * vnode_t a_dvp;
6725 * vnode_t *a_vpp;
6726 * struct componentname *a_cnp;
6727 * struct vnode_attr *a_vap;
6728 * vfs_context_t a_context;
6729 * } */*ap)
6d2010ae
A
6730{
6731 vfs_context_t ctx = ap->a_context;
6732 struct componentname *cnp = ap->a_cnp;
6733 struct vnode_attr *vap = ap->a_vap;
6734 vnode_t dvp = ap->a_dvp;
6735 vnode_t *vpp = ap->a_vpp;
6736 struct nfsmount *nmp;
6737 nfsnode_t np;
6738 int error = 0, busyerror = 0, accessMode, denyMode;
6739 struct nfs_open_owner *noop = NULL;
6740 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
6741
6742 nmp = VTONMP(dvp);
0a7de745
A
6743 if (nfs_mount_gone(nmp)) {
6744 return ENXIO;
6745 }
6d2010ae 6746
0a7de745 6747 if (vap) {
6d2010ae 6748 nfs_avoid_needless_id_setting_on_create(VTONFS(dvp), vap, ctx);
0a7de745 6749 }
6d2010ae
A
6750
6751 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
0a7de745
A
6752 if (!noop) {
6753 return ENOMEM;
6754 }
6d2010ae
A
6755
6756restart:
6757 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
6758 if (error) {
6759 nfs_open_owner_rele(noop);
0a7de745 6760 return error;
6d2010ae
A
6761 }
6762
6763 /* grab a provisional, nodeless open file */
6764 error = nfs_open_file_find(NULL, noop, &newnofp, 0, 0, 1);
6765 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6766 printf("nfs_vnop_create: LOST\n");
6767 error = EIO;
6768 }
6769 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6770 /* This shouldn't happen given that this is a new, nodeless nofp */
6771 nfs_mount_state_in_use_end(nmp, 0);
6772 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
6773 nfs_open_file_destroy(newnofp);
6774 newnofp = NULL;
0a7de745 6775 if (!error) {
6d2010ae 6776 goto restart;
0a7de745 6777 }
6d2010ae 6778 }
0a7de745 6779 if (!error) {
6d2010ae 6780 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
0a7de745 6781 }
6d2010ae 6782 if (error) {
0a7de745 6783 if (newnofp) {
6d2010ae 6784 nfs_open_file_destroy(newnofp);
0a7de745 6785 }
6d2010ae
A
6786 newnofp = NULL;
6787 goto out;
6788 }
6789
6790 /*
6791 * We're just trying to create the file.
6792 * We'll create/open it RW, and set NFS_OPEN_FILE_CREATE.
6793 */
6794 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
6795 denyMode = NFS_OPEN_SHARE_DENY_NONE;
6796
6797 /* Do the open/create */
6798 error = nfs4_open_rpc(newnofp, ctx, cnp, vap, dvp, vpp, NFS_OPEN_CREATE, accessMode, denyMode);
6799 if ((error == EACCES) && vap && !(vap->va_vaflags & VA_EXCLUSIVE) &&
6800 VATTR_IS_ACTIVE(vap, va_mode) && !(vap->va_mode & S_IWUSR)) {
6801 /*
6802 * Hmm... it looks like we may have a situation where the request was
6803 * retransmitted because we didn't get the first response which successfully
6804 * created/opened the file and then the second time we were denied the open
6805 * because the mode the file was created with doesn't allow write access.
6806 *
6807 * We'll try to work around this by temporarily updating the mode and
6808 * retrying the open.
6809 */
6810 struct vnode_attr vattr;
6811
6812 /* first make sure it's there */
6813 int error2 = nfs_lookitup(VTONFS(dvp), cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
6814 if (!error2 && np) {
6815 nfs_node_unlock(np);
6816 *vpp = NFSTOV(np);
6817 if (vnode_vtype(NFSTOV(np)) == VREG) {
6818 VATTR_INIT(&vattr);
6819 VATTR_SET(&vattr, va_mode, (vap->va_mode | S_IWUSR));
6820 if (!nfs4_setattr_rpc(np, &vattr, ctx)) {
6821 error2 = nfs4_open_rpc(newnofp, ctx, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, accessMode, denyMode);
6822 VATTR_INIT(&vattr);
6823 VATTR_SET(&vattr, va_mode, vap->va_mode);
6824 nfs4_setattr_rpc(np, &vattr, ctx);
0a7de745 6825 if (!error2) {
6d2010ae 6826 error = 0;
0a7de745 6827 }
6d2010ae
A
6828 }
6829 }
6830 if (error) {
6831 vnode_put(*vpp);
6832 *vpp = NULL;
6833 }
6834 }
6835 }
6836 if (!error && !*vpp) {
6837 printf("nfs4_open_rpc returned without a node?\n");
6838 /* Hmmm... with no node, we have no filehandle and can't close it */
6839 error = EIO;
6840 }
6841 if (error) {
6842 /* need to cleanup our temporary nofp */
6843 nfs_open_file_clear_busy(newnofp);
6844 nfs_open_file_destroy(newnofp);
6845 newnofp = NULL;
6846 goto out;
6847 }
6848 /* After we have a node, add our open file struct to the node */
6849 np = VTONFS(*vpp);
6850 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
6851 nofp = newnofp;
6852 error = nfs_open_file_find_internal(np, noop, &nofp, 0, 0, 0);
6853 if (error) {
6854 /* This shouldn't happen, because we passed in a new nofp to use. */
6855 printf("nfs_open_file_find_internal failed! %d\n", error);
6856 goto out;
6857 } else if (nofp != newnofp) {
6858 /*
6859 * Hmm... an open file struct already exists.
6860 * Mark the existing one busy and merge our open into it.
6861 * Then destroy the one we created.
6862 * Note: there's no chance of an open confict because the
6863 * open has already been granted.
6864 */
6865 busyerror = nfs_open_file_set_busy(nofp, NULL);
6866 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
6867 nofp->nof_stateid = newnofp->nof_stateid;
0a7de745 6868 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) {
6d2010ae 6869 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
0a7de745 6870 }
6d2010ae
A
6871 nfs_open_file_clear_busy(newnofp);
6872 nfs_open_file_destroy(newnofp);
6873 }
6874 newnofp = NULL;
6875 /* mark the node as holding a create-initiated open */
6876 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
6877 nofp->nof_creator = current_thread();
6878out:
0a7de745 6879 if (nofp && !busyerror) {
6d2010ae 6880 nfs_open_file_clear_busy(nofp);
0a7de745 6881 }
6d2010ae
A
6882 if (nfs_mount_state_in_use_end(nmp, error)) {
6883 nofp = newnofp = NULL;
6884 busyerror = 0;
6885 goto restart;
6886 }
0a7de745 6887 if (noop) {
6d2010ae 6888 nfs_open_owner_rele(noop);
0a7de745
A
6889 }
6890 return error;
6d2010ae
A
6891}
6892
6893/*
6894 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6895 */
6896int
6897nfs4_create_rpc(
6898 vfs_context_t ctx,
6899 nfsnode_t dnp,
6900 struct componentname *cnp,
6901 struct vnode_attr *vap,
6902 int type,
6903 char *link,
6904 nfsnode_t *npp)
6905{
6906 struct nfsmount *nmp;
6907 struct nfs_vattr nvattr;
6908 int error = 0, create_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
6909 int nfsvers, namedattrs, numops;
6910 u_int64_t xid, savedxid = 0;
6911 nfsnode_t np = NULL;
6912 vnode_t newvp = NULL;
6913 struct nfsm_chain nmreq, nmrep;
6914 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6915 const char *tag;
6916 nfs_specdata sd;
6917 fhandle_t fh;
6918 struct nfsreq rq, *req = &rq;
6919 struct nfs_dulookup dul;
6920 struct nfsreq_secinfo_args si;
6921
6922 nmp = NFSTONMP(dnp);
0a7de745
A
6923 if (nfs_mount_gone(nmp)) {
6924 return ENXIO;
6925 }
6d2010ae
A
6926 nfsvers = nmp->nm_vers;
6927 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
0a7de745
A
6928 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
6929 return EINVAL;
6930 }
6d2010ae
A
6931
6932 sd.specdata1 = sd.specdata2 = 0;
6933
6934 switch (type) {
6935 case NFLNK:
6936 tag = "symlink";
6937 break;
6938 case NFBLK:
6939 case NFCHR:
6940 tag = "mknod";
0a7de745
A
6941 if (!VATTR_IS_ACTIVE(vap, va_rdev)) {
6942 return EINVAL;
6943 }
6d2010ae
A
6944 sd.specdata1 = major(vap->va_rdev);
6945 sd.specdata2 = minor(vap->va_rdev);
6946 break;
6947 case NFSOCK:
6948 case NFFIFO:
6949 tag = "mknod";
6950 break;
6951 case NFDIR:
6952 tag = "mkdir";
6953 break;
6954 default:
0a7de745 6955 return EINVAL;
6d2010ae
A
6956 }
6957
6958 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
6959
6960 error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
0a7de745 6961 if (!namedattrs) {
6d2010ae 6962 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
0a7de745 6963 }
6d2010ae
A
6964
6965 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
6966 NVATTR_INIT(&nvattr);
6967 nfsm_chain_null(&nmreq);
6968 nfsm_chain_null(&nmrep);
6969
6970 // PUTFH, SAVEFH, CREATE, GETATTR(FH), RESTOREFH, GETATTR
6971 numops = 6;
6972 nfsm_chain_build_alloc_init(error, &nmreq, 66 * NFSX_UNSIGNED);
3e170ce0 6973 nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
6d2010ae
A
6974 numops--;
6975 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6976 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
6977 numops--;
6978 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
6979 numops--;
6980 nfsm_chain_add_32(error, &nmreq, NFS_OP_CREATE);
6981 nfsm_chain_add_32(error, &nmreq, type);
6982 if (type == NFLNK) {
6983 nfsm_chain_add_name(error, &nmreq, link, strlen(link), nmp);
6984 } else if ((type == NFBLK) || (type == NFCHR)) {
6985 nfsm_chain_add_32(error, &nmreq, sd.specdata1);
6986 nfsm_chain_add_32(error, &nmreq, sd.specdata2);
6987 }
6988 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
6989 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
6990 numops--;
6991 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6992 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
6993 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
6994 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
6995 numops--;
6996 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
6997 numops--;
6998 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6999 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
7000 nfsm_chain_build_done(error, &nmreq);
7001 nfsm_assert(error, (numops == 0), EPROTO);
7002 nfsmout_if(error);
7003
7004 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
0a7de745 7005 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
6d2010ae 7006 if (!error) {
0a7de745 7007 if (!namedattrs) {
6d2010ae 7008 nfs_dulookup_start(&dul, dnp, ctx);
0a7de745 7009 }
6d2010ae
A
7010 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
7011 }
7012
0a7de745 7013 if ((lockerror = nfs_node_lock(dnp))) {
6d2010ae 7014 error = lockerror;
0a7de745 7015 }
6d2010ae
A
7016 nfsm_chain_skip_tag(error, &nmrep);
7017 nfsm_chain_get_32(error, &nmrep, numops);
7018 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7019 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
7020 nfsmout_if(error);
7021 nfsm_chain_op_check(error, &nmrep, NFS_OP_CREATE);
7022 nfsm_chain_check_change_info(error, &nmrep, dnp);
7023 bmlen = NFS_ATTR_BITMAP_LEN;
7024 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
7025 /* At this point if we have no error, the object was created. */
7026 /* if we don't get attributes, then we should lookitup. */
7027 create_error = error;
7028 nfsmout_if(error);
7029 nfs_vattr_set_supported(bitmap, vap);
7030 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7031 nfsmout_if(error);
7032 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
7033 nfsmout_if(error);
7034 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
7035 printf("nfs: create/%s didn't return filehandle? %s\n", tag, cnp->cn_nameptr);
7036 error = EBADRPC;
7037 goto nfsmout;
7038 }
7039 /* directory attributes: if we don't get them, make sure to invalidate */
7040 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
7041 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7042 savedxid = xid;
7043 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
0a7de745 7044 if (error) {
6d2010ae 7045 NATTRINVALIDATE(dnp);
0a7de745 7046 }
6d2010ae
A
7047
7048nfsmout:
7049 nfsm_chain_cleanup(&nmreq);
7050 nfsm_chain_cleanup(&nmrep);
7051
7052 if (!lockerror) {
7053 if (!create_error && (dnp->n_flag & NNEGNCENTRIES)) {
7054 dnp->n_flag &= ~NNEGNCENTRIES;
7055 cache_purge_negatives(NFSTOV(dnp));
7056 }
7057 dnp->n_flag |= NMODIFIED;
7058 nfs_node_unlock(dnp);
7059 /* nfs_getattr() will check changed and purge caches */
7060 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
7061 }
7062
7063 if (!error && fh.fh_len) {
7064 /* create the vnode with the filehandle and attributes */
7065 xid = savedxid;
7066 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np);
0a7de745 7067 if (!error) {
6d2010ae 7068 newvp = NFSTOV(np);
0a7de745 7069 }
6d2010ae
A
7070 }
7071 NVATTR_CLEANUP(&nvattr);
7072
0a7de745 7073 if (!namedattrs) {
6d2010ae 7074 nfs_dulookup_finish(&dul, dnp, ctx);
0a7de745 7075 }
6d2010ae
A
7076
7077 /*
7078 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
7079 * if we can succeed in looking up the object.
7080 */
7081 if ((create_error == EEXIST) || (!create_error && !newvp)) {
7082 error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
7083 if (!error) {
7084 newvp = NFSTOV(np);
0a7de745 7085 if (vnode_vtype(newvp) != nfstov_type(type, nfsvers)) {
6d2010ae 7086 error = EEXIST;
0a7de745 7087 }
6d2010ae
A
7088 }
7089 }
0a7de745 7090 if (!busyerror) {
6d2010ae 7091 nfs_node_clear_busy(dnp);
0a7de745 7092 }
6d2010ae
A
7093 if (error) {
7094 if (newvp) {
7095 nfs_node_unlock(np);
7096 vnode_put(newvp);
7097 }
7098 } else {
7099 nfs_node_unlock(np);
7100 *npp = np;
7101 }
0a7de745 7102 return error;
6d2010ae
A
7103}
7104
7105int
7106nfs4_vnop_mknod(
7107 struct vnop_mknod_args /* {
0a7de745
A
7108 * struct vnodeop_desc *a_desc;
7109 * vnode_t a_dvp;
7110 * vnode_t *a_vpp;
7111 * struct componentname *a_cnp;
7112 * struct vnode_attr *a_vap;
7113 * vfs_context_t a_context;
7114 * } */*ap)
6d2010ae
A
7115{
7116 nfsnode_t np = NULL;
7117 struct nfsmount *nmp;
7118 int error;
7119
7120 nmp = VTONMP(ap->a_dvp);
0a7de745
A
7121 if (nfs_mount_gone(nmp)) {
7122 return ENXIO;
7123 }
6d2010ae 7124
0a7de745
A
7125 if (!VATTR_IS_ACTIVE(ap->a_vap, va_type)) {
7126 return EINVAL;
7127 }
6d2010ae
A
7128 switch (ap->a_vap->va_type) {
7129 case VBLK:
7130 case VCHR:
7131 case VFIFO:
7132 case VSOCK:
7133 break;
7134 default:
0a7de745 7135 return ENOTSUP;
6d2010ae
A
7136 }
7137
7138 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
0a7de745
A
7139 vtonfs_type(ap->a_vap->va_type, nmp->nm_vers), NULL, &np);
7140 if (!error) {
6d2010ae 7141 *ap->a_vpp = NFSTOV(np);
0a7de745
A
7142 }
7143 return error;
6d2010ae
A
7144}
7145
7146int
7147nfs4_vnop_mkdir(
7148 struct vnop_mkdir_args /* {
0a7de745
A
7149 * struct vnodeop_desc *a_desc;
7150 * vnode_t a_dvp;
7151 * vnode_t *a_vpp;
7152 * struct componentname *a_cnp;
7153 * struct vnode_attr *a_vap;
7154 * vfs_context_t a_context;
7155 * } */*ap)
6d2010ae
A
7156{
7157 nfsnode_t np = NULL;
7158 int error;
7159
7160 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
0a7de745
A
7161 NFDIR, NULL, &np);
7162 if (!error) {
6d2010ae 7163 *ap->a_vpp = NFSTOV(np);
0a7de745
A
7164 }
7165 return error;
6d2010ae
A
7166}
7167
7168int
7169nfs4_vnop_symlink(
7170 struct vnop_symlink_args /* {
0a7de745
A
7171 * struct vnodeop_desc *a_desc;
7172 * vnode_t a_dvp;
7173 * vnode_t *a_vpp;
7174 * struct componentname *a_cnp;
7175 * struct vnode_attr *a_vap;
7176 * char *a_target;
7177 * vfs_context_t a_context;
7178 * } */*ap)
6d2010ae
A
7179{
7180 nfsnode_t np = NULL;
7181 int error;
7182
7183 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
0a7de745
A
7184 NFLNK, ap->a_target, &np);
7185 if (!error) {
6d2010ae 7186 *ap->a_vpp = NFSTOV(np);
0a7de745
A
7187 }
7188 return error;
6d2010ae
A
7189}
7190
/*
 * NFSv4 link vnode op: create a hard link to vp named cnp in directory tdvp.
 *
 * Sends a single COMPOUND:
 *   PUTFH(source), SAVEFH, PUTFH(dir), LINK, GETATTR(dir), RESTOREFH, GETATTR(source)
 * so that fresh post-op attributes for both the target directory and the
 * linked file come back in the same round trip.
 *
 * Returns 0 on success or an errno.  EEXIST from the server is mapped to 0
 * on the assumption that it is the reply to a retransmitted request.
 */
int
nfs4_vnop_link(
	struct vnop_link_args /* {
	*  struct vnodeop_desc *a_desc;
	*  vnode_t a_vp;
	*  vnode_t a_tdvp;
	*  struct componentname *a_cnp;
	*  vfs_context_t a_context;
	*  } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	vnode_t vp = ap->a_vp;
	vnode_t tdvp = ap->a_tdvp;
	struct componentname *cnp = ap->a_cnp;
	int error = 0, lockerror = ENOENT, status;
	struct nfsmount *nmp;
	nfsnode_t np = VTONFS(vp);
	nfsnode_t tdnp = VTONFS(tdvp);
	int nfsvers, numops;
	u_int64_t xid, savedxid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	/* hard links cannot cross mounts */
	if (vnode_mount(vp) != vnode_mount(tdvp)) {
		return EXDEV;
	}

	nmp = VTONMP(vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	/* refuse to operate on referral trigger nodes (no real fh to link) */
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	/*
	 * Push all writes to the server, so that the attribute cache
	 * doesn't get "out of sync" with the server.
	 * XXX There should be a better way!
	 */
	nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);

	/* mark both nodes busy for the duration of the RPC */
	if ((error = nfs_node_set_busy2(tdnp, np, vfs_context_thread(ctx)))) {
		return error;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH(SOURCE), SAVEFH, PUTFH(DIR), LINK, GETATTR(DIR), RESTOREFH, GETATTR
	numops = 7;
	nfsm_chain_build_alloc_init(error, &nmreq, 29 * NFSX_UNSIGNED + cnp->cn_namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "link", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_LINK);
	nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(tdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	/* lock both nodes before digesting the reply attributes */
	if ((lockerror = nfs_node_lock2(tdnp, np))) {
		error = lockerror;
		goto nfsmout;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_LINK);
	nfsm_chain_check_change_info(error, &nmrep, tdnp);
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	savedxid = xid;
	nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
	if (error) {
		NATTRINVALIDATE(tdnp);
	}
	/* link attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	xid = savedxid;
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (error) {
		NATTRINVALIDATE(np);
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if (!lockerror) {
		tdnp->n_flag |= NMODIFIED;
	}
	/* Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. */
	if (error == EEXIST) {
		error = 0;
	}
	/* a successful link invalidates any negative cache entries in the dir */
	if (!error && (tdnp->n_flag & NNEGNCENTRIES)) {
		tdnp->n_flag &= ~NNEGNCENTRIES;
		cache_purge_negatives(tdvp);
	}
	if (!lockerror) {
		nfs_node_unlock2(tdnp, np);
	}
	nfs_node_clear_busy2(tdnp, np);
	return error;
}
7319
/*
 * NFSv4 rmdir vnode op: remove the directory vp (named cnp) from dvp.
 *
 * While the REMOVE RPC is in flight, a "du lookup" is kicked off to
 * refresh the parent's directory buffers (skipped on mounts with named
 * attributes, where those lookups are handled differently).
 *
 * Returns 0 on success or an errno.  ENOENT from the server is mapped
 * to 0 on the assumption that it is the reply to a retransmitted request.
 */
int
nfs4_vnop_rmdir(
	struct vnop_rmdir_args /* {
	*  struct vnodeop_desc *a_desc;
	*  vnode_t a_dvp;
	*  vnode_t a_vp;
	*  struct componentname *a_cnp;
	*  vfs_context_t a_context;
	*  } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	vnode_t vp = ap->a_vp;
	vnode_t dvp = ap->a_dvp;
	struct componentname *cnp = ap->a_cnp;
	struct nfsmount *nmp;
	int error = 0, namedattrs;
	nfsnode_t np = VTONFS(vp);
	nfsnode_t dnp = VTONFS(dvp);
	struct nfs_dulookup dul;

	if (vnode_vtype(vp) != VDIR) {
		return EINVAL;
	}

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);

	/* mark parent and child busy for the duration of the removal */
	if ((error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx)))) {
		return error;
	}

	if (!namedattrs) {
		nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
		nfs_dulookup_start(&dul, dnp, ctx);
	}

	error = nfs4_remove_rpc(dnp, cnp->cn_nameptr, cnp->cn_namelen,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx));

	/* drop the name from the name cache regardless of RPC outcome */
	nfs_name_cache_purge(dnp, np, cnp, ctx);
	/* nfs_getattr() will check changed and purge caches */
	nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
	if (!namedattrs) {
		nfs_dulookup_finish(&dul, dnp, ctx);
	}
	nfs_node_clear_busy2(dnp, np);

	/*
	 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
	 */
	if (error == ENOENT) {
		error = 0;
	}
	if (!error) {
		/*
		 * remove nfsnode from hash now so we can't accidentally find it
		 * again if another object gets created with the same filehandle
		 * before this vnode gets reclaimed
		 */
		lck_mtx_lock(nfs_node_hash_mutex);
		if (np->n_hflag & NHHASHED) {
			LIST_REMOVE(np, n_hash);
			np->n_hflag &= ~NHHASHED;
			FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
		}
		lck_mtx_unlock(nfs_node_hash_mutex);
	}
	return error;
}
7392
7393/*
7394 * NFSv4 Named Attributes
7395 *
7396 * Both the extended attributes interface and the named streams interface
7397 * are backed by NFSv4 named attributes. The implementations for both use
7398 * a common set of routines in an attempt to reduce code duplication, to
7399 * increase efficiency, to increase caching of both names and data, and to
7400 * confine the complexity.
7401 *
7402 * Each NFS node caches its named attribute directory's file handle.
7403 * The directory nodes for the named attribute directories are handled
7404 * exactly like regular directories (with a couple minor exceptions).
7405 * Named attribute nodes are also treated as much like regular files as
7406 * possible.
7407 *
7408 * Most of the heavy lifting is done by nfs4_named_attr_get().
7409 */
7410
7411/*
7412 * Get the given node's attribute directory node.
7413 * If !fetch, then only return a cached node.
7414 * Otherwise, we will attempt to fetch the node from the server.
7415 * (Note: the node should be marked busy.)
b0d623f7 7416 */
6d2010ae
A
/*
 * Return np's named attribute directory node.
 *
 * First tries the file handle cached in np->n_attrdirfh.  If there is no
 * cached handle (or no node for it) and 'fetch' is set, sends a
 * PUTFH/OPENATTR/GETATTR COMPOUND to obtain the attrdir's file handle and
 * attributes, caches the handle in the node, and creates a node for it.
 *
 * Returns the attrdir node (locked state: unlocked, with a vnode reference
 * held by the caller via nfs_nget) or NULL on any failure.
 * (Note: np should be marked busy by the caller.)
 */
nfsnode_t
nfs4_named_attr_dir_get(nfsnode_t np, int fetch, vfs_context_t ctx)
{
	nfsnode_t adnp = NULL;
	struct nfsmount *nmp;
	int error = 0, status, numops;
	struct nfsm_chain nmreq, nmrep;
	u_int64_t xid;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	fhandle_t fh;
	struct nfs_vattr nvattr;
	struct componentname cn;
	struct nfsreq rq, *req = &rq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return NULL;
	}
	/* referral trigger nodes have no usable file handle */
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return NULL;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	NVATTR_INIT(&nvattr);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/* lookup name used for the attrdir node itself */
	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
	cn.cn_namelen = strlen(_PATH_FORKSPECIFIER);
	cn.cn_nameiop = LOOKUP;

	if (np->n_attrdirfh) {
		/* fh cached in the node: first byte is length, data follows */
		// XXX can't set parent correctly (to np) yet
		error = nfs_nget(nmp->nm_mountp, NULL, &cn, np->n_attrdirfh + 1, *np->n_attrdirfh,
		    NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &adnp);
		if (adnp) {
			goto nfsmout;
		}
	}
	if (!fetch) {
		/* cached-only request and we had no cached node */
		error = ENOENT;
		goto nfsmout;
	}

	// PUTFH, OPENATTR, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "openattr", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
	nfsm_chain_add_32(error, &nmreq, 0); /* createdir == false */
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE); /* we need the attrdir's fh back */
	nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
	    NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
	if (!error) {
		error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	}

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
	nfsmout_if(error);
	if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh.fh_len) {
		/* server didn't give us a file handle for the attrdir */
		error = ENOENT;
		goto nfsmout;
	}
	if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) {
		/* (re)allocate attrdir fh buffer */
		if (np->n_attrdirfh) {
			FREE(np->n_attrdirfh, M_TEMP);
		}
		MALLOC(np->n_attrdirfh, u_char*, fh.fh_len + 1, M_TEMP, M_WAITOK);
	}
	if (!np->n_attrdirfh) {
		error = ENOMEM;
		goto nfsmout;
	}
	/* cache the attrdir fh in the node */
	*np->n_attrdirfh = fh.fh_len;
	bcopy(fh.fh_data, np->n_attrdirfh + 1, fh.fh_len);
	/* create node for attrdir */
	// XXX can't set parent correctly (to np) yet
	error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp);
nfsmout:
	NVATTR_CLEANUP(&nvattr);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	if (adnp) {
		/* sanity check that this node is an attribute directory */
		if (adnp->n_vattr.nva_type != VDIR) {
			error = EINVAL;
		}
		if (!(adnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
			error = EINVAL;
		}
		nfs_node_unlock(adnp);
		if (error) {
			vnode_put(NFSTOV(adnp));
		}
	}
	return error ? NULL : adnp;
}
7537
2d21ac55 7538/*
6d2010ae
A
7539 * Get the given node's named attribute node for the name given.
7540 *
7541 * In an effort to increase the performance of named attribute access, we try
7542 * to reduce server requests by doing the following:
7543 *
7544 * - cache the node's named attribute directory file handle in the node
7545 * - maintain a directory vnode for the attribute directory
7546 * - use name cache entries (positive and negative) to speed up lookups
7547 * - optionally open the named attribute (with the given accessMode) in the same RPC
7548 * - combine attribute directory retrieval with the lookup/open RPC
7549 * - optionally prefetch the named attribute's first block of data in the same RPC
7550 *
7551 * Also, in an attempt to reduce the number of copies/variations of this code,
7552 * parts of the RPC building/processing code are conditionalized on what is
7553 * needed for any particular request (openattr, lookup vs. open, read).
7554 *
7555 * Note that because we may not have the attribute directory node when we start
7556 * the lookup/open, we lock both the node and the attribute directory node.
2d21ac55 7557 */
6d2010ae 7558
0a7de745
A
7559#define NFS_GET_NAMED_ATTR_CREATE 0x1
7560#define NFS_GET_NAMED_ATTR_CREATE_GUARDED 0x2
7561#define NFS_GET_NAMED_ATTR_TRUNCATE 0x4
7562#define NFS_GET_NAMED_ATTR_PREFETCH 0x8
6d2010ae 7563
b0d623f7 7564int
6d2010ae
A
7565nfs4_named_attr_get(
7566 nfsnode_t np,
2d21ac55 7567 struct componentname *cnp,
6d2010ae
A
7568 uint32_t accessMode,
7569 int flags,
7570 vfs_context_t ctx,
7571 nfsnode_t *anpp,
7572 struct nfs_open_file **nofpp)
2d21ac55
A
7573{
7574 struct nfsmount *nmp;
6d2010ae
A
7575 int error = 0, open_error = EIO;
7576 int inuse = 0, adlockerror = ENOENT, busyerror = ENOENT, adbusyerror = ENOENT, nofpbusyerror = ENOENT;
7577 int create, guarded, prefetch, truncate, noopbusy = 0;
7578 int open, status, numops, hadattrdir, negnamecache;
7579 struct nfs_vattr nvattr;
7580 struct vnode_attr vattr;
7581 nfsnode_t adnp = NULL, anp = NULL;
7582 vnode_t avp = NULL;
2d21ac55 7583 u_int64_t xid, savedxid = 0;
2d21ac55
A
7584 struct nfsm_chain nmreq, nmrep;
7585 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6d2010ae
A
7586 uint32_t denyMode, rflags, delegation, recall, eof, rlen, retlen;
7587 nfs_stateid stateid, dstateid;
2d21ac55 7588 fhandle_t fh;
6d2010ae
A
7589 struct nfs_open_owner *noop = NULL;
7590 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
7591 struct vnop_access_args naa;
7592 thread_t thd;
7593 kauth_cred_t cred;
7594 struct timeval now;
7595 char sbuf[64], *s;
7596 uint32_t ace_type, ace_flags, ace_mask, len, slen;
7597 struct kauth_ace ace;
7598 struct nfsreq rq, *req = &rq;
7599 struct nfsreq_secinfo_args si;
7600
7601 *anpp = NULL;
7602 fh.fh_len = 0;
7603 rflags = delegation = recall = eof = rlen = retlen = 0;
7604 ace.ace_flags = 0;
7605 s = sbuf;
7606 slen = sizeof(sbuf);
2d21ac55 7607
6d2010ae 7608 nmp = NFSTONMP(np);
0a7de745
A
7609 if (nfs_mount_gone(nmp)) {
7610 return ENXIO;
7611 }
6d2010ae
A
7612 NVATTR_INIT(&nvattr);
7613 negnamecache = !NMFLAG(nmp, NONEGNAMECACHE);
7614 thd = vfs_context_thread(ctx);
7615 cred = vfs_context_ucred(ctx);
7616 create = (flags & NFS_GET_NAMED_ATTR_CREATE) ? NFS_OPEN_CREATE : NFS_OPEN_NOCREATE;
7617 guarded = (flags & NFS_GET_NAMED_ATTR_CREATE_GUARDED) ? NFS_CREATE_GUARDED : NFS_CREATE_UNCHECKED;
7618 truncate = (flags & NFS_GET_NAMED_ATTR_TRUNCATE);
7619 prefetch = (flags & NFS_GET_NAMED_ATTR_PREFETCH);
7620
7621 if (!create) {
7622 error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED);
0a7de745
A
7623 if (error) {
7624 return error;
7625 }
6d2010ae 7626 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
0a7de745
A
7627 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
7628 return ENOATTR;
7629 }
6d2010ae
A
7630 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_NONE) {
7631 /* shouldn't happen... but just be safe */
7632 printf("nfs4_named_attr_get: create with no access %s\n", cnp->cn_nameptr);
7633 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
7634 }
7635 open = (accessMode != NFS_OPEN_SHARE_ACCESS_NONE);
7636 if (open) {
7637 /*
7638 * We're trying to open the file.
7639 * We'll create/open it with the given access mode,
7640 * and set NFS_OPEN_FILE_CREATE.
7641 */
7642 denyMode = NFS_OPEN_SHARE_DENY_NONE;
0a7de745 7643 if (prefetch && guarded) {
6d2010ae 7644 prefetch = 0; /* no sense prefetching data that can't be there */
0a7de745 7645 }
6d2010ae 7646 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
0a7de745
A
7647 if (!noop) {
7648 return ENOMEM;
7649 }
2d21ac55
A
7650 }
7651
0a7de745
A
7652 if ((error = busyerror = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
7653 return error;
7654 }
2d21ac55 7655
6d2010ae
A
7656 adnp = nfs4_named_attr_dir_get(np, 0, ctx);
7657 hadattrdir = (adnp != NULL);
7658 if (prefetch) {
7659 microuptime(&now);
7660 /* use the special state ID because we don't have a real one to send */
7661 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
7662 rlen = MIN(nmp->nm_rsize, nmp->nm_biosize);
7663 }
7664 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
2d21ac55
A
7665 nfsm_chain_null(&nmreq);
7666 nfsm_chain_null(&nmrep);
7667
6d2010ae 7668 if (hadattrdir) {
0a7de745 7669 if ((error = adbusyerror = nfs_node_set_busy(adnp, vfs_context_thread(ctx)))) {
6d2010ae 7670 goto nfsmout;
0a7de745 7671 }
6d2010ae
A
7672 /* nfs_getattr() will check changed and purge caches */
7673 error = nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
7674 nfsmout_if(error);
7675 error = cache_lookup(NFSTOV(adnp), &avp, cnp);
7676 switch (error) {
7677 case ENOENT:
7678 /* negative cache entry */
7679 goto nfsmout;
7680 case 0:
7681 /* cache miss */
7682 /* try dir buf cache lookup */
7683 error = nfs_dir_buf_cache_lookup(adnp, &anp, cnp, ctx, 0);
7684 if (!error && anp) {
7685 /* dir buf cache hit */
7686 *anpp = anp;
7687 error = -1;
7688 }
0a7de745 7689 if (error != -1) { /* cache miss */
6d2010ae 7690 break;
0a7de745
A
7691 }
7692 /* FALLTHROUGH */
6d2010ae
A
7693 case -1:
7694 /* cache hit, not really an error */
316670eb 7695 OSAddAtomic64(1, &nfsstats.lookupcache_hits);
0a7de745 7696 if (!anp && avp) {
6d2010ae 7697 *anpp = anp = VTONFS(avp);
0a7de745 7698 }
6d2010ae
A
7699
7700 nfs_node_clear_busy(adnp);
7701 adbusyerror = ENOENT;
7702
7703 /* check for directory access */
7704 naa.a_desc = &vnop_access_desc;
7705 naa.a_vp = NFSTOV(adnp);
7706 naa.a_action = KAUTH_VNODE_SEARCH;
7707 naa.a_context = ctx;
7708
7709 /* compute actual success/failure based on accessibility */
7710 error = nfs_vnop_access(&naa);
0a7de745 7711 /* FALLTHROUGH */
6d2010ae
A
7712 default:
7713 /* we either found it, or hit an error */
7714 if (!error && guarded) {
7715 /* found cached entry but told not to use it */
7716 error = EEXIST;
7717 vnode_put(NFSTOV(anp));
7718 *anpp = anp = NULL;
7719 }
7720 /* we're done if error or we don't need to open */
0a7de745 7721 if (error || !open) {
6d2010ae 7722 goto nfsmout;
0a7de745 7723 }
6d2010ae
A
7724 /* no error and we need to open... */
7725 }
7726 }
7727
7728 if (open) {
7729restart:
7730 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
7731 if (error) {
7732 nfs_open_owner_rele(noop);
7733 noop = NULL;
7734 goto nfsmout;
7735 }
7736 inuse = 1;
7737
7738 /* grab an open file - possibly provisional/nodeless if cache_lookup() failed */
7739 error = nfs_open_file_find(anp, noop, &newnofp, 0, 0, 1);
7740 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
7741 printf("nfs4_named_attr_get: LOST %d %s\n", kauth_cred_getuid(noop->noo_cred), cnp->cn_nameptr);
7742 error = EIO;
7743 }
7744 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
7745 nfs_mount_state_in_use_end(nmp, 0);
7746 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
7747 nfs_open_file_destroy(newnofp);
7748 newnofp = NULL;
0a7de745 7749 if (!error) {
6d2010ae 7750 goto restart;
0a7de745 7751 }
6d2010ae 7752 }
0a7de745 7753 if (!error) {
6d2010ae 7754 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
0a7de745 7755 }
6d2010ae 7756 if (error) {
0a7de745 7757 if (newnofp) {
6d2010ae 7758 nfs_open_file_destroy(newnofp);
0a7de745 7759 }
6d2010ae
A
7760 newnofp = NULL;
7761 goto nfsmout;
7762 }
7763 if (anp) {
7764 /*
7765 * We already have the node. So we just need to open
7766 * it - which we may be able to do with a delegation.
7767 */
7768 open_error = error = nfs4_open(anp, newnofp, accessMode, denyMode, ctx);
7769 if (!error) {
7770 /* open succeeded, so our open file is no longer temporary */
7771 nofp = newnofp;
7772 nofpbusyerror = 0;
7773 newnofp = NULL;
0a7de745 7774 if (nofpp) {
6d2010ae 7775 *nofpp = nofp;
0a7de745 7776 }
6d2010ae
A
7777 }
7778 goto nfsmout;
7779 }
7780 }
7781
7782 /*
7783 * We either don't have the attrdir or we didn't find the attribute
7784 * in the name cache, so we need to talk to the server.
7785 *
7786 * If we don't have the attrdir, we'll need to ask the server for that too.
7787 * If the caller is requesting that the attribute be created, we need to
7788 * make sure the attrdir is created.
7789 * The caller may also request that the first block of an existing attribute
7790 * be retrieved at the same time.
7791 */
7792
7793 if (open) {
7794 /* need to mark the open owner busy during the RPC */
0a7de745 7795 if ((error = nfs_open_owner_set_busy(noop, thd))) {
6d2010ae 7796 goto nfsmout;
0a7de745 7797 }
6d2010ae
A
7798 noopbusy = 1;
7799 }
7800
7801 /*
7802 * We'd like to get updated post-open/lookup attributes for the
7803 * directory and we may also want to prefetch some data via READ.
7804 * We'd like the READ results to be last so that we can leave the
7805 * data in the mbufs until the end.
7806 *
7807 * At a minimum we're sending: PUTFH, LOOKUP/OPEN, GETATTR, PUTFH, GETATTR
7808 */
7809 numops = 5;
0a7de745
A
7810 if (!hadattrdir) {
7811 numops += 3; // also sending: OPENATTR, GETATTR, OPENATTR
7812 }
7813 if (prefetch) {
7814 numops += 4; // also sending: SAVEFH, RESTOREFH, NVERIFY, READ
7815 }
6d2010ae 7816 nfsm_chain_build_alloc_init(error, &nmreq, 64 * NFSX_UNSIGNED + cnp->cn_namelen);
3e170ce0 7817 nfsm_chain_add_compound_header(error, &nmreq, "getnamedattr", nmp->nm_minor_vers, numops);
6d2010ae
A
7818 if (hadattrdir) {
7819 numops--;
7820 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7821 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7822 } else {
7823 numops--;
7824 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7825 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7826 numops--;
7827 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7828 nfsm_chain_add_32(error, &nmreq, create ? 1 : 0);
7829 numops--;
7830 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7831 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7832 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7833 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
0a7de745 7834 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
6d2010ae
A
7835 }
7836 if (open) {
7837 numops--;
7838 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
7839 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
7840 nfsm_chain_add_32(error, &nmreq, accessMode);
7841 nfsm_chain_add_32(error, &nmreq, denyMode);
7842 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
7843 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
7844 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
7845 nfsm_chain_add_32(error, &nmreq, create);
7846 if (create) {
7847 nfsm_chain_add_32(error, &nmreq, guarded);
7848 VATTR_INIT(&vattr);
0a7de745 7849 if (truncate) {
6d2010ae 7850 VATTR_SET(&vattr, va_data_size, 0);
0a7de745 7851 }
6d2010ae
A
7852 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
7853 }
7854 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
7855 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7856 } else {
7857 numops--;
7858 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
7859 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
2d21ac55 7860 }
2d21ac55
A
7861 numops--;
7862 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7863 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7864 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7865 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
0a7de745 7866 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
6d2010ae
A
7867 if (prefetch) {
7868 numops--;
7869 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
7870 }
7871 if (hadattrdir) {
7872 numops--;
7873 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7874 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7875 } else {
7876 numops--;
7877 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7878 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7879 numops--;
7880 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7881 nfsm_chain_add_32(error, &nmreq, 0);
7882 }
2d21ac55
A
7883 numops--;
7884 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7885 nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
0a7de745 7886 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
6d2010ae
A
7887 if (prefetch) {
7888 numops--;
7889 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
7890 numops--;
7891 nfsm_chain_add_32(error, &nmreq, NFS_OP_NVERIFY);
7892 VATTR_INIT(&vattr);
7893 VATTR_SET(&vattr, va_data_size, 0);
7894 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
7895 numops--;
7896 nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
7897 nfsm_chain_add_stateid(error, &nmreq, &stateid);
7898 nfsm_chain_add_64(error, &nmreq, 0);
7899 nfsm_chain_add_32(error, &nmreq, rlen);
7900 }
2d21ac55
A
7901 nfsm_chain_build_done(error, &nmreq);
7902 nfsm_assert(error, (numops == 0), EPROTO);
7903 nfsmout_if(error);
6d2010ae 7904 error = nfs_request_async(hadattrdir ? adnp : np, NULL, &nmreq, NFSPROC4_COMPOUND,
0a7de745
A
7905 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, open ? R_NOINTR: 0, NULL, &req);
7906 if (!error) {
2d21ac55 7907 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
0a7de745 7908 }
2d21ac55 7909
0a7de745 7910 if (hadattrdir && ((adlockerror = nfs_node_lock(adnp)))) {
6d2010ae 7911 error = adlockerror;
0a7de745 7912 }
6d2010ae 7913 savedxid = xid;
2d21ac55
A
7914 nfsm_chain_skip_tag(error, &nmrep);
7915 nfsm_chain_get_32(error, &nmrep, numops);
7916 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6d2010ae
A
7917 if (!hadattrdir) {
7918 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
7919 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7920 nfsmout_if(error);
7921 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
7922 nfsmout_if(error);
7923 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) && fh.fh_len) {
7924 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) {
7925 /* (re)allocate attrdir fh buffer */
0a7de745 7926 if (np->n_attrdirfh) {
6d2010ae 7927 FREE(np->n_attrdirfh, M_TEMP);
0a7de745
A
7928 }
7929 MALLOC(np->n_attrdirfh, u_char*, fh.fh_len + 1, M_TEMP, M_WAITOK);
6d2010ae
A
7930 }
7931 if (np->n_attrdirfh) {
7932 /* remember the attrdir fh in the node */
7933 *np->n_attrdirfh = fh.fh_len;
0a7de745 7934 bcopy(fh.fh_data, np->n_attrdirfh + 1, fh.fh_len);
6d2010ae
A
7935 /* create busied node for attrdir */
7936 struct componentname cn;
7937 bzero(&cn, sizeof(cn));
7938 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
7939 cn.cn_namelen = strlen(_PATH_FORKSPECIFIER);
7940 cn.cn_nameiop = LOOKUP;
7941 // XXX can't set parent correctly (to np) yet
7942 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp);
7943 if (!error) {
7944 adlockerror = 0;
7945 /* set the node busy */
7946 SET(adnp->n_flag, NBUSY);
7947 adbusyerror = 0;
7948 }
7949 /* if no adnp, oh well... */
7950 error = 0;
7951 }
7952 }
7953 NVATTR_CLEANUP(&nvattr);
7954 fh.fh_len = 0;
7955 }
7956 if (open) {
7957 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
7958 nfs_owner_seqid_increment(noop, NULL, error);
7959 nfsm_chain_get_stateid(error, &nmrep, &newnofp->nof_stateid);
7960 nfsm_chain_check_change_info(error, &nmrep, adnp);
7961 nfsm_chain_get_32(error, &nmrep, rflags);
7962 bmlen = NFS_ATTR_BITMAP_LEN;
7963 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
7964 nfsm_chain_get_32(error, &nmrep, delegation);
0a7de745 7965 if (!error) {
6d2010ae
A
7966 switch (delegation) {
7967 case NFS_OPEN_DELEGATE_NONE:
7968 break;
7969 case NFS_OPEN_DELEGATE_READ:
7970 case NFS_OPEN_DELEGATE_WRITE:
7971 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
7972 nfsm_chain_get_32(error, &nmrep, recall);
0a7de745 7973 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
6d2010ae 7974 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
0a7de745 7975 }
6d2010ae
A
7976 /* if we have any trouble accepting the ACE, just invalidate it */
7977 ace_type = ace_flags = ace_mask = len = 0;
7978 nfsm_chain_get_32(error, &nmrep, ace_type);
7979 nfsm_chain_get_32(error, &nmrep, ace_flags);
7980 nfsm_chain_get_32(error, &nmrep, ace_mask);
7981 nfsm_chain_get_32(error, &nmrep, len);
7982 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
7983 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
7984 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
7985 if (!error && (len >= slen)) {
0a7de745
A
7986 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
7987 if (s) {
7988 slen = len + 1;
7989 } else {
6d2010ae 7990 ace.ace_flags = 0;
0a7de745 7991 }
6d2010ae 7992 }
0a7de745 7993 if (s) {
6d2010ae 7994 nfsm_chain_get_opaque(error, &nmrep, len, s);
0a7de745 7995 } else {
6d2010ae 7996 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
0a7de745 7997 }
6d2010ae
A
7998 if (!error && s) {
7999 s[len] = '\0';
0a7de745 8000 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
6d2010ae 8001 ace.ace_flags = 0;
0a7de745 8002 }
6d2010ae 8003 }
0a7de745 8004 if (error || !s) {
6d2010ae 8005 ace.ace_flags = 0;
0a7de745
A
8006 }
8007 if (s && (s != sbuf)) {
6d2010ae 8008 FREE(s, M_TEMP);
0a7de745 8009 }
6d2010ae
A
8010 break;
8011 default:
8012 error = EBADRPC;
8013 break;
8014 }
0a7de745 8015 }
6d2010ae
A
8016 /* At this point if we have no error, the object was created/opened. */
8017 open_error = error;
8018 } else {
8019 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOOKUP);
8020 }
2d21ac55
A
8021 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
8022 nfsmout_if(error);
6d2010ae 8023 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
2d21ac55 8024 nfsmout_if(error);
6d2010ae
A
8025 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh.fh_len) {
8026 error = EIO;
2d21ac55
A
8027 goto nfsmout;
8028 }
0a7de745 8029 if (prefetch) {
6d2010ae 8030 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
0a7de745 8031 }
6d2010ae 8032 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
0a7de745 8033 if (!hadattrdir) {
6d2010ae 8034 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
0a7de745 8035 }
2d21ac55 8036 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae
A
8037 nfsmout_if(error);
8038 xid = savedxid;
8039 nfsm_chain_loadattr(error, &nmrep, adnp, nmp->nm_vers, &xid);
8040 nfsmout_if(error);
2d21ac55 8041
6d2010ae 8042 if (open) {
0a7de745 8043 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
6d2010ae 8044 newnofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
0a7de745 8045 }
6d2010ae
A
8046 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
8047 if (adnp) {
8048 nfs_node_unlock(adnp);
8049 adlockerror = ENOENT;
8050 }
8051 NVATTR_CLEANUP(&nvattr);
8052 error = nfs4_open_confirm_rpc(nmp, adnp ? adnp : np, fh.fh_data, fh.fh_len, noop, &newnofp->nof_stateid, thd, cred, &nvattr, &xid);
8053 nfsmout_if(error);
8054 savedxid = xid;
0a7de745 8055 if ((adlockerror = nfs_node_lock(adnp))) {
6d2010ae 8056 error = adlockerror;
0a7de745 8057 }
2d21ac55 8058 }
2d21ac55
A
8059 }
8060
6d2010ae
A
8061nfsmout:
8062 if (open && adnp && !adlockerror) {
8063 if (!open_error && (adnp->n_flag & NNEGNCENTRIES)) {
8064 adnp->n_flag &= ~NNEGNCENTRIES;
8065 cache_purge_negatives(NFSTOV(adnp));
8066 }
8067 adnp->n_flag |= NMODIFIED;
8068 nfs_node_unlock(adnp);
8069 adlockerror = ENOENT;
8070 nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
8071 }
8072 if (adnp && !adlockerror && (error == ENOENT) &&
8073 (cnp->cn_flags & MAKEENTRY) && (cnp->cn_nameiop != CREATE) && negnamecache) {
8074 /* add a negative entry in the name cache */
8075 cache_enter(NFSTOV(adnp), NULL, cnp);
8076 adnp->n_flag |= NNEGNCENTRIES;
8077 }
8078 if (adnp && !adlockerror) {
8079 nfs_node_unlock(adnp);
8080 adlockerror = ENOENT;
8081 }
8082 if (!error && !anp && fh.fh_len) {
2d21ac55
A
8083 /* create the vnode with the filehandle and attributes */
8084 xid = savedxid;
6d2010ae
A
8085 error = nfs_nget(NFSTOMP(np), adnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &anp);
8086 if (!error) {
8087 *anpp = anp;
8088 nfs_node_unlock(anp);
8089 }
8090 if (!error && open) {
8091 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
8092 /* After we have a node, add our open file struct to the node */
8093 nofp = newnofp;
8094 error = nfs_open_file_find_internal(anp, noop, &nofp, 0, 0, 0);
8095 if (error) {
8096 /* This shouldn't happen, because we passed in a new nofp to use. */
8097 printf("nfs_open_file_find_internal failed! %d\n", error);
8098 nofp = NULL;
8099 } else if (nofp != newnofp) {
8100 /*
8101 * Hmm... an open file struct already exists.
8102 * Mark the existing one busy and merge our open into it.
8103 * Then destroy the one we created.
8104 * Note: there's no chance of an open confict because the
8105 * open has already been granted.
8106 */
8107 nofpbusyerror = nfs_open_file_set_busy(nofp, NULL);
8108 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
8109 nofp->nof_stateid = newnofp->nof_stateid;
0a7de745 8110 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) {
6d2010ae 8111 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
0a7de745 8112 }
6d2010ae
A
8113 nfs_open_file_clear_busy(newnofp);
8114 nfs_open_file_destroy(newnofp);
8115 newnofp = NULL;
8116 }
8117 if (!error) {
8118 newnofp = NULL;
8119 nofpbusyerror = 0;
8120 /* mark the node as holding a create-initiated open */
8121 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
8122 nofp->nof_creator = current_thread();
0a7de745 8123 if (nofpp) {
6d2010ae 8124 *nofpp = nofp;
0a7de745 8125 }
6d2010ae
A
8126 }
8127 }
2d21ac55 8128 }
6d2010ae
A
8129 NVATTR_CLEANUP(&nvattr);
8130 if (open && ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE))) {
8131 if (!error && anp && !recall) {
8132 /* stuff the delegation state in the node */
8133 lck_mtx_lock(&anp->n_openlock);
8134 anp->n_openflags &= ~N_DELEG_MASK;
8135 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
8136 anp->n_dstateid = dstateid;
8137 anp->n_dace = ace;
8138 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8139 lck_mtx_lock(&nmp->nm_lock);
0a7de745 8140 if (anp->n_dlink.tqe_next == NFSNOLIST) {
6d2010ae 8141 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
0a7de745 8142 }
6d2010ae
A
8143 lck_mtx_unlock(&nmp->nm_lock);
8144 }
8145 lck_mtx_unlock(&anp->n_openlock);
8146 } else {
8147 /* give the delegation back */
8148 if (anp) {
8149 if (NFS_CMPFH(anp, fh.fh_data, fh.fh_len)) {
8150 /* update delegation state and return it */
8151 lck_mtx_lock(&anp->n_openlock);
8152 anp->n_openflags &= ~N_DELEG_MASK;
8153 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
8154 anp->n_dstateid = dstateid;
8155 anp->n_dace = ace;
8156 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8157 lck_mtx_lock(&nmp->nm_lock);
0a7de745 8158 if (anp->n_dlink.tqe_next == NFSNOLIST) {
6d2010ae 8159 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
0a7de745 8160 }
6d2010ae
A
8161 lck_mtx_unlock(&nmp->nm_lock);
8162 }
8163 lck_mtx_unlock(&anp->n_openlock);
8164 /* don't need to send a separate delegreturn for fh */
8165 fh.fh_len = 0;
8166 }
8167 /* return anp's current delegation */
8168 nfs4_delegation_return(anp, 0, thd, cred);
8169 }
0a7de745 8170 if (fh.fh_len) { /* return fh's delegation if it wasn't for anp */
6d2010ae 8171 nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred);
0a7de745 8172 }
6d2010ae
A
8173 }
8174 }
8175 if (open) {
8176 if (newnofp) {
8177 /* need to cleanup our temporary nofp */
8178 nfs_open_file_clear_busy(newnofp);
8179 nfs_open_file_destroy(newnofp);
8180 newnofp = NULL;
8181 } else if (nofp && !nofpbusyerror) {
8182 nfs_open_file_clear_busy(nofp);
8183 nofpbusyerror = ENOENT;
8184 }
8185 if (inuse && nfs_mount_state_in_use_end(nmp, error)) {
8186 inuse = 0;
8187 nofp = newnofp = NULL;
8188 rflags = delegation = recall = eof = rlen = retlen = 0;
8189 ace.ace_flags = 0;
8190 s = sbuf;
8191 slen = sizeof(sbuf);
8192 nfsm_chain_cleanup(&nmreq);
8193 nfsm_chain_cleanup(&nmrep);
8194 if (anp) {
8195 vnode_put(NFSTOV(anp));
8196 *anpp = anp = NULL;
8197 }
8198 hadattrdir = (adnp != NULL);
8199 if (noopbusy) {
8200 nfs_open_owner_clear_busy(noop);
8201 noopbusy = 0;
8202 }
8203 goto restart;
8204 }
8205 if (noop) {
8206 if (noopbusy) {
8207 nfs_open_owner_clear_busy(noop);
8208 noopbusy = 0;
8209 }
8210 nfs_open_owner_rele(noop);
8211 }
8212 }
8213 if (!error && prefetch && nmrep.nmc_mhead) {
8214 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
8215 nfsm_chain_op_check(error, &nmrep, NFS_OP_NVERIFY);
8216 nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
8217 nfsm_chain_get_32(error, &nmrep, eof);
8218 nfsm_chain_get_32(error, &nmrep, retlen);
8219 if (!error && anp) {
8220 /*
8221 * There can be one problem with doing the prefetch.
8222 * Because we don't have the node before we start the RPC, we
8223 * can't have the buffer busy while the READ is performed.
8224 * So there is a chance that other I/O occured on the same
8225 * range of data while we were performing this RPC. If that
8226 * happens, then it's possible the data we have in the READ
8227 * response is no longer up to date.
8228 * Once we have the node and the buffer, we need to make sure
8229 * that there's no chance we could be putting stale data in
8230 * the buffer.
8231 * So, we check if the range read is dirty or if any I/O may
8232 * have occured on it while we were performing our RPC.
8233 */
8234 struct nfsbuf *bp = NULL;
8235 int lastpg;
8236 uint32_t pagemask;
8237
8238 retlen = MIN(retlen, rlen);
8239
8240 /* check if node needs size update or invalidation */
0a7de745 8241 if (ISSET(anp->n_flag, NUPDATESIZE)) {
6d2010ae 8242 nfs_data_update_size(anp, 0);
0a7de745 8243 }
6d2010ae
A
8244 if (!(error = nfs_node_lock(anp))) {
8245 if (anp->n_flag & NNEEDINVALIDATE) {
8246 anp->n_flag &= ~NNEEDINVALIDATE;
8247 nfs_node_unlock(anp);
0a7de745
A
8248 error = nfs_vinvalbuf(NFSTOV(anp), V_SAVE | V_IGNORE_WRITEERR, ctx, 1);
8249 if (!error) { /* lets play it safe and just drop the data */
6d2010ae 8250 error = EIO;
0a7de745 8251 }
6d2010ae
A
8252 } else {
8253 nfs_node_unlock(anp);
8254 }
8255 }
2d21ac55 8256
6d2010ae
A
8257 /* calculate page mask for the range of data read */
8258 lastpg = (trunc_page_32(retlen) - 1) / PAGE_SIZE;
8259 pagemask = ((1 << (lastpg + 1)) - 1);
8260
0a7de745
A
8261 if (!error) {
8262 error = nfs_buf_get(anp, 0, nmp->nm_biosize, thd, NBLK_READ | NBLK_NOWAIT, &bp);
8263 }
6d2010ae
A
8264 /* don't save the data if dirty or potential I/O conflict */
8265 if (!error && bp && !bp->nb_dirtyoff && !(bp->nb_dirty & pagemask) &&
8266 timevalcmp(&anp->n_lastio, &now, <)) {
316670eb 8267 OSAddAtomic64(1, &nfsstats.read_bios);
0a7de745 8268 CLR(bp->nb_flags, (NB_DONE | NB_ASYNC));
6d2010ae
A
8269 SET(bp->nb_flags, NB_READ);
8270 NFS_BUF_MAP(bp);
8271 nfsm_chain_get_opaque(error, &nmrep, retlen, bp->nb_data);
8272 if (error) {
8273 bp->nb_error = error;
8274 SET(bp->nb_flags, NB_ERROR);
8275 } else {
8276 bp->nb_offio = 0;
8277 bp->nb_endio = rlen;
0a7de745 8278 if ((retlen > 0) && (bp->nb_endio < (int)retlen)) {
6d2010ae 8279 bp->nb_endio = retlen;
0a7de745 8280 }
6d2010ae
A
8281 if (eof || (retlen == 0)) {
8282 /* zero out the remaining data (up to EOF) */
8283 off_t rpcrem, eofrem, rem;
8284 rpcrem = (rlen - retlen);
8285 eofrem = anp->n_size - (NBOFF(bp) + retlen);
8286 rem = (rpcrem < eofrem) ? rpcrem : eofrem;
0a7de745 8287 if (rem > 0) {
6d2010ae 8288 bzero(bp->nb_data + retlen, rem);
0a7de745 8289 }
6d2010ae
A
8290 } else if ((retlen < rlen) && !ISSET(bp->nb_flags, NB_ERROR)) {
8291 /* ugh... short read ... just invalidate for now... */
8292 SET(bp->nb_flags, NB_INVAL);
8293 }
8294 }
8295 nfs_buf_read_finish(bp);
8296 microuptime(&anp->n_lastio);
8297 }
0a7de745 8298 if (bp) {
6d2010ae 8299 nfs_buf_release(bp, 1);
0a7de745 8300 }
2d21ac55 8301 }
6d2010ae 8302 error = 0; /* ignore any transient error in processing the prefetch */
2d21ac55 8303 }
6d2010ae
A
8304 if (adnp && !adbusyerror) {
8305 nfs_node_clear_busy(adnp);
8306 adbusyerror = ENOENT;
8307 }
8308 if (!busyerror) {
8309 nfs_node_clear_busy(np);
8310 busyerror = ENOENT;
8311 }
0a7de745 8312 if (adnp) {
6d2010ae 8313 vnode_put(NFSTOV(adnp));
0a7de745 8314 }
6d2010ae
A
8315 if (error && *anpp) {
8316 vnode_put(NFSTOV(*anpp));
8317 *anpp = NULL;
8318 }
8319 nfsm_chain_cleanup(&nmreq);
8320 nfsm_chain_cleanup(&nmrep);
0a7de745 8321 return error;
6d2010ae
A
8322}
8323
8324/*
8325 * Remove a named attribute.
8326 */
8327int
8328nfs4_named_attr_remove(nfsnode_t np, nfsnode_t anp, const char *name, vfs_context_t ctx)
8329{
8330 nfsnode_t adnp = NULL;
8331 struct nfsmount *nmp;
8332 struct componentname cn;
8333 struct vnop_remove_args vra;
8334 int error, putanp = 0;
8335
8336 nmp = NFSTONMP(np);
0a7de745
A
8337 if (nfs_mount_gone(nmp)) {
8338 return ENXIO;
8339 }
6d2010ae
A
8340
8341 bzero(&cn, sizeof(cn));
8342 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
8343 cn.cn_namelen = strlen(name);
8344 cn.cn_nameiop = DELETE;
8345 cn.cn_flags = 0;
8346
8347 if (!anp) {
8348 error = nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_NONE,
0a7de745
A
8349 0, ctx, &anp, NULL);
8350 if ((!error && !anp) || (error == ENOATTR)) {
6d2010ae 8351 error = ENOENT;
0a7de745 8352 }
6d2010ae
A
8353 if (error) {
8354 if (anp) {
8355 vnode_put(NFSTOV(anp));
8356 anp = NULL;
8357 }
8358 goto out;
2d21ac55 8359 }
6d2010ae
A
8360 putanp = 1;
8361 }
8362
0a7de745 8363 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
6d2010ae 8364 goto out;
0a7de745 8365 }
6d2010ae
A
8366 adnp = nfs4_named_attr_dir_get(np, 1, ctx);
8367 nfs_node_clear_busy(np);
8368 if (!adnp) {
8369 error = ENOENT;
8370 goto out;
2d21ac55 8371 }
6d2010ae
A
8372
8373 vra.a_desc = &vnop_remove_desc;
8374 vra.a_dvp = NFSTOV(adnp);
8375 vra.a_vp = NFSTOV(anp);
8376 vra.a_cnp = &cn;
8377 vra.a_flags = 0;
8378 vra.a_context = ctx;
8379 error = nfs_vnop_remove(&vra);
8380out:
0a7de745 8381 if (adnp) {
6d2010ae 8382 vnode_put(NFSTOV(adnp));
0a7de745
A
8383 }
8384 if (putanp) {
6d2010ae 8385 vnode_put(NFSTOV(anp));
0a7de745
A
8386 }
8387 return error;
2d21ac55
A
8388}
8389
/*
 * NFSv4 getxattr vnode op.
 *
 * Looks up the named attribute file and either reads its data into
 * a_uio or, when a_uio is NULL, reports the attribute's size via a_size.
 * Returns ENOTSUP if the mount lacks named attribute support and
 * ENOATTR when the attribute does not exist.
 */
int
nfs4_vnop_getxattr(
	struct vnop_getxattr_args /* {
	 *  struct vnodeop_desc *a_desc;
	 *  vnode_t a_vp;
	 *  const char * a_name;
	 *  uio_t a_uio;
	 *  size_t *a_size;
	 *  int a_options;
	 *  vfs_context_t a_context;
	 *  } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	struct nfsmount *nmp;
	struct nfs_vattr nvattr;
	struct componentname cn;
	nfsnode_t anp;
	int error = 0, isrsrcfork;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	/* server/mount must support NFSv4 named attributes */
	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
		return ENOTSUP;
	}
	error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED);
	if (error) {
		return error;
	}
	/* if attributes say the file has no named attributes, skip the lookup */
	if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
	    !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
		return ENOATTR;
	}

	/* build a LOOKUP componentname for the attribute's name */
	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
	cn.cn_namelen = strlen(ap->a_name);
	cn.cn_nameiop = LOOKUP;
	cn.cn_flags = MAKEENTRY;

	/* we'll normally try to prefetch data for xattrs... the resource fork is really a stream */
	isrsrcfork = (bcmp(ap->a_name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);

	error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
	    !isrsrcfork ? NFS_GET_NAMED_ATTR_PREFETCH : 0, ctx, &anp, NULL);
	/* map "not found" results to ENOATTR for the xattr API */
	if ((!error && !anp) || (error == ENOENT)) {
		error = ENOATTR;
	}
	if (!error) {
		if (ap->a_uio) {
			/* read the attribute's data through the buffer cache */
			error = nfs_bioread(anp, ap->a_uio, 0, ctx);
		} else {
			/* no uio: caller only wants the attribute's size */
			*ap->a_size = anp->n_size;
		}
	}
	if (anp) {
		vnode_put(NFSTOV(anp));
	}
	return error;
}
2d21ac55 8452
6d2010ae
A
/*
 * NFSv4 setxattr vnode op.
 *
 * Creates/opens the named attribute file, writes the supplied data to it,
 * flushes, and closes it.  FinderInfo gets special treatment: it must be
 * exactly FINDERINFOSIZE bytes, and writing all zeroes means "remove it".
 * XATTR_CREATE / XATTR_REPLACE map to guarded-create / no-create opens.
 */
int
nfs4_vnop_setxattr(
	struct vnop_setxattr_args /* {
	 *  struct vnodeop_desc *a_desc;
	 *  vnode_t a_vp;
	 *  const char * a_name;
	 *  uio_t a_uio;
	 *  int a_options;
	 *  vfs_context_t a_context;
	 *  } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	int options = ap->a_options;
	uio_t uio = ap->a_uio;
	const char *name = ap->a_name;
	struct nfsmount *nmp;
	struct componentname cn;
	nfsnode_t anp = NULL;
	int error = 0, closeerror = 0, flags, isrsrcfork, isfinderinfo, empty = 0, i;
#define FINDERINFOSIZE 32
	uint8_t finfo[FINDERINFOSIZE];
	uint32_t *finfop;
	struct nfs_open_file *nofp = NULL;
	char uio_buf[UIO_SIZEOF(1)];
	uio_t auio;
	struct vnop_write_args vwa;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	/* server/mount must support NFSv4 named attributes */
	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
		return ENOTSUP;
	}

	/* CREATE and REPLACE are mutually exclusive */
	if ((options & XATTR_CREATE) && (options & XATTR_REPLACE)) {
		return EINVAL;
	}

	/* XXX limitation based on need to back up uio on short write */
	if (uio_iovcnt(uio) > 1) {
		printf("nfs4_vnop_setxattr: iovcnt > 1\n");
		return EINVAL;
	}

	/* build a CREATE componentname for the attribute's name */
	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
	cn.cn_namelen = strlen(name);
	cn.cn_nameiop = CREATE;
	cn.cn_flags = MAKEENTRY;

	isfinderinfo = (bcmp(name, XATTR_FINDERINFO_NAME, sizeof(XATTR_FINDERINFO_NAME)) == 0);
	isrsrcfork = isfinderinfo ? 0 : (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
	if (!isrsrcfork) {
		/* all non-resource-fork xattrs are written from offset 0 */
		uio_setoffset(uio, 0);
	}
	if (isfinderinfo) {
		/* FinderInfo must be exactly 32 bytes; copy it in up front */
		if (uio_resid(uio) != sizeof(finfo)) {
			return ERANGE;
		}
		error = uiomove((char*)&finfo, sizeof(finfo), uio);
		if (error) {
			return error;
		}
		/* setting a FinderInfo of all zeroes means remove the FinderInfo */
		empty = 1;
		for (i = 0, finfop = (uint32_t*)&finfo; i < (int)(sizeof(finfo) / sizeof(uint32_t)); i++) {
			if (finfop[i]) {
				empty = 0;
				break;
			}
		}
		if (empty && !(options & (XATTR_CREATE | XATTR_REPLACE))) {
			/* plain "set empty FinderInfo": just remove it and we're done */
			error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
			if (error == ENOENT) {
				error = 0;
			}
			return error;
		}
		/* first, let's see if we get a create/replace error */
	}

	/*
	 * create/open the xattr
	 *
	 * We need to make sure not to create it if XATTR_REPLACE.
	 * For all xattrs except the resource fork, we also want to
	 * truncate the xattr to remove any current data.  We'll do
	 * that by setting the size to 0 on create/open.
	 */
	flags = 0;
	if (!(options & XATTR_REPLACE)) {
		flags |= NFS_GET_NAMED_ATTR_CREATE;
	}
	if (options & XATTR_CREATE) {
		flags |= NFS_GET_NAMED_ATTR_CREATE_GUARDED;
	}
	if (!isrsrcfork) {
		flags |= NFS_GET_NAMED_ATTR_TRUNCATE;
	}

	error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
	    flags, ctx, &anp, &nofp);
	if (!error && !anp) {
		error = ENOATTR;
	}
	if (error) {
		goto out;
	}
	/* grab the open state from the get/create/open */
	if (nofp && !(error = nfs_open_file_set_busy(nofp, NULL))) {
		nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
		nofp->nof_creator = NULL;
		nfs_open_file_clear_busy(nofp);
	}

	/* Setting an empty FinderInfo really means remove it, skip to the close/remove */
	if (isfinderinfo && empty) {
		goto doclose;
	}

	/*
	 * Write the data out and flush.
	 *
	 * For FinderInfo, we've already copied the data to finfo, so do I/O from there.
	 */
	vwa.a_desc = &vnop_write_desc;
	vwa.a_vp = NFSTOV(anp);
	vwa.a_uio = NULL;
	vwa.a_ioflag = 0;
	vwa.a_context = ctx;
	if (isfinderinfo) {
		/* write FinderInfo from our local copy via a temporary uio */
		auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE, &uio_buf, sizeof(uio_buf));
		uio_addiov(auio, (uintptr_t)&finfo, sizeof(finfo));
		vwa.a_uio = auio;
	} else if (uio_resid(uio) > 0) {
		vwa.a_uio = uio;
	}
	if (vwa.a_uio) {
		error = nfs_vnop_write(&vwa);
		if (!error) {
			/* push the data to the server before closing */
			error = nfs_flush(anp, MNT_WAIT, vfs_context_thread(ctx), 0);
		}
	}
doclose:
	/* Close the xattr. */
	if (nofp) {
		int busyerror = nfs_open_file_set_busy(nofp, NULL);
		closeerror = nfs_close(anp, nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, ctx);
		if (!busyerror) {
			nfs_open_file_clear_busy(nofp);
		}
	}
	if (!error && isfinderinfo && empty) { /* Setting an empty FinderInfo really means remove it */
		error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
		if (error == ENOENT) {
			error = 0;
		}
	}
	if (!error) {
		/* report a close error only if nothing else failed */
		error = closeerror;
	}
out:
	if (anp) {
		vnode_put(NFSTOV(anp));
	}
	/* map "not found" to ENOATTR for the xattr API */
	if (error == ENOENT) {
		error = ENOATTR;
	}
	return error;
}
8625
8626int
6d2010ae
A
8627nfs4_vnop_removexattr(
8628 struct vnop_removexattr_args /* {
0a7de745
A
8629 * struct vnodeop_desc *a_desc;
8630 * vnode_t a_vp;
8631 * const char * a_name;
8632 * int a_options;
8633 * vfs_context_t a_context;
8634 * } */*ap)
2d21ac55 8635{
6d2010ae 8636 struct nfsmount *nmp = VTONMP(ap->a_vp);
2d21ac55
A
8637 int error;
8638
0a7de745
A
8639 if (nfs_mount_gone(nmp)) {
8640 return ENXIO;
8641 }
8642 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8643 return ENOTSUP;
8644 }
6d2010ae
A
8645
8646 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), NULL, ap->a_name, ap->a_context);
0a7de745 8647 if (error == ENOENT) {
6d2010ae 8648 error = ENOATTR;
0a7de745
A
8649 }
8650 return error;
2d21ac55
A
8651}
8652
/*
 * NFSv4 listxattr vnode op.
 *
 * Walks the file's named attribute directory (reading it through the
 * directory buffer cache) and copies each non-protected attribute name,
 * NUL-terminated, into a_uio — or, when a_uio is NULL, accumulates the
 * required buffer size into a_size.  Returns 0 if the file simply has
 * no named attributes.
 */
int
nfs4_vnop_listxattr(
	struct vnop_listxattr_args /* {
	 *  struct vnodeop_desc *a_desc;
	 *  vnode_t a_vp;
	 *  uio_t a_uio;
	 *  size_t *a_size;
	 *  int a_options;
	 *  vfs_context_t a_context;
	 *  } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	nfsnode_t np = VTONFS(ap->a_vp);
	uio_t uio = ap->a_uio;
	nfsnode_t adnp = NULL;
	struct nfsmount *nmp;
	int error, done, i;
	struct nfs_vattr nvattr;
	uint64_t cookie, nextcookie, lbn = 0;
	struct nfsbuf *bp = NULL;
	struct nfs_dir_buf_header *ndbhp;
	struct direntry *dp;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	/* server/mount must support NFSv4 named attributes */
	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
		return ENOTSUP;
	}

	error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED);
	if (error) {
		return error;
	}
	/* no named attributes on the file: empty list, success */
	if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
	    !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
		return 0;
	}

	/* busy the node while fetching its attribute directory */
	if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
		return error;
	}
	adnp = nfs4_named_attr_dir_get(np, 1, ctx);
	nfs_node_clear_busy(np);
	if (!adnp) {
		/* no attribute directory: nothing to list */
		goto out;
	}

	if ((error = nfs_node_lock(adnp))) {
		goto out;
	}

	/* flush cached directory data if it's been marked for invalidation */
	if (adnp->n_flag & NNEEDINVALIDATE) {
		adnp->n_flag &= ~NNEEDINVALIDATE;
		nfs_invaldir(adnp);
		nfs_node_unlock(adnp);
		error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
		if (!error) {
			error = nfs_node_lock(adnp);
		}
		if (error) {
			goto out;
		}
	}

	/*
	 * check for need to invalidate when (re)starting at beginning
	 */
	if (adnp->n_flag & NMODIFIED) {
		nfs_invaldir(adnp);
		nfs_node_unlock(adnp);
		if ((error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1))) {
			goto out;
		}
	} else {
		nfs_node_unlock(adnp);
	}
	/* nfs_getattr() will check changed and purge caches */
	if ((error = nfs_getattr(adnp, &nvattr, ctx, NGA_UNCACHED))) {
		goto out;
	}

	/* nothing to copy into a zero-length buffer */
	if (uio && (uio_resid(uio) == 0)) {
		goto out;
	}

	done = 0;
	nextcookie = lbn = 0;

	/* iterate directory buffers until EOF or error */
	while (!error && !done) {
		OSAddAtomic64(1, &nfsstats.biocache_readdirs);
		cookie = nextcookie;
getbuffer:
		error = nfs_buf_get(adnp, lbn, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
		if (error) {
			goto out;
		}
		ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
		if (!ISSET(bp->nb_flags, NB_CACHE) || !ISSET(ndbhp->ndbh_flags, NDB_FULL)) {
			if (!ISSET(bp->nb_flags, NB_CACHE)) { /* initialize the buffer */
				ndbhp->ndbh_flags = 0;
				ndbhp->ndbh_count = 0;
				ndbhp->ndbh_entry_end = sizeof(*ndbhp);
				ndbhp->ndbh_ncgen = adnp->n_ncgen;
			}
			error = nfs_buf_readdir(bp, ctx);
			/* buffer was dropped while reading; start over with a fresh one */
			if (error == NFSERR_DIRBUFDROPPED) {
				goto getbuffer;
			}
			if (error) {
				nfs_buf_release(bp, 1);
			}
			/* on hard errors, toss cached directory state */
			if (error && (error != ENXIO) && (error != ETIMEDOUT) && (error != EINTR) && (error != ERESTART)) {
				if (!nfs_node_lock(adnp)) {
					nfs_invaldir(adnp);
					nfs_node_unlock(adnp);
				}
				nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
				if (error == NFSERR_BAD_COOKIE) {
					error = ENOENT;
				}
			}
			if (error) {
				goto out;
			}
		}

		/* go through all the entries copying/counting */
		dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
		for (i = 0; i < ndbhp->ndbh_count; i++) {
			/* skip names the xattr API hides (e.g. system-protected ones) */
			if (!xattr_protected(dp->d_name)) {
				if (uio == NULL) {
					/* size-only query: count name + NUL */
					*ap->a_size += dp->d_namlen + 1;
				} else if (uio_resid(uio) < (dp->d_namlen + 1)) {
					error = ERANGE;
				} else {
					error = uiomove(dp->d_name, dp->d_namlen + 1, uio);
					if (error && (error != EFAULT)) {
						error = ERANGE;
					}
				}
			}
			nextcookie = dp->d_seekoff;
			dp = NFS_DIRENTRY_NEXT(dp);
		}

		if (i == ndbhp->ndbh_count) {
			/* hit end of buffer, move to next buffer */
			lbn = nextcookie;
			/* if we also hit EOF, we're done */
			if (ISSET(ndbhp->ndbh_flags, NDB_EOF)) {
				done = 1;
			}
		}
		/* a cookie that never advances would loop forever; treat as I/O error */
		if (!error && !done && (nextcookie == cookie)) {
			printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie, i, ndbhp->ndbh_count);
			error = EIO;
		}
		nfs_buf_release(bp, 1);
	}
out:
	if (adnp) {
		vnode_put(NFSTOV(adnp));
	}
	return error;
}
8821
6d2010ae 8822#if NAMEDSTREAMS
2d21ac55 8823int
6d2010ae
A
8824nfs4_vnop_getnamedstream(
8825 struct vnop_getnamedstream_args /* {
0a7de745
A
8826 * struct vnodeop_desc *a_desc;
8827 * vnode_t a_vp;
8828 * vnode_t *a_svpp;
8829 * const char *a_name;
8830 * enum nsoperation a_operation;
8831 * int a_flags;
8832 * vfs_context_t a_context;
8833 * } */*ap)
2d21ac55
A
8834{
8835 vfs_context_t ctx = ap->a_context;
2d21ac55 8836 struct nfsmount *nmp;
6d2010ae
A
8837 struct nfs_vattr nvattr;
8838 struct componentname cn;
8839 nfsnode_t anp;
8840 int error = 0;
2d21ac55 8841
6d2010ae 8842 nmp = VTONMP(ap->a_vp);
0a7de745
A
8843 if (nfs_mount_gone(nmp)) {
8844 return ENXIO;
8845 }
2d21ac55 8846
0a7de745
A
8847 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8848 return ENOTSUP;
8849 }
6d2010ae 8850 error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED);
0a7de745
A
8851 if (error) {
8852 return error;
8853 }
6d2010ae 8854 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
0a7de745
A
8855 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
8856 return ENOATTR;
8857 }
2d21ac55 8858
6d2010ae
A
8859 bzero(&cn, sizeof(cn));
8860 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8861 cn.cn_namelen = strlen(ap->a_name);
8862 cn.cn_nameiop = LOOKUP;
8863 cn.cn_flags = MAKEENTRY;
8864
8865 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
0a7de745
A
8866 0, ctx, &anp, NULL);
8867 if ((!error && !anp) || (error == ENOENT)) {
6d2010ae 8868 error = ENOATTR;
0a7de745
A
8869 }
8870 if (!error && anp) {
6d2010ae 8871 *ap->a_svpp = NFSTOV(anp);
0a7de745 8872 } else if (anp) {
6d2010ae 8873 vnode_put(NFSTOV(anp));
0a7de745
A
8874 }
8875 return error;
2d21ac55
A
8876}
8877
8878int
6d2010ae
A
8879nfs4_vnop_makenamedstream(
8880 struct vnop_makenamedstream_args /* {
0a7de745
A
8881 * struct vnodeop_desc *a_desc;
8882 * vnode_t *a_svpp;
8883 * vnode_t a_vp;
8884 * const char *a_name;
8885 * int a_flags;
8886 * vfs_context_t a_context;
8887 * } */*ap)
2d21ac55
A
8888{
8889 vfs_context_t ctx = ap->a_context;
6d2010ae
A
8890 struct nfsmount *nmp;
8891 struct componentname cn;
8892 nfsnode_t anp;
2d21ac55 8893 int error = 0;
2d21ac55 8894
6d2010ae 8895 nmp = VTONMP(ap->a_vp);
0a7de745
A
8896 if (nfs_mount_gone(nmp)) {
8897 return ENXIO;
8898 }
2d21ac55 8899
0a7de745
A
8900 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8901 return ENOTSUP;
8902 }
2d21ac55 8903
6d2010ae
A
8904 bzero(&cn, sizeof(cn));
8905 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8906 cn.cn_namelen = strlen(ap->a_name);
8907 cn.cn_nameiop = CREATE;
8908 cn.cn_flags = MAKEENTRY;
8909
8910 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
0a7de745
A
8911 NFS_GET_NAMED_ATTR_CREATE, ctx, &anp, NULL);
8912 if ((!error && !anp) || (error == ENOENT)) {
6d2010ae 8913 error = ENOATTR;
0a7de745
A
8914 }
8915 if (!error && anp) {
6d2010ae 8916 *ap->a_svpp = NFSTOV(anp);
0a7de745 8917 } else if (anp) {
6d2010ae 8918 vnode_put(NFSTOV(anp));
0a7de745
A
8919 }
8920 return error;
6d2010ae 8921}
2d21ac55 8922
6d2010ae
A
8923int
8924nfs4_vnop_removenamedstream(
8925 struct vnop_removenamedstream_args /* {
0a7de745
A
8926 * struct vnodeop_desc *a_desc;
8927 * vnode_t a_vp;
8928 * vnode_t a_svp;
8929 * const char *a_name;
8930 * int a_flags;
8931 * vfs_context_t a_context;
8932 * } */*ap)
6d2010ae
A
8933{
8934 struct nfsmount *nmp = VTONMP(ap->a_vp);
8935 nfsnode_t np = ap->a_vp ? VTONFS(ap->a_vp) : NULL;
8936 nfsnode_t anp = ap->a_svp ? VTONFS(ap->a_svp) : NULL;
2d21ac55 8937
0a7de745
A
8938 if (nfs_mount_gone(nmp)) {
8939 return ENXIO;
8940 }
2d21ac55
A
8941
8942 /*
6d2010ae
A
8943 * Given that a_svp is a named stream, checking for
8944 * named attribute support is kinda pointless.
2d21ac55 8945 */
0a7de745
A
8946 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8947 return ENOTSUP;
8948 }
6d2010ae 8949
0a7de745 8950 return nfs4_named_attr_remove(np, anp, ap->a_name, ap->a_context);
2d21ac55
A
8951}
8952
6d2010ae 8953#endif
cb323159 8954#endif /* CONFIG_NFS4 */