]> git.saurik.com Git - apple/xnu.git/blame - bsd/nfs/nfs4_vnops.c
xnu-1699.24.23.tar.gz
[apple/xnu.git] / bsd / nfs / nfs4_vnops.c
CommitLineData
2d21ac55 1/*
6d2010ae 2 * Copyright (c) 2006-2011 Apple Inc. All rights reserved.
2d21ac55
A
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/*
30 * vnode op calls for NFS version 4
31 */
32#include <sys/param.h>
33#include <sys/kernel.h>
34#include <sys/systm.h>
35#include <sys/resourcevar.h>
36#include <sys/proc_internal.h>
37#include <sys/kauth.h>
38#include <sys/mount_internal.h>
39#include <sys/malloc.h>
40#include <sys/kpi_mbuf.h>
41#include <sys/conf.h>
42#include <sys/vnode_internal.h>
43#include <sys/dirent.h>
44#include <sys/fcntl.h>
45#include <sys/lockf.h>
46#include <sys/ubc_internal.h>
47#include <sys/attr.h>
48#include <sys/signalvar.h>
6d2010ae
A
49#include <sys/uio_internal.h>
50#include <sys/xattr.h>
51#include <sys/paths.h>
2d21ac55
A
52
53#include <vfs/vfs_support.h>
54
55#include <sys/vm.h>
56
57#include <sys/time.h>
58#include <kern/clock.h>
59#include <libkern/OSAtomic.h>
60
61#include <miscfs/fifofs/fifo.h>
62#include <miscfs/specfs/specdev.h>
63
64#include <nfs/rpcv2.h>
65#include <nfs/nfsproto.h>
66#include <nfs/nfs.h>
67#include <nfs/nfsnode.h>
68#include <nfs/nfs_gss.h>
69#include <nfs/nfsmount.h>
70#include <nfs/nfs_lock.h>
71#include <nfs/xdr_subs.h>
72#include <nfs/nfsm_subs.h>
73
74#include <net/if.h>
75#include <netinet/in.h>
76#include <netinet/in_var.h>
77#include <vm/vm_kern.h>
78
79#include <kern/task.h>
80#include <kern/sched_prim.h>
81
2d21ac55 82int
6d2010ae 83nfs4_access_rpc(nfsnode_t np, u_int32_t *access, vfs_context_t ctx)
2d21ac55 84{
b0d623f7 85 int error = 0, lockerror = ENOENT, status, numops, slot;
2d21ac55
A
86 u_int64_t xid;
87 struct nfsm_chain nmreq, nmrep;
88 struct timeval now;
6d2010ae 89 uint32_t access_result = 0, supported = 0, missing;
2d21ac55
A
90 struct nfsmount *nmp = NFSTONMP(np);
91 int nfsvers = nmp->nm_vers;
92 uid_t uid;
6d2010ae 93 struct nfsreq_secinfo_args si;
2d21ac55 94
6d2010ae
A
95 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
96 return (0);
97
98 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
2d21ac55
A
99 nfsm_chain_null(&nmreq);
100 nfsm_chain_null(&nmrep);
101
b0d623f7
A
102 // PUTFH, ACCESS, GETATTR
103 numops = 3;
2d21ac55
A
104 nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED);
105 nfsm_chain_add_compound_header(error, &nmreq, "access", numops);
106 numops--;
107 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
108 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
109 numops--;
110 nfsm_chain_add_32(error, &nmreq, NFS_OP_ACCESS);
6d2010ae 111 nfsm_chain_add_32(error, &nmreq, *access);
2d21ac55
A
112 numops--;
113 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 114 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
2d21ac55
A
115 nfsm_chain_build_done(error, &nmreq);
116 nfsm_assert(error, (numops == 0), EPROTO);
117 nfsmout_if(error);
6d2010ae 118 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
2d21ac55 119
b0d623f7
A
120 if ((lockerror = nfs_node_lock(np)))
121 error = lockerror;
2d21ac55
A
122 nfsm_chain_skip_tag(error, &nmrep);
123 nfsm_chain_get_32(error, &nmrep, numops);
124 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
125 nfsm_chain_op_check(error, &nmrep, NFS_OP_ACCESS);
126 nfsm_chain_get_32(error, &nmrep, supported);
6d2010ae 127 nfsm_chain_get_32(error, &nmrep, access_result);
2d21ac55 128 nfsmout_if(error);
6d2010ae 129 if ((missing = (*access & ~supported))) {
2d21ac55
A
130 /* missing support for something(s) we wanted */
131 if (missing & NFS_ACCESS_DELETE) {
132 /*
133 * If the server doesn't report DELETE (possible
134 * on UNIX systems), we'll assume that it is OK
135 * and just let any subsequent delete action fail
136 * if it really isn't deletable.
137 */
6d2010ae 138 access_result |= NFS_ACCESS_DELETE;
2d21ac55
A
139 }
140 }
6d2010ae
A
141 /* ".zfs" subdirectories may erroneously give a denied answer for modify/delete */
142 if (nfs_access_dotzfs) {
143 vnode_t dvp = NULLVP;
144 if (np->n_flag & NISDOTZFSCHILD) /* may be able to create/delete snapshot dirs */
145 access_result |= (NFS_ACCESS_MODIFY|NFS_ACCESS_EXTEND|NFS_ACCESS_DELETE);
146 else if (((dvp = vnode_getparent(NFSTOV(np))) != NULLVP) && (VTONFS(dvp)->n_flag & NISDOTZFSCHILD))
147 access_result |= NFS_ACCESS_DELETE; /* may be able to delete snapshot dirs */
148 if (dvp != NULLVP)
149 vnode_put(dvp);
150 }
b0d623f7 151 /* Some servers report DELETE support but erroneously give a denied answer. */
6d2010ae
A
152 if (nfs_access_delete && (*access & NFS_ACCESS_DELETE) && !(access_result & NFS_ACCESS_DELETE))
153 access_result |= NFS_ACCESS_DELETE;
2d21ac55 154 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 155 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
2d21ac55
A
156 nfsmout_if(error);
157
158 uid = kauth_cred_getuid(vfs_context_ucred(ctx));
6d2010ae
A
159 slot = nfs_node_access_slot(np, uid, 1);
160 np->n_accessuid[slot] = uid;
2d21ac55 161 microuptime(&now);
6d2010ae
A
162 np->n_accessstamp[slot] = now.tv_sec;
163 np->n_access[slot] = access_result;
2d21ac55 164
6d2010ae
A
165 /* pass back the access returned with this request */
166 *access = np->n_access[slot];
2d21ac55 167nfsmout:
b0d623f7
A
168 if (!lockerror)
169 nfs_node_unlock(np);
2d21ac55
A
170 nfsm_chain_cleanup(&nmreq);
171 nfsm_chain_cleanup(&nmrep);
172 return (error);
173}
174
/*
 * nfs4_getattr_rpc
 *
 * Fetch attributes for a file handle via an NFSv4 compound: PUTFH, GETATTR.
 * May be called with either a node ('np') or a mount + raw file handle
 * ('mp'/'fhp'/'fhsize'); the nfsmount is derived from whichever was given.
 *
 * flags:
 *   NGA_ACL     - also request the ACL attribute (only if the server
 *                 advertises ACL support).
 *   NGA_MONITOR - vnode monitor request; marked R_RECOVER so it is
 *                 treated as "soft" and won't hang recovery.
 *
 * Parsed attributes are returned in *nvap and the request xid in *xidp.
 * Referral trigger nodes are answered locally with synthesized default
 * attributes (no RPC is sent).
 */
int
nfs4_getattr_rpc(
	nfsnode_t np,
	mount_t mp,
	u_char *fhp,
	size_t fhsize,
	int flags,
	vfs_context_t ctx,
	struct nfs_vattr *nvap,
	u_int64_t *xidp)
{
	struct nfsmount *nmp = mp ? VFSTONFS(mp) : NFSTONMP(np);
	int error = 0, status, nfsvers, numops, rpcflags = 0, acls;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	if (!nmp)
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	/* does this server/export do ACLs at all? */
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	if (np && (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)) {
		/* referral triggers get synthesized attributes, no RPC */
		nfs4_default_attrs_for_referral_trigger(VTONFS(np->n_parent), NULL, 0, nvap, NULL);
		return (0);
	}

	if (flags & NGA_MONITOR) /* vnode monitor requests should be soft */
		rpcflags = R_RECOVER;

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "getattr", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fhp, fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	/* start from the standard attribute set; add ACL only when requested+supported */
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	if ((flags & NGA_ACL) && acls)
		NFS_BITMAP_SET(bitmap, NFS_FATTR_ACL);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, mp, &nmreq, NFSPROC4_COMPOUND,
			vfs_context_thread(ctx), vfs_context_ucred(ctx),
			NULL, rpcflags, &nmrep, xidp, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((flags & NGA_ACL) && acls && !NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL)) {
		/* we asked for the ACL but didn't get one... assume there isn't one */
		NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_ACL);
		nvap->nva_acl = NULL;
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}
246
/*
 * nfs4_readlink_rpc
 *
 * Read the contents of a symbolic link via an NFSv4 compound:
 * PUTFH, GETATTR, READLINK.
 *
 * On entry *buflenp is the size of 'buf'; on success it is updated to the
 * number of bytes actually copied.  The link target is truncated if it does
 * not fit (space is reserved for the caller's terminator: at most
 * *buflenp - 1 bytes are returned).
 *
 * The node lock is held across reply parsing to protect attribute updates.
 */
int
nfs4_readlink_rpc(nfsnode_t np, char *buf, uint32_t *buflenp, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, status, numops;
	uint32_t len = 0;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (!nmp)
		return (ENXIO);
	/* referral trigger nodes are not readable links */
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);
	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, READLINK
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "readlink", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READLINK);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READLINK);
	nfsm_chain_get_32(error, &nmrep, len);
	nfsmout_if(error);
	if (len >= *buflenp) {
		/* clamp to the known link size if smaller, else to buffer - 1 */
		if (np->n_size && (np->n_size < *buflenp))
			len = np->n_size;
		else
			len = *buflenp - 1;
	}
	nfsm_chain_get_opaque(error, &nmrep, len, buf);
	if (!error)
		*buflenp = len;
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}
309
/*
 * nfs4_read_rpc_async
 *
 * Build and submit an asynchronous NFSv4 READ request for 'len' bytes at
 * 'offset'.  Compound sent: PUTFH, READ, GETATTR.  The open/lock stateid
 * appropriate for (np, thd, cred) is attached to the READ.
 *
 * The in-flight request is returned via *reqp; the caller completes it with
 * nfs4_read_rpc_async_finish().  'cb' supplies the async completion info.
 */
int
nfs4_read_rpc_async(
	nfsnode_t np,
	off_t offset,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (!nmp)
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	/* referral trigger nodes cannot be read */
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, READ, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "read", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
	/* READ carries the stateid for this open/lock owner */
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, len);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return (error);
}
360
/*
 * nfs4_read_rpc_async_finish
 *
 * Complete an async NFSv4 READ started by nfs4_read_rpc_async().
 *
 * On entry *lenp is the maximum the caller will accept; on return it is the
 * number of bytes copied into 'uio' (clamped to what the server returned).
 * *eofp (if non-NULL) is set from the reply; a zero-length non-EOF reply is
 * treated as EOF as a defensive measure.
 *
 * Returns EINPROGRESS (without touching outputs) if the async request was
 * restarted and is not yet complete.
 */
int
nfs4_read_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	uio_t uio,
	size_t *lenp,
	int *eofp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, nfsvers, numops, status, eof = 0;
	size_t retlen = 0;
	u_int64_t xid;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (!nmp) {
		/* mount is gone; drop the outstanding request */
		nfs_request_async_cancel(req);
		return (ENXIO);
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) /* async request restarted */
		return (error);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
	nfsm_chain_get_32(error, &nmrep, eof);
	nfsm_chain_get_32(error, &nmrep, retlen);
	if (!error) {
		/* never copy more than the caller asked for */
		*lenp = MIN(retlen, *lenp);
		error = nfsm_chain_get_uio(&nmrep, *lenp, uio);
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (!lockerror)
		nfs_node_unlock(np);
	if (eofp) {
		/* zero bytes returned without EOF: treat as EOF anyway */
		if (!eof && !retlen)
			eof = 1;
		*eofp = eof;
	}
	nfsm_chain_cleanup(&nmrep);
	/* track last I/O time for named-attribute files */
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)
		microuptime(&np->n_lastio);
	return (error);
}
414
/*
 * nfs4_write_rpc_async
 *
 * Build and submit an asynchronous NFSv4 WRITE of 'len' bytes taken from
 * 'uio'.  Compound sent: PUTFH, WRITE, GETATTR.  The stateid for
 * (np, thd, cred) is attached to the WRITE.
 *
 * 'iomode' is the requested stability (UNSTABLE/DATASYNC/FILESYNC); for
 * mounts with MNT_ASYNC (when nfs_allow_async permits) sync writes are
 * downgraded to UNSTABLE since the mount has opted out of sync semantics.
 *
 * The in-flight request is returned via *reqp; the caller completes it with
 * nfs4_write_rpc_async_finish().
 */
int
nfs4_write_rpc_async(
	nfsnode_t np,
	uio_t uio,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	int iomode,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	mount_t mp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (!nmp)
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	/* referral trigger nodes cannot be written */
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	/* for async mounts, don't bother sending sync write requests */
	if ((iomode != NFS_WRITE_UNSTABLE) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC))
		iomode = NFS_WRITE_UNSTABLE;

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, WRITE, GETATTR
	numops = 3;
	/* request buffer must also hold the 'len' bytes of write data */
	nfsm_chain_build_alloc_init(error, &nmreq, 25 * NFSX_UNSIGNED + len);
	nfsm_chain_add_compound_header(error, &nmreq, "write", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_WRITE);
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, uio_offset(uio));
	nfsm_chain_add_32(error, &nmreq, iomode);
	nfsm_chain_add_32(error, &nmreq, len);
	if (!error)
		error = nfsm_chain_add_uio(&nmreq, uio, len);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return (error);
}
476
/*
 * nfs4_write_rpc_async_finish
 *
 * Complete an async NFSv4 WRITE started by nfs4_write_rpc_async().
 *
 * Outputs:
 *   *iomodep - the stability level the server actually committed to
 *              (downgraded writes on MNT_ASYNC mounts are reported as
 *              FILESYNC since the caller opted out of commit semantics).
 *   *rlenp   - number of bytes the server wrote.
 *   *wverfp  - the server's write verifier (if the caller wants it); the
 *              mount's cached verifier is updated, which is how verifier
 *              changes (server reboots) are later detected by commit.
 *
 * Returns EINPROGRESS if the async request was restarted and is not yet
 * complete.
 */
int
nfs4_write_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	int *iomodep,
	size_t *rlenp,
	uint64_t *wverfp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, nfsvers, numops, status;
	int committed = NFS_WRITE_FILESYNC;
	size_t rlen = 0;
	u_int64_t xid, wverf;
	mount_t mp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (!nmp) {
		nfs_request_async_cancel(req);
		return (ENXIO);
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) /* async request restarted */
		return (error);
	/* re-check: the mount may have gone away while we were waiting */
	nmp = NFSTONMP(np);
	if (!nmp)
		error = ENXIO;
	if (!error && (lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_WRITE);
	nfsm_chain_get_32(error, &nmrep, rlen);
	nfsmout_if(error);
	*rlenp = rlen;
	/* NOTE(review): rlen is unsigned, so "<= 0" only catches 0 — a zero-byte write reply is an error */
	if (rlen <= 0)
		error = NFSERR_IO;
	nfsm_chain_get_32(error, &nmrep, committed);
	nfsm_chain_get_64(error, &nmrep, wverf);
	nfsmout_if(error);
	if (wverfp)
		*wverfp = wverf;
	/* remember the server's write verifier on the mount */
	lck_mtx_lock(&nmp->nm_lock);
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
		nmp->nm_verf = wverf;
		nmp->nm_state |= NFSSTA_HASWRITEVERF;
	} else if (nmp->nm_verf != wverf) {
		nmp->nm_verf = wverf;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfsm_chain_cleanup(&nmrep);
	/* async mounts don't care about commit level: report FILESYNC */
	if ((committed != NFS_WRITE_FILESYNC) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC))
		committed = NFS_WRITE_FILESYNC;
	*iomodep = committed;
	/* track last I/O time for named-attribute files */
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)
		microuptime(&np->n_lastio);
	return (error);
}
546
/*
 * nfs4_remove_rpc
 *
 * Remove directory entry 'name' from directory 'dnp' via an NFSv4 compound:
 * PUTFH, REMOVE, GETATTR.
 *
 * Note the return value: the function returns 'remove_error' (the error
 * state as of the REMOVE op itself), not the overall parse error — a
 * failure while loading the trailing directory attributes does not turn a
 * successful remove into a failure (the attributes are just invalidated).
 *
 * If the server is in its grace period (NFSERR_GRACE) the request is
 * retried after a short sleep.
 */
int
nfs4_remove_rpc(
	nfsnode_t dnp,
	char *name,
	int namelen,
	thread_t thd,
	kauth_cred_t cred)
{
	int error = 0, lockerror = ENOENT, remove_error = 0, status;
	struct nfsmount *nmp;
	int nfsvers, numops;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (!nmp)
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);
	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
restart:
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, REMOVE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "remove", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_REMOVE);
	nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_REMOVE);
	/* capture the REMOVE op's outcome before parsing the rest */
	remove_error = error;
	nfsm_chain_check_change_info(error, &nmrep, dnp);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
	if (error && !lockerror)
		NATTRINVALIDATE(dnp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	if (!lockerror) {
		/* the directory changed; mark it modified */
		dnp->n_flag |= NMODIFIED;
		nfs_node_unlock(dnp);
	}
	if (error == NFSERR_GRACE) {
		/* server still in grace period — wait a bit and retry */
		tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz);
		goto restart;
	}

	return (remove_error);
}
619
/*
 * nfs4_rename_rpc
 *
 * Rename 'fnameptr' in directory 'fdnp' to 'tnameptr' in directory 'tdnp'
 * via one NFSv4 compound:
 *   PUTFH(from), SAVEFH, PUTFH(to), RENAME, GETATTR(to), RESTOREFH,
 *   GETATTR(from)
 *
 * Both directory nodes are locked together (nfs_node_lock2) across reply
 * parsing.  Two GETATTR results come back under the same request xid, so
 * the xid is saved and restored between the two nfsm_chain_loadattr calls
 * (loadattr advances the xid it is given).  If either directory's
 * attributes can't be loaded they are invalidated instead.
 */
int
nfs4_rename_rpc(
	nfsnode_t fdnp,
	char *fnameptr,
	int fnamelen,
	nfsnode_t tdnp,
	char *tnameptr,
	int tnamelen,
	vfs_context_t ctx)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsmount *nmp;
	u_int64_t xid, savedxid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(fdnp);
	if (!nmp)
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	/* neither end of the rename may be a referral trigger */
	if (fdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);
	if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	NFSREQ_SECINFO_SET(&si, fdnp, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM)
	numops = 7;
	nfsm_chain_build_alloc_init(error, &nmreq, 30 * NFSX_UNSIGNED + fnamelen + tnamelen);
	nfsm_chain_add_compound_header(error, &nmreq, "rename", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fdnp->n_fhp, fdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RENAME);
	nfsm_chain_add_name(error, &nmreq, fnameptr, fnamelen, nmp);
	nfsm_chain_add_name(error, &nmreq, tnameptr, tnamelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, fdnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request(fdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock2(fdnp, tdnp)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RENAME);
	nfsm_chain_check_change_info(error, &nmrep, fdnp);
	nfsm_chain_check_change_info(error, &nmrep, tdnp);
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	savedxid = xid;
	nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
	if (error && !lockerror)
		NATTRINVALIDATE(tdnp);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	/* both attribute sets came in the same reply: reuse the saved xid */
	xid = savedxid;
	nfsm_chain_loadattr(error, &nmrep, fdnp, nfsvers, &xid);
	if (error && !lockerror)
		NATTRINVALIDATE(fdnp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if (!lockerror) {
		/* both directories changed; mark them modified */
		fdnp->n_flag |= NMODIFIED;
		tdnp->n_flag |= NMODIFIED;
		nfs_node_unlock2(fdnp, tdnp);
	}
	return (error);
}
711
712/*
713 * NFS V4 readdir RPC.
714 */
2d21ac55 715int
b0d623f7
A
716nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx)
717{
2d21ac55 718 struct nfsmount *nmp;
6d2010ae 719 int error = 0, lockerror, nfsvers, namedattr, rdirplus, bigcookies, numops;
b0d623f7
A
720 int i, status, more_entries = 1, eof, bp_dropped = 0;
721 uint32_t nmreaddirsize, nmrsize;
722 uint32_t namlen, skiplen, fhlen, xlen, attrlen, reclen, space_free, space_needed;
723 uint64_t cookie, lastcookie, xid, savedxid;
724 struct nfsm_chain nmreq, nmrep, nmrepsave;
725 fhandle_t fh;
726 struct nfs_vattr nvattr, *nvattrp;
727 struct nfs_dir_buf_header *ndbhp;
728 struct direntry *dp;
729 char *padstart, padlen;
2d21ac55
A
730 const char *tag;
731 uint32_t entry_attrs[NFS_ATTR_BITMAP_LEN];
b0d623f7 732 struct timeval now;
6d2010ae 733 struct nfsreq_secinfo_args si;
2d21ac55 734
2d21ac55
A
735 nmp = NFSTONMP(dnp);
736 if (!nmp)
737 return (ENXIO);
738 nfsvers = nmp->nm_vers;
739 nmreaddirsize = nmp->nm_readdirsize;
740 nmrsize = nmp->nm_rsize;
b0d623f7 741 bigcookies = nmp->nm_state & NFSSTA_BIGCOOKIES;
6d2010ae
A
742 namedattr = (dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) ? 1 : 0;
743 rdirplus = (NMFLAG(nmp, RDIRPLUS) || namedattr) ? 1 : 0;
744 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
745 return (EINVAL);
746 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
2d21ac55
A
747
748 /*
749 * Set up attribute request for entries.
750 * For READDIRPLUS functionality, get everything.
b0d623f7 751 * Otherwise, just get what we need for struct direntry.
2d21ac55
A
752 */
753 if (rdirplus) {
b0d623f7 754 tag = "readdirplus";
6d2010ae 755 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, entry_attrs);
2d21ac55
A
756 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEHANDLE);
757 } else {
b0d623f7 758 tag = "readdir";
2d21ac55
A
759 NFS_CLEAR_ATTRIBUTES(entry_attrs);
760 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_TYPE);
761 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEID);
6d2010ae 762 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_MOUNTED_ON_FILEID);
2d21ac55 763 }
2d21ac55
A
764 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_RDATTR_ERROR);
765
b0d623f7
A
766 /* lock to protect access to cookie verifier */
767 if ((lockerror = nfs_node_lock(dnp)))
2d21ac55
A
768 return (lockerror);
769
b0d623f7
A
770 /* determine cookie to use, and move dp to the right offset */
771 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
772 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
773 if (ndbhp->ndbh_count) {
774 for (i=0; i < ndbhp->ndbh_count-1; i++)
775 dp = NFS_DIRENTRY_NEXT(dp);
776 cookie = dp->d_seekoff;
777 dp = NFS_DIRENTRY_NEXT(dp);
778 } else {
779 cookie = bp->nb_lblkno;
780 /* increment with every buffer read */
781 OSAddAtomic(1, &nfsstats.readdir_bios);
2d21ac55 782 }
b0d623f7 783 lastcookie = cookie;
2d21ac55
A
784
785 /*
b0d623f7
A
786 * The NFS client is responsible for the "." and ".." entries in the
787 * directory. So, we put them at the start of the first buffer.
6d2010ae 788 * Don't bother for attribute directories.
2d21ac55 789 */
6d2010ae
A
790 if (((bp->nb_lblkno == 0) && (ndbhp->ndbh_count == 0)) &&
791 !(dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
b0d623f7
A
792 fh.fh_len = 0;
793 fhlen = rdirplus ? fh.fh_len + 1 : 0;
794 xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
795 /* "." */
796 namlen = 1;
797 reclen = NFS_DIRENTRY_LEN(namlen + xlen);
798 if (xlen)
799 bzero(&dp->d_name[namlen+1], xlen);
800 dp->d_namlen = namlen;
801 strlcpy(dp->d_name, ".", namlen+1);
2d21ac55 802 dp->d_fileno = dnp->n_vattr.nva_fileid;
2d21ac55 803 dp->d_type = DT_DIR;
b0d623f7
A
804 dp->d_reclen = reclen;
805 dp->d_seekoff = 1;
806 padstart = dp->d_name + dp->d_namlen + 1 + xlen;
807 dp = NFS_DIRENTRY_NEXT(dp);
808 padlen = (char*)dp - padstart;
809 if (padlen > 0)
810 bzero(padstart, padlen);
811 if (rdirplus) /* zero out attributes */
812 bzero(NFS_DIR_BUF_NVATTR(bp, 0), sizeof(struct nfs_vattr));
813
814 /* ".." */
815 namlen = 2;
816 reclen = NFS_DIRENTRY_LEN(namlen + xlen);
817 if (xlen)
818 bzero(&dp->d_name[namlen+1], xlen);
819 dp->d_namlen = namlen;
820 strlcpy(dp->d_name, "..", namlen+1);
2d21ac55
A
821 if (dnp->n_parent)
822 dp->d_fileno = VTONFS(dnp->n_parent)->n_vattr.nva_fileid;
823 else
824 dp->d_fileno = dnp->n_vattr.nva_fileid;
2d21ac55 825 dp->d_type = DT_DIR;
b0d623f7
A
826 dp->d_reclen = reclen;
827 dp->d_seekoff = 2;
828 padstart = dp->d_name + dp->d_namlen + 1 + xlen;
829 dp = NFS_DIRENTRY_NEXT(dp);
830 padlen = (char*)dp - padstart;
831 if (padlen > 0)
832 bzero(padstart, padlen);
833 if (rdirplus) /* zero out attributes */
834 bzero(NFS_DIR_BUF_NVATTR(bp, 1), sizeof(struct nfs_vattr));
835
836 ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
837 ndbhp->ndbh_count = 2;
2d21ac55
A
838 }
839
840 /*
b0d623f7
A
841 * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
842 * the buffer is full (or we hit EOF). Then put the remainder of the
843 * results in the next buffer(s).
2d21ac55 844 */
b0d623f7
A
845 nfsm_chain_null(&nmreq);
846 nfsm_chain_null(&nmrep);
847 while (nfs_dir_buf_freespace(bp, rdirplus) && !(ndbhp->ndbh_flags & NDB_FULL)) {
2d21ac55 848
b0d623f7
A
849 // PUTFH, GETATTR, READDIR
850 numops = 3;
2d21ac55
A
851 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
852 nfsm_chain_add_compound_header(error, &nmreq, tag, numops);
853 numops--;
854 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
855 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
856 numops--;
857 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 858 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
2d21ac55
A
859 numops--;
860 nfsm_chain_add_32(error, &nmreq, NFS_OP_READDIR);
b0d623f7
A
861 nfsm_chain_add_64(error, &nmreq, (cookie <= 2) ? 0 : cookie);
862 nfsm_chain_add_64(error, &nmreq, dnp->n_cookieverf);
2d21ac55
A
863 nfsm_chain_add_32(error, &nmreq, nmreaddirsize);
864 nfsm_chain_add_32(error, &nmreq, nmrsize);
6d2010ae 865 nfsm_chain_add_bitmap_supported(error, &nmreq, entry_attrs, nmp, dnp);
2d21ac55
A
866 nfsm_chain_build_done(error, &nmreq);
867 nfsm_assert(error, (numops == 0), EPROTO);
b0d623f7 868 nfs_node_unlock(dnp);
2d21ac55 869 nfsmout_if(error);
6d2010ae 870 error = nfs_request(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
2d21ac55 871
b0d623f7 872 if ((lockerror = nfs_node_lock(dnp)))
2d21ac55 873 error = lockerror;
b0d623f7
A
874
875 savedxid = xid;
2d21ac55
A
876 nfsm_chain_skip_tag(error, &nmrep);
877 nfsm_chain_get_32(error, &nmrep, numops);
878 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
879 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 880 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
2d21ac55 881 nfsm_chain_op_check(error, &nmrep, NFS_OP_READDIR);
b0d623f7 882 nfsm_chain_get_64(error, &nmrep, dnp->n_cookieverf);
2d21ac55 883 nfsm_chain_get_32(error, &nmrep, more_entries);
b0d623f7
A
884
885 if (!lockerror) {
886 nfs_node_unlock(dnp);
887 lockerror = ENOENT;
888 }
2d21ac55
A
889 nfsmout_if(error);
890
b0d623f7
A
891 if (rdirplus)
892 microuptime(&now);
893
894 /* loop through the entries packing them into the buffer */
895 while (more_entries) {
2d21ac55 896 /* Entry: COOKIE, NAME, FATTR */
b0d623f7
A
897 nfsm_chain_get_64(error, &nmrep, cookie);
898 nfsm_chain_get_32(error, &nmrep, namlen);
2d21ac55 899 nfsmout_if(error);
b0d623f7
A
900 if (!bigcookies && (cookie >> 32) && (nmp == NFSTONMP(dnp))) {
901 /* we've got a big cookie, make sure flag is set */
902 lck_mtx_lock(&nmp->nm_lock);
903 nmp->nm_state |= NFSSTA_BIGCOOKIES;
904 lck_mtx_unlock(&nmp->nm_lock);
905 bigcookies = 1;
906 }
907 /* just truncate names that don't fit in direntry.d_name */
908 if (namlen <= 0) {
2d21ac55
A
909 error = EBADRPC;
910 goto nfsmout;
911 }
b0d623f7
A
912 if (namlen > (sizeof(dp->d_name)-1)) {
913 skiplen = namlen - sizeof(dp->d_name) + 1;
914 namlen = sizeof(dp->d_name) - 1;
2d21ac55
A
915 } else {
916 skiplen = 0;
917 }
b0d623f7
A
918 /* guess that fh size will be same as parent */
919 fhlen = rdirplus ? (1 + dnp->n_fhsize) : 0;
920 xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
921 attrlen = rdirplus ? sizeof(struct nfs_vattr) : 0;
922 reclen = NFS_DIRENTRY_LEN(namlen + xlen);
923 space_needed = reclen + attrlen;
924 space_free = nfs_dir_buf_freespace(bp, rdirplus);
925 if (space_needed > space_free) {
926 /*
927 * We still have entries to pack, but we've
928 * run out of room in the current buffer.
929 * So we need to move to the next buffer.
930 * The block# for the next buffer is the
931 * last cookie in the current buffer.
932 */
933nextbuffer:
934 ndbhp->ndbh_flags |= NDB_FULL;
935 nfs_buf_release(bp, 0);
936 bp_dropped = 1;
937 bp = NULL;
938 error = nfs_buf_get(dnp, lastcookie, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
939 nfsmout_if(error);
940 /* initialize buffer */
941 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
942 ndbhp->ndbh_flags = 0;
943 ndbhp->ndbh_count = 0;
944 ndbhp->ndbh_entry_end = sizeof(*ndbhp);
945 ndbhp->ndbh_ncgen = dnp->n_ncgen;
946 space_free = nfs_dir_buf_freespace(bp, rdirplus);
947 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
948 /* increment with every buffer read */
949 OSAddAtomic(1, &nfsstats.readdir_bios);
2d21ac55 950 }
b0d623f7
A
951 nmrepsave = nmrep;
952 dp->d_fileno = cookie; /* placeholder */
953 dp->d_seekoff = cookie;
954 dp->d_namlen = namlen;
955 dp->d_reclen = reclen;
2d21ac55 956 dp->d_type = DT_UNKNOWN;
b0d623f7
A
957 nfsm_chain_get_opaque(error, &nmrep, namlen, dp->d_name);
958 nfsmout_if(error);
959 dp->d_name[namlen] = '\0';
2d21ac55
A
960 if (skiplen)
961 nfsm_chain_adv(error, &nmrep,
b0d623f7 962 nfsm_rndup(namlen + skiplen) - nfsm_rndup(namlen));
2d21ac55 963 nfsmout_if(error);
b0d623f7 964 nvattrp = rdirplus ? NFS_DIR_BUF_NVATTR(bp, ndbhp->ndbh_count) : &nvattr;
6d2010ae
A
965 error = nfs4_parsefattr(&nmrep, NULL, nvattrp, &fh, NULL, NULL);
966 if (!error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_ACL)) {
967 /* we do NOT want ACLs returned to us here */
968 NFS_BITMAP_CLR(nvattrp->nva_bitmap, NFS_FATTR_ACL);
969 if (nvattrp->nva_acl) {
970 kauth_acl_free(nvattrp->nva_acl);
971 nvattrp->nva_acl = NULL;
972 }
973 }
b0d623f7 974 if (error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_RDATTR_ERROR)) {
6d2010ae
A
975 /* OK, we may not have gotten all of the attributes but we will use what we can. */
976 if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
977 /* set this up to look like a referral trigger */
978 nfs4_default_attrs_for_referral_trigger(dnp, dp->d_name, namlen, nvattrp, &fh);
979 }
2d21ac55
A
980 error = 0;
981 }
b0d623f7 982 /* check for more entries after this one */
2d21ac55
A
983 nfsm_chain_get_32(error, &nmrep, more_entries);
984 nfsmout_if(error);
985
b0d623f7 986 /* Skip any "." and ".." entries returned from server. */
6d2010ae
A
987 /* Also skip any bothersome named attribute entries. */
988 if (((dp->d_name[0] == '.') && ((namlen == 1) || ((namlen == 2) && (dp->d_name[1] == '.')))) ||
989 (namedattr && (namlen == 11) && (!strcmp(dp->d_name, "SUNWattr_ro") || !strcmp(dp->d_name, "SUNWattr_rw")))) {
b0d623f7 990 lastcookie = cookie;
2d21ac55
A
991 continue;
992 }
993
b0d623f7
A
994 if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_TYPE))
995 dp->d_type = IFTODT(VTTOIF(nvattrp->nva_type));
996 if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEID))
997 dp->d_fileno = nvattrp->nva_fileid;
998 if (rdirplus) {
999 /* fileid is already in d_fileno, so stash xid in attrs */
1000 nvattrp->nva_fileid = savedxid;
1001 if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
1002 fhlen = fh.fh_len + 1;
1003 xlen = fhlen + sizeof(time_t);
1004 reclen = NFS_DIRENTRY_LEN(namlen + xlen);
1005 space_needed = reclen + attrlen;
1006 if (space_needed > space_free) {
1007 /* didn't actually have the room... move on to next buffer */
1008 nmrep = nmrepsave;
1009 goto nextbuffer;
1010 }
1011 /* pack the file handle into the record */
1012 dp->d_name[dp->d_namlen+1] = fh.fh_len;
1013 bcopy(fh.fh_data, &dp->d_name[dp->d_namlen+2], fh.fh_len);
1014 } else {
1015 /* mark the file handle invalid */
1016 fh.fh_len = 0;
1017 fhlen = fh.fh_len + 1;
1018 xlen = fhlen + sizeof(time_t);
1019 reclen = NFS_DIRENTRY_LEN(namlen + xlen);
1020 bzero(&dp->d_name[dp->d_namlen+1], fhlen);
2d21ac55 1021 }
b0d623f7
A
1022 *(time_t*)(&dp->d_name[dp->d_namlen+1+fhlen]) = now.tv_sec;
1023 dp->d_reclen = reclen;
2d21ac55 1024 }
b0d623f7
A
1025 padstart = dp->d_name + dp->d_namlen + 1 + xlen;
1026 ndbhp->ndbh_count++;
1027 lastcookie = cookie;
1028
1029 /* advance to next direntry in buffer */
1030 dp = NFS_DIRENTRY_NEXT(dp);
1031 ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
1032 /* zero out the pad bytes */
1033 padlen = (char*)dp - padstart;
1034 if (padlen > 0)
1035 bzero(padstart, padlen);
1036 }
1037 /* Finally, get the eof boolean */
1038 nfsm_chain_get_32(error, &nmrep, eof);
1039 nfsmout_if(error);
1040 if (eof) {
1041 ndbhp->ndbh_flags |= (NDB_FULL|NDB_EOF);
1042 nfs_node_lock_force(dnp);
1043 dnp->n_eofcookie = lastcookie;
1044 nfs_node_unlock(dnp);
1045 } else {
1046 more_entries = 1;
2d21ac55 1047 }
b0d623f7
A
1048 if (bp_dropped) {
1049 nfs_buf_release(bp, 0);
1050 bp = NULL;
1051 break;
2d21ac55 1052 }
b0d623f7 1053 if ((lockerror = nfs_node_lock(dnp)))
2d21ac55
A
1054 error = lockerror;
1055 nfsmout_if(error);
1056 nfsm_chain_cleanup(&nmrep);
b0d623f7 1057 nfsm_chain_null(&nmreq);
2d21ac55 1058 }
2d21ac55 1059nfsmout:
b0d623f7
A
1060 if (bp_dropped && bp)
1061 nfs_buf_release(bp, 0);
1062 if (!lockerror)
1063 nfs_node_unlock(dnp);
2d21ac55
A
1064 nfsm_chain_cleanup(&nmreq);
1065 nfsm_chain_cleanup(&nmrep);
b0d623f7 1066 return (bp_dropped ? NFSERR_DIRBUFDROPPED : error);
2d21ac55
A
1067}
1068
1069int
1070nfs4_lookup_rpc_async(
1071 nfsnode_t dnp,
1072 char *name,
1073 int namelen,
1074 vfs_context_t ctx,
1075 struct nfsreq **reqp)
1076{
6d2010ae 1077 int error = 0, isdotdot = 0, nfsvers, numops;
2d21ac55
A
1078 struct nfsm_chain nmreq;
1079 uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
1080 struct nfsmount *nmp;
6d2010ae 1081 struct nfsreq_secinfo_args si;
2d21ac55
A
1082
1083 nmp = NFSTONMP(dnp);
1084 if (!nmp)
1085 return (ENXIO);
1086 nfsvers = nmp->nm_vers;
6d2010ae
A
1087 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
1088 return (EINVAL);
2d21ac55 1089
6d2010ae 1090 if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
2d21ac55 1091 isdotdot = 1;
6d2010ae
A
1092 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
1093 } else {
1094 NFSREQ_SECINFO_SET(&si, dnp, dnp->n_fhp, dnp->n_fhsize, name, namelen);
1095 }
2d21ac55
A
1096
1097 nfsm_chain_null(&nmreq);
1098
6d2010ae
A
1099 // PUTFH, GETATTR, LOOKUP(P), GETFH, GETATTR (FH)
1100 numops = 5;
2d21ac55
A
1101 nfsm_chain_build_alloc_init(error, &nmreq, 20 * NFSX_UNSIGNED + namelen);
1102 nfsm_chain_add_compound_header(error, &nmreq, "lookup", numops);
1103 numops--;
1104 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
1105 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
1106 numops--;
1107 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 1108 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
2d21ac55
A
1109 numops--;
1110 if (isdotdot) {
1111 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUPP);
1112 } else {
1113 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
6d2010ae 1114 nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
2d21ac55 1115 }
6d2010ae
A
1116 numops--;
1117 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETFH);
1118 numops--;
1119 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
1120 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
1121 /* some ".zfs" directories can't handle being asked for some attributes */
1122 if ((dnp->n_flag & NISDOTZFS) && !isdotdot)
1123 NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
1124 if ((dnp->n_flag & NISDOTZFSCHILD) && isdotdot)
1125 NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
1126 if (((namelen == 4) && (name[0] == '.') && (name[1] == 'z') && (name[2] == 'f') && (name[3] == 's')))
1127 NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
1128 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
2d21ac55
A
1129 nfsm_chain_build_done(error, &nmreq);
1130 nfsm_assert(error, (numops == 0), EPROTO);
1131 nfsmout_if(error);
1132 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
6d2010ae 1133 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, reqp);
2d21ac55
A
1134nfsmout:
1135 nfsm_chain_cleanup(&nmreq);
1136 return (error);
1137}
1138
6d2010ae 1139
2d21ac55
A
1140int
1141nfs4_lookup_rpc_async_finish(
1142 nfsnode_t dnp,
6d2010ae
A
1143 char *name,
1144 int namelen,
1145 vfs_context_t ctx,
2d21ac55
A
1146 struct nfsreq *req,
1147 u_int64_t *xidp,
1148 fhandle_t *fhp,
1149 struct nfs_vattr *nvap)
1150{
6d2010ae
A
1151 int error = 0, lockerror = ENOENT, status, nfsvers, numops, isdotdot = 0;
1152 uint32_t op = NFS_OP_LOOKUP;
2d21ac55
A
1153 u_int64_t xid;
1154 struct nfsmount *nmp;
1155 struct nfsm_chain nmrep;
1156
1157 nmp = NFSTONMP(dnp);
1158 nfsvers = nmp->nm_vers;
6d2010ae
A
1159 if ((name[0] == '.') && (name[1] == '.') && (namelen == 2))
1160 isdotdot = 1;
2d21ac55
A
1161
1162 nfsm_chain_null(&nmrep);
1163
1164 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
1165
b0d623f7
A
1166 if ((lockerror = nfs_node_lock(dnp)))
1167 error = lockerror;
2d21ac55
A
1168 nfsm_chain_skip_tag(error, &nmrep);
1169 nfsm_chain_get_32(error, &nmrep, numops);
1170 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
1171 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
1172 if (xidp)
1173 *xidp = xid;
6d2010ae 1174 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
2d21ac55 1175
6d2010ae 1176 nfsm_chain_op_check(error, &nmrep, (isdotdot ? NFS_OP_LOOKUPP : NFS_OP_LOOKUP));
2d21ac55 1177 nfsmout_if(error || !fhp || !nvap);
6d2010ae
A
1178 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETFH);
1179 nfsm_chain_get_32(error, &nmrep, fhp->fh_len);
1180 nfsm_chain_get_opaque(error, &nmrep, fhp->fh_len, fhp->fh_data);
2d21ac55 1181 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae
A
1182 if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
1183 /* set this up to look like a referral trigger */
1184 nfs4_default_attrs_for_referral_trigger(dnp, name, namelen, nvap, fhp);
1185 error = 0;
1186 } else {
1187 nfsmout_if(error);
1188 error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
2d21ac55
A
1189 }
1190nfsmout:
b0d623f7
A
1191 if (!lockerror)
1192 nfs_node_unlock(dnp);
2d21ac55 1193 nfsm_chain_cleanup(&nmrep);
6d2010ae
A
1194 if (!error && (op == NFS_OP_LOOKUP) && (nmp->nm_state & NFSSTA_NEEDSECINFO)) {
1195 /* We still need to get SECINFO to set default for mount. */
1196 /* Do so for the first LOOKUP that returns successfully. */
1197 struct nfs_sec sec;
1198
1199 sec.count = NX_MAX_SEC_FLAVORS;
1200 error = nfs4_secinfo_rpc(nmp, &req->r_secinfo, vfs_context_ucred(ctx), sec.flavors, &sec.count);
1201 /* [sigh] some implementations return "illegal" error for unsupported ops */
1202 if (error == NFSERR_OP_ILLEGAL)
1203 error = 0;
1204 if (!error) {
1205 /* set our default security flavor to the first in the list */
1206 lck_mtx_lock(&nmp->nm_lock);
1207 if (sec.count)
1208 nmp->nm_auth = sec.flavors[0];
1209 nmp->nm_state &= ~NFSSTA_NEEDSECINFO;
1210 lck_mtx_unlock(&nmp->nm_lock);
1211 }
1212 }
2d21ac55
A
1213 return (error);
1214}
1215
/*
 * Perform an NFSv4 COMMIT RPC (PUTFH + COMMIT + GETATTR) for the byte
 * range [offset, offset+count) of node "np".
 *
 * "wverf" is the write verifier the caller saw on its UNSTABLE writes.
 * If the verifier the server returns differs, the writes may have been
 * lost across a server reboot and NFSERR_STALEWRITEVERF is returned so
 * the caller can rewrite the data.  Returns 0 immediately if the mount
 * has no write verifier yet (nothing uncommitted outstanding).
 */
int
nfs4_commit_rpc(
	nfsnode_t np,
	uint64_t offset,
	uint64_t count,
	kauth_cred_t cred,
	uint64_t wverf)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, status, nfsvers, numops;
	u_int64_t xid, newwverf;
	uint32_t count32;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	FSDBG(521, np, offset, count, nmp ? nmp->nm_state : 0);
	if (!nmp)
		return (ENXIO);
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF))
		return (0);
	nfsvers = nmp->nm_vers;

	/*
	 * COMMIT's count is 32 bits on the wire; when the range doesn't
	 * fit, send 0 (which per the protocol means "from offset to EOF").
	 */
	if (count > UINT32_MAX)
		count32 = 0;
	else
		count32 = count;

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, COMMIT, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 19 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "commit", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_COMMIT);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, count32);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
			current_thread(), cred, &si, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_COMMIT);
	nfsm_chain_get_64(error, &nmrep, newwverf);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (!lockerror)
		nfs_node_unlock(np);
	nfsmout_if(error);
	/* update the mount's cached verifier and compare with the caller's */
	lck_mtx_lock(&nmp->nm_lock);
	if (nmp->nm_verf != newwverf)
		nmp->nm_verf = newwverf;
	if (wverf != newwverf)
		error = NFSERR_STALEWRITEVERF;
	lck_mtx_unlock(&nmp->nm_lock);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}
1293
/*
 * Fetch pathconf-style file system attributes for node "np" via an
 * NFSv4 PUTFH + GETATTR compound, requesting the link/name-limit and
 * case-handling attributes on top of the standard getattr bitmap.
 *
 * The parsed fs attributes are returned in *nfsap; the node's regular
 * attributes from the same reply are folded into its attribute cache.
 */
int
nfs4_pathconf_rpc(
	nfsnode_t np,
	struct nfs_fsattr *nfsap,
	vfs_context_t ctx)
{
	u_int64_t xid;
	int error = 0, lockerror, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	struct nfsmount *nmp = NFSTONMP(np);
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfs_vattr nvattr;
	struct nfsreq_secinfo_args si;

	if (!nmp)
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	NVATTR_INIT(&nvattr);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/* NFSv4: fetch "pathconf" info for this node */
	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "pathconf", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	/* standard attributes plus the pathconf-related ones */
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXLINK);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXNAME);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_NO_TRUNC);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CHOWN_RESTRICTED);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_INSENSITIVE);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_PRESERVING);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	/* fs attributes land in *nfsap, node attributes in nvattr */
	error = nfs4_parsefattr(&nmrep, nfsap, &nvattr, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	if (!error)
		nfs_loadattrcache(np, &nvattr, &xid, 0);
	if (!lockerror)
		nfs_node_unlock(np);
nfsmout:
	NVATTR_CLEANUP(&nvattr);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}
1361
/*
 * NFSv4 getattr vnode op.
 *
 * Fetch (possibly cached) attributes for the vnode and translate the
 * NFS attribute set (struct nfs_vattr) into the VFS struct vnode_attr.
 * Each attribute is returned only if its NFS_FATTR_* bit shows the
 * server actually supplied it.  If the caller asked for the ACL and the
 * file system advertises ACL support, the ACL is requested as well and
 * ownership of nva.nva_acl is handed to *a_vap.
 */
int
nfs4_vnop_getattr(
	struct vnop_getattr_args /* {
		struct vnodeop_desc *a_desc;
		vnode_t a_vp;
		struct vnode_attr *a_vap;
		vfs_context_t a_context;
	} */ *ap)
{
	struct vnode_attr *vap = ap->a_vap;
	struct nfsmount *nmp;
	struct nfs_vattr nva;
	int error, acls, ngaflags;

	if (!(nmp = VTONMP(ap->a_vp)))
		return (ENXIO);
	/* does the file system support ACLs? */
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	ngaflags = NGA_CACHED;
	if (VATTR_IS_ACTIVE(vap, va_acl) && acls)
		ngaflags |= NGA_ACL;
	error = nfs_getattr(VTONFS(ap->a_vp), &nva, ap->a_context, ngaflags);
	if (error)
		return (error);

	/* copy what we have in nva to *a_vap */
	if (VATTR_IS_ACTIVE(vap, va_rdev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_RAWDEV)) {
		dev_t rdev = makedev(nva.nva_rawdev.specdata1, nva.nva_rawdev.specdata2);
		VATTR_RETURN(vap, va_rdev, rdev);
	}
	if (VATTR_IS_ACTIVE(vap, va_nlink) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_NUMLINKS))
		VATTR_RETURN(vap, va_nlink, nva.nva_nlink);
	if (VATTR_IS_ACTIVE(vap, va_data_size) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SIZE))
		VATTR_RETURN(vap, va_data_size, nva.nva_size);
	// VATTR_RETURN(vap, va_data_alloc, ???);
	// VATTR_RETURN(vap, va_total_size, ???);
	if (VATTR_IS_ACTIVE(vap, va_total_alloc) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SPACE_USED))
		VATTR_RETURN(vap, va_total_alloc, nva.nva_bytes);
	if (VATTR_IS_ACTIVE(vap, va_uid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER))
		VATTR_RETURN(vap, va_uid, nva.nva_uid);
	if (VATTR_IS_ACTIVE(vap, va_uuuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER))
		VATTR_RETURN(vap, va_uuuid, nva.nva_uuuid);
	if (VATTR_IS_ACTIVE(vap, va_gid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP))
		VATTR_RETURN(vap, va_gid, nva.nva_gid);
	if (VATTR_IS_ACTIVE(vap, va_guuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP))
		VATTR_RETURN(vap, va_guuid, nva.nva_guuid);
	if (VATTR_IS_ACTIVE(vap, va_mode)) {
		/* ACL-only mounts (or servers with no mode) get a wide-open fake mode */
		if (NMFLAG(nmp, ACLONLY) || !NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_MODE))
			VATTR_RETURN(vap, va_mode, 0777);
		else
			VATTR_RETURN(vap, va_mode, nva.nva_mode);
	}
	if (VATTR_IS_ACTIVE(vap, va_flags) &&
	    (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) ||
	     NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) ||
	     (nva.nva_flags & NFS_FFLAG_TRIGGER))) {
		/* map the NFS archive/hidden attributes onto BSD file flags */
		uint32_t flags = 0;
		if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) &&
		    (nva.nva_flags & NFS_FFLAG_ARCHIVED))
			flags |= SF_ARCHIVED;
		if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) &&
		    (nva.nva_flags & NFS_FFLAG_HIDDEN))
			flags |= UF_HIDDEN;
		VATTR_RETURN(vap, va_flags, flags);
	}
	if (VATTR_IS_ACTIVE(vap, va_create_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_CREATE)) {
		vap->va_create_time.tv_sec = nva.nva_timesec[NFSTIME_CREATE];
		vap->va_create_time.tv_nsec = nva.nva_timensec[NFSTIME_CREATE];
		VATTR_SET_SUPPORTED(vap, va_create_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_access_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_ACCESS)) {
		vap->va_access_time.tv_sec = nva.nva_timesec[NFSTIME_ACCESS];
		vap->va_access_time.tv_nsec = nva.nva_timensec[NFSTIME_ACCESS];
		VATTR_SET_SUPPORTED(vap, va_access_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_modify_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_MODIFY)) {
		vap->va_modify_time.tv_sec = nva.nva_timesec[NFSTIME_MODIFY];
		vap->va_modify_time.tv_nsec = nva.nva_timensec[NFSTIME_MODIFY];
		VATTR_SET_SUPPORTED(vap, va_modify_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_change_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_METADATA)) {
		vap->va_change_time.tv_sec = nva.nva_timesec[NFSTIME_CHANGE];
		vap->va_change_time.tv_nsec = nva.nva_timensec[NFSTIME_CHANGE];
		VATTR_SET_SUPPORTED(vap, va_change_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_backup_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_BACKUP)) {
		vap->va_backup_time.tv_sec = nva.nva_timesec[NFSTIME_BACKUP];
		vap->va_backup_time.tv_nsec = nva.nva_timensec[NFSTIME_BACKUP];
		VATTR_SET_SUPPORTED(vap, va_backup_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_fileid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_FILEID))
		VATTR_RETURN(vap, va_fileid, nva.nva_fileid);
	if (VATTR_IS_ACTIVE(vap, va_type) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TYPE))
		VATTR_RETURN(vap, va_type, nva.nva_type);
	if (VATTR_IS_ACTIVE(vap, va_filerev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_CHANGE))
		VATTR_RETURN(vap, va_filerev, nva.nva_change);

	if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
		/* transfer ACL ownership to the caller; clear so NVATTR_CLEANUP won't free it */
		VATTR_RETURN(vap, va_acl, nva.nva_acl);
		nva.nva_acl = NULL;
	}

	// other attrs we might support someday:
	// VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? */);

	NVATTR_CLEANUP(&nva);
	return (error);
}
1470
1471int
1472nfs4_setattr_rpc(
1473 nfsnode_t np,
1474 struct vnode_attr *vap,
b0d623f7 1475 vfs_context_t ctx)
2d21ac55
A
1476{
1477 struct nfsmount *nmp = NFSTONMP(np);
6d2010ae 1478 int error = 0, setattr_error = 0, lockerror = ENOENT, status, nfsvers, numops;
b0d623f7 1479 u_int64_t xid, nextxid;
2d21ac55 1480 struct nfsm_chain nmreq, nmrep;
b0d623f7 1481 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6d2010ae
A
1482 uint32_t getbitmap[NFS_ATTR_BITMAP_LEN];
1483 uint32_t setbitmap[NFS_ATTR_BITMAP_LEN];
b0d623f7 1484 nfs_stateid stateid;
6d2010ae 1485 struct nfsreq_secinfo_args si;
2d21ac55
A
1486
1487 if (!nmp)
1488 return (ENXIO);
1489 nfsvers = nmp->nm_vers;
6d2010ae
A
1490 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
1491 return (EINVAL);
2d21ac55
A
1492
1493 if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & ~(SF_ARCHIVED|UF_HIDDEN))) {
1494 /* we don't support setting unsupported flags (duh!) */
1495 if (vap->va_active & ~VNODE_ATTR_va_flags)
1496 return (EINVAL); /* return EINVAL if other attributes also set */
1497 else
1498 return (ENOTSUP); /* return ENOTSUP for chflags(2) */
1499 }
1500
6d2010ae
A
1501 /* don't bother requesting some changes if they don't look like they are changing */
1502 if (VATTR_IS_ACTIVE(vap, va_uid) && (vap->va_uid == np->n_vattr.nva_uid))
1503 VATTR_CLEAR_ACTIVE(vap, va_uid);
1504 if (VATTR_IS_ACTIVE(vap, va_gid) && (vap->va_gid == np->n_vattr.nva_gid))
1505 VATTR_CLEAR_ACTIVE(vap, va_gid);
1506 if (VATTR_IS_ACTIVE(vap, va_uuuid) && kauth_guid_equal(&vap->va_uuuid, &np->n_vattr.nva_uuuid))
1507 VATTR_CLEAR_ACTIVE(vap, va_uuuid);
1508 if (VATTR_IS_ACTIVE(vap, va_guuid) && kauth_guid_equal(&vap->va_guuid, &np->n_vattr.nva_guuid))
1509 VATTR_CLEAR_ACTIVE(vap, va_guuid);
1510
1511tryagain:
1512 /* do nothing if no attributes will be sent */
1513 nfs_vattr_set_bitmap(nmp, bitmap, vap);
1514 if (!bitmap[0] && !bitmap[1])
1515 return (0);
1516
1517 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
2d21ac55
A
1518 nfsm_chain_null(&nmreq);
1519 nfsm_chain_null(&nmrep);
1520
6d2010ae
A
1521 /*
1522 * Prepare GETATTR bitmap: if we are setting the ACL or mode, we
1523 * need to invalidate any cached ACL. And if we had an ACL cached,
1524 * we might as well also fetch the new value.
1525 */
1526 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, getbitmap);
1527 if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_ACL) ||
1528 NFS_BITMAP_ISSET(bitmap, NFS_FATTR_MODE)) {
1529 if (NACLVALID(np))
1530 NFS_BITMAP_SET(getbitmap, NFS_FATTR_ACL);
1531 NACLINVALIDATE(np);
1532 }
1533
2d21ac55
A
1534 // PUTFH, SETATTR, GETATTR
1535 numops = 3;
1536 nfsm_chain_build_alloc_init(error, &nmreq, 40 * NFSX_UNSIGNED);
1537 nfsm_chain_add_compound_header(error, &nmreq, "setattr", numops);
1538 numops--;
1539 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
1540 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
1541 numops--;
1542 nfsm_chain_add_32(error, &nmreq, NFS_OP_SETATTR);
1543 if (VATTR_IS_ACTIVE(vap, va_data_size))
b0d623f7 1544 nfs_get_stateid(np, vfs_context_thread(ctx), vfs_context_ucred(ctx), &stateid);
2d21ac55 1545 else
b0d623f7
A
1546 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
1547 nfsm_chain_add_stateid(error, &nmreq, &stateid);
2d21ac55
A
1548 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
1549 numops--;
1550 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6d2010ae 1551 nfsm_chain_add_bitmap_supported(error, &nmreq, getbitmap, nmp, np);
2d21ac55
A
1552 nfsm_chain_build_done(error, &nmreq);
1553 nfsm_assert(error, (numops == 0), EPROTO);
1554 nfsmout_if(error);
6d2010ae 1555 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
2d21ac55 1556
b0d623f7 1557 if ((lockerror = nfs_node_lock(np)))
2d21ac55
A
1558 error = lockerror;
1559 nfsm_chain_skip_tag(error, &nmrep);
1560 nfsm_chain_get_32(error, &nmrep, numops);
1561 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6d2010ae 1562 nfsmout_if(error);
2d21ac55 1563 nfsm_chain_op_check(error, &nmrep, NFS_OP_SETATTR);
6d2010ae
A
1564 nfsmout_if(error == EBADRPC);
1565 setattr_error = error;
1566 error = 0;
2d21ac55 1567 bmlen = NFS_ATTR_BITMAP_LEN;
6d2010ae
A
1568 nfsm_chain_get_bitmap(error, &nmrep, setbitmap, bmlen);
1569 if (!error) {
1570 if (VATTR_IS_ACTIVE(vap, va_data_size) && (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
1571 microuptime(&np->n_lastio);
1572 nfs_vattr_set_supported(setbitmap, vap);
1573 error = setattr_error;
1574 }
2d21ac55 1575 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae 1576 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
2d21ac55
A
1577 if (error)
1578 NATTRINVALIDATE(np);
b0d623f7
A
1579 /*
1580 * We just changed the attributes and we want to make sure that we
1581 * see the latest attributes. Get the next XID. If it's not the
1582 * next XID after the SETATTR XID, then it's possible that another
1583 * RPC was in flight at the same time and it might put stale attributes
1584 * in the cache. In that case, we invalidate the attributes and set
1585 * the attribute cache XID to guarantee that newer attributes will
1586 * get loaded next.
1587 */
1588 nextxid = 0;
1589 nfs_get_xid(&nextxid);
1590 if (nextxid != (xid + 1)) {
1591 np->n_xid = nextxid;
1592 NATTRINVALIDATE(np);
1593 }
2d21ac55 1594nfsmout:
b0d623f7
A
1595 if (!lockerror)
1596 nfs_node_unlock(np);
2d21ac55
A
1597 nfsm_chain_cleanup(&nmreq);
1598 nfsm_chain_cleanup(&nmrep);
6d2010ae
A
1599 if ((setattr_error == EINVAL) && VATTR_IS_ACTIVE(vap, va_acl) && VATTR_IS_ACTIVE(vap, va_mode) && !NMFLAG(nmp, ACLONLY)) {
1600 /*
1601 * Some server's may not like ACL/mode combos that get sent.
1602 * If it looks like that's what the server choked on, try setting
1603 * just the ACL and not the mode (unless it looks like everything
1604 * but mode was already successfully set).
1605 */
1606 if (((bitmap[0] & setbitmap[0]) != bitmap[0]) ||
1607 ((bitmap[1] & (setbitmap[1]|NFS_FATTR_MODE)) != bitmap[1])) {
1608 VATTR_CLEAR_ACTIVE(vap, va_mode);
1609 error = 0;
1610 goto tryagain;
1611 }
1612 }
2d21ac55
A
1613 return (error);
1614}
1615
b0d623f7
A
1616/*
1617 * Wait for any pending recovery to complete.
1618 */
2d21ac55 1619int
b0d623f7 1620nfs_mount_state_wait_for_recovery(struct nfsmount *nmp)
2d21ac55 1621{
b0d623f7 1622 struct timespec ts = { 1, 0 };
6d2010ae 1623 int error = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
b0d623f7
A
1624
1625 lck_mtx_lock(&nmp->nm_lock);
1626 while (nmp->nm_state & NFSSTA_RECOVER) {
1627 if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1)))
1628 break;
1629 nfs_mount_sock_thread_wake(nmp);
1630 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag|(PZERO-1), "nfsrecoverwait", &ts);
6d2010ae 1631 slpflag = 0;
b0d623f7
A
1632 }
1633 lck_mtx_unlock(&nmp->nm_lock);
1634
1635 return (error);
2d21ac55
A
1636}
1637
b0d623f7
A
1638/*
1639 * We're about to use/manipulate NFS mount's open/lock state.
1640 * Wait for any pending state recovery to complete, then
1641 * mark the state as being in use (which will hold off
1642 * the recovery thread until we're done).
1643 */
2d21ac55 1644int
6d2010ae 1645nfs_mount_state_in_use_start(struct nfsmount *nmp, thread_t thd)
2d21ac55 1646{
b0d623f7 1647 struct timespec ts = { 1, 0 };
6d2010ae 1648 int error = 0, slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
b0d623f7
A
1649
1650 if (!nmp)
1651 return (ENXIO);
1652 lck_mtx_lock(&nmp->nm_lock);
6d2010ae
A
1653 if (nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)) {
1654 lck_mtx_unlock(&nmp->nm_lock);
1655 return (ENXIO);
1656 }
b0d623f7 1657 while (nmp->nm_state & NFSSTA_RECOVER) {
6d2010ae 1658 if ((error = nfs_sigintr(nmp, NULL, thd, 1)))
b0d623f7
A
1659 break;
1660 nfs_mount_sock_thread_wake(nmp);
1661 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag|(PZERO-1), "nfsrecoverwait", &ts);
6d2010ae 1662 slpflag = 0;
b0d623f7
A
1663 }
1664 if (!error)
1665 nmp->nm_stateinuse++;
1666 lck_mtx_unlock(&nmp->nm_lock);
1667
1668 return (error);
2d21ac55
A
1669}
1670
b0d623f7
A
1671/*
1672 * We're done using/manipulating the NFS mount's open/lock
1673 * state. If the given error indicates that recovery should
1674 * be performed, we'll initiate recovery.
1675 */
2d21ac55 1676int
b0d623f7 1677nfs_mount_state_in_use_end(struct nfsmount *nmp, int error)
2d21ac55 1678{
b0d623f7
A
1679 int restart = nfs_mount_state_error_should_restart(error);
1680
1681 if (!nmp)
1682 return (restart);
1683 lck_mtx_lock(&nmp->nm_lock);
1684 if (restart && (error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE)) {
6d2010ae
A
1685 printf("nfs_mount_state_in_use_end: error %d, initiating recovery for %s, 0x%x\n",
1686 error, vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid);
1687 nfs_need_recover(nmp, error);
b0d623f7
A
1688 }
1689 if (nmp->nm_stateinuse > 0)
1690 nmp->nm_stateinuse--;
1691 else
1692 panic("NFS mount state in use count underrun");
1693 if (!nmp->nm_stateinuse && (nmp->nm_state & NFSSTA_RECOVER))
1694 wakeup(&nmp->nm_stateinuse);
1695 lck_mtx_unlock(&nmp->nm_lock);
1696 if (error == NFSERR_GRACE)
1697 tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz);
1698
1699 return (restart);
2d21ac55
A
1700}
1701
1702/*
b0d623f7 1703 * Does the error mean we should restart/redo a state-related operation?
2d21ac55
A
1704 */
1705int
b0d623f7 1706nfs_mount_state_error_should_restart(int error)
2d21ac55 1707{
b0d623f7
A
1708 switch (error) {
1709 case NFSERR_STALE_STATEID:
1710 case NFSERR_STALE_CLIENTID:
1711 case NFSERR_ADMIN_REVOKED:
1712 case NFSERR_EXPIRED:
1713 case NFSERR_OLD_STATEID:
1714 case NFSERR_BAD_STATEID:
1715 case NFSERR_GRACE:
1716 return (1);
1717 }
1718 return (0);
1719}
2d21ac55 1720
b0d623f7
A
1721/*
1722 * In some cases we may want to limit how many times we restart a
1723 * state-related operation - e.g. we're repeatedly getting NFSERR_GRACE.
1724 * Base the limit on the lease (as long as it's not too short).
1725 */
1726uint
1727nfs_mount_state_max_restarts(struct nfsmount *nmp)
1728{
1729 return (MAX(nmp->nm_fsattr.nfsa_lease, 60));
1730}
2d21ac55 1731
6d2010ae
A
1732/*
1733 * Does the error mean we probably lost a delegation?
1734 */
1735int
1736nfs_mount_state_error_delegation_lost(int error)
1737{
1738 switch (error) {
1739 case NFSERR_STALE_STATEID:
1740 case NFSERR_ADMIN_REVOKED:
1741 case NFSERR_EXPIRED:
1742 case NFSERR_OLD_STATEID:
1743 case NFSERR_BAD_STATEID:
1744 case NFSERR_GRACE: /* ugh! (stupid) RFC 3530 specifically disallows CLAIM_DELEGATE_CUR during grace period? */
1745 return (1);
1746 }
1747 return (0);
1748}
1749
b0d623f7
A
1750
1751/*
1752 * Mark an NFS node's open state as busy.
1753 */
1754int
6d2010ae 1755nfs_open_state_set_busy(nfsnode_t np, thread_t thd)
b0d623f7
A
1756{
1757 struct nfsmount *nmp;
b0d623f7
A
1758 struct timespec ts = {2, 0};
1759 int error = 0, slpflag;
1760
1761 nmp = NFSTONMP(np);
2d21ac55
A
1762 if (!nmp)
1763 return (ENXIO);
6d2010ae 1764 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
2d21ac55 1765
b0d623f7
A
1766 lck_mtx_lock(&np->n_openlock);
1767 while (np->n_openflags & N_OPENBUSY) {
1768 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
1769 break;
1770 np->n_openflags |= N_OPENWANT;
1771 msleep(&np->n_openflags, &np->n_openlock, slpflag, "nfs_open_state_set_busy", &ts);
6d2010ae 1772 slpflag = 0;
b0d623f7
A
1773 }
1774 if (!error)
1775 np->n_openflags |= N_OPENBUSY;
1776 lck_mtx_unlock(&np->n_openlock);
2d21ac55 1777
b0d623f7
A
1778 return (error);
1779}
2d21ac55 1780
b0d623f7
A
1781/*
1782 * Clear an NFS node's open state busy flag and wake up
1783 * anyone wanting it.
1784 */
1785void
1786nfs_open_state_clear_busy(nfsnode_t np)
1787{
1788 int wanted;
1789
1790 lck_mtx_lock(&np->n_openlock);
1791 if (!(np->n_openflags & N_OPENBUSY))
1792 panic("nfs_open_state_clear_busy");
1793 wanted = (np->n_openflags & N_OPENWANT);
1794 np->n_openflags &= ~(N_OPENBUSY|N_OPENWANT);
1795 lck_mtx_unlock(&np->n_openlock);
1796 if (wanted)
1797 wakeup(&np->n_openflags);
1798}
2d21ac55 1799
b0d623f7
A
1800/*
1801 * Search a mount's open owner list for the owner for this credential.
1802 * If not found and "alloc" is set, then allocate a new one.
1803 */
1804struct nfs_open_owner *
1805nfs_open_owner_find(struct nfsmount *nmp, kauth_cred_t cred, int alloc)
1806{
1807 uid_t uid = kauth_cred_getuid(cred);
1808 struct nfs_open_owner *noop, *newnoop = NULL;
2d21ac55 1809
b0d623f7
A
1810tryagain:
1811 lck_mtx_lock(&nmp->nm_lock);
1812 TAILQ_FOREACH(noop, &nmp->nm_open_owners, noo_link) {
1813 if (kauth_cred_getuid(noop->noo_cred) == uid)
1814 break;
2d21ac55 1815 }
2d21ac55 1816
b0d623f7
A
1817 if (!noop && !newnoop && alloc) {
1818 lck_mtx_unlock(&nmp->nm_lock);
1819 MALLOC(newnoop, struct nfs_open_owner *, sizeof(struct nfs_open_owner), M_TEMP, M_WAITOK);
1820 if (!newnoop)
1821 return (NULL);
1822 bzero(newnoop, sizeof(*newnoop));
1823 lck_mtx_init(&newnoop->noo_lock, nfs_open_grp, LCK_ATTR_NULL);
1824 newnoop->noo_mount = nmp;
1825 kauth_cred_ref(cred);
1826 newnoop->noo_cred = cred;
1827 newnoop->noo_name = OSAddAtomic(1, &nfs_open_owner_seqnum);
1828 TAILQ_INIT(&newnoop->noo_opens);
1829 goto tryagain;
1830 }
1831 if (!noop && newnoop) {
1832 newnoop->noo_flags |= NFS_OPEN_OWNER_LINK;
1833 TAILQ_INSERT_HEAD(&nmp->nm_open_owners, newnoop, noo_link);
1834 noop = newnoop;
1835 }
1836 lck_mtx_unlock(&nmp->nm_lock);
1837
1838 if (newnoop && (noop != newnoop))
1839 nfs_open_owner_destroy(newnoop);
1840
1841 if (noop)
1842 nfs_open_owner_ref(noop);
1843
1844 return (noop);
1845}
1846
1847/*
1848 * destroy an open owner that's no longer needed
1849 */
1850void
1851nfs_open_owner_destroy(struct nfs_open_owner *noop)
1852{
1853 if (noop->noo_cred)
1854 kauth_cred_unref(&noop->noo_cred);
1855 lck_mtx_destroy(&noop->noo_lock, nfs_open_grp);
1856 FREE(noop, M_TEMP);
1857}
1858
1859/*
1860 * acquire a reference count on an open owner
1861 */
1862void
1863nfs_open_owner_ref(struct nfs_open_owner *noop)
1864{
1865 lck_mtx_lock(&noop->noo_lock);
1866 noop->noo_refcnt++;
1867 lck_mtx_unlock(&noop->noo_lock);
1868}
1869
1870/*
1871 * drop a reference count on an open owner and destroy it if
1872 * it is no longer referenced and no longer on the mount's list.
1873 */
1874void
1875nfs_open_owner_rele(struct nfs_open_owner *noop)
1876{
1877 lck_mtx_lock(&noop->noo_lock);
1878 if (noop->noo_refcnt < 1)
1879 panic("nfs_open_owner_rele: no refcnt");
1880 noop->noo_refcnt--;
1881 if (!noop->noo_refcnt && (noop->noo_flags & NFS_OPEN_OWNER_BUSY))
1882 panic("nfs_open_owner_rele: busy");
1883 /* XXX we may potentially want to clean up idle/unused open owner structures */
1884 if (noop->noo_refcnt || (noop->noo_flags & NFS_OPEN_OWNER_LINK)) {
1885 lck_mtx_unlock(&noop->noo_lock);
1886 return;
1887 }
1888 /* owner is no longer referenced or linked to mount, so destroy it */
1889 lck_mtx_unlock(&noop->noo_lock);
1890 nfs_open_owner_destroy(noop);
1891}
1892
1893/*
1894 * Mark an open owner as busy because we are about to
1895 * start an operation that uses and updates open owner state.
1896 */
1897int
1898nfs_open_owner_set_busy(struct nfs_open_owner *noop, thread_t thd)
1899{
1900 struct nfsmount *nmp;
1901 struct timespec ts = {2, 0};
1902 int error = 0, slpflag;
1903
1904 nmp = noop->noo_mount;
1905 if (!nmp)
1906 return (ENXIO);
6d2010ae 1907 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
b0d623f7
A
1908
1909 lck_mtx_lock(&noop->noo_lock);
1910 while (noop->noo_flags & NFS_OPEN_OWNER_BUSY) {
1911 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
1912 break;
1913 noop->noo_flags |= NFS_OPEN_OWNER_WANT;
1914 msleep(noop, &noop->noo_lock, slpflag, "nfs_open_owner_set_busy", &ts);
6d2010ae 1915 slpflag = 0;
b0d623f7
A
1916 }
1917 if (!error)
1918 noop->noo_flags |= NFS_OPEN_OWNER_BUSY;
1919 lck_mtx_unlock(&noop->noo_lock);
1920
1921 return (error);
1922}
1923
1924/*
1925 * Clear the busy flag on an open owner and wake up anyone waiting
1926 * to mark it busy.
1927 */
1928void
1929nfs_open_owner_clear_busy(struct nfs_open_owner *noop)
1930{
1931 int wanted;
1932
1933 lck_mtx_lock(&noop->noo_lock);
1934 if (!(noop->noo_flags & NFS_OPEN_OWNER_BUSY))
1935 panic("nfs_open_owner_clear_busy");
1936 wanted = (noop->noo_flags & NFS_OPEN_OWNER_WANT);
1937 noop->noo_flags &= ~(NFS_OPEN_OWNER_BUSY|NFS_OPEN_OWNER_WANT);
1938 lck_mtx_unlock(&noop->noo_lock);
1939 if (wanted)
1940 wakeup(noop);
1941}
1942
1943/*
1944 * Given an open/lock owner and an error code, increment the
1945 * sequence ID if appropriate.
1946 */
1947void
1948nfs_owner_seqid_increment(struct nfs_open_owner *noop, struct nfs_lock_owner *nlop, int error)
1949{
1950 switch (error) {
1951 case NFSERR_STALE_CLIENTID:
1952 case NFSERR_STALE_STATEID:
1953 case NFSERR_OLD_STATEID:
1954 case NFSERR_BAD_STATEID:
1955 case NFSERR_BAD_SEQID:
1956 case NFSERR_BADXDR:
1957 case NFSERR_RESOURCE:
1958 case NFSERR_NOFILEHANDLE:
1959 /* do not increment the open seqid on these errors */
1960 return;
1961 }
1962 if (noop)
1963 noop->noo_seqid++;
1964 if (nlop)
1965 nlop->nlo_seqid++;
1966}
1967
1968/*
1969 * Search a node's open file list for any conflicts with this request.
1970 * Also find this open owner's open file structure.
1971 * If not found and "alloc" is set, then allocate one.
1972 */
1973int
1974nfs_open_file_find(
1975 nfsnode_t np,
1976 struct nfs_open_owner *noop,
1977 struct nfs_open_file **nofpp,
1978 uint32_t accessMode,
1979 uint32_t denyMode,
1980 int alloc)
6d2010ae
A
1981{
1982 *nofpp = NULL;
1983 return nfs_open_file_find_internal(np, noop, nofpp, accessMode, denyMode, alloc);
1984}
1985
1986/*
1987 * Internally, allow using a provisional nodeless nofp (passed in via *nofpp)
1988 * if an existing one is not found. This is used in "create" scenarios to
1989 * officially add the provisional nofp to the node once the node is created.
1990 */
1991int
1992nfs_open_file_find_internal(
1993 nfsnode_t np,
1994 struct nfs_open_owner *noop,
1995 struct nfs_open_file **nofpp,
1996 uint32_t accessMode,
1997 uint32_t denyMode,
1998 int alloc)
b0d623f7
A
1999{
2000 struct nfs_open_file *nofp = NULL, *nofp2, *newnofp = NULL;
2001
2002 if (!np)
2003 goto alloc;
2004tryagain:
2005 lck_mtx_lock(&np->n_openlock);
2006 TAILQ_FOREACH(nofp2, &np->n_opens, nof_link) {
2007 if (nofp2->nof_owner == noop) {
2008 nofp = nofp2;
2009 if (!accessMode)
2010 break;
2011 }
2012 if ((accessMode & nofp2->nof_deny) || (denyMode & nofp2->nof_access)) {
2013 /* This request conflicts with an existing open on this client. */
2014 lck_mtx_unlock(&np->n_openlock);
b0d623f7
A
2015 return (EACCES);
2016 }
2017 }
2018
2019 /*
2020 * If this open owner doesn't have an open
2021 * file structure yet, we create one for it.
2022 */
6d2010ae 2023 if (!nofp && !*nofpp && !newnofp && alloc) {
b0d623f7
A
2024 lck_mtx_unlock(&np->n_openlock);
2025alloc:
2026 MALLOC(newnofp, struct nfs_open_file *, sizeof(struct nfs_open_file), M_TEMP, M_WAITOK);
6d2010ae 2027 if (!newnofp)
b0d623f7 2028 return (ENOMEM);
b0d623f7
A
2029 bzero(newnofp, sizeof(*newnofp));
2030 lck_mtx_init(&newnofp->nof_lock, nfs_open_grp, LCK_ATTR_NULL);
2031 newnofp->nof_owner = noop;
2032 nfs_open_owner_ref(noop);
2033 newnofp->nof_np = np;
2034 lck_mtx_lock(&noop->noo_lock);
2035 TAILQ_INSERT_HEAD(&noop->noo_opens, newnofp, nof_oolink);
2036 lck_mtx_unlock(&noop->noo_lock);
2037 if (np)
2038 goto tryagain;
2039 }
6d2010ae
A
2040 if (!nofp) {
2041 if (*nofpp) {
2042 (*nofpp)->nof_np = np;
2043 nofp = *nofpp;
2044 } else {
2045 nofp = newnofp;
2046 }
2047 if (nofp && np)
2048 TAILQ_INSERT_HEAD(&np->n_opens, nofp, nof_link);
b0d623f7
A
2049 }
2050 if (np)
2051 lck_mtx_unlock(&np->n_openlock);
2052
6d2010ae 2053 if (alloc && newnofp && (nofp != newnofp))
b0d623f7
A
2054 nfs_open_file_destroy(newnofp);
2055
2056 *nofpp = nofp;
2057 return (nofp ? 0 : ESRCH);
2058}
2059
2060/*
2061 * Destroy an open file structure.
2062 */
2063void
2064nfs_open_file_destroy(struct nfs_open_file *nofp)
2065{
2066 lck_mtx_lock(&nofp->nof_owner->noo_lock);
2067 TAILQ_REMOVE(&nofp->nof_owner->noo_opens, nofp, nof_oolink);
2068 lck_mtx_unlock(&nofp->nof_owner->noo_lock);
2069 nfs_open_owner_rele(nofp->nof_owner);
2070 lck_mtx_destroy(&nofp->nof_lock, nfs_open_grp);
2071 FREE(nofp, M_TEMP);
2072}
2073
2074/*
2075 * Mark an open file as busy because we are about to
2076 * start an operation that uses and updates open file state.
2077 */
2078int
2079nfs_open_file_set_busy(struct nfs_open_file *nofp, thread_t thd)
2080{
2081 struct nfsmount *nmp;
2082 struct timespec ts = {2, 0};
2083 int error = 0, slpflag;
2084
2085 nmp = nofp->nof_owner->noo_mount;
2086 if (!nmp)
2087 return (ENXIO);
6d2010ae 2088 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
b0d623f7
A
2089
2090 lck_mtx_lock(&nofp->nof_lock);
2091 while (nofp->nof_flags & NFS_OPEN_FILE_BUSY) {
2092 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
2093 break;
2094 nofp->nof_flags |= NFS_OPEN_FILE_WANT;
2095 msleep(nofp, &nofp->nof_lock, slpflag, "nfs_open_file_set_busy", &ts);
6d2010ae 2096 slpflag = 0;
b0d623f7
A
2097 }
2098 if (!error)
2099 nofp->nof_flags |= NFS_OPEN_FILE_BUSY;
2100 lck_mtx_unlock(&nofp->nof_lock);
2101
2102 return (error);
2103}
2104
2105/*
2106 * Clear the busy flag on an open file and wake up anyone waiting
2107 * to mark it busy.
2108 */
2109void
2110nfs_open_file_clear_busy(struct nfs_open_file *nofp)
2111{
2112 int wanted;
2113
2114 lck_mtx_lock(&nofp->nof_lock);
2115 if (!(nofp->nof_flags & NFS_OPEN_FILE_BUSY))
2116 panic("nfs_open_file_clear_busy");
2117 wanted = (nofp->nof_flags & NFS_OPEN_FILE_WANT);
2118 nofp->nof_flags &= ~(NFS_OPEN_FILE_BUSY|NFS_OPEN_FILE_WANT);
2119 lck_mtx_unlock(&nofp->nof_lock);
2120 if (wanted)
2121 wakeup(nofp);
2122}
2123
2124/*
6d2010ae 2125 * Add the open state for the given access/deny modes to this open file.
b0d623f7
A
2126 */
2127void
6d2010ae 2128nfs_open_file_add_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode, int delegated)
b0d623f7 2129{
6d2010ae
A
2130 lck_mtx_lock(&nofp->nof_lock);
2131 nofp->nof_access |= accessMode;
2132 nofp->nof_deny |= denyMode;
b0d623f7 2133
6d2010ae
A
2134 if (delegated) {
2135 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2136 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2137 nofp->nof_d_r++;
2138 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2139 nofp->nof_d_w++;
2140 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2141 nofp->nof_d_rw++;
2142 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2143 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2144 nofp->nof_d_r_dw++;
2145 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2146 nofp->nof_d_w_dw++;
2147 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2148 nofp->nof_d_rw_dw++;
2149 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2150 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2151 nofp->nof_d_r_drw++;
2152 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2153 nofp->nof_d_w_drw++;
2154 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2155 nofp->nof_d_rw_drw++;
2156 }
b0d623f7 2157 } else {
6d2010ae
A
2158 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2159 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2160 nofp->nof_r++;
2161 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2162 nofp->nof_w++;
2163 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2164 nofp->nof_rw++;
2165 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2166 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2167 nofp->nof_r_dw++;
2168 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2169 nofp->nof_w_dw++;
2170 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2171 nofp->nof_rw_dw++;
2172 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2173 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2174 nofp->nof_r_drw++;
2175 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2176 nofp->nof_w_drw++;
2177 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2178 nofp->nof_rw_drw++;
2179 }
b0d623f7 2180 }
6d2010ae
A
2181
2182 nofp->nof_opencnt++;
2183 lck_mtx_unlock(&nofp->nof_lock);
b0d623f7
A
2184}
2185
2186/*
6d2010ae
A
2187 * Find which particular open combo will be closed and report what
2188 * the new modes will be and whether the open was delegated.
b0d623f7 2189 */
6d2010ae
A
2190void
2191nfs_open_file_remove_open_find(
b0d623f7
A
2192 struct nfs_open_file *nofp,
2193 uint32_t accessMode,
2194 uint32_t denyMode,
6d2010ae
A
2195 uint32_t *newAccessMode,
2196 uint32_t *newDenyMode,
2197 int *delegated)
b0d623f7 2198{
6d2010ae
A
2199 /*
2200 * Calculate new modes: a mode bit gets removed when there's only
2201 * one count in all the corresponding counts
2202 */
2203 *newAccessMode = nofp->nof_access;
2204 *newDenyMode = nofp->nof_deny;
b0d623f7 2205
6d2010ae
A
2206 if ((accessMode & NFS_OPEN_SHARE_ACCESS_READ) &&
2207 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) &&
2208 ((nofp->nof_r + nofp->nof_d_r +
2209 nofp->nof_rw + nofp->nof_d_rw +
2210 nofp->nof_r_dw + nofp->nof_d_r_dw +
2211 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2212 nofp->nof_r_drw + nofp->nof_d_r_drw +
2213 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1))
2214 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2215 if ((accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2216 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2217 ((nofp->nof_w + nofp->nof_d_w +
2218 nofp->nof_rw + nofp->nof_d_rw +
2219 nofp->nof_w_dw + nofp->nof_d_w_dw +
2220 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2221 nofp->nof_w_drw + nofp->nof_d_w_drw +
2222 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1))
2223 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_WRITE;
2224 if ((denyMode & NFS_OPEN_SHARE_DENY_READ) &&
2225 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) &&
2226 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
2227 nofp->nof_w_drw + nofp->nof_d_w_drw +
2228 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1))
2229 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_READ;
2230 if ((denyMode & NFS_OPEN_SHARE_DENY_WRITE) &&
2231 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_WRITE) &&
2232 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
2233 nofp->nof_w_drw + nofp->nof_d_w_drw +
2234 nofp->nof_rw_drw + nofp->nof_d_rw_drw +
2235 nofp->nof_r_dw + nofp->nof_d_r_dw +
2236 nofp->nof_w_dw + nofp->nof_d_w_dw +
2237 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1))
2238 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_WRITE;
2239
2240 /* Find the corresponding open access/deny mode counter. */
b0d623f7
A
2241 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2242 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
6d2010ae 2243 *delegated = (nofp->nof_d_r != 0);
b0d623f7 2244 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
6d2010ae 2245 *delegated = (nofp->nof_d_w != 0);
b0d623f7 2246 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
6d2010ae
A
2247 *delegated = (nofp->nof_d_rw != 0);
2248 else
2249 *delegated = 0;
b0d623f7
A
2250 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2251 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
6d2010ae 2252 *delegated = (nofp->nof_d_r_dw != 0);
b0d623f7 2253 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
6d2010ae 2254 *delegated = (nofp->nof_d_w_dw != 0);
b0d623f7 2255 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
6d2010ae
A
2256 *delegated = (nofp->nof_d_rw_dw != 0);
2257 else
2258 *delegated = 0;
b0d623f7
A
2259 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2260 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
6d2010ae 2261 *delegated = (nofp->nof_d_r_drw != 0);
b0d623f7 2262 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
6d2010ae 2263 *delegated = (nofp->nof_d_w_drw != 0);
b0d623f7 2264 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
6d2010ae
A
2265 *delegated = (nofp->nof_d_rw_drw != 0);
2266 else
2267 *delegated = 0;
b0d623f7 2268 }
6d2010ae
A
2269}
2270
2271/*
2272 * Remove the open state for the given access/deny modes to this open file.
2273 */
2274void
2275nfs_open_file_remove_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode)
2276{
2277 uint32_t newAccessMode, newDenyMode;
2278 int delegated = 0;
2279
2280 lck_mtx_lock(&nofp->nof_lock);
2281 nfs_open_file_remove_open_find(nofp, accessMode, denyMode, &newAccessMode, &newDenyMode, &delegated);
2282
2283 /* Decrement the corresponding open access/deny mode counter. */
2284 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2285 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2286 if (delegated) {
2287 if (nofp->nof_d_r == 0)
2288 NP(nofp->nof_np, "nfs: open(R) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2289 else
2290 nofp->nof_d_r--;
2291 } else {
2292 if (nofp->nof_r == 0)
2293 NP(nofp->nof_np, "nfs: open(R) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2294 else
2295 nofp->nof_r--;
2296 }
2297 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2298 if (delegated) {
2299 if (nofp->nof_d_w == 0)
2300 NP(nofp->nof_np, "nfs: open(W) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2301 else
2302 nofp->nof_d_w--;
2303 } else {
2304 if (nofp->nof_w == 0)
2305 NP(nofp->nof_np, "nfs: open(W) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2306 else
2307 nofp->nof_w--;
2308 }
2309 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2310 if (delegated) {
2311 if (nofp->nof_d_rw == 0)
2312 NP(nofp->nof_np, "nfs: open(RW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2313 else
2314 nofp->nof_d_rw--;
2315 } else {
2316 if (nofp->nof_rw == 0)
2317 NP(nofp->nof_np, "nfs: open(RW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2318 else
2319 nofp->nof_rw--;
2320 }
2321 }
2322 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2323 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2324 if (delegated) {
2325 if (nofp->nof_d_r_dw == 0)
2326 NP(nofp->nof_np, "nfs: open(R,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2327 else
2328 nofp->nof_d_r_dw--;
2329 } else {
2330 if (nofp->nof_r_dw == 0)
2331 NP(nofp->nof_np, "nfs: open(R,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2332 else
2333 nofp->nof_r_dw--;
2334 }
2335 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2336 if (delegated) {
2337 if (nofp->nof_d_w_dw == 0)
2338 NP(nofp->nof_np, "nfs: open(W,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2339 else
2340 nofp->nof_d_w_dw--;
2341 } else {
2342 if (nofp->nof_w_dw == 0)
2343 NP(nofp->nof_np, "nfs: open(W,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2344 else
2345 nofp->nof_w_dw--;
2346 }
2347 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2348 if (delegated) {
2349 if (nofp->nof_d_rw_dw == 0)
2350 NP(nofp->nof_np, "nfs: open(RW,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2351 else
2352 nofp->nof_d_rw_dw--;
2353 } else {
2354 if (nofp->nof_rw_dw == 0)
2355 NP(nofp->nof_np, "nfs: open(RW,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2356 else
2357 nofp->nof_rw_dw--;
2358 }
2359 }
2360 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2361 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2362 if (delegated) {
2363 if (nofp->nof_d_r_drw == 0)
2364 NP(nofp->nof_np, "nfs: open(R,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2365 else
2366 nofp->nof_d_r_drw--;
2367 } else {
2368 if (nofp->nof_r_drw == 0)
2369 NP(nofp->nof_np, "nfs: open(R,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2370 else
2371 nofp->nof_r_drw--;
2372 }
2373 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2374 if (delegated) {
2375 if (nofp->nof_d_w_drw == 0)
2376 NP(nofp->nof_np, "nfs: open(W,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2377 else
2378 nofp->nof_d_w_drw--;
2379 } else {
2380 if (nofp->nof_w_drw == 0)
2381 NP(nofp->nof_np, "nfs: open(W,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2382 else
2383 nofp->nof_w_drw--;
2384 }
2385 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2386 if (delegated) {
2387 if (nofp->nof_d_rw_drw == 0)
2388 NP(nofp->nof_np, "nfs: open(RW,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2389 else
2390 nofp->nof_d_rw_drw--;
2391 } else {
2392 if (nofp->nof_rw_drw == 0)
2393 NP(nofp->nof_np, "nfs: open(RW,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2394 else
2395 nofp->nof_rw_drw--;
2396 }
2397 }
2398 }
2399
2400 /* update the modes */
2401 nofp->nof_access = newAccessMode;
2402 nofp->nof_deny = newDenyMode;
2403 nofp->nof_opencnt--;
2404 lck_mtx_unlock(&nofp->nof_lock);
2405}
2406
2407
2408/*
2409 * Get the current (delegation, lock, open, default) stateid for this node.
2410 * If node has a delegation, use that stateid.
2411 * If pid has a lock, use the lockowner's stateid.
2412 * Or use the open file's stateid.
2413 * If no open file, use a default stateid of all ones.
2414 */
2415void
2416nfs_get_stateid(nfsnode_t np, thread_t thd, kauth_cred_t cred, nfs_stateid *sid)
2417{
2418 struct nfsmount *nmp = NFSTONMP(np);
2419 proc_t p = thd ? get_bsdthreadtask_info(thd) : current_proc(); // XXX async I/O requests don't have a thread
2420 struct nfs_open_owner *noop = NULL;
2421 struct nfs_open_file *nofp = NULL;
2422 struct nfs_lock_owner *nlop = NULL;
2423 nfs_stateid *s = NULL;
2424
2425 if (np->n_openflags & N_DELEG_MASK) {
2426 s = &np->n_dstateid;
2427 } else {
2428 if (p)
2429 nlop = nfs_lock_owner_find(np, p, 0);
2430 if (nlop && !TAILQ_EMPTY(&nlop->nlo_locks)) {
2431 /* we hold locks, use lock stateid */
2432 s = &nlop->nlo_stateid;
2433 } else if (((noop = nfs_open_owner_find(nmp, cred, 0))) &&
2434 (nfs_open_file_find(np, noop, &nofp, 0, 0, 0) == 0) &&
2435 !(nofp->nof_flags & NFS_OPEN_FILE_LOST) &&
2436 nofp->nof_access) {
2437 /* we (should) have the file open, use open stateid */
2438 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)
2439 nfs4_reopen(nofp, thd);
2440 if (!(nofp->nof_flags & NFS_OPEN_FILE_LOST))
2441 s = &nofp->nof_stateid;
2442 }
2443 }
2444
2445 if (s) {
2446 sid->seqid = s->seqid;
2447 sid->other[0] = s->other[0];
2448 sid->other[1] = s->other[1];
2449 sid->other[2] = s->other[2];
2450 } else {
2451 /* named attributes may not have a stateid for reads, so don't complain for them */
2452 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
2453 NP(np, "nfs_get_stateid: no stateid");
2454 sid->seqid = sid->other[0] = sid->other[1] = sid->other[2] = 0xffffffff;
2455 }
2456 if (nlop)
2457 nfs_lock_owner_rele(nlop);
2458 if (noop)
2459 nfs_open_owner_rele(noop);
2460}
2461
2462
2463/*
2464 * When we have a delegation, we may be able to perform the OPEN locally.
2465 * Perform the OPEN by checking the delegation ACE and/or checking via ACCESS.
2466 */
2467int
2468nfs4_open_delegated(
2469 nfsnode_t np,
2470 struct nfs_open_file *nofp,
2471 uint32_t accessMode,
2472 uint32_t denyMode,
2473 vfs_context_t ctx)
2474{
2475 int error = 0, ismember, readtoo = 0, authorized = 0;
2476 uint32_t action;
2477 struct kauth_acl_eval eval;
2478 kauth_cred_t cred = vfs_context_ucred(ctx);
2479
2480 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2481 /*
2482 * Try to open it for read access too,
2483 * so the buffer cache can read data.
2484 */
2485 readtoo = 1;
2486 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2487 }
2488
2489tryagain:
2490 action = 0;
2491 if (accessMode & NFS_OPEN_SHARE_ACCESS_READ)
2492 action |= KAUTH_VNODE_READ_DATA;
2493 if (accessMode & NFS_OPEN_SHARE_ACCESS_WRITE)
2494 action |= KAUTH_VNODE_WRITE_DATA;
2495
2496 /* evaluate ACE (if we have one) */
2497 if (np->n_dace.ace_flags) {
2498 eval.ae_requested = action;
2499 eval.ae_acl = &np->n_dace;
2500 eval.ae_count = 1;
2501 eval.ae_options = 0;
2502 if (np->n_vattr.nva_uid == kauth_cred_getuid(cred))
2503 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
2504 error = kauth_cred_ismember_gid(cred, np->n_vattr.nva_gid, &ismember);
2505 if (!error && ismember)
2506 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
2507
2508 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
2509 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
2510 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
2511 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
2512
2513 error = kauth_acl_evaluate(cred, &eval);
2514
2515 if (!error && (eval.ae_result == KAUTH_RESULT_ALLOW))
2516 authorized = 1;
2517 }
2518
2519 if (!authorized) {
2520 /* need to ask the server via ACCESS */
2521 struct vnop_access_args naa;
2522 naa.a_desc = &vnop_access_desc;
2523 naa.a_vp = NFSTOV(np);
2524 naa.a_action = action;
2525 naa.a_context = ctx;
2526 if (!(error = nfs_vnop_access(&naa)))
2527 authorized = 1;
2528 }
2529
2530 if (!authorized) {
2531 if (readtoo) {
2532 /* try again without the extra read access */
2533 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2534 readtoo = 0;
2535 goto tryagain;
2536 }
2537 return (error ? error : EACCES);
2538 }
2539
2540 nfs_open_file_add_open(nofp, accessMode, denyMode, 1);
2541
2542 return (0);
2543}
2544
2545
2546/*
2547 * Open a file with the given access/deny modes.
2548 *
2549 * If we have a delegation, we may be able to handle the open locally.
2550 * Otherwise, we will always send the open RPC even if this open's mode is
2551 * a subset of all the existing opens. This makes sure that we will always
2552 * be able to do a downgrade to any of the open modes.
2553 *
2554 * Note: local conflicts should have already been checked in nfs_open_file_find().
2555 */
2556int
2557nfs4_open(
2558 nfsnode_t np,
2559 struct nfs_open_file *nofp,
2560 uint32_t accessMode,
2561 uint32_t denyMode,
2562 vfs_context_t ctx)
2563{
2564 vnode_t vp = NFSTOV(np);
2565 vnode_t dvp = NULL;
2566 struct componentname cn;
2567 const char *vname = NULL;
2568 size_t namelen;
2569 char smallname[128];
2570 char *filename = NULL;
2571 int error = 0, readtoo = 0;
2572
2573 /*
2574 * We can handle the OPEN ourselves if we have a delegation,
2575 * unless it's a read delegation and the open is asking for
2576 * either write access or deny read. We also don't bother to
2577 * use the delegation if it's being returned.
2578 */
2579 if (np->n_openflags & N_DELEG_MASK) {
2580 if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx))))
2581 return (error);
2582 if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN) &&
2583 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ||
2584 (!(accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) && !(denyMode & NFS_OPEN_SHARE_DENY_READ)))) {
2585 error = nfs4_open_delegated(np, nofp, accessMode, denyMode, ctx);
2586 nfs_open_state_clear_busy(np);
2587 return (error);
2588 }
2589 nfs_open_state_clear_busy(np);
2590 }
2591
2592 /*
2593 * [sigh] We can't trust VFS to get the parent right for named
2594 * attribute nodes. (It likes to reparent the nodes after we've
2595 * created them.) Luckily we can probably get the right parent
2596 * from the n_parent we have stashed away.
2597 */
2598 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
2599 (((dvp = np->n_parent)) && (error = vnode_get(dvp))))
2600 dvp = NULL;
2601 if (!dvp)
2602 dvp = vnode_getparent(vp);
2603 vname = vnode_getname(vp);
2604 if (!dvp || !vname) {
2605 if (!error)
2606 error = EIO;
2607 goto out;
2608 }
2609 filename = &smallname[0];
2610 namelen = snprintf(filename, sizeof(smallname), "%s", vname);
2611 if (namelen >= sizeof(smallname)) {
2612 MALLOC(filename, char *, namelen+1, M_TEMP, M_WAITOK);
2613 if (!filename) {
2614 error = ENOMEM;
2615 goto out;
2616 }
2617 snprintf(filename, namelen+1, "%s", vname);
2618 }
2619 bzero(&cn, sizeof(cn));
2620 cn.cn_nameptr = filename;
2621 cn.cn_namelen = namelen;
2622
2623 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2624 /*
2625 * Try to open it for read access too,
2626 * so the buffer cache can read data.
2627 */
2628 readtoo = 1;
2629 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2630 }
2631tryagain:
2632 error = nfs4_open_rpc(nofp, ctx, &cn, NULL, dvp, &vp, NFS_OPEN_NOCREATE, accessMode, denyMode);
2633 if (error) {
2634 if (!nfs_mount_state_error_should_restart(error) &&
2635 (error != EINTR) && (error != ERESTART) && readtoo) {
2636 /* try again without the extra read access */
2637 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2638 readtoo = 0;
2639 goto tryagain;
2640 }
2641 goto out;
2642 }
2643 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
b0d623f7
A
2644out:
2645 if (filename && (filename != &smallname[0]))
2646 FREE(filename, M_TEMP);
2647 if (vname)
2648 vnode_putname(vname);
2649 if (dvp != NULLVP)
2650 vnode_put(dvp);
2651 return (error);
2652}
2653
/*
 * NFS mmap vnode op.
 *
 * The open reference held for an mmap must mirror an existing open because
 * it may need to be reclaimed after the file is closed.  Grab another open
 * count matching the mapping's access mode and record it in
 * nof_mmap_access/nof_mmap_deny so nfs_vnop_mnomap() can drop it later.
 *
 * Returns 0 on success; EINVAL for non-regular files or mappings with no
 * read/write protection, EIO if the node has been revoked, ENOMEM/EPERM/
 * ENXIO or an RPC error otherwise.
 */
int
nfs_vnop_mmap(
	struct vnop_mmap_args /* {
		struct vnodeop_desc *a_desc;
		vnode_t a_vp;
		int a_fflags;
		vfs_context_t a_context;
	} */ *ap)
{
	vfs_context_t ctx = ap->a_context;
	vnode_t vp = ap->a_vp;
	nfsnode_t np = VTONFS(vp);
	int error = 0, accessMode, denyMode, delegated;
	struct nfsmount *nmp;
	struct nfs_open_owner *noop = NULL;
	struct nfs_open_file *nofp = NULL;

	nmp = VTONMP(vp);
	if (!nmp)
		return (ENXIO);

	/* only regular files, and only mappings asking for read and/or write */
	if (!vnode_isreg(vp) || !(ap->a_fflags & (PROT_READ|PROT_WRITE)))
		return (EINVAL);
	if (np->n_flag & NREVOKE)
		return (EIO);

	/*
	 * fflags contains some combination of: PROT_READ, PROT_WRITE
	 * Since it's not possible to mmap() without having the file open for reading,
	 * read access is always there (regardless if PROT_READ is not set).
	 */
	accessMode = NFS_OPEN_SHARE_ACCESS_READ;
	if (ap->a_fflags & PROT_WRITE)
		accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE;
	denyMode = NFS_OPEN_SHARE_DENY_NONE;

	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
	if (!noop)
		return (ENOMEM);

restart:
	error = nfs_mount_state_in_use_start(nmp, NULL);
	if (error) {
		nfs_open_owner_rele(noop);
		return (error);
	}
	if (np->n_flag & NREVOKE) {
		error = EIO;
		nfs_mount_state_in_use_end(nmp, 0);
		nfs_open_owner_rele(noop);
		return (error);
	}

	error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
	if (error || (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST))) {
		NP(np, "nfs_vnop_mmap: no open file for owner, error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
		error = EPERM;
	}
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
		/* file needs to be reopened (e.g. after server restart) first */
		nfs_mount_state_in_use_end(nmp, 0);
		error = nfs4_reopen(nofp, NULL);
		nofp = NULL;
		if (!error)
			goto restart;
	}
	if (!error)
		error = nfs_open_file_set_busy(nofp, NULL);
	if (error) {
		nofp = NULL;
		goto out;
	}

	/*
	 * The open reference for mmap must mirror an existing open because
	 * we may need to reclaim it after the file is closed.
	 * So grab another open count matching the accessMode passed in.
	 * If we already had an mmap open, prefer read/write without deny mode.
	 * This means we may have to drop the current mmap open first.
	 */

	if (!nofp->nof_access) {
		if (accessMode != NFS_OPEN_SHARE_ACCESS_READ) {
			/* not asking for just read access -> fail */
			error = EPERM;
			goto out;
		}
		/* we don't have the file open, so open it for read access */
		if (nmp->nm_vers < NFS_VER4) {
			/* NFS v2/v3 opens are always allowed - so just add it. */
			nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
			error = 0;
		} else {
			error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
		}
		if (!error)
			nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
		if (error)
			goto out;
	}

	/* determine deny mode for open */
	/* pick the weakest deny mode for which we already hold a matching open,
	 * preferring delegated opens (nof_d_*) over confirmed ones */
	if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
		if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
			delegated = 1;
			if (nofp->nof_d_rw)
				denyMode = NFS_OPEN_SHARE_DENY_NONE;
			else if (nofp->nof_d_rw_dw)
				denyMode = NFS_OPEN_SHARE_DENY_WRITE;
			else if (nofp->nof_d_rw_drw)
				denyMode = NFS_OPEN_SHARE_DENY_BOTH;
		} else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
			delegated = 0;
			if (nofp->nof_rw)
				denyMode = NFS_OPEN_SHARE_DENY_NONE;
			else if (nofp->nof_rw_dw)
				denyMode = NFS_OPEN_SHARE_DENY_WRITE;
			else if (nofp->nof_rw_drw)
				denyMode = NFS_OPEN_SHARE_DENY_BOTH;
		} else {
			error = EPERM;
		}
	} else { /* NFS_OPEN_SHARE_ACCESS_READ */
		if (nofp->nof_d_r || nofp->nof_d_r_dw || nofp->nof_d_r_drw) {
			delegated = 1;
			if (nofp->nof_d_r)
				denyMode = NFS_OPEN_SHARE_DENY_NONE;
			else if (nofp->nof_d_r_dw)
				denyMode = NFS_OPEN_SHARE_DENY_WRITE;
			else if (nofp->nof_d_r_drw)
				denyMode = NFS_OPEN_SHARE_DENY_BOTH;
		} else if (nofp->nof_r || nofp->nof_r_dw || nofp->nof_r_drw) {
			delegated = 0;
			if (nofp->nof_r)
				denyMode = NFS_OPEN_SHARE_DENY_NONE;
			else if (nofp->nof_r_dw)
				denyMode = NFS_OPEN_SHARE_DENY_WRITE;
			else if (nofp->nof_r_drw)
				denyMode = NFS_OPEN_SHARE_DENY_BOTH;
		} else {
			error = EPERM;
		}
	}
	if (error) /* mmap mode without proper open mode */
		goto out;

	/*
	 * If the existing mmap access is more than the new access OR the
	 * existing access is the same and the existing deny mode is less,
	 * then we'll stick with the existing mmap open mode.
	 */
	if ((nofp->nof_mmap_access > accessMode) ||
	    ((nofp->nof_mmap_access == accessMode) && (nofp->nof_mmap_deny <= denyMode)))
		goto out;

	/* update mmap open mode */
	if (nofp->nof_mmap_access) {
		/* drop the previous (weaker) mmap open before taking the new one */
		error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
		if (error) {
			if (!nfs_mount_state_error_should_restart(error))
				NP(np, "nfs_vnop_mmap: close of previous mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
			NP(np, "nfs_vnop_mmap: update, close error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
			goto out;
		}
		nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
	}

	nfs_open_file_add_open(nofp, accessMode, denyMode, delegated);
	nofp->nof_mmap_access = accessMode;
	nofp->nof_mmap_deny = denyMode;

out:
	if (nofp)
		nfs_open_file_clear_busy(nofp);
	if (nfs_mount_state_in_use_end(nmp, error)) {
		/* a recoverable state error occurred - redo the whole operation */
		nofp = NULL;
		goto restart;
	}
	if (noop)
		nfs_open_owner_rele(noop);
	return (error);
}
2835
b0d623f7
A
2836
/*
 * NFS mnomap vnode op: the last mapping of the file has gone away.
 *
 * Flush dirty data, then walk the node's open files and close any opens
 * that were being held on behalf of mmap (nof_mmap_access != 0).
 * The scan restarts from the top each time the open lock is dropped.
 */
int
nfs_vnop_mnomap(
	struct vnop_mnomap_args /* {
		struct vnodeop_desc *a_desc;
		vnode_t a_vp;
		vfs_context_t a_context;
	} */ *ap)
{
	vfs_context_t ctx = ap->a_context;
	vnode_t vp = ap->a_vp;
	nfsnode_t np = VTONFS(vp);
	struct nfsmount *nmp;
	struct nfs_open_file *nofp = NULL;
	off_t size;
	int error;

	nmp = VTONMP(vp);
	if (!nmp)
		return (ENXIO);

	/* flush buffers/ubc before we drop the open (in case it's our last open) */
	nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
	if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp)))
		ubc_msync(vp, 0, size, NULL, UBC_PUSHALL | UBC_SYNC);

	/* walk all open files and close all mmap opens */
loop:
	error = nfs_mount_state_in_use_start(nmp, NULL);
	if (error)
		return (error);
	lck_mtx_lock(&np->n_openlock);
	TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
		if (!nofp->nof_mmap_access)
			continue;
		/* found an mmap open - must drop n_openlock before closing it */
		lck_mtx_unlock(&np->n_openlock);
		if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
			/* reopen the file first (e.g. after server restart) */
			nfs_mount_state_in_use_end(nmp, 0);
			error = nfs4_reopen(nofp, NULL);
			if (!error)
				goto loop;
		}
		if (!error)
			error = nfs_open_file_set_busy(nofp, NULL);
		if (error) {
			lck_mtx_lock(&np->n_openlock);
			break;
		}
		/* recheck - the mmap open may have been dropped while unlocked */
		if (nofp->nof_mmap_access) {
			error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
			if (!nfs_mount_state_error_should_restart(error)) {
				if (error) /* not a state-operation-restarting error, so just clear the access */
					NP(np, "nfs_vnop_mnomap: close of mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
				nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
			}
			if (error)
				NP(np, "nfs_vnop_mnomap: error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
		}
		nfs_open_file_clear_busy(nofp);
		nfs_mount_state_in_use_end(nmp, error);
		/* restart the scan since the list may have changed */
		goto loop;
	}
	lck_mtx_unlock(&np->n_openlock);
	nfs_mount_state_in_use_end(nmp, error);
	return (error);
}
b0d623f7 2902
6d2010ae
A
/*
 * Search a node's lock owner list for the owner for this process.
 * If not found and "alloc" is set, then allocate a new one.
 *
 * Matching is by pid AND process start time, so a lock owner left
 * behind by a dead process whose pid was reused is detected as stale
 * and either recycled in place or unlinked.
 * Returns a referenced lock owner, or NULL.
 */
struct nfs_lock_owner *
nfs_lock_owner_find(nfsnode_t np, proc_t p, int alloc)
{
	pid_t pid = proc_pid(p);
	struct nfs_lock_owner *nlop, *newnlop = NULL;

tryagain:
	lck_mtx_lock(&np->n_openlock);
	TAILQ_FOREACH(nlop, &np->n_lock_owners, nlo_link) {
		if (nlop->nlo_pid != pid)
			continue;
		/* same pid and same process start time -> same process */
		if (timevalcmp(&nlop->nlo_pid_start, &p->p_start, ==))
			break;
		/* stale lock owner... reuse it if we can */
		if (nlop->nlo_refcnt) {
			/* still referenced: unlink it and rescan the list */
			TAILQ_REMOVE(&np->n_lock_owners, nlop, nlo_link);
			nlop->nlo_flags &= ~NFS_LOCK_OWNER_LINK;
			lck_mtx_unlock(&np->n_openlock);
			goto tryagain;
		}
		/* unreferenced: recycle it for the new process in place */
		nlop->nlo_pid_start = p->p_start;
		nlop->nlo_seqid = 0;
		nlop->nlo_stategenid = 0;
		break;
	}

	if (!nlop && !newnlop && alloc) {
		/* not found: allocate one outside the lock, then rescan */
		lck_mtx_unlock(&np->n_openlock);
		MALLOC(newnlop, struct nfs_lock_owner *, sizeof(struct nfs_lock_owner), M_TEMP, M_WAITOK);
		if (!newnlop)
			return (NULL);
		bzero(newnlop, sizeof(*newnlop));
		lck_mtx_init(&newnlop->nlo_lock, nfs_open_grp, LCK_ATTR_NULL);
		newnlop->nlo_pid = pid;
		newnlop->nlo_pid_start = p->p_start;
		newnlop->nlo_name = OSAddAtomic(1, &nfs_lock_owner_seqnum);
		TAILQ_INIT(&newnlop->nlo_locks);
		goto tryagain;
	}
	if (!nlop && newnlop) {
		/* still not found on the rescan: link in our new one */
		newnlop->nlo_flags |= NFS_LOCK_OWNER_LINK;
		TAILQ_INSERT_HEAD(&np->n_lock_owners, newnlop, nlo_link);
		nlop = newnlop;
	}
	lck_mtx_unlock(&np->n_openlock);

	/* lost the race to another thread: toss our unused allocation */
	if (newnlop && (nlop != newnlop))
		nfs_lock_owner_destroy(newnlop);

	if (nlop)
		nfs_lock_owner_ref(nlop);

	return (nlop);
}
b0d623f7
A
2961
2962/*
2963 * destroy a lock owner that's no longer needed
2964 */
2965void
2966nfs_lock_owner_destroy(struct nfs_lock_owner *nlop)
2967{
2968 if (nlop->nlo_open_owner) {
2969 nfs_open_owner_rele(nlop->nlo_open_owner);
2970 nlop->nlo_open_owner = NULL;
2971 }
2972 lck_mtx_destroy(&nlop->nlo_lock, nfs_open_grp);
2973 FREE(nlop, M_TEMP);
2974}
2975
2976/*
2977 * acquire a reference count on a lock owner
2978 */
2979void
2980nfs_lock_owner_ref(struct nfs_lock_owner *nlop)
2981{
2982 lck_mtx_lock(&nlop->nlo_lock);
2983 nlop->nlo_refcnt++;
2984 lck_mtx_unlock(&nlop->nlo_lock);
2985}
2986
2987/*
2988 * drop a reference count on a lock owner and destroy it if
2989 * it is no longer referenced and no longer on the mount's list.
2990 */
2991void
2992nfs_lock_owner_rele(struct nfs_lock_owner *nlop)
2993{
2994 lck_mtx_lock(&nlop->nlo_lock);
2995 if (nlop->nlo_refcnt < 1)
2996 panic("nfs_lock_owner_rele: no refcnt");
2997 nlop->nlo_refcnt--;
2998 if (!nlop->nlo_refcnt && (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY))
2999 panic("nfs_lock_owner_rele: busy");
3000 /* XXX we may potentially want to clean up idle/unused lock owner structures */
3001 if (nlop->nlo_refcnt || (nlop->nlo_flags & NFS_LOCK_OWNER_LINK)) {
3002 lck_mtx_unlock(&nlop->nlo_lock);
3003 return;
3004 }
3005 /* owner is no longer referenced or linked to mount, so destroy it */
3006 lck_mtx_unlock(&nlop->nlo_lock);
3007 nfs_lock_owner_destroy(nlop);
3008}
3009
/*
 * Mark a lock owner as busy because we are about to
 * start an operation that uses and updates lock owner state.
 *
 * Sleeps (in 2-second chunks, rechecking for signals) while another
 * thread holds the owner busy.  Returns 0 with the BUSY flag set,
 * or an error from nfs_sigintr()/ENXIO.
 */
int
nfs_lock_owner_set_busy(struct nfs_lock_owner *nlop, thread_t thd)
{
	struct nfsmount *nmp;
	struct timespec ts = {2, 0};
	int error = 0, slpflag;

	nmp = nlop->nlo_open_owner->noo_mount;
	if (!nmp)
		return (ENXIO);
	/* interruptible sleep only for INTR mounts when we have a thread */
	slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;

	lck_mtx_lock(&nlop->nlo_lock);
	while (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY) {
		if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
			break;
		nlop->nlo_flags |= NFS_LOCK_OWNER_WANT;
		msleep(nlop, &nlop->nlo_lock, slpflag, "nfs_lock_owner_set_busy", &ts);
		/* only catch the signal on the first sleep */
		slpflag = 0;
	}
	if (!error)
		nlop->nlo_flags |= NFS_LOCK_OWNER_BUSY;
	lck_mtx_unlock(&nlop->nlo_lock);

	return (error);
}
3040
3041/*
3042 * Clear the busy flag on a lock owner and wake up anyone waiting
3043 * to mark it busy.
3044 */
3045void
3046nfs_lock_owner_clear_busy(struct nfs_lock_owner *nlop)
3047{
3048 int wanted;
3049
3050 lck_mtx_lock(&nlop->nlo_lock);
3051 if (!(nlop->nlo_flags & NFS_LOCK_OWNER_BUSY))
3052 panic("nfs_lock_owner_clear_busy");
3053 wanted = (nlop->nlo_flags & NFS_LOCK_OWNER_WANT);
3054 nlop->nlo_flags &= ~(NFS_LOCK_OWNER_BUSY|NFS_LOCK_OWNER_WANT);
3055 lck_mtx_unlock(&nlop->nlo_lock);
3056 if (wanted)
3057 wakeup(nlop);
3058}
3059
3060/*
3061 * Insert a held lock into a lock owner's sorted list.
3062 * (flock locks are always inserted at the head the list)
3063 */
3064void
3065nfs_lock_owner_insert_held_lock(struct nfs_lock_owner *nlop, struct nfs_file_lock *newnflp)
3066{
3067 struct nfs_file_lock *nflp;
3068
3069 /* insert new lock in lock owner's held lock list */
3070 lck_mtx_lock(&nlop->nlo_lock);
3071 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) {
3072 TAILQ_INSERT_HEAD(&nlop->nlo_locks, newnflp, nfl_lolink);
3073 } else {
3074 TAILQ_FOREACH(nflp, &nlop->nlo_locks, nfl_lolink) {
3075 if (newnflp->nfl_start < nflp->nfl_start)
3076 break;
3077 }
3078 if (nflp)
3079 TAILQ_INSERT_BEFORE(nflp, newnflp, nfl_lolink);
3080 else
3081 TAILQ_INSERT_TAIL(&nlop->nlo_locks, newnflp, nfl_lolink);
3082 }
3083 lck_mtx_unlock(&nlop->nlo_lock);
3084}
3085
3086/*
3087 * Get a file lock structure for this lock owner.
3088 */
3089struct nfs_file_lock *
3090nfs_file_lock_alloc(struct nfs_lock_owner *nlop)
3091{
3092 struct nfs_file_lock *nflp = NULL;
3093
3094 lck_mtx_lock(&nlop->nlo_lock);
3095 if (!nlop->nlo_alock.nfl_owner) {
3096 nflp = &nlop->nlo_alock;
3097 nflp->nfl_owner = nlop;
3098 }
3099 lck_mtx_unlock(&nlop->nlo_lock);
3100 if (!nflp) {
3101 MALLOC(nflp, struct nfs_file_lock *, sizeof(struct nfs_file_lock), M_TEMP, M_WAITOK);
3102 if (!nflp)
3103 return (NULL);
3104 bzero(nflp, sizeof(*nflp));
3105 nflp->nfl_flags |= NFS_FILE_LOCK_ALLOC;
3106 nflp->nfl_owner = nlop;
3107 }
3108 nfs_lock_owner_ref(nlop);
3109 return (nflp);
3110}
3111
3112/*
3113 * destroy the given NFS file lock structure
3114 */
3115void
3116nfs_file_lock_destroy(struct nfs_file_lock *nflp)
3117{
3118 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3119
3120 if (nflp->nfl_flags & NFS_FILE_LOCK_ALLOC) {
3121 nflp->nfl_owner = NULL;
3122 FREE(nflp, M_TEMP);
3123 } else {
3124 lck_mtx_lock(&nlop->nlo_lock);
3125 bzero(nflp, sizeof(nflp));
3126 lck_mtx_unlock(&nlop->nlo_lock);
3127 }
3128 nfs_lock_owner_rele(nlop);
3129}
3130
3131/*
3132 * Check if one file lock conflicts with another.
3133 * (nflp1 is the new lock. nflp2 is the existing lock.)
3134 */
3135int
3136nfs_file_lock_conflict(struct nfs_file_lock *nflp1, struct nfs_file_lock *nflp2, int *willsplit)
3137{
3138 /* no conflict if lock is dead */
3139 if ((nflp1->nfl_flags & NFS_FILE_LOCK_DEAD) || (nflp2->nfl_flags & NFS_FILE_LOCK_DEAD))
3140 return (0);
3141 /* no conflict if it's ours - unless the lock style doesn't match */
3142 if ((nflp1->nfl_owner == nflp2->nfl_owner) &&
3143 ((nflp1->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == (nflp2->nfl_flags & NFS_FILE_LOCK_STYLE_MASK))) {
3144 if (willsplit && (nflp1->nfl_type != nflp2->nfl_type) &&
3145 (nflp1->nfl_start > nflp2->nfl_start) &&
3146 (nflp1->nfl_end < nflp2->nfl_end))
3147 *willsplit = 1;
3148 return (0);
3149 }
3150 /* no conflict if ranges don't overlap */
3151 if ((nflp1->nfl_start > nflp2->nfl_end) || (nflp1->nfl_end < nflp2->nfl_start))
3152 return (0);
3153 /* no conflict if neither lock is exclusive */
3154 if ((nflp1->nfl_type != F_WRLCK) && (nflp2->nfl_type != F_WRLCK))
3155 return (0);
3156 /* conflict */
3157 return (1);
3158}
3159
/*
 * Send an NFSv4 LOCK RPC to the server.
 *
 * Builds a PUTFH+GETATTR+LOCK compound.  A "new locker" (a lock owner
 * with no server-side lock state for the current state generation) sends
 * an open_to_lock_owner4 using the open stateid/seqids; an established
 * locker sends its existing lock stateid/seqid.  On success the returned
 * lock stateid is stored in the lock owner.
 */
int
nfs4_setlock_rpc(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	struct nfs_file_lock *nflp,
	int reclaim,
	int flags,
	thread_t thd,
	kauth_cred_t cred)
{
	struct nfs_lock_owner *nlop = nflp->nfl_owner;
	struct nfsmount *nmp;
	struct nfsm_chain nmreq, nmrep;
	uint64_t xid;
	uint32_t locktype;
	int error = 0, lockerror = ENOENT, newlocker, numops, status;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (!nmp)
		return (ENXIO);
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	/* new locker if the server hasn't seen lock state for this owner yet */
	newlocker = (nlop->nlo_stategenid != nmp->nm_stategenid);
	/* map (blocking?, read/write) onto the four NFSv4 lock type values */
	locktype = (nflp->nfl_flags & NFS_FILE_LOCK_WAIT) ?
			((nflp->nfl_type == F_WRLCK) ?
				NFS_LOCK_TYPE_WRITEW :
				NFS_LOCK_TYPE_READW) :
			((nflp->nfl_type == F_WRLCK) ?
				NFS_LOCK_TYPE_WRITE :
				NFS_LOCK_TYPE_READ);
	if (newlocker) {
		/* new lockers use the open file/owner seqids, so hold them busy */
		error = nfs_open_file_set_busy(nofp, thd);
		if (error)
			return (error);
		error = nfs_open_owner_set_busy(nofp->nof_owner, thd);
		if (error) {
			nfs_open_file_clear_busy(nofp);
			return (error);
		}
		if (!nlop->nlo_open_owner) {
			nfs_open_owner_ref(nofp->nof_owner);
			nlop->nlo_open_owner = nofp->nof_owner;
		}
	}
	error = nfs_lock_owner_set_busy(nlop, thd);
	if (error) {
		if (newlocker) {
			nfs_open_owner_clear_busy(nofp->nof_owner);
			nfs_open_file_clear_busy(nofp);
		}
		return (error);
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, LOCK
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 33 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "lock", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCK);
	nfsm_chain_add_32(error, &nmreq, locktype);
	nfsm_chain_add_32(error, &nmreq, reclaim);
	nfsm_chain_add_64(error, &nmreq, nflp->nfl_start);
	nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(nflp->nfl_start, nflp->nfl_end));
	nfsm_chain_add_32(error, &nmreq, newlocker);
	if (newlocker) {
		/* open_to_lock_owner4: derive lock state from the open stateid */
		nfsm_chain_add_32(error, &nmreq, nofp->nof_owner->noo_seqid);
		nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
		nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
		nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
	} else {
		/* exist_lock_owner4: use the established lock stateid */
		nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
		nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
	}
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags|R_NOINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCK);
	/* seqids advance whether or not the LOCK succeeded */
	nfs_owner_seqid_increment(newlocker ? nofp->nof_owner : NULL, nlop, error);
	nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);

	/* Update the lock owner's stategenid once it appears the server has state for it. */
	/* We determine this by noting the request was successful (we got a stateid). */
	if (newlocker && !error)
		nlop->nlo_stategenid = nmp->nm_stategenid;
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfs_lock_owner_clear_busy(nlop);
	if (newlocker) {
		nfs_open_owner_clear_busy(nofp->nof_owner);
		nfs_open_file_clear_busy(nofp);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}
3283
/*
 * Send an NFSv4 LOCKU RPC to the server.
 *
 * Builds a PUTFH+GETATTR+LOCKU compound releasing [start,end] for the
 * given lock owner.  The lock owner's seqid is advanced and its stateid
 * refreshed from the reply.
 */
int
nfs4_unlock_rpc(
	nfsnode_t np,
	struct nfs_lock_owner *nlop,
	int type,
	uint64_t start,
	uint64_t end,
	int flags,
	thread_t thd,
	kauth_cred_t cred)
{
	struct nfsmount *nmp;
	struct nfsm_chain nmreq, nmrep;
	uint64_t xid;
	int error = 0, lockerror = ENOENT, numops, status;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (!nmp)
		return (ENXIO);
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	/* hold the lock owner busy while using/updating its seqid and stateid */
	error = nfs_lock_owner_set_busy(nlop, NULL);
	if (error)
		return (error);

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, LOCKU
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "unlock", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKU);
	nfsm_chain_add_32(error, &nmreq, (type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
	nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
	nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
	nfsm_chain_add_64(error, &nmreq, start);
	nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags|R_NOINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKU);
	/* seqid advances whether or not the LOCKU succeeded */
	nfs_owner_seqid_increment(NULL, nlop, error);
	nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfs_lock_owner_clear_busy(nlop);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}
3361
/*
 * Send an NFSv4 LOCKT RPC to the server.
 *
 * Tests whether the lock described by "fl" over [start,end] could be
 * granted.  On NFSERR_DENIED the conflicting lock's range/type are
 * decoded into "fl" (l_pid is left 0 - the owner is remote); if no
 * conflict is found, fl->l_type is set to F_UNLCK.
 */
int
nfs4_getlock_rpc(
	nfsnode_t np,
	struct nfs_lock_owner *nlop,
	struct flock *fl,
	uint64_t start,
	uint64_t end,
	vfs_context_t ctx)
{
	struct nfsmount *nmp;
	struct nfsm_chain nmreq, nmrep;
	uint64_t xid, val64 = 0;
	uint32_t val = 0;
	int error = 0, lockerror, numops, status;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (!nmp)
		return (ENXIO);
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	lockerror = ENOENT;
	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, LOCKT
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "locktest", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKT);
	nfsm_chain_add_32(error, &nmreq, (fl->l_type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
	nfsm_chain_add_64(error, &nmreq, start);
	nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
	nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKT);
	if (error == NFSERR_DENIED) {
		/* a conflicting lock was found - decode LOCK4denied into fl */
		error = 0;
		nfsm_chain_get_64(error, &nmrep, fl->l_start);
		nfsm_chain_get_64(error, &nmrep, val64);
		/* NFSv4 "whole file" length (all ones) maps to l_len == 0 */
		fl->l_len = (val64 == UINT64_MAX) ? 0 : val64;
		nfsm_chain_get_32(error, &nmrep, val);
		fl->l_type = (val == NFS_LOCK_TYPE_WRITE) ? F_WRLCK : F_RDLCK;
		fl->l_pid = 0;
		fl->l_whence = SEEK_SET;
	} else if (!error) {
		/* no conflict */
		fl->l_type = F_UNLCK;
	}
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}
3443
6d2010ae
A
3444
/*
 * Check for any conflicts with the given lock.
 *
 * Checking for a lock doesn't require the file to be opened.
 * So we skip all the open owner, open file, lock owner work
 * and just check for a conflicting lock.
 *
 * Answers locally when possible (a locally-held conflicting lock, or a
 * write delegation proving no remote locks exist); otherwise asks the
 * server via the version-specific getlock RPC.
 */
int
nfs_advlock_getlock(
	nfsnode_t np,
	struct nfs_lock_owner *nlop,
	struct flock *fl,
	uint64_t start,
	uint64_t end,
	vfs_context_t ctx)
{
	struct nfsmount *nmp;
	struct nfs_file_lock *nflp;
	int error = 0, answered = 0;

	nmp = NFSTONMP(np);
	if (!nmp)
		return (ENXIO);

restart:
	if ((error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx))))
		return (error);

	lck_mtx_lock(&np->n_openlock);
	/* scan currently held locks for conflict */
	TAILQ_FOREACH(nflp, &np->n_locks, nfl_link) {
		if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
			continue;
		/* overlapping range with at least one exclusive lock -> conflict */
		if ((start <= nflp->nfl_end) && (end >= nflp->nfl_start) &&
		    ((fl->l_type == F_WRLCK) || (nflp->nfl_type == F_WRLCK)))
			break;
	}
	if (nflp) {
		/* found a conflicting lock */
		fl->l_type = nflp->nfl_type;
		fl->l_pid = (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_FLOCK) ? -1 : nflp->nfl_owner->nlo_pid;
		fl->l_start = nflp->nfl_start;
		fl->l_len = NFS_FLOCK_LENGTH(nflp->nfl_start, nflp->nfl_end);
		fl->l_whence = SEEK_SET;
		answered = 1;
	} else if ((np->n_openflags & N_DELEG_WRITE) && !(np->n_openflags & N_DELEG_RETURN)) {
		/*
		 * If we have a write delegation, we know there can't be other
		 * locks on the server. So the answer is no conflicting lock found.
		 */
		fl->l_type = F_UNLCK;
		answered = 1;
	}
	lck_mtx_unlock(&np->n_openlock);
	if (answered) {
		nfs_mount_state_in_use_end(nmp, 0);
		return (0);
	}

	/* no conflict found locally, so ask the server */
	error = nmp->nm_funcs->nf_getlock_rpc(np, nlop, fl, start, end, ctx);

	if (nfs_mount_state_in_use_end(nmp, error))
		goto restart;
	return (error);
}
3511
b0d623f7
A
3512/*
3513 * Acquire a file lock for the given range.
3514 *
3515 * Add the lock (request) to the lock queue.
3516 * Scan the lock queue for any conflicting locks.
3517 * If a conflict is found, block or return an error.
3518 * Once end of queue is reached, send request to the server.
3519 * If the server grants the lock, scan the lock queue and
3520 * update any existing locks. Then (optionally) scan the
3521 * queue again to coalesce any locks adjacent to the new one.
3522 */
3523int
6d2010ae 3524nfs_advlock_setlock(
b0d623f7
A
3525 nfsnode_t np,
3526 struct nfs_open_file *nofp,
3527 struct nfs_lock_owner *nlop,
3528 int op,
3529 uint64_t start,
3530 uint64_t end,
3531 int style,
3532 short type,
3533 vfs_context_t ctx)
3534{
3535 struct nfsmount *nmp;
3536 struct nfs_file_lock *newnflp, *nflp, *nflp2 = NULL, *nextnflp, *flocknflp = NULL;
3537 struct nfs_file_lock *coalnflp;
3538 int error = 0, error2, willsplit = 0, delay, slpflag, busy = 0, inuse = 0, restart, inqueue = 0;
3539 struct timespec ts = {1, 0};
3540
3541 nmp = NFSTONMP(np);
3542 if (!nmp)
3543 return (ENXIO);
6d2010ae
A
3544 slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
3545
3546 if ((type != F_RDLCK) && (type != F_WRLCK))
3547 return (EINVAL);
b0d623f7
A
3548
3549 /* allocate a new lock */
3550 newnflp = nfs_file_lock_alloc(nlop);
3551 if (!newnflp)
3552 return (ENOLCK);
3553 newnflp->nfl_start = start;
3554 newnflp->nfl_end = end;
3555 newnflp->nfl_type = type;
3556 if (op == F_SETLKW)
3557 newnflp->nfl_flags |= NFS_FILE_LOCK_WAIT;
3558 newnflp->nfl_flags |= style;
3559 newnflp->nfl_flags |= NFS_FILE_LOCK_BLOCKED;
3560
3561 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && (type == F_WRLCK)) {
3562 /*
3563 * For exclusive flock-style locks, if we block waiting for the
3564 * lock, we need to first release any currently held shared
3565 * flock-style lock. So, the first thing we do is check if we
3566 * have a shared flock-style lock.
3567 */
3568 nflp = TAILQ_FIRST(&nlop->nlo_locks);
3569 if (nflp && ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_FLOCK))
3570 nflp = NULL;
3571 if (nflp && (nflp->nfl_type != F_RDLCK))
3572 nflp = NULL;
3573 flocknflp = nflp;
3574 }
3575
3576restart:
3577 restart = 0;
6d2010ae 3578 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
b0d623f7
A
3579 if (error)
3580 goto error_out;
3581 inuse = 1;
6d2010ae
A
3582 if (np->n_flag & NREVOKE) {
3583 error = EIO;
3584 nfs_mount_state_in_use_end(nmp, 0);
3585 inuse = 0;
3586 goto error_out;
3587 }
b0d623f7
A
3588 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
3589 nfs_mount_state_in_use_end(nmp, 0);
3590 inuse = 0;
6d2010ae
A
3591 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
3592 if (error)
3593 goto error_out;
b0d623f7
A
3594 goto restart;
3595 }
3596
3597 lck_mtx_lock(&np->n_openlock);
3598 if (!inqueue) {
3599 /* insert new lock at beginning of list */
3600 TAILQ_INSERT_HEAD(&np->n_locks, newnflp, nfl_link);
3601 inqueue = 1;
3602 }
3603
3604 /* scan current list of locks (held and pending) for conflicts */
6d2010ae
A
3605 for (nflp = TAILQ_NEXT(newnflp, nfl_link); nflp; nflp = nextnflp) {
3606 nextnflp = TAILQ_NEXT(nflp, nfl_link);
b0d623f7
A
3607 if (!nfs_file_lock_conflict(newnflp, nflp, &willsplit))
3608 continue;
3609 /* Conflict */
3610 if (!(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
3611 error = EAGAIN;
3612 break;
3613 }
3614 /* Block until this lock is no longer held. */
3615 if (nflp->nfl_blockcnt == UINT_MAX) {
3616 error = ENOLCK;
3617 break;
3618 }
3619 nflp->nfl_blockcnt++;
3620 do {
3621 if (flocknflp) {
3622 /* release any currently held shared lock before sleeping */
3623 lck_mtx_unlock(&np->n_openlock);
3624 nfs_mount_state_in_use_end(nmp, 0);
3625 inuse = 0;
6d2010ae 3626 error = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
b0d623f7
A
3627 flocknflp = NULL;
3628 if (!error)
6d2010ae 3629 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
b0d623f7
A
3630 if (error) {
3631 lck_mtx_lock(&np->n_openlock);
3632 break;
3633 }
3634 inuse = 1;
3635 lck_mtx_lock(&np->n_openlock);
3636 /* no need to block/sleep if the conflict is gone */
3637 if (!nfs_file_lock_conflict(newnflp, nflp, NULL))
3638 break;
3639 }
6d2010ae
A
3640 msleep(nflp, &np->n_openlock, slpflag, "nfs_advlock_setlock_blocked", &ts);
3641 slpflag = 0;
b0d623f7
A
3642 error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
3643 if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
3644 /* looks like we have a recover pending... restart */
3645 restart = 1;
3646 lck_mtx_unlock(&np->n_openlock);
3647 nfs_mount_state_in_use_end(nmp, 0);
3648 inuse = 0;
3649 lck_mtx_lock(&np->n_openlock);
3650 break;
3651 }
6d2010ae
A
3652 if (!error && (np->n_flag & NREVOKE))
3653 error = EIO;
b0d623f7
A
3654 } while (!error && nfs_file_lock_conflict(newnflp, nflp, NULL));
3655 nflp->nfl_blockcnt--;
3656 if ((nflp->nfl_flags & NFS_FILE_LOCK_DEAD) && !nflp->nfl_blockcnt) {
3657 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
3658 nfs_file_lock_destroy(nflp);
3659 }
3660 if (error || restart)
3661 break;
6d2010ae
A
3662 /* We have released n_openlock and we can't trust that nextnflp is still valid. */
3663 /* So, start this lock-scanning loop over from where it started. */
3664 nextnflp = TAILQ_NEXT(newnflp, nfl_link);
b0d623f7
A
3665 }
3666 lck_mtx_unlock(&np->n_openlock);
3667 if (restart)
3668 goto restart;
3669 if (error)
3670 goto error_out;
3671
3672 if (willsplit) {
3673 /*
3674 * It looks like this operation is splitting a lock.
3675 * We allocate a new lock now so we don't have to worry
3676 * about the allocation failing after we've updated some state.
3677 */
3678 nflp2 = nfs_file_lock_alloc(nlop);
3679 if (!nflp2) {
3680 error = ENOLCK;
3681 goto error_out;
3682 }
3683 }
3684
3685 /* once scan for local conflicts is clear, send request to server */
6d2010ae 3686 if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx))))
b0d623f7
A
3687 goto error_out;
3688 busy = 1;
3689 delay = 0;
3690 do {
6d2010ae
A
3691 /* do we have a delegation? (that we're not returning?) */
3692 if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN)) {
3693 if (np->n_openflags & N_DELEG_WRITE) {
3694 /* with a write delegation, just take the lock delegated */
3695 newnflp->nfl_flags |= NFS_FILE_LOCK_DELEGATED;
3696 error = 0;
3697 /* make sure the lock owner knows its open owner */
3698 if (!nlop->nlo_open_owner) {
3699 nfs_open_owner_ref(nofp->nof_owner);
3700 nlop->nlo_open_owner = nofp->nof_owner;
3701 }
3702 break;
3703 } else {
3704 /*
3705 * If we don't have any non-delegated opens but we do have
3706 * delegated opens, then we need to first claim the delegated
3707 * opens so that the lock request on the server can be associated
3708 * with an open it knows about.
3709 */
3710 if ((!nofp->nof_rw_drw && !nofp->nof_w_drw && !nofp->nof_r_drw &&
3711 !nofp->nof_rw_dw && !nofp->nof_w_dw && !nofp->nof_r_dw &&
3712 !nofp->nof_rw && !nofp->nof_w && !nofp->nof_r) &&
3713 (nofp->nof_d_rw_drw || nofp->nof_d_w_drw || nofp->nof_d_r_drw ||
3714 nofp->nof_d_rw_dw || nofp->nof_d_w_dw || nofp->nof_d_r_dw ||
3715 nofp->nof_d_rw || nofp->nof_d_w || nofp->nof_d_r)) {
3716 error = nfs4_claim_delegated_state_for_open_file(nofp, 0);
3717 if (error)
3718 break;
3719 }
3720 }
3721 }
3722 if (np->n_flag & NREVOKE)
3723 error = EIO;
3724 if (!error)
3725 error = nmp->nm_funcs->nf_setlock_rpc(np, nofp, newnflp, 0, 0, vfs_context_thread(ctx), vfs_context_ucred(ctx));
b0d623f7
A
3726 if (!error || ((error != NFSERR_DENIED) && (error != NFSERR_GRACE)))
3727 break;
3728 /* request was denied due to either conflict or grace period */
6d2010ae 3729 if ((error == NFSERR_DENIED) && !(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
b0d623f7
A
3730 error = EAGAIN;
3731 break;
3732 }
3733 if (flocknflp) {
3734 /* release any currently held shared lock before sleeping */
3735 nfs_open_state_clear_busy(np);
3736 busy = 0;
3737 nfs_mount_state_in_use_end(nmp, 0);
3738 inuse = 0;
6d2010ae 3739 error2 = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
b0d623f7
A
3740 flocknflp = NULL;
3741 if (!error2)
6d2010ae 3742 error2 = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
b0d623f7
A
3743 if (!error2) {
3744 inuse = 1;
6d2010ae 3745 error2 = nfs_open_state_set_busy(np, vfs_context_thread(ctx));
b0d623f7
A
3746 }
3747 if (error2) {
3748 error = error2;
3749 break;
3750 }
3751 busy = 1;
3752 }
6d2010ae
A
3753 /*
3754 * Wait a little bit and send the request again.
3755 * Except for retries of blocked v2/v3 request where we've already waited a bit.
3756 */
3757 if ((nmp->nm_vers >= NFS_VER4) || (error == NFSERR_GRACE)) {
3758 if (error == NFSERR_GRACE)
3759 delay = 4;
3760 if (delay < 4)
3761 delay++;
3762 tsleep(newnflp, slpflag, "nfs_advlock_setlock_delay", delay * (hz/2));
3763 slpflag = 0;
3764 }
b0d623f7
A
3765 error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
3766 if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
3767 /* looks like we have a recover pending... restart */
3768 nfs_open_state_clear_busy(np);
3769 busy = 0;
3770 nfs_mount_state_in_use_end(nmp, 0);
3771 inuse = 0;
3772 goto restart;
3773 }
6d2010ae
A
3774 if (!error && (np->n_flag & NREVOKE))
3775 error = EIO;
b0d623f7
A
3776 } while (!error);
3777
3778error_out:
3779 if (nfs_mount_state_error_should_restart(error)) {
3780 /* looks like we need to restart this operation */
3781 if (busy) {
3782 nfs_open_state_clear_busy(np);
3783 busy = 0;
3784 }
3785 if (inuse) {
3786 nfs_mount_state_in_use_end(nmp, error);
3787 inuse = 0;
3788 }
3789 goto restart;
3790 }
3791 lck_mtx_lock(&np->n_openlock);
3792 newnflp->nfl_flags &= ~NFS_FILE_LOCK_BLOCKED;
3793 if (error) {
3794 newnflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
3795 if (newnflp->nfl_blockcnt) {
3796 /* wake up anyone blocked on this lock */
3797 wakeup(newnflp);
3798 } else {
3799 /* remove newnflp from lock list and destroy */
3800 TAILQ_REMOVE(&np->n_locks, newnflp, nfl_link);
3801 nfs_file_lock_destroy(newnflp);
3802 }
3803 lck_mtx_unlock(&np->n_openlock);
3804 if (busy)
3805 nfs_open_state_clear_busy(np);
3806 if (inuse)
3807 nfs_mount_state_in_use_end(nmp, error);
3808 if (nflp2)
3809 nfs_file_lock_destroy(nflp2);
3810 return (error);
3811 }
3812
3813 /* server granted the lock */
3814
3815 /*
3816 * Scan for locks to update.
3817 *
3818 * Locks completely covered are killed.
3819 * At most two locks may need to be clipped.
3820 * It's possible that a single lock may need to be split.
3821 */
3822 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
3823 if (nflp == newnflp)
3824 continue;
3825 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
3826 continue;
3827 if (nflp->nfl_owner != nlop)
3828 continue;
3829 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK))
3830 continue;
3831 if ((newnflp->nfl_start > nflp->nfl_end) || (newnflp->nfl_end < nflp->nfl_start))
3832 continue;
3833 /* here's one to update */
3834 if ((newnflp->nfl_start <= nflp->nfl_start) && (newnflp->nfl_end >= nflp->nfl_end)) {
3835 /* The entire lock is being replaced. */
3836 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
3837 lck_mtx_lock(&nlop->nlo_lock);
3838 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
3839 lck_mtx_unlock(&nlop->nlo_lock);
3840 /* lock will be destroyed below, if no waiters */
3841 } else if ((newnflp->nfl_start > nflp->nfl_start) && (newnflp->nfl_end < nflp->nfl_end)) {
3842 /* We're replacing a range in the middle of a lock. */
3843 /* The current lock will be split into two locks. */
3844 /* Update locks and insert new lock after current lock. */
6d2010ae 3845 nflp2->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK|NFS_FILE_LOCK_DELEGATED));
b0d623f7
A
3846 nflp2->nfl_type = nflp->nfl_type;
3847 nflp2->nfl_start = newnflp->nfl_end + 1;
3848 nflp2->nfl_end = nflp->nfl_end;
3849 nflp->nfl_end = newnflp->nfl_start - 1;
3850 TAILQ_INSERT_AFTER(&np->n_locks, nflp, nflp2, nfl_link);
3851 nfs_lock_owner_insert_held_lock(nlop, nflp2);
3852 nextnflp = nflp2;
3853 nflp2 = NULL;
3854 } else if (newnflp->nfl_start > nflp->nfl_start) {
3855 /* We're replacing the end of a lock. */
3856 nflp->nfl_end = newnflp->nfl_start - 1;
3857 } else if (newnflp->nfl_end < nflp->nfl_end) {
3858 /* We're replacing the start of a lock. */
3859 nflp->nfl_start = newnflp->nfl_end + 1;
3860 }
3861 if (nflp->nfl_blockcnt) {
3862 /* wake up anyone blocked on this lock */
3863 wakeup(nflp);
3864 } else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
3865 /* remove nflp from lock list and destroy */
3866 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
3867 nfs_file_lock_destroy(nflp);
3868 }
3869 }
3870
3871 nfs_lock_owner_insert_held_lock(nlop, newnflp);
3872
3873 /*
3874 * POSIX locks should be coalesced when possible.
3875 */
3876 if ((style == NFS_FILE_LOCK_STYLE_POSIX) && (nofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)) {
3877 /*
3878 * Walk through the lock queue and check each of our held locks with
3879 * the previous and next locks in the lock owner's "held lock list".
3880 * If the two locks can be coalesced, we merge the current lock into
3881 * the other (previous or next) lock. Merging this way makes sure that
3882 * lock ranges are always merged forward in the lock queue. This is
3883 * important because anyone blocked on the lock being "merged away"
3884 * will still need to block on that range and it will simply continue
3885 * checking locks that are further down the list.
3886 */
3887 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
3888 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
3889 continue;
3890 if (nflp->nfl_owner != nlop)
3891 continue;
3892 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_POSIX)
3893 continue;
3894 if (((coalnflp = TAILQ_PREV(nflp, nfs_file_lock_queue, nfl_lolink))) &&
3895 ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
3896 (coalnflp->nfl_type == nflp->nfl_type) &&
3897 (coalnflp->nfl_end == (nflp->nfl_start - 1))) {
3898 coalnflp->nfl_end = nflp->nfl_end;
3899 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
3900 lck_mtx_lock(&nlop->nlo_lock);
3901 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
3902 lck_mtx_unlock(&nlop->nlo_lock);
3903 } else if (((coalnflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
3904 ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
3905 (coalnflp->nfl_type == nflp->nfl_type) &&
3906 (coalnflp->nfl_start == (nflp->nfl_end + 1))) {
3907 coalnflp->nfl_start = nflp->nfl_start;
3908 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
3909 lck_mtx_lock(&nlop->nlo_lock);
3910 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
3911 lck_mtx_unlock(&nlop->nlo_lock);
3912 }
3913 if (!(nflp->nfl_flags & NFS_FILE_LOCK_DEAD))
3914 continue;
3915 if (nflp->nfl_blockcnt) {
3916 /* wake up anyone blocked on this lock */
3917 wakeup(nflp);
3918 } else {
3919 /* remove nflp from lock list and destroy */
3920 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
3921 nfs_file_lock_destroy(nflp);
3922 }
3923 }
3924 }
3925
3926 lck_mtx_unlock(&np->n_openlock);
3927 nfs_open_state_clear_busy(np);
3928 nfs_mount_state_in_use_end(nmp, error);
3929
3930 if (nflp2)
3931 nfs_file_lock_destroy(nflp2);
3932 return (error);
3933}
3934
6d2010ae
A
/*
 * Release all (same style) locks within the given range.
 *
 * Walks the node's lock list, sends UNLOCK RPCs to the server for the
 * affected ranges (suppressed for delegated locks, and adjusted when
 * flock-style and POSIX-style locks coexist for this lock owner), then
 * clips, splits, or kills the local lock records to match.
 *
 * np    - the nfsnode whose locks are being released
 * nofp  - open file the locks are associated with
 * nlop  - lock owner whose locks are being released
 * start - first byte of the range to unlock
 * end   - last byte of the range to unlock (UINT64_MAX = to EOF)
 * style - NFS_FILE_LOCK_STYLE_POSIX or NFS_FILE_LOCK_STYLE_FLOCK;
 *         only locks of this style are affected
 *
 * Returns 0 on success or an errno-style error.
 */
int
nfs_advlock_unlock(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	struct nfs_lock_owner *nlop,
	uint64_t start,
	uint64_t end,
	int style,
	vfs_context_t ctx)
{
	struct nfsmount *nmp;
	struct nfs_file_lock *nflp, *nextnflp, *newnflp = NULL;
	int error = 0, willsplit = 0, send_unlock_rpcs = 1;

	nmp = NFSTONMP(np);
	if (!nmp)
		return (ENXIO);

restart:
	if ((error = nfs_mount_state_in_use_start(nmp, NULL)))
		return (error);
	if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
		/* open state was lost; reopen it before touching locks */
		nfs_mount_state_in_use_end(nmp, 0);
		error = nfs4_reopen(nofp, NULL);
		if (error)
			return (error);
		goto restart;
	}
	/* N_OPENBUSY keeps held-lock state stable while we drop n_openlock for RPCs */
	if ((error = nfs_open_state_set_busy(np, NULL))) {
		nfs_mount_state_in_use_end(nmp, error);
		return (error);
	}

	lck_mtx_lock(&np->n_openlock);
	if ((start > 0) && (end < UINT64_MAX) && !willsplit) {
		/*
		 * We may need to allocate a new lock if an existing lock gets split.
		 * So, we first scan the list to check for a split, and if there's
		 * going to be one, we'll allocate one now.
		 */
		TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
			if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
				continue;
			if (nflp->nfl_owner != nlop)
				continue;
			if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style)
				continue;
			if ((start > nflp->nfl_end) || (end < nflp->nfl_start))
				continue;
			/* unlock range strictly inside this lock => split */
			if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
				willsplit = 1;
				break;
			}
		}
		if (willsplit) {
			/* drop all state to allocate, then start over with the new lock in hand */
			lck_mtx_unlock(&np->n_openlock);
			nfs_open_state_clear_busy(np);
			nfs_mount_state_in_use_end(nmp, 0);
			newnflp = nfs_file_lock_alloc(nlop);
			if (!newnflp)
				return (ENOMEM);
			goto restart;
		}
	}

	/*
	 * Free all of our locks in the given range.
	 *
	 * Note that this process requires sending requests to the server.
	 * Because of this, we will release the n_openlock while performing
	 * the unlock RPCs. The N_OPENBUSY state keeps the state of *held*
	 * locks from changing underneath us. However, other entries in the
	 * list may be removed. So we need to be careful walking the list.
	 */

	/*
	 * Don't unlock ranges that are held by other-style locks.
	 * If style is posix, don't send any unlock rpcs if flock is held.
	 * If we unlock an flock, don't send unlock rpcs for any posix-style
	 * ranges held - instead send unlocks for the ranges not held.
	 */
	if ((style == NFS_FILE_LOCK_STYLE_POSIX) &&
	    ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
	    ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK))
		send_unlock_rpcs = 0;
	if ((style == NFS_FILE_LOCK_STYLE_FLOCK) &&
	    ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
	    ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) &&
	    ((nflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
	    ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX)) {
		/*
		 * Unlocking an flock while POSIX ranges are also held:
		 * send unlock RPCs only for the gaps *between* the held
		 * POSIX ranges, walking the lock owner's held-lock list.
		 */
		uint64_t s = 0;
		int type = TAILQ_FIRST(&nlop->nlo_locks)->nfl_type;
		int delegated = (TAILQ_FIRST(&nlop->nlo_locks)->nfl_flags & NFS_FILE_LOCK_DELEGATED);
		while (!delegated && nflp) {
			if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) {
				/* unlock the range preceding this lock */
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, nflp->nfl_start-1, 0,
						vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
				if (error)
					goto out;
				s = nflp->nfl_end+1;
			}
			nflp = TAILQ_NEXT(nflp, nfl_lolink);
		}
		if (!delegated) {
			/* unlock the tail range after the last held POSIX lock */
			lck_mtx_unlock(&np->n_openlock);
			error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, end, 0,
					vfs_context_thread(ctx), vfs_context_ucred(ctx));
			if (nfs_mount_state_error_should_restart(error)) {
				nfs_open_state_clear_busy(np);
				nfs_mount_state_in_use_end(nmp, error);
				goto restart;
			}
			lck_mtx_lock(&np->n_openlock);
			if (error)
				goto out;
		}
		send_unlock_rpcs = 0;
	}

	/* Now update the local lock records to match what was released. */
	TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
		if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
			continue;
		if (nflp->nfl_owner != nlop)
			continue;
		if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style)
			continue;
		if ((start > nflp->nfl_end) || (end < nflp->nfl_start))
			continue;
		/* here's one to unlock */
		if ((start <= nflp->nfl_start) && (end >= nflp->nfl_end)) {
			/* The entire lock is being unlocked. */
			if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, nflp->nfl_end, 0,
						vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			/* refetch next pointer: list may have changed while unlocked */
			nextnflp = TAILQ_NEXT(nflp, nfl_link);
			if (error)
				break;
			nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
			lck_mtx_lock(&nlop->nlo_lock);
			TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
			lck_mtx_unlock(&nlop->nlo_lock);
			/* lock will be destroyed below, if no waiters */
		} else if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
			/* We're unlocking a range in the middle of a lock. */
			/* The current lock will be split into two locks. */
			if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, end, 0,
						vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			if (error)
				break;
			/* update locks and insert new lock after current lock */
			/* (newnflp was preallocated by the willsplit pass above) */
			newnflp->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK|NFS_FILE_LOCK_DELEGATED));
			newnflp->nfl_type = nflp->nfl_type;
			newnflp->nfl_start = end + 1;
			newnflp->nfl_end = nflp->nfl_end;
			nflp->nfl_end = start - 1;
			TAILQ_INSERT_AFTER(&np->n_locks, nflp, newnflp, nfl_link);
			nfs_lock_owner_insert_held_lock(nlop, newnflp);
			nextnflp = newnflp;
			newnflp = NULL;
		} else if (start > nflp->nfl_start) {
			/* We're unlocking the end of a lock. */
			if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, nflp->nfl_end, 0,
						vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			nextnflp = TAILQ_NEXT(nflp, nfl_link);
			if (error)
				break;
			nflp->nfl_end = start - 1;
		} else if (end < nflp->nfl_end) {
			/* We're unlocking the start of a lock. */
			if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, end, 0,
						vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			nextnflp = TAILQ_NEXT(nflp, nfl_link);
			if (error)
				break;
			nflp->nfl_start = end + 1;
		}
		if (nflp->nfl_blockcnt) {
			/* wake up anyone blocked on this lock */
			wakeup(nflp);
		} else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
			/* remove nflp from lock list and destroy */
			TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
			nfs_file_lock_destroy(nflp);
		}
	}
out:
	lck_mtx_unlock(&np->n_openlock);
	nfs_open_state_clear_busy(np);
	nfs_mount_state_in_use_end(nmp, 0);

	/* free the preallocated split lock if it went unused */
	if (newnflp)
		nfs_file_lock_destroy(newnflp);
	return (error);
}
4175
/*
 * NFSv4 advisory file locking
 *
 * VNOP entry point: converts the caller's struct flock (whence/start/len)
 * into an absolute [start, end] byte range, locates the lock owner and
 * open file, and dispatches to getlock/setlock/unlock.
 *
 * Returns 0 on success or an errno-style error.
 */
int
nfs_vnop_advlock(
	struct vnop_advlock_args /* {
		struct vnodeop_desc *a_desc;
		vnode_t a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
		vfs_context_t a_context;
	} */ *ap)
{
	vnode_t vp = ap->a_vp;
	nfsnode_t np = VTONFS(ap->a_vp);
	struct flock *fl = ap->a_fl;
	int op = ap->a_op;
	int flags = ap->a_flags;
	vfs_context_t ctx = ap->a_context;
	struct nfsmount *nmp;
	struct nfs_open_owner *noop = NULL;
	struct nfs_open_file *nofp = NULL;
	struct nfs_lock_owner *nlop = NULL;
	off_t lstart;
	uint64_t start, end;
	int error = 0, modified, style;
	enum vtype vtype;
#define OFF_MAX QUAD_MAX

	nmp = VTONMP(ap->a_vp);
	if (!nmp)
		return (ENXIO);
	/* v2/v3 mounts may have locking explicitly disabled */
	lck_mtx_lock(&nmp->nm_lock);
	if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED)) {
		lck_mtx_unlock(&nmp->nm_lock);
		return (ENOTSUP);
	}
	lck_mtx_unlock(&nmp->nm_lock);

	if (np->n_flag & NREVOKE)
		return (EIO);
	vtype = vnode_vtype(ap->a_vp);
	if (vtype == VDIR) /* ignore lock requests on directories */
		return (0);
	if (vtype != VREG) /* anything other than regular files is invalid */
		return (EINVAL);

	/* Convert the flock structure into a start and end. */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * to fl->l_start when SEEK_CUR is used.
		 */
		lstart = fl->l_start;
		break;
	case SEEK_END:
		/* need to flush, and refetch attributes to make */
		/* sure we have the correct end of file offset */
		if ((error = nfs_node_lock(np)))
			return (error);
		modified = (np->n_flag & NMODIFIED);
		nfs_node_unlock(np);
		if (modified && ((error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1))))
			return (error);
		if ((error = nfs_getattr(np, NULL, ctx, NGA_UNCACHED)))
			return (error);
		/* compute offset relative to the (now fresh) file size */
		nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
		if ((np->n_size > OFF_MAX) ||
		    ((fl->l_start > 0) && (np->n_size > (u_quad_t)(OFF_MAX - fl->l_start))))
			error = EOVERFLOW;
		lstart = np->n_size + fl->l_start;
		nfs_data_unlock(np);
		if (error)
			return (error);
		break;
	default:
		return (EINVAL);
	}
	if (lstart < 0)
		return (EINVAL);
	start = lstart;
	if (fl->l_len == 0) {
		/* zero length means "to end of file" */
		end = UINT64_MAX;
	} else if (fl->l_len > 0) {
		if ((fl->l_len - 1) > (OFF_MAX - lstart))
			return (EOVERFLOW);
		end = start - 1 + fl->l_len;
	} else { /* l_len is negative */
		/* negative length locks the l_len bytes *before* l_start */
		if ((lstart + fl->l_len) < 0)
			return (EINVAL);
		end = start - 1;
		start += fl->l_len;
	}
	/* NFSv2 lock offsets are limited to 32 bits */
	if ((nmp->nm_vers == NFS_VER2) && ((start > INT32_MAX) || (fl->l_len && (end > INT32_MAX))))
		return (EINVAL);

	style = (flags & F_FLOCK) ? NFS_FILE_LOCK_STYLE_FLOCK : NFS_FILE_LOCK_STYLE_POSIX;
	/* flock-style locks always cover the whole file */
	if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && ((start != 0) || (end != UINT64_MAX)))
		return (EINVAL);

	/* find the lock owner, alloc if not unlock */
	nlop = nfs_lock_owner_find(np, vfs_context_proc(ctx), (op != F_UNLCK));
	if (!nlop) {
		/* unlocking with no lock owner means there's nothing to unlock */
		error = (op == F_UNLCK) ? 0 : ENOMEM;
		if (error)
			NP(np, "nfs_vnop_advlock: no lock owner, error %d", error);
		goto out;
	}

	if (op == F_GETLK) {
		error = nfs_advlock_getlock(np, nlop, fl, start, end, ctx);
	} else {
		/* find the open owner */
		noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 0);
		if (!noop) {
			NP(np, "nfs_vnop_advlock: no open owner %d", kauth_cred_getuid(vfs_context_ucred(ctx)));
			error = EPERM;
			goto out;
		}
		/* find the open file */
restart:
		error = nfs_open_file_find(np, noop, &nofp, 0, 0, 0);
		if (error)
			error = EBADF;
		if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
			NP(np, "nfs_vnop_advlock: LOST %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
			error = EIO;
		}
		if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
			/* open state was lost; try to reopen, then look it up again */
			error = nfs4_reopen(nofp, ((op == F_UNLCK) ? NULL : vfs_context_thread(ctx)));
			nofp = NULL;
			if (!error)
				goto restart;
		}
		if (error) {
			NP(np, "nfs_vnop_advlock: no open file %d, %d", error, kauth_cred_getuid(noop->noo_cred));
			goto out;
		}
		if (op == F_UNLCK) {
			error = nfs_advlock_unlock(np, nofp, nlop, start, end, style, ctx);
		} else if ((op == F_SETLK) || (op == F_SETLKW)) {
			/* F_WAIT upgrades a non-blocking set to a blocking one */
			if ((op == F_SETLK) && (flags & F_WAIT))
				op = F_SETLKW;
			error = nfs_advlock_setlock(np, nofp, nlop, op, start, end, style, fl->l_type, ctx);
		} else {
			/* not getlk, unlock or lock? */
			error = EINVAL;
		}
	}

out:
	if (nlop)
		nfs_lock_owner_rele(nlop);
	if (noop)
		nfs_open_owner_rele(noop);
	return (error);
}
4337
4338/*
4339 * Check if an open owner holds any locks on a file.
4340 */
4341int
6d2010ae 4342nfs_check_for_locks(struct nfs_open_owner *noop, struct nfs_open_file *nofp)
b0d623f7
A
4343{
4344 struct nfs_lock_owner *nlop;
4345
4346 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
4347 if (nlop->nlo_open_owner != noop)
4348 continue;
4349 if (!TAILQ_EMPTY(&nlop->nlo_locks))
4350 break;
4351 }
4352 return (nlop ? 1 : 0);
4353}
4354
4355/*
4356 * Reopen simple (no deny, no locks) open state that was lost.
4357 */
6d2010ae 4358int
b0d623f7
A
4359nfs4_reopen(struct nfs_open_file *nofp, thread_t thd)
4360{
4361 struct nfs_open_owner *noop = nofp->nof_owner;
4362 struct nfsmount *nmp = NFSTONMP(nofp->nof_np);
6d2010ae
A
4363 nfsnode_t np = nofp->nof_np;
4364 vnode_t vp = NFSTOV(np);
b0d623f7
A
4365 vnode_t dvp = NULL;
4366 struct componentname cn;
4367 const char *vname = NULL;
6d2010ae 4368 const char *name = NULL;
b0d623f7
A
4369 size_t namelen;
4370 char smallname[128];
4371 char *filename = NULL;
6d2010ae 4372 int error = 0, done = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
b0d623f7
A
4373 struct timespec ts = { 1, 0 };
4374
4375 lck_mtx_lock(&nofp->nof_lock);
4376 while (nofp->nof_flags & NFS_OPEN_FILE_REOPENING) {
4377 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
4378 break;
4379 msleep(&nofp->nof_flags, &nofp->nof_lock, slpflag|(PZERO-1), "nfsreopenwait", &ts);
6d2010ae 4380 slpflag = 0;
b0d623f7 4381 }
6d2010ae 4382 if (error || !(nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
b0d623f7 4383 lck_mtx_unlock(&nofp->nof_lock);
6d2010ae 4384 return (error);
b0d623f7
A
4385 }
4386 nofp->nof_flags |= NFS_OPEN_FILE_REOPENING;
4387 lck_mtx_unlock(&nofp->nof_lock);
4388
6d2010ae
A
4389 nfs_node_lock_force(np);
4390 if ((vnode_vtype(vp) != VDIR) && np->n_sillyrename) {
4391 /*
4392 * The node's been sillyrenamed, so we need to use
4393 * the sillyrename directory/name to do the open.
4394 */
4395 struct nfs_sillyrename *nsp = np->n_sillyrename;
4396 dvp = NFSTOV(nsp->nsr_dnp);
4397 if ((error = vnode_get(dvp))) {
4398 nfs_node_unlock(np);
4399 goto out;
4400 }
4401 name = nsp->nsr_name;
4402 } else {
4403 /*
4404 * [sigh] We can't trust VFS to get the parent right for named
4405 * attribute nodes. (It likes to reparent the nodes after we've
4406 * created them.) Luckily we can probably get the right parent
4407 * from the n_parent we have stashed away.
4408 */
4409 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
4410 (((dvp = np->n_parent)) && (error = vnode_get(dvp))))
4411 dvp = NULL;
4412 if (!dvp)
4413 dvp = vnode_getparent(vp);
4414 vname = vnode_getname(vp);
4415 if (!dvp || !vname) {
4416 if (!error)
4417 error = EIO;
4418 nfs_node_unlock(np);
4419 goto out;
4420 }
4421 name = vname;
b0d623f7
A
4422 }
4423 filename = &smallname[0];
6d2010ae 4424 namelen = snprintf(filename, sizeof(smallname), "%s", name);
b0d623f7 4425 if (namelen >= sizeof(smallname)) {
6d2010ae 4426 MALLOC(filename, char *, namelen+1, M_TEMP, M_WAITOK);
b0d623f7
A
4427 if (!filename) {
4428 error = ENOMEM;
4429 goto out;
4430 }
6d2010ae 4431 snprintf(filename, namelen+1, "%s", name);
b0d623f7 4432 }
6d2010ae 4433 nfs_node_unlock(np);
b0d623f7
A
4434 bzero(&cn, sizeof(cn));
4435 cn.cn_nameptr = filename;
4436 cn.cn_namelen = namelen;
4437
4438restart:
4439 done = 0;
6d2010ae 4440 if ((error = nfs_mount_state_in_use_start(nmp, thd)))
b0d623f7
A
4441 goto out;
4442
4443 if (nofp->nof_rw)
4444 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE);
4445 if (!error && nofp->nof_w)
4446 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE);
4447 if (!error && nofp->nof_r)
4448 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE);
4449
4450 if (nfs_mount_state_in_use_end(nmp, error)) {
4451 if (error == NFSERR_GRACE)
4452 goto restart;
6d2010ae
A
4453 printf("nfs4_reopen: RPC failed, error %d, lost %d, %s\n", error,
4454 (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
b0d623f7
A
4455 error = 0;
4456 goto out;
4457 }
4458 done = 1;
4459out:
6d2010ae
A
4460 if (error && (error != EINTR) && (error != ERESTART))
4461 nfs_revoke_open_state_for_node(np);
b0d623f7
A
4462 lck_mtx_lock(&nofp->nof_lock);
4463 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPENING;
b0d623f7
A
4464 if (done)
4465 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
6d2010ae
A
4466 else if (error)
4467 printf("nfs4_reopen: failed, error %d, lost %d, %s\n", error,
4468 (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
b0d623f7
A
4469 lck_mtx_unlock(&nofp->nof_lock);
4470 if (filename && (filename != &smallname[0]))
4471 FREE(filename, M_TEMP);
4472 if (vname)
4473 vnode_putname(vname);
4474 if (dvp != NULLVP)
4475 vnode_put(dvp);
6d2010ae 4476 return (error);
b0d623f7
A
4477}
4478
4479/*
4480 * Send a normal OPEN RPC to open/create a file.
4481 */
4482int
4483nfs4_open_rpc(
4484 struct nfs_open_file *nofp,
4485 vfs_context_t ctx,
4486 struct componentname *cnp,
4487 struct vnode_attr *vap,
4488 vnode_t dvp,
4489 vnode_t *vpp,
4490 int create,
4491 int share_access,
4492 int share_deny)
4493{
4494 return (nfs4_open_rpc_internal(nofp, ctx, vfs_context_thread(ctx), vfs_context_ucred(ctx),
4495 cnp, vap, dvp, vpp, create, share_access, share_deny));
4496}
4497
4498/*
4499 * Send an OPEN RPC to reopen a file.
4500 */
4501int
4502nfs4_open_reopen_rpc(
4503 struct nfs_open_file *nofp,
4504 thread_t thd,
4505 kauth_cred_t cred,
4506 struct componentname *cnp,
4507 vnode_t dvp,
4508 vnode_t *vpp,
4509 int share_access,
4510 int share_deny)
4511{
6d2010ae
A
4512 return (nfs4_open_rpc_internal(nofp, NULL, thd, cred, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, share_access, share_deny));
4513}
4514
/*
 * Send an OPEN_CONFIRM RPC to confirm an OPEN.
 *
 * Issued when the server set NFS_OPEN_RESULT_CONFIRM in an OPEN reply.
 * Builds a PUTFH(fhp)/OPEN_CONFIRM/GETATTR compound; on success the
 * confirmed open stateid is written back through 'sid', the returned
 * attributes are parsed into 'nvap', and the reply xid is returned via
 * 'xidp' (for attribute-cache ordering by the caller).
 */
int
nfs4_open_confirm_rpc(
	struct nfsmount *nmp,
	nfsnode_t dnp,
	u_char *fhp,
	int fhlen,
	struct nfs_open_owner *noop,
	nfs_stateid *sid,
	thread_t thd,
	kauth_cred_t cred,
	struct nfs_vattr *nvap,
	uint64_t *xidp)
{
	struct nfsm_chain nmreq, nmrep;
	int error = 0, status, numops;
	struct nfsreq_secinfo_args si;

	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, OPEN_CONFIRM, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "open_confirm", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_CONFIRM);
	nfsm_chain_add_stateid(error, &nmreq, sid);
	/* OPEN_CONFIRM carries the open owner's next seqid */
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	/* R_NOINTR: don't let a signal abort the confirm and leave open state half-established */
	error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, &nmrep, xidp, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_CONFIRM);
	/* seqid advances whenever the server processed the op (even on some errors) */
	nfs_owner_seqid_increment(noop, NULL, error);
	/* server returns the now-confirmed stateid; update caller's copy in place */
	nfsm_chain_get_stateid(error, &nmrep, sid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}
4573
/*
 * common OPEN RPC code
 *
 * If create is set, ctx must be passed in.
 * Returns a node on success if no node passed in.
 *
 * Builds and runs a PUTFH/SAVEFH/OPEN/GETATTR(FH)/RESTOREFH/GETATTR
 * compound against the directory 'dvp', handling:
 *   - exclusive vs. guarded create (with fallback when the server
 *     doesn't support exclusive create),
 *   - any delegation the server hands back (kept, or returned if the
 *     server asks for immediate recall),
 *   - the OPEN_CONFIRM round trip when the server requires it,
 *   - creation of a new nfsnode when the caller passed *vpp == NULL.
 */
int
nfs4_open_rpc_internal(
	struct nfs_open_file *nofp,
	vfs_context_t ctx,
	thread_t thd,
	kauth_cred_t cred,
	struct componentname *cnp,
	struct vnode_attr *vap,
	vnode_t dvp,
	vnode_t *vpp,
	int create,
	int share_access,
	int share_deny)
{
	struct nfsmount *nmp;
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfs_vattr nvattr;
	int error = 0, open_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
	int nfsvers, namedattrs, numops, exclusive = 0, gotuid, gotgid;
	u_int64_t xid, savedxid = 0;
	nfsnode_t dnp = VTONFS(dvp);
	nfsnode_t np, newnp = NULL;
	vnode_t newvp = NULL;
	struct nfsm_chain nmreq, nmrep;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
	uint32_t rflags, delegation, recall;
	struct nfs_stateid stateid, dstateid, *sid;
	fhandle_t fh;
	struct nfsreq rq, *req = &rq;
	struct nfs_dulookup dul;
	char sbuf[64], *s;
	uint32_t ace_type, ace_flags, ace_mask, len, slen;
	struct kauth_ace ace;
	struct nfsreq_secinfo_args si;

	if (create && !ctx)
		return (EINVAL);

	nmp = VTONMP(dvp);
	if (!nmp)
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
	/* don't open through a referral trigger directory */
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	np = *vpp ? VTONFS(*vpp) : NULL;
	if (create && vap) {
		exclusive = (vap->va_vaflags & VA_EXCLUSIVE);
		nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
		gotuid = VATTR_IS_ACTIVE(vap, va_uid);
		gotgid = VATTR_IS_ACTIVE(vap, va_gid);
		if (exclusive && (!VATTR_IS_ACTIVE(vap, va_access_time) || !VATTR_IS_ACTIVE(vap, va_modify_time)))
			vap->va_vaflags |= VA_UTIMES_NULL;
	} else {
		exclusive = gotuid = gotgid = 0;
	}
	/* use the open file's stateid if we have one, else a zeroed local stateid */
	if (nofp) {
		sid = &nofp->nof_stateid;
	} else {
		stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
		sid = &stateid;
	}

	if ((error = nfs_open_owner_set_busy(noop, thd)))
		return (error);
again:
	/* re-entry point for the non-exclusive-create fallback; reset per-attempt state */
	rflags = delegation = recall = 0;
	ace.ace_flags = 0;
	s = sbuf;
	slen = sizeof(sbuf);
	NVATTR_INIT(&nvattr);
	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, cnp->cn_nameptr, cnp->cn_namelen);

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, SAVEFH, OPEN(CREATE?), GETATTR(FH), RESTOREFH, GETATTR
	numops = 6;
	nfsm_chain_build_alloc_init(error, &nmreq, 53 * NFSX_UNSIGNED + cnp->cn_namelen);
	nfsm_chain_add_compound_header(error, &nmreq, create ? "create" : "open", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	nfsm_chain_add_32(error, &nmreq, share_access);
	nfsm_chain_add_32(error, &nmreq, share_deny);
	/* open_owner4: clientid + uid of the open owner's credential */
	nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
	nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
	nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
	nfsm_chain_add_32(error, &nmreq, create);
	if (create) {
		if (exclusive) {
			static uint32_t create_verf; // XXX need a better verifier
			create_verf++;
			nfsm_chain_add_32(error, &nmreq, NFS_CREATE_EXCLUSIVE);
			/* insert 64 bit verifier */
			nfsm_chain_add_32(error, &nmreq, create_verf);
			nfsm_chain_add_32(error, &nmreq, create_verf);
		} else {
			nfsm_chain_add_32(error, &nmreq, NFS_CREATE_UNCHECKED);
			nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
		}
	}
	nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
	nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	/* request the file handle along with the usual attributes */
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	if (!error)
		error = busyerror = nfs_node_set_busy(dnp, thd);
	nfsmout_if(error);

	/* kick off a parallel "._" file lookup while the RPC is in flight */
	if (create && !namedattrs)
		nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);

	error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, NULL, &req);
	if (!error) {
		if (create && !namedattrs)
			nfs_dulookup_start(&dul, dnp, ctx);
		error = nfs_request_async_finish(req, &nmrep, &xid, &status);
		savedxid = xid;
	}

	if (create && !namedattrs)
		nfs_dulookup_finish(&dul, dnp, ctx);

	if ((lockerror = nfs_node_lock(dnp)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
	nfs_owner_seqid_increment(noop, NULL, error);
	nfsm_chain_get_stateid(error, &nmrep, sid);
	nfsm_chain_check_change_info(error, &nmrep, dnp);
	nfsm_chain_get_32(error, &nmrep, rflags);
	bmlen = NFS_ATTR_BITMAP_LEN;
	nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
	nfsm_chain_get_32(error, &nmrep, delegation);
	if (!error)
		switch (delegation) {
		case NFS_OPEN_DELEGATE_NONE:
			break;
		case NFS_OPEN_DELEGATE_READ:
		case NFS_OPEN_DELEGATE_WRITE:
			nfsm_chain_get_stateid(error, &nmrep, &dstateid);
			nfsm_chain_get_32(error, &nmrep, recall);
			if (delegation == NFS_OPEN_DELEGATE_WRITE) // space (skip) XXX
				nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
			/* if we have any trouble accepting the ACE, just invalidate it */
			ace_type = ace_flags = ace_mask = len = 0;
			nfsm_chain_get_32(error, &nmrep, ace_type);
			nfsm_chain_get_32(error, &nmrep, ace_flags);
			nfsm_chain_get_32(error, &nmrep, ace_mask);
			nfsm_chain_get_32(error, &nmrep, len);
			ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
			ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
			ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
			/* grow the "who" buffer off the stack if the name doesn't fit */
			if (!error && (len >= slen)) {
				MALLOC(s, char*, len+1, M_TEMP, M_WAITOK);
				if (s)
					slen = len+1;
				else
					ace.ace_flags = 0;
			}
			if (s)
				nfsm_chain_get_opaque(error, &nmrep, len, s);
			else
				nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
			if (!error && s) {
				s[len] = '\0';
				if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP)))
					ace.ace_flags = 0;
			}
			if (error || !s)
				ace.ace_flags = 0;
			if (s && (s != sbuf))
				FREE(s, M_TEMP);
			break;
		default:
			error = EBADRPC;
			break;
		}
	/* At this point if we have no error, the object was created/opened. */
	open_error = error;
	nfsmout_if(error);
	if (create && vap && !exclusive)
		nfs_vattr_set_supported(bitmap, vap);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
	nfsmout_if(error);
	if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
		printf("nfs: open/create didn't return filehandle? %s\n", cnp->cn_nameptr);
		error = EBADRPC;
		goto nfsmout;
	}
	if (!create && np && !NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
		// XXX for the open case, what if fh doesn't match the vnode we think we're opening?
		// Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
		if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
			NP(np, "nfs4_open_rpc: warning: file handle mismatch");
	}
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
	if (error)
		NATTRINVALIDATE(dnp);
	nfsmout_if(error);

	if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX)
		nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;

	if (rflags & NFS_OPEN_RESULT_CONFIRM) {
		/* server wants an OPEN_CONFIRM; drop the node lock across the RPC */
		nfs_node_unlock(dnp);
		lockerror = ENOENT;
		NVATTR_CLEANUP(&nvattr);
		error = nfs4_open_confirm_rpc(nmp, dnp, fh.fh_data, fh.fh_len, noop, sid, thd, cred, &nvattr, &xid);
		nfsmout_if(error);
		savedxid = xid;
		if ((lockerror = nfs_node_lock(dnp)))
			error = lockerror;
	}

nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	if (!lockerror && create) {
		/* a successful create invalidates any cached negative lookups in the directory */
		if (!open_error && (dnp->n_flag & NNEGNCENTRIES)) {
			dnp->n_flag &= ~NNEGNCENTRIES;
			cache_purge_negatives(dvp);
		}
		dnp->n_flag |= NMODIFIED;
		nfs_node_unlock(dnp);
		lockerror = ENOENT;
		nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
	}
	if (!lockerror)
		nfs_node_unlock(dnp);
	if (!error && !np && fh.fh_len) {
		/* create the vnode with the filehandle and attributes */
		xid = savedxid;
		error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &newnp);
		if (!error)
			newvp = NFSTOV(newnp);
	}
	NVATTR_CLEANUP(&nvattr);
	if (!busyerror)
		nfs_node_clear_busy(dnp);
	/*
	 * NOTE(review): this block can also run after an error taken past the
	 * delegation parse but before 'fh' was filled in; in that case fh is
	 * only consulted under !error or np paths — verify all error jumps
	 * keep that invariant before touching this code.
	 */
	if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
		if (!np)
			np = newnp;
		if (!error && np && !recall) {
			/* stuff the delegation state in the node */
			lck_mtx_lock(&np->n_openlock);
			np->n_openflags &= ~N_DELEG_MASK;
			np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
			np->n_dstateid = dstateid;
			np->n_dace = ace;
			/* double-checked insert onto the mount's delegation list */
			if (np->n_dlink.tqe_next == NFSNOLIST) {
				lck_mtx_lock(&nmp->nm_lock);
				if (np->n_dlink.tqe_next == NFSNOLIST)
					TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
				lck_mtx_unlock(&nmp->nm_lock);
			}
			lck_mtx_unlock(&np->n_openlock);
		} else {
			/* give the delegation back */
			if (np) {
				if (NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
					/* update delegation state and return it */
					lck_mtx_lock(&np->n_openlock);
					np->n_openflags &= ~N_DELEG_MASK;
					np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
					np->n_dstateid = dstateid;
					np->n_dace = ace;
					if (np->n_dlink.tqe_next == NFSNOLIST) {
						lck_mtx_lock(&nmp->nm_lock);
						if (np->n_dlink.tqe_next == NFSNOLIST)
							TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
						lck_mtx_unlock(&nmp->nm_lock);
					}
					lck_mtx_unlock(&np->n_openlock);
					/* don't need to send a separate delegreturn for fh */
					fh.fh_len = 0;
				}
				/* return np's current delegation */
				nfs4_delegation_return(np, 0, thd, cred);
			}
			if (fh.fh_len) /* return fh's delegation if it wasn't for np */
				nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred);
		}
	}
	if (error) {
		/* fall back to a guarded create if the server can't do exclusive create */
		if (exclusive && (error == NFSERR_NOTSUPP)) {
			exclusive = 0;
			goto again;
		}
		if (newvp) {
			nfs_node_unlock(newnp);
			vnode_put(newvp);
		}
	} else if (create) {
		nfs_node_unlock(newnp);
		if (exclusive) {
			/* exclusive create ignored the attrs; set them with a follow-up SETATTR */
			error = nfs4_setattr_rpc(newnp, vap, ctx);
			if (error && (gotuid || gotgid)) {
				/* it's possible the server didn't like our attempt to set IDs. */
				/* so, let's try it again without those */
				VATTR_CLEAR_ACTIVE(vap, va_uid);
				VATTR_CLEAR_ACTIVE(vap, va_gid);
				error = nfs4_setattr_rpc(newnp, vap, ctx);
			}
		}
		if (error)
			vnode_put(newvp);
		else
			*vpp = newvp;
	}
	nfs_open_owner_clear_busy(noop);
	return (error);
}
4920
6d2010ae
A
4921
4922/*
4923 * Send an OPEN RPC to claim a delegated open for a file
4924 */
4925int
4926nfs4_claim_delegated_open_rpc(
4927 struct nfs_open_file *nofp,
4928 int share_access,
4929 int share_deny,
4930 int flags)
4931{
4932 struct nfsmount *nmp;
4933 struct nfs_open_owner *noop = nofp->nof_owner;
4934 struct nfs_vattr nvattr;
4935 int error = 0, lockerror = ENOENT, status;
4936 int nfsvers, numops;
4937 u_int64_t xid;
4938 nfsnode_t np = nofp->nof_np;
4939 struct nfsm_chain nmreq, nmrep;
4940 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
4941 uint32_t rflags = 0, delegation, recall = 0;
4942 fhandle_t fh;
4943 struct nfs_stateid dstateid;
4944 char sbuf[64], *s = sbuf;
4945 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
4946 struct kauth_ace ace;
4947 vnode_t dvp = NULL;
4948 const char *vname = NULL;
4949 const char *name = NULL;
4950 size_t namelen;
4951 char smallname[128];
4952 char *filename = NULL;
4953 struct nfsreq_secinfo_args si;
4954
4955 nmp = NFSTONMP(np);
4956 if (!nmp)
4957 return (ENXIO);
4958 nfsvers = nmp->nm_vers;
4959
4960 nfs_node_lock_force(np);
4961 if ((vnode_vtype(NFSTOV(np)) != VDIR) && np->n_sillyrename) {
4962 /*
4963 * The node's been sillyrenamed, so we need to use
4964 * the sillyrename directory/name to do the open.
4965 */
4966 struct nfs_sillyrename *nsp = np->n_sillyrename;
4967 dvp = NFSTOV(nsp->nsr_dnp);
4968 if ((error = vnode_get(dvp))) {
4969 nfs_node_unlock(np);
4970 goto out;
4971 }
4972 name = nsp->nsr_name;
4973 } else {
4974 /*
4975 * [sigh] We can't trust VFS to get the parent right for named
4976 * attribute nodes. (It likes to reparent the nodes after we've
4977 * created them.) Luckily we can probably get the right parent
4978 * from the n_parent we have stashed away.
4979 */
4980 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
4981 (((dvp = np->n_parent)) && (error = vnode_get(dvp))))
4982 dvp = NULL;
4983 if (!dvp)
4984 dvp = vnode_getparent(NFSTOV(np));
4985 vname = vnode_getname(NFSTOV(np));
4986 if (!dvp || !vname) {
4987 if (!error)
4988 error = EIO;
4989 nfs_node_unlock(np);
4990 goto out;
4991 }
4992 name = vname;
4993 }
4994 filename = &smallname[0];
4995 namelen = snprintf(filename, sizeof(smallname), "%s", name);
4996 if (namelen >= sizeof(smallname)) {
4997 MALLOC(filename, char *, namelen+1, M_TEMP, M_WAITOK);
4998 if (!filename) {
4999 error = ENOMEM;
5000 goto out;
5001 }
5002 snprintf(filename, namelen+1, "%s", name);
5003 }
5004 nfs_node_unlock(np);
5005
5006 if ((error = nfs_open_owner_set_busy(noop, NULL)))
5007 return (error);
5008
5009 NVATTR_INIT(&nvattr);
5010 delegation = NFS_OPEN_DELEGATE_NONE;
5011 dstateid = np->n_dstateid;
5012 NFSREQ_SECINFO_SET(&si, VTONFS(dvp), NULL, 0, filename, namelen);
5013
5014 nfsm_chain_null(&nmreq);
5015 nfsm_chain_null(&nmrep);
5016
5017 // PUTFH, OPEN, GETATTR(FH)
5018 numops = 3;
5019 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
5020 nfsm_chain_add_compound_header(error, &nmreq, "open_claim_d", numops);
5021 numops--;
5022 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5023 nfsm_chain_add_fh(error, &nmreq, nfsvers, VTONFS(dvp)->n_fhp, VTONFS(dvp)->n_fhsize);
5024 numops--;
5025 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5026 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5027 nfsm_chain_add_32(error, &nmreq, share_access);
5028 nfsm_chain_add_32(error, &nmreq, share_deny);
5029 // open owner: clientid + uid
5030 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5031 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5032 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
5033 // openflag4
5034 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5035 // open_claim4
5036 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_DELEGATE_CUR);
5037 nfsm_chain_add_stateid(error, &nmreq, &np->n_dstateid);
5038 nfsm_chain_add_name(error, &nmreq, filename, namelen, nmp);
5039 numops--;
5040 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5041 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5042 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5043 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5044 nfsm_chain_build_done(error, &nmreq);
5045 nfsm_assert(error, (numops == 0), EPROTO);
5046 nfsmout_if(error);
5047
5048 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
5049 noop->noo_cred, &si, flags|R_NOINTR, &nmrep, &xid, &status);
5050
5051 if ((lockerror = nfs_node_lock(np)))
5052 error = lockerror;
5053 nfsm_chain_skip_tag(error, &nmrep);
5054 nfsm_chain_get_32(error, &nmrep, numops);
5055 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5056 nfsmout_if(error);
5057 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5058 nfs_owner_seqid_increment(noop, NULL, error);
5059 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5060 nfsm_chain_check_change_info(error, &nmrep, np);
5061 nfsm_chain_get_32(error, &nmrep, rflags);
5062 bmlen = NFS_ATTR_BITMAP_LEN;
5063 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5064 nfsm_chain_get_32(error, &nmrep, delegation);
5065 if (!error)
5066 switch (delegation) {
5067 case NFS_OPEN_DELEGATE_NONE:
5068 // if (!(np->n_openflags & N_DELEG_RETURN)) /* don't warn if delegation is being returned */
5069 // printf("nfs: open delegated claim didn't return a delegation %s\n", filename ? filename : "???");
5070 break;
5071 case NFS_OPEN_DELEGATE_READ:
5072 case NFS_OPEN_DELEGATE_WRITE:
5073 if ((((np->n_openflags & N_DELEG_MASK) == N_DELEG_READ) &&
5074 (delegation == NFS_OPEN_DELEGATE_WRITE)) ||
5075 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) &&
5076 (delegation == NFS_OPEN_DELEGATE_READ)))
5077 printf("nfs: open delegated claim returned a different delegation type! have %s got %s %s\n",
5078 ((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ? "W" : "R",
5079 (delegation == NFS_OPEN_DELEGATE_WRITE) ? "W" : "R", filename ? filename : "???");
5080 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5081 nfsm_chain_get_32(error, &nmrep, recall);
5082 if (delegation == NFS_OPEN_DELEGATE_WRITE) // space (skip) XXX
5083 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
5084 /* if we have any trouble accepting the ACE, just invalidate it */
5085 ace_type = ace_flags = ace_mask = len = 0;
5086 nfsm_chain_get_32(error, &nmrep, ace_type);
5087 nfsm_chain_get_32(error, &nmrep, ace_flags);
5088 nfsm_chain_get_32(error, &nmrep, ace_mask);
5089 nfsm_chain_get_32(error, &nmrep, len);
5090 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5091 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5092 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5093 if (!error && (len >= slen)) {
5094 MALLOC(s, char*, len+1, M_TEMP, M_WAITOK);
5095 if (s)
5096 slen = len+1;
5097 else
5098 ace.ace_flags = 0;
5099 }
5100 if (s)
5101 nfsm_chain_get_opaque(error, &nmrep, len, s);
5102 else
5103 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
5104 if (!error && s) {
5105 s[len] = '\0';
5106 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP)))
5107 ace.ace_flags = 0;
5108 }
5109 if (error || !s)
5110 ace.ace_flags = 0;
5111 if (s && (s != sbuf))
5112 FREE(s, M_TEMP);
5113 if (!error) {
5114 /* stuff the latest delegation state in the node */
5115 lck_mtx_lock(&np->n_openlock);
5116 np->n_openflags &= ~N_DELEG_MASK;
5117 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5118 np->n_dstateid = dstateid;
5119 np->n_dace = ace;
5120 if (np->n_dlink.tqe_next == NFSNOLIST) {
5121 lck_mtx_lock(&nmp->nm_lock);
5122 if (np->n_dlink.tqe_next == NFSNOLIST)
5123 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5124 lck_mtx_unlock(&nmp->nm_lock);
5125 }
5126 lck_mtx_unlock(&np->n_openlock);
5127 }
5128 break;
5129 default:
5130 error = EBADRPC;
5131 break;
5132 }
5133 nfsmout_if(error);
5134 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5135 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
5136 nfsmout_if(error);
5137 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
5138 printf("nfs: open reclaim didn't return filehandle? %s\n", filename ? filename : "???");
5139 error = EBADRPC;
5140 goto nfsmout;
5141 }
5142 if (!NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
5143 // XXX what if fh doesn't match the vnode we think we're re-opening?
5144 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5145 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
5146 printf("nfs4_claim_delegated_open_rpc: warning: file handle mismatch %s\n", filename ? filename : "???");
5147 }
5148 error = nfs_loadattrcache(np, &nvattr, &xid, 1);
5149 nfsmout_if(error);
5150 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX)
5151 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
5152nfsmout:
5153 NVATTR_CLEANUP(&nvattr);
5154 nfsm_chain_cleanup(&nmreq);
5155 nfsm_chain_cleanup(&nmrep);
5156 if (!lockerror)
5157 nfs_node_unlock(np);
5158 nfs_open_owner_clear_busy(noop);
5159 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5160 if (recall) {
5161 /*
5162 * We're making a delegated claim.
5163 * Don't return the delegation here in case we have more to claim.
5164 * Just make sure it's queued up to be returned.
5165 */
5166 nfs4_delegation_return_enqueue(np);
5167 }
5168 }
5169out:
5170 // if (!error)
5171 // printf("nfs: open claim delegated (%d, %d) succeeded for %s\n", share_access, share_deny, filename ? filename : "???");
5172 if (filename && (filename != &smallname[0]))
5173 FREE(filename, M_TEMP);
5174 if (vname)
5175 vnode_putname(vname);
5176 if (dvp != NULLVP)
5177 vnode_put(dvp);
5178 return (error);
5179}
5180
b0d623f7
A
/*
 * Send an OPEN RPC to reclaim an open file.
 *
 * Uses CLAIM_PREVIOUS against the file's own handle during server grace
 * period recovery (e.g. after a server reboot).  If we held a delegation,
 * we ask for the same type back; if the server fails to return it, we
 * fall back to queueing our existing delegation for return.
 * Runs with R_RECOVER|R_NOINTR since it's part of state recovery.
 */
int
nfs4_open_reclaim_rpc(
	struct nfs_open_file *nofp,
	int share_access,
	int share_deny)
{
	struct nfsmount *nmp;
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfs_vattr nvattr;
	int error = 0, lockerror = ENOENT, status;
	int nfsvers, numops;
	u_int64_t xid;
	nfsnode_t np = nofp->nof_np;
	struct nfsm_chain nmreq, nmrep;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
	uint32_t rflags = 0, delegation, recall = 0;
	fhandle_t fh;
	struct nfs_stateid dstateid;
	char sbuf[64], *s = sbuf;
	uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
	struct kauth_ace ace;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (!nmp)
		return (ENXIO);
	nfsvers = nmp->nm_vers;

	if ((error = nfs_open_owner_set_busy(noop, NULL)))
		return (error);

	NVATTR_INIT(&nvattr);
	delegation = NFS_OPEN_DELEGATE_NONE;
	dstateid = np->n_dstateid;
	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, OPEN, GETATTR(FH)
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "open_reclaim", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	/* reclaim goes against the file's own handle (no dir/name needed) */
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	nfsm_chain_add_32(error, &nmreq, share_access);
	nfsm_chain_add_32(error, &nmreq, share_deny);
	// open owner: clientid + uid
	nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
	nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
	nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
	// openflag4
	nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
	// open_claim4
	nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_PREVIOUS);
	/* tell the server what delegation type we previously held (if any) */
	delegation = (np->n_openflags & N_DELEG_READ) ? NFS_OPEN_DELEGATE_READ :
			(np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE :
			NFS_OPEN_DELEGATE_NONE;
	nfsm_chain_add_32(error, &nmreq, delegation);
	delegation = NFS_OPEN_DELEGATE_NONE;
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
			noop->noo_cred, &si, R_RECOVER|R_NOINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
	nfs_owner_seqid_increment(noop, NULL, error);
	nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
	nfsm_chain_check_change_info(error, &nmrep, np);
	nfsm_chain_get_32(error, &nmrep, rflags);
	bmlen = NFS_ATTR_BITMAP_LEN;
	nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
	nfsm_chain_get_32(error, &nmrep, delegation);
	if (!error)
		switch (delegation) {
		case NFS_OPEN_DELEGATE_NONE:
			if (np->n_openflags & N_DELEG_MASK) {
				/*
				 * Hey! We were supposed to get our delegation back even
				 * if it was getting immediately recalled. Bad server!
				 *
				 * Just try to return the existing delegation.
				 */
				// NP(np, "nfs: open reclaim didn't return delegation?");
				delegation = (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE : NFS_OPEN_DELEGATE_READ;
				recall = 1;
			}
			break;
		case NFS_OPEN_DELEGATE_READ:
		case NFS_OPEN_DELEGATE_WRITE:
			nfsm_chain_get_stateid(error, &nmrep, &dstateid);
			nfsm_chain_get_32(error, &nmrep, recall);
			if (delegation == NFS_OPEN_DELEGATE_WRITE) // space (skip) XXX
				nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
			/* if we have any trouble accepting the ACE, just invalidate it */
			ace_type = ace_flags = ace_mask = len = 0;
			nfsm_chain_get_32(error, &nmrep, ace_type);
			nfsm_chain_get_32(error, &nmrep, ace_flags);
			nfsm_chain_get_32(error, &nmrep, ace_mask);
			nfsm_chain_get_32(error, &nmrep, len);
			ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
			ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
			ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
			/* grow the "who" buffer off the stack if the name doesn't fit */
			if (!error && (len >= slen)) {
				MALLOC(s, char*, len+1, M_TEMP, M_WAITOK);
				if (s)
					slen = len+1;
				else
					ace.ace_flags = 0;
			}
			if (s)
				nfsm_chain_get_opaque(error, &nmrep, len, s);
			else
				nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
			if (!error && s) {
				s[len] = '\0';
				if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP)))
					ace.ace_flags = 0;
			}
			if (error || !s)
				ace.ace_flags = 0;
			if (s && (s != sbuf))
				FREE(s, M_TEMP);
			if (!error) {
				/* stuff the delegation state in the node */
				lck_mtx_lock(&np->n_openlock);
				np->n_openflags &= ~N_DELEG_MASK;
				np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
				np->n_dstateid = dstateid;
				np->n_dace = ace;
				/* double-checked insert onto the mount's delegation list */
				if (np->n_dlink.tqe_next == NFSNOLIST) {
					lck_mtx_lock(&nmp->nm_lock);
					if (np->n_dlink.tqe_next == NFSNOLIST)
						TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
					lck_mtx_unlock(&nmp->nm_lock);
				}
				lck_mtx_unlock(&np->n_openlock);
			}
			break;
		default:
			error = EBADRPC;
			break;
		}
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
	nfsmout_if(error);
	if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
		NP(np, "nfs: open reclaim didn't return filehandle?");
		error = EBADRPC;
		goto nfsmout;
	}
	if (!NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
		// XXX what if fh doesn't match the vnode we think we're re-opening?
		// That should be pretty hard in this case, given that we are doing
		// the open reclaim using the file handle (and not a dir/name pair).
		// Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
		if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
			NP(np, "nfs4_open_reclaim_rpc: warning: file handle mismatch");
	}
	error = nfs_loadattrcache(np, &nvattr, &xid, 1);
	nfsmout_if(error);
	if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX)
		nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
nfsmout:
	// if (!error)
	//	NP(np, "nfs: open reclaim (%d, %d) succeeded", share_access, share_deny);
	NVATTR_CLEANUP(&nvattr);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if (!lockerror)
		nfs_node_unlock(np);
	nfs_open_owner_clear_busy(noop);
	if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
		/* delegation recalled (or not re-granted): queue it for return */
		if (recall)
			nfs4_delegation_return_enqueue(np);
	}
	return (error);
}
2d21ac55 5380
b0d623f7
A
/*
 * Send an NFSv4 OPEN_DOWNGRADE for the given open file.
 *
 * Issues a PUTFH/OPEN_DOWNGRADE/GETATTR compound to reduce the open's
 * share access/deny modes to the current nof_access/nof_deny values,
 * updating the open stateid and the node's cached attributes from the
 * reply.  The open owner is marked busy for the duration so its seqid
 * can't be raced by another in-flight open-state RPC.
 *
 * Returns 0 on success, or an errno / NFS error value.
 */
int
nfs4_open_downgrade_rpc(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	vfs_context_t ctx)
{
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfsmount *nmp;
	int error, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	u_int64_t xid;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (!nmp)
		return (ENXIO);
	nfsvers = nmp->nm_vers;

	/* serialize on the open owner so noo_seqid stays consistent */
	if ((error = nfs_open_owner_set_busy(noop, NULL)))
		return (error);

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, OPEN_DOWNGRADE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "open_downgrd", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_DOWNGRADE);
	nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	nfsm_chain_add_32(error, &nmreq, nofp->nof_access);
	nfsm_chain_add_32(error, &nmreq, nofp->nof_deny);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	/* R_NOINTR: state-maintenance RPC must not be cut short by signals */
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
		vfs_context_thread(ctx), vfs_context_ucred(ctx),
		&si, R_NOINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_DOWNGRADE);
	/* seqid must advance whether the OPEN_DOWNGRADE succeeded or not */
	nfs_owner_seqid_increment(noop, NULL, error);
	nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfs_open_owner_clear_busy(noop);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}
2d21ac55 5448
b0d623f7
A
/*
 * Send an NFSv4 CLOSE for the given open file.
 *
 * Issues a PUTFH/CLOSE/GETATTR compound using the open owner's seqid and
 * the open file's stateid, refreshing the stateid and the node's cached
 * attributes from the reply.  The open owner is marked busy for the
 * duration so its seqid can't be raced.  Extra request flags (e.g.
 * R_RECOVER) may be passed in 'flags'; R_NOINTR is always added.
 *
 * Returns 0 on success, or an errno / NFS error value.
 */
int
nfs4_close_rpc(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	thread_t thd,
	kauth_cred_t cred,
	int flags)
{
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfsmount *nmp;
	int error, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	u_int64_t xid;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (!nmp)
		return (ENXIO);
	nfsvers = nmp->nm_vers;

	/* serialize on the open owner so noo_seqid stays consistent */
	if ((error = nfs_open_owner_set_busy(noop, NULL)))
		return (error);

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, CLOSE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "close", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_CLOSE);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags|R_NOINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_CLOSE);
	/* seqid must advance whether the CLOSE succeeded or not */
	nfs_owner_seqid_increment(noop, NULL, error);
	nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfs_open_owner_clear_busy(noop);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}
5514
5515
/*
 * Claim the delegated open combinations this open file holds.
 *
 * Each delegated open count (nof_d_*) is converted into a server-confirmed
 * open of the same access/deny mode via nfs4_claim_delegated_open_rpc(),
 * then folded into the corresponding confirmed count.  The deny-mode opens
 * are claimed first; deny-none opens may instead be flagged for reopen if
 * the delegation turns out to be lost and no locks are held.  Any delegated
 * byte-range locks held by this open's owner are then re-established.
 *
 * On unrecoverable loss of delegated state, all open state for the node is
 * revoked.  Returns 0 if all state was claimed (or safely converted).
 */
int
nfs4_claim_delegated_state_for_open_file(struct nfs_open_file *nofp, int flags)
{
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfs_lock_owner *nlop;
	struct nfs_file_lock *nflp, *nextnflp;
	struct nfsmount *nmp;
	int error = 0, reopen = 0;

	/* claim each deny-mode open combination; on success move the
	 * delegated count over to the confirmed count under nof_lock */
	if (nofp->nof_d_rw_drw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_BOTH, flags);
		if (!error) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_rw_drw += nofp->nof_d_rw_drw;
			nofp->nof_d_rw_drw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	if (!error && nofp->nof_d_w_drw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_BOTH, flags);
		if (!error) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_w_drw += nofp->nof_d_w_drw;
			nofp->nof_d_w_drw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	if (!error && nofp->nof_d_r_drw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_BOTH, flags);
		if (!error) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_r_drw += nofp->nof_d_r_drw;
			nofp->nof_d_r_drw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	if (!error && nofp->nof_d_rw_dw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_WRITE, flags);
		if (!error) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_rw_dw += nofp->nof_d_rw_dw;
			nofp->nof_d_rw_dw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	if (!error && nofp->nof_d_w_dw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_WRITE, flags);
		if (!error) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_w_dw += nofp->nof_d_w_dw;
			nofp->nof_d_w_dw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	if (!error && nofp->nof_d_r_dw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_WRITE, flags);
		if (!error) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_r_dw += nofp->nof_d_r_dw;
			nofp->nof_d_r_dw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	/* non-deny-mode opens may be reopened if no locks are held */
	if (!error && nofp->nof_d_rw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, flags);
		/* for some errors, we should just try reopening the file */
		if (nfs_mount_state_error_delegation_lost(error))
			reopen = error;
		if (!error || reopen) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_rw += nofp->nof_d_rw;
			nofp->nof_d_rw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	/* if we've already set reopen, we should move these other two opens from delegated to not delegated */
	if ((!error || reopen) && nofp->nof_d_w) {
		if (!error) {
			error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE, flags);
			/* for some errors, we should just try reopening the file */
			if (nfs_mount_state_error_delegation_lost(error))
				reopen = error;
		}
		if (!error || reopen) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_w += nofp->nof_d_w;
			nofp->nof_d_w = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	if ((!error || reopen) && nofp->nof_d_r) {
		if (!error) {
			error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, flags);
			/* for some errors, we should just try reopening the file */
			if (nfs_mount_state_error_delegation_lost(error))
				reopen = error;
		}
		if (!error || reopen) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_r += nofp->nof_d_r;
			nofp->nof_d_r = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}

	if (reopen) {
		/*
		 * Any problems with the delegation probably indicates that we
		 * should review/return all of our current delegation state.
		 */
		if ((nmp = NFSTONMP(nofp->nof_np))) {
			nfs4_delegation_return_enqueue(nofp->nof_np);
			lck_mtx_lock(&nmp->nm_lock);
			nfs_need_recover(nmp, NFSERR_EXPIRED);
			lck_mtx_unlock(&nmp->nm_lock);
		}
		if (reopen && (nfs_check_for_locks(noop, nofp) == 0)) {
			/* just reopen the file on next access */
			NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, need reopen, %d",
				reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_flags |= NFS_OPEN_FILE_REOPEN;
			lck_mtx_unlock(&nofp->nof_lock);
			return (0);
		}
		if (reopen)
			NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, locks prevent reopen, %d",
				reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
	}

	if (!error && ((nmp = NFSTONMP(nofp->nof_np)))) {
		/* claim delegated locks */
		TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
			if (nlop->nlo_open_owner != noop)
				continue;
			TAILQ_FOREACH_SAFE(nflp, &nlop->nlo_locks, nfl_lolink, nextnflp) {
				/* skip dead & blocked lock requests (shouldn't be any in the held lock list) */
				if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD|NFS_FILE_LOCK_BLOCKED))
					continue;
				/* skip non-delegated locks */
				if (!(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED))
					continue;
				error = nmp->nm_funcs->nf_setlock_rpc(nofp->nof_np, nofp, nflp, 0, flags, current_thread(), noop->noo_cred);
				if (error) {
					NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) failed %d, %d",
						nflp->nfl_start, nflp->nfl_end, error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
					break;
				}
				// else {
				// 	NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) succeeded, %d",
				// 		nflp->nfl_start, nflp->nfl_end, kauth_cred_getuid(nofp->nof_owner->noo_cred));
				// }
			}
			if (error)
				break;
		}
	}

	if (!error)	/* all state claimed successfully! */
		return (0);

	/* restart if it looks like a problem more than just losing the delegation */
	if (!nfs_mount_state_error_delegation_lost(error) &&
	    ((error == ETIMEDOUT) || nfs_mount_state_error_should_restart(error))) {
		NP(nofp->nof_np, "nfs delegated lock claim error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
		if ((error == ETIMEDOUT) && ((nmp = NFSTONMP(nofp->nof_np))))
			nfs_need_reconnect(nmp);
		return (error);
	}

	/* delegated state lost (once held but now not claimable) */
	NP(nofp->nof_np, "nfs delegated state claim error %d, state lost, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));

	/*
	 * Any problems with the delegation probably indicates that we
	 * should review/return all of our current delegation state.
	 */
	if ((nmp = NFSTONMP(nofp->nof_np))) {
		nfs4_delegation_return_enqueue(nofp->nof_np);
		lck_mtx_lock(&nmp->nm_lock);
		nfs_need_recover(nmp, NFSERR_EXPIRED);
		lck_mtx_unlock(&nmp->nm_lock);
	}

	/* revoke all open file state */
	nfs_revoke_open_state_for_node(nofp->nof_np);

	return (error);
}
5709
/*
 * Release all open state for the given node.
 *
 * Drops every held byte-range lock (sending unlock RPCs for
 * non-delegated locks unless 'force' is set or the mount is gone) and
 * marks every open file on the node as LOST, closing them on the server
 * for NFSv4 when possible.  Used during revocation/forced unmount.
 */
void
nfs_release_open_state_for_node(nfsnode_t np, int force)
{
	struct nfsmount *nmp = NFSTONMP(np);
	struct nfs_open_file *nofp;
	struct nfs_file_lock *nflp, *nextnflp;

	/* drop held locks */
	TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
		/* skip dead & blocked lock requests */
		if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD|NFS_FILE_LOCK_BLOCKED))
			continue;
		/* send an unlock if not a delegated lock */
		if (!force && nmp && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED))
			nmp->nm_funcs->nf_unlock_rpc(np, nflp->nfl_owner, F_WRLCK, nflp->nfl_start, nflp->nfl_end, R_RECOVER,
				NULL, nflp->nfl_owner->nlo_open_owner->noo_cred);
		/* kill/remove the lock */
		lck_mtx_lock(&np->n_openlock);
		nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
		lck_mtx_lock(&nflp->nfl_owner->nlo_lock);
		TAILQ_REMOVE(&nflp->nfl_owner->nlo_locks, nflp, nfl_lolink);
		lck_mtx_unlock(&nflp->nfl_owner->nlo_lock);
		if (nflp->nfl_blockcnt) {
			/* wake up anyone blocked on this lock */
			wakeup(nflp);
		} else {
			/* remove nflp from lock list and destroy */
			TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
			nfs_file_lock_destroy(nflp);
		}
		lck_mtx_unlock(&np->n_openlock);
	}

	lck_mtx_lock(&np->n_openlock);

	/* drop all opens */
	TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
		if (nofp->nof_flags & NFS_OPEN_FILE_LOST)
			continue;
		/* mark open state as lost */
		lck_mtx_lock(&nofp->nof_lock);
		nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
		nofp->nof_flags |= NFS_OPEN_FILE_LOST;
		lck_mtx_unlock(&nofp->nof_lock);
		/* try to close the open on the server (best effort, R_RECOVER) */
		if (!force && nmp && (nmp->nm_vers >= NFS_VER4))
			nfs4_close_rpc(np, nofp, NULL, nofp->nof_owner->noo_cred, R_RECOVER);
	}

	lck_mtx_unlock(&np->n_openlock);
}
5763
5764/*
5765 * State for a node has been lost, drop it, and revoke the node.
5766 * Attempt to return any state if possible in case the server
5767 * might somehow think we hold it.
5768 */
5769void
5770nfs_revoke_open_state_for_node(nfsnode_t np)
5771{
5772 struct nfsmount *nmp;
5773
5774 /* mark node as needing to be revoked */
5775 nfs_node_lock_force(np);
5776 if (np->n_flag & NREVOKE) /* already revoked? */
5777 {
5778 NP(np, "nfs_revoke_open_state_for_node(): already revoked");
5779 nfs_node_unlock(np);
5780 return;
5781 }
5782 np->n_flag |= NREVOKE;
5783 nfs_node_unlock(np);
5784
5785 nfs_release_open_state_for_node(np, 0);
5786 NP(np, "nfs: state lost for %p 0x%x", np, np->n_flag);
5787
5788 /* mark mount as needing a revoke scan and have the socket thread do it. */
5789 if ((nmp = NFSTONMP(np))) {
5790 lck_mtx_lock(&nmp->nm_lock);
5791 nmp->nm_state |= NFSSTA_REVOKE;
5792 nfs_mount_sock_thread_wake(nmp);
5793 lck_mtx_unlock(&nmp->nm_lock);
5794 }
5795}
5796
5797/*
5798 * Claim the delegated open combinations that each of this node's open files hold.
5799 */
5800int
5801nfs4_claim_delegated_state_for_node(nfsnode_t np, int flags)
5802{
5803 struct nfs_open_file *nofp;
5804 int error = 0;
5805
5806 lck_mtx_lock(&np->n_openlock);
5807
5808 /* walk the open file list looking for opens with delegated state to claim */
5809restart:
5810 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
5811 if (!nofp->nof_d_rw_drw && !nofp->nof_d_w_drw && !nofp->nof_d_r_drw &&
5812 !nofp->nof_d_rw_dw && !nofp->nof_d_w_dw && !nofp->nof_d_r_dw &&
5813 !nofp->nof_d_rw && !nofp->nof_d_w && !nofp->nof_d_r)
5814 continue;
5815 lck_mtx_unlock(&np->n_openlock);
5816 error = nfs4_claim_delegated_state_for_open_file(nofp, flags);
5817 lck_mtx_lock(&np->n_openlock);
b0d623f7 5818 if (error)
6d2010ae
A
5819 break;
5820 goto restart;
5821 }
5822
5823 lck_mtx_unlock(&np->n_openlock);
5824
5825 return (error);
5826}
5827
5828/*
5829 * Mark a node as needed to have its delegation returned.
5830 * Queue it up on the delegation return queue.
5831 * Make sure the thread is running.
5832 */
5833void
5834nfs4_delegation_return_enqueue(nfsnode_t np)
5835{
5836 struct nfsmount *nmp;
5837
5838 nmp = NFSTONMP(np);
5839 if (!nmp)
5840 return;
5841
5842 lck_mtx_lock(&np->n_openlock);
5843 np->n_openflags |= N_DELEG_RETURN;
5844 lck_mtx_unlock(&np->n_openlock);
5845
5846 lck_mtx_lock(&nmp->nm_lock);
5847 if (np->n_dreturn.tqe_next == NFSNOLIST)
5848 TAILQ_INSERT_TAIL(&nmp->nm_dreturnq, np, n_dreturn);
5849 nfs_mount_sock_thread_wake(nmp);
5850 lck_mtx_unlock(&nmp->nm_lock);
5851}
5852
5853/*
5854 * return any delegation we may have for the given node
5855 */
5856int
5857nfs4_delegation_return(nfsnode_t np, int flags, thread_t thd, kauth_cred_t cred)
5858{
5859 struct nfsmount *nmp;
5860 fhandle_t fh;
5861 nfs_stateid dstateid;
5862 int error;
5863
5864 nmp = NFSTONMP(np);
5865 if (!nmp)
5866 return (ENXIO);
5867
5868 /* first, make sure the node's marked for delegation return */
5869 lck_mtx_lock(&np->n_openlock);
5870 np->n_openflags |= (N_DELEG_RETURN|N_DELEG_RETURNING);
5871 lck_mtx_unlock(&np->n_openlock);
5872
5873 /* make sure nobody else is using the delegation state */
5874 if ((error = nfs_open_state_set_busy(np, NULL)))
5875 goto out;
5876
5877 /* claim any delegated state */
5878 if ((error = nfs4_claim_delegated_state_for_node(np, flags)))
5879 goto out;
5880
5881 /* return the delegation */
5882 lck_mtx_lock(&np->n_openlock);
5883 dstateid = np->n_dstateid;
5884 fh.fh_len = np->n_fhsize;
5885 bcopy(np->n_fhp, &fh.fh_data, fh.fh_len);
5886 lck_mtx_unlock(&np->n_openlock);
5887 error = nfs4_delegreturn_rpc(NFSTONMP(np), fh.fh_data, fh.fh_len, &dstateid, flags, thd, cred);
5888 /* assume delegation is gone for all errors except ETIMEDOUT, NFSERR_*MOVED */
5889 if ((error != ETIMEDOUT) && (error != NFSERR_MOVED) && (error != NFSERR_LEASE_MOVED)) {
5890 lck_mtx_lock(&np->n_openlock);
5891 np->n_openflags &= ~N_DELEG_MASK;
5892 lck_mtx_lock(&nmp->nm_lock);
5893 if (np->n_dlink.tqe_next != NFSNOLIST) {
5894 TAILQ_REMOVE(&nmp->nm_delegations, np, n_dlink);
5895 np->n_dlink.tqe_next = NFSNOLIST;
5896 }
5897 lck_mtx_unlock(&nmp->nm_lock);
5898 lck_mtx_unlock(&np->n_openlock);
5899 }
5900
5901out:
5902 /* make sure it's no longer on the return queue and clear the return flags */
5903 lck_mtx_lock(&nmp->nm_lock);
5904 if (np->n_dreturn.tqe_next != NFSNOLIST) {
5905 TAILQ_REMOVE(&nmp->nm_dreturnq, np, n_dreturn);
5906 np->n_dreturn.tqe_next = NFSNOLIST;
5907 }
5908 lck_mtx_unlock(&nmp->nm_lock);
5909 lck_mtx_lock(&np->n_openlock);
5910 np->n_openflags &= ~(N_DELEG_RETURN|N_DELEG_RETURNING);
5911 lck_mtx_unlock(&np->n_openlock);
5912
5913 if (error) {
5914 NP(np, "nfs4_delegation_return, error %d", error);
5915 if (error == ETIMEDOUT)
5916 nfs_need_reconnect(nmp);
5917 if (nfs_mount_state_error_should_restart(error)) {
5918 /* make sure recovery happens */
5919 lck_mtx_lock(&nmp->nm_lock);
5920 nfs_need_recover(nmp, nfs_mount_state_error_delegation_lost(error) ? NFSERR_EXPIRED : 0);
5921 lck_mtx_unlock(&nmp->nm_lock);
2d21ac55
A
5922 }
5923 }
6d2010ae
A
5924
5925 nfs_open_state_clear_busy(np);
5926
5927 return (error);
b0d623f7 5928}
2d21ac55 5929
/*
 * RPC to return a delegation for a file handle
 *
 * Sends a PUTFH/DELEGRETURN compound for the given file handle and
 * delegation stateid.  'flags' are passed through to nfs_request2()
 * (e.g. R_RECOVER).  Returns 0 on success or an errno / NFS error.
 */
int
nfs4_delegreturn_rpc(struct nfsmount *nmp, u_char *fhp, int fhlen, struct nfs_stateid *sid, int flags, thread_t thd, kauth_cred_t cred)
{
	int error = 0, status, numops;
	uint64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	NFSREQ_SECINFO_SET(&si, NULL, fhp, fhlen, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, DELEGRETURN
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "delegreturn", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_DELEGRETURN);
	nfsm_chain_add_stateid(error, &nmreq, sid);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags, &nmrep, &xid, &status);
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_DELEGRETURN);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}
5968
5969
/*
 * NFS read call.
 * Just call nfs_bioread() to do the work.
 *
 * Note: the exec code paths have a tendency to call VNOP_READ (and VNOP_MMAP)
 * without first calling VNOP_OPEN, so we make sure the file is open here.
 *
 * If this open-owner has no open on the file yet, an implicit read-only
 * open is performed (a no-op share add for v2/v3, an OPEN RPC for v4) and
 * flagged NEEDCLOSE so it gets closed later.  Files whose open state was
 * lost return EIO; files flagged for reopen are reopened first.
 */
int
nfs_vnop_read(
	struct vnop_read_args /* {
		struct vnodeop_desc *a_desc;
		vnode_t a_vp;
		struct uio *a_uio;
		int a_ioflag;
		vfs_context_t a_context;
	} */ *ap)
{
	vnode_t vp = ap->a_vp;
	vfs_context_t ctx = ap->a_context;
	nfsnode_t np;
	struct nfsmount *nmp;
	struct nfs_open_owner *noop;
	struct nfs_open_file *nofp;
	int error;

	if (vnode_vtype(ap->a_vp) != VREG)
		return (EPERM);

	np = VTONFS(vp);
	nmp = NFSTONMP(np);
	if (!nmp)
		return (ENXIO);
	if (np->n_flag & NREVOKE)
		return (EIO);

	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
	if (!noop)
		return (ENOMEM);
restart:
	error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
		NP(np, "nfs_vnop_read: LOST %d", kauth_cred_getuid(noop->noo_cred));
		error = EIO;
	}
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
		/* file needs reopening; do it and rescan the open files */
		error = nfs4_reopen(nofp, vfs_context_thread(ctx));
		nofp = NULL;
		if (!error)
			goto restart;
	}
	if (error) {
		nfs_open_owner_rele(noop);
		return (error);
	}
	if (!nofp->nof_access) {
		/* we don't have the file open, so open it for read access */
		error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
		if (error) {
			nfs_open_owner_rele(noop);
			return (error);
		}
		/* recheck revocation now that we're in a mount-state section */
		if (np->n_flag & NREVOKE) {
			error = EIO;
			nfs_mount_state_in_use_end(nmp, 0);
			nfs_open_owner_rele(noop);
			return (error);
		}
		error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
		if (error)
			nofp = NULL;
		if (!error) {
			if (nmp->nm_vers < NFS_VER4) {
				/* NFS v2/v3 opens are always allowed - so just add it. */
				nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
			} else {
				error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
			}
		}
		/* this implicit open must be closed when the file's done with */
		if (!error)
			nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
		if (nofp)
			nfs_open_file_clear_busy(nofp);
		/* nonzero return means a recovery pass ran; redo everything */
		if (nfs_mount_state_in_use_end(nmp, error)) {
			nofp = NULL;
			goto restart;
		}
	}
	nfs_open_owner_rele(noop);
	if (error)
		return (error);
	return (nfs_bioread(VTONFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_context));
}
6062
/*
 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
 * Files are created using the NFSv4 OPEN RPC. So we must open the
 * file to create it and then close it.
 *
 * A provisional, nodeless open file is created, the OPEN(CREATE) RPC is
 * sent, and once a node exists the open is attached to it (merging into
 * any pre-existing open file struct for this owner).  The open is marked
 * NFS_OPEN_FILE_CREATE so the creating thread's subsequent open/close
 * can be matched up with it.
 */
int
nfs4_vnop_create(
	struct vnop_create_args /* {
		struct vnodeop_desc *a_desc;
		vnode_t a_dvp;
		vnode_t *a_vpp;
		struct componentname *a_cnp;
		struct vnode_attr *a_vap;
		vfs_context_t a_context;
	} */ *ap)
{
	vfs_context_t ctx = ap->a_context;
	struct componentname *cnp = ap->a_cnp;
	struct vnode_attr *vap = ap->a_vap;
	vnode_t dvp = ap->a_dvp;
	vnode_t *vpp = ap->a_vpp;
	struct nfsmount *nmp;
	nfsnode_t np;
	int error = 0, busyerror = 0, accessMode, denyMode;
	struct nfs_open_owner *noop = NULL;
	struct nfs_open_file *newnofp = NULL, *nofp = NULL;

	nmp = VTONMP(dvp);
	if (!nmp)
		return (ENXIO);

	if (vap)
		nfs_avoid_needless_id_setting_on_create(VTONFS(dvp), vap, ctx);

	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
	if (!noop)
		return (ENOMEM);

restart:
	error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
	if (error) {
		nfs_open_owner_rele(noop);
		return (error);
	}

	/* grab a provisional, nodeless open file */
	error = nfs_open_file_find(NULL, noop, &newnofp, 0, 0, 1);
	if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
		printf("nfs_vnop_create: LOST\n");
		error = EIO;
	}
	if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
		/* This shouldn't happen given that this is a new, nodeless nofp */
		nfs_mount_state_in_use_end(nmp, 0);
		error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
		nfs_open_file_destroy(newnofp);
		newnofp = NULL;
		if (!error)
			goto restart;
	}
	if (!error)
		error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
	if (error) {
		if (newnofp)
			nfs_open_file_destroy(newnofp);
		newnofp = NULL;
		goto out;
	}

	/*
	 * We're just trying to create the file.
	 * We'll create/open it RW, and set NFS_OPEN_FILE_CREATE.
	 */
	accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
	denyMode = NFS_OPEN_SHARE_DENY_NONE;

	/* Do the open/create */
	error = nfs4_open_rpc(newnofp, ctx, cnp, vap, dvp, vpp, NFS_OPEN_CREATE, accessMode, denyMode);
	if ((error == EACCES) && vap && !(vap->va_vaflags & VA_EXCLUSIVE) &&
	    VATTR_IS_ACTIVE(vap, va_mode) && !(vap->va_mode & S_IWUSR)) {
		/*
		 * Hmm... it looks like we may have a situation where the request was
		 * retransmitted because we didn't get the first response which successfully
		 * created/opened the file and then the second time we were denied the open
		 * because the mode the file was created with doesn't allow write access.
		 *
		 * We'll try to work around this by temporarily updating the mode and
		 * retrying the open.
		 */
		struct vnode_attr vattr;

		/* first make sure it's there */
		int error2 = nfs_lookitup(VTONFS(dvp), cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
		if (!error2 && np) {
			nfs_node_unlock(np);
			*vpp = NFSTOV(np);
			if (vnode_vtype(NFSTOV(np)) == VREG) {
				/* temporarily grant owner-write, reopen, then restore the mode */
				VATTR_INIT(&vattr);
				VATTR_SET(&vattr, va_mode, (vap->va_mode | S_IWUSR));
				if (!nfs4_setattr_rpc(np, &vattr, ctx)) {
					error2 = nfs4_open_rpc(newnofp, ctx, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, accessMode, denyMode);
					VATTR_INIT(&vattr);
					VATTR_SET(&vattr, va_mode, vap->va_mode);
					nfs4_setattr_rpc(np, &vattr, ctx);
					if (!error2)
						error = 0;
				}
			}
			if (error) {
				vnode_put(*vpp);
				*vpp = NULL;
			}
		}
	}
	if (!error && !*vpp) {
		printf("nfs4_open_rpc returned without a node?\n");
		/* Hmmm... with no node, we have no filehandle and can't close it */
		error = EIO;
	}
	if (error) {
		/* need to cleanup our temporary nofp */
		nfs_open_file_clear_busy(newnofp);
		nfs_open_file_destroy(newnofp);
		newnofp = NULL;
		goto out;
	}
	/* After we have a node, add our open file struct to the node */
	np = VTONFS(*vpp);
	nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
	nofp = newnofp;
	error = nfs_open_file_find_internal(np, noop, &nofp, 0, 0, 0);
	if (error) {
		/* This shouldn't happen, because we passed in a new nofp to use. */
		printf("nfs_open_file_find_internal failed! %d\n", error);
		goto out;
	} else if (nofp != newnofp) {
		/*
		 * Hmm... an open file struct already exists.
		 * Mark the existing one busy and merge our open into it.
		 * Then destroy the one we created.
		 * Note: there's no chance of an open confict because the
		 * open has already been granted.
		 */
		busyerror = nfs_open_file_set_busy(nofp, NULL);
		nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
		nofp->nof_stateid = newnofp->nof_stateid;
		if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)
			nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
		nfs_open_file_clear_busy(newnofp);
		nfs_open_file_destroy(newnofp);
	}
	newnofp = NULL;
	/* mark the node as holding a create-initiated open */
	nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
	nofp->nof_creator = current_thread();
out:
	if (nofp && !busyerror)
		nfs_open_file_clear_busy(nofp);
	/* nonzero return means a recovery pass ran; redo everything */
	if (nfs_mount_state_in_use_end(nmp, error)) {
		nofp = newnofp = NULL;
		busyerror = 0;
		goto restart;
	}
	if (noop)
		nfs_open_owner_rele(noop);
	return (error);
}
6230
6231/*
6232 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6233 */
6234int
6235nfs4_create_rpc(
6236 vfs_context_t ctx,
6237 nfsnode_t dnp,
6238 struct componentname *cnp,
6239 struct vnode_attr *vap,
6240 int type,
6241 char *link,
6242 nfsnode_t *npp)
6243{
6244 struct nfsmount *nmp;
6245 struct nfs_vattr nvattr;
6246 int error = 0, create_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
6247 int nfsvers, namedattrs, numops;
6248 u_int64_t xid, savedxid = 0;
6249 nfsnode_t np = NULL;
6250 vnode_t newvp = NULL;
6251 struct nfsm_chain nmreq, nmrep;
6252 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6253 const char *tag;
6254 nfs_specdata sd;
6255 fhandle_t fh;
6256 struct nfsreq rq, *req = &rq;
6257 struct nfs_dulookup dul;
6258 struct nfsreq_secinfo_args si;
6259
6260 nmp = NFSTONMP(dnp);
6261 if (!nmp)
6262 return (ENXIO);
6263 nfsvers = nmp->nm_vers;
6264 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
6265 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
6266 return (EINVAL);
6267
6268 sd.specdata1 = sd.specdata2 = 0;
6269
6270 switch (type) {
6271 case NFLNK:
6272 tag = "symlink";
6273 break;
6274 case NFBLK:
6275 case NFCHR:
6276 tag = "mknod";
6277 if (!VATTR_IS_ACTIVE(vap, va_rdev))
6278 return (EINVAL);
6279 sd.specdata1 = major(vap->va_rdev);
6280 sd.specdata2 = minor(vap->va_rdev);
6281 break;
6282 case NFSOCK:
6283 case NFFIFO:
6284 tag = "mknod";
6285 break;
6286 case NFDIR:
6287 tag = "mkdir";
6288 break;
6289 default:
6290 return (EINVAL);
6291 }
6292
6293 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
6294
6295 error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
6296 if (!namedattrs)
6297 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
6298
6299 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
6300 NVATTR_INIT(&nvattr);
6301 nfsm_chain_null(&nmreq);
6302 nfsm_chain_null(&nmrep);
6303
6304 // PUTFH, SAVEFH, CREATE, GETATTR(FH), RESTOREFH, GETATTR
6305 numops = 6;
6306 nfsm_chain_build_alloc_init(error, &nmreq, 66 * NFSX_UNSIGNED);
6307 nfsm_chain_add_compound_header(error, &nmreq, tag, numops);
6308 numops--;
6309 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6310 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
6311 numops--;
6312 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
6313 numops--;
6314 nfsm_chain_add_32(error, &nmreq, NFS_OP_CREATE);
6315 nfsm_chain_add_32(error, &nmreq, type);
6316 if (type == NFLNK) {
6317 nfsm_chain_add_name(error, &nmreq, link, strlen(link), nmp);
6318 } else if ((type == NFBLK) || (type == NFCHR)) {
6319 nfsm_chain_add_32(error, &nmreq, sd.specdata1);
6320 nfsm_chain_add_32(error, &nmreq, sd.specdata2);
6321 }
6322 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
6323 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
6324 numops--;
6325 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6326 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
6327 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
6328 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
6329 numops--;
6330 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
6331 numops--;
6332 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6333 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
6334 nfsm_chain_build_done(error, &nmreq);
6335 nfsm_assert(error, (numops == 0), EPROTO);
6336 nfsmout_if(error);
6337
6338 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
6339 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
6340 if (!error) {
6341 if (!namedattrs)
6342 nfs_dulookup_start(&dul, dnp, ctx);
6343 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
6344 }
6345
6346 if ((lockerror = nfs_node_lock(dnp)))
6347 error = lockerror;
6348 nfsm_chain_skip_tag(error, &nmrep);
6349 nfsm_chain_get_32(error, &nmrep, numops);
6350 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6351 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
6352 nfsmout_if(error);
6353 nfsm_chain_op_check(error, &nmrep, NFS_OP_CREATE);
6354 nfsm_chain_check_change_info(error, &nmrep, dnp);
6355 bmlen = NFS_ATTR_BITMAP_LEN;
6356 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
6357 /* At this point if we have no error, the object was created. */
6358 /* if we don't get attributes, then we should lookitup. */
6359 create_error = error;
6360 nfsmout_if(error);
6361 nfs_vattr_set_supported(bitmap, vap);
6362 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6363 nfsmout_if(error);
6364 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
6365 nfsmout_if(error);
6366 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
6367 printf("nfs: create/%s didn't return filehandle? %s\n", tag, cnp->cn_nameptr);
6368 error = EBADRPC;
6369 goto nfsmout;
6370 }
6371 /* directory attributes: if we don't get them, make sure to invalidate */
6372 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
6373 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6374 savedxid = xid;
6375 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
6376 if (error)
6377 NATTRINVALIDATE(dnp);
6378
6379nfsmout:
6380 nfsm_chain_cleanup(&nmreq);
6381 nfsm_chain_cleanup(&nmrep);
6382
6383 if (!lockerror) {
6384 if (!create_error && (dnp->n_flag & NNEGNCENTRIES)) {
6385 dnp->n_flag &= ~NNEGNCENTRIES;
6386 cache_purge_negatives(NFSTOV(dnp));
6387 }
6388 dnp->n_flag |= NMODIFIED;
6389 nfs_node_unlock(dnp);
6390 /* nfs_getattr() will check changed and purge caches */
6391 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
6392 }
6393
6394 if (!error && fh.fh_len) {
6395 /* create the vnode with the filehandle and attributes */
6396 xid = savedxid;
6397 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np);
6398 if (!error)
6399 newvp = NFSTOV(np);
6400 }
6401 NVATTR_CLEANUP(&nvattr);
6402
6403 if (!namedattrs)
6404 nfs_dulookup_finish(&dul, dnp, ctx);
6405
6406 /*
6407 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
6408 * if we can succeed in looking up the object.
6409 */
6410 if ((create_error == EEXIST) || (!create_error && !newvp)) {
6411 error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
6412 if (!error) {
6413 newvp = NFSTOV(np);
6414 if (vnode_vtype(newvp) != nfstov_type(type, nfsvers))
6415 error = EEXIST;
6416 }
6417 }
6418 if (!busyerror)
6419 nfs_node_clear_busy(dnp);
6420 if (error) {
6421 if (newvp) {
6422 nfs_node_unlock(np);
6423 vnode_put(newvp);
6424 }
6425 } else {
6426 nfs_node_unlock(np);
6427 *npp = np;
6428 }
6429 return (error);
6430}
6431
6432int
6433nfs4_vnop_mknod(
6434 struct vnop_mknod_args /* {
6435 struct vnodeop_desc *a_desc;
6436 vnode_t a_dvp;
6437 vnode_t *a_vpp;
6438 struct componentname *a_cnp;
6439 struct vnode_attr *a_vap;
6440 vfs_context_t a_context;
6441 } */ *ap)
6442{
6443 nfsnode_t np = NULL;
6444 struct nfsmount *nmp;
6445 int error;
6446
6447 nmp = VTONMP(ap->a_dvp);
6448 if (!nmp)
6449 return (ENXIO);
6450
6451 if (!VATTR_IS_ACTIVE(ap->a_vap, va_type))
6452 return (EINVAL);
6453 switch (ap->a_vap->va_type) {
6454 case VBLK:
6455 case VCHR:
6456 case VFIFO:
6457 case VSOCK:
6458 break;
6459 default:
6460 return (ENOTSUP);
6461 }
6462
6463 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
6464 vtonfs_type(ap->a_vap->va_type, nmp->nm_vers), NULL, &np);
6465 if (!error)
6466 *ap->a_vpp = NFSTOV(np);
6467 return (error);
6468}
6469
6470int
6471nfs4_vnop_mkdir(
6472 struct vnop_mkdir_args /* {
6473 struct vnodeop_desc *a_desc;
6474 vnode_t a_dvp;
6475 vnode_t *a_vpp;
6476 struct componentname *a_cnp;
6477 struct vnode_attr *a_vap;
6478 vfs_context_t a_context;
6479 } */ *ap)
6480{
6481 nfsnode_t np = NULL;
6482 int error;
6483
6484 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
6485 NFDIR, NULL, &np);
6486 if (!error)
6487 *ap->a_vpp = NFSTOV(np);
6488 return (error);
6489}
6490
6491int
6492nfs4_vnop_symlink(
6493 struct vnop_symlink_args /* {
6494 struct vnodeop_desc *a_desc;
6495 vnode_t a_dvp;
6496 vnode_t *a_vpp;
6497 struct componentname *a_cnp;
6498 struct vnode_attr *a_vap;
6499 char *a_target;
6500 vfs_context_t a_context;
6501 } */ *ap)
6502{
6503 nfsnode_t np = NULL;
6504 int error;
6505
6506 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
6507 NFLNK, ap->a_target, &np);
6508 if (!error)
6509 *ap->a_vpp = NFSTOV(np);
6510 return (error);
6511}
6512
6513int
6514nfs4_vnop_link(
6515 struct vnop_link_args /* {
6516 struct vnodeop_desc *a_desc;
6517 vnode_t a_vp;
6518 vnode_t a_tdvp;
6519 struct componentname *a_cnp;
6520 vfs_context_t a_context;
6521 } */ *ap)
6522{
6523 vfs_context_t ctx = ap->a_context;
6524 vnode_t vp = ap->a_vp;
6525 vnode_t tdvp = ap->a_tdvp;
6526 struct componentname *cnp = ap->a_cnp;
6527 int error = 0, lockerror = ENOENT, status;
6528 struct nfsmount *nmp;
6529 nfsnode_t np = VTONFS(vp);
6530 nfsnode_t tdnp = VTONFS(tdvp);
6531 int nfsvers, numops;
6532 u_int64_t xid, savedxid;
6533 struct nfsm_chain nmreq, nmrep;
6534 struct nfsreq_secinfo_args si;
6535
6536 if (vnode_mount(vp) != vnode_mount(tdvp))
6537 return (EXDEV);
6538
6539 nmp = VTONMP(vp);
6540 if (!nmp)
6541 return (ENXIO);
6542 nfsvers = nmp->nm_vers;
6543 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
6544 return (EINVAL);
6545 if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
6546 return (EINVAL);
6547
6548 /*
6549 * Push all writes to the server, so that the attribute cache
6550 * doesn't get "out of sync" with the server.
6551 * XXX There should be a better way!
6552 */
6553 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
6554
6555 if ((error = nfs_node_set_busy2(tdnp, np, vfs_context_thread(ctx))))
6556 return (error);
6557
6558 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
6559 nfsm_chain_null(&nmreq);
6560 nfsm_chain_null(&nmrep);
6561
6562 // PUTFH(SOURCE), SAVEFH, PUTFH(DIR), LINK, GETATTR(DIR), RESTOREFH, GETATTR
6563 numops = 7;
6564 nfsm_chain_build_alloc_init(error, &nmreq, 29 * NFSX_UNSIGNED + cnp->cn_namelen);
6565 nfsm_chain_add_compound_header(error, &nmreq, "link", numops);
6566 numops--;
6567 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6568 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
6569 numops--;
6570 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
6571 numops--;
6572 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6573 nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
6574 numops--;
6575 nfsm_chain_add_32(error, &nmreq, NFS_OP_LINK);
6576 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
6577 numops--;
6578 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6579 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
6580 numops--;
6581 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
6582 numops--;
6583 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6584 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
6585 nfsm_chain_build_done(error, &nmreq);
6586 nfsm_assert(error, (numops == 0), EPROTO);
6587 nfsmout_if(error);
6588 error = nfs_request(tdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
6589
6590 if ((lockerror = nfs_node_lock2(tdnp, np))) {
6591 error = lockerror;
6592 goto nfsmout;
6593 }
6594 nfsm_chain_skip_tag(error, &nmrep);
6595 nfsm_chain_get_32(error, &nmrep, numops);
6596 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6597 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
6598 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6599 nfsm_chain_op_check(error, &nmrep, NFS_OP_LINK);
6600 nfsm_chain_check_change_info(error, &nmrep, tdnp);
6601 /* directory attributes: if we don't get them, make sure to invalidate */
6602 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6603 savedxid = xid;
6604 nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
6605 if (error)
6606 NATTRINVALIDATE(tdnp);
6607 /* link attributes: if we don't get them, make sure to invalidate */
6608 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
6609 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6610 xid = savedxid;
6611 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
6612 if (error)
6613 NATTRINVALIDATE(np);
6614nfsmout:
6615 nfsm_chain_cleanup(&nmreq);
6616 nfsm_chain_cleanup(&nmrep);
6617 if (!lockerror)
6618 tdnp->n_flag |= NMODIFIED;
6619 /* Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. */
6620 if (error == EEXIST)
6621 error = 0;
6622 if (!error && (tdnp->n_flag & NNEGNCENTRIES)) {
6623 tdnp->n_flag &= ~NNEGNCENTRIES;
6624 cache_purge_negatives(tdvp);
6625 }
6626 if (!lockerror)
6627 nfs_node_unlock2(tdnp, np);
6628 nfs_node_clear_busy2(tdnp, np);
6629 return (error);
6630}
6631
6632int
6633nfs4_vnop_rmdir(
6634 struct vnop_rmdir_args /* {
6635 struct vnodeop_desc *a_desc;
6636 vnode_t a_dvp;
6637 vnode_t a_vp;
6638 struct componentname *a_cnp;
6639 vfs_context_t a_context;
6640 } */ *ap)
6641{
6642 vfs_context_t ctx = ap->a_context;
6643 vnode_t vp = ap->a_vp;
6644 vnode_t dvp = ap->a_dvp;
6645 struct componentname *cnp = ap->a_cnp;
6646 struct nfsmount *nmp;
6647 int error = 0, namedattrs;
6648 nfsnode_t np = VTONFS(vp);
6649 nfsnode_t dnp = VTONFS(dvp);
6650 struct nfs_dulookup dul;
6651
6652 if (vnode_vtype(vp) != VDIR)
6653 return (EINVAL);
6654
6655 nmp = NFSTONMP(dnp);
6656 if (!nmp)
6657 return (ENXIO);
6658 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
6659
6660 if ((error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx))))
6661 return (error);
6662
6663 if (!namedattrs) {
6664 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
6665 nfs_dulookup_start(&dul, dnp, ctx);
6666 }
6667
6668 error = nfs4_remove_rpc(dnp, cnp->cn_nameptr, cnp->cn_namelen,
6669 vfs_context_thread(ctx), vfs_context_ucred(ctx));
6670
6671 nfs_name_cache_purge(dnp, np, cnp, ctx);
6672 /* nfs_getattr() will check changed and purge caches */
6673 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
6674 if (!namedattrs)
6675 nfs_dulookup_finish(&dul, dnp, ctx);
6676 nfs_node_clear_busy2(dnp, np);
6677
6678 /*
6679 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
6680 */
6681 if (error == ENOENT)
6682 error = 0;
6683 if (!error) {
6684 /*
6685 * remove nfsnode from hash now so we can't accidentally find it
6686 * again if another object gets created with the same filehandle
6687 * before this vnode gets reclaimed
6688 */
6689 lck_mtx_lock(nfs_node_hash_mutex);
6690 if (np->n_hflag & NHHASHED) {
6691 LIST_REMOVE(np, n_hash);
6692 np->n_hflag &= ~NHHASHED;
6693 FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
6694 }
6695 lck_mtx_unlock(nfs_node_hash_mutex);
6696 }
6697 return (error);
6698}
6699
/*
 * NFSv4 Named Attributes
 *
 * Both the extended attributes interface and the named streams interface
 * are backed by NFSv4 named attributes. The implementations for both use
 * a common set of routines in an attempt to reduce code duplication, to
 * increase efficiency, to increase caching of both names and data, and to
 * confine the complexity.
 *
 * Each NFS node caches its named attribute directory's file handle.
 * The directory nodes for the named attribute directories are handled
 * exactly like regular directories (with a couple minor exceptions).
 * Named attribute nodes are also treated as much like regular files as
 * possible.
 *
 * Most of the heavy lifting is done by nfs4_named_attr_get().
 */
6717
6718/*
6719 * Get the given node's attribute directory node.
6720 * If !fetch, then only return a cached node.
6721 * Otherwise, we will attempt to fetch the node from the server.
6722 * (Note: the node should be marked busy.)
b0d623f7 6723 */
6d2010ae
A
6724nfsnode_t
6725nfs4_named_attr_dir_get(nfsnode_t np, int fetch, vfs_context_t ctx)
b0d623f7 6726{
6d2010ae 6727 nfsnode_t adnp = NULL;
b0d623f7 6728 struct nfsmount *nmp;
6d2010ae
A
6729 int error = 0, status, numops;
6730 struct nfsm_chain nmreq, nmrep;
6731 u_int64_t xid;
6732 uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
6733 fhandle_t fh;
6734 struct nfs_vattr nvattr;
6735 struct componentname cn;
6736 struct nfsreq rq, *req = &rq;
6737 struct nfsreq_secinfo_args si;
b0d623f7 6738
6d2010ae 6739 nmp = NFSTONMP(np);
b0d623f7 6740 if (!nmp)
6d2010ae
A
6741 return (NULL);
6742 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
6743 return (NULL);
b0d623f7 6744
6d2010ae
A
6745 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
6746 NVATTR_INIT(&nvattr);
6747 nfsm_chain_null(&nmreq);
6748 nfsm_chain_null(&nmrep);
b0d623f7 6749
6d2010ae
A
6750 bzero(&cn, sizeof(cn));
6751 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
6752 cn.cn_namelen = strlen(_PATH_FORKSPECIFIER);
6753 cn.cn_nameiop = LOOKUP;
6754
6755 if (np->n_attrdirfh) {
6756 // XXX can't set parent correctly (to np) yet
6757 error = nfs_nget(nmp->nm_mountp, NULL, &cn, np->n_attrdirfh+1, *np->n_attrdirfh,
6758 NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &adnp);
6759 if (adnp)
6760 goto nfsmout;
6761 }
6762 if (!fetch) {
6763 error = ENOENT;
6764 goto nfsmout;
2d21ac55
A
6765 }
6766
6d2010ae
A
6767 // PUTFH, OPENATTR, GETATTR
6768 numops = 3;
6769 nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
6770 nfsm_chain_add_compound_header(error, &nmreq, "openattr", numops);
6771 numops--;
6772 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6773 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
6774 numops--;
6775 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
6776 nfsm_chain_add_32(error, &nmreq, 0);
6777 numops--;
6778 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6779 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
6780 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
6781 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
6782 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
6783 nfsm_chain_build_done(error, &nmreq);
6784 nfsm_assert(error, (numops == 0), EPROTO);
6785 nfsmout_if(error);
6786 error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND,
6787 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
b0d623f7 6788 if (!error)
6d2010ae 6789 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
b0d623f7 6790
6d2010ae
A
6791 nfsm_chain_skip_tag(error, &nmrep);
6792 nfsm_chain_get_32(error, &nmrep, numops);
6793 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6794 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
6795 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6796 nfsmout_if(error);
6797 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
6798 nfsmout_if(error);
6799 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh.fh_len) {
6800 error = ENOENT;
6801 goto nfsmout;
2d21ac55 6802 }
6d2010ae
A
6803 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) {
6804 /* (re)allocate attrdir fh buffer */
6805 if (np->n_attrdirfh)
6806 FREE(np->n_attrdirfh, M_TEMP);
6807 MALLOC(np->n_attrdirfh, u_char*, fh.fh_len+1, M_TEMP, M_WAITOK);
2d21ac55 6808 }
6d2010ae
A
6809 if (!np->n_attrdirfh) {
6810 error = ENOMEM;
6811 goto nfsmout;
b0d623f7 6812 }
6d2010ae
A
6813 /* cache the attrdir fh in the node */
6814 *np->n_attrdirfh = fh.fh_len;
6815 bcopy(fh.fh_data, np->n_attrdirfh+1, fh.fh_len);
6816 /* create node for attrdir */
6817 // XXX can't set parent correctly (to np) yet
6818 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp);
6819nfsmout:
6820 NVATTR_CLEANUP(&nvattr);
6821 nfsm_chain_cleanup(&nmreq);
6822 nfsm_chain_cleanup(&nmrep);
2d21ac55 6823
6d2010ae
A
6824 if (adnp) {
6825 /* sanity check that this node is an attribute directory */
6826 if (adnp->n_vattr.nva_type != VDIR)
6827 error = EINVAL;
6828 if (!(adnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
6829 error = EINVAL;
6830 nfs_node_unlock(adnp);
6831 if (error)
6832 vnode_put(NFSTOV(adnp));
b0d623f7 6833 }
6d2010ae 6834 return (error ? NULL : adnp);
b0d623f7
A
6835}
6836
/*
 * Get the given node's named attribute node for the name given.
 *
 * In an effort to increase the performance of named attribute access, we try
 * to reduce server requests by doing the following:
 *
 * - cache the node's named attribute directory file handle in the node
 * - maintain a directory vnode for the attribute directory
 * - use name cache entries (positive and negative) to speed up lookups
 * - optionally open the named attribute (with the given accessMode) in the same RPC
 * - combine attribute directory retrieval with the lookup/open RPC
 * - optionally prefetch the named attribute's first block of data in the same RPC
 *
 * Also, in an attempt to reduce the number of copies/variations of this code,
 * parts of the RPC building/processing code are conditionalized on what is
 * needed for any particular request (openattr, lookup vs. open, read).
 *
 * Note that because we may not have the attribute directory node when we start
 * the lookup/open, we lock both the node and the attribute directory node.
 */

/* flag bits for nfs4_named_attr_get() */
#define NFS_GET_NAMED_ATTR_CREATE		0x1	/* create the attribute if missing */
#define NFS_GET_NAMED_ATTR_CREATE_GUARDED	0x2	/* creation must be exclusive */
#define NFS_GET_NAMED_ATTR_TRUNCATE		0x4	/* truncate existing attribute data */
#define NFS_GET_NAMED_ATTR_PREFETCH		0x8	/* prefetch first block of data */
b0d623f7 6863int
6d2010ae
A
6864nfs4_named_attr_get(
6865 nfsnode_t np,
2d21ac55 6866 struct componentname *cnp,
6d2010ae
A
6867 uint32_t accessMode,
6868 int flags,
6869 vfs_context_t ctx,
6870 nfsnode_t *anpp,
6871 struct nfs_open_file **nofpp)
2d21ac55
A
6872{
6873 struct nfsmount *nmp;
6d2010ae
A
6874 int error = 0, open_error = EIO;
6875 int inuse = 0, adlockerror = ENOENT, busyerror = ENOENT, adbusyerror = ENOENT, nofpbusyerror = ENOENT;
6876 int create, guarded, prefetch, truncate, noopbusy = 0;
6877 int open, status, numops, hadattrdir, negnamecache;
6878 struct nfs_vattr nvattr;
6879 struct vnode_attr vattr;
6880 nfsnode_t adnp = NULL, anp = NULL;
6881 vnode_t avp = NULL;
2d21ac55 6882 u_int64_t xid, savedxid = 0;
2d21ac55
A
6883 struct nfsm_chain nmreq, nmrep;
6884 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6d2010ae
A
6885 uint32_t denyMode, rflags, delegation, recall, eof, rlen, retlen;
6886 nfs_stateid stateid, dstateid;
2d21ac55 6887 fhandle_t fh;
6d2010ae
A
6888 struct nfs_open_owner *noop = NULL;
6889 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
6890 struct vnop_access_args naa;
6891 thread_t thd;
6892 kauth_cred_t cred;
6893 struct timeval now;
6894 char sbuf[64], *s;
6895 uint32_t ace_type, ace_flags, ace_mask, len, slen;
6896 struct kauth_ace ace;
6897 struct nfsreq rq, *req = &rq;
6898 struct nfsreq_secinfo_args si;
6899
6900 *anpp = NULL;
6901 fh.fh_len = 0;
6902 rflags = delegation = recall = eof = rlen = retlen = 0;
6903 ace.ace_flags = 0;
6904 s = sbuf;
6905 slen = sizeof(sbuf);
2d21ac55 6906
6d2010ae 6907 nmp = NFSTONMP(np);
2d21ac55
A
6908 if (!nmp)
6909 return (ENXIO);
6d2010ae
A
6910 NVATTR_INIT(&nvattr);
6911 negnamecache = !NMFLAG(nmp, NONEGNAMECACHE);
6912 thd = vfs_context_thread(ctx);
6913 cred = vfs_context_ucred(ctx);
6914 create = (flags & NFS_GET_NAMED_ATTR_CREATE) ? NFS_OPEN_CREATE : NFS_OPEN_NOCREATE;
6915 guarded = (flags & NFS_GET_NAMED_ATTR_CREATE_GUARDED) ? NFS_CREATE_GUARDED : NFS_CREATE_UNCHECKED;
6916 truncate = (flags & NFS_GET_NAMED_ATTR_TRUNCATE);
6917 prefetch = (flags & NFS_GET_NAMED_ATTR_PREFETCH);
6918
6919 if (!create) {
6920 error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED);
6921 if (error)
6922 return (error);
6923 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
6924 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS))
6925 return (ENOATTR);
6926 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_NONE) {
6927 /* shouldn't happen... but just be safe */
6928 printf("nfs4_named_attr_get: create with no access %s\n", cnp->cn_nameptr);
6929 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
6930 }
6931 open = (accessMode != NFS_OPEN_SHARE_ACCESS_NONE);
6932 if (open) {
6933 /*
6934 * We're trying to open the file.
6935 * We'll create/open it with the given access mode,
6936 * and set NFS_OPEN_FILE_CREATE.
6937 */
6938 denyMode = NFS_OPEN_SHARE_DENY_NONE;
6939 if (prefetch && guarded)
6940 prefetch = 0; /* no sense prefetching data that can't be there */
2d21ac55 6941
6d2010ae
A
6942 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
6943 if (!noop)
6944 return (ENOMEM);
2d21ac55
A
6945 }
6946
6d2010ae
A
6947 if ((error = busyerror = nfs_node_set_busy(np, vfs_context_thread(ctx))))
6948 return (error);
2d21ac55 6949
6d2010ae
A
6950 adnp = nfs4_named_attr_dir_get(np, 0, ctx);
6951 hadattrdir = (adnp != NULL);
6952 if (prefetch) {
6953 microuptime(&now);
6954 /* use the special state ID because we don't have a real one to send */
6955 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
6956 rlen = MIN(nmp->nm_rsize, nmp->nm_biosize);
6957 }
6958 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
2d21ac55
A
6959 nfsm_chain_null(&nmreq);
6960 nfsm_chain_null(&nmrep);
6961
6d2010ae
A
6962 if (hadattrdir) {
6963 if ((error = adbusyerror = nfs_node_set_busy(adnp, vfs_context_thread(ctx))))
6964 goto nfsmout;
6965 /* nfs_getattr() will check changed and purge caches */
6966 error = nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
6967 nfsmout_if(error);
6968 error = cache_lookup(NFSTOV(adnp), &avp, cnp);
6969 switch (error) {
6970 case ENOENT:
6971 /* negative cache entry */
6972 goto nfsmout;
6973 case 0:
6974 /* cache miss */
6975 /* try dir buf cache lookup */
6976 error = nfs_dir_buf_cache_lookup(adnp, &anp, cnp, ctx, 0);
6977 if (!error && anp) {
6978 /* dir buf cache hit */
6979 *anpp = anp;
6980 error = -1;
6981 }
6982 if (error != -1) /* cache miss */
6983 break;
6984 /* FALLTHROUGH */
6985 case -1:
6986 /* cache hit, not really an error */
6987 OSAddAtomic(1, &nfsstats.lookupcache_hits);
6988 if (!anp && avp)
6989 *anpp = anp = VTONFS(avp);
6990
6991 nfs_node_clear_busy(adnp);
6992 adbusyerror = ENOENT;
6993
6994 /* check for directory access */
6995 naa.a_desc = &vnop_access_desc;
6996 naa.a_vp = NFSTOV(adnp);
6997 naa.a_action = KAUTH_VNODE_SEARCH;
6998 naa.a_context = ctx;
6999
7000 /* compute actual success/failure based on accessibility */
7001 error = nfs_vnop_access(&naa);
7002 /* FALLTHROUGH */
7003 default:
7004 /* we either found it, or hit an error */
7005 if (!error && guarded) {
7006 /* found cached entry but told not to use it */
7007 error = EEXIST;
7008 vnode_put(NFSTOV(anp));
7009 *anpp = anp = NULL;
7010 }
7011 /* we're done if error or we don't need to open */
7012 if (error || !open)
7013 goto nfsmout;
7014 /* no error and we need to open... */
7015 }
7016 }
7017
7018 if (open) {
7019restart:
7020 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
7021 if (error) {
7022 nfs_open_owner_rele(noop);
7023 noop = NULL;
7024 goto nfsmout;
7025 }
7026 inuse = 1;
7027
7028 /* grab an open file - possibly provisional/nodeless if cache_lookup() failed */
7029 error = nfs_open_file_find(anp, noop, &newnofp, 0, 0, 1);
7030 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
7031 printf("nfs4_named_attr_get: LOST %d %s\n", kauth_cred_getuid(noop->noo_cred), cnp->cn_nameptr);
7032 error = EIO;
7033 }
7034 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
7035 nfs_mount_state_in_use_end(nmp, 0);
7036 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
7037 nfs_open_file_destroy(newnofp);
7038 newnofp = NULL;
7039 if (!error)
7040 goto restart;
7041 }
7042 if (!error)
7043 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
7044 if (error) {
7045 if (newnofp)
7046 nfs_open_file_destroy(newnofp);
7047 newnofp = NULL;
7048 goto nfsmout;
7049 }
7050 if (anp) {
7051 /*
7052 * We already have the node. So we just need to open
7053 * it - which we may be able to do with a delegation.
7054 */
7055 open_error = error = nfs4_open(anp, newnofp, accessMode, denyMode, ctx);
7056 if (!error) {
7057 /* open succeeded, so our open file is no longer temporary */
7058 nofp = newnofp;
7059 nofpbusyerror = 0;
7060 newnofp = NULL;
7061 if (nofpp)
7062 *nofpp = nofp;
7063 }
7064 goto nfsmout;
7065 }
7066 }
7067
7068 /*
7069 * We either don't have the attrdir or we didn't find the attribute
7070 * in the name cache, so we need to talk to the server.
7071 *
7072 * If we don't have the attrdir, we'll need to ask the server for that too.
7073 * If the caller is requesting that the attribute be created, we need to
7074 * make sure the attrdir is created.
7075 * The caller may also request that the first block of an existing attribute
7076 * be retrieved at the same time.
7077 */
7078
7079 if (open) {
7080 /* need to mark the open owner busy during the RPC */
7081 if ((error = nfs_open_owner_set_busy(noop, thd)))
7082 goto nfsmout;
7083 noopbusy = 1;
7084 }
7085
7086 /*
7087 * We'd like to get updated post-open/lookup attributes for the
7088 * directory and we may also want to prefetch some data via READ.
7089 * We'd like the READ results to be last so that we can leave the
7090 * data in the mbufs until the end.
7091 *
7092 * At a minimum we're sending: PUTFH, LOOKUP/OPEN, GETATTR, PUTFH, GETATTR
7093 */
7094 numops = 5;
7095 if (!hadattrdir)
7096 numops += 3; // also sending: OPENATTR, GETATTR, OPENATTR
7097 if (prefetch)
7098 numops += 4; // also sending: SAVEFH, RESTOREFH, NVERIFY, READ
7099 nfsm_chain_build_alloc_init(error, &nmreq, 64 * NFSX_UNSIGNED + cnp->cn_namelen);
7100 nfsm_chain_add_compound_header(error, &nmreq, "getnamedattr", numops);
7101 if (hadattrdir) {
7102 numops--;
7103 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7104 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7105 } else {
7106 numops--;
7107 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7108 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7109 numops--;
7110 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7111 nfsm_chain_add_32(error, &nmreq, create ? 1 : 0);
7112 numops--;
7113 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7114 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7115 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7116 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7117 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7118 }
7119 if (open) {
7120 numops--;
7121 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
7122 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
7123 nfsm_chain_add_32(error, &nmreq, accessMode);
7124 nfsm_chain_add_32(error, &nmreq, denyMode);
7125 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
7126 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
7127 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
7128 nfsm_chain_add_32(error, &nmreq, create);
7129 if (create) {
7130 nfsm_chain_add_32(error, &nmreq, guarded);
7131 VATTR_INIT(&vattr);
7132 if (truncate)
7133 VATTR_SET(&vattr, va_data_size, 0);
7134 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
7135 }
7136 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
7137 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7138 } else {
7139 numops--;
7140 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
7141 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
2d21ac55 7142 }
2d21ac55
A
7143 numops--;
7144 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7145 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7146 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7147 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7148 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
6d2010ae
A
7149 if (prefetch) {
7150 numops--;
7151 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
7152 }
7153 if (hadattrdir) {
7154 numops--;
7155 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7156 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7157 } else {
7158 numops--;
7159 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7160 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7161 numops--;
7162 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7163 nfsm_chain_add_32(error, &nmreq, 0);
7164 }
2d21ac55
A
7165 numops--;
7166 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7167 nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
7168 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
6d2010ae
A
7169 if (prefetch) {
7170 numops--;
7171 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
7172 numops--;
7173 nfsm_chain_add_32(error, &nmreq, NFS_OP_NVERIFY);
7174 VATTR_INIT(&vattr);
7175 VATTR_SET(&vattr, va_data_size, 0);
7176 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
7177 numops--;
7178 nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
7179 nfsm_chain_add_stateid(error, &nmreq, &stateid);
7180 nfsm_chain_add_64(error, &nmreq, 0);
7181 nfsm_chain_add_32(error, &nmreq, rlen);
7182 }
2d21ac55
A
7183 nfsm_chain_build_done(error, &nmreq);
7184 nfsm_assert(error, (numops == 0), EPROTO);
7185 nfsmout_if(error);
6d2010ae
A
7186 error = nfs_request_async(hadattrdir ? adnp : np, NULL, &nmreq, NFSPROC4_COMPOUND,
7187 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, open ? R_NOINTR: 0, NULL, &req);
7188 if (!error)
2d21ac55 7189 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
2d21ac55 7190
6d2010ae
A
7191 if (hadattrdir && ((adlockerror = nfs_node_lock(adnp))))
7192 error = adlockerror;
7193 savedxid = xid;
2d21ac55
A
7194 nfsm_chain_skip_tag(error, &nmrep);
7195 nfsm_chain_get_32(error, &nmrep, numops);
7196 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6d2010ae
A
7197 if (!hadattrdir) {
7198 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
7199 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7200 nfsmout_if(error);
7201 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
7202 nfsmout_if(error);
7203 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) && fh.fh_len) {
7204 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) {
7205 /* (re)allocate attrdir fh buffer */
7206 if (np->n_attrdirfh)
7207 FREE(np->n_attrdirfh, M_TEMP);
7208 MALLOC(np->n_attrdirfh, u_char*, fh.fh_len+1, M_TEMP, M_WAITOK);
7209 }
7210 if (np->n_attrdirfh) {
7211 /* remember the attrdir fh in the node */
7212 *np->n_attrdirfh = fh.fh_len;
7213 bcopy(fh.fh_data, np->n_attrdirfh+1, fh.fh_len);
7214 /* create busied node for attrdir */
7215 struct componentname cn;
7216 bzero(&cn, sizeof(cn));
7217 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
7218 cn.cn_namelen = strlen(_PATH_FORKSPECIFIER);
7219 cn.cn_nameiop = LOOKUP;
7220 // XXX can't set parent correctly (to np) yet
7221 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp);
7222 if (!error) {
7223 adlockerror = 0;
7224 /* set the node busy */
7225 SET(adnp->n_flag, NBUSY);
7226 adbusyerror = 0;
7227 }
7228 /* if no adnp, oh well... */
7229 error = 0;
7230 }
7231 }
7232 NVATTR_CLEANUP(&nvattr);
7233 fh.fh_len = 0;
7234 }
7235 if (open) {
7236 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
7237 nfs_owner_seqid_increment(noop, NULL, error);
7238 nfsm_chain_get_stateid(error, &nmrep, &newnofp->nof_stateid);
7239 nfsm_chain_check_change_info(error, &nmrep, adnp);
7240 nfsm_chain_get_32(error, &nmrep, rflags);
7241 bmlen = NFS_ATTR_BITMAP_LEN;
7242 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
7243 nfsm_chain_get_32(error, &nmrep, delegation);
7244 if (!error)
7245 switch (delegation) {
7246 case NFS_OPEN_DELEGATE_NONE:
7247 break;
7248 case NFS_OPEN_DELEGATE_READ:
7249 case NFS_OPEN_DELEGATE_WRITE:
7250 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
7251 nfsm_chain_get_32(error, &nmrep, recall);
7252 if (delegation == NFS_OPEN_DELEGATE_WRITE) // space (skip) XXX
7253 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
7254 /* if we have any trouble accepting the ACE, just invalidate it */
7255 ace_type = ace_flags = ace_mask = len = 0;
7256 nfsm_chain_get_32(error, &nmrep, ace_type);
7257 nfsm_chain_get_32(error, &nmrep, ace_flags);
7258 nfsm_chain_get_32(error, &nmrep, ace_mask);
7259 nfsm_chain_get_32(error, &nmrep, len);
7260 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
7261 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
7262 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
7263 if (!error && (len >= slen)) {
7264 MALLOC(s, char*, len+1, M_TEMP, M_WAITOK);
7265 if (s)
7266 slen = len+1;
7267 else
7268 ace.ace_flags = 0;
7269 }
7270 if (s)
7271 nfsm_chain_get_opaque(error, &nmrep, len, s);
7272 else
7273 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
7274 if (!error && s) {
7275 s[len] = '\0';
7276 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP)))
7277 ace.ace_flags = 0;
7278 }
7279 if (error || !s)
7280 ace.ace_flags = 0;
7281 if (s && (s != sbuf))
7282 FREE(s, M_TEMP);
7283 break;
7284 default:
7285 error = EBADRPC;
7286 break;
7287 }
7288 /* At this point if we have no error, the object was created/opened. */
7289 open_error = error;
7290 } else {
7291 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOOKUP);
7292 }
2d21ac55
A
7293 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7294 nfsmout_if(error);
6d2010ae 7295 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
2d21ac55 7296 nfsmout_if(error);
6d2010ae
A
7297 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh.fh_len) {
7298 error = EIO;
2d21ac55
A
7299 goto nfsmout;
7300 }
6d2010ae
A
7301 if (prefetch)
7302 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
7303 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7304 if (!hadattrdir)
7305 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
2d21ac55 7306 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6d2010ae
A
7307 nfsmout_if(error);
7308 xid = savedxid;
7309 nfsm_chain_loadattr(error, &nmrep, adnp, nmp->nm_vers, &xid);
7310 nfsmout_if(error);
2d21ac55 7311
6d2010ae
A
7312 if (open) {
7313 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX)
7314 newnofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
7315 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
7316 if (adnp) {
7317 nfs_node_unlock(adnp);
7318 adlockerror = ENOENT;
7319 }
7320 NVATTR_CLEANUP(&nvattr);
7321 error = nfs4_open_confirm_rpc(nmp, adnp ? adnp : np, fh.fh_data, fh.fh_len, noop, &newnofp->nof_stateid, thd, cred, &nvattr, &xid);
7322 nfsmout_if(error);
7323 savedxid = xid;
7324 if ((adlockerror = nfs_node_lock(adnp)))
7325 error = adlockerror;
2d21ac55 7326 }
2d21ac55
A
7327 }
7328
6d2010ae
A
7329nfsmout:
7330 if (open && adnp && !adlockerror) {
7331 if (!open_error && (adnp->n_flag & NNEGNCENTRIES)) {
7332 adnp->n_flag &= ~NNEGNCENTRIES;
7333 cache_purge_negatives(NFSTOV(adnp));
7334 }
7335 adnp->n_flag |= NMODIFIED;
7336 nfs_node_unlock(adnp);
7337 adlockerror = ENOENT;
7338 nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
7339 }
7340 if (adnp && !adlockerror && (error == ENOENT) &&
7341 (cnp->cn_flags & MAKEENTRY) && (cnp->cn_nameiop != CREATE) && negnamecache) {
7342 /* add a negative entry in the name cache */
7343 cache_enter(NFSTOV(adnp), NULL, cnp);
7344 adnp->n_flag |= NNEGNCENTRIES;
7345 }
7346 if (adnp && !adlockerror) {
7347 nfs_node_unlock(adnp);
7348 adlockerror = ENOENT;
7349 }
7350 if (!error && !anp && fh.fh_len) {
2d21ac55
A
7351 /* create the vnode with the filehandle and attributes */
7352 xid = savedxid;
6d2010ae
A
7353 error = nfs_nget(NFSTOMP(np), adnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &anp);
7354 if (!error) {
7355 *anpp = anp;
7356 nfs_node_unlock(anp);
7357 }
7358 if (!error && open) {
7359 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
7360 /* After we have a node, add our open file struct to the node */
7361 nofp = newnofp;
7362 error = nfs_open_file_find_internal(anp, noop, &nofp, 0, 0, 0);
7363 if (error) {
7364 /* This shouldn't happen, because we passed in a new nofp to use. */
7365 printf("nfs_open_file_find_internal failed! %d\n", error);
7366 nofp = NULL;
7367 } else if (nofp != newnofp) {
7368 /*
7369 * Hmm... an open file struct already exists.
7370 * Mark the existing one busy and merge our open into it.
7371 * Then destroy the one we created.
7372 * Note: there's no chance of an open confict because the
7373 * open has already been granted.
7374 */
7375 nofpbusyerror = nfs_open_file_set_busy(nofp, NULL);
7376 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
7377 nofp->nof_stateid = newnofp->nof_stateid;
7378 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)
7379 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
7380 nfs_open_file_clear_busy(newnofp);
7381 nfs_open_file_destroy(newnofp);
7382 newnofp = NULL;
7383 }
7384 if (!error) {
7385 newnofp = NULL;
7386 nofpbusyerror = 0;
7387 /* mark the node as holding a create-initiated open */
7388 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
7389 nofp->nof_creator = current_thread();
7390 if (nofpp)
7391 *nofpp = nofp;
7392 }
7393 }
2d21ac55 7394 }
6d2010ae
A
7395 NVATTR_CLEANUP(&nvattr);
7396 if (open && ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE))) {
7397 if (!error && anp && !recall) {
7398 /* stuff the delegation state in the node */
7399 lck_mtx_lock(&anp->n_openlock);
7400 anp->n_openflags &= ~N_DELEG_MASK;
7401 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
7402 anp->n_dstateid = dstateid;
7403 anp->n_dace = ace;
7404 if (anp->n_dlink.tqe_next == NFSNOLIST) {
7405 lck_mtx_lock(&nmp->nm_lock);
7406 if (anp->n_dlink.tqe_next == NFSNOLIST)
7407 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
7408 lck_mtx_unlock(&nmp->nm_lock);
7409 }
7410 lck_mtx_unlock(&anp->n_openlock);
7411 } else {
7412 /* give the delegation back */
7413 if (anp) {
7414 if (NFS_CMPFH(anp, fh.fh_data, fh.fh_len)) {
7415 /* update delegation state and return it */
7416 lck_mtx_lock(&anp->n_openlock);
7417 anp->n_openflags &= ~N_DELEG_MASK;
7418 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
7419 anp->n_dstateid = dstateid;
7420 anp->n_dace = ace;
7421 if (anp->n_dlink.tqe_next == NFSNOLIST) {
7422 lck_mtx_lock(&nmp->nm_lock);
7423 if (anp->n_dlink.tqe_next == NFSNOLIST)
7424 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
7425 lck_mtx_unlock(&nmp->nm_lock);
7426 }
7427 lck_mtx_unlock(&anp->n_openlock);
7428 /* don't need to send a separate delegreturn for fh */
7429 fh.fh_len = 0;
7430 }
7431 /* return anp's current delegation */
7432 nfs4_delegation_return(anp, 0, thd, cred);
7433 }
7434 if (fh.fh_len) /* return fh's delegation if it wasn't for anp */
7435 nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred);
7436 }
7437 }
7438 if (open) {
7439 if (newnofp) {
7440 /* need to cleanup our temporary nofp */
7441 nfs_open_file_clear_busy(newnofp);
7442 nfs_open_file_destroy(newnofp);
7443 newnofp = NULL;
7444 } else if (nofp && !nofpbusyerror) {
7445 nfs_open_file_clear_busy(nofp);
7446 nofpbusyerror = ENOENT;
7447 }
7448 if (inuse && nfs_mount_state_in_use_end(nmp, error)) {
7449 inuse = 0;
7450 nofp = newnofp = NULL;
7451 rflags = delegation = recall = eof = rlen = retlen = 0;
7452 ace.ace_flags = 0;
7453 s = sbuf;
7454 slen = sizeof(sbuf);
7455 nfsm_chain_cleanup(&nmreq);
7456 nfsm_chain_cleanup(&nmrep);
7457 if (anp) {
7458 vnode_put(NFSTOV(anp));
7459 *anpp = anp = NULL;
7460 }
7461 hadattrdir = (adnp != NULL);
7462 if (noopbusy) {
7463 nfs_open_owner_clear_busy(noop);
7464 noopbusy = 0;
7465 }
7466 goto restart;
7467 }
7468 if (noop) {
7469 if (noopbusy) {
7470 nfs_open_owner_clear_busy(noop);
7471 noopbusy = 0;
7472 }
7473 nfs_open_owner_rele(noop);
7474 }
7475 }
7476 if (!error && prefetch && nmrep.nmc_mhead) {
7477 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
7478 nfsm_chain_op_check(error, &nmrep, NFS_OP_NVERIFY);
7479 nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
7480 nfsm_chain_get_32(error, &nmrep, eof);
7481 nfsm_chain_get_32(error, &nmrep, retlen);
7482 if (!error && anp) {
7483 /*
7484 * There can be one problem with doing the prefetch.
7485 * Because we don't have the node before we start the RPC, we
7486 * can't have the buffer busy while the READ is performed.
7487 * So there is a chance that other I/O occured on the same
7488 * range of data while we were performing this RPC. If that
7489 * happens, then it's possible the data we have in the READ
7490 * response is no longer up to date.
7491 * Once we have the node and the buffer, we need to make sure
7492 * that there's no chance we could be putting stale data in
7493 * the buffer.
7494 * So, we check if the range read is dirty or if any I/O may
7495 * have occured on it while we were performing our RPC.
7496 */
7497 struct nfsbuf *bp = NULL;
7498 int lastpg;
7499 uint32_t pagemask;
7500
7501 retlen = MIN(retlen, rlen);
7502
7503 /* check if node needs size update or invalidation */
7504 if (ISSET(anp->n_flag, NUPDATESIZE))
7505 nfs_data_update_size(anp, 0);
7506 if (!(error = nfs_node_lock(anp))) {
7507 if (anp->n_flag & NNEEDINVALIDATE) {
7508 anp->n_flag &= ~NNEEDINVALIDATE;
7509 nfs_node_unlock(anp);
7510 error = nfs_vinvalbuf(NFSTOV(anp), V_SAVE|V_IGNORE_WRITEERR, ctx, 1);
7511 if (!error) /* lets play it safe and just drop the data */
7512 error = EIO;
7513 } else {
7514 nfs_node_unlock(anp);
7515 }
7516 }
2d21ac55 7517
6d2010ae
A
7518 /* calculate page mask for the range of data read */
7519 lastpg = (trunc_page_32(retlen) - 1) / PAGE_SIZE;
7520 pagemask = ((1 << (lastpg + 1)) - 1);
7521
7522 if (!error)
7523 error = nfs_buf_get(anp, 0, nmp->nm_biosize, thd, NBLK_READ|NBLK_NOWAIT, &bp);
7524 /* don't save the data if dirty or potential I/O conflict */
7525 if (!error && bp && !bp->nb_dirtyoff && !(bp->nb_dirty & pagemask) &&
7526 timevalcmp(&anp->n_lastio, &now, <)) {
7527 OSAddAtomic(1, &nfsstats.read_bios);
7528 CLR(bp->nb_flags, (NB_DONE|NB_ASYNC));
7529 SET(bp->nb_flags, NB_READ);
7530 NFS_BUF_MAP(bp);
7531 nfsm_chain_get_opaque(error, &nmrep, retlen, bp->nb_data);
7532 if (error) {
7533 bp->nb_error = error;
7534 SET(bp->nb_flags, NB_ERROR);
7535 } else {
7536 bp->nb_offio = 0;
7537 bp->nb_endio = rlen;
7538 if ((retlen > 0) && (bp->nb_endio < (int)retlen))
7539 bp->nb_endio = retlen;
7540 if (eof || (retlen == 0)) {
7541 /* zero out the remaining data (up to EOF) */
7542 off_t rpcrem, eofrem, rem;
7543 rpcrem = (rlen - retlen);
7544 eofrem = anp->n_size - (NBOFF(bp) + retlen);
7545 rem = (rpcrem < eofrem) ? rpcrem : eofrem;
7546 if (rem > 0)
7547 bzero(bp->nb_data + retlen, rem);
7548 } else if ((retlen < rlen) && !ISSET(bp->nb_flags, NB_ERROR)) {
7549 /* ugh... short read ... just invalidate for now... */
7550 SET(bp->nb_flags, NB_INVAL);
7551 }
7552 }
7553 nfs_buf_read_finish(bp);
7554 microuptime(&anp->n_lastio);
7555 }
7556 if (bp)
7557 nfs_buf_release(bp, 1);
2d21ac55 7558 }
6d2010ae 7559 error = 0; /* ignore any transient error in processing the prefetch */
2d21ac55 7560 }
6d2010ae
A
7561 if (adnp && !adbusyerror) {
7562 nfs_node_clear_busy(adnp);
7563 adbusyerror = ENOENT;
7564 }
7565 if (!busyerror) {
7566 nfs_node_clear_busy(np);
7567 busyerror = ENOENT;
7568 }
7569 if (adnp)
7570 vnode_put(NFSTOV(adnp));
7571 if (error && *anpp) {
7572 vnode_put(NFSTOV(*anpp));
7573 *anpp = NULL;
7574 }
7575 nfsm_chain_cleanup(&nmreq);
7576 nfsm_chain_cleanup(&nmrep);
7577 return (error);
7578}
7579
7580/*
7581 * Remove a named attribute.
7582 */
7583int
7584nfs4_named_attr_remove(nfsnode_t np, nfsnode_t anp, const char *name, vfs_context_t ctx)
7585{
7586 nfsnode_t adnp = NULL;
7587 struct nfsmount *nmp;
7588 struct componentname cn;
7589 struct vnop_remove_args vra;
7590 int error, putanp = 0;
7591
7592 nmp = NFSTONMP(np);
7593 if (!nmp)
7594 return (ENXIO);
7595
7596 bzero(&cn, sizeof(cn));
7597 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
7598 cn.cn_namelen = strlen(name);
7599 cn.cn_nameiop = DELETE;
7600 cn.cn_flags = 0;
7601
7602 if (!anp) {
7603 error = nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_NONE,
7604 0, ctx, &anp, NULL);
7605 if ((!error && !anp) || (error == ENOATTR))
7606 error = ENOENT;
7607 if (error) {
7608 if (anp) {
7609 vnode_put(NFSTOV(anp));
7610 anp = NULL;
7611 }
7612 goto out;
2d21ac55 7613 }
6d2010ae
A
7614 putanp = 1;
7615 }
7616
7617 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx))))
7618 goto out;
7619 adnp = nfs4_named_attr_dir_get(np, 1, ctx);
7620 nfs_node_clear_busy(np);
7621 if (!adnp) {
7622 error = ENOENT;
7623 goto out;
2d21ac55 7624 }
6d2010ae
A
7625
7626 vra.a_desc = &vnop_remove_desc;
7627 vra.a_dvp = NFSTOV(adnp);
7628 vra.a_vp = NFSTOV(anp);
7629 vra.a_cnp = &cn;
7630 vra.a_flags = 0;
7631 vra.a_context = ctx;
7632 error = nfs_vnop_remove(&vra);
7633out:
7634 if (adnp)
7635 vnode_put(NFSTOV(adnp));
7636 if (putanp)
7637 vnode_put(NFSTOV(anp));
2d21ac55
A
7638 return (error);
7639}
7640
7641int
6d2010ae
A
7642nfs4_vnop_getxattr(
7643 struct vnop_getxattr_args /* {
2d21ac55 7644 struct vnodeop_desc *a_desc;
6d2010ae
A
7645 vnode_t a_vp;
7646 const char * a_name;
7647 uio_t a_uio;
7648 size_t *a_size;
7649 int a_options;
2d21ac55
A
7650 vfs_context_t a_context;
7651 } */ *ap)
7652{
6d2010ae 7653 vfs_context_t ctx = ap->a_context;
2d21ac55 7654 struct nfsmount *nmp;
6d2010ae
A
7655 struct nfs_vattr nvattr;
7656 struct componentname cn;
7657 nfsnode_t anp;
7658 int error = 0, isrsrcfork;
2d21ac55 7659
6d2010ae 7660 nmp = VTONMP(ap->a_vp);
2d21ac55
A
7661 if (!nmp)
7662 return (ENXIO);
7663
6d2010ae 7664 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
2d21ac55 7665 return (ENOTSUP);
6d2010ae
A
7666 error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED);
7667 if (error)
7668 return (error);
7669 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
7670 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS))
7671 return (ENOATTR);
7672
7673 bzero(&cn, sizeof(cn));
7674 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
7675 cn.cn_namelen = strlen(ap->a_name);
7676 cn.cn_nameiop = LOOKUP;
7677 cn.cn_flags = MAKEENTRY;
7678
7679 /* we'll normally try to prefetch data for xattrs... the resource fork is really a stream */
7680 isrsrcfork = (bcmp(ap->a_name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
7681
7682 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
7683 !isrsrcfork ? NFS_GET_NAMED_ATTR_PREFETCH : 0, ctx, &anp, NULL);
7684 if ((!error && !anp) || (error == ENOENT))
7685 error = ENOATTR;
7686 if (!error) {
7687 if (ap->a_uio)
7688 error = nfs_bioread(anp, ap->a_uio, 0, ctx);
7689 else
7690 *ap->a_size = anp->n_size;
2d21ac55 7691 }
6d2010ae
A
7692 if (anp)
7693 vnode_put(NFSTOV(anp));
7694 return (error);
7695}
2d21ac55 7696
6d2010ae
A
/*
 * Set a named attribute (xattr) on an NFSv4 file.
 *
 * The xattr is created/opened as a named-attribute node, the data is written
 * through the normal write vnop and flushed, then the node is closed.
 * FinderInfo gets special handling: it must be exactly 32 bytes, and setting
 * an all-zero FinderInfo means "remove it".
 */
int
nfs4_vnop_setxattr(
	struct vnop_setxattr_args /* {
		struct vnodeop_desc *a_desc;
		vnode_t a_vp;
		const char * a_name;
		uio_t a_uio;
		int a_options;
		vfs_context_t a_context;
	} */ *ap)
{
	vfs_context_t ctx = ap->a_context;
	int options = ap->a_options;
	uio_t uio = ap->a_uio;
	const char *name = ap->a_name;
	struct nfsmount *nmp;
	struct componentname cn;
	nfsnode_t anp = NULL;
	int error = 0, closeerror = 0, flags, isrsrcfork, isfinderinfo, empty = 0, i;
#define FINDERINFOSIZE 32
	uint8_t finfo[FINDERINFOSIZE];
	uint32_t *finfop;
	struct nfs_open_file *nofp = NULL;
	char uio_buf [ UIO_SIZEOF(1) ];
	uio_t auio;
	struct vnop_write_args vwa;

	nmp = VTONMP(ap->a_vp);
	if (!nmp)
		return (ENXIO);

	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
		return (ENOTSUP);

	/* XATTR_CREATE and XATTR_REPLACE are mutually exclusive */
	if ((options & XATTR_CREATE) && (options & XATTR_REPLACE))
		return (EINVAL);

	/* XXX limitation based on need to back up uio on short write */
	if (uio_iovcnt(uio) > 1) {
		printf("nfs4_vnop_setxattr: iovcnt > 1\n");
		return (EINVAL);
	}

	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
	cn.cn_namelen = strlen(name);
	cn.cn_nameiop = CREATE;
	cn.cn_flags = MAKEENTRY;

	isfinderinfo = (bcmp(name, XATTR_FINDERINFO_NAME, sizeof(XATTR_FINDERINFO_NAME)) == 0);
	isrsrcfork = isfinderinfo ? 0 : (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
	/* ordinary xattrs are set whole-value; only the resource fork honors the uio offset */
	if (!isrsrcfork)
		uio_setoffset(uio, 0);
	if (isfinderinfo) {
		/* FinderInfo is a fixed 32-byte blob; copy it in up front */
		if (uio_resid(uio) != sizeof(finfo))
			return (ERANGE);
		error = uiomove((char*)&finfo, sizeof(finfo), uio);
		if (error)
			return (error);
		/* setting a FinderInfo of all zeroes means remove the FinderInfo */
		empty = 1;
		for (i=0, finfop=(uint32_t*)&finfo; i < (int)(sizeof(finfo)/sizeof(uint32_t)); i++)
			if (finfop[i]) {
				empty = 0;
				break;
			}
		if (empty && !(options & (XATTR_CREATE|XATTR_REPLACE))) {
			/* plain "set empty" -> just remove; a missing FinderInfo is fine */
			error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
			if (error == ENOENT)
				error = 0;
			return (error);
		}
		/* first, let's see if we get a create/replace error */
	}

	/*
	 * create/open the xattr
	 *
	 * We need to make sure not to create it if XATTR_REPLACE.
	 * For all xattrs except the resource fork, we also want to
	 * truncate the xattr to remove any current data.  We'll do
	 * that by setting the size to 0 on create/open.
	 */
	flags = 0;
	if (!(options & XATTR_REPLACE))
		flags |= NFS_GET_NAMED_ATTR_CREATE;
	if (options & XATTR_CREATE)
		flags |= NFS_GET_NAMED_ATTR_CREATE_GUARDED;
	if (!isrsrcfork)
		flags |= NFS_GET_NAMED_ATTR_TRUNCATE;

	error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
			flags, ctx, &anp, &nofp);
	if (!error && !anp)
		error = ENOATTR;
	if (error)
		goto out;
	/* grab the open state from the get/create/open */
	if (nofp && !(error = nfs_open_file_set_busy(nofp, NULL))) {
		/* clear create-initiated markers now that we own this open */
		nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
		nofp->nof_creator = NULL;
		nfs_open_file_clear_busy(nofp);
	}

	/* Setting an empty FinderInfo really means remove it, skip to the close/remove */
	if (isfinderinfo && empty)
		goto doclose;

	/*
	 * Write the data out and flush.
	 *
	 * For FinderInfo, we've already copied the data to finfo, so do I/O from there.
	 */
	vwa.a_desc = &vnop_write_desc;
	vwa.a_vp = NFSTOV(anp);
	vwa.a_uio = NULL;
	vwa.a_ioflag = 0;
	vwa.a_context = ctx;
	if (isfinderinfo) {
		auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE, &uio_buf, sizeof(uio_buf));
		uio_addiov(auio, (uintptr_t)&finfo, sizeof(finfo));
		vwa.a_uio = auio;
	} else if (uio_resid(uio) > 0) {
		vwa.a_uio = uio;
	}
	if (vwa.a_uio) {
		error = nfs_vnop_write(&vwa);
		if (!error)
			error = nfs_flush(anp, MNT_WAIT, vfs_context_thread(ctx), 0);
	}
doclose:
	/* Close the xattr. */
	if (nofp) {
		int busyerror = nfs_open_file_set_busy(nofp, NULL);
		closeerror = nfs_close(anp, nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, ctx);
		if (!busyerror)
			nfs_open_file_clear_busy(nofp);
	}
	if (!error && isfinderinfo && empty) { /* Setting an empty FinderInfo really means remove it */
		error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
		if (error == ENOENT)
			error = 0;
	}
	/* a write/remove error takes precedence over a close error */
	if (!error)
		error = closeerror;
out:
	if (anp)
		vnode_put(NFSTOV(anp));
	/* callers of setxattr expect ENOATTR, not ENOENT */
	if (error == ENOENT)
		error = ENOATTR;
	return (error);
}
7849
7850int
6d2010ae
A
7851nfs4_vnop_removexattr(
7852 struct vnop_removexattr_args /* {
2d21ac55 7853 struct vnodeop_desc *a_desc;
6d2010ae
A
7854 vnode_t a_vp;
7855 const char * a_name;
7856 int a_options;
2d21ac55
A
7857 vfs_context_t a_context;
7858 } */ *ap)
7859{
6d2010ae 7860 struct nfsmount *nmp = VTONMP(ap->a_vp);
2d21ac55
A
7861 int error;
7862
6d2010ae
A
7863 if (!nmp)
7864 return (ENXIO);
7865 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
7866 return (ENOTSUP);
7867
7868 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), NULL, ap->a_name, ap->a_context);
7869 if (error == ENOENT)
7870 error = ENOATTR;
2d21ac55
A
7871 return (error);
7872}
7873
/*
 * List the named attributes (xattrs) of a file.
 *
 * Walks the hidden named-attribute directory via the NFS directory buffer
 * cache, appending each non-protected entry name (NUL-terminated) to the
 * caller's uio — or, when no uio is supplied, just totalling the required
 * size into *a_size.
 */
int
nfs4_vnop_listxattr(
	struct vnop_listxattr_args /* {
		struct vnodeop_desc *a_desc;
		vnode_t a_vp;
		uio_t a_uio;
		size_t *a_size;
		int a_options;
		vfs_context_t a_context;
	} */ *ap)
{
	vfs_context_t ctx = ap->a_context;
	nfsnode_t np = VTONFS(ap->a_vp);
	uio_t uio = ap->a_uio;
	nfsnode_t adnp = NULL;
	struct nfsmount *nmp;
	int error, done, i;
	struct nfs_vattr nvattr;
	uint64_t cookie, nextcookie, lbn = 0;
	struct nfsbuf *bp = NULL;
	struct nfs_dir_buf_header *ndbhp;
	struct direntry *dp;

	nmp = VTONMP(ap->a_vp);
	if (!nmp)
		return (ENXIO);

	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
		return (ENOTSUP);

	/* if attributes say the file has no named attrs, the list is empty */
	error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED);
	if (error)
		return (error);
	if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
	    !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS))
		return (0);

	if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx))))
		return (error);
	adnp = nfs4_named_attr_dir_get(np, 1, ctx);
	nfs_node_clear_busy(np);
	/* no attribute directory -> nothing to list; error is still 0 here */
	if (!adnp)
		goto out;

	if ((error = nfs_node_lock(adnp)))
		goto out;

	/* flush stale directory buffers before reading, reacquiring the lock after */
	if (adnp->n_flag & NNEEDINVALIDATE) {
		adnp->n_flag &= ~NNEEDINVALIDATE;
		nfs_invaldir(adnp);
		nfs_node_unlock(adnp);
		error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
		if (!error)
			error = nfs_node_lock(adnp);
		if (error)
			goto out;
	}

	/*
	 * check for need to invalidate when (re)starting at beginning
	 */
	if (adnp->n_flag & NMODIFIED) {
		nfs_invaldir(adnp);
		nfs_node_unlock(adnp);
		if ((error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1)))
			goto out;
	} else {
		nfs_node_unlock(adnp);
	}
	/* nfs_getattr() will check changed and purge caches */
	if ((error = nfs_getattr(adnp, &nvattr, ctx, NGA_UNCACHED)))
		goto out;

	/* caller gave us a buffer with no room: nothing to copy */
	if (uio && (uio_resid(uio) == 0))
		goto out;

	done = 0;
	nextcookie = lbn = 0;

	/* walk the directory one buffer block at a time */
	while (!error && !done) {
		OSAddAtomic(1, &nfsstats.biocache_readdirs);
		cookie = nextcookie;
getbuffer:
		error = nfs_buf_get(adnp, lbn, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
		if (error)
			goto out;
		ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
		if (!ISSET(bp->nb_flags, NB_CACHE) || !ISSET(ndbhp->ndbh_flags, NDB_FULL)) {
			if (!ISSET(bp->nb_flags, NB_CACHE)) { /* initialize the buffer */
				ndbhp->ndbh_flags = 0;
				ndbhp->ndbh_count = 0;
				ndbhp->ndbh_entry_end = sizeof(*ndbhp);
				ndbhp->ndbh_ncgen = adnp->n_ncgen;
			}
			error = nfs_buf_readdir(bp, ctx);
			/* buffer was dropped while reading - grab it again */
			if (error == NFSERR_DIRBUFDROPPED)
				goto getbuffer;
			if (error)
				nfs_buf_release(bp, 1);
			if (error && (error != ENXIO) && (error != ETIMEDOUT) && (error != EINTR) && (error != ERESTART)) {
				/* hard readdir failure: toss cached directory state */
				if (!nfs_node_lock(adnp)) {
					nfs_invaldir(adnp);
					nfs_node_unlock(adnp);
				}
				nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
				if (error == NFSERR_BAD_COOKIE)
					error = ENOENT;
			}
			if (error)
				goto out;
		}

		/* go through all the entries copying/counting */
		dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
		for (i=0; i < ndbhp->ndbh_count; i++) {
			if (!xattr_protected(dp->d_name)) {
				if (uio == NULL) {
					/* size-only query: count name + NUL */
					*ap->a_size += dp->d_namlen + 1;
				} else if (uio_resid(uio) < (dp->d_namlen + 1)) {
					error = ERANGE;
				} else {
					error = uiomove(dp->d_name, dp->d_namlen+1, uio);
					if (error && (error != EFAULT))
						error = ERANGE;
				}
			}
			nextcookie = dp->d_seekoff;
			dp = NFS_DIRENTRY_NEXT(dp);
		}

		if (i == ndbhp->ndbh_count) {
			/* hit end of buffer, move to next buffer */
			lbn = nextcookie;
			/* if we also hit EOF, we're done */
			if (ISSET(ndbhp->ndbh_flags, NDB_EOF))
				done = 1;
		}
		/* sanity check: a non-advancing cookie would loop forever */
		if (!error && !done && (nextcookie == cookie)) {
			printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie, i, ndbhp->ndbh_count);
			error = EIO;
		}
		nfs_buf_release(bp, 1);
	}
out:
	if (adnp)
		vnode_put(NFSTOV(adnp));
	return (error);
}
8022
6d2010ae 8023#if NAMEDSTREAMS
2d21ac55 8024int
6d2010ae
A
8025nfs4_vnop_getnamedstream(
8026 struct vnop_getnamedstream_args /* {
2d21ac55
A
8027 struct vnodeop_desc *a_desc;
8028 vnode_t a_vp;
6d2010ae
A
8029 vnode_t *a_svpp;
8030 const char *a_name;
8031 enum nsoperation a_operation;
8032 int a_flags;
2d21ac55
A
8033 vfs_context_t a_context;
8034 } */ *ap)
8035{
8036 vfs_context_t ctx = ap->a_context;
2d21ac55 8037 struct nfsmount *nmp;
6d2010ae
A
8038 struct nfs_vattr nvattr;
8039 struct componentname cn;
8040 nfsnode_t anp;
8041 int error = 0;
2d21ac55 8042
6d2010ae 8043 nmp = VTONMP(ap->a_vp);
2d21ac55
A
8044 if (!nmp)
8045 return (ENXIO);
2d21ac55 8046
6d2010ae
A
8047 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
8048 return (ENOTSUP);
8049 error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED);
8050 if (error)
2d21ac55 8051 return (error);
6d2010ae
A
8052 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8053 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS))
8054 return (ENOATTR);
2d21ac55 8055
6d2010ae
A
8056 bzero(&cn, sizeof(cn));
8057 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8058 cn.cn_namelen = strlen(ap->a_name);
8059 cn.cn_nameiop = LOOKUP;
8060 cn.cn_flags = MAKEENTRY;
8061
8062 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
8063 0, ctx, &anp, NULL);
8064 if ((!error && !anp) || (error == ENOENT))
8065 error = ENOATTR;
8066 if (!error && anp)
8067 *ap->a_svpp = NFSTOV(anp);
8068 else if (anp)
8069 vnode_put(NFSTOV(anp));
2d21ac55
A
8070 return (error);
8071}
8072
8073int
6d2010ae
A
8074nfs4_vnop_makenamedstream(
8075 struct vnop_makenamedstream_args /* {
2d21ac55 8076 struct vnodeop_desc *a_desc;
6d2010ae 8077 vnode_t *a_svpp;
2d21ac55 8078 vnode_t a_vp;
6d2010ae
A
8079 const char *a_name;
8080 int a_flags;
2d21ac55
A
8081 vfs_context_t a_context;
8082 } */ *ap)
8083{
8084 vfs_context_t ctx = ap->a_context;
6d2010ae
A
8085 struct nfsmount *nmp;
8086 struct componentname cn;
8087 nfsnode_t anp;
2d21ac55 8088 int error = 0;
2d21ac55 8089
6d2010ae
A
8090 nmp = VTONMP(ap->a_vp);
8091 if (!nmp)
8092 return (ENXIO);
2d21ac55 8093
6d2010ae
A
8094 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
8095 return (ENOTSUP);
2d21ac55 8096
6d2010ae
A
8097 bzero(&cn, sizeof(cn));
8098 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8099 cn.cn_namelen = strlen(ap->a_name);
8100 cn.cn_nameiop = CREATE;
8101 cn.cn_flags = MAKEENTRY;
8102
8103 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
8104 NFS_GET_NAMED_ATTR_CREATE, ctx, &anp, NULL);
8105 if ((!error && !anp) || (error == ENOENT))
8106 error = ENOATTR;
8107 if (!error && anp)
8108 *ap->a_svpp = NFSTOV(anp);
8109 else if (anp)
8110 vnode_put(NFSTOV(anp));
8111 return (error);
8112}
2d21ac55 8113
6d2010ae
A
8114int
8115nfs4_vnop_removenamedstream(
8116 struct vnop_removenamedstream_args /* {
8117 struct vnodeop_desc *a_desc;
8118 vnode_t a_vp;
8119 vnode_t a_svp;
8120 const char *a_name;
8121 int a_flags;
8122 vfs_context_t a_context;
8123 } */ *ap)
8124{
8125 struct nfsmount *nmp = VTONMP(ap->a_vp);
8126 nfsnode_t np = ap->a_vp ? VTONFS(ap->a_vp) : NULL;
8127 nfsnode_t anp = ap->a_svp ? VTONFS(ap->a_svp) : NULL;
2d21ac55 8128
6d2010ae
A
8129 if (!nmp)
8130 return (ENXIO);
2d21ac55
A
8131
8132 /*
6d2010ae
A
8133 * Given that a_svp is a named stream, checking for
8134 * named attribute support is kinda pointless.
2d21ac55 8135 */
6d2010ae
A
8136 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
8137 return (ENOTSUP);
8138
8139 return (nfs4_named_attr_remove(np, anp, ap->a_name, ap->a_context));
2d21ac55
A
8140}
8141
6d2010ae 8142#endif